
TOMOYO Linux Cross Reference
Linux/net/core/page_pool.c


  1 /* SPDX-License-Identifier: GPL-2.0
  2  *
  3  * page_pool.c
  4  *      Author: Jesper Dangaard Brouer <netoptimizer@brouer.com>
  5  *      Copyright (C) 2016 Red Hat, Inc.
  6  */
  7 
  8 #include <linux/types.h>
  9 #include <linux/kernel.h>
 10 #include <linux/slab.h>
 11 #include <linux/device.h>
 12 
 13 #include <net/page_pool.h>
 14 #include <net/xdp.h>
 15 
 16 #include <linux/dma-direction.h>
 17 #include <linux/dma-mapping.h>
 18 #include <linux/page-flags.h>
 19 #include <linux/mm.h> /* for __put_page() */
 20 
 21 #include <trace/events/page_pool.h>
 22 
 23 #define DEFER_TIME (msecs_to_jiffies(1000))
 24 #define DEFER_WARN_INTERVAL (60 * HZ)
 25 
 26 static int page_pool_init(struct page_pool *pool,
 27                           const struct page_pool_params *params)
 28 {
 29         unsigned int ring_qsize = 1024; /* Default */
 30 
 31         memcpy(&pool->p, params, sizeof(pool->p));
 32 
 33         /* Validate only known flags were used */
 34         if (pool->p.flags & ~(PP_FLAG_ALL))
 35                 return -EINVAL;
 36 
 37         if (pool->p.pool_size)
 38                 ring_qsize = pool->p.pool_size;
 39 
  40         /* Sanity limit the memory that can be pinned down */
 41         if (ring_qsize > 32768)
 42                 return -E2BIG;
 43 
 44         /* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
  45          * DMA_BIDIRECTIONAL allows the page to also be used for DMA
  46          * sending, which is the XDP_TX use-case.
 47          */
 48         if (pool->p.flags & PP_FLAG_DMA_MAP) {
 49                 if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
 50                     (pool->p.dma_dir != DMA_BIDIRECTIONAL))
 51                         return -EINVAL;
 52         }
 53 
 54         if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
 55                 /* In order to request DMA-sync-for-device the page
 56                  * needs to be mapped
 57                  */
 58                 if (!(pool->p.flags & PP_FLAG_DMA_MAP))
 59                         return -EINVAL;
 60 
 61                 if (!pool->p.max_len)
 62                         return -EINVAL;
 63 
 64                 /* pool->p.offset has to be set according to the address
 65                  * offset used by the DMA engine to start copying rx data
 66                  */
 67         }
 68 
 69         if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
 70                 return -ENOMEM;
 71 
 72         atomic_set(&pool->pages_state_release_cnt, 0);
 73 
  74         /* The driver calling page_pool_create() must also call page_pool_destroy() */
 75         refcount_set(&pool->user_cnt, 1);
 76 
 77         if (pool->p.flags & PP_FLAG_DMA_MAP)
 78                 get_device(pool->p.dev);
 79 
 80         return 0;
 81 }
 82 
 83 struct page_pool *page_pool_create(const struct page_pool_params *params)
 84 {
 85         struct page_pool *pool;
 86         int err;
 87 
 88         pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
 89         if (!pool)
 90                 return ERR_PTR(-ENOMEM);
 91 
 92         err = page_pool_init(pool, params);
 93         if (err < 0) {
 94                 pr_warn("%s() gave up with errno %d\n", __func__, err);
 95                 kfree(pool);
 96                 return ERR_PTR(err);
 97         }
 98 
 99         return pool;
100 }
101 EXPORT_SYMBOL(page_pool_create);
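
As a reference for the validation rules in page_pool_init() above, a minimal sketch of how a driver might fill in struct page_pool_params and create a pool; the ring size, NUMA node and device pointer below are illustrative assumptions, not values taken from this file.

/* Sketch (not from this file): order-0 RX pool that DMA-maps pages
 * and syncs them for the device before reuse.
 */
static struct page_pool *example_create_rx_pool(struct device *dev, int nid)
{
        struct page_pool_params pp_params = {
                .flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
                .order     = 0,
                .pool_size = 256,             /* must stay <= 32768 */
                .nid       = nid,
                .dev       = dev,             /* required for PP_FLAG_DMA_MAP */
                .dma_dir   = DMA_FROM_DEVICE, /* or DMA_BIDIRECTIONAL for XDP_TX */
                .max_len   = PAGE_SIZE,       /* sync length for DMA_SYNC_DEV */
                .offset    = 0,               /* where the HW starts writing */
        };

        return page_pool_create(&pp_params);  /* ERR_PTR() on failure */
}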
102 
103 static void page_pool_return_page(struct page_pool *pool, struct page *page);
104 
105 noinline
106 static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
107 {
108         struct ptr_ring *r = &pool->ring;
109         struct page *page;
110         int pref_nid; /* preferred NUMA node */
111 
112         /* Quicker fallback, avoid locks when ring is empty */
113         if (__ptr_ring_empty(r))
114                 return NULL;
115 
116         /* Softirq guarantees the CPU, and thus the NUMA node, is stable.
117          * This assumes the CPU refilling the driver RX-ring also runs RX-NAPI.
118          */
119 #ifdef CONFIG_NUMA
120         pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
121 #else
122         /* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
123         pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */
124 #endif
125 
126         /* Slower-path: Get pages from locked ring queue */
127         spin_lock(&r->consumer_lock);
128 
129         /* Refill alloc array, but only if NUMA match */
130         do {
131                 page = __ptr_ring_consume(r);
132                 if (unlikely(!page))
133                         break;
134 
135                 if (likely(page_to_nid(page) == pref_nid)) {
136                         pool->alloc.cache[pool->alloc.count++] = page;
137                 } else {
138                         /* NUMA mismatch;
139                          * (1) release 1 page to page-allocator and
140                          * (2) break out and fall through to alloc_pages_node.
141                          * This limits stress on the page buddy allocator.
142                          */
143                         page_pool_return_page(pool, page);
144                         page = NULL;
145                         break;
146                 }
147         } while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);
148 
149         /* Return last page */
150         if (likely(pool->alloc.count > 0))
151                 page = pool->alloc.cache[--pool->alloc.count];
152 
153         spin_unlock(&r->consumer_lock);
154         return page;
155 }
156 
157 /* fast path */
158 static struct page *__page_pool_get_cached(struct page_pool *pool)
159 {
160         struct page *page;
161 
162         /* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
163         if (likely(pool->alloc.count)) {
164                 /* Fast-path */
165                 page = pool->alloc.cache[--pool->alloc.count];
166         } else {
167                 page = page_pool_refill_alloc_cache(pool);
168         }
169 
170         return page;
171 }
172 
173 static void page_pool_dma_sync_for_device(struct page_pool *pool,
174                                           struct page *page,
175                                           unsigned int dma_sync_size)
176 {
177         dma_addr_t dma_addr = page_pool_get_dma_addr(page);
178 
179         dma_sync_size = min(dma_sync_size, pool->p.max_len);
180         dma_sync_single_range_for_device(pool->p.dev, dma_addr,
181                                          pool->p.offset, dma_sync_size,
182                                          pool->p.dma_dir);
183 }
184 
185 /* slow path */
186 noinline
187 static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
188                                                  gfp_t _gfp)
189 {
190         struct page *page;
191         gfp_t gfp = _gfp;
192         dma_addr_t dma;
193 
194         /* We could always set __GFP_COMP, and avoid this branch, as
195          * prep_new_page() can handle order-0 with __GFP_COMP.
196          */
197         if (pool->p.order)
198                 gfp |= __GFP_COMP;
199 
200         /* FUTURE development:
201          *
202          * Current slow-path essentially falls back to single page
203          * allocations, which doesn't improve performance.  This code
204          * needs bulk allocation support from the page allocator code.
205          */
206 
207         /* Cache was empty, do real allocation */
208 #ifdef CONFIG_NUMA
209         page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
210 #else
211         page = alloc_pages(gfp, pool->p.order);
212 #endif
213         if (!page)
214                 return NULL;
215 
216         if (!(pool->p.flags & PP_FLAG_DMA_MAP))
217                 goto skip_dma_map;
218 
219         /* Setup DMA mapping: use 'struct page' area for storing DMA-addr
220          * since dma_addr_t can be either 32 or 64 bits and does not always fit
221          * into page private data (i.e. a 32-bit CPU with 64-bit DMA caps).
222          * This mapping is kept for the lifetime of the page, until it leaves the pool.
223          */
224         dma = dma_map_page_attrs(pool->p.dev, page, 0,
225                                  (PAGE_SIZE << pool->p.order),
226                                  pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
227         if (dma_mapping_error(pool->p.dev, dma)) {
228                 put_page(page);
229                 return NULL;
230         }
231         page_pool_set_dma_addr(page, dma);
232 
233         if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
234                 page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
235 
236 skip_dma_map:
237         /* Track how many pages are held 'in-flight' */
238         pool->pages_state_hold_cnt++;
239 
240         trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
241 
242         /* A page that was just allocated should/must have refcnt == 1. */
243         return page;
244 }
245 
246 /* For using page_pool to replace alloc_pages() API calls, but provide a
247  * synchronization guarantee for the allocation side.
248  */
249 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
250 {
251         struct page *page;
252 
253         /* Fast-path: Get a page from cache */
254         page = __page_pool_get_cached(pool);
255         if (page)
256                 return page;
257 
258         /* Slow-path: cache empty, do real allocation */
259         page = __page_pool_alloc_pages_slow(pool, gfp);
260         return page;
261 }
262 EXPORT_SYMBOL(page_pool_alloc_pages);
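
A hedged sketch of the allocation-side caller: only the page_pool_alloc_pages() and page_pool_get_dma_addr() calls are taken from this API; the ring structure and the descriptor-programming step are hypothetical driver pieces.

struct example_rx_ring {                        /* hypothetical driver state */
        struct page_pool *pool;
};

/* Refill one RX buffer; called from NAPI/softirq context. */
static int example_rx_refill_one(struct example_rx_ring *ring)
{
        struct page *page;
        dma_addr_t dma;

        page = page_pool_alloc_pages(ring->pool, GFP_ATOMIC | __GFP_NOWARN);
        if (!page)
                return -ENOMEM;

        /* With PP_FLAG_DMA_MAP the pool has already mapped the page */
        dma = page_pool_get_dma_addr(page);
        /* ...program 'dma' into the (hypothetical) HW descriptor here... */
        (void)dma;

        return 0;
}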
263 
264 /* Calculate the distance between two u32 values, valid if the distance is below 2^31
265  *  https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
266  */
267 #define _distance(a, b) (s32)((a) - (b))
268 
269 static s32 page_pool_inflight(struct page_pool *pool)
270 {
271         u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
272         u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
273         s32 inflight;
274 
275         inflight = _distance(hold_cnt, release_cnt);
276 
277         trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
278         WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);
279 
280         return inflight;
281 }
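
A worked example of the wrap-safe counter distance used above; the counter values are made up to show the wrap case.

/* hold_cnt has wrapped past U32_MAX to 2 while release_cnt is still
 * 0xfffffffd: the u32 subtraction wraps and the s32 cast recovers the
 * true count of 5 in-flight pages.
 */
u32 hold_cnt = 0x00000002, release_cnt = 0xfffffffd;
s32 inflight = _distance(hold_cnt, release_cnt);        /* == 5 */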
282 
283 /* Disconnects a page from a page_pool.  API users may need to
284  * disconnect a page from the pool, to allow it to be used as
285  * a regular page (that will eventually be returned to the normal
286  * page-allocator via put_page).
287  */
288 void page_pool_release_page(struct page_pool *pool, struct page *page)
289 {
290         dma_addr_t dma;
291         int count;
292 
293         if (!(pool->p.flags & PP_FLAG_DMA_MAP))
294                 /* Always account for inflight pages, even if we didn't
295                  * map them
296                  */
297                 goto skip_dma_unmap;
298 
299         dma = page_pool_get_dma_addr(page);
300 
301         /* When page is unmapped, it cannot be returned to our pool */
302         dma_unmap_page_attrs(pool->p.dev, dma,
303                              PAGE_SIZE << pool->p.order, pool->p.dma_dir,
304                              DMA_ATTR_SKIP_CPU_SYNC);
305         page_pool_set_dma_addr(page, 0);
306 skip_dma_unmap:
307         /* This may be the last page returned, releasing the pool, so
308          * it is not safe to reference pool afterwards.
309          */
310         count = atomic_inc_return(&pool->pages_state_release_cnt);
311         trace_page_pool_state_release(pool, page, count);
312 }
313 EXPORT_SYMBOL(page_pool_release_page);
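
A sketch of the typical caller: a driver that hands the RX page up the stack attached to an skb. Only page_pool_release_page() comes from this file; the skb handling shown is the common driver pattern, used here as an assumption.

static void example_rx_to_skb(struct page_pool *pool, struct page *page,
                              struct sk_buff *skb, unsigned int off,
                              unsigned int len)
{
        /* Drop the pool's DMA mapping and in-flight accounting before the
         * page leaves the driver; the stack will put_page() it later.
         */
        page_pool_release_page(pool, page);
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, off, len,
                        PAGE_SIZE);
}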
314 
315 /* Return a page to the page allocator, cleaning up our state */
316 static void page_pool_return_page(struct page_pool *pool, struct page *page)
317 {
318         page_pool_release_page(pool, page);
319 
320         put_page(page);
321         /* An optimization would be to call __free_pages(page, pool->p.order)
322          * knowing page is not part of page-cache (thus avoiding a
323          * __page_cache_release() call).
324          */
325 }
326 
327 static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
328 {
329         int ret;
330         /* BH protection not needed if current is serving softirq */
331         if (in_serving_softirq())
332                 ret = ptr_ring_produce(&pool->ring, page);
333         else
334                 ret = ptr_ring_produce_bh(&pool->ring, page);
335 
336         return (ret == 0) ? true : false;
337 }
338 
339 /* Only allow direct recycling in special circumstances, into the
340  * alloc side cache.  E.g. during RX-NAPI processing for XDP_DROP use-case.
341  *
342  * Caller must provide appropriate safe context.
343  */
344 static bool page_pool_recycle_in_cache(struct page *page,
345                                        struct page_pool *pool)
346 {
347         if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
348                 return false;
349 
350         /* Caller MUST have verified/know (page_ref_count(page) == 1) */
351         pool->alloc.cache[pool->alloc.count++] = page;
352         return true;
353 }
354 
355 /* If the page refcnt == 1, this will try to recycle the page.
356  * If PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for
357  * the configured size min(dma_sync_size, pool->p.max_len).
358  * If the page refcnt != 1, then the page will be returned to the memory
359  * subsystem.
360  */
361 static __always_inline struct page *
362 __page_pool_put_page(struct page_pool *pool, struct page *page,
363                      unsigned int dma_sync_size, bool allow_direct)
364 {
365         /* This allocator is optimized for the XDP mode that uses
366          * one-frame-per-page, but has fallbacks that act like the
367          * regular page allocator APIs.
368          *
369          * refcnt == 1 means page_pool owns the page, and can recycle it.
370          *
371          * The page is NOT reusable when it was allocated while the system
372          * was under memory pressure (page_is_pfmemalloc()).
373          */
374         if (likely(page_ref_count(page) == 1 && !page_is_pfmemalloc(page))) {
375                 /* Read barrier done in page_ref_count / READ_ONCE */
376 
377                 if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
378                         page_pool_dma_sync_for_device(pool, page,
379                                                       dma_sync_size);
380 
381                 if (allow_direct && in_serving_softirq() &&
382                     page_pool_recycle_in_cache(page, pool))
383                         return NULL;
384 
385                 /* Page found as candidate for recycling */
386                 return page;
387         }
388         /* Fallback/non-XDP mode: the API user has an elevated refcnt.
389          *
390          * Many drivers split up the page into fragments, and some
391          * want to keep doing this to save memory and do refcnt-based
392          * recycling. Support this use case too, to ease drivers
393          * switching between XDP/non-XDP.
394          *
395          * In case page_pool maintains the DMA mapping, the API user must
396          * call page_pool_put_page() once.  In this elevated-refcnt
397          * case, the DMA is unmapped/released, as the driver is likely
398          * doing refcnt-based recycle tricks, meaning another entity
399          * will be invoking put_page().
400          */
401         /* Do not replace this with page_pool_return_page() */
402         page_pool_release_page(pool, page);
403         put_page(page);
404 
405         return NULL;
406 }
407 
408 void page_pool_put_page(struct page_pool *pool, struct page *page,
409                         unsigned int dma_sync_size, bool allow_direct)
410 {
411         page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
412         if (page && !page_pool_recycle_in_ring(pool, page)) {
413                 /* Cache full, fallback to free pages */
414                 page_pool_return_page(pool, page);
415         }
416 }
417 EXPORT_SYMBOL(page_pool_put_page);
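
A sketch of the direct-recycle fast path, e.g. after an XDP_DROP verdict inside RX-NAPI. Passing -1 as dma_sync_size means "sync up to pool->p.max_len", and allow_direct=true is only safe because this runs in softirq on the CPU that owns the alloc-side cache.

static void example_xdp_drop_recycle(struct page_pool *pool, struct page *page)
{
        /* -1 (== UINT_MAX) is clamped to pool->p.max_len by the sync code */
        page_pool_put_page(pool, page, -1, true);
}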
418 
419 /* Caller must not use data area after call, as this function overwrites it */
420 void page_pool_put_page_bulk(struct page_pool *pool, void **data,
421                              int count)
422 {
423         int i, bulk_len = 0;
424 
425         for (i = 0; i < count; i++) {
426                 struct page *page = virt_to_head_page(data[i]);
427 
428                 page = __page_pool_put_page(pool, page, -1, false);
429                 /* Approved for bulk recycling in ptr_ring cache */
430                 if (page)
431                         data[bulk_len++] = page;
432         }
433 
434         if (unlikely(!bulk_len))
435                 return;
436 
437         /* Bulk producer into ptr_ring page_pool cache */
438         page_pool_ring_lock(pool);
439         for (i = 0; i < bulk_len; i++) {
440                 if (__ptr_ring_produce(&pool->ring, data[i]))
441                         break; /* ring full */
442         }
443         page_pool_ring_unlock(pool);
444 
445         /* Hopefully all pages were returned into the ptr_ring */
446         if (likely(i == bulk_len))
447                 return;
448 
449         /* ptr_ring cache full, free remaining pages outside producer lock
450          * since put_page() with refcnt == 1 can be an expensive operation
451          */
452         for (; i < bulk_len; i++)
453                 page_pool_return_page(pool, data[i]);
454 }
455 EXPORT_SYMBOL(page_pool_put_page_bulk);
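
A sketch of a bulk caller, loosely modelled on how XDP completion paths batch returns; the batch size, the 'frames' array of buffer virt-addresses, and the assumption that all entries belong to the same pool are illustrative.

#define EXAMPLE_BULK_SIZE 16

static void example_complete_tx_batch(struct page_pool *pool, void **frames, int n)
{
        void *bulk[EXAMPLE_BULK_SIZE];
        int i, len = 0;

        for (i = 0; i < n; i++) {
                bulk[len++] = frames[i];        /* virt addr, e.g. xdpf->data */
                if (len == EXAMPLE_BULK_SIZE) {
                        page_pool_put_page_bulk(pool, bulk, len);
                        len = 0;
                }
        }
        if (len)
                page_pool_put_page_bulk(pool, bulk, len);
}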
456 
457 static void page_pool_empty_ring(struct page_pool *pool)
458 {
459         struct page *page;
460 
461         /* Empty recycle ring */
462         while ((page = ptr_ring_consume_bh(&pool->ring))) {
463                 /* Verify the refcnt invariant of cached pages */
464                 if (!(page_ref_count(page) == 1))
465                         pr_crit("%s() page_pool refcnt %d violation\n",
466                                 __func__, page_ref_count(page));
467 
468                 page_pool_return_page(pool, page);
469         }
470 }
471 
472 static void page_pool_free(struct page_pool *pool)
473 {
474         if (pool->disconnect)
475                 pool->disconnect(pool);
476 
477         ptr_ring_cleanup(&pool->ring, NULL);
478 
479         if (pool->p.flags & PP_FLAG_DMA_MAP)
480                 put_device(pool->p.dev);
481 
482         kfree(pool);
483 }
484 
485 static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
486 {
487         struct page *page;
488 
489         if (pool->destroy_cnt)
490                 return;
491 
492         /* Empty the alloc cache, assuming the caller made sure it is
493          * no longer in use, and that page_pool_alloc_pages() cannot be
494          * called concurrently.
495          */
496         while (pool->alloc.count) {
497                 page = pool->alloc.cache[--pool->alloc.count];
498                 page_pool_return_page(pool, page);
499         }
500 }
501 
502 static void page_pool_scrub(struct page_pool *pool)
503 {
504         page_pool_empty_alloc_cache_once(pool);
505         pool->destroy_cnt++;
506 
507         /* No more consumers should exist, but producers could still
508          * be in-flight.
509          */
510         page_pool_empty_ring(pool);
511 }
512 
513 static int page_pool_release(struct page_pool *pool)
514 {
515         int inflight;
516 
517         page_pool_scrub(pool);
518         inflight = page_pool_inflight(pool);
519         if (!inflight)
520                 page_pool_free(pool);
521 
522         return inflight;
523 }
524 
525 static void page_pool_release_retry(struct work_struct *wq)
526 {
527         struct delayed_work *dwq = to_delayed_work(wq);
528         struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
529         int inflight;
530 
531         inflight = page_pool_release(pool);
532         if (!inflight)
533                 return;
534 
535         /* Periodic warning */
536         if (time_after_eq(jiffies, pool->defer_warn)) {
537                 int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;
538 
539                 pr_warn("%s() stalled pool shutdown %d inflight %d sec\n",
540                         __func__, inflight, sec);
541                 pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
542         }
543 
544         /* Still not ready to be disconnected, retry later */
545         schedule_delayed_work(&pool->release_dw, DEFER_TIME);
546 }
547 
548 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *))
549 {
550         refcount_inc(&pool->user_cnt);
551         pool->disconnect = disconnect;
552 }
553 
554 void page_pool_destroy(struct page_pool *pool)
555 {
556         if (!pool)
557                 return;
558 
559         if (!page_pool_put(pool))
560                 return;
561 
562         if (!page_pool_release(pool))
563                 return;
564 
565         pool->defer_start = jiffies;
566         pool->defer_warn  = jiffies + DEFER_WARN_INTERVAL;
567 
568         INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
569         schedule_delayed_work(&pool->release_dw, DEFER_TIME);
570 }
571 EXPORT_SYMBOL(page_pool_destroy);
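
A sketch of the shutdown ordering as drivers commonly arrange it; xdp_rxq_info_unreg() is the usual path through which the ->disconnect() callback installed by page_pool_use_xdp_mem() runs, and both names below are assumptions about the surrounding driver, not code from this file.

static void example_rx_ring_destroy(struct xdp_rxq_info *xdp_rxq,
                                    struct page_pool *pool)
{
        xdp_rxq_info_unreg(xdp_rxq);    /* drops the XDP memory model's reference */
        page_pool_destroy(pool);        /* defers via release_dw if pages are still inflight */
}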
572 
573 /* Caller must provide appropriate safe context, e.g. NAPI. */
574 void page_pool_update_nid(struct page_pool *pool, int new_nid)
575 {
576         struct page *page;
577 
578         trace_page_pool_update_nid(pool, new_nid);
579         pool->p.nid = new_nid;
580 
581         /* Flush pool alloc cache, as refill will check NUMA node */
582         while (pool->alloc.count) {
583                 page = pool->alloc.cache[--pool->alloc.count];
584                 page_pool_return_page(pool, page);
585         }
586 }
587 EXPORT_SYMBOL(page_pool_update_nid);
588 
