
TOMOYO Linux Cross Reference
Linux/include/drm/ttm/ttm_bo_driver.h


  1 /**************************************************************************
  2  *
  3  * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
  4  * All Rights Reserved.
  5  *
  6  * Permission is hereby granted, free of charge, to any person obtaining a
  7  * copy of this software and associated documentation files (the
  8  * "Software"), to deal in the Software without restriction, including
  9  * without limitation the rights to use, copy, modify, merge, publish,
 10  * distribute, sub license, and/or sell copies of the Software, and to
 11  * permit persons to whom the Software is furnished to do so, subject to
 12  * the following conditions:
 13  *
 14  * The above copyright notice and this permission notice (including the
 15  * next paragraph) shall be included in all copies or substantial portions
 16  * of the Software.
 17  *
 18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
 25  *
 26  **************************************************************************/
 27 /*
 28  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 29  */
 30 #ifndef _TTM_BO_DRIVER_H_
 31 #define _TTM_BO_DRIVER_H_
 32 
 33 #include <drm/drm_mm.h>
 34 #include <drm/drm_global.h>
 35 #include <drm/drm_vma_manager.h>
 36 #include <linux/workqueue.h>
 37 #include <linux/fs.h>
 38 #include <linux/spinlock.h>
 39 #include <linux/reservation.h>
 40 
 41 #include "ttm_bo_api.h"
 42 #include "ttm_memory.h"
 43 #include "ttm_module.h"
 44 #include "ttm_placement.h"
 45 #include "ttm_tt.h"
 46 
 47 #define TTM_MAX_BO_PRIORITY     4U
 48 
 49 #define TTM_MEMTYPE_FLAG_FIXED         (1 << 0) /* Fixed (on-card) PCI memory */
 50 #define TTM_MEMTYPE_FLAG_MAPPABLE      (1 << 1) /* Memory mappable */
 51 #define TTM_MEMTYPE_FLAG_CMA           (1 << 3) /* Can't map aperture */
 52 
 53 struct ttm_mem_type_manager;
 54 
 55 struct ttm_mem_type_manager_func {
 56         /**
 57          * struct ttm_mem_type_manager member init
 58          *
 59          * @man: Pointer to a memory type manager.
 60          * @p_size: Implementation dependent, but typically the size of the
 61          * range to be managed in pages.
 62          *
 63          * Called to initialize a private range manager. The function is
 64          * expected to initialize the man::priv member.
 65          * Returns 0 on success, negative error code on failure.
 66          */
 67         int  (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);
 68 
 69         /**
 70          * struct ttm_mem_type_manager member takedown
 71          *
 72          * @man: Pointer to a memory type manager.
 73          *
 74          * Called to undo the setup done in init. All allocated resources
 75          * should be freed.
 76          */
 77         int  (*takedown)(struct ttm_mem_type_manager *man);
 78 
 79         /**
 80          * struct ttm_mem_type_manager member get_node
 81          *
 82          * @man: Pointer to a memory type manager.
 83          * @bo: Pointer to the buffer object we're allocating space for.
 84          * @place: Placement details, including any additional placement
 85          *         flags.
 86          * @mem: Pointer to a struct ttm_mem_reg to be filled in.
 87          *
 88          * This function should allocate space in the memory type managed
 89          * by @man. Placement details, if applicable, are given by @place.
 90          * If successful, @mem::mm_node should be set to a non-null value,
 91          * @mem::start should be set to a value identifying the beginning
 92          * of the range allocated, and the function should return zero.
 93          * If the memory region cannot accommodate the buffer object,
 94          * @mem::mm_node should be set to NULL and the function should
 95          * return 0.
 96          * If a system error occurred, preventing the request from being fulfilled,
 97          * the function should return a negative error code.
 98          *
 99          * Note that @mem::mm_node will only be dereferenced by
100          * struct ttm_mem_type_manager functions and optionally by the driver,
101          * which has knowledge of the underlying type.
102          *
103          * This function may not be called from within atomic context;
104          * an implementation must protect the data structures managing the
105          * space and may use either a mutex or a spinlock to do so.
106          */
107         int  (*get_node)(struct ttm_mem_type_manager *man,
108                          struct ttm_buffer_object *bo,
109                          const struct ttm_place *place,
110                          struct ttm_mem_reg *mem);
111 
112         /**
113          * struct ttm_mem_type_manager member put_node
114          *
115          * @man: Pointer to a memory type manager.
116          * @mem: Pointer to a struct ttm_mem_reg describing the allocation to free.
117          *
118          * This function frees memory type resources previously allocated
119          * and that are identified by @mem::mm_node and @mem::start. May not
120          * be called from within atomic context.
121          */
122         void (*put_node)(struct ttm_mem_type_manager *man,
123                          struct ttm_mem_reg *mem);
124 
125         /**
126          * struct ttm_mem_type_manager member debug
127          *
128          * @man: Pointer to a memory type manager.
129          * @printer: Pointer to a struct drm_printer to use for the output.
130          *
131          * This function is called to print out the state of the memory
132          * type manager to aid debugging of out-of-memory conditions.
133          * It may not be called from within atomic context.
134          */
135         void (*debug)(struct ttm_mem_type_manager *man,
136                       struct drm_printer *printer);
137 };
138 
139 /**
140  * struct ttm_mem_type_manager
141  *
142  * @has_type: The memory type has been initialized.
143  * @use_type: The memory type is enabled.
144  * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
145  * managed by this memory type.
146  * @gpu_offset: If used, the GPU offset of the first managed page of
147  * fixed memory or the first managed location in an aperture.
148  * @size: Size of the managed region.
149  * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
150  * as defined in ttm_placement_common.h
151  * @default_caching: The default caching policy used for a buffer object
152  * placed in this memory type if the user doesn't provide one.
153  * @func: structure pointer implementing the range manager. See above
154  * @priv: Driver private closure for @func.
155  * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
156  * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
157  * reserved by the TTM vm system.
158  * @io_reserve_lru: Optional lru list for unreserving io mem regions.
159  * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
160  * static information. bdev::driver::io_mem_free is never used.
161  * @move_lock: lock protecting the move fence.
162  * @lru: The lru list for this memory type.
163  * @move: The fence of the last pipelined move operation.
164  *
165  * This structure is used to identify and manage memory types for a device.
166  * It's set up by the ttm_bo_driver::init_mem_type method.
167  */
168 
169 
170 
171 struct ttm_mem_type_manager {
172         struct ttm_bo_device *bdev;
173 
174         /*
175          * No protection. Constant from start.
176          */
177 
178         bool has_type;
179         bool use_type;
180         uint32_t flags;
181         uint64_t gpu_offset; /* GPU address space is independent of CPU word size */
182         uint64_t size;
183         uint32_t available_caching;
184         uint32_t default_caching;
185         const struct ttm_mem_type_manager_func *func;
186         void *priv;
187         struct mutex io_reserve_mutex;
188         bool use_io_reserve_lru;
189         bool io_reserve_fastpath;
190         spinlock_t move_lock;
191 
192         /*
193          * Protected by @io_reserve_mutex:
194          */
195 
196         struct list_head io_reserve_lru;
197 
198         /*
199          * Protected by the global->lru_lock.
200          */
201 
202         struct list_head lru[TTM_MAX_BO_PRIORITY];
203 
204         /*
205          * Protected by @move_lock.
206          */
207         struct dma_fence *move;
208 };
209 
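/*
 * Illustrative sketch (not part of the original header): how a driver's
 * init_mem_type() callback (a struct ttm_bo_driver member, see below) might
 * fill in a struct ttm_mem_type_manager. Range allocation for VRAM is
 * delegated to the stock drm_mm-based ttm_bo_manager_func declared at the
 * end of this file; TTM_PL_* and the caching flags come from ttm_placement.h.
 * The hypo_ prefix marks hypothetical driver symbols.
 */
static int hypo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                              struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_SYSTEM:
                /* System memory needs no range manager and is cached. */
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                /* On-card memory: fixed and CPU-mappable. get_node()/put_node()
                 * requests are routed through man->func, here the generic
                 * range manager. The managed size is passed later through
                 * ttm_bo_init_mm(). */
                man->func = &ttm_bo_manager_func;
                man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                man->gpu_offset = 0; /* GPU address of the start of VRAM */
                break;
        default:
                return -EINVAL;
        }
        return 0;
}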
210 /**
211  * struct ttm_bo_driver
212  *
213  * @ttm_tt_create: Callback to create a struct ttm_tt backing a buffer object.
214  * @invalidate_caches: Callback to invalidate read caches when a buffer object
215  * has been evicted.
216  * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
217  * structure.
218  * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
219  * @move: Callback for a driver to hook in accelerated functions to
220  * move a buffer.
221  * If set to NULL, a potentially slow memcpy() move is used.
222  */
223 
224 struct ttm_bo_driver {
225         /**
226          * ttm_tt_create
227          *
228          * @bo: The buffer object to create the ttm for.
229          * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
230          *
231          * Create a struct ttm_tt to back data with system memory pages.
232          * No pages are actually allocated.
233          * Returns:
234          * NULL: Out of memory.
235          */
236         struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
237                                         uint32_t page_flags);
238 
239         /**
240          * ttm_tt_populate
241          *
242          * @ttm: The struct ttm_tt to contain the backing pages.
243          *
244          * Allocate all backing pages
245          * Returns:
246          * -ENOMEM: Out of memory.
247          */
248         int (*ttm_tt_populate)(struct ttm_tt *ttm,
249                         struct ttm_operation_ctx *ctx);
250 
251         /**
252          * ttm_tt_unpopulate
253          *
254          * @ttm: The struct ttm_tt to contain the backing pages.
255          *
256          * Free all backing pages.
257          */
258         void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
259 
260         /**
261          * struct ttm_bo_driver member invalidate_caches
262          *
263          * @bdev: the buffer object device.
264          * @flags: new placement of the rebound buffer object.
265          *
266          * A previously evicted buffer has been rebound in a
267          * potentially new location. Tell the driver that it might
268          * consider invalidating read (texture) caches on the next command
269          * submission as a consequence.
270          */
271 
272         int (*invalidate_caches)(struct ttm_bo_device *bdev, uint32_t flags);
273         int (*init_mem_type)(struct ttm_bo_device *bdev, uint32_t type,
274                              struct ttm_mem_type_manager *man);
275 
276         /**
277          * struct ttm_bo_driver member eviction_valuable
278          *
279          * @bo: the buffer object to be evicted
280          * @place: placement we need room for
281          *
282          * Check with the driver if it is valuable to evict a BO to make room
283          * for a certain placement.
284          */
285         bool (*eviction_valuable)(struct ttm_buffer_object *bo,
286                                   const struct ttm_place *place);
287         /**
288          * struct ttm_bo_driver member evict_flags:
289          *
290          * @bo: the buffer object to be evicted
291          *
292          * Return the bo flags for a buffer which is not mapped to the hardware.
293          * These will be placed in proposed_flags so that when the move is
294          * finished, they'll end up in bo->mem.flags
295          */
296 
297         void (*evict_flags)(struct ttm_buffer_object *bo,
298                             struct ttm_placement *placement);
299 
300         /**
301          * struct ttm_bo_driver member move:
302          *
303          * @bo: the buffer to move
304          * @evict: whether this motion is evicting the buffer from
305          * the graphics address space
306          * @ctx: context for this move with parameters
307          * @new_mem: the new memory region receiving the buffer
308          *
309          * Move a buffer between two memory regions.
310          */
311         int (*move)(struct ttm_buffer_object *bo, bool evict,
312                     struct ttm_operation_ctx *ctx,
313                     struct ttm_mem_reg *new_mem);
314 
315         /**
316          * struct ttm_bo_driver_member verify_access
317          *
318          * @bo: Pointer to a buffer object.
319          * @filp: Pointer to a struct file trying to access the object.
320          *
321          * Called from the map / write / read methods to verify that the
322          * caller is permitted to access the buffer object.
323          * This member may be set to NULL, which will refuse this kind of
324          * access for all buffer objects.
325          * This function should return 0 if access is granted, -EPERM otherwise.
326          */
327         int (*verify_access)(struct ttm_buffer_object *bo,
328                              struct file *filp);
329 
330         /**
331          * Hook to notify driver about a driver move so it
332          * can do tiling things and book-keeping.
333          *
334          * @evict: whether this move is evicting the buffer from the graphics
335          * address space
336          */
337         void (*move_notify)(struct ttm_buffer_object *bo,
338                             bool evict,
339                             struct ttm_mem_reg *new_mem);
340         /* notify the driver we are taking a fault on this BO
341          * and have reserved it */
342         int (*fault_reserve_notify)(struct ttm_buffer_object *bo);
343 
344         /**
345          * notify the driver that we're about to swap out this bo
346          */
347         void (*swap_notify)(struct ttm_buffer_object *bo);
348 
349         /**
350          * Driver callback invoked when mapping io memory (for bo_move_memcpy
351          * for instance). TTM will take care to call io_mem_free whenever
352          * the mapping is no longer used. io_mem_reserve & io_mem_free
353          * are balanced.
354          */
355         int (*io_mem_reserve)(struct ttm_bo_device *bdev,
356                               struct ttm_mem_reg *mem);
357         void (*io_mem_free)(struct ttm_bo_device *bdev,
358                             struct ttm_mem_reg *mem);
359 
360         /**
361          * Return the pfn for a given page_offset inside the BO.
362          *
363          * @bo: the BO to look up the pfn for
364          * @page_offset: the offset to look up
365          */
366         unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
367                                     unsigned long page_offset);
368 
369         /**
370          * Read/write memory buffers for ptrace access
371          *
372          * @bo: the BO to access
373          * @offset: the offset from the start of the BO
374          * @buf: pointer to source/destination buffer
375          * @len: number of bytes to copy
376          * @write: whether to read (0) from or write (non-0) to BO
377          *
378          * If successful, this function should return the number of
379          * bytes copied, -EIO otherwise. If the number of bytes
380          * returned is < len, the function may be called again with
381          * the remainder of the buffer to copy.
382          */
383         int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
384                              void *buf, int len, int write);
385 };
386 
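/*
 * Illustrative sketch (not part of the original header): a minimal
 * ttm_bo_driver for a driver without a copy engine. hypo_* names are
 * hypothetical; ttm_pool_populate()/ttm_pool_unpopulate() come from
 * drm/ttm/ttm_page_alloc.h, ttm_bo_eviction_valuable() from ttm_bo_api.h,
 * ttm_tt_init()/ttm_tt_fini() and struct ttm_backend_func from ttm_tt.h, and
 * kzalloc()/kfree() from linux/slab.h. hypo_init_mem_type() is the sketch
 * shown after struct ttm_mem_type_manager above. io_mem_reserve/io_mem_free
 * are omitted for brevity (a driver with CPU-mappable VRAM would implement
 * them to fill mem->bus), and .move is left NULL, so TTM falls back to the
 * memcpy() path mentioned above.
 */
static void hypo_ttm_backend_destroy(struct ttm_tt *tt)
{
        ttm_tt_fini(tt);
        kfree(tt);
}

static struct ttm_backend_func hypo_tt_backend_func = {
        /* .bind/.unbind are only needed if a TTM_PL_TT domain is used. */
        .destroy = hypo_ttm_backend_destroy,
};

static struct ttm_tt *hypo_ttm_tt_create(struct ttm_buffer_object *bo,
                                         uint32_t page_flags)
{
        struct ttm_tt *tt;

        tt = kzalloc(sizeof(*tt), GFP_KERNEL);
        if (!tt)
                return NULL;                    /* out of memory */
        tt->func = &hypo_tt_backend_func;
        if (ttm_tt_init(tt, bo, page_flags)) {
                kfree(tt);
                return NULL;
        }
        return tt;
}

static void hypo_evict_flags(struct ttm_buffer_object *bo,
                             struct ttm_placement *placement)
{
        /* Evict everything to cached system memory; a real driver would
         * usually consult per-BO preferred domains instead. */
        static const struct ttm_place sys_place = {
                .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
        };

        placement->placement = &sys_place;
        placement->busy_placement = &sys_place;
        placement->num_placement = 1;
        placement->num_busy_placement = 1;
}

static int hypo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        return 0;       /* allow all access; real drivers check the DRM file */
}

static struct ttm_bo_driver hypo_bo_driver = {
        .ttm_tt_create          = hypo_ttm_tt_create,
        .ttm_tt_populate        = ttm_pool_populate,
        .ttm_tt_unpopulate      = ttm_pool_unpopulate,
        .init_mem_type          = hypo_init_mem_type,
        .eviction_valuable      = ttm_bo_eviction_valuable,
        .evict_flags            = hypo_evict_flags,
        .verify_access          = hypo_verify_access,
};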
387 /**
388  * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
389  */
390 
391 struct ttm_bo_global_ref {
392         struct drm_global_reference ref;
393         struct ttm_mem_global *mem_glob;
394 };
395 
396 /**
397  * struct ttm_bo_global - Buffer object driver global data.
398  *
399  * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
400  * @dummy_read_page: Pointer to a dummy page used for mapping requests
401  * of unpopulated pages.
402  * @kobj: kobject used to represent this object in sysfs.
403  * @device_list_mutex: Mutex protecting the device list.
404  * This mutex is held while traversing the device list for pm options.
405  * @lru_lock: Spinlock protecting the bo subsystem lru lists.
406  * @device_list: List of buffer object devices.
407  * @swap_lru: Lru list of buffer objects used for swapping.
408  */
409 
410 struct ttm_bo_global {
411 
412         /**
413          * Constant after init.
414          */
415 
416         struct kobject kobj;
417         struct ttm_mem_global *mem_glob;
418         struct page *dummy_read_page;
419         struct mutex device_list_mutex;
420         spinlock_t lru_lock;
421 
422         /**
423          * Protected by device_list_mutex.
424          */
425         struct list_head device_list;
426 
427         /**
428          * Protected by the lru_lock.
429          */
430         struct list_head swap_lru[TTM_MAX_BO_PRIORITY];
431 
432         /**
433          * Internal protection.
434          */
435         atomic_t bo_count;
436 };
437 
438 
439 #define TTM_NUM_MEM_TYPES 8
440 
441 /**
442  * struct ttm_bo_device - Buffer object driver device-specific data.
443  *
444  * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
445  * @man: An array of mem_type_managers.
446  * @vma_manager: Address space manager.
447  * @ddestroy: List of buffer objects pending delayed destruction,
448  * protected by the global lru_lock.
449  * @dev_mapping: A pointer to the struct address_space representing the
450  * device address space.
451  * @wq: Work queue structure for the delayed delete workqueue.
452  * @no_retry: Don't retry allocation if it fails
453  *
454  */
455 
456 struct ttm_bo_device {
457 
458         /*
459          * Constant after bo device init / atomic.
460          */
461         struct list_head device_list;
462         struct ttm_bo_global *glob;
463         struct ttm_bo_driver *driver;
464         struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
465 
466         /*
467          * Protected by internal locks.
468          */
469         struct drm_vma_offset_manager vma_manager;
470 
471         /*
472          * Protected by the global:lru lock.
473          */
474         struct list_head ddestroy;
475 
476         /*
477          * Protected by load / firstopen / lastclose / unload sync.
478          */
479 
480         struct address_space *dev_mapping;
481 
482         /*
483          * Internal protection.
484          */
485 
486         struct delayed_work wq;
487 
488         bool need_dma32;
489 
490         bool no_retry;
491 };
492 
493 /**
494  * ttm_flag_masked
495  *
496  * @old: Pointer to the result and original value.
497  * @new: New value of bits.
498  * @mask: Mask of bits to change.
499  *
500  * Convenience function to change a number of bits identified by a mask.
501  */
502 
503 static inline uint32_t
504 ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
505 {
506         *old ^= (*old ^ new) & mask;
507         return *old;
508 }
509 
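/*
 * Illustrative use of ttm_flag_masked() (values are made up): replace only
 * the caching bits of a placement word, leaving all other bits untouched.
 */
static inline uint32_t hypo_force_wc_caching(uint32_t placement)
{
        /* Equivalent to:
         * placement = (placement & ~TTM_PL_MASK_CACHING) | TTM_PL_FLAG_WC; */
        ttm_flag_masked(&placement, TTM_PL_FLAG_WC, TTM_PL_MASK_CACHING);
        return placement;
}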
510 /*
511  * ttm_bo.c
512  */
513 
514 /**
515  * ttm_mem_reg_is_pci
516  *
517  * @bdev: Pointer to a struct ttm_bo_device.
518  * @mem: A valid struct ttm_mem_reg.
519  *
520  * Returns true if the memory described by @mem is PCI memory,
521  * false otherwise.
522  */
523 bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
524 
525 /**
526  * ttm_bo_mem_space
527  *
528  * @bo: Pointer to a struct ttm_buffer_object, the data of which
529  * we want to allocate space for.
530  * @placement: Proposed new placement for the buffer object.
531  * @mem: A struct ttm_mem_reg to be filled in.
532  * @ctx: Operation context; controls interruptible sleeps and whether
533  * to wait for the GPU (see struct ttm_operation_ctx).
534  *
535  * Allocate memory space for the buffer object pointed to by @bo, using
536  * the placement flags in @mem, potentially evicting other idle buffer objects.
537  * This function may sleep while waiting for space to become available.
538  * Returns:
539  * -EBUSY: No space available (only if @ctx::no_wait_gpu is set).
540  * -ENOMEM: Could not allocate memory for the buffer object, either due to
541  * fragmentation or concurrent allocators.
542  * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
543  */
544 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
545                      struct ttm_placement *placement,
546                      struct ttm_mem_reg *mem,
547                      struct ttm_operation_ctx *ctx);
548 
549 void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem);
550 void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
551                            struct ttm_mem_reg *mem);
552 
553 void ttm_bo_global_release(struct drm_global_reference *ref);
554 int ttm_bo_global_init(struct drm_global_reference *ref);
555 
556 int ttm_bo_device_release(struct ttm_bo_device *bdev);
557 
558 /**
559  * ttm_bo_device_init
560  *
561  * @bdev: A pointer to a struct ttm_bo_device to initialize.
562  * @glob: A pointer to an initialized struct ttm_bo_global.
563  * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
564  * @mapping: The address space to use for this bo.
565  * @file_page_offset: Offset into the device address space that is available
566  * for buffer data. This ensures compatibility with other users of the
567  * address space.
568  *
569  * Initializes a struct ttm_bo_device:
570  * Returns:
571  * !0: Failure.
572  */
573 int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_global *glob,
574                        struct ttm_bo_driver *driver,
575                        struct address_space *mapping,
576                        uint64_t file_page_offset, bool need_dma32);
577 
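/*
 * Illustrative init-time sequence (not part of the original header):
 * reference the shared bo global through drm_global_item_ref() and then set
 * up the per-device ttm_bo_device. The hypo_device structure, its fields and
 * HYPO_FILE_PAGE_OFFSET are hypothetical; the memory global
 * (DRM_GLOBAL_TTM_MEM) is assumed to have been referenced already and stored
 * in priv->mem_global_ref. hypo_bo_driver is the ops table sketched earlier.
 */
static int hypo_ttm_init(struct hypo_device *priv, struct drm_device *ddev)
{
        struct drm_global_reference *ref = &priv->bo_global_ref.ref;
        int ret;

        priv->bo_global_ref.mem_glob = priv->mem_global_ref.object;
        ref->global_type = DRM_GLOBAL_TTM_BO;
        ref->size = sizeof(struct ttm_bo_global);
        ref->init = &ttm_bo_global_init;
        ref->release = &ttm_bo_global_release;
        ret = drm_global_item_ref(ref);
        if (ret)
                return ret;

        /* glob, driver and mapping must all outlive the bo device. */
        ret = ttm_bo_device_init(&priv->bdev,
                                 ref->object,
                                 &hypo_bo_driver,
                                 ddev->anon_inode->i_mapping,
                                 HYPO_FILE_PAGE_OFFSET,
                                 true /* need_dma32 */);
        if (ret)
                drm_global_item_unref(ref);
        return ret;
}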
578 /**
579  * ttm_bo_unmap_virtual
580  *
581  * @bo: tear down the virtual mappings for this BO
582  */
583 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
584 
585 /**
586  * ttm_bo_unmap_virtual_locked
587  *
588  * @bo: tear down the virtual mappings for this BO
589  *
590  * The caller must take ttm_mem_io_lock before calling this function.
591  */
592 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
593 
594 int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
595 void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
596 int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible);
597 void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
598 
599 void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
600 void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
601 
602 /**
603  * __ttm_bo_reserve:
604  *
605  * @bo: A pointer to a struct ttm_buffer_object.
606  * @interruptible: Sleep interruptible if waiting.
607  * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
608  * @ticket: ticket used to acquire the ww_mutex.
609  *
610  * Will not remove reserved buffers from the lru lists.
611  * Otherwise identical to ttm_bo_reserve.
612  *
613  * Returns:
614  * -EDEADLK: The reservation may cause a deadlock.
615  * Release all buffer reservations, wait for @bo to become unreserved and
616  * try again (only if a @ticket was provided).
617  * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
618  * a signal. Release all buffer reservations and return to user-space.
619  * -EBUSY: The function needed to sleep, but @no_wait was true
620  * -EALREADY: Bo already reserved using @ticket. This error code will only
621  * be returned if a @ticket was provided.
622  */
623 static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
624                                    bool interruptible, bool no_wait,
625                                    struct ww_acquire_ctx *ticket)
626 {
627         int ret = 0;
628 
629         if (no_wait) {
630                 bool success;
631                 if (WARN_ON(ticket))
632                         return -EBUSY;
633 
634                 success = reservation_object_trylock(bo->resv);
635                 return success ? 0 : -EBUSY;
636         }
637 
638         if (interruptible)
639                 ret = reservation_object_lock_interruptible(bo->resv, ticket);
640         else
641                 ret = reservation_object_lock(bo->resv, ticket);
642         if (ret == -EINTR)
643                 return -ERESTARTSYS;
644         return ret;
645 }
646 
647 /**
648  * ttm_bo_reserve:
649  *
650  * @bo: A pointer to a struct ttm_buffer_object.
651  * @interruptible: Sleep interruptible if waiting.
652  * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
653  * @ticket: ticket used to acquire the ww_mutex.
654  *
655  * Locks a buffer object for validation. (Or prevents other processes from
656  * locking it for validation) and removes it from lru lists, while taking
657  * a number of measures to prevent deadlocks.
658  *
659  * Deadlocks may occur when two processes try to reserve multiple buffers in
660  * different order, either by will or as a result of a buffer being evicted
661  * to make room for a buffer already reserved. (Buffers are reserved before
662  * they are evicted). The following algorithm prevents such deadlocks from
663  * occurring:
664  * Processes attempting to reserve multiple buffers other than for eviction,
665  * (typically execbuf), should first obtain a ww_acquire_ctx @ticket, whose
666  * stamp acts as a unique validation sequence number, and call this
667  * function with that @ticket. If upon call of this function, the
668  * buffer object is already
669  * reserved, the validation sequence is checked against the validation
670  * sequence of the process currently reserving the buffer,
671  * and if the current validation sequence is greater than that of the process
672  * holding the reservation, the function returns -EDEADLK. Otherwise it sleeps
673  * waiting for the buffer to become unreserved, after which it retries
674  * reserving.
675  * The caller should, when receiving an -EDEADLK error
676  * release all its buffer reservations, wait for @bo to become unreserved, and
677  * then rerun the validation with the same validation sequence. This procedure
678  * will always guarantee that the process with the lowest validation sequence
679  * will eventually succeed, preventing both deadlocks and starvation.
680  *
681  * Returns:
682  * -EDEADLK: The reservation may cause a deadlock.
683  * Release all buffer reservations, wait for @bo to become unreserved and
684  * try again (only if a @ticket was provided).
685  * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
686  * a signal. Release all buffer reservations and return to user-space.
687  * -EBUSY: The function needed to sleep, but @no_wait was true
688  * -EALREADY: Bo already reserved using @ticket. This error code will only
689  * be returned if a @ticket was provided.
690  */
691 static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
692                                  bool interruptible, bool no_wait,
693                                  struct ww_acquire_ctx *ticket)
694 {
695         int ret;
696 
697         WARN_ON(!kref_read(&bo->kref));
698 
699         ret = __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
700         if (likely(ret == 0))
701                 ttm_bo_del_sub_from_lru(bo);
702 
703         return ret;
704 }
705 
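/*
 * Illustrative single-buffer pattern: reserve, operate, unreserve (see
 * ttm_bo_unreserve() just below). No ticket is needed when only one buffer
 * is locked at a time; hypo_touch_bo() stands in for whatever the caller
 * wants to do while the object is stable.
 */
static int hypo_with_bo_reserved(struct ttm_buffer_object *bo)
{
        int ret;

        ret = ttm_bo_reserve(bo, true /* interruptible */,
                             false /* no_wait */, NULL /* ticket */);
        if (ret)
                return ret;             /* typically -ERESTARTSYS */

        hypo_touch_bo(bo);              /* bo->mem etc. cannot change here */

        ttm_bo_unreserve(bo);
        return 0;
}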
706 /**
707  * ttm_bo_reserve_slowpath:
708  * @bo: A pointer to a struct ttm_buffer_object.
709  * @interruptible: Sleep interruptible if waiting.
710  * @ticket: Ticket used to acquire the ww_mutex.
711  *
712  * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
713  * from all our other reservations. Because there are no other reservations
714  * held by us, this function cannot deadlock any more.
715  */
716 static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
717                                           bool interruptible,
718                                           struct ww_acquire_ctx *ticket)
719 {
720         int ret = 0;
721 
722         WARN_ON(!kref_read(&bo->kref));
723 
724         if (interruptible)
725                 ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
726                                                        ticket);
727         else
728                 ww_mutex_lock_slow(&bo->resv->lock, ticket);
729 
730         if (likely(ret == 0))
731                 ttm_bo_del_sub_from_lru(bo);
732         else if (ret == -EINTR)
733                 ret = -ERESTARTSYS;
734 
735         return ret;
736 }
737 
738 /**
739  * ttm_bo_unreserve
740  *
741  * @bo: A pointer to a struct ttm_buffer_object.
742  *
743  * Unreserve a previous reservation of @bo.
744  */
745 static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
746 {
747         if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
748                 spin_lock(&bo->bdev->glob->lru_lock);
749                 ttm_bo_add_to_lru(bo);
750                 spin_unlock(&bo->bdev->glob->lru_lock);
751         }
752         reservation_object_unlock(bo->resv);
753 }
754 
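/*
 * Illustrative sketch of the backoff procedure described above: reserve two
 * buffers under one ww_acquire_ctx ticket and, on -EDEADLK, drop everything,
 * sleep-wait for the contended buffer with ttm_bo_reserve_slowpath() and
 * retry the whole set. ww_acquire_init()/done()/fini() come from
 * linux/ww_mutex.h and reservation_ww_class from linux/reservation.h.
 */
static int hypo_reserve_pair(struct ttm_buffer_object *a,
                             struct ttm_buffer_object *b)
{
        struct ww_acquire_ctx ticket;
        int ret;

        ww_acquire_init(&ticket, &reservation_ww_class);
retry:
        ret = ttm_bo_reserve(a, true, false, &ticket);
        if (ret)
                goto out;

        ret = ttm_bo_reserve(b, true, false, &ticket);
        if (ret == -EDEADLK) {
                /* Back off: release what we hold, wait until the contended
                 * buffer becomes free (the slowpath lock does that), then
                 * release it again and retry the whole set with the same
                 * ticket, as the kerneldoc above prescribes. */
                ttm_bo_unreserve(a);
                ret = ttm_bo_reserve_slowpath(b, true, &ticket);
                if (ret)
                        goto out;
                ttm_bo_unreserve(b);
                goto retry;
        }
        if (ret) {
                ttm_bo_unreserve(a);
                goto out;
        }

        /* ... both buffers are now reserved; validate / submit here ... */

        ttm_bo_unreserve(a);
        ttm_bo_unreserve(b);
        ret = 0;
out:
        ww_acquire_done(&ticket);
        ww_acquire_fini(&ticket);
        return ret;
}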
755 /*
756  * ttm_bo_util.c
757  */
758 
759 int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
760                        struct ttm_mem_reg *mem);
761 void ttm_mem_io_free(struct ttm_bo_device *bdev,
762                      struct ttm_mem_reg *mem);
763 /**
764  * ttm_bo_move_ttm
765  *
766  * @bo: A pointer to a struct ttm_buffer_object.
767  * @ctx: Operation context; controls interruptible sleeps and whether
768  * to wait for the GPU (see struct ttm_operation_ctx).
769  * @new_mem: struct ttm_mem_reg indicating where to move.
770  *
771  * Optimized move function for a buffer object with both old and
772  * new placement backed by a TTM. The function will, if successful,
773  * free any old aperture space, and set (@new_mem)->mm_node to NULL,
774  * and update the (@bo)->mem placement flags. If unsuccessful, the old
775  * data remains untouched, and it's up to the caller to free the
776  * memory space indicated by @new_mem.
777  * Returns:
778  * !0: Failure.
779  */
780 
781 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
782                     struct ttm_operation_ctx *ctx,
783                     struct ttm_mem_reg *new_mem);
784 
785 /**
786  * ttm_bo_move_memcpy
787  *
788  * @bo: A pointer to a struct ttm_buffer_object.
789  * @ctx: Operation context; controls interruptible sleeps and whether
790  * to wait for the GPU (see struct ttm_operation_ctx).
791  * @new_mem: struct ttm_mem_reg indicating where to move.
792  *
793  * Fallback move function for a mappable buffer object in mappable memory.
794  * The function will, if successful,
795  * free any old aperture space, and set (@new_mem)->mm_node to NULL,
796  * and update the (@bo)->mem placement flags. If unsuccessful, the old
797  * data remains untouched, and it's up to the caller to free the
798  * memory space indicated by @new_mem.
799  * Returns:
800  * !0: Failure.
801  */
802 
803 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
804                        struct ttm_operation_ctx *ctx,
805                        struct ttm_mem_reg *new_mem);
806 
807 /**
808  * ttm_bo_free_old_node
809  *
810  * @bo: A pointer to a struct ttm_buffer_object.
811  *
812  * Utility function to free an old placement after a successful move.
813  */
814 void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
815 
816 /**
817  * ttm_bo_move_accel_cleanup.
818  *
819  * @bo: A pointer to a struct ttm_buffer_object.
820  * @fence: A fence object that signals when moving is complete.
821  * @evict: This is an evict move. Don't return until the buffer is idle.
822  * @new_mem: struct ttm_mem_reg indicating where to move.
823  *
824  * Accelerated move function to be called when an accelerated move
825  * has been scheduled. The function will create a new temporary buffer object
826  * representing the old placement, and put the sync object on both buffer
827  * objects. After that the newly created buffer object is unref'd to be
828  * destroyed when the move is complete. This will help pipeline
829  * buffer moves.
830  */
831 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
832                               struct dma_fence *fence, bool evict,
833                               struct ttm_mem_reg *new_mem);
834 
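/*
 * Illustrative sketch (not part of the original header) of a driver's
 * struct ttm_bo_driver::move callback. hypo_copy_buffer() is a hypothetical
 * wrapper around the device's copy engine that returns a dma_fence (or an
 * ERR_PTR when no engine is available); everything else uses the helpers
 * declared above. dma_fence_put() comes from linux/dma-fence.h.
 */
static int hypo_bo_move(struct ttm_buffer_object *bo, bool evict,
                        struct ttm_operation_ctx *ctx,
                        struct ttm_mem_reg *new_mem)
{
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct dma_fence *fence;
        int ret;

        /* System <-> GTT moves only bind/unbind the ttm_tt; let the TTM
         * helper handle them. */
        if ((old_mem->mem_type == TTM_PL_SYSTEM &&
             new_mem->mem_type == TTM_PL_TT) ||
            (old_mem->mem_type == TTM_PL_TT &&
             new_mem->mem_type == TTM_PL_SYSTEM))
                return ttm_bo_move_ttm(bo, ctx, new_mem);

        /* Blit with the copy engine and hand the fence to TTM, which
         * pipelines the move and frees the old node once the copy signals. */
        fence = hypo_copy_buffer(bo, old_mem, new_mem);
        if (!IS_ERR(fence)) {
                ret = ttm_bo_move_accel_cleanup(bo, fence, evict, new_mem);
                dma_fence_put(fence);
                return ret;
        }

        /* No copy engine: fall back to the slow CPU copy. */
        return ttm_bo_move_memcpy(bo, ctx, new_mem);
}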
835 /**
836  * ttm_bo_pipeline_move.
837  *
838  * @bo: A pointer to a struct ttm_buffer_object.
839  * @fence: A fence object that signals when moving is complete.
840  * @evict: This is an evict move. Don't return until the buffer is idle.
841  * @new_mem: struct ttm_mem_reg indicating where to move.
842  *
843  * Function for pipelining accelerated moves. Either free the memory
844  * immediately or hang it on a temporary buffer object.
845  */
846 int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
847                          struct dma_fence *fence, bool evict,
848                          struct ttm_mem_reg *new_mem);
849 
850 /**
851  * ttm_bo_pipeline_gutting.
852  *
853  * @bo: A pointer to a struct ttm_buffer_object.
854  *
855  * Pipelined gutting of a BO, freeing its backing store.
856  */
857 int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
858 
859 /**
860  * ttm_io_prot
861  *
862  * @caching_flags: The caching flags (TTM_PL_FLAG_XX) of the mapping.
863  * @tmp: Page protection flag for a normal, cached mapping.
864  *
865  * Utility function that returns the pgprot_t that should be used for
866  * setting up a PTE with the caching model indicated by @caching_flags.
867  */
868 pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
869 
870 extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
871 
872 #endif
873 
