~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/include/drm/gpu_scheduler.h

Version: ~ [ linux-5.13-rc5 ] ~ [ linux-5.12.9 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.42 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.124 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.193 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.235 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.271 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.271 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.18.140 ] ~ [ linux-3.16.85 ] ~ [ linux-3.14.79 ] ~ [ linux-3.12.74 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /*
  2  * Copyright 2015 Advanced Micro Devices, Inc.
  3  *
  4  * Permission is hereby granted, free of charge, to any person obtaining a
  5  * copy of this software and associated documentation files (the "Software"),
  6  * to deal in the Software without restriction, including without limitation
  7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8  * and/or sell copies of the Software, and to permit persons to whom the
  9  * Software is furnished to do so, subject to the following conditions:
 10  *
 11  * The above copyright notice and this permission notice shall be included in
 12  * all copies or substantial portions of the Software.
 13  *
 14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20  * OTHER DEALINGS IN THE SOFTWARE.
 21  *
 22  */
 23 
 24 #ifndef _DRM_GPU_SCHEDULER_H_
 25 #define _DRM_GPU_SCHEDULER_H_
 26 
 27 #include <drm/spsc_queue.h>
 28 #include <linux/dma-fence.h>
 29 
 30 struct drm_gpu_scheduler;
 31 struct drm_sched_rq;
 32 
/*
 * Priority levels for scheduler run queues. One &struct drm_sched_rq exists
 * per level (see drm_gpu_scheduler.sched_rq, sized DRM_SCHED_PRIORITY_MAX).
 * INVALID and UNSET are out-of-band markers and must never be used to index
 * sched_rq.
 */
enum drm_sched_priority {
        DRM_SCHED_PRIORITY_MIN,
        DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,        /* alias of MIN */
        DRM_SCHED_PRIORITY_NORMAL,
        DRM_SCHED_PRIORITY_HIGH_SW,
        DRM_SCHED_PRIORITY_HIGH_HW,
        DRM_SCHED_PRIORITY_KERNEL,      /* highest scheduled priority */
        DRM_SCHED_PRIORITY_MAX,         /* count of valid levels, not a level itself */
        DRM_SCHED_PRIORITY_INVALID = -1,
        DRM_SCHED_PRIORITY_UNSET = -2
};
 44 
/**
 * A scheduler entity is a wrapper around a job queue or a group
 * of other entities. Entities take turns emitting jobs from their
 * job queues to corresponding hardware ring based on scheduling
 * policy.
 */
struct drm_sched_entity {
        struct list_head                list;           /* list node; presumably linked into @rq->entities */
        struct drm_sched_rq             *rq;            /* run queue this entity is scheduled on (see drm_sched_entity_set_rq()) */
        spinlock_t                      rq_lock;        /* NOTE(review): presumably protects @rq changes — confirm in scheduler core */
        struct drm_gpu_scheduler        *sched;         /* scheduler this entity belongs to */

        spinlock_t                      queue_lock;     /* serializes pushes into @job_queue */
        struct spsc_queue               job_queue;      /* single-producer single-consumer FIFO of pending jobs */

        atomic_t                        fence_seq;      /* sequence-number source for fences emitted by this entity */
        uint64_t                        fence_context;  /* dma-fence context used for this entity's fences */

        struct dma_fence                *dependency;    /* fence currently blocking this entity, if any */
        struct dma_fence_cb             cb;             /* callback used to wait on @dependency */
        atomic_t                        *guilty; /* points to ctx's guilty */
};
 67 
/**
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
        spinlock_t                      lock;           /* protects @entities and @current_entity */
        struct list_head                entities;       /* entities attached to this run queue */
        struct drm_sched_entity         *current_entity; /* last selected entity; NOTE(review): presumably the round-robin cursor — confirm */
};
 78 
/*
 * Per-job fence pair: @scheduled and @finished are signalled via
 * drm_sched_fence_scheduled()/drm_sched_fence_finished() (declared below)
 * as the job moves through the scheduler.
 */
struct drm_sched_fence {
        struct dma_fence                scheduled;      /* signalled when the job is picked to run */
        struct dma_fence                finished;       /* signalled when the job has completed */
        struct dma_fence_cb             cb;             /* NOTE(review): presumably tracks @parent's completion — confirm */
        struct dma_fence                *parent;        /* NOTE(review): presumably the HW fence returned by ops->run_job() — confirm */
        struct drm_gpu_scheduler        *sched;         /* scheduler this fence was created for */
        spinlock_t                      lock;           /* lock backing the embedded dma_fences */
        void                            *owner;         /* opaque owner cookie passed to drm_sched_fence_create() */
};
 88 
 89 struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
 90 
/*
 * One unit of work handed to the scheduler via drm_sched_job_init() and
 * drm_sched_entity_push_job().
 */
struct drm_sched_job {
        struct spsc_node                queue_node;     /* node in the entity's @job_queue */
        struct drm_gpu_scheduler        *sched;         /* scheduler this job was submitted to */
        struct drm_sched_fence          *s_fence;       /* scheduled/finished fence pair for this job */
        struct dma_fence_cb             finish_cb;      /* completion callback; presumably queues @finish_work */
        struct work_struct              finish_work;    /* deferred finish processing */
        struct list_head                node;           /* NOTE(review): presumably links into sched->ring_mirror_list — confirm */
        struct delayed_work             work_tdr;       /* delayed work for timeout detection (see ops->timedout_job) */
        uint64_t                        id;             /* job id; presumably drawn from sched->job_id_count */
        atomic_t                        karma;          /* guilt counter bumped by drm_sched_invalidate_job() */
        enum drm_sched_priority         s_priority;     /* priority the job was submitted with */
};
103 
104 static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
105                                             int threshold)
106 {
107         return (s_job && atomic_inc_return(&s_job->karma) > threshold);
108 }
109 
/**
 * Define the backend operations called by the scheduler,
 * these functions should be implemented in driver side
 */
struct drm_sched_backend_ops {
        /* Return a fence @sched_job still depends on, or NULL when it is
         * runnable. NOTE(review): presumably called repeatedly until NULL —
         * confirm in scheduler core. */
        struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
                                        struct drm_sched_entity *s_entity);
        /* Submit @sched_job to the hardware; presumably returns the HW fence
         * that signals completion. */
        struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
        /* Called when @sched_job exceeded the scheduler's @timeout. */
        void (*timedout_job)(struct drm_sched_job *sched_job);
        /* Release driver-side resources of a completed @sched_job. */
        void (*free_job)(struct drm_sched_job *sched_job);
};
121 
/**
 * One scheduler is implemented for each hardware ring
 */
struct drm_gpu_scheduler {
        const struct drm_sched_backend_ops      *ops;   /* driver-provided backend callbacks */
        uint32_t                        hw_submission_limit;    /* max jobs in flight on HW; presumably bounds @hw_rq_count */
        long                            timeout;        /* job timeout; NOTE(review): presumably in jiffies — confirm */
        const char                      *name;          /* scheduler name, for identification */
        struct drm_sched_rq             sched_rq[DRM_SCHED_PRIORITY_MAX];       /* one run queue per priority level */
        wait_queue_head_t               wake_up_worker; /* waitqueue used to wake @thread */
        wait_queue_head_t               job_scheduled;  /* NOTE(review): presumably signalled when a job is scheduled — confirm */
        atomic_t                        hw_rq_count;    /* number of jobs currently submitted to hardware */
        atomic64_t                      job_id_count;   /* source for drm_sched_job.id values */
        struct task_struct              *thread;        /* scheduler kernel thread */
        struct list_head                ring_mirror_list;       /* NOTE(review): presumably jobs in flight, for reset/recovery — confirm */
        spinlock_t                      job_list_lock;  /* protects @ring_mirror_list */
        int                             hang_limit;     /* NOTE(review): presumably the karma threshold for guilty jobs — confirm */
};
140 
/* Scheduler lifecycle: set up/tear down one scheduler per hardware ring. */
int drm_sched_init(struct drm_gpu_scheduler *sched,
                   const struct drm_sched_backend_ops *ops,
                   uint32_t hw_submission, unsigned hang_limit, long timeout,
                   const char *name);
void drm_sched_fini(struct drm_gpu_scheduler *sched);

/* Entity lifecycle and job submission. */
int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
                          struct drm_sched_entity *entity,
                          struct drm_sched_rq *rq,
                          uint32_t jobs, atomic_t *guilty);
void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
                           struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
                               struct drm_sched_entity *entity);
/* Move an entity to a different run queue (e.g. priority change). */
void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
                             struct drm_sched_rq *rq);

/* Scheduler fence creation and signalling. */
struct drm_sched_fence *drm_sched_fence_create(
        struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);
/* Prepare @job for submission to @entity on @sched. */
int drm_sched_job_init(struct drm_sched_job *job,
                       struct drm_gpu_scheduler *sched,
                       struct drm_sched_entity *entity,
                       void *owner);
/* Hang recovery: stop jobs on @sched, then re-submit after reset. */
void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
                            struct drm_sched_job *job);
void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
/* NOTE(review): presumably true when @fence already belongs to @entity's
 * scheduler so the dependency can be skipped — confirm in implementation. */
bool drm_sched_dependency_optimized(struct dma_fence* fence,
                                    struct drm_sched_entity *entity);
void drm_sched_job_kickout(struct drm_sched_job *s_job);
172 
173 #endif
174 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp