Linux/include/drm/gpu_scheduler.h

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)

struct drm_gpu_scheduler;
struct drm_sched_rq;

enum drm_sched_priority {
        DRM_SCHED_PRIORITY_MIN,
        DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
        DRM_SCHED_PRIORITY_NORMAL,
        DRM_SCHED_PRIORITY_HIGH_SW,
        DRM_SCHED_PRIORITY_HIGH_HW,
        DRM_SCHED_PRIORITY_KERNEL,
        DRM_SCHED_PRIORITY_MAX,
        DRM_SCHED_PRIORITY_INVALID = -1,
        DRM_SCHED_PRIORITY_UNSET = -2
};

/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * @list: used to append this struct to the list of entities in the
 *        runqueue.
 * @rq: runqueue on which this entity is currently scheduled.
 * @rq_list: a list of run queues on which jobs from this entity can
 *           be scheduled.
 * @num_rq_list: number of run queues in the rq_list.
 * @rq_lock: lock to modify the runqueue to which this entity belongs.
 * @job_queue: the list of jobs of this entity.
 * @fence_seq: a linearly increasing seqno incremented with each
 *             new &drm_sched_fence which is part of the entity.
 * @fence_context: a unique context for all the fences which belong
 *                 to this entity.
 *                 The &drm_sched_fence.scheduled uses the
 *                 fence_context but &drm_sched_fence.finished uses
 *                 fence_context + 1.
 * @dependency: the dependency fence of the job which is on the top
 *              of the job queue.
 * @cb: callback for the dependency fence above.
 * @guilty: points to ctx's guilty.
 * @last_scheduled: points to the finished fence of the last scheduled job.
 * @last_user: last group leader pushing a job into the entity.
 * @stopped: Marks the entity as removed from rq and destined for termination.
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
        struct list_head                list;
        struct drm_sched_rq             *rq;
        struct drm_sched_rq             **rq_list;
        unsigned int                    num_rq_list;
        spinlock_t                      rq_lock;

        struct spsc_queue               job_queue;

        atomic_t                        fence_seq;
        uint64_t                        fence_context;

        struct dma_fence                *dependency;
        struct dma_fence_cb             cb;
        atomic_t                        *guilty;
        struct dma_fence                *last_scheduled;
        struct task_struct              *last_user;
        bool                            stopped;
};
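
/*
 * Illustrative sketch, not part of this header: a driver typically embeds a
 * struct drm_sched_entity in its per-file context and initializes it against
 * one or more run queues of a scheduler instance, e.g.:
 *
 *      struct drm_sched_rq *rq =
 *              &my_sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 *
 *      r = drm_sched_entity_init(&ctx->entity, &rq, 1, &ctx->guilty);
 *      if (r)
 *              return r;
 *
 * "my_sched", "ctx" and "r" are hypothetical driver-side names used only for
 * illustration; see drm_sched_entity_init() declared below.
 */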

/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: lock to modify the entities list.
 * @sched: the scheduler to which this rq belongs.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 *
 * A run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
        spinlock_t                      lock;
        struct drm_gpu_scheduler        *sched;
        struct list_head                entities;
        struct drm_sched_entity         *current_entity;
};

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
        /**
         * @scheduled: this fence is what will be signaled by the scheduler
         * when the job is scheduled.
         */
        struct dma_fence                scheduled;

        /**
         * @finished: this fence is what will be signaled by the scheduler
         * when the job is completed.
         *
         * When setting up an out fence for the job, you should use
         * this, since it's available immediately upon
         * drm_sched_job_init(), and the fence returned by the driver
         * from run_job() won't be created until the dependencies have
         * resolved.
         */
        struct dma_fence                finished;

        /**
         * @cb: the callback for the parent fence below.
         */
        struct dma_fence_cb             cb;
        /**
         * @parent: the fence returned by &drm_sched_backend_ops.run_job
         * when scheduling the job on hardware. We signal the
         * &drm_sched_fence.finished fence once parent is signalled.
         */
        struct dma_fence                *parent;
        /**
         * @sched: the scheduler instance to which the job having this struct
         * belongs.
         */
        struct drm_gpu_scheduler        *sched;
        /**
         * @lock: the lock used by the scheduled and the finished fences.
         */
        spinlock_t                      lock;
        /**
         * @owner: job owner for debugging.
         */
        void                            *owner;
};
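
/*
 * Illustrative sketch, not part of this header: because
 * &drm_sched_fence.finished is valid as soon as drm_sched_job_init() returns,
 * a driver can install it as the out-fence of a submission before the job has
 * even been scheduled, e.g.:
 *
 *      r = drm_sched_job_init(&job->base, entity, owner);
 *      if (r)
 *              return r;
 *      out_fence = dma_fence_get(&job->base.s_fence->finished);
 *
 * "job", "entity", "owner" and "out_fence" are hypothetical driver-side names
 * used only for illustration; "base" assumes the driver embeds a
 * struct drm_sched_job in its own job structure.
 */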

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);

/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of the job.
 * @finish_cb: the callback for the finished fence.
 * @finish_work: schedules the function @drm_sched_job_finish once the job has
 *               finished to remove the job from the
 *               @drm_gpu_scheduler.ring_mirror_list.
 * @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: increment on every hang caused by this job. If this exceeds the hang
 *         limit of the scheduler then the job is marked guilty and will not
 *         be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 *
 * A job is created by the driver using drm_sched_job_init(); the driver
 * should then call drm_sched_entity_push_job() once it wants the scheduler
 * to schedule the job.
 */
struct drm_sched_job {
        struct spsc_node                queue_node;
        struct drm_gpu_scheduler        *sched;
        struct drm_sched_fence          *s_fence;
        struct dma_fence_cb             finish_cb;
        struct work_struct              finish_work;
        struct list_head                node;
        uint64_t                        id;
        atomic_t                        karma;
        enum drm_sched_priority         s_priority;
        struct drm_sched_entity         *entity;
};
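
/*
 * Illustrative sketch, not part of this header: the driver-side submission
 * flow described above is init, then (optionally) set up an out-fence, then
 * push, e.g.:
 *
 *      r = drm_sched_job_init(&job->base, &ctx->entity, ctx);
 *      if (r)
 *              return r;
 *      drm_sched_entity_push_job(&job->base, &ctx->entity);
 *
 * After drm_sched_entity_push_job() the job is queued on the entity and will
 * reach the driver again through the &drm_sched_backend_ops callbacks.
 * "job" and "ctx" are hypothetical driver-side names used only for
 * illustration.
 */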

/* Increment the job's karma and report whether it now exceeds @threshold. */
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
                                            int threshold)
{
        return (s_job && atomic_inc_return(&s_job->karma) > threshold);
}

/**
 * struct drm_sched_backend_ops
 *
 * Define the backend operations called by the scheduler;
 * these functions should be implemented on the driver side.
 */
struct drm_sched_backend_ops {
        /**
         * @dependency: Called when the scheduler is considering scheduling
         * this job next, to get another struct dma_fence for this job to
         * block on.  Once it returns NULL, run_job() may be called.
         */
        struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
                                        struct drm_sched_entity *s_entity);

        /**
         * @run_job: Called to execute the job once all of the dependencies
         * have been resolved.  This may be called multiple times, if
         * timedout_job() has happened and drm_sched_job_recovery()
         * decides to try it again.
         */
        struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

        /**
         * @timedout_job: Called when a job has taken too long to execute,
         * to trigger GPU recovery.
         */
        void (*timedout_job)(struct drm_sched_job *sched_job);

        /**
         * @free_job: Called once the job's finished fence has been signaled
         * and it's time to clean it up.
         */
        void (*free_job)(struct drm_sched_job *sched_job);
};
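
/*
 * Illustrative sketch, not part of this header: a driver provides the four
 * callbacks in a static ops table and passes it to drm_sched_init(), e.g.:
 *
 *      static const struct drm_sched_backend_ops my_sched_ops = {
 *              .dependency   = my_job_dependency,
 *              .run_job      = my_job_run,
 *              .timedout_job = my_job_timedout,
 *              .free_job     = my_job_free,
 *      };
 *
 * "my_sched_ops" and the "my_job_*" functions are hypothetical names used
 * only for illustration; their prototypes must match the members above.
 */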

/**
 * struct drm_gpu_scheduler
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @sched_rq: priority-wise array of run queues.
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 *                  is ready to be scheduled.
 * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
 *            timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @ring_mirror_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the ring_mirror_list.
 * @hang_limit: once the hangs caused by a job cross this limit, the job is
 *              marked guilty and will not be considered for scheduling
 *              further.
 * @num_jobs: the number of jobs currently queued in the scheduler.
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
        const struct drm_sched_backend_ops      *ops;
        uint32_t                        hw_submission_limit;
        long                            timeout;
        const char                      *name;
        struct drm_sched_rq             sched_rq[DRM_SCHED_PRIORITY_MAX];
        wait_queue_head_t               wake_up_worker;
        wait_queue_head_t               job_scheduled;
        atomic_t                        hw_rq_count;
        atomic64_t                      job_id_count;
        struct delayed_work             work_tdr;
        struct task_struct              *thread;
        struct list_head                ring_mirror_list;
        spinlock_t                      job_list_lock;
        int                             hang_limit;
        atomic_t                        num_jobs;
};
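
/*
 * Illustrative sketch, not part of this header: one scheduler instance is
 * created per hardware ring via drm_sched_init() (declared below), e.g.:
 *
 *      r = drm_sched_init(&ring->sched, &my_sched_ops,
 *                         num_hw_submission, hang_limit,
 *                         msecs_to_jiffies(timeout_ms), ring->name);
 *
 * "ring", "my_sched_ops", "num_hw_submission", "hang_limit" and "timeout_ms"
 * are hypothetical driver-side names used only for illustration.
 */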

int drm_sched_init(struct drm_gpu_scheduler *sched,
                   const struct drm_sched_backend_ops *ops,
                   uint32_t hw_submission, unsigned hang_limit, long timeout,
                   const char *name);
void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
                       struct drm_sched_entity *entity,
                       void *owner);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
                            struct drm_sched_job *job);
void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
                                    struct drm_sched_entity *entity);
void drm_sched_job_kickout(struct drm_sched_job *s_job);

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
                             struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
                                struct drm_sched_entity *entity);

int drm_sched_entity_init(struct drm_sched_entity *entity,
                          struct drm_sched_rq **rq_list,
                          unsigned int num_rq_list,
                          atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
                               struct drm_sched_entity *entity);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
                                   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);

struct drm_sched_fence *drm_sched_fence_create(
        struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);

#endif
