/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>	/* DRM_ERROR() */
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);

struct kmem_cache *sched_fence_slab;
atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq		The run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);
	/* Continue round robin after the previously selected entity */
	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (amd_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}
	/* Otherwise scan from the head until we wrap around again */
	list_for_each_entry(entity, &rq->entities, list) {
		if (amd_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}
		if (entity == rq->current_entity)
			break;
	}
	spin_unlock(&rq->lock);
	return NULL;
}

/**
 * Init a context entity used by scheduler when submit to HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 *
 * return 0 if succeeded, negative error code on failure
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	int r;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;

	spin_lock_init(&entity->queue_lock);
	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
	if (r)
		return r;

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = fence_context_alloc(1);

	/* Add the entity to the run queue */
	amd_sched_rq_add_entity(rq, entity);
	return 0;
}
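
/*
 * Usage sketch (illustrative only, not part of this file): how a caller
 * might pair amd_sched_entity_init() with amd_sched_entity_fini().  The
 * context structure, the queue depth and the choice of run queue are
 * hypothetical; only the signatures defined in this file are assumed.
 *
 *	struct my_ctx {
 *		struct amd_sched_entity entity;
 *	};
 *
 *	static int my_ctx_init(struct amd_gpu_scheduler *sched,
 *			       struct my_ctx *ctx)
 *	{
 *		// room for 16 queued jobs, serviced from the normal run queue
 *		return amd_sched_entity_init(sched, &ctx->entity,
 *					     &sched->sched_rq, 16);
 *	}
 *
 *	static void my_ctx_fini(struct amd_gpu_scheduler *sched,
 *				struct my_ctx *ctx)
 *	{
 *		// waits for all queued jobs of the entity to be scheduled
 *		amd_sched_entity_fini(sched, &ctx->entity);
 *	}
 */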

/**
 * Query if entity is initialized
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * return true if entity is initialized, false otherwise
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	return kfifo_is_empty(&entity->job_queue);
}

/**
 * Check if entity is ready
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
	if (kfifo_is_empty(&entity->job_queue))
		return false;
	if (ACCESS_ONCE(entity->dependency))
		return false;
	return true;
}

/**
 * Destroy a context entity
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->rq;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;

	/*
	 * The client will not queue more IBs during this fini, consume
	 * existing queued IBs.
	 */
	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
}

static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);

	entity->dependency = NULL;
	fence_put(f);
	amd_sched_wakeup(entity->sched);
}

static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct fence *fence = entity->dependency;
	struct amd_sched_fence *s_fence;

	if (fence->context == entity->fence_context) {
		/* We can ignore fences from ourself */
		fence_put(entity->dependency);
		return false;
	}

	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {
		/* Fence is from the same scheduler */
		if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
			/* Ignore it when it is already scheduled */
			fence_put(entity->dependency);
			return false;
		}

		/* Wait for fence to be scheduled */
		entity->cb.func = amd_sched_entity_wakeup;
		list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
		return true;
	}

	if (!fence_add_callback(entity->dependency, &entity->cb,
				amd_sched_entity_wakeup))
		return true;

	fence_put(entity->dependency);
	return false;
}

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_job *sched_job;

	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
		return NULL;

	while ((entity->dependency = sched->ops->dependency(sched_job)))
		if (amd_sched_entity_add_dependency_cb(entity))
			return NULL;

	return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job	The pointer to job required to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
	struct amd_gpu_scheduler *sched = sched_job->sched;
	struct amd_sched_entity *entity = sched_job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &sched_job,
			 sizeof(sched_job)) == sizeof(sched_job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first)
		amd_sched_wakeup(sched);

	return added;
}

/**
 * Submit a job to the job queue
 *
 * @sched_job	The pointer to job required to submit
 *
 * Blocks until the job could be pushed to the entity's queue.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;

	trace_amd_sched_job(sched_job);
	wait_event(entity->sched->job_scheduled,
		   amd_sched_entity_in(sched_job));
}
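
/*
 * Usage sketch (illustrative only, not part of this file): a submission
 * path fills in an amd_sched_job and hands it to amd_sched_entity_push_job(),
 * which blocks until the entity's job queue has room.  my_alloc_job() is a
 * hypothetical driver helper; only the sched/s_entity/s_fence fields already
 * used by this file are assumed.
 *
 *	int my_submit(struct amd_gpu_scheduler *sched,
 *		      struct amd_sched_entity *entity)
 *	{
 *		struct amd_sched_job *job = my_alloc_job();
 *
 *		if (!job)
 *			return -ENOMEM;
 *
 *		job->sched = sched;
 *		job->s_entity = entity;
 *		// job->s_fence is expected to be set up by the driver's
 *		// fence creation helper before pushing.
 *
 *		amd_sched_entity_push_job(job);
 *		return 0;
 *	}
 */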

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *entity;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	entity = amd_sched_rq_select_entity(&sched->kernel_rq);
	if (entity == NULL)
		entity = amd_sched_rq_select_entity(&sched->sched_rq);

	return entity;
}

static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->sched;
	unsigned long flags;

	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_signal(s_fence);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		cancel_delayed_work(&s_fence->dwork);
		spin_lock_irqsave(&sched->fence_list_lock, flags);
		list_del_init(&s_fence->list);
		spin_unlock_irqrestore(&sched->fence_list_lock, flags);
	}
	trace_amd_sched_process_job(s_fence);
	fence_put(&s_fence->base);
	wake_up_interruptible(&sched->wake_up_worker);
}

static void amd_sched_fence_work_func(struct work_struct *work)
{
	struct amd_sched_fence *s_fence =
		container_of(work, struct amd_sched_fence, dwork.work);
	struct amd_gpu_scheduler *sched = s_fence->sched;
	struct amd_sched_fence *entity, *tmp;
	unsigned long flags;

	DRM_ERROR("[%s] scheduler timed out!\n", sched->name);

	/* Clean all pending fences */
	spin_lock_irqsave(&sched->fence_list_lock, flags);
	list_for_each_entry_safe(entity, tmp, &sched->fence_list, list) {
		DRM_ERROR(" fence no %d\n", entity->base.seqno);
		cancel_delayed_work(&entity->dwork);
		list_del_init(&entity->list);
		fence_put(&entity->base);
	}
	spin_unlock_irqrestore(&sched->fence_list_lock, flags);
}

static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r, count;

	spin_lock_init(&sched->fence_list_lock);
	INIT_LIST_HEAD(&sched->fence_list);
	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct fence *fence;
		unsigned long flags;

		wait_event_interruptible(sched->wake_up_worker,
			(entity = amd_sched_select_entity(sched)) ||
			kthread_should_stop());

		if (!entity)
			continue;

		sched_job = amd_sched_entity_pop_job(entity);
		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
			INIT_DELAYED_WORK(&s_fence->dwork, amd_sched_fence_work_func);
			schedule_delayed_work(&s_fence->dwork, sched->timeout);
			spin_lock_irqsave(&sched->fence_list_lock, flags);
			list_add_tail(&s_fence->list, &sched->fence_list);
			spin_unlock_irqrestore(&sched->fence_list_lock, flags);
		}

		atomic_inc(&sched->hw_rq_count);
		fence = sched->ops->run_job(sched_job);
		amd_sched_fence_scheduled(s_fence);
		if (fence) {
			r = fence_add_callback(fence, &s_fence->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n", r);
			fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}

		count = kfifo_out(&entity->job_queue, &sched_job,
				  sizeof(sched_job));
		WARN_ON(count != sizeof(sched_job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched		The pointer to the scheduler
 * @ops			The backend operations for this scheduler.
 * @hw_submission	Number of hw submissions to do.
 * @timeout		Job timeout in jiffies, or MAX_SCHEDULE_TIMEOUT for none.
 * @name		Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   struct amd_sched_backend_ops *ops,
		   unsigned hw_submission, long timeout, const char *name)
{
	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	amd_sched_rq_init(&sched->sched_rq);
	amd_sched_rq_init(&sched->kernel_rq);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	atomic_set(&sched->hw_rq_count, 0);
	if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
		sched_fence_slab = kmem_cache_create(
			"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!sched_fence_slab)
			return -ENOMEM;
	}

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}
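
/*
 * Usage sketch (illustrative only, not part of this file): a backend is
 * expected to provide at least the two callbacks this file invokes,
 * ->dependency and ->run_job, and then bring the scheduler up with
 * amd_sched_init().  The callback bodies, my_hw_submit() and the numbers
 * below are hypothetical.
 *
 *	static struct fence *my_dependency(struct amd_sched_job *job)
 *	{
 *		return NULL;	// nothing extra to wait for in this sketch
 *	}
 *
 *	static struct fence *my_run_job(struct amd_sched_job *job)
 *	{
 *		return my_hw_submit(job);	// hand the job to the hw ring
 *	}
 *
 *	static struct amd_sched_backend_ops my_ops = {
 *		.dependency = my_dependency,
 *		.run_job = my_run_job,
 *	};
 *
 *	// e.g. at most 2 outstanding hw submissions, 5 second watchdog
 *	r = amd_sched_init(&my_sched, &my_ops, 2,
 *			   msecs_to_jiffies(5000), "my_ring");
 */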

/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
	if (atomic_dec_and_test(&sched_fence_slab_ref))
		kmem_cache_destroy(sched_fence_slab);
}