X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=kernel%2Fkernel%2Fworkqueue.c;fp=kernel%2Fkernel%2Fworkqueue.c;h=d5b0f4fc0eb0c82a4a2fd09d5cc79f44a2335926;hb=52f993b8e89487ec9ee15a7fb4979e0f09a45b27;hp=efd1f5822f4e23072f9b79778548300f1593ea03;hpb=c189ccac5702322ed843fe17057035b7222a59b6;p=kvmfornfv.git

diff --git a/kernel/kernel/workqueue.c b/kernel/kernel/workqueue.c
index efd1f5822..d5b0f4fc0 100644
--- a/kernel/kernel/workqueue.c
+++ b/kernel/kernel/workqueue.c
@@ -683,6 +683,35 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
 	 */
 	smp_wmb();
 	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
+	/*
+	 * The following mb guarantees that the previous clear of a PENDING bit
+	 * will not be reordered with any speculative LOADS or STORES from
+	 * work->current_func, which is executed afterwards.  This possible
+	 * reordering can lead to a missed execution on an attempt to queue
+	 * the same @work.  E.g. consider this case:
+	 *
+	 *   CPU#0                          CPU#1
+	 *   ----------------------------   --------------------------------
+	 *
+	 * 1  STORE event_indicated
+	 * 2  queue_work_on() {
+	 * 3    test_and_set_bit(PENDING)
+	 * 4  }                             set_..._and_clear_pending() {
+	 * 5                                  set_work_data() # clear bit
+	 * 6                                  smp_mb()
+	 * 7                                work->current_func() {
+	 * 8                                  LOAD event_indicated
+	 *                                  }
+	 *
+	 * Without an explicit full barrier, the speculative LOAD on line 8
+	 * can be executed before CPU#0 does the STORE on line 1.  If that
+	 * happens, CPU#0 observes that the PENDING bit is still set, so a new
+	 * execution of the @work is not queued, in the hope that CPU#1 will
+	 * eventually finish the queued @work.  Meanwhile CPU#1 does not see
+	 * that event_indicated is set, because the speculative LOAD was
+	 * executed before the actual STORE.
+	 */
+	smp_mb();
 }
 
 static void clear_work_data(struct work_struct *work)
@@ -1106,9 +1135,11 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
 		 * As both pwqs and pools are RCU protected, the
 		 * following lock operations are safe.
 		 */
+		rcu_read_lock();
 		local_spin_lock_irq(pendingb_lock, &pwq->pool->lock);
 		put_pwq(pwq);
 		local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock);
+		rcu_read_unlock();
 	}
 }
 
@@ -4466,6 +4497,17 @@ static void rebind_workers(struct worker_pool *pool)
 						  pool->attrs->cpumask) < 0);
 
 	spin_lock_irq(&pool->lock);
+
+	/*
+	 * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
+	 * w/o a preceding DOWN_PREPARE.  Work around it.  CPU hotplug is
+	 * being reworked and this can go away in time.
+	 */
+	if (!(pool->flags & POOL_DISASSOCIATED)) {
+		spin_unlock_irq(&pool->lock);
+		return;
+	}
+
 	pool->flags &= ~POOL_DISASSOCIATED;
 
 	for_each_pool_worker(worker, pool) {
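
The barrier added in the first hunk can be modeled in user space with C11
atomics.  The sketch below is illustrative only, not workqueue code:
atomic_thread_fence(memory_order_seq_cst) stands in for the kernel's
smp_mb(), atomic_exchange() for the fully ordered test_and_set_bit(), and
pending, event_indicated, queue_side() and execute_side() are hypothetical
names for the two sides of the diagrammed race.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool pending;          /* models the work's PENDING bit   */
static atomic_bool event_indicated;  /* models the flag stored by CPU#0 */

/* CPU#0 side: publish the event, then try to take PENDING.  If it was
 * already set, rely on the in-flight execution to observe the event
 * (lines 1-3 of the diagram). */
static void queue_side(void)
{
	atomic_store_explicit(&event_indicated, true, memory_order_relaxed);
	if (atomic_exchange(&pending, true))
		return;	/* someone else will run the work... we hope */
	/* otherwise we own PENDING and would queue the work here */
}

/* CPU#1 side: clear PENDING, fence, then run the work function (lines
 * 5-8).  Without the fence, the load below may be satisfied before the
 * clear of pending becomes visible, and both sides miss the event. */
static void execute_side(void)
{
	atomic_store_explicit(&pending, false, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);  /* the patch's smp_mb() */
	if (atomic_load_explicit(&event_indicated, memory_order_relaxed))
		puts("event seen, work handles it");
}

int main(void)
{
	queue_side();	/* single-threaded demo; the race needs two CPUs */
	execute_side();
	return 0;
}

Removing the fence re-creates the failure described in the comment: the
load on the execute side may be satisfied before its clear of pending is
visible, so the queue side sees pending still set while the execute side
never sees the event.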
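
The second hunk brackets the locked put in an RCU read-side critical
section because put_pwq() can drop the last reference, after which the
pwq's pool (and the lock being held) is only guaranteed to stay around
while a reader is active.  A minimal user-space sketch of the same
pattern, using liburcu (whose rcu_read_lock()/rcu_read_unlock() mirror the
kernel API); pool_like, pwq_like, pwq_like_put() and put_unlocked() are
hypothetical stand-ins for worker_pool, pool_workqueue, put_pwq() and
put_pwq_unlocked().  Build with -lurcu -lpthread.

#include <urcu.h>	/* userspace RCU: https://liburcu.org */
#include <pthread.h>
#include <stdlib.h>

struct pool_like {
	pthread_mutex_t lock;
};

struct pwq_like {
	struct pool_like *pool;
	int refcnt;
};

/* Drop one reference; called with pwq->pool->lock held, like put_pwq().
 * May free the pwq itself. */
static void pwq_like_put(struct pwq_like *pwq)
{
	if (--pwq->refcnt == 0)
		free(pwq);	/* the kernel defers destruction via RCU */
}

static void put_unlocked(struct pwq_like *pwq)
{
	struct pool_like *pool = pwq->pool;  /* pwq may be gone after put */

	/*
	 * The read-side critical section keeps the RCU-protected pool,
	 * and therefore the lock we are about to release, alive across
	 * the put -- the guarantee the hunk adds for pwq->pool->lock.
	 */
	rcu_read_lock();
	pthread_mutex_lock(&pool->lock);
	pwq_like_put(pwq);
	pthread_mutex_unlock(&pool->lock);
	rcu_read_unlock();
}

int main(void)
{
	rcu_register_thread();	/* liburcu readers must register */

	struct pool_like *pool = calloc(1, sizeof(*pool));
	struct pwq_like *pwq = calloc(1, sizeof(*pwq));

	pthread_mutex_init(&pool->lock, NULL);
	pwq->pool = pool;
	pwq->refcnt = 1;

	put_unlocked(pwq);	/* frees pwq; pool remains valid */

	rcu_unregister_thread();
	free(pool);
	return 0;
}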
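
The third hunk is a plain guard: when DOWN_FAILED arrives without a
preceding DOWN_PREPARE, the pool was never disassociated, and
rebind_workers() must back out instead of rebinding twice.  The same
check-under-lock, early-unlock shape in miniature; struct pool, the flag
bit and rebind() below are hypothetical names, not kernel code.

#include <pthread.h>

#define POOL_DISASSOCIATED	(1 << 0)

struct pool {
	pthread_mutex_t lock;
	unsigned int flags;
};

/* Safe to call even when no disassociation happened: the flag is
 * re-checked under the lock and the function backs out, exactly the
 * early return the hunk adds to rebind_workers(). */
static void rebind(struct pool *pool)
{
	pthread_mutex_lock(&pool->lock);
	if (!(pool->flags & POOL_DISASSOCIATED)) {
		pthread_mutex_unlock(&pool->lock);
		return;	/* spurious DOWN_FAILED w/o DOWN_PREPARE */
	}
	pool->flags &= ~POOL_DISASSOCIATED;
	/* ... rebind the pool's workers here ... */
	pthread_mutex_unlock(&pool->lock);
}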