These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] / kernel / drivers / char / random.c
index eb47efe..29abd5f 100644 (file)
@@ -409,6 +409,9 @@ static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
 static DECLARE_WAIT_QUEUE_HEAD(urandom_init_wait);
 static struct fasync_struct *fasync;
 
+static DEFINE_SPINLOCK(random_ready_list_lock);
+static LIST_HEAD(random_ready_list);
+
 /**********************************************************************
  *
  * OS independent entropy store.   Here are the functions which handle
@@ -589,6 +592,22 @@ static void fast_mix(struct fast_pool *f)
        f->count++;
 }
 
+static void process_random_ready_list(void)
+{
+       unsigned long flags;
+       struct random_ready_callback *rdy, *tmp;
+
+       spin_lock_irqsave(&random_ready_list_lock, flags);
+       list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
+               struct module *owner = rdy->owner;
+
+               list_del_init(&rdy->list);
+               rdy->func(rdy);
+               module_put(owner);
+       }
+       spin_unlock_irqrestore(&random_ready_list_lock, flags);
+}
+
 /*
  * Credit (or debit) the entropy store with n bits of entropy.
  * Use credit_entropy_bits_safe() if the value comes from userspace
@@ -660,7 +679,8 @@ retry:
                r->entropy_total = 0;
                if (r == &nonblocking_pool) {
                        prandom_reseed_late();
-                       wake_up_interruptible(&urandom_init_wait);
+                       process_random_ready_list();
+                       wake_up_all(&urandom_init_wait);
                        pr_notice("random: %s pool is initialized\n", r->name);
                }
        }
@@ -1240,6 +1260,64 @@ void get_random_bytes(void *buf, int nbytes)
 }
 EXPORT_SYMBOL(get_random_bytes);
 
+/*
+ * Add a callback function that will be invoked when the nonblocking
+ * pool is initialised.
+ *
+ * returns: 0 if callback is successfully added
+ *         -EALREADY if pool is already initialised (callback not called)
+ *         -ENOENT if module for callback is not alive
+ */
+int add_random_ready_callback(struct random_ready_callback *rdy)
+{
+       struct module *owner;
+       unsigned long flags;
+       int err = -EALREADY;
+
+       if (likely(nonblocking_pool.initialized))
+               return err;
+
+       owner = rdy->owner;
+       if (!try_module_get(owner))
+               return -ENOENT;
+
+       spin_lock_irqsave(&random_ready_list_lock, flags);
+       if (nonblocking_pool.initialized)
+               goto out;
+
+       owner = NULL;
+
+       list_add(&rdy->list, &random_ready_list);
+       err = 0;
+
+out:
+       spin_unlock_irqrestore(&random_ready_list_lock, flags);
+
+       module_put(owner);
+
+       return err;
+}
+EXPORT_SYMBOL(add_random_ready_callback);
+
+/*
+ * Delete a previously registered readiness callback function.
+ */
+void del_random_ready_callback(struct random_ready_callback *rdy)
+{
+       unsigned long flags;
+       struct module *owner = NULL;
+
+       spin_lock_irqsave(&random_ready_list_lock, flags);
+       if (!list_empty(&rdy->list)) {
+               list_del_init(&rdy->list);
+               owner = rdy->owner;
+       }
+       spin_unlock_irqrestore(&random_ready_list_lock, flags);
+
+       module_put(owner);
+}
+EXPORT_SYMBOL(del_random_ready_callback);
+
 /*
  * This function will use the architecture-specific hardware random
  * number generator if it is available.  The arch-specific hw RNG will