These changes are the raw update to linux-4.4.6-rt14. Kernel sources
[kvmfornfv.git] / kernel / drivers / acpi / ec.c
index 5e8fed4..b420fb4 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
 #define ACPI_EC_FLAG_BURST     0x10    /* burst mode */
 #define ACPI_EC_FLAG_SCI       0x20    /* EC-SCI occurred */
 
+/*
+ * The SCI_EVT clearing timing is not defined by the ACPI specification.
+ * This leads to lots of practical timing issues for the host EC driver.
+ * The following variations are defined (from the target EC firmware's
+ * perspective):
+ * STATUS: After indicating SCI_EVT edge triggered IRQ to the host, the
+ *         target can clear SCI_EVT at any time so long as the host can see
+ *         the indication by reading the status register (EC_SC). So the
+ *         host should re-check SCI_EVT after the first time the SCI_EVT
+ *         indication is seen, which is the same time the query request
+ *         (QR_EC) is written to the command register (EC_CMD). SCI_EVT set
+ *         at any later time could indicate another event. Normally such
+ *         kind of EC firmware has implemented an event queue and will
+ *         return 0x00 to indicate "no outstanding event".
+ * QUERY: After seeing the query request (QR_EC) written to the command
+ *        register (EC_CMD) by the host and having prepared the responding
+ *        event value in the data register (EC_DATA), the target can safely
+ *        clear SCI_EVT because the target can confirm that the current
+ *        event is being handled by the host. The host then should check
+ *        SCI_EVT right after reading the event response from the data
+ *        register (EC_DATA).
+ * EVENT: After seeing the event response read from the data register
+ *        (EC_DATA) by the host, the target can clear SCI_EVT. As the
+ *        target requires time to notice the change in the data register
+ *        (EC_DATA), the host may be required to wait additional guarding
+ *        time before checking the SCI_EVT again. Such guarding may not be
+ *        necessary if the host is notified via another IRQ.
+ */
+#define ACPI_EC_EVT_TIMING_STATUS      0x00
+#define ACPI_EC_EVT_TIMING_QUERY       0x01
+#define ACPI_EC_EVT_TIMING_EVENT       0x02
+
 /* EC commands */
 enum ec_command {
        ACPI_EC_COMMAND_READ = 0x80,
@@ -70,13 +98,13 @@ enum ec_command {
 
 #define ACPI_EC_DELAY          500     /* Wait 500ms max. during EC ops */
 #define ACPI_EC_UDELAY_GLK     1000    /* Wait 1ms max. to get global lock */
-#define ACPI_EC_MSI_UDELAY     550     /* Wait 550us for MSI EC */
-#define ACPI_EC_UDELAY_POLL    1000    /* Wait 1ms for EC transaction polling */
+#define ACPI_EC_UDELAY_POLL    550     /* Wait 550us for EC transaction polling */
 #define ACPI_EC_CLEAR_MAX      100     /* Maximum number of events to query
                                         * when trying to clear the EC */
 
 enum {
        EC_FLAGS_QUERY_PENDING,         /* Query is pending */
+       EC_FLAGS_QUERY_GUARDING,        /* Guard for SCI_EVT check */
        EC_FLAGS_HANDLERS_INSTALLED,    /* Handlers for GPE and
                                         * OpReg are installed */
        EC_FLAGS_STARTED,               /* Driver is started */
@@ -93,6 +121,16 @@ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
 module_param(ec_delay, uint, 0644);
 MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
 
+static bool ec_busy_polling __read_mostly;
+module_param(ec_busy_polling, bool, 0644);
+MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction");
+
+static unsigned int ec_polling_guard __read_mostly = ACPI_EC_UDELAY_POLL;
+module_param(ec_polling_guard, uint, 0644);
+MODULE_PARM_DESC(ec_polling_guard, "Guard time(us) between EC accesses in polling modes");
+
+static unsigned int ec_event_clearing __read_mostly = ACPI_EC_EVT_TIMING_QUERY;
+
 /*
  * If the number of false interrupts per one transaction exceeds
  * this threshold, will think there is a GPE storm happened and
@@ -121,16 +159,22 @@ struct transaction {
        u8 wlen;
        u8 rlen;
        u8 flags;
-       unsigned long timestamp;
+};
+
+struct acpi_ec_query {
+       struct transaction transaction;
+       struct work_struct work;
+       struct acpi_ec_query_handler *handler;
 };
 
 static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
 static void advance_transaction(struct acpi_ec *ec);
+static void acpi_ec_event_handler(struct work_struct *work);
+static void acpi_ec_event_processor(struct work_struct *work);
 
 struct acpi_ec *boot_ec, *first_ec;
 EXPORT_SYMBOL(first_ec);
 
-static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
 static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
 static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
 static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
@@ -218,7 +262,7 @@ static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
 {
        u8 x = inb(ec->data_addr);
 
-       ec->curr->timestamp = jiffies;
+       ec->timestamp = jiffies;
        ec_dbg_raw("EC_DATA(R) = 0x%2.2x", x);
        return x;
 }
@@ -227,14 +271,14 @@ static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
 {
        ec_dbg_raw("EC_SC(W) = 0x%2.2x", command);
        outb(command, ec->command_addr);
-       ec->curr->timestamp = jiffies;
+       ec->timestamp = jiffies;
 }
 
 static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
 {
        ec_dbg_raw("EC_DATA(W) = 0x%2.2x", data);
        outb(data, ec->data_addr);
-       ec->curr->timestamp = jiffies;
+       ec->timestamp = jiffies;
 }
 
 #ifdef DEBUG
@@ -267,7 +311,7 @@ static inline bool acpi_ec_is_gpe_raised(struct acpi_ec *ec)
        acpi_event_status gpe_status = 0;
 
        (void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status);
-       return (gpe_status & ACPI_EVENT_FLAG_SET) ? true : false;
+       return (gpe_status & ACPI_EVENT_FLAG_STATUS_SET) ? true : false;
 }
 
 static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
@@ -379,19 +423,63 @@ static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
 static void acpi_ec_submit_query(struct acpi_ec *ec)
 {
        if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
-               ec_dbg_req("Event started");
+               ec_dbg_evt("Command(%s) submitted/blocked",
+                          acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
+               ec->nr_pending_queries++;
                schedule_work(&ec->work);
        }
 }
 
 static void acpi_ec_complete_query(struct acpi_ec *ec)
 {
-       if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
+       if (test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
                clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
-               ec_dbg_req("Event stopped");
+               ec_dbg_evt("Command(%s) unblocked",
+                          acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
        }
 }
 
+static bool acpi_ec_guard_event(struct acpi_ec *ec)
+{
+       bool guarded = true;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ec->lock, flags);
+       /*
+        * If firmware SCI_EVT clearing timing is "event", we actually
+        * don't know when the SCI_EVT will be cleared by firmware after
+        * evaluating _Qxx, so we need to re-check SCI_EVT after waiting an
+        * acceptable period.
+        *
+        * The guarding period begins when EC_FLAGS_QUERY_PENDING is
+        * flagged, which means SCI_EVT check has just been performed.
+        * But if the current transaction is ACPI_EC_COMMAND_QUERY, the
+        * guarding should have already been performed (via
+        * EC_FLAGS_QUERY_GUARDING) and should not be applied so that the
+        * ACPI_EC_COMMAND_QUERY transaction can be transitioned into
+        * ACPI_EC_COMMAND_POLL state immediately.
+        */
+       if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
+           ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY ||
+           !test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags) ||
+           (ec->curr && ec->curr->command == ACPI_EC_COMMAND_QUERY))
+               guarded = false;
+       spin_unlock_irqrestore(&ec->lock, flags);
+       return guarded;
+}
+
+static int ec_transaction_polled(struct acpi_ec *ec)
+{
+       unsigned long flags;
+       int ret = 0;
+
+       spin_lock_irqsave(&ec->lock, flags);
+       if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
+               ret = 1;
+       spin_unlock_irqrestore(&ec->lock, flags);
+       return ret;
+}
+
 static int ec_transaction_completed(struct acpi_ec *ec)
 {
        unsigned long flags;
@@ -404,6 +492,22 @@ static int ec_transaction_completed(struct acpi_ec *ec)
        return ret;
 }
 
+static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long flag)
+{
+       ec->curr->flags |= flag;
+       if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
+               if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS &&
+                   flag == ACPI_EC_COMMAND_POLL)
+                       acpi_ec_complete_query(ec);
+               if (ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY &&
+                   flag == ACPI_EC_COMMAND_COMPLETE)
+                       acpi_ec_complete_query(ec);
+               if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
+                   flag == ACPI_EC_COMMAND_COMPLETE)
+                       set_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
+       }
+}
+
 static void advance_transaction(struct acpi_ec *ec)
 {
        struct transaction *t;
@@ -420,6 +524,18 @@ static void advance_transaction(struct acpi_ec *ec)
        acpi_ec_clear_gpe(ec);
        status = acpi_ec_read_status(ec);
        t = ec->curr;
+       /*
+        * Another IRQ or a guarded polling mode advancement is detected,
+        * the next QR_EC submission is then allowed.
+        */
+       if (!t || !(t->flags & ACPI_EC_COMMAND_POLL)) {
+               if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
+                   (!ec->nr_pending_queries ||
+                    test_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags))) {
+                       clear_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
+                       acpi_ec_complete_query(ec);
+               }
+       }
        if (!t)
                goto err;
        if (t->flags & ACPI_EC_COMMAND_POLL) {
@@ -432,17 +548,17 @@ static void advance_transaction(struct acpi_ec *ec)
                        if ((status & ACPI_EC_FLAG_OBF) == 1) {
                                t->rdata[t->ri++] = acpi_ec_read_data(ec);
                                if (t->rlen == t->ri) {
-                                       t->flags |= ACPI_EC_COMMAND_COMPLETE;
+                                       ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
                                        if (t->command == ACPI_EC_COMMAND_QUERY)
-                                               ec_dbg_req("Command(%s) hardware completion",
-                                                          acpi_ec_cmd_string(t->command));
+                                               ec_dbg_evt("Command(%s) completed by hardware",
+                                                          acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
                                        wakeup = true;
                                }
                        } else
                                goto err;
                } else if (t->wlen == t->wi &&
                           (status & ACPI_EC_FLAG_IBF) == 0) {
-                       t->flags |= ACPI_EC_COMMAND_COMPLETE;
+                       ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
                        wakeup = true;
                }
                goto out;
@@ -450,17 +566,15 @@ static void advance_transaction(struct acpi_ec *ec)
                if (EC_FLAGS_QUERY_HANDSHAKE &&
                    !(status & ACPI_EC_FLAG_SCI) &&
                    (t->command == ACPI_EC_COMMAND_QUERY)) {
-                       t->flags |= ACPI_EC_COMMAND_POLL;
-                       acpi_ec_complete_query(ec);
+                       ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
                        t->rdata[t->ri++] = 0x00;
-                       t->flags |= ACPI_EC_COMMAND_COMPLETE;
-                       ec_dbg_req("Command(%s) software completion",
-                                  acpi_ec_cmd_string(t->command));
+                       ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
+                       ec_dbg_evt("Command(%s) completed by software",
+                                  acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
                        wakeup = true;
                } else if ((status & ACPI_EC_FLAG_IBF) == 0) {
                        acpi_ec_write_cmd(ec, t->command);
-                       t->flags |= ACPI_EC_COMMAND_POLL;
-                       acpi_ec_complete_query(ec);
+                       ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
                } else
                        goto err;
                goto out;
@@ -490,8 +604,41 @@ static void start_transaction(struct acpi_ec *ec)
 {
        ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
        ec->curr->flags = 0;
-       ec->curr->timestamp = jiffies;
-       advance_transaction(ec);
+}
+
+static int ec_guard(struct acpi_ec *ec)
+{
+       unsigned long guard = usecs_to_jiffies(ec_polling_guard);
+       unsigned long timeout = ec->timestamp + guard;
+
+       /* Ensure guarding period before polling EC status */
+       do {
+               if (ec_busy_polling) {
+                       /* Perform busy polling */
+                       if (ec_transaction_completed(ec))
+                               return 0;
+                       udelay(jiffies_to_usecs(guard));
+               } else {
+                       /*
+                        * Perform wait polling
+                        * 1. Wait the transaction to be completed by the
+                        *    GPE handler after the transaction enters
+                        *    ACPI_EC_COMMAND_POLL state.
+                        * 2. A special guarding logic is also required
+                        *    for event clearing mode "event" before the
+                        *    transaction enters ACPI_EC_COMMAND_POLL
+                        *    state.
+                        */
+                       if (!ec_transaction_polled(ec) &&
+                           !acpi_ec_guard_event(ec))
+                               break;
+                       if (wait_event_timeout(ec->wait,
+                                              ec_transaction_completed(ec),
+                                              guard))
+                               return 0;
+               }
+       } while (time_before(jiffies, timeout));
+       return -ETIME;
 }
 
 static int ec_poll(struct acpi_ec *ec)
@@ -502,25 +649,11 @@ static int ec_poll(struct acpi_ec *ec)
        while (repeat--) {
                unsigned long delay = jiffies +
                        msecs_to_jiffies(ec_delay);
-               unsigned long usecs = ACPI_EC_UDELAY_POLL;
                do {
-                       /* don't sleep with disabled interrupts */
-                       if (EC_FLAGS_MSI || irqs_disabled()) {
-                               usecs = ACPI_EC_MSI_UDELAY;
-                               udelay(usecs);
-                               if (ec_transaction_completed(ec))
-                                       return 0;
-                       } else {
-                               if (wait_event_timeout(ec->wait,
-                                               ec_transaction_completed(ec),
-                                               usecs_to_jiffies(usecs)))
-                                       return 0;
-                       }
+                       if (!ec_guard(ec))
+                               return 0;
                        spin_lock_irqsave(&ec->lock, flags);
-                       if (time_after(jiffies,
-                                       ec->curr->timestamp +
-                                       usecs_to_jiffies(usecs)))
-                               advance_transaction(ec);
+                       advance_transaction(ec);
                        spin_unlock_irqrestore(&ec->lock, flags);
                } while (time_before(jiffies, delay));
                pr_debug("controller reset, restart transaction\n");
@@ -537,8 +670,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
        unsigned long tmp;
        int ret = 0;
 
-       if (EC_FLAGS_MSI)
-               udelay(ACPI_EC_MSI_UDELAY);
        /* start transaction */
        spin_lock_irqsave(&ec->lock, tmp);
        /* Enable GPE for command processing (IBF=0/OBF=1) */
@@ -552,7 +683,9 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
        ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
        start_transaction(ec);
        spin_unlock_irqrestore(&ec->lock, tmp);
+
        ret = ec_poll(ec);
+
        spin_lock_irqsave(&ec->lock, tmp);
        if (t->irq_count == ec_storm_threshold)
                acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM);
@@ -575,6 +708,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
                return -EINVAL;
        if (t->rdata)
                memset(t->rdata, 0, t->rlen);
+
        mutex_lock(&ec->mutex);
        if (ec->global_lock) {
                status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
@@ -586,8 +720,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
 
        status = acpi_ec_transaction_unlocked(ec, t);
 
-       if (test_bit(EC_FLAGS_COMMAND_STORM, &ec->flags))
-               msleep(1);
        if (ec->global_lock)
                acpi_release_global_lock(glk);
 unlock:
@@ -813,6 +945,23 @@ acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
        return handler;
 }
 
+static struct acpi_ec_query_handler *
+acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
+{
+       struct acpi_ec_query_handler *handler;
+       bool found = false;
+
+       mutex_lock(&ec->mutex);
+       list_for_each_entry(handler, &ec->list, node) {
+               if (value == handler->query_bit) {
+                       found = true;
+                       break;
+               }
+       }
+       mutex_unlock(&ec->mutex);
+       return found ? acpi_ec_get_query_handler(handler) : NULL;
+}
+
 static void acpi_ec_query_handler_release(struct kref *kref)
 {
        struct acpi_ec_query_handler *handler =
@@ -848,14 +997,15 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
 }
 EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);
 
-void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
+static void acpi_ec_remove_query_handlers(struct acpi_ec *ec,
+                                         bool remove_all, u8 query_bit)
 {
        struct acpi_ec_query_handler *handler, *tmp;
        LIST_HEAD(free_list);
 
        mutex_lock(&ec->mutex);
        list_for_each_entry_safe(handler, tmp, &ec->list, node) {
-               if (query_bit == handler->query_bit) {
+               if (remove_all || query_bit == handler->query_bit) {
                        list_del_init(&handler->node);
                        list_add(&handler->node, &free_list);
                }
@@ -864,70 +1014,150 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
        list_for_each_entry_safe(handler, tmp, &free_list, node)
                acpi_ec_put_query_handler(handler);
 }
+
+void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
+{
+       acpi_ec_remove_query_handlers(ec, false, query_bit);
+}
 EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
 
-static void acpi_ec_run(void *cxt)
+static struct acpi_ec_query *acpi_ec_create_query(u8 *pval)
 {
-       struct acpi_ec_query_handler *handler = cxt;
+       struct acpi_ec_query *q;
+       struct transaction *t;
+
+       q = kzalloc(sizeof (struct acpi_ec_query), GFP_KERNEL);
+       if (!q)
+               return NULL;
+       INIT_WORK(&q->work, acpi_ec_event_processor);
+       t = &q->transaction;
+       t->command = ACPI_EC_COMMAND_QUERY;
+       t->rdata = pval;
+       t->rlen = 1;
+       return q;
+}
+
+static void acpi_ec_delete_query(struct acpi_ec_query *q)
+{
+       if (q) {
+               if (q->handler)
+                       acpi_ec_put_query_handler(q->handler);
+               kfree(q);
+       }
+}
+
+static void acpi_ec_event_processor(struct work_struct *work)
+{
+       struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
+       struct acpi_ec_query_handler *handler = q->handler;
 
-       if (!handler)
-               return;
        ec_dbg_evt("Query(0x%02x) started", handler->query_bit);
        if (handler->func)
                handler->func(handler->data);
        else if (handler->handle)
                acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
        ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
-       acpi_ec_put_query_handler(handler);
+       acpi_ec_delete_query(q);
 }
 
 static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
 {
        u8 value = 0;
        int result;
-       acpi_status status;
-       struct acpi_ec_query_handler *handler;
-       struct transaction t = {.command = ACPI_EC_COMMAND_QUERY,
-                               .wdata = NULL, .rdata = &value,
-                               .wlen = 0, .rlen = 1};
+       struct acpi_ec_query *q;
+
+       q = acpi_ec_create_query(&value);
+       if (!q)
+               return -ENOMEM;
 
        /*
         * Query the EC to find out which _Qxx method we need to evaluate.
         * Note that successful completion of the query causes the ACPI_EC_SCI
         * bit to be cleared (and thus clearing the interrupt source).
         */
-       result = acpi_ec_transaction(ec, &t);
+       result = acpi_ec_transaction(ec, &q->transaction);
+       if (!value)
+               result = -ENODATA;
+       if (result)
+               goto err_exit;
+
+       q->handler = acpi_ec_get_query_handler_by_value(ec, value);
+       if (!q->handler) {
+               result = -ENODATA;
+               goto err_exit;
+       }
+
+       /*
+        * It is reported that _Qxx are evaluated in a parallel way on
+        * Windows:
+        * https://bugzilla.kernel.org/show_bug.cgi?id=94411
+        *
+        * Put this log entry before schedule_work() in order to make
+        * it appear before any other log entries generated during the
+        * work queue execution.
+        */
+       ec_dbg_evt("Query(0x%02x) scheduled", value);
+       if (!schedule_work(&q->work)) {
+               ec_dbg_evt("Query(0x%02x) overlapped", value);
+               result = -EBUSY;
+       }
+
+err_exit:
        if (result)
-               return result;
+               acpi_ec_delete_query(q);
        if (data)
                *data = value;
-       if (!value)
-               return -ENODATA;
+       return result;
+}
 
-       mutex_lock(&ec->mutex);
-       list_for_each_entry(handler, &ec->list, node) {
-               if (value == handler->query_bit) {
-                       /* have custom handler for this bit */
-                       handler = acpi_ec_get_query_handler(handler);
-                       ec_dbg_evt("Query(0x%02x) scheduled",
-                                  handler->query_bit);
-                       status = acpi_os_execute((handler->func) ?
-                               OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER,
-                               acpi_ec_run, handler);
-                       if (ACPI_FAILURE(status))
-                               result = -EBUSY;
-                       break;
+static void acpi_ec_check_event(struct acpi_ec *ec)
+{
+       unsigned long flags;
+
+       if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) {
+               if (ec_guard(ec)) {
+                       spin_lock_irqsave(&ec->lock, flags);
+                       /*
+                        * Take care of the SCI_EVT unless no one else is
+                        * taking care of it.
+                        */
+                       if (!ec->curr)
+                               advance_transaction(ec);
+                       spin_unlock_irqrestore(&ec->lock, flags);
                }
        }
-       mutex_unlock(&ec->mutex);
-       return result;
 }
 
-static void acpi_ec_gpe_poller(struct work_struct *work)
+static void acpi_ec_event_handler(struct work_struct *work)
 {
+       unsigned long flags;
        struct acpi_ec *ec = container_of(work, struct acpi_ec, work);
 
-       acpi_ec_query(ec, NULL);
+       ec_dbg_evt("Event started");
+
+       spin_lock_irqsave(&ec->lock, flags);
+       while (ec->nr_pending_queries) {
+               spin_unlock_irqrestore(&ec->lock, flags);
+               (void)acpi_ec_query(ec, NULL);
+               spin_lock_irqsave(&ec->lock, flags);
+               ec->nr_pending_queries--;
+               /*
+                * Before exit, make sure that this work item can be
+                * scheduled again. There might be QR_EC failures, leaving
+                * EC_FLAGS_QUERY_PENDING uncleared and preventing this work
+                * item from being scheduled again.
+                */
+               if (!ec->nr_pending_queries) {
+                       if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
+                           ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY)
+                               acpi_ec_complete_query(ec);
+               }
+       }
+       spin_unlock_irqrestore(&ec->lock, flags);
+
+       ec_dbg_evt("Event stopped");
+
+       acpi_ec_check_event(ec);
 }
 
 static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
@@ -961,7 +1191,7 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
        if (function != ACPI_READ && function != ACPI_WRITE)
                return AE_BAD_PARAMETER;
 
-       if (EC_FLAGS_MSI || bits > 8)
+       if (ec_busy_polling || bits > 8)
                acpi_ec_burst_enable(ec);
 
        for (i = 0; i < bytes; ++i, ++address, ++value)
@@ -969,7 +1199,7 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
                        acpi_ec_read(ec, address, value) :
                        acpi_ec_write(ec, address, *value);
 
-       if (EC_FLAGS_MSI || bits > 8)
+       if (ec_busy_polling || bits > 8)
                acpi_ec_burst_disable(ec);
 
        switch (result) {
@@ -1002,7 +1232,8 @@ static struct acpi_ec *make_acpi_ec(void)
        init_waitqueue_head(&ec->wait);
        INIT_LIST_HEAD(&ec->list);
        spin_lock_init(&ec->lock);
-       INIT_WORK(&ec->work, acpi_ec_gpe_poller);
+       INIT_WORK(&ec->work, acpi_ec_event_handler);
+       ec->timestamp = jiffies;
        return ec;
 }
 
@@ -1162,19 +1393,13 @@ static int acpi_ec_add(struct acpi_device *device)
 static int acpi_ec_remove(struct acpi_device *device)
 {
        struct acpi_ec *ec;
-       struct acpi_ec_query_handler *handler, *tmp;
 
        if (!device)
                return -EINVAL;
 
        ec = acpi_driver_data(device);
        ec_remove_handlers(ec);
-       mutex_lock(&ec->mutex);
-       list_for_each_entry_safe(handler, tmp, &ec->list, node) {
-               list_del(&handler->node);
-               kfree(handler);
-       }
-       mutex_unlock(&ec->mutex);
+       acpi_ec_remove_query_handlers(ec, true, 0);
        release_region(ec->data_addr, 1);
        release_region(ec->command_addr, 1);
        device->driver_data = NULL;
@@ -1237,30 +1462,13 @@ static int ec_validate_ecdt(const struct dmi_system_id *id)
        return 0;
 }
 
-/* MSI EC needs special treatment, enable it */
-static int ec_flag_msi(const struct dmi_system_id *id)
-{
-       pr_debug("Detected MSI hardware, enabling workarounds.\n");
-       EC_FLAGS_MSI = 1;
-       EC_FLAGS_VALIDATE_ECDT = 1;
-       return 0;
-}
-
-/*
- * Clevo M720 notebook actually works ok with IRQ mode, if we lifted
- * the GPE storm threshold back to 20
- */
-static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
-{
-       pr_debug("Setting the EC GPE storm threshold to 20\n");
-       ec_storm_threshold  = 20;
-       return 0;
-}
-
+#if 0
 /*
- * Acer EC firmware refuses to respond QR_EC when SCI_EVT is not set, for
- * which case, we complete the QR_EC without issuing it to the firmware.
- * https://bugzilla.kernel.org/show_bug.cgi?id=86211
+ * Some EC firmware variations refuse to respond to QR_EC when SCI_EVT is
+ * not set, in which case we complete the QR_EC without issuing it to the
+ * firmware.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=82611
+ * https://bugzilla.kernel.org/show_bug.cgi?id=97381
  */
 static int ec_flag_query_handshake(const struct dmi_system_id *id)
 {
@@ -1268,6 +1476,7 @@ static int ec_flag_query_handshake(const struct dmi_system_id *id)
        EC_FLAGS_QUERY_HANDSHAKE = 1;
        return 0;
 }
+#endif
 
 /*
  * On some hardware it is necessary to clear events accumulated by the EC during
@@ -1290,6 +1499,7 @@ static int ec_clear_on_resume(const struct dmi_system_id *id)
 {
        pr_debug("Detected system needing EC poll on resume.\n");
        EC_FLAGS_CLEAR_ON_RESUME = 1;
+       ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
        return 0;
 }
 
@@ -1299,29 +1509,9 @@ static struct dmi_system_id ec_dmi_table[] __initdata = {
        DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
        DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL},
        {
-       ec_flag_msi, "MSI hardware", {
-       DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star")}, NULL},
-       {
-       ec_flag_msi, "MSI hardware", {
-       DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star")}, NULL},
-       {
-       ec_flag_msi, "MSI hardware", {
-       DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star")}, NULL},
-       {
-       ec_flag_msi, "MSI hardware", {
-       DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR")}, NULL},
-       {
-       ec_flag_msi, "Quanta hardware", {
-       DMI_MATCH(DMI_SYS_VENDOR, "Quanta"),
-       DMI_MATCH(DMI_PRODUCT_NAME, "TW8/SW8/DW8"),}, NULL},
-       {
-       ec_flag_msi, "Quanta hardware", {
-       DMI_MATCH(DMI_SYS_VENDOR, "Quanta"),
-       DMI_MATCH(DMI_PRODUCT_NAME, "TW9/SW9"),}, NULL},
-       {
-       ec_flag_msi, "Clevo W350etq", {
-       DMI_MATCH(DMI_SYS_VENDOR, "CLEVO CO."),
-       DMI_MATCH(DMI_PRODUCT_NAME, "W35_37ET"),}, NULL},
+       ec_validate_ecdt, "MSI MS-171F", {
+       DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
+       DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL},
        {
        ec_validate_ecdt, "ASUS hardware", {
        DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
@@ -1329,10 +1519,6 @@ static struct dmi_system_id ec_dmi_table[] __initdata = {
        ec_validate_ecdt, "ASUS hardware", {
        DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL},
        {
-       ec_enlarge_storm_threshold, "CLEVO hardware", {
-       DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
-       DMI_MATCH(DMI_PRODUCT_NAME, "M720T/M730T"),}, NULL},
-       {
        ec_skip_dsdt_scan, "HP Folio 13", {
        DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
        DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13"),}, NULL},
@@ -1343,9 +1529,6 @@ static struct dmi_system_id ec_dmi_table[] __initdata = {
        {
        ec_clear_on_resume, "Samsung hardware", {
        DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
-       {
-       ec_flag_query_handshake, "Acer hardware", {
-       DMI_MATCH(DMI_SYS_VENDOR, "Acer"), }, NULL},
        {},
 };
 
@@ -1427,6 +1610,43 @@ error:
        return -ENODEV;
 }
 
+static int param_set_event_clearing(const char *val, struct kernel_param *kp)
+{
+       int result = 0;
+
+       if (!strncmp(val, "status", sizeof("status") - 1)) {
+               ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
+               pr_info("Assuming SCI_EVT clearing on EC_SC accesses\n");
+       } else if (!strncmp(val, "query", sizeof("query") - 1)) {
+               ec_event_clearing = ACPI_EC_EVT_TIMING_QUERY;
+               pr_info("Assuming SCI_EVT clearing on QR_EC writes\n");
+       } else if (!strncmp(val, "event", sizeof("event") - 1)) {
+               ec_event_clearing = ACPI_EC_EVT_TIMING_EVENT;
+               pr_info("Assuming SCI_EVT clearing on event reads\n");
+       } else
+               result = -EINVAL;
+       return result;
+}
+
+static int param_get_event_clearing(char *buffer, struct kernel_param *kp)
+{
+       switch (ec_event_clearing) {
+       case ACPI_EC_EVT_TIMING_STATUS:
+               return sprintf(buffer, "status");
+       case ACPI_EC_EVT_TIMING_QUERY:
+               return sprintf(buffer, "query");
+       case ACPI_EC_EVT_TIMING_EVENT:
+               return sprintf(buffer, "event");
+       default:
+               return sprintf(buffer, "invalid");
+       }
+       return 0;
+}
+
+module_param_call(ec_event_clearing, param_set_event_clearing, param_get_event_clearing,
+                 NULL, 0644);
+MODULE_PARM_DESC(ec_event_clearing, "Assumed SCI_EVT clearing timing");
+
 static struct acpi_driver acpi_ec_driver = {
        .name = "ec",
        .class = ACPI_EC_CLASS,