2 * linux/drivers/mmc/core/core.c
4 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
5 * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
6 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
7 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/completion.h>
17 #include <linux/device.h>
18 #include <linux/delay.h>
19 #include <linux/pagemap.h>
20 #include <linux/err.h>
21 #include <linux/leds.h>
22 #include <linux/scatterlist.h>
23 #include <linux/log2.h>
24 #include <linux/regulator/consumer.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm_wakeup.h>
27 #include <linux/suspend.h>
28 #include <linux/fault-inject.h>
29 #include <linux/random.h>
30 #include <linux/slab.h>
33 #include <linux/mmc/card.h>
34 #include <linux/mmc/host.h>
35 #include <linux/mmc/mmc.h>
36 #include <linux/mmc/sd.h>
37 #include <linux/mmc/slot-gpio.h>
49 /* If the device is not responding */
50 #define MMC_CORE_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
53 * Background operations can take a long time, depending on the housekeeping
54 * operations the card has to perform.
56 #define MMC_BKOPS_MAX_TIMEOUT (4 * 60 * 1000) /* max time to wait in ms */
58 static struct workqueue_struct *workqueue;
59 static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
62 * Enabling software CRCs on the data blocks can be a significant (30%)
63 * performance cost, and for other reasons may not always be desired.
64 * So we allow it to be disabled.
67 module_param(use_spi_crc, bool, 0);
70 * Internal function. Schedule delayed work in the MMC work queue.
72 static int mmc_schedule_delayed_work(struct delayed_work *work,
75 return queue_delayed_work(workqueue, work, delay);
79 * Internal function. Flush all scheduled work from the MMC work queue.
81 static void mmc_flush_scheduled_work(void)
83 flush_workqueue(workqueue);
86 #ifdef CONFIG_FAIL_MMC_REQUEST
89 * Internal function. Inject random data errors.
90 * If mmc_data is NULL no errors are injected.
92 static void mmc_should_fail_request(struct mmc_host *host,
93 struct mmc_request *mrq)
95 struct mmc_command *cmd = mrq->cmd;
96 struct mmc_data *data = mrq->data;
97 static const int data_errors[] = {
106 if (cmd->error || data->error ||
107 !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
110 data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
111 data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
114 #else /* CONFIG_FAIL_MMC_REQUEST */
116 static inline void mmc_should_fail_request(struct mmc_host *host,
117 struct mmc_request *mrq)
121 #endif /* CONFIG_FAIL_MMC_REQUEST */
124 * mmc_request_done - finish processing an MMC request
125 * @host: MMC host which completed request
126 * @mrq: MMC request which completed
128 * MMC drivers should call this function when they have completed
129 * their processing of a request.
131 void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
133 struct mmc_command *cmd = mrq->cmd;
134 int err = cmd->error;
136 if (err && cmd->retries && mmc_host_is_spi(host)) {
137 if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
141 if (err && cmd->retries && !mmc_card_removed(host->card)) {
143 * Request starter must handle retries - see
144 * mmc_wait_for_req_done().
149 mmc_should_fail_request(host, mrq);
151 led_trigger_event(host->led, LED_OFF);
154 pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
155 mmc_hostname(host), mrq->sbc->opcode,
157 mrq->sbc->resp[0], mrq->sbc->resp[1],
158 mrq->sbc->resp[2], mrq->sbc->resp[3]);
161 pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
162 mmc_hostname(host), cmd->opcode, err,
163 cmd->resp[0], cmd->resp[1],
164 cmd->resp[2], cmd->resp[3]);
167 pr_debug("%s: %d bytes transferred: %d\n",
169 mrq->data->bytes_xfered, mrq->data->error);
173 pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
174 mmc_hostname(host), mrq->stop->opcode,
176 mrq->stop->resp[0], mrq->stop->resp[1],
177 mrq->stop->resp[2], mrq->stop->resp[3]);
183 mmc_host_clk_release(host);
187 EXPORT_SYMBOL(mmc_request_done);
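/*
 * Illustrative sketch (not part of this driver): a host controller
 * driver typically calls mmc_request_done() from its interrupt handler
 * once the controller signals completion. "struct my_host" and its
 * fields are hypothetical.
 */
#if 0
struct my_host {
	struct mmc_host		*mmc;
	struct mmc_request	*mrq;
};

static irqreturn_t my_host_irq(int irq, void *dev_id)
{
	struct my_host *host = dev_id;
	struct mmc_request *mrq = host->mrq;

	/* read controller status, fill mrq->cmd->resp[] / mrq->data->error */
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
	return IRQ_HANDLED;
}
#endif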
189 static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
191 #ifdef CONFIG_MMC_DEBUG
193 struct scatterlist *sg;
195 if (mmc_card_removed(host->card))
199 pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
200 mmc_hostname(host), mrq->sbc->opcode,
201 mrq->sbc->arg, mrq->sbc->flags);
204 pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
205 mmc_hostname(host), mrq->cmd->opcode,
206 mrq->cmd->arg, mrq->cmd->flags);
209 pr_debug("%s: blksz %d blocks %d flags %08x "
210 "tsac %d ms nsac %d\n",
211 mmc_hostname(host), mrq->data->blksz,
212 mrq->data->blocks, mrq->data->flags,
213 mrq->data->timeout_ns / 1000000,
214 mrq->data->timeout_clks);
218 pr_debug("%s: CMD%u arg %08x flags %08x\n",
219 mmc_hostname(host), mrq->stop->opcode,
220 mrq->stop->arg, mrq->stop->flags);
223 WARN_ON(!host->claimed);
232 BUG_ON(mrq->data->blksz > host->max_blk_size);
233 BUG_ON(mrq->data->blocks > host->max_blk_count);
234 BUG_ON(mrq->data->blocks * mrq->data->blksz >
237 #ifdef CONFIG_MMC_DEBUG
239 for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
241 BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
244 mrq->cmd->data = mrq->data;
245 mrq->data->error = 0;
246 mrq->data->mrq = mrq;
248 mrq->data->stop = mrq->stop;
249 mrq->stop->error = 0;
250 mrq->stop->mrq = mrq;
253 mmc_host_clk_hold(host);
254 led_trigger_event(host->led, LED_FULL);
255 host->ops->request(host, mrq);
261 * mmc_start_bkops - start BKOPS for supported cards
262 * @card: MMC card to start BKOPS
263 * @from_exception: A flag to indicate if this function was
264 * called due to an exception raised by the card
266 * Start background operations whenever requested.
267 * When the urgent BKOPS bit is set in a R1 command response
268 * then background operations should be started immediately.
270 void mmc_start_bkops(struct mmc_card *card, bool from_exception)
274 bool use_busy_signal;
278 if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
281 err = mmc_read_bkops_status(card);
283 pr_err("%s: Failed to read bkops status: %d\n",
284 mmc_hostname(card->host), err);
288 if (!card->ext_csd.raw_bkops_status)
291 if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
295 mmc_claim_host(card->host);
296 if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
297 timeout = MMC_BKOPS_MAX_TIMEOUT;
298 use_busy_signal = true;
301 use_busy_signal = false;
304 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
305 EXT_CSD_BKOPS_START, 1, timeout,
306 use_busy_signal, true, false);
308 pr_warn("%s: Error %d starting bkops\n",
309 mmc_hostname(card->host), err);
314 * For urgent BKOPS status (LEVEL_2 and higher), BKOPS
315 * is executed synchronously; otherwise the operation is
316 * still in progress.
318 if (!use_busy_signal)
319 mmc_card_set_doing_bkops(card);
321 mmc_release_host(card->host);
323 EXPORT_SYMBOL(mmc_start_bkops);
326 * mmc_wait_data_done() - done callback for data request
327 * @mrq: done data request
329 * Wakes up the mmc context; passed as a done callback to the host controller driver
331 static void mmc_wait_data_done(struct mmc_request *mrq)
333 struct mmc_context_info *context_info = &mrq->host->context_info;
335 context_info->is_done_rcv = true;
336 wake_up_interruptible(&context_info->wait);
339 static void mmc_wait_done(struct mmc_request *mrq)
341 complete(&mrq->completion);
345 * __mmc_start_data_req() - start a data request
346 * @host: MMC host to start the request
347 * @mrq: data request to start
349 * Sets the done callback to be called when request is completed by the card.
350 * Starts data mmc request execution
352 static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
356 mrq->done = mmc_wait_data_done;
359 err = mmc_start_request(host, mrq);
361 mrq->cmd->error = err;
362 mmc_wait_data_done(mrq);
368 static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
372 init_completion(&mrq->completion);
373 mrq->done = mmc_wait_done;
375 err = mmc_start_request(host, mrq);
377 mrq->cmd->error = err;
378 complete(&mrq->completion);
385 * mmc_wait_for_data_req_done() - wait for the request to complete
386 * @host: MMC host to prepare the command.
387 * @mrq: MMC request to wait for
389 * Blocks the MMC context until the host controller acks the end of data
390 * request execution or a new request notification arrives from the block layer.
391 * Handles command retries.
393 * Returns enum mmc_blk_status after checking errors.
395 static int mmc_wait_for_data_req_done(struct mmc_host *host,
396 struct mmc_request *mrq,
397 struct mmc_async_req *next_req)
399 struct mmc_command *cmd;
400 struct mmc_context_info *context_info = &host->context_info;
405 wait_event_interruptible(context_info->wait,
406 (context_info->is_done_rcv ||
407 context_info->is_new_req));
408 spin_lock_irqsave(&context_info->lock, flags);
409 context_info->is_waiting_last_req = false;
410 spin_unlock_irqrestore(&context_info->lock, flags);
411 if (context_info->is_done_rcv) {
412 context_info->is_done_rcv = false;
413 context_info->is_new_req = false;
416 if (!cmd->error || !cmd->retries ||
417 mmc_card_removed(host->card)) {
418 err = host->areq->err_check(host->card,
420 break; /* return err */
422 pr_info("%s: req failed (CMD%u): %d, retrying...\n",
424 cmd->opcode, cmd->error);
427 host->ops->request(host, mrq);
428 continue; /* wait for done/new event again */
430 } else if (context_info->is_new_req) {
431 context_info->is_new_req = false;
433 err = MMC_BLK_NEW_REQUEST;
434 break; /* return err */
441 static void mmc_wait_for_req_done(struct mmc_host *host,
442 struct mmc_request *mrq)
444 struct mmc_command *cmd;
447 wait_for_completion(&mrq->completion);
452 * If the host has timed out waiting for the sanitize operation
453 * to complete, the card might still be in the programming state,
454 * so let's try to bring the card out of the programming state.
457 if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
458 if (!mmc_interrupt_hpi(host->card)) {
459 pr_warn("%s: %s: Interrupted sanitize\n",
460 mmc_hostname(host), __func__);
464 pr_err("%s: %s: Failed to interrupt sanitize\n",
465 mmc_hostname(host), __func__);
468 if (!cmd->error || !cmd->retries ||
469 mmc_card_removed(host->card))
472 pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
473 mmc_hostname(host), cmd->opcode, cmd->error);
476 host->ops->request(host, mrq);
481 * mmc_pre_req - Prepare for a new request
482 * @host: MMC host to prepare command
483 * @mrq: MMC request to prepare for
484 * @is_first_req: true if there is no previous started request
485 * that may run in parallel to this call, otherwise false
487 * mmc_pre_req() is called prior to mmc_start_req() to let the
488 * host prepare for the new request. Preparation of a request may be
489 * performed while another request is running on the host.
491 static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
494 if (host->ops->pre_req) {
495 mmc_host_clk_hold(host);
496 host->ops->pre_req(host, mrq, is_first_req);
497 mmc_host_clk_release(host);
502 * mmc_post_req - Post process a completed request
503 * @host: MMC host to post process command
504 * @mrq: MMC request to post process for
505 * @err: Error, if non zero, clean up any resources made in pre_req
507 * Let the host post process a completed request. Post processing of
508 * a request may be performed while another request is running.
510 static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
513 if (host->ops->post_req) {
514 mmc_host_clk_hold(host);
515 host->ops->post_req(host, mrq, err);
516 mmc_host_clk_release(host);
521 * mmc_start_req - start a non-blocking request
522 * @host: MMC host to start command
523 * @areq: async request to start
524 * @error: out parameter returns 0 for success, otherwise non zero
526 * Start a new MMC custom command request for a host.
527 * If there is an ongoing async request, wait for completion
528 * of that request, then start the new one and return.
529 * Does not wait for the new request to complete.
531 * Returns the completed request, NULL in case of none completed.
532 * Wait for an ongoing request (previously started) to complete and
533 * return the completed request. If there is no ongoing request, NULL
534 * is returned without waiting. NULL is not an error condition.
536 struct mmc_async_req *mmc_start_req(struct mmc_host *host,
537 struct mmc_async_req *areq, int *error)
541 struct mmc_async_req *data = host->areq;
543 /* Prepare a new request */
545 mmc_pre_req(host, areq->mrq, !host->areq);
548 err = mmc_wait_for_data_req_done(host, host->areq->mrq, areq);
549 if (err == MMC_BLK_NEW_REQUEST) {
553 * The previous request was not completed; nothing to return.
559 * Check BKOPS urgency for each R1 response
561 if (host->card && mmc_card_mmc(host->card) &&
562 ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
563 (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
564 (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {
566 /* Cancel the prepared request */
568 mmc_post_req(host, areq->mrq, -EINVAL);
570 mmc_start_bkops(host->card, true);
572 /* prepare the request again */
574 mmc_pre_req(host, areq->mrq, !host->areq);
579 start_err = __mmc_start_data_req(host, areq->mrq);
582 mmc_post_req(host, host->areq->mrq, 0);
584 /* Cancel a prepared request if it was not started. */
585 if ((err || start_err) && areq)
586 mmc_post_req(host, areq->mrq, -EINVAL);
597 EXPORT_SYMBOL(mmc_start_req);
600 * mmc_wait_for_req - start a request and wait for completion
601 * @host: MMC host to start command
602 * @mrq: MMC request to start
604 * Start a new MMC custom command request for a host, and wait
605 * for the command to complete. Does not attempt to parse the
606 * response.
608 void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
610 __mmc_start_req(host, mrq);
611 mmc_wait_for_req_done(host, mrq);
613 EXPORT_SYMBOL(mmc_wait_for_req);
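/*
 * Illustrative sketch (not part of this driver): issuing a single-block
 * read with mmc_wait_for_req(). The caller must already have claimed
 * the host; the function name and "buf" are illustrative assumptions.
 */
#if 0
static int my_read_block(struct mmc_card *card, void *buf, u32 blk_addr)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;

	cmd.opcode = MMC_READ_SINGLE_BLOCK;
	cmd.arg = blk_addr;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 512;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, buf, 512);
	mmc_set_data_timeout(&data, card);

	mrq.cmd = &cmd;
	mrq.data = &data;

	mmc_wait_for_req(card->host, &mrq);
	return cmd.error ? cmd.error : data.error;
}
#endif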
616 * mmc_interrupt_hpi - issue a High Priority Interrupt
617 * @card: the MMC card associated with the HPI transfer
619 * Issues a High Priority Interrupt, then polls the card status
620 * until the card is out of the programming state.
622 int mmc_interrupt_hpi(struct mmc_card *card)
626 unsigned long prg_wait;
630 if (!card->ext_csd.hpi_en) {
631 pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
635 mmc_claim_host(card->host);
636 err = mmc_send_status(card, &status);
638 pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
642 switch (R1_CURRENT_STATE(status)) {
648 * In idle and transfer states, HPI is not needed and the caller
649 * can issue the next intended command immediately
655 /* In all other states, it's illegal to issue HPI */
656 pr_debug("%s: HPI cannot be sent. Card state=%d\n",
657 mmc_hostname(card->host), R1_CURRENT_STATE(status));
662 err = mmc_send_hpi_cmd(card, &status);
666 prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
668 err = mmc_send_status(card, &status);
670 if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
672 if (time_after(jiffies, prg_wait))
677 mmc_release_host(card->host);
680 EXPORT_SYMBOL(mmc_interrupt_hpi);
683 * mmc_wait_for_cmd - start a command and wait for completion
684 * @host: MMC host to start command
685 * @cmd: MMC command to start
686 * @retries: maximum number of retries
688 * Start a new MMC command for a host, and wait for the command
689 * to complete. Return any error that occurred while the command
690 * was executing. Do not attempt to parse the response.
692 int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
694 struct mmc_request mrq = {NULL};
696 WARN_ON(!host->claimed);
698 memset(cmd->resp, 0, sizeof(cmd->resp));
699 cmd->retries = retries;
704 mmc_wait_for_req(host, &mrq);
709 EXPORT_SYMBOL(mmc_wait_for_cmd);
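/*
 * Illustrative sketch (not part of this driver): querying card status
 * with mmc_wait_for_cmd(), mirroring the SEND_STATUS pattern used in
 * mmc_do_erase() below.
 */
#if 0
static int my_get_status(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 3);	/* up to 3 retries */
	if (!err)
		*status = cmd.resp[0];
	return err;
}
#endif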
712 * mmc_stop_bkops - stop ongoing BKOPS
713 * @card: MMC card to check BKOPS
715 * Send HPI command to stop ongoing background operations to
716 * allow rapid servicing of foreground operations, e.g. read/
717 * writes. Wait until the card comes out of the programming state
718 * to avoid errors in servicing read/write requests.
720 int mmc_stop_bkops(struct mmc_card *card)
725 err = mmc_interrupt_hpi(card);
728 * If err is -EINVAL, the card does not support HPI;
729 * in that case the card will complete the BKOPS on its own.
731 if (!err || (err == -EINVAL)) {
732 mmc_card_clr_doing_bkops(card);
738 EXPORT_SYMBOL(mmc_stop_bkops);
740 int mmc_read_bkops_status(struct mmc_card *card)
745 mmc_claim_host(card->host);
746 err = mmc_get_ext_csd(card, &ext_csd);
747 mmc_release_host(card->host);
751 card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
752 card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
756 EXPORT_SYMBOL(mmc_read_bkops_status);
759 * mmc_set_data_timeout - set the timeout for a data command
760 * @data: data phase for command
761 * @card: the MMC card associated with the data transfer
763 * Computes the data timeout parameters according to the
764 * correct algorithm given the card type.
766 void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
771 * SDIO cards only define an upper 1 s limit on access.
773 if (mmc_card_sdio(card)) {
774 data->timeout_ns = 1000000000;
775 data->timeout_clks = 0;
780 * SD cards use a 100 multiplier rather than 10
782 mult = mmc_card_sd(card) ? 100 : 10;
785 * Scale up the multiplier (and therefore the timeout) by
786 * the r2w factor for writes.
788 if (data->flags & MMC_DATA_WRITE)
789 mult <<= card->csd.r2w_factor;
791 data->timeout_ns = card->csd.tacc_ns * mult;
792 data->timeout_clks = card->csd.tacc_clks * mult;
795 * SD cards also have an upper limit on the timeout.
797 if (mmc_card_sd(card)) {
798 unsigned int timeout_us, limit_us;
800 timeout_us = data->timeout_ns / 1000;
801 if (mmc_host_clk_rate(card->host))
802 timeout_us += data->timeout_clks * 1000 /
803 (mmc_host_clk_rate(card->host) / 1000);
805 if (data->flags & MMC_DATA_WRITE)
807 * The MMC spec "It is strongly recommended
808 * for hosts to implement more than 500ms
809 * timeout value even if the card indicates
810 * the 250ms maximum busy length." Even the
811 * previous value of 300ms is known to be
812 * insufficient for some cards.
819 * SDHC cards always use these fixed values.
821 if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
822 data->timeout_ns = limit_us * 1000;
823 data->timeout_clks = 0;
826 /* assign limit value if invalid */
828 data->timeout_ns = limit_us * 1000;
832 * Some cards require longer data read timeout than indicated in CSD.
833 * Address this by setting the read timeout to a "reasonably high"
834 * value. For the cards tested, 300ms has proven enough. If necessary,
835 * this value can be increased if other problematic cards require this.
837 if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
838 data->timeout_ns = 300000000;
839 data->timeout_clks = 0;
843 * Some cards need very high timeouts if driven in SPI mode.
844 * The worst observed timeout was 900ms after writing a
845 * continuous stream of data until the internal logic
846 * overflowed.
848 if (mmc_host_is_spi(card->host)) {
849 if (data->flags & MMC_DATA_WRITE) {
850 if (data->timeout_ns < 1000000000)
851 data->timeout_ns = 1000000000; /* 1s */
853 if (data->timeout_ns < 100000000)
854 data->timeout_ns = 100000000; /* 100ms */
858 EXPORT_SYMBOL(mmc_set_data_timeout);
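/*
 * Worked example of the calculation above (values assumed purely for
 * illustration): an SD card with csd.tacc_ns = 1000000 (1 ms),
 * csd.tacc_clks = 0 and csd.r2w_factor = 2 gets mult = 100 << 2 = 400
 * for a write, hence timeout_ns = 400 ms before the SD upper limit and
 * the SDHC fixed values are applied.
 */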
861 * mmc_align_data_size - pads a transfer size to a more optimal value
862 * @card: the MMC card associated with the data transfer
863 * @sz: original transfer size
865 * Pads the original data size with a number of extra bytes in
866 * order to avoid controller bugs and/or performance hits
867 * (e.g. some controllers revert to PIO for certain sizes).
869 * Returns the improved size, which might be unmodified.
871 * Note that this function is only relevant when issuing a
872 * single scatter gather entry.
874 unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
877 * FIXME: We don't have a system for the controller to tell
878 * the core about its problems yet, so for now we just 32-bit
879 * align the size.
881 sz = ((sz + 3) / 4) * 4;
885 EXPORT_SYMBOL(mmc_align_data_size);
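/*
 * Example of the rounding above: mmc_align_data_size(card, 13) returns
 * 16; a size that is already a multiple of 4, e.g. 512, is returned
 * unmodified.
 */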
888 * __mmc_claim_host - exclusively claim a host
889 * @host: mmc host to claim
890 * @abort: whether or not the operation should be aborted
892 * Claim a host for a set of operations. If @abort is non-NULL and
893 * dereferences to a non-zero value, then this will return prematurely with
894 * that non-zero value without acquiring the lock. Returns zero
895 * with the lock held otherwise.
897 int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
899 DECLARE_WAITQUEUE(wait, current);
906 add_wait_queue(&host->wq, &wait);
907 spin_lock_irqsave(&host->lock, flags);
909 set_current_state(TASK_UNINTERRUPTIBLE);
910 stop = abort ? atomic_read(abort) : 0;
911 if (stop || !host->claimed || host->claimer == current)
913 spin_unlock_irqrestore(&host->lock, flags);
915 spin_lock_irqsave(&host->lock, flags);
917 set_current_state(TASK_RUNNING);
920 host->claimer = current;
921 host->claim_cnt += 1;
922 if (host->claim_cnt == 1)
926 spin_unlock_irqrestore(&host->lock, flags);
927 remove_wait_queue(&host->wq, &wait);
930 pm_runtime_get_sync(mmc_dev(host));
934 EXPORT_SYMBOL(__mmc_claim_host);
937 * mmc_release_host - release a host
938 * @host: mmc host to release
940 * Release a MMC host, allowing others to claim the host
941 * for their operations.
943 void mmc_release_host(struct mmc_host *host)
947 WARN_ON(!host->claimed);
949 spin_lock_irqsave(&host->lock, flags);
950 if (--host->claim_cnt) {
951 /* Release for nested claim */
952 spin_unlock_irqrestore(&host->lock, flags);
955 host->claimer = NULL;
956 spin_unlock_irqrestore(&host->lock, flags);
958 pm_runtime_mark_last_busy(mmc_dev(host));
959 pm_runtime_put_autosuspend(mmc_dev(host));
962 EXPORT_SYMBOL(mmc_release_host);
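/*
 * Illustrative sketch (not part of this driver): the usual
 * claim/release bracket around card commands; mmc_claim_host() is the
 * non-aborting wrapper around __mmc_claim_host().
 */
#if 0
static int my_query_card(struct mmc_card *card)
{
	u32 status;
	int err;

	mmc_claim_host(card->host);	/* exclusive access to the host */
	err = mmc_send_status(card, &status);
	mmc_release_host(card->host);

	return err;
}
#endif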
965 * This is a helper function, which fetches a runtime pm reference for the
966 * card device and also claims the host.
968 void mmc_get_card(struct mmc_card *card)
970 pm_runtime_get_sync(&card->dev);
971 mmc_claim_host(card->host);
973 EXPORT_SYMBOL(mmc_get_card);
976 * This is a helper function, which releases the host and drops the runtime
977 * pm reference for the card device.
979 void mmc_put_card(struct mmc_card *card)
981 mmc_release_host(card->host);
982 pm_runtime_mark_last_busy(&card->dev);
983 pm_runtime_put_autosuspend(&card->dev);
985 EXPORT_SYMBOL(mmc_put_card);
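/*
 * Illustrative sketch: mmc_get_card()/mmc_put_card() additionally keep
 * the card runtime-resumed for the duration of the access.
 */
#if 0
static void my_card_access(struct mmc_card *card)
{
	mmc_get_card(card);
	/* ... issue requests against the card ... */
	mmc_put_card(card);
}
#endif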
988 * Internal function that does the actual ios call to the host driver,
989 * optionally printing some debug output.
991 static inline void mmc_set_ios(struct mmc_host *host)
993 struct mmc_ios *ios = &host->ios;
995 pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
996 "width %u timing %u\n",
997 mmc_hostname(host), ios->clock, ios->bus_mode,
998 ios->power_mode, ios->chip_select, ios->vdd,
999 ios->bus_width, ios->timing);
1002 mmc_set_ungated(host);
1003 host->ops->set_ios(host, ios);
1007 * Control chip select pin on a host.
1009 void mmc_set_chip_select(struct mmc_host *host, int mode)
1011 mmc_host_clk_hold(host);
1012 host->ios.chip_select = mode;
1014 mmc_host_clk_release(host);
1018 * Sets the host clock to the highest possible frequency that
1019 * is below "hz".
1021 static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
1023 WARN_ON(hz && hz < host->f_min);
1025 if (hz > host->f_max)
1028 host->ios.clock = hz;
1032 void mmc_set_clock(struct mmc_host *host, unsigned int hz)
1034 mmc_host_clk_hold(host);
1035 __mmc_set_clock(host, hz);
1036 mmc_host_clk_release(host);
1039 #ifdef CONFIG_MMC_CLKGATE
1041 * This gates the clock by setting it to 0 Hz.
1043 void mmc_gate_clock(struct mmc_host *host)
1045 unsigned long flags;
1047 spin_lock_irqsave(&host->clk_lock, flags);
1048 host->clk_old = host->ios.clock;
1049 host->ios.clock = 0;
1050 host->clk_gated = true;
1051 spin_unlock_irqrestore(&host->clk_lock, flags);
1056 * This restores the clock from gating by using the cached
1057 * clock value.
1059 void mmc_ungate_clock(struct mmc_host *host)
1062 * We should previously have gated the clock, so the clock shall
1063 * be 0 here! The clock may however be 0 during initialization,
1064 * when some request operations are performed before setting
1065 * the frequency. When ungate is requested in that situation
1066 * we just ignore the call.
1068 if (host->clk_old) {
1069 BUG_ON(host->ios.clock);
1070 /* This call will also set host->clk_gated to false */
1071 __mmc_set_clock(host, host->clk_old);
1075 void mmc_set_ungated(struct mmc_host *host)
1077 unsigned long flags;
1080 * We've been given a new frequency while the clock is gated,
1081 * so make sure we regard this as ungating it.
1083 spin_lock_irqsave(&host->clk_lock, flags);
1084 host->clk_gated = false;
1085 spin_unlock_irqrestore(&host->clk_lock, flags);
1089 void mmc_set_ungated(struct mmc_host *host)
1094 int mmc_execute_tuning(struct mmc_card *card)
1096 struct mmc_host *host = card->host;
1100 if (!host->ops->execute_tuning)
1103 if (mmc_card_mmc(card))
1104 opcode = MMC_SEND_TUNING_BLOCK_HS200;
1106 opcode = MMC_SEND_TUNING_BLOCK;
1108 mmc_host_clk_hold(host);
1109 err = host->ops->execute_tuning(host, opcode);
1110 mmc_host_clk_release(host);
1113 pr_err("%s: tuning execution failed\n", mmc_hostname(host));
1119 * Change the bus mode (open drain/push-pull) of a host.
1121 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
1123 mmc_host_clk_hold(host);
1124 host->ios.bus_mode = mode;
1126 mmc_host_clk_release(host);
1130 * Change data bus width of a host.
1132 void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
1134 mmc_host_clk_hold(host);
1135 host->ios.bus_width = width;
1137 mmc_host_clk_release(host);
1141 * Set initial state after a power cycle or a hw_reset.
1143 void mmc_set_initial_state(struct mmc_host *host)
1145 if (mmc_host_is_spi(host))
1146 host->ios.chip_select = MMC_CS_HIGH;
1148 host->ios.chip_select = MMC_CS_DONTCARE;
1149 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1150 host->ios.bus_width = MMC_BUS_WIDTH_1;
1151 host->ios.timing = MMC_TIMING_LEGACY;
1157 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
1158 * @vdd: voltage (mV)
1159 * @low_bits: prefer low bits in boundary cases
1161 * This function returns the OCR bit number according to the provided @vdd
1162 * value. If conversion is not possible, a negative errno value is returned.
1164 * Depending on the @low_bits flag the function prefers low or high OCR bits
1165 * on boundary voltages. For example,
1166 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
1167 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
1169 * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
1171 static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
1173 const int max_bit = ilog2(MMC_VDD_35_36);
1176 if (vdd < 1650 || vdd > 3600)
1179 if (vdd >= 1650 && vdd <= 1950)
1180 return ilog2(MMC_VDD_165_195);
1185 /* Base 2000 mV, step 100 mV, bit's base 8. */
1186 bit = (vdd - 2000) / 100 + 8;
1193 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
1194 * @vdd_min: minimum voltage value (mV)
1195 * @vdd_max: maximum voltage value (mV)
1197 * This function returns the OCR mask bits according to the provided @vdd_min
1198 * and @vdd_max values. If conversion is not possible the function returns 0.
1200 * Notes wrt boundary cases:
1201 * This function sets the OCR bits for all boundary voltages, for example
1202 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
1203 * MMC_VDD_34_35 mask.
1205 u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
1209 if (vdd_max < vdd_min)
1212 /* Prefer high bits for the boundary vdd_max values. */
1213 vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
1217 /* Prefer low bits for the boundary vdd_min values. */
1218 vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
1222 /* Fill the mask, from max bit to min bit. */
1223 while (vdd_max >= vdd_min)
1224 mask |= 1 << vdd_max--;
1228 EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
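/*
 * Example (from the boundary-case note above):
 * mmc_vddrange_to_ocrmask(3300, 3400) returns
 * MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35, since both boundary
 * voltages contribute their neighbouring OCR bits.
 */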
1233 * mmc_of_parse_voltage - return mask of supported voltages
1234 * @np: The device node to be parsed.
1235 * @mask: mask of voltages available for MMC/SD/SDIO
1237 * 1. Return zero on success.
1238 * 2. Return negative errno: voltage-range is invalid.
1240 int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
1242 const u32 *voltage_ranges;
1245 voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
1246 num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
1247 if (!voltage_ranges || !num_ranges) {
1248 pr_info("%s: voltage-ranges unspecified\n", np->full_name);
1252 for (i = 0; i < num_ranges; i++) {
1253 const int j = i * 2;
1256 ocr_mask = mmc_vddrange_to_ocrmask(
1257 be32_to_cpu(voltage_ranges[j]),
1258 be32_to_cpu(voltage_ranges[j + 1]));
1260 pr_err("%s: voltage-range #%d is invalid\n",
1269 EXPORT_SYMBOL(mmc_of_parse_voltage);
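/*
 * Example of the device-tree property parsed above (values are
 * illustrative):
 *
 *	voltage-ranges = <3300 3400>, <1800 1800>;
 *
 * Each <min max> pair is in mV and is converted with
 * mmc_vddrange_to_ocrmask() before being accumulated into *mask.
 */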
1271 #endif /* CONFIG_OF */
1273 static int mmc_of_get_func_num(struct device_node *node)
1278 ret = of_property_read_u32(node, "reg", &reg);
1285 struct device_node *mmc_of_find_child_device(struct mmc_host *host,
1288 struct device_node *node;
1290 if (!host->parent || !host->parent->of_node)
1293 for_each_child_of_node(host->parent->of_node, node) {
1294 if (mmc_of_get_func_num(node) == func_num)
1301 #ifdef CONFIG_REGULATOR
1304 * mmc_regulator_get_ocrmask - return mask of supported voltages
1305 * @supply: regulator to use
1307 * This returns either a negative errno, or a mask of voltages that
1308 * can be provided to MMC/SD/SDIO devices using the specified voltage
1309 * regulator. This would normally be called before registering the
1310 * MMC host adapter.
1312 int mmc_regulator_get_ocrmask(struct regulator *supply)
1320 count = regulator_count_voltages(supply);
1324 for (i = 0; i < count; i++) {
1325 vdd_uV = regulator_list_voltage(supply, i);
1329 vdd_mV = vdd_uV / 1000;
1330 result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1334 vdd_uV = regulator_get_voltage(supply);
1338 vdd_mV = vdd_uV / 1000;
1339 result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1344 EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
1347 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
1348 * @mmc: the host to regulate
1349 * @supply: regulator to use
1350 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
1352 * Returns zero on success, else negative errno.
1354 * MMC host drivers may use this to enable or disable a regulator using
1355 * a particular supply voltage. This would normally be called from the
1356 * set_ios() method.
1358 int mmc_regulator_set_ocr(struct mmc_host *mmc,
1359 struct regulator *supply,
1360 unsigned short vdd_bit)
1369 * REVISIT mmc_vddrange_to_ocrmask() may have set some
1370 * bits this regulator doesn't quite support ... don't
1371 * be too picky, most cards and regulators are OK with
1372 * a 0.1V range goof (it's a small error percentage).
1374 tmp = vdd_bit - ilog2(MMC_VDD_165_195);
1376 min_uV = 1650 * 1000;
1377 max_uV = 1950 * 1000;
1379 min_uV = 1900 * 1000 + tmp * 100 * 1000;
1380 max_uV = min_uV + 100 * 1000;
1383 result = regulator_set_voltage(supply, min_uV, max_uV);
1384 if (result == 0 && !mmc->regulator_enabled) {
1385 result = regulator_enable(supply);
1387 mmc->regulator_enabled = true;
1389 } else if (mmc->regulator_enabled) {
1390 result = regulator_disable(supply);
1392 mmc->regulator_enabled = false;
1396 dev_err(mmc_dev(mmc),
1397 "could not set regulator OCR (%d)\n", result);
1400 EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
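/*
 * Illustrative sketch (not part of this driver): a host driver calling
 * mmc_regulator_set_ocr() from its ->set_ios() hook, as the comment
 * above suggests. Power-off is requested by passing a vdd_bit of zero;
 * the hook name and structure are hypothetical.
 */
#if 0
static void my_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	if (!IS_ERR(mmc->supply.vmmc))
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
				      ios->power_mode != MMC_POWER_OFF ?
				      ios->vdd : 0);
	/* ... program clock, bus width and timing ... */
}
#endif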
1402 #endif /* CONFIG_REGULATOR */
1404 int mmc_regulator_get_supply(struct mmc_host *mmc)
1406 struct device *dev = mmc_dev(mmc);
1409 mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
1410 mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
1412 if (IS_ERR(mmc->supply.vmmc)) {
1413 if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
1414 return -EPROBE_DEFER;
1415 dev_info(dev, "No vmmc regulator found\n");
1417 ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
1419 mmc->ocr_avail = ret;
1421 dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
1424 if (IS_ERR(mmc->supply.vqmmc)) {
1425 if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
1426 return -EPROBE_DEFER;
1427 dev_info(dev, "No vqmmc regulator found\n");
1432 EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
1435 * Mask off any voltages we don't support and select
1436 * the lowest voltage
1438 u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
1443 * Sanity check the voltages that the card claims to
1444 * support.
1447 dev_warn(mmc_dev(host),
1448 "card claims to support voltages below defined range\n");
1452 ocr &= host->ocr_avail;
1454 dev_warn(mmc_dev(host), "no support for card's volts\n");
1458 if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
1461 mmc_power_cycle(host, ocr);
1465 if (bit != host->ios.vdd)
1466 dev_warn(mmc_dev(host), "exceeding card's volts\n");
1472 int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
1475 int old_signal_voltage = host->ios.signal_voltage;
1477 host->ios.signal_voltage = signal_voltage;
1478 if (host->ops->start_signal_voltage_switch) {
1479 mmc_host_clk_hold(host);
1480 err = host->ops->start_signal_voltage_switch(host, &host->ios);
1481 mmc_host_clk_release(host);
1485 host->ios.signal_voltage = old_signal_voltage;
1491 int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
1493 struct mmc_command cmd = {0};
1500 * Send CMD11 only if the request is to switch the card to
1501 * 1.8V signalling.
1503 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
1504 return __mmc_set_signal_voltage(host, signal_voltage);
1507 * If we cannot switch voltages, return failure so the caller
1508 * can continue without UHS mode
1510 if (!host->ops->start_signal_voltage_switch)
1512 if (!host->ops->card_busy)
1513 pr_warn("%s: cannot verify signal voltage switch\n",
1514 mmc_hostname(host));
1516 mmc_host_clk_hold(host);
1518 cmd.opcode = SD_SWITCH_VOLTAGE;
1520 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1522 err = mmc_wait_for_cmd(host, &cmd, 0);
1526 if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) {
1531 * The card should drive cmd and dat[0:3] low immediately
1532 * after the response of cmd11, but wait 1 ms to be sure
1535 if (host->ops->card_busy && !host->ops->card_busy(host)) {
1540 * During a signal voltage level switch, the clock must be gated
1541 * for 5 ms according to the SD spec
1543 clock = host->ios.clock;
1544 host->ios.clock = 0;
1547 if (__mmc_set_signal_voltage(host, signal_voltage)) {
1549 * Voltages may not have been switched, but we've already
1550 * sent CMD11, so a power cycle is required anyway
1556 /* Keep clock gated for at least 5 ms */
1558 host->ios.clock = clock;
1561 /* Wait for at least 1 ms according to spec */
1565 * Failure to switch is indicated by the card holding
1566 * dat[0:3] low.
1568 if (host->ops->card_busy && host->ops->card_busy(host))
1573 pr_debug("%s: Signal voltage switch failed, "
1574 "power cycling card\n", mmc_hostname(host));
1575 mmc_power_cycle(host, ocr);
1579 mmc_host_clk_release(host);
1585 * Select timing parameters for host.
1587 void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1589 mmc_host_clk_hold(host);
1590 host->ios.timing = timing;
1592 mmc_host_clk_release(host);
1596 * Select appropriate driver type for host.
1598 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1600 mmc_host_clk_hold(host);
1601 host->ios.drv_type = drv_type;
1603 mmc_host_clk_release(host);
1607 * Apply power to the MMC stack. This is a two-stage process.
1608 * First, we enable power to the card without the clock running.
1609 * We then wait a bit for the power to stabilise. Finally,
1610 * enable the bus drivers and clock to the card.
1612 * We must _NOT_ enable the clock prior to power stabilising.
1614 * If a host does all the power sequencing itself, ignore the
1615 * initial MMC_POWER_UP stage.
1617 void mmc_power_up(struct mmc_host *host, u32 ocr)
1619 if (host->ios.power_mode == MMC_POWER_ON)
1622 mmc_host_clk_hold(host);
1624 mmc_pwrseq_pre_power_on(host);
1626 host->ios.vdd = fls(ocr) - 1;
1627 host->ios.power_mode = MMC_POWER_UP;
1628 /* Set initial state and call mmc_set_ios */
1629 mmc_set_initial_state(host);
1631 /* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
1632 if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0)
1633 dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
1634 else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180) == 0)
1635 dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
1636 else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120) == 0)
1637 dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
1640 * This delay should be sufficient to allow the power supply
1641 * to reach the minimum voltage.
1645 mmc_pwrseq_post_power_on(host);
1647 host->ios.clock = host->f_init;
1649 host->ios.power_mode = MMC_POWER_ON;
1653 * This delay must be at least 74 clock cycles, or 1 ms, or the
1654 * time required to reach a stable voltage.
1658 mmc_host_clk_release(host);
1661 void mmc_power_off(struct mmc_host *host)
1663 if (host->ios.power_mode == MMC_POWER_OFF)
1666 mmc_host_clk_hold(host);
1668 mmc_pwrseq_power_off(host);
1670 host->ios.clock = 0;
1673 host->ios.power_mode = MMC_POWER_OFF;
1674 /* Set initial state and call mmc_set_ios */
1675 mmc_set_initial_state(host);
1678 * Some configurations, such as the 802.11 SDIO card in the OLPC
1679 * XO-1.5, require a short delay after poweroff before the card
1680 * can be successfully turned on again.
1684 mmc_host_clk_release(host);
1687 void mmc_power_cycle(struct mmc_host *host, u32 ocr)
1689 mmc_power_off(host);
1690 /* Wait at least 1 ms according to SD spec */
1692 mmc_power_up(host, ocr);
1696 * Cleanup when the last reference to the bus operator is dropped.
1698 static void __mmc_release_bus(struct mmc_host *host)
1701 BUG_ON(host->bus_refs);
1702 BUG_ON(!host->bus_dead);
1704 host->bus_ops = NULL;
1708 * Increase reference count of bus operator
1710 static inline void mmc_bus_get(struct mmc_host *host)
1712 unsigned long flags;
1714 spin_lock_irqsave(&host->lock, flags);
1716 spin_unlock_irqrestore(&host->lock, flags);
1720 * Decrease reference count of bus operator and free it if
1721 * it is the last reference.
1723 static inline void mmc_bus_put(struct mmc_host *host)
1725 unsigned long flags;
1727 spin_lock_irqsave(&host->lock, flags);
1729 if ((host->bus_refs == 0) && host->bus_ops)
1730 __mmc_release_bus(host);
1731 spin_unlock_irqrestore(&host->lock, flags);
1735 * Assign a mmc bus handler to a host. Only one bus handler may control a
1736 * host at any given time.
1738 void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
1740 unsigned long flags;
1745 WARN_ON(!host->claimed);
1747 spin_lock_irqsave(&host->lock, flags);
1749 BUG_ON(host->bus_ops);
1750 BUG_ON(host->bus_refs);
1752 host->bus_ops = ops;
1756 spin_unlock_irqrestore(&host->lock, flags);
1760 * Remove the current bus handler from a host.
1762 void mmc_detach_bus(struct mmc_host *host)
1764 unsigned long flags;
1768 WARN_ON(!host->claimed);
1769 WARN_ON(!host->bus_ops);
1771 spin_lock_irqsave(&host->lock, flags);
1775 spin_unlock_irqrestore(&host->lock, flags);
1780 static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
1783 #ifdef CONFIG_MMC_DEBUG
1784 unsigned long flags;
1785 spin_lock_irqsave(&host->lock, flags);
1786 WARN_ON(host->removed);
1787 spin_unlock_irqrestore(&host->lock, flags);
1791 * If the device is configured for wakeup, prevent a new sleep for
1792 * 5 s to give user space a chance to consume the event.
1794 if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
1795 device_can_wakeup(mmc_dev(host)))
1796 pm_wakeup_event(mmc_dev(host), 5000);
1798 host->detect_change = 1;
1799 mmc_schedule_delayed_work(&host->detect, delay);
1803 * mmc_detect_change - process change of state on a MMC socket
1804 * @host: host which changed state.
1805 * @delay: optional delay to wait before detection (jiffies)
1807 * MMC drivers should call this when they detect a card has been
1808 * inserted or removed. The MMC layer will confirm that any
1809 * present card is still functional, and initialize any newly
1810 * inserted cards.
1812 void mmc_detect_change(struct mmc_host *host, unsigned long delay)
1814 _mmc_detect_change(host, delay, true);
1816 EXPORT_SYMBOL(mmc_detect_change);
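/*
 * Illustrative sketch (not part of this driver): a card-detect
 * interrupt handler kicking off a debounced rescan via
 * mmc_detect_change().
 */
#if 0
static irqreturn_t my_cd_irq(int irq, void *dev_id)
{
	struct mmc_host *mmc = dev_id;

	/* debounce for ~200 ms before probing the slot */
	mmc_detect_change(mmc, msecs_to_jiffies(200));
	return IRQ_HANDLED;
}
#endif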
1818 void mmc_init_erase(struct mmc_card *card)
1822 if (is_power_of_2(card->erase_size))
1823 card->erase_shift = ffs(card->erase_size) - 1;
1825 card->erase_shift = 0;
1828 * It is possible to erase an arbitrarily large area of an SD or MMC
1829 * card. That is not desirable because it can take a long time
1830 * (minutes) potentially delaying more important I/O, and also the
1831 * timeout calculations become increasingly over-estimated.
1832 * Consequently, 'pref_erase' is defined as a guide to limit erases
1833 * to that size and alignment.
1835 * For SD cards that define Allocation Unit size, limit erases to one
1836 * Allocation Unit at a time. For MMC cards that define High Capacity
1837 * Erase Size, whether it is switched on or not, limit to that size.
1838 * Otherwise just have a stab at a good value. For modern cards it
1839 * will end up being 4MiB. Note that if the value is too small, it
1840 * can end up taking longer to erase.
1842 if (mmc_card_sd(card) && card->ssr.au) {
1843 card->pref_erase = card->ssr.au;
1844 card->erase_shift = ffs(card->ssr.au) - 1;
1845 } else if (card->ext_csd.hc_erase_size) {
1846 card->pref_erase = card->ext_csd.hc_erase_size;
1847 } else if (card->erase_size) {
1848 sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
1850 card->pref_erase = 512 * 1024 / 512;
1852 card->pref_erase = 1024 * 1024 / 512;
1854 card->pref_erase = 2 * 1024 * 1024 / 512;
1856 card->pref_erase = 4 * 1024 * 1024 / 512;
1857 if (card->pref_erase < card->erase_size)
1858 card->pref_erase = card->erase_size;
1860 sz = card->pref_erase % card->erase_size;
1862 card->pref_erase += card->erase_size - sz;
1865 card->pref_erase = 0;
1868 static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
1869 unsigned int arg, unsigned int qty)
1871 unsigned int erase_timeout;
1873 if (arg == MMC_DISCARD_ARG ||
1874 (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
1875 erase_timeout = card->ext_csd.trim_timeout;
1876 } else if (card->ext_csd.erase_group_def & 1) {
1877 /* High Capacity Erase Group Size uses HC timeouts */
1878 if (arg == MMC_TRIM_ARG)
1879 erase_timeout = card->ext_csd.trim_timeout;
1881 erase_timeout = card->ext_csd.hc_erase_timeout;
1883 /* CSD Erase Group Size uses write timeout */
1884 unsigned int mult = (10 << card->csd.r2w_factor);
1885 unsigned int timeout_clks = card->csd.tacc_clks * mult;
1886 unsigned int timeout_us;
1888 /* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
1889 if (card->csd.tacc_ns < 1000000)
1890 timeout_us = (card->csd.tacc_ns * mult) / 1000;
1892 timeout_us = (card->csd.tacc_ns / 1000) * mult;
1895 * ios.clock is only a target. The real clock rate might be
1896 * less but not that much less, so fudge it by multiplying by 2.
1899 timeout_us += (timeout_clks * 1000) /
1900 (mmc_host_clk_rate(card->host) / 1000);
1902 erase_timeout = timeout_us / 1000;
1905 * Theoretically, the calculation could underflow so round up
1906 * to 1ms in that case.
1912 /* Multiplier for secure operations */
1913 if (arg & MMC_SECURE_ARGS) {
1914 if (arg == MMC_SECURE_ERASE_ARG)
1915 erase_timeout *= card->ext_csd.sec_erase_mult;
1917 erase_timeout *= card->ext_csd.sec_trim_mult;
1920 erase_timeout *= qty;
1923 * Ensure at least a 1 second timeout for SPI as per
1924 * 'mmc_set_data_timeout()'
1926 if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
1927 erase_timeout = 1000;
1929 return erase_timeout;
1932 static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
1936 unsigned int erase_timeout;
1938 if (card->ssr.erase_timeout) {
1939 /* Erase timeout specified in SD Status Register (SSR) */
1940 erase_timeout = card->ssr.erase_timeout * qty +
1941 card->ssr.erase_offset;
1944 * Erase timeout not specified in SD Status Register (SSR) so
1945 * use 250ms per write block.
1947 erase_timeout = 250 * qty;
1950 /* Must not be less than 1 second */
1951 if (erase_timeout < 1000)
1952 erase_timeout = 1000;
1954 return erase_timeout;
1957 static unsigned int mmc_erase_timeout(struct mmc_card *card,
1961 if (mmc_card_sd(card))
1962 return mmc_sd_erase_timeout(card, arg, qty);
1964 return mmc_mmc_erase_timeout(card, arg, qty);
1967 static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1968 unsigned int to, unsigned int arg)
1970 struct mmc_command cmd = {0};
1971 unsigned int qty = 0;
1972 unsigned long timeout;
1976 * qty is used to calculate the erase timeout which depends on how many
1977 * erase groups (or allocation units in SD terminology) are affected.
1978 * We count erasing part of an erase group as one erase group.
1979 * For SD, the allocation units are always a power of 2. For MMC, the
1980 * erase group size is almost certainly also a power of 2, but the JEDEC
1981 * standard does not seem to insist on that, so we fall back to
1982 * division in that case. SD may not specify an allocation unit size,
1983 * in which case the timeout is based on the number of write blocks.
1985 * Note that the timeout for secure trim 2 will only be correct if the
1986 * number of erase groups specified is the same as the total of all
1987 * preceding secure trim 1 commands. Since the power may have been
1988 * lost since the secure trim 1 commands occurred, it is generally
1989 * impossible to calculate the secure trim 2 timeout correctly.
1991 if (card->erase_shift)
1992 qty += ((to >> card->erase_shift) -
1993 (from >> card->erase_shift)) + 1;
1994 else if (mmc_card_sd(card))
1995 qty += to - from + 1;
1997 qty += ((to / card->erase_size) -
1998 (from / card->erase_size)) + 1;
2000 if (!mmc_card_blockaddr(card)) {
2005 if (mmc_card_sd(card))
2006 cmd.opcode = SD_ERASE_WR_BLK_START;
2008 cmd.opcode = MMC_ERASE_GROUP_START;
2010 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2011 err = mmc_wait_for_cmd(card->host, &cmd, 0);
2013 pr_err("mmc_erase: group start error %d, "
2014 "status %#x\n", err, cmd.resp[0]);
2019 memset(&cmd, 0, sizeof(struct mmc_command));
2020 if (mmc_card_sd(card))
2021 cmd.opcode = SD_ERASE_WR_BLK_END;
2023 cmd.opcode = MMC_ERASE_GROUP_END;
2025 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2026 err = mmc_wait_for_cmd(card->host, &cmd, 0);
2028 pr_err("mmc_erase: group end error %d, status %#x\n",
2034 memset(&cmd, 0, sizeof(struct mmc_command));
2035 cmd.opcode = MMC_ERASE;
2037 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
2038 cmd.busy_timeout = mmc_erase_timeout(card, arg, qty);
2039 err = mmc_wait_for_cmd(card->host, &cmd, 0);
2041 pr_err("mmc_erase: erase error %d, status %#x\n",
2047 if (mmc_host_is_spi(card->host))
2050 timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
2052 memset(&cmd, 0, sizeof(struct mmc_command));
2053 cmd.opcode = MMC_SEND_STATUS;
2054 cmd.arg = card->rca << 16;
2055 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
2056 /* Do not retry else we can't see errors */
2057 err = mmc_wait_for_cmd(card->host, &cmd, 0);
2058 if (err || (cmd.resp[0] & 0xFDF92000)) {
2059 pr_err("error %d requesting status %#x\n",
2065 /* Timeout if the device never becomes ready for data and
2066 * never leaves the program state.
2068 if (time_after(jiffies, timeout)) {
2069 pr_err("%s: Card stuck in programming state! %s\n",
2070 mmc_hostname(card->host), __func__);
2075 } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
2076 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
2082 * mmc_erase - erase sectors.
2083 * @card: card to erase
2084 * @from: first sector to erase
2085 * @nr: number of sectors to erase
2086 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
2088 * Caller must claim host before calling this function.
2090 int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
2093 unsigned int rem, to = from + nr;
2095 if (!(card->host->caps & MMC_CAP_ERASE) ||
2096 !(card->csd.cmdclass & CCC_ERASE))
2099 if (!card->erase_size)
2102 if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
2105 if ((arg & MMC_SECURE_ARGS) &&
2106 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
2109 if ((arg & MMC_TRIM_ARGS) &&
2110 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
2113 if (arg == MMC_SECURE_ERASE_ARG) {
2114 if (from % card->erase_size || nr % card->erase_size)
2118 if (arg == MMC_ERASE_ARG) {
2119 rem = from % card->erase_size;
2121 rem = card->erase_size - rem;
2128 rem = nr % card->erase_size;
2141 /* 'from' and 'to' are inclusive */
2144 return mmc_do_erase(card, from, to, arg);
2146 EXPORT_SYMBOL(mmc_erase);
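/*
 * Illustrative sketch (not part of this driver): erasing a
 * group-aligned sector range, checking capability and alignment first
 * with the helpers defined below. The function name is hypothetical.
 */
#if 0
static int my_erase_range(struct mmc_card *card, unsigned int from,
			  unsigned int nr)
{
	if (!mmc_can_erase(card))
		return -EOPNOTSUPP;
	if (!mmc_erase_group_aligned(card, from, nr))
		return -EINVAL;
	return mmc_erase(card, from, nr, MMC_ERASE_ARG);
}
#endif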
2148 int mmc_can_erase(struct mmc_card *card)
2150 if ((card->host->caps & MMC_CAP_ERASE) &&
2151 (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
2155 EXPORT_SYMBOL(mmc_can_erase);
2157 int mmc_can_trim(struct mmc_card *card)
2159 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
2163 EXPORT_SYMBOL(mmc_can_trim);
2165 int mmc_can_discard(struct mmc_card *card)
2168 * As there's no way to detect the discard support bit at v4.5,
2169 * use the s/w feature support field.
2171 if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
2175 EXPORT_SYMBOL(mmc_can_discard);
2177 int mmc_can_sanitize(struct mmc_card *card)
2179 if (!mmc_can_trim(card) && !mmc_can_erase(card))
2181 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
2185 EXPORT_SYMBOL(mmc_can_sanitize);
2187 int mmc_can_secure_erase_trim(struct mmc_card *card)
2189 if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
2190 !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
2194 EXPORT_SYMBOL(mmc_can_secure_erase_trim);
2196 int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
2199 if (!card->erase_size)
2201 if (from % card->erase_size || nr % card->erase_size)
2205 EXPORT_SYMBOL(mmc_erase_group_aligned);
2207 static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
2210 struct mmc_host *host = card->host;
2211 unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
2212 unsigned int last_timeout = 0;
2214 if (card->erase_shift)
2215 max_qty = UINT_MAX >> card->erase_shift;
2216 else if (mmc_card_sd(card))
2219 max_qty = UINT_MAX / card->erase_size;
2221 /* Find the largest qty with an OK timeout */
2224 for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
2225 timeout = mmc_erase_timeout(card, arg, qty + x);
2226 if (timeout > host->max_busy_timeout)
2228 if (timeout < last_timeout)
2230 last_timeout = timeout;
2242 /* Convert qty to sectors */
2243 if (card->erase_shift)
2244 max_discard = --qty << card->erase_shift;
2245 else if (mmc_card_sd(card))
2248 max_discard = --qty * card->erase_size;
2253 unsigned int mmc_calc_max_discard(struct mmc_card *card)
2255 struct mmc_host *host = card->host;
2256 unsigned int max_discard, max_trim;
2258 if (!host->max_busy_timeout)
2262 * Without erase_group_def set, MMC erase timeout depends on clock
2263 * frequency, which can change. In that case, the best choice is
2264 * just the preferred erase size.
2266 if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
2267 return card->pref_erase;
2269 max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
2270 if (mmc_can_trim(card)) {
2271 max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
2272 if (max_trim < max_discard)
2273 max_discard = max_trim;
2274 } else if (max_discard < card->erase_size) {
2277 pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
2278 mmc_hostname(host), max_discard, host->max_busy_timeout);
2281 EXPORT_SYMBOL(mmc_calc_max_discard);
2283 int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
2285 struct mmc_command cmd = {0};
2287 if (mmc_card_blockaddr(card) || mmc_card_ddr52(card))
2290 cmd.opcode = MMC_SET_BLOCKLEN;
2292 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2293 return mmc_wait_for_cmd(card->host, &cmd, 5);
2295 EXPORT_SYMBOL(mmc_set_blocklen);
2297 int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
2300 struct mmc_command cmd = {0};
2302 cmd.opcode = MMC_SET_BLOCK_COUNT;
2303 cmd.arg = blockcount & 0x0000FFFF;
2306 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2307 return mmc_wait_for_cmd(card->host, &cmd, 5);
2309 EXPORT_SYMBOL(mmc_set_blockcount);
2311 static void mmc_hw_reset_for_init(struct mmc_host *host)
2313 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
2315 mmc_host_clk_hold(host);
2316 host->ops->hw_reset(host);
2317 mmc_host_clk_release(host);
2320 int mmc_hw_reset(struct mmc_host *host)
2328 if (!host->bus_ops || host->bus_dead || !host->bus_ops->reset) {
2333 ret = host->bus_ops->reset(host);
2336 pr_warn("%s: tried to reset card\n", mmc_hostname(host));
2340 EXPORT_SYMBOL(mmc_hw_reset);
2342 static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
2344 host->f_init = freq;
2346 #ifdef CONFIG_MMC_DEBUG
2347 pr_info("%s: %s: trying to init card at %u Hz\n",
2348 mmc_hostname(host), __func__, host->f_init);
2350 mmc_power_up(host, host->ocr_avail);
2353 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
2354 * do a hardware reset if possible.
2356 mmc_hw_reset_for_init(host);
2359 * sdio_reset sends CMD52 to reset card. Since we do not know
2360 * if the card is being re-initialized, just send it. CMD52
2361 * should be ignored by SD/eMMC cards.
2366 mmc_send_if_cond(host, host->ocr_avail);
2368 /* Order's important: probe SDIO, then SD, then MMC */
2369 if (!mmc_attach_sdio(host))
2371 if (!mmc_attach_sd(host))
2373 if (!mmc_attach_mmc(host))
2376 mmc_power_off(host);
2380 int _mmc_detect_card_removed(struct mmc_host *host)
2384 if (host->caps & MMC_CAP_NONREMOVABLE)
2387 if (!host->card || mmc_card_removed(host->card))
2390 ret = host->bus_ops->alive(host);
2393 * Card detect status and alive check may be out of sync if card is
2394 * removed slowly, when card detect switch changes while card/slot
2395 * pads are still contacted in hardware (refer to "SD Card Mechanical
2396 * Addendum, Appendix C: Card Detection Switch"). So reschedule a
2397 * detect work 200ms later for this case.
2399 if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
2400 mmc_detect_change(host, msecs_to_jiffies(200));
2401 pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
2405 mmc_card_set_removed(host->card);
2406 pr_debug("%s: card remove detected\n", mmc_hostname(host));
2412 int mmc_detect_card_removed(struct mmc_host *host)
2414 struct mmc_card *card = host->card;
2417 WARN_ON(!host->claimed);
2422 ret = mmc_card_removed(card);
2424 * The card will be considered unchanged unless we have been asked to
2425 * detect a change or host requires polling to provide card detection.
2427 if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
2430 host->detect_change = 0;
2432 ret = _mmc_detect_card_removed(host);
2433 if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
2435 * Schedule a detect work as soon as possible to let a
2436 * rescan handle the card removal.
2438 cancel_delayed_work(&host->detect);
2439 _mmc_detect_change(host, 0, false);
2445 EXPORT_SYMBOL(mmc_detect_card_removed);
2447 void mmc_rescan(struct work_struct *work)
2449 struct mmc_host *host =
2450 container_of(work, struct mmc_host, detect.work);
2453 if (host->trigger_card_event && host->ops->card_event) {
2454 host->ops->card_event(host);
2455 host->trigger_card_event = false;
2458 if (host->rescan_disable)
2461 /* If there is a non-removable card registered, only scan once */
2462 if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
2464 host->rescan_entered = 1;
2469 * if there is a _removable_ card registered, check whether it is
2470 * still present
2472 if (host->bus_ops && !host->bus_dead
2473 && !(host->caps & MMC_CAP_NONREMOVABLE))
2474 host->bus_ops->detect(host);
2476 host->detect_change = 0;
2479 * Let mmc_bus_put() free the bus/bus_ops if we've found that
2480 * the card is no longer present.
2485 /* if there still is a card present, stop here */
2486 if (host->bus_ops != NULL) {
2492 * Only we can add a new handler, so it's safe to
2493 * release the lock here.
2497 if (!(host->caps & MMC_CAP_NONREMOVABLE) && host->ops->get_cd &&
2498 host->ops->get_cd(host) == 0) {
2499 mmc_claim_host(host);
2500 mmc_power_off(host);
2501 mmc_release_host(host);
2505 mmc_claim_host(host);
2506 for (i = 0; i < ARRAY_SIZE(freqs); i++) {
2507 if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
2509 if (freqs[i] <= host->f_min)
2512 mmc_release_host(host);
2515 if (host->caps & MMC_CAP_NEEDS_POLL)
2516 mmc_schedule_delayed_work(&host->detect, HZ);
2519 void mmc_start_host(struct mmc_host *host)
2521 host->f_init = max(freqs[0], host->f_min);
2522 host->rescan_disable = 0;
2523 host->ios.power_mode = MMC_POWER_UNDEFINED;
2524 if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
2525 mmc_power_off(host);
2527 mmc_power_up(host, host->ocr_avail);
2528 mmc_gpiod_request_cd_irq(host);
2529 _mmc_detect_change(host, 0, false);
2532 void mmc_stop_host(struct mmc_host *host)
2534 #ifdef CONFIG_MMC_DEBUG
2535 unsigned long flags;
2536 spin_lock_irqsave(&host->lock, flags);
2538 spin_unlock_irqrestore(&host->lock, flags);
2540 if (host->slot.cd_irq >= 0)
2541 disable_irq(host->slot.cd_irq);
2543 host->rescan_disable = 1;
2544 cancel_delayed_work_sync(&host->detect);
2545 mmc_flush_scheduled_work();
2547 /* clear pm flags now and let card drivers set them as needed */
2551 if (host->bus_ops && !host->bus_dead) {
2552 /* Calling bus_ops->remove() with a claimed host can deadlock */
2553 host->bus_ops->remove(host);
2554 mmc_claim_host(host);
2555 mmc_detach_bus(host);
2556 mmc_power_off(host);
2557 mmc_release_host(host);
2565 mmc_power_off(host);
2568 int mmc_power_save_host(struct mmc_host *host)
2572 #ifdef CONFIG_MMC_DEBUG
2573 pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
2578 if (!host->bus_ops || host->bus_dead) {
2583 if (host->bus_ops->power_save)
2584 ret = host->bus_ops->power_save(host);
2588 mmc_power_off(host);
2592 EXPORT_SYMBOL(mmc_power_save_host);
2594 int mmc_power_restore_host(struct mmc_host *host)
2598 #ifdef CONFIG_MMC_DEBUG
2599 pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
2604 if (!host->bus_ops || host->bus_dead) {
2609 mmc_power_up(host, host->card->ocr);
2610 ret = host->bus_ops->power_restore(host);
2616 EXPORT_SYMBOL(mmc_power_restore_host);
2619 * Flush the cache to the non-volatile storage.
2621 int mmc_flush_cache(struct mmc_card *card)
2625 if (mmc_card_mmc(card) &&
2626 (card->ext_csd.cache_size > 0) &&
2627 (card->ext_csd.cache_ctrl & 1)) {
2628 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2629 EXT_CSD_FLUSH_CACHE, 1, 0);
2631 pr_err("%s: cache flush error %d\n",
2632 mmc_hostname(card->host), err);
2637 EXPORT_SYMBOL(mmc_flush_cache);
2641 /* Do the card removal on suspend if the card is assumed removable.
2642 * Do that in the pm notifier while userspace isn't yet frozen, so we will
2643 * be able to sync the card.
2644 */
2645 int mmc_pm_notify(struct notifier_block *notify_block,
2646 unsigned long mode, void *unused)
2648 struct mmc_host *host = container_of(
2649 notify_block, struct mmc_host, pm_notify);
2650 unsigned long flags;
2654 case PM_HIBERNATION_PREPARE:
2655 case PM_SUSPEND_PREPARE:
2656 case PM_RESTORE_PREPARE:
2657 spin_lock_irqsave(&host->lock, flags);
2658 host->rescan_disable = 1;
2659 spin_unlock_irqrestore(&host->lock, flags);
2660 cancel_delayed_work_sync(&host->detect);
2665 /* Validate prerequisites for suspend */
2666 if (host->bus_ops->pre_suspend)
2667 err = host->bus_ops->pre_suspend(host);
2671 /* Calling bus_ops->remove() with a claimed host can deadlock */
2672 host->bus_ops->remove(host);
2673 mmc_claim_host(host);
2674 mmc_detach_bus(host);
2675 mmc_power_off(host);
2676 mmc_release_host(host);
2680 case PM_POST_SUSPEND:
2681 case PM_POST_HIBERNATION:
2682 case PM_POST_RESTORE:
2684 spin_lock_irqsave(&host->lock, flags);
2685 host->rescan_disable = 0;
2686 spin_unlock_irqrestore(&host->lock, flags);
2687 _mmc_detect_change(host, 0, false);
2696 * mmc_init_context_info() - init synchronization context
2697 * @host: mmc host
2699 * Init the struct context_info needed to implement the asynchronous
2700 * request mechanism, used by the mmc core, host driver and mmc requests
2701 * supplier.
2703 void mmc_init_context_info(struct mmc_host *host)
2705 spin_lock_init(&host->context_info.lock);
2706 host->context_info.is_new_req = false;
2707 host->context_info.is_done_rcv = false;
2708 host->context_info.is_waiting_last_req = false;
2709 init_waitqueue_head(&host->context_info.wait);
2712 static int __init mmc_init(void)
2716 workqueue = alloc_ordered_workqueue("kmmcd", 0);
2720 ret = mmc_register_bus();
2722 goto destroy_workqueue;
2724 ret = mmc_register_host_class();
2726 goto unregister_bus;
2728 ret = sdio_register_bus();
2730 goto unregister_host_class;
2734 unregister_host_class:
2735 mmc_unregister_host_class();
2737 mmc_unregister_bus();
2739 destroy_workqueue(workqueue);
2744 static void __exit mmc_exit(void)
2746 sdio_unregister_bus();
2747 mmc_unregister_host_class();
2748 mmc_unregister_bus();
2749 destroy_workqueue(workqueue);
2752 subsys_initcall(mmc_init);
2753 module_exit(mmc_exit);
2755 MODULE_LICENSE("GPL");