// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for Realtek PCI-Express card reader
 *
 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
 *
 * Author:
 *   Wei WANG (wei_wang@realsil.com.cn)
 *   Micky Ching (micky_ching@realsil.com.cn)
 */

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>

#include "rtsx.h"
/***********************************************************************
 * Scatter-gather transfer buffer access routines
 ***********************************************************************/
/*
 * Copy a buffer of length buflen to/from the srb's transfer buffer.
 * (Note: for scatter-gather transfers (scsi_sg_count(srb) > 0),
 * scsi_sglist(srb) points to a list of s-g entries and we ignore
 * scsi_bufflen(srb). For non-scatter-gather transfers, scsi_sglist(srb)
 * points to the transfer buffer itself and scsi_bufflen(srb) is the
 * buffer's length.)
 * Update the *index and *offset variables so that the next copy will
 * pick up from where this one left off.
 */
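/*
 * A minimal usage sketch (hypothetical caller): pull the next 16 bytes
 * out of the command's transfer buffer, resuming from a saved position:
 *
 *	unsigned int idx = 0, off = 0;
 *	unsigned char tmp[16];
 *
 *	rtsx_stor_access_xfer_buf(tmp, sizeof(tmp), srb, &idx, &off,
 *				  FROM_XFER_BUF);
 */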
unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
				       unsigned int buflen,
				       struct scsi_cmnd *srb,
				       unsigned int *index,
				       unsigned int *offset,
				       enum xfer_buf_dir dir)
{
	unsigned int cnt;

	/* If not using scatter-gather, just transfer the data directly. */
	if (scsi_sg_count(srb) == 0) {
		unsigned char *sgbuffer;

		if (*offset >= scsi_bufflen(srb))
			return 0;
		cnt = min(buflen, scsi_bufflen(srb) - *offset);

		sgbuffer = (unsigned char *)scsi_sglist(srb) + *offset;

		if (dir == TO_XFER_BUF)
			memcpy(sgbuffer, buffer, cnt);
		else
			memcpy(buffer, sgbuffer, cnt);
		*offset += cnt;
	/*
	 * Using scatter-gather. We have to go through the list one entry
	 * at a time. Each s-g entry contains some number of pages, and
	 * each page has to be kmap()'ed separately.
	 */
	} else {
		struct scatterlist *sg =
			(struct scatterlist *)scsi_sglist(srb)
			+ *index;

		/*
		 * This loop handles a single s-g list entry, which may
		 * include multiple pages. Find the initial page structure
		 * and the starting offset within the page, and update
		 * the *offset and *index values for the next loop.
		 */
		cnt = 0;
		while (cnt < buflen && *index < scsi_sg_count(srb)) {
			struct page *page = sg_page(sg) +
					((sg->offset + *offset) >> PAGE_SHIFT);
			unsigned int poff = (sg->offset + *offset) &
					(PAGE_SIZE - 1);
			unsigned int sglen = sg->length - *offset;
			if (sglen > buflen - cnt) {
				/* Transfer ends within this s-g entry */
				sglen = buflen - cnt;
				*offset += sglen;
			} else {
				/* Transfer continues to next s-g entry */
				*offset = 0;
				++*index;
				++sg;
			}

			/*
			 * Transfer the data for all the pages in this
			 * s-g entry. For each page: call kmap(), do the
			 * transfer, and call kunmap() immediately after.
			 */
			while (sglen > 0) {
				unsigned int plen = min(sglen, (unsigned int)
						PAGE_SIZE - poff);
				unsigned char *ptr = kmap(page);

				if (dir == TO_XFER_BUF)
					memcpy(ptr + poff, buffer + cnt, plen);
				else
					memcpy(buffer + cnt, ptr + poff, plen);
				kunmap(page);

				/* Start at the beginning of the next page */
				poff = 0;
				++page;
				cnt += plen;
				sglen -= plen;
			}
		}
	}

	/* Return the amount actually transferred */
	return cnt;
}

/*
 * Store the contents of buffer into srb's transfer buffer and set the
 * SCSI residue.
 */
void rtsx_stor_set_xfer_buf(unsigned char *buffer,
			    unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int index = 0, offset = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
				  TO_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}

/*
 * Fetch the contents of srb's transfer buffer into buffer and set the
 * SCSI residue.
 */
void rtsx_stor_get_xfer_buf(unsigned char *buffer,
			    unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int index = 0, offset = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
				  FROM_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}

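/*
 * A short usage sketch (hypothetical data): returning a small
 * INQUIRY-style payload to the SCSI midlayer from a local buffer:
 *
 *	u8 inquiry[36] = { 0 };
 *
 *	rtsx_stor_set_xfer_buf(inquiry, sizeof(inquiry), srb);
 */
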
/***********************************************************************
 * Transport routines
 ***********************************************************************/
/*
 * Invoke the transport and basic error-handling/recovery methods
 *
 * This is used to send the message to the device and receive the response.
 */
void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int result;

	result = rtsx_scsi_handler(srb, chip);

	/*
	 * if the command gets aborted by the higher layers, we need to
	 * short-circuit all other processing.
	 */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
		dev_dbg(rtsx_dev(chip), "-- command was aborted\n");
		srb->result = DID_ABORT << 16;
		goto handle_errors;
	}

	/* if there is a transport error, reset and don't auto-sense */
	if (result == TRANSPORT_ERROR) {
		dev_dbg(rtsx_dev(chip), "-- transport indicates error, resetting\n");
		srb->result = DID_ERROR << 16;
		goto handle_errors;
	}
	srb->result = SAM_STAT_GOOD;

	/*
	 * If we have a failure, we're going to do a REQUEST_SENSE
	 * automatically. Note that we differentiate between a command
	 * "failure" and an "error" in the transport mechanism.
	 */
	if (result == TRANSPORT_FAILED) {
		/* set the result so the higher layers expect this data */
		srb->result = SAM_STAT_CHECK_CONDITION;
		memcpy(srb->sense_buffer,
		       (unsigned char *)&chip->sense_buffer[SCSI_LUN(srb)],
		       sizeof(struct sense_data_t));
	}

	return;

handle_errors:
	return;
}

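/*
 * Queue one register command into the host command buffer. As the
 * packing below shows, each 32-bit entry carries the command type in
 * bits [31:30], the register address in bits [29:16], the write mask
 * in bits [15:8] and the data byte in bits [7:0], stored little-endian
 * for the DMA engine.
 */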
void rtsx_add_cmd(struct rtsx_chip *chip,
		  u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
{
	__le32 *cb = (__le32 *)(chip->host_cmds_ptr);
	u32 val = 0;

	val |= (u32)(cmd_type & 0x03) << 30;
	val |= (u32)(reg_addr & 0x3FFF) << 16;
	val |= (u32)mask << 8;
	val |= (u32)data;

	spin_lock_irq(&chip->rtsx->reg_lock);
	if (chip->ci < (HOST_CMDS_BUF_LEN / 4))
		cb[(chip->ci)++] = cpu_to_le32(val);

	spin_unlock_irq(&chip->rtsx->reg_lock);
}

void rtsx_send_cmd_no_wait(struct rtsx_chip *chip)
{
	u32 val = BIT(31);

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);
}

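/*
 * Kick the queued command buffer and wait for completion. Returns 0 on
 * success, -ETIMEDOUT if the interrupt handler does not signal the
 * completion within @timeout milliseconds, or -EIO if the hardware
 * reports a transfer failure.
 */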
int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u32 val = BIT(31);
	long timeleft;
	int err = 0;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;
	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;
	rtsx->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);
	rtsx->trans_state = STATE_TRANS_CMD;

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);
	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
			chip->int_reg);
		err = -ETIMEDOUT;
		goto finish_send_cmd;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

finish_send_cmd:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

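/*
 * Append one entry to the host scatter-gather table. Each 64-bit
 * descriptor packs the 32-bit DMA address in the high word, the length
 * in bits [31:12] of the low word and the option flags in the low bits.
 * Segments larger than 0x80000 bytes are split into multiple
 * descriptors, with RTSX_SG_END kept only on the final chunk.
 */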
static inline void rtsx_add_sg_tbl(
	struct rtsx_chip *chip, u32 addr, u32 len, u8 option)
{
	__le64 *sgb = (__le64 *)(chip->host_sg_tbl_ptr);
	u64 val = 0;
	u32 temp_len = 0;
	u8 temp_opt = 0;

	do {
		if (len > 0x80000) {
			temp_len = 0x80000;
			temp_opt = option & (~RTSX_SG_END);
		} else {
			temp_len = len;
			temp_opt = option;
		}

		val = ((u64)addr << 32) | ((u64)temp_len << 12) | temp_opt;

		if (chip->sgi < (HOST_SG_TBL_BUF_LEN / 8))
			sgb[(chip->sgi)++] = cpu_to_le64(val);

		len -= temp_len;
		addr += temp_len;
	} while (len);
}

static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
					     struct scatterlist *sg, int num_sg,
					     unsigned int *index,
					     unsigned int *offset, int size,
					     enum dma_data_direction dma_dir,
					     int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int sg_cnt, i, resid;
	int err = 0;
	long timeleft;
	struct scatterlist *sg_ptr;
	u32 val = TRIG_DMA;

	if (!sg || (num_sg <= 0) || !offset || !index)
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;
	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);
	sg_cnt = dma_map_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	resid = size;
	sg_ptr = sg;
	chip->sgi = 0;
	/*
	 * Usually the next entry will be @sg@ + 1, but if this sg element
	 * is part of a chained scatterlist, it could jump to the start of
	 * a new scatterlist array. So here we use sg_next to move to
	 * the proper sg.
	 */
	for (i = 0; i < *index; i++)
		sg_ptr = sg_next(sg_ptr);
	for (i = *index; i < sg_cnt; i++) {
		dma_addr_t addr;
		unsigned int len;
		u8 option;

		addr = sg_dma_address(sg_ptr);
		len = sg_dma_len(sg_ptr);

		dev_dbg(rtsx_dev(chip), "DMA addr: 0x%x, Len: 0x%x\n",
			(unsigned int)addr, len);
		dev_dbg(rtsx_dev(chip), "*index = %d, *offset = %d\n",
			*index, *offset);

		addr += *offset;

		if ((len - *offset) > resid) {
			/* Transfer ends within this s-g entry */
			*offset += resid;
			len = resid;
			resid = 0;
		} else {
			/* This entry is consumed; continue with the next */
			resid -= (len - *offset);
			len -= *offset;
			*offset = 0;
			*index = *index + 1;
		}

		if ((i == (sg_cnt - 1)) || !resid)
			option = RTSX_SG_VALID | RTSX_SG_END | RTSX_SG_TRANS_DATA;
		else
			option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;

		rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

		if (!resid)
			break;

		sg_ptr = sg_next(sg_ptr);
	}
	dev_dbg(rtsx_dev(chip), "SG table count = %d\n", chip->sgi);

	val |= (u32)(dir & 0x01) << 29;
	val |= ADMA_MODE;

	spin_lock_irq(&rtsx->reg_lock);

	init_completion(&trans_done);

	rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);
	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
			__func__, __LINE__);
		dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
			chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL) {
		err = -EIO;
		spin_unlock_irq(&rtsx->reg_lock);
		goto out;
	}
	spin_unlock_irq(&rtsx->reg_lock);
	/* Wait for TRANS_OK_INT */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
		if (timeleft <= 0) {
			dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
				__func__, __LINE__);
			dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
				chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);
out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

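/*
 * Transfer a whole scatterlist via ADMA. Because the host table holds
 * at most HOST_SG_TBL_BUF_LEN / 8 descriptors, the list is pushed to
 * the hardware in chunks of that size, waiting for each chunk's
 * completion before building the next one.
 */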
static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
				     struct scatterlist *sg, int num_sg,
				     enum dma_data_direction dma_dir,
				     int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int buf_cnt, i;
	int err = 0;
	long timeleft;
	struct scatterlist *sg_ptr;

	if (!sg || (num_sg <= 0))
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;
	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);

	buf_cnt = dma_map_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	sg_ptr = sg;
	for (i = 0; i <= buf_cnt / (HOST_SG_TBL_BUF_LEN / 8); i++) {
		u32 val = TRIG_DMA;
		int sg_cnt, j;

		if (i == buf_cnt / (HOST_SG_TBL_BUF_LEN / 8))
			sg_cnt = buf_cnt % (HOST_SG_TBL_BUF_LEN / 8);
		else
			sg_cnt = HOST_SG_TBL_BUF_LEN / 8;

		chip->sgi = 0;
		for (j = 0; j < sg_cnt; j++) {
			dma_addr_t addr = sg_dma_address(sg_ptr);
			unsigned int len = sg_dma_len(sg_ptr);
			u8 option;

			dev_dbg(rtsx_dev(chip), "DMA addr: 0x%x, Len: 0x%x\n",
				(unsigned int)addr, len);

			if (j == (sg_cnt - 1))
				option = RTSX_SG_VALID | RTSX_SG_END | RTSX_SG_TRANS_DATA;
			else
				option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;

			rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

			sg_ptr = sg_next(sg_ptr);
		}
		dev_dbg(rtsx_dev(chip), "SG table count = %d\n", chip->sgi);

		val |= (u32)(dir & 0x01) << 29;
		val |= ADMA_MODE;

		spin_lock_irq(&rtsx->reg_lock);

		init_completion(&trans_done);

		rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
		rtsx_writel(chip, RTSX_HDBCTLR, val);

		spin_unlock_irq(&rtsx->reg_lock);

		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
		if (timeleft <= 0) {
			dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
				__func__, __LINE__);
			dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
				chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}

		spin_lock_irq(&rtsx->reg_lock);
		if (rtsx->trans_result == TRANS_RESULT_FAIL) {
			err = -EIO;
			spin_unlock_irq(&rtsx->reg_lock);
			goto out;
		}
		spin_unlock_irq(&rtsx->reg_lock);
	}
	/* Wait for TRANS_OK_INT */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
		if (timeleft <= 0) {
			dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
				__func__, __LINE__);
			dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
				chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);
out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

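/*
 * Transfer a single physically contiguous buffer without a
 * scatter-gather table: the buffer is mapped with dma_map_single() and
 * its bus address is programmed into RTSX_HDBAR directly.
 */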
static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf,
			     size_t len, enum dma_data_direction dma_dir,
			     int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	dma_addr_t addr;
	u8 dir;
	int err = 0;
	u32 val = BIT(31);
	long timeleft;

	if (!buf || (len <= 0))
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	addr = dma_map_single(&rtsx->pci->dev, buf, len, dma_dir);
	if (dma_mapping_error(&rtsx->pci->dev, addr))
		return -ENOMEM;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	val |= (u32)(dir & 0x01) << 29;
	val |= (u32)(len & 0x00FFFFFF);
	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	init_completion(&trans_done);

	rtsx->trans_state = STATE_TRANS_BUF;
	rtsx->trans_result = TRANS_NOT_READY;

	rtsx_writel(chip, RTSX_HDBAR, addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);
	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
			__func__, __LINE__);
		dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
			chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_single(&rtsx->pci->dev, addr, len, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

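/*
 * Resumable transfer entry point: when use_sg is non-zero, @buf is
 * treated as a scatterlist and *index / *offset record how far the
 * previous call got, so one large request can be carved into several
 * hardware transfers.
 */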
int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
			       void *buf, size_t len, int use_sg,
			       unsigned int *index, unsigned int *offset,
			       enum dma_data_direction dma_dir, int timeout)
{
	int err = 0;

	/* don't transfer data during abort processing */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;

	if (use_sg) {
		struct scatterlist *sg = buf;

		err = rtsx_transfer_sglist_adma_partial(chip, card, sg, use_sg,
							index, offset, (int)len,
							dma_dir, timeout);
	} else {
		err = rtsx_transfer_buf(chip, card,
					buf, len, dma_dir, timeout);
	}

	if (err < 0) {
		if (RTSX_TST_DELINK(chip)) {
			RTSX_CLR_DELINK(chip);
			chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
			rtsx_reinit_cards(chip, 1);
		}
	}

	return err;
}

int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
		       int use_sg, enum dma_data_direction dma_dir, int timeout)
{
	int err = 0;

	dev_dbg(rtsx_dev(chip), "use_sg = %d\n", use_sg);

	/* don't transfer data during abort processing */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;

	if (use_sg)
		err = rtsx_transfer_sglist_adma(chip, card, buf,
						use_sg, dma_dir, timeout);
	else
		err = rtsx_transfer_buf(chip, card, buf, len, dma_dir, timeout);

	if (err < 0) {
		if (RTSX_TST_DELINK(chip)) {
			RTSX_CLR_DELINK(chip);
			chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
			rtsx_reinit_cards(chip, 1);
		}
	}

	return err;
}
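
/*
 * A minimal caller sketch (hypothetical sizes and timeout): read one
 * 512-byte sector from an SD card into a bounce buffer:
 *
 *	u8 sector[512];
 *	int err = rtsx_transfer_data(chip, SD_CARD, sector, sizeof(sector),
 *				     0, DMA_FROM_DEVICE, 100);
 */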