Linux-libre 5.3.12-gnu
[librecmc/linux-libre.git] / drivers / staging / rts5208 / rtsx_transport.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Driver for Realtek PCI-Express card reader
4  *
5  * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
6  *
7  * Author:
8  *   Wei WANG (wei_wang@realsil.com.cn)
9  *   Micky Ching (micky_ching@realsil.com.cn)
10  */
11
12 #include <linux/blkdev.h>
13 #include <linux/kthread.h>
14 #include <linux/sched.h>
15
16 #include "rtsx.h"
17
18 /***********************************************************************
19  * Scatter-gather transfer buffer access routines
20  ***********************************************************************/
21
22 /*
23  * Copy a buffer of length buflen to/from the srb's transfer buffer.
24  * (Note: for scatter-gather transfers (srb->use_sg > 0), srb->request_buffer
25  * points to a list of s-g entries and we ignore srb->request_bufflen.
26  * For non-scatter-gather transfers, srb->request_buffer points to the
27  * transfer buffer itself and srb->request_bufflen is the buffer's length.)
28  * Update the *index and *offset variables so that the next copy will
29  * pick up from where this one left off.
30  */
31
/*
 * Copy up to @buflen bytes between @buffer and the srb's transfer buffer.
 * @index and @offset are resume cursors (s-g entry number and byte offset
 * within that entry) updated so the next call continues where this one
 * stopped.  @dir selects copy direction (TO_XFER_BUF writes into the srb).
 * Returns the number of bytes actually copied.
 */
unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
				       unsigned int buflen,
				       struct scsi_cmnd *srb,
				       unsigned int *index,
				       unsigned int *offset,
				       enum xfer_buf_dir dir)
{
	unsigned int cnt;

	/* If not using scatter-gather, just transfer the data directly. */
	if (scsi_sg_count(srb) == 0) {
		unsigned char *sgbuffer;

		if (*offset >= scsi_bufflen(srb))
			return 0;
		cnt = min(buflen, scsi_bufflen(srb) - *offset);

		/* scsi_sglist() is the flat kernel buffer in this case */
		sgbuffer = (unsigned char *)scsi_sglist(srb) + *offset;

		if (dir == TO_XFER_BUF)
			memcpy(sgbuffer, buffer, cnt);
		else
			memcpy(buffer, sgbuffer, cnt);
		*offset += cnt;

	/*
	 * Using scatter-gather.  We have to go through the list one entry
	 * at a time.  Each s-g entry contains some number of pages, and
	 * each page has to be kmap()'ed separately.
	 */
	} else {
		/*
		 * NOTE(review): plain array indexing (and ++sg below) assumes
		 * a flat, unchained scatterlist — confirm callers never pass
		 * a chained list here.
		 */
		struct scatterlist *sg =
				(struct scatterlist *)scsi_sglist(srb)
				+ *index;

		/*
		 * This loop handles a single s-g list entry, which may
		 * include multiple pages.  Find the initial page structure
		 * and the starting offset within the page, and update
		 * the *offset and *index values for the next loop.
		 */
		cnt = 0;
		while (cnt < buflen && *index < scsi_sg_count(srb)) {
			struct page *page = sg_page(sg) +
					((sg->offset + *offset) >> PAGE_SHIFT);
			unsigned int poff = (sg->offset + *offset) &
					    (PAGE_SIZE - 1);
			unsigned int sglen = sg->length - *offset;

			if (sglen > buflen - cnt) {
				/* Transfer ends within this s-g entry */
				sglen = buflen - cnt;
				*offset += sglen;
			} else {
				/* Transfer continues to next s-g entry */
				*offset = 0;
				++*index;
				++sg;
			}

			/* Copy page by page; each page is mapped separately */
			while (sglen > 0) {
				unsigned int plen = min(sglen, (unsigned int)
						PAGE_SIZE - poff);
				unsigned char *ptr = kmap(page);

				if (dir == TO_XFER_BUF)
					memcpy(ptr + poff, buffer + cnt, plen);
				else
					memcpy(buffer + cnt, ptr + poff, plen);
				kunmap(page);

				/* Start at the beginning of the next page */
				poff = 0;
				++page;
				cnt += plen;
				sglen -= plen;
			}
		}
	}

	/* Return the amount actually transferred */
	return cnt;
}
115
116 /*
117  * Store the contents of buffer into srb's transfer buffer and set the
118  * SCSI residue.
119  */
120 void rtsx_stor_set_xfer_buf(unsigned char *buffer,
121                             unsigned int buflen, struct scsi_cmnd *srb)
122 {
123         unsigned int index = 0, offset = 0;
124
125         rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
126                                   TO_XFER_BUF);
127         if (buflen < scsi_bufflen(srb))
128                 scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
129 }
130
131 void rtsx_stor_get_xfer_buf(unsigned char *buffer,
132                             unsigned int buflen, struct scsi_cmnd *srb)
133 {
134         unsigned int index = 0, offset = 0;
135
136         rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
137                                   FROM_XFER_BUF);
138         if (buflen < scsi_bufflen(srb))
139                 scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
140 }
141
142 /***********************************************************************
143  * Transport routines
144  ***********************************************************************/
145
146 /*
147  * Invoke the transport and basic error-handling/recovery methods
148  *
149  * This is used to send the message to the device and receive the response.
150  */
151 void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip)
152 {
153         int result;
154
155         result = rtsx_scsi_handler(srb, chip);
156
157         /*
158          * if the command gets aborted by the higher layers, we need to
159          * short-circuit all other processing.
160          */
161         if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
162                 dev_dbg(rtsx_dev(chip), "-- command was aborted\n");
163                 srb->result = DID_ABORT << 16;
164                 goto handle_errors;
165         }
166
167         /* if there is a transport error, reset and don't auto-sense */
168         if (result == TRANSPORT_ERROR) {
169                 dev_dbg(rtsx_dev(chip), "-- transport indicates error, resetting\n");
170                 srb->result = DID_ERROR << 16;
171                 goto handle_errors;
172         }
173
174         srb->result = SAM_STAT_GOOD;
175
176         /*
177          * If we have a failure, we're going to do a REQUEST_SENSE
178          * automatically.  Note that we differentiate between a command
179          * "failure" and an "error" in the transport mechanism.
180          */
181         if (result == TRANSPORT_FAILED) {
182                 /* set the result so the higher layers expect this data */
183                 srb->result = SAM_STAT_CHECK_CONDITION;
184                 memcpy(srb->sense_buffer,
185                        (unsigned char *)&chip->sense_buffer[SCSI_LUN(srb)],
186                        sizeof(struct sense_data_t));
187         }
188
189         return;
190
191 handle_errors:
192         return;
193 }
194
195 void rtsx_add_cmd(struct rtsx_chip *chip,
196                   u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
197 {
198         __le32 *cb = (__le32 *)(chip->host_cmds_ptr);
199         u32 val = 0;
200
201         val |= (u32)(cmd_type & 0x03) << 30;
202         val |= (u32)(reg_addr & 0x3FFF) << 16;
203         val |= (u32)mask << 8;
204         val |= (u32)data;
205
206         spin_lock_irq(&chip->rtsx->reg_lock);
207         if (chip->ci < (HOST_CMDS_BUF_LEN / 4))
208                 cb[(chip->ci)++] = cpu_to_le32(val);
209
210         spin_unlock_irq(&chip->rtsx->reg_lock);
211 }
212
213 void rtsx_send_cmd_no_wait(struct rtsx_chip *chip)
214 {
215         u32 val = BIT(31);
216
217         rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);
218
219         val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
220         /* Hardware Auto Response */
221         val |= 0x40000000;
222         rtsx_writel(chip, RTSX_HCBCTLR, val);
223 }
224
/*
 * Execute the queued host commands and wait (interruptibly, up to
 * @timeout ms) for the TRANS_OK interrupt.  @card selects which card's
 * card-detect state the ISR should watch during the transfer.
 * Returns 0 on success, -ETIMEDOUT on timeout/signal, -EIO on a
 * hardware-reported failure; on error the command engine is stopped.
 */
int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u32 val = BIT(31);	/* start-transfer bit */
	long timeleft;
	int err = 0;

	/* tell the ISR which card-detect condition to watch */
	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;
	rtsx->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);
	rtsx->trans_state = STATE_TRANS_CMD;

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	/* command length in bytes (chip->ci entries of 4 bytes each) */
	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		/* timeout or interrupted by a signal */
		dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
			chip->int_reg);
		err = -ETIMEDOUT;
		goto finish_send_cmd;
	}

	/* read the result the ISR recorded, under the register lock */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

finish_send_cmd:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}
286
287 static inline void rtsx_add_sg_tbl(
288         struct rtsx_chip *chip, u32 addr, u32 len, u8 option)
289 {
290         __le64 *sgb = (__le64 *)(chip->host_sg_tbl_ptr);
291         u64 val = 0;
292         u32 temp_len = 0;
293         u8  temp_opt = 0;
294
295         do {
296                 if (len > 0x80000) {
297                         temp_len = 0x80000;
298                         temp_opt = option & (~RTSX_SG_END);
299                 } else {
300                         temp_len = len;
301                         temp_opt = option;
302                 }
303                 val = ((u64)addr << 32) | ((u64)temp_len << 12) | temp_opt;
304
305                 if (chip->sgi < (HOST_SG_TBL_BUF_LEN / 8))
306                         sgb[(chip->sgi)++] = cpu_to_le64(val);
307
308                 len -= temp_len;
309                 addr += temp_len;
310         } while (len);
311 }
312
/*
 * DMA part of a scatterlist in ADMA mode, resuming from (*index, *offset)
 * and transferring up to @size bytes.  The cursors are updated so a
 * subsequent call continues where this one stopped.  Waits (interruptibly,
 * up to @timeout ms per stage) for the transfer-done interrupt.
 * Returns 0 on success, -EIO/-ENXIO on bad arguments or hardware failure,
 * -ETIMEDOUT on timeout/signal; on error the command engine is stopped.
 */
static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
					     struct scatterlist *sg, int num_sg,
					     unsigned int *index,
					     unsigned int *offset, int size,
					     enum dma_data_direction dma_dir,
					     int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int sg_cnt, i, resid;
	int err = 0;
	long timeleft;
	struct scatterlist *sg_ptr;
	u32 val = TRIG_DMA;

	if (!sg || (num_sg <= 0) || !offset || !index)
		return -EIO;

	/* map the kernel DMA direction onto the chip's direction bit */
	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	/* tell the ISR which card-detect condition to watch */
	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);

	sg_cnt = dma_map_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	resid = size;		/* bytes still to be queued */
	sg_ptr = sg;
	chip->sgi = 0;		/* rebuild the SG table from entry 0 */
	/*
	 * Usually the next entry will be @sg@ + 1, but if this sg element
	 * is part of a chained scatterlist, it could jump to the start of
	 * a new scatterlist array. So here we use sg_next to move to
	 * the proper sg.
	 */
	for (i = 0; i < *index; i++)
		sg_ptr = sg_next(sg_ptr);
	for (i = *index; i < sg_cnt; i++) {
		dma_addr_t addr;
		unsigned int len;
		u8 option;

		addr = sg_dma_address(sg_ptr);
		len = sg_dma_len(sg_ptr);

		dev_dbg(rtsx_dev(chip), "DMA addr: 0x%x, Len: 0x%x\n",
			(unsigned int)addr, len);
		dev_dbg(rtsx_dev(chip), "*index = %d, *offset = %d\n",
			*index, *offset);

		/* skip the part of this entry consumed by a previous call */
		addr += *offset;

		if ((len - *offset) > resid) {
			/* transfer ends inside this entry; remember where */
			*offset += resid;
			len = resid;
			resid = 0;
		} else {
			/* entry fully consumed; advance the resume cursor */
			resid -= (len - *offset);
			len -= *offset;
			*offset = 0;
			*index = *index + 1;
		}
		/* the last queued descriptor must carry RTSX_SG_END */
		if ((i == (sg_cnt - 1)) || !resid)
			option = RTSX_SG_VALID | RTSX_SG_END | RTSX_SG_TRANS_DATA;
		else
			option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;

		rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

		if (!resid)
			break;

		sg_ptr = sg_next(sg_ptr);
	}

	dev_dbg(rtsx_dev(chip), "SG table count = %d\n", chip->sgi);

	val |= (u32)(dir & 0x01) << 29;
	val |= ADMA_MODE;

	spin_lock_irq(&rtsx->reg_lock);

	init_completion(&trans_done);

	rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);	/* triggers the DMA */

	spin_unlock_irq(&rtsx->reg_lock);

	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
			__func__, __LINE__);
		dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
			chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL) {
		err = -EIO;
		spin_unlock_irq(&rtsx->reg_lock);
		goto out;
	}
	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		/* the ISR has not reported completion yet; wait again */
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
		if (timeleft <= 0) {
			dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
				__func__, __LINE__);
			dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
				chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}
479
480 static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
481                                      struct scatterlist *sg, int num_sg,
482                                      enum dma_data_direction dma_dir,
483                                      int timeout)
484 {
485         struct rtsx_dev *rtsx = chip->rtsx;
486         struct completion trans_done;
487         u8 dir;
488         int buf_cnt, i;
489         int err = 0;
490         long timeleft;
491         struct scatterlist *sg_ptr;
492
493         if (!sg || (num_sg <= 0))
494                 return -EIO;
495
496         if (dma_dir == DMA_TO_DEVICE)
497                 dir = HOST_TO_DEVICE;
498         else if (dma_dir == DMA_FROM_DEVICE)
499                 dir = DEVICE_TO_HOST;
500         else
501                 return -ENXIO;
502
503         if (card == SD_CARD)
504                 rtsx->check_card_cd = SD_EXIST;
505         else if (card == MS_CARD)
506                 rtsx->check_card_cd = MS_EXIST;
507         else if (card == XD_CARD)
508                 rtsx->check_card_cd = XD_EXIST;
509         else
510                 rtsx->check_card_cd = 0;
511
512         spin_lock_irq(&rtsx->reg_lock);
513
514         /* set up data structures for the wakeup system */
515         rtsx->done = &trans_done;
516
517         rtsx->trans_state = STATE_TRANS_SG;
518         rtsx->trans_result = TRANS_NOT_READY;
519
520         spin_unlock_irq(&rtsx->reg_lock);
521
522         buf_cnt = dma_map_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);
523
524         sg_ptr = sg;
525
526         for (i = 0; i <= buf_cnt / (HOST_SG_TBL_BUF_LEN / 8); i++) {
527                 u32 val = TRIG_DMA;
528                 int sg_cnt, j;
529
530                 if (i == buf_cnt / (HOST_SG_TBL_BUF_LEN / 8))
531                         sg_cnt = buf_cnt % (HOST_SG_TBL_BUF_LEN / 8);
532                 else
533                         sg_cnt = HOST_SG_TBL_BUF_LEN / 8;
534
535                 chip->sgi = 0;
536                 for (j = 0; j < sg_cnt; j++) {
537                         dma_addr_t addr = sg_dma_address(sg_ptr);
538                         unsigned int len = sg_dma_len(sg_ptr);
539                         u8 option;
540
541                         dev_dbg(rtsx_dev(chip), "DMA addr: 0x%x, Len: 0x%x\n",
542                                 (unsigned int)addr, len);
543
544                         if (j == (sg_cnt - 1))
545                                 option = RTSX_SG_VALID | RTSX_SG_END | RTSX_SG_TRANS_DATA;
546                         else
547                                 option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
548
549                         rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);
550
551                         sg_ptr = sg_next(sg_ptr);
552                 }
553
554                 dev_dbg(rtsx_dev(chip), "SG table count = %d\n", chip->sgi);
555
556                 val |= (u32)(dir & 0x01) << 29;
557                 val |= ADMA_MODE;
558
559                 spin_lock_irq(&rtsx->reg_lock);
560
561                 init_completion(&trans_done);
562
563                 rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
564                 rtsx_writel(chip, RTSX_HDBCTLR, val);
565
566                 spin_unlock_irq(&rtsx->reg_lock);
567
568                 timeleft = wait_for_completion_interruptible_timeout(
569                         &trans_done, msecs_to_jiffies(timeout));
570                 if (timeleft <= 0) {
571                         dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
572                                 __func__, __LINE__);
573                         dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
574                                 chip->int_reg);
575                         err = -ETIMEDOUT;
576                         goto out;
577                 }
578
579                 spin_lock_irq(&rtsx->reg_lock);
580                 if (rtsx->trans_result == TRANS_RESULT_FAIL) {
581                         err = -EIO;
582                         spin_unlock_irq(&rtsx->reg_lock);
583                         goto out;
584                 }
585                 spin_unlock_irq(&rtsx->reg_lock);
586
587                 sg_ptr += sg_cnt;
588         }
589
590         /* Wait for TRANS_OK_INT */
591         spin_lock_irq(&rtsx->reg_lock);
592         if (rtsx->trans_result == TRANS_NOT_READY) {
593                 init_completion(&trans_done);
594                 spin_unlock_irq(&rtsx->reg_lock);
595                 timeleft = wait_for_completion_interruptible_timeout(
596                         &trans_done, msecs_to_jiffies(timeout));
597                 if (timeleft <= 0) {
598                         dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
599                                 __func__, __LINE__);
600                         dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
601                                 chip->int_reg);
602                         err = -ETIMEDOUT;
603                         goto out;
604                 }
605         } else {
606                 spin_unlock_irq(&rtsx->reg_lock);
607         }
608
609         spin_lock_irq(&rtsx->reg_lock);
610         if (rtsx->trans_result == TRANS_RESULT_FAIL)
611                 err = -EIO;
612         else if (rtsx->trans_result == TRANS_RESULT_OK)
613                 err = 0;
614
615         spin_unlock_irq(&rtsx->reg_lock);
616
617 out:
618         rtsx->done = NULL;
619         rtsx->trans_state = STATE_TRANS_NONE;
620         dma_unmap_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);
621
622         if (err < 0)
623                 rtsx_stop_cmd(chip, card);
624
625         return err;
626 }
627
628 static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf,
629                              size_t len, enum dma_data_direction dma_dir,
630                              int timeout)
631 {
632         struct rtsx_dev *rtsx = chip->rtsx;
633         struct completion trans_done;
634         dma_addr_t addr;
635         u8 dir;
636         int err = 0;
637         u32 val = BIT(31);
638         long timeleft;
639
640         if (!buf || (len <= 0))
641                 return -EIO;
642
643         if (dma_dir == DMA_TO_DEVICE)
644                 dir = HOST_TO_DEVICE;
645         else if (dma_dir == DMA_FROM_DEVICE)
646                 dir = DEVICE_TO_HOST;
647         else
648                 return -ENXIO;
649
650         addr = dma_map_single(&rtsx->pci->dev, buf, len, dma_dir);
651         if (dma_mapping_error(&rtsx->pci->dev, addr))
652                 return -ENOMEM;
653
654         if (card == SD_CARD)
655                 rtsx->check_card_cd = SD_EXIST;
656         else if (card == MS_CARD)
657                 rtsx->check_card_cd = MS_EXIST;
658         else if (card == XD_CARD)
659                 rtsx->check_card_cd = XD_EXIST;
660         else
661                 rtsx->check_card_cd = 0;
662
663         val |= (u32)(dir & 0x01) << 29;
664         val |= (u32)(len & 0x00FFFFFF);
665
666         spin_lock_irq(&rtsx->reg_lock);
667
668         /* set up data structures for the wakeup system */
669         rtsx->done = &trans_done;
670
671         init_completion(&trans_done);
672
673         rtsx->trans_state = STATE_TRANS_BUF;
674         rtsx->trans_result = TRANS_NOT_READY;
675
676         rtsx_writel(chip, RTSX_HDBAR, addr);
677         rtsx_writel(chip, RTSX_HDBCTLR, val);
678
679         spin_unlock_irq(&rtsx->reg_lock);
680
681         /* Wait for TRANS_OK_INT */
682         timeleft = wait_for_completion_interruptible_timeout(
683                 &trans_done, msecs_to_jiffies(timeout));
684         if (timeleft <= 0) {
685                 dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
686                         __func__, __LINE__);
687                 dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
688                         chip->int_reg);
689                 err = -ETIMEDOUT;
690                 goto out;
691         }
692
693         spin_lock_irq(&rtsx->reg_lock);
694         if (rtsx->trans_result == TRANS_RESULT_FAIL)
695                 err = -EIO;
696         else if (rtsx->trans_result == TRANS_RESULT_OK)
697                 err = 0;
698
699         spin_unlock_irq(&rtsx->reg_lock);
700
701 out:
702         rtsx->done = NULL;
703         rtsx->trans_state = STATE_TRANS_NONE;
704         dma_unmap_single(&rtsx->pci->dev, addr, len, dma_dir);
705
706         if (err < 0)
707                 rtsx_stop_cmd(chip, card);
708
709         return err;
710 }
711
712 int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
713                                void *buf, size_t len, int use_sg,
714                                unsigned int *index, unsigned int *offset,
715                                enum dma_data_direction dma_dir, int timeout)
716 {
717         int err = 0;
718
719         /* don't transfer data during abort processing */
720         if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
721                 return -EIO;
722
723         if (use_sg) {
724                 struct scatterlist *sg = buf;
725
726                 err = rtsx_transfer_sglist_adma_partial(chip, card, sg, use_sg,
727                                                         index, offset, (int)len,
728                                                         dma_dir, timeout);
729         } else {
730                 err = rtsx_transfer_buf(chip, card,
731                                         buf, len, dma_dir, timeout);
732         }
733         if (err < 0) {
734                 if (RTSX_TST_DELINK(chip)) {
735                         RTSX_CLR_DELINK(chip);
736                         chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
737                         rtsx_reinit_cards(chip, 1);
738                 }
739         }
740
741         return err;
742 }
743
744 int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
745                        int use_sg, enum dma_data_direction dma_dir, int timeout)
746 {
747         int err = 0;
748
749         dev_dbg(rtsx_dev(chip), "use_sg = %d\n", use_sg);
750
751         /* don't transfer data during abort processing */
752         if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
753                 return -EIO;
754
755         if (use_sg) {
756                 err = rtsx_transfer_sglist_adma(chip, card, buf,
757                                                 use_sg, dma_dir, timeout);
758         } else {
759                 err = rtsx_transfer_buf(chip, card, buf, len, dma_dir, timeout);
760         }
761
762         if (err < 0) {
763                 if (RTSX_TST_DELINK(chip)) {
764                         RTSX_CLR_DELINK(chip);
765                         chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
766                         rtsx_reinit_cards(chip, 1);
767                 }
768         }
769
770         return err;
771 }
772