/*
    User DMA

    Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004  Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007  Hans Verkuil <hverkuil@xs4all.nl>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "ivtv-driver.h"
#include "ivtv-udma.h"

void ivtv_udma_get_page_info(struct ivtv_dma_page_info *dma_page, unsigned long first, unsigned long size)
{
        dma_page->uaddr = first & PAGE_MASK;
        dma_page->offset = first & ~PAGE_MASK;
        dma_page->tail = 1 + ((first+size-1) & ~PAGE_MASK);
        dma_page->first = (first & PAGE_MASK) >> PAGE_SHIFT;
        dma_page->last = ((first+size-1) & PAGE_MASK) >> PAGE_SHIFT;
        dma_page->page_count = dma_page->last - dma_page->first + 1;
        if (dma_page->page_count == 1) dma_page->tail -= dma_page->offset;
}
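
/*
 * Worked example of the arithmetic above (illustrative values, assuming
 * 4 KiB pages, i.e. PAGE_SHIFT == 12): for first = 0x12345678 and
 * size = 0x2000,
 *
 *     uaddr      = 0x12345000    offset    = 0x678
 *     first page = 0x12345       last page = 0x12347    page_count = 3
 *     tail       = 1 + 0x677 = 0x678 bytes used in the last page
 *
 * When page_count == 1 the buffer starts and ends in the same page, so the
 * offset is subtracted from tail to leave only the bytes actually covered.
 */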

int ivtv_udma_fill_sg_list (struct ivtv_user_dma *dma, struct ivtv_dma_page_info *dma_page, int map_offset)
{
        int i, offset;
        unsigned long flags;

        if (map_offset < 0)
                return map_offset;

        offset = dma_page->offset;

        /* Fill SG Array with new values */
        for (i = 0; i < dma_page->page_count; i++) {
                unsigned int len = (i == dma_page->page_count - 1) ?
                        dma_page->tail : PAGE_SIZE - offset;

                if (PageHighMem(dma->map[map_offset])) {
                        void *src;

                        if (dma->bouncemap[map_offset] == NULL)
                                dma->bouncemap[map_offset] = alloc_page(GFP_KERNEL);
                        if (dma->bouncemap[map_offset] == NULL)
                                return -1;
                        local_irq_save(flags);
                        src = kmap_atomic(dma->map[map_offset]) + offset;
                        memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len);
                        kunmap_atomic(src);
                        local_irq_restore(flags);
                        sg_set_page(&dma->SGlist[map_offset], dma->bouncemap[map_offset], len, offset);
                }
                else {
                        sg_set_page(&dma->SGlist[map_offset], dma->map[map_offset], len, offset);
                }
                offset = 0;
                map_offset++;
        }
        return map_offset;
}
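
/*
 * Note on ivtv_udma_fill_sg_list(): highmem pages are not placed in the
 * scatterlist directly; their contents are first copied into lowmem bounce
 * pages (allocated on demand into dma->bouncemap), and the bounce page is
 * used instead.  The return value is the next free map_offset on success,
 * or -1 if a bounce page could not be allocated (the caller below turns
 * that into -ENOMEM).
 */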

void ivtv_udma_fill_sg_array (struct ivtv_user_dma *dma, u32 buffer_offset, u32 buffer_offset_2, u32 split) {
        int i;
        struct scatterlist *sg;

        for (i = 0, sg = dma->SGlist; i < dma->SG_length; i++, sg = sg_next(sg)) {
                dma->SGarray[i].size = cpu_to_le32(sg_dma_len(sg));
                dma->SGarray[i].src = cpu_to_le32(sg_dma_address(sg));
                dma->SGarray[i].dst = cpu_to_le32(buffer_offset);
                buffer_offset += sg_dma_len(sg);

                split -= sg_dma_len(sg);
                if (split == 0)
                        buffer_offset = buffer_offset_2;
        }
}
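
/*
 * The split/buffer_offset_2 pair allows a transfer to be redirected to a
 * second destination once `split' bytes have been written: when the running
 * counter reaches exactly zero, the remaining entries target buffer_offset_2.
 * ivtv_udma_setup() below passes split = -1 (0xffffffff as a u32), so for
 * transfers well below 4 GiB the counter never reaches zero and only the one
 * destination is used; the two-destination form is intended for callers
 * elsewhere in the driver whose frames span two hardware buffer regions.
 */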

/* User DMA Buffers */
void ivtv_udma_alloc(struct ivtv *itv)
{
        if (itv->udma.SG_handle == 0) {
                /* Map DMA Page Array Buffer */
                itv->udma.SG_handle = pci_map_single(itv->pdev, itv->udma.SGarray,
                           sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE);
                ivtv_udma_sync_for_cpu(itv);
        }
}
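
/*
 * The SGarray descriptor table is mapped for the device only once, on first
 * use; SG_handle holds the bus address that ivtv_udma_start() later writes to
 * the DMA address register.  The immediate ivtv_udma_sync_for_cpu() hands
 * ownership of the mapping back to the CPU so the driver can fill in
 * descriptors before each transfer.
 */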

int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
                       void __user *userbuf, int size_in_bytes)
{
        struct ivtv_dma_page_info user_dma;
        struct ivtv_user_dma *dma = &itv->udma;
        int i, err;

        IVTV_DEBUG_DMA("ivtv_udma_setup, dst: 0x%08x\n", (unsigned int)ivtv_dest_addr);

        /* Still in USE */
        if (dma->SG_length || dma->page_count) {
                IVTV_DEBUG_WARN("ivtv_udma_setup: SG_length %d page_count %d still full?\n",
                           dma->SG_length, dma->page_count);
                return -EBUSY;
        }

        ivtv_udma_get_page_info(&user_dma, (unsigned long)userbuf, size_in_bytes);

        if (user_dma.page_count <= 0) {
                IVTV_DEBUG_WARN("ivtv_udma_setup: Error %d page_count from %d bytes %d offset\n",
                           user_dma.page_count, size_in_bytes, user_dma.offset);
                return -EINVAL;
        }

        /* Get user pages for DMA Xfer */
        err = get_user_pages_unlocked(current, current->mm,
                        user_dma.uaddr, user_dma.page_count, dma->map,
                        FOLL_FORCE);

        if (user_dma.page_count != err) {
                IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
                           err, user_dma.page_count);
                if (err >= 0) {
                        for (i = 0; i < err; i++)
                                put_page(dma->map[i]);
                        return -EINVAL;
                }
                return err;
        }

        dma->page_count = user_dma.page_count;

        /* Fill SG List with new values */
        if (ivtv_udma_fill_sg_list(dma, &user_dma, 0) < 0) {
                for (i = 0; i < dma->page_count; i++) {
                        put_page(dma->map[i]);
                }
                dma->page_count = 0;
                return -ENOMEM;
        }

        /* Map SG List */
        dma->SG_length = pci_map_sg(itv->pdev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);

        /* Fill SG Array with new values */
        ivtv_udma_fill_sg_array (dma, ivtv_dest_addr, 0, -1);

        /* Tag SG Array with Interrupt Bit */
        dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);

        ivtv_udma_sync_for_device(itv);
        return dma->page_count;
}
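
/*
 * A sketch of the intended calling sequence (not a function in this file;
 * the wait step is only indicated, since completion is signalled through the
 * interrupt handling code elsewhere in the driver):
 *
 *     pages = ivtv_udma_setup(itv, dest_addr, userbuf, size);
 *     if (pages <= 0)
 *             return pages;
 *     ivtv_udma_prepare(itv);
 *     ... wait for the transfer to complete ...
 *     ivtv_udma_unmap(itv);
 *
 * ivtv_udma_setup() pins the user pages, builds and maps the scatterlist and
 * the device-visible SGarray, and returns the number of pinned pages or a
 * negative errno.
 */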

void ivtv_udma_unmap(struct ivtv *itv)
{
        struct ivtv_user_dma *dma = &itv->udma;
        int i;

        IVTV_DEBUG_INFO("ivtv_unmap_user_dma\n");

        /* Nothing to free */
        if (dma->page_count == 0)
                return;

        /* Unmap Scatterlist */
        if (dma->SG_length) {
                pci_unmap_sg(itv->pdev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);
                dma->SG_length = 0;
        }
        /* sync DMA */
        ivtv_udma_sync_for_cpu(itv);

        /* Release User Pages */
        for (i = 0; i < dma->page_count; i++) {
                put_page(dma->map[i]);
        }
        dma->page_count = 0;
}

void ivtv_udma_free(struct ivtv *itv)
{
        int i;

        /* Unmap SG Array */
        if (itv->udma.SG_handle) {
                pci_unmap_single(itv->pdev, itv->udma.SG_handle,
                         sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE);
        }

        /* Unmap Scatterlist */
        if (itv->udma.SG_length) {
                pci_unmap_sg(itv->pdev, itv->udma.SGlist, itv->udma.page_count, PCI_DMA_TODEVICE);
        }

        for (i = 0; i < IVTV_DMA_SG_OSD_ENT; i++) {
                if (itv->udma.bouncemap[i])
                        __free_page(itv->udma.bouncemap[i]);
        }
}

void ivtv_udma_start(struct ivtv *itv)
{
        IVTV_DEBUG_DMA("start UDMA\n");
        write_reg(itv->udma.SG_handle, IVTV_REG_DECDMAADDR);
        write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
        set_bit(IVTV_F_I_DMA, &itv->i_flags);
        set_bit(IVTV_F_I_UDMA, &itv->i_flags);
        clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags);
}

void ivtv_udma_prepare(struct ivtv *itv)
{
        unsigned long flags;

        spin_lock_irqsave(&itv->dma_reg_lock, flags);
        if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
                ivtv_udma_start(itv);
        else
                set_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags);
        spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}
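
/*
 * ivtv_udma_prepare() either starts the transfer immediately (via
 * ivtv_udma_start(), which programs the SGarray bus address and sets the DMA
 * transfer bit) or, if another DMA is already in flight, merely records
 * IVTV_F_I_UDMA_PENDING under the DMA register lock.  The pending flag is
 * acted on when the current transfer completes; that handoff lives in the
 * driver's interrupt handling code, not in this file.
 */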