// SPDX-License-Identifier: GPL-2.0+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>       /* get_user_pages(), put_page() */
#include <linux/kernel.h>   /* printk() */
#include <linux/slab.h>     /* kmalloc() */
#include <linux/fs.h>       /* everything... */
#include <linux/errno.h>    /* error codes */
#include <linux/types.h>    /* size_t */
#include <linux/cdev.h>
#include <linux/uaccess.h>  /* copy_*_user */
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include "kpc_dma_driver.h"
#include "uapi.h"           /* KND_IOCTL_* numbers */

/********** Helper Functions **********/
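
/*
 * Number of pages a user buffer spans: the page frame of the last byte minus
 * the page frame of the first byte, inclusive.
 */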
static inline
unsigned int count_pages(unsigned long iov_base, size_t iov_len)
{
	unsigned long first = (iov_base & PAGE_MASK) >> PAGE_SHIFT;
	unsigned long last  = ((iov_base + iov_len - 1) & PAGE_MASK) >> PAGE_SHIFT;

	return last - first + 1;
}
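
/*
 * The engine moves at most 0x80000 (512 KiB) bytes per descriptor, so a
 * single DMA-mapped scatterlist entry may be split into several "parts".
 */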
static inline
unsigned int count_parts_for_sge(struct scatterlist *sg)
{
	return DIV_ROUND_UP(sg_dma_len(sg), 0x80000);
}

/********** Transfer Helpers **********/
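
/*
 * Perform one synchronous transfer: pin the user buffer, build and DMA-map a
 * scatterlist for it, carve that into hardware descriptors, hand them to the
 * engine, and sleep until transfer_complete_cb() signals the completion.
 */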
static int kpc_dma_transfer(struct dev_private_data *priv,
			    unsigned long iov_base, size_t iov_len)
{
	unsigned int i = 0;
	unsigned int p;
	unsigned int pcnt;
	long rv = 0;
	struct kpc_dma_device *ldev;
	struct aio_cb_data *acd;
	DECLARE_COMPLETION_ONSTACK(done);
	u32 desc_needed = 0;
	struct scatterlist *sg;
	u32 num_descrs_avail;
	struct kpc_dma_descriptor *desc;
	u64 card_addr;
	u64 dma_addr;
	u64 user_ctl;

	ldev = priv->ldev;

	acd = kzalloc(sizeof(*acd), GFP_KERNEL);
	if (!acd) {
		dev_err(&priv->ldev->pldev->dev, "Couldn't kmalloc space for the aio data\n");
		return -ENOMEM;
	}
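	/* Fill with a poison pattern, presumably so missed initializations stand out when debugging. */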
	memset(acd, 0x66, sizeof(struct aio_cb_data));

	acd->priv = priv;
	acd->ldev = priv->ldev;
	acd->cpl = &done;
	acd->flags = 0;
	acd->len = iov_len;
	acd->page_count = count_pages(iov_base, iov_len);

	// Allocate an array of page pointers
	acd->user_pages = kcalloc(acd->page_count, sizeof(struct page *), GFP_KERNEL);
	if (!acd->user_pages) {
		dev_err(&priv->ldev->pldev->dev, "Couldn't kmalloc space for the page pointers\n");
		rv = -ENOMEM;
		goto err_alloc_userpages;
	}

	// Lock the user buffer pages in memory, and hold on to the page pointers (for the sglist)
	down_read(&current->mm->mmap_sem);	/* get memory map semaphore */
	rv = get_user_pages(iov_base, acd->page_count, FOLL_TOUCH | FOLL_WRITE | FOLL_GET, acd->user_pages, NULL);
	up_read(&current->mm->mmap_sem);	/* release the semaphore */
	if (rv != acd->page_count) {
		dev_err(&priv->ldev->pldev->dev, "Couldn't get_user_pages (%ld)\n", rv);
		/* A partial pin still holds page references; drop them before bailing out. */
		if (rv > 0) {
			for (i = 0; i < rv; i++)
				put_page(acd->user_pages[i]);
		}
		if (rv >= 0)
			rv = -EFAULT;
		goto err_get_user_pages;
	}

	// Allocate and setup the sg_table (scatterlist entries)
	rv = sg_alloc_table_from_pages(&acd->sgt, acd->user_pages, acd->page_count, iov_base & (PAGE_SIZE - 1), iov_len, GFP_KERNEL);
	if (rv) {
		dev_err(&priv->ldev->pldev->dev, "Couldn't alloc sg_table (%ld)\n", rv);
		goto err_alloc_sg_table;
	}

	// Setup the DMA mapping for all the sg entries
	acd->mapped_entry_count = dma_map_sg(&ldev->pldev->dev, acd->sgt.sgl, acd->sgt.nents, ldev->dir);
	if (acd->mapped_entry_count <= 0) {
		dev_err(&priv->ldev->pldev->dev, "Couldn't dma_map_sg (%d)\n", acd->mapped_entry_count);
		rv = -ENOMEM;
		goto err_dma_map_sg;
	}

	// Calculate how many descriptors are actually needed for this transfer.
	for_each_sg(acd->sgt.sgl, sg, acd->mapped_entry_count, i) {
		desc_needed += count_parts_for_sge(sg);
	}

	lock_engine(ldev);

	// Figure out how many descriptors are available and return an error if there aren't enough
	num_descrs_avail = count_descriptors_available(ldev);
	dev_dbg(&priv->ldev->pldev->dev, " mapped_entry_count = %d num_descrs_needed = %d num_descrs_avail = %d\n", acd->mapped_entry_count, desc_needed, num_descrs_avail);
	if (desc_needed >= ldev->desc_pool_cnt) {
		dev_warn(&priv->ldev->pldev->dev, " mapped_entry_count = %d num_descrs_needed = %d num_descrs_avail = %d TOO MANY to ever complete!\n", acd->mapped_entry_count, desc_needed, num_descrs_avail);
		rv = -EMSGSIZE;
		goto err_descr_too_many;
	}
	if (desc_needed > num_descrs_avail) {
		dev_warn(&priv->ldev->pldev->dev, " mapped_entry_count = %d num_descrs_needed = %d num_descrs_avail = %d Too many to complete right now.\n", acd->mapped_entry_count, desc_needed, num_descrs_avail);
		rv = -EAGAIN;
		goto err_descr_too_many;
	}

	// Loop through all the sg table entries and fill out a descriptor for each one.
	desc = ldev->desc_next;
	card_addr = acd->priv->card_addr;
	for_each_sg(acd->sgt.sgl, sg, acd->mapped_entry_count, i) {
		pcnt = count_parts_for_sge(sg);
		for (p = 0; p < pcnt; p++) {
			// Fill out the descriptor
			BUG_ON(!desc);
			clear_desc(desc);
			if (p != pcnt - 1)
				desc->DescByteCount = 0x80000;
			else
				desc->DescByteCount = sg_dma_len(sg) - (p * 0x80000);
			desc->DescBufferByteCount = desc->DescByteCount;

			desc->DescControlFlags |= DMA_DESC_CTL_IRQONERR;
			if (i == 0 && p == 0)
				desc->DescControlFlags |= DMA_DESC_CTL_SOP;
			if (i == acd->mapped_entry_count - 1 && p == pcnt - 1)
				desc->DescControlFlags |= DMA_DESC_CTL_EOP | DMA_DESC_CTL_IRQONDONE;

			desc->DescCardAddrLS = (card_addr & 0xFFFFFFFF);
			desc->DescCardAddrMS = (card_addr >> 32) & 0xF;
			card_addr += desc->DescByteCount;

			dma_addr = sg_dma_address(sg) + (p * 0x80000);
			desc->DescSystemAddrLS = (dma_addr & 0x00000000FFFFFFFF) >> 0;
			desc->DescSystemAddrMS = (dma_addr & 0xFFFFFFFF00000000) >> 32;

			user_ctl = acd->priv->user_ctl;
			if (i == acd->mapped_entry_count - 1 && p == pcnt - 1)
				user_ctl = acd->priv->user_ctl_last;
			desc->DescUserControlLS = (user_ctl & 0x00000000FFFFFFFF) >> 0;
			desc->DescUserControlMS = (user_ctl & 0xFFFFFFFF00000000) >> 32;
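
			// Only the final descriptor carries the acd, so the
			// completion callback fires exactly once per transfer.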
			if (i == acd->mapped_entry_count - 1 && p == pcnt - 1)
				desc->acd = acd;

			dev_dbg(&priv->ldev->pldev->dev, " Filled descriptor %p (acd = %p)\n", desc, desc->acd);
			ldev->desc_next = desc->Next;
			desc = desc->Next;
		}
	}

	// Send the filled descriptors off to the hardware to process!
	SetEngineSWPtr(ldev, ldev->desc_next);

	unlock_engine(ldev);

	rv = wait_for_completion_interruptible(&done);
	/*
	 * If the user aborted (rv == -ERESTARTSYS), we're no longer responsible
	 * for cleaning up the acd
	 */
	if (rv == -ERESTARTSYS)
		acd->cpl = NULL;
	if (rv == 0) {
		rv = acd->len;
		kfree(acd);
	}

	return rv;
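
	/*
	 * Error unwind: each label below releases everything acquired before
	 * the corresponding failure point, in reverse order of setup.
	 */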
err_descr_too_many:
	unlock_engine(ldev);
	dma_unmap_sg(&ldev->pldev->dev, acd->sgt.sgl, acd->sgt.nents, ldev->dir);
	sg_free_table(&acd->sgt);
err_dma_map_sg:
err_alloc_sg_table:
	for (i = 0; i < acd->page_count; i++)
		put_page(acd->user_pages[i]);
err_get_user_pages:
	kfree(acd->user_pages);
err_alloc_userpages:
	kfree(acd);
	dev_dbg(&priv->ldev->pldev->dev, "%s returning with error %ld\n", __func__, rv);
	return rv;
}
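
/*
 * Runs when the hardware finishes (or the driver aborts) a transfer: dirty
 * and release the pinned user pages, tear down the DMA mapping, then either
 * wake the waiting reader/writer or, if nobody is waiting, free the acd here.
 */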
void transfer_complete_cb(struct aio_cb_data *acd, size_t xfr_count, u32 flags)
{
	unsigned int i;

	BUG_ON(!acd);
	BUG_ON(!acd->user_pages);
	BUG_ON(!acd->sgt.sgl);
	BUG_ON(!acd->ldev);
	BUG_ON(!acd->ldev->pldev);

	for (i = 0; i < acd->page_count; i++) {
		if (!PageReserved(acd->user_pages[i]))
			set_page_dirty(acd->user_pages[i]);
	}

	dma_unmap_sg(&acd->ldev->pldev->dev, acd->sgt.sgl, acd->sgt.nents, acd->ldev->dir);

	for (i = 0; i < acd->page_count; i++)
		put_page(acd->user_pages[i]);

	sg_free_table(&acd->sgt);

	kfree(acd->user_pages);

	acd->flags = flags;

	if (acd->cpl) {
		complete(acd->cpl);
	} else {
		/*
		 * There's no completion, so we're responsible for cleaning up
		 * the acd
		 */
		kfree(acd);
	}
}

/********** Fileops **********/
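
/*
 * The engines are exclusive-open: the first opener takes open_count from 1 to
 * 0 via atomic_dec_and_test(), and any later open fails with -EBUSY until the
 * file is released.
 */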
int kpc_dma_open(struct inode *inode, struct file *filp)
{
	struct dev_private_data *priv;
	struct kpc_dma_device *ldev = kpc_dma_lookup_device(iminor(inode));

	if (!ldev)
		return -ENODEV;

	if (!atomic_dec_and_test(&ldev->open_count)) {
		atomic_inc(&ldev->open_count);
		return -EBUSY; /* already open */
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		atomic_inc(&ldev->open_count); /* put the device back */
		return -ENOMEM;
	}

	priv->ldev = ldev;
	filp->private_data = priv;

	return 0;
}
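
/*
 * On release: stop the engine, run the abort callback for any descriptor that
 * is still outstanding, then restart the engine and give the device back.
 */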
int kpc_dma_close(struct inode *inode, struct file *filp)
{
	struct kpc_dma_descriptor *cur;
	struct dev_private_data *priv = (struct dev_private_data *)filp->private_data;
	struct kpc_dma_device *eng = priv->ldev;

	lock_engine(eng);

	stop_dma_engine(eng);

	cur = eng->desc_completed->Next;
	while (cur != eng->desc_next) {
		dev_dbg(&eng->pldev->dev, "Aborting descriptor %p (acd = %p)\n", cur, cur->acd);
		if (cur->DescControlFlags & DMA_DESC_CTL_EOP) {
			if (cur->acd)
				transfer_complete_cb(cur->acd, 0, ACD_FLAG_ABORT);
		}

		clear_desc(cur);
		eng->desc_completed = cur;

		cur = cur->Next;
	}

	start_dma_engine(eng);

	unlock_engine(eng);

	atomic_inc(&priv->ldev->open_count); /* release the device */
	kfree(priv);
	return 0;
}

ssize_t kpc_dma_read(struct file *filp, char __user *user_buf, size_t count, loff_t *ppos)
{
	struct dev_private_data *priv = (struct dev_private_data *)filp->private_data;

	if (priv->ldev->dir != DMA_FROM_DEVICE)
		return -EMEDIUMTYPE;

	return kpc_dma_transfer(priv, (unsigned long)user_buf, count);
}

ssize_t kpc_dma_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *ppos)
{
	struct dev_private_data *priv = (struct dev_private_data *)filp->private_data;

	if (priv->ldev->dir != DMA_TO_DEVICE)
		return -EMEDIUMTYPE;

	return kpc_dma_transfer(priv, (unsigned long)user_buf, count);
}
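
/*
 * The ioctls stage per-transfer parameters (card address, user control words)
 * that the next read()/write() folds into the descriptors it builds; the
 * "set" calls echo the stored value back to the caller.
 */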
long kpc_dma_ioctl(struct file *filp, unsigned int ioctl_num, unsigned long ioctl_param)
{
	struct dev_private_data *priv = (struct dev_private_data *)filp->private_data;

	switch (ioctl_num) {
	case KND_IOCTL_SET_CARD_ADDR:
		priv->card_addr = ioctl_param; return priv->card_addr;
	case KND_IOCTL_SET_USER_CTL:
		priv->user_ctl = ioctl_param; return priv->user_ctl;
	case KND_IOCTL_SET_USER_CTL_LAST:
		priv->user_ctl_last = ioctl_param; return priv->user_ctl_last;
	case KND_IOCTL_GET_USER_STS:
		return priv->user_sts;
	}

	return -ENOTTY;
}

const struct file_operations kpc_dma_fops = {
	.owner          = THIS_MODULE,
	.open           = kpc_dma_open,
	.release        = kpc_dma_close,
	.read           = kpc_dma_read,
	.write          = kpc_dma_write,
	.unlocked_ioctl = kpc_dma_ioctl,
};