// SPDX-License-Identifier: GPL-2.0+
//
// idma.c - I2S0 internal DMA driver
//
// Copyright (c) 2011 Samsung Electronics Co., Ltd.
//              http://www.samsung.com

#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>

#include "i2s.h"
#include "idma.h"
#include "i2s-regs.h"

#define ST_RUNNING              (1<<0)
#define ST_OPENED               (1<<1)

static const struct snd_pcm_hardware idma_hardware = {
        .info = SNDRV_PCM_INFO_INTERLEAVED |
                    SNDRV_PCM_INFO_BLOCK_TRANSFER |
                    SNDRV_PCM_INFO_MMAP |
                    SNDRV_PCM_INFO_MMAP_VALID |
                    SNDRV_PCM_INFO_PAUSE |
                    SNDRV_PCM_INFO_RESUME,
        .buffer_bytes_max = MAX_IDMA_BUFFER,
        .period_bytes_min = 128,
        .period_bytes_max = MAX_IDMA_PERIOD,
        .periods_min = 1,
        .periods_max = 2,
};

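/*
 * Per-substream state for the I2S internal DMA: buffer geometry and the
 * period-elapsed callback installed by the PCM layer.
 */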
struct idma_ctrl {
        spinlock_t      lock;
        int             state;
        dma_addr_t      start;
        dma_addr_t      pos;
        dma_addr_t      end;
        dma_addr_t      period;
        dma_addr_t      periodsz;
        void            *token;
        void            (*cb)(void *dt, int bytes_xfer);
};

static struct idma_info {
        spinlock_t      lock;
        void __iomem    *regs;
        dma_addr_t      lp_tx_addr;
} idma;

static int idma_irq;

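/*
 * Current playback position: the transfer count register counts 32-bit
 * words, so scale by 4 to get a byte offset from the start of the
 * low-power buffer.
 */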
static void idma_getpos(dma_addr_t *src)
{
        *src = idma.lp_tx_addr +
                (readl(idma.regs + I2STRNCNT) & 0xffffff) * 4;
}

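/*
 * Program the controller for a new buffer: the level-0 interrupt address
 * (first period boundary), the DMA start address and the transfer size,
 * then enable the level-0 interrupt.
 */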
static int idma_enqueue(struct snd_pcm_substream *substream)
{
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct idma_ctrl *prtd = substream->runtime->private_data;
        u32 val;

        spin_lock(&prtd->lock);
        prtd->token = (void *) substream;
        spin_unlock(&prtd->lock);

        /* Internal DMA Level0 Interrupt Address */
        val = idma.lp_tx_addr + prtd->periodsz;
        writel(val, idma.regs + I2SLVL0ADDR);

        /* Start address0 of I2S internal DMA operation. */
        val = idma.lp_tx_addr;
        writel(val, idma.regs + I2SSTR0);

        /*
         * Transfer block size for the I2S internal DMA. The transfer
         * size must be programmed before the DMA operation is started.
         */
        val = readl(idma.regs + I2SSIZE);
        val &= ~(I2SSIZE_TRNMSK << I2SSIZE_SHIFT);
        val |= (((runtime->dma_bytes >> 2) &
                        I2SSIZE_TRNMSK) << I2SSIZE_SHIFT);
        writel(val, idma.regs + I2SSIZE);

        val = readl(idma.regs + I2SAHB);
        val |= AHB_INTENLVL0;
        writel(val, idma.regs + I2SAHB);

        return 0;
}

static void idma_setcallbk(struct snd_pcm_substream *substream,
                                void (*cb)(void *, int))
{
        struct idma_ctrl *prtd = substream->runtime->private_data;

        spin_lock(&prtd->lock);
        prtd->cb = cb;
        spin_unlock(&prtd->lock);
}

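/* Enable or disable the internal DMA and its level-0 interrupt. */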
static void idma_control(int op)
{
        u32 val;

        spin_lock(&idma.lock);

        /* Read-modify-write of I2SAHB must happen under the lock. */
        val = readl(idma.regs + I2SAHB);

        switch (op) {
        case LPAM_DMA_START:
                val |= (AHB_INTENLVL0 | AHB_DMAEN);
                break;
        case LPAM_DMA_STOP:
                val &= ~(AHB_INTENLVL0 | AHB_DMAEN);
                break;
        default:
                spin_unlock(&idma.lock);
                return;
        }

        writel(val, idma.regs + I2SAHB);
        spin_unlock(&idma.lock);
}

static void idma_done(void *id, int bytes_xfer)
{
        struct snd_pcm_substream *substream = id;
        struct idma_ctrl *prtd = substream->runtime->private_data;

        if (prtd && (prtd->state & ST_RUNNING))
                snd_pcm_period_elapsed(substream);
}

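/*
 * Route the TX path to the internal DMA, enable auto-reload and interrupt
 * masking, and record the buffer and period geometry for this stream.
 */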
static int idma_hw_params(struct snd_pcm_substream *substream,
                                struct snd_pcm_hw_params *params)
{
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct idma_ctrl *prtd = substream->runtime->private_data;
        u32 mod = readl(idma.regs + I2SMOD);
        u32 ahb = readl(idma.regs + I2SAHB);

        ahb |= (AHB_DMARLD | AHB_INTMASK);
        mod |= MOD_TXS_IDMA;
        writel(ahb, idma.regs + I2SAHB);
        writel(mod, idma.regs + I2SMOD);

        snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
        runtime->dma_bytes = params_buffer_bytes(params);

        prtd->start = prtd->pos = runtime->dma_addr;
        prtd->period = params_periods(params);
        prtd->periodsz = params_period_bytes(params);
        prtd->end = runtime->dma_addr + runtime->dma_bytes;

        idma_setcallbk(substream, idma_done);

        return 0;
}

static int idma_hw_free(struct snd_pcm_substream *substream)
{
        snd_pcm_set_runtime_buffer(substream, NULL);

        return 0;
}

static int idma_prepare(struct snd_pcm_substream *substream)
{
        struct idma_ctrl *prtd = substream->runtime->private_data;

        prtd->pos = prtd->start;

        /* flush the DMA channel */
        idma_control(LPAM_DMA_STOP);
        idma_enqueue(substream);

        return 0;
}

static int idma_trigger(struct snd_pcm_substream *substream, int cmd)
{
        struct idma_ctrl *prtd = substream->runtime->private_data;
        int ret = 0;

        spin_lock(&prtd->lock);

        switch (cmd) {
        case SNDRV_PCM_TRIGGER_RESUME:
        case SNDRV_PCM_TRIGGER_START:
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
                prtd->state |= ST_RUNNING;
                idma_control(LPAM_DMA_START);
                break;

        case SNDRV_PCM_TRIGGER_SUSPEND:
        case SNDRV_PCM_TRIGGER_STOP:
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
                prtd->state &= ~ST_RUNNING;
                idma_control(LPAM_DMA_STOP);
                break;

        default:
                ret = -EINVAL;
                break;
        }

        spin_unlock(&prtd->lock);

        return ret;
}

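/* Report the current hardware position as a frame offset into the buffer. */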
static snd_pcm_uframes_t
        idma_pointer(struct snd_pcm_substream *substream)
{
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct idma_ctrl *prtd = runtime->private_data;
        dma_addr_t src;
        unsigned long res;

        spin_lock(&prtd->lock);

        idma_getpos(&src);
        res = src - prtd->start;

        spin_unlock(&prtd->lock);

        return bytes_to_frames(runtime, res);
}

static int idma_mmap(struct snd_pcm_substream *substream,
        struct vm_area_struct *vma)
{
        struct snd_pcm_runtime *runtime = substream->runtime;
        unsigned long size, offset;
        int ret;

        /* From snd_pcm_lib_mmap_iomem */
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        size = vma->vm_end - vma->vm_start;
        offset = vma->vm_pgoff << PAGE_SHIFT;
        ret = io_remap_pfn_range(vma, vma->vm_start,
                        (runtime->dma_addr + offset) >> PAGE_SHIFT,
                        size, vma->vm_page_prot);

        return ret;
}

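/*
 * Level-0 interrupt handler: acknowledge the interrupt, advance the
 * level-0 interrupt address by one period (wrapping at the end of the
 * buffer) and notify the PCM core that a period has elapsed.
 */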
static irqreturn_t iis_irq(int irqno, void *dev_id)
{
        struct idma_ctrl *prtd = (struct idma_ctrl *)dev_id;
        u32 iisahb, val, addr;

        iisahb = readl(idma.regs + I2SAHB);

        val = (iisahb & AHB_LVL0INT) ? AHB_CLRLVL0INT : 0;

        if (val) {
                iisahb |= val;
                writel(iisahb, idma.regs + I2SAHB);

                addr = readl(idma.regs + I2SLVL0ADDR) - idma.lp_tx_addr;
                addr += prtd->periodsz;
                addr %= (u32)(prtd->end - prtd->start);
                addr += idma.lp_tx_addr;

                writel(addr, idma.regs + I2SLVL0ADDR);

                if (prtd->cb)
                        prtd->cb(prtd->token, prtd->period);
        }

        return IRQ_HANDLED;
}

static int idma_open(struct snd_pcm_substream *substream)
{
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct idma_ctrl *prtd;
        int ret;

        snd_soc_set_runtime_hwparams(substream, &idma_hardware);

        prtd = kzalloc(sizeof(struct idma_ctrl), GFP_KERNEL);
        if (prtd == NULL)
                return -ENOMEM;

        ret = request_irq(idma_irq, iis_irq, 0, "i2s", prtd);
        if (ret < 0) {
                pr_err("failed to claim i2s irq, ret = %d\n", ret);
                kfree(prtd);
                return ret;
        }

        spin_lock_init(&prtd->lock);

        runtime->private_data = prtd;

        return 0;
}

static int idma_close(struct snd_pcm_substream *substream)
{
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct idma_ctrl *prtd = runtime->private_data;

        if (!prtd) {
                pr_err("idma_close called with prtd == NULL\n");
                return 0;
        }

        free_irq(idma_irq, prtd);
        kfree(prtd);

        return 0;
}

static const struct snd_pcm_ops idma_ops = {
        .open           = idma_open,
        .close          = idma_close,
        .ioctl          = snd_pcm_lib_ioctl,
        .trigger        = idma_trigger,
        .pointer        = idma_pointer,
        .mmap           = idma_mmap,
        .hw_params      = idma_hw_params,
        .hw_free        = idma_hw_free,
        .prepare        = idma_prepare,
};

static void idma_free(struct snd_pcm *pcm)
{
        struct snd_pcm_substream *substream;
        struct snd_dma_buffer *buf;

        substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
        if (!substream)
                return;

        buf = &substream->dma_buffer;
        if (!buf->area)
                return;

        iounmap((void __iomem *)buf->area);

        buf->area = NULL;
        buf->addr = 0;
}

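/*
 * The playback buffer is the fixed low-power audio region handed over by
 * the I2S driver (idma.lp_tx_addr); map it with ioremap() instead of
 * allocating DMA memory.
 */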
static int preallocate_idma_buffer(struct snd_pcm *pcm, int stream)
{
        struct snd_pcm_substream *substream = pcm->streams[stream].substream;
        struct snd_dma_buffer *buf = &substream->dma_buffer;

        buf->dev.dev = pcm->card->dev;
        buf->private_data = NULL;

        /* Assign PCM buffer pointers */
        buf->dev.type = SNDRV_DMA_TYPE_CONTINUOUS;
        buf->addr = idma.lp_tx_addr;
        buf->bytes = idma_hardware.buffer_bytes_max;
        buf->area = (unsigned char * __force)ioremap(buf->addr, buf->bytes);
        if (!buf->area)
                return -ENOMEM;

        return 0;
}

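/* PCM construction: set the DMA mask and map the playback buffer. */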
static int idma_new(struct snd_soc_pcm_runtime *rtd)
{
        struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
        int ret;

        ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
        if (ret)
                return ret;

        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                ret = preallocate_idma_buffer(pcm,
                                SNDRV_PCM_STREAM_PLAYBACK);
        }

        return ret;
}

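/*
 * Called by the Samsung I2S CPU DAI driver to hand over its register base
 * and the physical address of the low-power playback buffer before this
 * platform is used.
 */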
void idma_reg_addr_init(void __iomem *regs, dma_addr_t addr)
{
        spin_lock_init(&idma.lock);
        idma.regs = regs;
        idma.lp_tx_addr = addr;
}
EXPORT_SYMBOL_GPL(idma_reg_addr_init);

static const struct snd_soc_component_driver asoc_idma_platform = {
        .ops = &idma_ops,
        .pcm_new = idma_new,
        .pcm_free = idma_free,
};

static int asoc_idma_platform_probe(struct platform_device *pdev)
{
        idma_irq = platform_get_irq(pdev, 0);
        if (idma_irq < 0)
                return idma_irq;

        return devm_snd_soc_register_component(&pdev->dev, &asoc_idma_platform,
                                               NULL, 0);
}

static struct platform_driver asoc_idma_driver = {
        .driver = {
                .name = "samsung-idma",
        },

        .probe = asoc_idma_platform_probe,
};

module_platform_driver(asoc_idma_driver);

MODULE_AUTHOR("Jaswinder Singh, <jassisinghbrar@gmail.com>");
MODULE_DESCRIPTION("Samsung ASoC IDMA Driver");
MODULE_LICENSE("GPL");