Linux-libre 5.3.12-gnu
librecmc/linux-libre.git: drivers/video/fbdev/mmp/hw/mmp_ctrl.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/drivers/video/mmp/hw/mmp_ctrl.c
 * Marvell MMP series Display Controller support
 *
 * Copyright (C) 2012 Marvell Technology Group Ltd.
 * Authors:  Guoqing Li <ligq@marvell.com>
 *          Lisa Du <cldu@marvell.com>
 *          Zhou Zhu <zzhu3@marvell.com>
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/kthread.h>
#include <linux/io.h>

#include "mmp_ctrl.h"

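/*
 * Display controller interrupt handler: acknowledge the status bits that
 * were asserted in SPU_IRQ_ISR and keep clearing until no enabled source
 * remains pending.
 */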
static irqreturn_t ctrl_handle_irq(int irq, void *dev_id)
{
        struct mmphw_ctrl *ctrl = (struct mmphw_ctrl *)dev_id;
        u32 isr, imask, tmp;

        isr = readl_relaxed(ctrl->reg_base + SPU_IRQ_ISR);
        imask = readl_relaxed(ctrl->reg_base + SPU_IRQ_ENA);

        do {
                /* clear the asserted status bits only */
                tmp = readl_relaxed(ctrl->reg_base + SPU_IRQ_ISR);
                if (tmp & isr)
                        writel_relaxed(~isr, ctrl->reg_base + SPU_IRQ_ISR);
        } while ((isr = readl_relaxed(ctrl->reg_base + SPU_IRQ_ISR)) & imask);

        return IRQ_HANDLED;
}

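/*
 * Translate a PIXFMT_* value into the DMA control register bits for this
 * overlay: format code, R/B swap, U/V swap, YUV byte swap and CSC enable.
 */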
static u32 fmt_to_reg(struct mmp_overlay *overlay, int pix_fmt)
{
        u32 rbswap = 0, uvswap = 0, yuvswap = 0,
                csc_en = 0, val = 0,
                vid = overlay_is_vid(overlay);

        switch (pix_fmt) {
        case PIXFMT_RGB565:
        case PIXFMT_RGB1555:
        case PIXFMT_RGB888PACK:
        case PIXFMT_RGB888UNPACK:
        case PIXFMT_RGBA888:
                rbswap = 1;
                break;
        case PIXFMT_VYUY:
        case PIXFMT_YVU422P:
        case PIXFMT_YVU420P:
                uvswap = 1;
                break;
        case PIXFMT_YUYV:
                yuvswap = 1;
                break;
        default:
                break;
        }

        switch (pix_fmt) {
        case PIXFMT_RGB565:
        case PIXFMT_BGR565:
                break;
        case PIXFMT_RGB1555:
        case PIXFMT_BGR1555:
                val = 0x1;
                break;
        case PIXFMT_RGB888PACK:
        case PIXFMT_BGR888PACK:
                val = 0x2;
                break;
        case PIXFMT_RGB888UNPACK:
        case PIXFMT_BGR888UNPACK:
                val = 0x3;
                break;
        case PIXFMT_RGBA888:
        case PIXFMT_BGRA888:
                val = 0x4;
                break;
        case PIXFMT_UYVY:
        case PIXFMT_VYUY:
        case PIXFMT_YUYV:
                val = 0x5;
                csc_en = 1;
                break;
        case PIXFMT_YUV422P:
        case PIXFMT_YVU422P:
                val = 0x6;
                csc_en = 1;
                break;
        case PIXFMT_YUV420P:
        case PIXFMT_YVU420P:
                val = 0x7;
                csc_en = 1;
                break;
        default:
                break;
        }

        return (dma_palette(0) | dma_fmt(vid, val) |
                dma_swaprb(vid, rbswap) | dma_swapuv(vid, uvswap) |
                dma_swapyuv(vid, yuvswap) | dma_csc(vid, csc_en));
}

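/* Program the overlay's pixel format bits into the path's dma_ctrl0 register. */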
static void dmafetch_set_fmt(struct mmp_overlay *overlay)
{
        u32 tmp;
        struct mmp_path *path = overlay->path;

        tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
        tmp &= ~dma_mask(overlay_is_vid(overlay));
        tmp |= fmt_to_reg(overlay, overlay->win.pix_fmt);
        writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
}

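/*
 * Apply a new window: store it, write the pitch, source/destination size
 * and screen position to either the video or the graphics layer registers,
 * then refresh the pixel format.
 */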
static void overlay_set_win(struct mmp_overlay *overlay, struct mmp_win *win)
{
        struct lcd_regs *regs = path_regs(overlay->path);

        /* assert win supported */
        memcpy(&overlay->win, win, sizeof(struct mmp_win));

        mutex_lock(&overlay->access_ok);

        if (overlay_is_vid(overlay)) {
                writel_relaxed(win->pitch[0], &regs->v_pitch_yc);
                writel_relaxed(win->pitch[2] << 16 |
                                win->pitch[1], &regs->v_pitch_uv);

                writel_relaxed((win->ysrc << 16) | win->xsrc, &regs->v_size);
                writel_relaxed((win->ydst << 16) | win->xdst, &regs->v_size_z);
                writel_relaxed(win->ypos << 16 | win->xpos, &regs->v_start);
        } else {
                writel_relaxed(win->pitch[0], &regs->g_pitch);

                writel_relaxed((win->ysrc << 16) | win->xsrc, &regs->g_size);
                writel_relaxed((win->ydst << 16) | win->xdst, &regs->g_size_z);
                writel_relaxed(win->ypos << 16 | win->xpos, &regs->g_start);
        }

        dmafetch_set_fmt(overlay);
        mutex_unlock(&overlay->access_ok);
}

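/* Enable or disable DMA fetch for the overlay's layer (video or graphics). */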
static void dmafetch_onoff(struct mmp_overlay *overlay, int on)
{
        u32 mask = overlay_is_vid(overlay) ? CFG_DMA_ENA_MASK :
                   CFG_GRA_ENA_MASK;
        u32 enable = overlay_is_vid(overlay) ? CFG_DMA_ENA(1) : CFG_GRA_ENA(1);
        u32 tmp;
        struct mmp_path *path = overlay->path;

        mutex_lock(&overlay->access_ok);
        tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
        tmp &= ~mask;
        tmp |= (on ? enable : 0);
        writel(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
        mutex_unlock(&overlay->access_ok);
}

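/* Gate or ungate the path's pixel clock via the SCLK_DISABLE bit. */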
static void path_enabledisable(struct mmp_path *path, int on)
{
        u32 tmp;

        mutex_lock(&path->access_ok);
        tmp = readl_relaxed(ctrl_regs(path) + LCD_SCLK(path));
        if (on)
                tmp &= ~SCLK_DISABLE;
        else
                tmp |= SCLK_DISABLE;
        writel_relaxed(tmp, ctrl_regs(path) + LCD_SCLK(path));
        mutex_unlock(&path->access_ok);
}

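/*
 * Turn a path on or off: enable the clock before switching the panel on,
 * and switch the panel off before disabling the clock.
 */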
static void path_onoff(struct mmp_path *path, int on)
{
        if (path->status == on) {
                dev_info(path->dev, "path %s is already %s\n",
                                path->name, stat_name(path->status));
                return;
        }

        if (on) {
                path_enabledisable(path, 1);

                if (path->panel && path->panel->set_onoff)
                        path->panel->set_onoff(path->panel, 1);
        } else {
                if (path->panel && path->panel->set_onoff)
                        path->panel->set_onoff(path->panel, 0);

                path_enabledisable(path, 0);
        }
        path->status = on;
}

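/*
 * Enable or disable an overlay's DMA fetch, and toggle the whole path if
 * the hardware state no longer matches the recorded path status.
 */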
static void overlay_set_onoff(struct mmp_overlay *overlay, int on)
{
        if (overlay->status == on) {
                dev_info(overlay_to_ctrl(overlay)->dev, "overlay %s is already %s\n",
                        overlay->path->name, stat_name(overlay->status));
                return;
        }
        overlay->status = on;
        dmafetch_onoff(overlay, on);
        if (overlay->path->ops.check_status(overlay->path)
                        != overlay->path->status)
                path_onoff(overlay->path, on);
}

static void overlay_set_fetch(struct mmp_overlay *overlay, int fetch_id)
{
        overlay->dmafetch_id = fetch_id;
}

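/*
 * Set the overlay's framebuffer address(es); returns the new Y/graphics
 * physical address.
 */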
static int overlay_set_addr(struct mmp_overlay *overlay, struct mmp_addr *addr)
{
        struct lcd_regs *regs = path_regs(overlay->path);

        /* FIXME: assert addr supported */
        memcpy(&overlay->addr, addr, sizeof(struct mmp_addr));

        if (overlay_is_vid(overlay)) {
                writel_relaxed(addr->phys[0], &regs->v_y0);
                writel_relaxed(addr->phys[1], &regs->v_u0);
                writel_relaxed(addr->phys[2], &regs->v_v0);
        } else {
                writel_relaxed(addr->phys[0], &regs->g_0);
        }

        return overlay->addr.phys[0];
}

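/*
 * Program a video mode on the path: interface polarity and dumb-panel
 * configuration, R/B swap, active/porch/total timing registers, vsync
 * control, and the pixel clock divider derived from the controller clock.
 */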
static void path_set_mode(struct mmp_path *path, struct mmp_mode *mode)
{
        struct lcd_regs *regs = path_regs(path);
        u32 total_x, total_y, vsync_ctrl, tmp, sclk_src, sclk_div,
                link_config = path_to_path_plat(path)->link_config,
                dsi_rbswap = path_to_path_plat(path)->dsi_rbswap;

        /* FIXME: assert videomode supported */
        memcpy(&path->mode, mode, sizeof(struct mmp_mode));

        mutex_lock(&path->access_ok);

        /* polarity of timing signals */
        tmp = readl_relaxed(ctrl_regs(path) + intf_ctrl(path->id)) & 0x1;
        tmp |= mode->vsync_invert ? 0 : 0x8;
        tmp |= mode->hsync_invert ? 0 : 0x4;
        tmp |= link_config & CFG_DUMBMODE_MASK;
        tmp |= CFG_DUMB_ENA(1);
        writel_relaxed(tmp, ctrl_regs(path) + intf_ctrl(path->id));

        /* interface rb_swap setting */
        tmp = readl_relaxed(ctrl_regs(path) + intf_rbswap_ctrl(path->id)) &
                (~(CFG_INTFRBSWAP_MASK));
        tmp |= dsi_rbswap & CFG_INTFRBSWAP_MASK;
        writel_relaxed(tmp, ctrl_regs(path) + intf_rbswap_ctrl(path->id));

        writel_relaxed((mode->yres << 16) | mode->xres, &regs->screen_active);
        writel_relaxed((mode->left_margin << 16) | mode->right_margin,
                &regs->screen_h_porch);
        writel_relaxed((mode->upper_margin << 16) | mode->lower_margin,
                &regs->screen_v_porch);
        total_x = mode->xres + mode->left_margin + mode->right_margin +
                mode->hsync_len;
        total_y = mode->yres + mode->upper_margin + mode->lower_margin +
                mode->vsync_len;
        writel_relaxed((total_y << 16) | total_x, &regs->screen_size);

        /* vsync ctrl */
        if (path->output_type == PATH_OUT_DSI)
                vsync_ctrl = 0x01330133;
        else
                vsync_ctrl = ((mode->xres + mode->right_margin) << 16)
                                        | (mode->xres + mode->right_margin);
        writel_relaxed(vsync_ctrl, &regs->vsync_ctrl);

        /* set pixclock div */
        sclk_src = clk_get_rate(path_to_ctrl(path)->clk);
        sclk_div = sclk_src / mode->pixclock_freq;
        if (sclk_div * mode->pixclock_freq < sclk_src)
                sclk_div++;

        dev_info(path->dev, "%s sclk_src %d sclk_div 0x%x pclk %d\n",
                        __func__, sclk_src, sclk_div, mode->pixclock_freq);

        tmp = readl_relaxed(ctrl_regs(path) + LCD_SCLK(path));
        tmp &= ~CLK_INT_DIV_MASK;
        tmp |= sclk_div;
        writel_relaxed(tmp, ctrl_regs(path) + LCD_SCLK(path));

        mutex_unlock(&path->access_ok);
}

static struct mmp_overlay_ops mmphw_overlay_ops = {
        .set_fetch = overlay_set_fetch,
        .set_onoff = overlay_set_onoff,
        .set_win = overlay_set_win,
        .set_addr = overlay_set_addr,
};

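/*
 * One-time controller setup: configure LCD_TOP_CTRL first, then mask all
 * path and error interrupts in SPU_IRQ_ENA.
 */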
static void ctrl_set_default(struct mmphw_ctrl *ctrl)
{
        u32 tmp, irq_mask;

        /*
         * LCD global control (LCD_TOP_CTRL) should be configured before
         * any other LCD register is read or written, or there may be
         * issues.
         */
        tmp = readl_relaxed(ctrl->reg_base + LCD_TOP_CTRL);
        tmp |= 0xfff0;
        writel_relaxed(tmp, ctrl->reg_base + LCD_TOP_CTRL);

        /* disable all interrupts */
        irq_mask = path_imasks(0) | err_imask(0) |
                   path_imasks(1) | err_imask(1);
        tmp = readl_relaxed(ctrl->reg_base + SPU_IRQ_ENA);
        tmp &= ~irq_mask;
        writel_relaxed(tmp, ctrl->reg_base + SPU_IRQ_ENA);
}

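/*
 * Per-path defaults: IOPAD mode (parallel output only), path clock source,
 * dma_ctrl1 (vsync-triggered DMA, power save, 100% graphics alpha), blank
 * color, and DMA/graphics smoothing and burst settings.
 */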
static void path_set_default(struct mmp_path *path)
{
        struct lcd_regs *regs = path_regs(path);
        u32 dma_ctrl1, mask, tmp, path_config;

        path_config = path_to_path_plat(path)->path_config;

        /* Configure IOPAD: should be parallel only */
        if (PATH_OUT_PARALLEL == path->output_type) {
                mask = CFG_IOPADMODE_MASK | CFG_BURST_MASK | CFG_BOUNDARY_MASK;
                tmp = readl_relaxed(ctrl_regs(path) + SPU_IOPAD_CONTROL);
                tmp &= ~mask;
                tmp |= path_config;
                writel_relaxed(tmp, ctrl_regs(path) + SPU_IOPAD_CONTROL);
        }

        /* Select path clock source */
        tmp = readl_relaxed(ctrl_regs(path) + LCD_SCLK(path));
        tmp &= ~SCLK_SRC_SEL_MASK;
        tmp |= path_config;
        writel_relaxed(tmp, ctrl_regs(path) + LCD_SCLK(path));

        /*
         * Configure default bits: vsync triggers DMA,
         * power save enable, configure alpha registers to
         * display 100% graphics, and set pixel command.
         */
        dma_ctrl1 = 0x2032ff81;

        dma_ctrl1 |= CFG_VSYNC_INV_MASK;
        writel_relaxed(dma_ctrl1, ctrl_regs(path) + dma_ctrl(1, path->id));

        /* Configure default register values */
        writel_relaxed(0x00000000, &regs->blank_color);
        writel_relaxed(0x00000000, &regs->g_1);
        writel_relaxed(0x00000000, &regs->g_start);

        /*
         * 1. enable multiple burst requests in the DMA AXI bus arbiter
         *    for faster reads, unless this is the TV path;
         * 2. enable the horizontal smoothing filter.
         */
        mask = CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK | CFG_ARBFAST_ENA(1);
        tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
        tmp |= mask;
        if (PATH_TV == path->id)
                tmp &= ~CFG_ARBFAST_ENA(1);
        writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
}

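/*
 * Build a mmp_path_info from the machine config, register the path with
 * the mmp core, store the per-path plat data and apply hardware defaults.
 * Returns 1 on success, 0 on failure.
 */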
static int path_init(struct mmphw_path_plat *path_plat,
                struct mmp_mach_path_config *config)
{
        struct mmphw_ctrl *ctrl = path_plat->ctrl;
        struct mmp_path_info *path_info;
        struct mmp_path *path = NULL;

        dev_info(ctrl->dev, "%s: %s\n", __func__, config->name);

        /* init driver data */
        path_info = kzalloc(sizeof(*path_info), GFP_KERNEL);
        if (!path_info)
                return 0;

        path_info->name = config->name;
        path_info->id = path_plat->id;
        path_info->dev = ctrl->dev;
        path_info->overlay_num = config->overlay_num;
        path_info->overlay_ops = &mmphw_overlay_ops;
        path_info->set_mode = path_set_mode;
        path_info->plat_data = path_plat;

        /* create/register platform device */
        path = mmp_register_path(path_info);
        if (!path) {
                kfree(path_info);
                return 0;
        }
        path_plat->path = path;
        path_plat->path_config = config->path_config;
        path_plat->link_config = config->link_config;
        path_plat->dsi_rbswap = config->dsi_rbswap;
        path_set_default(path);

        kfree(path_info);
        return 1;
}

static void path_deinit(struct mmphw_path_plat *path_plat)
{
        if (!path_plat)
                return;

        mmp_unregister_path(path_plat->path);
}

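/*
 * Probe: pick up MMIO, IRQ and platform data, map registers, request the
 * shared interrupt, enable the controller clock, program global defaults
 * and register every path described by the machine info.
 */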
static int mmphw_probe(struct platform_device *pdev)
{
        struct mmp_mach_plat_info *mi;
        struct resource *res;
        int ret, i, irq;
        struct mmphw_path_plat *path_plat;
        struct mmphw_ctrl *ctrl = NULL;

        /* get resources from platform data */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res == NULL) {
                dev_err(&pdev->dev, "%s: no IO memory defined\n", __func__);
                ret = -ENOENT;
                goto failed;
        }

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "%s: no IRQ defined\n", __func__);
                ret = -ENOENT;
                goto failed;
        }

        /* get configs from platform data */
        mi = pdev->dev.platform_data;
        if (mi == NULL || !mi->path_num || !mi->paths) {
                dev_err(&pdev->dev, "%s: no platform data defined\n", __func__);
                ret = -EINVAL;
                goto failed;
        }

        /* allocate */
        ctrl = devm_kzalloc(&pdev->dev,
                            struct_size(ctrl, path_plats, mi->path_num),
                            GFP_KERNEL);
        if (!ctrl) {
                ret = -ENOMEM;
                goto failed;
        }

        ctrl->name = mi->name;
        ctrl->path_num = mi->path_num;
        ctrl->dev = &pdev->dev;
        ctrl->irq = irq;
        platform_set_drvdata(pdev, ctrl);
        mutex_init(&ctrl->access_ok);

        /* map registers */
        if (!devm_request_mem_region(ctrl->dev, res->start,
                        resource_size(res), ctrl->name)) {
                dev_err(ctrl->dev,
                        "can't request region for resource %pR\n", res);
                ret = -EINVAL;
                goto failed;
        }

        ctrl->reg_base = devm_ioremap_nocache(ctrl->dev,
                        res->start, resource_size(res));
        if (ctrl->reg_base == NULL) {
                dev_err(ctrl->dev, "%s: res %pR map failed\n", __func__, res);
                ret = -ENOMEM;
                goto failed;
        }

        /* request irq */
        ret = devm_request_irq(ctrl->dev, ctrl->irq, ctrl_handle_irq,
                IRQF_SHARED, "lcd_controller", ctrl);
        if (ret < 0) {
                dev_err(ctrl->dev, "%s unable to request IRQ %d\n",
                                __func__, ctrl->irq);
                ret = -ENXIO;
                goto failed;
        }

        /* get clock */
        ctrl->clk = devm_clk_get(ctrl->dev, mi->clk_name);
        if (IS_ERR(ctrl->clk)) {
                dev_err(ctrl->dev, "unable to get clk %s\n", mi->clk_name);
                ret = -ENOENT;
                goto failed;
        }
        clk_prepare_enable(ctrl->clk);

        /* init global regs */
        ctrl_set_default(ctrl);

        /* init paths from machine info and register them */
        for (i = 0; i < ctrl->path_num; i++) {
                /* get from config and machine info */
                path_plat = &ctrl->path_plats[i];
                path_plat->id = i;
                path_plat->ctrl = ctrl;

                /* path init */
                if (!path_init(path_plat, &mi->paths[i])) {
                        ret = -EINVAL;
                        goto failed_path_init;
                }
        }

#ifdef CONFIG_MMP_DISP_SPI
        ret = lcd_spi_register(ctrl);
        if (ret < 0)
                goto failed_path_init;
#endif

        dev_info(ctrl->dev, "device init done\n");

        return 0;

failed_path_init:
        for (i = 0; i < ctrl->path_num; i++) {
                path_plat = &ctrl->path_plats[i];
                path_deinit(path_plat);
        }

        clk_disable_unprepare(ctrl->clk);
failed:
        dev_err(&pdev->dev, "device init failed\n");

        return ret;
}

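/*
 * Example (illustrative sketch only, not part of this driver): mmphw_probe()
 * expects its configuration as platform data, a mmp_mach_plat_info with one
 * mmp_mach_path_config per path.  The board name, clock name and field
 * values below are made up for illustration; only the structure and field
 * names come from the code above.
 *
 *	static struct mmp_mach_path_config board_paths[] = {
 *		{
 *			.name		= "mmp-parallel",
 *			.overlay_num	= 2,
 *			.path_config	= 0,
 *			.link_config	= 0,
 *			.dsi_rbswap	= 0,
 *		},
 *	};
 *
 *	static struct mmp_mach_plat_info board_disp_info = {
 *		.name		= "board-disp",
 *		.clk_name	= "disp0",
 *		.path_num	= ARRAY_SIZE(board_paths),
 *		.paths		= board_paths,
 *	};
 *
 * Board code would then register an "mmp-disp" platform device whose
 * dev.platform_data points at board_disp_info.
 */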
static struct platform_driver mmphw_driver = {
        .driver         = {
                .name   = "mmp-disp",
        },
        .probe          = mmphw_probe,
};

static int mmphw_init(void)
{
        return platform_driver_register(&mmphw_driver);
}
module_init(mmphw_init);

MODULE_AUTHOR("Li Guoqing <ligq@marvell.com>");
MODULE_DESCRIPTION("Framebuffer driver for mmp");
MODULE_LICENSE("GPL");