target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/glamo-core.c
1 /* Smedia Glamo 336x/337x driver
2  *
3  * (C) 2007 by Openmoko, Inc.
4  * Author: Harald Welte <laforge@openmoko.org>
5  * All rights reserved.
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License as
9  * published by the Free Software Foundation; either version 2 of
10  * the License, or (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
20  * MA 02111-1307 USA
21  */
22
23 #include <linux/module.h>
24 #include <linux/kernel.h>
25 #include <linux/errno.h>
26 #include <linux/string.h>
27 #include <linux/mm.h>
28 #include <linux/delay.h>
29 #include <linux/fb.h>
30 #include <linux/init.h>
31 #include <linux/irq.h>
32 #include <linux/interrupt.h>
33 #include <linux/workqueue.h>
34 #include <linux/platform_device.h>
35 #include <linux/kernel_stat.h>
36 #include <linux/spinlock.h>
37 #include <linux/mfd/core.h>
38 #include <linux/mfd/glamo.h>
39 #include <linux/spi/glamo.h>
40 #include <linux/glamo-gpio.h>
41 #include <linux/glamofb.h>
42 #include <linux/io.h>
43
44 #include <asm/div64.h>
45
46 #ifdef CONFIG_PM
47 #include <linux/pm.h>
48 #endif
49
50 #include "glamo-regs.h"
51 #include "glamo-core.h"
52
53 #define GLAMO_MEM_REFRESH_COUNT 0x100
54
55 /*
56  * Glamo internal settings
57  *
58  * We run the memory interface from the faster PLLB on 2.6.28 kernels and
59  * above.  A couple of GTA02 users reported trouble with the memory bus
60  * when they upgraded from 2.6.24, so this parameter allows reverting to
61  * the 2.6.24 scheme if their Glamo chip needs it.
62  *
63  * You can override the faster default on the kernel command line using
64  *
65  *   glamo3362.slow_memory=1
66  *
67  * for example.
68  */
69
70 static int slow_memory = 0;
71 module_param(slow_memory, int, 0644);
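/*
 * Note: with 0644 permissions the parameter is also exposed writable in
 * sysfs (under /sys/module/.../parameters/slow_memory), but it is only
 * consulted when the init script is run, i.e. at probe time and on resume.
 */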
72
73 struct reg_range {
74         int start;
75         int count;
76         char *name;
77         char dump;
78 };
79 struct reg_range reg_range[] = {
80         { 0x0000, 0x76,         "General",      1 },
81         { 0x0200, 0x18,         "Host Bus",     1 },
82         { 0x0300, 0x38,         "Memory",       1 },
83 /*      { 0x0400, 0x100,        "Sensor",       0 }, */
84 /*              { 0x0500, 0x300,        "ISP",          0 }, */
85 /*              { 0x0800, 0x400,        "JPEG",         0 }, */
86 /*              { 0x0c00, 0xcc,         "MPEG",         0 }, */
87         { 0x1100, 0xb2,         "LCD 1",        1 },
88         { 0x1200, 0x64,         "LCD 2",        1 },
89         { 0x1400, 0x40,         "MMC",          1 },
90 /*              { 0x1500, 0x080,        "MPU 0",        0 },
91         { 0x1580, 0x080,        "MPU 1",        0 },
92         { 0x1600, 0x080,        "Cmd Queue",    0 },
93         { 0x1680, 0x080,        "RISC CPU",     0 },
94         { 0x1700, 0x400,        "2D Unit",      0 },
95         { 0x1b00, 0x900,        "3D Unit",      0 }, */
96 };
97
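/*
 * Low-level MMIO register accessors.  The double-underscore variants do no
 * locking and assume the caller already holds glamo->lock wherever the
 * atomicity of the read-modify-write matters; reg_set_bit_mask() below is
 * the self-locking wrapper.
 */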
98 static inline void __reg_write(struct glamo_core *glamo,
99                                 u_int16_t reg, u_int16_t val)
100 {
101         writew(val, glamo->base + reg);
102 }
103
104 static inline u_int16_t __reg_read(struct glamo_core *glamo,
105                                    u_int16_t reg)
106 {
107         return readw(glamo->base + reg);
108 }
109
110 static void __reg_set_bit_mask(struct glamo_core *glamo,
111                                 u_int16_t reg, u_int16_t mask,
112                                 u_int16_t val)
113 {
114         u_int16_t tmp;
115
116         val &= mask;
117
118         tmp = __reg_read(glamo, reg);
119         tmp &= ~mask;
120         tmp |= val;
121         __reg_write(glamo, reg, tmp);
122 }
123
124 static void reg_set_bit_mask(struct glamo_core *glamo,
125                                 u_int16_t reg, u_int16_t mask,
126                                 u_int16_t val)
127 {
128         spin_lock(&glamo->lock);
129         __reg_set_bit_mask(glamo, reg, mask, val);
130         spin_unlock(&glamo->lock);
131 }
132
133 static inline void __reg_set_bit(struct glamo_core *glamo,
134                                  u_int16_t reg, u_int16_t bit)
135 {
136         __reg_set_bit_mask(glamo, reg, bit, 0xffff);
137 }
138
139 static inline void __reg_clear_bit(struct glamo_core *glamo,
140                                    u_int16_t reg, u_int16_t bit)
141 {
142         __reg_set_bit_mask(glamo, reg, bit, 0);
143 }
144
145 /***********************************************************************
146  * resources of sibling devices
147  ***********************************************************************/
148
149 static struct resource glamo_fb_resources[] = {
150         {
151                 .name   = "glamo-fb-regs",
152                 .start  = GLAMO_REGOFS_LCD,
153                 .end    = GLAMO_REGOFS_MMC - 1,
154                 .flags  = IORESOURCE_MEM,
155         }, {
156                 .name   = "glamo-fb-mem",
157                 .start  = GLAMO_OFFSET_FB,
158                 .end    = GLAMO_OFFSET_FB + GLAMO_FB_SIZE - 1,
159                 .flags  = IORESOURCE_MEM,
160         },
161 };
162
163 static struct resource glamo_mmc_resources[] = {
164         {
165                 .start  = GLAMO_REGOFS_MMC,
166                 .end    = GLAMO_REGOFS_MPROC0 - 1,
167                 .flags  = IORESOURCE_MEM
168         }, {
169                 .start  = IRQ_GLAMO_MMC,
170                 .end    = IRQ_GLAMO_MMC,
171                 .flags  = IORESOURCE_IRQ,
172         }, { /* our data buffer for MMC transfers */
173                 .start  = GLAMO_OFFSET_FB + GLAMO_FB_SIZE,
174                 .end    = GLAMO_OFFSET_FB + GLAMO_FB_SIZE +
175                                   GLAMO_MMC_BUFFER_SIZE - 1,
176                 .flags  = IORESOURCE_MEM
177         },
178 };
179
180 enum glamo_cells {
181         GLAMO_CELL_FB,
182         GLAMO_CELL_MMC,
183         GLAMO_CELL_SPI_GPIO
184 };
185
186 static struct mfd_cell glamo_cells[] = {
187         [GLAMO_CELL_FB] = {
188                 .name = "glamo-fb",
189                 .num_resources = ARRAY_SIZE(glamo_fb_resources),
190                 .resources = glamo_fb_resources,
191         },
192         [GLAMO_CELL_MMC] = {
193                 .name = "glamo-mci",
194                 .num_resources = ARRAY_SIZE(glamo_mmc_resources),
195                 .resources = glamo_mmc_resources,
196         },
197         [GLAMO_CELL_SPI_GPIO] = {
198                 .name = "glamo-spi-gpio",
199         },
200 };
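/*
 * These cells are registered through mfd_add_devices() in glamo_probe();
 * the matching sibling drivers ("glamo-fb", "glamo-mci", "glamo-spi-gpio")
 * get their platform_data hooked up there as well.
 */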
201
202
203 /***********************************************************************
204  * IRQ demultiplexer
205  ***********************************************************************/
206 #define irq2glamo(x)    (x - IRQ_GLAMO(0))
207
208 static void glamo_ack_irq(unsigned int irq)
209 {
210         struct glamo_core *glamo = (struct glamo_core*)get_irq_chip_data(irq);
211         /* clear interrupt source */
212         __reg_write(glamo, GLAMO_REG_IRQ_CLEAR,
213                     1 << irq2glamo(irq));
214 }
215
216 static void glamo_mask_irq(unsigned int irq)
217 {
218         struct glamo_core *glamo = (struct glamo_core*)get_irq_chip_data(irq);
219         u_int16_t tmp;
220
221         /* clear bit in enable register */
222         tmp = __reg_read(glamo, GLAMO_REG_IRQ_ENABLE);
223         tmp &= ~(1 << irq2glamo(irq));
224         __reg_write(glamo, GLAMO_REG_IRQ_ENABLE, tmp);
225 }
226
227 static void glamo_unmask_irq(unsigned int irq)
228 {
229         struct glamo_core *glamo = (struct glamo_core*)get_irq_chip_data(irq);
230         u_int16_t tmp;
231
232         /* set bit in enable register */
233         tmp = __reg_read(glamo, GLAMO_REG_IRQ_ENABLE);
234         tmp |= (1 << irq2glamo(irq));
235         __reg_write(glamo, GLAMO_REG_IRQ_ENABLE, tmp);
236 }
237
238 static struct irq_chip glamo_irq_chip = {
239         .name   = "glamo",
240         .ack    = glamo_ack_irq,
241         .mask   = glamo_mask_irq,
242         .unmask = glamo_unmask_irq,
243 };
244
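/*
 * Chained handler for the single physical Glamo interrupt line: it reads
 * GLAMO_REG_IRQ_STATUS and re-dispatches each set bit to the corresponding
 * virtual IRQ_GLAMO(n).  The IRQ_PENDING/IRQ_MASKED juggling roughly
 * mirrors the generic level-type flow handling of this kernel generation.
 */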
245 static void glamo_irq_demux_handler(unsigned int irq, struct irq_desc *desc)
246 {
247         struct glamo_core *glamo = get_irq_desc_chip_data(desc);
248         desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
249
250         if (unlikely(desc->status & IRQ_INPROGRESS)) {
251                 desc->status |= (IRQ_PENDING | IRQ_MASKED);
252                 desc->chip->mask(irq);
253                 desc->chip->ack(irq);
254                 return;
255         }
256         kstat_incr_irqs_this_cpu(irq, desc);
257
258         desc->chip->ack(irq);
259         desc->status |= IRQ_INPROGRESS;
260
261         do {
262                 u_int16_t irqstatus;
263                 int i;
264
265                 if (unlikely((desc->status &
266                                 (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
267                                 (IRQ_PENDING | IRQ_MASKED))) {
268                         /* dealing with pending IRQ, unmasking */
269                         desc->chip->unmask(irq);
270                         desc->status &= ~IRQ_MASKED;
271                 }
272
273                 desc->status &= ~IRQ_PENDING;
274
275                 /* read IRQ status register */
276                 irqstatus = __reg_read(glamo, GLAMO_REG_IRQ_STATUS);
277                 for (i = 0; i < 9; i++)
278                         if (irqstatus & (1 << i))
279                                 desc_handle_irq(IRQ_GLAMO(i),
280                                     irq_desc+IRQ_GLAMO(i));
281
282         } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
283
284         desc->status &= ~IRQ_INPROGRESS;
285 }
286
287
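/*
 * sysfs "regs" attribute: reading dumps the register ranges flagged for
 * dumping in reg_range[] above, writing pokes a single register.  As parsed
 * below, the store format is "<reg> <value>" with both fields in decimal,
 * e.g. (hypothetical sysfs path)
 *
 *	echo "512 3587" > /sys/devices/.../glamo-core/regs
 *
 * would write 0x0E03 to register 0x200.
 */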
288 static ssize_t regs_write(struct device *dev, struct device_attribute *attr,
289                            const char *buf, size_t count)
290 {
291         unsigned long reg = simple_strtoul(buf, NULL, 10);
292         struct glamo_core *glamo = dev_get_drvdata(dev);
293
294         while (*buf && (*buf != ' '))
295                 buf++;
296         if (*buf != ' ')
297                 return -EINVAL;
298         while (*buf && (*buf == ' '))
299                 buf++;
300         if (!*buf)
301                 return -EINVAL;
302
303         printk(KERN_INFO "reg 0x%02lX <-- 0x%04lX\n",
304                reg, simple_strtoul(buf, NULL, 10));
305
306         __reg_write(glamo, reg, simple_strtoul(buf, NULL, 10));
307
308         return count;
309 }
310
311 static ssize_t regs_read(struct device *dev, struct device_attribute *attr,
312                         char *buf)
313 {
314         struct glamo_core *glamo = dev_get_drvdata(dev);
315         int n, n1 = 0, r;
316         char * end = buf;
317
318         spin_lock(&glamo->lock);
319
320         for (r = 0; r < ARRAY_SIZE(reg_range); r++) {
321                 if (!reg_range[r].dump)
322                         continue;
323                 n1 = 0;
324                 end += sprintf(end, "\n%s\n", reg_range[r].name);
325                 for (n = reg_range[r].start;
326                      n < reg_range[r].start + reg_range[r].count; n += 2) {
327                         if (((n1++) & 7) == 0)
328                                 end += sprintf(end, "\n%04X:  ", n);
329                         end += sprintf(end, "%04x ", __reg_read(glamo, n));
330                 }
331                 end += sprintf(end, "\n");
332                 if (!attr) {
333                         printk("%s", buf);
334                         end = buf;
335                 }
336         }
337         spin_unlock(&glamo->lock);
338
339         return end - buf;
340 }
341
342 static DEVICE_ATTR(regs, 0644, regs_read, regs_write);
343 static struct attribute *glamo_sysfs_entries[] = {
344         &dev_attr_regs.attr,
345         NULL
346 };
347 static struct attribute_group glamo_attr_group = {
348         .name   = NULL,
349         .attrs  = glamo_sysfs_entries,
350 };
351
352
353
354 /***********************************************************************
355  * 'engine' support
356  ***********************************************************************/
357
358 int __glamo_engine_enable(struct glamo_core *glamo, enum glamo_engine engine)
359 {
360         switch (engine) {
361         case GLAMO_ENGINE_LCD:
362                 __reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
363                                    GLAMO_HOSTBUS2_MMIO_EN_LCD,
364                                    GLAMO_HOSTBUS2_MMIO_EN_LCD);
365                 __reg_write(glamo, GLAMO_REG_CLOCK_LCD,
366                             GLAMO_CLOCK_LCD_EN_M5CLK |
367                             GLAMO_CLOCK_LCD_EN_DHCLK |
368                             GLAMO_CLOCK_LCD_EN_DMCLK |
369                             GLAMO_CLOCK_LCD_EN_DCLK |
370                             GLAMO_CLOCK_LCD_DG_M5CLK |
371                             GLAMO_CLOCK_LCD_DG_DMCLK);
372                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
373                             GLAMO_CLOCK_GEN51_EN_DIV_DHCLK |
374                             GLAMO_CLOCK_GEN51_EN_DIV_DMCLK |
375                             GLAMO_CLOCK_GEN51_EN_DIV_DCLK, 0xffff);
376                 break;
377         case GLAMO_ENGINE_MMC:
378                 __reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
379                                    GLAMO_HOSTBUS2_MMIO_EN_MMC,
380                                    GLAMO_HOSTBUS2_MMIO_EN_MMC);
381                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_MMC,
382                                    GLAMO_CLOCK_MMC_EN_M9CLK |
383                                    GLAMO_CLOCK_MMC_EN_TCLK |
384                                    GLAMO_CLOCK_MMC_DG_M9CLK |
385                                    GLAMO_CLOCK_MMC_DG_TCLK, 0xffff);
386                 /* enable the TCLK divider clk input */
387                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
388                                    GLAMO_CLOCK_GEN51_EN_DIV_TCLK,
389                                    GLAMO_CLOCK_GEN51_EN_DIV_TCLK);
390                 break;
391         case GLAMO_ENGINE_2D:
392                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_2D,
393                                    GLAMO_CLOCK_2D_EN_M7CLK |
394                                    GLAMO_CLOCK_2D_EN_GCLK |
395                                    GLAMO_CLOCK_2D_DG_M7CLK |
396                                    GLAMO_CLOCK_2D_DG_GCLK, 0xffff);
397                 __reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
398                                    GLAMO_HOSTBUS2_MMIO_EN_2D,
399                                    GLAMO_HOSTBUS2_MMIO_EN_2D);
400                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
401                                    GLAMO_CLOCK_GEN51_EN_DIV_GCLK,
402                                    0xffff);
403                 break;
404         case GLAMO_ENGINE_CMDQ:
405                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_2D,
406                                    GLAMO_CLOCK_2D_EN_M6CLK, 0xffff);
407                 __reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
408                                    GLAMO_HOSTBUS2_MMIO_EN_CQ,
409                                    GLAMO_HOSTBUS2_MMIO_EN_CQ);
410                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
411                                    GLAMO_CLOCK_GEN51_EN_DIV_MCLK,
412                                    0xffff);
413                 break;
414         /* FIXME: Implementation */
415         default:
416                 return -EINVAL;
417         }
418
419         glamo->engine_enabled_bitfield |= 1 << engine;
420
421         return 0;
422 }
423
424 int glamo_engine_enable(struct glamo_core *glamo, enum glamo_engine engine)
425 {
426         int ret;
427
428         spin_lock(&glamo->lock);
429
430         ret = __glamo_engine_enable(glamo, engine);
431
432         spin_unlock(&glamo->lock);
433
434         return ret;
435 }
436 EXPORT_SYMBOL_GPL(glamo_engine_enable);
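/*
 * Typical use from a sibling cell driver (a sketch, assuming the caller has
 * obtained the struct glamo_core pointer from its platform data, as the
 * glamo-mci cell does):
 *
 *	glamo_engine_enable(glamo, GLAMO_ENGINE_MMC);
 *	...
 *	glamo_engine_disable(glamo, GLAMO_ENGINE_MMC);
 */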
437
438 int __glamo_engine_disable(struct glamo_core *glamo, enum glamo_engine engine)
439 {
440         switch (engine) {
441         case GLAMO_ENGINE_LCD:
442                 /* remove pixel clock to LCM */
443                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_LCD,
444                             GLAMO_CLOCK_LCD_EN_DCLK, 0);
445                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_LCD,
446                             GLAMO_CLOCK_LCD_EN_DHCLK |
447                             GLAMO_CLOCK_LCD_EN_DMCLK, 0);
448                 /* kill memory clock */
449                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_LCD,
450                             GLAMO_CLOCK_LCD_EN_M5CLK, 0);
451                 /* stop dividing the clocks */
452                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
453                             GLAMO_CLOCK_GEN51_EN_DIV_DHCLK |
454                             GLAMO_CLOCK_GEN51_EN_DIV_DMCLK |
455                             GLAMO_CLOCK_GEN51_EN_DIV_DCLK, 0);
456                 break;
457
458         case GLAMO_ENGINE_MMC:
459                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_MMC,
460                                    GLAMO_CLOCK_MMC_EN_M9CLK |
461                                    GLAMO_CLOCK_MMC_EN_TCLK |
462                                    GLAMO_CLOCK_MMC_DG_M9CLK |
463                                    GLAMO_CLOCK_MMC_DG_TCLK, 0);
464                 /* disable the TCLK divider clk input */
465                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
466                                    GLAMO_CLOCK_GEN51_EN_DIV_TCLK, 0);
467                 break;
468         case GLAMO_ENGINE_CMDQ:
469                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_2D,
470                                    GLAMO_CLOCK_2D_EN_M6CLK,
471                                    0);
472                 __reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
473                                    GLAMO_HOSTBUS2_MMIO_EN_CQ,
474                                    GLAMO_HOSTBUS2_MMIO_EN_CQ);
475 /*              __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
476                                    GLAMO_CLOCK_GEN51_EN_DIV_MCLK,
477                                    0); */
478                 break;
479         case GLAMO_ENGINE_2D:
480                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_2D,
481                                    GLAMO_CLOCK_2D_EN_M7CLK |
482                                    GLAMO_CLOCK_2D_EN_GCLK |
483                                    GLAMO_CLOCK_2D_DG_M7CLK |
484                                    GLAMO_CLOCK_2D_DG_GCLK,
485                                    0);
486                 __reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
487                                    GLAMO_HOSTBUS2_MMIO_EN_2D,
488                                    GLAMO_HOSTBUS2_MMIO_EN_2D);
489                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
490                                    GLAMO_CLOCK_GEN51_EN_DIV_GCLK,
491                                    0);
492                 break;
493         default:
494                 return -EINVAL;
495         }
496
497         glamo->engine_enabled_bitfield &= ~(1 << engine);
498
499         return 0;
500 }
501 int glamo_engine_disable(struct glamo_core *glamo, enum glamo_engine engine)
502 {
503         int ret;
504
505         spin_lock(&glamo->lock);
506
507         ret = __glamo_engine_disable(glamo, engine);
508
509         spin_unlock(&glamo->lock);
510
511         return ret;
512 }
513 EXPORT_SYMBOL_GPL(glamo_engine_disable);
514
515 static const u_int16_t engine_clock_regs[__NUM_GLAMO_ENGINES] = {
516         [GLAMO_ENGINE_LCD]      = GLAMO_REG_CLOCK_LCD,
517         [GLAMO_ENGINE_MMC]      = GLAMO_REG_CLOCK_MMC,
518         [GLAMO_ENGINE_ISP]      = GLAMO_REG_CLOCK_ISP,
519         [GLAMO_ENGINE_JPEG]     = GLAMO_REG_CLOCK_JPEG,
520         [GLAMO_ENGINE_3D]       = GLAMO_REG_CLOCK_3D,
521         [GLAMO_ENGINE_2D]       = GLAMO_REG_CLOCK_2D,
522         [GLAMO_ENGINE_MPEG_ENC] = GLAMO_REG_CLOCK_MPEG,
523         [GLAMO_ENGINE_MPEG_DEC] = GLAMO_REG_CLOCK_MPEG,
524 };
525
526 void glamo_engine_clkreg_set(struct glamo_core *glamo,
527                              enum glamo_engine engine,
528                              u_int16_t mask, u_int16_t val)
529 {
530         reg_set_bit_mask(glamo, engine_clock_regs[engine], mask, val);
531 }
532 EXPORT_SYMBOL_GPL(glamo_engine_clkreg_set);
533
534 u_int16_t glamo_engine_clkreg_get(struct glamo_core *glamo,
535                                   enum glamo_engine engine)
536 {
537         u_int16_t val;
538
539         spin_lock(&glamo->lock);
540         val = __reg_read(glamo, engine_clock_regs[engine]);
541         spin_unlock(&glamo->lock);
542
543         return val;
544 }
545 EXPORT_SYMBOL_GPL(glamo_engine_clkreg_get);
546
547 struct glamo_script reset_regs[] = {
548         [GLAMO_ENGINE_LCD] = {
549                 GLAMO_REG_CLOCK_LCD, GLAMO_CLOCK_LCD_RESET
550         },
551 #if 0
552         [GLAMO_ENGINE_HOST] = {
553                 GLAMO_REG_CLOCK_HOST, GLAMO_CLOCK_HOST_RESET
554         },
555         [GLAMO_ENGINE_MEM] = {
556                 GLAMO_REG_CLOCK_MEM, GLAMO_CLOCK_MEM_RESET
557         },
558 #endif
559         [GLAMO_ENGINE_MMC] = {
560                 GLAMO_REG_CLOCK_MMC, GLAMO_CLOCK_MMC_RESET
561         },
562         [GLAMO_ENGINE_CMDQ] = {
563                 GLAMO_REG_CLOCK_2D, GLAMO_CLOCK_2D_CQ_RESET
564         },
565         [GLAMO_ENGINE_2D] = {
566                 GLAMO_REG_CLOCK_2D, GLAMO_CLOCK_2D_RESET
567         },
568         [GLAMO_ENGINE_JPEG] = {
569                 GLAMO_REG_CLOCK_JPEG, GLAMO_CLOCK_JPEG_RESET
570         },
571 };
572
573 void glamo_engine_reset(struct glamo_core *glamo, enum glamo_engine engine)
574 {
575         struct glamo_script *rst;
576
577         if (engine >= ARRAY_SIZE(reset_regs)) {
578                 dev_warn(&glamo->pdev->dev, "unknown engine %u\n", engine);
579                 return;
580         }
581
582         rst = &reset_regs[engine];
583
584         spin_lock(&glamo->lock);
585         __reg_set_bit(glamo, rst->reg, rst->val);
586         __reg_clear_bit(glamo, rst->reg, rst->val);
587         spin_unlock(&glamo->lock);
588 }
589 EXPORT_SYMBOL_GPL(glamo_engine_reset);
590
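/*
 * Helper for boards that wire the LCM reset line to Glamo GPIO4; it simply
 * drives that pin to the requested level and makes it an output.
 */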
591 void glamo_lcm_reset(struct platform_device *pdev, int level)
592 {
593         struct glamo_core *glamo = dev_get_drvdata(&pdev->dev);
594         if (!glamo)
595                 return;
596
597         glamo_gpio_setpin(glamo, GLAMO_GPIO4, level);
598         glamo_gpio_cfgpin(glamo, GLAMO_GPIO4_OUTPUT);
599 }
600 EXPORT_SYMBOL_GPL(glamo_lcm_reset);
601
602 enum glamo_pll {
603         GLAMO_PLL1,
604         GLAMO_PLL2,
605 };
606
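/*
 * The PLL output is the oscillator rate times the multiplier read back from
 * the PLL generator register (for the usual 32768 Hz oscillator the register
 * value is used as-is, otherwise it is scaled down by 512).  For example,
 * with the init script values below, 0x05db (1499) * 32768 Hz is about
 * 49.1 MHz for PLL1 and 0x0aba (2746) * 32768 Hz is about 90 MHz for PLL2,
 * which the script comments round to 48 MHz and 90 MHz.
 */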
607 static int glamo_pll_rate(struct glamo_core *glamo,
608                           enum glamo_pll pll)
609 {
610         u_int16_t reg;
611         unsigned int div = 512;
612         unsigned int osci = glamo->pdata->osci_clock_rate;
613
614         if (osci == 32768)
615                 div = 1;
616
617         switch (pll) {
618         case GLAMO_PLL1:
619                 reg = __reg_read(glamo, GLAMO_REG_PLL_GEN1);
620                 break;
621         case GLAMO_PLL2:
622                 reg = __reg_read(glamo, GLAMO_REG_PLL_GEN3);
623                 break;
624         default:
625                 return -EINVAL;
626         }
627         return (osci/div)*reg;
628 }
629
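/*
 * Reclocking: "ps" is the desired clock period in picoseconds.  It is
 * converted to kHz (10^9 / ps), the engine's source PLL rate is divided by
 * that target to obtain a divider, and divider - 1 is written into the
 * engine's clock-generator register (GEN7 bits 7:0 for the LCD engine).
 */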
630 int glamo_engine_reclock(struct glamo_core *glamo,
631                          enum glamo_engine engine,
632                          int ps)
633 {
634         int pll, khz;
635         u_int16_t reg, mask, val = 0;
636
637         if (!ps)
638                 return 0;
639
640         switch (engine) {
641         case GLAMO_ENGINE_LCD:
642                 pll = GLAMO_PLL1;
643                 reg = GLAMO_REG_CLOCK_GEN7;
644                 mask = 0xff;
645                 break;
646         default:
647                 dev_warn(&glamo->pdev->dev,
648                          "reclock of engine 0x%x not supported\n", engine);
649                 return -EINVAL;
650                 break;
651         }
652
653         pll = glamo_pll_rate(glamo, pll);
654         khz = 1000000000UL / ps;
655
656         if (khz)
657                 val = (pll / khz) / 1000;
658
659         dev_dbg(&glamo->pdev->dev,
660                         "PLL %d, kHZ %d, div %d\n", pll, khz, val);
661
662         if (val) {
663                 val--;
664                 reg_set_bit_mask(glamo, reg, mask, val);
665                 mdelay(5); /* wait some time to stabilize */
666
667                 return 0;
668         } else {
669                 return -EINVAL;
670         }
671 }
672 EXPORT_SYMBOL_GPL(glamo_engine_reclock);
673
674 /***********************************************************************
675  * script support
676  ***********************************************************************/
677
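/*
 * Register scripts are arrays of { reg, val } pairs with a few magic
 * register numbers: 0xffff terminates the script, 0xfffe delays for "val"
 * milliseconds (busy-waiting roughly four times that when sleeping is not
 * allowed), 0xfffd spins until both PLLs report lock, and writes to 0x200
 * are rewritten according to the slow_memory parameter above.
 */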
678 int glamo_run_script(struct glamo_core *glamo, struct glamo_script *script,
679                      int len, int may_sleep)
680 {
681         int i;
682
683         for (i = 0; i < len; i++) {
684                 struct glamo_script *line = &script[i];
685
686                 switch (line->reg) {
687                 case 0xffff:
688                         return 0;
689                 case 0xfffe:
690                         if (may_sleep)
691                                 msleep(line->val);
692                         else
693                                 mdelay(line->val * 4);
694                         break;
695                 case 0xfffd:
696                         /* spin until PLLs lock */
697                         while ((__reg_read(glamo, GLAMO_REG_PLL_GEN5) & 3) != 3)
698                                 ;
699                         break;
700
701                 /*
702                  * couple of people reported artefacts with 2.6.28 changes, this
703                  * allows reversion to 2.6.24 settings
704                  */
705
706                 case 0x200:
707                         switch (slow_memory) {
708                         /* choice 1 is the most conservative */
709                         case 1: /* 3 waits on Async BB R & W, Use PLL 1 for mem bus */
710                                 __reg_write(glamo, script[i].reg, 0xef0);
711                                 break;
712                         case 2: /* 2 waits on Async BB R & W, Use PLL 1 for mem bus */
713                                 __reg_write(glamo, script[i].reg, 0xea0);
714                                 break;
715                         case 3: /* 1 waits on Async BB R & W, Use PLL 1 for mem bus */
716                                 __reg_write(glamo, script[i].reg, 0xe50);
717                                 break;
718                         case 4: /* 0 waits on Async BB R & W, Use PLL 1 for mem bus */
719                                 __reg_write(glamo, script[i].reg, 0xe00);
720                                 break;
721
722                         /* using PLL2 for memory bus increases CPU bandwidth significantly */
723                         case 5: /* 3 waits on Async BB R & W, Use PLL 2 for mem bus */
724                                 __reg_write(glamo, script[i].reg, 0xef3);
725                                 break;
726                         case 6: /* 2 waits on Async BB R & W, Use PLL 2 for mem bus */
727                                 __reg_write(glamo, script[i].reg, 0xea3);
728                                 break;
729                         case 7: /* 1 waits on Async BB R & W, Use PLL 2 for mem bus */
730                                 __reg_write(glamo, script[i].reg, 0xe53);
731                                 break;
732                         /* default of 0 or >7 is fastest */
733                         default: /* 0 waits on Async BB R & W, Use PLL 2 for mem bus */
734                                 __reg_write(glamo, script[i].reg, 0xe03);
735                                 break;
736                         }
737                         break;
738
739                 default:
740                         __reg_write(glamo, script[i].reg, script[i].val);
741                         break;
742                 }
743         }
744
745         return 0;
746 }
747 EXPORT_SYMBOL(glamo_run_script);
748
749 static struct glamo_script glamo_init_script[] = {
750         { GLAMO_REG_CLOCK_HOST,         0x1000 },
751                 { 0xfffe, 2 },
752         { GLAMO_REG_CLOCK_MEMORY,       0x1000 },
753         { GLAMO_REG_CLOCK_MEMORY,       0x2000 },
754         { GLAMO_REG_CLOCK_LCD,          0x1000 },
755         { GLAMO_REG_CLOCK_MMC,          0x1000 },
756         { GLAMO_REG_CLOCK_ISP,          0x1000 },
757         { GLAMO_REG_CLOCK_ISP,          0x3000 },
758         { GLAMO_REG_CLOCK_JPEG,         0x1000 },
759         { GLAMO_REG_CLOCK_3D,           0x1000 },
760         { GLAMO_REG_CLOCK_3D,           0x3000 },
761         { GLAMO_REG_CLOCK_2D,           0x1000 },
762         { GLAMO_REG_CLOCK_2D,           0x3000 },
763         { GLAMO_REG_CLOCK_RISC1,        0x1000 },
764         { GLAMO_REG_CLOCK_MPEG,         0x3000 },
765         { GLAMO_REG_CLOCK_MPEG,         0x3000 },
766         { GLAMO_REG_CLOCK_MPROC,        0x1000 /*0x100f*/ },
767                 { 0xfffe, 2 },
768         { GLAMO_REG_CLOCK_HOST,         0x0000 },
769         { GLAMO_REG_CLOCK_MEMORY,       0x0000 },
770         { GLAMO_REG_CLOCK_LCD,          0x0000 },
771         { GLAMO_REG_CLOCK_MMC,          0x0000 },
772 #if 0
773 /* unused engines must be left in reset to stop MMC block read "blackouts" */
774         { GLAMO_REG_CLOCK_ISP,          0x0000 },
775         { GLAMO_REG_CLOCK_ISP,          0x0000 },
776         { GLAMO_REG_CLOCK_JPEG,         0x0000 },
777         { GLAMO_REG_CLOCK_3D,           0x0000 },
778         { GLAMO_REG_CLOCK_3D,           0x0000 },
779         { GLAMO_REG_CLOCK_2D,           0x0000 },
780         { GLAMO_REG_CLOCK_2D,           0x0000 },
781         { GLAMO_REG_CLOCK_RISC1,        0x0000 },
782         { GLAMO_REG_CLOCK_MPEG,         0x0000 },
783         { GLAMO_REG_CLOCK_MPEG,         0x0000 },
784 #endif
785         { GLAMO_REG_PLL_GEN1,           0x05db },       /* 48MHz */
786         { GLAMO_REG_PLL_GEN3,           0x0aba },       /* 90MHz */
787         { 0xfffd, 0 },
788         /*
789          * b9 of this register MUST be zero to get any interrupts on INT#
790          * the other set bits enable all the engine interrupt sources
791          */
792         { GLAMO_REG_IRQ_ENABLE,         0x01ff },
793         { GLAMO_REG_CLOCK_GEN6,         0x2000 },
794         { GLAMO_REG_CLOCK_GEN7,         0x0101 },
795         { GLAMO_REG_CLOCK_GEN8,         0x0100 },
796         { GLAMO_REG_CLOCK_HOST,         0x000d },
797         /*
798          * b7..b4 = 0 = no wait states on read or write
799          * b0 = 1 select PLL2 for Host interface, b1 = enable it
800          */
801         { 0x200,        0x0e03 /* this is replaced by script parser */ },
802         { 0x202,        0x07ff },
803         { 0x212,        0x0000 },
804         { 0x214,        0x4000 },
805         { 0x216,        0xf00e },
806
807         /* S-Media recommended "set tiling mode to 512 mode for memory access
808          * more efficiency when 640x480" */
809         { GLAMO_REG_MEM_TYPE,           0x0c74 }, /* 8MB, 16 word pg wr+rd */
810         { GLAMO_REG_MEM_GEN,            0xafaf }, /* 63 grants min + max */
811
812         { GLAMO_REGOFS_HOSTBUS + 2,     0xffff }, /* enable on MMIO */
813
814         { GLAMO_REG_MEM_TIMING1,        0x0108 },
815         { GLAMO_REG_MEM_TIMING2,        0x0010 }, /* Taa = 3 MCLK */
816         { GLAMO_REG_MEM_TIMING3,        0x0000 },
817         { GLAMO_REG_MEM_TIMING4,        0x0000 }, /* CE1# delay fall/rise */
818         { GLAMO_REG_MEM_TIMING5,        0x0000 }, /* UB# LB# */
819         { GLAMO_REG_MEM_TIMING6,        0x0000 }, /* OE# */
820         { GLAMO_REG_MEM_TIMING7,        0x0000 }, /* WE# */
821         { GLAMO_REG_MEM_TIMING8,        0x1002 }, /* MCLK delay, was 0x1000 */
822         { GLAMO_REG_MEM_TIMING9,        0x6006 },
823         { GLAMO_REG_MEM_TIMING10,       0x00ff },
824         { GLAMO_REG_MEM_TIMING11,       0x0001 },
825         { GLAMO_REG_MEM_POWER1,         0x0020 },
826         { GLAMO_REG_MEM_POWER2,         0x0000 },
827         { GLAMO_REG_MEM_DRAM1,          0x0000 },
828                 { 0xfffe, 1 },
829         { GLAMO_REG_MEM_DRAM1,          0xc100 },
830                 { 0xfffe, 1 },
831         { GLAMO_REG_MEM_DRAM1,          0xe100 },
832         { GLAMO_REG_MEM_DRAM2,          0x01d6 },
833         { GLAMO_REG_CLOCK_MEMORY,       0x000b },
834         { GLAMO_REG_GPIO_GEN1,          0x000f },
835         { GLAMO_REG_GPIO_GEN2,          0x111e },
836         { GLAMO_REG_GPIO_GEN3,          0xccc3 },
837         { GLAMO_REG_GPIO_GEN4,          0x111e },
838         { GLAMO_REG_GPIO_GEN5,          0x000f },
839 };
840 #if 0
841 static struct glamo_script glamo_resume_script[] = {
842
843         { GLAMO_REG_PLL_GEN1,           0x05db },       /* 48MHz */
844         { GLAMO_REG_PLL_GEN3,           0x0aba },       /* 90MHz */
845         { GLAMO_REG_DFT_GEN6, 1 },
846                 { 0xfffe, 100 },
847                 { 0xfffd, 0 },
848         { 0x200,        0x0e03 },
849
850         /*
851          * b9 of this register MUST be zero to get any interrupts on INT#
852          * the other set bits enable all the engine interrupt sources
853          */
854         { GLAMO_REG_IRQ_ENABLE,         0x01ff },
855         { GLAMO_REG_CLOCK_HOST,         0x0018 },
856         { GLAMO_REG_CLOCK_GEN5_1, 0x18b1 },
857
858         { GLAMO_REG_MEM_DRAM1,          0x0000 },
859                 { 0xfffe, 1 },
860         { GLAMO_REG_MEM_DRAM1,          0xc100 },
861                 { 0xfffe, 1 },
862         { GLAMO_REG_MEM_DRAM1,          0xe100 },
863         { GLAMO_REG_MEM_DRAM2,          0x01d6 },
864         { GLAMO_REG_CLOCK_MEMORY,       0x000b },
865 };
866 #endif
867
868 enum glamo_power {
869         GLAMO_POWER_ON,
870         GLAMO_POWER_SUSPEND,
871 };
872
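/*
 * Suspend/resume power sequencing.  GLAMO_POWER_SUSPEND writes IRQ_ENABLE so
 * that nothing reaches INT#, shuts down the running engines, puts the DRAM
 * into self-refresh and deep power-down, and finally stops the PLLs;
 * GLAMO_POWER_ON pulses the external reset line and replays the full init
 * script, since the chip state after resume cannot be trusted.
 */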
873 static void glamo_power(struct glamo_core *glamo,
874                         enum glamo_power new_state)
875 {
876         int n;
877         unsigned long flags;
878
879         spin_lock_irqsave(&glamo->lock, flags);
880
881         dev_info(&glamo->pdev->dev, "***** glamo_power -> %d\n", new_state);
882
883         /*
884 Power management
885 static const REG_VALUE_MASK_TYPE reg_powerOn[] =
886 {
887     { REG_GEN_DFT6,     REG_BIT_ALL,    REG_DATA(1u << 0)           },
888     { REG_GEN_PLL3,     0u,             REG_DATA(1u << 13)          },
889     { REG_GEN_MEM_CLK,  REG_BIT_ALL,    REG_BIT_EN_MOCACLK          },
890     { REG_MEM_DRAM2,    0u,             REG_BIT_EN_DEEP_POWER_DOWN  },
891     { REG_MEM_DRAM1,    0u,             REG_BIT_SELF_REFRESH        }
892 };
893
894 static const REG_VALUE_MASK_TYPE reg_powerStandby[] =
895 {
896     { REG_MEM_DRAM1,    REG_BIT_ALL,    REG_BIT_SELF_REFRESH    },
897     { REG_GEN_MEM_CLK,  0u,             REG_BIT_EN_MOCACLK      },
898     { REG_GEN_PLL3,     REG_BIT_ALL,    REG_DATA(1u << 13)      },
899     { REG_GEN_DFT5,     REG_BIT_ALL,    REG_DATA(1u << 0)       }
900 };
901
902 static const REG_VALUE_MASK_TYPE reg_powerSuspend[] =
903 {
904     { REG_MEM_DRAM2,    REG_BIT_ALL,    REG_BIT_EN_DEEP_POWER_DOWN  },
905     { REG_GEN_MEM_CLK,  0u,             REG_BIT_EN_MOCACLK          },
906     { REG_GEN_PLL3,     REG_BIT_ALL,    REG_DATA(1u << 13)          },
907     { REG_GEN_DFT5,     REG_BIT_ALL,    REG_DATA(1u << 0)           }
908 };
909 */
910
911         switch (new_state) {
912         case GLAMO_POWER_ON:
913
914                 /*
915                  * Glamo state on resume is nondeterministic in some
916                  * fundamental way; it has also been observed that the
917                  * Glamo reset pin can get asserted by, e.g., touching it
918                  * with a scope probe.  So the only answer is to roll with
919                  * it and force an external reset of the Glamo during resume.
920                  */
921
922                 (glamo->pdata->glamo_external_reset)(0);
923                 udelay(10);
924                 (glamo->pdata->glamo_external_reset)(1);
925                 mdelay(5);
926
927                 glamo_run_script(glamo, glamo_init_script,
928                          ARRAY_SIZE(glamo_init_script), 0);
929
930                 break;
931
932         case GLAMO_POWER_SUSPEND:
933
934                 /* nuke interrupts */
935                 __reg_write(glamo, GLAMO_REG_IRQ_ENABLE, 0x200);
936
937                 /* stash a copy of which engines were running */
938                 glamo->engine_enabled_bitfield_suspend =
939                                                  glamo->engine_enabled_bitfield;
940
941                 /* take down each engine before we kill mem and pll */
942                 for (n = 0; n < __NUM_GLAMO_ENGINES; n++)
943                         if (glamo->engine_enabled_bitfield & (1 << n))
944                                 __glamo_engine_disable(glamo, n);
945
946                 /* enable self-refresh */
947
948                 __reg_write(glamo, GLAMO_REG_MEM_DRAM1,
949                                         GLAMO_MEM_DRAM1_EN_DRAM_REFRESH |
950                                         GLAMO_MEM_DRAM1_EN_GATE_CKE |
951                                         GLAMO_MEM_DRAM1_SELF_REFRESH |
952                                         GLAMO_MEM_REFRESH_COUNT);
953                 __reg_write(glamo, GLAMO_REG_MEM_DRAM1,
954                                         GLAMO_MEM_DRAM1_EN_MODEREG_SET |
955                                         GLAMO_MEM_DRAM1_EN_DRAM_REFRESH |
956                                         GLAMO_MEM_DRAM1_EN_GATE_CKE |
957                                         GLAMO_MEM_DRAM1_SELF_REFRESH |
958                                         GLAMO_MEM_REFRESH_COUNT);
959
960                 /* force RAM into deep powerdown */
961
962                 __reg_write(glamo, GLAMO_REG_MEM_DRAM2,
963                                         GLAMO_MEM_DRAM2_DEEP_PWRDOWN |
964                                         (7 << 6) | /* tRC */
965                                         (1 << 4) | /* tRP */
966                                         (1 << 2) | /* tRCD */
967                                         2); /* CAS latency */
968
969                 /* disable clocks to memory */
970                 __reg_write(glamo, GLAMO_REG_CLOCK_MEMORY, 0);
971
972                 /* all dividers from OSCI */
973                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1, 0x400, 0x400);
974
975                 /* PLL2 into bypass */
976                 __reg_set_bit_mask(glamo, GLAMO_REG_PLL_GEN3, 1 << 12, 1 << 12);
977
978                 __reg_write(glamo, 0x200, 0x0e00);
979
980
981                 /* kill PLLS 1 then 2 */
982                 __reg_write(glamo, GLAMO_REG_DFT_GEN5, 0x0001);
983                 __reg_set_bit_mask(glamo, GLAMO_REG_PLL_GEN3, 1 << 13, 1 << 13);
984
985                 break;
986         }
987
988         spin_unlock_irqrestore(&glamo->lock, flags);
989 }
990
991 #if 0
992 #define MEMDETECT_RETRY 6
993 static unsigned int detect_memsize(struct glamo_core *glamo)
994 {
995         int i;
996
997         /*static const u_int16_t pattern[] = {
998                 0x1111, 0x8a8a, 0x2222, 0x7a7a,
999                 0x3333, 0x6a6a, 0x4444, 0x5a5a,
1000                 0x5555, 0x4a4a, 0x6666, 0x3a3a,
1001                 0x7777, 0x2a2a, 0x8888, 0x1a1a
1002         }; */
1003
1004         for (i = 0; i < MEMDETECT_RETRY; i++) {
1005                 switch (glamo->type) {
1006                 case 3600:
1007                         __reg_write(glamo, GLAMO_REG_MEM_TYPE, 0x0072);
1008                         __reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0xc100);
1009                         break;
1010                 case 3650:
1011                         switch (glamo->revision) {
1012                         case GLAMO_CORE_REV_A0:
1013                                 if (i & 1)
1014                                         __reg_write(glamo, GLAMO_REG_MEM_TYPE,
1015                                                     0x097a);
1016                                 else
1017                                         __reg_write(glamo, GLAMO_REG_MEM_TYPE,
1018                                                     0x0173);
1019
1020                                 __reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0x0000);
1021                                 msleep(1);
1022                                 __reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0xc100);
1023                                 break;
1024                         default:
1025                                 if (i & 1)
1026                                         __reg_write(glamo, GLAMO_REG_MEM_TYPE,
1027                                                     0x0972);
1028                                 else
1029                                         __reg_write(glamo, GLAMO_REG_MEM_TYPE,
1030                                                     0x0872);
1031
1032                                 __reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0x0000);
1033                                 msleep(1);
1034                                 __reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0xe100);
1035                                 break;
1036                         }
1037                         break;
1038                 case 3700:
1039                         /* FIXME */
1040                 default:
1041                         break;
1042                 }
1043
1044 #if 0
1045                 /* FIXME: finish implementation */
1046                 for (j = 0; j < 8; j++) {
1047                         __
1048 #endif
1049         }
1050
1051         return 0;
1052 }
1053 #endif
1054
1055 /* Find out if we can support this version of the Glamo chip */
1056 static int glamo_supported(struct glamo_core *glamo)
1057 {
1058         u_int16_t dev_id, rev_id; /*, memsize; */
1059
1060         dev_id = __reg_read(glamo, GLAMO_REG_DEVICE_ID);
1061         rev_id = __reg_read(glamo, GLAMO_REG_REVISION_ID);
1062
1063         switch (dev_id) {
1064         case 0x3650:
1065                 switch (rev_id) {
1066                 case GLAMO_CORE_REV_A2:
1067                         break;
1068                 case GLAMO_CORE_REV_A0:
1069                 case GLAMO_CORE_REV_A1:
1070                 case GLAMO_CORE_REV_A3:
1071                         dev_warn(&glamo->pdev->dev, "untested core revision "
1072                                  "%04x, your mileage may vary\n", rev_id);
1073                         break;
1074                 default:
1075                         dev_warn(&glamo->pdev->dev, "unknown glamo revision "
1076                                  "%04x, your mileage may vary\n", rev_id);
1077                         /* maybe should abort ? */
1078                 }
1079                 break;
1080         case 0x3600:
1081         case 0x3700:
1082         default:
1083                 dev_err(&glamo->pdev->dev, "unsupported Glamo device %04x\n",
1084                         dev_id);
1085                 return 0;
1086         }
1087
1088         dev_dbg(&glamo->pdev->dev, "Detected Glamo core %04x Revision %04x "
1089                  "(%uHz CPU / %uHz Memory)\n", dev_id, rev_id,
1090                  glamo_pll_rate(glamo, GLAMO_PLL1),
1091                  glamo_pll_rate(glamo, GLAMO_PLL2));
1092
1093         return 1;
1094 }
1095
1096 static int __devinit glamo_probe(struct platform_device *pdev)
1097 {
1098         int rc = 0, irq;
1099         struct glamo_core *glamo;
1100
1101         glamo = kzalloc(sizeof(*glamo), GFP_KERNEL);
1102         if (!glamo)
1103                 return -ENOMEM;
1104
1105         spin_lock_init(&glamo->lock);
1106         glamo->pdev = pdev;
1107         glamo->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1108         glamo->irq = platform_get_irq(pdev, 0);
1109         glamo->pdata = pdev->dev.platform_data;
1110         if (!glamo->mem || !glamo->pdata) {
1111                 dev_err(&pdev->dev, "platform device with no MEM/PDATA ?\n");
1112                 rc = -ENOENT;
1113                 goto bail_free;
1114         }
1115
1116         /* register a number of sibling devices whose IOMEM resources
1117          * are siblings of pdev's IOMEM resource */
1118
1119         /* only remap the generic, hostbus and memory controller registers */
1120         glamo->base = ioremap(glamo->mem->start, 0x4000 /*GLAMO_REGOFS_VIDCAP*/);
        if (!glamo->base) {
                dev_err(&pdev->dev, "failed to ioremap() memory region\n");
                rc = -ENOMEM;
                goto bail_free;
        }
1125
1126         platform_set_drvdata(pdev, glamo);
1127
1128         (glamo->pdata->glamo_external_reset)(0);
1129         udelay(10);
1130         (glamo->pdata->glamo_external_reset)(1);
1131         mdelay(10);
1132
1133         /*
1134          * finally set the mfd interrupts up
1135          * can't do them earlier or sibling probes blow up
1136          */
1137
1138         for (irq = IRQ_GLAMO(0); irq <= IRQ_GLAMO(8); irq++) {
1139                 set_irq_chip_and_handler(irq, &glamo_irq_chip, handle_level_irq);
1140                 set_irq_flags(irq, IRQF_VALID);
1141                 set_irq_chip_data(irq, glamo);
1142         }
1143
1144         if (glamo->pdata->glamo_irq_is_wired &&
1145             !glamo->pdata->glamo_irq_is_wired()) {
1146                 set_irq_chained_handler(glamo->irq, glamo_irq_demux_handler);
1147                 set_irq_type(glamo->irq, IRQ_TYPE_EDGE_FALLING);
1148                 set_irq_chip_data(glamo->irq, glamo);
1149                 dev_info(&pdev->dev, "Glamo interrupt registered\n");
1150                 glamo->irq_works = 1;
1151         } else {
1152                 dev_err(&pdev->dev, "Glamo interrupt not used\n");
1153                 glamo->irq_works = 0;
1154         }
1155
1156         /* confirm it isn't insane version */
        if (!glamo_supported(glamo)) {
                dev_err(&pdev->dev, "This Glamo is not supported\n");
                rc = -ENODEV;
                goto bail_irq;
        }
1161
1162         /* sysfs */
1163         rc = sysfs_create_group(&pdev->dev.kobj, &glamo_attr_group);
1164         if (rc < 0) {
1165                 dev_err(&pdev->dev, "cannot create sysfs group\n");
1166                 goto bail_irq;
1167         }
1168
1169         /* init the chip with canned register set */
1170
1171         dev_dbg(&glamo->pdev->dev, "running init script\n");
1172         glamo_run_script(glamo, glamo_init_script,
1173                          ARRAY_SIZE(glamo_init_script), 1);
1174
1175         dev_info(&glamo->pdev->dev, "Glamo core PLL1: %uHz, PLL2: %uHz\n",
1176                  glamo_pll_rate(glamo, GLAMO_PLL1),
1177                  glamo_pll_rate(glamo, GLAMO_PLL2));
1178
1179         /* register siblings */
1180         glamo->pdata->mmc_data->core = glamo;
1181         glamo_cells[GLAMO_CELL_MMC].platform_data = glamo->pdata->mmc_data;
1182         glamo_cells[GLAMO_CELL_MMC].data_size =
1183                 sizeof(struct glamo_mmc_platform_data);
1184
1185         glamo->pdata->fb_data->core = glamo;
1186         glamo_cells[GLAMO_CELL_FB].platform_data = glamo->pdata->fb_data;
1187         glamo_cells[GLAMO_CELL_FB].data_size = sizeof(struct glamo_fb_platform_data);
1188
1189         glamo->pdata->spigpio_data->core = glamo;
1190         glamo_cells[GLAMO_CELL_SPI_GPIO].platform_data =
1191                 glamo->pdata->spigpio_data;
1192         glamo_cells[GLAMO_CELL_SPI_GPIO].data_size =
1193                 sizeof(struct glamo_spigpio_platform_data);
1194
1195         mfd_add_devices(&pdev->dev, pdev->id, glamo_cells,
1196                         ARRAY_SIZE(glamo_cells),
1197                         glamo->mem, 0);
1198
1199         /* only request the generic, hostbus and memory controller MMIO */
1200         glamo->mem = request_mem_region(glamo->mem->start,
1201                                         GLAMO_REGOFS_VIDCAP, "glamo-core");
        if (!glamo->mem) {
                dev_err(&pdev->dev, "failed to request memory region\n");
                rc = -EBUSY;
                goto bail_irq;
        }
1206
1207         return 0;
1208
1209 bail_irq:
1210         disable_irq(glamo->irq);
1211         set_irq_chained_handler(glamo->irq, NULL);
1212         set_irq_chip_data(glamo->irq, NULL);
1213
1214         for (irq = IRQ_GLAMO(0); irq <= IRQ_GLAMO(8); irq++) {
1215                 set_irq_flags(irq, 0);
1216                 set_irq_chip(irq, NULL);
1217                 set_irq_chip_data(irq, NULL);
1218         }
1219
1220         iounmap(glamo->base);
1221 bail_free:
1222         platform_set_drvdata(pdev, NULL);
1223         kfree(glamo);
1224
1225         return rc;
1226 }
1227
1228 static int glamo_remove(struct platform_device *pdev)
1229 {
1230         struct glamo_core *glamo = platform_get_drvdata(pdev);
1231         int irq;
1232
1233         disable_irq(glamo->irq);
1234         set_irq_chained_handler(glamo->irq, NULL);
1235         set_irq_chip_data(glamo->irq, NULL);
1236
1237         for (irq = IRQ_GLAMO(0); irq <= IRQ_GLAMO(8); irq++) {
1238                 set_irq_flags(irq, 0);
1239                 set_irq_chip(irq, NULL);
1240                 set_irq_chip_data(irq, NULL);
1241         }
1242
1243         platform_set_drvdata(pdev, NULL);
1244         mfd_remove_devices(&pdev->dev);
1245         iounmap(glamo->base);
1246         release_mem_region(glamo->mem->start, GLAMO_REGOFS_VIDCAP);
1247         kfree(glamo);
1248
1249         return 0;
1250 }
1251
1252 #ifdef CONFIG_PM
1253
1254 static int glamo_suspend(struct platform_device *pdev, pm_message_t state)
1255 {
1256         struct glamo_core *glamo = dev_get_drvdata(&pdev->dev);
1257         glamo->suspending = 1;
1258         glamo_power(glamo, GLAMO_POWER_SUSPEND);
1259
1260         return 0;
1261 }
1262
1263 static int glamo_resume(struct platform_device *pdev)
1264 {
1265         struct glamo_core *glamo = dev_get_drvdata(&pdev->dev);
1266         glamo_power(glamo, GLAMO_POWER_ON);
1267         glamo->suspending = 0;
1268
1269         return 0;
1270 }
1271
1272 #else
1273 #define glamo_suspend NULL
1274 #define glamo_resume  NULL
1275 #endif
1276
1277 static struct platform_driver glamo_driver = {
1278         .probe          = glamo_probe,
1279         .remove         = glamo_remove,
1280         .suspend        = glamo_suspend,
1281         .resume = glamo_resume,
1282         .driver         = {
1283                 .name   = "glamo3362",
1284                 .owner  = THIS_MODULE,
1285         },
1286 };
1287
1288 static int __init glamo_init(void)
1289 {
1290         return platform_driver_register(&glamo_driver);
1291 }
1292
1293 static void __exit glamo_cleanup(void)
1294 {
1295         platform_driver_unregister(&glamo_driver);
1296 }
1297
1298 module_init(glamo_init);
1299 module_exit(glamo_cleanup);
1300
1301 MODULE_AUTHOR("Harald Welte <laforge@openmoko.org>");
1302 MODULE_DESCRIPTION("Smedia Glamo 336x/337x core/resource driver");
1303 MODULE_LICENSE("GPL");