Linux-libre 4.7-rc7-gnu
[librecmc/linux-libre.git] / drivers / gpu / drm / radeon / ni.c
1 /*
2  * Copyright 2010 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Alex Deucher
23  */
24 #include <linux/firmware.h>
25 #include <linux/slab.h>
26 #include <linux/module.h>
27 #include <drm/drmP.h>
28 #include "radeon.h"
29 #include "radeon_asic.h"
30 #include "radeon_audio.h"
31 #include <drm/radeon_drm.h>
32 #include "nid.h"
33 #include "atom.h"
34 #include "ni_reg.h"
35 #include "cayman_blit_shaders.h"
36 #include "radeon_ucode.h"
37 #include "clearstate_cayman.h"
38
39 /*
40  * Indirect registers accessor
41  */
42 u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
43 {
44         unsigned long flags;
45         u32 r;
46
47         spin_lock_irqsave(&rdev->smc_idx_lock, flags);
48         WREG32(TN_SMC_IND_INDEX_0, (reg));
49         r = RREG32(TN_SMC_IND_DATA_0);
50         spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
51         return r;
52 }
53
54 void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
55 {
56         unsigned long flags;
57
58         spin_lock_irqsave(&rdev->smc_idx_lock, flags);
59         WREG32(TN_SMC_IND_INDEX_0, (reg));
60         WREG32(TN_SMC_IND_DATA_0, (v));
61         spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
62 }
63
/* Register offsets the RLC saves/restores on Trinity (TN); passed to
 * sumo_rlc_init() — presumably around power gating, TODO confirm. */
static const u32 tn_rlc_save_restore_register_list[] =
{
	0x98fc,
	0x98f0,
	0x9834,
	0x9838,
	0x9870,
	0x9874,
	0x8a14,
	0x8b24,
	0x8bcc,
	0x8b10,
	0x8c30,
	0x8d00,
	0x8d04,
	0x8c00,
	0x8c04,
	0x8c10,
	0x8c14,
	0x8d8c,
	0x8cf0,
	0x8e38,
	0x9508,
	0x9688,
	0x9608,
	0x960c,
	0x9610,
	0x9614,
	0x88c4,
	0x8978,
	0x88d4,
	0x900c,
	0x9100,
	0x913c,
	0x90e8,
	0x9354,
	0xa008,
	0x98f8,
	0x9148,
	0x914c,
	0x3f94,
	0x98f4,
	0x9b7c,
	0x3f8c,
	0x8950,
	0x8954,
	0x8a18,
	0x8b28,
	0x9144,
	0x3f90,
	0x915c,
	0x9160,
	0x9178,
	0x917c,
	0x9180,
	0x918c,
	0x9190,
	0x9194,
	0x9198,
	0x919c,
	0x91a8,
	0x91ac,
	0x91b0,
	0x91b4,
	0x91b8,
	0x91c4,
	0x91c8,
	0x91cc,
	0x91d0,
	0x91d4,
	0x91e0,
	0x91e4,
	0x91ec,
	0x91f0,
	0x91f4,
	0x9200,
	0x9204,
	0x929c,
	0x8030,
	0x9150,
	0x9a60,
	0x920c,
	0x9210,
	0x9228,
	0x922c,
	0x9244,
	0x9248,
	0x91e8,
	0x9294,
	0x9208,
	0x9224,
	0x9240,
	0x9220,
	0x923c,
	0x9258,
	0x9744,
	0xa200,
	0xa204,
	0xa208,
	0xa20c,
	0x8d58,
	0x9030,
	0x9034,
	0x9038,
	0x903c,
	0x9040,
	0x9654,
	0x897c,
	0xa210,
	0xa214,
	0x9868,
	0xa02c,
	0x9664,
	0x9698,
	0x949c,
	0x8e10,
	0x8e18,
	0x8c50,
	0x8c58,
	0x8c60,
	0x8c68,
	0x89b4,
	0x9830,
	0x802c,
};
189
190 extern bool evergreen_is_display_hung(struct radeon_device *rdev);
191 extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
192 extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
193 extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
194 extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
195 extern void evergreen_mc_program(struct radeon_device *rdev);
196 extern void evergreen_irq_suspend(struct radeon_device *rdev);
197 extern int evergreen_mc_init(struct radeon_device *rdev);
198 extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
199 extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
200 extern void evergreen_program_aspm(struct radeon_device *rdev);
201 extern void sumo_rlc_fini(struct radeon_device *rdev);
202 extern int sumo_rlc_init(struct radeon_device *rdev);
203 extern void evergreen_gpu_pci_config_reset(struct radeon_device *rdev);
204
205 /* Firmware Names */
206 /*(DEBLOBBED)*/
207
208
/*
 * "Golden" register tables, consumed by radeon_program_register_sequence()
 * in ni_init_golden_registers() below.  Each entry is a triplet:
 *   { register offset, mask, value }
 */

/* Cayman: secondary table, applied after cayman_golden_registers. */
static const u32 cayman_golden_registers2[] =
{
	0x3e5c, 0xffffffff, 0x00000000,
	0x3e48, 0xffffffff, 0x00000000,
	0x3e4c, 0xffffffff, 0x00000000,
	0x3e64, 0xffffffff, 0x00000000,
	0x3e50, 0xffffffff, 0x00000000,
	0x3e60, 0xffffffff, 0x00000000
};

/* Cayman: main golden settings. */
static const u32 cayman_golden_registers[] =
{
	0x5eb4, 0xffffffff, 0x00000002,
	0x5e78, 0x8f311ff1, 0x001000f0,
	0x3f90, 0xffff0000, 0xff000000,
	0x9148, 0xffff0000, 0xff000000,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0xc78, 0x00000080, 0x00000080,
	0xbd4, 0x70073777, 0x00011003,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x73773777, 0x02011003,
	0x5bc0, 0x00200000, 0x50100000,
	0x98f8, 0x33773777, 0x02011003,
	0x98fc, 0xffffffff, 0x76541032,
	0x7030, 0x31000311, 0x00000011,
	0x2f48, 0x33773777, 0x42010001,
	0x6b28, 0x00000010, 0x00000012,
	0x7728, 0x00000010, 0x00000012,
	0x10328, 0x00000010, 0x00000012,
	0x10f28, 0x00000010, 0x00000012,
	0x11b28, 0x00000010, 0x00000012,
	0x12728, 0x00000010, 0x00000012,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x10c, 0x00000001, 0x00010003,
	0xa02c, 0xffffffff, 0x0000009b,
	0x913c, 0x0000010f, 0x01000100,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0x3700001f, 0x00000002,
	0x960c, 0xffffffff, 0x54763210,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x88d0, 0xffffffff, 0x0f40df40,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000
};

/* Aruba: secondary table, shared by the DVST and Scrapper variants. */
static const u32 dvst_golden_registers2[] =
{
	0x8f8, 0xffffffff, 0,
	0x8fc, 0x00380000, 0,
	0x8f8, 0xffffffff, 1,
	0x8fc, 0x0e000000, 0
};

/* Aruba DVST variants (selected by PCI device id in
 * ni_init_golden_registers()). */
static const u32 dvst_golden_registers[] =
{
	0x690, 0x3fff3fff, 0x20c00033,
	0x918c, 0x0fff0fff, 0x00010006,
	0x91a8, 0x0fff0fff, 0x00010006,
	0x9150, 0xffffdfff, 0x6e944040,
	0x917c, 0x0fff0fff, 0x00030002,
	0x9198, 0x0fff0fff, 0x00030002,
	0x915c, 0x0fff0fff, 0x00010000,
	0x3f90, 0xffff0001, 0xff000000,
	0x9178, 0x0fff0fff, 0x00070000,
	0x9194, 0x0fff0fff, 0x00070000,
	0x9148, 0xffff0001, 0xff000000,
	0x9190, 0x0fff0fff, 0x00090008,
	0x91ac, 0x0fff0fff, 0x00090008,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x929c, 0x00000fff, 0x00000001,
	0x55e4, 0xff607fff, 0xfc000100,
	0x8a18, 0xff000fff, 0x00000100,
	0x8b28, 0xff000fff, 0x00000100,
	0x9144, 0xfffc0fff, 0x00000100,
	0x6ed8, 0x00010101, 0x00010000,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0xfffffffe, 0x00000000,
	0xd0c0, 0xff000fff, 0x00000100,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x73773777, 0x12010001,
	0x5bb0, 0x000000f0, 0x00000070,
	0x98f8, 0x73773777, 0x12010001,
	0x98fc, 0xffffffff, 0x00000010,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x8030, 0x00001f0f, 0x0000100a,
	0x2f48, 0x73773777, 0x12010001,
	0x2408, 0x00030000, 0x000c007f,
	0x8a14, 0xf000003f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x4d8, 0x00000fff, 0x00000100,
	0xa008, 0xffffffff, 0x00010000,
	0x913c, 0xffff03ff, 0x01000100,
	0x8c00, 0x000000ff, 0x00000003,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8cf0, 0x1fff1fff, 0x08e00410,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0xf700071f, 0x00000002,
	0x960c, 0xffffffff, 0x54763210,
	0x20ef8, 0x01ff01ff, 0x00000002,
	0x20e98, 0xfffffbff, 0x00200000,
	0x2015c, 0xffffffff, 0x00000f40,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x8978, 0x3fffffff, 0x04050140,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000
};

/* Aruba Scrapper variants.  NOTE(review): most entries appear twice
 * back-to-back; looks intentional (repeated writes) but worth confirming
 * against the hardware init sequence. */
static const u32 scrapper_golden_registers[] =
{
	0x690, 0x3fff3fff, 0x20c00033,
	0x918c, 0x0fff0fff, 0x00010006,
	0x918c, 0x0fff0fff, 0x00010006,
	0x91a8, 0x0fff0fff, 0x00010006,
	0x91a8, 0x0fff0fff, 0x00010006,
	0x9150, 0xffffdfff, 0x6e944040,
	0x9150, 0xffffdfff, 0x6e944040,
	0x917c, 0x0fff0fff, 0x00030002,
	0x917c, 0x0fff0fff, 0x00030002,
	0x9198, 0x0fff0fff, 0x00030002,
	0x9198, 0x0fff0fff, 0x00030002,
	0x915c, 0x0fff0fff, 0x00010000,
	0x915c, 0x0fff0fff, 0x00010000,
	0x3f90, 0xffff0001, 0xff000000,
	0x3f90, 0xffff0001, 0xff000000,
	0x9178, 0x0fff0fff, 0x00070000,
	0x9178, 0x0fff0fff, 0x00070000,
	0x9194, 0x0fff0fff, 0x00070000,
	0x9194, 0x0fff0fff, 0x00070000,
	0x9148, 0xffff0001, 0xff000000,
	0x9148, 0xffff0001, 0xff000000,
	0x9190, 0x0fff0fff, 0x00090008,
	0x9190, 0x0fff0fff, 0x00090008,
	0x91ac, 0x0fff0fff, 0x00090008,
	0x91ac, 0x0fff0fff, 0x00090008,
	0x3f94, 0xffff0000, 0xff000000,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x929c, 0x00000fff, 0x00000001,
	0x929c, 0x00000fff, 0x00000001,
	0x55e4, 0xff607fff, 0xfc000100,
	0x8a18, 0xff000fff, 0x00000100,
	0x8a18, 0xff000fff, 0x00000100,
	0x8b28, 0xff000fff, 0x00000100,
	0x8b28, 0xff000fff, 0x00000100,
	0x9144, 0xfffc0fff, 0x00000100,
	0x9144, 0xfffc0fff, 0x00000100,
	0x6ed8, 0x00010101, 0x00010000,
	0x9830, 0xffffffff, 0x00000000,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0xfffffffe, 0x00000000,
	0x9838, 0xfffffffe, 0x00000000,
	0xd0c0, 0xff000fff, 0x00000100,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x73773777, 0x12010001,
	0xd0b8, 0x73773777, 0x12010001,
	0x5bb0, 0x000000f0, 0x00000070,
	0x98f8, 0x73773777, 0x12010001,
	0x98f8, 0x73773777, 0x12010001,
	0x98fc, 0xffffffff, 0x00000010,
	0x98fc, 0xffffffff, 0x00000010,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x8030, 0x00001f0f, 0x0000100a,
	0x8030, 0x00001f0f, 0x0000100a,
	0x2f48, 0x73773777, 0x12010001,
	0x2f48, 0x73773777, 0x12010001,
	0x2408, 0x00030000, 0x000c007f,
	0x8a14, 0xf000003f, 0x00000007,
	0x8a14, 0xf000003f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x4d8, 0x00000fff, 0x00000100,
	0x4d8, 0x00000fff, 0x00000100,
	0xa008, 0xffffffff, 0x00010000,
	0xa008, 0xffffffff, 0x00010000,
	0x913c, 0xffff03ff, 0x01000100,
	0x913c, 0xffff03ff, 0x01000100,
	0x90e8, 0x001fffff, 0x010400c0,
	0x8c00, 0x000000ff, 0x00000003,
	0x8c00, 0x000000ff, 0x00000003,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8c30, 0x0000000f, 0x00040005,
	0x8cf0, 0x1fff1fff, 0x08e00410,
	0x8cf0, 0x1fff1fff, 0x08e00410,
	0x900c, 0x00ffffff, 0x0017071f,
	0x28350, 0x00000f01, 0x00000000,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0xf700071f, 0x00000002,
	0x9508, 0xf700071f, 0x00000002,
	0x9688, 0x00300000, 0x0017000f,
	0x960c, 0xffffffff, 0x54763210,
	0x960c, 0xffffffff, 0x54763210,
	0x20ef8, 0x01ff01ff, 0x00000002,
	0x20e98, 0xfffffbff, 0x00200000,
	0x2015c, 0xffffffff, 0x00000f40,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x8978, 0x3fffffff, 0x04050140,
	0x8978, 0x3fffffff, 0x04050140,
	0x88d4, 0x0000001f, 0x00000010,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000,
	0x8974, 0xffffffff, 0x00000000
};
432
433 static void ni_init_golden_registers(struct radeon_device *rdev)
434 {
435         switch (rdev->family) {
436         case CHIP_CAYMAN:
437                 radeon_program_register_sequence(rdev,
438                                                  cayman_golden_registers,
439                                                  (const u32)ARRAY_SIZE(cayman_golden_registers));
440                 radeon_program_register_sequence(rdev,
441                                                  cayman_golden_registers2,
442                                                  (const u32)ARRAY_SIZE(cayman_golden_registers2));
443                 break;
444         case CHIP_ARUBA:
445                 if ((rdev->pdev->device == 0x9900) ||
446                     (rdev->pdev->device == 0x9901) ||
447                     (rdev->pdev->device == 0x9903) ||
448                     (rdev->pdev->device == 0x9904) ||
449                     (rdev->pdev->device == 0x9905) ||
450                     (rdev->pdev->device == 0x9906) ||
451                     (rdev->pdev->device == 0x9907) ||
452                     (rdev->pdev->device == 0x9908) ||
453                     (rdev->pdev->device == 0x9909) ||
454                     (rdev->pdev->device == 0x990A) ||
455                     (rdev->pdev->device == 0x990B) ||
456                     (rdev->pdev->device == 0x990C) ||
457                     (rdev->pdev->device == 0x990D) ||
458                     (rdev->pdev->device == 0x990E) ||
459                     (rdev->pdev->device == 0x990F) ||
460                     (rdev->pdev->device == 0x9910) ||
461                     (rdev->pdev->device == 0x9913) ||
462                     (rdev->pdev->device == 0x9917) ||
463                     (rdev->pdev->device == 0x9918)) {
464                         radeon_program_register_sequence(rdev,
465                                                          dvst_golden_registers,
466                                                          (const u32)ARRAY_SIZE(dvst_golden_registers));
467                         radeon_program_register_sequence(rdev,
468                                                          dvst_golden_registers2,
469                                                          (const u32)ARRAY_SIZE(dvst_golden_registers2));
470                 } else {
471                         radeon_program_register_sequence(rdev,
472                                                          scrapper_golden_registers,
473                                                          (const u32)ARRAY_SIZE(scrapper_golden_registers));
474                         radeon_program_register_sequence(rdev,
475                                                          dvst_golden_registers2,
476                                                          (const u32)ARRAY_SIZE(dvst_golden_registers2));
477                 }
478                 break;
479         default:
480                 break;
481         }
482 }
483
/* Number of {index, value} pairs in each io_mc table below. */
#define BTC_IO_MC_REGS_SIZE 29

/*
 * MC io debug register tables, written via the
 * MC_SEQ_IO_DEBUG_INDEX/MC_SEQ_IO_DEBUG_DATA pair in
 * ni_mc_load_microcode().  Each entry is { debug index, value }.
 * The tables differ only in the final (0x9f) entry.
 */

static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00946a00}
};

static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00936a00}
};

static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00916a00}
};

static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00976b00}
};
613
/*
 * ni_mc_load_microcode - load MC (memory controller) ucode into the engine
 *
 * Picks the per-ASIC io_mc register table and ucode size, programs the MC
 * io debug registers, streams the big-endian firmware words into
 * MC_SEQ_SUP_PGM, restarts the engine and polls for training completion.
 *
 * Returns 0 on success (or when the load is skipped), -EINVAL when no MC
 * firmware has been fetched yet.
 */
int ni_mc_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	u32 mem_type, running, blackout = 0;
	u32 *io_mc_regs;
	int i, ucode_size, regs_size;

	if (!rdev->mc_fw)
		return -EINVAL;

	/* select the io register table / ucode size for this ASIC */
	switch (rdev->family) {
	case CHIP_BARTS:
		io_mc_regs = (u32 *)&barts_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_TURKS:
		io_mc_regs = (u32 *)&turks_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAICOS:
	default:
		io_mc_regs = (u32 *)&caicos_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAYMAN:
		io_mc_regs = (u32 *)&cayman_io_mc_regs;
		ucode_size = CAYMAN_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	}

	mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

	/* only load when the memory is GDDR5 and the MC sequencer is idle */
	if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
		/* NOTE(review): running is guaranteed 0 here by the outer
		 * condition, so this blackout save (and the restore below)
		 * can never execute — looks like dead code; confirm intent. */
		if (running) {
			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
			WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
		}

		/* reset the engine and set to writable */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
		}
		/* load the MC ucode (firmware words are big-endian) */
		fw_data = (const __be32 *)rdev->mc_fw->data;
		for (i = 0; i < ucode_size; i++)
			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete; falls through silently on
		 * timeout (usec_timeout iterations of 1us) */
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
				break;
			udelay(1);
		}

		/* NOTE(review): dead for the same reason as above */
		if (running)
			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}
689
690 int ni_init_microcode(struct radeon_device *rdev)
691 {
692         const char *chip_name;
693         const char *rlc_chip_name;
694         size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
695         size_t smc_req_size = 0;
696         char fw_name[30];
697         int err;
698
699         DRM_DEBUG("\n");
700
701         switch (rdev->family) {
702         case CHIP_BARTS:
703                 chip_name = "BARTS";
704                 rlc_chip_name = "BTC";
705                 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
706                 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
707                 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
708                 mc_req_size = BTC_MC_UCODE_SIZE * 4;
709                 smc_req_size = ALIGN(BARTS_SMC_UCODE_SIZE, 4);
710                 break;
711         case CHIP_TURKS:
712                 chip_name = "TURKS";
713                 rlc_chip_name = "BTC";
714                 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
715                 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
716                 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
717                 mc_req_size = BTC_MC_UCODE_SIZE * 4;
718                 smc_req_size = ALIGN(TURKS_SMC_UCODE_SIZE, 4);
719                 break;
720         case CHIP_CAICOS:
721                 chip_name = "CAICOS";
722                 rlc_chip_name = "BTC";
723                 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
724                 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
725                 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
726                 mc_req_size = BTC_MC_UCODE_SIZE * 4;
727                 smc_req_size = ALIGN(CAICOS_SMC_UCODE_SIZE, 4);
728                 break;
729         case CHIP_CAYMAN:
730                 chip_name = "CAYMAN";
731                 rlc_chip_name = "CAYMAN";
732                 pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
733                 me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
734                 rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
735                 mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
736                 smc_req_size = ALIGN(CAYMAN_SMC_UCODE_SIZE, 4);
737                 break;
738         case CHIP_ARUBA:
739                 chip_name = "ARUBA";
740                 rlc_chip_name = "ARUBA";
741                 /* pfp/me same size as CAYMAN */
742                 pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
743                 me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
744                 rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4;
745                 mc_req_size = 0;
746                 break;
747         default: BUG();
748         }
749
750         DRM_INFO("Loading %s Microcode\n", chip_name);
751
752         snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
753         err = reject_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
754         if (err)
755                 goto out;
756         if (rdev->pfp_fw->size != pfp_req_size) {
757                 printk(KERN_ERR
758                        "ni_cp: Bogus length %zu in firmware \"%s\"\n",
759                        rdev->pfp_fw->size, fw_name);
760                 err = -EINVAL;
761                 goto out;
762         }
763
764         snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
765         err = reject_firmware(&rdev->me_fw, fw_name, rdev->dev);
766         if (err)
767                 goto out;
768         if (rdev->me_fw->size != me_req_size) {
769                 printk(KERN_ERR
770                        "ni_cp: Bogus length %zu in firmware \"%s\"\n",
771                        rdev->me_fw->size, fw_name);
772                 err = -EINVAL;
773         }
774
775         snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", rlc_chip_name);
776         err = reject_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
777         if (err)
778                 goto out;
779         if (rdev->rlc_fw->size != rlc_req_size) {
780                 printk(KERN_ERR
781                        "ni_rlc: Bogus length %zu in firmware \"%s\"\n",
782                        rdev->rlc_fw->size, fw_name);
783                 err = -EINVAL;
784         }
785
786         /* no MC ucode on TN */
787         if (!(rdev->flags & RADEON_IS_IGP)) {
788                 snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
789                 err = reject_firmware(&rdev->mc_fw, fw_name, rdev->dev);
790                 if (err)
791                         goto out;
792                 if (rdev->mc_fw->size != mc_req_size) {
793                         printk(KERN_ERR
794                                "ni_mc: Bogus length %zu in firmware \"%s\"\n",
795                                rdev->mc_fw->size, fw_name);
796                         err = -EINVAL;
797                 }
798         }
799
800         if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) {
801                 snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
802                 err = reject_firmware(&rdev->smc_fw, fw_name, rdev->dev);
803                 if (err) {
804                         printk(KERN_ERR
805                                "smc: error loading firmware \"%s\"\n",
806                                fw_name);
807                         release_firmware(rdev->smc_fw);
808                         rdev->smc_fw = NULL;
809                         err = 0;
810                 } else if (rdev->smc_fw->size != smc_req_size) {
811                         printk(KERN_ERR
812                                "ni_mc: Bogus length %zu in firmware \"%s\"\n",
813                                rdev->mc_fw->size, fw_name);
814                         err = -EINVAL;
815                 }
816         }
817
818 out:
819         if (err) {
820                 if (err != -EINVAL)
821                         printk(KERN_ERR
822                                "ni_cp: Failed to load firmware \"%s\"\n",
823                                fw_name);
824                 release_firmware(rdev->pfp_fw);
825                 rdev->pfp_fw = NULL;
826                 release_firmware(rdev->me_fw);
827                 rdev->me_fw = NULL;
828                 release_firmware(rdev->rlc_fw);
829                 rdev->rlc_fw = NULL;
830                 release_firmware(rdev->mc_fw);
831                 rdev->mc_fw = NULL;
832         }
833         return err;
834 }
835
836 /**
837  * cayman_get_allowed_info_register - fetch the register for the info ioctl
838  *
839  * @rdev: radeon_device pointer
840  * @reg: register offset in bytes
841  * @val: register value
842  *
843  * Returns 0 for success or -EINVAL for an invalid register
844  *
845  */
846 int cayman_get_allowed_info_register(struct radeon_device *rdev,
847                                      u32 reg, u32 *val)
848 {
849         switch (reg) {
850         case GRBM_STATUS:
851         case GRBM_STATUS_SE0:
852         case GRBM_STATUS_SE1:
853         case SRBM_STATUS:
854         case SRBM_STATUS2:
855         case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET):
856         case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET):
857         case UVD_STATUS:
858                 *val = RREG32(reg);
859                 return 0;
860         default:
861                 return -EINVAL;
862         }
863 }
864
865 int tn_get_temp(struct radeon_device *rdev)
866 {
867         u32 temp = RREG32_SMC(TN_CURRENT_GNB_TEMP) & 0x7ff;
868         int actual_temp = (temp / 8) - 49;
869
870         return actual_temp * 1000;
871 }
872
873 /*
874  * Core functions
875  */
/*
 * cayman_gpu_init - one-time setup of the GFX block for Cayman/Aruba.
 *
 * @rdev: radeon_device pointer
 *
 * Selects the per-family shader engine/SIMD/backend topology, programs
 * the tiling/address configuration registers, masks off disabled render
 * backends and SIMDs, and applies the hardware defaults for the 3D
 * engine.  The register write sequence below is order-sensitive.
 */
static void cayman_gpu_init(struct radeon_device *rdev)
{
	u32 gb_addr_config = 0;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 cgts_tcc_disable;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 cgts_sm_ctrl_reg;
	u32 hdp_host_path_cntl;
	u32 tmp;
	u32 disabled_rb_mask;
	int i, j;

	/* per-family shader topology and FIFO sizing */
	switch (rdev->family) {
	case CHIP_CAYMAN:
		rdev->config.cayman.max_shader_engines = 2;
		rdev->config.cayman.max_pipes_per_simd = 4;
		rdev->config.cayman.max_tile_pipes = 8;
		rdev->config.cayman.max_simds_per_se = 12;
		rdev->config.cayman.max_backends_per_se = 4;
		rdev->config.cayman.max_texture_channel_caches = 8;
		rdev->config.cayman.max_gprs = 256;
		rdev->config.cayman.max_threads = 256;
		rdev->config.cayman.max_gs_threads = 32;
		rdev->config.cayman.max_stack_entries = 512;
		rdev->config.cayman.sx_num_of_sets = 8;
		rdev->config.cayman.sx_max_export_size = 256;
		rdev->config.cayman.sx_max_export_pos_size = 64;
		rdev->config.cayman.sx_max_export_smx_size = 192;
		rdev->config.cayman.max_hw_contexts = 8;
		rdev->config.cayman.sq_num_cf_insts = 2;

		rdev->config.cayman.sc_prim_fifo_size = 0x100;
		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_ARUBA:
	default:
		rdev->config.cayman.max_shader_engines = 1;
		rdev->config.cayman.max_pipes_per_simd = 4;
		rdev->config.cayman.max_tile_pipes = 2;
		/* SIMD/backend counts vary with the exact PCI device id */
		if ((rdev->pdev->device == 0x9900) ||
		    (rdev->pdev->device == 0x9901) ||
		    (rdev->pdev->device == 0x9905) ||
		    (rdev->pdev->device == 0x9906) ||
		    (rdev->pdev->device == 0x9907) ||
		    (rdev->pdev->device == 0x9908) ||
		    (rdev->pdev->device == 0x9909) ||
		    (rdev->pdev->device == 0x990B) ||
		    (rdev->pdev->device == 0x990C) ||
		    (rdev->pdev->device == 0x990F) ||
		    (rdev->pdev->device == 0x9910) ||
		    (rdev->pdev->device == 0x9917) ||
		    (rdev->pdev->device == 0x9999) ||
		    (rdev->pdev->device == 0x999C)) {
			rdev->config.cayman.max_simds_per_se = 6;
			rdev->config.cayman.max_backends_per_se = 2;
			rdev->config.cayman.max_hw_contexts = 8;
			rdev->config.cayman.sx_max_export_size = 256;
			rdev->config.cayman.sx_max_export_pos_size = 64;
			rdev->config.cayman.sx_max_export_smx_size = 192;
		} else if ((rdev->pdev->device == 0x9903) ||
			   (rdev->pdev->device == 0x9904) ||
			   (rdev->pdev->device == 0x990A) ||
			   (rdev->pdev->device == 0x990D) ||
			   (rdev->pdev->device == 0x990E) ||
			   (rdev->pdev->device == 0x9913) ||
			   (rdev->pdev->device == 0x9918) ||
			   (rdev->pdev->device == 0x999D)) {
			rdev->config.cayman.max_simds_per_se = 4;
			rdev->config.cayman.max_backends_per_se = 2;
			rdev->config.cayman.max_hw_contexts = 8;
			rdev->config.cayman.sx_max_export_size = 256;
			rdev->config.cayman.sx_max_export_pos_size = 64;
			rdev->config.cayman.sx_max_export_smx_size = 192;
		} else if ((rdev->pdev->device == 0x9919) ||
			   (rdev->pdev->device == 0x9990) ||
			   (rdev->pdev->device == 0x9991) ||
			   (rdev->pdev->device == 0x9994) ||
			   (rdev->pdev->device == 0x9995) ||
			   (rdev->pdev->device == 0x9996) ||
			   (rdev->pdev->device == 0x999A) ||
			   (rdev->pdev->device == 0x99A0)) {
			rdev->config.cayman.max_simds_per_se = 3;
			rdev->config.cayman.max_backends_per_se = 1;
			rdev->config.cayman.max_hw_contexts = 4;
			rdev->config.cayman.sx_max_export_size = 128;
			rdev->config.cayman.sx_max_export_pos_size = 32;
			rdev->config.cayman.sx_max_export_smx_size = 96;
		} else {
			rdev->config.cayman.max_simds_per_se = 2;
			rdev->config.cayman.max_backends_per_se = 1;
			rdev->config.cayman.max_hw_contexts = 4;
			rdev->config.cayman.sx_max_export_size = 128;
			rdev->config.cayman.sx_max_export_pos_size = 32;
			rdev->config.cayman.sx_max_export_smx_size = 96;
		}
		rdev->config.cayman.max_texture_channel_caches = 2;
		rdev->config.cayman.max_gprs = 256;
		rdev->config.cayman.max_threads = 256;
		rdev->config.cayman.max_gs_threads = 32;
		rdev->config.cayman.max_stack_entries = 512;
		rdev->config.cayman.sx_num_of_sets = 8;
		rdev->config.cayman.sq_num_cf_insts = 2;

		rdev->config.cayman.sc_prim_fifo_size = 0x40;
		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
	WREG32(SRBM_INT_CNTL, 0x1);
	WREG32(SRBM_INT_ACK, 0x1);

	evergreen_fix_pci_max_read_req_size(rdev);

	/* NOTE(review): mc_shared_chmap is read but not used below;
	 * the register read is kept as-is. */
	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	/* derive row size from the MC column configuration, capped at 4KB */
	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
	if (rdev->config.cayman.mem_row_size_in_kb > 4)
		rdev->config.cayman.mem_row_size_in_kb = 4;
	/* XXX use MC settings? */
	rdev->config.cayman.shader_engine_tile_size = 32;
	rdev->config.cayman.num_gpus = 1;
	rdev->config.cayman.multi_gpu_tile_size = 64;

	/* unpack the golden gb_addr_config into the soft config
	 * (overrides some of the defaults assigned above) */
	tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
	rdev->config.cayman.num_tile_pipes = (1 << tmp);
	tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
	tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
	rdev->config.cayman.num_shader_engines = tmp + 1;
	tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
	rdev->config.cayman.num_gpus = tmp + 1;
	tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
	rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
	tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;


	/* setup tiling info dword.  gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.cayman.tile_config = 0;
	switch (rdev->config.cayman.num_tile_pipes) {
	case 1:
	default:
		rdev->config.cayman.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.cayman.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.cayman.tile_config |= (2 << 0);
		break;
	case 8:
		rdev->config.cayman.tile_config |= (3 << 0);
		break;
	}

	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
	if (rdev->flags & RADEON_IS_IGP)
		rdev->config.cayman.tile_config |= 1 << 4;
	else {
		switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
		case 0: /* four banks */
			rdev->config.cayman.tile_config |= 0 << 4;
			break;
		case 1: /* eight banks */
			rdev->config.cayman.tile_config |= 1 << 4;
			break;
		case 2: /* sixteen banks */
		default:
			rdev->config.cayman.tile_config |= 2 << 4;
			break;
		}
	}
	rdev->config.cayman.tile_config |=
		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
	rdev->config.cayman.tile_config |=
		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;

	/* collect the per-SE render backend disable bits (4 bits per SE) */
	tmp = 0;
	for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
		u32 rb_disable_bitmap;

		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
		tmp <<= 4;
		tmp |= rb_disable_bitmap;
	}
	/* enabled rb are just the one not disabled :) */
	disabled_rb_mask = tmp;
	tmp = 0;
	for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
		tmp |= (1 << i);
	/* if all the backends are disabled, fix it up here */
	if ((disabled_rb_mask & tmp) == tmp) {
		for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
			disabled_rb_mask &= ~(1 << i);
	}

	/* count the active SIMDs across all shader engines */
	for (i = 0; i < rdev->config.cayman.max_shader_engines; i++) {
		u32 simd_disable_bitmap;

		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		simd_disable_bitmap = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
		simd_disable_bitmap |= 0xffffffff << rdev->config.cayman.max_simds_per_se;
		tmp <<= 16;
		tmp |= simd_disable_bitmap;
	}
	rdev->config.cayman.active_simds = hweight32(~tmp);

	/* restore broadcast addressing after the per-SE reads above */
	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);

	/* program the address config into every client that needs it */
	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	if (ASIC_IS_DCE6(rdev))
		WREG32(DMIF_ADDR_CALC, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
	WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
	WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
	WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
	WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
	WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);

	if ((rdev->config.cayman.max_backends_per_se == 1) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		if ((disabled_rb_mask & 3) == 2) {
			/* RB1 disabled, RB0 enabled */
			tmp = 0x00000000;
		} else {
			/* RB0 disabled, RB1 enabled */
			tmp = 0x11111111;
		}
	} else {
		tmp = gb_addr_config & NUM_PIPES_MASK;
		tmp = r6xx_remap_render_backend(rdev, tmp,
						rdev->config.cayman.max_backends_per_se *
						rdev->config.cayman.max_shader_engines,
						CAYMAN_MAX_BACKENDS, disabled_rb_mask);
	}
	WREG32(GB_BACKEND_MAP, tmp);

	/* disable texture channel caches beyond the populated count */
	cgts_tcc_disable = 0xffff0000;
	for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
		cgts_tcc_disable &= ~(1 << (16 + i));
	WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);

	/* reprogram the shader complex */
	cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
	for (i = 0; i < 16; i++)
		WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
	WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);

	/* set HW defaults for 3D engine */
	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);

	/* need to be explicitly zero-ed */
	WREG32(VGT_OFFCHIP_LDS_BASE, 0);
	WREG32(SQ_LSTMP_RING_BASE, 0);
	WREG32(SQ_HSTMP_RING_BASE, 0);
	WREG32(SQ_ESTMP_RING_BASE, 0);
	WREG32(SQ_GSTMP_RING_BASE, 0);
	WREG32(SQ_VSTMP_RING_BASE, 0);
	WREG32(SQ_PSTMP_RING_BASE, 0);

	WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);

	/* export buffer sizes are programmed in units of 4 entries, minus one */
	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));


	WREG32(VGT_NUM_INSTANCES, 1);

	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
				  FETCH_FIFO_HIWATER(0x4) |
				  DONE_FIFO_HIWATER(0xe0) |
				  ALU_UPDATE_FIFO_HIWATER(0x8)));

	WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
	WREG32(SQ_CONFIG, (VC_ENABLE |
			   EXPORT_SRC_C |
			   GFX_PRIO(0) |
			   CS1_PRIO(0) |
			   CS2_PRIO(1)));
	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
	       AUTO_INVLD_EN(ES_AND_GS_AUTO));

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	/* clear the CB performance counter selects */
	WREG32(CB_PERF_CTR0_SEL_0, 0);
	WREG32(CB_PERF_CTR0_SEL_1, 0);
	WREG32(CB_PERF_CTR1_SEL_0, 0);
	WREG32(CB_PERF_CTR1_SEL_1, 0);
	WREG32(CB_PERF_CTR2_SEL_0, 0);
	WREG32(CB_PERF_CTR2_SEL_1, 0);
	WREG32(CB_PERF_CTR3_SEL_0, 0);
	WREG32(CB_PERF_CTR3_SEL_1, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	/* read-modify-write with no change: forces a posted write flush */
	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

	udelay(50);

	/* set clockgating golden values on TN */
	if (rdev->family == CHIP_ARUBA) {
		tmp = RREG32_CG(CG_CGTT_LOCAL_0);
		tmp &= ~0x00380000;
		WREG32_CG(CG_CGTT_LOCAL_0, tmp);
		tmp = RREG32_CG(CG_CGTT_LOCAL_1);
		tmp &= ~0x0e000000;
		WREG32_CG(CG_CGTT_LOCAL_1, tmp);
	}
}
1245
1246 /*
1247  * GART
1248  */
/*
 * cayman_pcie_gart_tlb_flush - flush the HDP cache and invalidate the
 * VM context 0 TLB entries (used for the GART mapping).
 *
 * @rdev: radeon_device pointer
 */
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	/* bits 0-7 are the VM contexts0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1);
}
1257
/*
 * cayman_pcie_gart_enable - enable the GART via VM context 0.
 *
 * @rdev: radeon_device pointer
 *
 * Pins the page table in VRAM, programs the L1 TLB and L2 cache,
 * points context 0 at the GART page table, restores the saved page
 * table addresses for contexts 1-7 and enables them, then flushes
 * the TLB.  Returns 0 on success, negative error code on failure.
 */
static int cayman_pcie_gart_enable(struct radeon_device *rdev)
{
	int i, r;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       ENABLE_L1_TLB |
	       ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       ENABLE_ADVANCED_DRIVER_MODEL |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       BANK_SELECT(6) |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	/* setup context0 */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	/* faults in context 0 fall back to the dummy page */
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);

	/* NOTE(review): magic registers cleared here are not named in nid.h */
	WREG32(0x15D4, 0);
	WREG32(0x15D8, 0);
	WREG32(0x15DC, 0);

	/* empty context1-7 */
	/* Assign the pt base to something valid for now; the pts used for
	 * the VMs are determined by the application and setup and assigned
	 * on the fly in the vm part of radeon_gart.c
	 */
	for (i = 1; i < 8; i++) {
		WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2),
			rdev->vm_manager.max_pfn - 1);
		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
		       rdev->vm_manager.saved_table_addr[i]);
	}

	/* enable context1-7 */
	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL2, 4);
	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
				PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);

	cayman_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}
1341
/*
 * cayman_pcie_gart_disable - disable the GART / VM.
 *
 * @rdev: radeon_device pointer
 *
 * Saves the page table base addresses of VM contexts 1-7 (restored by
 * cayman_pcie_gart_enable()), disables all VM contexts, puts the TLB
 * and L2 into passthrough mode, and unpins the page table.
 */
static void cayman_pcie_gart_disable(struct radeon_device *rdev)
{
	unsigned i;

	for (i = 1; i < 8; ++i) {
		rdev->vm_manager.saved_table_addr[i] = RREG32(
			VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2));
	}

	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	radeon_gart_table_vram_unpin(rdev);
}
1368
/*
 * cayman_pcie_gart_fini - tear down the GART.
 *
 * @rdev: radeon_device pointer
 *
 * Disables the GART, then frees the page table and GART state.
 * Order matters: the table must be disabled before it is freed.
 */
static void cayman_pcie_gart_fini(struct radeon_device *rdev)
{
	cayman_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}
1375
1376 void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
1377                               int ring, u32 cp_int_cntl)
1378 {
1379         u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
1380
1381         WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
1382         WREG32(CP_INT_CNTL, cp_int_cntl);
1383 }
1384
1385 /*
1386  * CP.
1387  */
1388 void cayman_fence_ring_emit(struct radeon_device *rdev,
1389                             struct radeon_fence *fence)
1390 {
1391         struct radeon_ring *ring = &rdev->ring[fence->ring];
1392         u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
1393         u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
1394                 PACKET3_SH_ACTION_ENA;
1395
1396         /* flush read cache over gart for this vmid */
1397         radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
1398         radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
1399         radeon_ring_write(ring, 0xFFFFFFFF);
1400         radeon_ring_write(ring, 0);
1401         radeon_ring_write(ring, 10); /* poll interval */
1402         /* EVENT_WRITE_EOP - flush caches, send int */
1403         radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
1404         radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
1405         radeon_ring_write(ring, lower_32_bits(addr));
1406         radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
1407         radeon_ring_write(ring, fence->seq);
1408         radeon_ring_write(ring, 0);
1409 }
1410
/*
 * cayman_ring_ib_execute - schedule an indirect buffer on the gfx ring.
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to execute
 *
 * Switches the CP to DX10/11 mode, optionally saves the next read
 * pointer, emits an INDIRECT_BUFFER packet pointing at the IB, and
 * flushes the read caches for the IB's VMID.
 */
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	/* vm_id 0 is the kernel/GART context */
	unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
	u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
		PACKET3_SH_ACTION_ENA;

	/* set to DX10/11 mode */
	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(ring, 1);

	if (ring->rptr_save_reg) {
		/* 3 dwords above + 4 dwords for this write + 8 below */
		uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, ((ring->rptr_save_reg -
					  PACKET3_SET_CONFIG_REG_START) >> 2));
		radeon_ring_write(ring, next_rptr);
	}

	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(ring, ib->length_dw | (vm_id << 24));

	/* flush read cache over gart for this vmid */
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
	radeon_ring_write(ring, 0xFFFFFFFF);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, (vm_id << 24) | 10); /* poll interval */
}
1446
1447 static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
1448 {
1449         if (enable)
1450                 WREG32(CP_ME_CNTL, 0);
1451         else {
1452                 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
1453                         radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1454                 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
1455                 WREG32(SCRATCH_UMSK, 0);
1456                 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1457         }
1458 }
1459
1460 u32 cayman_gfx_get_rptr(struct radeon_device *rdev,
1461                         struct radeon_ring *ring)
1462 {
1463         u32 rptr;
1464
1465         if (rdev->wb.enabled)
1466                 rptr = rdev->wb.wb[ring->rptr_offs/4];
1467         else {
1468                 if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
1469                         rptr = RREG32(CP_RB0_RPTR);
1470                 else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
1471                         rptr = RREG32(CP_RB1_RPTR);
1472                 else
1473                         rptr = RREG32(CP_RB2_RPTR);
1474         }
1475
1476         return rptr;
1477 }
1478
1479 u32 cayman_gfx_get_wptr(struct radeon_device *rdev,
1480                         struct radeon_ring *ring)
1481 {
1482         u32 wptr;
1483
1484         if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
1485                 wptr = RREG32(CP_RB0_WPTR);
1486         else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
1487                 wptr = RREG32(CP_RB1_WPTR);
1488         else
1489                 wptr = RREG32(CP_RB2_WPTR);
1490
1491         return wptr;
1492 }
1493
1494 void cayman_gfx_set_wptr(struct radeon_device *rdev,
1495                          struct radeon_ring *ring)
1496 {
1497         if (ring->idx == RADEON_RING_TYPE_GFX_INDEX) {
1498                 WREG32(CP_RB0_WPTR, ring->wptr);
1499                 (void)RREG32(CP_RB0_WPTR);
1500         } else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX) {
1501                 WREG32(CP_RB1_WPTR, ring->wptr);
1502                 (void)RREG32(CP_RB1_WPTR);
1503         } else {
1504                 WREG32(CP_RB2_WPTR, ring->wptr);
1505                 (void)RREG32(CP_RB2_WPTR);
1506         }
1507 }
1508
/*
 * Load the PFP and ME microcode into the command processor.
 *
 * Halts the CP first, then streams each big-endian firmware image
 * word by word into the corresponding ucode RAM, and finally resets
 * the ucode address/read/write pointers.
 * Returns 0 on success, -EINVAL if either firmware image is missing.
 */
static int cayman_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	/* stop the CP before replacing its microcode */
	cayman_cp_enable(rdev, false);

	/* PFP (pre-fetch parser) firmware */
	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	/* ME (micro engine) firmware */
	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	/* reset address pointers so execution starts at word 0 */
	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
1535
/*
 * Initialize the gfx CP and emit the initial GPU state.
 *
 * Submits the ME_INITIALIZE packet, enables the CP, then writes the
 * golden clear-state context (cayman_default_state) plus a handful of
 * fixed-function register packets to the gfx ring.  The exact packet
 * stream below must not be reordered; it mirrors what the CP expects
 * at init time.  Returns 0 on success or the ring-lock error.
 */
static int cayman_cp_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r, i;

	/* ME_INITIALIZE: header + 6 payload dwords */
	r = radeon_ring_lock(rdev, ring, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(ring, 0x1);
	radeon_ring_write(ring, 0x0);
	radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_unlock_commit(rdev, ring, false);

	cayman_cp_enable(rdev, true);

	/* clear state + trailing fixed packets (19 extra dwords) */
	r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < cayman_default_size; i++)
		radeon_ring_write(ring, cayman_default_state[i]);

	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(ring, 0);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(ring, 0xc0026f00);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);

	/* Clear consts */
	radeon_ring_write(ring, 0xc0036f00);
	radeon_ring_write(ring, 0x00000bc4);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);

	radeon_ring_write(ring, 0xc0026900);
	radeon_ring_write(ring, 0x00000316);
	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(ring, 0x00000010); /*  */

	radeon_ring_unlock_commit(rdev, ring, false);

	/* XXX init other rings */

	return 0;
}
1601
1602 static void cayman_cp_fini(struct radeon_device *rdev)
1603 {
1604         struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1605         cayman_cp_enable(rdev, false);
1606         radeon_ring_fini(rdev, ring);
1607         radeon_scratch_free(rdev, ring->rptr_save_reg);
1608 }
1609
/*
 * Bring all three CP ring buffers (gfx + two compute) back up.
 *
 * Sequence: soft-reset the CP and dependent gfx blocks, program the
 * per-ring CNTL/rptr-writeback/base registers, zero the read/write
 * pointers, then restart the rings via cayman_cp_start() and run a
 * ring test on cp0.  The ordering of register writes below is part of
 * the hardware bring-up contract and must be preserved.
 * Returns 0 on success or the ring-test error.
 */
static int cayman_cp_resume(struct radeon_device *rdev)
{
	static const int ridx[] = {
		RADEON_RING_TYPE_GFX_INDEX,
		CAYMAN_RING_TYPE_CP1_INDEX,
		CAYMAN_RING_TYPE_CP2_INDEX
	};
	/* per-ring register sets, indexed in lockstep with ridx[] */
	static const unsigned cp_rb_cntl[] = {
		CP_RB0_CNTL,
		CP_RB1_CNTL,
		CP_RB2_CNTL,
	};
	static const unsigned cp_rb_rptr_addr[] = {
		CP_RB0_RPTR_ADDR,
		CP_RB1_RPTR_ADDR,
		CP_RB2_RPTR_ADDR
	};
	static const unsigned cp_rb_rptr_addr_hi[] = {
		CP_RB0_RPTR_ADDR_HI,
		CP_RB1_RPTR_ADDR_HI,
		CP_RB2_RPTR_ADDR_HI
	};
	static const unsigned cp_rb_base[] = {
		CP_RB0_BASE,
		CP_RB1_BASE,
		CP_RB2_BASE
	};
	static const unsigned cp_rb_rptr[] = {
		CP_RB0_RPTR,
		CP_RB1_RPTR,
		CP_RB2_RPTR
	};
	static const unsigned cp_rb_wptr[] = {
		CP_RB0_WPTR,
		CP_RB1_WPTR,
		CP_RB2_WPTR
	};
	struct radeon_ring *ring;
	int i, r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SPI |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	WREG32(CP_DEBUG, (1 << 27));

	/* set the wb address whether it's enabled or not */
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
	WREG32(SCRATCH_UMSK, 0xff);

	for (i = 0; i < 3; ++i) {
		uint32_t rb_cntl;
		uint64_t addr;

		/* Set ring buffer size */
		ring = &rdev->ring[ridx[i]];
		rb_cntl = order_base_2(ring->ring_size / 8);
		rb_cntl |= order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8;
#ifdef __BIG_ENDIAN
		rb_cntl |= BUF_SWAP_32BIT;
#endif
		WREG32(cp_rb_cntl[i], rb_cntl);

		/* set the wb address whether it's enabled or not */
		addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET;
		WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC);
		WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF);
	}

	/* set the rb base addr, this causes an internal reset of ALL rings */
	for (i = 0; i < 3; ++i) {
		ring = &rdev->ring[ridx[i]];
		WREG32(cp_rb_base[i], ring->gpu_addr >> 8);
	}

	for (i = 0; i < 3; ++i) {
		/* Initialize the ring buffer's read and write pointers */
		ring = &rdev->ring[ridx[i]];
		WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);

		ring->wptr = 0;
		WREG32(cp_rb_rptr[i], 0);
		WREG32(cp_rb_wptr[i], ring->wptr);

		mdelay(1);
		WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
	}

	/* start the rings */
	cayman_cp_start(rdev);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
	/* cp1/cp2 stay not-ready until tested, which only happens for cp0 */
	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
	/* this only test cp0 */
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
		return r;
	}

	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}
1731
/**
 * cayman_gpu_check_soft_reset - check which blocks are busy
 *
 * @rdev: radeon_device pointer
 *
 * Reads the GRBM/SRBM/DMA/VM status registers and returns a mask of
 * RADEON_RESET_* flags for the blocks that appear hung or busy and
 * would need a soft reset.  An MC-busy indication is deliberately
 * cleared from the mask (see comment at the bottom) since the memory
 * controller is most likely just busy, not hung.
 */
u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
{
	u32 reset_mask = 0;
	u32 tmp;

	/* GRBM_STATUS */
	tmp = RREG32(GRBM_STATUS);
	if (tmp & (PA_BUSY | SC_BUSY |
		   SH_BUSY | SX_BUSY |
		   TA_BUSY | VGT_BUSY |
		   DB_BUSY | CB_BUSY |
		   GDS_BUSY | SPI_BUSY |
		   IA_BUSY | IA_BUSY_NO_DMA))
		reset_mask |= RADEON_RESET_GFX;

	if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
		   CP_BUSY | CP_COHERENCY_BUSY))
		reset_mask |= RADEON_RESET_CP;

	if (tmp & GRBM_EE_BUSY)
		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;

	/* DMA_STATUS_REG 0 */
	tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA;

	/* DMA_STATUS_REG 1 */
	tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA1;

	/* SRBM_STATUS2 */
	tmp = RREG32(SRBM_STATUS2);
	if (tmp & DMA_BUSY)
		reset_mask |= RADEON_RESET_DMA;

	if (tmp & DMA1_BUSY)
		reset_mask |= RADEON_RESET_DMA1;

	/* SRBM_STATUS */
	tmp = RREG32(SRBM_STATUS);
	if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
		reset_mask |= RADEON_RESET_RLC;

	if (tmp & IH_BUSY)
		reset_mask |= RADEON_RESET_IH;

	if (tmp & SEM_BUSY)
		reset_mask |= RADEON_RESET_SEM;

	if (tmp & GRBM_RQ_PENDING)
		reset_mask |= RADEON_RESET_GRBM;

	if (tmp & VMC_BUSY)
		reset_mask |= RADEON_RESET_VMC;

	if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
		   MCC_BUSY | MCD_BUSY))
		reset_mask |= RADEON_RESET_MC;

	if (evergreen_is_display_hung(rdev))
		reset_mask |= RADEON_RESET_DISPLAY;

	/* VM_L2_STATUS */
	tmp = RREG32(VM_L2_STATUS);
	if (tmp & L2_BUSY)
		reset_mask |= RADEON_RESET_VMC;

	/* Skip MC reset as it's mostly likely not hung, just busy */
	if (reset_mask & RADEON_RESET_MC) {
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~RADEON_RESET_MC;
	}

	return reset_mask;
}
1809
/**
 * cayman_gpu_soft_reset - soft reset the blocks named in @reset_mask
 *
 * @rdev: radeon_device pointer
 * @reset_mask: mask of RADEON_RESET_* flags selecting blocks to reset
 *
 * Dumps VM fault state, halts the CP and DMA engines, stops the MC,
 * then pulses the selected bits in GRBM_SOFT_RESET / SRBM_SOFT_RESET
 * (assert, delay, de-assert) and resumes the MC.  The register
 * readbacks after each write flush the posted writes; the udelay/mdelay
 * calls give the hardware time to settle and must be kept.
 */
static void cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
	struct evergreen_mc_save save;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if (reset_mask == 0)
		return;

	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	evergreen_print_gpu_status_regs(rdev);
	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(0x14F8));
	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(0x14D8));
	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(0x14FC));
	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(0x14DC));

	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

	if (reset_mask & RADEON_RESET_DMA) {
		/* dma0 */
		tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
	}

	if (reset_mask & RADEON_RESET_DMA1) {
		/* dma1 */
		tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
	}

	udelay(50);

	/* stop the MC before resetting; restored via evergreen_mc_resume() */
	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}

	/* translate the reset mask into GRBM/SRBM soft-reset bits */
	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
		grbm_soft_reset = SOFT_RESET_CB |
			SOFT_RESET_DB |
			SOFT_RESET_GDS |
			SOFT_RESET_PA |
			SOFT_RESET_SC |
			SOFT_RESET_SPI |
			SOFT_RESET_SH |
			SOFT_RESET_SX |
			SOFT_RESET_TC |
			SOFT_RESET_TA |
			SOFT_RESET_VGT |
			SOFT_RESET_IA;
	}

	if (reset_mask & RADEON_RESET_CP) {
		grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;

		srbm_soft_reset |= SOFT_RESET_GRBM;
	}

	if (reset_mask & RADEON_RESET_DMA)
		srbm_soft_reset |= SOFT_RESET_DMA;

	if (reset_mask & RADEON_RESET_DMA1)
		srbm_soft_reset |= SOFT_RESET_DMA1;

	if (reset_mask & RADEON_RESET_DISPLAY)
		srbm_soft_reset |= SOFT_RESET_DC;

	if (reset_mask & RADEON_RESET_RLC)
		srbm_soft_reset |= SOFT_RESET_RLC;

	if (reset_mask & RADEON_RESET_SEM)
		srbm_soft_reset |= SOFT_RESET_SEM;

	if (reset_mask & RADEON_RESET_IH)
		srbm_soft_reset |= SOFT_RESET_IH;

	if (reset_mask & RADEON_RESET_GRBM)
		srbm_soft_reset |= SOFT_RESET_GRBM;

	if (reset_mask & RADEON_RESET_VMC)
		srbm_soft_reset |= SOFT_RESET_VMC;

	/* MC soft reset only applies to discrete parts, not IGPs */
	if (!(rdev->flags & RADEON_IS_IGP)) {
		if (reset_mask & RADEON_RESET_MC)
			srbm_soft_reset |= SOFT_RESET_MC;
	}

	if (grbm_soft_reset) {
		tmp = RREG32(GRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(GRBM_SOFT_RESET, tmp);
		tmp = RREG32(GRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32(GRBM_SOFT_RESET, tmp);
		tmp = RREG32(GRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);
	}

	/* Wait a little for things to settle down */
	udelay(50);

	evergreen_mc_resume(rdev, &save);
	udelay(50);

	evergreen_print_gpu_status_regs(rdev);
}
1941
/**
 * cayman_asic_reset - reset the GPU
 *
 * @rdev: radeon_device pointer
 * @hard: true to skip the soft reset and go straight to a PCI
 *        config reset
 *
 * Attempts a soft reset of the blocks reported busy by
 * cayman_gpu_check_soft_reset(); if blocks are still busy afterwards,
 * falls back to a full PCI config reset.  The BIOS scratch "engine
 * hung" flag is set for the duration of the reset.  Always returns 0.
 */
int cayman_asic_reset(struct radeon_device *rdev, bool hard)
{
	u32 reset_mask;

	if (hard) {
		evergreen_gpu_pci_config_reset(rdev);
		return 0;
	}

	reset_mask = cayman_gpu_check_soft_reset(rdev);

	if (reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, true);

	cayman_gpu_soft_reset(rdev, reset_mask);

	/* re-check: fall back to a PCI config reset if still hung */
	reset_mask = cayman_gpu_check_soft_reset(rdev);

	if (reset_mask)
		evergreen_gpu_pci_config_reset(rdev);

	r600_set_bios_scratch_engine_hung(rdev, false);

	return 0;
}
1967
1968 /**
1969  * cayman_gfx_is_lockup - Check if the GFX engine is locked up
1970  *
1971  * @rdev: radeon_device pointer
1972  * @ring: radeon_ring structure holding ring information
1973  *
1974  * Check if the GFX engine is locked up.
1975  * Returns true if the engine appears to be locked up, false if not.
1976  */
1977 bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1978 {
1979         u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
1980
1981         if (!(reset_mask & (RADEON_RESET_GFX |
1982                             RADEON_RESET_COMPUTE |
1983                             RADEON_RESET_CP))) {
1984                 radeon_ring_lockup_update(rdev, ring);
1985                 return false;
1986         }
1987         return radeon_ring_test_lockup(rdev, ring);
1988 }
1989
/*
 * One-time UVD setup during driver init.  On failure UVD support is
 * disabled entirely so the later start/resume paths become no-ops.
 */
static void cayman_uvd_init(struct radeon_device *rdev)
{
	int r;

	if (!rdev->has_uvd)
		return;

	r = radeon_uvd_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
		/*
		 * At this point rdev->uvd.vcpu_bo is NULL which trickles down
		 * to early fails uvd_v2_2_resume() and thus nothing happens
		 * there. So it is pointless to try to go through that code
		 * hence why we disable uvd here.
		 */
		rdev->has_uvd = 0;
		return;
	}
	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
}
2012
/*
 * Start UVD during (re)initialization: resume the engine and start its
 * fence ring.  On any failure the UVD ring size is zeroed, which makes
 * cayman_uvd_resume() skip UVD entirely.
 */
static void cayman_uvd_start(struct radeon_device *rdev)
{
	int r;

	if (!rdev->has_uvd)
		return;

	r = uvd_v2_2_resume(rdev);
	if (r) {
		dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
		goto error;
	}
	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
		goto error;
	}
	return;

error:
	/* mark UVD as unavailable for this init cycle */
	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
}
2035
/*
 * Initialize the UVD ring and engine.  Skipped when UVD is absent or
 * cayman_uvd_start() failed (ring_size zeroed there).  Errors are
 * logged but not propagated; the rest of the ASIC keeps running
 * without UVD.
 */
static void cayman_uvd_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int r;

	if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
		return;

	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
	if (r) {
		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
		return;
	}
	r = uvd_v1_0_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
		return;
	}
}
2056
/*
 * One-time VCE setup during driver init (has_vce is only set for
 * CHIP_ARUBA).  On failure VCE support is disabled entirely so the
 * later start/resume paths become no-ops.
 */
static void cayman_vce_init(struct radeon_device *rdev)
{
	int r;

	/* Only set for CHIP_ARUBA */
	if (!rdev->has_vce)
		return;

	r = radeon_vce_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed VCE (%d) init.\n", r);
		/*
		 * At this point rdev->vce.vcpu_bo is NULL which trickles down
		 * to early fails cayman_vce_start() and thus nothing happens
		 * there. So it is pointless to try to go through that code
		 * hence why we disable vce here.
		 */
		rdev->has_vce = 0;
		return;
	}
	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE1_INDEX], 4096);
	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE2_INDEX], 4096);
}
2082
/*
 * Start VCE during (re)initialization: resume the engine and start the
 * fence rings for both VCE rings.  On any failure both VCE ring sizes
 * are zeroed, which makes cayman_vce_resume() skip VCE entirely.
 */
static void cayman_vce_start(struct radeon_device *rdev)
{
	int r;

	if (!rdev->has_vce)
		return;

	r = radeon_vce_resume(rdev);
	if (r) {
		dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
		goto error;
	}
	r = vce_v1_0_resume(rdev);
	if (r) {
		dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
		goto error;
	}
	r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing VCE1 fences (%d).\n", r);
		goto error;
	}
	r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE2_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing VCE2 fences (%d).\n", r);
		goto error;
	}
	return;

error:
	/* mark both VCE rings as unavailable for this init cycle */
	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
}
2116
2117 static void cayman_vce_resume(struct radeon_device *rdev)
2118 {
2119         struct radeon_ring *ring;
2120         int r;
2121
2122         if (!rdev->has_vce || !rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size)
2123                 return;
2124
2125         ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
2126         r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
2127         if (r) {
2128                 dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
2129                 return;
2130         }
2131         ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
2132         r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
2133         if (r) {
2134                 dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
2135                 return;
2136         }
2137         r = vce_v1_0_init(rdev);
2138         if (r) {
2139                 dev_err(rdev->dev, "failed initializing VCE (%d).\n", r);
2140                 return;
2141         }
2142 }
2143
/*
 * Bring the whole ASIC up: link/clock setup, MC programming, GART,
 * gfx init, RLC (IGP only), writeback, fence rings, IRQs, then all
 * CP/DMA/UVD/VCE rings, and finally the IB pool, VM manager and audio.
 * Called from both init and resume paths; the step ordering below is
 * load-bearing.  Returns 0 on success or the first fatal error.
 * UVD/VCE failures are non-fatal (handled inside their helpers).
 */
static int cayman_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* enable pcie gen2 link */
	evergreen_pcie_gen2_enable(rdev);
	/* enable aspm */
	evergreen_program_aspm(rdev);

	/* scratch needs to be initialized before MC */
	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	evergreen_mc_program(rdev);

	/* MC ucode is only needed on discrete parts without DPM running */
	if (!(rdev->flags & RADEON_IS_IGP) && !rdev->pm.dpm_enabled) {
		r = ni_mc_load_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = cayman_pcie_gart_enable(rdev);
	if (r)
		return r;
	cayman_gpu_init(rdev);

	/* allocate rlc buffers */
	if (rdev->flags & RADEON_IS_IGP) {
		rdev->rlc.reg_list = tn_rlc_save_restore_register_list;
		rdev->rlc.reg_list_size =
			(u32)ARRAY_SIZE(tn_rlc_save_restore_register_list);
		rdev->rlc.cs_data = cayman_cs_data;
		r = sumo_rlc_init(rdev);
		if (r) {
			DRM_ERROR("Failed to init rlc BOs!\n");
			return r;
		}
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* non-fatal: UVD/VCE disable themselves on failure */
	cayman_uvd_start(rdev);
	cayman_vce_start(rdev);

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	evergreen_irq_set(rdev);

	/* gfx ring */
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	if (r)
		return r;

	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	if (r)
		return r;

	r = cayman_cp_load_microcode(rdev);
	if (r)
		return r;
	r = cayman_cp_resume(rdev);
	if (r)
		return r;

	r = cayman_dma_resume(rdev);
	if (r)
		return r;

	/* non-fatal: errors are handled inside the helpers */
	cayman_uvd_resume(rdev);
	cayman_vce_resume(rdev);

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = radeon_vm_manager_init(rdev);
	if (r) {
		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
		return r;
	}

	r = radeon_audio_init(rdev);
	if (r)
		return r;

	return 0;
}
2289
/*
 * Resume the ASIC after suspend: re-post the card via the ATOM BIOS,
 * restore the golden registers, resume power management when DPM is
 * in use, and re-run the full startup sequence.  accel_working is
 * cleared again if startup fails.
 */
int cayman_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
	 * posting will perform necessary task to bring back GPU into good
	 * shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	/* init golden registers */
	ni_init_golden_registers(rdev);

	if (rdev->pm.pm_method == PM_METHOD_DPM)
		radeon_pm_resume(rdev);

	/* set before startup so IRQ/fence paths see acceleration as up */
	rdev->accel_working = true;
	r = cayman_startup(rdev);
	if (r) {
		DRM_ERROR("cayman startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}
	return r;
}
2316
/*
 * Suspend the ASIC: quiesce PM, audio and the VM manager, halt the CP
 * and DMA engines, shut down UVD, disable interrupts, writeback and
 * the GART.  Roughly the reverse of cayman_startup().  Always
 * returns 0.
 */
int cayman_suspend(struct radeon_device *rdev)
{
	radeon_pm_suspend(rdev);
	radeon_audio_fini(rdev);
	radeon_vm_manager_fini(rdev);
	cayman_cp_enable(rdev, false);
	cayman_dma_stop(rdev);
	if (rdev->has_uvd) {
		uvd_v1_0_fini(rdev);
		radeon_uvd_suspend(rdev);
	}
	evergreen_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	cayman_pcie_gart_disable(rdev);
	return 0;
}
2333
/* The plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does pretty much
 * nothing more than call asic-specific functions. This should
 * also allow us to remove a bunch of callback functions
 * like vram_info.
 */
int cayman_init(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;

	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* init golden registers */
	ni_init_golden_registers(rdev);
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	/* Load microcode if not already present.  IGP parts do not
	 * require MC firmware, so mc_fw is only checked on discrete GPUs.
	 */
	if (rdev->flags & RADEON_IS_IGP) {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
	} else {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
	}

	/* Initialize power management */
	radeon_pm_init(rdev);

	/* 1MB GFX ring plus two 64KB async DMA rings */
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 64 * 1024);

	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 64 * 1024);

	cayman_uvd_init(rdev);
	cayman_vce_init(rdev);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = cayman_startup(rdev);
	if (r) {
		/* Startup failure is not fatal for init: tear down the
		 * acceleration state and continue with accel disabled.
		 */
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		cayman_cp_fini(rdev);
		cayman_dma_fini(rdev);
		r600_irq_fini(rdev);
		if (rdev->flags & RADEON_IS_IGP)
			sumo_rlc_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_vm_manager_fini(rdev);
		radeon_irq_kms_fini(rdev);
		cayman_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	/* Don't start up if the MC ucode is missing.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 *
	 * We can skip this check for TN, because there is no MC
	 * ucode.
	 */
	if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
		DRM_ERROR("radeon: MC ucode required for NI+.\n");
		return -EINVAL;
	}

	return 0;
}
2462
/* Tear down everything cayman_init()/cayman_startup() set up, roughly in
 * reverse order of initialization, then release the BIOS copy.
 */
void cayman_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	cayman_cp_fini(rdev);
	cayman_dma_fini(rdev);
	r600_irq_fini(rdev);
	/* the sumo RLC state is only set up on IGP parts */
	if (rdev->flags & RADEON_IS_IGP)
		sumo_rlc_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_vm_manager_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	uvd_v1_0_fini(rdev);
	radeon_uvd_fini(rdev);
	if (rdev->has_vce)
		radeon_vce_fini(rdev);
	cayman_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}
2488
2489 /*
2490  * vm
2491  */
2492 int cayman_vm_init(struct radeon_device *rdev)
2493 {
2494         /* number of VMs */
2495         rdev->vm_manager.nvm = 8;
2496         /* base offset of vram pages */
2497         if (rdev->flags & RADEON_IS_IGP) {
2498                 u64 tmp = RREG32(FUS_MC_VM_FB_OFFSET);
2499                 tmp <<= 22;
2500                 rdev->vm_manager.vram_base_offset = tmp;
2501         } else
2502                 rdev->vm_manager.vram_base_offset = 0;
2503         return 0;
2504 }
2505
void cayman_vm_fini(struct radeon_device *rdev)
{
	/* nothing to do: cayman_vm_init() allocates no resources */
}
2509
2510 /**
2511  * cayman_vm_decode_fault - print human readable fault info
2512  *
2513  * @rdev: radeon_device pointer
2514  * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
2515  * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
2516  *
2517  * Print human readable fault information (cayman/TN).
2518  */
2519 void cayman_vm_decode_fault(struct radeon_device *rdev,
2520                             u32 status, u32 addr)
2521 {
2522         u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
2523         u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
2524         u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
2525         char *block;
2526
2527         switch (mc_id) {
2528         case 32:
2529         case 16:
2530         case 96:
2531         case 80:
2532         case 160:
2533         case 144:
2534         case 224:
2535         case 208:
2536                 block = "CB";
2537                 break;
2538         case 33:
2539         case 17:
2540         case 97:
2541         case 81:
2542         case 161:
2543         case 145:
2544         case 225:
2545         case 209:
2546                 block = "CB_FMASK";
2547                 break;
2548         case 34:
2549         case 18:
2550         case 98:
2551         case 82:
2552         case 162:
2553         case 146:
2554         case 226:
2555         case 210:
2556                 block = "CB_CMASK";
2557                 break;
2558         case 35:
2559         case 19:
2560         case 99:
2561         case 83:
2562         case 163:
2563         case 147:
2564         case 227:
2565         case 211:
2566                 block = "CB_IMMED";
2567                 break;
2568         case 36:
2569         case 20:
2570         case 100:
2571         case 84:
2572         case 164:
2573         case 148:
2574         case 228:
2575         case 212:
2576                 block = "DB";
2577                 break;
2578         case 37:
2579         case 21:
2580         case 101:
2581         case 85:
2582         case 165:
2583         case 149:
2584         case 229:
2585         case 213:
2586                 block = "DB_HTILE";
2587                 break;
2588         case 38:
2589         case 22:
2590         case 102:
2591         case 86:
2592         case 166:
2593         case 150:
2594         case 230:
2595         case 214:
2596                 block = "SX";
2597                 break;
2598         case 39:
2599         case 23:
2600         case 103:
2601         case 87:
2602         case 167:
2603         case 151:
2604         case 231:
2605         case 215:
2606                 block = "DB_STEN";
2607                 break;
2608         case 40:
2609         case 24:
2610         case 104:
2611         case 88:
2612         case 232:
2613         case 216:
2614         case 168:
2615         case 152:
2616                 block = "TC_TFETCH";
2617                 break;
2618         case 41:
2619         case 25:
2620         case 105:
2621         case 89:
2622         case 233:
2623         case 217:
2624         case 169:
2625         case 153:
2626                 block = "TC_VFETCH";
2627                 break;
2628         case 42:
2629         case 26:
2630         case 106:
2631         case 90:
2632         case 234:
2633         case 218:
2634         case 170:
2635         case 154:
2636                 block = "VC";
2637                 break;
2638         case 112:
2639                 block = "CP";
2640                 break;
2641         case 113:
2642         case 114:
2643                 block = "SH";
2644                 break;
2645         case 115:
2646                 block = "VGT";
2647                 break;
2648         case 178:
2649                 block = "IH";
2650                 break;
2651         case 51:
2652                 block = "RLC";
2653                 break;
2654         case 55:
2655                 block = "DMA";
2656                 break;
2657         case 56:
2658                 block = "HDP";
2659                 break;
2660         default:
2661                 block = "unknown";
2662                 break;
2663         }
2664
2665         printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
2666                protections, vmid, addr,
2667                (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
2668                block, mc_id);
2669 }
2670
/**
 * cayman_vm_flush - vm flush using the CP
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 * @vm_id: VM context id to flush (0-7)
 * @pd_addr: physical address of the page directory
 *
 * Update the page table base and flush the VM TLB
 * using the CP (cayman-si).
 */
void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
		     unsigned vm_id, uint64_t pd_addr)
{
	/* point the per-context page table base at pd_addr (page frame,
	 * hence the >> 12)
	 */
	radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2), 0));
	radeon_ring_write(ring, pd_addr >> 12);

	/* flush hdp cache */
	radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
	radeon_ring_write(ring, 0x1);

	/* bits 0-7 are the VM contexts0-7 */
	radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
	radeon_ring_write(ring, 1 << vm_id);

	/* wait for the invalidate to complete */
	radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
				 WAIT_REG_MEM_ENGINE(0))); /* me */
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0); /* ref */
	radeon_ring_write(ring, 0); /* mask */
	radeon_ring_write(ring, 0x20); /* poll interval */

	/* sync PFP to ME, otherwise we might get invalid PFP reads */
	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
	radeon_ring_write(ring, 0x0);
}
2707
2708 int tn_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
2709 {
2710         struct atom_clock_dividers dividers;
2711         int r, i;
2712
2713         r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
2714                                            ecclk, false, &dividers);
2715         if (r)
2716                 return r;
2717
2718         for (i = 0; i < 100; i++) {
2719                 if (RREG32(CG_ECLK_STATUS) & ECLK_STATUS)
2720                         break;
2721                 mdelay(10);
2722         }
2723         if (i == 100)
2724                 return -ETIMEDOUT;
2725
2726         WREG32_P(CG_ECLK_CNTL, dividers.post_div, ~(ECLK_DIR_CNTL_EN|ECLK_DIVIDER_MASK));
2727
2728         for (i = 0; i < 100; i++) {
2729                 if (RREG32(CG_ECLK_STATUS) & ECLK_STATUS)
2730                         break;
2731                 mdelay(10);
2732         }
2733         if (i == 100)
2734                 return -ETIMEDOUT;
2735
2736         return 0;
2737 }