common: Drop linux/delay.h from common header
[oweals/u-boot.git] / drivers / ddr / marvell / a38x / mv_ddr_plat.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) Marvell International Ltd. and its affiliates
4  */
5
6 #include "ddr3_init.h"
7 #include "mv_ddr_training_db.h"
8 #include "mv_ddr_regs.h"
9 #include "mv_ddr_sys_env_lib.h"
10 #include <linux/delay.h>
11
12 #define DDR_INTERFACES_NUM              1
13 #define DDR_INTERFACE_OCTETS_NUM        5
14
15 /*
16  * 1. L2 filter should be set at binary header to 0xD000000,
17  *    to avoid conflict with internal register IO.
18  * 2. U-Boot modifies internal registers base to 0xf100000,
19  *    and than should update L2 filter accordingly to 0xf000000 (3.75 GB)
20  */
21 #define L2_FILTER_FOR_MAX_MEMORY_SIZE   0xC0000000 /* temporary limit l2 filter to 3gb (LSP issue) */
22 #define ADDRESS_FILTERING_END_REGISTER  0x8c04
23
24 #define DYNAMIC_CS_SIZE_CONFIG
25 #define DISABLE_L2_FILTERING_DURING_DDR_TRAINING
26
27 /* Termal Sensor Registers */
28 #define TSEN_CONTROL_LSB_REG            0xE4070
29 #define TSEN_CONTROL_LSB_TC_TRIM_OFFSET 0
30 #define TSEN_CONTROL_LSB_TC_TRIM_MASK   (0x7 << TSEN_CONTROL_LSB_TC_TRIM_OFFSET)
31 #define TSEN_CONTROL_MSB_REG            0xE4074
32 #define TSEN_CONTROL_MSB_RST_OFFSET     8
33 #define TSEN_CONTROL_MSB_RST_MASK       (0x1 << TSEN_CONTROL_MSB_RST_OFFSET)
34 #define TSEN_STATUS_REG                 0xe4078
35 #define TSEN_STATUS_READOUT_VALID_OFFSET        10
36 #define TSEN_STATUS_READOUT_VALID_MASK  (0x1 <<                         \
37                                          TSEN_STATUS_READOUT_VALID_OFFSET)
38 #define TSEN_STATUS_TEMP_OUT_OFFSET     0
39 #define TSEN_STATUS_TEMP_OUT_MASK       (0x3ff << TSEN_STATUS_TEMP_OUT_OFFSET)
40
/*
 * DLB (Dunit Link Buffer) register/value configuration for DDR3;
 * the list is terminated by a {0, 0} sentinel entry.
 */
static struct dlb_config ddr3_dlb_config_table[] = {
	{DLB_CTRL_REG, 0x2000005c},
	{DLB_BUS_OPT_WT_REG, 0x00880000},
	{DLB_AGING_REG, 0x0f7f007f},
	{DLB_EVICTION_CTRL_REG, 0x0000129f},
	{DLB_EVICTION_TIMERS_REG, 0x00ff0000},
	{DLB_WTS_DIFF_CS_REG, 0x04030802},
	{DLB_WTS_DIFF_BG_REG, 0x00000a02},
	{DLB_WTS_SAME_BG_REG, 0x09000a01},
	{DLB_WTS_CMDS_REG, 0x00020005},
	{DLB_WTS_ATTR_PRIO_REG, 0x00060f10},
	{DLB_QUEUE_MAP_REG, 0x00000543},
	{DLB_SPLIT_REG, 0x00000000},
	{DLB_USER_CMD_REG, 0x00000000},
	{0x0, 0x0}
};
57
58 static struct dlb_config *sys_env_dlb_config_ptr_get(void)
59 {
60         return &ddr3_dlb_config_table[0];
61 }
62
/*
 * Per-frequency bus-width parameter, indexed by enum mv_ddr_freq.
 * A value of 0xff would mark the frequency as unsupported (see
 * ddr3_tip_a38x_get_freq_config).
 */
static u8 a38x_bw_per_freq[MV_DDR_FREQ_LAST] = {
	0x3,			/* MV_DDR_FREQ_100 */
	0x4,			/* MV_DDR_FREQ_400 */
	0x4,			/* MV_DDR_FREQ_533 */
	0x5,			/* MV_DDR_FREQ_667 */
	0x5,			/* MV_DDR_FREQ_800 */
	0x5,			/* MV_DDR_FREQ_933 */
	0x5,			/* MV_DDR_FREQ_1066 */
	0x3,			/* MV_DDR_FREQ_311 */
	0x3,			/* MV_DDR_FREQ_333 */
	0x4,			/* MV_DDR_FREQ_467 */
	0x5,			/* MV_DDR_FREQ_850 */
	0x5,			/* MV_DDR_FREQ_600 */
	0x3,			/* MV_DDR_FREQ_300 */
	0x5,			/* MV_DDR_FREQ_900 */
	0x3,			/* MV_DDR_FREQ_360 */
	0x5			/* MV_DDR_FREQ_1000 */
};
81
/*
 * Per-frequency rate parameter, indexed by enum mv_ddr_freq; consumed
 * together with a38x_bw_per_freq by ddr3_tip_a38x_get_freq_config.
 */
static u8 a38x_rate_per_freq[MV_DDR_FREQ_LAST] = {
	0x1,			/* MV_DDR_FREQ_100 */
	0x2,			/* MV_DDR_FREQ_400 */
	0x2,			/* MV_DDR_FREQ_533 */
	0x2,			/* MV_DDR_FREQ_667 */
	0x2,			/* MV_DDR_FREQ_800 */
	0x3,			/* MV_DDR_FREQ_933 */
	0x3,			/* MV_DDR_FREQ_1066 */
	0x1,			/* MV_DDR_FREQ_311 */
	0x1,			/* MV_DDR_FREQ_333 */
	0x2,			/* MV_DDR_FREQ_467 */
	0x2,			/* MV_DDR_FREQ_850 */
	0x2,			/* MV_DDR_FREQ_600 */
	0x1,			/* MV_DDR_FREQ_300 */
	0x2,			/* MV_DDR_FREQ_900 */
	0x1,			/* MV_DDR_FREQ_360 */
	0x2			/* MV_DDR_FREQ_1000 */
};
100
/*
 * VCO frequency in MHz per sample-at-reset clock-select value, for boards
 * with a 25 MHz reference clock. Used by ddr3_tip_a38x_set_divider to
 * compute the DDR clock divider (VCO freq / target freq).
 */
static u16 a38x_vco_freq_per_sar_ref_clk_25_mhz[] = {
	666,			/* 0 */
	1332,
	800,
	1600,
	1066,
	2132,
	1200,
	2400,
	1332,
	1332,
	1500,
	1500,
	1600,			/* 12 */
	1600,
	1700,
	1700,
	1866,
	1866,
	1800,			/* 18 */
	2000,
	2000,
	4000,
	2132,
	2132,
	2300,
	2300,
	2400,
	2400,
	2500,
	2500,
	800
};
134
/*
 * VCO frequency in MHz per sample-at-reset clock-select value, for boards
 * with a 40 MHz reference clock. Counterpart of the 25 MHz table above.
 */
static u16 a38x_vco_freq_per_sar_ref_clk_40_mhz[] = {
	666,			/* 0 */
	1332,
	800,
	800,			/* 0x3 */
	1066,
	1066,			/* 0x5 */
	1200,
	2400,
	1332,
	1332,
	1500,			/* 10 */
	1600,			/* 0xB */
	1600,
	1600,
	1700,
	1560,			/* 0xF */
	1866,
	1866,
	1800,
	2000,
	2000,			/* 20 */
	4000,
	2132,
	2132,
	2300,
	2300,
	2400,
	2400,
	2500,
	2500,
	1800			/* 30 - 0x1E */
};
168
169
/*
 * Non-zero selects asynchronous DDR clock mode at the target frequency;
 * consumed by ddr3_tip_a38x_set_divider.
 */
static u32 async_mode_at_tf;

/*
 * DQ bit to PHY pin mapping: one row of 8 entries per byte lane
 * (5 lanes); registered with the training IP via
 * ddr3_tip_register_dq_table in mv_ddr_sw_db_init.
 */
static u32 dq_bit_map_2_phy_pin[] = {
	1, 0, 2, 6, 9, 8, 3, 7, /* 0 */
	8, 9, 1, 7, 2, 6, 3, 0, /* 1 */
	3, 9, 7, 8, 1, 0, 2, 6, /* 2 */
	1, 0, 6, 2, 8, 3, 7, 9, /* 3 */
	0, 1, 2, 9, 7, 8, 3, 6, /* 4 */
};
179
/* Scrub DRAM contents using the ECC scrub engine. */
void mv_ddr_mem_scrubbing(void)
{
	ddr3_new_tip_ecc_scrub();
}
184
185 static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
186                                      enum mv_ddr_freq freq);
187
/*
 * Read temperature TJ value
 *
 * Reads the junction temperature from the thermal sensor (TSEN).
 * On the first call the sensor is taken out of reset and its Tc trim
 * field is programmed to the errata-mandated default. Returns the
 * temperature in degrees Celsius, or 0 if the readout is not yet valid.
 */
static u32 ddr3_ctrl_get_junc_temp(u8 dev_num)
{
	int reg = 0;

	/* Initiates TSEN hardware reset once */
	if ((reg_read(TSEN_CONTROL_MSB_REG) & TSEN_CONTROL_MSB_RST_MASK) == 0) {
		reg_bit_set(TSEN_CONTROL_MSB_REG, TSEN_CONTROL_MSB_RST_MASK);
		/* set Tsen Tc Trim to correct default value (errata #132698) */
		reg = reg_read(TSEN_CONTROL_LSB_REG);
		reg &= ~TSEN_CONTROL_LSB_TC_TRIM_MASK;
		reg |= 0x3 << TSEN_CONTROL_LSB_TC_TRIM_OFFSET;
		reg_write(TSEN_CONTROL_LSB_REG, reg);
	}
	/* give the sensor time to settle before sampling the readout */
	mdelay(10);

	/* Check if the readout field is valid */
	if ((reg_read(TSEN_STATUS_REG) & TSEN_STATUS_READOUT_VALID_MASK) == 0) {
		printf("%s: TSEN not ready\n", __func__);
		return 0;
	}

	reg = reg_read(TSEN_STATUS_REG);
	reg = (reg & TSEN_STATUS_TEMP_OUT_MASK) >> TSEN_STATUS_TEMP_OUT_OFFSET;

	/*
	 * Convert the raw 10-bit readout to degrees Celsius.
	 * NOTE(review): the coefficients implement the sensor transfer
	 * function and are taken as-is — verify against the datasheet.
	 */
	return ((((10000 * reg) / 21445) * 1000) - 272674) / 1000;
}
217
218 /*
219  * Name:     ddr3_tip_a38x_get_freq_config.
220  * Desc:
221  * Args:
222  * Notes:
223  * Returns:  MV_OK if success, other error code if fail.
224  */
225 static int ddr3_tip_a38x_get_freq_config(u8 dev_num, enum mv_ddr_freq freq,
226                                   struct hws_tip_freq_config_info
227                                   *freq_config_info)
228 {
229         if (a38x_bw_per_freq[freq] == 0xff)
230                 return MV_NOT_SUPPORTED;
231
232         if (freq_config_info == NULL)
233                 return MV_BAD_PARAM;
234
235         freq_config_info->bw_per_freq = a38x_bw_per_freq[freq];
236         freq_config_info->rate_per_freq = a38x_rate_per_freq[freq];
237         freq_config_info->is_supported = 1;
238
239         return MV_OK;
240 }
241
242 static void dunit_read(u32 addr, u32 mask, u32 *data)
243 {
244         *data = reg_read(addr) & mask;
245 }
246
247 static void dunit_write(u32 addr, u32 mask, u32 data)
248 {
249         u32 reg_val = data;
250
251         if (mask != MASK_ALL_BITS) {
252                 dunit_read(addr, MASK_ALL_BITS, &reg_val);
253                 reg_val &= (~mask);
254                 reg_val |= (data & mask);
255         }
256
257         reg_write(addr, reg_val);
258 }
259
260 #define ODPG_ENABLE_REG                         0x186d4
261 #define ODPG_EN_OFFS                            0
262 #define ODPG_EN_MASK                            0x1
263 #define ODPG_EN_ENA                             1
264 #define ODPG_EN_DONE                            0
265 #define ODPG_DIS_OFFS                           8
266 #define ODPG_DIS_MASK                           0x1
267 #define ODPG_DIS_DIS                            1
/* Trigger the ODPG (on-die pattern generator) by setting its enable bit. */
void mv_ddr_odpg_enable(void)
{
	dunit_write(ODPG_ENABLE_REG,
		    ODPG_EN_MASK << ODPG_EN_OFFS,
		    ODPG_EN_ENA << ODPG_EN_OFFS);
}
274
/* Stop the ODPG by setting its disable bit. */
void mv_ddr_odpg_disable(void)
{
	dunit_write(ODPG_ENABLE_REG,
		    ODPG_DIS_MASK << ODPG_DIS_OFFS,
		    ODPG_DIS_DIS << ODPG_DIS_OFFS);
}
281
/* No explicit ODPG done-status clearing is needed on this platform. */
void mv_ddr_odpg_done_clr(void)
{
}
286
287 int mv_ddr_is_odpg_done(u32 count)
288 {
289         u32 i, data;
290
291         for (i = 0; i < count; i++) {
292                 dunit_read(ODPG_ENABLE_REG, MASK_ALL_BITS, &data);
293                 if (((data >> ODPG_EN_OFFS) & ODPG_EN_MASK) ==
294                      ODPG_EN_DONE)
295                         break;
296         }
297
298         if (i >= count) {
299                 printf("%s: timeout\n", __func__);
300                 return MV_FAIL;
301         }
302
303         return MV_OK;
304 }
305
/* Kick off the DDR training state machine via its trigger bit. */
void mv_ddr_training_enable(void)
{
	dunit_write(GLOB_CTRL_STATUS_REG,
		    TRAINING_TRIGGER_MASK << TRAINING_TRIGGER_OFFS,
		    TRAINING_TRIGGER_ENA << TRAINING_TRIGGER_OFFS);
}
312
313 #define DRAM_INIT_CTRL_STATUS_REG       0x18488
314 #define TRAINING_TRIGGER_OFFS           0
315 #define TRAINING_TRIGGER_MASK           0x1
316 #define TRAINING_TRIGGER_ENA            1
317 #define TRAINING_DONE_OFFS              1
318 #define TRAINING_DONE_MASK              0x1
319 #define TRAINING_DONE_DONE              1
320 #define TRAINING_DONE_NOT_DONE          0
321 #define TRAINING_RESULT_OFFS            2
322 #define TRAINING_RESULT_MASK            0x1
323 #define TRAINING_RESULT_PASS            0
324 #define TRAINING_RESULT_FAIL            1
325 int mv_ddr_is_training_done(u32 count, u32 *result)
326 {
327         u32 i, data;
328
329         if (result == NULL) {
330                 printf("%s: NULL result pointer found\n", __func__);
331                 return MV_FAIL;
332         }
333
334         for (i = 0; i < count; i++) {
335                 dunit_read(DRAM_INIT_CTRL_STATUS_REG, MASK_ALL_BITS, &data);
336                 if (((data >> TRAINING_DONE_OFFS) & TRAINING_DONE_MASK) ==
337                      TRAINING_DONE_DONE)
338                         break;
339         }
340
341         if (i >= count) {
342                 printf("%s: timeout\n", __func__);
343                 return MV_FAIL;
344         }
345
346         *result = (data >> TRAINING_RESULT_OFFS) & TRAINING_RESULT_MASK;
347
348         return MV_OK;
349 }
350
351 #define DM_PAD  10
352 u32 mv_ddr_dm_pad_get(void)
353 {
354         return DM_PAD;
355 }
356
357 /*
358  * Name:     ddr3_tip_a38x_select_ddr_controller.
359  * Desc:     Enable/Disable access to Marvell's server.
360  * Args:     dev_num     - device number
361  *           enable        - whether to enable or disable the server
362  * Notes:
363  * Returns:  MV_OK if success, other error code if fail.
364  */
365 static int ddr3_tip_a38x_select_ddr_controller(u8 dev_num, int enable)
366 {
367         u32 reg;
368
369         reg = reg_read(DUAL_DUNIT_CFG_REG);
370
371         if (enable)
372                 reg |= (1 << 6);
373         else
374                 reg &= ~(1 << 6);
375
376         reg_write(DUAL_DUNIT_CFG_REG, reg);
377
378         return MV_OK;
379 }
380
381 static u8 ddr3_tip_clock_mode(u32 frequency)
382 {
383         if ((frequency == MV_DDR_FREQ_LOW_FREQ) || (mv_ddr_freq_get(frequency) <= 400))
384                 return 1;
385
386         return 2;
387 }
388
/*
 * Derive the DDR target frequency from the sample-at-reset (SAR) CPU/DDR
 * clock-select field. The encoding differs between boards with a 25 MHz
 * and a 40 MHz reference clock. Unsupported encodings print a warning and
 * fall through to the nearest supported frequency.
 * Returns MV_OK, or MV_NOT_SUPPORTED for an unrecognized SAR value
 * (in which case *freq is set to 0).
 */
static int mv_ddr_sar_freq_get(int dev_num, enum mv_ddr_freq *freq)
{
	u32 reg, ref_clk_satr;

	/* Read sample at reset setting */
	reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
	       RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
		RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;

	ref_clk_satr = reg_read(DEVICE_SAMPLE_AT_RESET2_REG);
	if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) ==
	    DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ) {
		switch (reg) {
		case 0x1:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 333Mhz configured(%d)\n",
					      reg));
			/* fallthrough */
		case 0x0:
			*freq = MV_DDR_FREQ_333;
			break;
		case 0x3:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 400Mhz configured(%d)\n",
					      reg));
			/* fallthrough */
		case 0x2:
			*freq = MV_DDR_FREQ_400;
			break;
		case 0xd:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 533Mhz configured(%d)\n",
					      reg));
			/* fallthrough */
		case 0x4:
			*freq = MV_DDR_FREQ_533;
			break;
		case 0x6:
			*freq = MV_DDR_FREQ_600;
			break;
		case 0x11:
		case 0x14:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 667Mhz configured(%d)\n",
					      reg));
			/* fallthrough */
		case 0x8:
			*freq = MV_DDR_FREQ_667;
			break;
		case 0x15:
		case 0x1b:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 800Mhz configured(%d)\n",
					      reg));
			/* fallthrough */
		case 0xc:
			*freq = MV_DDR_FREQ_800;
			break;
		case 0x10:
			*freq = MV_DDR_FREQ_933;
			break;
		case 0x12:
			*freq = MV_DDR_FREQ_900;
			break;
		case 0x13:
			*freq = MV_DDR_FREQ_933;
			break;
		default:
			*freq = 0;
			return MV_NOT_SUPPORTED;
		}
	} else { /* REFCLK 40MHz case */
		switch (reg) {
		case 0x3:
			*freq = MV_DDR_FREQ_400;
			break;
		case 0x5:
			*freq = MV_DDR_FREQ_533;
			break;
		case 0xb:
			*freq = MV_DDR_FREQ_800;
			break;
		case 0x1e:
			*freq = MV_DDR_FREQ_900;
			break;
		default:
			*freq = 0;
			return MV_NOT_SUPPORTED;
		}
	}

	return MV_OK;
}
482
/*
 * Derive the intermediate ("medium") training frequency from the same
 * sample-at-reset clock-select field used by mv_ddr_sar_freq_get. For
 * several encodings the medium frequency equals the target frequency so
 * that PBS runs at that frequency.
 * Returns MV_OK, or MV_NOT_SUPPORTED for an unrecognized SAR value
 * (in which case *freq is set to 0).
 */
static int ddr3_tip_a38x_get_medium_freq(int dev_num, enum mv_ddr_freq *freq)
{
	u32 reg, ref_clk_satr;

	/* Read sample at reset setting */
	reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
	RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
	RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;

	ref_clk_satr = reg_read(DEVICE_SAMPLE_AT_RESET2_REG);
	if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) ==
	    DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ) {
		switch (reg) {
		case 0x0:
		case 0x1:
			/* Medium is same as TF to run PBS in this freq */
			*freq = MV_DDR_FREQ_333;
			break;
		case 0x2:
		case 0x3:
			/* Medium is same as TF to run PBS in this freq */
			*freq = MV_DDR_FREQ_400;
			break;
		case 0x4:
		case 0xd:
			/* Medium is same as TF to run PBS in this freq */
			*freq = MV_DDR_FREQ_533;
			break;
		case 0x8:
		case 0x10:
		case 0x11:
		case 0x14:
			*freq = MV_DDR_FREQ_333;
			break;
		case 0xc:
		case 0x15:
		case 0x1b:
			*freq = MV_DDR_FREQ_400;
			break;
		case 0x6:
			*freq = MV_DDR_FREQ_300;
			break;
		case 0x12:
			*freq = MV_DDR_FREQ_360;
			break;
		case 0x13:
			*freq = MV_DDR_FREQ_400;
			break;
		default:
			*freq = 0;
			return MV_NOT_SUPPORTED;
		}
	} else { /* REFCLK 40MHz case */
		switch (reg) {
		case 0x3:
			/* Medium is same as TF to run PBS in this freq */
			*freq = MV_DDR_FREQ_400;
			break;
		case 0x5:
			/* Medium is same as TF to run PBS in this freq */
			*freq = MV_DDR_FREQ_533;
			break;
		case 0xb:
			*freq = MV_DDR_FREQ_400;
			break;
		case 0x1e:
			*freq = MV_DDR_FREQ_360;
			break;
		default:
			*freq = 0;
			return MV_NOT_SUPPORTED;
		}
	}

	return MV_OK;
}
559
/*
 * Report the controller device id (0x6900 on Armada 39x, 0x6800
 * otherwise) and the global ck_delay setting.
 */
static int ddr3_tip_a38x_get_device_info(u8 dev_num, struct ddr3_device_info *info_ptr)
{
#if defined(CONFIG_ARMADA_39X)
	info_ptr->device_id = 0x6900;
#else
	info_ptr->device_id = 0x6800;
#endif
	info_ptr->ck_delay = ck_delay;

	return MV_OK;
}
571
572 /* check indirect access to phy register file completed */
573 static int is_prfa_done(void)
574 {
575         u32 reg_val;
576         u32 iter = 0;
577
578         do {
579                 if (iter++ > MAX_POLLING_ITERATIONS) {
580                         printf("error: %s: polling timeout\n", __func__);
581                         return MV_FAIL;
582                 }
583                 dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, &reg_val);
584                 reg_val >>= PRFA_REQ_OFFS;
585                 reg_val &= PRFA_REQ_MASK;
586         } while (reg_val == PRFA_REQ_ENA); /* request pending */
587
588         return MV_OK;
589 }
590
/*
 * write to phy register thru indirect access
 *
 * Packs the address, phy number, phy type, access type and operation into
 * the PHY register-file access register, writes it once without the
 * request bit, then sets the request bit to trigger the access, and polls
 * for completion. Returns MV_OK on success, MV_FAIL on polling timeout.
 */
static int prfa_write(enum hws_access_type phy_access, u32 phy,
		      enum hws_ddr_phy phy_type, u32 addr,
		      u32 data, enum hws_operation op_type)
{
	/* assemble all PRFA fields; addr bits above 6 go to the "hi" field */
	u32 reg_val = ((data & PRFA_DATA_MASK) << PRFA_DATA_OFFS) |
		      ((addr & PRFA_REG_NUM_MASK) << PRFA_REG_NUM_OFFS) |
		      ((phy & PRFA_PUP_NUM_MASK) << PRFA_PUP_NUM_OFFS) |
		      ((phy_type & PRFA_PUP_CTRL_DATA_MASK) << PRFA_PUP_CTRL_DATA_OFFS) |
		      ((phy_access & PRFA_PUP_BCAST_WR_ENA_MASK) << PRFA_PUP_BCAST_WR_ENA_OFFS) |
		      (((addr >> 6) & PRFA_REG_NUM_HI_MASK) << PRFA_REG_NUM_HI_OFFS) |
		      ((op_type & PRFA_TYPE_MASK) << PRFA_TYPE_OFFS);
	dunit_write(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, reg_val);
	/* second write with the request bit set triggers the access */
	reg_val |= (PRFA_REQ_ENA << PRFA_REQ_OFFS);
	dunit_write(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, reg_val);

	/* polling for prfa request completion */
	if (is_prfa_done() != MV_OK)
		return MV_FAIL;

	return MV_OK;
}
613
/*
 * read from phy register thru indirect access
 *
 * A read is issued as a PRFA write with OPERATION_READ; the result is then
 * taken from the data field of the access register. For multicast access
 * every active octet is read via unicast, and 'data' is treated as an
 * array with one entry per phy; for unicast, 'data' is a single value.
 * Returns MV_OK on success, MV_FAIL if the underlying access times out.
 */
static int prfa_read(enum hws_access_type phy_access, u32 phy,
		     enum hws_ddr_phy phy_type, u32 addr, u32 *data)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	u32 max_phy = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
	u32 i, reg_val;

	if (phy_access == ACCESS_TYPE_MULTICAST) {
		for (i = 0; i < max_phy; i++) {
			/* skip octets not present in the bus activity mask */
			VALIDATE_BUS_ACTIVE(tm->bus_act_mask, i);
			if (prfa_write(ACCESS_TYPE_UNICAST, i, phy_type, addr, 0, OPERATION_READ) != MV_OK)
				return MV_FAIL;
			dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, &reg_val);
			data[i] = (reg_val >> PRFA_DATA_OFFS) & PRFA_DATA_MASK;
		}
	} else {
		if (prfa_write(phy_access, phy, phy_type, addr, 0, OPERATION_READ) != MV_OK)
			return MV_FAIL;
		dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, &reg_val);
		*data = (reg_val >> PRFA_DATA_OFFS) & PRFA_DATA_MASK;
	}

	return MV_OK;
}
639
/*
 * Register the A38x platform callbacks and attributes with the training
 * IP software database, set the DQ-to-pin mapping, and initialize the
 * training globals (ca_delay, delay_enable, dfs_low_freq,
 * calibration_update_control, medium_freq).
 */
static int mv_ddr_sw_db_init(u32 dev_num, u32 board_id)
{
	struct hws_tip_config_func_db config_func;

	/* new read leveling version */
	config_func.mv_ddr_dunit_read = dunit_read;
	config_func.mv_ddr_dunit_write = dunit_write;
	config_func.tip_dunit_mux_select_func =
		ddr3_tip_a38x_select_ddr_controller;
	config_func.tip_get_freq_config_info_func =
		ddr3_tip_a38x_get_freq_config;
	config_func.tip_set_freq_divider_func = ddr3_tip_a38x_set_divider;
	config_func.tip_get_device_info_func = ddr3_tip_a38x_get_device_info;
	config_func.tip_get_temperature = ddr3_ctrl_get_junc_temp;
	config_func.tip_get_clock_ratio = ddr3_tip_clock_mode;
	config_func.tip_external_read = ddr3_tip_ext_read;
	config_func.tip_external_write = ddr3_tip_ext_write;
	config_func.mv_ddr_phy_read = prfa_read;
	config_func.mv_ddr_phy_write = prfa_write;

	ddr3_tip_init_config_func(dev_num, &config_func);

	ddr3_tip_register_dq_table(dev_num, dq_bit_map_2_phy_pin);

	/* set device attributes*/
	ddr3_tip_dev_attr_init(dev_num);
	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_TIP_REV, MV_TIP_REV_4);
	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_PHY_EDGE, MV_DDR_PHY_EDGE_POSITIVE);
	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_OCTET_PER_INTERFACE, DDR_INTERFACE_OCTETS_NUM);
#ifdef CONFIG_ARMADA_39X
	/* interleave workaround only applies to Armada 39x */
	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_INTERLEAVE_WA, 1);
#else
	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_INTERLEAVE_WA, 0);
#endif

	ca_delay = 0;
	delay_enable = 1;
	dfs_low_freq = DFS_LOW_FREQ_VALUE;
	calibration_update_control = 1;

	ddr3_tip_a38x_get_medium_freq(dev_num, &medium_freq);

	return MV_OK;
}
684
/*
 * Select which training stages run by setting the global mask_tune_func
 * bitmask based on the target frequency, and set the read-leveling
 * mid-frequency workaround flag. Supplementary and PBS stages are
 * removed when ECC is enabled.
 */
static int mv_ddr_training_mask_set(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	enum mv_ddr_freq ddr_freq = tm->interface_params[0].memory_freq;

	/* default: full stage list including target-frequency (TF) stages */
	mask_tune_func = (SET_LOW_FREQ_MASK_BIT |
			  LOAD_PATTERN_MASK_BIT |
			  SET_MEDIUM_FREQ_MASK_BIT | WRITE_LEVELING_MASK_BIT |
			  WRITE_LEVELING_SUPP_MASK_BIT |
			  READ_LEVELING_MASK_BIT |
			  PBS_RX_MASK_BIT |
			  PBS_TX_MASK_BIT |
			  SET_TARGET_FREQ_MASK_BIT |
			  WRITE_LEVELING_TF_MASK_BIT |
			  WRITE_LEVELING_SUPP_TF_MASK_BIT |
			  READ_LEVELING_TF_MASK_BIT |
			  CENTRALIZATION_RX_MASK_BIT |
			  CENTRALIZATION_TX_MASK_BIT);
	rl_mid_freq_wa = 1;

	if ((ddr_freq == MV_DDR_FREQ_333) || (ddr_freq == MV_DDR_FREQ_400)) {
		/* low target frequency: reduced stage list, no freq switch */
		mask_tune_func = (WRITE_LEVELING_MASK_BIT |
				  LOAD_PATTERN_2_MASK_BIT |
				  WRITE_LEVELING_SUPP_MASK_BIT |
				  READ_LEVELING_MASK_BIT |
				  PBS_RX_MASK_BIT |
				  PBS_TX_MASK_BIT |
				  CENTRALIZATION_RX_MASK_BIT |
				  CENTRALIZATION_TX_MASK_BIT);
		rl_mid_freq_wa = 0; /* WA not needed if 333/400 is TF */
	}

	/* Supplementary not supported for ECC modes */
	if (mv_ddr_is_ecc_ena()) {
		mask_tune_func &= ~WRITE_LEVELING_SUPP_TF_MASK_BIT;
		mask_tune_func &= ~WRITE_LEVELING_SUPP_MASK_BIT;
		mask_tune_func &= ~PBS_TX_MASK_BIT;
		mask_tune_func &= ~PBS_RX_MASK_BIT;
	}

	return MV_OK;
}
727
/* function: mv_ddr_set_calib_controller
 * this function sets the controller which will control
 * the calibration cycle in the end of the training.
 * 1 - internal controller
 * 2 - external controller
 */
void mv_ddr_set_calib_controller(void)
{
	/* A38x uses the internal controller for post-training calibration */
	calibration_update_control = CAL_UPDATE_CTRL_INT;
}
738
/*
 * Program the DDR clock for the requested frequency on interface 0.
 * The divider is computed from the sample-at-reset VCO frequency tables.
 * Above 400 MHz with async_mode_at_tf set, the clock is switched to
 * asynchronous mode with a per-frequency PLL value; otherwise the CPU PLL
 * clock divider is reloaded in synchronous mode. Finally the dunit
 * training-clock and 1:1/2:1 ratio bits are updated.
 * Returns MV_OK, or MV_BAD_PARAM for any interface other than 0.
 */
static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
				     enum mv_ddr_freq frequency)
{
	u32 divider = 0;
	u32 sar_val, ref_clk_satr;
	u32 async_val;
	u32 freq = mv_ddr_freq_get(frequency);

	if (if_id != 0) {
		DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
				      ("A38x does not support interface 0x%x\n",
				       if_id));
		return MV_BAD_PARAM;
	}

	/* get VCO freq index */
	sar_val = (reg_read(REG_DEVICE_SAR1_ADDR) >>
		   RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
		RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;

	/* divider = VCO frequency / target frequency (used in sync mode) */
	ref_clk_satr = reg_read(DEVICE_SAMPLE_AT_RESET2_REG);
	if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) ==
	    DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ)
		divider = a38x_vco_freq_per_sar_ref_clk_25_mhz[sar_val] / freq;
	else
		divider = a38x_vco_freq_per_sar_ref_clk_40_mhz[sar_val] / freq;

	if ((async_mode_at_tf == 1) && (freq > 400)) {
		/* Set async mode */
		/* NOTE(review): raw register addresses below are
		 * clock/PLL control registers — verify against the spec.
		 */
		dunit_write(0x20220, 0x1000, 0x1000);
		dunit_write(0xe42f4, 0x200, 0x200);

		/* Wait for async mode setup */
		mdelay(5);

		/* Set KNL values */
		switch (frequency) {
		case MV_DDR_FREQ_467:
			async_val = 0x806f012;
			break;
		case MV_DDR_FREQ_533:
			async_val = 0x807f012;
			break;
		case MV_DDR_FREQ_600:
			async_val = 0x805f00a;
			break;
		case MV_DDR_FREQ_667:
			async_val = 0x809f012;
			break;
		case MV_DDR_FREQ_800:
			async_val = 0x807f00a;
			break;
		case MV_DDR_FREQ_850:
			async_val = 0x80cb012;
			break;
		case MV_DDR_FREQ_900:
			async_val = 0x80d7012;
			break;
		case MV_DDR_FREQ_933:
			async_val = 0x80df012;
			break;
		case MV_DDR_FREQ_1000:
			async_val = 0x80ef012;
			break;
		case MV_DDR_FREQ_1066:
			async_val = 0x80ff012;
			break;
		default:
			/* set MV_DDR_FREQ_667 as default */
			async_val = 0x809f012;
		}
		dunit_write(0xe42f0, 0xffffffff, async_val);
	} else {
		/* Set sync mode */
		dunit_write(0x20220, 0x1000, 0x0);
		dunit_write(0xe42f4, 0x200, 0x0);

		/* cpupll_clkdiv_reset_mask */
		dunit_write(0xe4264, 0xff, 0x1f);

		/* cpupll_clkdiv_reload_smooth */
		dunit_write(0xe4260, (0xff << 8), (0x2 << 8));

		/* cpupll_clkdiv_relax_en */
		dunit_write(0xe4260, (0xff << 24), (0x2 << 24));

		/* write the divider */
		dunit_write(0xe4268, (0x3f << 8), (divider << 8));

		/* set cpupll_clkdiv_reload_ratio */
		dunit_write(0xe4264, (1 << 8), (1 << 8));

		/* undet cpupll_clkdiv_reload_ratio */
		dunit_write(0xe4264, (1 << 8), 0x0);

		/* clear cpupll_clkdiv_reload_force */
		dunit_write(0xe4260, (0xff << 8), 0x0);

		/* clear cpupll_clkdiv_relax_en */
		dunit_write(0xe4260, (0xff << 24), 0x0);

		/* clear cpupll_clkdiv_reset_mask */
		dunit_write(0xe4264, 0xff, 0x0);
	}

	/* Dunit training clock + 1:1/2:1 mode */
	dunit_write(0x18488, (1 << 16), ((ddr3_tip_clock_mode(frequency) & 0x1) << 16));
	dunit_write(0x1524, (1 << 15), ((ddr3_tip_clock_mode(frequency) - 1) << 15));

	return MV_OK;
}
850
851 /*
852  * external read from memory
853  */
854 int ddr3_tip_ext_read(u32 dev_num, u32 if_id, u32 reg_addr,
855                       u32 num_of_bursts, u32 *data)
856 {
857         u32 burst_num;
858
859         for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
860                 data[burst_num] = readl(reg_addr + 4 * burst_num);
861
862         return MV_OK;
863 }
864
865 /*
866  * external write to memory
867  */
868 int ddr3_tip_ext_write(u32 dev_num, u32 if_id, u32 reg_addr,
869                        u32 num_of_bursts, u32 *data) {
870         u32 burst_num;
871
872         for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
873                 writel(data[burst_num], reg_addr + 4 * burst_num);
874
875         return MV_OK;
876 }
877
/*
 * First-stage DDR init: set up the training SW database and decide
 * whether async clocking mode is needed. Always returns MV_OK.
 */
int mv_ddr_early_init(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	/* FIXME: change this configuration per ddr type
	 * configure a380 and a390 to work with receiver odt timing
	 * the odt_config is defined:
	 * '1' in ddr4
	 * '0' in ddr3
	 * here the parameter is run over in ddr4 and ddr3 to '1' (in ddr4 the default is '1')
	 * to configure the odt to work with timing restrictions
	 */

	mv_ddr_sw_db_init(0, 0);

	/*
	 * A frequency fixed by the topology (rather than taken from
	 * sample-at-reset) forces asynchronous mode at the target frequency.
	 */
	if (tm->interface_params[0].memory_freq != MV_DDR_FREQ_SAR)
		async_mode_at_tf = 1;

	return MV_OK;
}
898
/* Second-stage early init: select which training stages will run. */
int mv_ddr_early_init2(void)
{
	mv_ddr_training_mask_set();

	return MV_OK;
}
905
/* Pre-training platform hook; intentionally a no-op on this platform. */
int mv_ddr_pre_training_fixup(void)
{
	return 0;
}
910
/* Post-training platform hook; intentionally a no-op on this platform. */
int mv_ddr_post_training_fixup(void)
{
	return 0;
}
915
/* Hook after the training algorithm completes; nothing to do here. */
int ddr3_post_run_alg(void)
{
	return MV_OK;
}
920
/*
 * Post-init silicon tweak: in 16-bit DRAM mode write 0 under mask 0x8000
 * to SDRAM_CFG_REG (clears the bus-width bit) so the controller uses only
 * half of the data bus.
 */
int ddr3_silicon_post_init(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	/* Set half bus width */
	if (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask)) {
		CHECK_STATUS(ddr3_tip_if_write
			     (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
			      SDRAM_CFG_REG, 0x0, 0x8000));
	}

	return MV_OK;
}
934
935 u32 mv_ddr_init_freq_get(void)
936 {
937         enum mv_ddr_freq freq;
938
939         mv_ddr_sar_freq_get(0, &freq);
940
941         return freq;
942 }
943
944 static u32 ddr3_get_bus_width(void)
945 {
946         u32 bus_width;
947
948         bus_width = (reg_read(SDRAM_CFG_REG) & 0x8000) >>
949                 BUS_IN_USE_OFFS;
950
951         return (bus_width == 0) ? 16 : 32;
952 }
953
954 static u32 ddr3_get_device_width(u32 cs)
955 {
956         u32 device_width;
957
958         device_width = (reg_read(SDRAM_ADDR_CTRL_REG) &
959                         (CS_STRUCT_MASK << CS_STRUCT_OFFS(cs))) >>
960                         CS_STRUCT_OFFS(cs);
961
962         return (device_width == 0) ? 8 : 16;
963 }
964
965 static u32 ddr3_get_device_size(u32 cs)
966 {
967         u32 device_size_low, device_size_high, device_size;
968         u32 data, cs_low_offset, cs_high_offset;
969
970         cs_low_offset = CS_SIZE_OFFS(cs);
971         cs_high_offset = CS_SIZE_HIGH_OFFS(cs);
972
973         data = reg_read(SDRAM_ADDR_CTRL_REG);
974         device_size_low = (data >> cs_low_offset) & 0x3;
975         device_size_high = (data >> cs_high_offset) & 0x1;
976
977         device_size = device_size_low | (device_size_high << 2);
978
979         switch (device_size) {
980         case 0:
981                 return 2048;
982         case 2:
983                 return 512;
984         case 3:
985                 return 1024;
986         case 4:
987                 return 4096;
988         case 5:
989                 return 8192;
990         case 1:
991         default:
992                 DEBUG_INIT_C("Error: Wrong device size of Cs: ", cs, 1);
993                 /* zeroes mem size in ddr3_calc_mem_cs_size */
994                 return 0;
995         }
996 }
997
998 int ddr3_calc_mem_cs_size(u32 cs, uint64_t *cs_size)
999 {
1000         u32 cs_mem_size;
1001
1002         /* Calculate in MiB */
1003         cs_mem_size = ((ddr3_get_bus_width() / ddr3_get_device_width(cs)) *
1004                        ddr3_get_device_size(cs)) / 8;
1005
1006         /*
1007          * Multiple controller bus width, 2x for 64 bit
1008          * (SoC controller may be 32 or 64 bit,
1009          * so bit 15 in 0x1400, that means if whole bus used or only half,
1010          * have a differnt meaning
1011          */
1012         cs_mem_size *= DDR_CONTROLLER_BUS_WIDTH_MULTIPLIER;
1013
1014         if ((cs_mem_size < 128) || (cs_mem_size > 4096)) {
1015                 DEBUG_INIT_C("Error: Wrong Memory size of Cs: ", cs, 1);
1016                 return MV_BAD_VALUE;
1017         }
1018
1019         *cs_size = cs_mem_size << 20; /* write cs size in bytes */
1020
1021         return MV_OK;
1022 }
1023
/*
 * Open a "fast path" window per enabled chip select (bitmask cs_ena),
 * sized to each CS's memory, and program the L2 address filter to cover
 * the total DRAM size. Returns MV_FAIL if a CS size cannot be computed.
 */
static int ddr3_fast_path_dynamic_cs_size_config(u32 cs_ena)
{
	u32 reg, cs;
	uint64_t mem_total_size = 0;
	uint64_t cs_mem_size = 0;
	uint64_t mem_total_size_c, cs_mem_size_c;

#ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
	u32 physical_mem_size;
	u32 max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE;
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
#endif

	/* Open fast path windows */
	for (cs = 0; cs < MAX_CS_NUM; cs++) {
		if (cs_ena & (1 << cs)) {
			/* get CS size */
			if (ddr3_calc_mem_cs_size(cs, &cs_mem_size) != MV_OK)
				return MV_FAIL;

#ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
			/*
			 * if number of address pins doesn't allow to use max
			 * mem size that is defined in topology
			 * mem size is defined by DEVICE_MAX_DRAM_ADDRESS_SIZE
			 */
			physical_mem_size = mem_size
				[tm->interface_params[0].memory_size];

			if (ddr3_get_device_width(cs) == 16) {
				/*
				 * 16bit mem device can be twice more - no need
				 * in less significant pin
				 */
				max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE * 2;
			}

			if (physical_mem_size > max_mem_size) {
				/* cap the CS size to what the address pins can reach */
				cs_mem_size = max_mem_size *
					(ddr3_get_bus_width() /
					 ddr3_get_device_width(cs));
				printf("Updated Physical Mem size is from 0x%x to %x\n",
				       physical_mem_size,
				       DEVICE_MAX_DRAM_ADDRESS_SIZE);
			}
#endif

			/* set fast path window control for the cs */
			reg = 0xffffe1;
			reg |= (cs << 2);
			reg |= (cs_mem_size - 1) & 0xffff0000;
			/*Open fast path Window */
			reg_write(REG_FASTPATH_WIN_CTRL_ADDR(cs), reg);

			/* Set fast path window base address for the cs */
			reg = ((cs_mem_size) * cs) & 0xffff0000;
			/* Set base address */
			reg_write(REG_FASTPATH_WIN_BASE_ADDR(cs), reg);

			/*
			 * Since memory size may be bigger than 4G the summ may
			 * be more than 32 bit word,
			 * so to estimate the result divide mem_total_size and
			 * cs_mem_size by 0x10000 (it is equal to >> 16)
			 */
			mem_total_size_c = (mem_total_size >> 16) & 0xffffffffffff;
			cs_mem_size_c = (cs_mem_size >> 16) & 0xffffffffffff;
			/* if the sum less than 2 G - calculate the value */
			if (mem_total_size_c + cs_mem_size_c < 0x10000)
				mem_total_size += cs_mem_size;
			else	/* put max possible size */
				mem_total_size = L2_FILTER_FOR_MAX_MEMORY_SIZE;
		}
	}

	/* Set L2 filtering to Max Memory size */
	reg_write(ADDRESS_FILTERING_END_REGISTER, mem_total_size);

	return MV_OK;
}
1104
/*
 * Restore the XBAR window configuration saved before training (win[],
 * 16 registers starting at window 4) and then switch DRAM access to the
 * fast path windows. ddr_type is only used for the console message.
 */
static int ddr3_restore_and_set_final_windows(u32 *win, const char *ddr_type)
{
	u32 win_ctrl_reg, num_of_win_regs;
	u32 cs_ena = mv_ddr_sys_env_get_cs_ena_from_reg();
	u32 ui;

	win_ctrl_reg = REG_XBAR_WIN_4_CTRL_ADDR;
	num_of_win_regs = 16;

	/* Return XBAR windows 4-7 or 16-19 init configuration */
	for (ui = 0; ui < num_of_win_regs; ui++)
		reg_write((win_ctrl_reg + 0x4 * ui), win[ui]);

	printf("%s Training Sequence - Switching XBAR Window to FastPath Window\n",
	       ddr_type);

#if defined DYNAMIC_CS_SIZE_CONFIG
	/* size each fast path window per chip select */
	if (ddr3_fast_path_dynamic_cs_size_config(cs_ena) != MV_OK)
		printf("ddr3_fast_path_dynamic_cs_size_config FAILED\n");
#else
	/* fixed-size fallback: a single window for the first enabled CS */
	u32 reg, cs;
	reg = 0x1fffffe1;
	for (cs = 0; cs < MAX_CS_NUM; cs++) {
		if (cs_ena & (1 << cs)) {
			reg |= (cs << 2);
			break;
		}
	}
	/* Open fast path Window to - 0.5G */
	reg_write(REG_FASTPATH_WIN_CTRL_ADDR(0), reg);
#endif

	return MV_OK;
}
1139
/*
 * Save the current XBAR window configuration (16 registers from window 4)
 * into win[] and open temporary training windows, one per enabled chip
 * select. The saved values are later restored by
 * ddr3_restore_and_set_final_windows().
 */
static int ddr3_save_and_set_training_windows(u32 *win)
{
	u32 cs_ena;
	u32 reg, tmp_count, cs, ui;
	u32 win_ctrl_reg, win_base_reg, win_remap_reg;
	u32 num_of_win_regs, win_jump_index;
	win_ctrl_reg = REG_XBAR_WIN_4_CTRL_ADDR;
	win_base_reg = REG_XBAR_WIN_4_BASE_ADDR;
	win_remap_reg = REG_XBAR_WIN_4_REMAP_ADDR;
	win_jump_index = 0x10;
	num_of_win_regs = 16;
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

#ifdef DISABLE_L2_FILTERING_DURING_DDR_TRAINING
	/*
	 * Disable L2 filtering during DDR training
	 * (when Cross Bar window is open)
	 */
	reg_write(ADDRESS_FILTERING_END_REGISTER, 0);
#endif

	cs_ena = tm->interface_params[0].as_bus_params[0].cs_bitmask;

	/* Close XBAR Window 19 - Not needed */
	/* {0x000200e8}  -   Open Mbus Window - 2G */
	reg_write(REG_XBAR_WIN_19_CTRL_ADDR, 0);

	/* Save XBAR Windows 4-19 init configurations */
	for (ui = 0; ui < num_of_win_regs; ui++)
		win[ui] = reg_read(win_ctrl_reg + 0x4 * ui);

	/* Open XBAR Windows 4-7 or 16-19 for other CS */
	reg = 0;
	tmp_count = 0;
	for (cs = 0; cs < MAX_CS_NUM; cs++) {
		if (cs_ena & (1 << cs)) {
			/* per-CS target attribute for the window control word */
			switch (cs) {
			case 0:
				reg = 0x0e00;
				break;
			case 1:
				reg = 0x0d00;
				break;
			case 2:
				reg = 0x0b00;
				break;
			case 3:
				reg = 0x0700;
				break;
			}
			reg |= (1 << 0);
			reg |= (SDRAM_CS_SIZE & 0xffff0000);

			reg_write(win_ctrl_reg + win_jump_index * tmp_count,
				  reg);
			reg = (((SDRAM_CS_SIZE + 1) * (tmp_count)) &
			       0xffff0000);
			reg_write(win_base_reg + win_jump_index * tmp_count,
				  reg);

			/* windows above 7 have no remap registers */
			if (win_remap_reg <= REG_XBAR_WIN_7_REMAP_ADDR)
				reg_write(win_remap_reg +
					  win_jump_index * tmp_count, 0);

			tmp_count++;
		}
	}

	return MV_OK;
}
1210
/*
 * XBAR window registers saved by ddr3_save_and_set_training_windows()
 * and restored by ddr3_restore_and_set_final_windows() after training.
 */
static u32 win[16];
1212
/*
 * SoC-level setup before DDR training: set the Marvell CPU ID on all
 * cores, handle suspend-wakeup DRAM reset masking, skip training if the
 * bootROM indicates DRAM is already initialized, fix read-ready phases,
 * configure the AXI bridge, and open temporary XBAR training windows.
 * ddr_type is used for console messages only.
 */
int mv_ddr_pre_training_soc_config(const char *ddr_type)
{
	u32 soc_num;
	u32 reg_val;

	/* Switching CPU to MRVL ID */
	soc_num = (reg_read(REG_SAMPLE_RESET_HIGH_ADDR) & SAR1_CPU_CORE_MASK) >>
		SAR1_CPU_CORE_OFFSET;
	/* cascade: set the ID on each present core, highest first */
	switch (soc_num) {
	case 0x3:
		reg_bit_set(CPU_CONFIGURATION_REG(3), CPU_MRVL_ID_OFFSET);
		reg_bit_set(CPU_CONFIGURATION_REG(2), CPU_MRVL_ID_OFFSET);
		/* fallthrough */
	case 0x1:
		reg_bit_set(CPU_CONFIGURATION_REG(1), CPU_MRVL_ID_OFFSET);
		/* fallthrough */
	case 0x0:
		reg_bit_set(CPU_CONFIGURATION_REG(0), CPU_MRVL_ID_OFFSET);
		/* fallthrough */
	default:
		break;
	}

	/*
	 * Set DRAM Reset Mask in case detected GPIO indication of wakeup from
	 * suspend i.e the DRAM values will not be overwritten / reset when
	 * waking from suspend
	 */
	if (mv_ddr_sys_env_suspend_wakeup_check() ==
	    SUSPEND_WAKEUP_ENABLED_GPIO_DETECTED) {
		reg_bit_set(SDRAM_INIT_CTRL_REG,
			    DRAM_RESET_MASK_MASKED << DRAM_RESET_MASK_OFFS);
	}

	/* Check if DRAM is already initialized  */
	if (reg_read(REG_BOOTROM_ROUTINE_ADDR) &
	    (1 << REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS)) {
		printf("%s Training Sequence - 2nd boot - Skip\n", ddr_type);
		return MV_OK;
	}

	/* Fix read ready phases for all SOC in reg 0x15c8 */
	reg_val = reg_read(TRAINING_DBG_3_REG);

	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(0));
	reg_val |= (0x4 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(0));	/* phase 0 */

	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(1));
	reg_val |= (0x4 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(1));	/* phase 1 */

	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(3));
	reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(3));	/* phase 3 */

	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(4));
	reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(4));	/* phase 4 */

	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(5));
	reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(5));	/* phase 5 */

	reg_write(TRAINING_DBG_3_REG, reg_val);

	/*
	 * Axi_bresp_mode[8] = Compliant,
	 * Axi_addr_decode_cntrl[11] = Internal,
	 * Axi_data_bus_width[0] = 128bit
	 * */
	/* 0x14a8 - AXI Control Register */
	reg_write(AXI_CTRL_REG, 0);

	/*
	 * Stage 2 - Training Values Setup
	 */
	/* Set X-BAR windows for the training sequence */
	ddr3_save_and_set_training_windows(win);

	return MV_OK;
}
1290
1291 static int ddr3_new_tip_dlb_config(void)
1292 {
1293         u32 reg, i = 0;
1294         struct dlb_config *config_table_ptr = sys_env_dlb_config_ptr_get();
1295
1296         /* Write the configuration */
1297         while (config_table_ptr[i].reg_addr != 0) {
1298                 reg_write(config_table_ptr[i].reg_addr,
1299                           config_table_ptr[i].reg_data);
1300                 i++;
1301         }
1302
1303
1304         /* Enable DLB */
1305         reg = reg_read(DLB_CTRL_REG);
1306         reg &= ~(DLB_EN_MASK << DLB_EN_OFFS) &
1307                ~(WR_COALESCE_EN_MASK << WR_COALESCE_EN_OFFS) &
1308                ~(AXI_PREFETCH_EN_MASK << AXI_PREFETCH_EN_OFFS) &
1309                ~(MBUS_PREFETCH_EN_MASK << MBUS_PREFETCH_EN_OFFS) &
1310                ~(PREFETCH_NXT_LN_SZ_TRIG_MASK << PREFETCH_NXT_LN_SZ_TRIG_OFFS);
1311
1312         reg |= (DLB_EN_ENA << DLB_EN_OFFS) |
1313                (WR_COALESCE_EN_ENA << WR_COALESCE_EN_OFFS) |
1314                (AXI_PREFETCH_EN_ENA << AXI_PREFETCH_EN_OFFS) |
1315                (MBUS_PREFETCH_EN_ENA << MBUS_PREFETCH_EN_OFFS) |
1316                (PREFETCH_NXT_LN_SZ_TRIG_ENA << PREFETCH_NXT_LN_SZ_TRIG_OFFS);
1317
1318         reg_write(DLB_CTRL_REG, reg);
1319
1320         return MV_OK;
1321 }
1322
/*
 * SoC-level teardown after DDR training: restore final XBAR/fastpath
 * windows, mark DRAM as initialized in the bootROM routine register
 * (checked on warm boot to skip training), and configure the DLB.
 */
int mv_ddr_post_training_soc_config(const char *ddr_type)
{
	u32 reg_val;

	/* Restore and set windows */
	ddr3_restore_and_set_final_windows(win, ddr_type);

	/* Update DRAM init indication in bootROM register */
	reg_val = reg_read(REG_BOOTROM_ROUTINE_ADDR);
	reg_write(REG_BOOTROM_ROUTINE_ADDR,
		  reg_val | (1 << REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS));

	/* DLB config */
	ddr3_new_tip_dlb_config();

	return MV_OK;
}
1340
/*
 * Run the memory-controller init sequence (dunit and, per init_param,
 * the PHY) and then enable the dunit. Failures are reported on the
 * console but not propagated to the caller.
 */
void mv_ddr_mc_config(void)
{
	/* Memory controller initializations */
	struct init_cntr_param init_param;
	int status;

	init_param.do_mrs_phy = 1;
	init_param.is_ctrl64_bit = 0;
	init_param.init_phy = 1;
	init_param.msys_init = 1;
	status = hws_ddr3_tip_init_controller(0, &init_param);
	if (status != MV_OK)
		printf("DDR3 init controller - FAILED 0x%x\n", status);

	status = mv_ddr_mc_init();
	if (status != MV_OK)
		printf("DDR3 init_sequence - FAILED 0x%x\n", status);
}
/* function: mv_ddr_mc_init
 * enable the dunit (start the DRAM init sequence) after the init
 * controller configuration has been applied
 */
int mv_ddr_mc_init(void)
{
	CHECK_STATUS(ddr3_tip_enable_init_sequence(0));

	return MV_OK;
}
1368
/* function: ddr3_tip_configure_phy
 * configures phy and electrical parameters:
 * - ZRI/ODT calibration values for the data and control PHYs
 * - pre-emphasis disable and CMOS config
 * - per-interface/per-octet Vref and clamp settings
 * - positive-edge setup when the PHY edge attribute requires it
 */
int ddr3_tip_configure_phy(u32 dev_num)
{
	u32 if_id, phy_id;
	u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	/* ZRI calibration: 7-bit P and N values packed into one register */
	CHECK_STATUS(ddr3_tip_bus_write
		(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		PAD_ZRI_CAL_PHY_REG,
		((0x7f & g_zpri_data) << 7 | (0x7f & g_znri_data))));
	CHECK_STATUS(ddr3_tip_bus_write
		(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
		PAD_ZRI_CAL_PHY_REG,
		((0x7f & g_zpri_ctrl) << 7 | (0x7f & g_znri_ctrl))));
	/* ODT calibration: 6-bit P and N values packed into one register */
	CHECK_STATUS(ddr3_tip_bus_write
		(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		PAD_ODT_CAL_PHY_REG,
		((0x3f & g_zpodt_data) << 6 | (0x3f & g_znodt_data))));
	CHECK_STATUS(ddr3_tip_bus_write
		(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
		PAD_ODT_CAL_PHY_REG,
		((0x3f & g_zpodt_ctrl) << 6 | (0x3f & g_znodt_ctrl))));

	CHECK_STATUS(ddr3_tip_bus_write
		(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		PAD_PRE_DISABLE_PHY_REG, 0));
	CHECK_STATUS(ddr3_tip_bus_write
		(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		CMOS_CONFIG_PHY_REG, 0));
	CHECK_STATUS(ddr3_tip_bus_write
		(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
		CMOS_CONFIG_PHY_REG, 0));

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		/* check if the interface is enabled */
		VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);

		for (phy_id = 0;
			phy_id < octets_per_if_num;
			phy_id++) {
				VALIDATE_BUS_ACTIVE(tm->bus_act_mask, phy_id);
				/* Vref & clamp */
				CHECK_STATUS(ddr3_tip_bus_read_modify_write
					(dev_num, ACCESS_TYPE_UNICAST,
					if_id, phy_id, DDR_PHY_DATA,
					PAD_CFG_PHY_REG,
					((clamp_tbl[if_id] << 4) | vref_init_val),
					((0x7 << 4) | 0x7)));
				/* clamp not relevant for control */
				CHECK_STATUS(ddr3_tip_bus_read_modify_write
					(dev_num, ACCESS_TYPE_UNICAST,
					if_id, phy_id, DDR_PHY_CONTROL,
					PAD_CFG_PHY_REG, 0x4, 0x7));
		}
	}

	/* positive-edge PHY: extra data-PHY register setup */
	if (ddr3_tip_dev_attr_get(dev_num, MV_ATTR_PHY_EDGE) ==
		MV_DDR_PHY_EDGE_POSITIVE)
		CHECK_STATUS(ddr3_tip_bus_write
		(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		DDR_PHY_DATA, 0x90, 0x6002));


	return MV_OK;
}
1445
1446
/* Manual calibration hook; intentionally a no-op on this platform. */
int mv_ddr_manual_cal_do(void)
{
	return 0;
}