Linux-libre 4.17.3-gnu
librecmc/linux-libre.git: drivers/scsi/qla4xxx/ql4_83xx.c
1 /*
2  * QLogic iSCSI HBA Driver
3  * Copyright (c) 2003-2013 QLogic Corporation
4  *
5  * See LICENSE.qla4xxx for copyright and licensing details.
6  */
7
8 #include <linux/ratelimit.h>
9
10 #include "ql4_def.h"
11 #include "ql4_version.h"
12 #include "ql4_glbl.h"
13 #include "ql4_dbg.h"
14 #include "ql4_inline.h"
15
16 uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr)
17 {
18         return readl((void __iomem *)(ha->nx_pcibase + addr));
19 }
20
21 void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val)
22 {
23         writel(val, (void __iomem *)(ha->nx_pcibase + addr));
24 }
25
26 static int qla4_83xx_set_win_base(struct scsi_qla_host *ha, uint32_t addr)
27 {
28         uint32_t val;
29         int ret_val = QLA_SUCCESS;
30
31         qla4_83xx_wr_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num), addr);
32         val = qla4_83xx_rd_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num));
33         if (val != addr) {
34                 ql4_printk(KERN_ERR, ha, "%s: Failed to set register window : addr written 0x%x, read 0x%x!\n",
35                            __func__, addr, val);
36                 ret_val = QLA_ERROR;
37         }
38
39         return ret_val;
40 }
41
42 int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
43                               uint32_t *data)
44 {
45         int ret_val;
46
47         ret_val = qla4_83xx_set_win_base(ha, addr);
48
49         if (ret_val == QLA_SUCCESS) {
50                 *data = qla4_83xx_rd_reg(ha, QLA83XX_WILDCARD);
51         } else {
52                 *data = 0xffffffff;
53                 ql4_printk(KERN_ERR, ha, "%s: failed read of addr 0x%x!\n",
54                            __func__, addr);
55         }
56
57         return ret_val;
58 }
59
60 int qla4_83xx_wr_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
61                               uint32_t data)
62 {
63         int ret_val;
64
65         ret_val = qla4_83xx_set_win_base(ha, addr);
66
67         if (ret_val == QLA_SUCCESS)
68                 qla4_83xx_wr_reg(ha, QLA83XX_WILDCARD, data);
69         else
70                 ql4_printk(KERN_ERR, ha, "%s: failed write to addr 0x%x, data 0x%x\n",
71                            __func__, addr, data);
72
73         return ret_val;
74 }
75
76 static int qla4_83xx_flash_lock(struct scsi_qla_host *ha)
77 {
78         int lock_owner;
79         int timeout = 0;
80         uint32_t lock_status = 0;
81         int ret_val = QLA_SUCCESS;
82
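        /* Reading FLASH_LOCK requests the hardware flash semaphore; a non-zero
         * read is treated as the lock being granted to this function. */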
83         while (lock_status == 0) {
84                 lock_status = qla4_83xx_rd_reg(ha, QLA83XX_FLASH_LOCK);
85                 if (lock_status)
86                         break;
87
88                 if (++timeout >= QLA83XX_FLASH_LOCK_TIMEOUT / 20) {
89                         lock_owner = qla4_83xx_rd_reg(ha,
90                                                       QLA83XX_FLASH_LOCK_ID);
91                         ql4_printk(KERN_ERR, ha, "%s: flash lock by func %d failed, held by func %d\n",
92                                    __func__, ha->func_num, lock_owner);
93                         ret_val = QLA_ERROR;
94                         break;
95                 }
96                 msleep(20);
97         }
98
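        /* Record this function's number in FLASH_LOCK_ID; it is read back on a
         * subsequent lock timeout to report the lock owner. */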
99         qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, ha->func_num);
100         return ret_val;
101 }
102
103 static void qla4_83xx_flash_unlock(struct scsi_qla_host *ha)
104 {
105         /* Clear the lock owner, then read FLASH_UNLOCK, which releases the Flash lock */
106         qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, 0xFF);
107         qla4_83xx_rd_reg(ha, QLA83XX_FLASH_UNLOCK);
108 }
109
110 int qla4_83xx_flash_read_u32(struct scsi_qla_host *ha, uint32_t flash_addr,
111                              uint8_t *p_data, int u32_word_count)
112 {
113         int i;
114         uint32_t u32_word;
115         uint32_t addr = flash_addr;
116         int ret_val = QLA_SUCCESS;
117
118         ret_val = qla4_83xx_flash_lock(ha);
119         if (ret_val == QLA_ERROR)
120                 goto exit_lock_error;
121
122         if (addr & 0x03) {
123                 ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n",
124                            __func__, addr);
125                 ret_val = QLA_ERROR;
126                 goto exit_flash_read;
127         }
128
129         for (i = 0; i < u32_word_count; i++) {
130                 ret_val = qla4_83xx_wr_reg_indirect(ha,
131                                                     QLA83XX_FLASH_DIRECT_WINDOW,
132                                                     (addr & 0xFFFF0000));
133                 if (ret_val == QLA_ERROR) {
134                         ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
135                                    __func__, addr);
136                         goto exit_flash_read;
137                 }
138
139                 ret_val = qla4_83xx_rd_reg_indirect(ha,
140                                                 QLA83XX_FLASH_DIRECT_DATA(addr),
141                                                 &u32_word);
142                 if (ret_val == QLA_ERROR) {
143                         ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
144                                    __func__, addr);
145                         goto exit_flash_read;
146                 }
147
148                 *(__le32 *)p_data = le32_to_cpu(u32_word);
149                 p_data = p_data + 4;
150                 addr = addr + 4;
151         }
152
153 exit_flash_read:
154         qla4_83xx_flash_unlock(ha);
155
156 exit_lock_error:
157         return ret_val;
158 }
159
160 int qla4_83xx_lockless_flash_read_u32(struct scsi_qla_host *ha,
161                                       uint32_t flash_addr, uint8_t *p_data,
162                                       int u32_word_count)
163 {
164         uint32_t i;
165         uint32_t u32_word;
166         uint32_t flash_offset;
167         uint32_t addr = flash_addr;
168         int ret_val = QLA_SUCCESS;
169
170         flash_offset = addr & (QLA83XX_FLASH_SECTOR_SIZE - 1);
171
172         if (addr & 0x3) {
173                 ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n",
174                            __func__, addr);
175                 ret_val = QLA_ERROR;
176                 goto exit_lockless_read;
177         }
178
179         ret_val = qla4_83xx_wr_reg_indirect(ha, QLA83XX_FLASH_DIRECT_WINDOW,
180                                             addr);
181         if (ret_val == QLA_ERROR) {
182                 ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
183                            __func__, addr);
184                 goto exit_lockless_read;
185         }
186
187         /* Check if data is spread across multiple sectors  */
188         if ((flash_offset + (u32_word_count * sizeof(uint32_t))) >
189             (QLA83XX_FLASH_SECTOR_SIZE - 1)) {
190
191                 /* Multi sector read */
192                 for (i = 0; i < u32_word_count; i++) {
193                         ret_val = qla4_83xx_rd_reg_indirect(ha,
194                                                 QLA83XX_FLASH_DIRECT_DATA(addr),
195                                                 &u32_word);
196                         if (ret_val == QLA_ERROR) {
197                                 ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
198                                            __func__, addr);
199                                 goto exit_lockless_read;
200                         }
201
202                         *(__le32 *)p_data  = le32_to_cpu(u32_word);
203                         p_data = p_data + 4;
204                         addr = addr + 4;
205                         flash_offset = flash_offset + 4;
206
207                         if (flash_offset > (QLA83XX_FLASH_SECTOR_SIZE - 1)) {
208                                 /* This write is needed once for each sector */
209                                 ret_val = qla4_83xx_wr_reg_indirect(ha,
210                                                    QLA83XX_FLASH_DIRECT_WINDOW,
211                                                    addr);
212                                 if (ret_val == QLA_ERROR) {
213                                         ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
214                                                    __func__, addr);
215                                         goto exit_lockless_read;
216                                 }
217                                 flash_offset = 0;
218                         }
219                 }
220         } else {
221                 /* Single sector read */
222                 for (i = 0; i < u32_word_count; i++) {
223                         ret_val = qla4_83xx_rd_reg_indirect(ha,
224                                                 QLA83XX_FLASH_DIRECT_DATA(addr),
225                                                 &u32_word);
226                         if (ret_val == QLA_ERROR) {
227                                 ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
228                                            __func__, addr);
229                                 goto exit_lockless_read;
230                         }
231
232                         *(__le32 *)p_data = le32_to_cpu(u32_word);
233                         p_data = p_data + 4;
234                         addr = addr + 4;
235                 }
236         }
237
238 exit_lockless_read:
239         return ret_val;
240 }
241
242 void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha)
243 {
244         if (qla4_83xx_flash_lock(ha))
245                 ql4_printk(KERN_INFO, ha, "%s: Resetting rom lock\n", __func__);
246
247         /*
248          * Either we got the lock or someone else is holding it;
249          * since we are resetting, forcefully unlock.
250          */
251         qla4_83xx_flash_unlock(ha);
252 }
253
254 #define INTENT_TO_RECOVER       0x01
255 #define PROCEED_TO_RECOVER      0x02
256
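/*
 * IDC lock recovery handshake, driven through the DRV_LOCKRECOVERY register:
 * bits 0-1 carry the recovery stage (INTENT_TO_RECOVER/PROCEED_TO_RECOVER) and
 * bits 2-5 carry the number of the function performing the recovery.
 */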
257 static int qla4_83xx_lock_recovery(struct scsi_qla_host *ha)
258 {
259
260         uint32_t lock = 0, lockid;
261         int ret_val = QLA_ERROR;
262
263         lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY);
264
265         /* Check for other Recovery in progress, go wait */
266         if ((lockid & 0x3) != 0)
267                 goto exit_lock_recovery;
268
269         /* Intent to Recover */
270         ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY,
271                                    (ha->func_num << 2) | INTENT_TO_RECOVER);
272
273         msleep(200);
274
275         /* Check Intent to Recover is advertised */
276         lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY);
277         if ((lockid & 0x3C) != (ha->func_num << 2))
278                 goto exit_lock_recovery;
279
280         ql4_printk(KERN_INFO, ha, "%s: IDC Lock recovery initiated for func %d\n",
281                    __func__, ha->func_num);
282
283         /* Proceed to Recover */
284         ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY,
285                                    (ha->func_num << 2) | PROCEED_TO_RECOVER);
286
287         /* Force Unlock */
288         ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, 0xFF);
289         ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_UNLOCK);
290
291         /* Clear bits 0-5 in IDC_RECOVERY register */
292         ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY, 0);
293
294         /* Get lock */
295         lock = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK);
296         if (lock) {
297                 lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK_ID);
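                /* Bump the grab counter in bits 8-31 and record our function
                 * number in bits 0-7, as in qla4_83xx_drv_lock() */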
298                 lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->func_num;
299                 ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, lockid);
300                 ret_val = QLA_SUCCESS;
301         }
302
303 exit_lock_recovery:
304         return ret_val;
305 }
306
307 #define QLA83XX_DRV_LOCK_MSLEEP         200
308
309 int qla4_83xx_drv_lock(struct scsi_qla_host *ha)
310 {
311         int timeout = 0;
312         uint32_t status = 0;
313         int ret_val = QLA_SUCCESS;
314         uint32_t first_owner = 0;
315         uint32_t tmo_owner = 0;
316         uint32_t lock_id;
317         uint32_t func_num;
318         uint32_t lock_cnt;
319
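        /* Reading DRV_LOCK requests the IDC hardware lock; a non-zero read is
         * treated as the lock being granted to this function. */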
320         while (status == 0) {
321                 status = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK);
322                 if (status) {
323                         /* Increment Counter (8-31) and update func_num (0-7) on
324                          * getting a successful lock  */
325                         lock_id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
326                         lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->func_num;
327                         qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, lock_id);
328                         break;
329                 }
330
331                 if (timeout == 0)
332                         /* Save counter + ID of function holding the lock for
333                          * first failure */
334                         first_owner = ha->isp_ops->rd_reg_direct(ha,
335                                                           QLA83XX_DRV_LOCK_ID);
336
337                 if (++timeout >=
338                     (QLA83XX_DRV_LOCK_TIMEOUT / QLA83XX_DRV_LOCK_MSLEEP)) {
339                         tmo_owner = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
340                         func_num = tmo_owner & 0xFF;
341                         lock_cnt = tmo_owner >> 8;
342                         ql4_printk(KERN_INFO, ha, "%s: Lock by func %d failed after 2s, lock held by func %d, lock count %d, first_owner %d\n",
343                                    __func__, ha->func_num, func_num, lock_cnt,
344                                    (first_owner & 0xFF));
345
346                         if (first_owner != tmo_owner) {
347                                 /* Some other driver got lock, OR same driver
348                                  * got lock again (counter value changed), when
349                                  * we were waiting for lock.
350                                  * Retry for another 2 sec */
351                                 ql4_printk(KERN_INFO, ha, "%s: IDC lock failed for func %d\n",
352                                            __func__, ha->func_num);
353                                 timeout = 0;
354                         } else {
355                                 /* Same driver holding lock > 2sec.
356                                  * Force Recovery */
357                                 ret_val = qla4_83xx_lock_recovery(ha);
358                                 if (ret_val == QLA_SUCCESS) {
359                                         /* Recovered and got lock */
360                                         ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d successful\n",
361                                                    __func__, ha->func_num);
362                                         break;
363                                 }
364                                 /* Recovery Failed, some other function
365                                  * has the lock, wait for 2secs and retry */
366                                 ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d failed, Retrying timeout\n",
367                                            __func__, ha->func_num);
368                                 timeout = 0;
369                         }
370                 }
371                 msleep(QLA83XX_DRV_LOCK_MSLEEP);
372         }
373
374         return ret_val;
375 }
376
377 void qla4_83xx_drv_unlock(struct scsi_qla_host *ha)
378 {
379         int id;
380
381         id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
382
383         if ((id & 0xFF) != ha->func_num) {
384                 ql4_printk(KERN_ERR, ha, "%s: IDC Unlock by %d failed, lock owner is %d\n",
385                            __func__, ha->func_num, (id & 0xFF));
386                 return;
387         }
388
389         /* Keep the lock counter value, set the owner function number field to 0xFF */
390         qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, (id | 0xFF));
391         qla4_83xx_rd_reg(ha, QLA83XX_DRV_UNLOCK);
392 }
393
394 void qla4_83xx_set_idc_dontreset(struct scsi_qla_host *ha)
395 {
396         uint32_t idc_ctrl;
397
398         idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
399         idc_ctrl |= DONTRESET_BIT0;
400         qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl);
401         DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__,
402                           idc_ctrl));
403 }
404
405 void qla4_83xx_clear_idc_dontreset(struct scsi_qla_host *ha)
406 {
407         uint32_t idc_ctrl;
408
409         idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
410         idc_ctrl &= ~DONTRESET_BIT0;
411         qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl);
412         DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__,
413                           idc_ctrl));
414 }
415
416 int qla4_83xx_idc_dontreset(struct scsi_qla_host *ha)
417 {
418         uint32_t idc_ctrl;
419
420         idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
421         return idc_ctrl & DONTRESET_BIT0;
422 }
423
424 /*-------------------------IDC State Machine ---------------------*/
425
426 enum {
427         UNKNOWN_CLASS = 0,
428         NIC_CLASS,
429         FCOE_CLASS,
430         ISCSI_CLASS
431 };
432
433 struct device_info {
434         int func_num;
435         int device_type;
436         int port_num;
437 };
438
439 int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha)
440 {
441         uint32_t drv_active;
442         uint32_t dev_part, dev_part1, dev_part2;
443         int i;
444         struct device_info device_map[16];
445         int func_nibble;
446         int nibble;
447         int nic_present = 0;
448         int iscsi_present = 0;
449         int iscsi_func_low = 0;
450
451         /* Use the dev_partition register to determine the device type of each
452          * PCI function, then check the drv_active register to see which drivers are loaded */
453         dev_part1 = qla4_83xx_rd_reg(ha,
454                                      ha->reg_tbl[QLA8XXX_CRB_DEV_PART_INFO]);
455         dev_part2 = qla4_83xx_rd_reg(ha, QLA83XX_CRB_DEV_PART_INFO2);
456         drv_active = qla4_83xx_rd_reg(ha, ha->reg_tbl[QLA8XXX_CRB_DRV_ACTIVE]);
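        /* drv_active has one bit per PCI function; a set bit means that
         * function's driver is loaded. */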
457
458         /* Each function has 4 bits in dev_partition Info register,
459          * Lower 2 bits - device type, Upper 2 bits - physical port number */
460         dev_part = dev_part1;
461         for (i = nibble = 0; i <= 15; i++, nibble++) {
462                 func_nibble = dev_part & (0xF << (nibble * 4));
463                 func_nibble >>= (nibble * 4);
464                 device_map[i].func_num = i;
465                 device_map[i].device_type = func_nibble & 0x3;
466                 device_map[i].port_num = func_nibble & 0xC;
467
468                 if (device_map[i].device_type == NIC_CLASS) {
469                         if (drv_active & (1 << device_map[i].func_num)) {
470                                 nic_present++;
471                                 break;
472                         }
473                 } else if (device_map[i].device_type == ISCSI_CLASS) {
474                         if (drv_active & (1 << device_map[i].func_num)) {
475                                 if (!iscsi_present ||
476                                     (iscsi_present &&
477                                      (iscsi_func_low > device_map[i].func_num)))
478                                         iscsi_func_low = device_map[i].func_num;
479
480                                 iscsi_present++;
481                         }
482                 }
483
484                 /* For function_num[8..15] get info from dev_part2 register */
485                 if (nibble == 7) {
486                         nibble = 0;
487                         dev_part = dev_part2;
488                 }
489         }
490
491         /* Reset ownership is decided by priority among the drivers present:
492          * NIC takes precedence over iSCSI and FCoE, and iSCSI takes precedence
493          * over FCoE. */
494         if (!nic_present && (ha->func_num == iscsi_func_low)) {
495                 DEBUG2(ql4_printk(KERN_INFO, ha,
496                                   "%s: can reset - NIC not present and lower iSCSI function is %d\n",
497                                   __func__, ha->func_num));
498                 return 1;
499         }
500
501         return 0;
502 }
503
504 /**
505  * qla4_83xx_need_reset_handler - Code to start reset sequence
506  * @ha: pointer to adapter structure
507  *
508  * Note: IDC lock must be held upon entry
509  **/
510 void qla4_83xx_need_reset_handler(struct scsi_qla_host *ha)
511 {
512         uint32_t dev_state, drv_state, drv_active;
513         unsigned long reset_timeout, dev_init_timeout;
514
515         ql4_printk(KERN_INFO, ha, "%s: Performing ISP error recovery\n",
516                    __func__);
517
518         if (!test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
519                 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: reset acknowledged\n",
520                                   __func__));
521                 qla4_8xxx_set_rst_ready(ha);
522
523                 /* Non-reset owners ACK Reset and wait for device INIT state
524                  * as part of Reset Recovery by Reset Owner */
525                 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
526
527                 do {
528                         if (time_after_eq(jiffies, dev_init_timeout)) {
529                                 ql4_printk(KERN_INFO, ha, "%s: Non Reset owner dev init timeout\n",
530                                            __func__);
531                                 break;
532                         }
533
534                         ha->isp_ops->idc_unlock(ha);
535                         msleep(1000);
536                         ha->isp_ops->idc_lock(ha);
537
538                         dev_state = qla4_8xxx_rd_direct(ha,
539                                                         QLA8XXX_CRB_DEV_STATE);
540                 } while (dev_state == QLA8XXX_DEV_NEED_RESET);
541         } else {
542                 qla4_8xxx_set_rst_ready(ha);
543                 reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
544                 drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
545                 drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
546
547                 ql4_printk(KERN_INFO, ha, "%s: drv_state = 0x%x, drv_active = 0x%x\n",
548                            __func__, drv_state, drv_active);
549
550                 while (drv_state != drv_active) {
551                         if (time_after_eq(jiffies, reset_timeout)) {
552                                 ql4_printk(KERN_INFO, ha, "%s: %s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n",
553                                            __func__, DRIVER_NAME, drv_state,
554                                            drv_active);
555                                 break;
556                         }
557
558                         ha->isp_ops->idc_unlock(ha);
559                         msleep(1000);
560                         ha->isp_ops->idc_lock(ha);
561
562                         drv_state = qla4_8xxx_rd_direct(ha,
563                                                         QLA8XXX_CRB_DRV_STATE);
564                         drv_active = qla4_8xxx_rd_direct(ha,
565                                                         QLA8XXX_CRB_DRV_ACTIVE);
566                 }
567
568                 if (drv_state != drv_active) {
569                         ql4_printk(KERN_INFO, ha, "%s: Reset_owner turning off drv_active of non-acking function 0x%x\n",
570                                    __func__, (drv_active ^ drv_state));
571                         drv_active = drv_active & drv_state;
572                         qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE,
573                                             drv_active);
574                 }
575
576                 clear_bit(AF_8XXX_RST_OWNER, &ha->flags);
577                 /* Start Reset Recovery */
578                 qla4_8xxx_device_bootstrap(ha);
579         }
580 }
581
582 void qla4_83xx_get_idc_param(struct scsi_qla_host *ha)
583 {
584         uint32_t idc_params, ret_val;
585
586         ret_val = qla4_83xx_flash_read_u32(ha, QLA83XX_IDC_PARAM_ADDR,
587                                            (uint8_t *)&idc_params, 1);
588         if (ret_val == QLA_SUCCESS) {
589                 ha->nx_dev_init_timeout = idc_params & 0xFFFF;
590                 ha->nx_reset_timeout = (idc_params >> 16) & 0xFFFF;
591         } else {
592                 ha->nx_dev_init_timeout = ROM_DEV_INIT_TIMEOUT;
593                 ha->nx_reset_timeout = ROM_DRV_RESET_ACK_TIMEOUT;
594         }
595
596         DEBUG2(ql4_printk(KERN_DEBUG, ha,
597                           "%s: ha->nx_dev_init_timeout = %d, ha->nx_reset_timeout = %d\n",
598                           __func__, ha->nx_dev_init_timeout,
599                           ha->nx_reset_timeout));
600 }
601
602 /*-------------------------Reset Sequence Functions-----------------------*/
603
604 static void qla4_83xx_dump_reset_seq_hdr(struct scsi_qla_host *ha)
605 {
606         uint8_t *phdr;
607
608         if (!ha->reset_tmplt.buff) {
609                 ql4_printk(KERN_ERR, ha, "%s: Error: Invalid reset_seq_template\n",
610                            __func__);
611                 return;
612         }
613
614         phdr = ha->reset_tmplt.buff;
615
616         DEBUG2(ql4_printk(KERN_INFO, ha,
617                           "Reset Template: 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n",
618                           *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4),
619                           *(phdr+5), *(phdr+6), *(phdr+7), *(phdr + 8),
620                           *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12),
621                           *(phdr+13), *(phdr+14), *(phdr+15)));
622 }
623
624 static int qla4_83xx_copy_bootloader(struct scsi_qla_host *ha)
625 {
626         uint8_t *p_cache;
627         uint32_t src, count, size;
628         uint64_t dest;
629         int ret_val = QLA_SUCCESS;
630
631         src = QLA83XX_BOOTLOADER_FLASH_ADDR;
632         dest = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_ADDR);
633         size = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_SIZE);
634
635         /* Round size up to a 16-byte (128-bit) boundary */
636         if (size & 0xF)
637                 size = (size + 16) & ~0xF;
638
639         /* Number of 16-byte chunks */
640         count = size/16;
641
642         p_cache = vmalloc(size);
643         if (p_cache == NULL) {
644                 ql4_printk(KERN_ERR, ha, "%s: Failed to allocate memory for boot loader cache\n",
645                            __func__);
646                 ret_val = QLA_ERROR;
647                 goto exit_copy_bootloader;
648         }
649
650         ret_val = qla4_83xx_lockless_flash_read_u32(ha, src, p_cache,
651                                                     size / sizeof(uint32_t));
652         if (ret_val == QLA_ERROR) {
653                 ql4_printk(KERN_ERR, ha, "%s: Error reading firmware from flash\n",
654                            __func__);
655                 goto exit_copy_error;
656         }
657         DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Read firmware from flash\n",
658                           __func__));
659
660         /* 128 bit/16 byte write to MS memory */
661         ret_val = qla4_8xxx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache,
662                                               count);
663         if (ret_val == QLA_ERROR) {
664                 ql4_printk(KERN_ERR, ha, "%s: Error writing firmware to MS\n",
665                            __func__);
666                 goto exit_copy_error;
667         }
668
669         DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Wrote firmware size %d to MS\n",
670                           __func__, size));
671
672 exit_copy_error:
673         vfree(p_cache);
674
675 exit_copy_bootloader:
676         return ret_val;
677 }
678
679 static int qla4_83xx_check_cmd_peg_status(struct scsi_qla_host *ha)
680 {
681         uint32_t val, ret_val = QLA_ERROR;
682         int retries = CRB_CMDPEG_CHECK_RETRY_COUNT;
683
684         do {
685                 val = qla4_83xx_rd_reg(ha, QLA83XX_CMDPEG_STATE);
686                 if (val == PHAN_INITIALIZE_COMPLETE) {
687                         DEBUG2(ql4_printk(KERN_INFO, ha,
688                                           "%s: Command Peg initialization complete. State=0x%x\n",
689                                           __func__, val));
690                         ret_val = QLA_SUCCESS;
691                         break;
692                 }
693                 msleep(CRB_CMDPEG_CHECK_DELAY);
694         } while (--retries);
695
696         return ret_val;
697 }
698
699 /**
700  * qla4_83xx_poll_reg - Poll the given CRB address for up to "duration" msecs
701  * until the value read, ANDed with test_mask, equals test_result.
702  *
703  * @ha : Pointer to adapter structure
704  * @addr : CRB register address
705  * @duration : Poll for a total of "duration" msecs
706  * @test_mask : Mask applied to the value read
707  * @test_result : Expected value of (value & test_mask)
708  **/
709 static int qla4_83xx_poll_reg(struct scsi_qla_host *ha, uint32_t addr,
710                               int duration, uint32_t test_mask,
711                               uint32_t test_result)
712 {
713         uint32_t value;
714         uint8_t retries;
715         int ret_val = QLA_SUCCESS;
716
717         ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value);
718         if (ret_val == QLA_ERROR)
719                 goto exit_poll_reg;
720
721         retries = duration / 10;
722         do {
723                 if ((value & test_mask) != test_result) {
724                         msleep(duration / 10);
725                         ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value);
726                         if (ret_val == QLA_ERROR)
727                                 goto exit_poll_reg;
728
729                         ret_val = QLA_ERROR;
730                 } else {
731                         ret_val = QLA_SUCCESS;
732                         break;
733                 }
734         } while (retries--);
735
736 exit_poll_reg:
737         if (ret_val == QLA_ERROR) {
738                 ha->reset_tmplt.seq_error++;
739                 ql4_printk(KERN_ERR, ha, "%s: Poll Failed:  0x%08x 0x%08x 0x%08x\n",
740                            __func__, value, test_mask, test_result);
741         }
742
743         return ret_val;
744 }
745
746 static int qla4_83xx_reset_seq_checksum_test(struct scsi_qla_host *ha)
747 {
748         uint32_t sum =  0;
749         uint16_t *buff = (uint16_t *)ha->reset_tmplt.buff;
750         int u16_count =  ha->reset_tmplt.hdr->size / sizeof(uint16_t);
751         int ret_val;
752
753         while (u16_count-- > 0)
754                 sum += *buff++;
755
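        /* Fold carries back into the low 16 bits (one's-complement style sum) */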
756         while (sum >> 16)
757                 sum = (sum & 0xFFFF) +  (sum >> 16);
758
759         /* checksum of 0 indicates a valid template */
760         if (~sum) {
761                 ret_val = QLA_SUCCESS;
762         } else {
763                 ql4_printk(KERN_ERR, ha, "%s: Reset seq checksum failed\n",
764                            __func__);
765                 ret_val = QLA_ERROR;
766         }
767
768         return ret_val;
769 }
770
771 /**
772  * qla4_83xx_read_reset_template - Read Reset Template from Flash
773  * @ha: Pointer to adapter structure
774  **/
775 void qla4_83xx_read_reset_template(struct scsi_qla_host *ha)
776 {
777         uint8_t *p_buff;
778         uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size;
779         uint32_t ret_val;
780
781         ha->reset_tmplt.seq_error = 0;
782         ha->reset_tmplt.buff = vmalloc(QLA83XX_RESTART_TEMPLATE_SIZE);
783         if (ha->reset_tmplt.buff == NULL) {
784                 ql4_printk(KERN_ERR, ha, "%s: Failed to allocate reset template resources\n",
785                            __func__);
786                 goto exit_read_reset_template;
787         }
788
789         p_buff = ha->reset_tmplt.buff;
790         addr = QLA83XX_RESET_TEMPLATE_ADDR;
791
792         tmplt_hdr_def_size = sizeof(struct qla4_83xx_reset_template_hdr) /
793                                     sizeof(uint32_t);
794
795         DEBUG2(ql4_printk(KERN_INFO, ha,
796                           "%s: Read template hdr size %d from Flash\n",
797                           __func__, tmplt_hdr_def_size));
798
799         /* Copy template header from flash */
800         ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff,
801                                            tmplt_hdr_def_size);
802         if (ret_val != QLA_SUCCESS) {
803                 ql4_printk(KERN_ERR, ha, "%s: Failed to read reset template\n",
804                            __func__);
805                 goto exit_read_template_error;
806         }
807
808         ha->reset_tmplt.hdr =
809                 (struct qla4_83xx_reset_template_hdr *)ha->reset_tmplt.buff;
810
811         /* Validate the template header size and signature */
812         tmplt_hdr_size = ha->reset_tmplt.hdr->hdr_size/sizeof(uint32_t);
813         if ((tmplt_hdr_size != tmplt_hdr_def_size) ||
814             (ha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) {
815                 ql4_printk(KERN_ERR, ha, "%s: Template Header size %d is invalid, tmplt_hdr_def_size %d\n",
816                            __func__, tmplt_hdr_size, tmplt_hdr_def_size);
817                 goto exit_read_template_error;
818         }
819
820         addr = QLA83XX_RESET_TEMPLATE_ADDR + ha->reset_tmplt.hdr->hdr_size;
821         p_buff = ha->reset_tmplt.buff + ha->reset_tmplt.hdr->hdr_size;
822         tmplt_hdr_def_size = (ha->reset_tmplt.hdr->size -
823                               ha->reset_tmplt.hdr->hdr_size) / sizeof(uint32_t);
824
825         DEBUG2(ql4_printk(KERN_INFO, ha,
826                           "%s: Read rest of the template size %d\n",
827                           __func__, ha->reset_tmplt.hdr->size));
828
829         /* Copy rest of the template */
830         ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff,
831                                            tmplt_hdr_def_size);
832         if (ret_val != QLA_SUCCESS) {
833                 ql4_printk(KERN_ERR, ha, "%s: Failed to read reset template\n",
834                            __func__);
835                 goto exit_read_template_error;
836         }
837
838         /* Integrity check */
839         if (qla4_83xx_reset_seq_checksum_test(ha)) {
840                 ql4_printk(KERN_ERR, ha, "%s: Reset Seq checksum failed!\n",
841                            __func__);
842                 goto exit_read_template_error;
843         }
844         DEBUG2(ql4_printk(KERN_INFO, ha,
845                           "%s: Reset Seq checksum passed, Get stop, start and init seq offsets\n",
846                           __func__));
847
848         /* Get STOP, START, INIT sequence offsets */
849         ha->reset_tmplt.init_offset = ha->reset_tmplt.buff +
850                                       ha->reset_tmplt.hdr->init_seq_offset;
851         ha->reset_tmplt.start_offset = ha->reset_tmplt.buff +
852                                        ha->reset_tmplt.hdr->start_seq_offset;
853         ha->reset_tmplt.stop_offset = ha->reset_tmplt.buff +
854                                       ha->reset_tmplt.hdr->hdr_size;
855         qla4_83xx_dump_reset_seq_hdr(ha);
856
857         goto exit_read_reset_template;
858
859 exit_read_template_error:
860         vfree(ha->reset_tmplt.buff);
861
862 exit_read_reset_template:
863         return;
864 }
865
866 /**
867  * qla4_83xx_read_write_crb_reg - Read from raddr and write value to waddr.
868  *
869  * @ha : Pointer to adapter structure
870  * @raddr : CRB address to read from
871  * @waddr : CRB address to write to
872  **/
873 static void qla4_83xx_read_write_crb_reg(struct scsi_qla_host *ha,
874                                          uint32_t raddr, uint32_t waddr)
875 {
876         uint32_t value;
877
878         qla4_83xx_rd_reg_indirect(ha, raddr, &value);
879         qla4_83xx_wr_reg_indirect(ha, waddr, value);
880 }
881
882 /**
883  * qla4_83xx_rmw_crb_reg - Read Modify Write crb register
884  *
885  * This function reads the value from raddr, ANDs it with test_mask, shifts it
886  * left/right, ORs/XORs it with values from the RMW header, and writes the result to waddr.
887  *
888  * @ha : Pointer to adapter structure
889  * @raddr : CRB address to read from
890  * @waddr : CRB address to write to
891  * @p_rmw_hdr : header with shift/or/xor values.
892  **/
893 static void qla4_83xx_rmw_crb_reg(struct scsi_qla_host *ha, uint32_t raddr,
894                                   uint32_t waddr,
895                                   struct qla4_83xx_rmw *p_rmw_hdr)
896 {
897         uint32_t value;
898
899         if (p_rmw_hdr->index_a)
900                 value = ha->reset_tmplt.array[p_rmw_hdr->index_a];
901         else
902                 qla4_83xx_rd_reg_indirect(ha, raddr, &value);
903
904         value &= p_rmw_hdr->test_mask;
905         value <<= p_rmw_hdr->shl;
906         value >>= p_rmw_hdr->shr;
907         value |= p_rmw_hdr->or_value;
908         value ^= p_rmw_hdr->xor_value;
909
910         qla4_83xx_wr_reg_indirect(ha, waddr, value);
911
912         return;
913 }
914
915 static void qla4_83xx_write_list(struct scsi_qla_host *ha,
916                                  struct qla4_83xx_reset_entry_hdr *p_hdr)
917 {
918         struct qla4_83xx_entry *p_entry;
919         uint32_t i;
920
921         p_entry = (struct qla4_83xx_entry *)
922                   ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
923
924         for (i = 0; i < p_hdr->count; i++, p_entry++) {
925                 qla4_83xx_wr_reg_indirect(ha, p_entry->arg1, p_entry->arg2);
926                 if (p_hdr->delay)
927                         udelay((uint32_t)(p_hdr->delay));
928         }
929 }
930
931 static void qla4_83xx_read_write_list(struct scsi_qla_host *ha,
932                                       struct qla4_83xx_reset_entry_hdr *p_hdr)
933 {
934         struct qla4_83xx_entry *p_entry;
935         uint32_t i;
936
937         p_entry = (struct qla4_83xx_entry *)
938                   ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
939
940         for (i = 0; i < p_hdr->count; i++, p_entry++) {
941                 qla4_83xx_read_write_crb_reg(ha, p_entry->arg1, p_entry->arg2);
942                 if (p_hdr->delay)
943                         udelay((uint32_t)(p_hdr->delay));
944         }
945 }
946
947 static void qla4_83xx_poll_list(struct scsi_qla_host *ha,
948                                 struct qla4_83xx_reset_entry_hdr *p_hdr)
949 {
950         long delay;
951         struct qla4_83xx_entry *p_entry;
952         struct qla4_83xx_poll *p_poll;
953         uint32_t i;
954         uint32_t value;
955
956         p_poll = (struct qla4_83xx_poll *)
957                  ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
958
959         /* Entries start after the 8-byte qla4_83xx_poll header, which
960          * contains the test_mask and test_value. */
961         p_entry = (struct qla4_83xx_entry *)((char *)p_poll +
962                                              sizeof(struct qla4_83xx_poll));
963
964         delay = (long)p_hdr->delay;
965         if (!delay) {
966                 for (i = 0; i < p_hdr->count; i++, p_entry++) {
967                         qla4_83xx_poll_reg(ha, p_entry->arg1, delay,
968                                            p_poll->test_mask,
969                                            p_poll->test_value);
970                 }
971         } else {
972                 for (i = 0; i < p_hdr->count; i++, p_entry++) {
973                         if (qla4_83xx_poll_reg(ha, p_entry->arg1, delay,
974                                                p_poll->test_mask,
975                                                p_poll->test_value)) {
976                                 qla4_83xx_rd_reg_indirect(ha, p_entry->arg1,
977                                                           &value);
978                                 qla4_83xx_rd_reg_indirect(ha, p_entry->arg2,
979                                                           &value);
980                         }
981                 }
982         }
983 }
984
985 static void qla4_83xx_poll_write_list(struct scsi_qla_host *ha,
986                                       struct qla4_83xx_reset_entry_hdr *p_hdr)
987 {
988         long delay;
989         struct qla4_83xx_quad_entry *p_entry;
990         struct qla4_83xx_poll *p_poll;
991         uint32_t i;
992
993         p_poll = (struct qla4_83xx_poll *)
994                  ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
995         p_entry = (struct qla4_83xx_quad_entry *)
996                   ((char *)p_poll + sizeof(struct qla4_83xx_poll));
997         delay = (long)p_hdr->delay;
998
999         for (i = 0; i < p_hdr->count; i++, p_entry++) {
1000                 qla4_83xx_wr_reg_indirect(ha, p_entry->dr_addr,
1001                                           p_entry->dr_value);
1002                 qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr,
1003                                           p_entry->ar_value);
1004                 if (delay) {
1005                         if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay,
1006                                                p_poll->test_mask,
1007                                                p_poll->test_value)) {
1008                                 DEBUG2(ql4_printk(KERN_INFO, ha,
1009                                                   "%s: Timeout Error: poll list, item_num %d, entry_num %d\n",
1010                                                   __func__, i,
1011                                                   ha->reset_tmplt.seq_index));
1012                         }
1013                 }
1014         }
1015 }
1016
1017 static void qla4_83xx_read_modify_write(struct scsi_qla_host *ha,
1018                                         struct qla4_83xx_reset_entry_hdr *p_hdr)
1019 {
1020         struct qla4_83xx_entry *p_entry;
1021         struct qla4_83xx_rmw *p_rmw_hdr;
1022         uint32_t i;
1023
1024         p_rmw_hdr = (struct qla4_83xx_rmw *)
1025                     ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
1026         p_entry = (struct qla4_83xx_entry *)
1027                   ((char *)p_rmw_hdr + sizeof(struct qla4_83xx_rmw));
1028
1029         for (i = 0; i < p_hdr->count; i++, p_entry++) {
1030                 qla4_83xx_rmw_crb_reg(ha, p_entry->arg1, p_entry->arg2,
1031                                       p_rmw_hdr);
1032                 if (p_hdr->delay)
1033                         udelay((uint32_t)(p_hdr->delay));
1034         }
1035 }
1036
1037 static void qla4_83xx_pause(struct scsi_qla_host *ha,
1038                             struct qla4_83xx_reset_entry_hdr *p_hdr)
1039 {
1040         if (p_hdr->delay)
1041                 mdelay((uint32_t)((long)p_hdr->delay));
1042 }
1043
1044 static void qla4_83xx_poll_read_list(struct scsi_qla_host *ha,
1045                                      struct qla4_83xx_reset_entry_hdr *p_hdr)
1046 {
1047         long delay;
1048         int index;
1049         struct qla4_83xx_quad_entry *p_entry;
1050         struct qla4_83xx_poll *p_poll;
1051         uint32_t i;
1052         uint32_t value;
1053
1054         p_poll = (struct qla4_83xx_poll *)
1055                  ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
1056         p_entry = (struct qla4_83xx_quad_entry *)
1057                   ((char *)p_poll + sizeof(struct qla4_83xx_poll));
1058         delay = (long)p_hdr->delay;
1059
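        /* Values read below are saved in reset_tmplt.array[] so that later
         * READ_MODIFY_WRITE entries can reference them via index_a. */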
1060         for (i = 0; i < p_hdr->count; i++, p_entry++) {
1061                 qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr,
1062                                           p_entry->ar_value);
1063                 if (delay) {
1064                         if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay,
1065                                                p_poll->test_mask,
1066                                                p_poll->test_value)) {
1067                                 DEBUG2(ql4_printk(KERN_INFO, ha,
1068                                                   "%s: Timeout Error: poll list, Item_num %d, entry_num %d\n",
1069                                                   __func__, i,
1070                                                   ha->reset_tmplt.seq_index));
1071                         } else {
1072                                 index = ha->reset_tmplt.array_index;
1073                                 qla4_83xx_rd_reg_indirect(ha, p_entry->dr_addr,
1074                                                           &value);
1075                                 ha->reset_tmplt.array[index++] = value;
1076
1077                                 if (index == QLA83XX_MAX_RESET_SEQ_ENTRIES)
1078                                         ha->reset_tmplt.array_index = 1;
1079                         }
1080                 }
1081         }
1082 }
1083
1084 static void qla4_83xx_seq_end(struct scsi_qla_host *ha,
1085                               struct qla4_83xx_reset_entry_hdr *p_hdr)
1086 {
1087         ha->reset_tmplt.seq_end = 1;
1088 }
1089
1090 static void qla4_83xx_template_end(struct scsi_qla_host *ha,
1091                                    struct qla4_83xx_reset_entry_hdr *p_hdr)
1092 {
1093         ha->reset_tmplt.template_end = 1;
1094
1095         if (ha->reset_tmplt.seq_error == 0) {
1096                 DEBUG2(ql4_printk(KERN_INFO, ha,
1097                                   "%s: Reset sequence completed SUCCESSFULLY.\n",
1098                                   __func__));
1099         } else {
1100                 ql4_printk(KERN_ERR, ha, "%s: Reset sequence completed with some timeout errors.\n",
1101                            __func__);
1102         }
1103 }
1104
1105 /**
1106  * qla4_83xx_process_reset_template - Process reset template.
1107  *
1108  * Process entries in the reset template until an entry with the SEQ_END
1109  * opcode, which marks the end of template processing. Each entry carries a
1110  * Reset Entry header with the opcode/command, the size of the entry, the number
1111  * of entries in the sub-sequence, and a delay in microsecs or timeout in millisecs.
1112  *
1113  * @ha : Pointer to adapter structure
1114  * @p_buff : Common reset entry header.
1115  **/
1116 static void qla4_83xx_process_reset_template(struct scsi_qla_host *ha,
1117                                              char *p_buff)
1118 {
1119         int index, entries;
1120         struct qla4_83xx_reset_entry_hdr *p_hdr;
1121         char *p_entry = p_buff;
1122
1123         ha->reset_tmplt.seq_end = 0;
1124         ha->reset_tmplt.template_end = 0;
1125         entries = ha->reset_tmplt.hdr->entries;
1126         index = ha->reset_tmplt.seq_index;
1127
1128         for (; (!ha->reset_tmplt.seq_end) && (index  < entries); index++) {
1129
1130                 p_hdr = (struct qla4_83xx_reset_entry_hdr *)p_entry;
1131                 switch (p_hdr->cmd) {
1132                 case OPCODE_NOP:
1133                         break;
1134                 case OPCODE_WRITE_LIST:
1135                         qla4_83xx_write_list(ha, p_hdr);
1136                         break;
1137                 case OPCODE_READ_WRITE_LIST:
1138                         qla4_83xx_read_write_list(ha, p_hdr);
1139                         break;
1140                 case OPCODE_POLL_LIST:
1141                         qla4_83xx_poll_list(ha, p_hdr);
1142                         break;
1143                 case OPCODE_POLL_WRITE_LIST:
1144                         qla4_83xx_poll_write_list(ha, p_hdr);
1145                         break;
1146                 case OPCODE_READ_MODIFY_WRITE:
1147                         qla4_83xx_read_modify_write(ha, p_hdr);
1148                         break;
1149                 case OPCODE_SEQ_PAUSE:
1150                         qla4_83xx_pause(ha, p_hdr);
1151                         break;
1152                 case OPCODE_SEQ_END:
1153                         qla4_83xx_seq_end(ha, p_hdr);
1154                         break;
1155                 case OPCODE_TMPL_END:
1156                         qla4_83xx_template_end(ha, p_hdr);
1157                         break;
1158                 case OPCODE_POLL_READ_LIST:
1159                         qla4_83xx_poll_read_list(ha, p_hdr);
1160                         break;
1161                 default:
1162                         ql4_printk(KERN_ERR, ha, "%s: Unknown command ==> 0x%04x on entry = %d\n",
1163                                    __func__, p_hdr->cmd, index);
1164                         break;
1165                 }
1166
1167                 /* Set pointer to next entry in the sequence. */
1168                 p_entry += p_hdr->size;
1169         }
1170
1171         ha->reset_tmplt.seq_index = index;
1172 }
1173
1174 static void qla4_83xx_process_stop_seq(struct scsi_qla_host *ha)
1175 {
1176         ha->reset_tmplt.seq_index = 0;
1177         qla4_83xx_process_reset_template(ha, ha->reset_tmplt.stop_offset);
1178
1179         if (ha->reset_tmplt.seq_end != 1)
1180                 ql4_printk(KERN_ERR, ha, "%s: Abrupt STOP Sub-Sequence end.\n",
1181                            __func__);
1182 }
1183
1184 static void qla4_83xx_process_start_seq(struct scsi_qla_host *ha)
1185 {
1186         qla4_83xx_process_reset_template(ha, ha->reset_tmplt.start_offset);
1187
1188         if (ha->reset_tmplt.template_end != 1)
1189                 ql4_printk(KERN_ERR, ha, "%s: Abrupt START Sub-Sequence end.\n",
1190                            __func__);
1191 }
1192
1193 static void qla4_83xx_process_init_seq(struct scsi_qla_host *ha)
1194 {
1195         qla4_83xx_process_reset_template(ha, ha->reset_tmplt.init_offset);
1196
1197         if (ha->reset_tmplt.seq_end != 1)
1198                 ql4_printk(KERN_ERR, ha, "%s: Abrupt INIT Sub-Sequence end.\n",
1199                            __func__);
1200 }
1201
1202 static int qla4_83xx_restart(struct scsi_qla_host *ha)
1203 {
1204         int ret_val = QLA_SUCCESS;
1205         uint32_t idc_ctrl;
1206
1207         qla4_83xx_process_stop_seq(ha);
1208
1209         /*
1210          * Collect a minidump, unless IDC_CTRL BIT1 (graceful reset) is set;
1211          * in that case clear the bit before going to the INIT state and
1212          * skip the minidump.
1213          */
1214         idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
1215         if (idc_ctrl & GRACEFUL_RESET_BIT1) {
1216                 qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
1217                                  (idc_ctrl & ~GRACEFUL_RESET_BIT1));
1218                 ql4_printk(KERN_INFO, ha, "%s: Graceful RESET: Not collecting minidump\n",
1219                            __func__);
1220         } else {
1221                 qla4_8xxx_get_minidump(ha);
1222         }
1223
1224         qla4_83xx_process_init_seq(ha);
1225
1226         if (qla4_83xx_copy_bootloader(ha)) {
1227                 ql4_printk(KERN_ERR, ha, "%s: Copy bootloader, firmware restart failed!\n",
1228                            __func__);
1229                 ret_val = QLA_ERROR;
1230                 goto exit_restart;
1231         }
1232
1233         qla4_83xx_wr_reg(ha, QLA83XX_FW_IMAGE_VALID, QLA83XX_BOOT_FROM_FLASH);
1234         qla4_83xx_process_start_seq(ha);
1235
1236 exit_restart:
1237         return ret_val;
1238 }
1239
1240 int qla4_83xx_start_firmware(struct scsi_qla_host *ha)
1241 {
1242         int ret_val = QLA_SUCCESS;
1243
1244         ret_val = qla4_83xx_restart(ha);
1245         if (ret_val == QLA_ERROR) {
1246                 ql4_printk(KERN_ERR, ha, "%s: Restart error\n", __func__);
1247                 goto exit_start_fw;
1248         } else {
1249                 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Restart done\n",
1250                                   __func__));
1251         }
1252
1253         ret_val = qla4_83xx_check_cmd_peg_status(ha);
1254         if (ret_val == QLA_ERROR)
1255                 ql4_printk(KERN_ERR, ha, "%s: Peg not initialized\n",
1256                            __func__);
1257
1258 exit_start_fw:
1259         return ret_val;
1260 }
1261
1262 /*----------------------Interrupt Related functions ---------------------*/
1263
1264 static void qla4_83xx_disable_iocb_intrs(struct scsi_qla_host *ha)
1265 {
1266         if (test_and_clear_bit(AF_83XX_IOCB_INTR_ON, &ha->flags))
1267                 qla4_8xxx_intr_disable(ha);
1268 }
1269
1270 static void qla4_83xx_disable_mbox_intrs(struct scsi_qla_host *ha)
1271 {
1272         uint32_t mb_int, ret;
1273
1274         if (test_and_clear_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
1275                 ret = readl(&ha->qla4_83xx_reg->mbox_int);
1276                 mb_int = ret & ~INT_ENABLE_FW_MB;
1277                 writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
1278                 writel(1, &ha->qla4_83xx_reg->leg_int_mask);
1279         }
1280 }
1281
1282 void qla4_83xx_disable_intrs(struct scsi_qla_host *ha)
1283 {
1284         qla4_83xx_disable_mbox_intrs(ha);
1285         qla4_83xx_disable_iocb_intrs(ha);
1286 }
1287
1288 static void qla4_83xx_enable_iocb_intrs(struct scsi_qla_host *ha)
1289 {
1290         if (!test_bit(AF_83XX_IOCB_INTR_ON, &ha->flags)) {
1291                 qla4_8xxx_intr_enable(ha);
1292                 set_bit(AF_83XX_IOCB_INTR_ON, &ha->flags);
1293         }
1294 }
1295
1296 void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha)
1297 {
1298         uint32_t mb_int;
1299
1300         if (!test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
1301                 mb_int = INT_ENABLE_FW_MB;
1302                 writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
1303                 writel(0, &ha->qla4_83xx_reg->leg_int_mask);
1304                 set_bit(AF_83XX_MBOX_INTR_ON, &ha->flags);
1305         }
1306 }
1307
1308
1309 void qla4_83xx_enable_intrs(struct scsi_qla_host *ha)
1310 {
1311         qla4_83xx_enable_mbox_intrs(ha);
1312         qla4_83xx_enable_iocb_intrs(ha);
1313 }
1314
1315
1316 void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
1317                               int incount)
1318 {
1319         int i;
1320
1321         /* Load all mailbox registers, except mailbox 0. */
1322         for (i = 1; i < incount; i++)
1323                 writel(mbx_cmd[i], &ha->qla4_83xx_reg->mailbox_in[i]);
1324
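        /* Mailbox 0 carries the command and is written last, presumably so the
         * remaining mailbox registers are populated before the command is seen. */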
1325         writel(mbx_cmd[0], &ha->qla4_83xx_reg->mailbox_in[0]);
1326
1327         /* Set the Host Interrupt register to 1 to tell the firmware that
1328          * a mailbox command is pending. After reading the mailbox command,
1329          * the firmware clears the host interrupt register. */
1330         writel(HINT_MBX_INT_PENDING, &ha->qla4_83xx_reg->host_intr);
1331 }
1332
1333 void qla4_83xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount)
1334 {
1335         int intr_status;
1336
1337         intr_status = readl(&ha->qla4_83xx_reg->risc_intr);
1338         if (intr_status) {
1339                 ha->mbox_status_count = outcount;
1340                 ha->isp_ops->interrupt_service_routine(ha, intr_status);
1341         }
1342 }
1343
1344 /**
1345  * qla4_83xx_isp_reset - Resets ISP and aborts all outstanding commands.
1346  * @ha: pointer to host adapter structure.
1347  **/
1348 int qla4_83xx_isp_reset(struct scsi_qla_host *ha)
1349 {
1350         int rval;
1351         uint32_t dev_state;
1352
1353         ha->isp_ops->idc_lock(ha);
1354         dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
1355
1356         if (ql4xdontresethba)
1357                 qla4_83xx_set_idc_dontreset(ha);
1358
1359         if (dev_state == QLA8XXX_DEV_READY) {
1360                 /* If IDC_CTRL DONTRESETHBA_BIT0 is set, don't do reset
1361                  * recovery */
1362                 if (qla4_83xx_idc_dontreset(ha) == DONTRESET_BIT0) {
1363                         ql4_printk(KERN_ERR, ha, "%s: Reset recovery disabled\n",
1364                                    __func__);
1365                         rval = QLA_ERROR;
1366                         goto exit_isp_reset;
1367                 }
1368
1369                 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET\n",
1370                                   __func__));
1371                 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
1372                                     QLA8XXX_DEV_NEED_RESET);
1373
1374         } else {
1375                 /* If device_state is NEED_RESET, go ahead with the reset,
1376                  * irrespective of ql4xdontresethba. This allows a
1377                  * non-reset-owner to force a reset: the non-reset-owner sets
1378                  * IDC_CTRL BIT0 to prevent the reset-owner from resetting,
1379                  * and then forces a reset by setting device_state to
1380                  * NEED_RESET. */
1381                 DEBUG2(ql4_printk(KERN_INFO, ha,
1382                                   "%s: HW state already set to NEED_RESET\n",
1383                                   __func__));
1384         }
1385
1386         /* For ISP8324 and ISP8042, the reset owner is NIC, iSCSI or FCoE,
1387          * based on priority and which drivers are present. Unlike ISP8022,
1388          * the function setting NEED_RESET may not be the reset owner. */
1389         if (qla4_83xx_can_perform_reset(ha))
1390                 set_bit(AF_8XXX_RST_OWNER, &ha->flags);
1391
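             /* Run the device state handler without holding the IDC lock; the
              * lock is re-taken afterwards to clear the reset-ready bit. */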
1392         ha->isp_ops->idc_unlock(ha);
1393         rval = qla4_8xxx_device_state_handler(ha);
1394
1395         ha->isp_ops->idc_lock(ha);
1396         qla4_8xxx_clear_rst_ready(ha);
1397 exit_isp_reset:
1398         ha->isp_ops->idc_unlock(ha);
1399
1400         if (rval == QLA_SUCCESS)
1401                 clear_bit(AF_FW_RECOVERY, &ha->flags);
1402
1403         return rval;
1404 }
1405
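     /* Dump the SRE-Shim control register, per-port Rx buffer pause thresholds,
      * TC max-cell registers and per-TC Rx statistics as debug output. */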
1406 static void qla4_83xx_dump_pause_control_regs(struct scsi_qla_host *ha)
1407 {
1408         u32 val = 0, val1 = 0;
1409         int i, status = QLA_SUCCESS;
1410
1411         status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL, &val);
1412         DEBUG2(ql4_printk(KERN_INFO, ha, "SRE-Shim Ctrl:0x%x\n", val));
1413
1414         /* Port 0 Rx Buffer Pause Threshold Registers. */
1415         DEBUG2(ql4_printk(KERN_INFO, ha,
1416                 "Port 0 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
1417         for (i = 0; i < 8; i++) {
1418                 status = qla4_83xx_rd_reg_indirect(ha,
1419                                 QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4), &val);
1420                 DEBUG2(pr_info("0x%x ", val));
1421         }
1422
1423         DEBUG2(pr_info("\n"));
1424
1425         /* Port 1 Rx Buffer Pause Threshold Registers. */
1426         DEBUG2(ql4_printk(KERN_INFO, ha,
1427                 "Port 1 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
1428         for (i = 0; i < 8; i++) {
1429                 status = qla4_83xx_rd_reg_indirect(ha,
1430                                 QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4), &val);
1431                 DEBUG2(pr_info("0x%x  ", val));
1432         }
1433
1434         DEBUG2(pr_info("\n"));
1435
1436         /* Port 0 RxB Traffic Class Max Cell Registers. */
1437         DEBUG2(ql4_printk(KERN_INFO, ha,
1438                 "Port 0 RxB Traffic Class Max Cell Registers[3..0]:"));
1439         for (i = 0; i < 4; i++) {
1440                 status = qla4_83xx_rd_reg_indirect(ha,
1441                                QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4), &val);
1442                 DEBUG2(pr_info("0x%x  ", val));
1443         }
1444
1445         DEBUG2(pr_info("\n"));
1446
1447         /* Port 1 RxB Traffic Class Max Cell Registers. */
1448         DEBUG2(ql4_printk(KERN_INFO, ha,
1449                 "Port 1 RxB Traffic Class Max Cell Registers[3..0]:"));
1450         for (i = 0; i < 4; i++) {
1451                 status = qla4_83xx_rd_reg_indirect(ha,
1452                                QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4), &val);
1453                 DEBUG2(pr_info("0x%x  ", val));
1454         }
1455
1456         DEBUG2(pr_info("\n"));
1457
1458         /* Port 0 RxB Rx Traffic Class Stats. */
1459         DEBUG2(ql4_printk(KERN_INFO, ha,
1460                           "Port 0 RxB Rx Traffic Class Stats [TC7..TC0]"));
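             /* For each traffic class, program the class index into bits 31:29
              * and then read back that class's statistics. */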
1461         for (i = 7; i >= 0; i--) {
1462                 status = qla4_83xx_rd_reg_indirect(ha,
1463                                                    QLA83XX_PORT0_RXB_TC_STATS,
1464                                                    &val);
1465                 val &= ~(0x7 << 29);    /* Reset bits 29 to 31 */
1466                 qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS,
1467                                           (val | (i << 29)));
1468                 status = qla4_83xx_rd_reg_indirect(ha,
1469                                                    QLA83XX_PORT0_RXB_TC_STATS,
1470                                                    &val);
1471                 DEBUG2(pr_info("0x%x  ", val));
1472         }
1473
1474         DEBUG2(pr_info("\n"));
1475
1476         /* Port 1 RxB Rx Traffic Class Stats. */
1477         DEBUG2(ql4_printk(KERN_INFO, ha,
1478                           "Port 1 RxB Rx Traffic Class Stats [TC7..TC0]"));
1479         for (i = 7; i >= 0; i--) {
1480                 status = qla4_83xx_rd_reg_indirect(ha,
1481                                                    QLA83XX_PORT1_RXB_TC_STATS,
1482                                                    &val);
1483                 val &= ~(0x7 << 29);    /* Reset bits 29 to 31 */
1484                 qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS,
1485                                           (val | (i << 29)));
1486                 status = qla4_83xx_rd_reg_indirect(ha,
1487                                                    QLA83XX_PORT1_RXB_TC_STATS,
1488                                                    &val);
1489                 DEBUG2(pr_info("0x%x  ", val));
1490         }
1491
1492         DEBUG2(pr_info("\n"));
1493
1494         status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS,
1495                                            &val);
1496         status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS,
1497                                            &val1);
1498
1499         DEBUG2(ql4_printk(KERN_INFO, ha,
1500                           "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n",
1501                           val, val1));
1502 }
1503
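     /* Program the SRE-Shim control register, the per-TC Rx buffer pause
      * thresholds and TC max-cell registers on both ports, and the port 2/3
      * IFB pause thresholds, with the "pause disabled" values. */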
1504 static void __qla4_83xx_disable_pause(struct scsi_qla_host *ha)
1505 {
1506         int i;
1507
1508         /* set SRE-Shim Control Register */
1509         qla4_83xx_wr_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL,
1510                                   QLA83XX_SET_PAUSE_VAL);
1511
1512         for (i = 0; i < 8; i++) {
1513                 /* Port 0 Rx Buffer Pause Threshold Registers. */
1514                 qla4_83xx_wr_reg_indirect(ha,
1515                                       QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4),
1516                                       QLA83XX_SET_PAUSE_VAL);
1517                 /* Port 1 Rx Buffer Pause Threshold Registers. */
1518                 qla4_83xx_wr_reg_indirect(ha,
1519                                       QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4),
1520                                       QLA83XX_SET_PAUSE_VAL);
1521         }
1522
1523         for (i = 0; i < 4; i++) {
1524                 /* Port 0 RxB Traffic Class Max Cell Registers. */
1525                 qla4_83xx_wr_reg_indirect(ha,
1526                                      QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4),
1527                                      QLA83XX_SET_TC_MAX_CELL_VAL);
1528                 /* Port 1 RxB Traffic Class Max Cell Registers. */
1529                 qla4_83xx_wr_reg_indirect(ha,
1530                                      QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4),
1531                                      QLA83XX_SET_TC_MAX_CELL_VAL);
1532         }
1533
1534         qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS,
1535                                   QLA83XX_SET_PAUSE_VAL);
1536         qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS,
1537                                   QLA83XX_SET_PAUSE_VAL);
1538
1539         ql4_printk(KERN_INFO, ha, "Disabled pause frames successfully.\n");
1540 }
1541
1542 /**
1543  * qla4_83xx_eport_init - Initialize EPort.
1544  * @ha: Pointer to host adapter structure.
1545  *
1546  * If the EPort hardware is still in reset when pause is disabled, serious
1547  * hardware wedging issues can occur. To prevent this, perform EPort init
1548  * every time before disabling pause frames.
1549  **/
1550 static void qla4_83xx_eport_init(struct scsi_qla_host *ha)
1551 {
1552         /* Clear the 8 registers */
1553         qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_REG, 0x0);
1554         qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT0, 0x0);
1555         qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT1, 0x0);
1556         qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT2, 0x0);
1557         qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT3, 0x0);
1558         qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_SRE_SHIM, 0x0);
1559         qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_EPG_SHIM, 0x0);
1560         qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_ETHER_PCS, 0x0);
1561
1562         /* Write any value to Reset Control register */
1563         qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_CONTROL, 0xFF);
1564
1565         ql4_printk(KERN_INFO, ha, "EPORT is out of reset.\n");
1566 }
1567
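     /**
      * qla4_83xx_disable_pause - Disable pause frames.
      * @ha: Pointer to host adapter structure.
      *
      * Bring the EPort out of reset, dump the current pause-control registers
      * for debugging, and program the pause thresholds so that pause frames
      * are disabled. Runs under the IDC lock.
      **/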
1568 void qla4_83xx_disable_pause(struct scsi_qla_host *ha)
1569 {
1570         ha->isp_ops->idc_lock(ha);
1571         /* Before disabling pause frames, ensure that eport is not in reset */
1572         qla4_83xx_eport_init(ha);
1573         qla4_83xx_dump_pause_control_regs(ha);
1574         __qla4_83xx_disable_pause(ha);
1575         ha->isp_ops->idc_unlock(ha);
1576 }
1577
1578 /**
1579  * qla4_83xx_is_detached - Check if we are marked invisible.
1580  * qla4_83xx_is_detached - Check if this function is marked inactive (detached).
1581  **/
1582 int qla4_83xx_is_detached(struct scsi_qla_host *ha)
1583 {
1584         uint32_t drv_active;
1585
1586         drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
1587
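             /* The function is considered detached when initialization has
              * completed but our function bit is no longer set in DRV_ACTIVE. */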
1588         if (test_bit(AF_INIT_DONE, &ha->flags) &&
1589             !(drv_active & (1 << ha->func_num))) {
1590                 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: drv_active = 0x%X\n",
1591                                   __func__, drv_active));
1592                 return QLA_SUCCESS;
1593         }
1594
1595         return QLA_ERROR;
1596 }