/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2016 Microsemi Corporation
 *    Copyright 2014-2015 PMC-Sierra, Inc.
 *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/scsi_dbg.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"
/*
 * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
 * with an optional trailing '-' followed by a byte value (0-255).
 */
#define HPSA_DRIVER_VERSION "3.4.16-0"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
67 /* How long to wait for CISS doorbell communication */
68 #define CLEAR_EVENT_WAIT_INTERVAL 20 /* ms for each msleep() call */
69 #define MODE_CHANGE_WAIT_INTERVAL 10 /* ms for each msleep() call */
70 #define MAX_CLEAR_EVENT_WAIT 30000 /* times 20 ms = 600 s */
71 #define MAX_MODE_CHANGE_WAIT 2000 /* times 10 ms = 20 s */
72 #define MAX_IOCTL_CONFIG_WAIT 1000
74 /*define how many times we will try a command because of bus resets */
75 #define MAX_CMD_RETRIES 3
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");
static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");
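/*
 * Both parameters are writable by root at runtime (S_IWUSR); for
 * example, assuming the standard module sysfs layout, something like
 * "echo 1 > /sys/module/hpsa/parameters/hpsa_simple_mode" changes the
 * value, taking effect for controllers initialized afterwards.
 */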
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3350},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1921},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1924},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1926},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1928},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1929},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CE},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
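/*
 * MODULE_DEVICE_TABLE() exports the table above as module alias
 * information so udev/modprobe can auto-load hpsa when a matching PCI
 * device appears; whether an otherwise-unknown board matched only by
 * the generic storage-RAID class entry is actually claimed is decided
 * later against hpsa_allow_any.
 */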
/*  board_id = Subsystem Device ID &amp; Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array P244br", &SA5_access},
	{0x21BE103C, "Smart Array P741m", &SA5_access},
	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
	{0x21C0103C, "Smart Array P440ar", &SA5_access},
	{0x21C1103C, "Smart Array P840ar", &SA5_access},
	{0x21C2103C, "Smart Array P440", &SA5_access},
	{0x21C3103C, "Smart Array P441", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array P841", &SA5_access},
	{0x21C6103C, "Smart HBA H244br", &SA5_access},
	{0x21C7103C, "Smart HBA H240", &SA5_access},
	{0x21C8103C, "Smart HBA H241", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array P246br", &SA5_access},
	{0x21CB103C, "Smart Array P840", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart HBA", &SA5_access},
	{0x05809005, "SmartHBA-SA", &SA5_access},
	{0x05819005, "SmartHBA-SA 8i", &SA5_access},
	{0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
	{0x05839005, "SmartHBA-SA 8e", &SA5_access},
	{0x05849005, "SmartHBA-SA 16i", &SA5_access},
	{0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
static struct scsi_transport_template *hpsa_sas_transport_template;
static int hpsa_add_sas_host(struct ctlr_info *h);
static void hpsa_delete_sas_host(struct ctlr_info *h);
static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
			struct hpsa_scsi_dev_t *device);
static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device);
static struct hpsa_scsi_dev_t
	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
		struct sas_rphy *rphy);

#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
static const struct scsi_cmnd hpsa_cmd_busy;
#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
static const struct scsi_cmnd hpsa_cmd_idle;
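/*
 * hpsa_cmd_busy and hpsa_cmd_idle are used only for their addresses:
 * they act as sentinel values for c->scsi_cmd, so state tests such as
 * hpsa_is_cmd_idle() below reduce to a plain pointer comparison.
 */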
static int number_of_controllers;
static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
	void __user *arg);
#endif
static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
					    struct scsi_cmnd *scmd);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)
#define HPSA_SIMPLE_ERROR_BITS 0x03
static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_free_performant_mode(struct ctlr_info *h);
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
static u32 lockup_detected(struct ctlr_info *h);
static int detect_controller_lockup(struct ctlr_info *h);
static void hpsa_disable_rld_caching(struct ctlr_info *h);
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
	struct ReportExtendedLUNdata *buf, int bufsize);
static bool hpsa_vpd_page_supported(struct ctlr_info *h,
	unsigned char scsi3addr[], u8 page);
static int hpsa_luns_changed(struct ctlr_info *h);
static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
			       struct hpsa_scsi_dev_t *dev,
			       unsigned char *scsi3addr);
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}
static inline bool hpsa_is_cmd_idle(struct CommandList *c)
{
	return c->scsi_cmd == SCSI_CMD_IDLE;
}

static inline bool hpsa_is_pending_event(struct CommandList *c)
{
	return c->abort_pending || c->reset_pending;
}
/* extract sense key, asc, and ascq from sense data.  -1 means invalid. */
static void decode_sense_data(const u8 *sense_data, int sense_data_len,
			u8 *sense_key, u8 *asc, u8 *ascq)
{
	struct scsi_sense_hdr sshdr;
	bool rc;

	*sense_key = -1;
	*asc = -1;
	*ascq = -1;

	if (sense_data_len < 1)
		return;

	rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
	if (rc) {
		*sense_key = sshdr.sense_key;
		*asc = sshdr.asc;
		*ascq = sshdr.ascq;
	}
}
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	u8 sense_key, asc, ascq;
	int sense_len;

	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;

	decode_sense_data(c->err_info->SenseInfo, sense_len,
				&sense_key, &asc, &ascq);
	if (sense_key != UNIT_ATTENTION || asc == 0xff)
		return 0;

	switch (asc) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: a state change detected, command retried\n",
			h->devname);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			"%s: LUN failure detected\n", h->devname);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: report LUN data changed\n", h->devname);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev,
			"%s: a power on or device reset detected\n",
			h->devname);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev,
			"%s: unit attention cleared by another initiator\n",
			h->devname);
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: unknown unit attention detected\n",
			h->devname);
		break;
	}
	return 1;
}
static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}
static u32 lockup_detected(struct ctlr_info *h);
static ssize_t host_show_lockup_detected(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ld;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	ld = lockup_detected(h);

	return sprintf(buf, "ld=%d\n", ld);
}
static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}
static ssize_t host_store_raid_offload_debug(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}
static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}
static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}
static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
			atomic_read(&h->commands_outstanding));
}
static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}
static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ?  "enabled" : "disabled");
}
/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};
/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};
static u32 needs_abort_tags_swizzled[] = {
	0x323D103C, /* Smart Array P700m */
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
};
static int board_id_in_array(u32 a[], int nelems, u32 board_id)
{
	int i;

	for (i = 0; i < nelems; i++)
		if (a[i] == board_id)
			return 1;
	return 0;
}
static int ctlr_is_hard_resettable(u32 board_id)
{
	return !board_id_in_array(unresettable_controller,
			ARRAY_SIZE(unresettable_controller), board_id);
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	return !board_id_in_array(soft_unresettable_controller,
			ARRAY_SIZE(soft_unresettable_controller), board_id);
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static int ctlr_needs_abort_tags_swizzled(u32 board_id)
{
	return board_id_in_array(needs_abort_tags_swizzled,
			ARRAY_SIZE(needs_abort_tags_swizzled), board_id);
}
static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}
static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN", "PHYS DRV"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2)
#define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1)
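/*
 * The HPSA_RAID_* values above index raid_label[]: for example,
 * raid_label[HPSA_RAID_6] is "6", while the two sentinel indices
 * RAID_UNKNOWN and PHYSICAL_DRIVE pick out "UNKNOWN" and "PHYS DRV"
 * at the end of the array.
 */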
static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
{
	return !device->physical_device;
}
static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_device(hdev)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}
static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}
static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}
static ssize_t sas_address_show(struct device *dev,
	      struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	u64 sas_address;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev || is_logical_device(hdev) || !hdev->expose_device) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	sas_address = hdev->sas_address;
	spin_unlock_irqrestore(&h->lock, flags);

	return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address);
}
static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}
static ssize_t path_info_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int i;
	int output_len = 0;
	u8 box;
	u8 bay;
	u8 path_map_index = 0;
	char *active;
	unsigned char phys_connector[2];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->devlock, flags);
		return -ENODEV;
	}

	bay = hdev->bay;
	for (i = 0; i < MAX_PATHS; i++) {
		path_map_index = 1<<i;
		if (i == hdev->active_path_index)
			active = "Active";
		else if (hdev->path_map & path_map_index)
			active = "Inactive";
		else
			continue;

		output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"[%d:%d:%d:%d] %20.20s ",
				h->scsi_host->host_no,
				hdev->bus, hdev->target, hdev->lun,
				scsi_device_type(hdev->devtype));

		if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) {
			output_len += scnprintf(buf + output_len,
						PAGE_SIZE - output_len,
						"%s\n", active);
			continue;
		}

		box = hdev->box[i];
		memcpy(&phys_connector, &hdev->phys_connector[i],
			sizeof(phys_connector));
		if (phys_connector[0] < '0')
			phys_connector[0] = '0';
		if (phys_connector[1] < '0')
			phys_connector[1] = '0';
		output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"PORT: %.2s ",
				phys_connector);
		if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) &&
			hdev->expose_device) {
			if (box == 0 || box == 0xFF) {
				output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BAY: %hhu %s\n",
					bay, active);
			} else {
				output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BOX: %hhu BAY: %hhu %s\n",
					box, bay, active);
			}
		} else if (box != 0 && box != 0xFF) {
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len, "BOX: %hhu %s\n",
				box, active);
		} else
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len, "%s\n", active);
	}

	spin_unlock_irqrestore(&h->devlock, flags);
	return output_len;
}
static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(sas_address, S_IRUGO, sas_address_show, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
		host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);
static DEVICE_ATTR(lockup_detected, S_IRUGO,
	host_show_lockup_detected, NULL);
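/*
 * The host attributes above appear under /sys/class/scsi_host/host<N>/
 * and the device attributes under the corresponding scsi_device sysfs
 * directory; e.g. writing to the "rescan" host attribute kicks off
 * hpsa_scan_start() for that controller.
 */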
static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	&dev_attr_path_info,
	&dev_attr_sas_address,
	NULL,
};
static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	&dev_attr_lockup_detected,
	NULL,
};

#define HPSA_NRESERVED_CMDS	(HPSA_CMDS_RESERVED_FOR_ABORTS + \
	HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS)
static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_abort_handler	= hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_configure	= hpsa_slave_configure,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs = hpsa_sdev_attrs,
	.shost_attrs = hpsa_shost_attrs,
	.max_sectors = 8192,
	.no_write_same = 1,
};
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}
/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */

/*
 * set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * table entry.
 */
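/*
 * Worked example (normal performant mode): for a command with
 * c->Header.SGList == 3 and h->blockFetchTable[3] == 2,
 * set_performant_mode() below computes c->busaddr |= 1 | (2 << 1),
 * i.e. bit 0 set for performant mode and the block fetch entry in
 * bits 1-3, with the command type bits 4-6 left at zero.
 */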
#define DEFAULT_REPLY_QUEUE (-1)
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
					int reply_queue)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (unlikely(!h->msix_vector))
			return;
		if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
		else
			c->Header.ReplyQueue = reply_queue % h->nreply_queues;
	}
}
static void set_ioaccel1_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	else
		cp->ReplyQueue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}
static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
		&h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->reply_queue = smp_processor_id() % h->nreply_queues;
	else
		cp->reply_queue = reply_queue % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= h->ioaccel2_blockFetchTable[0];
}
static void set_ioaccel2_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->reply_queue = smp_processor_id() % h->nreply_queues;
	else
		cp->reply_queue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}
static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}
/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
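/*
 * In other words, while a firmware flash is in flight the lockup
 * detector samples the heartbeat only every 240 seconds instead of the
 * usual 30, so a temporarily stalled heartbeat register is not misread
 * as a locked-up controller.
 */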
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}
static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c, int reply_queue)
{
	dial_down_lockup_detection_during_fw_flash(h, c);
	atomic_inc(&h->commands_outstanding);
	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	case IOACCEL2_TMF:
		set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	default:
		set_performant_mode(h, c, reply_queue);
		h->access.submit_command(h, c);
	}
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
{
	if (unlikely(hpsa_is_pending_event(c)))
		return finish_cmd(c);

	__enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
}
static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}
static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}
static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev, char *description)
{
#define LABEL_SIZE 25
	char label[LABEL_SIZE];

	if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
		return;

	switch (dev->devtype) {
	case TYPE_RAID:
		snprintf(label, LABEL_SIZE, "controller");
		break;
	case TYPE_ENCLOSURE:
		snprintf(label, LABEL_SIZE, "enclosure");
		break;
	case TYPE_DISK:
	case TYPE_ZBC:
		if (dev->external)
			snprintf(label, LABEL_SIZE, "external");
		else if (!is_logical_dev_addr_mode(dev->scsi3addr))
			snprintf(label, LABEL_SIZE, "%s",
				raid_label[PHYSICAL_DRIVE]);
		else
			snprintf(label, LABEL_SIZE, "RAID-%s",
				dev->raid_level > RAID_UNKNOWN ? "?" :
				raid_label[dev->raid_level]);
		break;
	case TYPE_ROM:
		snprintf(label, LABEL_SIZE, "rom");
		break;
	case TYPE_TAPE:
		snprintf(label, LABEL_SIZE, "tape");
		break;
	case TYPE_MEDIUM_CHANGER:
		snprintf(label, LABEL_SIZE, "changer");
		break;
	default:
		snprintf(label, LABEL_SIZE, "UNKNOWN");
		break;
	}

	dev_printk(level, &h->pdev->dev,
			"scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
			description,
			scsi_device_type(dev->devtype),
			dev->vendor,
			dev->model,
			label,
			dev->offload_config ? '+' : '-',
			dev->offload_enabled ? '+' : '-',
			dev->expose_device);
}
/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device a non-zero lun of a multi-lun device
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4 and 5.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	addr1[5] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		addr2[5] = 0;
		/* differ only in byte 4 and 5? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, device,
		device->expose_device ? "added" : "masked");
	device->offload_to_be_enabled = device->offload_enabled;
	device->offload_enabled = 0;
	return 0;
}
/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	int offload_enabled;
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/* Raid offload parameters changed.  Careful about the ordering. */
	if (new_entry->offload_config && new_entry->offload_enabled) {
		/*
		 * if drive is newly offload_enabled, we want to copy the
		 * raid map data first.  If previously offload_enabled and
		 * offload_config were set, raid map data had better be
		 * the same as it was before.  if raid map data is changed
		 * then it had better be the case that
		 * h->dev[entry]->offload_enabled is currently 0.
		 */
		h->dev[entry]->raid_map = new_entry->raid_map;
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
	}
	if (new_entry->hba_ioaccel_enabled) {
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
		wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
	}
	h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->queue_depth = new_entry->queue_depth;

	/*
	 * We can turn off ioaccel offload now, but need to delay turning
	 * it on until we can update h->dev[entry]->phys_disk[], but we
	 * can't do that until all the devices are updated.
	 */
	h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
	if (!new_entry->offload_enabled)
		h->dev[entry]->offload_enabled = 0;

	offload_enabled = h->dev[entry]->offload_enabled;
	h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
	hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
	h->dev[entry]->offload_enabled = offload_enabled;
}
/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
	new_entry->offload_to_be_enabled = new_entry->offload_enabled;
	new_entry->offload_enabled = 0;
}
/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
}
#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])
static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}
static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}
static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_enabled != dev2->offload_enabled)
		return 1;
	if (!is_logical_dev_addr_mode(dev1->scsi3addr))
		if (dev1->queue_depth != dev2->queue_depth)
			return 1;
	return 0;
}
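/*
 * Note the nesting above: a queue_depth difference only counts as an
 * update for devices that are not in logical device address mode.
 */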
/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	if (needle == NULL)
		return DEVICE_NOT_FOUND;

	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				/* Keep offline devices offline */
				if (needle->volume_offline)
					return DEVICE_NOT_FOUND;
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}
static void hpsa_monitor_offline_device(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	struct offline_device_entry *device;
	unsigned long flags;

	/* Check to see if device is already on the list */
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_entry(device, &h->offline_device_list, offline_list) {
		if (memcmp(device->scsi3addr, scsi3addr,
			sizeof(device->scsi3addr)) == 0) {
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);

	/* Device is not on the list, add it. */
	device = kmalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
		return;
	}
	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_add_tail(&device->offline_list, &h->offline_device_list);
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
}
/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd)
{
	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
	switch (sd->volume_offline) {
	case HPSA_LV_OK:
		break;
	case HPSA_LV_UNDERGOING_ERASE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_NOT_AVAILABLE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_NO_KEY:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	}
}
/*
 * Figure the list of physical drive pointers for a logical drive with
 * raid offload configured.
 */
static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices,
				struct hpsa_scsi_dev_t *logical_drive)
{
	struct raid_map_data *map = &logical_drive->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int i, j;
	int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
				le16_to_cpu(map->metadata_disks_per_row);
	int nraid_map_entries = le16_to_cpu(map->row_cnt) *
				le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int nphys_disk = le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int qdepth;

	if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
		nraid_map_entries = RAID_MAP_MAX_ENTRIES;

	logical_drive->nphysical_disks = nraid_map_entries;

	qdepth = 0;
	for (i = 0; i < nraid_map_entries; i++) {
		logical_drive->phys_disk[i] = NULL;
		if (!logical_drive->offload_config)
			continue;
		for (j = 0; j < ndevices; j++) {
			if (dev[j] == NULL)
				continue;
			if (dev[j]->devtype != TYPE_DISK &&
			    dev[j]->devtype != TYPE_ZBC)
				continue;
			if (is_logical_device(dev[j]))
				continue;
			if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
				continue;

			logical_drive->phys_disk[i] = dev[j];
			if (i < nphys_disk)
				qdepth = min(h->nr_cmds, qdepth +
				    logical_drive->phys_disk[i]->queue_depth);
			break;
		}

		/*
		 * This can happen if a physical drive is removed and
		 * the logical drive is degraded. In that case, the RAID
		 * map data will refer to a physical disk which isn't actually
		 * present. And in that case offload_enabled should already
		 * be 0, but we'll turn it off here just in case
		 */
		if (!logical_drive->phys_disk[i]) {
			logical_drive->offload_enabled = 0;
			logical_drive->offload_to_be_enabled = 0;
			logical_drive->queue_depth = 8;
		}
	}
	if (nraid_map_entries)
		/*
		 * This is correct for reads, too high for full stripe writes,
		 * way too high for partial stripe writes
		 */
		logical_drive->queue_depth = qdepth;
	else
		logical_drive->queue_depth = h->nr_cmds;
}
static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices)
{
	int i;

	for (i = 0; i < ndevices; i++) {
		if (dev[i] == NULL)
			continue;
		if (dev[i]->devtype != TYPE_DISK &&
		    dev[i]->devtype != TYPE_ZBC)
			continue;
		if (!is_logical_device(dev[i]))
			continue;

		/*
		 * If offload is currently enabled, the RAID map and
		 * phys_disk[] assignment *better* not be changing
		 * and since it isn't changing, we do not need to
		 * update it.
		 */
		if (dev[i]->offload_enabled)
			continue;

		hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
	}
}
static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
	int rc = 0;

	if (!h->scsi_host)
		return 1;

	if (is_logical_device(device)) /* RAID */
		rc = scsi_add_device(h->scsi_host, device->bus,
					device->target, device->lun);
	else /* HBA */
		rc = hpsa_add_sas_device(h->sas_host, device);

	return rc;
}
static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h,
						struct hpsa_scsi_dev_t *dev)
{
	int i;
	int count = 0;

	for (i = 0; i < h->nr_cmds; i++) {
		struct CommandList *c = h->cmd_pool + i;
		int refcount = atomic_inc_return(&c->refcount);

		if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev,
				dev->scsi3addr)) {
			unsigned long flags;

			spin_lock_irqsave(&h->lock, flags);	/* Implied MB */
			if (!hpsa_is_cmd_idle(c))
				++count;
			spin_unlock_irqrestore(&h->lock, flags);
		}

		cmd_free(h, c);
	}

	return count;
}
static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
						struct hpsa_scsi_dev_t *device)
{
	int cmds = 0;
	int waits = 0;

	while (1) {
		cmds = hpsa_find_outstanding_commands_for_dev(h, device);
		if (cmds == 0)
			break;
		if (++waits > 20)
			break;
		dev_warn(&h->pdev->dev,
			"%s: removing device with %d outstanding commands!\n",
			__func__, cmds);
		msleep(1000);
	}
}
static void hpsa_remove_device(struct ctlr_info *h,
			struct hpsa_scsi_dev_t *device)
{
	struct scsi_device *sdev = NULL;

	if (!h->scsi_host)
		return;

	if (is_logical_device(device)) { /* RAID */
		sdev = scsi_device_lookup(h->scsi_host, device->bus,
						device->target, device->lun);
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/*
			 * We don't expect to get here.  Future commands
			 * to this device will get a selection timeout as
			 * if the device were gone.
			 */
			hpsa_show_dev_msg(KERN_WARNING, h, device,
					"didn't find device for removal.");
		}
	} else { /* HBA */

		device->removed = 1;
		hpsa_wait_for_outstanding_commands_for_dev(h, device);

		hpsa_remove_sas_device(device);
	}
}
1828 static void adjust_hpsa_scsi_table(struct ctlr_info *h,
1829 struct hpsa_scsi_dev_t *sd[], int nsds)
1831 /* sd contains scsi3 addresses and devtypes, and inquiry
1832 * data. This function takes what's in sd to be the current
1833 * reality and updates h->dev[] to reflect that reality.
1835 int i, entry, device_change, changes = 0;
1836 struct hpsa_scsi_dev_t *csd;
1837 unsigned long flags;
1838 struct hpsa_scsi_dev_t **added, **removed;
1839 int nadded, nremoved;
1842 * A reset can cause a device status to change
1843 * re-schedule the scan to see what happened.
1845 if (h->reset_in_progress) {
1846 h->drv_req_rescan = 1;
1850 added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
1851 removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
1853 if (!added || !removed) {
1854 dev_warn(&h->pdev->dev, "out of memory in "
1855 "adjust_hpsa_scsi_table\n");
1859 spin_lock_irqsave(&h->devlock, flags);
1861 /* find any devices in h->dev[] that are not in
1862 * sd[] and remove them from h->dev[], and for any
1863 * devices which have changed, remove the old device
1864 * info and add the new device info.
1865 * If minor device attributes change, just update
1866 * the existing device structure.
1871 while (i < h->ndevices) {
1873 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1874 if (device_change == DEVICE_NOT_FOUND) {
1876 hpsa_scsi_remove_entry(h, i, removed, &nremoved);
1877 continue; /* remove ^^^, hence i not incremented */
1878 } else if (device_change == DEVICE_CHANGED) {
1880 hpsa_scsi_replace_entry(h, i, sd[entry],
1881 added, &nadded, removed, &nremoved);
1882 /* Set it to NULL to prevent it from being freed
1883 * at the bottom of hpsa_update_scsi_devices()
1886 } else if (device_change == DEVICE_UPDATED) {
1887 hpsa_scsi_update_entry(h, i, sd[entry]);
1892 /* Now, make sure every device listed in sd[] is also
1893 * listed in h->dev[], adding them if they aren't found
1896 for (i = 0; i < nsds; i++) {
1897 if (!sd[i]) /* if already added above. */
1900 /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
1901 * as the SCSI mid-layer does not handle such devices well.
1902 * It relentlessly loops sending TUR at 3Hz, then READ(10)
1903 * at 160Hz, and prevents the system from coming up.
1905 if (sd[i]->volume_offline) {
1906 hpsa_show_volume_status(h, sd[i]);
1907 hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
1911 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
1912 h->ndevices, &entry);
1913 if (device_change == DEVICE_NOT_FOUND) {
1915 if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
1917 sd[i] = NULL; /* prevent from being freed later. */
1918 } else if (device_change == DEVICE_CHANGED) {
1919 /* should never happen... */
1921 dev_warn(&h->pdev->dev,
1922 "device unexpectedly changed.\n");
1923 /* but if it does happen, we just ignore that device */
1926 hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
1928 /* Now that h->dev[]->phys_disk[] is coherent, we can enable
1929 * any logical drives that need it enabled.
1931 for (i = 0; i < h->ndevices; i++) {
1932 if (h->dev[i] == NULL)
1934 h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
1937 spin_unlock_irqrestore(&h->devlock, flags);
1939 /* Monitor devices which are in one of several NOT READY states to be
1940 * brought online later. This must be done without holding h->devlock,
1941 * so don't touch h->dev[]
1943 for (i = 0; i < nsds; i++) {
1944 if (!sd[i]) /* if already added above. */
1946 if (sd[i]->volume_offline)
1947 hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
1950 /* Don't notify scsi mid layer of any changes the first time through
1951 * (or if there are no changes) scsi_scan_host will do it later the
1952 * first time through.
1957 /* Notify scsi mid layer of any removed devices */
1958 for (i = 0; i < nremoved; i++) {
1959 if (removed[i] == NULL)
1961 if (removed[i]->expose_device)
1962 hpsa_remove_device(h, removed[i]);
1967 /* Notify scsi mid layer of any added devices */
1968 for (i = 0; i < nadded; i++) {
1971 if (added[i] == NULL)
1973 if (!(added[i]->expose_device))
1975 rc = hpsa_add_device(h, added[i]);
1978 dev_warn(&h->pdev->dev,
1979 "addition failed %d, device not added.", rc);
1980 /* now we have to remove it from h->dev,
1981 * since it didn't get added to scsi mid layer
1983 fixup_botched_add(h, added[i]);
1984 h->drv_req_rescan = 1;
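
/*
 * The shape of the reconcile above, as a sketch (the helper names here
 * are hypothetical, not driver functions): classify every old entry,
 * then add the new ones, deferring the midlayer notifications -- which
 * may sleep -- until h->devlock has been dropped.
 */
#if 0	/* example, not compiled */
	spin_lock_irqsave(&h->devlock, flags);
	classify_old_entries(h, sd, nsds, added, removed); /* hypothetical */
	add_new_entries(h, sd, nsds, added);               /* hypothetical */
	spin_unlock_irqrestore(&h->devlock, flags);
	notify_scsi_midlayer(h, added, removed);           /* may sleep */
#endif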
1993 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
1994 * Assumes h->devlock is held.
1996 static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
1997 int bus, int target, int lun)
2000 struct hpsa_scsi_dev_t *sd;
2002 for (i = 0; i < h->ndevices; i++) {
2004 if (sd->bus == bus && sd->target == target && sd->lun == lun)
2010 static int hpsa_slave_alloc(struct scsi_device *sdev)
2012 struct hpsa_scsi_dev_t *sd = NULL;
2013 unsigned long flags;
2014 struct ctlr_info *h;
2016 h = sdev_to_hba(sdev);
2017 spin_lock_irqsave(&h->devlock, flags);
2018 if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) {
2019 struct scsi_target *starget;
2020 struct sas_rphy *rphy;
2022 starget = scsi_target(sdev);
2023 rphy = target_to_rphy(starget);
2024 sd = hpsa_find_device_by_sas_rphy(h, rphy);
2026 sd->target = sdev_id(sdev);
2027 sd->lun = sdev->lun;
2031 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
2032 sdev_id(sdev), sdev->lun);
2034 if (sd && sd->expose_device) {
2035 atomic_set(&sd->ioaccel_cmds_out, 0);
2036 sdev->hostdata = sd;
2038 sdev->hostdata = NULL;
2039 spin_unlock_irqrestore(&h->devlock, flags);
2043 /* configure scsi device based on internal per-device structure */
2044 static int hpsa_slave_configure(struct scsi_device *sdev)
2046 struct hpsa_scsi_dev_t *sd;
2049 sd = sdev->hostdata;
2050 sdev->no_uld_attach = !sd || !sd->expose_device;
2053 queue_depth = sd->queue_depth != 0 ?
2054 sd->queue_depth : sdev->host->can_queue;
2056 queue_depth = sdev->host->can_queue;
2058 scsi_change_queue_depth(sdev, queue_depth);
2063 static void hpsa_slave_destroy(struct scsi_device *sdev)
2065 /* nothing to do. */
2068 static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2072 if (!h->ioaccel2_cmd_sg_list)
2074 for (i = 0; i < h->nr_cmds; i++) {
2075 kfree(h->ioaccel2_cmd_sg_list[i]);
2076 h->ioaccel2_cmd_sg_list[i] = NULL;
2078 kfree(h->ioaccel2_cmd_sg_list);
2079 h->ioaccel2_cmd_sg_list = NULL;
2082 static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2086 if (h->chainsize <= 0)
2089 h->ioaccel2_cmd_sg_list =
2090 kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds,
2092 if (!h->ioaccel2_cmd_sg_list)
2094 for (i = 0; i < h->nr_cmds; i++) {
2095 h->ioaccel2_cmd_sg_list[i] =
2096 kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) *
2097 h->maxsgentries, GFP_KERNEL);
2098 if (!h->ioaccel2_cmd_sg_list[i])
2104 hpsa_free_ioaccel2_sg_chain_blocks(h);
2108 static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
2112 if (!h->cmd_sg_list)
2114 for (i = 0; i < h->nr_cmds; i++) {
2115 kfree(h->cmd_sg_list[i]);
2116 h->cmd_sg_list[i] = NULL;
2118 kfree(h->cmd_sg_list);
2119 h->cmd_sg_list = NULL;
2122 static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
2126 if (h->chainsize <= 0)
2129 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
2131 if (!h->cmd_sg_list) {
2132 dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
2135 for (i = 0; i < h->nr_cmds; i++) {
2136 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
2137 h->chainsize, GFP_KERNEL);
2138 if (!h->cmd_sg_list[i]) {
2139 dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
2146 hpsa_free_sg_chain_blocks(h);
2150 static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
2151 struct io_accel2_cmd *cp, struct CommandList *c)
2153 struct ioaccel2_sg_element *chain_block;
2157 chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
2158 chain_size = le32_to_cpu(cp->sg[0].length);
2159 temp64 = pci_map_single(h->pdev, chain_block, chain_size,
2161 if (dma_mapping_error(&h->pdev->dev, temp64)) {
2162 /* prevent subsequent unmapping */
2163 cp->sg->address = 0;
2166 cp->sg->address = cpu_to_le64(temp64);
2170 static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
2171 struct io_accel2_cmd *cp)
2173 struct ioaccel2_sg_element *chain_sg;
2178 temp64 = le64_to_cpu(chain_sg->address);
2179 chain_size = le32_to_cpu(cp->sg[0].length);
2180 pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
2183 static int hpsa_map_sg_chain_block(struct ctlr_info *h,
2184 struct CommandList *c)
2186 struct SGDescriptor *chain_sg, *chain_block;
2190 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2191 chain_block = h->cmd_sg_list[c->cmdindex];
2192 chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
2193 chain_len = sizeof(*chain_sg) *
2194 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
2195 chain_sg->Len = cpu_to_le32(chain_len);
2196 temp64 = pci_map_single(h->pdev, chain_block, chain_len,
2198 if (dma_mapping_error(&h->pdev->dev, temp64)) {
2199 /* prevent subsequent unmapping */
2200 chain_sg->Addr = cpu_to_le64(0);
2203 chain_sg->Addr = cpu_to_le64(temp64);
2207 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
2208 struct CommandList *c)
2210 struct SGDescriptor *chain_sg;
2212 if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
2215 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2216 pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
2217 le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
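
/*
 * Worked example for the chaining math above: with 16-byte SG
 * descriptors and h->max_cmd_sg_entries == 32, a command carrying
 * SGTotal == 100 descriptors keeps the first 31 inline, turns entry 31
 * into the HPSA_SG_CHAIN pointer, and maps a chain block of
 * chain_len = 16 * (100 - 32) = 1088 bytes for the remainder.
 * (The 16-byte size is sizeof(struct SGDescriptor) as assumed here;
 * the 32-entry limit is just an example value.)
 */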
2221 /* Decode the various types of errors on ioaccel2 path.
2222 * Return 1 for any error that should generate a RAID path retry.
2223 * Return 0 for errors that don't require a RAID path retry.
2225 static int handle_ioaccel_mode2_error(struct ctlr_info *h,
2226 struct CommandList *c,
2227 struct scsi_cmnd *cmd,
2228 struct io_accel2_cmd *c2,
2229 struct hpsa_scsi_dev_t *dev)
2233 u32 ioaccel2_resid = 0;
2235 switch (c2->error_data.serv_response) {
2236 case IOACCEL2_SERV_RESPONSE_COMPLETE:
2237 switch (c2->error_data.status) {
2238 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
2240 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
2241 cmd->result |= SAM_STAT_CHECK_CONDITION;
2242 if (c2->error_data.data_present !=
2243 IOACCEL2_SENSE_DATA_PRESENT) {
2244 memset(cmd->sense_buffer, 0,
2245 SCSI_SENSE_BUFFERSIZE);
2248 /* copy the sense data */
2249 data_len = c2->error_data.sense_data_len;
2250 if (data_len > SCSI_SENSE_BUFFERSIZE)
2251 data_len = SCSI_SENSE_BUFFERSIZE;
2252 if (data_len > sizeof(c2->error_data.sense_data_buff))
2254 sizeof(c2->error_data.sense_data_buff);
2255 memcpy(cmd->sense_buffer,
2256 c2->error_data.sense_data_buff, data_len);
2259 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
2262 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
2265 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
2268 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
2276 case IOACCEL2_SERV_RESPONSE_FAILURE:
2277 switch (c2->error_data.status) {
2278 case IOACCEL2_STATUS_SR_IO_ERROR:
2279 case IOACCEL2_STATUS_SR_IO_ABORTED:
2280 case IOACCEL2_STATUS_SR_OVERRUN:
2283 case IOACCEL2_STATUS_SR_UNDERRUN:
2284 cmd->result = (DID_OK << 16); /* host byte */
2285 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
2286 ioaccel2_resid = get_unaligned_le32(
2287 &c2->error_data.resid_cnt[0]);
2288 scsi_set_resid(cmd, ioaccel2_resid);
2290 case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
2291 case IOACCEL2_STATUS_SR_INVALID_DEVICE:
2292 case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
2294 * Did an HBA disk disappear? We will eventually
2295 * get a state change event from the controller but
2296 * in the meantime, we need to tell the OS that the
2297 * HBA disk is no longer there and stop I/O
2298 * from going down. This allows the potential re-insert
2299 * of the disk to get the same device node.
2301 if (dev->physical_device && dev->expose_device) {
2302 cmd->result = DID_NO_CONNECT << 16;
2304 h->drv_req_rescan = 1;
2305 dev_warn(&h->pdev->dev,
2306 "%s: device is gone!\n", __func__);
2309 * Retry by sending down the RAID path.
2310 * We will get an event from ctlr to
2311 * trigger rescan regardless.
2319 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
2321 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
2323 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
2326 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
2333 return retry; /* retry on raid path? */
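
/*
 * The sense-copy above clamps the length to the smaller of the
 * midlayer sense buffer and the ioaccel2 sense buffer.  The kernel's
 * min3() can express the same clamp in one step (equivalent sketch,
 * not a behavioral change):
 */
#if 0	/* example, not compiled */
	data_len = min3((u32)c2->error_data.sense_data_len,
			(u32)SCSI_SENSE_BUFFERSIZE,
			(u32)sizeof(c2->error_data.sense_data_buff));
	memcpy(cmd->sense_buffer, c2->error_data.sense_data_buff, data_len);
#endif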
2336 static void hpsa_cmd_resolve_events(struct ctlr_info *h,
2337 struct CommandList *c)
2339 bool do_wake = false;
2342 * Prevent the following race in the abort handler:
2344 * 1. LLD is requested to abort a SCSI command
2345 * 2. The SCSI command completes
2346 * 3. The struct CommandList associated with step 2 is made available
2347 * 4. New I/O request to LLD to another LUN re-uses struct CommandList
2348 * 5. Abort handler follows scsi_cmnd->host_scribble and
2349 * finds struct CommandList and tries to abort it
2350 * Now we have aborted the wrong command.
2352 * Reset c->scsi_cmd here so that the abort or reset handler will know
2353 * this command has completed. Then, check to see if the handler is
2354 * waiting for this command, and, if so, wake it.
2356 c->scsi_cmd = SCSI_CMD_IDLE;
2357 mb(); /* Declare command idle before checking for pending events. */
2358 if (c->abort_pending) {
2360 c->abort_pending = false;
2362 if (c->reset_pending) {
2363 unsigned long flags;
2364 struct hpsa_scsi_dev_t *dev;
2367 * There appears to be a reset pending; lock the lock and
2368 * reconfirm. If so, then decrement the count of outstanding
2369 * commands and wake the reset command if this is the last one.
2371 spin_lock_irqsave(&h->lock, flags);
2372 dev = c->reset_pending; /* Re-fetch under the lock. */
2373 if (dev && atomic_dec_and_test(&dev->reset_cmds_out))
2375 c->reset_pending = NULL;
2376 spin_unlock_irqrestore(&h->lock, flags);
2380 wake_up_all(&h->event_sync_wait_queue);
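
/*
 * Sketch of the ordering contract above (illustration only): the
 * completer publishes "idle" before checking for waiters, while the
 * reset path publishes its claim before checking for idleness, so at
 * least one side always observes the other.
 */
#if 0	/* example, not compiled */
	/* completing side (this function) */
	c->scsi_cmd = SCSI_CMD_IDLE;
	mb();				/* publish idle ... */
	if (c->reset_pending)		/* ... then look for a waiter */
		do_wake = true;

	/* claiming side (see hpsa_do_reset below) */
	c->reset_pending = dev;
	spin_lock_irqsave(&h->lock, flags);	/* implied barrier */
	if (!hpsa_is_cmd_idle(c))
		atomic_inc(&dev->reset_cmds_out);
	spin_unlock_irqrestore(&h->lock, flags);
#endif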
2383 static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
2384 struct CommandList *c)
2386 hpsa_cmd_resolve_events(h, c);
2387 cmd_tagged_free(h, c);
2390 static void hpsa_cmd_free_and_done(struct ctlr_info *h,
2391 struct CommandList *c, struct scsi_cmnd *cmd)
2393 hpsa_cmd_resolve_and_free(h, c);
2394 if (cmd && cmd->scsi_done)
2395 cmd->scsi_done(cmd);
2398 static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
2400 INIT_WORK(&c->work, hpsa_command_resubmit_worker);
2401 queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
2404 static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd)
2406 cmd->result = DID_ABORT << 16;
2409 static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c,
2410 struct scsi_cmnd *cmd)
2412 hpsa_set_scsi_cmd_aborted(cmd);
2413 dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
2414 c->Request.CDB, c->err_info->ScsiStatus);
2415 hpsa_cmd_resolve_and_free(h, c);
2418 static void process_ioaccel2_completion(struct ctlr_info *h,
2419 struct CommandList *c, struct scsi_cmnd *cmd,
2420 struct hpsa_scsi_dev_t *dev)
2422 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2424 /* check for good status */
2425 if (likely(c2->error_data.serv_response == 0 &&
2426 c2->error_data.status == 0))
2427 return hpsa_cmd_free_and_done(h, c, cmd);
2430 * Any RAID offload error results in retry which will use
2431 * the normal I/O path so the controller can handle whatever's
2434 if (is_logical_device(dev) &&
2435 c2->error_data.serv_response ==
2436 IOACCEL2_SERV_RESPONSE_FAILURE) {
2437 if (c2->error_data.status ==
2438 IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
2439 dev->offload_enabled = 0;
2440 dev->offload_to_be_enabled = 0;
2443 return hpsa_retry_cmd(h, c);
2446 if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev))
2447 return hpsa_retry_cmd(h, c);
2449 return hpsa_cmd_free_and_done(h, c, cmd);
2452 /* Returns 0 on success, < 0 otherwise. */
2453 static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
2454 struct CommandList *cp)
2456 u8 tmf_status = cp->err_info->ScsiStatus;
2458 switch (tmf_status) {
2459 case CISS_TMF_COMPLETE:
2461 * CISS_TMF_COMPLETE never happens, instead,
2462 * ei->CommandStatus == 0 for this case.
2464 case CISS_TMF_SUCCESS:
2466 case CISS_TMF_INVALID_FRAME:
2467 case CISS_TMF_NOT_SUPPORTED:
2468 case CISS_TMF_FAILED:
2469 case CISS_TMF_WRONG_LUN:
2470 case CISS_TMF_OVERLAPPED_TAG:
2473 dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
2480 static void complete_scsi_command(struct CommandList *cp)
2482 struct scsi_cmnd *cmd;
2483 struct ctlr_info *h;
2484 struct ErrorInfo *ei;
2485 struct hpsa_scsi_dev_t *dev;
2486 struct io_accel2_cmd *c2;
2489 u8 asc; /* additional sense code */
2490 u8 ascq; /* additional sense code qualifier */
2491 unsigned long sense_data_size;
2498 cmd->result = DID_NO_CONNECT << 16;
2499 return hpsa_cmd_free_and_done(h, cp, cmd);
2502 dev = cmd->device->hostdata;
2504 cmd->result = DID_NO_CONNECT << 16;
2505 return hpsa_cmd_free_and_done(h, cp, cmd);
2507 c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
2509 scsi_dma_unmap(cmd); /* undo the DMA mappings */
2510 if ((cp->cmd_type == CMD_SCSI) &&
2511 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
2512 hpsa_unmap_sg_chain_block(h, cp);
2514 if ((cp->cmd_type == CMD_IOACCEL2) &&
2515 (c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
2516 hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
2518 cmd->result = (DID_OK << 16); /* host byte */
2519 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
2521 if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) {
2522 if (dev->physical_device && dev->expose_device &&
2524 cmd->result = DID_NO_CONNECT << 16;
2525 return hpsa_cmd_free_and_done(h, cp, cmd);
2527 if (likely(cp->phys_disk != NULL))
2528 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
2532 * We check for lockup status here as it may be set for
2533 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
2534 * fail_all_outstanding_cmds()
2536 if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
2537 /* DID_NO_CONNECT will prevent a retry */
2538 cmd->result = DID_NO_CONNECT << 16;
2539 return hpsa_cmd_free_and_done(h, cp, cmd);
2542 if ((unlikely(hpsa_is_pending_event(cp)))) {
2543 if (cp->reset_pending)
2544 return hpsa_cmd_resolve_and_free(h, cp);
2545 if (cp->abort_pending)
2546 return hpsa_cmd_abort_and_free(h, cp, cmd);
2549 if (cp->cmd_type == CMD_IOACCEL2)
2550 return process_ioaccel2_completion(h, cp, cmd, dev);
2552 scsi_set_resid(cmd, ei->ResidualCnt);
2553 if (ei->CommandStatus == 0)
2554 return hpsa_cmd_free_and_done(h, cp, cmd);
2556 /* For I/O accelerator commands, copy over some fields to the normal
2557 * CISS header used below for error handling.
2559 if (cp->cmd_type == CMD_IOACCEL1) {
2560 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
2561 cp->Header.SGList = scsi_sg_count(cmd);
2562 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
2563 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
2564 IOACCEL1_IOFLAGS_CDBLEN_MASK;
2565 cp->Header.tag = c->tag;
2566 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
2567 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
2569 /* Any RAID offload error results in retry which will use
2570 * the normal I/O path so the controller can handle whatever's
2573 if (is_logical_device(dev)) {
2574 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
2575 dev->offload_enabled = 0;
2576 return hpsa_retry_cmd(h, cp);
2580 /* an error has occurred */
2581 switch (ei->CommandStatus) {
2583 case CMD_TARGET_STATUS:
2584 cmd->result |= ei->ScsiStatus;
2585 /* copy the sense data */
2586 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
2587 sense_data_size = SCSI_SENSE_BUFFERSIZE;
2589 sense_data_size = sizeof(ei->SenseInfo);
2590 if (ei->SenseLen < sense_data_size)
2591 sense_data_size = ei->SenseLen;
2592 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2594 decode_sense_data(ei->SenseInfo, sense_data_size,
2595 &sense_key, &asc, &ascq);
2596 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
2597 if (sense_key == ABORTED_COMMAND) {
2598 cmd->result |= DID_SOFT_ERROR << 16;
2603 /* Problem was not a check condition
2604 * Pass it up to the upper layers...
2606 if (ei->ScsiStatus) {
2607 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
2608 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
2609 "Returning result: 0x%x\n",
2611 sense_key, asc, ascq,
2613 } else { /* scsi status is zero??? How??? */
2614 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
2615 "Returning no connection.\n", cp),
2617 /* Ordinarily, this case should never happen,
2618 * but there is a bug in some released firmware
2619 * revisions that allows it to happen if, for
2620 * example, a 4100 backplane loses power and
2621 * the tape drive is in it. We assume that
2622 * it's a fatal error of some kind because we
2623 * can't show that it wasn't. We will make it
2624 * look like selection timeout since that is
2625 * the most common reason for this to occur,
2626 * and it's severe enough.
2629 cmd->result = DID_NO_CONNECT << 16;
2633 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2635 case CMD_DATA_OVERRUN:
2636 dev_warn(&h->pdev->dev,
2637 "CDB %16phN data overrun\n", cp->Request.CDB);
2640 /* print_bytes(cp, sizeof(*cp), 1, 0); */
2642 /* We get CMD_INVALID if you address a non-existent device
2643 * instead of a selection timeout (no response). You will
2644 * see this if you yank out a drive, then try to access it.
2645 * This is kind of a shame because it means that any other
2646 * CMD_INVALID (e.g. driver bug) will get interpreted as a
2647 * missing target. */
2648 cmd->result = DID_NO_CONNECT << 16;
2651 case CMD_PROTOCOL_ERR:
2652 cmd->result = DID_ERROR << 16;
2653 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2656 case CMD_HARDWARE_ERR:
2657 cmd->result = DID_ERROR << 16;
2658 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2661 case CMD_CONNECTION_LOST:
2662 cmd->result = DID_ERROR << 16;
2663 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2667 /* Return now to avoid calling scsi_done(). */
2668 return hpsa_cmd_abort_and_free(h, cp, cmd);
2669 case CMD_ABORT_FAILED:
2670 cmd->result = DID_ERROR << 16;
2671 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2674 case CMD_UNSOLICITED_ABORT:
2675 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
2676 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2680 cmd->result = DID_TIME_OUT << 16;
2681 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2684 case CMD_UNABORTABLE:
2685 cmd->result = DID_ERROR << 16;
2686 dev_warn(&h->pdev->dev, "Command unabortable\n");
2688 case CMD_TMF_STATUS:
2689 if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
2690 cmd->result = DID_ERROR << 16;
2692 case CMD_IOACCEL_DISABLED:
2693 /* This only handles the direct pass-through case since RAID
2694 * offload is handled above. Just attempt a retry.
2696 cmd->result = DID_SOFT_ERROR << 16;
2697 dev_warn(&h->pdev->dev,
2698 "cp %p had HP SSD Smart Path error\n", cp);
2701 cmd->result = DID_ERROR << 16;
2702 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2703 cp, ei->CommandStatus);
2706 return hpsa_cmd_free_and_done(h, cp, cmd);
2709 static void hpsa_pci_unmap(struct pci_dev *pdev,
2710 struct CommandList *c, int sg_used, int data_direction)
2714 for (i = 0; i < sg_used; i++)
2715 pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
2716 le32_to_cpu(c->SG[i].Len),
2720 static int hpsa_map_one(struct pci_dev *pdev,
2721 struct CommandList *cp,
2728 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
2729 cp->Header.SGList = 0;
2730 cp->Header.SGTotal = cpu_to_le16(0);
2734 addr64 = pci_map_single(pdev, buf, buflen, data_direction);
2735 if (dma_mapping_error(&pdev->dev, addr64)) {
2736 /* Prevent subsequent unmap of something never mapped */
2737 cp->Header.SGList = 0;
2738 cp->Header.SGTotal = cpu_to_le16(0);
2741 cp->SG[0].Addr = cpu_to_le64(addr64);
2742 cp->SG[0].Len = cpu_to_le32(buflen);
2743 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
2744 cp->Header.SGList = 1; /* no. SGs contig in this cmd */
2745 cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
2749 #define NO_TIMEOUT ((unsigned long) -1)
2750 #define DEFAULT_TIMEOUT 30000 /* milliseconds */
2751 static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2752 struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
2754 DECLARE_COMPLETION_ONSTACK(wait);
2757 __enqueue_cmd_and_start_io(h, c, reply_queue);
2758 if (timeout_msecs == NO_TIMEOUT) {
2759 /* TODO: get rid of this no-timeout thing */
2760 wait_for_completion_io(&wait);
2763 if (!wait_for_completion_io_timeout(&wait,
2764 msecs_to_jiffies(timeout_msecs))) {
2765 dev_warn(&h->pdev->dev, "Command timed out.\n");
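
/*
 * The wait above is the stock completion-with-timeout idiom; condensed
 * into a self-contained sketch (the helper name is hypothetical):
 */
#if 0	/* example, not compiled */
static int hpsa_wait_done_or_timeout(struct completion *wait,
				     unsigned long timeout_msecs)
{
	if (timeout_msecs == NO_TIMEOUT) {
		wait_for_completion_io(wait);
		return 0;
	}
	/* wait_for_completion_io_timeout() returns 0 on timeout */
	if (!wait_for_completion_io_timeout(wait,
				msecs_to_jiffies(timeout_msecs)))
		return -ETIMEDOUT;
	return 0;
}
#endif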
2771 static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2772 int reply_queue, unsigned long timeout_msecs)
2774 if (unlikely(lockup_detected(h))) {
2775 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2778 return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
2781 static u32 lockup_detected(struct ctlr_info *h)
2784 u32 rc, *lockup_detected;
2787 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2788 rc = *lockup_detected;
2793 #define MAX_DRIVER_CMD_RETRIES 25
2794 static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2795 struct CommandList *c, int data_direction, unsigned long timeout_msecs)
2797 int backoff_time = 10, retry_count = 0;
2801 memset(c->err_info, 0, sizeof(*c->err_info));
2802 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2807 if (retry_count > 3) {
2808 msleep(backoff_time);
2809 if (backoff_time < 1000)
2812 } while ((check_for_unit_attention(h, c) ||
2813 check_for_busy(h, c)) &&
2814 retry_count <= MAX_DRIVER_CMD_RETRIES);
2815 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2816 if (retry_count > MAX_DRIVER_CMD_RETRIES)
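
/*
 * Retry pacing sketch for the loop above (this assumes the elided
 * update doubles backoff_time): the first three retries are immediate,
 * after which sleeps run 10, 20, 40, ... ms, clamped at 1000 ms per
 * attempt.
 */
#if 0	/* example, not compiled */
	if (retry_count > 3) {
		msleep(backoff_time);
		if (backoff_time < 1000)
			backoff_time *= 2;	/* assumed growth step */
	}
#endif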
2821 static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2822 struct CommandList *c)
2824 const u8 *cdb = c->Request.CDB;
2825 const u8 *lun = c->Header.LUN.LunAddrBytes;
2827 dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
2828 txt, lun, cdb);
2837 static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2838 struct CommandList *cp)
2840 const struct ErrorInfo *ei = cp->err_info;
2841 struct device *d = &cp->h->pdev->dev;
2842 u8 sense_key, asc, ascq;
2845 switch (ei->CommandStatus) {
2846 case CMD_TARGET_STATUS:
2847 if (ei->SenseLen > sizeof(ei->SenseInfo))
2848 sense_len = sizeof(ei->SenseInfo);
2850 sense_len = ei->SenseLen;
2851 decode_sense_data(ei->SenseInfo, sense_len,
2852 &sense_key, &asc, &ascq);
2853 hpsa_print_cmd(h, "SCSI status", cp);
2854 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2855 dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
2856 sense_key, asc, ascq);
2858 dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
2859 if (ei->ScsiStatus == 0)
2860 dev_warn(d, "SCSI status is abnormally zero. "
2861 "(probably indicates selection timeout "
2862 "reported incorrectly due to a known "
2863 "firmware bug, circa July, 2001.)\n");
2865 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2867 case CMD_DATA_OVERRUN:
2868 hpsa_print_cmd(h, "overrun condition", cp);
2871 /* controller unfortunately reports SCSI passthru's
2872 * to non-existent targets as invalid commands.
2874 hpsa_print_cmd(h, "invalid command", cp);
2875 dev_warn(d, "probably means device no longer present\n");
2878 case CMD_PROTOCOL_ERR:
2879 hpsa_print_cmd(h, "protocol error", cp);
2881 case CMD_HARDWARE_ERR:
2882 hpsa_print_cmd(h, "hardware error", cp);
2884 case CMD_CONNECTION_LOST:
2885 hpsa_print_cmd(h, "connection lost", cp);
2888 hpsa_print_cmd(h, "aborted", cp);
2890 case CMD_ABORT_FAILED:
2891 hpsa_print_cmd(h, "abort failed", cp);
2893 case CMD_UNSOLICITED_ABORT:
2894 hpsa_print_cmd(h, "unsolicited abort", cp);
2897 hpsa_print_cmd(h, "timed out", cp);
2899 case CMD_UNABORTABLE:
2900 hpsa_print_cmd(h, "unabortable", cp);
2902 case CMD_CTLR_LOCKUP:
2903 hpsa_print_cmd(h, "controller lockup detected", cp);
2906 hpsa_print_cmd(h, "unknown status", cp);
2907 dev_warn(d, "Unknown command status %x\n",
2912 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
2913 u16 page, unsigned char *buf,
2914 unsigned char bufsize)
2917 struct CommandList *c;
2918 struct ErrorInfo *ei;
2922 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
2923 page, scsi3addr, TYPE_CMD)) {
2927 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2928 PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
2932 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2933 hpsa_scsi_interpret_error(h, c);
2941 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2942 u8 reset_type, int reply_queue)
2945 struct CommandList *c;
2946 struct ErrorInfo *ei;
2951 /* fill_cmd can't fail here, no data buffer to map. */
2952 (void) fill_cmd(c, reset_type, h, NULL, 0, 0,
2953 scsi3addr, TYPE_MSG);
2954 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
2956 dev_warn(&h->pdev->dev, "Failed to send reset command\n");
2959 /* no unmap needed here because no data xfer. */
2962 if (ei->CommandStatus != 0) {
2963 hpsa_scsi_interpret_error(h, c);
2971 static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
2972 struct hpsa_scsi_dev_t *dev,
2973 unsigned char *scsi3addr)
2977 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2978 struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
2980 if (hpsa_is_cmd_idle(c))
2983 switch (c->cmd_type) {
2985 case CMD_IOCTL_PEND:
2986 match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
2987 sizeof(c->Header.LUN.LunAddrBytes));
2992 if (c->phys_disk == dev) {
2993 /* HBA mode match */
2996 /* Possible RAID mode -- check each phys dev. */
2997 /* FIXME: Do we need to take out a lock here? If
2998 * so, we could just call hpsa_get_pdisk_of_ioaccel2()
3000 for (i = 0; i < dev->nphysical_disks && !match; i++) {
3001 /* FIXME: an alternate test might be
3003 * match = dev->phys_disk[i]->ioaccel_handle
3004 * == c2->scsi_nexus; */
3005 match = dev->phys_disk[i] == c->phys_disk;
3011 for (i = 0; i < dev->nphysical_disks && !match; i++) {
3012 match = dev->phys_disk[i]->ioaccel_handle ==
3013 le32_to_cpu(ac->it_nexus);
3017 case 0: /* The command is in the middle of being initialized. */
3022 dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
3030 static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
3031 unsigned char *scsi3addr, u8 reset_type, int reply_queue)
3036 /* We can really only handle one reset at a time */
3037 if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
3038 dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
3042 BUG_ON(atomic_read(&dev->reset_cmds_out) != 0);
3044 for (i = 0; i < h->nr_cmds; i++) {
3045 struct CommandList *c = h->cmd_pool + i;
3046 int refcount = atomic_inc_return(&c->refcount);
3048 if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) {
3049 unsigned long flags;
3052 * Mark the target command as having a reset pending,
3053 * then lock a lock so that the command cannot complete
3054 * while we're considering it. If the command is not
3055 * idle then count it; otherwise revoke the event.
3057 c->reset_pending = dev;
3058 spin_lock_irqsave(&h->lock, flags); /* Implied MB */
3059 if (!hpsa_is_cmd_idle(c))
3060 atomic_inc(&dev->reset_cmds_out);
3062 c->reset_pending = NULL;
3063 spin_unlock_irqrestore(&h->lock, flags);
3069 rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue);
3071 wait_event(h->event_sync_wait_queue,
3072 atomic_read(&dev->reset_cmds_out) == 0 ||
3073 lockup_detected(h));
3075 if (unlikely(lockup_detected(h))) {
3076 dev_warn(&h->pdev->dev,
3077 "Controller lockup detected during reset wait\n");
3082 atomic_set(&dev->reset_cmds_out, 0);
3084 mutex_unlock(&h->reset_mutex);
3088 static void hpsa_get_raid_level(struct ctlr_info *h,
3089 unsigned char *scsi3addr, unsigned char *raid_level)
3094 *raid_level = RAID_UNKNOWN;
3095 buf = kzalloc(64, GFP_KERNEL);
3099 if (!hpsa_vpd_page_supported(h, scsi3addr,
3100 HPSA_VPD_LV_DEVICE_GEOMETRY))
3103 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
3104 HPSA_VPD_LV_DEVICE_GEOMETRY, buf, 64);
3107 *raid_level = buf[8];
3108 if (*raid_level > RAID_UNKNOWN)
3109 *raid_level = RAID_UNKNOWN;
3115 #define HPSA_MAP_DEBUG
3116 #ifdef HPSA_MAP_DEBUG
3117 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
3118 struct raid_map_data *map_buff)
3120 struct raid_map_disk_data *dd = &map_buff->data[0];
3122 u16 map_cnt, row_cnt, disks_per_row;
3127 /* Show details only if debugging has been activated. */
3128 if (h->raid_offload_debug < 2)
3131 dev_info(&h->pdev->dev, "structure_size = %u\n",
3132 le32_to_cpu(map_buff->structure_size));
3133 dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
3134 le32_to_cpu(map_buff->volume_blk_size));
3135 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
3136 le64_to_cpu(map_buff->volume_blk_cnt));
3137 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
3138 map_buff->phys_blk_shift);
3139 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
3140 map_buff->parity_rotation_shift);
3141 dev_info(&h->pdev->dev, "strip_size = %u\n",
3142 le16_to_cpu(map_buff->strip_size));
3143 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
3144 le64_to_cpu(map_buff->disk_starting_blk));
3145 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
3146 le64_to_cpu(map_buff->disk_blk_cnt));
3147 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
3148 le16_to_cpu(map_buff->data_disks_per_row));
3149 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
3150 le16_to_cpu(map_buff->metadata_disks_per_row));
3151 dev_info(&h->pdev->dev, "row_cnt = %u\n",
3152 le16_to_cpu(map_buff->row_cnt));
3153 dev_info(&h->pdev->dev, "layout_map_count = %u\n",
3154 le16_to_cpu(map_buff->layout_map_count));
3155 dev_info(&h->pdev->dev, "flags = 0x%x\n",
3156 le16_to_cpu(map_buff->flags));
3157 dev_info(&h->pdev->dev, "encrypytion = %s\n",
3158 le16_to_cpu(map_buff->flags) &
3159 RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
3160 dev_info(&h->pdev->dev, "dekindex = %u\n",
3161 le16_to_cpu(map_buff->dekindex));
3162 map_cnt = le16_to_cpu(map_buff->layout_map_count);
3163 for (map = 0; map < map_cnt; map++) {
3164 dev_info(&h->pdev->dev, "Map%u:\n", map);
3165 row_cnt = le16_to_cpu(map_buff->row_cnt);
3166 for (row = 0; row < row_cnt; row++) {
3167 dev_info(&h->pdev->dev, " Row%u:\n", row);
3169 le16_to_cpu(map_buff->data_disks_per_row);
3170 for (col = 0; col < disks_per_row; col++, dd++)
3171 dev_info(&h->pdev->dev,
3172 " D%02u: h=0x%04x xor=%u,%u\n",
3173 col, dd->ioaccel_handle,
3174 dd->xor_mult[0], dd->xor_mult[1]);
3176 le16_to_cpu(map_buff->metadata_disks_per_row);
3177 for (col = 0; col < disks_per_row; col++, dd++)
3178 dev_info(&h->pdev->dev,
3179 " M%02u: h=0x%04x xor=%u,%u\n",
3180 col, dd->ioaccel_handle,
3181 dd->xor_mult[0], dd->xor_mult[1]);
3186 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
3187 __attribute__((unused)) int rc,
3188 __attribute__((unused)) struct raid_map_data *map_buff)
3193 static int hpsa_get_raid_map(struct ctlr_info *h,
3194 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3197 struct CommandList *c;
3198 struct ErrorInfo *ei;
3202 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
3203 sizeof(this_device->raid_map), 0,
3204 scsi3addr, TYPE_CMD)) {
3205 dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
3209 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3210 PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
3214 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3215 hpsa_scsi_interpret_error(h, c);
3221 /* @todo in the future, dynamically allocate RAID map memory */
3222 if (le32_to_cpu(this_device->raid_map.structure_size) >
3223 sizeof(this_device->raid_map)) {
3224 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
3227 hpsa_debug_map_buff(h, rc, &this_device->raid_map);
3234 static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
3235 unsigned char scsi3addr[], u16 bmic_device_index,
3236 struct bmic_sense_subsystem_info *buf, size_t bufsize)
3239 struct CommandList *c;
3240 struct ErrorInfo *ei;
3244 rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize,
3245 0, RAID_CTLR_LUNID, TYPE_CMD);
3249 c->Request.CDB[2] = bmic_device_index & 0xff;
3250 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3252 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3253 PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
3257 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3258 hpsa_scsi_interpret_error(h, c);
3266 static int hpsa_bmic_id_controller(struct ctlr_info *h,
3267 struct bmic_identify_controller *buf, size_t bufsize)
3270 struct CommandList *c;
3271 struct ErrorInfo *ei;
3275 rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize,
3276 0, RAID_CTLR_LUNID, TYPE_CMD);
3280 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3281 PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
3285 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3286 hpsa_scsi_interpret_error(h, c);
3294 static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
3295 unsigned char scsi3addr[], u16 bmic_device_index,
3296 struct bmic_identify_physical_device *buf, size_t bufsize)
3299 struct CommandList *c;
3300 struct ErrorInfo *ei;
3303 rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
3304 0, RAID_CTLR_LUNID, TYPE_CMD);
3308 c->Request.CDB[2] = bmic_device_index & 0xff;
3309 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3311 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
3314 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3315 hpsa_scsi_interpret_error(h, c);
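
/*
 * Several BMIC commands above carry a 16-bit drive number split across
 * the CDB: low byte in CDB[2], high byte in CDB[9].  The encode/decode
 * pair, as a sketch (helper names are hypothetical):
 */
#if 0	/* example, not compiled */
static void bmic_set_drive_number(u8 *cdb, u16 index)
{
	cdb[2] = index & 0xff;
	cdb[9] = (index >> 8) & 0xff;
}

static u16 bmic_get_drive_number(const u8 *cdb)
{
	return cdb[2] | (cdb[9] << 8);
}
#endif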
3325 * get enclosure information
3326 * struct ReportExtendedLUNdata *rlep - Used for BMIC drive number
3327 * struct hpsa_scsi_dev_t *encl_dev - device entry for enclosure
3328 * Uses id_physical_device to determine the box_index.
3330 static void hpsa_get_enclosure_info(struct ctlr_info *h,
3331 unsigned char *scsi3addr,
3332 struct ReportExtendedLUNdata *rlep, int rle_index,
3333 struct hpsa_scsi_dev_t *encl_dev)
3336 struct CommandList *c = NULL;
3337 struct ErrorInfo *ei = NULL;
3338 struct bmic_sense_storage_box_params *bssbp = NULL;
3339 struct bmic_identify_physical_device *id_phys = NULL;
3340 struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
3341 u16 bmic_device_index = 0;
3343 bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
3345 if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) {
3350 bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL);
3354 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
3358 rc = hpsa_bmic_id_physical_device(h, scsi3addr, bmic_device_index,
3359 id_phys, sizeof(*id_phys));
3361 dev_warn(&h->pdev->dev, "%s: id_phys failed %d bdi[0x%x]\n",
3362 __func__, encl_dev->external, bmic_device_index);
3368 rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp,
3369 sizeof(*bssbp), 0, RAID_CTLR_LUNID, TYPE_CMD);
3374 if (id_phys->phys_connector[1] == 'E')
3375 c->Request.CDB[5] = id_phys->box_index;
3377 c->Request.CDB[5] = 0;
3379 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
3385 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3390 encl_dev->box[id_phys->active_path_number] = bssbp->phys_box_on_port;
3391 memcpy(&encl_dev->phys_connector[id_phys->active_path_number],
3392 bssbp->phys_connector, sizeof(bssbp->phys_connector));
3403 hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
3404 "Error, could not get enclosure information\n");
3407 static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
3408 unsigned char *scsi3addr)
3410 struct ReportExtendedLUNdata *physdev;
3415 physdev = kzalloc(sizeof(*physdev), GFP_KERNEL);
3419 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
3420 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
3424 nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24;
3426 for (i = 0; i < nphysicals; i++)
3427 if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) {
3428 sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]);
3437 static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
3438 struct hpsa_scsi_dev_t *dev)
3443 if (is_hba_lunid(scsi3addr)) {
3444 struct bmic_sense_subsystem_info *ssi;
3446 ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
3448 dev_warn(&h->pdev->dev,
3449 "%s: out of memory\n", __func__);
3453 rc = hpsa_bmic_sense_subsystem_information(h,
3454 scsi3addr, 0, ssi, sizeof(*ssi));
3456 sa = get_unaligned_be64(ssi->primary_world_wide_id);
3457 h->sas_address = sa;
3462 sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr);
3464 dev->sas_address = sa;
3467 /* Get a device id from inquiry page 0x83 */
3468 static bool hpsa_vpd_page_supported(struct ctlr_info *h,
3469 unsigned char scsi3addr[], u8 page)
3474 unsigned char *buf, bufsize;
3476 buf = kzalloc(256, GFP_KERNEL);
3480 /* Get the size of the page list first */
3481 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3482 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3483 buf, HPSA_VPD_HEADER_SZ);
3485 goto exit_unsupported;
3487 if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
3488 bufsize = pages + HPSA_VPD_HEADER_SZ;
3492 /* Get the whole VPD page list */
3493 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3494 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3497 goto exit_unsupported;
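	/*
	 * Supported-pages VPD layout: byte 3 holds the page-list length
	 * and the page codes themselves start at byte 4, hence the
	 * buf[3 + i] indexing with i running from 1.
	 */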
3500 for (i = 1; i <= pages; i++)
3501 if (buf[3 + i] == page)
3502 goto exit_supported;
3511 static void hpsa_get_ioaccel_status(struct ctlr_info *h,
3512 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3518 this_device->offload_config = 0;
3519 this_device->offload_enabled = 0;
3520 this_device->offload_to_be_enabled = 0;
3522 buf = kzalloc(64, GFP_KERNEL);
3525 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
3527 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3528 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
3532 #define IOACCEL_STATUS_BYTE 4
3533 #define OFFLOAD_CONFIGURED_BIT 0x01
3534 #define OFFLOAD_ENABLED_BIT 0x02
3535 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
3536 this_device->offload_config =
3537 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
3538 if (this_device->offload_config) {
3539 this_device->offload_enabled =
3540 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
3541 if (hpsa_get_raid_map(h, scsi3addr, this_device))
3542 this_device->offload_enabled = 0;
3544 this_device->offload_to_be_enabled = this_device->offload_enabled;
3550 /* Get the device id from inquiry page 0x83 */
3551 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
3552 unsigned char *device_id, int index, int buflen)
3557 /* Does controller have VPD for device id? */
3558 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_DEVICE_ID))
3559 return 1; /* not supported */
3561 buf = kzalloc(64, GFP_KERNEL);
3565 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
3566 HPSA_VPD_LV_DEVICE_ID, buf, 64);
3570 memcpy(device_id, &buf[8], buflen);
3575 return rc; /*0 - got id, otherwise, didn't */
3578 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
3579 void *buf, int bufsize,
3580 int extended_response)
3583 struct CommandList *c;
3584 unsigned char scsi3addr[8];
3585 struct ErrorInfo *ei;
3589 /* address the controller */
3590 memset(scsi3addr, 0, sizeof(scsi3addr));
3591 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
3592 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
3596 if (extended_response)
3597 c->Request.CDB[1] = extended_response;
3598 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3599 PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
3603 if (ei->CommandStatus != 0 &&
3604 ei->CommandStatus != CMD_DATA_UNDERRUN) {
3605 hpsa_scsi_interpret_error(h, c);
3608 struct ReportLUNdata *rld = buf;
3610 if (rld->extended_response_flag != extended_response) {
3611 dev_err(&h->pdev->dev,
3612 "report luns requested format %u, got %u\n",
3614 rld->extended_response_flag);
3623 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
3624 struct ReportExtendedLUNdata *buf, int bufsize)
3626 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
3627 HPSA_REPORT_PHYS_EXTENDED);
3630 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
3631 struct ReportLUNdata *buf, int bufsize)
3633 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
3636 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
3637 int bus, int target, int lun)
3640 device->target = target;
3644 /* Use VPD inquiry to get details of volume status */
3645 static int hpsa_get_volume_status(struct ctlr_info *h,
3646 unsigned char scsi3addr[])
3653 buf = kzalloc(64, GFP_KERNEL);
3655 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3657 /* Does controller have VPD for logical volume status? */
3658 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
3661 /* Get the size of the VPD return buffer */
3662 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3663 buf, HPSA_VPD_HEADER_SZ);
3668 /* Now get the whole VPD buffer */
3669 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3670 buf, size + HPSA_VPD_HEADER_SZ);
3673 status = buf[4]; /* status byte */
3679 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
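
/*
 * The two-step read above -- header first to learn the size, then the
 * full page -- is the usual VPD idiom; condensed sketch (the helper
 * name is hypothetical, and the size byte is assumed to be buf[3]):
 */
#if 0	/* example, not compiled */
static int hpsa_vpd_read_full(struct ctlr_info *h, unsigned char *addr,
			      u16 page, unsigned char *buf)
{
	int rc;

	rc = hpsa_scsi_do_inquiry(h, addr, VPD_PAGE | page,
				  buf, HPSA_VPD_HEADER_SZ);
	if (rc)
		return rc;
	return hpsa_scsi_do_inquiry(h, addr, VPD_PAGE | page,
				    buf, buf[3] + HPSA_VPD_HEADER_SZ);
}
#endif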
3682 /* Determine offline status of a volume.
3685 * 0xff (offline for unknown reasons)
3686 * # (integer code indicating one of several NOT READY states
3687 * describing why a volume is to be kept offline)
3689 static unsigned char hpsa_volume_offline(struct ctlr_info *h,
3690 unsigned char scsi3addr[])
3692 struct CommandList *c;
3693 unsigned char *sense;
3694 u8 sense_key, asc, ascq;
3699 #define ASC_LUN_NOT_READY 0x04
3700 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
3701 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
3705 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
3706 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
3710 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3712 sense = c->err_info->SenseInfo;
3713 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
3714 sense_len = sizeof(c->err_info->SenseInfo);
3716 sense_len = c->err_info->SenseLen;
3717 decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
3718 cmd_status = c->err_info->CommandStatus;
3719 scsi_status = c->err_info->ScsiStatus;
3722 /* Determine the reason for not ready state */
3723 ldstat = hpsa_get_volume_status(h, scsi3addr);
3725 /* Keep volume offline in certain cases: */
3727 case HPSA_LV_FAILED:
3728 case HPSA_LV_UNDERGOING_ERASE:
3729 case HPSA_LV_NOT_AVAILABLE:
3730 case HPSA_LV_UNDERGOING_RPI:
3731 case HPSA_LV_PENDING_RPI:
3732 case HPSA_LV_ENCRYPTED_NO_KEY:
3733 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
3734 case HPSA_LV_UNDERGOING_ENCRYPTION:
3735 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
3736 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
3738 case HPSA_VPD_LV_STATUS_UNSUPPORTED:
3739 /* If VPD status page isn't available,
3740 * use ASC/ASCQ to determine state
3742 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
3743 (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
3753 * Find out if a logical device supports aborts by simply trying one.
3754 * Smart Array may claim not to support aborts on logical drives, but
3755 * if an MSA2000 is connected, the drives on that will be presented
3756 * by the Smart Array as logical drives, and aborts may be sent to
3757 * those devices successfully. So the simplest way to find out is
3758 * to simply try an abort and see how the device responds.
3760 static int hpsa_device_supports_aborts(struct ctlr_info *h,
3761 unsigned char *scsi3addr)
3763 struct CommandList *c;
3764 struct ErrorInfo *ei;
3767 u64 tag = (u64) -1; /* bogus tag */
3769 /* Assume that physical devices support aborts */
3770 if (!is_logical_dev_addr_mode(scsi3addr))
3775 (void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG);
3776 (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
3778 /* no unmap needed here because no data xfer. */
3780 switch (ei->CommandStatus) {
3784 case CMD_UNABORTABLE:
3785 case CMD_ABORT_FAILED:
3788 case CMD_TMF_STATUS:
3789 rc = hpsa_evaluate_tmf_status(h, c);
3799 static int hpsa_update_device_info(struct ctlr_info *h,
3800 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
3801 unsigned char *is_OBDR_device)
3804 #define OBDR_SIG_OFFSET 43
3805 #define OBDR_TAPE_SIG "$DR-10"
3806 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
3807 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
3809 unsigned char *inq_buff;
3810 unsigned char *obdr_sig;
3813 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
3819 /* Do an inquiry to the device to see what it is. */
3820 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
3821 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
3822 dev_err(&h->pdev->dev,
3823 "%s: inquiry failed, device will be skipped.\n",
3825 rc = HPSA_INQUIRY_FAILED;
3829 scsi_sanitize_inquiry_string(&inq_buff[8], 8);
3830 scsi_sanitize_inquiry_string(&inq_buff[16], 16);
3832 this_device->devtype = (inq_buff[0] & 0x1f);
3833 memcpy(this_device->scsi3addr, scsi3addr, 8);
3834 memcpy(this_device->vendor, &inq_buff[8],
3835 sizeof(this_device->vendor));
3836 memcpy(this_device->model, &inq_buff[16],
3837 sizeof(this_device->model));
3838 this_device->rev = inq_buff[2];
3839 memset(this_device->device_id, 0,
3840 sizeof(this_device->device_id));
3841 if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
3842 sizeof(this_device->device_id)))
3843 dev_err(&h->pdev->dev,
3844 "hpsa%d: %s: can't get device id for host %d:C0:T%d:L%d\t%s\t%.16s\n",
3846 h->scsi_host->host_no,
3847 this_device->target, this_device->lun,
3848 scsi_device_type(this_device->devtype),
3849 this_device->model);
3851 if ((this_device->devtype == TYPE_DISK ||
3852 this_device->devtype == TYPE_ZBC) &&
3853 is_logical_dev_addr_mode(scsi3addr)) {
3854 unsigned char volume_offline;
3856 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
3857 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
3858 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
3859 volume_offline = hpsa_volume_offline(h, scsi3addr);
3860 this_device->volume_offline = volume_offline;
3861 if (volume_offline == HPSA_LV_FAILED) {
3862 rc = HPSA_LV_FAILED;
3863 dev_err(&h->pdev->dev,
3864 "%s: LV failed, device will be skipped.\n",
3869 this_device->raid_level = RAID_UNKNOWN;
3870 this_device->offload_config = 0;
3871 this_device->offload_enabled = 0;
3872 this_device->offload_to_be_enabled = 0;
3873 this_device->hba_ioaccel_enabled = 0;
3874 this_device->volume_offline = 0;
3875 this_device->queue_depth = h->nr_cmds;
3878 if (is_OBDR_device) {
3879 /* See if this is a One-Button-Disaster-Recovery device
3880 * by looking for "$DR-10" at offset 43 in inquiry data.
3882 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
3883 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
3884 strncmp(obdr_sig, OBDR_TAPE_SIG,
3885 OBDR_SIG_LEN) == 0);
3895 static void hpsa_update_device_supports_aborts(struct ctlr_info *h,
3896 struct hpsa_scsi_dev_t *dev, u8 *scsi3addr)
3898 unsigned long flags;
3901 * See if this device supports aborts. If we already know
3902 * the device, we already know if it supports aborts, otherwise
3903 * we have to find out if it supports aborts by trying one.
3905 spin_lock_irqsave(&h->devlock, flags);
3906 rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry);
3907 if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) &&
3908 entry >= 0 && entry < h->ndevices) {
3909 dev->supports_aborts = h->dev[entry]->supports_aborts;
3910 spin_unlock_irqrestore(&h->devlock, flags);
3912 spin_unlock_irqrestore(&h->devlock, flags);
3913 dev->supports_aborts =
3914 hpsa_device_supports_aborts(h, scsi3addr);
3915 if (dev->supports_aborts < 0)
3916 dev->supports_aborts = 0;
3921 * Helper function to assign bus, target, lun mapping of devices.
3922 * Logical drive target and lun are assigned at this time, but
3923 * physical device lun and target assignment are deferred (assigned
3924 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
3926 static void figure_bus_target_lun(struct ctlr_info *h,
3927 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
3929 u32 lunid = get_unaligned_le32(lunaddrbytes);
3931 if (!is_logical_dev_addr_mode(lunaddrbytes)) {
3932 /* physical device, target and lun filled in later */
3933 if (is_hba_lunid(lunaddrbytes)) {
3934 int bus = HPSA_HBA_BUS;
3937 bus = HPSA_LEGACY_HBA_BUS;
3938 hpsa_set_bus_target_lun(device,
3939 bus, 0, lunid & 0x3fff);
3941 /* defer target, lun assignment for physical devices */
3942 hpsa_set_bus_target_lun(device,
3943 HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
3946 /* It's a logical device */
3947 if (device->external) {
3948 hpsa_set_bus_target_lun(device,
3949 HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff,
3953 hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS,
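
/*
 * Worked example of the LUN-id decode above (the external lun byte is
 * an assumption here): for lunid == 0x00124003, an external volume
 * would map to target (0x0012 & 0x3fff) == 0x12 with lun 0x03, while a
 * local logical volume uses (lunid & 0x3fff) == 0x0003 as its lun on
 * target 0.
 */
#if 0	/* example, not compiled */
	u32 lunid = get_unaligned_le32(lunaddrbytes);
	int target = (lunid >> 16) & 0x3fff;	/* external volumes */
	int lun = lunid & 0x00ff;		/* assumed low-byte lun */
#endif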
3959 * Get address of physical disk used for an ioaccel2 mode command:
3960 * 1. Extract ioaccel2 handle from the command.
3961 * 2. Find a matching ioaccel2 handle from list of physical disks.
3963 * 1 and set scsi3addr to address of matching physical
3964 * 0 if no matching physical disk was found.
3966 static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
3967 struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
3969 struct io_accel2_cmd *c2 =
3970 &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
3971 unsigned long flags;
3974 spin_lock_irqsave(&h->devlock, flags);
3975 for (i = 0; i < h->ndevices; i++)
3976 if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) {
3977 memcpy(scsi3addr, h->dev[i]->scsi3addr,
3978 sizeof(h->dev[i]->scsi3addr));
3979 spin_unlock_irqrestore(&h->devlock, flags);
3982 spin_unlock_irqrestore(&h->devlock, flags);
3986 static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position,
3987 int i, int nphysicals, int nlocal_logicals)
3989 /* In report logicals, local logicals are listed first,
3990 * then any externals.
3992 int logicals_start = nphysicals + (raid_ctlr_position == 0);
3994 if (i == raid_ctlr_position)
3997 if (i < logicals_start)
4000 /* i is in logicals range, but still within local logicals */
4001 if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals)
4004 return 1; /* it's an external lun */
4008 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
4009 * logdev. The number of luns in physdev and logdev are returned in
4010 * *nphysicals and *nlogicals, respectively.
4011 * Returns 0 on success, -1 otherwise.
4013 static int hpsa_gather_lun_info(struct ctlr_info *h,
4014 struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
4015 struct ReportLUNdata *logdev, u32 *nlogicals)
4017 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
4018 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
4021 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
4022 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
4023 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
4024 HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
4025 *nphysicals = HPSA_MAX_PHYS_LUN;
4027 if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
4028 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
4031 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
4032 /* Reject Logicals in excess of our max capability. */
4033 if (*nlogicals > HPSA_MAX_LUN) {
4034 dev_warn(&h->pdev->dev,
4035 "maximum logical LUNs (%d) exceeded. "
4036 "%d LUNs ignored.\n", HPSA_MAX_LUN,
4037 *nlogicals - HPSA_MAX_LUN);
4038 *nlogicals = HPSA_MAX_LUN;
4040 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
4041 dev_warn(&h->pdev->dev,
4042 "maximum logical + physical LUNs (%d) exceeded. "
4043 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
4044 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
4045 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
4050 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
4051 int i, int nphysicals, int nlogicals,
4052 struct ReportExtendedLUNdata *physdev_list,
4053 struct ReportLUNdata *logdev_list)
4055 /* Helper function, figure out where the LUN ID info is coming from
4056 * given index i, lists of physical and logical devices, where in
4057 * the list the raid controller is supposed to appear (first or last)
4060 int logicals_start = nphysicals + (raid_ctlr_position == 0);
4061 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
4063 if (i == raid_ctlr_position)
4064 return RAID_CTLR_LUNID;
4066 if (i < logicals_start)
4067 return &physdev_list->LUN[i -
4068 (raid_ctlr_position == 0)].lunid[0];
4070 if (i < last_device)
4071 return &logdev_list->LUN[i - nphysicals -
4072 (raid_ctlr_position == 0)][0];
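
/*
 * Worked example for the index helpers above: with the controller LUN
 * reported first (raid_ctlr_position == 0), nphysicals == 3 and
 * nlogicals == 2, the flat index space is
 *	i == 0		-> RAID_CTLR_LUNID
 *	i == 1..3	-> physdev_list->LUN[0..2]
 *	i == 4..5	-> logdev_list->LUN[0..1]
 * and figure_external_status() reports external only for logicals past
 * the first nlocal_logicals entries of that last range.
 */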
4077 /* get physical drive ioaccel handle and queue depth */
4078 static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
4079 struct hpsa_scsi_dev_t *dev,
4080 struct ReportExtendedLUNdata *rlep, int rle_index,
4081 struct bmic_identify_physical_device *id_phys)
4084 struct ext_report_lun_entry *rle;
4087 * external targets don't support BMIC
4089 if (dev->external) {
4090 dev->queue_depth = 7;
4094 rle = &rlep->LUN[rle_index];
4096 dev->ioaccel_handle = rle->ioaccel_handle;
4097 if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
4098 dev->hba_ioaccel_enabled = 1;
4099 memset(id_phys, 0, sizeof(*id_phys));
4100 rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0],
4101 GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys,
4104 /* Reserve space for FW operations */
4105 #define DRIVE_CMDS_RESERVED_FOR_FW 2
4106 #define DRIVE_QUEUE_DEPTH 7
4108 le16_to_cpu(id_phys->current_queue_depth_limit) -
4109 DRIVE_CMDS_RESERVED_FOR_FW;
4111 dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
4114 static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
4115 struct ReportExtendedLUNdata *rlep, int rle_index,
4116 struct bmic_identify_physical_device *id_phys)
4118 struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
4120 if ((rle->device_flags & 0x08) && this_device->ioaccel_handle)
4121 this_device->hba_ioaccel_enabled = 1;
4123 memcpy(&this_device->active_path_index,
4124 &id_phys->active_path_number,
4125 sizeof(this_device->active_path_index));
4126 memcpy(&this_device->path_map,
4127 &id_phys->redundant_path_present_map,
4128 sizeof(this_device->path_map));
4129 memcpy(&this_device->box,
4130 &id_phys->alternate_paths_phys_box_on_port,
4131 sizeof(this_device->box));
4132 memcpy(&this_device->phys_connector,
4133 &id_phys->alternate_paths_phys_connector,
4134 sizeof(this_device->phys_connector));
4135 memcpy(&this_device->bay,
4136 &id_phys->phys_bay_in_box,
4137 sizeof(this_device->bay));
4140 /* get number of local logical disks. */
4141 static int hpsa_set_local_logical_count(struct ctlr_info *h,
4142 struct bmic_identify_controller *id_ctlr,
4148 dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n",
4152 memset(id_ctlr, 0, sizeof(*id_ctlr));
4153 rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
4155 if (id_ctlr->configured_logical_drive_count < 256)
4156 *nlocals = id_ctlr->configured_logical_drive_count;
4158 *nlocals = le16_to_cpu(
4159 id_ctlr->extended_logical_unit_count);
static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes)
{
	struct bmic_identify_physical_device *id_phys;
	bool is_spare = false;
	int rc;

	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
	if (!id_phys)
		return false;

	rc = hpsa_bmic_id_physical_device(h,
					lunaddrbytes,
					GET_BMIC_DRIVE_NUMBER(lunaddrbytes),
					id_phys, sizeof(*id_phys));
	if (rc == 0)
		is_spare = (id_phys->more_flags >> 6) & 0x01;

	kfree(id_phys);
	return is_spare;
}
4186 #define RPL_DEV_FLAG_NON_DISK 0x1
4187 #define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED 0x2
4188 #define RPL_DEV_FLAG_UNCONFIG_DISK 0x4
4190 #define BMIC_DEVICE_TYPE_ENCLOSURE 6
4192 static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes,
	struct ext_report_lun_entry *rle)
{
	u8 device_flags;
	u8 device_type;

	if (!MASKED_DEVICE(lunaddrbytes))
		return false;

	device_flags = rle->device_flags;
	device_type = rle->device_type;

	/* Non-disk: keep enclosures, skip all other non-disk devices. */
	if (device_flags & RPL_DEV_FLAG_NON_DISK) {
		if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE)
			return false;
		return true;
	}

	if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED))
		return false;

	if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK)
		return false;
	/*
	 * Spares may be spun down, and we do not want to do an Inquiry to a
	 * RAID set spare drive, as that would cause them to be spun up.
	 * That is a performance hit because I/O to the RAID device stops
	 * while the spin-up occurs, which can take over 50 seconds.
	 */
	if (hpsa_is_disk_spare(h, lunaddrbytes))
		return true;

	return false;
}
4230 static void hpsa_update_scsi_devices(struct ctlr_info *h)
{
	/*
	 * The idea here is that we could get notified that some devices
	 * have changed, so we do a report physical LUNs and report logical
	 * LUNs cmd, and adjust our list of devices accordingly.
	 *
	 * The scsi3addr's of devices won't change so long as the adapter
	 * is not reset.  That means we can rescan and tell which devices
	 * we already know about, vs. new devices, vs. disappearing devices.
	 */
4242 struct ReportExtendedLUNdata *physdev_list = NULL;
4243 struct ReportLUNdata *logdev_list = NULL;
4244 struct bmic_identify_physical_device *id_phys = NULL;
4245 struct bmic_identify_controller *id_ctlr = NULL;
4248 u32 nlocal_logicals = 0;
4249 u32 ndev_allocated = 0;
4250 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
4252 int i, n_ext_target_devs, ndevs_to_allocate;
4253 int raid_ctlr_position;
4254 bool physical_device;
4255 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
4257 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
4258 physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
4259 logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
4260 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
4261 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
4262 id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL);
4264 if (!currentsd || !physdev_list || !logdev_list ||
4265 !tmpdevice || !id_phys || !id_ctlr) {
4266 dev_err(&h->pdev->dev, "out of memory\n");
4269 memset(lunzerobits, 0, sizeof(lunzerobits));
4271 h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */
4273 if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
4274 logdev_list, &nlogicals)) {
4275 h->drv_req_rescan = 1;
4279 /* Set number of local logicals (non PTRAID) */
4280 if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) {
4281 dev_warn(&h->pdev->dev,
4282 "%s: Can't determine number of local logical devices.\n",
	/* We might see up to the maximum number of logical and physical disks
	 * plus external target devices, and a device for the local RAID
	 * controller.
	 */
4290 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
4292 /* Allocate the per device structures */
4293 for (i = 0; i < ndevs_to_allocate; i++) {
4294 if (i >= HPSA_MAX_DEVICES) {
4295 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
4296 " %d devices ignored.\n", HPSA_MAX_DEVICES,
4297 ndevs_to_allocate - HPSA_MAX_DEVICES);
4301 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
4302 if (!currentsd[i]) {
4303 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
4304 __FILE__, __LINE__);
4305 h->drv_req_rescan = 1;
4311 if (is_scsi_rev_5(h))
4312 raid_ctlr_position = 0;
4314 raid_ctlr_position = nphysicals + nlogicals;
4316 /* adjust our table of devices */
4317 n_ext_target_devs = 0;
4318 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
4319 u8 *lunaddrbytes, is_OBDR = 0;
4321 int phys_dev_index = i - (raid_ctlr_position == 0);
4322 bool skip_device = false;
4324 physical_device = i < nphysicals + (raid_ctlr_position == 0);
4326 /* Figure out where the LUN ID info is coming from */
4327 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
4328 i, nphysicals, nlogicals, physdev_list, logdev_list);
4330 /* Determine if this is a lun from an external target array */
4331 tmpdevice->external =
4332 figure_external_status(h, raid_ctlr_position, i,
4333 nphysicals, nlocal_logicals);
		/*
		 * Skip over some devices such as a spare.
		 */
4338 if (!tmpdevice->external && physical_device) {
4339 skip_device = hpsa_skip_device(h, lunaddrbytes,
4340 &physdev_list->LUN[phys_dev_index]);
4345 /* Get device type, vendor, model, device id */
4346 rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
4348 if (rc == -ENOMEM) {
4349 dev_warn(&h->pdev->dev,
4350 "Out of memory, rescan deferred.\n");
4351 h->drv_req_rescan = 1;
4355 h->drv_req_rescan = 1;
4359 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
4360 hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
4361 this_device = currentsd[ncurrent];
4363 /* Turn on discovery_polling if there are ext target devices.
		 * Event-based change notification is unreliable for those.
		 */
4366 if (!h->discovery_polling) {
4367 if (tmpdevice->external) {
4368 h->discovery_polling = 1;
4369 dev_info(&h->pdev->dev,
4370 "External target, activate discovery polling.\n");
4375 *this_device = *tmpdevice;
4376 this_device->physical_device = physical_device;
		/*
		 * Expose all devices except for physical devices that
		 * are masked.
		 */
		if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device)
			this_device->expose_device = 0;
		else
			this_device->expose_device = 1;
		/*
		 * Get the SAS address for physical devices that are exposed.
		 */
4391 if (this_device->physical_device && this_device->expose_device)
4392 hpsa_get_sas_address(h, lunaddrbytes, this_device);
4394 switch (this_device->devtype) {
		case TYPE_ROM:
			/* We don't *really* support actual CD-ROM devices,
			 * just "One Button Disaster Recovery" tape drive
			 * which temporarily pretends to be a CD-ROM drive.
			 * So we check that the device is really an OBDR tape
			 * device by checking for "$DR-10" in bytes 43-48 of
			 * the inquiry data.
			 */
			if (is_OBDR)
				ncurrent++;
			break;
		case TYPE_DISK:
4408 if (this_device->physical_device) {
4409 /* The disk is in HBA mode. */
4410 /* Never use RAID mapper in HBA mode. */
4411 this_device->offload_enabled = 0;
4412 hpsa_get_ioaccel_drive_info(h, this_device,
4413 physdev_list, phys_dev_index, id_phys);
4414 hpsa_get_path_info(this_device,
4415 physdev_list, phys_dev_index, id_phys);
4420 case TYPE_MEDIUM_CHANGER:
4423 case TYPE_ENCLOSURE:
4424 if (!this_device->external)
4425 hpsa_get_enclosure_info(h, lunaddrbytes,
4426 physdev_list, phys_dev_index,
4431 /* Only present the Smartarray HBA as a RAID controller.
			 * If it's a RAID controller other than the HBA itself
			 * (an external RAID controller, MSA500 or similar)
			 * don't present it.
			 */
			if (!is_hba_lunid(lunaddrbytes))
				break;
			ncurrent++;
			break;
		default:
			break;
		}
4443 if (ncurrent >= HPSA_MAX_DEVICES)
4447 if (h->sas_host == NULL) {
4450 rc = hpsa_add_sas_host(h);
4452 dev_warn(&h->pdev->dev,
4453 "Could not add sas host %d\n", rc);
4458 adjust_hpsa_scsi_table(h, currentsd, ncurrent);
out:
	kfree(tmpdevice);
	for (i = 0; i < ndev_allocated; i++)
		kfree(currentsd[i]);
	kfree(currentsd);
	kfree(physdev_list);
	kfree(logdev_list);
	kfree(id_ctlr);
	kfree(id_phys);
}
4470 static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
4471 struct scatterlist *sg)
4473 u64 addr64 = (u64) sg_dma_address(sg);
4474 unsigned int len = sg_dma_len(sg);
4476 desc->Addr = cpu_to_le64(addr64);
4477 desc->Len = cpu_to_le32(len);
 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the PCI
 * DMA mapping and fills in the scatter-gather entries of the
 * hpsa command, cp.
 */
4486 static int hpsa_scatter_gather(struct ctlr_info *h,
4487 struct CommandList *cp,
4488 struct scsi_cmnd *cmd)
4490 struct scatterlist *sg;
4491 int use_sg, i, sg_limit, chained, last_sg;
4492 struct SGDescriptor *curr_sg;
4494 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0)
		return use_sg;

	if (!use_sg)
		goto sglist_finished;
	/*
	 * If the number of entries is greater than the max for a single list,
	 * then we have a chained list; we will set up all but one entry in the
	 * first list (the last entry is saved for link information);
	 * otherwise, we don't have a chained list and we'll set up each of
	 * the entries in the one list.
	 */
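	/*
	 * For example, assuming h->max_cmd_sg_entries == 32 and use_sg == 40:
	 * the first list gets 31 data descriptors plus the chain descriptor,
	 * and the remaining 9 descriptors go into the chain block.
	 */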
4511 chained = use_sg > h->max_cmd_sg_entries;
4512 sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
4513 last_sg = scsi_sg_count(cmd) - 1;
4514 scsi_for_each_sg(cmd, sg, sg_limit, i) {
4515 hpsa_set_sg_descriptor(curr_sg, sg);
		/*
		 * Continue with the chained list.  Set curr_sg to the chained
4522 * list. Modify the limit to the total count less the entries
4523 * we've already set up. Resume the scan at the list entry
		 * where the previous loop left off.
		 */
4526 curr_sg = h->cmd_sg_list[cp->cmdindex];
4527 sg_limit = use_sg - sg_limit;
4528 for_each_sg(sg, sg, sg_limit, i) {
4529 hpsa_set_sg_descriptor(curr_sg, sg);
4534 /* Back the pointer up to the last entry and mark it as "last". */
4535 (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);
4537 if (use_sg + chained > h->maxSG)
4538 h->maxSG = use_sg + chained;
	if (chained) {
		cp->Header.SGList = h->max_cmd_sg_entries;
4542 cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
4543 if (hpsa_map_sg_chain_block(h, cp)) {
			scsi_dma_unmap(cmd);
			return -1;
		}
		return 0;
	}

sglist_finished:
4552 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
	cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
	return 0;
}
4557 #define IO_ACCEL_INELIGIBLE (1)
4558 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
4564 /* Perform some CDB fixups if needed using 10 byte reads/writes only */
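	/*
	 * Note: READ(6)/WRITE(6) pack a 21-bit LBA into bytes 1-3 (the low
	 * five bits of byte 1 are the LBA's high bits) and an 8-bit transfer
	 * length into byte 4, which is why such CDBs are rewritten as
	 * 10-byte CDBs here.
	 */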
4571 if (*cdb_len == 6) {
4572 block = (((cdb[1] & 0x1F) << 16) |
4579 BUG_ON(*cdb_len != 12);
4580 block = get_unaligned_be32(&cdb[2]);
4581 block_cnt = get_unaligned_be32(&cdb[6]);
4583 if (block_cnt > 0xffff)
4584 return IO_ACCEL_INELIGIBLE;
4586 cdb[0] = is_write ? WRITE_10 : READ_10;
4588 cdb[2] = (u8) (block >> 24);
4589 cdb[3] = (u8) (block >> 16);
4590 cdb[4] = (u8) (block >> 8);
4591 cdb[5] = (u8) (block);
4593 cdb[7] = (u8) (block_cnt >> 8);
		cdb[8] = (u8) (block_cnt);
		cdb[9] = 0;
		*cdb_len = 10;
		break;
	}
	return 0;
}
4602 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
4603 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4604 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4606 struct scsi_cmnd *cmd = c->scsi_cmd;
4607 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
4609 unsigned int total_len = 0;
4610 struct scatterlist *sg;
4613 struct SGDescriptor *curr_sg;
4614 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
4616 /* TODO: implement chaining support */
4617 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
4618 atomic_dec(&phys_disk->ioaccel_cmds_out);
4619 return IO_ACCEL_INELIGIBLE;
4622 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
4624 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4625 atomic_dec(&phys_disk->ioaccel_cmds_out);
4626 return IO_ACCEL_INELIGIBLE;
4629 c->cmd_type = CMD_IOACCEL1;
4631 /* Adjust the DMA address to point to the accelerated command buffer */
4632 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
4633 (c->cmdindex * sizeof(*cp));
4634 BUG_ON(c->busaddr & 0x0000007F);
4636 use_sg = scsi_dma_map(cmd);
4638 atomic_dec(&phys_disk->ioaccel_cmds_out);
4644 scsi_for_each_sg(cmd, sg, use_sg, i) {
4645 addr64 = (u64) sg_dma_address(sg);
4646 len = sg_dma_len(sg);
4648 curr_sg->Addr = cpu_to_le64(addr64);
4649 curr_sg->Len = cpu_to_le32(len);
4650 curr_sg->Ext = cpu_to_le32(0);
4653 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
4655 switch (cmd->sc_data_direction) {
4657 control |= IOACCEL1_CONTROL_DATA_OUT;
4659 case DMA_FROM_DEVICE:
4660 control |= IOACCEL1_CONTROL_DATA_IN;
4663 control |= IOACCEL1_CONTROL_NODATAXFER;
4666 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4667 cmd->sc_data_direction);
4672 control |= IOACCEL1_CONTROL_NODATAXFER;
4675 c->Header.SGList = use_sg;
4676 /* Fill out the command structure to submit */
4677 cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
4678 cp->transfer_len = cpu_to_le32(total_len);
4679 cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
4680 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
4681 cp->control = cpu_to_le32(control);
4682 memcpy(cp->CDB, cdb, cdb_len);
4683 memcpy(cp->CISS_LUN, scsi3addr, 8);
4684 /* Tag was already set at init time. */
	enqueue_cmd_and_start_io(h, c);
	return 0;
}
4690 * Queue a command directly to a device behind the controller using the
 * I/O accelerator path.
 */
4693 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
4694 struct CommandList *c)
4696 struct scsi_cmnd *cmd = c->scsi_cmd;
4697 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4704 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
4705 cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
/*
 * Set encryption parameters for the ioaccel2 request
 */
4711 static void set_encrypt_ioaccel2(struct ctlr_info *h,
4712 struct CommandList *c, struct io_accel2_cmd *cp)
4714 struct scsi_cmnd *cmd = c->scsi_cmd;
4715 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4716 struct raid_map_data *map = &dev->raid_map;
	/* Are we doing encryption on this device? */
	if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
		return;
4722 /* Set the data encryption key index. */
4723 cp->dekindex = map->dekindex;
4725 /* Set the encryption enable flag, encoded into direction field. */
4726 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
	/* Set encryption tweak values based on logical block address.
	 * If the block size is 512, the tweak value is the LBA.
	 * For other block sizes, the tweak value is (LBA * block size) / 512.
	 */
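	/*
	 * For example, with a 4096-byte volume block size, LBA 100 yields
	 * a tweak value of 100 * 4096 / 512 = 800.
	 */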
4732 switch (cmd->cmnd[0]) {
4733 /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
4736 first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
4737 (cmd->cmnd[2] << 8) |
4742 /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
4745 first_block = get_unaligned_be32(&cmd->cmnd[2]);
4749 first_block = get_unaligned_be64(&cmd->cmnd[2]);
4752 dev_err(&h->pdev->dev,
4753 "ERROR: %s: size (0x%x) not supported for encryption\n",
4754 __func__, cmd->cmnd[0]);
4759 if (le32_to_cpu(map->volume_blk_size) != 512)
4760 first_block = first_block *
4761 le32_to_cpu(map->volume_blk_size)/512;
4763 cp->tweak_lower = cpu_to_le32(first_block);
	cp->tweak_upper = cpu_to_le32(first_block >> 32);
}
4767 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
4768 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4769 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4771 struct scsi_cmnd *cmd = c->scsi_cmd;
4772 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
4773 struct ioaccel2_sg_element *curr_sg;
4775 struct scatterlist *sg;
4783 if (!cmd->device->hostdata)
4786 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4788 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4789 atomic_dec(&phys_disk->ioaccel_cmds_out);
4790 return IO_ACCEL_INELIGIBLE;
4793 c->cmd_type = CMD_IOACCEL2;
4794 /* Adjust the DMA address to point to the accelerated command buffer */
4795 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
4796 (c->cmdindex * sizeof(*cp));
4797 BUG_ON(c->busaddr & 0x0000007F);
4799 memset(cp, 0, sizeof(*cp));
4800 cp->IU_type = IOACCEL2_IU_TYPE;
4802 use_sg = scsi_dma_map(cmd);
4804 atomic_dec(&phys_disk->ioaccel_cmds_out);
4810 if (use_sg > h->ioaccel_maxsg) {
4811 addr64 = le64_to_cpu(
4812 h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
4813 curr_sg->address = cpu_to_le64(addr64);
4814 curr_sg->length = 0;
4815 curr_sg->reserved[0] = 0;
4816 curr_sg->reserved[1] = 0;
4817 curr_sg->reserved[2] = 0;
4818 curr_sg->chain_indicator = 0x80;
4820 curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
4822 scsi_for_each_sg(cmd, sg, use_sg, i) {
4823 addr64 = (u64) sg_dma_address(sg);
4824 len = sg_dma_len(sg);
4826 curr_sg->address = cpu_to_le64(addr64);
4827 curr_sg->length = cpu_to_le32(len);
4828 curr_sg->reserved[0] = 0;
4829 curr_sg->reserved[1] = 0;
4830 curr_sg->reserved[2] = 0;
4831 curr_sg->chain_indicator = 0;
4835 switch (cmd->sc_data_direction) {
4837 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4838 cp->direction |= IOACCEL2_DIR_DATA_OUT;
4840 case DMA_FROM_DEVICE:
4841 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4842 cp->direction |= IOACCEL2_DIR_DATA_IN;
4845 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4846 cp->direction |= IOACCEL2_DIR_NO_DATA;
4849 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4850 cmd->sc_data_direction);
4855 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4856 cp->direction |= IOACCEL2_DIR_NO_DATA;
4859 /* Set encryption parameters, if necessary */
4860 set_encrypt_ioaccel2(h, c, cp);
4862 cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
4863 cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
4864 memcpy(cp->cdb, cdb, sizeof(cp->cdb));
4866 cp->data_len = cpu_to_le32(total_len);
4867 cp->err_ptr = cpu_to_le64(c->busaddr +
4868 offsetof(struct io_accel2_cmd, error_data));
4869 cp->err_len = cpu_to_le32(sizeof(cp->error_data));
4871 /* fill in sg elements */
4872 if (use_sg > h->ioaccel_maxsg) {
4874 cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0]));
4875 if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
4876 atomic_dec(&phys_disk->ioaccel_cmds_out);
4877 scsi_dma_unmap(cmd);
4881 cp->sg_count = (u8) use_sg;
	enqueue_cmd_and_start_io(h, c);
	return 0;
}
4888 * Queue a command to the correct I/O accelerator path.
4890 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
4891 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4892 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4894 if (!c->scsi_cmd->device)
4897 if (!c->scsi_cmd->device->hostdata)
4900 /* Try to honor the device's queue depth */
4901 if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
4902 phys_disk->queue_depth) {
4903 atomic_dec(&phys_disk->ioaccel_cmds_out);
4904 return IO_ACCEL_INELIGIBLE;
4906 if (h->transMethod & CFGTBL_Trans_io_accel1)
4907 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr,
						phys_disk);
	else
4911 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr,
						phys_disk);
}
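/*
 * raid_map_helper() below load-balances across mirror groups: it advances
 * *map_index one mirror group at a time (wrapping back to the first group)
 * until it lands in the group selected by offload_to_mirror.
 */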
4916 static void raid_map_helper(struct raid_map_data *map,
4917 int offload_to_mirror, u32 *map_index, u32 *current_group)
4919 if (offload_to_mirror == 0) {
4920 /* use physical disk in the first mirrored group. */
4921 *map_index %= le16_to_cpu(map->data_disks_per_row);
4925 /* determine mirror group that *map_index indicates */
4926 *current_group = *map_index /
4927 le16_to_cpu(map->data_disks_per_row);
4928 if (offload_to_mirror == *current_group)
4930 if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
4931 /* select map index from next group */
4932 *map_index += le16_to_cpu(map->data_disks_per_row);
4935 /* select map index from first group */
4936 *map_index %= le16_to_cpu(map->data_disks_per_row);
	} while (offload_to_mirror != *current_group);
}
4943 * Attempt to perform offload RAID mapping for a logical volume I/O.
4945 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
4946 struct CommandList *c)
4948 struct scsi_cmnd *cmd = c->scsi_cmd;
4949 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4950 struct raid_map_data *map = &dev->raid_map;
4951 struct raid_map_disk_data *dd = &map->data[0];
4954 u64 first_block, last_block;
4957 u64 first_row, last_row;
4958 u32 first_row_offset, last_row_offset;
4959 u32 first_column, last_column;
4960 u64 r0_first_row, r0_last_row;
4961 u32 r5or6_blocks_per_row;
4962 u64 r5or6_first_row, r5or6_last_row;
4963 u32 r5or6_first_row_offset, r5or6_last_row_offset;
4964 u32 r5or6_first_column, r5or6_last_column;
4965 u32 total_disks_per_row;
4967 u32 first_group, last_group, current_group;
#if BITS_PER_LONG == 32
	u64 tmpdiv;
#endif
4978 int offload_to_mirror;
4983 /* check for valid opcode, get LBA and block count */
4984 switch (cmd->cmnd[0]) {
4988 first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
4989 (cmd->cmnd[2] << 8) |
4991 block_cnt = cmd->cmnd[4];
4999 (((u64) cmd->cmnd[2]) << 24) |
5000 (((u64) cmd->cmnd[3]) << 16) |
5001 (((u64) cmd->cmnd[4]) << 8) |
5004 (((u32) cmd->cmnd[7]) << 8) |
5011 (((u64) cmd->cmnd[2]) << 24) |
5012 (((u64) cmd->cmnd[3]) << 16) |
5013 (((u64) cmd->cmnd[4]) << 8) |
5016 (((u32) cmd->cmnd[6]) << 24) |
5017 (((u32) cmd->cmnd[7]) << 16) |
5018 (((u32) cmd->cmnd[8]) << 8) |
5025 (((u64) cmd->cmnd[2]) << 56) |
5026 (((u64) cmd->cmnd[3]) << 48) |
5027 (((u64) cmd->cmnd[4]) << 40) |
5028 (((u64) cmd->cmnd[5]) << 32) |
5029 (((u64) cmd->cmnd[6]) << 24) |
5030 (((u64) cmd->cmnd[7]) << 16) |
5031 (((u64) cmd->cmnd[8]) << 8) |
5034 (((u32) cmd->cmnd[10]) << 24) |
5035 (((u32) cmd->cmnd[11]) << 16) |
5036 (((u32) cmd->cmnd[12]) << 8) |
5040 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
5042 last_block = first_block + block_cnt - 1;
5044 /* check for write to non-RAID-0 */
5045 if (is_write && dev->raid_level != 0)
5046 return IO_ACCEL_INELIGIBLE;
5048 /* check for invalid block or wraparound */
5049 if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
5050 last_block < first_block)
5051 return IO_ACCEL_INELIGIBLE;
5053 /* calculate stripe information for the request */
5054 blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
5055 le16_to_cpu(map->strip_size);
5056 strip_size = le16_to_cpu(map->strip_size);
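	/*
	 * On 32-bit builds, the 64-bit divisions below must go through
	 * do_div(), which divides its u64 argument in place (leaving the
	 * quotient there) and returns the remainder; hence the tmpdiv
	 * temporaries.
	 */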
5057 #if BITS_PER_LONG == 32
5058 tmpdiv = first_block;
	(void) do_div(tmpdiv, blocks_per_row);
	first_row = tmpdiv;
5061 tmpdiv = last_block;
	(void) do_div(tmpdiv, blocks_per_row);
	last_row = tmpdiv;
5064 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
5065 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
5066 tmpdiv = first_row_offset;
5067 (void) do_div(tmpdiv, strip_size);
5068 first_column = tmpdiv;
5069 tmpdiv = last_row_offset;
5070 (void) do_div(tmpdiv, strip_size);
5071 last_column = tmpdiv;
#else
	first_row = first_block / blocks_per_row;
5074 last_row = last_block / blocks_per_row;
5075 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
5076 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
5077 first_column = first_row_offset / strip_size;
	last_column = last_row_offset / strip_size;
#endif
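	/*
	 * Worked example, assuming strip_size == 128 and 3 data disks per
	 * row: blocks_per_row == 384, so first_block == 1000 lands in row 2
	 * (1000 / 384), at row offset 232, in column 1 (232 / 128).
	 */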
5081 /* if this isn't a single row/column then give to the controller */
5082 if ((first_row != last_row) || (first_column != last_column))
5083 return IO_ACCEL_INELIGIBLE;
5085 /* proceeding with driver mapping */
5086 total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
5087 le16_to_cpu(map->metadata_disks_per_row);
5088 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
5089 le16_to_cpu(map->row_cnt);
5090 map_index = (map_row * total_disks_per_row) + first_column;
5092 switch (dev->raid_level) {
5094 break; /* nothing special to do */
	case HPSA_RAID_1:
		/* Handles load balance across RAID 1 members.
		 * (2-drive R1 and R10 with an even # of drives.)
		 * Appropriate for SSDs, not optimal for HDDs.
		 */
5100 BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
5101 if (dev->offload_to_mirror)
5102 map_index += le16_to_cpu(map->data_disks_per_row);
5103 dev->offload_to_mirror = !dev->offload_to_mirror;
		break;
	case HPSA_RAID_ADM:
		/* Handles N-way mirrors (R1-ADM)
		 * and R10 with a # of drives divisible by 3.
		 */
5109 BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
5111 offload_to_mirror = dev->offload_to_mirror;
5112 raid_map_helper(map, offload_to_mirror,
5113 &map_index, ¤t_group);
5114 /* set mirror group to use next time */
5116 (offload_to_mirror >=
5117 le16_to_cpu(map->layout_map_count) - 1)
5118 ? 0 : offload_to_mirror + 1;
5119 dev->offload_to_mirror = offload_to_mirror;
		/* Avoid direct use of dev->offload_to_mirror within this
		 * function since multiple threads might simultaneously
		 * increment it beyond the range of dev->layout_map_count - 1.
		 */
		break;
	case HPSA_RAID_5:
	case HPSA_RAID_6:
		if (le16_to_cpu(map->layout_map_count) <= 1)
			break;
5130 /* Verify first and last block are in same RAID group */
5131 r5or6_blocks_per_row =
5132 le16_to_cpu(map->strip_size) *
5133 le16_to_cpu(map->data_disks_per_row);
5134 BUG_ON(r5or6_blocks_per_row == 0);
5135 stripesize = r5or6_blocks_per_row *
5136 le16_to_cpu(map->layout_map_count);
5137 #if BITS_PER_LONG == 32
5138 tmpdiv = first_block;
5139 first_group = do_div(tmpdiv, stripesize);
5140 tmpdiv = first_group;
5141 (void) do_div(tmpdiv, r5or6_blocks_per_row);
5142 first_group = tmpdiv;
5143 tmpdiv = last_block;
5144 last_group = do_div(tmpdiv, stripesize);
5145 tmpdiv = last_group;
5146 (void) do_div(tmpdiv, r5or6_blocks_per_row);
5147 last_group = tmpdiv;
5149 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
5150 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
5152 if (first_group != last_group)
5153 return IO_ACCEL_INELIGIBLE;
5155 /* Verify request is in a single row of RAID 5/6 */
5156 #if BITS_PER_LONG == 32
5157 tmpdiv = first_block;
5158 (void) do_div(tmpdiv, stripesize);
5159 first_row = r5or6_first_row = r0_first_row = tmpdiv;
5160 tmpdiv = last_block;
5161 (void) do_div(tmpdiv, stripesize);
5162 r5or6_last_row = r0_last_row = tmpdiv;
5164 first_row = r5or6_first_row = r0_first_row =
5165 first_block / stripesize;
5166 r5or6_last_row = r0_last_row = last_block / stripesize;
5168 if (r5or6_first_row != r5or6_last_row)
5169 return IO_ACCEL_INELIGIBLE;
5172 /* Verify request is in a single column */
5173 #if BITS_PER_LONG == 32
5174 tmpdiv = first_block;
5175 first_row_offset = do_div(tmpdiv, stripesize);
5176 tmpdiv = first_row_offset;
5177 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
5178 r5or6_first_row_offset = first_row_offset;
5179 tmpdiv = last_block;
5180 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
5181 tmpdiv = r5or6_last_row_offset;
5182 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
5183 tmpdiv = r5or6_first_row_offset;
5184 (void) do_div(tmpdiv, map->strip_size);
5185 first_column = r5or6_first_column = tmpdiv;
5186 tmpdiv = r5or6_last_row_offset;
5187 (void) do_div(tmpdiv, map->strip_size);
5188 r5or6_last_column = tmpdiv;
5190 first_row_offset = r5or6_first_row_offset =
5191 (u32)((first_block % stripesize) %
5192 r5or6_blocks_per_row);
5194 r5or6_last_row_offset =
5195 (u32)((last_block % stripesize) %
5196 r5or6_blocks_per_row);
5198 first_column = r5or6_first_column =
5199 r5or6_first_row_offset / le16_to_cpu(map->strip_size);
5201 r5or6_last_row_offset / le16_to_cpu(map->strip_size);
5203 if (r5or6_first_column != r5or6_last_column)
5204 return IO_ACCEL_INELIGIBLE;
5206 /* Request is eligible */
5207 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
5208 le16_to_cpu(map->row_cnt);
5210 map_index = (first_group *
5211 (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
5212 (map_row * total_disks_per_row) + first_column;
5215 return IO_ACCEL_INELIGIBLE;
5218 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
5219 return IO_ACCEL_INELIGIBLE;
5221 c->phys_disk = dev->phys_disk[map_index];
	if (!c->phys_disk)
		return IO_ACCEL_INELIGIBLE;
5225 disk_handle = dd[map_index].ioaccel_handle;
5226 disk_block = le64_to_cpu(map->disk_starting_blk) +
5227 first_row * le16_to_cpu(map->strip_size) +
5228 (first_row_offset - first_column *
5229 le16_to_cpu(map->strip_size));
5230 disk_block_cnt = block_cnt;
5232 /* handle differing logical/physical block sizes */
5233 if (map->phys_blk_shift) {
5234 disk_block <<= map->phys_blk_shift;
5235 disk_block_cnt <<= map->phys_blk_shift;
5237 BUG_ON(disk_block_cnt > 0xffff);
5239 /* build the new CDB for the physical disk I/O */
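	/*
	 * A disk LBA above 0xffffffff no longer fits the 4-byte LBA field
	 * of READ/WRITE(10), so a 16-byte CDB with an 8-byte LBA is built
	 * instead.
	 */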
5240 if (disk_block > 0xffffffff) {
5241 cdb[0] = is_write ? WRITE_16 : READ_16;
5243 cdb[2] = (u8) (disk_block >> 56);
5244 cdb[3] = (u8) (disk_block >> 48);
5245 cdb[4] = (u8) (disk_block >> 40);
5246 cdb[5] = (u8) (disk_block >> 32);
5247 cdb[6] = (u8) (disk_block >> 24);
5248 cdb[7] = (u8) (disk_block >> 16);
5249 cdb[8] = (u8) (disk_block >> 8);
5250 cdb[9] = (u8) (disk_block);
5251 cdb[10] = (u8) (disk_block_cnt >> 24);
5252 cdb[11] = (u8) (disk_block_cnt >> 16);
5253 cdb[12] = (u8) (disk_block_cnt >> 8);
		cdb[13] = (u8) (disk_block_cnt);
		cdb[14] = 0;
		cdb[15] = 0;
		cdb_len = 16;
	} else {
5259 cdb[0] = is_write ? WRITE_10 : READ_10;
5261 cdb[2] = (u8) (disk_block >> 24);
5262 cdb[3] = (u8) (disk_block >> 16);
5263 cdb[4] = (u8) (disk_block >> 8);
5264 cdb[5] = (u8) (disk_block);
5266 cdb[7] = (u8) (disk_block_cnt >> 8);
		cdb[8] = (u8) (disk_block_cnt);
		cdb[9] = 0;
		cdb_len = 10;
	}
5271 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
5273 dev->phys_disk[map_index]);
/*
 * Submit commands down the "normal" RAID stack path.
 * All callers of hpsa_ciss_submit must check lockup_detected
 * beforehand, optionally before and always after calling cmd_alloc.
 */
5281 static int hpsa_ciss_submit(struct ctlr_info *h,
5282 struct CommandList *c, struct scsi_cmnd *cmd,
5283 unsigned char scsi3addr[])
5285 cmd->host_scribble = (unsigned char *) c;
5286 c->cmd_type = CMD_SCSI;
5288 c->Header.ReplyQueue = 0; /* unused in simple mode */
5289 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
5290 c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
5292 /* Fill in the request block... */
5294 c->Request.Timeout = 0;
5295 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
5296 c->Request.CDBLen = cmd->cmd_len;
5297 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
5298 switch (cmd->sc_data_direction) {
5300 c->Request.type_attr_dir =
5301 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
5303 case DMA_FROM_DEVICE:
5304 c->Request.type_attr_dir =
5305 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
5308 c->Request.type_attr_dir =
5309 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
5311 case DMA_BIDIRECTIONAL:
5312 /* This can happen if a buggy application does a scsi passthru
5313 * and sets both inlen and outlen to non-zero. ( see
5314 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
5317 c->Request.type_attr_dir =
5318 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
5319 /* This is technically wrong, and hpsa controllers should
5320 * reject it with CMD_INVALID, which is the most correct
5321 * response, but non-fibre backends appear to let it
5322 * slide by, and give the same results as if this field
5323 * were set correctly. Either way is acceptable for
		 * our purposes here.
		 */
		break;
5330 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
5331 cmd->sc_data_direction);
5336 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
5337 hpsa_cmd_resolve_and_free(h, c);
5338 return SCSI_MLQUEUE_HOST_BUSY;
5340 enqueue_cmd_and_start_io(h, c);
	/* the cmd'll come back via intr handler in complete_scsi_command() */
	return 0;
}
5345 static void hpsa_cmd_init(struct ctlr_info *h, int index,
5346 struct CommandList *c)
5348 dma_addr_t cmd_dma_handle, err_dma_handle;
5350 /* Zero out all of commandlist except the last field, refcount */
5351 memset(c, 0, offsetof(struct CommandList, refcount));
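	/*
	 * The tag below is the command's pool index shifted by
	 * DIRECT_LOOKUP_SHIFT, so the completion path can map a returned
	 * tag straight back to its CommandList entry without searching.
	 */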
5352 c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
5353 cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5354 c->err_info = h->errinfo_pool + index;
5355 memset(c->err_info, 0, sizeof(*c->err_info));
5356 err_dma_handle = h->errinfo_pool_dhandle
5357 + index * sizeof(*c->err_info);
5358 c->cmdindex = index;
5359 c->busaddr = (u32) cmd_dma_handle;
5360 c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
5361 c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
5363 c->scsi_cmd = SCSI_CMD_IDLE;
5366 static void hpsa_preinitialize_commands(struct ctlr_info *h)
5370 for (i = 0; i < h->nr_cmds; i++) {
5371 struct CommandList *c = h->cmd_pool + i;
5373 hpsa_cmd_init(h, i, c);
5374 atomic_set(&c->refcount, 0);
5378 static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
5379 struct CommandList *c)
5381 dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5383 BUG_ON(c->cmdindex != index);
5385 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
5386 memset(c->err_info, 0, sizeof(*c->err_info));
5387 c->busaddr = (u32) cmd_dma_handle;
5390 static int hpsa_ioaccel_submit(struct ctlr_info *h,
5391 struct CommandList *c, struct scsi_cmnd *cmd,
5392 unsigned char *scsi3addr)
5394 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
5395 int rc = IO_ACCEL_INELIGIBLE;
5398 return SCSI_MLQUEUE_HOST_BUSY;
5400 cmd->host_scribble = (unsigned char *) c;
5402 if (dev->offload_enabled) {
5403 hpsa_cmd_init(h, c->cmdindex, c);
5404 c->cmd_type = CMD_SCSI;
5406 rc = hpsa_scsi_ioaccel_raid_map(h, c);
5407 if (rc < 0) /* scsi_dma_map failed. */
5408 rc = SCSI_MLQUEUE_HOST_BUSY;
5409 } else if (dev->hba_ioaccel_enabled) {
5410 hpsa_cmd_init(h, c->cmdindex, c);
5411 c->cmd_type = CMD_SCSI;
5413 rc = hpsa_scsi_ioaccel_direct_map(h, c);
5414 if (rc < 0) /* scsi_dma_map failed. */
5415 rc = SCSI_MLQUEUE_HOST_BUSY;
5420 static void hpsa_command_resubmit_worker(struct work_struct *work)
5422 struct scsi_cmnd *cmd;
5423 struct hpsa_scsi_dev_t *dev;
5424 struct CommandList *c = container_of(work, struct CommandList, work);
5427 dev = cmd->device->hostdata;
5429 cmd->result = DID_NO_CONNECT << 16;
5430 return hpsa_cmd_free_and_done(c->h, c, cmd);
5432 if (c->reset_pending)
5433 return hpsa_cmd_resolve_and_free(c->h, c);
5434 if (c->abort_pending)
5435 return hpsa_cmd_abort_and_free(c->h, c, cmd);
5436 if (c->cmd_type == CMD_IOACCEL2) {
5437 struct ctlr_info *h = c->h;
5438 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5441 if (c2->error_data.serv_response ==
5442 IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
5443 rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
5446 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5448 * If we get here, it means dma mapping failed.
5449 * Try again via scsi mid layer, which will
5450 * then get SCSI_MLQUEUE_HOST_BUSY.
5452 cmd->result = DID_IMM_RETRY << 16;
5453 return hpsa_cmd_free_and_done(h, c, cmd);
5455 /* else, fall thru and resubmit down CISS path */
5458 hpsa_cmd_partial_init(c->h, c->cmdindex, c);
5459 if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
5461 * If we get here, it means dma mapping failed. Try
5462 * again via scsi mid layer, which will then get
5463 * SCSI_MLQUEUE_HOST_BUSY.
5465 * hpsa_ciss_submit will have already freed c
5466 * if it encountered a dma mapping failure.
5468 cmd->result = DID_IMM_RETRY << 16;
5469 cmd->scsi_done(cmd);
5473 /* Running in struct Scsi_Host->host_lock less mode */
5474 static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
5476 struct ctlr_info *h;
5477 struct hpsa_scsi_dev_t *dev;
5478 unsigned char scsi3addr[8];
5479 struct CommandList *c;
5482 /* Get the ptr to our adapter structure out of cmd->host. */
5483 h = sdev_to_hba(cmd->device);
5485 BUG_ON(cmd->request->tag < 0);
	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = NOT_READY << 16; /* host byte */
		cmd->scsi_done(cmd);
		return 0;
	}
5495 cmd->result = DID_NO_CONNECT << 16;
5496 cmd->scsi_done(cmd);
5500 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
5502 if (unlikely(lockup_detected(h))) {
5503 cmd->result = DID_NO_CONNECT << 16;
5504 cmd->scsi_done(cmd);
5507 c = cmd_tagged_alloc(h, cmd);
	/*
	 * Call alternate submit routine for I/O accelerated commands.
	 * Retries always go down the normal I/O path.
	 */
5513 if (likely(cmd->retries == 0 &&
5514 cmd->request->cmd_type == REQ_TYPE_FS &&
5515 h->acciopath_status)) {
5516 rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
5519 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5520 hpsa_cmd_resolve_and_free(h, c);
5521 return SCSI_MLQUEUE_HOST_BUSY;
5524 return hpsa_ciss_submit(h, c, cmd, scsi3addr);
5527 static void hpsa_scan_complete(struct ctlr_info *h)
5529 unsigned long flags;
5531 spin_lock_irqsave(&h->scan_lock, flags);
5532 h->scan_finished = 1;
5533 wake_up(&h->scan_wait_queue);
5534 spin_unlock_irqrestore(&h->scan_lock, flags);
5537 static void hpsa_scan_start(struct Scsi_Host *sh)
5539 struct ctlr_info *h = shost_to_hba(sh);
5540 unsigned long flags;
	/*
	 * Don't let rescans be initiated on a controller known to be locked
5544 * up. If the controller locks up *during* a rescan, that thread is
5545 * probably hosed, but at least we can prevent new rescan threads from
	 * piling up on a locked up controller.
	 */
5548 if (unlikely(lockup_detected(h)))
5549 return hpsa_scan_complete(h);
	/*
	 * If a scan is already waiting to run, no need to add another.
	 */
5554 spin_lock_irqsave(&h->scan_lock, flags);
5555 if (h->scan_waiting) {
		spin_unlock_irqrestore(&h->scan_lock, flags);
		return;
	}
5560 spin_unlock_irqrestore(&h->scan_lock, flags);
5562 /* wait until any scan already in progress is finished. */
5564 spin_lock_irqsave(&h->scan_lock, flags);
5565 if (h->scan_finished)
5567 h->scan_waiting = 1;
5568 spin_unlock_irqrestore(&h->scan_lock, flags);
5569 wait_event(h->scan_wait_queue, h->scan_finished);
5570 /* Note: We don't need to worry about a race between this
5571 * thread and driver unload because the midlayer will
5572 * have incremented the reference count, so unload won't
	 * happen if we're in here.
	 */
5576 h->scan_finished = 0; /* mark scan as in progress */
5577 h->scan_waiting = 0;
5578 spin_unlock_irqrestore(&h->scan_lock, flags);
5580 if (unlikely(lockup_detected(h)))
5581 return hpsa_scan_complete(h);
5583 hpsa_update_scsi_devices(h);
5585 hpsa_scan_complete(h);
5588 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
5590 struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
5597 else if (qdepth > logical_drive->queue_depth)
5598 qdepth = logical_drive->queue_depth;
5600 return scsi_change_queue_depth(sdev, qdepth);
5603 static int hpsa_scan_finished(struct Scsi_Host *sh,
5604 unsigned long elapsed_time)
5606 struct ctlr_info *h = shost_to_hba(sh);
5607 unsigned long flags;
5610 spin_lock_irqsave(&h->scan_lock, flags);
5611 finished = h->scan_finished;
5612 spin_unlock_irqrestore(&h->scan_lock, flags);
5616 static int hpsa_scsi_host_alloc(struct ctlr_info *h)
5618 struct Scsi_Host *sh;
5620 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
5622 dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
5629 sh->max_channel = 3;
5630 sh->max_cmd_len = MAX_COMMAND_SIZE;
5631 sh->max_lun = HPSA_MAX_LUN;
5632 sh->max_id = HPSA_MAX_LUN;
5633 sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
5634 sh->cmd_per_lun = sh->can_queue;
5635 sh->sg_tablesize = h->maxsgentries;
5636 sh->transportt = hpsa_sas_transport_template;
5637 sh->hostdata[0] = (unsigned long) h;
5638 sh->irq = h->intr[h->intr_mode];
5639 sh->unique_id = sh->irq;
5645 static int hpsa_scsi_add_host(struct ctlr_info *h)
5649 rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
5651 dev_err(&h->pdev->dev, "scsi_add_host failed\n");
5654 scsi_scan_host(h->scsi_host);
/*
 * The block layer has already gone to the trouble of picking out a unique,
 * small-integer tag for this request.  We use an offset from that value as
 * an index to select our command block.  (The offset allows us to reserve the
 * low-numbered entries for our own uses.)
 */
5664 static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
{
	int idx = scmd->request->tag;
5671 /* Offset to leave space for internal cmds. */
	return idx + HPSA_NRESERVED_CMDS;
}
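/*
 * Example for hpsa_get_cmd_index() above: if HPSA_NRESERVED_CMDS were 16
 * (the real value is defined elsewhere in the driver), block-layer tag 0
 * would map to command index 16, leaving pool entries 0-15 for
 * driver-initiated commands.
 */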
/*
 * Send a TEST_UNIT_READY command to the specified LUN using the specified
 * reply queue; returns zero if the unit is ready, and non-zero otherwise.
 */
5679 static int hpsa_send_test_unit_ready(struct ctlr_info *h,
5680 struct CommandList *c, unsigned char lunaddr[],
5685 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
5686 (void) fill_cmd(c, TEST_UNIT_READY, h,
5687 NULL, 0, 0, lunaddr, TYPE_CMD);
5688 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
5691 /* no unmap needed here because no data xfer. */
5693 /* Check if the unit is already ready. */
5694 if (c->err_info->CommandStatus == CMD_SUCCESS)
5698 * The first command sent after reset will receive "unit attention" to
5699 * indicate that the LUN has been reset...this is actually what we're
5700 * looking for (but, success is good too).
5702 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5703 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
5704 (c->err_info->SenseInfo[2] == NO_SENSE ||
5705 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
/*
 * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
 * returns zero when the unit is ready, and non-zero when giving up.
 */
5715 static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
5716 struct CommandList *c,
5717 unsigned char lunaddr[], int reply_queue)
5721 int waittime = 1; /* seconds */
5723 /* Send test unit ready until device ready, or give up. */
5724 for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
		/*
		 * Wait for a bit.  Do this first, because if we send
		 * the TUR right away, the reset will just abort it.
		 */
5730 msleep(1000 * waittime);
5732 rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
5736 /* Increase wait time with each try, up to a point. */
		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
			waittime *= 2;
5740 dev_warn(&h->pdev->dev,
5741 "waiting %d secs for device to become ready.\n",
5748 static int wait_for_device_to_become_ready(struct ctlr_info *h,
5749 unsigned char lunaddr[],
5756 struct CommandList *c;
	/*
	 * If no specific reply queue was requested, then send the TUR
5762 * repeatedly, requesting a reply on each reply queue; otherwise execute
	 * the loop exactly once using only the specified queue.
	 */
5765 if (reply_queue == DEFAULT_REPLY_QUEUE) {
5767 last_queue = h->nreply_queues - 1;
5769 first_queue = reply_queue;
5770 last_queue = reply_queue;
5773 for (rq = first_queue; rq <= last_queue; rq++) {
5774 rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
5780 dev_warn(&h->pdev->dev, "giving up on device.\n");
5782 dev_warn(&h->pdev->dev, "device is ready.\n");
5788 /* Need at least one of these error handlers to keep ../scsi/hosts.c from
 * complaining.  Doing a host- or bus-reset can't do anything good here.
 */
5791 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
5794 struct ctlr_info *h;
5795 struct hpsa_scsi_dev_t *dev;
5799 /* find the controller to which the command to be aborted was sent */
5800 h = sdev_to_hba(scsicmd->device);
5801 if (h == NULL) /* paranoia */
5804 if (lockup_detected(h))
5807 dev = scsicmd->device->hostdata;
	if (!dev) {
		dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
		return FAILED;
	}
5813 /* if controller locked up, we can guarantee command won't complete */
5814 if (lockup_detected(h)) {
5815 snprintf(msg, sizeof(msg),
5816 "cmd %d RESET FAILED, lockup detected",
5817 hpsa_get_cmd_index(scsicmd));
5818 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5822 /* this reset request might be the result of a lockup; check */
5823 if (detect_controller_lockup(h)) {
5824 snprintf(msg, sizeof(msg),
5825 "cmd %d RESET FAILED, new lockup detected",
5826 hpsa_get_cmd_index(scsicmd));
5827 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5831 /* Do not attempt on controller */
5832 if (is_hba_lunid(dev->scsi3addr))
5835 if (is_logical_dev_addr_mode(dev->scsi3addr))
5836 reset_type = HPSA_DEVICE_RESET_MSG;
5838 reset_type = HPSA_PHYS_TARGET_RESET;
5840 sprintf(msg, "resetting %s",
5841 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
5842 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5844 h->reset_in_progress = 1;
5846 /* send a reset to the SCSI LUN which the command was sent to */
5847 rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type,
5848 DEFAULT_REPLY_QUEUE);
5849 sprintf(msg, "reset %s %s",
5850 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
5851 rc == 0 ? "completed successfully" : "failed");
5852 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5853 h->reset_in_progress = 0;
	return rc == 0 ? SUCCESS : FAILED;
}
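/*
 * swizzle_abort_tag() below byte-reverses each 32-bit half of the 64-bit
 * tag; controllers with h->needs_abort_tags_swizzled set expect abort tags
 * in this byte order.
 */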
5857 static void swizzle_abort_tag(u8 *tag)
{
	u8 original_tag[8];

	memcpy(original_tag, tag, 8);
5862 tag[0] = original_tag[3];
5863 tag[1] = original_tag[2];
5864 tag[2] = original_tag[1];
5865 tag[3] = original_tag[0];
5866 tag[4] = original_tag[7];
5867 tag[5] = original_tag[6];
5868 tag[6] = original_tag[5];
	tag[7] = original_tag[4];
}
5872 static void hpsa_get_tag(struct ctlr_info *h,
5873 struct CommandList *c, __le32 *taglower, __le32 *tagupper)
{
	u64 tag;

	if (c->cmd_type == CMD_IOACCEL1) {
5877 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
5878 &h->ioaccel_cmd_pool[c->cmdindex];
5879 tag = le64_to_cpu(cm1->tag);
5880 *tagupper = cpu_to_le32(tag >> 32);
5881 *taglower = cpu_to_le32(tag);
5884 if (c->cmd_type == CMD_IOACCEL2) {
5885 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
5886 &h->ioaccel2_cmd_pool[c->cmdindex];
5887 /* upper tag not used in ioaccel2 mode */
5888 memset(tagupper, 0, sizeof(*tagupper));
5889 *taglower = cm2->Tag;
5892 tag = le64_to_cpu(c->Header.tag);
5893 *tagupper = cpu_to_le32(tag >> 32);
	*taglower = cpu_to_le32(tag);
}
5897 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
5898 struct CommandList *abort, int reply_queue)
5901 struct CommandList *c;
5902 struct ErrorInfo *ei;
5903 __le32 tagupper, taglower;
5907 /* fill_cmd can't fail here, no buffer to map */
5908 (void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag,
5909 0, 0, scsi3addr, TYPE_MSG);
5910 if (h->needs_abort_tags_swizzled)
5911 swizzle_abort_tag(&c->Request.CDB[4]);
5912 (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
5913 hpsa_get_tag(h, abort, &taglower, &tagupper);
5914 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n",
5915 __func__, tagupper, taglower);
5916 /* no unmap needed here because no data xfer. */
	ei = c->err_info;
	switch (ei->CommandStatus) {
5922 case CMD_TMF_STATUS:
5923 rc = hpsa_evaluate_tmf_status(h, c);
	case CMD_UNABORTABLE: /* Very common, don't make noise. */
		rc = -1;
		break;
	default:
5929 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
5930 __func__, tagupper, taglower);
5931 hpsa_scsi_interpret_error(h, c);
5936 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
5937 __func__, tagupper, taglower);
5941 static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h,
5942 struct CommandList *command_to_abort, int reply_queue)
5944 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5945 struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
5946 struct io_accel2_cmd *c2a =
5947 &h->ioaccel2_cmd_pool[command_to_abort->cmdindex];
5948 struct scsi_cmnd *scmd = command_to_abort->scsi_cmd;
5949 struct hpsa_scsi_dev_t *dev = scmd->device->hostdata;
	/*
	 * We're overlaying struct hpsa_tmf_struct on top of something which
	 * was allocated as a struct io_accel2_cmd, so we better be sure it
	 * actually fits, and doesn't overrun the error info space.
	 */
5959 BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) >
5960 sizeof(struct io_accel2_cmd));
5961 BUG_ON(offsetof(struct io_accel2_cmd, error_data) <
5962 offsetof(struct hpsa_tmf_struct, error_len) +
5963 sizeof(ac->error_len));
5965 c->cmd_type = IOACCEL2_TMF;
5966 c->scsi_cmd = SCSI_CMD_BUSY;
5968 /* Adjust the DMA address to point to the accelerated command buffer */
5969 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
5970 (c->cmdindex * sizeof(struct io_accel2_cmd));
5971 BUG_ON(c->busaddr & 0x0000007F);
5973 memset(ac, 0, sizeof(*c2)); /* yes this is correct */
5974 ac->iu_type = IOACCEL2_IU_TMF_TYPE;
5975 ac->reply_queue = reply_queue;
5976 ac->tmf = IOACCEL2_TMF_ABORT;
5977 ac->it_nexus = cpu_to_le32(dev->ioaccel_handle);
5978 memset(ac->lun_id, 0, sizeof(ac->lun_id));
5979 ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT);
5980 ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag));
5981 ac->error_ptr = cpu_to_le64(c->busaddr +
5982 offsetof(struct io_accel2_cmd, error_data));
5983 ac->error_len = cpu_to_le32(sizeof(c2->error_data));
/* ioaccel2 path firmware cannot handle abort task requests.
 * Change abort requests to physical target reset, and send to the
 * address of the physical disk used for the ioaccel 2 command.
 * Return 0 on success (IO_OK).
 */
5993 static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
5994 unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
5997 struct scsi_cmnd *scmd; /* scsi command within request being aborted */
5998 struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
5999 unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
6000 unsigned char *psa = &phys_scsi3addr[0];
6002 /* Get a pointer to the hpsa logical device. */
6003 scmd = abort->scsi_cmd;
6004 dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
6006 dev_warn(&h->pdev->dev,
6007 "Cannot abort: no device pointer for command.\n");
6008 return -1; /* not abortable */
6011 if (h->raid_offload_debug > 0)
6012 dev_info(&h->pdev->dev,
6013 "scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
6014 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
6016 scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
6017 scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
6019 if (!dev->offload_enabled) {
6020 dev_warn(&h->pdev->dev,
6021 "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
6022 return -1; /* not abortable */
6025 /* Incoming scsi3addr is logical addr. We need physical disk addr. */
6026 if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
6027 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
6028 return -1; /* not abortable */
6031 /* send the reset */
6032 if (h->raid_offload_debug > 0)
6033 dev_info(&h->pdev->dev,
6034 "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
6035 psa[0], psa[1], psa[2], psa[3],
6036 psa[4], psa[5], psa[6], psa[7]);
6037 rc = hpsa_do_reset(h, dev, psa, HPSA_PHYS_TARGET_RESET, reply_queue);
6039 dev_warn(&h->pdev->dev,
6040 "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
6041 psa[0], psa[1], psa[2], psa[3],
6042 psa[4], psa[5], psa[6], psa[7]);
6043 return rc; /* failed to reset */
6046 /* wait for device to recover */
6047 if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
6048 dev_warn(&h->pdev->dev,
6049 "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
6050 psa[0], psa[1], psa[2], psa[3],
6051 psa[4], psa[5], psa[6], psa[7]);
6052 return -1; /* failed to recover */
6055 /* device recovered */
6056 dev_info(&h->pdev->dev,
6057 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
6058 psa[0], psa[1], psa[2], psa[3],
6059 psa[4], psa[5], psa[6], psa[7]);
6061 return rc; /* success */
6064 static int hpsa_send_abort_ioaccel2(struct ctlr_info *h,
6065 struct CommandList *abort, int reply_queue)
6068 struct CommandList *c;
6069 __le32 taglower, tagupper;
6070 struct hpsa_scsi_dev_t *dev;
6071 struct io_accel2_cmd *c2;
6073 dev = abort->scsi_cmd->device->hostdata;
6077 if (!dev->offload_enabled && !dev->hba_ioaccel_enabled)
6081 setup_ioaccel2_abort_cmd(c, h, abort, reply_queue);
6082 c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
6083 (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
6084 hpsa_get_tag(h, abort, &taglower, &tagupper);
6085 dev_dbg(&h->pdev->dev,
6086 "%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n",
6087 __func__, tagupper, taglower);
6088 /* no unmap needed here because no data xfer. */
6090 dev_dbg(&h->pdev->dev,
6091 "%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n",
6092 __func__, tagupper, taglower, c2->error_data.serv_response);
6093 switch (c2->error_data.serv_response) {
6094 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
6095 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
6098 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
6099 case IOACCEL2_SERV_RESPONSE_FAILURE:
6100 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
6104 dev_warn(&h->pdev->dev,
6105 "%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n",
6106 __func__, tagupper, taglower,
6107 c2->error_data.serv_response);
6111 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
6112 tagupper, taglower);
6116 static int hpsa_send_abort_both_ways(struct ctlr_info *h,
6117 struct hpsa_scsi_dev_t *dev, struct CommandList *abort, int reply_queue)
{
	/*
	 * I/O accelerator mode 2 commands should be aborted via the
	 * accelerated path, since the RAID path is unaware of these
	 * commands, but not all underlying firmware can handle an abort TMF.
	 * Change the abort to a physical device reset when the abort TMF
	 * is unsupported.
	 */
6125 if (abort->cmd_type == CMD_IOACCEL2) {
6126 if ((HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags) ||
6127 dev->physical_device)
6128 return hpsa_send_abort_ioaccel2(h, abort,
6131 return hpsa_send_reset_as_abort_ioaccel2(h,
6133 abort, reply_queue);
6135 return hpsa_send_abort(h, dev->scsi3addr, abort, reply_queue);
6138 /* Find out which reply queue a command was meant to return on */
6139 static int hpsa_extract_reply_queue(struct ctlr_info *h,
6140 struct CommandList *c)
6142 if (c->cmd_type == CMD_IOACCEL2)
6143 return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue;
	return c->Header.ReplyQueue;
}
/*
 * Limit concurrency of abort commands to prevent
 * over-subscription of commands.
 */
6151 static inline int wait_for_available_abort_cmd(struct ctlr_info *h)
6153 #define ABORT_CMD_WAIT_MSECS 5000
6154 return !wait_event_timeout(h->abort_cmd_wait_queue,
6155 atomic_dec_if_positive(&h->abort_cmds_available) >= 0,
		msecs_to_jiffies(ABORT_CMD_WAIT_MSECS));
}
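/*
 * Note that wait_event_timeout() returns 0 on timeout, so a nonzero return
 * from wait_for_available_abort_cmd() above means no reserved abort slot
 * became available within ABORT_CMD_WAIT_MSECS.
 */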
6159 /* Send an abort for the specified command.
6160 * If the device and controller support it,
 * send a task abort request.
 */
6163 static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
6167 struct ctlr_info *h;
6168 struct hpsa_scsi_dev_t *dev;
6169 struct CommandList *abort; /* pointer to command to be aborted */
6170 struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */
6171 char msg[256]; /* For debug messaging. */
6173 __le32 tagupper, taglower;
6174 int refcount, reply_queue;
6179 if (sc->device == NULL)
6182 /* Find the controller of the command to be aborted */
6183 h = sdev_to_hba(sc->device);
6187 /* Find the device of the command to be aborted */
6188 dev = sc->device->hostdata;
6190 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
6195 /* If controller locked up, we can guarantee command won't complete */
6196 if (lockup_detected(h)) {
6197 hpsa_show_dev_msg(KERN_WARNING, h, dev,
6198 "ABORT FAILED, lockup detected");
6202 /* This is a good time to check if controller lockup has occurred */
6203 if (detect_controller_lockup(h)) {
6204 hpsa_show_dev_msg(KERN_WARNING, h, dev,
6205 "ABORT FAILED, new lockup detected");
6209 /* Check that controller supports some kind of task abort */
6210 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
6211 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
6214 memset(msg, 0, sizeof(msg));
6215 ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p",
6216 h->scsi_host->host_no, sc->device->channel,
6217 sc->device->id, sc->device->lun,
6218 "Aborting command", sc);
6220 /* Get SCSI command to be aborted */
6221 abort = (struct CommandList *) sc->host_scribble;
6222 if (abort == NULL) {
6223 /* This can happen if the command already completed. */
6226 refcount = atomic_inc_return(&abort->refcount);
6227 if (refcount == 1) { /* Command is done already. */
6232 /* Don't bother trying the abort if we know it won't work. */
6233 if (abort->cmd_type != CMD_IOACCEL2 &&
6234 abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) {
	/*
	 * Check that we're aborting the right command.
	 * It's possible the CommandList already completed and got re-used.
	 */
6243 if (abort->scsi_cmd != sc) {
6248 abort->abort_pending = true;
6249 hpsa_get_tag(h, abort, &taglower, &tagupper);
6250 reply_queue = hpsa_extract_reply_queue(h, abort);
6251 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
6252 as = abort->scsi_cmd;
6254 ml += sprintf(msg+ml,
6255 "CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ",
6256 as->cmd_len, as->cmnd[0], as->cmnd[1],
6258 dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg);
6259 hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command");
	/*
	 * Command is in flight, or possibly already completed
	 * by the firmware (but not to the scsi mid layer) but we can't
	 * distinguish which.  Send the abort down.
	 */
6266 if (wait_for_available_abort_cmd(h)) {
6267 dev_warn(&h->pdev->dev,
6268 "%s FAILED, timeout waiting for an abort command to become available.\n",
6273 rc = hpsa_send_abort_both_ways(h, dev, abort, reply_queue);
6274 atomic_inc(&h->abort_cmds_available);
6275 wake_up_all(&h->abort_cmd_wait_queue);
6277 dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg);
6278 hpsa_show_dev_msg(KERN_WARNING, h, dev,
6279 "FAILED to abort command");
6283 dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg);
6284 wait_event(h->event_sync_wait_queue,
6285 abort->scsi_cmd != sc || lockup_detected(h));
6287 return !lockup_detected(h) ? SUCCESS : FAILED;
6291 * For operations with an associated SCSI command, a command block is allocated
6292 * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the
6293 * block request tag as an index into a table of entries. cmd_tagged_free() is
6294 * the complement, although cmd_free() may be called instead.
6296 static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
6297 struct scsi_cmnd *scmd)
6299 int idx = hpsa_get_cmd_index(scmd);
6300 struct CommandList *c = h->cmd_pool + idx;
6302 if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
6303 dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
6304 idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
6305 /* The index value comes from the block layer, so if it's out of
6306 * bounds, it's probably not our bug.
6311 atomic_inc(&c->refcount);
6312 if (unlikely(!hpsa_is_cmd_idle(c))) {
6314 * We expect that the SCSI layer will hand us a unique tag
6315 * value. Thus, there should never be a collision here between
6316 * two requests...because if the selected command isn't idle
6317 * then someone is going to be very disappointed.
6319 dev_err(&h->pdev->dev,
6320 "tag collision (tag=%d) in cmd_tagged_alloc().\n",
6322 if (c->scsi_cmd != NULL)
6323 scsi_print_command(c->scsi_cmd);
6324 scsi_print_command(scmd);
6327 hpsa_cmd_partial_init(h, idx, c);
6331 static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
6334 * Release our reference to the block. We don't need to do anything
6335 * else to free it, because it is accessed by index. (There's no point
6336 * in checking the result of the decrement, since we cannot guarantee
6337 * that there isn't a concurrent abort which is also accessing it.)
6339 (void)atomic_dec(&c->refcount);
6343 * For operations that cannot sleep, a command block is allocated at init,
6344 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
6345 * which ones are free or in use. Lock must be held when calling this.
6346 * cmd_free() is the complement.
6347 * This function never gives up and returns NULL. If it hangs,
6348 * another thread must call cmd_free() to free some tags.
6351 static struct CommandList *cmd_alloc(struct ctlr_info *h)
6353 struct CommandList *c;
6358 * There is some *extremely* small but non-zero chance that
6359 * multiple threads could get in here, and one thread could
6360 * be scanning through the list of bits looking for a free
6361 * one, but the free ones are always behind him, and other
6362 * threads sneak in behind him and eat them before he can
6363 * get to them, so that while there is always a free one, a
6364 * very unlucky thread might be starved anyway, never able to
6365 * beat the other threads. In reality, this happens so
6366 * infrequently as to be indistinguishable from never.
6368 * Note that we start allocating commands before the SCSI host structure
6369 * is initialized. Since the search starts at bit zero, this
6370 * all works, since we have at least one command structure available;
6371 * however, it means that the structures with the low indexes have to be
6372 * reserved for driver-initiated requests, while requests from the block
6373 * layer will use the higher indexes.
6377 i = find_next_zero_bit(h->cmd_pool_bits,
6378 HPSA_NRESERVED_CMDS,
6380 if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
6384 c = h->cmd_pool + i;
6385 refcount = atomic_inc_return(&c->refcount);
6386 if (unlikely(refcount > 1)) {
6387 cmd_free(h, c); /* already in use */
6388 offset = (i + 1) % HPSA_NRESERVED_CMDS;
6391 set_bit(i & (BITS_PER_LONG - 1),
6392 h->cmd_pool_bits + (i / BITS_PER_LONG));
6393 break; /* it's ours now. */
6395 hpsa_cmd_partial_init(h, i, c);
6400 * This is the complementary operation to cmd_alloc(). Note, however, in some
6401 * corner cases it may also be used to free blocks allocated by
6402 * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
6403 * the clear-bit is harmless.
6405 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
6407 if (atomic_dec_and_test(&c->refcount)) {
6410 i = c - h->cmd_pool;
6411 clear_bit(i & (BITS_PER_LONG - 1),
6412 h->cmd_pool_bits + (i / BITS_PER_LONG));
6416 #ifdef CONFIG_COMPAT
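/*
 * Compat shim for 32-bit CCISS_PASSTHRU: repack the 32-bit ioctl
 * structure into the native 64-bit layout in compat-allocated user
 * space, forward it to hpsa_ioctl(), then copy the error info back
 * out to the 32-bit caller.
 */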
6418 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
6421 IOCTL32_Command_struct __user *arg32 =
6422 (IOCTL32_Command_struct __user *) arg;
6423 IOCTL_Command_struct arg64;
6424 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
6428 memset(&arg64, 0, sizeof(arg64));
6430 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
6431 sizeof(arg64.LUN_info));
6432 err |= copy_from_user(&arg64.Request, &arg32->Request,
6433 sizeof(arg64.Request));
6434 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
6435 sizeof(arg64.error_info));
6436 err |= get_user(arg64.buf_size, &arg32->buf_size);
6437 err |= get_user(cp, &arg32->buf);
6438 arg64.buf = compat_ptr(cp);
6439 err |= copy_to_user(p, &arg64, sizeof(arg64));
6444 err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
6447 err |= copy_in_user(&arg32->error_info, &p->error_info,
6448 sizeof(arg32->error_info));
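/*
 * Same conversion as hpsa_ioctl32_passthru(), but for CCISS_BIG_PASSTHRU,
 * whose structure carries an additional malloc_size field for
 * multi-segment transfer buffers.
 */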
6454 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
6455 int cmd, void __user *arg)
6457 BIG_IOCTL32_Command_struct __user *arg32 =
6458 (BIG_IOCTL32_Command_struct __user *) arg;
6459 BIG_IOCTL_Command_struct arg64;
6460 BIG_IOCTL_Command_struct __user *p =
6461 compat_alloc_user_space(sizeof(arg64));
6465 memset(&arg64, 0, sizeof(arg64));
6467 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
6468 sizeof(arg64.LUN_info));
6469 err |= copy_from_user(&arg64.Request, &arg32->Request,
6470 sizeof(arg64.Request));
6471 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
6472 sizeof(arg64.error_info));
6473 err |= get_user(arg64.buf_size, &arg32->buf_size);
6474 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
6475 err |= get_user(cp, &arg32->buf);
6476 arg64.buf = compat_ptr(cp);
6477 err |= copy_to_user(p, &arg64, sizeof(arg64));
6482 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
6485 err |= copy_in_user(&arg32->error_info, &p->error_info,
6486 sizeof(arg32->error_info));
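/*
 * 32-bit compat ioctl entry point: ioctls whose structures are
 * layout-compatible go straight to hpsa_ioctl(); only the two
 * passthru ioctls need translation.
 */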
6492 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
6495 case CCISS_GETPCIINFO:
6496 case CCISS_GETINTINFO:
6497 case CCISS_SETINTINFO:
6498 case CCISS_GETNODENAME:
6499 case CCISS_SETNODENAME:
6500 case CCISS_GETHEARTBEAT:
6501 case CCISS_GETBUSTYPES:
6502 case CCISS_GETFIRMVER:
6503 case CCISS_GETDRIVVER:
6504 case CCISS_REVALIDVOLS:
6505 case CCISS_DEREGDISK:
6506 case CCISS_REGNEWDISK:
6508 case CCISS_RESCANDISK:
6509 case CCISS_GETLUNINFO:
6510 return hpsa_ioctl(dev, cmd, arg);
6512 case CCISS_PASSTHRU32:
6513 return hpsa_ioctl32_passthru(dev, cmd, arg);
6514 case CCISS_BIG_PASSTHRU32:
6515 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
6518 return -ENOIOCTLCMD;
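/* CCISS_GETPCIINFO: report PCI domain/bus/devfn and board ID to userspace. */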
6523 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
6525 struct hpsa_pci_info pciinfo;
6529 pciinfo.domain = pci_domain_nr(h->pdev->bus);
6530 pciinfo.bus = h->pdev->bus->number;
6531 pciinfo.dev_fn = h->pdev->devfn;
6532 pciinfo.board_id = h->board_id;
6533 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
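/*
 * CCISS_GETDRIVVER: parse HPSA_DRIVER_VERSION into major/minor/subminor
 * bytes and return them packed into a DriverVer_type.
 */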
6538 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
6540 DriverVer_type DriverVer;
6541 unsigned char vmaj, vmin, vsubmin;
6544 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
6545 &vmaj, &vmin, &vsubmin);
6547 dev_info(&h->pdev->dev, "driver version string '%s' "
6548 "unrecognized.\n", HPSA_DRIVER_VERSION);
6553 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
6556 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
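/*
 * CCISS_PASSTHRU: build a single-SG CISS command from a user-supplied
 * request, run it synchronously, and copy status (and any read data)
 * back to userspace. Requires CAP_SYS_RAWIO.
 */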
6561 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6563 IOCTL_Command_struct iocommand;
6564 struct CommandList *c;
6571 if (!capable(CAP_SYS_RAWIO))
6573 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
6575 if ((iocommand.buf_size < 1) &&
6576 (iocommand.Request.Type.Direction != XFER_NONE)) {
6579 if (iocommand.buf_size > 0) {
6580 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
6583 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6584 /* Copy the data into the buffer we created */
6585 if (copy_from_user(buff, iocommand.buf,
6586 iocommand.buf_size)) {
6591 memset(buff, 0, iocommand.buf_size);
6596 /* Fill in the command type */
6597 c->cmd_type = CMD_IOCTL_PEND;
6598 c->scsi_cmd = SCSI_CMD_BUSY;
6599 /* Fill in Command Header */
6600 c->Header.ReplyQueue = 0; /* unused in simple mode */
6601 if (iocommand.buf_size > 0) { /* buffer to fill */
6602 c->Header.SGList = 1;
6603 c->Header.SGTotal = cpu_to_le16(1);
6604 } else { /* no buffers to fill */
6605 c->Header.SGList = 0;
6606 c->Header.SGTotal = cpu_to_le16(0);
6608 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
6610 /* Fill in Request block */
6611 memcpy(&c->Request, &iocommand.Request,
6612 sizeof(c->Request));
6614 /* Fill in the scatter gather information */
6615 if (iocommand.buf_size > 0) {
6616 temp64 = pci_map_single(h->pdev, buff,
6617 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
6618 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
6619 c->SG[0].Addr = cpu_to_le64(0);
6620 c->SG[0].Len = cpu_to_le32(0);
6624 c->SG[0].Addr = cpu_to_le64(temp64);
6625 c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
6626 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
6628 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
6630 if (iocommand.buf_size > 0)
6631 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
6632 check_ioctl_unit_attention(h, c);
6638 /* Copy the error information out */
6639 memcpy(&iocommand.error_info, c->err_info,
6640 sizeof(iocommand.error_info));
6641 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
6645 if ((iocommand.Request.Type.Direction & XFER_READ) &&
6646 iocommand.buf_size > 0) {
6647 /* Copy the data out of the buffer we created */
6648 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
6660 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6662 BIG_IOCTL_Command_struct *ioc;
6663 struct CommandList *c;
6664 unsigned char **buff = NULL;
6665 int *buff_size = NULL;
6671 BYTE __user *data_ptr;
6675 if (!capable(CAP_SYS_RAWIO))
6677 ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
6683 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
6687 if ((ioc->buf_size < 1) &&
6688 (ioc->Request.Type.Direction != XFER_NONE)) {
6692 /* Check kmalloc limits using all SGs */
6693 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
6697 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
6701 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
6706 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
6711 left = ioc->buf_size;
6712 data_ptr = ioc->buf;
6714 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
6715 buff_size[sg_used] = sz;
6716 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
6717 if (buff[sg_used] == NULL) {
6721 if (ioc->Request.Type.Direction & XFER_WRITE) {
6722 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
6727 memset(buff[sg_used], 0, sz);
6734 c->cmd_type = CMD_IOCTL_PEND;
6735 c->scsi_cmd = SCSI_CMD_BUSY;
6736 c->Header.ReplyQueue = 0;
6737 c->Header.SGList = (u8) sg_used;
6738 c->Header.SGTotal = cpu_to_le16(sg_used);
6739 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
6740 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
6741 if (ioc->buf_size > 0) {
6743 for (i = 0; i < sg_used; i++) {
6744 temp64 = pci_map_single(h->pdev, buff[i],
6745 buff_size[i], PCI_DMA_BIDIRECTIONAL);
6746 if (dma_mapping_error(&h->pdev->dev,
6747 (dma_addr_t) temp64)) {
6748 c->SG[i].Addr = cpu_to_le64(0);
6749 c->SG[i].Len = cpu_to_le32(0);
6750 hpsa_pci_unmap(h->pdev, c, i,
6751 PCI_DMA_BIDIRECTIONAL);
6755 c->SG[i].Addr = cpu_to_le64(temp64);
6756 c->SG[i].Len = cpu_to_le32(buff_size[i]);
6757 c->SG[i].Ext = cpu_to_le32(0);
6759 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
6761 status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
6764 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
6765 check_ioctl_unit_attention(h, c);
6771 /* Copy the error information out */
6772 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
6773 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
6777 if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
6780 /* Copy the data out of the buffer we created */
6781 BYTE __user *ptr = ioc->buf;
6782 for (i = 0; i < sg_used; i++) {
6783 if (copy_to_user(ptr, buff[i], buff_size[i])) {
6787 ptr += buff_size[i];
6797 for (i = 0; i < sg_used; i++)
6806 static void check_ioctl_unit_attention(struct ctlr_info *h,
6807 struct CommandList *c)
6809 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
6810 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
6811 (void) check_for_unit_attention(h, c);
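/*
 * Main ioctl entry point: dispatch CCISS ioctls to their handlers.
 * The passthru ioctls are throttled via h->passthru_cmds_avail so
 * userspace cannot exhaust the command pool.
 */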
6817 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
6819 struct ctlr_info *h;
6820 void __user *argp = (void __user *)arg;
6823 h = sdev_to_hba(dev);
6826 case CCISS_DEREGDISK:
6827 case CCISS_REGNEWDISK:
6829 hpsa_scan_start(h->scsi_host);
6831 case CCISS_GETPCIINFO:
6832 return hpsa_getpciinfo_ioctl(h, argp);
6833 case CCISS_GETDRIVVER:
6834 return hpsa_getdrivver_ioctl(h, argp);
6835 case CCISS_PASSTHRU:
6836 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6838 rc = hpsa_passthru_ioctl(h, argp);
6839 atomic_inc(&h->passthru_cmds_avail);
6841 case CCISS_BIG_PASSTHRU:
6842 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6844 rc = hpsa_big_passthru_ioctl(h, argp);
6845 atomic_inc(&h->passthru_cmds_avail);
6852 static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
6855 struct CommandList *c;
6859 /* fill_cmd can't fail here, no data buffer to map */
6860 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
6861 RAID_CTLR_LUNID, TYPE_MSG);
6862 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
6864 enqueue_cmd_and_start_io(h, c);
6865 /* Don't wait for completion, the reset won't complete. Don't free
6866 * the command either. This is the last command we will send before
6867 * re-initializing everything, so it doesn't matter and won't leak.
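/*
 * Fill in a CommandList for a driver-initiated request: set up the
 * header and CDB for the given opcode (TYPE_CMD) or task-management
 * message (TYPE_MSG), then DMA-map the optional data buffer with the
 * direction encoded in type_attr_dir.
 */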
6872 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
6873 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
6876 int pci_dir = XFER_NONE;
6877 u64 tag; /* for commands to be aborted */
6879 c->cmd_type = CMD_IOCTL_PEND;
6880 c->scsi_cmd = SCSI_CMD_BUSY;
6881 c->Header.ReplyQueue = 0;
6882 if (buff != NULL && size > 0) {
6883 c->Header.SGList = 1;
6884 c->Header.SGTotal = cpu_to_le16(1);
6886 c->Header.SGList = 0;
6887 c->Header.SGTotal = cpu_to_le16(0);
6889 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
6891 if (cmd_type == TYPE_CMD) {
6894 /* are we trying to read a vital product page */
6895 if (page_code & VPD_PAGE) {
6896 c->Request.CDB[1] = 0x01;
6897 c->Request.CDB[2] = (page_code & 0xff);
6899 c->Request.CDBLen = 6;
6900 c->Request.type_attr_dir =
6901 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6902 c->Request.Timeout = 0;
6903 c->Request.CDB[0] = HPSA_INQUIRY;
6904 c->Request.CDB[4] = size & 0xFF;
6906 case HPSA_REPORT_LOG:
6907 case HPSA_REPORT_PHYS:
6908 /* Talking to the controller, so it's a physical command:
6909 mode = 00, target = 0. Nothing to write.
6911 c->Request.CDBLen = 12;
6912 c->Request.type_attr_dir =
6913 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6914 c->Request.Timeout = 0;
6915 c->Request.CDB[0] = cmd;
6916 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6917 c->Request.CDB[7] = (size >> 16) & 0xFF;
6918 c->Request.CDB[8] = (size >> 8) & 0xFF;
6919 c->Request.CDB[9] = size & 0xFF;
6921 case BMIC_SENSE_DIAG_OPTIONS:
6922 c->Request.CDBLen = 16;
6923 c->Request.type_attr_dir =
6924 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6925 c->Request.Timeout = 0;
6926 /* Spec says this should be BMIC_WRITE */
6927 c->Request.CDB[0] = BMIC_READ;
6928 c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS;
6930 case BMIC_SET_DIAG_OPTIONS:
6931 c->Request.CDBLen = 16;
6932 c->Request.type_attr_dir =
6933 TYPE_ATTR_DIR(cmd_type,
6934 ATTR_SIMPLE, XFER_WRITE);
6935 c->Request.Timeout = 0;
6936 c->Request.CDB[0] = BMIC_WRITE;
6937 c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS;
6939 case HPSA_CACHE_FLUSH:
6940 c->Request.CDBLen = 12;
6941 c->Request.type_attr_dir =
6942 TYPE_ATTR_DIR(cmd_type,
6943 ATTR_SIMPLE, XFER_WRITE);
6944 c->Request.Timeout = 0;
6945 c->Request.CDB[0] = BMIC_WRITE;
6946 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
6947 c->Request.CDB[7] = (size >> 8) & 0xFF;
6948 c->Request.CDB[8] = size & 0xFF;
6950 case TEST_UNIT_READY:
6951 c->Request.CDBLen = 6;
6952 c->Request.type_attr_dir =
6953 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6954 c->Request.Timeout = 0;
6956 case HPSA_GET_RAID_MAP:
6957 c->Request.CDBLen = 12;
6958 c->Request.type_attr_dir =
6959 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6960 c->Request.Timeout = 0;
6961 c->Request.CDB[0] = HPSA_CISS_READ;
6962 c->Request.CDB[1] = cmd;
6963 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6964 c->Request.CDB[7] = (size >> 16) & 0xFF;
6965 c->Request.CDB[8] = (size >> 8) & 0xFF;
6966 c->Request.CDB[9] = size & 0xFF;
6968 case BMIC_SENSE_CONTROLLER_PARAMETERS:
6969 c->Request.CDBLen = 10;
6970 c->Request.type_attr_dir =
6971 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6972 c->Request.Timeout = 0;
6973 c->Request.CDB[0] = BMIC_READ;
6974 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
6975 c->Request.CDB[7] = (size >> 16) & 0xFF;
6976 c->Request.CDB[8] = (size >> 8) & 0xFF;
6978 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
6979 c->Request.CDBLen = 10;
6980 c->Request.type_attr_dir =
6981 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6982 c->Request.Timeout = 0;
6983 c->Request.CDB[0] = BMIC_READ;
6984 c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
6985 c->Request.CDB[7] = (size >> 16) & 0xFF;
6985 c->Request.CDB[8] = (size >> 8) & 0xFF;
6988 case BMIC_SENSE_SUBSYSTEM_INFORMATION:
6989 c->Request.CDBLen = 10;
6990 c->Request.type_attr_dir =
6991 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6992 c->Request.Timeout = 0;
6993 c->Request.CDB[0] = BMIC_READ;
6994 c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION;
6995 c->Request.CDB[7] = (size >> 16) & 0xFF;
6996 c->Request.CDB[8] = (size >> 8) & 0xFF;
6998 case BMIC_SENSE_STORAGE_BOX_PARAMS:
6999 c->Request.CDBLen = 10;
7000 c->Request.type_attr_dir =
7001 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
7002 c->Request.Timeout = 0;
7003 c->Request.CDB[0] = BMIC_READ;
7004 c->Request.CDB[6] = BMIC_SENSE_STORAGE_BOX_PARAMS;
7005 c->Request.CDB[7] = (size >> 16) & 0xFF;
7006 c->Request.CDB[8] = (size >> 8) & 0xFF;
7008 case BMIC_IDENTIFY_CONTROLLER:
7009 c->Request.CDBLen = 10;
7010 c->Request.type_attr_dir =
7011 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
7012 c->Request.Timeout = 0;
7013 c->Request.CDB[0] = BMIC_READ;
7014 c->Request.CDB[1] = 0;
7015 c->Request.CDB[2] = 0;
7016 c->Request.CDB[3] = 0;
7017 c->Request.CDB[4] = 0;
7018 c->Request.CDB[5] = 0;
7019 c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER;
7020 c->Request.CDB[7] = (size >> 16) & 0xFF;
7021 c->Request.CDB[8] = (size >> 8) & 0xFF;
7022 c->Request.CDB[9] = 0;
7025 dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
7029 } else if (cmd_type == TYPE_MSG) {
7032 case HPSA_PHYS_TARGET_RESET:
7033 c->Request.CDBLen = 16;
7034 c->Request.type_attr_dir =
7035 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
7036 c->Request.Timeout = 0; /* Don't time out */
7037 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
7038 c->Request.CDB[0] = HPSA_RESET;
7039 c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE;
7040 /* Physical target reset needs no control bytes 4-7 */
7041 c->Request.CDB[4] = 0x00;
7042 c->Request.CDB[5] = 0x00;
7043 c->Request.CDB[6] = 0x00;
7044 c->Request.CDB[7] = 0x00;
7046 case HPSA_DEVICE_RESET_MSG:
7047 c->Request.CDBLen = 16;
7048 c->Request.type_attr_dir =
7049 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
7050 c->Request.Timeout = 0; /* Don't time out */
7051 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
7052 c->Request.CDB[0] = cmd;
7053 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
7054 /* If bytes 4-7 are zero, it means reset the LunID device */
7056 c->Request.CDB[4] = 0x00;
7057 c->Request.CDB[5] = 0x00;
7058 c->Request.CDB[6] = 0x00;
7059 c->Request.CDB[7] = 0x00;
7061 case HPSA_ABORT_MSG:
7062 memcpy(&tag, buff, sizeof(tag));
7063 dev_dbg(&h->pdev->dev,
7064 "Abort Tag:0x%016llx using rqst Tag:0x%016llx",
7065 tag, c->Header.tag);
7066 c->Request.CDBLen = 16;
7067 c->Request.type_attr_dir =
7068 TYPE_ATTR_DIR(cmd_type,
7069 ATTR_SIMPLE, XFER_WRITE);
7070 c->Request.Timeout = 0; /* Don't time out */
7071 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
7072 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
7073 c->Request.CDB[2] = 0x00; /* reserved */
7074 c->Request.CDB[3] = 0x00; /* reserved */
7075 /* Tag to abort goes in CDB[4]-CDB[11] */
7076 memcpy(&c->Request.CDB[4], &tag, sizeof(tag));
7077 c->Request.CDB[12] = 0x00; /* reserved */
7078 c->Request.CDB[13] = 0x00; /* reserved */
7079 c->Request.CDB[14] = 0x00; /* reserved */
7080 c->Request.CDB[15] = 0x00; /* reserved */
7083 dev_warn(&h->pdev->dev, "unknown message type %d\n",
7088 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
7092 switch (GET_DIR(c->Request.type_attr_dir)) {
7094 pci_dir = PCI_DMA_FROMDEVICE;
7097 pci_dir = PCI_DMA_TODEVICE;
7100 pci_dir = PCI_DMA_NONE;
7103 pci_dir = PCI_DMA_BIDIRECTIONAL;
7105 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
7111 * Map (physical) PCI mem into (virtual) kernel space
7113 static void __iomem *remap_pci_mem(ulong base, ulong size)
7115 ulong page_base = ((ulong) base) & PAGE_MASK;
7116 ulong page_offs = ((ulong) base) - page_base;
7117 void __iomem *page_remapped = ioremap_nocache(page_base,
7120 return page_remapped ? (page_remapped + page_offs) : NULL;
7123 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
7125 return h->access.command_completed(h, q);
7128 static inline bool interrupt_pending(struct ctlr_info *h)
7130 return h->access.intr_pending(h);
7133 static inline long interrupt_not_for_us(struct ctlr_info *h)
7135 return (h->access.intr_pending(h) == 0) ||
7136 (h->interrupts_enabled == 0);
7139 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
7142 if (unlikely(tag_index >= h->nr_cmds)) {
7143 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
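/* Route a completed command to the SCSI or ioctl completion path. */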
7149 static inline void finish_cmd(struct CommandList *c)
7151 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
7152 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
7153 || c->cmd_type == CMD_IOACCEL2))
7154 complete_scsi_command(c);
7155 else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
7156 complete(c->waiting);
7159 /* process completion of an indexed ("direct lookup") command */
7160 static inline void process_indexed_cmd(struct ctlr_info *h,
7164 struct CommandList *c;
7166 tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
7167 if (!bad_tag(h, tag_index, raw_tag)) {
7168 c = h->cmd_pool + tag_index;
7173 /* Some controllers, like p400, will give us one interrupt
7174 * after a soft reset, even if we turned interrupts off.
7175 * Only need to check for this in the hpsa_xxx_discard_completions functions.
7178 static int ignore_bogus_interrupt(struct ctlr_info *h)
7180 if (likely(!reset_devices))
7183 if (likely(h->interrupts_enabled))
7186 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
7187 "(known firmware bug.) Ignoring.\n");
7193 * Convert &h->q[x] (passed to interrupt handlers) back to h.
7194 * Relies on (h->q[x] == x) being true for x such that
7195 * 0 <= x < MAX_REPLY_QUEUES.
7197 static struct ctlr_info *queue_to_hba(u8 *queue)
7199 return container_of((queue - *queue), struct ctlr_info, q[0]);
7202 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
7204 struct ctlr_info *h = queue_to_hba(queue);
7205 u8 q = *(u8 *) queue;
7208 if (ignore_bogus_interrupt(h))
7211 if (interrupt_not_for_us(h))
7213 h->last_intr_timestamp = get_jiffies_64();
7214 while (interrupt_pending(h)) {
7215 raw_tag = get_next_completion(h, q);
7216 while (raw_tag != FIFO_EMPTY)
7217 raw_tag = next_command(h, q);
7222 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
7224 struct ctlr_info *h = queue_to_hba(queue);
7226 u8 q = *(u8 *) queue;
7228 if (ignore_bogus_interrupt(h))
7231 h->last_intr_timestamp = get_jiffies_64();
7232 raw_tag = get_next_completion(h, q);
7233 while (raw_tag != FIFO_EMPTY)
7234 raw_tag = next_command(h, q);
7238 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
7240 struct ctlr_info *h = queue_to_hba((u8 *) queue);
7242 u8 q = *(u8 *) queue;
7244 if (interrupt_not_for_us(h))
7246 h->last_intr_timestamp = get_jiffies_64();
7247 while (interrupt_pending(h)) {
7248 raw_tag = get_next_completion(h, q);
7249 while (raw_tag != FIFO_EMPTY) {
7250 process_indexed_cmd(h, raw_tag);
7251 raw_tag = next_command(h, q);
7257 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
7259 struct ctlr_info *h = queue_to_hba(queue);
7261 u8 q = *(u8 *) queue;
7263 h->last_intr_timestamp = get_jiffies_64();
7264 raw_tag = get_next_completion(h, q);
7265 while (raw_tag != FIFO_EMPTY) {
7266 process_indexed_cmd(h, raw_tag);
7267 raw_tag = next_command(h, q);
7272 /* Send a message CDB to the firmware. Careful, this only works
7273 * in simple mode, not performant mode due to the tag lookup.
7274 * We only ever use this immediately after a controller reset.
7276 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
7280 struct CommandListHeader CommandHeader;
7281 struct RequestBlock Request;
7282 struct ErrDescriptor ErrorDescriptor;
7284 struct Command *cmd;
7285 static const size_t cmd_sz = sizeof(*cmd) +
7286 sizeof(cmd->ErrorDescriptor);
7290 void __iomem *vaddr;
7293 vaddr = pci_ioremap_bar(pdev, 0);
7297 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
7298 * CCISS commands, so they must be allocated from the lower 4GiB of memory.
7301 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
7307 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
7313 /* This must fit, because of the 32-bit consistent DMA mask. Also,
7314 * although there's no guarantee, we assume that the address is at
7315 * least 4-byte aligned (most likely, it's page-aligned).
7317 paddr32 = cpu_to_le32(paddr64);
7319 cmd->CommandHeader.ReplyQueue = 0;
7320 cmd->CommandHeader.SGList = 0;
7321 cmd->CommandHeader.SGTotal = cpu_to_le16(0);
7322 cmd->CommandHeader.tag = cpu_to_le64(paddr64);
7323 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
7325 cmd->Request.CDBLen = 16;
7326 cmd->Request.type_attr_dir =
7327 TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
7328 cmd->Request.Timeout = 0; /* Don't time out */
7329 cmd->Request.CDB[0] = opcode;
7330 cmd->Request.CDB[1] = type;
7331 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
7332 cmd->ErrorDescriptor.Addr =
7333 cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
7334 cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
7336 writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
7338 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
7339 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
7340 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
7342 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
7347 /* we leak the DMA buffer here ... no choice since the controller could
7348 * still complete the command.
7350 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
7351 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
7356 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
7358 if (tag & HPSA_ERROR_BIT) {
7359 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
7364 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
7369 #define hpsa_noop(p) hpsa_message(p, 3, 0)
7371 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
7372 void __iomem *vaddr, u32 use_doorbell)
7376 /* For everything after the P600, the PCI power state method
7377 * of resetting the controller doesn't work, so we have this
7378 * other way using the doorbell register.
7380 dev_info(&pdev->dev, "using doorbell to reset controller\n");
7381 writel(use_doorbell, vaddr + SA5_DOORBELL);
7383 /* PMC hardware guys tell us we need a 10 second delay after
7384 * doorbell reset and before any attempt to talk to the board
7385 * at all to ensure that this actually works and doesn't fall
7386 * over in some weird corner cases.
7389 } else { /* Try to do it the PCI power state way */
7391 /* Quoting from the Open CISS Specification: "The Power
7392 * Management Control/Status Register (CSR) controls the power
7393 * state of the device. The normal operating state is D0,
7394 * CSR=00h. The software off state is D3, CSR=03h. To reset
7395 * the controller, place the interface device in D3 then to D0,
7396 * this causes a secondary PCI reset which will reset the
7401 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
7403 /* enter the D3hot power management state */
7404 rc = pci_set_power_state(pdev, PCI_D3hot);
7410 /* enter the D0 power management state */
7411 rc = pci_set_power_state(pdev, PCI_D0);
7416 * The P600 requires a small delay when changing states.
7417 * Otherwise we may think the board did not reset and we bail.
7418 * This is for kdump only and is particular to the P600.
7425 static void init_driver_version(char *driver_version, int len)
7427 memset(driver_version, 0, len);
7428 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
7431 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
7433 char *driver_version;
7434 int i, size = sizeof(cfgtable->driver_version);
7436 driver_version = kmalloc(size, GFP_KERNEL);
7437 if (!driver_version)
7440 init_driver_version(driver_version, size);
7441 for (i = 0; i < size; i++)
7442 writeb(driver_version[i], &cfgtable->driver_version[i]);
7443 kfree(driver_version);
7447 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
7448 unsigned char *driver_ver)
7452 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
7453 driver_ver[i] = readb(&cfgtable->driver_version[i]);
7456 static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
7459 char *driver_ver, *old_driver_ver;
7460 int rc, size = sizeof(cfgtable->driver_version);
7462 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
7463 if (!old_driver_ver)
7465 driver_ver = old_driver_ver + size;
7467 /* After a reset, the 32 bytes of "driver version" in the cfgtable
7468 * should have been changed, otherwise we know the reset failed.
7470 init_driver_version(old_driver_ver, size);
7471 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
7472 rc = !memcmp(driver_ver, old_driver_ver, size);
7473 kfree(old_driver_ver);
7476 /* This does a hard reset of the controller using PCI power management
7477 * states or the using the doorbell register.
7479 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
7483 u64 cfg_base_addr_index;
7484 void __iomem *vaddr;
7485 unsigned long paddr;
7486 u32 misc_fw_support;
7488 struct CfgTable __iomem *cfgtable;
7490 u16 command_register;
7492 /* For controllers as old as the P600, this is very nearly the same thing as:
7495 * pci_save_state(pci_dev);
7496 * pci_set_power_state(pci_dev, PCI_D3hot);
7497 * pci_set_power_state(pci_dev, PCI_D0);
7498 * pci_restore_state(pci_dev);
7500 * For controllers newer than the P600, the pci power state
7501 * method of resetting doesn't work so we have another way
7502 * using the doorbell register.
7505 if (!ctlr_is_resettable(board_id)) {
7506 dev_warn(&pdev->dev, "Controller not resettable\n");
7510 /* if controller is soft- but not hard resettable... */
7511 if (!ctlr_is_hard_resettable(board_id))
7512 return -ENOTSUPP; /* try soft reset later. */
7514 /* Save the PCI command register */
7515 pci_read_config_word(pdev, PCI_COMMAND, &command_register);
7516 pci_save_state(pdev);
7518 /* find the first memory BAR, so we can find the cfg table */
7519 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
7522 vaddr = remap_pci_mem(paddr, 0x250);
7526 /* find cfgtable in order to check if reset via doorbell is supported */
7527 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
7528 &cfg_base_addr_index, &cfg_offset);
7531 cfgtable = remap_pci_mem(pci_resource_start(pdev,
7532 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
7537 rc = write_driver_ver_to_cfgtable(cfgtable);
7539 goto unmap_cfgtable;
7541 /* If reset via doorbell register is supported, use that.
7542 * There are two such methods. Favor the newest method.
7544 misc_fw_support = readl(&cfgtable->misc_fw_support);
7545 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
7547 use_doorbell = DOORBELL_CTLR_RESET2;
7549 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
7551 dev_warn(&pdev->dev,
7552 "Soft reset not supported. Firmware update is required.\n");
7553 rc = -ENOTSUPP; /* try soft reset */
7554 goto unmap_cfgtable;
7558 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
7560 goto unmap_cfgtable;
7562 pci_restore_state(pdev);
7563 pci_write_config_word(pdev, PCI_COMMAND, command_register);
7565 /* Some devices (notably the HP Smart Array 5i Controller)
7566 need a little pause here */
7567 msleep(HPSA_POST_RESET_PAUSE_MSECS);
7569 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
7571 dev_warn(&pdev->dev,
7572 "Failed waiting for board to become ready after hard reset\n");
7573 goto unmap_cfgtable;
7576 rc = controller_reset_failed(vaddr);
7578 goto unmap_cfgtable;
7580 dev_warn(&pdev->dev, "Unable to successfully reset "
7581 "controller. Will try soft reset.\n");
7584 dev_info(&pdev->dev, "board ready after hard reset.\n");
7596 * We cannot read the structure directly; for portability we must use the readb()/readl() accessors.
7598 * This is for debug only.
7600 static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
7606 dev_info(dev, "Controller Configuration information\n");
7607 dev_info(dev, "------------------------------------\n");
7608 for (i = 0; i < 4; i++)
7609 temp_name[i] = readb(&(tb->Signature[i]));
7610 temp_name[4] = '\0';
7611 dev_info(dev, " Signature = %s\n", temp_name);
7612 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
7613 dev_info(dev, " Transport methods supported = 0x%x\n",
7614 readl(&(tb->TransportSupport)));
7615 dev_info(dev, " Transport methods active = 0x%x\n",
7616 readl(&(tb->TransportActive)));
7617 dev_info(dev, " Requested transport Method = 0x%x\n",
7618 readl(&(tb->HostWrite.TransportRequest)));
7619 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
7620 readl(&(tb->HostWrite.CoalIntDelay)));
7621 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
7622 readl(&(tb->HostWrite.CoalIntCount)));
7623 dev_info(dev, " Max outstanding commands = %d\n",
7624 readl(&(tb->CmdsOutMax)));
7625 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
7626 for (i = 0; i < 16; i++)
7627 temp_name[i] = readb(&(tb->ServerName[i]));
7628 temp_name[16] = '\0';
7629 dev_info(dev, " Server Name = %s\n", temp_name);
7630 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
7631 readl(&(tb->HeartBeat)));
7632 #endif /* HPSA_DEBUG */
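/*
 * Translate a config-space base address register offset (relative to
 * PCI_BASE_ADDRESS_0) into a resource index, accounting for 64-bit
 * BARs consuming two register slots.
 */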
7635 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
7637 int i, offset, mem_type, bar_type;
7639 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
7642 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
7643 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
7644 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
7647 mem_type = pci_resource_flags(pdev, i) &
7648 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
7650 case PCI_BASE_ADDRESS_MEM_TYPE_32:
7651 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
7652 offset += 4; /* 32 bit */
7654 case PCI_BASE_ADDRESS_MEM_TYPE_64:
7657 default: /* reserved in PCI 2.2 */
7658 dev_warn(&pdev->dev,
7659 "base address is invalid\n");
7664 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
7670 static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
7672 if (h->msix_vector) {
7673 if (h->pdev->msix_enabled)
7674 pci_disable_msix(h->pdev);
7676 } else if (h->msi_vector) {
7677 if (h->pdev->msi_enabled)
7678 pci_disable_msi(h->pdev);
7683 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
7684 * controllers that are capable. If not, we use legacy INTx mode.
7686 static void hpsa_interrupt_mode(struct ctlr_info *h)
7688 #ifdef CONFIG_PCI_MSI
7690 struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
7692 for (i = 0; i < MAX_REPLY_QUEUES; i++) {
7693 hpsa_msix_entries[i].vector = 0;
7694 hpsa_msix_entries[i].entry = i;
7697 /* Some boards advertise MSI but don't really support it */
7698 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
7699 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
7700 goto default_int_mode;
7701 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
7702 dev_info(&h->pdev->dev, "MSI-X capable controller\n");
7703 h->msix_vector = MAX_REPLY_QUEUES;
7704 if (h->msix_vector > num_online_cpus())
7705 h->msix_vector = num_online_cpus();
7706 err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
7709 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
7711 goto single_msi_mode;
7712 } else if (err < h->msix_vector) {
7713 dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
7714 "available\n", err);
7716 h->msix_vector = err;
7717 for (i = 0; i < h->msix_vector; i++)
7718 h->intr[i] = hpsa_msix_entries[i].vector;
7722 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
7723 dev_info(&h->pdev->dev, "MSI capable controller\n");
7724 if (!pci_enable_msi(h->pdev))
7727 dev_warn(&h->pdev->dev, "MSI init failed\n");
7730 #endif /* CONFIG_PCI_MSI */
7731 /* if we get here we're going to use the default interrupt mode */
7732 h->intr[h->intr_mode] = h->pdev->irq;
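/*
 * Derive the 32-bit board ID from the PCI subsystem vendor/device IDs
 * and look it up in the products[] table; boards not in the table are
 * either rejected or mapped to the generic entry at the end.
 */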
7735 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
7738 u32 subsystem_vendor_id, subsystem_device_id;
7740 subsystem_vendor_id = pdev->subsystem_vendor;
7741 subsystem_device_id = pdev->subsystem_device;
7742 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
7743 subsystem_vendor_id;
7745 for (i = 0; i < ARRAY_SIZE(products); i++)
7746 if (*board_id == products[i].board_id)
7749 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
7750 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
7752 dev_warn(&pdev->dev, "unrecognized board ID: "
7753 "0x%08x, ignoring.\n", *board_id);
7756 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
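/* Find the first memory BAR and return its base address. */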
7759 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
7760 unsigned long *memory_bar)
7764 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
7765 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
7766 /* addressing mode bits already removed */
7767 *memory_bar = pci_resource_start(pdev, i);
7768 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
7772 dev_warn(&pdev->dev, "no memory BAR found\n");
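/*
 * Poll the scratchpad register until the firmware reports the
 * requested state (BOARD_READY or BOARD_NOT_READY) or the iteration
 * limit expires.
 */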
7776 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
7782 iterations = HPSA_BOARD_READY_ITERATIONS;
7784 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
7786 for (i = 0; i < iterations; i++) {
7787 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
7788 if (wait_for_ready) {
7789 if (scratchpad == HPSA_FIRMWARE_READY)
7792 if (scratchpad != HPSA_FIRMWARE_READY)
7795 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
7797 dev_warn(&pdev->dev, "board not ready, timed out.\n");
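/*
 * Read the config table BAR value and offset from the SA5 registers
 * and translate the BAR value into a resource index.
 */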
7801 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
7802 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
7805 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
7806 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
7807 *cfg_base_addr &= (u32) 0x0000ffff;
7808 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
7809 if (*cfg_base_addr_index == -1) {
7810 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
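/* Unmap the config and transfer tables (inverse of hpsa_find_cfgtables()). */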
7816 static void hpsa_free_cfgtables(struct ctlr_info *h)
7818 if (h->transtable) {
7819 iounmap(h->transtable);
7820 h->transtable = NULL;
7823 iounmap(h->cfgtable);
7828 /* Find and map CISS config table and transfer table
7829 * several items must be unmapped (freed) later
7831 static int hpsa_find_cfgtables(struct ctlr_info *h)
7835 u64 cfg_base_addr_index;
7839 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7840 &cfg_base_addr_index, &cfg_offset);
7843 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
7844 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
7846 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
7849 rc = write_driver_ver_to_cfgtable(h->cfgtable);
7852 /* Find performant mode table. */
7853 trans_offset = readl(&h->cfgtable->TransMethodOffset);
7854 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
7855 cfg_base_addr_index)+cfg_offset+trans_offset,
7856 sizeof(*h->transtable));
7857 if (!h->transtable) {
7858 dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
7859 hpsa_free_cfgtables(h);
7865 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
7867 #define MIN_MAX_COMMANDS 16
7868 BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
7870 h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
7872 /* Limit commands in memory limited kdump scenario. */
7873 if (reset_devices && h->max_commands > 32)
7874 h->max_commands = 32;
7876 if (h->max_commands < MIN_MAX_COMMANDS) {
7877 dev_warn(&h->pdev->dev,
7878 "Controller reports max supported commands of %d Using %d instead. Ensure that firmware is up to date.\n",
7881 h->max_commands = MIN_MAX_COMMANDS;
7885 /* If the controller reports that the total max sg entries is greater than 512,
7886 * then we know that chained SG blocks work. (Original smart arrays did not
7887 * support chained SG blocks and would return zero for max sg entries.)
7889 static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
7891 return h->maxsgentries > 512;
7894 /* Interrogate the hardware for some limits:
7895 * max commands, max SG elements without chaining, and with chaining,
7896 * SG chain block size, etc.
7898 static void hpsa_find_board_params(struct ctlr_info *h)
7900 hpsa_get_max_perf_mode_cmds(h);
7901 h->nr_cmds = h->max_commands;
7902 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
7903 h->fw_support = readl(&(h->cfgtable->misc_fw_support));
7904 if (hpsa_supports_chained_sg_blocks(h)) {
7905 /* Limit in-command s/g elements to 32 to save DMA'able memory. */
7906 h->max_cmd_sg_entries = 32;
7907 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
7908 h->maxsgentries--; /* save one for chain pointer */
7911 * Original smart arrays supported at most 31 s/g entries
7912 * embedded inline in the command (trying to use more
7913 * would lock up the controller)
7915 h->max_cmd_sg_entries = 31;
7916 h->maxsgentries = 31; /* default to traditional values */
7920 /* Find out what task management functions are supported and cache */
7921 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
7922 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
7923 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
7924 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
7925 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
7926 if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
7927 dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
7930 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
7932 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
7933 dev_err(&h->pdev->dev, "not a valid CISS config table\n");
7939 static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
7943 driver_support = readl(&(h->cfgtable->driver_support));
7944 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
7946 driver_support |= ENABLE_SCSI_PREFETCH;
7948 driver_support |= ENABLE_UNIT_ATTN;
7949 writel(driver_support, &(h->cfgtable->driver_support));
7952 /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
7953 * in a prefetch beyond physical memory.
7955 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
7959 if (h->board_id != 0x3225103C)
7961 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
7962 dma_prefetch |= 0x8000;
7963 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
7966 static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
7970 unsigned long flags;
7971 /* wait until the clear_event_notify bit 6 is cleared by controller. */
7972 for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
7973 spin_lock_irqsave(&h->lock, flags);
7974 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7975 spin_unlock_irqrestore(&h->lock, flags);
7976 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
7978 /* delay and try again */
7979 msleep(CLEAR_EVENT_WAIT_INTERVAL);
7986 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
7990 unsigned long flags;
7992 /* under certain very rare conditions, this can take a while.
7993 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
7994 * as we enter this code.)
7996 for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
7997 if (h->remove_in_progress)
7999 spin_lock_irqsave(&h->lock, flags);
8000 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
8001 spin_unlock_irqrestore(&h->lock, flags);
8002 if (!(doorbell_value & CFGTBL_ChangeReq))
8004 /* delay and try again */
8005 msleep(MODE_CHANGE_WAIT_INTERVAL);
8012 /* return -ENODEV or other reason on error, 0 on success */
8013 static int hpsa_enter_simple_mode(struct ctlr_info *h)
8017 trans_support = readl(&(h->cfgtable->TransportSupport));
8018 if (!(trans_support & SIMPLE_MODE))
8021 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
8023 /* Update the field, and then ring the doorbell */
8024 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
8025 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
8026 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
8027 if (hpsa_wait_for_mode_change_ack(h))
8029 print_cfg_table(&h->pdev->dev, h->cfgtable);
8030 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
8032 h->transMethod = CFGTBL_Trans_Simple;
8035 dev_err(&h->pdev->dev, "failed to enter simple mode\n");
8039 /* free items allocated or mapped by hpsa_pci_init */
8040 static void hpsa_free_pci_init(struct ctlr_info *h)
8042 hpsa_free_cfgtables(h); /* pci_init 4 */
8043 iounmap(h->vaddr); /* pci_init 3 */
8045 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
8047 * call pci_disable_device before pci_release_regions per
8048 * Documentation/PCI/pci.txt
8050 pci_disable_device(h->pdev); /* pci_init 1 */
8051 pci_release_regions(h->pdev); /* pci_init 2 */
8054 /* several items must be freed later */
8055 static int hpsa_pci_init(struct ctlr_info *h)
8057 int prod_index, err;
8059 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
8062 h->product_name = products[prod_index].product_name;
8063 h->access = *(products[prod_index].access);
8065 h->needs_abort_tags_swizzled =
8066 ctlr_needs_abort_tags_swizzled(h->board_id);
8068 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
8069 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
8071 err = pci_enable_device(h->pdev);
8073 dev_err(&h->pdev->dev, "failed to enable PCI device\n");
8074 pci_disable_device(h->pdev);
8078 err = pci_request_regions(h->pdev, HPSA);
8080 dev_err(&h->pdev->dev,
8081 "failed to obtain PCI resources\n");
8082 pci_disable_device(h->pdev);
8086 pci_set_master(h->pdev);
8088 hpsa_interrupt_mode(h);
8089 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
8091 goto clean2; /* intmode+region, pci */
8092 h->vaddr = remap_pci_mem(h->paddr, 0x250);
8094 dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
8096 goto clean2; /* intmode+region, pci */
8098 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
8100 goto clean3; /* vaddr, intmode+region, pci */
8101 err = hpsa_find_cfgtables(h);
8103 goto clean3; /* vaddr, intmode+region, pci */
8104 hpsa_find_board_params(h);
8106 if (!hpsa_CISS_signature_present(h)) {
8108 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
8110 hpsa_set_driver_support_bits(h);
8111 hpsa_p600_dma_prefetch_quirk(h);
8112 err = hpsa_enter_simple_mode(h);
8114 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
8117 clean4: /* cfgtables, vaddr, intmode+region, pci */
8118 hpsa_free_cfgtables(h);
8119 clean3: /* vaddr, intmode+region, pci */
8122 clean2: /* intmode+region, pci */
8123 hpsa_disable_interrupt_mode(h);
8125 * call pci_disable_device before pci_release_regions per
8126 * Documentation/PCI/pci.txt
8128 pci_disable_device(h->pdev);
8129 pci_release_regions(h->pdev);
8133 static void hpsa_hba_inquiry(struct ctlr_info *h)
8137 #define HBA_INQUIRY_BYTE_COUNT 64
8138 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
8139 if (!h->hba_inquiry_data)
8141 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
8142 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
8144 kfree(h->hba_inquiry_data);
8145 h->hba_inquiry_data = NULL;
8149 static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
8152 void __iomem *vaddr;
8157 /* kdump kernel is loading, we don't know what state the
8158 * PCI interface is in. The dev->enable_cnt is zero,
8159 * so we call enable+disable, wait a while, and switch it on.
8161 rc = pci_enable_device(pdev);
8163 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
8166 pci_disable_device(pdev);
8167 msleep(260); /* a randomly chosen number */
8168 rc = pci_enable_device(pdev);
8170 dev_warn(&pdev->dev, "failed to enable device.\n");
8174 pci_set_master(pdev);
8176 vaddr = pci_ioremap_bar(pdev, 0);
8177 if (vaddr == NULL) {
8181 writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
8184 /* Reset the controller with a PCI power-cycle or via doorbell */
8185 rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
8187 /* -ENOTSUPP here means we cannot reset the controller
8188 * but it's already (and still) up and running in
8189 * "performant mode". Or, it might be 640x, which can't reset
8190 * due to concerns about shared bbwc between 6402/6404 pair.
8195 /* Now try to get the controller to respond to a no-op */
8196 dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
8197 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
8198 if (hpsa_noop(pdev) == 0)
8201 dev_warn(&pdev->dev, "no-op failed%s\n",
8202 (i < HPSA_POST_RESET_NOOP_RETRIES - 1 ? "; re-trying" : ""));
8207 pci_disable_device(pdev);
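/* Free the command-pool bitmap and the coherent command and error-info pools. */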
8211 static void hpsa_free_cmd_pool(struct ctlr_info *h)
8213 kfree(h->cmd_pool_bits);
8214 h->cmd_pool_bits = NULL;
8216 pci_free_consistent(h->pdev,
8217 h->nr_cmds * sizeof(struct CommandList),
8219 h->cmd_pool_dhandle);
8221 h->cmd_pool_dhandle = 0;
8223 if (h->errinfo_pool) {
8224 pci_free_consistent(h->pdev,
8225 h->nr_cmds * sizeof(struct ErrorInfo),
8227 h->errinfo_pool_dhandle);
8228 h->errinfo_pool = NULL;
8229 h->errinfo_pool_dhandle = 0;
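/*
 * Allocate the free-command bitmap plus coherent DMA pools holding one
 * CommandList and one ErrorInfo per tag (h->nr_cmds).
 */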
8233 static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
8235 h->cmd_pool_bits = kzalloc(
8236 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
8237 sizeof(unsigned long), GFP_KERNEL);
8238 h->cmd_pool = pci_alloc_consistent(h->pdev,
8239 h->nr_cmds * sizeof(*h->cmd_pool),
8240 &(h->cmd_pool_dhandle));
8241 h->errinfo_pool = pci_alloc_consistent(h->pdev,
8242 h->nr_cmds * sizeof(*h->errinfo_pool),
8243 &(h->errinfo_pool_dhandle));
8244 if ((h->cmd_pool_bits == NULL)
8245 || (h->cmd_pool == NULL)
8246 || (h->errinfo_pool == NULL)) {
8247 dev_err(&h->pdev->dev, "out of memory in %s", __func__);
8250 hpsa_preinitialize_commands(h);
8253 hpsa_free_cmd_pool(h);
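/* Spread the MSI-X vectors across the online CPUs via affinity hints. */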
8257 static void hpsa_irq_affinity_hints(struct ctlr_info *h)
8261 cpu = cpumask_first(cpu_online_mask);
8262 for (i = 0; i < h->msix_vector; i++) {
8263 irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
8264 cpu = cpumask_next(cpu, cpu_online_mask);
8268 /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
8269 static void hpsa_free_irqs(struct ctlr_info *h)
8273 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
8274 /* Single reply queue, only one irq to free */
8276 irq_set_affinity_hint(h->intr[i], NULL);
8277 free_irq(h->intr[i], &h->q[i]);
8282 for (i = 0; i < h->msix_vector; i++) {
8283 irq_set_affinity_hint(h->intr[i], NULL);
8284 free_irq(h->intr[i], &h->q[i]);
8287 for (; i < MAX_REPLY_QUEUES; i++)
8291 /* returns 0 on success; cleans up and returns -Enn on error */
8292 static int hpsa_request_irqs(struct ctlr_info *h,
8293 irqreturn_t (*msixhandler)(int, void *),
8294 irqreturn_t (*intxhandler)(int, void *))
8299 * initialize h->q[x] = x so that interrupt handlers know which
8302 for (i = 0; i < MAX_REPLY_QUEUES; i++)
8305 if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
8306 /* If performant mode and MSI-X, use multiple reply queues */
8307 for (i = 0; i < h->msix_vector; i++) {
8308 sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
8309 rc = request_irq(h->intr[i], msixhandler,
8315 dev_err(&h->pdev->dev,
8316 "failed to get irq %d for %s\n",
8317 h->intr[i], h->devname);
8318 for (j = 0; j < i; j++) {
8319 free_irq(h->intr[j], &h->q[j]);
8322 for (; j < MAX_REPLY_QUEUES; j++)
8327 hpsa_irq_affinity_hints(h);
8329 /* Use single reply pool */
8330 if (h->msix_vector > 0 || h->msi_vector) {
8332 sprintf(h->intrname[h->intr_mode],
8333 "%s-msix", h->devname);
8335 sprintf(h->intrname[h->intr_mode],
8336 "%s-msi", h->devname);
8337 rc = request_irq(h->intr[h->intr_mode],
8339 h->intrname[h->intr_mode],
8340 &h->q[h->intr_mode]);
8342 sprintf(h->intrname[h->intr_mode],
8343 "%s-intx", h->devname);
8344 rc = request_irq(h->intr[h->intr_mode],
8345 intxhandler, IRQF_SHARED,
8346 h->intrname[h->intr_mode],
8347 &h->q[h->intr_mode]);
8349 irq_set_affinity_hint(h->intr[h->intr_mode], NULL);
8352 dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
8353 h->intr[h->intr_mode], h->devname);
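/*
 * Soft reset used on the kdump path: send the reset message, wait for
 * the board to leave the READY state, then wait for it to come back.
 */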
8360 static int hpsa_kdump_soft_reset(struct ctlr_info *h)
8363 hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER);
8365 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
8366 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
8368 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
8372 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
8373 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
8375 dev_warn(&h->pdev->dev, "Board failed to become ready "
8376 "after soft reset.\n");
8383 static void hpsa_free_reply_queues(struct ctlr_info *h)
8387 for (i = 0; i < h->nreply_queues; i++) {
8388 if (!h->reply_queue[i].head)
8390 pci_free_consistent(h->pdev,
8391 h->reply_queue_size,
8392 h->reply_queue[i].head,
8393 h->reply_queue[i].busaddr);
8394 h->reply_queue[i].head = NULL;
8395 h->reply_queue[i].busaddr = 0;
8397 h->reply_queue_size = 0;
8400 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
8402 hpsa_free_performant_mode(h); /* init_one 7 */
8403 hpsa_free_sg_chain_blocks(h); /* init_one 6 */
8404 hpsa_free_cmd_pool(h); /* init_one 5 */
8405 hpsa_free_irqs(h); /* init_one 4 */
8406 scsi_host_put(h->scsi_host); /* init_one 3 */
8407 h->scsi_host = NULL; /* init_one 3 */
8408 hpsa_free_pci_init(h); /* init_one 2_5 */
8409 free_percpu(h->lockup_detected); /* init_one 2 */
8410 h->lockup_detected = NULL; /* init_one 2 */
8411 if (h->resubmit_wq) {
8412 destroy_workqueue(h->resubmit_wq); /* init_one 1 */
8413 h->resubmit_wq = NULL;
8415 if (h->rescan_ctlr_wq) {
8416 destroy_workqueue(h->rescan_ctlr_wq);
8417 h->rescan_ctlr_wq = NULL;
8419 kfree(h); /* init_one 1 */
8422 /* Called when controller lockup detected. */
8423 static void fail_all_outstanding_cmds(struct ctlr_info *h)
8426 struct CommandList *c;
8429 flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
8430 for (i = 0; i < h->nr_cmds; i++) {
8431 c = h->cmd_pool + i;
8432 refcount = atomic_inc_return(&c->refcount);
8434 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
8436 atomic_dec(&h->commands_outstanding);
8441 dev_warn(&h->pdev->dev,
8442 "failed %d commands in fail_all\n", failcount);
8445 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
8449 for_each_online_cpu(cpu) {
8450 u32 *lockup_detected;
8451 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
8452 *lockup_detected = value;
8454 wmb(); /* be sure the per-cpu variables are out to memory */
8457 static void controller_lockup_detected(struct ctlr_info *h)
8459 unsigned long flags;
8460 u32 lockup_detected;
8462 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8463 spin_lock_irqsave(&h->lock, flags);
8464 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
8465 if (!lockup_detected) {
8466 /* no heartbeat, but controller gave us a zero. */
8467 dev_warn(&h->pdev->dev,
8468 			"lockup detected after %d seconds but scratchpad register is zero\n",
8469 h->heartbeat_sample_interval / HZ);
8470 lockup_detected = 0xffffffff;
8472 set_lockup_detected_for_all_cpus(h, lockup_detected);
8473 spin_unlock_irqrestore(&h->lock, flags);
8474 	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d seconds\n",
8475 lockup_detected, h->heartbeat_sample_interval / HZ);
8476 pci_disable_device(h->pdev);
8477 fail_all_outstanding_cmds(h);
8480 static int detect_controller_lockup(struct ctlr_info *h)
8484 unsigned long flags;
8486 now = get_jiffies_64();
8487 /* If we've received an interrupt recently, we're ok. */
8488 if (time_after64(h->last_intr_timestamp +
8489 (h->heartbeat_sample_interval), now))
8493 * If we've already checked the heartbeat recently, we're ok.
8494 * This could happen if someone sends us a signal. We
8495 * otherwise don't care about signals in this thread.
8497 if (time_after64(h->last_heartbeat_timestamp +
8498 (h->heartbeat_sample_interval), now))
8501 /* If heartbeat has not changed since we last looked, we're not ok. */
8502 spin_lock_irqsave(&h->lock, flags);
8503 heartbeat = readl(&h->cfgtable->HeartBeat);
8504 spin_unlock_irqrestore(&h->lock, flags);
8505 if (h->last_heartbeat == heartbeat) {
8506 controller_lockup_detected(h);
8511 h->last_heartbeat = heartbeat;
8512 h->last_heartbeat_timestamp = now;
8516 static void hpsa_ack_ctlr_events(struct ctlr_info *h)
8521 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8524 /* Ask the controller to clear the events we're handling. */
8525 if ((h->transMethod & (CFGTBL_Trans_io_accel1
8526 | CFGTBL_Trans_io_accel2)) &&
8527 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
8528 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
8530 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
8531 event_type = "state change";
8532 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
8533 event_type = "configuration change";
8534 /* Stop sending new RAID offload reqs via the IO accelerator */
8535 scsi_block_requests(h->scsi_host);
8536 for (i = 0; i < h->ndevices; i++) {
8537 h->dev[i]->offload_enabled = 0;
8538 h->dev[i]->offload_to_be_enabled = 0;
8540 hpsa_drain_accel_commands(h);
8541 /* Set 'accelerator path config change' bit */
8542 dev_warn(&h->pdev->dev,
8543 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
8544 h->events, event_type);
8545 writel(h->events, &(h->cfgtable->clear_event_notify));
8546 /* Set the "clear event notify field update" bit 6 */
8547 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8548 /* Wait until ctlr clears 'clear event notify field', bit 6 */
8549 hpsa_wait_for_clear_event_notify_ack(h);
8550 scsi_unblock_requests(h->scsi_host);
8552 /* Acknowledge controller notification events. */
8553 writel(h->events, &(h->cfgtable->clear_event_notify));
8554 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8555 hpsa_wait_for_clear_event_notify_ack(h);
8557 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
8558 hpsa_wait_for_mode_change_ack(h);
8564 /* Check a register on the controller to see if there are configuration
8565 * changes (added/changed/removed logical drives, etc.) which mean that
8566 * we should rescan the controller for devices.
8567 * Also check flag for driver-initiated rescan.
8569 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
8571 if (h->drv_req_rescan) {
8572 h->drv_req_rescan = 0;
8576 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8579 h->events = readl(&(h->cfgtable->event_notify));
8580 return h->events & RESCAN_REQUIRED_EVENT_BITS;
8584 * Check if any of the offline devices have become ready
8586 static int hpsa_offline_devices_ready(struct ctlr_info *h)
8588 unsigned long flags;
8589 struct offline_device_entry *d;
8590 struct list_head *this, *tmp;
8592 spin_lock_irqsave(&h->offline_device_lock, flags);
8593 list_for_each_safe(this, tmp, &h->offline_device_list) {
8594 d = list_entry(this, struct offline_device_entry,
8596 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8597 if (!hpsa_volume_offline(h, d->scsi3addr)) {
8598 spin_lock_irqsave(&h->offline_device_lock, flags);
8599 list_del(&d->offline_list);
8600 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8603 spin_lock_irqsave(&h->offline_device_lock, flags);
8605 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8609 static int hpsa_luns_changed(struct ctlr_info *h)
8611 int rc = 1; /* assume there are changes */
8612 struct ReportLUNdata *logdev = NULL;
8614 /* if we can't find out if lun data has changed,
8615 * assume that it has.
8618 if (!h->lastlogicals)
8621 logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
8623 dev_warn(&h->pdev->dev,
8624 "Out of memory, can't track lun changes.\n");
8627 if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
8628 dev_warn(&h->pdev->dev,
8629 "report luns failed, can't track lun changes.\n");
8632 if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) {
8633 dev_info(&h->pdev->dev,
8634 "Lun changes detected.\n");
8635 memcpy(h->lastlogicals, logdev, sizeof(*logdev));
8638 rc = 0; /* no changes detected. */
8644 static void hpsa_rescan_ctlr_worker(struct work_struct *work)
8646 unsigned long flags;
8647 struct ctlr_info *h = container_of(to_delayed_work(work),
8648 struct ctlr_info, rescan_ctlr_work);
8651 if (h->remove_in_progress)
8654 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
8655 scsi_host_get(h->scsi_host);
8656 hpsa_ack_ctlr_events(h);
8657 hpsa_scan_start(h->scsi_host);
8658 scsi_host_put(h->scsi_host);
8659 } else if (h->discovery_polling) {
8660 hpsa_disable_rld_caching(h);
8661 if (hpsa_luns_changed(h)) {
8662 struct Scsi_Host *sh = NULL;
8664 dev_info(&h->pdev->dev,
8665 "driver discovery polling rescan.\n");
8666 sh = scsi_host_get(h->scsi_host);
8668 hpsa_scan_start(sh);
8673 spin_lock_irqsave(&h->lock, flags);
8674 if (!h->remove_in_progress)
8675 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8676 h->heartbeat_sample_interval);
8677 spin_unlock_irqrestore(&h->lock, flags);
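/*
 * Editor's note (not in the original): the re-queue above is done
 * under h->lock and gated on h->remove_in_progress; hpsa_remove_one()
 * below sets that flag under the same lock before calling
 * cancel_delayed_work_sync(), so a worker can never re-arm itself
 * after the cancel has completed.
 */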
8680 static void hpsa_monitor_ctlr_worker(struct work_struct *work)
8682 unsigned long flags;
8683 struct ctlr_info *h = container_of(to_delayed_work(work),
8684 struct ctlr_info, monitor_ctlr_work);
8686 detect_controller_lockup(h);
8687 if (lockup_detected(h))
8690 spin_lock_irqsave(&h->lock, flags);
8691 if (!h->remove_in_progress)
8692 schedule_delayed_work(&h->monitor_ctlr_work,
8693 h->heartbeat_sample_interval);
8694 spin_unlock_irqrestore(&h->lock, flags);
8697 static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
8700 struct workqueue_struct *wq = NULL;
8702 wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
8704 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
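/*
 * Editor's note: alloc_ordered_workqueue() takes a printf-style
 * format string, so hpsa_create_controller_wq(h, "rescan") on
 * controller 0 yields a workqueue named "rescan_0_hpsa".
 */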
8709 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8712 struct ctlr_info *h;
8713 int try_soft_reset = 0;
8714 unsigned long flags;
8717 if (number_of_controllers == 0)
8718 printk(KERN_INFO DRIVER_NAME "\n");
8720 rc = hpsa_lookup_board_id(pdev, &board_id);
8722 dev_warn(&pdev->dev, "Board ID not found\n");
8726 rc = hpsa_init_reset_devices(pdev, board_id);
8728 if (rc != -ENOTSUPP)
8730 /* If the reset fails in a particular way (it has no way to do
8731 * a proper hard reset, so returns -ENOTSUPP) we can try to do
8732 * a soft reset once we get the controller configured up to the
8733 * point that it can accept a command.
8739 reinit_after_soft_reset:
8741 /* Command structures must be aligned on a 32-byte boundary because
8742 	 * the 5 lower bits of the address are used by the hardware and by
8743 	 * the driver. See comments in hpsa.h for more info.
8745 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
8746 h = kzalloc(sizeof(*h), GFP_KERNEL);
8748 dev_err(&pdev->dev, "Failed to allocate controller head\n");
8754 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
8755 INIT_LIST_HEAD(&h->offline_device_list);
8756 spin_lock_init(&h->lock);
8757 spin_lock_init(&h->offline_device_lock);
8758 spin_lock_init(&h->scan_lock);
8759 atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
8760 atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS);
8762 /* Allocate and clear per-cpu variable lockup_detected */
8763 h->lockup_detected = alloc_percpu(u32);
8764 if (!h->lockup_detected) {
8765 dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
8767 goto clean1; /* aer/h */
8769 set_lockup_detected_for_all_cpus(h, 0);
8771 rc = hpsa_pci_init(h);
8773 goto clean2; /* lu, aer/h */
8775 /* relies on h-> settings made by hpsa_pci_init, including
8776 * interrupt_mode h->intr */
8777 rc = hpsa_scsi_host_alloc(h);
8779 goto clean2_5; /* pci, lu, aer/h */
8781 sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
8782 h->ctlr = number_of_controllers;
8783 number_of_controllers++;
8785 /* configure PCI DMA stuff */
8786 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8790 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8794 dev_err(&pdev->dev, "no suitable DMA available\n");
8795 goto clean3; /* shost, pci, lu, aer/h */
8799 /* make sure the board interrupts are off */
8800 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8802 rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
8804 goto clean3; /* shost, pci, lu, aer/h */
8805 rc = hpsa_alloc_cmd_pool(h);
8807 goto clean4; /* irq, shost, pci, lu, aer/h */
8808 rc = hpsa_alloc_sg_chain_blocks(h);
8810 goto clean5; /* cmd, irq, shost, pci, lu, aer/h */
8811 init_waitqueue_head(&h->scan_wait_queue);
8812 init_waitqueue_head(&h->abort_cmd_wait_queue);
8813 init_waitqueue_head(&h->event_sync_wait_queue);
8814 mutex_init(&h->reset_mutex);
8815 h->scan_finished = 1; /* no scan currently in progress */
8816 h->scan_waiting = 0;
8818 pci_set_drvdata(pdev, h);
8821 spin_lock_init(&h->devlock);
8822 rc = hpsa_put_ctlr_into_performant_mode(h);
8824 goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */
8826 /* create the resubmit workqueue */
8827 h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
8828 if (!h->rescan_ctlr_wq) {
8833 h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
8834 if (!h->resubmit_wq) {
8836 goto clean7; /* aer/h */
8840 * At this point, the controller is ready to take commands.
8841 * Now, if reset_devices and the hard reset didn't work, try
8842 * the soft reset and see if that works.
8844 if (try_soft_reset) {
8846 /* This is kind of gross. We may or may not get a completion
8847 * from the soft reset command, and if we do, then the value
8848 * from the fifo may or may not be valid. So, we wait 10 secs
8849 * after the reset throwing away any completions we get during
8850 * that time. Unregister the interrupt handler and register
8851 * fake ones to scoop up any residual completions.
8853 spin_lock_irqsave(&h->lock, flags);
8854 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8855 spin_unlock_irqrestore(&h->lock, flags);
8857 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
8858 hpsa_intx_discard_completions);
8860 dev_warn(&h->pdev->dev,
8861 "Failed to request_irq after soft reset.\n");
8863 * cannot goto clean7 or free_irqs will be called
8864 			 * again. Instead, do its work inline below. */
8866 hpsa_free_performant_mode(h); /* clean7 */
8867 hpsa_free_sg_chain_blocks(h); /* clean6 */
8868 hpsa_free_cmd_pool(h); /* clean5 */
8870 * skip hpsa_free_irqs(h) clean4 since that
8871 * was just called before request_irqs failed
8876 rc = hpsa_kdump_soft_reset(h);
8878 /* Neither hard nor soft reset worked, we're hosed. */
8881 dev_info(&h->pdev->dev, "Board READY.\n");
8882 dev_info(&h->pdev->dev,
8883 "Waiting for stale completions to drain.\n");
8884 h->access.set_intr_mask(h, HPSA_INTR_ON);
8886 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8888 rc = controller_reset_failed(h->cfgtable);
8890 dev_info(&h->pdev->dev,
8891 "Soft reset appears to have failed.\n");
8893 /* since the controller's reset, we have to go back and re-init
8894 		 * everything. Easiest to just forget what we've done and do it all over again. */
8897 hpsa_undo_allocations_after_kdump_soft_reset(h);
8900 /* don't goto clean, we already unallocated */
8903 goto reinit_after_soft_reset;
8906 /* Enable Accelerated IO path at driver layer */
8907 h->acciopath_status = 1;
8908 	/* Disable discovery polling. */
8909 h->discovery_polling = 0;
8912 /* Turn the interrupts on so we can service requests */
8913 h->access.set_intr_mask(h, HPSA_INTR_ON);
8915 hpsa_hba_inquiry(h);
8917 h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL);
8918 if (!h->lastlogicals)
8919 dev_info(&h->pdev->dev,
8920 "Can't track change to report lun data\n");
8922 /* hook into SCSI subsystem */
8923 rc = hpsa_scsi_add_host(h);
8925 goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
8927 /* Monitor the controller for firmware lockups */
8928 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
8929 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
8930 schedule_delayed_work(&h->monitor_ctlr_work,
8931 h->heartbeat_sample_interval);
8932 INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
8933 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8934 h->heartbeat_sample_interval);
8937 clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
8938 hpsa_free_performant_mode(h);
8939 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8940 clean6: /* sg, cmd, irq, shost, pci, lu, aer/h */
8941 hpsa_free_sg_chain_blocks(h);
8942 clean5: /* cmd, irq, shost, pci, lu, aer/h */
8943 hpsa_free_cmd_pool(h);
8944 clean4: /* irq, shost, pci, lu, aer/h */
8946 clean3: /* shost, pci, lu, aer/h */
8947 scsi_host_put(h->scsi_host);
8948 h->scsi_host = NULL;
8949 clean2_5: /* pci, lu, aer/h */
8950 hpsa_free_pci_init(h);
8951 clean2: /* lu, aer/h */
8952 if (h->lockup_detected) {
8953 free_percpu(h->lockup_detected);
8954 h->lockup_detected = NULL;
8956 clean1: /* wq/aer/h */
8957 if (h->resubmit_wq) {
8958 destroy_workqueue(h->resubmit_wq);
8959 h->resubmit_wq = NULL;
8961 if (h->rescan_ctlr_wq) {
8962 destroy_workqueue(h->rescan_ctlr_wq);
8963 h->rescan_ctlr_wq = NULL;
8969 static void hpsa_flush_cache(struct ctlr_info *h)
8972 struct CommandList *c;
8975 if (unlikely(lockup_detected(h)))
8977 flush_buf = kzalloc(4, GFP_KERNEL);
8983 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
8984 RAID_CTLR_LUNID, TYPE_CMD)) {
8987 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
8988 PCI_DMA_TODEVICE, DEFAULT_TIMEOUT);
8991 if (c->err_info->CommandStatus != 0)
8993 dev_warn(&h->pdev->dev,
8994 "error flushing cache on controller\n");
8999 /* Make controller gather fresh report lun data each time we
9000 * send down a report luns request
9002 static void hpsa_disable_rld_caching(struct ctlr_info *h)
9005 struct CommandList *c;
9008 /* Don't bother trying to set diag options if locked up */
9009 	if (unlikely(lockup_detected(h)))
9012 options = kzalloc(sizeof(*options), GFP_KERNEL);
9014 dev_err(&h->pdev->dev,
9015 "Error: failed to disable rld caching, during alloc.\n");
9021 /* first, get the current diag options settings */
9022 if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
9023 RAID_CTLR_LUNID, TYPE_CMD))
9026 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
9027 PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
9028 if ((rc != 0) || (c->err_info->CommandStatus != 0))
9031 /* Now, set the bit for disabling the RLD caching */
9032 *options |= HPSA_DIAG_OPTS_DISABLE_RLD_CACHING;
9034 if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0,
9035 RAID_CTLR_LUNID, TYPE_CMD))
9038 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
9039 PCI_DMA_TODEVICE, DEFAULT_TIMEOUT);
9040 if ((rc != 0) || (c->err_info->CommandStatus != 0))
9043 /* Now verify that it got set: */
9044 if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
9045 RAID_CTLR_LUNID, TYPE_CMD))
9048 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
9049 PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
9050 if ((rc != 0) || (c->err_info->CommandStatus != 0))
9053 if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
9057 dev_err(&h->pdev->dev,
9058 "Error: failed to disable report lun data caching.\n");
9064 static void hpsa_shutdown(struct pci_dev *pdev)
9066 struct ctlr_info *h;
9068 h = pci_get_drvdata(pdev);
9069 	/* Send the flush cache command to write all data in the battery-
9070 	 * backed cache out to disk, then turn the board interrupts off and
9071 	 * release the IRQs before the machine goes down. */
9073 hpsa_flush_cache(h);
9074 h->access.set_intr_mask(h, HPSA_INTR_OFF);
9075 hpsa_free_irqs(h); /* init_one 4 */
9076 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
9079 static void hpsa_free_device_info(struct ctlr_info *h)
9083 for (i = 0; i < h->ndevices; i++) {
9089 static void hpsa_remove_one(struct pci_dev *pdev)
9091 struct ctlr_info *h;
9092 unsigned long flags;
9094 if (pci_get_drvdata(pdev) == NULL) {
9095 dev_err(&pdev->dev, "unable to remove device\n");
9098 h = pci_get_drvdata(pdev);
9100 /* Get rid of any controller monitoring work items */
9101 spin_lock_irqsave(&h->lock, flags);
9102 h->remove_in_progress = 1;
9103 spin_unlock_irqrestore(&h->lock, flags);
9104 cancel_delayed_work_sync(&h->monitor_ctlr_work);
9105 cancel_delayed_work_sync(&h->rescan_ctlr_work);
9106 destroy_workqueue(h->rescan_ctlr_wq);
9107 destroy_workqueue(h->resubmit_wq);
9109 hpsa_delete_sas_host(h);
9112 * Call before disabling interrupts.
9113 * scsi_remove_host can trigger I/O operations especially
9114 * when multipath is enabled. There can be SYNCHRONIZE CACHE
9115 * operations which cannot complete and will hang the system.
9118 scsi_remove_host(h->scsi_host); /* init_one 8 */
9119 /* includes hpsa_free_irqs - init_one 4 */
9120 /* includes hpsa_disable_interrupt_mode - pci_init 2 */
9121 hpsa_shutdown(pdev);
9123 hpsa_free_device_info(h); /* scan */
9125 kfree(h->hba_inquiry_data); /* init_one 10 */
9126 h->hba_inquiry_data = NULL; /* init_one 10 */
9127 hpsa_free_ioaccel2_sg_chain_blocks(h);
9128 hpsa_free_performant_mode(h); /* init_one 7 */
9129 hpsa_free_sg_chain_blocks(h); /* init_one 6 */
9130 hpsa_free_cmd_pool(h); /* init_one 5 */
9131 kfree(h->lastlogicals);
9133 /* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */
9135 scsi_host_put(h->scsi_host); /* init_one 3 */
9136 h->scsi_host = NULL; /* init_one 3 */
9138 /* includes hpsa_disable_interrupt_mode - pci_init 2 */
9139 hpsa_free_pci_init(h); /* init_one 2.5 */
9141 free_percpu(h->lockup_detected); /* init_one 2 */
9142 h->lockup_detected = NULL; /* init_one 2 */
9143 /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */
9145 kfree(h); /* init_one 1 */
9148 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
9149 __attribute__((unused)) pm_message_t state)
9154 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
9159 static struct pci_driver hpsa_pci_driver = {
9161 .probe = hpsa_init_one,
9162 .remove = hpsa_remove_one,
9163 	.id_table = hpsa_pci_device_id,
9164 .shutdown = hpsa_shutdown,
9165 .suspend = hpsa_suspend,
9166 .resume = hpsa_resume,
9169 /* Fill in bucket_map[], given nsgs (the max number of
9170 * scatter gather elements supported) and bucket[],
9171 * which is an array of 8 integers. The bucket[] array
9172 * contains 8 different DMA transfer sizes (in 16
9173 * byte increments) which the controller uses to fetch
9174 * commands. This function fills in bucket_map[], which
9175 * maps a given number of scatter gather elements to one of
9176 * the 8 DMA transfer sizes. The point of it is to allow the
9177 * controller to only do as much DMA as needed to fetch the
9178 * command, with the DMA transfer size encoded in the lower
9179 * bits of the command address.
9181 static void calc_bucket_map(int bucket[], int num_buckets,
9182 int nsgs, int min_blocks, u32 *bucket_map)
9186 /* Note, bucket_map must have nsgs+1 entries. */
9187 for (i = 0; i <= nsgs; i++) {
9188 /* Compute size of a command with i SG entries */
9189 size = i + min_blocks;
9190 b = num_buckets; /* Assume the biggest bucket */
9191 /* Find the bucket that is just big enough */
9192 for (j = 0; j < num_buckets; j++) {
9193 if (bucket[j] >= size) {
9198 /* for a command with i SG entries, use bucket b. */
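/*
 * Editor's worked example (not in the original): with the performant
 * mode table bft[] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4}
 * and min_blocks = 4, a command carrying 3 SG entries needs
 * 3 + 4 = 7 sixteen-byte blocks; the smallest bucket >= 7 is
 * bft[2] = 8, so bucket_map[3] = 2 and the controller fetches only
 * 8 * 16 = 128 bytes for that command.
 */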
9204  * return -ENODEV on error, 0 on success (or no action)
9205 * allocates numerous items that must be freed later
9207 static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
9210 unsigned long register_value;
9211 unsigned long transMethod = CFGTBL_Trans_Performant |
9212 (trans_support & CFGTBL_Trans_use_short_tags) |
9213 CFGTBL_Trans_enable_directed_msix |
9214 (trans_support & (CFGTBL_Trans_io_accel1 |
9215 CFGTBL_Trans_io_accel2));
9216 struct access_method access = SA5_performant_access;
9218 /* This is a bit complicated. There are 8 registers on
9219 	 * the controller which we write to tell it the 8 different
9220 	 * sizes of commands that may occur. It's a way of
9221 * reducing the DMA done to fetch each command. Encoded into
9222 * each command's tag are 3 bits which communicate to the controller
9223 * which of the eight sizes that command fits within. The size of
9224 * each command depends on how many scatter gather entries there are.
9225 * Each SG entry requires 16 bytes. The eight registers are programmed
9226 * with the number of 16-byte blocks a command of that size requires.
9227 	 * The smallest command possible requires 5 such 16-byte blocks;
9228 * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
9229 * blocks. Note, this only extends to the SG entries contained
9230 * within the command block, and does not extend to chained blocks
9231 * of SG elements. bft[] contains the eight values we write to
9232 * the registers. They are not evenly distributed, but have more
9233 * sizes for small commands, and fewer sizes for larger commands.
9235 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
9236 #define MIN_IOACCEL2_BFT_ENTRY 5
9237 #define HPSA_IOACCEL2_HEADER_SZ 4
9238 int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
9239 13, 14, 15, 16, 17, 18, 19,
9240 HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
9241 BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
9242 BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
9243 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
9244 16 * MIN_IOACCEL2_BFT_ENTRY);
9245 BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
9246 BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
9247 /* 5 = 1 s/g entry or 4k
9248 * 6 = 2 s/g entry or 8k
9249 * 8 = 4 s/g entry or 16k
9250 * 10 = 6 s/g entry or 24k
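	 * (editor's addition: in general an entry of N blocks covers
	 * N - 4 embedded SG entries, i.e. (N - 4) * 4k of data when each
	 * SG element maps one 4k page; larger transfers fall back to
	 * chained SG blocks)
	 */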
9253 /* If the controller supports either ioaccel method then
9254 * we can also use the RAID stack submit path that does not
9255 * perform the superfluous readl() after each command submission.
9257 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
9258 access = SA5_performant_access_no_read;
9260 /* Controller spec: zero out this buffer. */
9261 for (i = 0; i < h->nreply_queues; i++)
9262 memset(h->reply_queue[i].head, 0, h->reply_queue_size);
9264 bft[7] = SG_ENTRIES_IN_CMD + 4;
9265 calc_bucket_map(bft, ARRAY_SIZE(bft),
9266 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
9267 for (i = 0; i < 8; i++)
9268 writel(bft[i], &h->transtable->BlockFetch[i]);
9270 /* size of controller ring buffer */
9271 writel(h->max_commands, &h->transtable->RepQSize);
9272 writel(h->nreply_queues, &h->transtable->RepQCount);
9273 writel(0, &h->transtable->RepQCtrAddrLow32);
9274 writel(0, &h->transtable->RepQCtrAddrHigh32);
9276 for (i = 0; i < h->nreply_queues; i++) {
9277 writel(0, &h->transtable->RepQAddr[i].upper);
9278 writel(h->reply_queue[i].busaddr,
9279 &h->transtable->RepQAddr[i].lower);
9282 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
9283 writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
9285 * enable outbound interrupt coalescing in accelerator mode;
9287 if (trans_support & CFGTBL_Trans_io_accel1) {
9288 access = SA5_ioaccel_mode1_access;
9289 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
9290 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
9292 if (trans_support & CFGTBL_Trans_io_accel2) {
9293 access = SA5_ioaccel_mode2_access;
9294 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
9295 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
9298 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
9299 if (hpsa_wait_for_mode_change_ack(h)) {
9300 dev_err(&h->pdev->dev,
9301 "performant mode problem - doorbell timeout\n");
9304 register_value = readl(&(h->cfgtable->TransportActive));
9305 if (!(register_value & CFGTBL_Trans_Performant)) {
9306 dev_err(&h->pdev->dev,
9307 "performant mode problem - transport not active\n");
9310 /* Change the access methods to the performant access methods */
9312 h->transMethod = transMethod;
9314 if (!((trans_support & CFGTBL_Trans_io_accel1) ||
9315 (trans_support & CFGTBL_Trans_io_accel2)))
9318 if (trans_support & CFGTBL_Trans_io_accel1) {
9319 /* Set up I/O accelerator mode */
9320 for (i = 0; i < h->nreply_queues; i++) {
9321 writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
9322 h->reply_queue[i].current_entry =
9323 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
9325 bft[7] = h->ioaccel_maxsg + 8;
9326 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
9327 h->ioaccel1_blockFetchTable);
9329 /* initialize all reply queue entries to unused */
9330 for (i = 0; i < h->nreply_queues; i++)
9331 memset(h->reply_queue[i].head,
9332 (u8) IOACCEL_MODE1_REPLY_UNUSED,
9333 h->reply_queue_size);
9335 /* set all the constant fields in the accelerator command
9336 * frames once at init time to save CPU cycles later.
9338 for (i = 0; i < h->nr_cmds; i++) {
9339 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
9341 cp->function = IOACCEL1_FUNCTION_SCSIIO;
9342 cp->err_info = (u32) (h->errinfo_pool_dhandle +
9343 (i * sizeof(struct ErrorInfo)));
9344 cp->err_info_len = sizeof(struct ErrorInfo);
9345 cp->sgl_offset = IOACCEL1_SGLOFFSET;
9346 cp->host_context_flags =
9347 cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
9348 cp->timeout_sec = 0;
9351 cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
9353 cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
9354 (i * sizeof(struct io_accel1_cmd)));
9356 } else if (trans_support & CFGTBL_Trans_io_accel2) {
9357 u64 cfg_offset, cfg_base_addr_index;
9358 u32 bft2_offset, cfg_base_addr;
9361 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
9362 &cfg_base_addr_index, &cfg_offset);
9363 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
9364 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
9365 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
9366 4, h->ioaccel2_blockFetchTable);
9367 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
9368 BUILD_BUG_ON(offsetof(struct CfgTable,
9369 io_accel_request_size_offset) != 0xb8);
9370 h->ioaccel2_bft2_regs =
9371 remap_pci_mem(pci_resource_start(h->pdev,
9372 cfg_base_addr_index) +
9373 cfg_offset + bft2_offset,
9375 sizeof(*h->ioaccel2_bft2_regs));
9376 for (i = 0; i < ARRAY_SIZE(bft2); i++)
9377 writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
9379 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
9380 if (hpsa_wait_for_mode_change_ack(h)) {
9381 dev_err(&h->pdev->dev,
9382 "performant mode problem - enabling ioaccel mode\n");
9388 /* Free ioaccel1 mode command blocks and block fetch table */
9389 static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
9391 if (h->ioaccel_cmd_pool) {
9392 pci_free_consistent(h->pdev,
9393 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
9394 h->ioaccel_cmd_pool,
9395 h->ioaccel_cmd_pool_dhandle);
9396 h->ioaccel_cmd_pool = NULL;
9397 h->ioaccel_cmd_pool_dhandle = 0;
9399 kfree(h->ioaccel1_blockFetchTable);
9400 h->ioaccel1_blockFetchTable = NULL;
9403 /* Allocate ioaccel1 mode command blocks and block fetch table */
9404 static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
9407 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
9408 if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
9409 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
9411 /* Command structures must be aligned on a 128-byte boundary
9412 	 * because the 7 lower bits of the address are used by the hardware. */
9415 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
9416 IOACCEL1_COMMANDLIST_ALIGNMENT);
9417 h->ioaccel_cmd_pool =
9418 pci_alloc_consistent(h->pdev,
9419 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
9420 &(h->ioaccel_cmd_pool_dhandle));
9422 h->ioaccel1_blockFetchTable =
9423 kmalloc(((h->ioaccel_maxsg + 1) *
9424 sizeof(u32)), GFP_KERNEL);
9426 if ((h->ioaccel_cmd_pool == NULL) ||
9427 (h->ioaccel1_blockFetchTable == NULL))
9430 memset(h->ioaccel_cmd_pool, 0,
9431 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
9435 hpsa_free_ioaccel1_cmd_and_bft(h);
9439 /* Free ioaccel2 mode command blocks and block fetch table */
9440 static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
9442 hpsa_free_ioaccel2_sg_chain_blocks(h);
9444 if (h->ioaccel2_cmd_pool) {
9445 pci_free_consistent(h->pdev,
9446 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
9447 h->ioaccel2_cmd_pool,
9448 h->ioaccel2_cmd_pool_dhandle);
9449 h->ioaccel2_cmd_pool = NULL;
9450 h->ioaccel2_cmd_pool_dhandle = 0;
9452 kfree(h->ioaccel2_blockFetchTable);
9453 h->ioaccel2_blockFetchTable = NULL;
9456 /* Allocate ioaccel2 mode command blocks and block fetch table */
9457 static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
9464 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
9465 if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
9466 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
9468 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
9469 IOACCEL2_COMMANDLIST_ALIGNMENT);
9470 h->ioaccel2_cmd_pool =
9471 pci_alloc_consistent(h->pdev,
9472 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
9473 &(h->ioaccel2_cmd_pool_dhandle));
9475 h->ioaccel2_blockFetchTable =
9476 kmalloc(((h->ioaccel_maxsg + 1) *
9477 sizeof(u32)), GFP_KERNEL);
9479 if ((h->ioaccel2_cmd_pool == NULL) ||
9480 (h->ioaccel2_blockFetchTable == NULL)) {
9485 rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
9489 memset(h->ioaccel2_cmd_pool, 0,
9490 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
9494 hpsa_free_ioaccel2_cmd_and_bft(h);
9498 /* Free items allocated by hpsa_put_ctlr_into_performant_mode */
9499 static void hpsa_free_performant_mode(struct ctlr_info *h)
9501 kfree(h->blockFetchTable);
9502 h->blockFetchTable = NULL;
9503 hpsa_free_reply_queues(h);
9504 hpsa_free_ioaccel1_cmd_and_bft(h);
9505 hpsa_free_ioaccel2_cmd_and_bft(h);
9508 /* return -ENODEV on error, 0 on success (or no action)
9509 * allocates numerous items that must be freed later
9511 static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
9514 unsigned long transMethod = CFGTBL_Trans_Performant |
9515 CFGTBL_Trans_use_short_tags;
9518 if (hpsa_simple_mode)
9521 trans_support = readl(&(h->cfgtable->TransportSupport));
9522 if (!(trans_support & PERFORMANT_MODE))
9525 /* Check for I/O accelerator mode support */
9526 if (trans_support & CFGTBL_Trans_io_accel1) {
9527 transMethod |= CFGTBL_Trans_io_accel1 |
9528 CFGTBL_Trans_enable_directed_msix;
9529 rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
9532 } else if (trans_support & CFGTBL_Trans_io_accel2) {
9533 transMethod |= CFGTBL_Trans_io_accel2 |
9534 CFGTBL_Trans_enable_directed_msix;
9535 rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
9540 h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
9541 hpsa_get_max_perf_mode_cmds(h);
9542 /* Performant mode ring buffer and supporting data structures */
9543 h->reply_queue_size = h->max_commands * sizeof(u64);
9545 for (i = 0; i < h->nreply_queues; i++) {
9546 h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
9547 h->reply_queue_size,
9548 &(h->reply_queue[i].busaddr));
9549 if (!h->reply_queue[i].head) {
9551 goto clean1; /* rq, ioaccel */
9553 h->reply_queue[i].size = h->max_commands;
9554 h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
9555 h->reply_queue[i].current_entry = 0;
9558 /* Need a block fetch table for performant mode */
9559 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
9560 sizeof(u32)), GFP_KERNEL);
9561 if (!h->blockFetchTable) {
9563 goto clean1; /* rq, ioaccel */
9566 rc = hpsa_enter_performant_mode(h, trans_support);
9568 goto clean2; /* bft, rq, ioaccel */
9571 clean2: /* bft, rq, ioaccel */
9572 kfree(h->blockFetchTable);
9573 h->blockFetchTable = NULL;
9574 clean1: /* rq, ioaccel */
9575 hpsa_free_reply_queues(h);
9576 hpsa_free_ioaccel1_cmd_and_bft(h);
9577 hpsa_free_ioaccel2_cmd_and_bft(h);
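/*
 * Editor's note (illustrative arithmetic): each reply queue allocated
 * above holds h->max_commands 64-bit tags, so a controller reporting
 * e.g. 1024 max commands costs 8 KiB of coherent DMA memory per
 * reply queue.
 */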
9581 static int is_accelerated_cmd(struct CommandList *c)
9583 return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
9586 static void hpsa_drain_accel_commands(struct ctlr_info *h)
9588 struct CommandList *c = NULL;
9589 int i, accel_cmds_out;
9592 do { /* wait for all outstanding ioaccel commands to drain out */
9594 for (i = 0; i < h->nr_cmds; i++) {
9595 c = h->cmd_pool + i;
9596 refcount = atomic_inc_return(&c->refcount);
9597 if (refcount > 1) /* Command is allocated */
9598 accel_cmds_out += is_accelerated_cmd(c);
9601 if (accel_cmds_out <= 0)
9607 static struct hpsa_sas_phy *hpsa_alloc_sas_phy(
9608 struct hpsa_sas_port *hpsa_sas_port)
9610 struct hpsa_sas_phy *hpsa_sas_phy;
9611 struct sas_phy *phy;
9613 hpsa_sas_phy = kzalloc(sizeof(*hpsa_sas_phy), GFP_KERNEL);
9617 phy = sas_phy_alloc(hpsa_sas_port->parent_node->parent_dev,
9618 hpsa_sas_port->next_phy_index);
9620 kfree(hpsa_sas_phy);
9624 hpsa_sas_port->next_phy_index++;
9625 hpsa_sas_phy->phy = phy;
9626 hpsa_sas_phy->parent_port = hpsa_sas_port;
9628 return hpsa_sas_phy;
9631 static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
9633 struct sas_phy *phy = hpsa_sas_phy->phy;
9635 sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
9636 if (hpsa_sas_phy->added_to_port)
9637 list_del(&hpsa_sas_phy->phy_list_entry);
9638 sas_phy_delete(phy);
9639 kfree(hpsa_sas_phy);
9642 static int hpsa_sas_port_add_phy(struct hpsa_sas_phy *hpsa_sas_phy)
9645 struct hpsa_sas_port *hpsa_sas_port;
9646 struct sas_phy *phy;
9647 struct sas_identify *identify;
9649 hpsa_sas_port = hpsa_sas_phy->parent_port;
9650 phy = hpsa_sas_phy->phy;
9652 identify = &phy->identify;
9653 memset(identify, 0, sizeof(*identify));
9654 identify->sas_address = hpsa_sas_port->sas_address;
9655 identify->device_type = SAS_END_DEVICE;
9656 identify->initiator_port_protocols = SAS_PROTOCOL_STP;
9657 identify->target_port_protocols = SAS_PROTOCOL_STP;
9658 phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
9659 phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
9660 phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
9661 phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
9662 phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
9664 rc = sas_phy_add(hpsa_sas_phy->phy);
9668 sas_port_add_phy(hpsa_sas_port->port, hpsa_sas_phy->phy);
9669 list_add_tail(&hpsa_sas_phy->phy_list_entry,
9670 &hpsa_sas_port->phy_list_head);
9671 hpsa_sas_phy->added_to_port = true;
9677 hpsa_sas_port_add_rphy(struct hpsa_sas_port *hpsa_sas_port,
9678 struct sas_rphy *rphy)
9680 struct sas_identify *identify;
9682 identify = &rphy->identify;
9683 identify->sas_address = hpsa_sas_port->sas_address;
9684 identify->initiator_port_protocols = SAS_PROTOCOL_STP;
9685 identify->target_port_protocols = SAS_PROTOCOL_STP;
9687 return sas_rphy_add(rphy);
9690 static struct hpsa_sas_port
9691 *hpsa_alloc_sas_port(struct hpsa_sas_node *hpsa_sas_node,
9695 struct hpsa_sas_port *hpsa_sas_port;
9696 struct sas_port *port;
9698 hpsa_sas_port = kzalloc(sizeof(*hpsa_sas_port), GFP_KERNEL);
9702 INIT_LIST_HEAD(&hpsa_sas_port->phy_list_head);
9703 hpsa_sas_port->parent_node = hpsa_sas_node;
9705 port = sas_port_alloc_num(hpsa_sas_node->parent_dev);
9707 goto free_hpsa_port;
9709 rc = sas_port_add(port);
9713 hpsa_sas_port->port = port;
9714 hpsa_sas_port->sas_address = sas_address;
9715 list_add_tail(&hpsa_sas_port->port_list_entry,
9716 &hpsa_sas_node->port_list_head);
9718 return hpsa_sas_port;
9721 sas_port_free(port);
9723 kfree(hpsa_sas_port);
9728 static void hpsa_free_sas_port(struct hpsa_sas_port *hpsa_sas_port)
9730 struct hpsa_sas_phy *hpsa_sas_phy;
9731 struct hpsa_sas_phy *next;
9733 list_for_each_entry_safe(hpsa_sas_phy, next,
9734 &hpsa_sas_port->phy_list_head, phy_list_entry)
9735 hpsa_free_sas_phy(hpsa_sas_phy);
9737 sas_port_delete(hpsa_sas_port->port);
9738 list_del(&hpsa_sas_port->port_list_entry);
9739 kfree(hpsa_sas_port);
9742 static struct hpsa_sas_node *hpsa_alloc_sas_node(struct device *parent_dev)
9744 struct hpsa_sas_node *hpsa_sas_node;
9746 hpsa_sas_node = kzalloc(sizeof(*hpsa_sas_node), GFP_KERNEL);
9747 if (hpsa_sas_node) {
9748 hpsa_sas_node->parent_dev = parent_dev;
9749 INIT_LIST_HEAD(&hpsa_sas_node->port_list_head);
9752 return hpsa_sas_node;
9755 static void hpsa_free_sas_node(struct hpsa_sas_node *hpsa_sas_node)
9757 struct hpsa_sas_port *hpsa_sas_port;
9758 struct hpsa_sas_port *next;
9763 list_for_each_entry_safe(hpsa_sas_port, next,
9764 &hpsa_sas_node->port_list_head, port_list_entry)
9765 hpsa_free_sas_port(hpsa_sas_port);
9767 kfree(hpsa_sas_node);
9770 static struct hpsa_scsi_dev_t
9771 *hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
9772 struct sas_rphy *rphy)
9775 struct hpsa_scsi_dev_t *device;
9777 for (i = 0; i < h->ndevices; i++) {
9779 if (!device->sas_port)
9781 if (device->sas_port->rphy == rphy)
9788 static int hpsa_add_sas_host(struct ctlr_info *h)
9791 struct device *parent_dev;
9792 struct hpsa_sas_node *hpsa_sas_node;
9793 struct hpsa_sas_port *hpsa_sas_port;
9794 struct hpsa_sas_phy *hpsa_sas_phy;
9796 parent_dev = &h->scsi_host->shost_gendev;
9798 hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
9802 hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address);
9803 if (!hpsa_sas_port) {
9808 hpsa_sas_phy = hpsa_alloc_sas_phy(hpsa_sas_port);
9809 if (!hpsa_sas_phy) {
9814 rc = hpsa_sas_port_add_phy(hpsa_sas_phy);
9818 h->sas_host = hpsa_sas_node;
9823 hpsa_free_sas_phy(hpsa_sas_phy);
9825 hpsa_free_sas_port(hpsa_sas_port);
9827 hpsa_free_sas_node(hpsa_sas_node);
9832 static void hpsa_delete_sas_host(struct ctlr_info *h)
9834 hpsa_free_sas_node(h->sas_host);
9837 static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
9838 struct hpsa_scsi_dev_t *device)
9841 struct hpsa_sas_port *hpsa_sas_port;
9842 struct sas_rphy *rphy;
9844 hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, device->sas_address);
9848 rphy = sas_end_device_alloc(hpsa_sas_port->port);
9854 hpsa_sas_port->rphy = rphy;
9855 device->sas_port = hpsa_sas_port;
9857 rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy);
9864 hpsa_free_sas_port(hpsa_sas_port);
9865 device->sas_port = NULL;
9870 static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device)
9872 if (device->sas_port) {
9873 hpsa_free_sas_port(device->sas_port);
9874 device->sas_port = NULL;
9879 hpsa_sas_get_linkerrors(struct sas_phy *phy)
9885 hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
9892 hpsa_sas_get_bay_identifier(struct sas_rphy *rphy)
9898 hpsa_sas_phy_reset(struct sas_phy *phy, int hard_reset)
9904 hpsa_sas_phy_enable(struct sas_phy *phy, int enable)
9910 hpsa_sas_phy_setup(struct sas_phy *phy)
9916 hpsa_sas_phy_release(struct sas_phy *phy)
9921 hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
9926 /* SMP = Serial Management Protocol */
9928 hpsa_sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
9929 struct request *req)
9934 static struct sas_function_template hpsa_sas_transport_functions = {
9935 .get_linkerrors = hpsa_sas_get_linkerrors,
9936 .get_enclosure_identifier = hpsa_sas_get_enclosure_identifier,
9937 .get_bay_identifier = hpsa_sas_get_bay_identifier,
9938 .phy_reset = hpsa_sas_phy_reset,
9939 .phy_enable = hpsa_sas_phy_enable,
9940 .phy_setup = hpsa_sas_phy_setup,
9941 .phy_release = hpsa_sas_phy_release,
9942 .set_phy_speed = hpsa_sas_phy_speed,
9943 .smp_handler = hpsa_sas_smp_handler,
9947  * This is it. Register the PCI driver information for the cards we control;
9948 * the OS will call our registered routines when it finds one of our cards.
9950 static int __init hpsa_init(void)
9954 hpsa_sas_transport_template =
9955 sas_attach_transport(&hpsa_sas_transport_functions);
9956 if (!hpsa_sas_transport_template)
9959 rc = pci_register_driver(&hpsa_pci_driver);
9962 sas_release_transport(hpsa_sas_transport_template);
9967 static void __exit hpsa_cleanup(void)
9969 pci_unregister_driver(&hpsa_pci_driver);
9970 sas_release_transport(hpsa_sas_transport_template);
9973 static void __attribute__((unused)) verify_offsets(void)
9975 #define VERIFY_OFFSET(member, offset) \
9976 BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
9978 VERIFY_OFFSET(structure_size, 0);
9979 VERIFY_OFFSET(volume_blk_size, 4);
9980 VERIFY_OFFSET(volume_blk_cnt, 8);
9981 VERIFY_OFFSET(phys_blk_shift, 16);
9982 VERIFY_OFFSET(parity_rotation_shift, 17);
9983 VERIFY_OFFSET(strip_size, 18);
9984 VERIFY_OFFSET(disk_starting_blk, 20);
9985 VERIFY_OFFSET(disk_blk_cnt, 28);
9986 VERIFY_OFFSET(data_disks_per_row, 36);
9987 VERIFY_OFFSET(metadata_disks_per_row, 38);
9988 VERIFY_OFFSET(row_cnt, 40);
9989 VERIFY_OFFSET(layout_map_count, 42);
9990 VERIFY_OFFSET(flags, 44);
9991 VERIFY_OFFSET(dekindex, 46);
9992 /* VERIFY_OFFSET(reserved, 48 */
9993 VERIFY_OFFSET(data, 64);
9995 #undef VERIFY_OFFSET
9997 #define VERIFY_OFFSET(member, offset) \
9998 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
10000 VERIFY_OFFSET(IU_type, 0);
10001 VERIFY_OFFSET(direction, 1);
10002 VERIFY_OFFSET(reply_queue, 2);
10003 /* VERIFY_OFFSET(reserved1, 3); */
10004 VERIFY_OFFSET(scsi_nexus, 4);
10005 VERIFY_OFFSET(Tag, 8);
10006 VERIFY_OFFSET(cdb, 16);
10007 VERIFY_OFFSET(cciss_lun, 32);
10008 VERIFY_OFFSET(data_len, 40);
10009 VERIFY_OFFSET(cmd_priority_task_attr, 44);
10010 VERIFY_OFFSET(sg_count, 45);
10011 /* VERIFY_OFFSET(reserved3 */
10012 VERIFY_OFFSET(err_ptr, 48);
10013 VERIFY_OFFSET(err_len, 56);
10014 /* VERIFY_OFFSET(reserved4 */
10015 VERIFY_OFFSET(sg, 64);
10017 #undef VERIFY_OFFSET
10019 #define VERIFY_OFFSET(member, offset) \
10020 BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
10022 VERIFY_OFFSET(dev_handle, 0x00);
10023 VERIFY_OFFSET(reserved1, 0x02);
10024 VERIFY_OFFSET(function, 0x03);
10025 VERIFY_OFFSET(reserved2, 0x04);
10026 VERIFY_OFFSET(err_info, 0x0C);
10027 VERIFY_OFFSET(reserved3, 0x10);
10028 VERIFY_OFFSET(err_info_len, 0x12);
10029 VERIFY_OFFSET(reserved4, 0x13);
10030 VERIFY_OFFSET(sgl_offset, 0x14);
10031 VERIFY_OFFSET(reserved5, 0x15);
10032 VERIFY_OFFSET(transfer_len, 0x1C);
10033 VERIFY_OFFSET(reserved6, 0x20);
10034 VERIFY_OFFSET(io_flags, 0x24);
10035 VERIFY_OFFSET(reserved7, 0x26);
10036 VERIFY_OFFSET(LUN, 0x34);
10037 VERIFY_OFFSET(control, 0x3C);
10038 VERIFY_OFFSET(CDB, 0x40);
10039 VERIFY_OFFSET(reserved8, 0x50);
10040 VERIFY_OFFSET(host_context_flags, 0x60);
10041 VERIFY_OFFSET(timeout_sec, 0x62);
10042 VERIFY_OFFSET(ReplyQueue, 0x64);
10043 VERIFY_OFFSET(reserved9, 0x65);
10044 VERIFY_OFFSET(tag, 0x68);
10045 VERIFY_OFFSET(host_addr, 0x70);
10046 VERIFY_OFFSET(CISS_LUN, 0x78);
10047 VERIFY_OFFSET(SG, 0x78 + 8);
10048 #undef VERIFY_OFFSET
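/*
 * Editor's note (not in the original): VERIFY_OFFSET() rides on
 * BUILD_BUG_ON(), so every offset above is enforced at compile time;
 * if padding ever shifted a member (say io_accel2_cmd.sg away from
 * byte 64), the build would fail instead of the controller DMA'ing
 * to the wrong place at run time.
 */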
10051 module_init(hpsa_init);
10052 module_exit(hpsa_cleanup);