1 /* Copyright (c) 2012 - 2015 UNISYS CORPORATION
2  * All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or (at
7  * your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12  * NON INFRINGEMENT.  See the GNU General Public License for more
13  * details.
14  */
15
16 #include <linux/debugfs.h>
17 #include <linux/skbuff.h>
18 #include <linux/kthread.h>
19 #include <scsi/scsi.h>
20 #include <scsi/scsi_host.h>
21 #include <scsi/scsi_cmnd.h>
22 #include <scsi/scsi_device.h>
23
24 #include "visorbus.h"
25 #include "iochannel.h"
26
27 /* The Send and Receive Buffers of the IO Queue may both be full */
28
29 #define IOS_ERROR_THRESHOLD     1000
30 /* MAX_BUF = 6 lines x 10 MAXVHBA x 80 characters
31  *         = 4800 bytes, rounded up to 2^13 = 8192 bytes
32  */
33 #define MAX_BUF                 8192
34 #define MAX_PENDING_REQUESTS    (MIN_NUMSIGNALS * 2)
35 #define VISORHBA_ERROR_COUNT    30
36 #define VISORHBA_OPEN_MAX       1
37
38 static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
39                                       void (*visorhba_cmnd_done)
40                                             (struct scsi_cmnd *));
41 #ifdef DEF_SCSI_QCMD
42 static DEF_SCSI_QCMD(visorhba_queue_command)
43 #else
44 #define visorhba_queue_command visorhba_queue_command_lck
45 #endif
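/* When DEF_SCSI_QCMD is available (see <scsi/scsi_host.h>), the macro above
 * generates a visorhba_queue_command() wrapper that takes the host lock and
 * then calls visorhba_queue_command_lck() with cmd->scsi_done as the
 * completion callback; otherwise the _lck routine is used directly.
 */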
46 static int visorhba_probe(struct visor_device *dev);
47 static void visorhba_remove(struct visor_device *dev);
48 static int visorhba_pause(struct visor_device *dev,
49                           visorbus_state_complete_func complete_func);
50 static int visorhba_resume(struct visor_device *dev,
51                            visorbus_state_complete_func complete_func);
52
53 static ssize_t info_debugfs_read(struct file *file, char __user *buf,
54                                  size_t len, loff_t *offset);
55 static struct dentry *visorhba_debugfs_dir;
56 static const struct file_operations debugfs_info_fops = {
57         .read = info_debugfs_read,
58 };
59
60 /* GUIDs for the HBA channel type supported by this driver */
61 static struct visor_channeltype_descriptor visorhba_channel_types[] = {
62         /* Note that the only channel type we expect to be reported by the
63          * bus driver is the SPAR_VHBA channel.
64          */
65         { SPAR_VHBA_CHANNEL_PROTOCOL_UUID, "sparvhba" },
66         { NULL_UUID_LE, NULL }
67 };
68
69 /* This is used to tell the visor bus driver which types of visor devices
70  * we support, and what functions to call when a visor device that we support
71  * is attached or removed.
72  */
73 static struct visor_driver visorhba_driver = {
74         .name = "visorhba",
75         .owner = THIS_MODULE,
76         .channel_types = visorhba_channel_types,
77         .probe = visorhba_probe,
78         .remove = visorhba_remove,
79         .pause = visorhba_pause,
80         .resume = visorhba_resume,
81         .channel_interrupt = NULL,
82 };
83 MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
84 MODULE_ALIAS("visorbus:" SPAR_VHBA_CHANNEL_PROTOCOL_UUID_STR);
85
86 struct visor_thread_info {
87         struct task_struct *task;
88         struct completion has_stopped;
89         int id;
90 };
91
92 struct visordisk_info {
93         u32 valid;
94         u32 channel, id, lun;   /* Disk Path */
95         atomic_t ios_threshold;
96         atomic_t error_count;
97         struct visordisk_info *next;
98 };
99
100 struct scsipending {
101         struct uiscmdrsp cmdrsp;
102         void *sent;             /* The Data being tracked */
103         char cmdtype;           /* Type of pointer that is being stored */
104 };
105
106 /* Work Data for dar_work_queue */
107 struct diskaddremove {
108         u8 add;                 /* 0-remove, 1-add */
109         struct Scsi_Host *shost; /* Scsi Host for this visorhba instance */
110         u32 channel, id, lun;   /* Disk Path */
111         struct diskaddremove *next;
112 };
113
114 /* Each scsi_host has a host_data area that contains this struct. */
115 struct visorhba_devdata {
116         struct Scsi_Host *scsihost;
117         struct visor_device *dev;
118         struct list_head dev_info_list;
119         /* Tracks the requests that have been forwarded to
120          * the IOVM and haven't returned yet
121          */
122         struct scsipending pending[MAX_PENDING_REQUESTS];
123         /* Start search for next pending free slot here */
124         unsigned int nextinsert;
125         spinlock_t privlock; /* lock to protect data in devdata */
126         bool serverdown;
127         bool serverchangingstate;
128         unsigned long long acquire_failed_cnt;
129         unsigned long long interrupts_rcvd;
130         unsigned long long interrupts_notme;
131         unsigned long long interrupts_disabled;
132         u64 __iomem *flags_addr;
133         atomic_t interrupt_rcvd;
134         wait_queue_head_t rsp_queue;
135         struct visordisk_info head;
136         unsigned int max_buff_len;
137         int devnum;
138         struct visor_thread_info threadinfo;
139         int thread_wait_ms;
140 };
141
142 struct visorhba_devices_open {
143         struct visorhba_devdata *devdata;
144 };
145
146 static struct visorhba_devices_open visorhbas_open[VISORHBA_OPEN_MAX];
147
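/* for_each_vdisk_match() expands to a bare for/if, so the statement that
 * follows an invocation runs for each visited vdisk whose channel/id/lun
 * match @match.  The walk starts at devdata's embedded dummy head and stops
 * when iter->next is NULL, leaving @iter pointing at the list tail when the
 * loop falls through; visorhba_slave_alloc() relies on that to append a new
 * visordisk_info.
 */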
148 #define for_each_vdisk_match(iter, list, match)                   \
149         for (iter = &list->head; iter->next; iter = iter->next) \
150                 if ((iter->channel == match->channel) &&                  \
151                     (iter->id == match->id) &&                    \
152                     (iter->lun == match->lun))
153 /**
154  *      visor_thread_start - starts a thread for the device
155  *      @thrinfo: The thread to start
156  *      @threadfn: Function the thread starts
157  *      @thrcontext: Context to pass to the thread, i.e. devdata
158  *      @name: string describing name of thread
159  *
160  *      Starts a thread for the device.
161  *
162  *      Returns 0 on success, or the error from kthread_run() on failure.
163  */
164 static int visor_thread_start(struct visor_thread_info *thrinfo,
165                               int (*threadfn)(void *),
166                               void *thrcontext, char *name)
167 {
168         /* used to stop the thread */
169         init_completion(&thrinfo->has_stopped);
170         thrinfo->task = kthread_run(threadfn, thrcontext, name);
171         if (IS_ERR(thrinfo->task)) {
172                 thrinfo->id = 0;
173                 return PTR_ERR(thrinfo->task);
174         }
175         thrinfo->id = thrinfo->task->pid;
176         return 0;
177 }
178
179 /**
180  *      add_scsipending_entry - save off io command that is pending in
181  *                              Service Partition
182  *      @devdata: Pointer to devdata
183  *      @cmdtype: Specifies the type of command pending
184  *      @new:   The command to be saved
185  *
186  *      Saves off the io command that is being handled by the Service
187  *      Partition so that it can be completed when the response arrives. If
188  *      new is NULL it is assumed the entry refers only to the cmdrsp.
189  *      Returns the insert_location where the entry was added, or -1 if the
190  *      pending array is full.
191  */
192 static int add_scsipending_entry(struct visorhba_devdata *devdata,
193                                  char cmdtype, void *new)
194 {
195         unsigned long flags;
196         struct scsipending *entry;
197         int insert_location;
198
199         spin_lock_irqsave(&devdata->privlock, flags);
200         insert_location = devdata->nextinsert;
201         while (devdata->pending[insert_location].sent) {
202                 insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
203                 if (insert_location == (int)devdata->nextinsert) {
204                         spin_unlock_irqrestore(&devdata->privlock, flags);
205                         return -1;
206                 }
207         }
208
209         entry = &devdata->pending[insert_location];
210         memset(&entry->cmdrsp, 0, sizeof(entry->cmdrsp));
211         entry->cmdtype = cmdtype;
212         if (new)
213                 entry->sent = new;
214         else /* wants to send cmdrsp */
215                 entry->sent = &entry->cmdrsp;
216         devdata->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
217         spin_unlock_irqrestore(&devdata->privlock, flags);
218
219         return insert_location;
220 }
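/* The index returned by add_scsipending_entry() travels to the IOVM as the
 * request handle (cmdrsp->scsi.handle or cmdrsp->scsitaskmgmt.handle); when
 * the response comes back, drain_queue() hands that handle to
 * del_scsipending_ent() to recover the original scsi_cmnd (or embedded
 * cmdrsp) and free the slot.
 */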
221
222 /**
223  *      del_scsipending_ent - removes an entry from the pending array
224  *      @devdata: Device holding the pending array
225  *      @del: Index of the entry to remove
226  *
227  *      Clears the entry at index @del and returns the pointer that was
228  *      stored in its "sent" field, or NULL if the index is out of range.
229  */
230 static void *del_scsipending_ent(struct visorhba_devdata *devdata,
231                                  int del)
232 {
233         unsigned long flags;
234         void *sent = NULL;
235
236         if (del < MAX_PENDING_REQUESTS) {
237                 spin_lock_irqsave(&devdata->privlock, flags);
238                 sent = devdata->pending[del].sent;
239
240                 devdata->pending[del].cmdtype = 0;
241                 devdata->pending[del].sent = NULL;
242                 spin_unlock_irqrestore(&devdata->privlock, flags);
243         }
244
245         return sent;
246 }
247
248 /**
249  *      get_scsipending_cmdrsp - return the cmdrsp stored in a pending entry
250  *      @ddata: Device holding the pending array
251  *      @ent: Index of the entry that stores the cmdrsp
252  *
253  *      Each scsipending entry has a cmdrsp in it. The cmdrsp is only valid
254  *      if the "sent" field is not NULL
255  *      Returns a pointer to the cmdrsp.
256  */
257 static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
258                                                 int ent)
259 {
260         if (ddata->pending[ent].sent)
261                 return &ddata->pending[ent].cmdrsp;
262
263         return NULL;
264 }
265
266 /**
267  *      forward_taskmgmt_command - send taskmgmt command to the Service
268  *                                 Partition
269  *      @tasktype: Type of taskmgmt command
270  *      @scsicmd: The scsi command that triggered this request
271  *
272  *      Create a cmdrsp packet and send it to the Service Partition
273  *      that will service this request.
274  *      Returns SUCCESS if the request completed, FAILED otherwise.
275  */
276 static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
277                                     struct scsi_cmnd *scsicmd)
278 {
279         struct uiscmdrsp *cmdrsp;
280         struct scsi_device *scsidev = scsicmd->device;
281         struct visorhba_devdata *devdata =
282                 (struct visorhba_devdata *)scsidev->host->hostdata;
283         int notifyresult = 0xffff;
284         wait_queue_head_t notifyevent;
285         int scsicmd_id = 0;
286
287         if (devdata->serverdown || devdata->serverchangingstate)
288                 return FAILED;
289
290         scsicmd_id = add_scsipending_entry(devdata, CMD_SCSITASKMGMT_TYPE,
291                                            NULL);
292         if (scsicmd_id < 0)
293                 return FAILED;
294
295         cmdrsp = get_scsipending_cmdrsp(devdata, scsicmd_id);
296
297         init_waitqueue_head(&notifyevent);
298
299         /* build the taskmgmt request */
300         cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
301         /* specify the event that has to be triggered when this */
302         /* cmd is complete */
303         cmdrsp->scsitaskmgmt.notify_handle = (u64)&notifyevent;
304         cmdrsp->scsitaskmgmt.notifyresult_handle = (u64)&notifyresult;
305
306         /* save destination */
307         cmdrsp->scsitaskmgmt.tasktype = tasktype;
308         cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel;
309         cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
310         cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
311         cmdrsp->scsitaskmgmt.handle = scsicmd_id;
312
313         if (!visorchannel_signalinsert(devdata->dev->visorchannel,
314                                        IOCHAN_TO_IOPART,
315                                        cmdrsp))
316                 goto err_del_scsipending_ent;
317
318         /* It can take the Service Partition up to 35 seconds to complete
319          * an IO in some cases, so wait 45 seconds and error out
320          */
321         if (!wait_event_timeout(notifyevent, notifyresult != 0xffff,
322                                 msecs_to_jiffies(45000)))
323                 goto err_del_scsipending_ent;
324
325         if (tasktype == TASK_MGMT_ABORT_TASK)
326                 scsicmd->result = (DID_ABORT << 16);
327         else
328                 scsicmd->result = (DID_RESET << 16);
329
330         scsicmd->scsi_done(scsicmd);
331
332         return SUCCESS;
333
334 err_del_scsipending_ent:
335         del_scsipending_ent(devdata, scsicmd_id);
336         return FAILED;
337 }
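/* Completion plumbing for the wait above: notify_handle and
 * notifyresult_handle carry the kernel addresses of the local wait queue and
 * result word through the IOVM untouched.  complete_taskmgmt_command() and
 * the serverdown path cast them back, fill in the result and wake this
 * waiter.
 */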
338
339 /**
340  *      visorhba_abort_handler - Send TASK_MGMT_ABORT_TASK
341  *      @scsicmd: The scsicmd that needs to be aborted
342  *
343  *      Returns SUCCESS if inserted, failure otherwise
344  *
345  */
346 static int visorhba_abort_handler(struct scsi_cmnd *scsicmd)
347 {
348         /* issue TASK_MGMT_ABORT_TASK */
349         struct scsi_device *scsidev;
350         struct visordisk_info *vdisk;
351         struct visorhba_devdata *devdata;
352
353         scsidev = scsicmd->device;
354         devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
355         for_each_vdisk_match(vdisk, devdata, scsidev) {
356                 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
357                         atomic_inc(&vdisk->error_count);
358                 else
359                         atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
360         }
361         return forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsicmd);
362 }
363
364 /**
365  *      visorhba_device_reset_handler - Send TASK_MGMT_LUN_RESET
366  *      @scsicmd: The scsicmd that identifies the device to be reset
367  *
368  *      Returns SUCCESS if inserted, failure otherwise
369  */
370 static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd)
371 {
372         /* issue TASK_MGMT_LUN_RESET */
373         struct scsi_device *scsidev;
374         struct visordisk_info *vdisk;
375         struct visorhba_devdata *devdata;
376
377         scsidev = scsicmd->device;
378         devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
379         for_each_vdisk_match(vdisk, devdata, scsidev) {
380                 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
381                         atomic_inc(&vdisk->error_count);
382                 else
383                         atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
384         }
385         return forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsicmd);
386 }
387
388 /**
389  *      visorhba_bus_reset_handler - Send TASK_MGMT_TARGET_RESET for each
390  *                                   target on the bus
391  *      @scsicmd: The scsicmd that identifies the bus to be reset
392  *
393  *      Returns SUCCESS
394  */
395 static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd)
396 {
397         struct scsi_device *scsidev;
398         struct visordisk_info *vdisk;
399         struct visorhba_devdata *devdata;
400
401         scsidev = scsicmd->device;
402         devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
403         for_each_vdisk_match(vdisk, devdata, scsidev) {
404                 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
405                         atomic_inc(&vdisk->error_count);
406                 else
407                         atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
408         }
409         return forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsicmd);
410 }
411
412 /**
413  *      visorhba_host_reset_handler - Not supported
414  *      @scsicmd: The scsicmd that identifies the host to be reset
415  *
416  *      Not supported, return SUCCESS
417  *      Returns SUCCESS
418  */
419 static int
420 visorhba_host_reset_handler(struct scsi_cmnd *scsicmd)
421 {
422         /* not implemented; just report success */
423         return SUCCESS;
424 }
425
426 /**
427  *      visorhba_get_info - return the driver information string
428  *      @shp: Scsi host that is requesting information
429  *
430  *      Returns string with info
431  */
432 static const char *visorhba_get_info(struct Scsi_Host *shp)
433 {
434         /* Return the driver name as the info string */
435         return "visorhba";
436 }
437
438 /**
439  *      visorhba_queue_command_lck -- queues command to the Service Partition
440  *      @scsicmd: Command to be queued
441  *      @visorhba_cmnd_done: Completion function to call when the scsicmd is done
442  *
443  *      Queues the scsicmd to the Service Partition after converting it to a
444  *      uiscmdrsp structure.
445  *
446  *      Returns success if queued to the Service Partition, otherwise
447  *      failure.
448  */
449 static int
450 visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
451                            void (*visorhba_cmnd_done)(struct scsi_cmnd *))
452 {
453         struct uiscmdrsp *cmdrsp;
454         struct scsi_device *scsidev = scsicmd->device;
455         int insert_location;
456         unsigned char op;
457         unsigned char *cdb = scsicmd->cmnd;
458         struct Scsi_Host *scsihost = scsidev->host;
459         unsigned int i;
460         struct visorhba_devdata *devdata =
461                 (struct visorhba_devdata *)scsihost->hostdata;
462         struct scatterlist *sg = NULL;
463         struct scatterlist *sglist = NULL;
464         int err = 0;
465
466         if (devdata->serverdown || devdata->serverchangingstate)
467                 return SCSI_MLQUEUE_DEVICE_BUSY;
468
469         insert_location = add_scsipending_entry(devdata, CMD_SCSI_TYPE,
470                                                 (void *)scsicmd);
471
472         if (insert_location < 0)
473                 return SCSI_MLQUEUE_DEVICE_BUSY;
474
475         cmdrsp = get_scsipending_cmdrsp(devdata, insert_location);
476
477         cmdrsp->cmdtype = CMD_SCSI_TYPE;
478         /* save the pending insertion location. Deletion from pending
479          * will return the scsicmd pointer for completion
480          */
481         cmdrsp->scsi.handle = insert_location;
482
483         /* save the done function that we call when the cmd is complete */
484         scsicmd->scsi_done = visorhba_cmnd_done;
485         /* save destination */
486         cmdrsp->scsi.vdest.channel = scsidev->channel;
487         cmdrsp->scsi.vdest.id = scsidev->id;
488         cmdrsp->scsi.vdest.lun = scsidev->lun;
489         /* save datadir */
490         cmdrsp->scsi.data_dir = scsicmd->sc_data_direction;
491         memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);
492
493         cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);
494
495         /* keep track of the max buffer length so far. */
496         if (cmdrsp->scsi.bufflen > devdata->max_buff_len)
497                 devdata->max_buff_len = cmdrsp->scsi.bufflen;
498
499         if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO) {
500                 err = SCSI_MLQUEUE_DEVICE_BUSY;
501                 goto err_del_scsipending_ent;
502         }
503
504         /* convert buffer to phys information  */
505         /* buffer is scatterlist - copy it out */
506         sglist = scsi_sglist(scsicmd);
507
508         for_each_sg(sglist, sg, scsi_sg_count(scsicmd), i) {
509                 cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
510                 cmdrsp->scsi.gpi_list[i].length = sg->length;
511         }
512         cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);
513
514         op = cdb[0];
515         if (!visorchannel_signalinsert(devdata->dev->visorchannel,
516                                        IOCHAN_TO_IOPART,
517                                        cmdrsp)) {
518                 /* queue must be full and we aren't going to wait */
519                 err = SCSI_MLQUEUE_DEVICE_BUSY;
520                 goto err_del_scsipending_ent;
521         }
522         return 0;
523
524 err_del_scsipending_ent:
525         del_scsipending_ent(devdata, insert_location);
526         return err;
527 }
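/* The scatter-gather list is flattened into gpi_list[] as guest-physical
 * address/length pairs before being handed to the IO Service Partition,
 * hence the MAX_PHYS_INFO cap on the sg count above.
 */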
528
529 /**
530  *      visorhba_slave_alloc - called when new disk is discovered
531  *      @scsidev: New disk
532  *
533  *      Create a new visordisk_info structure and add it to our
534  *      list of vdisks.
535  *
536  *      Returns success when created, otherwise error.
537  */
538 static int visorhba_slave_alloc(struct scsi_device *scsidev)
539 {
540         /* this is called by the midlayer before scan for new devices --
541          * LLD can alloc any struct & do init if needed.
542          */
543         struct visordisk_info *vdisk;
544         struct visordisk_info *tmpvdisk;
545         struct visorhba_devdata *devdata;
546         struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
547
548         devdata = (struct visorhba_devdata *)scsihost->hostdata;
549         if (!devdata)
550                 return 0; /* even though we errored, treat as success */
551
552         for_each_vdisk_match(vdisk, devdata, scsidev)
553                 return 0; /* already allocated, return success */
554
555         tmpvdisk = kzalloc(sizeof(*tmpvdisk), GFP_ATOMIC);
556         if (!tmpvdisk)
557                 return -ENOMEM;
558
559         tmpvdisk->channel = scsidev->channel;
560         tmpvdisk->id = scsidev->id;
561         tmpvdisk->lun = scsidev->lun;
562         vdisk->next = tmpvdisk;
563         return 0;
564 }
565
566 /**
567  *      visorhba_slave_destroy - disk is going away
568  *      @scsidev: scsi device going away
569  *
570  *      Disk is going away, clean up resources.
571  *      Returns void.
572  */
573 static void visorhba_slave_destroy(struct scsi_device *scsidev)
574 {
575         /* midlevel calls this after device has been quiesced and
576          * before it is to be deleted.
577          */
578         struct visordisk_info *vdisk, *delvdisk;
579         struct visorhba_devdata *devdata;
580         struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
581
582         devdata = (struct visorhba_devdata *)scsihost->hostdata;
583         for_each_vdisk_match(vdisk, devdata, scsidev) {
584                 delvdisk = vdisk->next;
585                 vdisk->next = delvdisk->next;
586                 kfree(delvdisk);
587                 return;
588         }
589 }
590
591 static struct scsi_host_template visorhba_driver_template = {
592         .name = "Unisys Visor HBA",
593         .info = visorhba_get_info,
594         .queuecommand = visorhba_queue_command,
595         .eh_abort_handler = visorhba_abort_handler,
596         .eh_device_reset_handler = visorhba_device_reset_handler,
597         .eh_bus_reset_handler = visorhba_bus_reset_handler,
598         .eh_host_reset_handler = visorhba_host_reset_handler,
599         .shost_attrs = NULL,
600 #define visorhba_MAX_CMNDS 128
601         .can_queue = visorhba_MAX_CMNDS,
602         .sg_tablesize = 64,
603         .this_id = -1,
604         .slave_alloc = visorhba_slave_alloc,
605         .slave_destroy = visorhba_slave_destroy,
606         .use_clustering = ENABLE_CLUSTERING,
607 };
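/* The eh_*_handler hooks above all funnel into forward_taskmgmt_command(),
 * so each midlayer error-recovery step becomes a taskmgmt request to the IO
 * Service Partition; only the host reset is a local no-op.
 */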
608
609 /**
610  *      info_debugfs_read - debugfs interface to dump visorhba states
611  *      @file: Debug file
612  *      @buf: buffer to send back to user
613  *      @len: maximum number of bytes to write to buf
614  *      @offset: file offset to continue reading from
615  *
616  *      Dumps information about the visorhba driver and devices
617  *      TODO: Make this per vhba
618  *      Returns bytes_read
619  */
620 static ssize_t info_debugfs_read(struct file *file, char __user *buf,
621                                  size_t len, loff_t *offset)
622 {
623         ssize_t bytes_read = 0;
624         int str_pos = 0;
625         u64 phys_flags_addr;
626         int i;
627         struct visorhba_devdata *devdata;
628         char *vbuf;
629
630         if (len > MAX_BUF)
631                 len = MAX_BUF;
632         vbuf = kzalloc(len, GFP_KERNEL);
633         if (!vbuf)
634                 return -ENOMEM;
635
636         for (i = 0; i < VISORHBA_OPEN_MAX; i++) {
637                 if (!visorhbas_open[i].devdata)
638                         continue;
639
640                 devdata = visorhbas_open[i].devdata;
641
642                 str_pos += scnprintf(vbuf + str_pos,
643                                 len - str_pos, "max_buff_len:%u\n",
644                                 devdata->max_buff_len);
645
646                 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
647                                 "\ninterrupts_rcvd = %llu, interrupts_disabled = %llu\n",
648                                 devdata->interrupts_rcvd,
649                                 devdata->interrupts_disabled);
650                 str_pos += scnprintf(vbuf + str_pos,
651                                 len - str_pos, "\ninterrupts_notme = %llu,\n",
652                                 devdata->interrupts_notme);
653                 phys_flags_addr = virt_to_phys((__force  void *)
654                                                devdata->flags_addr);
655                 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
656                                 "flags_addr = %p, phys_flags_addr=0x%016llx, FeatureFlags=%llu\n",
657                                 devdata->flags_addr, phys_flags_addr,
658                                 (__le64)readq(devdata->flags_addr));
659                 str_pos += scnprintf(vbuf + str_pos,
660                         len - str_pos, "acquire_failed_cnt:%llu\n",
661                         devdata->acquire_failed_cnt);
662                 str_pos += scnprintf(vbuf + str_pos, len - str_pos, "\n");
663         }
664
665         bytes_read = simple_read_from_buffer(buf, len, offset, vbuf, str_pos);
666         kfree(vbuf);
667         return bytes_read;
668 }
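/* With debugfs mounted at its usual location, the statistics above can be
 * read from /sys/kernel/debug/visorhba/info; the directory and file are
 * created in visorhba_init().
 */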
669
670 /**
671  *      visorhba_serverdown_complete - clean up after the IO Service
672  *                                     Partition has gone down
673  *      @devdata: visorhba instance whose IO Service Partition went down
674  *
675  *      Stops the response-processing thread and fails all requests that
676  *      are still pending.
677  *      Returns void when finished cleaning up.
678  */
679 static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
680 {
681         int i;
682         struct scsipending *pendingdel = NULL;
683         struct scsi_cmnd *scsicmd = NULL;
684         struct uiscmdrsp *cmdrsp;
685         unsigned long flags;
686
687         /* Stop using the IOVM response queue (queue should be drained
688          * by the end)
689          */
690         kthread_stop(devdata->threadinfo.task);
691
692         /* Fail commands that weren't completed */
693         spin_lock_irqsave(&devdata->privlock, flags);
694         for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
695                 pendingdel = &devdata->pending[i];
696                 switch (pendingdel->cmdtype) {
697                 case CMD_SCSI_TYPE:
698                         scsicmd = pendingdel->sent;
699                         scsicmd->result = DID_RESET << 16;
700                         if (scsicmd->scsi_done)
701                                 scsicmd->scsi_done(scsicmd);
702                         break;
703                 case CMD_SCSITASKMGMT_TYPE:
704                         cmdrsp = pendingdel->sent;
705                         *(int *)cmdrsp->scsitaskmgmt.notifyresult_handle
706                                                         = TASK_MGMT_FAILED;
707                         wake_up_all((wait_queue_head_t *)
708                                     cmdrsp->scsitaskmgmt.notify_handle);
709                         break;
710                 case CMD_VDISKMGMT_TYPE:
711                         cmdrsp = pendingdel->sent;
712                         cmdrsp->vdiskmgmt.notifyresult_handle
713                                                         = VDISK_MGMT_FAILED;
714                         wake_up_all((wait_queue_head_t *)
715                                     cmdrsp->vdiskmgmt.notify_handle);
716                         break;
717                 default:
718                         break;
719                 }
720                 pendingdel->cmdtype = 0;
721                 pendingdel->sent = NULL;
722         }
723         spin_unlock_irqrestore(&devdata->privlock, flags);
724
725         devdata->serverdown = true;
726         devdata->serverchangingstate = false;
727 }
728
729 /**
730  *      visorhba_serverdown - Got notified that the IOVM is down
731  *      @devdata: visorhba that is being serviced by downed IOVM.
732  *
733  *      Something happened to the IOVM; mark the state change and fail the
734  *      outstanding requests via visorhba_serverdown_complete().
735  *      Returns 0 on success, -EINVAL if a state change is already in progress
736  */
737 static int visorhba_serverdown(struct visorhba_devdata *devdata)
738 {
739         if (!devdata->serverdown && !devdata->serverchangingstate) {
740                 devdata->serverchangingstate = true;
741                 visorhba_serverdown_complete(devdata);
742         } else if (devdata->serverchangingstate) {
743                 return -EINVAL;
744         }
745         return 0;
746 }
747
748 /**
749  *      do_scsi_linuxstat - scsi command returned linuxstat
750  *      @cmdrsp: response from IOVM
751  *      @scsicmd: Command issued.
752  *
753  *      Don't log errors for disk-not-present inquiries
754  *      Returns void
755  */
756 static void
757 do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
758 {
759         struct visorhba_devdata *devdata;
760         struct visordisk_info *vdisk;
761         struct scsi_device *scsidev;
762         struct sense_data *sd;
763
764         scsidev = scsicmd->device;
765         memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);
766         sd = (struct sense_data *)scsicmd->sense_buffer;
767
768         /* Do not log errors for disk-not-present inquiries */
769         if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
770             (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
771             (cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT))
772                 return;
773         /* Okay see what our error_count is here.... */
774         devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
775         for_each_vdisk_match(vdisk, devdata, scsidev) {
776                 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) {
777                         atomic_inc(&vdisk->error_count);
778                         atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
779                 }
780         }
781 }
782
783 /**
784  *      do_scsi_nolinuxstat - scsi command didn't have linuxstat
785  *      @cmdrsp: response from IOVM
786  *      @scsicmd: Command issued.
787  *
788  *      Handle response when no linuxstat was returned
789  *      Returns void
790  */
791 static void
792 do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
793 {
794         struct scsi_device *scsidev;
795         unsigned char *buf;
796         struct scatterlist *sg;
797         unsigned int i;
798         char *this_page;
799         char *this_page_orig;
800         int bufind = 0;
801         struct visordisk_info *vdisk;
802         struct visorhba_devdata *devdata;
803
804         scsidev = scsicmd->device;
805         if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
806             (cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN)) {
807                 if (cmdrsp->scsi.no_disk_result == 0)
808                         return;
809
810                 buf = kzalloc(sizeof(char) * 36, GFP_KERNEL);
811                 if (!buf)
812                         return;
813
814                 /* Linux scsi code wants a device at Lun 0
815                  * to issue report luns, but we don't want
816                  * a disk there so we'll present a processor
817                  * there.
818                  */
819                 SET_NO_DISK_INQUIRY_RESULT(buf, cmdrsp->scsi.bufflen,
820                                            scsidev->lun,
821                                            DEV_DISK_CAPABLE_NOT_PRESENT,
822                                            DEV_NOT_CAPABLE);
823
824                 if (scsi_sg_count(scsicmd) == 0) {
825                         memcpy(scsi_sglist(scsicmd), buf,
826                                cmdrsp->scsi.bufflen);
827                         kfree(buf);
828                         return;
829                 }
830
831                 sg = scsi_sglist(scsicmd);
832                 for (i = 0; i < scsi_sg_count(scsicmd); i++) {
833                         this_page_orig = kmap_atomic(sg_page(sg + i));
834                         this_page = (void *)((unsigned long)this_page_orig |
835                                              sg[i].offset);
836                         memcpy(this_page, buf + bufind, sg[i].length);
837                         kunmap_atomic(this_page_orig);
838                 }
839                 kfree(buf);
840         } else {
841                 devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
842                 for_each_vdisk_match(vdisk, devdata, scsidev) {
843                         if (atomic_read(&vdisk->ios_threshold) > 0) {
844                                 atomic_dec(&vdisk->ios_threshold);
845                                 if (atomic_read(&vdisk->ios_threshold) == 0)
846                                         atomic_set(&vdisk->error_count, 0);
847                         }
848                 }
849         }
850 }
851
852 /**
853  *      complete_scsi_command - complete a scsi command
854  *      @cmdrsp: Response from the Service Partition
855  *      @scsicmd: The scsi command
856  *
857  *      Response returned by the Service Partition, finish it and send
858  *      completion to the scsi midlayer.
859  *      Returns void.
860  */
861 static void
862 complete_scsi_command(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
863 {
864         /* take what we need out of cmdrsp and complete the scsicmd */
865         scsicmd->result = cmdrsp->scsi.linuxstat;
866         if (cmdrsp->scsi.linuxstat)
867                 do_scsi_linuxstat(cmdrsp, scsicmd);
868         else
869                 do_scsi_nolinuxstat(cmdrsp, scsicmd);
870
871         scsicmd->scsi_done(scsicmd);
872 }
873
874 /* DELETE VDISK TASK MGMT COMMANDS */
875 static inline void complete_vdiskmgmt_command(struct uiscmdrsp *cmdrsp)
876 {
877         /* copy the result of the vdiskmgmt command and
878          * wake up the error handler that is waiting for this
879          */
880         cmdrsp->vdiskmgmt.notifyresult_handle = cmdrsp->vdiskmgmt.result;
881         wake_up_all((wait_queue_head_t *)cmdrsp->vdiskmgmt.notify_handle);
882 }
883
884 /**
885  *      complete_taskmgmt_command - complete task management
886  *      @cmdrsp: Response from the IOVM
887  *
888  *      Service Partition returned the result of the task management
889  *      command. Wake up anyone waiting for it.
890  *      Returns void
891  */
892 static inline void complete_taskmgmt_command(struct uiscmdrsp *cmdrsp)
893 {
894         /* write the taskmgmt result through the waiter's address stashed
895          * in notifyresult_handle, then wake the waiting error handler */
896         *(int *)cmdrsp->scsitaskmgmt.notifyresult_handle =
897                                                 cmdrsp->scsitaskmgmt.result;
898         wake_up_all((wait_queue_head_t *)cmdrsp->scsitaskmgmt.notify_handle);
899 }
900
901 static struct work_struct dar_work_queue;
902 static struct diskaddremove *dar_work_queue_head;
903 static spinlock_t dar_work_queue_lock; /* Lock to protect dar_work_queue_head */
904 static unsigned short dar_work_queue_sched;
905
906 /**
907  *      queue_disk_add_remove - IOSP has sent us an add/remove request
908  *      @dar: disk add/remove request
909  *
910  *      Queue the work needed to add/remove a disk.
911  *      Returns void
912  */
913 static inline void queue_disk_add_remove(struct diskaddremove *dar)
914 {
915         unsigned long flags;
916
917         spin_lock_irqsave(&dar_work_queue_lock, flags);
918         if (!dar_work_queue_head) {
919                 dar_work_queue_head = dar;
920                 dar->next = NULL;
921         } else {
922                 dar->next = dar_work_queue_head;
923                 dar_work_queue_head = dar;
924         }
925         if (!dar_work_queue_sched) {
926                 schedule_work(&dar_work_queue);
927                 dar_work_queue_sched = 1;
928         }
929         spin_unlock_irqrestore(&dar_work_queue_lock, flags);
930 }
931
932 /**
933  *      process_disk_notify - IOSP has sent a process disk notify event
934  *      @shost: Scsi host
935  *      @cmdrsp: Response from the IOSP
936  *
937  *      Queue it to the work queue.
938  *      Return void.
939  */
940 static void process_disk_notify(struct Scsi_Host *shost,
941                                 struct uiscmdrsp *cmdrsp)
942 {
943         struct diskaddremove *dar;
944
945         dar = kzalloc(sizeof(*dar), GFP_ATOMIC);
946         if (dar) {
947                 dar->add = cmdrsp->disknotify.add;
948                 dar->shost = shost;
949                 dar->channel = cmdrsp->disknotify.channel;
950                 dar->id = cmdrsp->disknotify.id;
951                 dar->lun = cmdrsp->disknotify.lun;
952                 queue_disk_add_remove(dar);
953         }
954 }
955
956 /**
957  *      drain_queue - pull responses out of iochannel
958  *      @cmdrsp: Scratch buffer that holds each response pulled from the IOSP
959  *      @devdata: device that owns this iochannel
960  *
961  *      Pulls responses out of the iochannel and processes them.
962  *      Returns void
963  */
964 static void
965 drain_queue(struct uiscmdrsp *cmdrsp, struct visorhba_devdata *devdata)
966 {
967         struct scsi_cmnd *scsicmd;
968         struct Scsi_Host *shost = devdata->scsihost;
969
970         while (1) {
971                 if (!visorchannel_signalremove(devdata->dev->visorchannel,
972                                                IOCHAN_FROM_IOPART,
973                                                cmdrsp))
974                         break; /* queue empty */
975
976                 if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
977                         /* scsicmd location is returned by the
978                          * deletion
979                          */
980                         scsicmd = del_scsipending_ent(devdata,
981                                                       cmdrsp->scsi.handle);
982                         if (!scsicmd)
983                                 break;
984                         /* complete the orig cmd */
985                         complete_scsi_command(cmdrsp, scsicmd);
986                 } else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
987                         if (!del_scsipending_ent(devdata,
988                                                  cmdrsp->scsitaskmgmt.handle))
989                                 break;
990                         complete_taskmgmt_command(cmdrsp);
991                 } else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE) {
992                         /* The vHba pointer has no meaning in a
993                          * guest partition. Let's be safe and set it
994                          * to NULL now. Do not use it here!
995                          */
996                         cmdrsp->disknotify.v_hba = NULL;
997                         process_disk_notify(shost, cmdrsp);
998                 } else if (cmdrsp->cmdtype == CMD_VDISKMGMT_TYPE) {
999                         if (!del_scsipending_ent(devdata,
1000                                                  cmdrsp->vdiskmgmt.handle))
1001                                 break;
1002                         complete_vdiskmgmt_command(cmdrsp);
1003                 }
1004                 /* cmdrsp is now available for reuse */
1005         }
1006 }
1007
1008 /**
1009  *      process_incoming_rsps - Process responses from IOSP
1010  *      @v: void pointer to visorhba_devdata
1011  *
1012  *      Main function for the thread that processes the responses
1013  *      from the IO Service Partition. When the queue is empty, sleep for
1014  *      up to thread_wait_ms before checking it again.
1015  */
1016 static int process_incoming_rsps(void *v)
1017 {
1018         struct visorhba_devdata *devdata = v;
1019         struct uiscmdrsp *cmdrsp = NULL;
1020         const int size = sizeof(*cmdrsp);
1021
1022         cmdrsp = kmalloc(size, GFP_ATOMIC);
1023         if (!cmdrsp)
1024                 return -ENOMEM;
1025
1026         while (1) {
1027                 if (kthread_should_stop())
1028                         break;
1029                 wait_event_interruptible_timeout(
1030                         devdata->rsp_queue, (atomic_read(
1031                                              &devdata->interrupt_rcvd) == 1),
1032                                 msecs_to_jiffies(devdata->thread_wait_ms));
1033                 /* drain queue */
1034                 drain_queue(cmdrsp, devdata);
1035         }
1036         kfree(cmdrsp);
1037         return 0;
1038 }
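/* There is no interrupt path in this driver (channel_interrupt is NULL and
 * visorhba_probe() advertises ULTRA_IO_CHANNEL_IS_POLLING), so this thread
 * polls the response queue every thread_wait_ms (2 ms) or whenever
 * rsp_queue is woken.
 */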
1039
1040 /**
1041  *      visorhba_pause - function to handle visorbus pause messages
1042  *      @dev: device that is pausing.
1043  *      @complete_func: function to call when finished
1044  *
1045  *      Something has happened to the IO Service Partition that is
1046  *      handling this device. Quiet this device and reset commands
1047  *      so that the Service Partition can be corrected.
1048  *      Returns 0.
1049  */
1050 static int visorhba_pause(struct visor_device *dev,
1051                           visorbus_state_complete_func complete_func)
1052 {
1053         struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
1054
1055         visorhba_serverdown(devdata);
1056         complete_func(dev, 0);
1057         return 0;
1058 }
1059
1060 /**
1061  *      visorhba_resume - function called when the IO Service Partition is back
1062  *      @dev: device that is resuming.
1063  *      @complete_func: function to call when finished
1064  *
1065  *      Yay! The IO Service Partition is back, the channel has been wiped
1066  *      so let's re-establish the connection and start processing responses.
1067  *      Returns 0 on success, error on failure.
1068  */
1069 static int visorhba_resume(struct visor_device *dev,
1070                            visorbus_state_complete_func complete_func)
1071 {
1072         struct visorhba_devdata *devdata;
1073
1074         devdata = dev_get_drvdata(&dev->device);
1075         if (!devdata)
1076                 return -EINVAL;
1077
1078         if (devdata->serverdown && !devdata->serverchangingstate)
1079                 devdata->serverchangingstate = true;
1080
1081         visor_thread_start(&devdata->threadinfo, process_incoming_rsps,
1082                            devdata, "vhba_incoming");
1083
1084         devdata->serverdown = false;
1085         devdata->serverchangingstate = false;
1086
1087         return 0;
1088 }
1089
1090 /**
1091  *      visorhba_probe - device has been discovered, set it up
1092  *      @dev: visor_device that was discovered
1093  *
1094  *      A new HBA was discovered; perform its initial setup.
1095  *      Return 0 on success, otherwise error.
1096  */
1097 static int visorhba_probe(struct visor_device *dev)
1098 {
1099         struct Scsi_Host *scsihost;
1100         struct vhba_config_max max;
1101         struct visorhba_devdata *devdata = NULL;
1102         int i, err, channel_offset;
1103         u64 features;
1104
1105         scsihost = scsi_host_alloc(&visorhba_driver_template,
1106                                    sizeof(*devdata));
1107         if (!scsihost)
1108                 return -ENODEV;
1109
1110         channel_offset = offsetof(struct spar_io_channel_protocol,
1111                                   vhba.max);
1112         err = visorbus_read_channel(dev, channel_offset, &max,
1113                                     sizeof(struct vhba_config_max));
1114         if (err < 0)
1115                 goto err_scsi_host_put;
1116
1117         scsihost->max_id = (unsigned)max.max_id;
1118         scsihost->max_lun = (unsigned)max.max_lun;
1119         scsihost->cmd_per_lun = (unsigned)max.cmd_per_lun;
1120         scsihost->max_sectors =
1121             (unsigned short)(max.max_io_size >> 9);
1122         scsihost->sg_tablesize =
1123             (unsigned short)(max.max_io_size / PAGE_SIZE);
1124         if (scsihost->sg_tablesize > MAX_PHYS_INFO)
1125                 scsihost->sg_tablesize = MAX_PHYS_INFO;
1126         err = scsi_add_host(scsihost, &dev->device);
1127         if (err < 0)
1128                 goto err_scsi_host_put;
1129
1130         devdata = (struct visorhba_devdata *)scsihost->hostdata;
1131         for (i = 0; i < VISORHBA_OPEN_MAX; i++) {
1132                 if (!visorhbas_open[i].devdata) {
1133                         visorhbas_open[i].devdata = devdata;
1134                         break;
1135                 }
1136         }
1137
1138         devdata->dev = dev;
1139         dev_set_drvdata(&dev->device, devdata);
1140
1141         init_waitqueue_head(&devdata->rsp_queue);
1142         spin_lock_init(&devdata->privlock);
1143         devdata->serverdown = false;
1144         devdata->serverchangingstate = false;
1145         devdata->scsihost = scsihost;
1146
1147         channel_offset = offsetof(struct spar_io_channel_protocol,
1148                                   channel_header.features);
1149         err = visorbus_read_channel(dev, channel_offset, &features, 8);
1150         if (err)
1151                 goto err_scsi_remove_host;
1152         features |= ULTRA_IO_CHANNEL_IS_POLLING;
1153         err = visorbus_write_channel(dev, channel_offset, &features, 8);
1154         if (err)
1155                 goto err_scsi_remove_host;
1156
1157         devdata->thread_wait_ms = 2;
1158         visor_thread_start(&devdata->threadinfo, process_incoming_rsps,
1159                            devdata, "vhba_incoming");
1160
1161         scsi_scan_host(scsihost);
1162
1163         return 0;
1164
1165 err_scsi_remove_host:
1166         scsi_remove_host(scsihost);
1167
1168 err_scsi_host_put:
1169         scsi_host_put(scsihost);
1170         return err;
1171 }
1172
1173 /**
1174  *      visorhba_remove - remove a visorhba device
1175  *      @dev: Device to remove
1176  *
1177  *      Removes the visorhba device.
1178  *      Returns void.
1179  */
1180 static void visorhba_remove(struct visor_device *dev)
1181 {
1182         struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
1183         struct Scsi_Host *scsihost = NULL;
1184
1185         if (!devdata)
1186                 return;
1187
1188         scsihost = devdata->scsihost;
1189         kthread_stop(devdata->threadinfo.task);
1190         scsi_remove_host(scsihost);
1191         scsi_host_put(scsihost);
1192
1193         dev_set_drvdata(&dev->device, NULL);
1194 }
1195
1196 /**
1197  *      visorhba_init           - driver init routine
1198  *
1199  *      Initialize the visorhba driver and register it with visorbus
1200  *      to handle s-Par virtual host bus adapters.
1201  */
1202 static int visorhba_init(void)
1203 {
1204         struct dentry *ret;
1205         int rc = -ENOMEM;
1206
1207         visorhba_debugfs_dir = debugfs_create_dir("visorhba", NULL);
1208         if (!visorhba_debugfs_dir)
1209                 return -ENOMEM;
1210
1211         ret = debugfs_create_file("info", S_IRUSR, visorhba_debugfs_dir, NULL,
1212                                   &debugfs_info_fops);
1213
1214         if (!ret) {
1215                 rc = -EIO;
1216                 goto cleanup_debugfs;
1217         }
1218
1219         rc = visorbus_register_visor_driver(&visorhba_driver);
1220         if (rc)
1221                 goto cleanup_debugfs;
1222
1223         return rc;
1224
1225 cleanup_debugfs:
1226         debugfs_remove_recursive(visorhba_debugfs_dir);
1227
1228         return rc;
1229 }
1230
1231 /**
1232  *      visorhba_exit           - driver exit routine
1233  *
1234  *      Unregister driver from the bus and free up memory.
1235  */
1236 static void visorhba_exit(void)
1237 {
1238         visorbus_unregister_visor_driver(&visorhba_driver);
1239         debugfs_remove_recursive(visorhba_debugfs_dir);
1240 }
1241
1242 module_init(visorhba_init);
1243 module_exit(visorhba_exit);
1244
1245 MODULE_AUTHOR("Unisys");
1246 MODULE_LICENSE("GPL");
1247 MODULE_DESCRIPTION("s-Par hba driver");