/* visorchipset.c
 *
 * Copyright (C) 2010 - 2015 UNISYS CORPORATION
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */

#include <linux/acpi.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/nls.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/uuid.h>
#include <linux/crash_dump.h>

#include "visorbus.h"
#include "visorbus_private.h"
#include "vmcallinterface.h"

#define CURRENT_FILE_PC VISOR_BUS_PC_visorchipset_c

#define POLLJIFFIES_CONTROLVMCHANNEL_FAST   1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100

#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)

#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET     0x00000000

#define UNISYS_SPAR_LEAF_ID 0x40000000

/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
#define UNISYS_SPAR_ID_EBX 0x73696e55
#define UNISYS_SPAR_ID_ECX 0x70537379
#define UNISYS_SPAR_ID_EDX 0x34367261
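
/*
 * Illustrative sketch (not a verbatim excerpt of this driver): the three
 * ID words above spell out "UnisysSpar64" in little-endian byte order
 * across ebx/ecx/edx, so a guest can detect s-Par with a CPUID check
 * along these lines (running_under_spar is a hypothetical flag):
 *
 *      unsigned int eax, ebx, ecx, edx;
 *
 *      cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
 *      if (ebx == UNISYS_SPAR_ID_EBX && ecx == UNISYS_SPAR_ID_ECX &&
 *          edx == UNISYS_SPAR_ID_EDX)
 *              running_under_spar = true;
 */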

/*
 * Module parameters
 */
static int visorchipset_major;

static int
visorchipset_open(struct inode *inode, struct file *file)
{
        unsigned int minor_number = iminor(inode);

        if (minor_number)
                return -ENODEV;
        return 0;
}

static int
visorchipset_release(struct inode *inode, struct file *file)
{
        return 0;
}

/*
 * When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
 * we switch to slow polling mode. As soon as we get a controlvm
 * message, we switch back to fast polling mode.
 */
#define MIN_IDLE_SECONDS 10
static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
/* when we got our last controlvm message */
static unsigned long most_recent_message_jiffies;
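
/*
 * Illustrative sketch (an assumption about the periodic worker defined
 * later in this file, not a verbatim excerpt): the switch between the
 * two poll rates would look roughly like this, where got_message is a
 * hypothetical stand-in for the worker's actual dequeue result:
 *
 *      if (got_message) {
 *              most_recent_message_jiffies = jiffies;
 *              poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
 *      } else if (time_after(jiffies, most_recent_message_jiffies +
 *                            MIN_IDLE_SECONDS * HZ)) {
 *              poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
 *      }
 */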

struct parser_context {
        unsigned long allocbytes;
        unsigned long param_bytes;
        u8 *curr;
        unsigned long bytes_remaining;
        bool byte_stream;
        char data[0];
};
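
/*
 * Illustrative sketch (assumed allocation pattern; the actual
 * parser-initialization code lives elsewhere in this file): data[0]
 * above is a flexible-array member, so a context and its payload are
 * allocated as a single block, with payload_bytes a hypothetical size:
 *
 *      struct parser_context *ctx;
 *
 *      ctx = kzalloc(sizeof(*ctx) + payload_bytes,
 *                    GFP_KERNEL | __GFP_NORETRY);
 *      if (ctx)
 *              ctx->allocbytes = sizeof(*ctx) + payload_bytes;
 */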

static struct delayed_work periodic_controlvm_work;

static struct cdev file_cdev;
static struct visorchannel **file_controlvm_channel;

static struct visorchannel *controlvm_channel;
static unsigned long controlvm_payload_bytes_buffered;

/*
 * The following globals are used to handle the scenario where we are unable to
 * offload the payload from a controlvm message due to memory requirements. In
 * this scenario, we simply stash the controlvm message, then attempt to
 * process it again the next time controlvm_periodic_work() runs.
 */
static struct controlvm_message controlvm_pending_msg;
static bool controlvm_pending_msg_valid;
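
/*
 * Illustrative sketch (assumed shape of the stash-and-retry logic that
 * uses the two globals above; the real handler appears later in this
 * file, and cannot_buffer_payload() is a hypothetical predicate standing
 * in for the driver's real out-of-memory check):
 *
 *      if (!controlvm_pending_msg_valid && cannot_buffer_payload(&msg)) {
 *              controlvm_pending_msg = msg;
 *              controlvm_pending_msg_valid = true;
 *      }
 */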

struct parahotplug_request {
        struct list_head list;
        int id;
        unsigned long expiration;
        struct controlvm_message msg;
};

/* info for /dev/visorchipset */
static dev_t major_dev = -1; /* indicates major number for the device */

/* show/store functions for the sysfs attributes below */
static ssize_t toolaction_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        u8 tool_action = 0;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   tool_action), &tool_action, sizeof(u8));
        return sprintf(buf, "%u\n", tool_action);
}

static ssize_t toolaction_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
        u8 tool_action;
        int ret;

        if (kstrtou8(buf, 10, &tool_action))
                return -EINVAL;

        ret = visorchannel_write
                (controlvm_channel,
                 offsetof(struct spar_controlvm_channel_protocol,
                          tool_action),
                 &tool_action, sizeof(u8));

        if (ret)
                return ret;
        return count;
}
static DEVICE_ATTR_RW(toolaction);

static ssize_t boottotool_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct efi_spar_indication efi_spar_indication;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   efi_spar_ind), &efi_spar_indication,
                          sizeof(struct efi_spar_indication));
        return sprintf(buf, "%u\n", efi_spar_indication.boot_to_tool);
}

static ssize_t boottotool_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
        int val, ret;
        struct efi_spar_indication efi_spar_indication = { 0 };

        if (kstrtoint(buf, 10, &val))
                return -EINVAL;

        efi_spar_indication.boot_to_tool = val;
        ret = visorchannel_write
                (controlvm_channel,
                 offsetof(struct spar_controlvm_channel_protocol,
                          efi_spar_ind), &efi_spar_indication,
                 sizeof(struct efi_spar_indication));

        if (ret)
                return ret;
        return count;
}
static DEVICE_ATTR_RW(boottotool);

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        u32 error = 0;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   installation_error),
                          &error, sizeof(u32));
        return sprintf(buf, "%u\n", error);
}

static ssize_t error_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count)
{
        u32 error;
        int ret;

        if (kstrtou32(buf, 10, &error))
                return -EINVAL;

        ret = visorchannel_write
                (controlvm_channel,
                 offsetof(struct spar_controlvm_channel_protocol,
                          installation_error),
                 &error, sizeof(u32));
        if (ret)
                return ret;
        return count;
}
static DEVICE_ATTR_RW(error);

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        u32 text_id = 0;

        visorchannel_read
                (controlvm_channel,
                 offsetof(struct spar_controlvm_channel_protocol,
                          installation_text_id),
                 &text_id, sizeof(u32));
        return sprintf(buf, "%u\n", text_id);
}

static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count)
{
        u32 text_id;
        int ret;

        if (kstrtou32(buf, 10, &text_id))
                return -EINVAL;

        ret = visorchannel_write
                (controlvm_channel,
                 offsetof(struct spar_controlvm_channel_protocol,
                          installation_text_id),
                 &text_id, sizeof(u32));
        if (ret)
                return ret;
        return count;
}
static DEVICE_ATTR_RW(textid);

static ssize_t remaining_steps_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        u16 remaining_steps = 0;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   installation_remaining_steps),
                          &remaining_steps, sizeof(u16));
        return sprintf(buf, "%hu\n", remaining_steps);
}

static ssize_t remaining_steps_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        u16 remaining_steps;
        int ret;

        if (kstrtou16(buf, 10, &remaining_steps))
                return -EINVAL;

        ret = visorchannel_write
                (controlvm_channel,
                 offsetof(struct spar_controlvm_channel_protocol,
                          installation_remaining_steps),
                 &remaining_steps, sizeof(u16));
        if (ret)
                return ret;
        return count;
}
static DEVICE_ATTR_RW(remaining_steps);

static uuid_le
parser_id_get(struct parser_context *ctx)
{
        struct spar_controlvm_parameters_header *phdr = NULL;

        phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
        return phdr->id;
}

static void parser_done(struct parser_context *ctx)
{
        controlvm_payload_bytes_buffered -= ctx->param_bytes;
        kfree(ctx);
}

static void *
parser_string_get(struct parser_context *ctx)
{
        u8 *pscan;
        unsigned long nscan;
        long value_length = -1;
        void *value = NULL;
        unsigned long i;

        pscan = ctx->curr;
        nscan = ctx->bytes_remaining;
        if (nscan == 0)
                return NULL;
        if (!pscan)
                return NULL;
        for (i = 0; i < nscan; i++) {
                if (pscan[i] == '\0') {
                        value_length = i;
                        break;
                }
        }
        if (value_length < 0)   /* '\0' was not found; use whole buffer */
                value_length = nscan;
        value = kmalloc(value_length + 1, GFP_KERNEL | __GFP_NORETRY);
        if (!value)
                return NULL;
        if (value_length > 0)
                memcpy(value, pscan, value_length);
        ((u8 *)value)[value_length] = '\0';
        return value;
}

static void *
parser_name_get(struct parser_context *ctx)
{
        struct spar_controlvm_parameters_header *phdr = NULL;

        phdr = (struct spar_controlvm_parameters_header *)(ctx->data);

        if (phdr->name_offset + phdr->name_length > ctx->param_bytes)
                return NULL;

        ctx->curr = ctx->data + phdr->name_offset;
        ctx->bytes_remaining = phdr->name_length;
        return parser_string_get(ctx);
}

struct visor_busdev {
        u32 bus_no;
        u32 dev_no;
};

static int match_visorbus_dev_by_id(struct device *dev, void *data)
{
        struct visor_device *vdev = to_visor_device(dev);
        struct visor_busdev *id = data;
        u32 bus_no = id->bus_no;
        u32 dev_no = id->dev_no;

        if ((vdev->chipset_bus_no == bus_no) &&
            (vdev->chipset_dev_no == dev_no))
                return 1;

        return 0;
}

struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
                                               struct visor_device *from)
{
        struct device *dev;
        struct device *dev_start = NULL;
        struct visor_device *vdev = NULL;
        struct visor_busdev id = {
                        .bus_no = bus_no,
                        .dev_no = dev_no
                };

        if (from)
                dev_start = &from->device;
        dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
                              match_visorbus_dev_by_id);
        if (dev)
                vdev = to_visor_device(dev);
        return vdev;
}

static void
controlvm_init_response(struct controlvm_message *msg,
                        struct controlvm_message_header *msg_hdr, int response)
{
        memset(msg, 0, sizeof(struct controlvm_message));
        memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
        msg->hdr.payload_bytes = 0;
        msg->hdr.payload_vm_offset = 0;
        msg->hdr.payload_max_bytes = 0;
        if (response < 0) {
                msg->hdr.flags.failed = 1;
                msg->hdr.completion_status = (u32)(-response);
        }
}

static int
controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
                               int response,
                               enum ultra_chipset_feature features)
{
        struct controlvm_message outmsg;

        controlvm_init_response(&outmsg, msg_hdr, response);
        outmsg.cmd.init_chipset.features = features;
        return visorchannel_signalinsert(controlvm_channel,
                                         CONTROLVM_QUEUE_REQUEST, &outmsg);
}

static int
chipset_init(struct controlvm_message *inmsg)
{
        static int chipset_inited;
        enum ultra_chipset_feature features = 0;
        int rc = CONTROLVM_RESP_SUCCESS;
        int res = 0;

        POSTCODE_LINUX(CHIPSET_INIT_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);
        if (chipset_inited) {
                rc = -CONTROLVM_RESP_ALREADY_DONE;
                res = -EIO;
                goto out_respond;
        }
        chipset_inited = 1;
        POSTCODE_LINUX(CHIPSET_INIT_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);

        /*
         * Set features to indicate we support parahotplug (if Command
         * also supports it).
         */
        features = inmsg->cmd.init_chipset.features &
                   ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;

        /*
         * Set the "reply" bit so Command knows this is a
         * features-aware driver.
         */
        features |= ULTRA_CHIPSET_FEATURE_REPLY;

out_respond:
        if (inmsg->hdr.flags.response_expected)
                res = controlvm_respond_chipset_init(&inmsg->hdr, rc, features);

        return res;
}

static int
controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
{
        struct controlvm_message outmsg;

        controlvm_init_response(&outmsg, msg_hdr, response);
        if (outmsg.hdr.flags.test_message == 1)
                return -EINVAL;

        return visorchannel_signalinsert(controlvm_channel,
                                         CONTROLVM_QUEUE_REQUEST, &outmsg);
}

static int controlvm_respond_physdev_changestate(
                struct controlvm_message_header *msg_hdr, int response,
                struct spar_segment_state state)
{
        struct controlvm_message outmsg;

        controlvm_init_response(&outmsg, msg_hdr, response);
        outmsg.cmd.device_change_state.state = state;
        outmsg.cmd.device_change_state.flags.phys_device = 1;
        return visorchannel_signalinsert(controlvm_channel,
                                         CONTROLVM_QUEUE_REQUEST, &outmsg);
}

enum crash_obj_type {
        CRASH_DEV,
        CRASH_BUS,
};

static int
save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ)
{
        u32 local_crash_msg_offset;
        u16 local_crash_msg_count;
        int err;

        err = visorchannel_read(controlvm_channel,
                                offsetof(struct spar_controlvm_channel_protocol,
                                         saved_crash_message_count),
                                &local_crash_msg_count, sizeof(u16));
        if (err) {
                POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
                               DIAG_SEVERITY_ERR);
                return err;
        }

        if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
                POSTCODE_LINUX(CRASH_DEV_COUNT_FAILURE_PC, 0,
                               local_crash_msg_count,
                               DIAG_SEVERITY_ERR);
                return -EIO;
        }

        err = visorchannel_read(controlvm_channel,
                                offsetof(struct spar_controlvm_channel_protocol,
                                         saved_crash_message_offset),
                                &local_crash_msg_offset, sizeof(u32));
        if (err) {
                POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
                               DIAG_SEVERITY_ERR);
                return err;
        }

        switch (typ) {
        case CRASH_DEV:
                local_crash_msg_offset += sizeof(struct controlvm_message);
                err = visorchannel_write(controlvm_channel,
                                         local_crash_msg_offset,
                                         msg,
                                         sizeof(struct controlvm_message));
                if (err) {
                        POSTCODE_LINUX(SAVE_MSG_DEV_FAILURE_PC, 0, 0,
                                       DIAG_SEVERITY_ERR);
                        return err;
                }
                break;
        case CRASH_BUS:
                err = visorchannel_write(controlvm_channel,
                                         local_crash_msg_offset,
                                         msg,
                                         sizeof(struct controlvm_message));
                if (err) {
                        POSTCODE_LINUX(SAVE_MSG_BUS_FAILURE_PC, 0, 0,
                                       DIAG_SEVERITY_ERR);
                        return err;
                }
                break;
        default:
                pr_info("Invalid crash_obj_type\n");
                break;
        }
        return 0;
}

static int
bus_responder(enum controlvm_id cmd_id,
              struct controlvm_message_header *pending_msg_hdr,
              int response)
{
        if (!pending_msg_hdr)
                return -EIO;

        if (pending_msg_hdr->id != (u32)cmd_id)
                return -EINVAL;

        return controlvm_respond(pending_msg_hdr, response);
}

static int
device_changestate_responder(enum controlvm_id cmd_id,
                             struct visor_device *p, int response,
                             struct spar_segment_state response_state)
{
        struct controlvm_message outmsg;
        u32 bus_no = p->chipset_bus_no;
        u32 dev_no = p->chipset_dev_no;

        if (!p->pending_msg_hdr)
                return -EIO;
        if (p->pending_msg_hdr->id != cmd_id)
                return -EINVAL;

        controlvm_init_response(&outmsg, p->pending_msg_hdr, response);

        outmsg.cmd.device_change_state.bus_no = bus_no;
        outmsg.cmd.device_change_state.dev_no = dev_no;
        outmsg.cmd.device_change_state.state = response_state;

        return visorchannel_signalinsert(controlvm_channel,
                                         CONTROLVM_QUEUE_REQUEST, &outmsg);
}

static int
device_responder(enum controlvm_id cmd_id,
                 struct controlvm_message_header *pending_msg_hdr,
                 int response)
{
        if (!pending_msg_hdr)
                return -EIO;

        if (pending_msg_hdr->id != (u32)cmd_id)
                return -EINVAL;

        return controlvm_respond(pending_msg_hdr, response);
}

static int
bus_create(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        struct controlvm_message_header *pmsg_hdr = NULL;
        u32 bus_no = cmd->create_bus.bus_no;
        struct visor_device *bus_info;
        struct visorchannel *visorchannel;
        int err;

        bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
        if (bus_info && (bus_info->state.created == 1)) {
                POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
                               DIAG_SEVERITY_ERR);
                err = -EEXIST;
                goto err_respond;
        }

        bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
        if (!bus_info) {
                POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
                               DIAG_SEVERITY_ERR);
                err = -ENOMEM;
                goto err_respond;
        }

        INIT_LIST_HEAD(&bus_info->list_all);
        bus_info->chipset_bus_no = bus_no;
        bus_info->chipset_dev_no = BUS_ROOT_DEVICE;

        POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, bus_no, DIAG_SEVERITY_PRINT);

        if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0) {
                err = save_crash_message(inmsg, CRASH_BUS);
                if (err)
                        goto err_free_bus_info;
        }

        if (inmsg->hdr.flags.response_expected == 1) {
                pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
                if (!pmsg_hdr) {
                        POSTCODE_LINUX(MALLOC_FAILURE_PC, cmd,
                                       bus_info->chipset_bus_no,
                                       DIAG_SEVERITY_ERR);
                        err = -ENOMEM;
                        goto err_free_bus_info;
                }

                memcpy(pmsg_hdr, &inmsg->hdr,
                       sizeof(struct controlvm_message_header));
                bus_info->pending_msg_hdr = pmsg_hdr;
        }

        visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
                                           cmd->create_bus.channel_bytes,
                                           GFP_KERNEL,
                                           cmd->create_bus.bus_data_type_uuid);

        if (!visorchannel) {
                POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
                               DIAG_SEVERITY_ERR);
                err = -ENOMEM;
                goto err_free_pending_msg;
        }
        bus_info->visorchannel = visorchannel;

        /* Response will be handled by chipset_bus_create */
        chipset_bus_create(bus_info);

        POSTCODE_LINUX(BUS_CREATE_EXIT_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
        return 0;

err_free_pending_msg:
        kfree(bus_info->pending_msg_hdr);

err_free_bus_info:
        kfree(bus_info);

err_respond:
        if (inmsg->hdr.flags.response_expected == 1)
                bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
        return err;
}

static int
bus_destroy(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        struct controlvm_message_header *pmsg_hdr = NULL;
        u32 bus_no = cmd->destroy_bus.bus_no;
        struct visor_device *bus_info;
        int err;

        bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
        if (!bus_info) {
                err = -ENODEV;
                goto err_respond;
        }
        if (bus_info->state.created == 0) {
                err = -ENOENT;
                goto err_respond;
        }
        if (bus_info->pending_msg_hdr) {
                /* only non-NULL if dev is still waiting on a response */
                err = -EEXIST;
                goto err_respond;
        }
        if (inmsg->hdr.flags.response_expected == 1) {
                pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
                if (!pmsg_hdr) {
                        POSTCODE_LINUX(MALLOC_FAILURE_PC, cmd,
                                       bus_info->chipset_bus_no,
                                       DIAG_SEVERITY_ERR);
                        err = -ENOMEM;
                        goto err_respond;
                }

                memcpy(pmsg_hdr, &inmsg->hdr,
                       sizeof(struct controlvm_message_header));
                bus_info->pending_msg_hdr = pmsg_hdr;
        }

        /* Response will be handled by chipset_bus_destroy */
        chipset_bus_destroy(bus_info);
        return 0;

err_respond:
        if (inmsg->hdr.flags.response_expected == 1)
                bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
        return err;
}

static int
bus_configure(struct controlvm_message *inmsg,
              struct parser_context *parser_ctx)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no;
        struct visor_device *bus_info;
        int err = 0;

        bus_no = cmd->configure_bus.bus_no;
        POSTCODE_LINUX(BUS_CONFIGURE_ENTRY_PC, 0, bus_no,
                       DIAG_SEVERITY_PRINT);

        bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
        if (!bus_info) {
                POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
                               DIAG_SEVERITY_ERR);
                err = -EINVAL;
                goto err_respond;
        } else if (bus_info->state.created == 0) {
                POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
                               DIAG_SEVERITY_ERR);
                err = -EINVAL;
                goto err_respond;
        } else if (bus_info->pending_msg_hdr) {
                POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
                               DIAG_SEVERITY_ERR);
                err = -EIO;
                goto err_respond;
        }

        err = visorchannel_set_clientpartition
                (bus_info->visorchannel,
                 cmd->configure_bus.guest_handle);
        if (err)
                goto err_respond;

        if (parser_ctx) {
                bus_info->partition_uuid = parser_id_get(parser_ctx);
                bus_info->name = parser_name_get(parser_ctx);
        }

        POSTCODE_LINUX(BUS_CONFIGURE_EXIT_PC, 0, bus_no,
                       DIAG_SEVERITY_PRINT);

        if (inmsg->hdr.flags.response_expected == 1)
                bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
        return 0;

err_respond:
        if (inmsg->hdr.flags.response_expected == 1)
                bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
        return err;
}

static int
my_device_create(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        struct controlvm_message_header *pmsg_hdr = NULL;
        u32 bus_no = cmd->create_device.bus_no;
        u32 dev_no = cmd->create_device.dev_no;
        struct visor_device *dev_info = NULL;
        struct visor_device *bus_info;
        struct visorchannel *visorchannel;
        int err;

        bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
        if (!bus_info) {
                POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                               DIAG_SEVERITY_ERR);
                err = -ENODEV;
                goto err_respond;
        }

        if (bus_info->state.created == 0) {
                POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                               DIAG_SEVERITY_ERR);
                err = -EINVAL;
                goto err_respond;
        }

        dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
        if (dev_info && (dev_info->state.created == 1)) {
                POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                               DIAG_SEVERITY_ERR);
                err = -EEXIST;
                goto err_respond;
        }

        dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
        if (!dev_info) {
                POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                               DIAG_SEVERITY_ERR);
                err = -ENOMEM;
                goto err_respond;
        }

        dev_info->chipset_bus_no = bus_no;
        dev_info->chipset_dev_no = dev_no;
        dev_info->inst = cmd->create_device.dev_inst_uuid;

        /* not sure where the best place is to set the 'parent' */
        dev_info->device.parent = &bus_info->device;

        POSTCODE_LINUX(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
                       DIAG_SEVERITY_PRINT);

        visorchannel =
               visorchannel_create_with_lock(cmd->create_device.channel_addr,
                                             cmd->create_device.channel_bytes,
                                             GFP_KERNEL,
                                             cmd->create_device.data_type_uuid);

        if (!visorchannel) {
                POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                               DIAG_SEVERITY_ERR);
                err = -ENOMEM;
                goto err_free_dev_info;
        }
        dev_info->visorchannel = visorchannel;
        dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
        if (uuid_le_cmp(cmd->create_device.data_type_uuid,
                        spar_vhba_channel_protocol_uuid) == 0) {
                err = save_crash_message(inmsg, CRASH_DEV);
                if (err)
                        goto err_free_dev_info;
        }

        if (inmsg->hdr.flags.response_expected == 1) {
                pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
                if (!pmsg_hdr) {
                        err = -ENOMEM;
                        goto err_free_dev_info;
                }

                memcpy(pmsg_hdr, &inmsg->hdr,
                       sizeof(struct controlvm_message_header));
                dev_info->pending_msg_hdr = pmsg_hdr;
        }
        /* Chipset_device_create will send response */
        chipset_device_create(dev_info);
        POSTCODE_LINUX(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
                       DIAG_SEVERITY_PRINT);
        return 0;

err_free_dev_info:
        kfree(dev_info);

err_respond:
        if (inmsg->hdr.flags.response_expected == 1)
                device_responder(inmsg->hdr.id, &inmsg->hdr, err);
        return err;
}

static int
my_device_changestate(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        struct controlvm_message_header *pmsg_hdr = NULL;
        u32 bus_no = cmd->device_change_state.bus_no;
        u32 dev_no = cmd->device_change_state.dev_no;
        struct spar_segment_state state = cmd->device_change_state.state;
        struct visor_device *dev_info;
        int err;

        dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
        if (!dev_info) {
                POSTCODE_LINUX(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
                               DIAG_SEVERITY_ERR);
                err = -ENODEV;
                goto err_respond;
        }
        if (dev_info->state.created == 0) {
                POSTCODE_LINUX(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
                               DIAG_SEVERITY_ERR);
                err = -EINVAL;
                goto err_respond;
        }
        if (dev_info->pending_msg_hdr) {
                /* only non-NULL if dev is still waiting on a response */
                err = -EIO;
                goto err_respond;
        }
        if (inmsg->hdr.flags.response_expected == 1) {
                pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
                if (!pmsg_hdr) {
                        err = -ENOMEM;
                        goto err_respond;
                }

                memcpy(pmsg_hdr, &inmsg->hdr,
                       sizeof(struct controlvm_message_header));
                dev_info->pending_msg_hdr = pmsg_hdr;
        }

        if (state.alive == segment_state_running.alive &&
            state.operating == segment_state_running.operating)
                /* Response will be sent from chipset_device_resume */
                chipset_device_resume(dev_info);
        /* ServerNotReady / ServerLost / SegmentStateStandby */
        else if (state.alive == segment_state_standby.alive &&
                 state.operating == segment_state_standby.operating)
                /*
                 * Technically this is the standby case, where the server is
                 * lost. Response will be sent from chipset_device_pause.
                 */
                chipset_device_pause(dev_info);
        return 0;

err_respond:
        if (inmsg->hdr.flags.response_expected == 1)
                device_responder(inmsg->hdr.id, &inmsg->hdr, err);
        return err;
}

static int
my_device_destroy(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        struct controlvm_message_header *pmsg_hdr = NULL;
        u32 bus_no = cmd->destroy_device.bus_no;
        u32 dev_no = cmd->destroy_device.dev_no;
        struct visor_device *dev_info;
        int err;

        dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
        if (!dev_info) {
                err = -ENODEV;
                goto err_respond;
        }
        if (dev_info->state.created == 0) {
                err = -EINVAL;
                goto err_respond;
        }

        if (dev_info->pending_msg_hdr) {
                /* only non-NULL if dev is still waiting on a response */
                err = -EIO;
                goto err_respond;
        }
        if (inmsg->hdr.flags.response_expected == 1) {
                pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
                if (!pmsg_hdr) {
                        err = -ENOMEM;
                        goto err_respond;
                }

                memcpy(pmsg_hdr, &inmsg->hdr,
                       sizeof(struct controlvm_message_header));
                dev_info->pending_msg_hdr = pmsg_hdr;
        }

        chipset_device_destroy(dev_info);
        return 0;

err_respond:
        if (inmsg->hdr.flags.response_expected == 1)
                device_responder(inmsg->hdr.id, &inmsg->hdr, err);
        return err;
}

/*
 * The general parahotplug flow works as follows. The visorchipset receives
 * a DEVICE_CHANGESTATE message from Command specifying a physical device
 * to enable or disable. The CONTROLVM message handler calls
 * parahotplug_process_message, which then adds the message to a global list
 * and kicks off a udev event which causes a user level script to enable or
 * disable the specified device. The udev script then writes to
 * /sys/devices/platform/visorchipset/parahotplug, which causes the
 * parahotplug store functions to get called, at which point the
 * appropriate CONTROLVM message is retrieved from the list and responded
 * to.
 */
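
/*
 * Illustrative sketch (user space, not part of this driver): once the
 * udev script has finished disabling a device, it acknowledges the
 * request by writing the id it was handed back to the sysfs file named
 * above, roughly as follows, where id is the SPAR_PARAHOTPLUG_ID value
 * the uevent passed to the script:
 *
 *      FILE *f = fopen("/sys/devices/platform/visorchipset/"
 *                      "parahotplug/devicedisabled", "w");
 *
 *      if (f) {
 *              fprintf(f, "%d", id);
 *              fclose(f);
 *      }
 */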

#define PARAHOTPLUG_TIMEOUT_MS 2000

/**
 * parahotplug_next_id() - generate unique int to match an outstanding
 *                         CONTROLVM message with a udev script /sys
 *                         response
 *
 * Return: a unique integer value
 */
static int
parahotplug_next_id(void)
{
        static atomic_t id = ATOMIC_INIT(0);

        return atomic_inc_return(&id);
}

/**
 * parahotplug_next_expiration() - returns the time (in jiffies) when a
 *                                 CONTROLVM message on the list should expire
 *                                 -- PARAHOTPLUG_TIMEOUT_MS in the future
 *
 * Return: expected expiration time (in jiffies)
 */
static unsigned long
parahotplug_next_expiration(void)
{
        return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
}

/**
 * parahotplug_request_create() - create a parahotplug_request, which is
 *                                basically a wrapper for a CONTROLVM_MESSAGE
 *                                that we can stick on a list
 * @msg: the message to insert in the request
 *
 * Return: the request containing the provided message
 */
static struct parahotplug_request *
parahotplug_request_create(struct controlvm_message *msg)
{
        struct parahotplug_request *req;

        req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
        if (!req)
                return NULL;

        req->id = parahotplug_next_id();
        req->expiration = parahotplug_next_expiration();
        req->msg = *msg;

        return req;
}

/**
 * parahotplug_request_destroy() - free a parahotplug_request
 * @req: the request to deallocate
 */
static void
parahotplug_request_destroy(struct parahotplug_request *req)
{
        kfree(req);
}

static LIST_HEAD(parahotplug_request_list);
static DEFINE_SPINLOCK(parahotplug_request_list_lock);  /* lock for above */

/**
 * parahotplug_request_complete() - mark request as complete
 * @id:     the id of the request
 * @active: indicates whether the request is assigned to active partition
 *
 * Called from the /sys handler, which means the user script has
 * finished the enable/disable. Find the matching identifier, and
 * respond to the CONTROLVM message with success.
 *
 * Return: 0 on success or -EINVAL on failure
 */
static int
parahotplug_request_complete(int id, u16 active)
{
        struct list_head *pos;
        struct list_head *tmp;

        spin_lock(&parahotplug_request_list_lock);

        /* Look for a request matching "id". */
        list_for_each_safe(pos, tmp, &parahotplug_request_list) {
                struct parahotplug_request *req =
                    list_entry(pos, struct parahotplug_request, list);
                if (req->id == id) {
                        /*
                         * Found a match. Remove it from the list and
                         * respond.
                         */
                        list_del(pos);
                        spin_unlock(&parahotplug_request_list_lock);
                        req->msg.cmd.device_change_state.state.active = active;
                        if (req->msg.hdr.flags.response_expected)
                                controlvm_respond_physdev_changestate(
                                        &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
                                        req->msg.cmd.device_change_state.state);
                        parahotplug_request_destroy(req);
                        return 0;
                }
        }

        spin_unlock(&parahotplug_request_list_lock);
        return -EINVAL;
}

/**
 * devicedisabled_store() - disables the hotplug device
 * @dev:   sysfs interface variable not utilized in this function
 * @attr:  sysfs interface variable not utilized in this function
 * @buf:   buffer containing the device id
 * @count: the size of the buffer
 *
 * The parahotplug/devicedisabled interface gets called by our support script
 * when an SR-IOV device has been shut down. The ID is passed to the script
 * and then passed back when the device has been removed.
 *
 * Return: the size of the buffer for success or negative for error
 */
static ssize_t devicedisabled_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count)
{
        unsigned int id;
        int err;

        if (kstrtouint(buf, 10, &id))
                return -EINVAL;

        err = parahotplug_request_complete(id, 0);
        if (err < 0)
                return err;
        return count;
}
static DEVICE_ATTR_WO(devicedisabled);

/**
 * deviceenabled_store() - enables the hotplug device
 * @dev:   sysfs interface variable not utilized in this function
 * @attr:  sysfs interface variable not utilized in this function
 * @buf:   buffer containing the device id
 * @count: the size of the buffer
 *
 * The parahotplug/deviceenabled interface gets called by our support script
 * when an SR-IOV device has been recovered. The ID is passed to the script
 * and then passed back when the device has been brought back up.
 *
 * Return: the size of the buffer for success or negative for error
 */
static ssize_t deviceenabled_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        unsigned int id;

        if (kstrtouint(buf, 10, &id))
                return -EINVAL;

        parahotplug_request_complete(id, 1);
        return count;
}
static DEVICE_ATTR_WO(deviceenabled);

static struct attribute *visorchipset_install_attrs[] = {
        &dev_attr_toolaction.attr,
        &dev_attr_boottotool.attr,
        &dev_attr_error.attr,
        &dev_attr_textid.attr,
        &dev_attr_remaining_steps.attr,
        NULL
};

static const struct attribute_group visorchipset_install_group = {
        .name = "install",
        .attrs = visorchipset_install_attrs
};

static struct attribute *visorchipset_parahotplug_attrs[] = {
        &dev_attr_devicedisabled.attr,
        &dev_attr_deviceenabled.attr,
        NULL
};

static struct attribute_group visorchipset_parahotplug_group = {
        .name = "parahotplug",
        .attrs = visorchipset_parahotplug_attrs
};

static const struct attribute_group *visorchipset_dev_groups[] = {
        &visorchipset_install_group,
        &visorchipset_parahotplug_group,
        NULL
};

static void visorchipset_dev_release(struct device *dev)
{
}

/* /sys/devices/platform/visorchipset */
static struct platform_device visorchipset_platform_device = {
        .name = "visorchipset",
        .id = -1,
        .dev.groups = visorchipset_dev_groups,
        .dev.release = visorchipset_dev_release,
};

/**
 * parahotplug_request_kickoff() - initiate parahotplug request
 * @req: the request to initiate
 *
 * Cause uevent to run the user level script to do the disable/enable specified
 * in the parahotplug_request.
 */
static void
parahotplug_request_kickoff(struct parahotplug_request *req)
{
        struct controlvm_message_packet *cmd = &req->msg.cmd;
        char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
            env_func[40];
        char *envp[] = {
                env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
        };

        sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
        sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
        sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
                cmd->device_change_state.state.active);
        sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
                cmd->device_change_state.bus_no);
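        /*
         * dev_no packs a PCI-style address: the device number in bits 3
         * and up, the function number in the low three bits (assumed
         * from the shift and mask below).
         */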
        sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
                cmd->device_change_state.dev_no >> 3);
        sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
                cmd->device_change_state.dev_no & 0x7);

        kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
                           envp);
}

/**
 * parahotplug_process_message() - enables or disables a PCI device by kicking
 *                                 off a udev script
 * @inmsg: the message indicating whether to enable or disable
 */
static void
parahotplug_process_message(struct controlvm_message *inmsg)
{
        struct parahotplug_request *req;

        req = parahotplug_request_create(inmsg);

        if (!req)
                return;

        if (inmsg->cmd.device_change_state.state.active) {
                /*
                 * For enable messages, just respond with success
                 * right away. This is a bit of a hack, but there are
                 * issues with the early enable messages we get (with
                 * either the udev script not detecting that the device
                 * is up, or not getting called at all). Fortunately
                 * the messages that get lost don't matter anyway, as
                 * devices are automatically enabled at initialization.
                 */
                parahotplug_request_kickoff(req);
                controlvm_respond_physdev_changestate
                        (&inmsg->hdr,
                         CONTROLVM_RESP_SUCCESS,
                         inmsg->cmd.device_change_state.state);
                parahotplug_request_destroy(req);
        } else {
                /*
                 * For disable messages, add the request to the
                 * request list before kicking off the udev script. It
                 * won't get responded to until the script has
                 * indicated it's done.
                 */
                spin_lock(&parahotplug_request_list_lock);
                list_add_tail(&req->list, &parahotplug_request_list);
                spin_unlock(&parahotplug_request_list_lock);

                parahotplug_request_kickoff(req);
        }
}

/*
 * chipset_ready_uevent() - sends chipset_ready action
 *
 * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
 *
 * Return: 0 on success, negative on failure
 */
static int
chipset_ready_uevent(struct controlvm_message_header *msg_hdr)
{
        kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);

        if (msg_hdr->flags.response_expected)
                return controlvm_respond(msg_hdr, CONTROLVM_RESP_SUCCESS);

        return 0;
}

/*
 * chipset_selftest_uevent() - sends chipset_selftest action
 *
 * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
 *
 * Return: 0 on success, negative on failure
 */
static int
chipset_selftest_uevent(struct controlvm_message_header *msg_hdr)
{
        char env_selftest[20];
        char *envp[] = { env_selftest, NULL };

        sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
        kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
                           envp);

        if (msg_hdr->flags.response_expected)
                return controlvm_respond(msg_hdr, CONTROLVM_RESP_SUCCESS);

        return 0;
}

/*
 * chipset_notready_uevent() - sends chipset_notready action
 *
 * Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
 *
 * Return: 0 on success, negative on failure
 */
static int
chipset_notready_uevent(struct controlvm_message_header *msg_hdr)
{
        kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);

        if (msg_hdr->flags.response_expected)
                return controlvm_respond(msg_hdr, CONTROLVM_RESP_SUCCESS);

        return 0;
}

static inline unsigned int
issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
{
        struct vmcall_io_controlvm_addr_params params;
        int result = VMCALL_SUCCESS;
        u64 physaddr;

        physaddr = virt_to_phys(&params);
        ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
        if (VMCALL_SUCCESSFUL(result)) {
                *control_addr = params.address;
                *control_bytes = params.channel_bytes;
        }
        return result;
}

static u64 controlvm_get_channel_address(void)
{
        u64 addr = 0;
        u32 size = 0;

        if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
                return 0;

        return addr;
}

static void
setup_crash_devices_work_queue(struct work_struct *work)
{
        struct controlvm_message local_crash_bus_msg;
        struct controlvm_message local_crash_dev_msg;
        struct controlvm_message msg;
        u32 local_crash_msg_offset;
        u16 local_crash_msg_count;

        POSTCODE_LINUX(CRASH_DEV_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);

        /* send init chipset msg */
        msg.hdr.id = CONTROLVM_CHIPSET_INIT;
        msg.cmd.init_chipset.bus_count = 23;
        msg.cmd.init_chipset.switch_count = 0;

        chipset_init(&msg);

        /* get saved message count */
        if (visorchannel_read(controlvm_channel,
                              offsetof(struct spar_controlvm_channel_protocol,
                                       saved_crash_message_count),
                              &local_crash_msg_count, sizeof(u16)) < 0) {
                POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
                               DIAG_SEVERITY_ERR);
                return;
        }

        if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
                POSTCODE_LINUX(CRASH_DEV_COUNT_FAILURE_PC, 0,
                               local_crash_msg_count,
                               DIAG_SEVERITY_ERR);
                return;
        }

        /* get saved crash message offset */
        if (visorchannel_read(controlvm_channel,
                              offsetof(struct spar_controlvm_channel_protocol,
                                       saved_crash_message_offset),
                              &local_crash_msg_offset, sizeof(u32)) < 0) {
                POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
                               DIAG_SEVERITY_ERR);
                return;
        }

        /* read create device message for storage bus offset */
        if (visorchannel_read(controlvm_channel,
                              local_crash_msg_offset,
                              &local_crash_bus_msg,
                              sizeof(struct controlvm_message)) < 0) {
                POSTCODE_LINUX(CRASH_DEV_RD_BUS_FAILURE_PC, 0, 0,
                               DIAG_SEVERITY_ERR);
                return;
        }

        /* read create device message for storage device */
        if (visorchannel_read(controlvm_channel,
                              local_crash_msg_offset +
                              sizeof(struct controlvm_message),
                              &local_crash_dev_msg,
                              sizeof(struct controlvm_message)) < 0) {
                POSTCODE_LINUX(CRASH_DEV_RD_DEV_FAILURE_PC, 0, 0,
                               DIAG_SEVERITY_ERR);
                return;
        }

        /* reuse IOVM create bus message */
        if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
                bus_create(&local_crash_bus_msg);
        } else {
                POSTCODE_LINUX(CRASH_DEV_BUS_NULL_FAILURE_PC, 0, 0,
                               DIAG_SEVERITY_ERR);
                return;
        }

        /* reuse create device message for storage device */
        if (local_crash_dev_msg.cmd.create_device.channel_addr) {
                my_device_create(&local_crash_dev_msg);
        } else {
                POSTCODE_LINUX(CRASH_DEV_DEV_NULL_FAILURE_PC, 0, 0,
                               DIAG_SEVERITY_ERR);
                return;
        }
        POSTCODE_LINUX(CRASH_DEV_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
}
1467
1468 void
1469 bus_create_response(struct visor_device *bus_info, int response)
1470 {
1471         if (response >= 0)
1472                 bus_info->state.created = 1;
1473
1474         bus_responder(CONTROLVM_BUS_CREATE, bus_info->pending_msg_hdr,
1475                       response);
1476
1477         kfree(bus_info->pending_msg_hdr);
1478         bus_info->pending_msg_hdr = NULL;
1479 }
1480
1481 void
1482 bus_destroy_response(struct visor_device *bus_info, int response)
1483 {
1484         bus_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,
1485                       response);
1486
1487         kfree(bus_info->pending_msg_hdr);
1488         bus_info->pending_msg_hdr = NULL;
1489 }
1490
1491 void
1492 device_create_response(struct visor_device *dev_info, int response)
1493 {
1494         if (response >= 0)
1495                 dev_info->state.created = 1;
1496
1497         device_responder(CONTROLVM_DEVICE_CREATE, dev_info->pending_msg_hdr,
1498                          response);
1499
1500         kfree(dev_info->pending_msg_hdr);
1501         dev_info->pending_msg_hdr = NULL;
1502 }
1503
void
device_destroy_response(struct visor_device *dev_info, int response)
{
	device_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
			 response);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}

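/**
 * device_pause_response() - complete a pause (CONTROLVM_DEVICE_CHANGESTATE)
 *                           request by reporting segment_state_standby
 * @dev_info: the device that was paused
 * @response: CONTROLVM_RESP_* result code to send back
 */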
void
device_pause_response(struct visor_device *dev_info,
		      int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     dev_info, response,
				     segment_state_standby);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}

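/**
 * device_resume_response() - complete a resume (CONTROLVM_DEVICE_CHANGESTATE)
 *                            request by reporting segment_state_running
 * @dev_info: the device that was resumed
 * @response: CONTROLVM_RESP_* result code to send back
 */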
void
device_resume_response(struct visor_device *dev_info, int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     dev_info, response,
				     segment_state_running);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}

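/**
 * visorchipset_mmap() - map the GP control channel into a user process
 * @file: file structure for this device
 * @vma:  the user address range to map into
 *
 * Only offset VISORCHIPSET_MMAP_CONTROLCHANOFFSET is supported; the
 * physical address of the channel is read out of the controlvm channel
 * header and remapped into the caller's address space.
 *
 * Return: 0 on success, negative errno otherwise
 */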
static int
visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long physaddr = 0;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	u64 addr = 0;

	if (offset & (PAGE_SIZE - 1))
		return -ENXIO;	/* need aligned offsets */

	switch (offset) {
	case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
		vma->vm_flags |= VM_IO;
		if (!*file_controlvm_channel)
			return -ENXIO;

		visorchannel_read
			(*file_controlvm_channel,
			 offsetof(struct spar_controlvm_channel_protocol,
				  gp_control_channel),
			 &addr, sizeof(addr));
		if (!addr)
			return -ENXIO;

		physaddr = (unsigned long)addr;
		if (remap_pfn_range(vma, vma->vm_start,
				    physaddr >> PAGE_SHIFT,
				    vma->vm_end - vma->vm_start,
				    vma->vm_page_prot))
			return -EAGAIN;
		break;
	default:
		return -ENXIO;
	}
	return 0;
}

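/*
 * Thin wrappers around ISSUE_IO_VMCALL for the two time-related vmcalls
 * that the ioctl interface below exposes to userspace.
 */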
static inline s64 issue_vmcall_query_guest_virtual_time_offset(void)
{
	u64 result = VMCALL_SUCCESS;
	u64 physaddr = 0;

	ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr,
			result);
	return result;
}

static inline int issue_vmcall_update_physical_time(u64 adjustment)
{
	int result = VMCALL_SUCCESS;

	ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result);
	return result;
}

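/*
 * visorchipset_ioctl() - dispatch time-related vmcalls on behalf of
 * userspace. A minimal sketch of a userspace caller follows; the
 * /dev/visorchipset node name is an assumption (the cdev is merely
 * registered under the name "visorchipset"), and only the request
 * numbers come from this driver:
 *
 *	s64 vrtc_offset;
 *	int fd = open("/dev/visorchipset", O_RDWR);
 *
 *	if (fd >= 0 &&
 *	    ioctl(fd, VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET,
 *		  &vrtc_offset) == 0)
 *		printf("vrtc offset: %lld\n", (long long)vrtc_offset);
 */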
static long visorchipset_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	u64 adjustment;
	s64 vrtc_offset;

	switch (cmd) {
	case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
		/* get the physical rtc offset */
		vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
		if (copy_to_user((void __user *)arg, &vrtc_offset,
				 sizeof(vrtc_offset)))
			return -EFAULT;
		return 0;
	case VMCALL_UPDATE_PHYSICAL_TIME:
		if (copy_from_user(&adjustment, (void __user *)arg,
				   sizeof(adjustment)))
			return -EFAULT;
		return issue_vmcall_update_physical_time(adjustment);
	default:
		return -ENOTTY;	/* not a recognized ioctl command */
	}
}

static const struct file_operations visorchipset_fops = {
	.owner = THIS_MODULE,
	.open = visorchipset_open,
	.unlocked_ioctl = visorchipset_ioctl,
	.release = visorchipset_release,
	.mmap = visorchipset_mmap,
};

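/**
 * visorchipset_file_init() - register the visorchipset character device
 * @major_dev:         device number to use (a major of 0 requests dynamic
 *                     allocation)
 * @controlvm_channel: the controlvm channel to expose through mmap
 *
 * Return: 0 on success, negative errno otherwise
 */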
static int
visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
{
	int rc = 0;

	file_controlvm_channel = controlvm_channel;
	cdev_init(&file_cdev, &visorchipset_fops);
	file_cdev.owner = THIS_MODULE;
	if (MAJOR(major_dev) == 0) {
		/* dynamic major device number registration required */
		rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
		if (rc < 0)
			return rc;
	} else {
		/* static major device number registration required */
		rc = register_chrdev_region(major_dev, 1, "visorchipset");
		if (rc < 0)
			return rc;
	}
	rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
	if (rc < 0) {
		unregister_chrdev_region(major_dev, 1);
		return rc;
	}
	return 0;
}

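/**
 * visorchipset_file_cleanup() - undo visorchipset_file_init()
 * @major_dev: the device number that was registered
 */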
static void
visorchipset_file_cleanup(dev_t major_dev)
{
	if (file_cdev.ops)
		cdev_del(&file_cdev);
	file_cdev.ops = NULL;
	unregister_chrdev_region(major_dev, 1);
}

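/**
 * parser_init_byte_stream() - allocate a parser_context and copy in a
 *                             controlvm message payload
 * @addr:  physical address of the payload
 * @bytes: size of the payload in bytes
 * @local: true if the payload lies within OS-controlled memory
 * @retry: set to true if the caller should retry later, e.g. because
 *         buffering the payload would exceed MAX_CONTROLVM_PAYLOAD_BYTES
 *
 * Return: the new context, or NULL on failure (check *@retry)
 */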
static struct parser_context *
parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
{
	int allocbytes = sizeof(struct parser_context) + bytes;
	struct parser_context *ctx;

	*retry = false;

	/*
	 * allocate one extra byte so the payload is always
	 * '\0'-terminated
	 */
	allocbytes++;
	if ((controlvm_payload_bytes_buffered + bytes)
	    > MAX_CONTROLVM_PAYLOAD_BYTES) {
		*retry = true;
		return NULL;
	}
	ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
	if (!ctx) {
		*retry = true;
		return NULL;
	}

	ctx->allocbytes = allocbytes;
	ctx->param_bytes = bytes;
	ctx->curr = NULL;
	ctx->bytes_remaining = 0;
	ctx->byte_stream = false;
	if (local) {
		void *p;

		if (addr > virt_to_phys(high_memory - 1))
			goto err_finish_ctx;
		p = __va((unsigned long)(addr));
		memcpy(ctx->data, p, bytes);
	} else {
		void *mapping = memremap(addr, bytes, MEMREMAP_WB);

		if (!mapping)
			goto err_finish_ctx;
		memcpy(ctx->data, mapping, bytes);
		memunmap(mapping);
	}

	ctx->byte_stream = true;
	controlvm_payload_bytes_buffered += ctx->param_bytes;

	return ctx;

err_finish_ctx:
	parser_done(ctx);
	return NULL;
}

/**
 * handle_command() - process a controlvm message
 * @inmsg:        the message to process
 * @channel_addr: address of the controlvm channel
 *
 * Return:
 *    false - this function will return false only in the case where the
 *            controlvm message was NOT processed, but processing must be
 *            retried before reading the next controlvm message; a
 *            scenario where this can occur is when we need to throttle
 *            the allocation of memory in which to copy out controlvm
 *            payload data
 *    true  - processing of the controlvm message completed,
 *            either successfully or with an error
 */
static bool
handle_command(struct controlvm_message inmsg, u64 channel_addr)
{
	struct controlvm_message_packet *cmd = &inmsg.cmd;
	u64 parm_addr;
	u32 parm_bytes;
	struct parser_context *parser_ctx = NULL;
	bool local_addr;
	struct controlvm_message ackmsg;

	/* create parsing context if necessary */
	local_addr = (inmsg.hdr.flags.test_message == 1);
	if (channel_addr == 0)
		return true;
	parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
	parm_bytes = inmsg.hdr.payload_bytes;

	/*
	 * Parameter and channel addresses within test messages actually lie
	 * within our OS-controlled memory. We need to know that, because it
	 * makes a difference in how we compute the virtual address.
	 */
	if (parm_addr && parm_bytes) {
		bool retry = false;

		parser_ctx = parser_init_byte_stream(parm_addr, parm_bytes,
						     local_addr, &retry);
		if (!parser_ctx && retry)
			return false;
	}

	if (!local_addr) {
		controlvm_init_response(&ackmsg, &inmsg.hdr,
					CONTROLVM_RESP_SUCCESS);
		if (controlvm_channel)
			visorchannel_signalinsert(controlvm_channel,
						  CONTROLVM_QUEUE_ACK,
						  &ackmsg);
	}
	switch (inmsg.hdr.id) {
	case CONTROLVM_CHIPSET_INIT:
		chipset_init(&inmsg);
		break;
	case CONTROLVM_BUS_CREATE:
		bus_create(&inmsg);
		break;
	case CONTROLVM_BUS_DESTROY:
		bus_destroy(&inmsg);
		break;
	case CONTROLVM_BUS_CONFIGURE:
		bus_configure(&inmsg, parser_ctx);
		break;
	case CONTROLVM_DEVICE_CREATE:
		my_device_create(&inmsg);
		break;
	case CONTROLVM_DEVICE_CHANGESTATE:
		if (cmd->device_change_state.flags.phys_device) {
			parahotplug_process_message(&inmsg);
		} else {
			/*
			 * save the hdr and cmd structures for later use
			 * when sending back the response to Command
			 */
			my_device_changestate(&inmsg);
		}
		break;
	case CONTROLVM_DEVICE_DESTROY:
		my_device_destroy(&inmsg);
		break;
	case CONTROLVM_DEVICE_CONFIGURE:
		/* no op for now; just send a success response */
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
		break;
	case CONTROLVM_CHIPSET_READY:
		chipset_ready_uevent(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_SELFTEST:
		chipset_selftest_uevent(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_STOP:
		chipset_notready_uevent(&inmsg.hdr);
		break;
	default:
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr,
					  -CONTROLVM_RESP_ID_UNKNOWN);
		break;
	}

	if (parser_ctx) {
		parser_done(parser_ctx);
		parser_ctx = NULL;
	}
	return true;
}

/**
 * read_controlvm_event() - retrieves the next message from the
 *                          CONTROLVM_QUEUE_EVENT queue in the controlvm
 *                          channel
 * @msg: pointer to the retrieved message
 *
 * Return: true if a valid message was retrieved or false otherwise
 *         (test messages are ignored)
 */
static bool
read_controlvm_event(struct controlvm_message *msg)
{
	if (!visorchannel_signalremove(controlvm_channel,
				       CONTROLVM_QUEUE_EVENT, msg)) {
		/* got a message */
		if (msg->hdr.flags.test_message == 1)
			return false;
		return true;
	}
	return false;
}

/**
 * parahotplug_process_list() - remove any request from the list that has
 *                              been there too long and respond with an error
 */
static void
parahotplug_process_list(void)
{
	struct list_head *pos;
	struct list_head *tmp;

	spin_lock(&parahotplug_request_list_lock);

	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);

		if (!time_after_eq(jiffies, req->expiration))
			continue;

		list_del(pos);
		if (req->msg.hdr.flags.response_expected)
			controlvm_respond_physdev_changestate(
				&req->msg.hdr,
				CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT,
				req->msg.cmd.device_change_state.state);
		parahotplug_request_destroy(req);
	}

	spin_unlock(&parahotplug_request_list_lock);
}

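/**
 * controlvm_periodic_work() - service the controlvm channel
 * @work: the work structure this callback was scheduled from
 *
 * Drains the response queue, then processes either a previously throttled
 * message or the next event from the channel, expires stale parahotplug
 * requests, and finally adjusts the polling rate based on how recently a
 * message was seen before rescheduling itself.
 */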
static void
controlvm_periodic_work(struct work_struct *work)
{
	struct controlvm_message inmsg;
	bool got_command = false;
	bool handle_command_failed = false;

	/* drain the response queue */
	while (!visorchannel_signalremove(controlvm_channel,
					  CONTROLVM_QUEUE_RESPONSE,
					  &inmsg))
		;
	if (controlvm_pending_msg_valid) {
		/*
		 * we throttled processing of a prior
		 * msg, so try to process it again
		 * rather than reading a new one
		 */
		inmsg = controlvm_pending_msg;
		controlvm_pending_msg_valid = false;
		got_command = true;
	} else {
		got_command = read_controlvm_event(&inmsg);
	}

	while (got_command && !handle_command_failed) {
		most_recent_message_jiffies = jiffies;
		if (handle_command(inmsg,
				   visorchannel_get_physaddr
				   (controlvm_channel))) {
			got_command = read_controlvm_event(&inmsg);
		} else {
			/*
			 * this is a scenario where throttling
			 * is required, but probably NOT an
			 * error...; we stash the current
			 * controlvm msg so we will attempt to
			 * reprocess it on our next loop
			 */
			handle_command_failed = true;
			controlvm_pending_msg = inmsg;
			controlvm_pending_msg_valid = true;
		}
	}

	/* parahotplug_worker */
	parahotplug_process_list();

	if (time_after(jiffies,
		       most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
		/*
		 * it's been longer than MIN_IDLE_SECONDS since we
		 * processed our last controlvm message; slow down the
		 * polling
		 */
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
	} else {
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	}

	schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
}

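/**
 * visorchipset_init() - ACPI add callback for the s-Par chipset device
 * @acpi_device: the matched ACPI device
 *
 * Brings up the controlvm channel, the character device, the periodic
 * (or crash-kernel) work queue, the platform device, and visorbus.
 *
 * Return: 0 on success, negative errno otherwise
 */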
static int
visorchipset_init(struct acpi_device *acpi_device)
{
	int err = -ENODEV;
	u64 addr;
	uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;

	addr = controlvm_get_channel_address();
	if (!addr)
		goto error;

	controlvm_channel = visorchannel_create_with_lock(addr, 0,
							  GFP_KERNEL, uuid);
	if (!controlvm_channel)
		goto error;

	if (!SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
				visorchannel_get_header(controlvm_channel)))
		goto error_destroy_channel;

	major_dev = MKDEV(visorchipset_major, 0);
	err = visorchipset_file_init(major_dev, &controlvm_channel);
	if (err < 0)
		goto error_destroy_channel;

	/* if booting in a crash kernel */
	if (is_kdump_kernel())
		INIT_DELAYED_WORK(&periodic_controlvm_work,
				  setup_crash_devices_work_queue);
	else
		INIT_DELAYED_WORK(&periodic_controlvm_work,
				  controlvm_periodic_work);

	most_recent_message_jiffies = jiffies;
	poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);

	visorchipset_platform_device.dev.devt = major_dev;
	if (platform_device_register(&visorchipset_platform_device) < 0) {
		POSTCODE_LINUX(DEVICE_REGISTER_FAILURE_PC, 0, 0,
			       DIAG_SEVERITY_ERR);
		err = -ENODEV;
		goto error_cancel_work;
	}
	POSTCODE_LINUX(CHIPSET_INIT_SUCCESS_PC, 0, 0, DIAG_SEVERITY_PRINT);

	err = visorbus_init();
	if (err < 0)
		goto error_unregister;

	return 0;

error_unregister:
	platform_device_unregister(&visorchipset_platform_device);

error_cancel_work:
	cancel_delayed_work_sync(&periodic_controlvm_work);
	visorchipset_file_cleanup(major_dev);

error_destroy_channel:
	visorchannel_destroy(controlvm_channel);

error:
	POSTCODE_LINUX(CHIPSET_INIT_FAILURE_PC, 0, err, DIAG_SEVERITY_ERR);
	return err;
}

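/**
 * visorchipset_exit() - ACPI remove callback; tear down what
 *                       visorchipset_init() set up
 * @acpi_device: the ACPI device being removed
 */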
static int
visorchipset_exit(struct acpi_device *acpi_device)
{
	POSTCODE_LINUX(DRIVER_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);

	visorbus_exit();

	cancel_delayed_work_sync(&periodic_controlvm_work);

	visorchannel_destroy(controlvm_channel);

	visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
	platform_device_unregister(&visorchipset_platform_device);
	POSTCODE_LINUX(DRIVER_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);

	return 0;
}

static const struct acpi_device_id unisys_device_ids[] = {
	{"PNP0A07", 0},
	{"", 0},
};

static struct acpi_driver unisys_acpi_driver = {
	.name = "unisys_acpi",
	.class = "unisys_acpi_class",
	.owner = THIS_MODULE,
	.ids = unisys_device_ids,
	.ops = {
		.add = visorchipset_init,
		.remove = visorchipset_exit,
	},
};

MODULE_DEVICE_TABLE(acpi, unisys_device_ids);

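/**
 * visorutil_spar_detect() - use CPUID to check whether we are running as a
 *                           guest on a Unisys s-Par hypervisor
 *
 * Return: nonzero if the "UnisysSpar64" signature leaf is present,
 *         0 otherwise
 */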
static u32 __init visorutil_spar_detect(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return 0;

	/* check the ID */
	cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
	return (ebx == UNISYS_SPAR_ID_EBX) &&
	       (ecx == UNISYS_SPAR_ID_ECX) &&
	       (edx == UNISYS_SPAR_ID_EDX);
}

static int __init init_unisys(void)
{
	int result;

	if (!visorutil_spar_detect())
		return -ENODEV;

	result = acpi_bus_register_driver(&unisys_acpi_driver);
	if (result)
		return -ENODEV;

	pr_info("Unisys Visorchipset Driver Loaded.\n");
	return 0;
}

static void __exit exit_unisys(void)
{
	acpi_bus_unregister_driver(&unisys_acpi_driver);
}

module_param_named(major, visorchipset_major, int, 0444);
MODULE_PARM_DESC(major,
		 "major device number to use for the device node");

module_init(init_unisys);
module_exit(exit_unisys);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("s-Par visorbus driver for virtual device buses");