3 * Copyright (C) 2010 - 2015 UNISYS CORPORATION
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
17 #include <linux/acpi.h>
18 #include <linux/cdev.h>
19 #include <linux/ctype.h>
22 #include <linux/nls.h>
23 #include <linux/netdevice.h>
24 #include <linux/platform_device.h>
25 #include <linux/uuid.h>
26 #include <linux/crash_dump.h>
29 #include "visorbus_private.h"
30 #include "vmcallinterface.h"
32 #define CURRENT_FILE_PC VISOR_BUS_PC_visorchipset_c
34 #define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
35 #define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
37 #define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)
39 #define VISORCHIPSET_MMAP_CONTROLCHANOFFSET 0x00000000
41 #define UNISYS_SPAR_LEAF_ID 0x40000000
43 /* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
44 #define UNISYS_SPAR_ID_EBX 0x73696e55
45 #define UNISYS_SPAR_ID_ECX 0x70537379
46 #define UNISYS_SPAR_ID_EDX 0x34367261
51 static int visorchipset_major;
/*
 * visorchipset_open() / visorchipset_release() - open/release handlers for
 * the /dev/visorchipset character device.
 * NOTE(review): bodies are truncated in this extract; open appears to key off
 * the inode's minor number — presumably rejecting minors other than 0; verify
 * against the full source.
 */
54 visorchipset_open(struct inode *inode, struct file *file)
56 unsigned int minor_number = iminor(inode);
64 visorchipset_release(struct inode *inode, struct file *file)
70 * When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
71 * we switch to slow polling mode. As soon as we get a controlvm
72 * message, we switch back to fast polling mode.
74 #define MIN_IDLE_SECONDS 10
75 static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
76 /* when we got our last controlvm message */
77 static unsigned long most_recent_message_jiffies;
79 struct parser_context {
80 unsigned long allocbytes;
81 unsigned long param_bytes;
83 unsigned long bytes_remaining;
88 static struct delayed_work periodic_controlvm_work;
90 static struct cdev file_cdev;
91 static struct visorchannel **file_controlvm_channel;
93 static struct visorchannel *controlvm_channel;
94 static unsigned long controlvm_payload_bytes_buffered;
97 * The following globals are used to handle the scenario where we are unable to
98 * offload the payload from a controlvm message due to memory requirements. In
99 * this scenario, we simply stash the controlvm message, then attempt to
100 * process it again the next time controlvm_periodic_work() runs.
102 static struct controlvm_message controlvm_pending_msg;
103 static bool controlvm_pending_msg_valid;
105 struct parahotplug_request {
106 struct list_head list;
108 unsigned long expiration;
109 struct controlvm_message msg;
112 /* info for /dev/visorchipset */
113 static dev_t major_dev = -1; /*< indicates major num for device */
115 /* prototypes for attributes */
/*
 * toolaction_show() - sysfs read of the u8 tool_action field from the
 * controlvm channel header.  Prints it as an unsigned decimal.
 */
116 static ssize_t toolaction_show(struct device *dev,
117 struct device_attribute *attr,
122 visorchannel_read(controlvm_channel,
123 offsetof(struct spar_controlvm_channel_protocol,
124 tool_action), &tool_action, sizeof(u8));
125 return sprintf(buf, "%u\n", tool_action);
/*
 * toolaction_store() - sysfs write: parse a base-10 u8 from userspace and
 * write it back into the channel's tool_action field.
 * NOTE(review): error paths between the kstrto and the write are missing in
 * this extract.
 */
128 static ssize_t toolaction_store(struct device *dev,
129 struct device_attribute *attr,
130 const char *buf, size_t count)
135 if (kstrtou8(buf, 10, &tool_action))
138 ret = visorchannel_write
140 offsetof(struct spar_controlvm_channel_protocol,
142 &tool_action, sizeof(u8));
/* Exposes /sys/.../toolaction with both show and store above. */
148 static DEVICE_ATTR_RW(toolaction);
/*
 * boottotool_show() - sysfs read of the efi_spar_ind structure; reports the
 * boot_to_tool flag as a decimal.
 */
150 static ssize_t boottotool_show(struct device *dev,
151 struct device_attribute *attr,
154 struct efi_spar_indication efi_spar_indication;
156 visorchannel_read(controlvm_channel,
157 offsetof(struct spar_controlvm_channel_protocol,
158 efi_spar_ind), &efi_spar_indication,
159 sizeof(struct efi_spar_indication));
160 return sprintf(buf, "%u\n", efi_spar_indication.boot_to_tool);
/*
 * boottotool_store() - parse an int from userspace and write the whole
 * efi_spar_indication struct back with boot_to_tool set to that value.
 * NOTE(review): other struct members appear to be left uninitialized before
 * the write in this extract — confirm against the full source.
 */
163 static ssize_t boottotool_store(struct device *dev,
164 struct device_attribute *attr,
165 const char *buf, size_t count)
168 struct efi_spar_indication efi_spar_indication;
170 if (kstrtoint(buf, 10, &val))
173 efi_spar_indication.boot_to_tool = val;
174 ret = visorchannel_write
176 offsetof(struct spar_controlvm_channel_protocol,
177 efi_spar_ind), &(efi_spar_indication),
178 sizeof(struct efi_spar_indication));
/* Exposes /sys/.../boottotool (RW). */
184 static DEVICE_ATTR_RW(boottotool);
/* error_show() - read the u32 installation-error field from the channel. */
186 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
191 visorchannel_read(controlvm_channel,
192 offsetof(struct spar_controlvm_channel_protocol,
194 &error, sizeof(u32));
195 return sprintf(buf, "%i\n", error);
/* error_store() - parse a base-10 u32 and write it back to the channel. */
198 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
199 const char *buf, size_t count)
204 if (kstrtou32(buf, 10, &error))
207 ret = visorchannel_write
209 offsetof(struct spar_controlvm_channel_protocol,
211 &error, sizeof(u32));
/* Exposes /sys/.../error (RW). */
216 static DEVICE_ATTR_RW(error);
/* textid_show() - read the u32 installation_text_id field from the channel. */
218 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
225 offsetof(struct spar_controlvm_channel_protocol,
226 installation_text_id),
227 &text_id, sizeof(u32));
228 return sprintf(buf, "%i\n", text_id);
/* textid_store() - parse a base-10 u32 and store installation_text_id. */
231 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
232 const char *buf, size_t count)
237 if (kstrtou32(buf, 10, &text_id))
240 ret = visorchannel_write
242 offsetof(struct spar_controlvm_channel_protocol,
243 installation_text_id),
244 &text_id, sizeof(u32));
/* Exposes /sys/.../textid (RW). */
249 static DEVICE_ATTR_RW(textid);
/*
 * remaining_steps_show() - read the u16 installation_remaining_steps field
 * from the controlvm channel.  Defaults to 0 if the read fails (the return
 * value of visorchannel_read is not checked here).
 */
251 static ssize_t remaining_steps_show(struct device *dev,
252 struct device_attribute *attr, char *buf)
254 u16 remaining_steps = 0;
256 visorchannel_read(controlvm_channel,
257 offsetof(struct spar_controlvm_channel_protocol,
258 installation_remaining_steps),
259 &remaining_steps, sizeof(u16));
260 return sprintf(buf, "%hu\n", remaining_steps);
/*
 * remaining_steps_store() - parse a base-10 u16 from userspace and write it
 * back to installation_remaining_steps.
 */
263 static ssize_t remaining_steps_store(struct device *dev,
264 struct device_attribute *attr,
265 const char *buf, size_t count)
270 if (kstrtou16(buf, 10, &remaining_steps))
273 ret = visorchannel_write
275 offsetof(struct spar_controlvm_channel_protocol,
276 installation_remaining_steps),
277 &remaining_steps, sizeof(u16));
/* Exposes /sys/.../remaining_steps (RW). */
282 static DEVICE_ATTR_RW(remaining_steps);
/*
 * parser_id_get() - return the id embedded in the parameters header at the
 * start of the parser context's data buffer.
 */
285 parser_id_get(struct parser_context *ctx)
287 struct spar_controlvm_parameters_header *phdr = NULL;
289 phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
/*
 * parser_done() - release accounting for a finished parse: subtract this
 * context's payload from the global buffered-bytes counter.
 * NOTE(review): the kfree of ctx itself is presumably in the truncated tail.
 */
293 static void parser_done(struct parser_context *ctx)
295 controlvm_payload_bytes_buffered -= ctx->param_bytes;
/*
 * parser_string_get() - copy the current parameter region out as a freshly
 * kmalloc'd NUL-terminated string.  Scans up to bytes_remaining for a '\0';
 * if none is found the whole region is taken and a terminator is appended.
 */
300 parser_string_get(struct parser_context *ctx)
304 int value_length = -1;
309 nscan = ctx->bytes_remaining;
314 for (i = 0, value_length = -1; i < nscan; i++)
315 if (pscan[i] == '\0') {
319 if (value_length < 0) /* '\0' was not included in the length */
320 value_length = nscan;
321 value = kmalloc(value_length + 1, GFP_KERNEL | __GFP_NORETRY);
324 if (value_length > 0)
325 memcpy(value, pscan, value_length);
326 ((u8 *)(value))[value_length] = '\0';
/*
 * parser_name_get() - position the context at the name region described by
 * the parameters header (bounds-checked against param_bytes) and extract it
 * as a string via parser_string_get().
 */
331 parser_name_get(struct parser_context *ctx)
333 struct spar_controlvm_parameters_header *phdr = NULL;
335 phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
337 if (phdr->name_offset + phdr->name_length > ctx->param_bytes)
340 ctx->curr = ctx->data + phdr->name_offset;
341 ctx->bytes_remaining = phdr->name_length;
342 return parser_string_get(ctx);
345 struct visor_busdev {
/*
 * match_visorbus_dev_by_id() - bus_find_device() predicate: true when the
 * visor_device's chipset bus/dev numbers both match the supplied id pair.
 */
350 static int match_visorbus_dev_by_id(struct device *dev, void *data)
352 struct visor_device *vdev = to_visor_device(dev);
353 struct visor_busdev *id = data;
354 u32 bus_no = id->bus_no;
355 u32 dev_no = id->dev_no;
357 if ((vdev->chipset_bus_no == bus_no) &&
358 (vdev->chipset_dev_no == dev_no))
/*
 * visorbus_get_device_by_id() - look up a visor_device by (bus_no, dev_no),
 * optionally continuing the search after @from.  Returns NULL when no match
 * is found (truncated here; the NULL-init of vdev suggests that default).
 */
364 struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
365 struct visor_device *from)
368 struct device *dev_start = NULL;
369 struct visor_device *vdev = NULL;
370 struct visor_busdev id = {
376 dev_start = &from->device;
377 dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
378 match_visorbus_dev_by_id);
380 vdev = to_visor_device(dev);
/*
 * controlvm_init_response() - build an outgoing response message: copy the
 * incoming header, clear the payload fields, and on a negative @response set
 * the failed flag with completion_status = -response.
 */
385 controlvm_init_response(struct controlvm_message *msg,
386 struct controlvm_message_header *msg_hdr, int response)
388 memset(msg, 0, sizeof(struct controlvm_message));
389 memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
390 msg->hdr.payload_bytes = 0;
391 msg->hdr.payload_vm_offset = 0;
392 msg->hdr.payload_max_bytes = 0;
394 msg->hdr.flags.failed = 1;
395 msg->hdr.completion_status = (u32)(-response);
/*
 * controlvm_respond_chipset_init() - respond to CHIPSET_INIT, advertising
 * our supported feature bits, by inserting the reply on the request queue.
 */
400 controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
402 enum ultra_chipset_feature features)
404 struct controlvm_message outmsg;
406 controlvm_init_response(&outmsg, msg_hdr, response);
407 outmsg.cmd.init_chipset.features = features;
408 return visorchannel_signalinsert(controlvm_channel,
409 CONTROLVM_QUEUE_REQUEST, &outmsg);
/*
 * chipset_init() - handle the CONTROLVM CHIPSET_INIT message.  Guards
 * against double initialization via the function-static chipset_inited,
 * negotiates the parahotplug feature bit from the incoming message, always
 * sets the REPLY bit to mark this driver as features-aware, and sends a
 * response if one was requested.
 */
413 chipset_init(struct controlvm_message *inmsg)
415 static int chipset_inited;
416 enum ultra_chipset_feature features = 0;
417 int rc = CONTROLVM_RESP_SUCCESS;
420 POSTCODE_LINUX(CHIPSET_INIT_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);
421 if (chipset_inited) {
422 rc = -CONTROLVM_RESP_ALREADY_DONE;
427 POSTCODE_LINUX(CHIPSET_INIT_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
430 * Set features to indicate we support parahotplug (if Command
433 features = inmsg->cmd.init_chipset.features &
434 ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
437 * Set the "reply" bit so Command knows this is a
438 * features-aware driver.
440 features |= ULTRA_CHIPSET_FEATURE_REPLY;
443 if (inmsg->hdr.flags.response_expected)
444 res = controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
/*
 * controlvm_respond() - generic reply path: build a response from the given
 * header and queue it, except when the original was flagged as a test
 * message (the early-out body for that case is truncated here).
 */
450 controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
452 struct controlvm_message outmsg;
454 controlvm_init_response(&outmsg, msg_hdr, response);
455 if (outmsg.hdr.flags.test_message == 1)
458 return visorchannel_signalinsert(controlvm_channel,
459 CONTROLVM_QUEUE_REQUEST, &outmsg);
/*
 * controlvm_respond_physdev_changestate() - reply to a physical-device
 * DEVICE_CHANGESTATE, echoing the resulting segment state and marking the
 * target as a physical device.
 */
462 static int controlvm_respond_physdev_changestate(
463 struct controlvm_message_header *msg_hdr, int response,
464 struct spar_segment_state state)
466 struct controlvm_message outmsg;
468 controlvm_init_response(&outmsg, msg_hdr, response);
469 outmsg.cmd.device_change_state.state = state;
470 outmsg.cmd.device_change_state.flags.phys_device = 1;
471 return visorchannel_signalinsert(controlvm_channel,
472 CONTROLVM_QUEUE_REQUEST, &outmsg);
475 enum crash_obj_type {
/*
 * save_crash_message() - stash a bus/device-create message into the channel's
 * saved-crash-message area so a kdump kernel can replay it.  Reads the saved
 * message count (must equal CONTROLVM_CRASHMSG_MAX) and offset from the
 * channel header, then writes @msg at the slot selected by @typ: the device
 * slot sits one controlvm_message past the bus slot.  Each failure path
 * emits a distinct POSTCODE.
 */
481 save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ)
483 u32 local_crash_msg_offset;
484 u16 local_crash_msg_count;
487 err = visorchannel_read(controlvm_channel,
488 offsetof(struct spar_controlvm_channel_protocol,
489 saved_crash_message_count),
490 &local_crash_msg_count, sizeof(u16));
492 POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
/* Sanity check: the partition must have provisioned exactly the
 * expected number of crash-message slots.
 */
497 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
498 POSTCODE_LINUX(CRASH_DEV_COUNT_FAILURE_PC, 0,
499 local_crash_msg_count,
504 err = visorchannel_read(controlvm_channel,
505 offsetof(struct spar_controlvm_channel_protocol,
506 saved_crash_message_offset),
507 &local_crash_msg_offset, sizeof(u32));
509 POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
/* CRASH_DEV case: device message lives in the second slot. */
516 local_crash_msg_offset += sizeof(struct controlvm_message);
517 err = visorchannel_write(controlvm_channel,
518 local_crash_msg_offset,
520 sizeof(struct controlvm_message));
522 POSTCODE_LINUX(SAVE_MSG_DEV_FAILURE_PC, 0, 0,
/* CRASH_BUS case: bus message goes in the first slot. */
528 err = visorchannel_write(controlvm_channel,
529 local_crash_msg_offset,
531 sizeof(struct controlvm_message));
533 POSTCODE_LINUX(SAVE_MSG_BUS_FAILURE_PC, 0, 0,
539 pr_info("Invalid crash_obj_type\n");
/*
 * bus_responder() - send a response for a bus-level command, but only when a
 * pending header exists and its id matches the command being answered.
 */
546 bus_responder(enum controlvm_id cmd_id,
547 struct controlvm_message_header *pending_msg_hdr,
550 if (!pending_msg_hdr)
553 if (pending_msg_hdr->id != (u32)cmd_id)
556 return controlvm_respond(pending_msg_hdr, response);
/*
 * device_changestate_responder() - respond to a DEVICE_CHANGESTATE for
 * device @p, echoing its bus/dev numbers and the resulting segment state.
 * Validates that the device actually has a pending header for @cmd_id.
 */
560 device_changestate_responder(enum controlvm_id cmd_id,
561 struct visor_device *p, int response,
562 struct spar_segment_state response_state)
564 struct controlvm_message outmsg;
565 u32 bus_no = p->chipset_bus_no;
566 u32 dev_no = p->chipset_dev_no;
568 if (!p->pending_msg_hdr)
570 if (p->pending_msg_hdr->id != cmd_id)
573 controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
575 outmsg.cmd.device_change_state.bus_no = bus_no;
576 outmsg.cmd.device_change_state.dev_no = dev_no;
577 outmsg.cmd.device_change_state.state = response_state;
579 return visorchannel_signalinsert(controlvm_channel,
580 CONTROLVM_QUEUE_REQUEST, &outmsg);
/*
 * device_responder() - device-level twin of bus_responder(): same guard
 * checks, then a plain controlvm_respond().
 */
584 device_responder(enum controlvm_id cmd_id,
585 struct controlvm_message_header *pending_msg_hdr,
588 if (!pending_msg_hdr)
591 if (pending_msg_hdr->id != (u32)cmd_id)
594 return controlvm_respond(pending_msg_hdr, response);
/*
 * bus_create() - handle a CONTROLVM bus-create message.  Rejects duplicates
 * (bus already created), allocates and initializes a visor_device for the
 * bus, saves the message for kdump replay when the bus instance uuid is the
 * s-Par IOVM's, stashes a copy of the header when a response is expected,
 * creates the backing visorchannel, and hands off to chipset_bus_create()
 * which sends the eventual response.  Error paths unwind via gotos (partly
 * truncated in this extract).
 */
598 bus_create(struct controlvm_message *inmsg)
600 struct controlvm_message_packet *cmd = &inmsg->cmd;
601 struct controlvm_message_header *pmsg_hdr = NULL;
602 u32 bus_no = cmd->create_bus.bus_no;
603 struct visor_device *bus_info;
604 struct visorchannel *visorchannel;
/* Duplicate-create check against the existing bus registry. */
607 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
608 if (bus_info && (bus_info->state.created == 1)) {
609 POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
615 bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
617 POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
623 INIT_LIST_HEAD(&bus_info->list_all);
624 bus_info->chipset_bus_no = bus_no;
625 bus_info->chipset_dev_no = BUS_ROOT_DEVICE;
627 POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
/* s-Par IOVM bus: persist this message for crash-dump replay. */
629 if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0) {
630 err = save_crash_message(inmsg, CRASH_BUS);
632 goto err_free_bus_info;
635 if (inmsg->hdr.flags.response_expected == 1) {
636 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr),
639 POSTCODE_LINUX(MALLOC_FAILURE_PC, cmd,
640 bus_info->chipset_bus_no,
643 goto err_free_bus_info;
646 memcpy(pmsg_hdr, &inmsg->hdr,
647 sizeof(struct controlvm_message_header));
648 bus_info->pending_msg_hdr = pmsg_hdr;
651 visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
652 cmd->create_bus.channel_bytes,
654 cmd->create_bus.bus_data_type_uuid);
657 POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
660 goto err_free_pending_msg;
662 bus_info->visorchannel = visorchannel;
664 /* Response will be handled by chipset_bus_create */
665 chipset_bus_create(bus_info);
667 POSTCODE_LINUX(BUS_CREATE_EXIT_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
670 err_free_pending_msg:
671 kfree(bus_info->pending_msg_hdr);
/* On error, answer immediately (success path responds asynchronously). */
677 if (inmsg->hdr.flags.response_expected == 1)
678 bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
/*
 * bus_destroy() - handle a CONTROLVM bus-destroy message.  Validates the bus
 * exists, was created, and has no response still outstanding; copies the
 * header for a deferred response when one is expected; then delegates to
 * chipset_bus_destroy() which sends the response.  On validation/allocation
 * failure, responds immediately with the error.
 */
683 bus_destroy(struct controlvm_message *inmsg)
685 struct controlvm_message_packet *cmd = &inmsg->cmd;
686 struct controlvm_message_header *pmsg_hdr = NULL;
687 u32 bus_no = cmd->destroy_bus.bus_no;
688 struct visor_device *bus_info;
691 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
696 if (bus_info->state.created == 0) {
700 if (bus_info->pending_msg_hdr) {
701 /* only non-NULL if dev is still waiting on a response */
705 if (inmsg->hdr.flags.response_expected == 1) {
706 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
708 POSTCODE_LINUX(MALLOC_FAILURE_PC, cmd,
709 bus_info->chipset_bus_no,
715 memcpy(pmsg_hdr, &inmsg->hdr,
716 sizeof(struct controlvm_message_header));
717 bus_info->pending_msg_hdr = pmsg_hdr;
720 /* Response will be handled by chipset_bus_destroy */
721 chipset_bus_destroy(bus_info);
/* Error path: respond immediately with err. */
725 if (inmsg->hdr.flags.response_expected == 1)
726 bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
/*
 * bus_configure() - handle a CONTROLVM bus-configure message.  Verifies the
 * bus exists, is created, and is not mid-transaction (pending_msg_hdr set);
 * then binds the channel to the guest partition handle and records the
 * partition uuid and name parsed from the message payload.  A response is
 * sent synchronously in both success and failure paths.
 */
731 bus_configure(struct controlvm_message *inmsg,
732 struct parser_context *parser_ctx)
734 struct controlvm_message_packet *cmd = &inmsg->cmd;
736 struct visor_device *bus_info;
739 bus_no = cmd->configure_bus.bus_no;
740 POSTCODE_LINUX(BUS_CONFIGURE_ENTRY_PC, 0, bus_no,
741 DIAG_SEVERITY_PRINT);
743 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
745 POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
749 } else if (bus_info->state.created == 0) {
750 POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
754 } else if (bus_info->pending_msg_hdr) {
755 POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
761 err = visorchannel_set_clientpartition
762 (bus_info->visorchannel,
763 cmd->configure_bus.guest_handle);
/* Pull the partition identity from the payload parser context. */
768 bus_info->partition_uuid = parser_id_get(parser_ctx);
769 bus_info->name = parser_name_get(parser_ctx);
772 POSTCODE_LINUX(BUS_CONFIGURE_EXIT_PC, 0, bus_no,
773 DIAG_SEVERITY_PRINT);
775 if (inmsg->hdr.flags.response_expected == 1)
776 bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
/* Error path responder. */
780 if (inmsg->hdr.flags.response_expected == 1)
781 bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
/*
 * my_device_create() - handle a CONTROLVM device-create message.  Validates
 * the parent bus (exists and created) and rejects duplicate devices, then
 * allocates a visor_device, parents it under the bus, creates the device's
 * visorchannel (with lock), saves the message for kdump replay when the
 * channel type is the vHBA protocol, stashes the header for the deferred
 * response, and hands off to chipset_device_create().  Errors unwind via
 * gotos and respond immediately.
 */
786 my_device_create(struct controlvm_message *inmsg)
788 struct controlvm_message_packet *cmd = &inmsg->cmd;
789 struct controlvm_message_header *pmsg_hdr = NULL;
790 u32 bus_no = cmd->create_device.bus_no;
791 u32 dev_no = cmd->create_device.dev_no;
792 struct visor_device *dev_info = NULL;
793 struct visor_device *bus_info;
794 struct visorchannel *visorchannel;
797 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
799 POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
805 if (bus_info->state.created == 0) {
806 POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
/* Duplicate-device check. */
812 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
813 if (dev_info && (dev_info->state.created == 1)) {
814 POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
820 dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
822 POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
828 dev_info->chipset_bus_no = bus_no;
829 dev_info->chipset_dev_no = dev_no;
830 dev_info->inst = cmd->create_device.dev_inst_uuid;
832 /* not sure where the best place to set the 'parent' */
833 dev_info->device.parent = &bus_info->device;
835 POSTCODE_LINUX(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
836 DIAG_SEVERITY_PRINT);
839 visorchannel_create_with_lock(cmd->create_device.channel_addr,
840 cmd->create_device.channel_bytes,
842 cmd->create_device.data_type_uuid);
845 POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
848 goto err_free_dev_info;
850 dev_info->visorchannel = visorchannel;
851 dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
/* vHBA device: persist this message for crash-dump replay. */
852 if (uuid_le_cmp(cmd->create_device.data_type_uuid,
853 spar_vhba_channel_protocol_uuid) == 0) {
854 err = save_crash_message(inmsg, CRASH_DEV);
856 goto err_free_dev_info;
859 if (inmsg->hdr.flags.response_expected == 1) {
860 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
863 goto err_free_dev_info;
866 memcpy(pmsg_hdr, &inmsg->hdr,
867 sizeof(struct controlvm_message_header));
868 dev_info->pending_msg_hdr = pmsg_hdr;
870 /* Chipset_device_create will send response */
871 chipset_device_create(dev_info);
872 POSTCODE_LINUX(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
873 DIAG_SEVERITY_PRINT);
/* Error path: respond immediately with err. */
880 if (inmsg->hdr.flags.response_expected == 1)
881 device_responder(inmsg->hdr.id, &inmsg->hdr, err);
/*
 * my_device_changestate() - handle a CONTROLVM device change-state message.
 * After validating the device (exists, created, no response outstanding) and
 * stashing the header for the deferred reply, dispatches on the requested
 * segment state: running -> chipset_device_resume(), standby ->
 * chipset_device_pause().  The chipset_* callees send the response.
 */
886 my_device_changestate(struct controlvm_message *inmsg)
888 struct controlvm_message_packet *cmd = &inmsg->cmd;
889 struct controlvm_message_header *pmsg_hdr = NULL;
890 u32 bus_no = cmd->device_change_state.bus_no;
891 u32 dev_no = cmd->device_change_state.dev_no;
892 struct spar_segment_state state = cmd->device_change_state.state;
893 struct visor_device *dev_info;
896 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
898 POSTCODE_LINUX(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
903 if (dev_info->state.created == 0) {
904 POSTCODE_LINUX(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
909 if (dev_info->pending_msg_hdr) {
910 /* only non-NULL if dev is still waiting on a response */
914 if (inmsg->hdr.flags.response_expected == 1) {
915 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
921 memcpy(pmsg_hdr, &inmsg->hdr,
922 sizeof(struct controlvm_message_header));
923 dev_info->pending_msg_hdr = pmsg_hdr;
926 if (state.alive == segment_state_running.alive &&
927 state.operating == segment_state_running.operating)
928 /* Response will be sent from chipset_device_resume */
929 chipset_device_resume(dev_info);
930 /* ServerNotReady / ServerLost / SegmentStateStandby */
931 else if (state.alive == segment_state_standby.alive &&
932 state.operating == segment_state_standby.operating)
934 * technically this is standby case where server is lost.
935 * Response will be sent from chipset_device_pause.
937 chipset_device_pause(dev_info);
/* Error path: respond immediately with err. */
941 if (inmsg->hdr.flags.response_expected == 1)
942 device_responder(inmsg->hdr.id, &inmsg->hdr, err);
/*
 * my_device_destroy() - handle a CONTROLVM device-destroy message.  Same
 * validation pattern as changestate (exists, created, no pending response);
 * stashes the header for the deferred response, then delegates to
 * chipset_device_destroy().  Validation failures respond immediately.
 */
947 my_device_destroy(struct controlvm_message *inmsg)
949 struct controlvm_message_packet *cmd = &inmsg->cmd;
950 struct controlvm_message_header *pmsg_hdr = NULL;
951 u32 bus_no = cmd->destroy_device.bus_no;
952 u32 dev_no = cmd->destroy_device.dev_no;
953 struct visor_device *dev_info;
956 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
961 if (dev_info->state.created == 0) {
966 if (dev_info->pending_msg_hdr) {
967 /* only non-NULL if dev is still waiting on a response */
971 if (inmsg->hdr.flags.response_expected == 1) {
972 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
978 memcpy(pmsg_hdr, &inmsg->hdr,
979 sizeof(struct controlvm_message_header));
980 dev_info->pending_msg_hdr = pmsg_hdr;
983 chipset_device_destroy(dev_info);
/* Error path: respond immediately with err. */
987 if (inmsg->hdr.flags.response_expected == 1)
988 device_responder(inmsg->hdr.id, &inmsg->hdr, err);
993 * The general parahotplug flow works as follows. The visorchipset receives
994 * a DEVICE_CHANGESTATE message from Command specifying a physical device
995 * to enable or disable. The CONTROLVM message handler calls
996 * parahotplug_process_message, which then adds the message to a global list
997 * and kicks off a udev event which causes a user level script to enable or
998 * disable the specified device. The udev script then writes to
999 * /sys/devices/platform/visorchipset/parahotplug, which causes the
1000 * parahotplug store functions to get called, at which point the
1001 * appropriate CONTROLVM message is retrieved from the list and responded
1005 #define PARAHOTPLUG_TIMEOUT_MS 2000
1008 * parahotplug_next_id() - generate unique int to match an outstanding
1009 * CONTROLVM message with a udev script /sys
1012 * Return: a unique integer value
/*
 * parahotplug_next_id() - hand out a process-unique id (monotonic atomic
 * counter) used to match an outstanding CONTROLVM message with the udev
 * script's /sys write-back.
 */
1015 parahotplug_next_id(void)
1017 static atomic_t id = ATOMIC_INIT(0);
1019 return atomic_inc_return(&id);
/*
 * parahotplug_next_expiration() - expiry timestamp for a queued request:
 * now + PARAHOTPLUG_TIMEOUT_MS, in jiffies.
 */
1029 static unsigned long
1030 parahotplug_next_expiration(void)
1032 return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
/*
 * parahotplug_request_create() - wrap @msg in a freshly kmalloc'd
 * parahotplug_request (id + expiration assigned here) so it can be queued.
 * Returns NULL on allocation failure (__GFP_NORETRY: fail fast).  Caller
 * owns the request and frees it via parahotplug_request_destroy().
 */
1043 static struct parahotplug_request *
1044 parahotplug_request_create(struct controlvm_message *msg)
1046 struct parahotplug_request *req;
1048 req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
1052 req->id = parahotplug_next_id();
1053 req->expiration = parahotplug_next_expiration();
1060 * parahotplug_request_destroy() - free a parahotplug_request
1061 * @req: the request to deallocate
/* parahotplug_request_destroy() - free a request made by _request_create(). */
1064 parahotplug_request_destroy(struct parahotplug_request *req)
/* Global list of in-flight disable requests, guarded by the spinlock. */
1069 static LIST_HEAD(parahotplug_request_list);
1070 static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* lock for above */
1073 * parahotplug_request_complete() - mark request as complete
1074 * @id: the id of the request
1075 * @active: indicates whether the request is assigned to active partition
1077 * Called from the /sys handler, which means the user script has
1078 * finished the enable/disable. Find the matching identifier, and
1079 * respond to the CONTROLVM message with success.
1081 * Return: 0 on success or -EINVAL on failure
1084 parahotplug_request_complete(int id, u16 active)
1086 struct list_head *pos;
1087 struct list_head *tmp;
1089 spin_lock(&parahotplug_request_list_lock);
1091 /* Look for a request matching "id". */
1092 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1093 struct parahotplug_request *req =
1094 list_entry(pos, struct parahotplug_request, list);
1095 if (req->id == id) {
1097 * Found a match. Remove it from the list and
/* Respond outside the lock, then free the request. */
1101 spin_unlock(&parahotplug_request_list_lock);
1102 req->msg.cmd.device_change_state.state.active = active;
1103 if (req->msg.hdr.flags.response_expected)
1104 controlvm_respond_physdev_changestate(
1105 &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
1106 req->msg.cmd.device_change_state.state);
1107 parahotplug_request_destroy(req);
/* No match: drop the lock and (presumably) return -EINVAL. */
1112 spin_unlock(&parahotplug_request_list_lock);
1117 * devicedisabled_store() - disables the hotplug device
1118 * @dev: sysfs interface variable not utilized in this function
1119 * @attr: sysfs interface variable not utilized in this function
1120 * @buf: buffer containing the device id
1121 * @count: the size of the buffer
1123 * The parahotplug/devicedisabled interface gets called by our support script
1124 * when an SR-IOV device has been shut down. The ID is passed to the script
1125 * and then passed back when the device has been removed.
1127 * Return: the size of the buffer for success or negative for error
/*
 * devicedisabled_store() - /sys write-back from the udev script after an
 * SR-IOV device has been disabled.  Parses the request id and completes the
 * matching request with active=0; note the error from
 * parahotplug_request_complete() is captured here (unlike the enable path).
 */
1129 static ssize_t devicedisabled_store(struct device *dev,
1130 struct device_attribute *attr,
1131 const char *buf, size_t count)
1136 if (kstrtouint(buf, 10, &id))
1139 err = parahotplug_request_complete(id, 0);
1144 static DEVICE_ATTR_WO(devicedisabled);
1147 * deviceenabled_store() - enables the hotplug device
1148 * @dev: sysfs interface variable not utilized in this function
1149 * @attr: sysfs interface variable not utilized in this function
1150 * @buf: buffer containing the device id
1151 * @count: the size of the buffer
1153 * The parahotplug/deviceenabled interface gets called by our support script
1154 * when an SR-IOV device has been recovered. The ID is passed to the script
1155 * and then passed back when the device has been brought back up.
1157 * Return: the size of the buffer for success or negative for error
1159 static ssize_t deviceenabled_store(struct device *dev,
1160 struct device_attribute *attr,
1161 const char *buf, size_t count)
1165 if (kstrtouint(buf, 10, &id))
/* Enable completions are best-effort; the result is ignored by design. */
1168 parahotplug_request_complete(id, 1);
1171 static DEVICE_ATTR_WO(deviceenabled);
1173 static struct attribute *visorchipset_install_attrs[] = {
1174 &dev_attr_toolaction.attr,
1175 &dev_attr_boottotool.attr,
1176 &dev_attr_error.attr,
1177 &dev_attr_textid.attr,
1178 &dev_attr_remaining_steps.attr,
1182 static const struct attribute_group visorchipset_install_group = {
1184 .attrs = visorchipset_install_attrs
1187 static struct attribute *visorchipset_parahotplug_attrs[] = {
1188 &dev_attr_devicedisabled.attr,
1189 &dev_attr_deviceenabled.attr,
1193 static struct attribute_group visorchipset_parahotplug_group = {
1194 .name = "parahotplug",
1195 .attrs = visorchipset_parahotplug_attrs
1198 static const struct attribute_group *visorchipset_dev_groups[] = {
1199 &visorchipset_install_group,
1200 &visorchipset_parahotplug_group,
1204 static void visorchipset_dev_release(struct device *dev)
1208 /* /sys/devices/platform/visorchipset */
1209 static struct platform_device visorchipset_platform_device = {
1210 .name = "visorchipset",
1212 .dev.groups = visorchipset_dev_groups,
1213 .dev.release = visorchipset_dev_release,
1217 * parahotplug_request_kickoff() - initiate parahotplug request
1218 * @req: the request to initiate
1220 * Cause uevent to run the user level script to do the disable/enable specified
1221 * in the parahotplug_request.
/*
 * parahotplug_request_kickoff() - fire the KOBJ_CHANGE uevent that runs the
 * userspace enable/disable script.  The environment carries the request id,
 * desired state, bus number, and the PCI device/function split out of
 * dev_no (upper bits = device, low 3 bits = function).
 */
1224 parahotplug_request_kickoff(struct parahotplug_request *req)
1226 struct controlvm_message_packet *cmd = &req->msg.cmd;
1227 char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1230 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1233 sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1234 sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1235 sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
1236 cmd->device_change_state.state.active);
1237 sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
1238 cmd->device_change_state.bus_no);
/* dev_no encodes PCI devfn: device = dev_no >> 3, function = dev_no & 7. */
1239 sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
1240 cmd->device_change_state.dev_no >> 3);
1241 sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
1242 cmd->device_change_state.dev_no & 0x7);
1244 kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1249 * parahotplug_process_message() - enables or disables a PCI device by kicking
1251 * @inmsg: the message indicating whether to enable or disable
/*
 * parahotplug_process_message() - entry point for parahotplug
 * DEVICE_CHANGESTATE messages.  Enables are fire-and-forget: kick the udev
 * script, respond success immediately, free the request.  Disables are
 * queued on parahotplug_request_list first and only responded to when the
 * script writes back to /sys (parahotplug_request_complete()).
 */
1254 parahotplug_process_message(struct controlvm_message *inmsg)
1256 struct parahotplug_request *req;
1258 req = parahotplug_request_create(inmsg);
1263 if (inmsg->cmd.device_change_state.state.active) {
1265 * For enable messages, just respond with success
1266 * right away. This is a bit of a hack, but there are
1267 * issues with the early enable messages we get (with
1268 * either the udev script not detecting that the device
1269 * is up, or not getting called at all). Fortunately
1270 * the messages that get lost don't matter anyway, as
1272 * devices are automatically enabled at
1275 parahotplug_request_kickoff(req);
1276 controlvm_respond_physdev_changestate
1278 CONTROLVM_RESP_SUCCESS,
1279 inmsg->cmd.device_change_state.state);
1280 parahotplug_request_destroy(req);
1283 * For disable messages, add the request to the
1284 * request list before kicking off the udev script. It
1285 * won't get responded to until the script has
1286 * indicated it's done.
1288 spin_lock(&parahotplug_request_list_lock);
1289 list_add_tail(&req->list, &parahotplug_request_list);
1290 spin_unlock(&parahotplug_request_list_lock);
1292 parahotplug_request_kickoff(req);
1297 * chipset_ready_uevent() - sends chipset_ready action
1299 * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1301 * Return: 0 on success, negative on failure
/*
 * chipset_ready_uevent() - emit KOBJ_ONLINE for the visorchipset platform
 * device and ack the CONTROLVM message when a response is expected.
 */
1304 chipset_ready_uevent(struct controlvm_message_header *msg_hdr)
1306 kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
1308 if (msg_hdr->flags.response_expected)
1309 return controlvm_respond(msg_hdr, CONTROLVM_RESP_SUCCESS);
/*
 * chipset_selftest_uevent() - emit a KOBJ_CHANGE uevent carrying
 * SPARSP_SELFTEST=1, then ack the message when a response is expected.
 */
1322 chipset_selftest_uevent(struct controlvm_message_header *msg_hdr)
1324 char env_selftest[20];
1325 char *envp[] = { env_selftest, NULL };
1327 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1328 kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1331 if (msg_hdr->flags.response_expected)
1332 return controlvm_respond(msg_hdr, CONTROLVM_RESP_SUCCESS);
/*
 * chipset_notready_uevent() - emit KOBJ_OFFLINE and ack the message when a
 * response is expected.  Mirror image of chipset_ready_uevent().
 */
1345 chipset_notready_uevent(struct controlvm_message_header *msg_hdr)
1347 kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
1349 if (msg_hdr->flags.response_expected)
1350 return controlvm_respond(msg_hdr, CONTROLVM_RESP_SUCCESS);
/*
 * issue_vmcall_io_controlvm_addr() - hypervisor VMCALL asking for the
 * controlvm channel's physical address and size.  The parameter block lives
 * on the stack; its physical address is passed to ISSUE_IO_VMCALL.  On
 * success the out-params receive the channel address and byte count.
 */
1355 static inline unsigned int
1356 issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
1358 struct vmcall_io_controlvm_addr_params params;
1359 int result = VMCALL_SUCCESS;
1362 physaddr = virt_to_phys(&params);
1363 ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
1364 if (VMCALL_SUCCESSFUL(result)) {
1365 *control_addr = params.address;
1366 *control_bytes = params.channel_bytes;
/*
 * controlvm_get_channel_address() - thin wrapper returning the channel's
 * physical address, or (presumably, tail truncated) 0 when the VMCALL fails.
 */
1371 static u64 controlvm_get_channel_address(void)
1376 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
/* Work function used when booting a kdump (crash) kernel: re-send the
 * chipset-init message, then replay the bus-create and device-create
 * controlvm messages that were saved in the controlvm channel, so the
 * storage device needed to write the dump comes back up.
 * NOTE(review): listing is elided; error-path returns between numbered
 * rows are not visible here.
 */
1383 setup_crash_devices_work_queue(struct work_struct *work)
1385 struct controlvm_message local_crash_bus_msg;
1386 struct controlvm_message local_crash_dev_msg;
1387 struct controlvm_message msg;
1388 u32 local_crash_msg_offset;
1389 u16 local_crash_msg_count;
1391 POSTCODE_LINUX(CRASH_DEV_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);
1393 /* send init chipset msg */
1394 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
1395 msg.cmd.init_chipset.bus_count = 23;
1396 msg.cmd.init_chipset.switch_count = 0;
1400 /* get saved message count */
1401 if (visorchannel_read(controlvm_channel,
1402 offsetof(struct spar_controlvm_channel_protocol,
1403 saved_crash_message_count),
1404 &local_crash_msg_count, sizeof(u16)) < 0) {
1405 POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
/* the channel must have saved exactly CONTROLVM_CRASHMSG_MAX messages */
1410 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
1411 POSTCODE_LINUX(CRASH_DEV_COUNT_FAILURE_PC, 0,
1412 local_crash_msg_count,
1417 /* get saved crash message offset */
1418 if (visorchannel_read(controlvm_channel,
1419 offsetof(struct spar_controlvm_channel_protocol,
1420 saved_crash_message_offset),
1421 &local_crash_msg_offset, sizeof(u32)) < 0) {
1422 POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
1427 /* read create device message for storage bus offset */
1428 if (visorchannel_read(controlvm_channel,
1429 local_crash_msg_offset,
1430 &local_crash_bus_msg,
1431 sizeof(struct controlvm_message)) < 0) {
1432 POSTCODE_LINUX(CRASH_DEV_RD_BUS_FAILURE_PC, 0, 0,
1437 /* read create device message for storage device */
1438 if (visorchannel_read(controlvm_channel,
1439 local_crash_msg_offset +
1440 sizeof(struct controlvm_message),
1441 &local_crash_dev_msg,
1442 sizeof(struct controlvm_message)) < 0) {
1443 POSTCODE_LINUX(CRASH_DEV_RD_DEV_FAILURE_PC, 0, 0,
1448 /* reuse IOVM create bus message */
1449 if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
1450 bus_create(&local_crash_bus_msg);
1452 POSTCODE_LINUX(CRASH_DEV_BUS_NULL_FAILURE_PC, 0, 0,
1457 /* reuse create device message for storage device */
1458 if (local_crash_dev_msg.cmd.create_device.channel_addr) {
1459 my_device_create(&local_crash_dev_msg);
1461 POSTCODE_LINUX(CRASH_DEV_DEV_NULL_FAILURE_PC, 0, 0,
1465 POSTCODE_LINUX(CRASH_DEV_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
/* Completion callback for CONTROLVM_BUS_CREATE: mark the bus created,
 * send the deferred controlvm response, then free the pending header.
 */
1469 bus_create_response(struct visor_device *bus_info, int response)
1472 bus_info->state.created = 1;
1474 bus_responder(CONTROLVM_BUS_CREATE, bus_info->pending_msg_hdr,
1477 kfree(bus_info->pending_msg_hdr);
1478 bus_info->pending_msg_hdr = NULL;
/* Completion callback for CONTROLVM_BUS_DESTROY: send the deferred
 * response and release the pending message header.
 */
1482 bus_destroy_response(struct visor_device *bus_info, int response)
1484 bus_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,
1487 kfree(bus_info->pending_msg_hdr);
1488 bus_info->pending_msg_hdr = NULL;
/* Completion callback for CONTROLVM_DEVICE_CREATE: mark the device
 * created, send the deferred response, free the pending header.
 */
1492 device_create_response(struct visor_device *dev_info, int response)
1495 dev_info->state.created = 1;
1497 device_responder(CONTROLVM_DEVICE_CREATE, dev_info->pending_msg_hdr,
1500 kfree(dev_info->pending_msg_hdr);
1501 dev_info->pending_msg_hdr = NULL;
/* Completion callback for CONTROLVM_DEVICE_DESTROY: send the deferred
 * response and release the pending message header.
 */
1505 device_destroy_response(struct visor_device *dev_info, int response)
1507 device_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
1510 kfree(dev_info->pending_msg_hdr);
1511 dev_info->pending_msg_hdr = NULL;
/* Completion callback for a device pause: report the change to the
 * standby segment state, then free the pending message header.
 */
1515 device_pause_response(struct visor_device *dev_info,
1518 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
1520 segment_state_standby);
1522 kfree(dev_info->pending_msg_hdr);
1523 dev_info->pending_msg_hdr = NULL;
/* Completion callback for a device resume: report the change to the
 * running segment state, then free the pending message header.
 */
1527 device_resume_response(struct visor_device *dev_info, int response)
1529 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
1531 segment_state_running);
1533 kfree(dev_info->pending_msg_hdr);
1534 dev_info->pending_msg_hdr = NULL;
/* mmap handler for the visorchipset char device: at offset
 * VISORCHIPSET_MMAP_CONTROLCHANOFFSET, map the GP control channel
 * (address read from the controlvm channel) into userspace with
 * remap_pfn_range.  Offsets must be page-aligned.
 * NOTE(review): listing is elided; lines between numbered rows are missing.
 */
1538 visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
1540 unsigned long physaddr = 0;
1541 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
1544 /* sv_enable_dfp(); */
1545 if (offset & (PAGE_SIZE - 1))
1546 return -ENXIO; /* need aligned offsets */
1549 case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
1550 vma->vm_flags |= VM_IO;
1551 if (!*file_controlvm_channel)
/* look up the GP control channel's address inside the controlvm channel */
1555 (*file_controlvm_channel,
1556 offsetof(struct spar_controlvm_channel_protocol,
1557 gp_control_channel),
1558 &addr, sizeof(addr));
1562 physaddr = (unsigned long)addr;
1563 if (remap_pfn_range(vma, vma->vm_start,
1564 physaddr >> PAGE_SHIFT,
1565 vma->vm_end - vma->vm_start,
1566 /*pgprot_noncached */
1567 (vma->vm_page_prot))) {
/* Query the guest's virtual RTC offset from the hypervisor via an IO
 * vmcall; the result is returned by the elided trailing lines.
 */
1577 static inline s64 issue_vmcall_query_guest_virtual_time_offset(void)
1579 u64 result = VMCALL_SUCCESS;
1582 ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr,
/* Ask the hypervisor to adjust physical time by @adjustment via an IO
 * vmcall; the vmcall result code is returned by the elided lines.
 */
1587 static inline int issue_vmcall_update_physical_time(u64 adjustment)
1589 int result = VMCALL_SUCCESS;
1591 ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result);
/* ioctl handler for the visorchipset char device: either copy the
 * guest's virtual RTC offset out to userspace, or read a time
 * adjustment from userspace and apply it via vmcall.
 * NOTE(review): listing is elided; lines between numbered rows are missing.
 */
1595 static long visorchipset_ioctl(struct file *file, unsigned int cmd,
1602 case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
1603 /* get the physical rtc offset */
1604 vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
1605 if (copy_to_user((void __user *)arg, &vrtc_offset,
1606 sizeof(vrtc_offset))) {
1610 case VMCALL_UPDATE_PHYSICAL_TIME:
1611 if (copy_from_user(&adjustment, (void __user *)arg,
1612 sizeof(adjustment))) {
1615 return issue_vmcall_update_physical_time(adjustment);
/* file_operations for the visorchipset character device */
1621 static const struct file_operations visorchipset_fops = {
1622 .owner = THIS_MODULE,
1623 .open = visorchipset_open,
1626 .unlocked_ioctl = visorchipset_ioctl,
1627 .release = visorchipset_release,
1628 .mmap = visorchipset_mmap,
/* Register the visorchipset char device.  If major_dev has major 0, a
 * dynamic major is allocated; otherwise the caller-supplied static
 * region is registered.  On cdev_add failure the region is released.
 */
1632 visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
1636 file_controlvm_channel = controlvm_channel;
1637 cdev_init(&file_cdev, &visorchipset_fops);
1638 file_cdev.owner = THIS_MODULE;
1639 if (MAJOR(major_dev) == 0) {
1640 rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
1641 /* dynamic major device number registration required */
1645 /* static major device number registration required */
1646 rc = register_chrdev_region(major_dev, 1, "visorchipset");
1650 rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
1652 unregister_chrdev_region(major_dev, 1);
/* Tear down the char device registered by visorchipset_file_init(). */
1659 visorchipset_file_cleanup(dev_t major_dev)
1662 cdev_del(&file_cdev);
1663 file_cdev.ops = NULL;
1664 unregister_chrdev_region(major_dev, 1);
1667 static struct parser_context *
/* Allocate a parser_context and copy @bytes of controlvm payload from
 * @addr into it.  @local selects direct virt access (payload lies in
 * OS-controlled memory, i.e. a test message) vs. memremap.  *retry is
 * set when the global payload-buffer budget is exhausted and the caller
 * should try again later.
 * NOTE(review): listing is elided; lines between numbered rows are missing.
 */
1668 parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
1670 int allocbytes = sizeof(struct parser_context) + bytes;
1671 struct parser_context *ctx;
1676 * alloc an extra '\0' byte to ensure the payload is
/* throttle: never buffer more than MAX_CONTROLVM_PAYLOAD_BYTES at once */
1680 if ((controlvm_payload_bytes_buffered + bytes)
1681 > MAX_CONTROLVM_PAYLOAD_BYTES) {
1685 ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
1691 ctx->allocbytes = allocbytes;
1692 ctx->param_bytes = bytes;
1694 ctx->bytes_remaining = 0;
1695 ctx->byte_stream = false;
/* local payload: must lie below high_memory to be directly addressable */
1699 if (addr > virt_to_phys(high_memory - 1))
1700 goto err_finish_ctx;
1701 p = __va((unsigned long)(addr));
1702 memcpy(ctx->data, p, bytes);
1704 void *mapping = memremap(addr, bytes, MEMREMAP_WB);
1707 goto err_finish_ctx;
1708 memcpy(ctx->data, mapping, bytes);
1712 ctx->byte_stream = true;
1713 controlvm_payload_bytes_buffered += ctx->param_bytes;
1723 * handle_command() - process a controlvm message
1724 * @inmsg: the message to process
1725 * @channel_addr: address of the controlvm channel
1728 * false - this function will return false only in the case where the
1729 * controlvm message was NOT processed, but processing must be
1730 * retried before reading the next controlvm message; a
1731 * scenario where this can occur is when we need to throttle
1732 * the allocation of memory in which to copy out controlvm
1734 * true - processing of the controlvm message completed,
1735 * either successfully or with an error
1738 handle_command(struct controlvm_message inmsg, u64 channel_addr)
1740 struct controlvm_message_packet *cmd = &inmsg.cmd;
1743 struct parser_context *parser_ctx = NULL;
1745 struct controlvm_message ackmsg;
1747 /* create parsing context if necessary */
1748 local_addr = (inmsg.hdr.flags.test_message == 1);
1749 if (channel_addr == 0)
1751 parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
1752 parm_bytes = inmsg.hdr.payload_bytes;
1755 * Parameter and channel addresses within test messages actually lie
1756 * within our OS-controlled memory. We need to know that, because it
1757 * makes a difference in how we compute the virtual address.
1759 if (parm_addr && parm_bytes) {
1763 parser_init_byte_stream(parm_addr, parm_bytes,
1764 local_addr, &retry);
/* throttled: caller must retry this same message later */
1765 if (!parser_ctx && retry)
/* ack the message so the sender can reuse its slot in the queue */
1770 controlvm_init_response(&ackmsg, &inmsg.hdr,
1771 CONTROLVM_RESP_SUCCESS);
1772 if (controlvm_channel)
1773 visorchannel_signalinsert(controlvm_channel,
1774 CONTROLVM_QUEUE_ACK,
/* dispatch on the message id */
1777 switch (inmsg.hdr.id) {
1778 case CONTROLVM_CHIPSET_INIT:
1779 chipset_init(&inmsg);
1781 case CONTROLVM_BUS_CREATE:
1784 case CONTROLVM_BUS_DESTROY:
1785 bus_destroy(&inmsg);
1787 case CONTROLVM_BUS_CONFIGURE:
1788 bus_configure(&inmsg, parser_ctx);
1790 case CONTROLVM_DEVICE_CREATE:
1791 my_device_create(&inmsg);
1793 case CONTROLVM_DEVICE_CHANGESTATE:
1794 if (cmd->device_change_state.flags.phys_device) {
1795 parahotplug_process_message(&inmsg);
1798 * save the hdr and cmd structures for later use
1799 * when sending back the response to Command
1801 my_device_changestate(&inmsg);
1805 case CONTROLVM_DEVICE_DESTROY:
1806 my_device_destroy(&inmsg);
1808 case CONTROLVM_DEVICE_CONFIGURE:
1809 /* no op for now, just send a respond that we passed */
1810 if (inmsg.hdr.flags.response_expected)
1811 controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1813 case CONTROLVM_CHIPSET_READY:
1814 chipset_ready_uevent(&inmsg.hdr);
1816 case CONTROLVM_CHIPSET_SELFTEST:
1817 chipset_selftest_uevent(&inmsg.hdr);
1819 case CONTROLVM_CHIPSET_STOP:
1820 chipset_notready_uevent(&inmsg.hdr);
/* unknown id: tell the sender we did not recognize the message */
1823 if (inmsg.hdr.flags.response_expected)
1825 (&inmsg.hdr, -CONTROLVM_RESP_ID_UNKNOWN);
/* release any payload parsing context we created above */
1830 parser_done(parser_ctx);
1837 * read_controlvm_event() - retrieves the next message from the
1838 * CONTROLVM_QUEUE_EVENT queue in the controlvm
1840 * @msg: pointer to the retrieved message
1842 * Return: true if a valid message was retrieved or false otherwise
1845 read_controlvm_event(struct controlvm_message *msg)
1847 if (!visorchannel_signalremove(controlvm_channel,
1848 CONTROLVM_QUEUE_EVENT, msg)) {
/* only messages flagged as test messages are accepted from this queue */
1850 if (msg->hdr.flags.test_message == 1)
1858 * parahotplug_process_list() - remove any request from the list that's been on
1859 * there too long and respond with an error
/* Fix: "¶hotplug_…" was HTML-entity mojibake (&para was unescaped);
 * the spin_lock/unlock and list arguments are restored to
 * &parahotplug_request_list_lock / &parahotplug_request_list.
 * NOTE(review): listing is elided; lines between numbered rows are missing.
 */
1862 parahotplug_process_list(void)
1864 struct list_head *pos;
1865 struct list_head *tmp;
1867 spin_lock(&parahotplug_request_list_lock);
1869 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1870 struct parahotplug_request *req =
1871 list_entry(pos, struct parahotplug_request, list);
/* entries that have not yet expired are kept */
1873 if (!time_after_eq(jiffies, req->expiration))
/* timed out: report that the udev event never completed */
1877 if (req->msg.hdr.flags.response_expected)
1878 controlvm_respond_physdev_changestate(
1880 CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT,
1881 req->msg.cmd.device_change_state.state);
1882 parahotplug_request_destroy(req);
1885 spin_unlock(&parahotplug_request_list_lock);
/* Periodic worker: drain the controlvm RESPONSE queue, then process
 * EVENT-queue messages (resuming a previously throttled message first),
 * age out stale parahotplug requests, and adapt the polling interval —
 * slow after MIN_IDLE_SECONDS of quiet, fast while messages arrive.
 * Re-queues itself with the chosen poll_jiffies delay.
 */
1889 controlvm_periodic_work(struct work_struct *work)
1891 struct controlvm_message inmsg;
1892 bool got_command = false;
1893 bool handle_command_failed = false;
/* drain any queued responses first */
1895 while (!visorchannel_signalremove(controlvm_channel,
1896 CONTROLVM_QUEUE_RESPONSE,
1900 if (controlvm_pending_msg_valid) {
1902 * we throttled processing of a prior
1903 * msg, so try to process it again
1904 * rather than reading a new one
1906 inmsg = controlvm_pending_msg;
1907 controlvm_pending_msg_valid = false;
1910 got_command = read_controlvm_event(&inmsg);
1914 handle_command_failed = false;
1915 while (got_command && (!handle_command_failed)) {
1916 most_recent_message_jiffies = jiffies;
1917 if (handle_command(inmsg,
1918 visorchannel_get_physaddr
1919 (controlvm_channel)))
1920 got_command = read_controlvm_event(&inmsg);
1923 * this is a scenario where throttling
1924 * is required, but probably NOT an
1925 * error...; we stash the current
1926 * controlvm msg so we will attempt to
1927 * reprocess it on our next loop
1929 handle_command_failed = true;
1930 controlvm_pending_msg = inmsg;
1931 controlvm_pending_msg_valid = true;
1935 /* parahotplug_worker */
1936 parahotplug_process_list();
1938 if (time_after(jiffies,
1939 most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
1941 * it's been longer than MIN_IDLE_SECONDS since we
1942 * processed our last controlvm message; slow down the
1945 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
1946 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
1948 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
1949 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
/* re-arm ourselves at the (possibly new) polling rate */
1952 schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
/* ACPI .add callback: locate and attach the controlvm channel, register
 * the char device, start the periodic worker (or the crash-device
 * worker when booting a kdump kernel), register the platform device,
 * and initialize visorbus.  Unwinds in reverse order on failure.
 */
1956 visorchipset_init(struct acpi_device *acpi_device)
1960 uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
1962 addr = controlvm_get_channel_address();
1966 controlvm_channel = visorchannel_create_with_lock(addr, 0,
1968 if (!controlvm_channel)
/* refuse to run against a channel we are not a compatible client of */
1971 if (!SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
1972 visorchannel_get_header(controlvm_channel)))
1973 goto error_destroy_channel;
1975 major_dev = MKDEV(visorchipset_major, 0);
1976 err = visorchipset_file_init(major_dev, &controlvm_channel);
1978 goto error_destroy_channel;
1980 /* if booting in a crash kernel */
1981 if (is_kdump_kernel())
1982 INIT_DELAYED_WORK(&periodic_controlvm_work,
1983 setup_crash_devices_work_queue);
1985 INIT_DELAYED_WORK(&periodic_controlvm_work,
1986 controlvm_periodic_work);
1988 most_recent_message_jiffies = jiffies;
1989 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
1990 schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
1992 visorchipset_platform_device.dev.devt = major_dev;
1993 if (platform_device_register(&visorchipset_platform_device) < 0) {
1994 POSTCODE_LINUX(DEVICE_REGISTER_FAILURE_PC, 0, 0,
1997 goto error_cancel_work;
1999 POSTCODE_LINUX(CHIPSET_INIT_SUCCESS_PC, 0, 0, DIAG_SEVERITY_PRINT);
2001 err = visorbus_init();
2003 goto error_unregister;
/* error unwind: undo each successful step in reverse order */
2008 platform_device_unregister(&visorchipset_platform_device);
2011 cancel_delayed_work_sync(&periodic_controlvm_work);
2012 visorchipset_file_cleanup(major_dev);
2014 error_destroy_channel:
2015 visorchannel_destroy(controlvm_channel);
2018 POSTCODE_LINUX(CHIPSET_INIT_FAILURE_PC, 0, err, DIAG_SEVERITY_ERR);
/* ACPI .remove callback: stop the periodic worker and tear down the
 * channel, char device, and platform device (reverse of init order).
 */
2023 visorchipset_exit(struct acpi_device *acpi_device)
2025 POSTCODE_LINUX(DRIVER_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
2029 cancel_delayed_work_sync(&periodic_controlvm_work);
2031 visorchannel_destroy(controlvm_channel);
2033 visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
2034 platform_device_unregister(&visorchipset_platform_device);
2035 POSTCODE_LINUX(DRIVER_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
/* ACPI glue: s-Par exposes the chipset as an ACPI device; .add/.remove
 * route to visorchipset_init()/visorchipset_exit().
 */
2040 static const struct acpi_device_id unisys_device_ids[] = {
2045 static struct acpi_driver unisys_acpi_driver = {
2046 .name = "unisys_acpi",
2047 .class = "unisys_acpi_class",
2048 .owner = THIS_MODULE,
2049 .ids = unisys_device_ids,
2051 .add = visorchipset_init,
2052 .remove = visorchipset_exit,
2056 MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
/* Detect s-Par: when running under a hypervisor, CPUID leaf
 * UNISYS_SPAR_LEAF_ID must return the "UnisysSpar64" signature split
 * across ebx/ecx/edx.  Returns nonzero only when all three match.
 */
2058 static __init uint32_t visorutil_spar_detect(void)
2060 unsigned int eax, ebx, ecx, edx;
2062 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
2064 cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
2065 return (ebx == UNISYS_SPAR_ID_EBX) &&
2066 (ecx == UNISYS_SPAR_ID_ECX) &&
2067 (edx == UNISYS_SPAR_ID_EDX);
/* Module init: bail out unless running on s-Par, then register the ACPI
 * driver that brings the chipset up.
 */
2073 static int init_unisys(void)
2077 if (!visorutil_spar_detect())
2080 result = acpi_bus_register_driver(&unisys_acpi_driver);
2084 pr_info("Unisys Visorchipset Driver Loaded.\n");
/* Module exit: unregister the ACPI driver (triggers visorchipset_exit). */
2088 static void exit_unisys(void)
2090 acpi_bus_unregister_driver(&unisys_acpi_driver);
/* visorchipset_major is exposed to userspace as module parameter
 * "major" (mode 0444, read-only).  MODULE_PARM_DESC must be keyed to
 * the user-visible name "major" — not the internal variable name —
 * so modinfo associates the description with the actual parameter.
 */
2093 module_param_named(major, visorchipset_major, int, 0444);
2094 MODULE_PARM_DESC(major,
2095 "major device number to use for the device node");
/* Standard module entry/exit hooks and metadata. */
2097 module_init(init_unisys);
2098 module_exit(exit_unisys);
2100 MODULE_AUTHOR("Unisys");
2101 MODULE_LICENSE("GPL");
2102 MODULE_DESCRIPTION("s-Par visorbus driver for virtual device buses");