3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
19 #include "visorchipset.h"
20 #include "procobjecttree.h"
21 #include "visorchannel.h"
22 #include "periodic_work.h"
26 #include "controlvmcompletionstatus.h"
27 #include "guestlinuxdebug.h"
29 #include <linux/nls.h>
30 #include <linux/netdevice.h>
31 #include <linux/platform_device.h>
32 #include <linux/uuid.h>
/*
 * Module-wide compile-time constants and the controlvm-channel polling
 * configuration.
 *
 * NOTE(review): every line in this excerpt begins with a stray decimal
 * prefix (the original file's line number fused into the text) and a
 * number of original lines appear to be missing; the text is preserved
 * untouched here, with review comments added only.
 */
34 #define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
/* name of the physical NIC used for the vnic loopback test */
35 #define TEST_VNIC_PHYSITF "eth0" /* physical network itf for
36 * vnic loopback test */
37 #define TEST_VNIC_SWITCHNO 1
38 #define TEST_VNIC_BUSNO 9
/* size limits for names / IP strings handled by this driver */
40 #define MAX_NAME_SIZE 128
41 #define MAX_IP_SIZE 50
42 #define MAXOUTSTANDINGCHANNELCOMMAND 256
/* poll periods, in jiffies, for the two controlvm polling modes */
43 #define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
44 #define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
46 /* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
47 * we switch to slow polling mode. As soon as we get a controlvm
48 * message, we switch back to fast polling mode.
50 #define MIN_IDLE_SECONDS 10
/* current poll period; starts in fast mode */
51 static ulong poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
52 static ulong most_recent_message_jiffies; /* when we got our last
53 * controlvm message */
/* set non-zero once a busdev server/client has registered notifiers */
62 static int serverregistered;
63 static int clientregistered;
65 #define MAX_CHIPSET_EVENTS 2
/* per-event flags consumed by check_chipset_events()/clear_chipset_events() */
66 static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
/* periodic work item + dedicated workqueue for controlvm polling */
68 static struct delayed_work periodic_controlvm_work;
69 static struct workqueue_struct *periodic_controlvm_workqueue;
/* serializes access to the busdev notifier tables (taken via down()) */
70 static DEFINE_SEMAPHORE(notifier_lock);
/* saved controlvm message headers for deferred responses */
72 static struct controlvm_message_header g_diag_msg_hdr;
73 static struct controlvm_message_header g_chipset_msg_hdr;
74 static struct controlvm_message_header g_del_dump_msg_hdr;
75 static const uuid_le spar_diag_pool_channel_protocol_uuid =
76 SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
77 /* 0xffffff is an invalid Bus/Device number */
78 static ulong g_diagpool_bus_no = 0xffffff;
79 static ulong g_diagpool_dev_no = 0xffffff;
/* stashed DEVICE_CHANGESTATE packet echoed back by controlvm_respond() */
80 static struct controlvm_message_packet g_devicechangestate_packet;
82 /* Only VNIC and VHBA channels are sent to visorclientbus (aka
/* true when the channel-type GUID is the VNIC or VHBA protocol GUID */
85 #define FOR_VISORHACKBUS(channel_type_guid) \
86 (((uuid_le_cmp(channel_type_guid,\
87 spar_vnic_channel_protocol_uuid) == 0) ||\
88 (uuid_le_cmp(channel_type_guid,\
89 spar_vhba_channel_protocol_uuid) == 0)))
90 #define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))
92 #define is_diagpool_channel(channel_type_guid) \
93 (uuid_le_cmp(channel_type_guid,\
94 spar_diag_pool_channel_protocol_uuid) == 0)
/* lists of known buses and devices (visorchipset_bus_info / _device_info) */
96 static LIST_HEAD(bus_info_list);
97 static LIST_HEAD(dev_info_list);
/* handle to the controlvm channel itself */
99 static struct visorchannel *controlvm_channel;
101 /* Manages the request payload in the controlvm channel */
102 static struct controlvm_payload_info {
103 u8 __iomem *ptr; /* pointer to base address of payload pool */
104 u64 offset; /* offset from beginning of controlvm
105 * channel to beginning of payload * pool */
106 u32 bytes; /* number of bytes in payload pool */
107 } controlvm_payload_info;
109 /* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
110 * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
/* NOTE(review): the end of this struct (and its variable name) is not
 * visible in this excerpt — lines appear to be missing below. */
112 static struct livedump_info {
113 struct controlvm_message_header dumpcapture_header;
114 struct controlvm_message_header gettextdump_header;
115 struct controlvm_message_header dumpcomplete_header;
116 BOOL gettextdump_outstanding;
119 atomic_t buffers_in_use;
123 /* The following globals are used to handle the scenario where we are unable to
124 * offload the payload from a controlvm message due to memory requirements. In
125 * this scenario, we simply stash the controlvm message, then attempt to
126 * process it again the next time controlvm_periodic_work() runs.
128 static struct controlvm_message controlvm_pending_msg;
129 static BOOL controlvm_pending_msg_valid = FALSE;
131 /* Pool of struct putfile_buffer_entry, for keeping track of pending (incoming)
132 * TRANSMIT_FILE PutFile payloads.
134 static struct kmem_cache *putfile_buffer_list_pool;
135 static const char putfile_buffer_list_pool_name[] =
136 "controlvm_putfile_buffer_list_pool";
138 /* This identifies a data buffer that has been received via a controlvm messages
139 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
141 struct putfile_buffer_entry {
142 struct list_head next; /* putfile_buffer_entry list */
143 struct parser_context *parser_ctx; /* points to input data buffer */
146 /* List of struct putfile_request *, via next_putfile_request member.
147 * Each entry in this list identifies an outstanding TRANSMIT_FILE
150 static LIST_HEAD(putfile_request_list);
152 /* This describes a buffer and its current state of transfer (e.g., how many
153 * bytes have already been supplied as putfile data, and how many bytes are
154 * remaining) for a putfile_request.
156 struct putfile_active_buffer {
157 /* a payload from a controlvm message, containing a file data buffer */
158 struct parser_context *parser_ctx;
159 /* points within data area of parser_ctx to next byte of data */
161 /* # bytes left from <pnext> to the end of this data buffer */
162 size_t bytes_remaining;
/* Magic signature stamped into every struct putfile_request (sig field). */
165 #define PUTFILE_REQUEST_SIG 0x0906101302281211
166 /* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
167 * conversation. Structs of this type are dynamically linked into
168 * <Putfile_request_list>.
170 struct putfile_request {
171 u64 sig; /* PUTFILE_REQUEST_SIG */
173 /* header from original TransmitFile request */
174 struct controlvm_message_header controlvm_header;
175 u64 file_request_number; /* from original TransmitFile request */
177 /* link to next struct putfile_request */
178 struct list_head next_putfile_request;
180 /* most-recent sequence number supplied via a controlvm message */
181 u64 data_sequence_number;
183 /* head of putfile_buffer_entry list, which describes the data to be
184 * supplied as putfile data;
185 * - this list is added to when controlvm messages come in that supply
187 * - this list is removed from via the hotplug program that is actually
188 * consuming these buffers to write as file data */
189 struct list_head input_buffer_list;
190 spinlock_t req_list_lock; /* lock for input_buffer_list */
192 /* waiters for input_buffer_list to go non-empty */
193 wait_queue_head_t input_buffer_wq;
195 /* data not yet read within current putfile_buffer_entry */
196 struct putfile_active_buffer active_buf;
198 /* <0 = failed, 0 = in-progress, >0 = successful; */
199 /* note that this must be set with req_list_lock, and if you set <0, */
200 /* it is your responsibility to also free up all of the other objects */
201 /* in this struct (like input_buffer_list, active_buf.parser_ctx) */
202 /* before releasing the lock */
203 int completion_status;
/* count of putfile buffers currently allocated from the kmem cache */
206 static atomic_t visorchipset_cache_buffers_in_use = ATOMIC_INIT(0);
/* A queued parahotplug (device enable/disable) request awaiting userspace. */
208 struct parahotplug_request {
209 struct list_head list;
/* jiffies-based deadline after which the request is considered stale
 * — assumption from the field name; TODO confirm against users */
211 unsigned long expiration;
212 struct controlvm_message msg;
215 static LIST_HEAD(parahotplug_request_list);
216 static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* lock for above */
217 static void parahotplug_process_list(void);
219 /* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
220 * CONTROLVM_REPORTEVENT.
/* notifier callbacks registered by the busdev server/client drivers */
222 static struct visorchipset_busdev_notifiers busdev_server_notifiers;
223 static struct visorchipset_busdev_notifiers busdev_client_notifiers;
225 static void bus_create_response(ulong bus_no, int response);
226 static void bus_destroy_response(ulong bus_no, int response);
227 static void device_create_response(ulong bus_no, ulong dev_no, int response);
228 static void device_destroy_response(ulong bus_no, ulong dev_no, int response);
229 static void device_resume_response(ulong bus_no, ulong dev_no, int response);
/* responder table handed back to registrants of the busdev interface */
231 static struct visorchipset_busdev_responders busdev_responders = {
232 .bus_create = bus_create_response,
233 .bus_destroy = bus_destroy_response,
234 .device_create = device_create_response,
235 .device_destroy = device_destroy_response,
236 .device_pause = visorchipset_device_pause_response,
237 .device_resume = device_resume_response,
240 /* info for /dev/visorchipset */
241 static dev_t major_dev = -1; /**< indicates major num for device */
243 /* prototypes for attributes */
/* sysfs attributes under /sys/devices/platform/visorchipset; each
 * show/store pair below is declared, then bound via DEVICE_ATTR_RW/WO */
244 static ssize_t toolaction_show(struct device *dev,
245 struct device_attribute *attr, char *buf);
246 static ssize_t toolaction_store(struct device *dev,
247 struct device_attribute *attr,
248 const char *buf, size_t count);
249 static DEVICE_ATTR_RW(toolaction);
251 static ssize_t boottotool_show(struct device *dev,
252 struct device_attribute *attr, char *buf);
253 static ssize_t boottotool_store(struct device *dev,
254 struct device_attribute *attr, const char *buf,
256 static DEVICE_ATTR_RW(boottotool);
258 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
260 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
261 const char *buf, size_t count);
262 static DEVICE_ATTR_RW(error);
264 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
266 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
267 const char *buf, size_t count);
268 static DEVICE_ATTR_RW(textid);
270 static ssize_t remaining_steps_show(struct device *dev,
271 struct device_attribute *attr, char *buf);
272 static ssize_t remaining_steps_store(struct device *dev,
273 struct device_attribute *attr,
274 const char *buf, size_t count);
275 static DEVICE_ATTR_RW(remaining_steps);
/* write-only attributes used by guest tools / hotplug scripts */
277 static ssize_t chipsetready_store(struct device *dev,
278 struct device_attribute *attr,
279 const char *buf, size_t count);
280 static DEVICE_ATTR_WO(chipsetready);
282 static ssize_t devicedisabled_store(struct device *dev,
283 struct device_attribute *attr,
284 const char *buf, size_t count);
285 static DEVICE_ATTR_WO(devicedisabled);
287 static ssize_t deviceenabled_store(struct device *dev,
288 struct device_attribute *attr,
289 const char *buf, size_t count);
290 static DEVICE_ATTR_WO(deviceenabled);
/* attribute groups: "install" (default group), "guest", "parahotplug" */
292 static struct attribute *visorchipset_install_attrs[] = {
293 &dev_attr_toolaction.attr,
294 &dev_attr_boottotool.attr,
295 &dev_attr_error.attr,
296 &dev_attr_textid.attr,
297 &dev_attr_remaining_steps.attr,
301 static struct attribute_group visorchipset_install_group = {
303 .attrs = visorchipset_install_attrs
306 static struct attribute *visorchipset_guest_attrs[] = {
307 &dev_attr_chipsetready.attr,
311 static struct attribute_group visorchipset_guest_group = {
313 .attrs = visorchipset_guest_attrs
316 static struct attribute *visorchipset_parahotplug_attrs[] = {
317 &dev_attr_devicedisabled.attr,
318 &dev_attr_deviceenabled.attr,
322 static struct attribute_group visorchipset_parahotplug_group = {
323 .name = "parahotplug",
324 .attrs = visorchipset_parahotplug_attrs
327 static const struct attribute_group *visorchipset_dev_groups[] = {
328 &visorchipset_install_group,
329 &visorchipset_guest_group,
330 &visorchipset_parahotplug_group,
334 /* /sys/devices/platform/visorchipset */
335 static struct platform_device visorchipset_platform_device = {
336 .name = "visorchipset",
338 .dev.groups = visorchipset_dev_groups,
341 /* Function prototypes */
342 static void controlvm_respond(struct controlvm_message_header *msg_hdr,
344 static void controlvm_respond_chipset_init(
345 struct controlvm_message_header *msg_hdr, int response,
346 enum ultra_chipset_feature features);
347 static void controlvm_respond_physdev_changestate(
348 struct controlvm_message_header *msg_hdr, int response,
349 struct spar_segment_state state);
/* toolaction_show() - read the one-byte tool_action field out of the
 * controlvm channel and format it as a decimal string for sysfs.
 * NOTE(review): local-variable declarations and braces are missing from
 * this excerpt for each handler below. */
351 static ssize_t toolaction_show(struct device *dev,
352 struct device_attribute *attr,
357 visorchannel_read(controlvm_channel,
358 offsetof(struct spar_controlvm_channel_protocol,
359 tool_action), &tool_action, sizeof(u8));
360 return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
/* toolaction_store() - parse a decimal u8 from sysfs input and write it
 * to the channel's tool_action field. */
363 static ssize_t toolaction_store(struct device *dev,
364 struct device_attribute *attr,
365 const char *buf, size_t count)
370 if (kstrtou8(buf, 10, &tool_action) != 0)
373 ret = visorchannel_write(controlvm_channel,
374 offsetof(struct spar_controlvm_channel_protocol,
376 &tool_action, sizeof(u8));
/* boottotool_show() - report the boot_to_tool flag from the channel's
 * efi_spar_ind structure. */
383 static ssize_t boottotool_show(struct device *dev,
384 struct device_attribute *attr,
387 struct efi_spar_indication efi_spar_indication;
389 visorchannel_read(controlvm_channel,
390 offsetof(struct spar_controlvm_channel_protocol,
391 efi_spar_ind), &efi_spar_indication,
392 sizeof(struct efi_spar_indication));
393 return scnprintf(buf, PAGE_SIZE, "%u\n",
394 efi_spar_indication.boot_to_tool);
/* boottotool_store() - parse an int and write it back as the
 * boot_to_tool flag in the channel's efi_spar_ind structure. */
397 static ssize_t boottotool_store(struct device *dev,
398 struct device_attribute *attr,
399 const char *buf, size_t count)
402 struct efi_spar_indication efi_spar_indication;
404 if (kstrtoint(buf, 10, &val) != 0)
407 efi_spar_indication.boot_to_tool = val;
408 ret = visorchannel_write(controlvm_channel,
409 offsetof(struct spar_controlvm_channel_protocol,
410 efi_spar_ind), &(efi_spar_indication),
411 sizeof(struct efi_spar_indication));
/* error_show() / error_store() - read/write the channel's u32
 * installation error field. */
418 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
423 visorchannel_read(controlvm_channel,
424 offsetof(struct spar_controlvm_channel_protocol,
426 &error, sizeof(u32));
427 return scnprintf(buf, PAGE_SIZE, "%i\n", error);
430 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
431 const char *buf, size_t count)
436 if (kstrtou32(buf, 10, &error) != 0)
439 ret = visorchannel_write(controlvm_channel,
440 offsetof(struct spar_controlvm_channel_protocol,
442 &error, sizeof(u32));
/* textid_show() / textid_store() - read/write the channel's u32
 * installation_text_id field. */
448 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
453 visorchannel_read(controlvm_channel,
454 offsetof(struct spar_controlvm_channel_protocol,
455 installation_text_id),
456 &text_id, sizeof(u32));
457 return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
460 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
461 const char *buf, size_t count)
466 if (kstrtou32(buf, 10, &text_id) != 0)
469 ret = visorchannel_write(controlvm_channel,
470 offsetof(struct spar_controlvm_channel_protocol,
471 installation_text_id),
472 &text_id, sizeof(u32));
/* remaining_steps_show() / remaining_steps_store() - read/write the
 * channel's u16 installation_remaining_steps field. */
478 static ssize_t remaining_steps_show(struct device *dev,
479 struct device_attribute *attr, char *buf)
483 visorchannel_read(controlvm_channel,
484 offsetof(struct spar_controlvm_channel_protocol,
485 installation_remaining_steps),
486 &remaining_steps, sizeof(u16));
487 return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
490 static ssize_t remaining_steps_store(struct device *dev,
491 struct device_attribute *attr,
492 const char *buf, size_t count)
497 if (kstrtou16(buf, 10, &remaining_steps) != 0)
500 ret = visorchannel_write(controlvm_channel,
501 offsetof(struct spar_controlvm_channel_protocol,
502 installation_remaining_steps),
503 &remaining_steps, sizeof(u16));
/* bus_info_clear() - free a bus_info's owned strings, then zero the
 * whole structure (callback-style, takes a void *). */
510 bus_info_clear(void *v)
512 struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) (v);
517 kfree(p->description);
518 p->description = NULL;
520 p->state.created = 0;
521 memset(p, 0, sizeof(struct visorchipset_bus_info));
/* dev_info_clear() - zero a device_info structure. */
525 dev_info_clear(void *v)
527 struct visorchipset_device_info *p =
528 (struct visorchipset_device_info *)(v);
530 p->state.created = 0;
531 memset(p, 0, sizeof(struct visorchipset_device_info));
/* check_chipset_events() - AND together all chipset_events flags to
 * decide whether a response message should be sent. */
535 check_chipset_events(void)
539 /* Check events to determine if response should be sent */
540 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
541 send_msg &= chipset_events[i];
/* clear_chipset_events() - reset every chipset_events flag to 0. */
546 clear_chipset_events(void)
549 /* Clear chipset_events */
550 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
551 chipset_events[i] = 0;
/* visorchipset_register_busdev_server() - install (or clear, when
 * notifiers is NULL — presumed; lines are missing here) the server-side
 * busdev notifier callbacks under notifier_lock, hand back the shared
 * responder table, and fill in driver identification info. */
555 visorchipset_register_busdev_server(
556 struct visorchipset_busdev_notifiers *notifiers,
557 struct visorchipset_busdev_responders *responders,
558 struct ultra_vbus_deviceinfo *driver_info)
560 down(&notifier_lock);
562 memset(&busdev_server_notifiers, 0,
563 sizeof(busdev_server_notifiers));
564 serverregistered = 0; /* clear flag */
566 busdev_server_notifiers = *notifiers;
567 serverregistered = 1; /* set flag */
570 *responders = busdev_responders;
572 bus_device_info_init(driver_info, "chipset", "visorchipset",
577 EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);
/* visorchipset_register_busdev_client() - client-side counterpart of
 * the function above; same locking and responder hand-off. */
580 visorchipset_register_busdev_client(
581 struct visorchipset_busdev_notifiers *notifiers,
582 struct visorchipset_busdev_responders *responders,
583 struct ultra_vbus_deviceinfo *driver_info)
585 down(&notifier_lock);
587 memset(&busdev_client_notifiers, 0,
588 sizeof(busdev_client_notifiers));
589 clientregistered = 0; /* clear flag */
591 busdev_client_notifiers = *notifiers;
592 clientregistered = 1; /* set flag */
595 *responders = busdev_responders;
597 bus_device_info_init(driver_info, "chipset(bolts)",
598 "visorchipset", VERSION, NULL);
601 EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);
/* cleanup_controlvm_structures() - unlink (and presumably free — the
 * loop bodies are partially missing here) every entry on the global
 * bus_info_list and dev_info_list. */
604 cleanup_controlvm_structures(void)
606 struct visorchipset_bus_info *bi, *tmp_bi;
607 struct visorchipset_device_info *di, *tmp_di;
609 list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
611 list_del(&bi->entry);
615 list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
617 list_del(&di->entry);
/* chipset_init() - handle the CONTROLVM_CHIPSET_INIT message: guard
 * against double init, negotiate feature bits with Command, and send
 * a response if one was requested. */
623 chipset_init(struct controlvm_message *inmsg)
625 static int chipset_inited;
626 enum ultra_chipset_feature features = 0;
627 int rc = CONTROLVM_RESP_SUCCESS;
629 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
/* only the first CHIPSET_INIT is honored */
630 if (chipset_inited) {
631 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
635 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
637 /* Set features to indicate we support parahotplug (if Command
638 * also supports it). */
640 inmsg->cmd.init_chipset.
641 features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
643 /* Set the "reply" bit so Command knows this is a
644 * features-aware driver. */
645 features |= ULTRA_CHIPSET_FEATURE_REPLY;
649 cleanup_controlvm_structures();
650 if (inmsg->hdr.flags.response_expected)
651 controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
/* controlvm_init_response() - build an outgoing response message: copy
 * the original header, strip any payload bookkeeping, and (when the
 * response code is an error — guard line missing here) mark the header
 * failed with the positive completion status. */
655 controlvm_init_response(struct controlvm_message *msg,
656 struct controlvm_message_header *msg_hdr, int response)
658 memset(msg, 0, sizeof(struct controlvm_message));
659 memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
660 msg->hdr.payload_bytes = 0;
661 msg->hdr.payload_vm_offset = 0;
662 msg->hdr.payload_max_bytes = 0;
664 msg->hdr.flags.failed = 1;
665 msg->hdr.completion_status = (u32) (-response);
/* controlvm_respond() - generic response path: init the response, echo
 * back the diagpool DEVICE_CHANGESTATE packet when applicable, and
 * queue it into the controlvm request queue. */
670 controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
672 struct controlvm_message outmsg;
674 controlvm_init_response(&outmsg, msg_hdr, response);
675 /* For DiagPool channel DEVICE_CHANGESTATE, we need to send
676 * back the deviceChangeState structure in the packet. */
677 if (msg_hdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
678 g_devicechangestate_packet.device_change_state.bus_no ==
680 g_devicechangestate_packet.device_change_state.dev_no ==
682 outmsg.cmd = g_devicechangestate_packet;
683 if (outmsg.hdr.flags.test_message == 1)
686 if (!visorchannel_signalinsert(controlvm_channel,
687 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
/* controlvm_respond_chipset_init() - respond to CHIPSET_INIT carrying
 * the negotiated feature bits. */
693 controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
695 enum ultra_chipset_feature features)
697 struct controlvm_message outmsg;
699 controlvm_init_response(&outmsg, msg_hdr, response);
700 outmsg.cmd.init_chipset.features = features;
701 if (!visorchannel_signalinsert(controlvm_channel,
702 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
/* controlvm_respond_physdev_changestate() - respond to a physical
 * device change-state request, echoing the new segment state. */
707 static void controlvm_respond_physdev_changestate(
708 struct controlvm_message_header *msg_hdr, int response,
709 struct spar_segment_state state)
711 struct controlvm_message outmsg;
713 controlvm_init_response(&outmsg, msg_hdr, response);
714 outmsg.cmd.device_change_state.state = state;
715 outmsg.cmd.device_change_state.flags.phys_device = 1;
716 if (!visorchannel_signalinsert(controlvm_channel,
717 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
/* visorchipset_save_message() - persist a controlvm message into the
 * channel's saved-crash-message area so it can be replayed after a
 * crash; type selects the bus slot vs. the device slot.  Each step
 * posts an error POSTCODE on failure (the early-return lines between
 * steps are missing from this excerpt). */
723 visorchipset_save_message(struct controlvm_message *msg,
724 enum crash_obj_type type)
726 u32 crash_msg_offset;
729 /* get saved message count */
730 if (visorchannel_read(controlvm_channel,
731 offsetof(struct spar_controlvm_channel_protocol,
732 saved_crash_message_count),
733 &crash_msg_count, sizeof(u16)) < 0) {
734 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
735 POSTCODE_SEVERITY_ERR);
/* the channel must advertise exactly CONTROLVM_CRASHMSG_MAX slots */
739 if (crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
740 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
742 POSTCODE_SEVERITY_ERR);
746 /* get saved crash message offset */
747 if (visorchannel_read(controlvm_channel,
748 offsetof(struct spar_controlvm_channel_protocol,
749 saved_crash_message_offset),
750 &crash_msg_offset, sizeof(u32)) < 0) {
751 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
752 POSTCODE_SEVERITY_ERR);
/* CRASH_BUS writes into the first slot; the device message goes one
 * sizeof(struct controlvm_message) past it */
756 if (type == CRASH_BUS) {
757 if (visorchannel_write(controlvm_channel,
760 sizeof(struct controlvm_message)) < 0) {
761 POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
762 POSTCODE_SEVERITY_ERR);
766 if (visorchannel_write(controlvm_channel,
768 sizeof(struct controlvm_message), msg,
769 sizeof(struct controlvm_message)) < 0) {
770 POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
771 POSTCODE_SEVERITY_ERR);
776 EXPORT_SYMBOL_GPL(visorchipset_save_message);
/* bus_responder() - finish a bus create/destroy command: roll back a
 * failed create, update the tracked state, and send the pending
 * controlvm response if one was stashed on the bus_info. */
779 bus_responder(enum controlvm_id cmd_id, ulong bus_no, int response)
781 struct visorchipset_bus_info *p = NULL;
782 BOOL need_clear = FALSE;
784 p = findbus(&bus_info_list, bus_no);
/* failed BUS_CREATE (other than "already done"): undo tracking rows */
789 if ((cmd_id == CONTROLVM_BUS_CREATE) &&
790 (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
791 /* undo the row we just created... */
792 delbusdevices(&dev_info_list, bus_no);
794 if (cmd_id == CONTROLVM_BUS_CREATE)
795 p->state.created = 1;
796 if (cmd_id == CONTROLVM_BUS_DESTROY)
800 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
801 return; /* no controlvm response needed */
802 if (p->pending_msg_hdr.id != (u32)cmd_id)
804 controlvm_respond(&p->pending_msg_hdr, response);
805 p->pending_msg_hdr.id = CONTROLVM_INVALID;
808 delbusdevices(&dev_info_list, bus_no);
/* device_changestate_responder() - like device_responder() but the
 * response must carry bus/dev numbers and the resulting segment state,
 * so it builds and queues the message itself. */
813 device_changestate_responder(enum controlvm_id cmd_id,
814 ulong bus_no, ulong dev_no, int response,
815 struct spar_segment_state response_state)
817 struct visorchipset_device_info *p = NULL;
818 struct controlvm_message outmsg;
820 p = finddevice(&dev_info_list, bus_no, dev_no);
823 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
824 return; /* no controlvm response needed */
825 if (p->pending_msg_hdr.id != cmd_id)
828 controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);
830 outmsg.cmd.device_change_state.bus_no = bus_no;
831 outmsg.cmd.device_change_state.dev_no = dev_no;
832 outmsg.cmd.device_change_state.state = response_state;
834 if (!visorchannel_signalinsert(controlvm_channel,
835 CONTROLVM_QUEUE_REQUEST, &outmsg))
838 p->pending_msg_hdr.id = CONTROLVM_INVALID;
/* device_responder() - finish a device create/destroy command: update
 * tracked state and send the pending controlvm response if stashed. */
842 device_responder(enum controlvm_id cmd_id, ulong bus_no, ulong dev_no,
845 struct visorchipset_device_info *p = NULL;
846 BOOL need_clear = FALSE;
848 p = finddevice(&dev_info_list, bus_no, dev_no);
852 if (cmd_id == CONTROLVM_DEVICE_CREATE)
853 p->state.created = 1;
854 if (cmd_id == CONTROLVM_DEVICE_DESTROY)
858 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
859 return; /* no controlvm response needed */
861 if (p->pending_msg_hdr.id != (u32)cmd_id)
864 controlvm_respond(&p->pending_msg_hdr, response);
865 p->pending_msg_hdr.id = CONTROLVM_INVALID;
/* bus_epilog() - common tail of every bus command: stash the pending
 * response header (or invalidate it), then under notifier_lock fan the
 * event out to the registered server/client notifiers; if nobody was
 * notified, respond directly via bus_responder(). */
871 bus_epilog(u32 bus_no,
872 u32 cmd, struct controlvm_message_header *msg_hdr,
873 int response, BOOL need_response)
875 BOOL notified = FALSE;
877 struct visorchipset_bus_info *bus_info = findbus(&bus_info_list,
/* remember the request header so the async responder can reply later */
884 memcpy(&bus_info->pending_msg_hdr, msg_hdr,
885 sizeof(struct controlvm_message_header));
887 bus_info->pending_msg_hdr.id = CONTROLVM_INVALID;
890 down(&notifier_lock);
891 if (response == CONTROLVM_RESP_SUCCESS) {
893 case CONTROLVM_BUS_CREATE:
894 /* We can't tell from the bus_create
895 * information which of our 2 bus flavors the
896 * devices on this bus will ultimately end up.
897 * FORTUNATELY, it turns out it is harmless to
898 * send the bus_create to both of them. We can
899 * narrow things down a little bit, though,
900 * because we know: - BusDev_Server can handle
901 * either server or client devices
902 * - BusDev_Client can handle ONLY client
904 if (busdev_server_notifiers.bus_create) {
905 (*busdev_server_notifiers.bus_create) (bus_no);
908 if ((!bus_info->flags.server) /*client */ &&
909 busdev_client_notifiers.bus_create) {
910 (*busdev_client_notifiers.bus_create) (bus_no);
914 case CONTROLVM_BUS_DESTROY:
915 if (busdev_server_notifiers.bus_destroy) {
916 (*busdev_server_notifiers.bus_destroy) (bus_no);
919 if ((!bus_info->flags.server) /*client */ &&
920 busdev_client_notifiers.bus_destroy) {
921 (*busdev_client_notifiers.bus_destroy) (bus_no);
928 /* The callback function just called above is responsible
929 * for calling the appropriate visorchipset_busdev_responders
930 * function, which will call bus_responder()
934 bus_responder(cmd, bus_no, response);
/* device_epilog() - common tail of every device command: pick the
 * server or client notifier set (per for_visorbus), stash the pending
 * response header, then dispatch create/changestate/destroy to the
 * notifiers under notifier_lock; falls back to device_responder() when
 * no notifier consumed the event. */
939 device_epilog(u32 bus_no, u32 dev_no, struct spar_segment_state state, u32 cmd,
940 struct controlvm_message_header *msg_hdr, int response,
941 BOOL need_response, BOOL for_visorbus)
943 struct visorchipset_busdev_notifiers *notifiers = NULL;
944 BOOL notified = FALSE;
946 struct visorchipset_device_info *dev_info =
947 finddevice(&dev_info_list, bus_no, dev_no);
/* hotplug-script environment string (uevent env, per use below) */
949 "SPARSP_DIAGPOOL_PAUSED_STATE = 1",
957 notifiers = &busdev_server_notifiers;
959 notifiers = &busdev_client_notifiers;
961 memcpy(&dev_info->pending_msg_hdr, msg_hdr,
962 sizeof(struct controlvm_message_header));
964 dev_info->pending_msg_hdr.id = CONTROLVM_INVALID;
967 down(&notifier_lock);
970 case CONTROLVM_DEVICE_CREATE:
971 if (notifiers->device_create) {
972 (*notifiers->device_create) (bus_no, dev_no);
976 case CONTROLVM_DEVICE_CHANGESTATE:
977 /* ServerReady / ServerRunning / SegmentStateRunning */
978 if (state.alive == segment_state_running.alive &&
980 segment_state_running.operating) {
981 if (notifiers->device_resume) {
982 (*notifiers->device_resume) (bus_no,
987 /* ServerNotReady / ServerLost / SegmentStateStandby */
988 else if (state.alive == segment_state_standby.alive &&
990 segment_state_standby.operating) {
991 /* technically this is standby case
992 * where server is lost
994 if (notifiers->device_pause) {
995 (*notifiers->device_pause) (bus_no,
999 } else if (state.alive == segment_state_paused.alive &&
1001 segment_state_paused.operating) {
1002 /* this is lite pause where channel is
1003 * still valid just 'pause' of it
1005 if (bus_no == g_diagpool_bus_no &&
1006 dev_no == g_diagpool_dev_no) {
1007 /* this will trigger the
1008 * diag_shutdown.sh script in
1009 * the visorchipset hotplug */
1011 (&visorchipset_platform_device.dev.
1012 kobj, KOBJ_ONLINE, envp);
1016 case CONTROLVM_DEVICE_DESTROY:
1017 if (notifiers->device_destroy) {
1018 (*notifiers->device_destroy) (bus_no, dev_no);
1025 /* The callback function just called above is responsible
1026 * for calling the appropriate visorchipset_busdev_responders
1027 * function, which will call device_responder()
1031 device_responder(cmd, bus_no, dev_no, response);
/* bus_create() - handle CONTROLVM_BUS_CREATE: reject duplicates,
 * allocate and populate a visorchipset_bus_info from the message,
 * link it onto bus_info_list, and finish through bus_epilog(). */
1036 bus_create(struct controlvm_message *inmsg)
1038 struct controlvm_message_packet *cmd = &inmsg->cmd;
1039 ulong bus_no = cmd->create_bus.bus_no;
1040 int rc = CONTROLVM_RESP_SUCCESS;
1041 struct visorchipset_bus_info *bus_info = NULL;
1043 bus_info = findbus(&bus_info_list, bus_no);
1044 if (bus_info && (bus_info->state.created == 1)) {
1045 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
1046 POSTCODE_SEVERITY_ERR);
1047 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1050 bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
1052 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
1053 POSTCODE_SEVERITY_ERR);
1054 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1058 INIT_LIST_HEAD(&bus_info->entry);
1059 bus_info->bus_no = bus_no;
1060 bus_info->dev_no = cmd->create_bus.dev_count;
1062 POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);
/* test messages use a local-test channel address type */
1064 if (inmsg->hdr.flags.test_message == 1)
1065 bus_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
1067 bus_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1069 bus_info->flags.server = inmsg->hdr.flags.server;
1070 bus_info->chan_info.channel_addr = cmd->create_bus.channel_addr;
1071 bus_info->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
1072 bus_info->chan_info.channel_type_uuid =
1073 cmd->create_bus.bus_data_type_uuid;
1074 bus_info->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;
1076 list_add(&bus_info->entry, &bus_info_list);
1078 POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
1081 bus_epilog(bus_no, CONTROLVM_BUS_CREATE, &inmsg->hdr,
1082 rc, inmsg->hdr.flags.response_expected == 1);
/* bus_destroy() - handle CONTROLVM_BUS_DESTROY: validate the bus
 * exists and was created, then finish through bus_epilog(). */
1086 bus_destroy(struct controlvm_message *inmsg)
1088 struct controlvm_message_packet *cmd = &inmsg->cmd;
1089 ulong bus_no = cmd->destroy_bus.bus_no;
1090 struct visorchipset_bus_info *bus_info;
1091 int rc = CONTROLVM_RESP_SUCCESS;
1093 bus_info = findbus(&bus_info_list, bus_no);
1095 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1096 else if (bus_info->state.created == 0)
1097 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1099 bus_epilog(bus_no, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
1100 rc, inmsg->hdr.flags.response_expected == 1);
/* bus_configure() - handle CONTROLVM_BUS_CONFIGURE: validate the bus,
 * reject if a response is still pending, then record the partition
 * handle/uuid and bus name parsed from the message payload. */
1104 bus_configure(struct controlvm_message *inmsg,
1105 struct parser_context *parser_ctx)
1107 struct controlvm_message_packet *cmd = &inmsg->cmd;
1108 ulong bus_no = cmd->configure_bus.bus_no;
1109 struct visorchipset_bus_info *bus_info = NULL;
1110 int rc = CONTROLVM_RESP_SUCCESS;
1113 bus_no = cmd->configure_bus.bus_no;
1114 POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
1115 POSTCODE_SEVERITY_INFO);
1117 bus_info = findbus(&bus_info_list, bus_no);
1119 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
1120 POSTCODE_SEVERITY_ERR);
1121 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1122 } else if (bus_info->state.created == 0) {
1123 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
1124 POSTCODE_SEVERITY_ERR);
1125 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1126 } else if (bus_info->pending_msg_hdr.id != CONTROLVM_INVALID) {
1127 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
1128 POSTCODE_SEVERITY_ERR);
1129 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
1131 bus_info->partition_handle = cmd->configure_bus.guest_handle;
1132 bus_info->partition_uuid = parser_id_get(parser_ctx);
1133 parser_param_start(parser_ctx, PARSERSTRING_NAME);
1134 bus_info->name = parser_string_get(parser_ctx);
1136 visorchannel_uuid_id(&bus_info->partition_uuid, s);
1137 POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
1138 POSTCODE_SEVERITY_INFO);
1140 bus_epilog(bus_no, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
1141 rc, inmsg->hdr.flags.response_expected == 1);
/* my_device_create() - handle CONTROLVM_DEVICE_CREATE: reject
 * duplicates and devices on missing/uncreated buses, allocate and
 * populate a visorchipset_device_info from the message, remember the
 * DiagPool device coordinates, and finish through device_epilog(). */
1145 my_device_create(struct controlvm_message *inmsg)
1147 struct controlvm_message_packet *cmd = &inmsg->cmd;
1148 ulong bus_no = cmd->create_device.bus_no;
1149 ulong dev_no = cmd->create_device.dev_no;
1150 struct visorchipset_device_info *dev_info = NULL;
1151 struct visorchipset_bus_info *bus_info = NULL;
1152 int rc = CONTROLVM_RESP_SUCCESS;
1154 dev_info = finddevice(&dev_info_list, bus_no, dev_no);
1155 if (dev_info && (dev_info->state.created == 1)) {
1156 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
1157 POSTCODE_SEVERITY_ERR);
1158 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1161 bus_info = findbus(&bus_info_list, bus_no);
1163 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
1164 POSTCODE_SEVERITY_ERR);
1165 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1168 if (bus_info->state.created == 0) {
1169 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
1170 POSTCODE_SEVERITY_ERR);
1171 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1174 dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
1176 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
1177 POSTCODE_SEVERITY_ERR);
1178 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1182 INIT_LIST_HEAD(&dev_info->entry);
1183 dev_info->bus_no = bus_no;
1184 dev_info->dev_no = dev_no;
1185 dev_info->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
1186 POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
1187 POSTCODE_SEVERITY_INFO);
/* test messages use a local-test channel address type */
1189 if (inmsg->hdr.flags.test_message == 1)
1190 dev_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
1192 dev_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1193 dev_info->chan_info.channel_addr = cmd->create_device.channel_addr;
1194 dev_info->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
1195 dev_info->chan_info.channel_type_uuid =
1196 cmd->create_device.data_type_uuid;
1197 dev_info->chan_info.intr = cmd->create_device.intr;
1198 list_add(&dev_info->entry, &dev_info_list);
1199 POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
1200 POSTCODE_SEVERITY_INFO);
1202 /* get the bus and devNo for DiagPool channel */
1204 is_diagpool_channel(dev_info->chan_info.channel_type_uuid)) {
1205 g_diagpool_bus_no = bus_no;
1206 g_diagpool_dev_no = dev_no;
1208 device_epilog(bus_no, dev_no, segment_state_running,
1209 CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
1210 inmsg->hdr.flags.response_expected == 1,
1211 FOR_VISORBUS(dev_info->chan_info.channel_type_uuid));
1215 my_device_changestate(struct controlvm_message *inmsg)
1217 struct controlvm_message_packet *cmd = &inmsg->cmd;
1218 ulong bus_no = cmd->device_change_state.bus_no;
1219 ulong dev_no = cmd->device_change_state.dev_no;
1220 struct spar_segment_state state = cmd->device_change_state.state;
1221 struct visorchipset_device_info *dev_info = NULL;
1222 int rc = CONTROLVM_RESP_SUCCESS;
1224 dev_info = finddevice(&dev_info_list, bus_no, dev_no);
1226 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
1227 POSTCODE_SEVERITY_ERR);
1228 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1229 } else if (dev_info->state.created == 0) {
1230 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
1231 POSTCODE_SEVERITY_ERR);
1232 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1234 if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
1235 device_epilog(bus_no, dev_no, state,
1236 CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
1237 inmsg->hdr.flags.response_expected == 1,
1239 dev_info->chan_info.channel_type_uuid));
1243 my_device_destroy(struct controlvm_message *inmsg)
1245 struct controlvm_message_packet *cmd = &inmsg->cmd;
1246 ulong bus_no = cmd->destroy_device.bus_no;
1247 ulong dev_no = cmd->destroy_device.dev_no;
1248 struct visorchipset_device_info *dev_info = NULL;
1249 int rc = CONTROLVM_RESP_SUCCESS;
1251 dev_info = finddevice(&dev_info_list, bus_no, dev_no);
1253 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1254 else if (dev_info->state.created == 0)
1255 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1257 if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
1258 device_epilog(bus_no, dev_no, segment_state_running,
1259 CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
1260 inmsg->hdr.flags.response_expected == 1,
1262 dev_info->chan_info.channel_type_uuid));
1265 /* When provided with the physical address of the controlvm channel
1266 * (phys_addr), the offset to the payload area we need to manage
1267 * (offset), and the size of this payload area (bytes), fills in the
1268 * controlvm_payload_info struct. Returns TRUE for success or FALSE
1272 initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes,
1273 struct controlvm_payload_info *info)
1275 u8 __iomem *payload = NULL;
1276 int rc = CONTROLVM_RESP_SUCCESS;
1279 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1282 memset(info, 0, sizeof(struct controlvm_payload_info));
1283 if ((offset == 0) || (bytes == 0)) {
1284 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1287 payload = ioremap_cache(phys_addr + offset, bytes);
1289 rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
1293 info->offset = offset;
1294 info->bytes = bytes;
1295 info->ptr = payload;
1308 destroy_controlvm_payload_info(struct controlvm_payload_info *info)
1314 memset(info, 0, sizeof(struct controlvm_payload_info));
1318 initialize_controlvm_payload(void)
1320 HOSTADDRESS phys_addr = visorchannel_get_physaddr(controlvm_channel);
1321 u64 payload_offset = 0;
1322 u32 payload_bytes = 0;
1324 if (visorchannel_read(controlvm_channel,
1325 offsetof(struct spar_controlvm_channel_protocol,
1326 request_payload_offset),
1327 &payload_offset, sizeof(payload_offset)) < 0) {
1328 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1329 POSTCODE_SEVERITY_ERR);
1332 if (visorchannel_read(controlvm_channel,
1333 offsetof(struct spar_controlvm_channel_protocol,
1334 request_payload_bytes),
1335 &payload_bytes, sizeof(payload_bytes)) < 0) {
1336 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1337 POSTCODE_SEVERITY_ERR);
1340 initialize_controlvm_payload_info(phys_addr,
1341 payload_offset, payload_bytes,
1342 &controlvm_payload_info);
1345 /* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1346 * Returns CONTROLVM_RESP_xxx code.
1349 visorchipset_chipset_ready(void)
1351 kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
1352 return CONTROLVM_RESP_SUCCESS;
1354 EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);
1357 visorchipset_chipset_selftest(void)
1359 char env_selftest[20];
1360 char *envp[] = { env_selftest, NULL };
1362 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1363 kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1365 return CONTROLVM_RESP_SUCCESS;
1367 EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);
1369 /* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1370 * Returns CONTROLVM_RESP_xxx code.
1373 visorchipset_chipset_notready(void)
1375 kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
1376 return CONTROLVM_RESP_SUCCESS;
1378 EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);
1381 chipset_ready(struct controlvm_message_header *msg_hdr)
1383 int rc = visorchipset_chipset_ready();
1385 if (rc != CONTROLVM_RESP_SUCCESS)
1387 if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
1388 controlvm_respond(msg_hdr, rc);
1389 if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
1390 /* Send CHIPSET_READY response when all modules have been loaded
1391 * and disks mounted for the partition
1393 g_chipset_msg_hdr = *msg_hdr;
1398 chipset_selftest(struct controlvm_message_header *msg_hdr)
1400 int rc = visorchipset_chipset_selftest();
1402 if (rc != CONTROLVM_RESP_SUCCESS)
1404 if (msg_hdr->flags.response_expected)
1405 controlvm_respond(msg_hdr, rc);
1409 chipset_notready(struct controlvm_message_header *msg_hdr)
1411 int rc = visorchipset_chipset_notready();
1413 if (rc != CONTROLVM_RESP_SUCCESS)
1415 if (msg_hdr->flags.response_expected)
1416 controlvm_respond(msg_hdr, rc);
1419 /* This is your "one-stop" shop for grabbing the next message from the
1420 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
1423 read_controlvm_event(struct controlvm_message *msg)
1425 if (visorchannel_signalremove(controlvm_channel,
1426 CONTROLVM_QUEUE_EVENT, msg)) {
1428 if (msg->hdr.flags.test_message == 1)
1436 * The general parahotplug flow works as follows. The visorchipset
1437 * driver receives a DEVICE_CHANGESTATE message from Command
1438 * specifying a physical device to enable or disable. The CONTROLVM
1439 * message handler calls parahotplug_process_message, which then adds
1440 * the message to a global list and kicks off a udev event which
1441 * causes a user level script to enable or disable the specified
1442 * device. The udev script then writes to
1443 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
1444 * to get called, at which point the appropriate CONTROLVM message is
1445 * retrieved from the list and responded to.
1448 #define PARAHOTPLUG_TIMEOUT_MS 2000
1451 * Generate unique int to match an outstanding CONTROLVM message with a
1452 * udev script /proc response
1455 parahotplug_next_id(void)
1457 static atomic_t id = ATOMIC_INIT(0);
1459 return atomic_inc_return(&id);
1463 * Returns the time (in jiffies) when a CONTROLVM message on the list
1464 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1466 static unsigned long
1467 parahotplug_next_expiration(void)
1469 return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
1473 * Create a parahotplug_request, which is basically a wrapper for a
1474 * CONTROLVM_MESSAGE that we can stick on a list
1476 static struct parahotplug_request *
1477 parahotplug_request_create(struct controlvm_message *msg)
1479 struct parahotplug_request *req;
1481 req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
1485 req->id = parahotplug_next_id();
1486 req->expiration = parahotplug_next_expiration();
/*
 * Free a parahotplug_request.
 */
static void
parahotplug_request_destroy(struct parahotplug_request *req)
{
	kfree(req);
}
1502 * Cause uevent to run the user level script to do the disable/enable
1503 * specified in (the CONTROLVM message in) the specified
1504 * parahotplug_request
1507 parahotplug_request_kickoff(struct parahotplug_request *req)
1509 struct controlvm_message_packet *cmd = &req->msg.cmd;
1510 char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1513 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1516 sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1517 sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1518 sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
1519 cmd->device_change_state.state.active);
1520 sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
1521 cmd->device_change_state.bus_no);
1522 sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
1523 cmd->device_change_state.dev_no >> 3);
1524 sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
1525 cmd->device_change_state.dev_no & 0x7);
1527 kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1532 * Remove any request from the list that's been on there too long and
1533 * respond with an error.
1536 parahotplug_process_list(void)
1538 struct list_head *pos = NULL;
1539 struct list_head *tmp = NULL;
1541 spin_lock(¶hotplug_request_list_lock);
1543 list_for_each_safe(pos, tmp, ¶hotplug_request_list) {
1544 struct parahotplug_request *req =
1545 list_entry(pos, struct parahotplug_request, list);
1547 if (!time_after_eq(jiffies, req->expiration))
1551 if (req->msg.hdr.flags.response_expected)
1552 controlvm_respond_physdev_changestate(
1554 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
1555 req->msg.cmd.device_change_state.state);
1556 parahotplug_request_destroy(req);
1559 spin_unlock(¶hotplug_request_list_lock);
1563 * Called from the /proc handler, which means the user script has
1564 * finished the enable/disable. Find the matching identifier, and
1565 * respond to the CONTROLVM message with success.
1568 parahotplug_request_complete(int id, u16 active)
1570 struct list_head *pos = NULL;
1571 struct list_head *tmp = NULL;
1573 spin_lock(¶hotplug_request_list_lock);
1575 /* Look for a request matching "id". */
1576 list_for_each_safe(pos, tmp, ¶hotplug_request_list) {
1577 struct parahotplug_request *req =
1578 list_entry(pos, struct parahotplug_request, list);
1579 if (req->id == id) {
1580 /* Found a match. Remove it from the list and
1584 spin_unlock(¶hotplug_request_list_lock);
1585 req->msg.cmd.device_change_state.state.active = active;
1586 if (req->msg.hdr.flags.response_expected)
1587 controlvm_respond_physdev_changestate(
1588 &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
1589 req->msg.cmd.device_change_state.state);
1590 parahotplug_request_destroy(req);
1595 spin_unlock(¶hotplug_request_list_lock);
1600 * Enables or disables a PCI device by kicking off a udev script
1603 parahotplug_process_message(struct controlvm_message *inmsg)
1605 struct parahotplug_request *req;
1607 req = parahotplug_request_create(inmsg);
1612 if (inmsg->cmd.device_change_state.state.active) {
1613 /* For enable messages, just respond with success
1614 * right away. This is a bit of a hack, but there are
1615 * issues with the early enable messages we get (with
1616 * either the udev script not detecting that the device
1617 * is up, or not getting called at all). Fortunately
1618 * the messages that get lost don't matter anyway, as
1619 * devices are automatically enabled at
1622 parahotplug_request_kickoff(req);
1623 controlvm_respond_physdev_changestate(&inmsg->hdr,
1624 CONTROLVM_RESP_SUCCESS,
1625 inmsg->cmd.device_change_state.state);
1626 parahotplug_request_destroy(req);
1628 /* For disable messages, add the request to the
1629 * request list before kicking off the udev script. It
1630 * won't get responded to until the script has
1631 * indicated it's done.
1633 spin_lock(¶hotplug_request_list_lock);
1634 list_add_tail(&req->list, ¶hotplug_request_list);
1635 spin_unlock(¶hotplug_request_list_lock);
1637 parahotplug_request_kickoff(req);
1641 /* Process a controlvm message.
1643 * FALSE - this function will return FALSE only in the case where the
1644 * controlvm message was NOT processed, but processing must be
1645 * retried before reading the next controlvm message; a
1646 * scenario where this can occur is when we need to throttle
1647 * the allocation of memory in which to copy out controlvm
1649 * TRUE - processing of the controlvm message completed,
1650 * either successfully or with an error.
1653 handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr)
1655 struct controlvm_message_packet *cmd = &inmsg.cmd;
1658 struct parser_context *parser_ctx = NULL;
1659 bool local_addr = false;
1660 struct controlvm_message ackmsg;
1662 /* create parsing context if necessary */
1663 local_addr = (inmsg.hdr.flags.test_message == 1);
1664 if (channel_addr == 0)
1666 parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
1667 parm_bytes = inmsg.hdr.payload_bytes;
1669 /* Parameter and channel addresses within test messages actually lie
1670 * within our OS-controlled memory. We need to know that, because it
1671 * makes a difference in how we compute the virtual address.
1673 if (parm_addr != 0 && parm_bytes != 0) {
1677 parser_init_byte_stream(parm_addr, parm_bytes,
1678 local_addr, &retry);
1679 if (!parser_ctx && retry)
1684 controlvm_init_response(&ackmsg, &inmsg.hdr,
1685 CONTROLVM_RESP_SUCCESS);
1686 if (controlvm_channel)
1687 visorchannel_signalinsert(controlvm_channel,
1688 CONTROLVM_QUEUE_ACK,
1691 switch (inmsg.hdr.id) {
1692 case CONTROLVM_CHIPSET_INIT:
1693 chipset_init(&inmsg);
1695 case CONTROLVM_BUS_CREATE:
1698 case CONTROLVM_BUS_DESTROY:
1699 bus_destroy(&inmsg);
1701 case CONTROLVM_BUS_CONFIGURE:
1702 bus_configure(&inmsg, parser_ctx);
1704 case CONTROLVM_DEVICE_CREATE:
1705 my_device_create(&inmsg);
1707 case CONTROLVM_DEVICE_CHANGESTATE:
1708 if (cmd->device_change_state.flags.phys_device) {
1709 parahotplug_process_message(&inmsg);
1711 /* save the hdr and cmd structures for later use */
1712 /* when sending back the response to Command */
1713 my_device_changestate(&inmsg);
1714 g_diag_msg_hdr = inmsg.hdr;
1715 g_devicechangestate_packet = inmsg.cmd;
1719 case CONTROLVM_DEVICE_DESTROY:
1720 my_device_destroy(&inmsg);
1722 case CONTROLVM_DEVICE_CONFIGURE:
1723 /* no op for now, just send a respond that we passed */
1724 if (inmsg.hdr.flags.response_expected)
1725 controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1727 case CONTROLVM_CHIPSET_READY:
1728 chipset_ready(&inmsg.hdr);
1730 case CONTROLVM_CHIPSET_SELFTEST:
1731 chipset_selftest(&inmsg.hdr);
1733 case CONTROLVM_CHIPSET_STOP:
1734 chipset_notready(&inmsg.hdr);
1737 if (inmsg.hdr.flags.response_expected)
1738 controlvm_respond(&inmsg.hdr,
1739 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
1744 parser_done(parser_ctx);
1750 static HOSTADDRESS controlvm_get_channel_address(void)
1755 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
1762 controlvm_periodic_work(struct work_struct *work)
1764 struct controlvm_message inmsg;
1765 BOOL got_command = FALSE;
1766 BOOL handle_command_failed = FALSE;
1767 static u64 poll_count;
1769 /* make sure visorbus server is registered for controlvm callbacks */
1770 if (visorchipset_serverregwait && !serverregistered)
1772 /* make sure visorclientbus server is regsitered for controlvm
1775 if (visorchipset_clientregwait && !clientregistered)
1779 if (poll_count >= 250)
1784 /* Check events to determine if response to CHIPSET_READY
1787 if (visorchipset_holdchipsetready &&
1788 (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
1789 if (check_chipset_events() == 1) {
1790 controlvm_respond(&g_chipset_msg_hdr, 0);
1791 clear_chipset_events();
1792 memset(&g_chipset_msg_hdr, 0,
1793 sizeof(struct controlvm_message_header));
1797 while (visorchannel_signalremove(controlvm_channel,
1798 CONTROLVM_QUEUE_RESPONSE,
1802 if (controlvm_pending_msg_valid) {
1803 /* we throttled processing of a prior
1804 * msg, so try to process it again
1805 * rather than reading a new one
1807 inmsg = controlvm_pending_msg;
1808 controlvm_pending_msg_valid = FALSE;
1811 got_command = read_controlvm_event(&inmsg);
1815 handle_command_failed = FALSE;
1816 while (got_command && (!handle_command_failed)) {
1817 most_recent_message_jiffies = jiffies;
1818 if (handle_command(inmsg,
1819 visorchannel_get_physaddr
1820 (controlvm_channel)))
1821 got_command = read_controlvm_event(&inmsg);
1823 /* this is a scenario where throttling
1824 * is required, but probably NOT an
1825 * error...; we stash the current
1826 * controlvm msg so we will attempt to
1827 * reprocess it on our next loop
1829 handle_command_failed = TRUE;
1830 controlvm_pending_msg = inmsg;
1831 controlvm_pending_msg_valid = TRUE;
1835 /* parahotplug_worker */
1836 parahotplug_process_list();
1840 if (time_after(jiffies,
1841 most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
1842 /* it's been longer than MIN_IDLE_SECONDS since we
1843 * processed our last controlvm message; slow down the
1846 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
1847 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
1849 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
1850 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
1853 queue_delayed_work(periodic_controlvm_workqueue,
1854 &periodic_controlvm_work, poll_jiffies);
1858 setup_crash_devices_work_queue(struct work_struct *work)
1860 struct controlvm_message local_crash_bus_msg;
1861 struct controlvm_message local_crash_dev_msg;
1862 struct controlvm_message msg;
1863 u32 local_crash_msg_offset;
1864 u16 local_crash_msg_count;
1866 /* make sure visorbus server is registered for controlvm callbacks */
1867 if (visorchipset_serverregwait && !serverregistered)
1870 /* make sure visorclientbus server is regsitered for controlvm
1873 if (visorchipset_clientregwait && !clientregistered)
1876 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1878 /* send init chipset msg */
1879 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
1880 msg.cmd.init_chipset.bus_count = 23;
1881 msg.cmd.init_chipset.switch_count = 0;
1885 /* get saved message count */
1886 if (visorchannel_read(controlvm_channel,
1887 offsetof(struct spar_controlvm_channel_protocol,
1888 saved_crash_message_count),
1889 &local_crash_msg_count, sizeof(u16)) < 0) {
1890 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1891 POSTCODE_SEVERITY_ERR);
1895 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
1896 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
1897 local_crash_msg_count,
1898 POSTCODE_SEVERITY_ERR);
1902 /* get saved crash message offset */
1903 if (visorchannel_read(controlvm_channel,
1904 offsetof(struct spar_controlvm_channel_protocol,
1905 saved_crash_message_offset),
1906 &local_crash_msg_offset, sizeof(u32)) < 0) {
1907 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1908 POSTCODE_SEVERITY_ERR);
1912 /* read create device message for storage bus offset */
1913 if (visorchannel_read(controlvm_channel,
1914 local_crash_msg_offset,
1915 &local_crash_bus_msg,
1916 sizeof(struct controlvm_message)) < 0) {
1917 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
1918 POSTCODE_SEVERITY_ERR);
1922 /* read create device message for storage device */
1923 if (visorchannel_read(controlvm_channel,
1924 local_crash_msg_offset +
1925 sizeof(struct controlvm_message),
1926 &local_crash_dev_msg,
1927 sizeof(struct controlvm_message)) < 0) {
1928 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
1929 POSTCODE_SEVERITY_ERR);
1933 /* reuse IOVM create bus message */
1934 if (local_crash_bus_msg.cmd.create_bus.channel_addr != 0) {
1935 bus_create(&local_crash_bus_msg);
1937 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
1938 POSTCODE_SEVERITY_ERR);
1942 /* reuse create device message for storage device */
1943 if (local_crash_dev_msg.cmd.create_device.channel_addr != 0) {
1944 my_device_create(&local_crash_dev_msg);
1946 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
1947 POSTCODE_SEVERITY_ERR);
1950 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
1955 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
1957 queue_delayed_work(periodic_controlvm_workqueue,
1958 &periodic_controlvm_work, poll_jiffies);
1962 bus_create_response(ulong bus_no, int response)
1964 bus_responder(CONTROLVM_BUS_CREATE, bus_no, response);
1968 bus_destroy_response(ulong bus_no, int response)
1970 bus_responder(CONTROLVM_BUS_DESTROY, bus_no, response);
1974 device_create_response(ulong bus_no, ulong dev_no, int response)
1976 device_responder(CONTROLVM_DEVICE_CREATE, bus_no, dev_no, response);
1980 device_destroy_response(ulong bus_no, ulong dev_no, int response)
1982 device_responder(CONTROLVM_DEVICE_DESTROY, bus_no, dev_no, response);
1986 visorchipset_device_pause_response(ulong bus_no, ulong dev_no, int response)
1988 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
1989 bus_no, dev_no, response,
1990 segment_state_standby);
1992 EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
1995 device_resume_response(ulong bus_no, ulong dev_no, int response)
1997 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
1998 bus_no, dev_no, response,
1999 segment_state_running);
2003 visorchipset_get_bus_info(ulong bus_no, struct visorchipset_bus_info *bus_info)
2005 void *p = findbus(&bus_info_list, bus_no);
2009 memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
2012 EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
2015 visorchipset_set_bus_context(ulong bus_no, void *context)
2017 struct visorchipset_bus_info *p = findbus(&bus_info_list, bus_no);
2021 p->bus_driver_context = context;
2024 EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
2027 visorchipset_get_device_info(ulong bus_no, ulong dev_no,
2028 struct visorchipset_device_info *dev_info)
2030 void *p = finddevice(&dev_info_list, bus_no, dev_no);
2034 memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
2037 EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
2040 visorchipset_set_device_context(ulong bus_no, ulong dev_no, void *context)
2042 struct visorchipset_device_info *p =
2043 finddevice(&dev_info_list, bus_no, dev_no);
2047 p->bus_driver_context = context;
2050 EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
2052 /* Generic wrapper function for allocating memory from a kmem_cache pool.
2055 visorchipset_cache_alloc(struct kmem_cache *pool, BOOL ok_to_block,
2065 /* __GFP_NORETRY means "ok to fail", meaning
2066 * kmem_cache_alloc() can return NULL, implying the caller CAN
2067 * cope with failure. If you do NOT specify __GFP_NORETRY,
2068 * Linux will go to extreme measures to get memory for you
2069 * (like, invoke oom killer), which will probably cripple the
2072 gfp |= __GFP_NORETRY;
2073 p = kmem_cache_alloc(pool, gfp);
2077 atomic_inc(&visorchipset_cache_buffers_in_use);
2081 /* Generic wrapper function for freeing memory from a kmem_cache pool.
2084 visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln)
2089 atomic_dec(&visorchipset_cache_buffers_in_use);
2090 kmem_cache_free(pool, p);
2093 static ssize_t chipsetready_store(struct device *dev,
2094 struct device_attribute *attr,
2095 const char *buf, size_t count)
2099 if (sscanf(buf, "%63s", msgtype) != 1)
2102 if (strcmp(msgtype, "CALLHOMEDISK_MOUNTED") == 0) {
2103 chipset_events[0] = 1;
2105 } else if (strcmp(msgtype, "MODULES_LOADED") == 0) {
2106 chipset_events[1] = 1;
2112 /* The parahotplug/devicedisabled interface gets called by our support script
2113 * when an SR-IOV device has been shut down. The ID is passed to the script
2114 * and then passed back when the device has been removed.
2116 static ssize_t devicedisabled_store(struct device *dev,
2117 struct device_attribute *attr,
2118 const char *buf, size_t count)
2122 if (kstrtouint(buf, 10, &id) != 0)
2125 parahotplug_request_complete(id, 0);
2129 /* The parahotplug/deviceenabled interface gets called by our support script
2130 * when an SR-IOV device has been recovered. The ID is passed to the script
2131 * and then passed back when the device has been brought back up.
2133 static ssize_t deviceenabled_store(struct device *dev,
2134 struct device_attribute *attr,
2135 const char *buf, size_t count)
2139 if (kstrtouint(buf, 10, &id) != 0)
2142 parahotplug_request_complete(id, 1);
2147 visorchipset_init(void)
2152 if (!unisys_spar_platform)
2155 memset(&busdev_server_notifiers, 0, sizeof(busdev_server_notifiers));
2156 memset(&busdev_client_notifiers, 0, sizeof(busdev_client_notifiers));
2157 memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
2158 memset(&livedump_info, 0, sizeof(livedump_info));
2159 atomic_set(&livedump_info.buffers_in_use, 0);
2161 if (visorchipset_testvnic) {
2162 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, x, DIAG_SEVERITY_ERR);
2167 addr = controlvm_get_channel_address();
2170 visorchannel_create_with_lock
2172 sizeof(struct spar_controlvm_channel_protocol),
2173 spar_controlvm_channel_protocol_uuid);
2174 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
2175 visorchannel_get_header(controlvm_channel))) {
2176 initialize_controlvm_payload();
2178 visorchannel_destroy(controlvm_channel);
2179 controlvm_channel = NULL;
2186 major_dev = MKDEV(visorchipset_major, 0);
2187 rc = visorchipset_file_init(major_dev, &controlvm_channel);
2189 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2193 memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));
2195 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2197 memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));
2199 putfile_buffer_list_pool =
2200 kmem_cache_create(putfile_buffer_list_pool_name,
2201 sizeof(struct putfile_buffer_entry),
2202 0, SLAB_HWCACHE_ALIGN, NULL);
2203 if (!putfile_buffer_list_pool) {
2204 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2208 if (!visorchipset_disable_controlvm) {
2209 /* if booting in a crash kernel */
2210 if (visorchipset_crash_kernel)
2211 INIT_DELAYED_WORK(&periodic_controlvm_work,
2212 setup_crash_devices_work_queue);
2214 INIT_DELAYED_WORK(&periodic_controlvm_work,
2215 controlvm_periodic_work);
2216 periodic_controlvm_workqueue =
2217 create_singlethread_workqueue("visorchipset_controlvm");
2219 if (!periodic_controlvm_workqueue) {
2220 POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
2225 most_recent_message_jiffies = jiffies;
2226 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2227 rc = queue_delayed_work(periodic_controlvm_workqueue,
2228 &periodic_controlvm_work, poll_jiffies);
2230 POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
2236 visorchipset_platform_device.dev.devt = major_dev;
2237 if (platform_device_register(&visorchipset_platform_device) < 0) {
2238 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2242 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
2246 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2247 POSTCODE_SEVERITY_ERR);
2253 visorchipset_exit(void)
2255 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2257 if (visorchipset_disable_controlvm) {
2260 cancel_delayed_work(&periodic_controlvm_work);
2261 flush_workqueue(periodic_controlvm_workqueue);
2262 destroy_workqueue(periodic_controlvm_workqueue);
2263 periodic_controlvm_workqueue = NULL;
2264 destroy_controlvm_payload_info(&controlvm_payload_info);
2266 if (putfile_buffer_list_pool) {
2267 kmem_cache_destroy(putfile_buffer_list_pool);
2268 putfile_buffer_list_pool = NULL;
2271 cleanup_controlvm_structures();
2273 memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));
2275 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2277 memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));
2279 visorchannel_destroy(controlvm_channel);
2281 visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
2282 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2285 module_param_named(testvnic, visorchipset_testvnic, int, S_IRUGO);
2286 MODULE_PARM_DESC(visorchipset_testvnic, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet");
2287 int visorchipset_testvnic = 0;
2289 module_param_named(testvnicclient, visorchipset_testvnicclient, int, S_IRUGO);
2290 MODULE_PARM_DESC(visorchipset_testvnicclient, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest");
2291 int visorchipset_testvnicclient = 0;
2293 module_param_named(testmsg, visorchipset_testmsg, int, S_IRUGO);
2294 MODULE_PARM_DESC(visorchipset_testmsg,
2295 "1 to manufacture the chipset, bus, and switch messages");
2296 int visorchipset_testmsg = 0;
2298 module_param_named(major, visorchipset_major, int, S_IRUGO);
2299 MODULE_PARM_DESC(visorchipset_major, "major device number to use for the device node");
2300 int visorchipset_major = 0;
2302 module_param_named(serverregwait, visorchipset_serverregwait, int, S_IRUGO);
2303 MODULE_PARM_DESC(visorchipset_serverreqwait,
2304 "1 to have the module wait for the visor bus to register");
2305 int visorchipset_serverregwait = 0; /* default is off */
2306 module_param_named(clientregwait, visorchipset_clientregwait, int, S_IRUGO);
2307 MODULE_PARM_DESC(visorchipset_clientregwait, "1 to have the module wait for the visorclientbus to register");
2308 int visorchipset_clientregwait = 1; /* default is on */
2309 module_param_named(testteardown, visorchipset_testteardown, int, S_IRUGO);
2310 MODULE_PARM_DESC(visorchipset_testteardown,
2311 "1 to test teardown of the chipset, bus, and switch");
2312 int visorchipset_testteardown = 0; /* default is off */
2313 module_param_named(disable_controlvm, visorchipset_disable_controlvm, int,
2315 MODULE_PARM_DESC(visorchipset_disable_controlvm,
2316 "1 to disable polling of controlVm channel");
2317 int visorchipset_disable_controlvm = 0; /* default is off */
2318 module_param_named(crash_kernel, visorchipset_crash_kernel, int, S_IRUGO);
2319 MODULE_PARM_DESC(visorchipset_crash_kernel,
2320 "1 means we are running in crash kernel");
2321 int visorchipset_crash_kernel = 0; /* default is running in non-crash kernel */
2322 module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2324 MODULE_PARM_DESC(visorchipset_holdchipsetready,
2325 "1 to hold response to CHIPSET_READY");
2326 int visorchipset_holdchipsetready = 0; /* default is to send CHIPSET_READY
2327 * response immediately */
2328 module_init(visorchipset_init);
2329 module_exit(visorchipset_exit);
2331 MODULE_AUTHOR("Unisys");
2332 MODULE_LICENSE("GPL");
2333 MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2335 MODULE_VERSION(VERSION);