3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
20 #include <linux/kernel.h>
21 #include <linux/highmem.h>
22 #ifdef CONFIG_MODVERSIONS
23 #include <config/modversions.h>
25 #include <linux/module.h>
26 #include <linux/debugfs.h>
28 #include <linux/types.h>
29 #include <linux/uuid.h>
31 #include <linux/version.h>
32 #include "diagnostics/appos_subsystems.h"
34 #include "vbuschannel.h"
36 #include <linux/proc_fs.h>
37 #include <linux/uaccess.h> /* for copy_from_user */
38 #include <linux/ctype.h> /* for toupper */
39 #include <linux/list.h>
42 #include "visorchipset.h"
44 #include "guestlinuxdebug.h"
46 #define SET_PROC_OWNER(x, y)
48 #define POLLJIFFIES_NORMAL 1
/* Choose whether or not you want to wake up the request-polling thread
 * after an I/O termination.
 */
/* __MYFILE__ (below) is shorter than using __FILE__ (full path name) in
 * debug/info/error messages.
 */
54 #define CURRENT_FILE_PC UISLIB_PC_uislib_c
55 #define __MYFILE__ "uislib.c"
/* global function pointers that act as callback functions into virtpcimod */
int (*virt_control_chan_func)(struct guest_msgs *);

/* nonzero once debug_buf holds valid cached output for the "info" entry */
static int debug_buf_valid;
static char *debug_buf;	/* Note this MUST be global,
			 * because the contents must persist across
			 * successive debugfs read() calls */
static unsigned int chipset_inited;	/* checked in uislib_client_inject_add_bus() */

/* wait until the given callback pointer (e.g. virt_control_chan_func)
 * has been registered -- used by init_chipset(); continuation lines of
 * this macro are not visible in this extract */
#define WAIT_ON_CALLBACK(handle) \
static struct bus_info *bus_list;
static rwlock_t bus_list_lock;	/* protects bus_list and each bus->device[] */
static int bus_list_count;	/* number of buses in the list */
static int max_bus_count;	/* maximum number of buses expected */
static u64 phys_data_chan;
static int platform_no;

/* state of the "dev_incoming" channel-polling thread, see process_incoming() */
static struct uisthread_info incoming_ti;
static BOOL incoming_started = FALSE;
static LIST_HEAD(poll_dev_chan);	/* devices currently being polled */
/* statistics reported through the "info" debugfs entry */
static unsigned long long tot_moved_to_tail_cnt;
static unsigned long long tot_wait_cnt;
static unsigned long long tot_wakeup_cnt;
static unsigned long long tot_schedule_cnt;
static int en_smart_wakeup = 1;
static DEFINE_SEMAPHORE(poll_dev_lock);	/* unlocked */
static DECLARE_WAIT_QUEUE_HEAD(poll_dev_wake_q);
static int poll_dev_start;
/* names and handles of the proc/debugfs entries this module creates */
#define CALLHOME_PROC_ENTRY_FN "callhome"
#define CALLHOME_THROTTLED_PROC_ENTRY_FN "callhome_throttled"

#define DIR_DEBUGFS_ENTRY "uislib"
static struct dentry *dir_debugfs;	/* directory holding the entries below */

#define PLATFORMNUMBER_DEBUGFS_ENTRY_FN "platform"
static struct dentry *platformnumber_debugfs_read;

#define CYCLES_BEFORE_WAIT_DEBUGFS_ENTRY_FN "cycles_before_wait"
static struct dentry *cycles_before_wait_debugfs_read;

#define SMART_WAKEUP_DEBUGFS_ENTRY_FN "smart_wakeup"
static struct dentry *smart_wakeup_debugfs_entry;

#define INFO_DEBUGFS_ENTRY_FN "info"
static struct dentry *info_debugfs_entry;

/* idle-cycle bookkeeping for the polling thread, see process_incoming() */
static unsigned long long cycles_before_wait, wait_cycles;
/*****************************************************/
/* local functions                                   */
/*****************************************************/

static ssize_t info_debugfs_read(struct file *file, char __user *buf,
				 size_t len, loff_t *offset);
/* read-only file_operations for the "info" debugfs entry */
static const struct file_operations debugfs_info_fops = {
	.read = info_debugfs_read,
122 init_msg_header(struct controlvm_message *msg, u32 id, uint rsp, uint svr)
124 memset(msg, 0, sizeof(struct controlvm_message));
126 msg->hdr.flags.response_expected = rsp;
127 msg->hdr.flags.server = svr;
/* Map the vbus channel at @ch_addr (@ch_bytes long) and sanity-check it.
 * Returns the ioremap'ed channel pointer; presumably NULL on failure
 * (the failure paths are not visible in this extract -- TODO confirm).
 */
static __iomem void *init_vbus_channel(u64 ch_addr, u32 ch_bytes)
{
	void __iomem *ch = uislib_ioremap_cache(ch_addr, ch_bytes);

	/* verify the mapped memory really is a valid client vbus channel */
	if (!SPAR_VBUS_CHANNEL_OK_CLIENT(ch)) {
/* Handle a CONTROLVM_BUS_CREATE message: allocate a bus_info (with room
 * for dev_count device pointers), optionally map its vbus channel,
 * notify virtpci in the client case, and link the bus into bus_list.
 * Returns a CONTROLVM_RESP_* code.
 */
create_bus(struct controlvm_message *msg, char *buf)
{
	u32 bus_no, dev_count;
	struct bus_info *tmp, *bus;

	/* refuse to exceed the bus count promised at chipset init */
	if (max_bus_count == bus_list_count) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, max_bus_count,
				 POSTCODE_SEVERITY_ERR);
		return CONTROLVM_RESP_ERROR_MAX_BUSES;

	bus_no = msg->cmd.create_bus.bus_no;
	dev_count = msg->cmd.create_bus.dev_count;

	POSTCODE_LINUX_4(BUS_CREATE_ENTRY_PC, bus_no, dev_count,
			 POSTCODE_SEVERITY_INFO);

	/* one allocation holds the bus_info plus its device-pointer array */
		sizeof(struct bus_info) +
		(dev_count * sizeof(struct device_info *));
	bus = kzalloc(size, GFP_ATOMIC);
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		return CONTROLVM_RESP_ERROR_KMALLOC_FAILED;

	/* Currently by default, the bus Number is the GuestHandle.
	 * Configure Bus message can override this.
	 */
	if (msg->hdr.flags.test_message) {
		/* This implies we're the IOVM so set guest handle to 0... */
		bus->guest_handle = 0;
		bus->bus_no = bus_no;
		bus->bus_no = bus_no;
		bus->guest_handle = bus_no;
	sprintf(bus->name, "%d", (int)bus->bus_no);
	bus->device_count = dev_count;
	/* the device-pointer array lives right after the bus_info itself */
		(struct device_info **)((char *)bus + sizeof(struct bus_info));
	bus->bus_inst_uuid = msg->cmd.create_bus.bus_inst_uuid;
	bus->bus_channel_bytes = 0;
	bus->bus_channel = NULL;

	/* add bus to our bus list - but check for duplicates first */
	read_lock(&bus_list_lock);
	for (tmp = bus_list; tmp; tmp = tmp->next) {
		if (tmp->bus_no == bus->bus_no)
	read_unlock(&bus_list_lock);
		/* found a bus already in the list with same bus_no -
		 * the create is therefore a duplicate, so fail it */
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->bus_no,
				 POSTCODE_SEVERITY_ERR);
		return CONTROLVM_RESP_ERROR_ALREADY_DONE;
	if ((msg->cmd.create_bus.channel_addr != 0) &&
	    (msg->cmd.create_bus.channel_bytes != 0)) {
		/* map the optional vbus channel supplied with the message */
		bus->bus_channel_bytes = msg->cmd.create_bus.channel_bytes;
		init_vbus_channel(msg->cmd.create_bus.channel_addr,
				  msg->cmd.create_bus.channel_bytes);
	/* the msg is bound for virtpci; send guest_msgs struct to callback */
	if (!msg->hdr.flags.server) {
		struct guest_msgs cmd;

		cmd.msgtype = GUEST_ADD_VBUS;
		cmd.add_vbus.bus_no = bus_no;
		cmd.add_vbus.chanptr = bus->bus_channel;
		cmd.add_vbus.dev_count = dev_count;
		cmd.add_vbus.bus_uuid = msg->cmd.create_bus.bus_data_type_uuid;
		cmd.add_vbus.instance_uuid = msg->cmd.create_bus.bus_inst_uuid;
		if (!virt_control_chan_func) {
			POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->bus_no,
					 POSTCODE_SEVERITY_ERR);
			return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
		if (!virt_control_chan_func(&cmd)) {
			POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->bus_no,
					 POSTCODE_SEVERITY_ERR);
			    CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;

	/* add bus at the head of our list */
	write_lock(&bus_list_lock);
	bus->next = bus_list;
	write_unlock(&bus_list_lock);

	POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus->bus_no,
			 POSTCODE_SEVERITY_INFO);
	return CONTROLVM_RESP_SUCCESS;
/* Handle a CONTROLVM_BUS_DESTROY message: find the bus, verify it has no
 * devices still attached, notify virtpci in the client case, unlink the
 * bus from bus_list and unmap its channel.  Returns a CONTROLVM_RESP_*.
 */
destroy_bus(struct controlvm_message *msg, char *buf)
	struct bus_info *bus, *prev = NULL;
	struct guest_msgs cmd;

	bus_no = msg->cmd.destroy_bus.bus_no;

	read_lock(&bus_list_lock);
		if (bus->bus_no == bus_no)
		read_unlock(&bus_list_lock);
		return CONTROLVM_RESP_ERROR_ALREADY_DONE;

	/* verify that this bus has no devices. */
	for (i = 0; i < bus->device_count; i++) {
		if (bus->device[i]) {
			read_unlock(&bus_list_lock);
			return CONTROLVM_RESP_ERROR_BUS_DEVICE_ATTACHED;
	read_unlock(&bus_list_lock);

	if (msg->hdr.flags.server)

	/* client messages require us to call the virtpci callback associated
	 * with the bus */
	cmd.msgtype = GUEST_DEL_VBUS;
	cmd.del_vbus.bus_no = bus_no;
	if (!virt_control_chan_func)
		return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;

	if (!virt_control_chan_func(&cmd))
		return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;

	/* finally, remove the bus from the list */
	write_lock(&bus_list_lock);
	if (prev)	/* not at head */
		prev->next = bus->next;
		bus_list = bus->next;
	write_unlock(&bus_list_lock);

	if (bus->bus_channel) {
		uislib_iounmap(bus->bus_channel);
		bus->bus_channel = NULL;

	return CONTROLVM_RESP_SUCCESS;
323 static int create_device(struct controlvm_message *msg, char *buf)
325 struct device_info *dev;
326 struct bus_info *bus;
327 struct guest_msgs cmd;
329 int result = CONTROLVM_RESP_SUCCESS;
330 u64 min_size = MIN_IO_CHANNEL_SIZE;
331 struct req_handler_info *req_handler;
333 bus_no = msg->cmd.create_device.bus_no;
334 dev_no = msg->cmd.create_device.dev_no;
336 POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
337 POSTCODE_SEVERITY_INFO);
339 dev = kzalloc(sizeof(*dev), GFP_ATOMIC);
341 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
342 POSTCODE_SEVERITY_ERR);
343 return CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
346 dev->channel_uuid = msg->cmd.create_device.data_type_uuid;
347 dev->intr = msg->cmd.create_device.intr;
348 dev->channel_addr = msg->cmd.create_device.channel_addr;
349 dev->bus_no = bus_no;
350 dev->dev_no = dev_no;
351 sema_init(&dev->interrupt_callback_lock, 1); /* unlocked */
352 sprintf(dev->devid, "vbus%u:dev%u", (unsigned)bus_no, (unsigned)dev_no);
353 /* map the channel memory for the device. */
354 if (msg->hdr.flags.test_message) {
355 dev->chanptr = (void __iomem *)__va(dev->channel_addr);
357 req_handler = req_handler_find(dev->channel_uuid);
359 /* generic service handler registered for this
362 min_size = req_handler->min_channel_bytes;
363 if (min_size > msg->cmd.create_device.channel_bytes) {
364 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
365 bus_no, POSTCODE_SEVERITY_ERR);
366 result = CONTROLVM_RESP_ERROR_CHANNEL_SIZE_TOO_SMALL;
370 uislib_ioremap_cache(dev->channel_addr,
371 msg->cmd.create_device.channel_bytes);
373 result = CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
374 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
375 bus_no, POSTCODE_SEVERITY_ERR);
379 dev->instance_uuid = msg->cmd.create_device.dev_inst_uuid;
380 dev->channel_bytes = msg->cmd.create_device.channel_bytes;
382 read_lock(&bus_list_lock);
383 for (bus = bus_list; bus; bus = bus->next) {
384 if (bus->bus_no != bus_no)
386 /* make sure the device number is valid */
387 if (dev_no >= bus->device_count) {
388 result = CONTROLVM_RESP_ERROR_MAX_DEVICES;
389 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
390 bus_no, POSTCODE_SEVERITY_ERR);
391 read_unlock(&bus_list_lock);
394 /* make sure this device is not already set */
395 if (bus->device[dev_no]) {
396 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC,
398 POSTCODE_SEVERITY_ERR);
399 result = CONTROLVM_RESP_ERROR_ALREADY_DONE;
400 read_unlock(&bus_list_lock);
403 read_unlock(&bus_list_lock);
404 /* the msg is bound for virtpci; send
405 * guest_msgs struct to callback
407 if (msg->hdr.flags.server) {
408 bus->device[dev_no] = dev;
409 POSTCODE_LINUX_4(DEVICE_CREATE_SUCCESS_PC, dev_no,
410 bus_no, POSTCODE_SEVERITY_INFO);
411 return CONTROLVM_RESP_SUCCESS;
413 if (uuid_le_cmp(dev->channel_uuid,
414 spar_vhba_channel_protocol_uuid) == 0) {
415 wait_for_valid_guid(&((struct channel_header __iomem *)
416 (dev->chanptr))->chtype);
417 if (!SPAR_VHBA_CHANNEL_OK_CLIENT(dev->chanptr)) {
418 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC,
420 POSTCODE_SEVERITY_ERR);
421 result = CONTROLVM_RESP_ERROR_CHANNEL_INVALID;
424 cmd.msgtype = GUEST_ADD_VHBA;
425 cmd.add_vhba.chanptr = dev->chanptr;
426 cmd.add_vhba.bus_no = bus_no;
427 cmd.add_vhba.device_no = dev_no;
428 cmd.add_vhba.instance_uuid = dev->instance_uuid;
429 cmd.add_vhba.intr = dev->intr;
430 } else if (uuid_le_cmp(dev->channel_uuid,
431 spar_vnic_channel_protocol_uuid) == 0) {
432 wait_for_valid_guid(&((struct channel_header __iomem *)
433 (dev->chanptr))->chtype);
434 if (!SPAR_VNIC_CHANNEL_OK_CLIENT(dev->chanptr)) {
435 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC,
437 POSTCODE_SEVERITY_ERR);
438 result = CONTROLVM_RESP_ERROR_CHANNEL_INVALID;
441 cmd.msgtype = GUEST_ADD_VNIC;
442 cmd.add_vnic.chanptr = dev->chanptr;
443 cmd.add_vnic.bus_no = bus_no;
444 cmd.add_vnic.device_no = dev_no;
445 cmd.add_vnic.instance_uuid = dev->instance_uuid;
446 cmd.add_vhba.intr = dev->intr;
448 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
449 bus_no, POSTCODE_SEVERITY_ERR);
450 result = CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
454 if (!virt_control_chan_func) {
455 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
456 bus_no, POSTCODE_SEVERITY_ERR);
457 result = CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
461 if (!virt_control_chan_func(&cmd)) {
462 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
463 bus_no, POSTCODE_SEVERITY_ERR);
465 CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
469 bus->device[dev_no] = dev;
470 POSTCODE_LINUX_4(DEVICE_CREATE_SUCCESS_PC, dev_no,
471 bus_no, POSTCODE_SEVERITY_INFO);
472 return CONTROLVM_RESP_SUCCESS;
474 read_unlock(&bus_list_lock);
476 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
477 POSTCODE_SEVERITY_ERR);
478 result = CONTROLVM_RESP_ERROR_BUS_INVALID;
481 if (!msg->hdr.flags.test_message) {
482 uislib_iounmap(dev->chanptr);
/* Handle a CONTROLVM_DEVICE_CHANGESTATE -> standby message: look up the
 * device, then ask virtpci to pause the VHBA/VNIC that owns its channel.
 * Returns a CONTROLVM_RESP_* code.
 */
static int pause_device(struct controlvm_message *msg)
	struct bus_info *bus;
	struct device_info *dev;
	struct guest_msgs cmd;
	int retval = CONTROLVM_RESP_SUCCESS;

	bus_no = msg->cmd.device_change_state.bus_no;
	dev_no = msg->cmd.device_change_state.dev_no;

	read_lock(&bus_list_lock);
	for (bus = bus_list; bus; bus = bus->next) {
		if (bus->bus_no == bus_no) {
			/* make sure the device number is valid */
			if (dev_no >= bus->device_count) {
				retval = CONTROLVM_RESP_ERROR_DEVICE_INVALID;
				/* make sure this device exists */
				dev = bus->device[dev_no];
					    CONTROLVM_RESP_ERROR_ALREADY_DONE;
		retval = CONTROLVM_RESP_ERROR_BUS_INVALID;
	read_unlock(&bus_list_lock);
	if (retval == CONTROLVM_RESP_SUCCESS) {
		/* the msg is bound for virtpci; send
		 * guest_msgs struct to callback
		 */
		if (uuid_le_cmp(dev->channel_uuid,
				spar_vhba_channel_protocol_uuid) == 0) {
			cmd.msgtype = GUEST_PAUSE_VHBA;
			cmd.pause_vhba.chanptr = dev->chanptr;
		} else if (uuid_le_cmp(dev->channel_uuid,
				       spar_vnic_channel_protocol_uuid) == 0) {
			cmd.msgtype = GUEST_PAUSE_VNIC;
			cmd.pause_vnic.chanptr = dev->chanptr;
			return CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
		if (!virt_control_chan_func)
			return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
		if (!virt_control_chan_func(&cmd)) {
			    CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
/* Handle a CONTROLVM_DEVICE_CHANGESTATE -> running message: look up the
 * device, then ask virtpci to resume the VHBA/VNIC that owns its channel.
 * Returns a CONTROLVM_RESP_* code.
 */
static int resume_device(struct controlvm_message *msg)
	struct bus_info *bus;
	struct device_info *dev;
	struct guest_msgs cmd;
	int retval = CONTROLVM_RESP_SUCCESS;

	bus_no = msg->cmd.device_change_state.bus_no;
	dev_no = msg->cmd.device_change_state.dev_no;

	read_lock(&bus_list_lock);
	for (bus = bus_list; bus; bus = bus->next) {
		if (bus->bus_no == bus_no) {
			/* make sure the device number is valid */
			if (dev_no >= bus->device_count) {
				retval = CONTROLVM_RESP_ERROR_DEVICE_INVALID;
				/* make sure this device exists */
				dev = bus->device[dev_no];
					    CONTROLVM_RESP_ERROR_ALREADY_DONE;
		retval = CONTROLVM_RESP_ERROR_BUS_INVALID;
	read_unlock(&bus_list_lock);
	/* the msg is bound for virtpci; send
	 * guest_msgs struct to callback
	 */
	if (retval == CONTROLVM_RESP_SUCCESS) {
		if (uuid_le_cmp(dev->channel_uuid,
				spar_vhba_channel_protocol_uuid) == 0) {
			cmd.msgtype = GUEST_RESUME_VHBA;
			cmd.resume_vhba.chanptr = dev->chanptr;
		} else if (uuid_le_cmp(dev->channel_uuid,
				       spar_vnic_channel_protocol_uuid) == 0) {
			cmd.msgtype = GUEST_RESUME_VNIC;
			cmd.resume_vnic.chanptr = dev->chanptr;
			return CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
		if (!virt_control_chan_func)
			return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
		if (!virt_control_chan_func(&cmd)) {
			    CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
605 static int destroy_device(struct controlvm_message *msg, char *buf)
608 struct bus_info *bus;
609 struct device_info *dev;
610 struct guest_msgs cmd;
611 int retval = CONTROLVM_RESP_SUCCESS;
613 bus_no = msg->cmd.destroy_device.bus_no;
614 dev_no = msg->cmd.destroy_device.bus_no;
616 read_lock(&bus_list_lock);
617 for (bus = bus_list; bus; bus = bus->next) {
618 if (bus->bus_no == bus_no) {
619 /* make sure the device number is valid */
620 if (dev_no >= bus->device_count) {
621 retval = CONTROLVM_RESP_ERROR_DEVICE_INVALID;
623 /* make sure this device exists */
624 dev = bus->device[dev_no];
627 CONTROLVM_RESP_ERROR_ALREADY_DONE;
635 retval = CONTROLVM_RESP_ERROR_BUS_INVALID;
636 read_unlock(&bus_list_lock);
637 if (retval == CONTROLVM_RESP_SUCCESS) {
638 /* the msg is bound for virtpci; send
639 * guest_msgs struct to callback
641 if (uuid_le_cmp(dev->channel_uuid,
642 spar_vhba_channel_protocol_uuid) == 0) {
643 cmd.msgtype = GUEST_DEL_VHBA;
644 cmd.del_vhba.chanptr = dev->chanptr;
645 } else if (uuid_le_cmp(dev->channel_uuid,
646 spar_vnic_channel_protocol_uuid) == 0) {
647 cmd.msgtype = GUEST_DEL_VNIC;
648 cmd.del_vnic.chanptr = dev->chanptr;
651 CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
653 if (!virt_control_chan_func) {
655 CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
657 if (!virt_control_chan_func(&cmd)) {
659 CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
661 /* you must disable channel interrupts BEFORE you unmap the channel,
662 * because if you unmap first, there may still be some activity going
663 * on which accesses the channel and you will get a "unable to handle
664 * kernel paging request"
667 uislib_disable_channel_interrupts(bus_no, dev_no);
668 /* unmap the channel memory for the device. */
669 if (!msg->hdr.flags.test_message)
670 uislib_iounmap(dev->chanptr);
672 bus->device[dev_no] = NULL;
/* Handle a CONTROLVM_CHIPSET_INIT message: record the expected bus count
 * and platform number, and (for non-test messages) wait until virtpci
 * has registered its callback.  Returns CONTROLVM_RESP_SUCCESS.
 */
init_chipset(struct controlvm_message *msg, char *buf)
	POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);

	max_bus_count = msg->cmd.init_chipset.bus_count;
	platform_no = msg->cmd.init_chipset.platform_number;

	/* We need to make sure we have our functions registered
	 * before processing messages. If we are a test vehicle the
	 * test_message for init_chipset will be set. We can ignore the
	 * waits for the callbacks, since this will be manually entered
	 * from a user. If no test_message is set, we will wait for the
	 * functions.
	 */
	if (!msg->hdr.flags.test_message)
		WAIT_ON_CALLBACK(virt_control_chan_func);

	POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);

	return CONTROLVM_RESP_SUCCESS;
/* Build a CONTROLVM_BUS_DESTROY message for @bus_no and run it through
 * destroy_bus(); glue used by the uislib_client_inject_del_* entry points.
 */
static int delete_bus_glue(u32 bus_no)
	struct controlvm_message msg;

	init_msg_header(&msg, CONTROLVM_BUS_DESTROY, 0, 0);
	msg.cmd.destroy_bus.bus_no = bus_no;
	if (destroy_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS)
/* Build a CONTROLVM_DEVICE_DESTROY message for @bus_no/@dev_no and run it
 * through destroy_device(); glue for the uislib_client_inject_del_* APIs.
 */
static int delete_device_glue(u32 bus_no, u32 dev_no)
	struct controlvm_message msg;

	init_msg_header(&msg, CONTROLVM_DEVICE_DESTROY, 0, 0);
	msg.cmd.destroy_device.bus_no = bus_no;
	msg.cmd.destroy_device.dev_no = dev_no;
	if (destroy_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS)
/* Public entry point: lazily initialize the chipset (first call only),
 * then create bus @bus_no with the given instance uuid and optional
 * channel.  Returns 0-style success/failure per the (not fully visible)
 * return statements.
 */
uislib_client_inject_add_bus(u32 bus_no, uuid_le inst_uuid,
			     u64 channel_addr, ulong n_channel_bytes)
	struct controlvm_message msg;

	/* step 0: init the chipset */
	POSTCODE_LINUX_3(CHIPSET_INIT_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);

	if (!chipset_inited) {
		/* step: initialize the chipset */
		init_msg_header(&msg, CONTROLVM_CHIPSET_INIT, 0, 0);
		/* this change is needed so that console will come up
		 * OK even when the bus 0 create comes in late. If the
		 * bus 0 create is the first create, then the add_vnic
		 * will work fine, but if the bus 0 create arrives
		 * after number 4, then the add_vnic will fail, and the
		 * ultraboot will fail.
		 */
		msg.cmd.init_chipset.bus_count = 23;
		msg.cmd.init_chipset.switch_count = 0;
		if (init_chipset(&msg, NULL) != CONTROLVM_RESP_SUCCESS)
		POSTCODE_LINUX_3(CHIPSET_INIT_EXIT_PC, bus_no,
				 POSTCODE_SEVERITY_INFO);

	/* step 1: create a bus */
	POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no,
			 POSTCODE_SEVERITY_WARNING);
	init_msg_header(&msg, CONTROLVM_BUS_CREATE, 0, 0);
	msg.cmd.create_bus.bus_no = bus_no;
	msg.cmd.create_bus.dev_count = 23;	/* devNo+1; */
	msg.cmd.create_bus.channel_addr = channel_addr;
	msg.cmd.create_bus.channel_bytes = n_channel_bytes;
	if (create_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);

	POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
EXPORT_SYMBOL_GPL(uislib_client_inject_add_bus);
/* Public entry point: destroy bus @bus_no via delete_bus_glue(). */
uislib_client_inject_del_bus(u32 bus_no)
	return delete_bus_glue(bus_no);
EXPORT_SYMBOL_GPL(uislib_client_inject_del_bus);
/* Public entry point: move VHBA @bus_no/@dev_no to standby state. */
uislib_client_inject_pause_vhba(u32 bus_no, u32 dev_no)
	struct controlvm_message msg;

	init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
	msg.cmd.device_change_state.bus_no = bus_no;
	msg.cmd.device_change_state.dev_no = dev_no;
	msg.cmd.device_change_state.state = segment_state_standby;
	rc = pause_device(&msg);
	if (rc != CONTROLVM_RESP_SUCCESS)
EXPORT_SYMBOL_GPL(uislib_client_inject_pause_vhba);
/* Public entry point: move VHBA @bus_no/@dev_no back to running state. */
uislib_client_inject_resume_vhba(u32 bus_no, u32 dev_no)
	struct controlvm_message msg;

	init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
	msg.cmd.device_change_state.bus_no = bus_no;
	msg.cmd.device_change_state.dev_no = dev_no;
	msg.cmd.device_change_state.state = segment_state_running;
	rc = resume_device(&msg);
	if (rc != CONTROLVM_RESP_SUCCESS)
EXPORT_SYMBOL_GPL(uislib_client_inject_resume_vhba);
/* Public entry point: create a VHBA device on an existing bus.
 * @is_test_addr nonzero means @phys_chan_addr is directly addressable
 * and must NOT be ioremap()ed (sets the test_message flag).
 */
uislib_client_inject_add_vhba(u32 bus_no, u32 dev_no,
			      u64 phys_chan_addr, u32 chan_bytes,
			      int is_test_addr, uuid_le inst_uuid,
			      struct irq_info *intr)
	struct controlvm_message msg;

	/* chipset init'ed with bus bus has been previously created -
	 * Verify it still exists step 2: create the VHBA device on the
	 * bus
	 */
	POSTCODE_LINUX_4(VHBA_CREATE_ENTRY_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);

	init_msg_header(&msg, CONTROLVM_DEVICE_CREATE, 0, 0);
		/* signify that the physical channel address does NOT
		 * need to be ioremap()ed
		 */
		msg.hdr.flags.test_message = 1;
	msg.cmd.create_device.bus_no = bus_no;
	msg.cmd.create_device.dev_no = dev_no;
	msg.cmd.create_device.dev_inst_uuid = inst_uuid;
		msg.cmd.create_device.intr = *intr;
		memset(&msg.cmd.create_device.intr, 0,
		       sizeof(struct irq_info));
	msg.cmd.create_device.channel_addr = phys_chan_addr;
	if (chan_bytes < MIN_IO_CHANNEL_SIZE) {
		POSTCODE_LINUX_4(VHBA_CREATE_FAILURE_PC, chan_bytes,
				 MIN_IO_CHANNEL_SIZE, POSTCODE_SEVERITY_ERR);
	msg.cmd.create_device.channel_bytes = chan_bytes;
	msg.cmd.create_device.data_type_uuid = spar_vhba_channel_protocol_uuid;
	if (create_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
		POSTCODE_LINUX_4(VHBA_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);

	POSTCODE_LINUX_4(VHBA_CREATE_SUCCESS_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);
EXPORT_SYMBOL_GPL(uislib_client_inject_add_vhba);

/* Public entry point: destroy VHBA @bus_no/@dev_no. */
uislib_client_inject_del_vhba(u32 bus_no, u32 dev_no)
	return delete_device_glue(bus_no, dev_no);
EXPORT_SYMBOL_GPL(uislib_client_inject_del_vhba);
/* Public entry point: create a VNIC device on an existing bus.
 * Mirrors uislib_client_inject_add_vhba() but uses the VNIC protocol uuid.
 */
uislib_client_inject_add_vnic(u32 bus_no, u32 dev_no,
			      u64 phys_chan_addr, u32 chan_bytes,
			      int is_test_addr, uuid_le inst_uuid,
			      struct irq_info *intr)
	struct controlvm_message msg;

	/* chipset init'ed with bus bus has been previously created -
	 * Verify it still exists step 2: create the VNIC device on the
	 * bus
	 */
	POSTCODE_LINUX_4(VNIC_CREATE_ENTRY_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);

	init_msg_header(&msg, CONTROLVM_DEVICE_CREATE, 0, 0);
		/* signify that the physical channel address does NOT
		 * need to be ioremap()ed
		 */
		msg.hdr.flags.test_message = 1;
	msg.cmd.create_device.bus_no = bus_no;
	msg.cmd.create_device.dev_no = dev_no;
	msg.cmd.create_device.dev_inst_uuid = inst_uuid;
		msg.cmd.create_device.intr = *intr;
		memset(&msg.cmd.create_device.intr, 0,
		       sizeof(struct irq_info));
	msg.cmd.create_device.channel_addr = phys_chan_addr;
	if (chan_bytes < MIN_IO_CHANNEL_SIZE) {
		POSTCODE_LINUX_4(VNIC_CREATE_FAILURE_PC, chan_bytes,
				 MIN_IO_CHANNEL_SIZE, POSTCODE_SEVERITY_ERR);
	msg.cmd.create_device.channel_bytes = chan_bytes;
	msg.cmd.create_device.data_type_uuid = spar_vnic_channel_protocol_uuid;
	if (create_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
		POSTCODE_LINUX_4(VNIC_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);

	POSTCODE_LINUX_4(VNIC_CREATE_SUCCESS_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);
EXPORT_SYMBOL_GPL(uislib_client_inject_add_vnic);
/* Public entry point: move VNIC @bus_no/@dev_no to standby state. */
uislib_client_inject_pause_vnic(u32 bus_no, u32 dev_no)
	struct controlvm_message msg;

	init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
	msg.cmd.device_change_state.bus_no = bus_no;
	msg.cmd.device_change_state.dev_no = dev_no;
	msg.cmd.device_change_state.state = segment_state_standby;
	rc = pause_device(&msg);
	if (rc != CONTROLVM_RESP_SUCCESS)
EXPORT_SYMBOL_GPL(uislib_client_inject_pause_vnic);
/* Public entry point: move VNIC @bus_no/@dev_no back to running state. */
uislib_client_inject_resume_vnic(u32 bus_no, u32 dev_no)
	struct controlvm_message msg;

	init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
	msg.cmd.device_change_state.bus_no = bus_no;
	msg.cmd.device_change_state.dev_no = dev_no;
	msg.cmd.device_change_state.state = segment_state_running;
	rc = resume_device(&msg);
	if (rc != CONTROLVM_RESP_SUCCESS)
EXPORT_SYMBOL_GPL(uislib_client_inject_resume_vnic);
/* Public entry point: destroy VNIC @bus_no/@dev_no. */
uislib_client_inject_del_vnic(u32 bus_no, u32 dev_no)
	return delete_device_glue(bus_no, dev_no);
EXPORT_SYMBOL_GPL(uislib_client_inject_del_vnic);
/* Allocate one object from @cur_pool; @fn/@ln identify the caller
 * (file name / line number), presumably for diagnostics -- TODO confirm.
 */
uislib_cache_alloc(struct kmem_cache *cur_pool, char *fn, int ln)
	/* __GFP_NORETRY means "ok to fail", meaning kmalloc() can
	 * return NULL. If you do NOT specify __GFP_NORETRY, Linux
	 * will go to extreme measures to get memory for you (like,
	 * invoke oom killer), which will probably cripple the system.
	 */
	void *p = kmem_cache_alloc(cur_pool, GFP_ATOMIC | __GFP_NORETRY);
EXPORT_SYMBOL_GPL(uislib_cache_alloc);
/* Return @p to @cur_pool; @fn/@ln identify the caller for diagnostics. */
uislib_cache_free(struct kmem_cache *cur_pool, void *p, char *fn, int ln)
	kmem_cache_free(cur_pool, p);
EXPORT_SYMBOL_GPL(uislib_cache_free);
982 /*****************************************************/
983 /* proc filesystem callback functions */
984 /*****************************************************/
/* append one formatted line to *buff, tracking bytes used in tot */
#define PLINE(...) uisutil_add_proc_line_ex(&tot, buff, \
					    buff_len, __VA_ARGS__)

/* Format the bus/device tables and polling statistics into *buff for the
 * "info" debugfs entry.  Returns byte count on success, negative on error
 * (per the "-1 will be returned" note at the call site).
 */
info_debugfs_read_helper(char **buff, int *buff_len)
	struct bus_info *bus;

	if (PLINE("\nBuses:\n") < 0)

	read_lock(&bus_list_lock);
	for (bus = bus_list; bus; bus = bus->next) {
		if (PLINE("    bus=0x%p, busNo=%d, deviceCount=%d\n",
			  bus, bus->bus_no, bus->device_count) < 0)
			goto err_done_unlock;

		if (PLINE("        Devices:\n") < 0)
			goto err_done_unlock;

		for (i = 0; i < bus->device_count; i++) {
			if (bus->device[i]) {
				if (PLINE("          busNo %d, device[%i]: 0x%p, chanptr=0x%p, swtch=0x%p\n",
					  bus->bus_no, i, bus->device[i],
					  bus->device[i]->chanptr,
					  bus->device[i]->swtch) < 0)
					goto err_done_unlock;

				if (PLINE("            first_busy_cnt=%llu, moved_to_tail_cnt=%llu, last_on_list_cnt=%llu\n",
					  bus->device[i]->first_busy_cnt,
					  bus->device[i]->moved_to_tail_cnt,
					  bus->device[i]->last_on_list_cnt) < 0)
					goto err_done_unlock;
	read_unlock(&bus_list_lock);

	if (PLINE("UisUtils_Registered_Services: %d\n",
		  atomic_read(&uisutils_registered_services)) < 0)
	if (PLINE("cycles_before_wait %llu wait_cycles:%llu\n",
		  cycles_before_wait, wait_cycles) < 0)
	if (PLINE("tot_wakeup_cnt %llu:tot_wait_cnt %llu:tot_schedule_cnt %llu\n",
		  tot_wakeup_cnt, tot_wait_cnt, tot_schedule_cnt) < 0)
	if (PLINE("en_smart_wakeup %d\n", en_smart_wakeup) < 0)
	if (PLINE("tot_moved_to_tail_cnt %llu\n", tot_moved_to_tail_cnt) < 0)

	/* error path: drop the lock before returning */
	read_unlock(&bus_list_lock);
/* debugfs .read handler for the "info" entry: (re)build the cached
 * debug_buf on the first read of the file, then serve it with
 * simple_read_from_buffer().
 */
static ssize_t info_debugfs_read(struct file *file, char __user *buf,
				 size_t len, loff_t *offset)
	int total_bytes = 0;
	int remaining_bytes = PROC_READ_BUFFER_SIZE;

	debug_buf = vmalloc(PROC_READ_BUFFER_SIZE);

	/* rebuild the cache at offset 0 or when it was never filled */
	if ((*offset == 0) || (!debug_buf_valid)) {
		/* if the read fails, then -1 will be returned */
		total_bytes = info_debugfs_read_helper(&temp, &remaining_bytes);
		debug_buf_valid = 1;
		/* serve the previously-cached contents */
		total_bytes = strlen(debug_buf);

	return simple_read_from_buffer(buf, len, offset,
				       debug_buf, total_bytes);
/* Look up device @dev_no on bus @bus_no under bus_list_lock.
 * Returns the device_info pointer, or NULL if the bus or device
 * does not exist.
 */
static struct device_info *find_dev(u32 bus_no, u32 dev_no)
	struct bus_info *bus;
	struct device_info *dev = NULL;

	read_lock(&bus_list_lock);
	for (bus = bus_list; bus; bus = bus->next) {
		if (bus->bus_no == bus_no) {
			/* make sure the device number is valid */
			if (dev_no >= bus->device_count)
			dev = bus->device[dev_no];
	read_unlock(&bus_list_lock);
/* This thread calls the "interrupt" function for each device that has
 * enabled such using uislib_enable_channel_interrupts(). The "interrupt"
 * function typically reads and processes the devices's channel input
 * queue. This thread repeatedly does this, until the thread is told to stop
 * (via uisthread_stop()). Sleeping rules:
 * - If we have called the "interrupt" function for all devices, and all of
 *   them have reported "nothing processed" (returned 0), then we will go to
 *   sleep for a maximum of POLLJIFFIES_NORMAL jiffies.
 * - If anyone calls uislib_force_channel_interrupt(), the above jiffy
 *   sleep will be interrupted, and we will resume calling the "interrupt"
 *   function for all devices.
 * - The list of devices is dynamically re-ordered in order to
 *   attempt to preserve fairness. Whenever we spin thru the list of
 *   devices and call the dev->interrupt() function, if we find
 *   devices which report that there is still more work to do, the
 *   the first such device we find is moved to the end of the device
 *   list. This ensures that extremely busy devices don't starve out
 *   the others.
 */
static int process_incoming(void *v)
	unsigned long long cur_cycles, old_cycles, idle_cycles, delta_cycles;
	struct list_head *new_tail = NULL;

	UIS_DAEMONIZE("dev_incoming");
	/* calibration: sample the cost of a short timed wait 16 times */
	for (i = 0; i < 16; i++) {
		old_cycles = get_cycles();
		wait_event_timeout(poll_dev_wake_q,
				   0, POLLJIFFIES_NORMAL);
		cur_cycles = get_cycles();
		if (wait_cycles == 0) {
			wait_cycles = (cur_cycles - old_cycles);
			if (wait_cycles < (cur_cycles - old_cycles))
				wait_cycles = (cur_cycles - old_cycles);
	cycles_before_wait = wait_cycles;
		struct list_head *lelt, *tmp;
		struct device_info *dev = NULL;

		/* poll each channel for input */
		down(&poll_dev_lock);
		list_for_each_safe(lelt, tmp, &poll_dev_chan) {
			dev = list_entry(lelt, struct device_info,
					 list_polling_device_channels);
			down(&dev->interrupt_callback_lock);
			rc = dev->interrupt(dev->interrupt_context);
			up(&dev->interrupt_callback_lock);
				/* dev->interrupt returned, but there
				 * is still more work to do.
				 * Reschedule work to occur as soon as
				 * possible.
				 */
				dev->first_busy_cnt++;
					dev->moved_to_tail_cnt++;
				dev->last_on_list_cnt++;
		if (kthread_should_stop())
			tot_moved_to_tail_cnt++;
			list_move_tail(new_tail, &poll_dev_chan);
		cur_cycles = get_cycles();
		delta_cycles = cur_cycles - old_cycles;
		old_cycles = cur_cycles;

		/* At this point, we have scanned thru all of the
		 * channels, and at least one of the following is true:
		 * - there is no input waiting on any of the channels
		 * - we have received a signal to stop this thread
		 */
		if (kthread_should_stop())
		if (en_smart_wakeup == 0xFF)
		/* wait for POLLJIFFIES_NORMAL jiffies, or until
		 * someone wakes up poll_dev_wake_q,
		 * whichever comes first only do a wait when we have
		 * been idle for cycles_before_wait cycles.
		 */
		if (idle_cycles > cycles_before_wait) {
			wait_event_timeout(poll_dev_wake_q,
					   POLLJIFFIES_NORMAL);
			idle_cycles = idle_cycles + delta_cycles;
	complete_and_exit(&incoming_ti.has_stopped, 0);
1217 initialize_incoming_thread(void)
/*
 * Start the single "dev_incoming" polling thread (process_incoming) if
 * it is not already running; idempotent via the incoming_started flag.
 * NOTE(review): the return type and the uisthread_start() failure path
 * are missing from this truncated extract -- presumably it returns a
 * boolean-style success indicator; confirm against the full source.
 */
1219 if (incoming_started)
1221 if (!uisthread_start(&incoming_ti,
1222 &process_incoming, NULL, "dev_incoming")) {
1223 incoming_started = TRUE;
1229 /* Add a new device/channel to the list being processed by
1230 * process_incoming().
1231 * <interrupt> - indicates the function to call periodically.
1232 * <interrupt_context> - indicates the data to pass to the <interrupt>
1236 uislib_enable_channel_interrupts(u32 bus_no, u32 dev_no,
1237 int (*interrupt)(void *),
1238 void *interrupt_context)
/*
 * Register <interrupt>/<interrupt_context> as the polling callback for
 * the device identified by (bus_no, dev_no) and add the device to the
 * poll_dev_chan list serviced by process_incoming(); also ensures the
 * polling thread itself is started.
 * NOTE(review): lines between find_dev() and down() are missing from
 * this extract -- presumably they contain the NULL check on dev;
 * confirm against the full source.
 */
1240 struct device_info *dev;
1242 dev = find_dev(bus_no, dev_no);
/* poll_dev_lock protects poll_dev_chan against the polling thread. */
1246 down(&poll_dev_lock);
1247 initialize_incoming_thread();
1248 dev->interrupt = interrupt;
1249 dev->interrupt_context = interrupt_context;
1250 dev->polling = TRUE;
1251 list_add_tail(&dev->list_polling_device_channels,
1255 EXPORT_SYMBOL_GPL(uislib_enable_channel_interrupts);
1257 /* Remove a device/channel from the list being processed by
1258 * process_incoming().
1261 uislib_disable_channel_interrupts(u32 bus_no, u32 dev_no)
/*
 * Inverse of uislib_enable_channel_interrupts(): remove the device from
 * the poll_dev_chan list and clear its polling state and callback, so
 * process_incoming() stops invoking it.
 * NOTE(review): the NULL check on find_dev()'s result and the matching
 * up(&poll_dev_lock) are in lines missing from this truncated extract.
 */
1263 struct device_info *dev;
1265 dev = find_dev(bus_no, dev_no);
1268 down(&poll_dev_lock);
1269 list_del(&dev->list_polling_device_channels);
1270 dev->polling = FALSE;
1271 dev->interrupt = NULL;
1274 EXPORT_SYMBOL_GPL(uislib_disable_channel_interrupts);
1277 do_wakeup_polling_device_channels(struct work_struct *dummy)
/*
 * Deferred-work handler: wakes the process_incoming() thread sleeping
 * on poll_dev_wake_q.  Scheduled (rather than called inline) by
 * uislib_force_channel_interrupt() to introduce a slight delay.
 */
1279 if (!poll_dev_start) {
1281 wake_up(&poll_dev_wake_q);
/* Work item used to defer the poll_dev_wake_q wakeup; scheduled from
 * uislib_force_channel_interrupt(). */
1285 static DECLARE_WORK(work_wakeup_polling_device_channels,
1286 do_wakeup_polling_device_channels);
1288 /* Call this function when you want to send a hint to process_incoming() that
1289 * your device might have more requests.
1292 uislib_force_channel_interrupt(u32 bus_no, u32 dev_no)
/*
 * Hint to process_incoming() that a device may have more requests
 * pending.  No-op when smart wakeup is disabled (en_smart_wakeup == 0);
 * otherwise schedules the wakeup through a work item so it happens
 * slightly later rather than immediately.
 * NOTE(review): bus_no/dev_no are not used by any visible line here --
 * presumably the missing lines (or the API contract) explain why;
 * confirm against the full source.
 */
1294 if (en_smart_wakeup == 0)
1298 /* The point of using schedule_work() instead of just doing
1299 * the work inline is to force a slight delay before waking up
1300 * the process_incoming() thread.
1303 schedule_work(&work_wakeup_polling_device_channels);
1305 EXPORT_SYMBOL_GPL(uislib_force_channel_interrupt);
1307 /*****************************************************/
1308 /* Module Init & Exit functions */
1309 /*****************************************************/
1312 uislib_mod_init(void)
/*
 * Module entry point.  Bails out early when not running on the Unisys
 * s-Par platform; otherwise initializes the global bus-list state and
 * creates the driver's debugfs directory plus its entries: a read-only
 * info file, a read-only platform number, and two writable tunables
 * (cycles_before_wait and the smart-wakeup flag).
 * NOTE(review): several lines (return statements, error handling for
 * the debugfs calls) are missing from this truncated extract.
 */
1314 if (!unisys_spar_platform)
1317 /* initialize global pointers to NULL */
1321 rwlock_init(&bus_list_lock);
1322 virt_control_chan_func = NULL;
1324 /* Issue VMCALL_GET_CONTROLVM_ADDR to get CtrlChanPhysAddr and
1325 * then map this physical address to a virtual address. */
1326 POSTCODE_LINUX_2(DRIVER_ENTRY_PC, POSTCODE_SEVERITY_INFO);
/* debugfs hierarchy: one directory holding all entries below. */
1328 dir_debugfs = debugfs_create_dir(DIR_DEBUGFS_ENTRY, NULL);
1330 info_debugfs_entry = debugfs_create_file(
1331 INFO_DEBUGFS_ENTRY_FN, 0444, dir_debugfs, NULL,
1332 &debugfs_info_fops);
1334 platformnumber_debugfs_read = debugfs_create_u32(
1335 PLATFORMNUMBER_DEBUGFS_ENTRY_FN, 0444, dir_debugfs,
1338 cycles_before_wait_debugfs_read = debugfs_create_u64(
1339 CYCLES_BEFORE_WAIT_DEBUGFS_ENTRY_FN, 0666, dir_debugfs,
1340 &cycles_before_wait);
1342 smart_wakeup_debugfs_entry = debugfs_create_bool(
1343 SMART_WAKEUP_DEBUGFS_ENTRY_FN, 0666, dir_debugfs,
1347 POSTCODE_LINUX_3(DRIVER_EXIT_PC, 0, POSTCODE_SEVERITY_INFO);
1352 uislib_mod_exit(void)
/*
 * Module teardown: remove the debugfs entries created in
 * uislib_mod_init(), children first and the containing directory last.
 * (debugfs_remove() tolerates NULL/error dentries, so ordering of the
 * individual files does not matter.)
 */
1359 debugfs_remove(info_debugfs_entry);
1360 debugfs_remove(smart_wakeup_debugfs_entry);
1361 debugfs_remove(cycles_before_wait_debugfs_read);
1362 debugfs_remove(platformnumber_debugfs_read);
1363 debugfs_remove(dir_debugfs);
/* Module registration and metadata. */
1366 module_init(uislib_mod_init);
1367 module_exit(uislib_mod_exit);
1369 MODULE_LICENSE("GPL");
1370 MODULE_AUTHOR("Usha Srinivasan");
1371 MODULE_ALIAS("uislib");
1372 /* this is extracted during depmod and kept in modules.dep */