/* visorchipset_main.c
*
- * Copyright (C) 2010 - 2013 UNISYS CORPORATION
+ * Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
* Public License for more details.
*/
-#include "globals.h"
-#include "visorchipset.h"
-#include "procobjecttree.h"
-#include "visorchannel.h"
-#include "periodic_work.h"
-#include "file.h"
-#include "parser.h"
-#include "uisutils.h"
-#include "controlvmcompletionstatus.h"
-#include "guestlinuxdebug.h"
-
+#include <linux/acpi.h>
+#include <linux/cdev.h>
+#include <linux/ctype.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
#include <linux/nls.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/uuid.h>
+#include <linux/crash_dump.h>
+
+#include "channel_guid.h"
+#include "controlvmchannel.h"
+#include "controlvmcompletionstatus.h"
+#include "guestlinuxdebug.h"
+#include "periodic_work.h"
+#include "version.h"
+#include "visorbus.h"
+#include "visorbus_private.h"
+#include "vmcallinterface.h"
#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
-#define TEST_VNIC_PHYSITF "eth0" /* physical network itf for
- * vnic loopback test */
-#define TEST_VNIC_SWITCHNO 1
-#define TEST_VNIC_BUSNO 9
#define MAX_NAME_SIZE 128
#define MAX_IP_SIZE 50
#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
+#define MAX_CONTROLVM_PAYLOAD_BYTES (1024*128)
+
+#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET 0x00000000
+
+#define UNISYS_SPAR_LEAF_ID 0x40000000
+
+/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
+#define UNISYS_SPAR_ID_EBX 0x73696e55
+#define UNISYS_SPAR_ID_ECX 0x70537379
+#define UNISYS_SPAR_ID_EDX 0x34367261
+
+/*
+ * Module parameters
+ */
+static int visorchipset_major;
+static int visorchipset_visorbusregwait = 1; /* default is on */
+static int visorchipset_holdchipsetready;
+static unsigned long controlvm_payload_bytes_buffered;
+
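+/* Character device open: only minor 0 is valid; private_data is unused. */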
+static int
+visorchipset_open(struct inode *inode, struct file *file)
+{
+ unsigned minor_number = iminor(inode);
+
+ if (minor_number)
+ return -ENODEV;
+ file->private_data = NULL;
+ return 0;
+}
+
+static int
+visorchipset_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
* we switch to slow polling mode. As soon as we get a controlvm
* message, we switch back to fast polling mode.
*/
#define MIN_IDLE_SECONDS 10
-static ulong poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
-static ulong most_recent_message_jiffies; /* when we got our last
+static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
+static unsigned long most_recent_message_jiffies; /* when we got our last
* controlvm message */
-static inline char *
-NONULLSTR(char *s)
-{
- if (s)
- return s;
- return "";
-}
-
-static int serverregistered;
-static int clientregistered;
+static int visorbusregistered;
#define MAX_CHIPSET_EVENTS 2
static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
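+/* Buffered copy of a controlvm message payload while it is being parsed:
+ * total allocation size, payload byte count, a cursor into the current
+ * parameter string, and the bytes remaining at that cursor.
+ */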
+struct parser_context {
+ unsigned long allocbytes;
+ unsigned long param_bytes;
+ u8 *curr;
+ unsigned long bytes_remaining;
+ bool byte_stream;
+ char data[0];
+};
+
static struct delayed_work periodic_controlvm_work;
static struct workqueue_struct *periodic_controlvm_workqueue;
static DEFINE_SEMAPHORE(notifier_lock);
-static struct controlvm_message_header g_diag_msg_hdr;
+static struct cdev file_cdev;
+static struct visorchannel **file_controlvm_channel;
static struct controlvm_message_header g_chipset_msg_hdr;
-static struct controlvm_message_header g_del_dump_msg_hdr;
-static const uuid_le spar_diag_pool_channel_protocol_uuid =
- SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
-/* 0xffffff is an invalid Bus/Device number */
-static ulong g_diagpool_bus_no = 0xffffff;
-static ulong g_diagpool_dev_no = 0xffffff;
static struct controlvm_message_packet g_devicechangestate_packet;
-/* Only VNIC and VHBA channels are sent to visorclientbus (aka
- * "visorhackbus")
- */
-#define FOR_VISORHACKBUS(channel_type_guid) \
- (((uuid_le_cmp(channel_type_guid,\
- spar_vnic_channel_protocol_uuid) == 0) ||\
- (uuid_le_cmp(channel_type_guid,\
- spar_vhba_channel_protocol_uuid) == 0)))
-#define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))
-
-#define is_diagpool_channel(channel_type_guid) \
- (uuid_le_cmp(channel_type_guid,\
- spar_diag_pool_channel_protocol_uuid) == 0)
-
static LIST_HEAD(bus_info_list);
static LIST_HEAD(dev_info_list);
static struct visorchannel *controlvm_channel;
/* Manages the request payload in the controlvm channel */
-static struct controlvm_payload_info {
- u8 __iomem *ptr; /* pointer to base address of payload pool */
+struct visor_controlvm_payload_info {
+ u8 *ptr; /* pointer to base address of payload pool */
u64 offset; /* offset from beginning of controlvm
* channel to beginning of payload pool */
u32 bytes; /* number of bytes in payload pool */
-} controlvm_payload_info;
+};
-/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
- * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
- */
-static struct livedump_info {
- struct controlvm_message_header dumpcapture_header;
- struct controlvm_message_header gettextdump_header;
- struct controlvm_message_header dumpcomplete_header;
- BOOL gettextdump_outstanding;
- u32 crc32;
- ulong length;
- atomic_t buffers_in_use;
- ulong destination;
-} livedump_info;
+static struct visor_controlvm_payload_info controlvm_payload_info;
/* The following globals are used to handle the scenario where we are unable to
* offload the payload from a controlvm message due to memory requirements. In
* this scenario, we simply stash the controlvm message, then attempt to
* process it again the next time controlvm_periodic_work() runs.
*/
static struct controlvm_message controlvm_pending_msg;
-static BOOL controlvm_pending_msg_valid = FALSE;
-
-/* Pool of struct putfile_buffer_entry, for keeping track of pending (incoming)
- * TRANSMIT_FILE PutFile payloads.
- */
-static struct kmem_cache *putfile_buffer_list_pool;
-static const char putfile_buffer_list_pool_name[] =
- "controlvm_putfile_buffer_list_pool";
+static bool controlvm_pending_msg_valid;
/* This identifies a data buffer that has been received via a controlvm message
* in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
int completion_status;
};
-static atomic_t visorchipset_cache_buffers_in_use = ATOMIC_INIT(0);
-
struct parahotplug_request {
struct list_head list;
int id;
/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
* CONTROLVM_REPORTEVENT.
*/
-static struct visorchipset_busdev_notifiers busdev_server_notifiers;
-static struct visorchipset_busdev_notifiers busdev_client_notifiers;
+static struct visorchipset_busdev_notifiers busdev_notifiers;
-static void bus_create_response(ulong bus_no, int response);
-static void bus_destroy_response(ulong bus_no, int response);
-static void device_create_response(ulong bus_no, ulong dev_no, int response);
-static void device_destroy_response(ulong bus_no, ulong dev_no, int response);
-static void device_resume_response(ulong bus_no, ulong dev_no, int response);
+static void bus_create_response(struct visor_device *p, int response);
+static void bus_destroy_response(struct visor_device *p, int response);
+static void device_create_response(struct visor_device *p, int response);
+static void device_destroy_response(struct visor_device *p, int response);
+static void device_resume_response(struct visor_device *p, int response);
+
+static void visorchipset_device_pause_response(struct visor_device *p,
+ int response);
static struct visorchipset_busdev_responders busdev_responders = {
.bus_create = bus_create_response,
NULL
};
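+/* Empty release function: the platform device below is statically allocated,
+ * but the driver core still expects a release callback.
+ */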
+static void visorchipset_dev_release(struct device *dev)
+{
+}
+
/* /sys/devices/platform/visorchipset */
static struct platform_device visorchipset_platform_device = {
.name = "visorchipset",
.id = -1,
.dev.groups = visorchipset_dev_groups,
+ .dev.release = visorchipset_dev_release,
};
/* Function prototypes */
struct controlvm_message_header *msg_hdr, int response,
struct spar_segment_state state);
+
+static void parser_done(struct parser_context *ctx);
+
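+/* Copy the parameter payload of a controlvm message into a kzalloc'd
+ * parser_context, with room for a '\0' terminator.  A payload in local guest
+ * memory is copied through __va(); otherwise the physical region is reserved
+ * and memremap'd for the duration of the copy.  If the allocation fails or
+ * the global MAX_CONTROLVM_PAYLOAD_BYTES budget would be exceeded, *retry is
+ * set so the caller can try again later.
+ */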
+static struct parser_context *
+parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
+{
+ int allocbytes = sizeof(struct parser_context) + bytes;
+ struct parser_context *rc = NULL;
+ struct parser_context *ctx = NULL;
+
+ if (retry)
+ *retry = false;
+
+ /*
+ * alloc an extra byte to ensure the payload is
+ * '\0'-terminated
+ */
+ allocbytes++;
+ if ((controlvm_payload_bytes_buffered + bytes)
+ > MAX_CONTROLVM_PAYLOAD_BYTES) {
+ if (retry)
+ *retry = true;
+ rc = NULL;
+ goto cleanup;
+ }
+ ctx = kzalloc(allocbytes, GFP_KERNEL|__GFP_NORETRY);
+ if (!ctx) {
+ if (retry)
+ *retry = true;
+ rc = NULL;
+ goto cleanup;
+ }
+
+ ctx->allocbytes = allocbytes;
+ ctx->param_bytes = bytes;
+ ctx->curr = NULL;
+ ctx->bytes_remaining = 0;
+ ctx->byte_stream = false;
+ if (local) {
+ void *p;
+
+ if (addr > virt_to_phys(high_memory - 1)) {
+ rc = NULL;
+ goto cleanup;
+ }
+ p = __va((unsigned long) (addr));
+ memcpy(ctx->data, p, bytes);
+ } else {
+ void *mapping;
+
+ if (!request_mem_region(addr, bytes, "visorchipset")) {
+ rc = NULL;
+ goto cleanup;
+ }
+
+ mapping = memremap(addr, bytes, MEMREMAP_WB);
+ if (!mapping) {
+ release_mem_region(addr, bytes);
+ rc = NULL;
+ goto cleanup;
+ }
+ memcpy(ctx->data, mapping, bytes);
+ release_mem_region(addr, bytes);
+ memunmap(mapping);
+ }
+
+ ctx->byte_stream = true;
+ rc = ctx;
+cleanup:
+ if (rc) {
+ controlvm_payload_bytes_buffered += ctx->param_bytes;
+ } else {
+ if (ctx) {
+ parser_done(ctx);
+ ctx = NULL;
+ }
+ }
+ return rc;
+}
+
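+/* Return the id (uuid_le) from the spar_controlvm_parameters_header at the
+ * start of the payload, or NULL_UUID_LE when there is no parser context.
+ */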
+static uuid_le
+parser_id_get(struct parser_context *ctx)
+{
+ struct spar_controlvm_parameters_header *phdr = NULL;
+
+ if (ctx == NULL)
+ return NULL_UUID_LE;
+ phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
+ return phdr->id;
+}
+
+/** Describes the state from the perspective of which controlvm messages have
+ * been received for a bus or device.
+ */
+
+enum PARSER_WHICH_STRING {
+ PARSERSTRING_INITIATOR,
+ PARSERSTRING_TARGET,
+ PARSERSTRING_CONNECTION,
+ PARSERSTRING_NAME, /* TODO: only PARSERSTRING_NAME is used ? */
+};
+
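+/* Position the parse cursor (curr/bytes_remaining) at the requested string,
+ * using the offset/length pairs recorded in the parameters header.
+ */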
+static void
+parser_param_start(struct parser_context *ctx,
+ enum PARSER_WHICH_STRING which_string)
+{
+ struct spar_controlvm_parameters_header *phdr = NULL;
+
+ if (ctx == NULL)
+ goto Away;
+ phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
+ switch (which_string) {
+ case PARSERSTRING_INITIATOR:
+ ctx->curr = ctx->data + phdr->initiator_offset;
+ ctx->bytes_remaining = phdr->initiator_length;
+ break;
+ case PARSERSTRING_TARGET:
+ ctx->curr = ctx->data + phdr->target_offset;
+ ctx->bytes_remaining = phdr->target_length;
+ break;
+ case PARSERSTRING_CONNECTION:
+ ctx->curr = ctx->data + phdr->connection_offset;
+ ctx->bytes_remaining = phdr->connection_length;
+ break;
+ case PARSERSTRING_NAME:
+ ctx->curr = ctx->data + phdr->name_offset;
+ ctx->bytes_remaining = phdr->name_length;
+ break;
+ default:
+ break;
+ }
+
+Away:
+ return;
+}
+
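+/* Release a parser context and return its bytes to the payload budget. */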
+static void parser_done(struct parser_context *ctx)
+{
+ if (!ctx)
+ return;
+ controlvm_payload_bytes_buffered -= ctx->param_bytes;
+ kfree(ctx);
+}
+
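+/* Copy the current parameter string (up to the first '\0', or all of
+ * bytes_remaining if no terminator is found) into a freshly kmalloc'd,
+ * NUL-terminated buffer that the caller must kfree.
+ */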
+static void *
+parser_string_get(struct parser_context *ctx)
+{
+ u8 *pscan;
+ unsigned long nscan;
+ int value_length = -1;
+ void *value = NULL;
+ int i;
+
+ if (!ctx)
+ return NULL;
+ pscan = ctx->curr;
+ nscan = ctx->bytes_remaining;
+ if (nscan == 0)
+ return NULL;
+ if (!pscan)
+ return NULL;
+ for (i = 0, value_length = -1; i < nscan; i++)
+ if (pscan[i] == '\0') {
+ value_length = i;
+ break;
+ }
+ if (value_length < 0) /* '\0' was not included in the length */
+ value_length = nscan;
+ value = kmalloc(value_length + 1, GFP_KERNEL|__GFP_NORETRY);
+ if (value == NULL)
+ return NULL;
+ if (value_length > 0)
+ memcpy(value, pscan, value_length);
+ ((u8 *) (value))[value_length] = '\0';
+ return value;
+}
+
+
static ssize_t toolaction_show(struct device *dev,
struct device_attribute *attr,
char *buf)
u8 tool_action;
int ret;
- if (kstrtou8(buf, 10, &tool_action) != 0)
+ if (kstrtou8(buf, 10, &tool_action))
return -EINVAL;
ret = visorchannel_write(controlvm_channel,
int val, ret;
struct efi_spar_indication efi_spar_indication;
- if (kstrtoint(buf, 10, &val) != 0)
+ if (kstrtoint(buf, 10, &val))
return -EINVAL;
efi_spar_indication.boot_to_tool = val;
u32 error;
int ret;
- if (kstrtou32(buf, 10, &error) != 0)
+ if (kstrtou32(buf, 10, &error))
return -EINVAL;
ret = visorchannel_write(controlvm_channel,
u32 text_id;
int ret;
- if (kstrtou32(buf, 10, &text_id) != 0)
+ if (kstrtou32(buf, 10, &text_id))
return -EINVAL;
ret = visorchannel_write(controlvm_channel,
u16 remaining_steps;
int ret;
- if (kstrtou16(buf, 10, &remaining_steps) != 0)
+ if (kstrtou16(buf, 10, &remaining_steps))
return -EINVAL;
ret = visorchannel_write(controlvm_channel,
return count;
}
-static void
-bus_info_clear(void *v)
-{
- struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) (v);
+struct visor_busdev {
+ u32 bus_no;
+ u32 dev_no;
+};
- kfree(p->name);
- p->name = NULL;
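+/* bus_find_device() match callback: compare a candidate device's chipset
+ * bus/dev numbers against the struct visor_busdev id passed in 'data'.
+ */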
+static int match_visorbus_dev_by_id(struct device *dev, void *data)
+{
+ struct visor_device *vdev = to_visor_device(dev);
+ struct visor_busdev *id = data;
+ u32 bus_no = id->bus_no;
+ u32 dev_no = id->dev_no;
- kfree(p->description);
- p->description = NULL;
+ if ((vdev->chipset_bus_no == bus_no) &&
+ (vdev->chipset_dev_no == dev_no))
+ return 1;
- p->state.created = 0;
- memset(p, 0, sizeof(struct visorchipset_bus_info));
+ return 0;
}
-
-static void
-dev_info_clear(void *v)
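+/* Walk visorbus_type for the visor_device whose chipset bus/dev numbers
+ * match; when 'from' is non-NULL the search resumes after that device.
+ * Returns the matching visor_device, or NULL if none is found.
+ */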
+struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
+ struct visor_device *from)
{
- struct visorchipset_device_info *p =
- (struct visorchipset_device_info *)(v);
+ struct device *dev;
+ struct device *dev_start = NULL;
+ struct visor_device *vdev = NULL;
+ struct visor_busdev id = {
+ .bus_no = bus_no,
+ .dev_no = dev_no
+ };
- p->state.created = 0;
- memset(p, 0, sizeof(struct visorchipset_device_info));
+ if (from)
+ dev_start = &from->device;
+ dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
+ match_visorbus_dev_by_id);
+ if (dev)
+ vdev = to_visor_device(dev);
+ return vdev;
}
+EXPORT_SYMBOL(visorbus_get_device_by_id);
static u8
check_chipset_events(void)
}
void
-visorchipset_register_busdev_server(
+visorchipset_register_busdev(
struct visorchipset_busdev_notifiers *notifiers,
struct visorchipset_busdev_responders *responders,
struct ultra_vbus_deviceinfo *driver_info)
{
down(¬ifier_lock);
if (!notifiers) {
- memset(&busdev_server_notifiers, 0,
- sizeof(busdev_server_notifiers));
- serverregistered = 0; /* clear flag */
+ memset(&busdev_notifiers, 0,
+ sizeof(busdev_notifiers));
+ visorbusregistered = 0; /* clear flag */
} else {
- busdev_server_notifiers = *notifiers;
- serverregistered = 1; /* set flag */
+ busdev_notifiers = *notifiers;
+ visorbusregistered = 1; /* set flag */
}
if (responders)
*responders = busdev_responders;
up(¬ifier_lock);
}
-EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);
-
-void
-visorchipset_register_busdev_client(
- struct visorchipset_busdev_notifiers *notifiers,
- struct visorchipset_busdev_responders *responders,
- struct ultra_vbus_deviceinfo *driver_info)
-{
- down(¬ifier_lock);
- if (!notifiers) {
- memset(&busdev_client_notifiers, 0,
- sizeof(busdev_client_notifiers));
- clientregistered = 0; /* clear flag */
- } else {
- busdev_client_notifiers = *notifiers;
- clientregistered = 1; /* set flag */
- }
- if (responders)
- *responders = busdev_responders;
- if (driver_info)
- bus_device_info_init(driver_info, "chipset(bolts)",
- "visorchipset", VERSION, NULL);
- up(¬ifier_lock);
-}
-EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);
-
-static void
-cleanup_controlvm_structures(void)
-{
- struct visorchipset_bus_info *bi, *tmp_bi;
- struct visorchipset_device_info *di, *tmp_di;
-
- list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
- bus_info_clear(bi);
- list_del(&bi->entry);
- kfree(bi);
- }
-
- list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
- dev_info_clear(di);
- list_del(&di->entry);
- kfree(di);
- }
-}
+EXPORT_SYMBOL_GPL(visorchipset_register_busdev);
static void
chipset_init(struct controlvm_message *inmsg)
features |= ULTRA_CHIPSET_FEATURE_REPLY;
cleanup:
- if (rc < 0)
- cleanup_controlvm_structures();
if (inmsg->hdr.flags.response_expected)
controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
}
struct controlvm_message outmsg;
controlvm_init_response(&outmsg, msg_hdr, response);
- /* For DiagPool channel DEVICE_CHANGESTATE, we need to send
- * back the deviceChangeState structure in the packet. */
- if (msg_hdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
- g_devicechangestate_packet.device_change_state.bus_no ==
- g_diagpool_bus_no &&
- g_devicechangestate_packet.device_change_state.dev_no ==
- g_diagpool_dev_no)
- outmsg.cmd = g_devicechangestate_packet;
if (outmsg.hdr.flags.test_message == 1)
return;
}
}
-void
-visorchipset_save_message(struct controlvm_message *msg,
- enum crash_obj_type type)
-{
- u32 crash_msg_offset;
- u16 crash_msg_count;
-
- /* get saved message count */
- if (visorchannel_read(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- saved_crash_message_count),
- &crash_msg_count, sizeof(u16)) < 0) {
- POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
- POSTCODE_SEVERITY_ERR);
- return;
- }
-
- if (crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
- POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
- crash_msg_count,
- POSTCODE_SEVERITY_ERR);
- return;
- }
-
- /* get saved crash message offset */
- if (visorchannel_read(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- saved_crash_message_offset),
- &crash_msg_offset, sizeof(u32)) < 0) {
- POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
- POSTCODE_SEVERITY_ERR);
- return;
- }
-
- if (type == CRASH_BUS) {
- if (visorchannel_write(controlvm_channel,
- crash_msg_offset,
- msg,
- sizeof(struct controlvm_message)) < 0) {
- POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
- POSTCODE_SEVERITY_ERR);
- return;
- }
- } else {
- if (visorchannel_write(controlvm_channel,
- crash_msg_offset +
- sizeof(struct controlvm_message), msg,
- sizeof(struct controlvm_message)) < 0) {
- POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
- POSTCODE_SEVERITY_ERR);
- return;
- }
- }
-}
-EXPORT_SYMBOL_GPL(visorchipset_save_message);
+enum crash_obj_type {
+ CRASH_DEV,
+ CRASH_BUS,
+};
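+/* Send the controlvm response for a bus message, but only if a response is
+ * still pending (pending_msg_hdr is non-NULL) and it matches the command id.
+ */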
static void
-bus_responder(enum controlvm_id cmd_id, ulong bus_no, int response)
+bus_responder(enum controlvm_id cmd_id,
+ struct controlvm_message_header *pending_msg_hdr,
+ int response)
{
- struct visorchipset_bus_info *p = NULL;
- BOOL need_clear = FALSE;
+ if (pending_msg_hdr == NULL)
+ return; /* no controlvm response needed */
- p = findbus(&bus_info_list, bus_no);
- if (!p)
+ if (pending_msg_hdr->id != (u32)cmd_id)
return;
- if (response < 0) {
- if ((cmd_id == CONTROLVM_BUS_CREATE) &&
- (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
- /* undo the row we just created... */
- delbusdevices(&dev_info_list, bus_no);
- } else {
- if (cmd_id == CONTROLVM_BUS_CREATE)
- p->state.created = 1;
- if (cmd_id == CONTROLVM_BUS_DESTROY)
- need_clear = TRUE;
- }
-
- if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
- return; /* no controlvm response needed */
- if (p->pending_msg_hdr.id != (u32)cmd_id)
- return;
- controlvm_respond(&p->pending_msg_hdr, response);
- p->pending_msg_hdr.id = CONTROLVM_INVALID;
- if (need_clear) {
- bus_info_clear(p);
- delbusdevices(&dev_info_list, bus_no);
- }
+ controlvm_respond(pending_msg_hdr, response);
}
static void
device_changestate_responder(enum controlvm_id cmd_id,
- ulong bus_no, ulong dev_no, int response,
+ struct visor_device *p, int response,
struct spar_segment_state response_state)
{
- struct visorchipset_device_info *p = NULL;
struct controlvm_message outmsg;
+ u32 bus_no = p->chipset_bus_no;
+ u32 dev_no = p->chipset_dev_no;
- p = finddevice(&dev_info_list, bus_no, dev_no);
- if (!p)
- return;
- if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
+ if (p->pending_msg_hdr == NULL)
return; /* no controlvm response needed */
- if (p->pending_msg_hdr.id != cmd_id)
+ if (p->pending_msg_hdr->id != cmd_id)
return;
- controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);
+ controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
outmsg.cmd.device_change_state.bus_no = bus_no;
outmsg.cmd.device_change_state.dev_no = dev_no;
if (!visorchannel_signalinsert(controlvm_channel,
CONTROLVM_QUEUE_REQUEST, &outmsg))
return;
-
- p->pending_msg_hdr.id = CONTROLVM_INVALID;
}
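+/* As bus_responder(), but for device messages: respond only when a pending
+ * message header is present and matches the command id.
+ */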
static void
-device_responder(enum controlvm_id cmd_id, ulong bus_no, ulong dev_no,
+device_responder(enum controlvm_id cmd_id,
+ struct controlvm_message_header *pending_msg_hdr,
int response)
{
- struct visorchipset_device_info *p = NULL;
- BOOL need_clear = FALSE;
-
- p = finddevice(&dev_info_list, bus_no, dev_no);
- if (!p)
- return;
- if (response >= 0) {
- if (cmd_id == CONTROLVM_DEVICE_CREATE)
- p->state.created = 1;
- if (cmd_id == CONTROLVM_DEVICE_DESTROY)
- need_clear = TRUE;
- }
-
- if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
+ if (pending_msg_hdr == NULL)
return; /* no controlvm response needed */
- if (p->pending_msg_hdr.id != (u32)cmd_id)
+ if (pending_msg_hdr->id != (u32)cmd_id)
return;
- controlvm_respond(&p->pending_msg_hdr, response);
- p->pending_msg_hdr.id = CONTROLVM_INVALID;
- if (need_clear)
- dev_info_clear(p);
+ controlvm_respond(pending_msg_hdr, response);
}
static void
-bus_epilog(u32 bus_no,
+bus_epilog(struct visor_device *bus_info,
u32 cmd, struct controlvm_message_header *msg_hdr,
- int response, BOOL need_response)
+ int response, bool need_response)
{
- BOOL notified = FALSE;
+ bool notified = false;
+ struct controlvm_message_header *pmsg_hdr = NULL;
- struct visorchipset_bus_info *bus_info = findbus(&bus_info_list,
- bus_no);
+ if (!bus_info) {
+ /* relying on a valid passed in response code */
+ /* be lazy and re-use msg_hdr for this failure, is this ok?? */
+ pmsg_hdr = msg_hdr;
+ goto away;
+ }
- if (!bus_info)
- return;
+ if (bus_info->pending_msg_hdr) {
+ /* only non-NULL if dev is still waiting on a response */
+ response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
+ pmsg_hdr = bus_info->pending_msg_hdr;
+ goto away;
+ }
if (need_response) {
- memcpy(&bus_info->pending_msg_hdr, msg_hdr,
+ pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
+ if (!pmsg_hdr) {
+ response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+ goto away;
+ }
+
+ memcpy(pmsg_hdr, msg_hdr,
sizeof(struct controlvm_message_header));
- } else {
- bus_info->pending_msg_hdr.id = CONTROLVM_INVALID;
+ bus_info->pending_msg_hdr = pmsg_hdr;
}
down(¬ifier_lock);
if (response == CONTROLVM_RESP_SUCCESS) {
switch (cmd) {
case CONTROLVM_BUS_CREATE:
- /* We can't tell from the bus_create
- * information which of our 2 bus flavors the
- * devices on this bus will ultimately end up.
- * FORTUNATELY, it turns out it is harmless to
- * send the bus_create to both of them. We can
- * narrow things down a little bit, though,
- * because we know: - BusDev_Server can handle
- * either server or client devices
- * - BusDev_Client can handle ONLY client
- * devices */
- if (busdev_server_notifiers.bus_create) {
- (*busdev_server_notifiers.bus_create) (bus_no);
- notified = TRUE;
- }
- if ((!bus_info->flags.server) /*client */ &&
- busdev_client_notifiers.bus_create) {
- (*busdev_client_notifiers.bus_create) (bus_no);
- notified = TRUE;
+ if (busdev_notifiers.bus_create) {
+ (*busdev_notifiers.bus_create) (bus_info);
+ notified = true;
}
break;
case CONTROLVM_BUS_DESTROY:
- if (busdev_server_notifiers.bus_destroy) {
- (*busdev_server_notifiers.bus_destroy) (bus_no);
- notified = TRUE;
- }
- if ((!bus_info->flags.server) /*client */ &&
- busdev_client_notifiers.bus_destroy) {
- (*busdev_client_notifiers.bus_destroy) (bus_no);
- notified = TRUE;
+ if (busdev_notifiers.bus_destroy) {
+ (*busdev_notifiers.bus_destroy) (bus_info);
+ notified = true;
}
break;
}
}
+away:
if (notified)
/* The callback function just called above is responsible
* for calling the appropriate visorchipset_busdev_responders
*/
;
else
- bus_responder(cmd, bus_no, response);
+ /*
+ * Do not kfree(pmsg_hdr) as this is the failure path.
+ * The success path ('notified') will call the responder
+ * directly and kfree() there.
+ */
+ bus_responder(cmd, pmsg_hdr, response);
up(¬ifier_lock);
}
static void
-device_epilog(u32 bus_no, u32 dev_no, struct spar_segment_state state, u32 cmd,
+device_epilog(struct visor_device *dev_info,
+ struct spar_segment_state state, u32 cmd,
struct controlvm_message_header *msg_hdr, int response,
- BOOL need_response, BOOL for_visorbus)
+ bool need_response, bool for_visorbus)
{
- struct visorchipset_busdev_notifiers *notifiers = NULL;
- BOOL notified = FALSE;
+ struct visorchipset_busdev_notifiers *notifiers;
+ bool notified = false;
+ struct controlvm_message_header *pmsg_hdr = NULL;
- struct visorchipset_device_info *dev_info =
- finddevice(&dev_info_list, bus_no, dev_no);
- char *envp[] = {
- "SPARSP_DIAGPOOL_PAUSED_STATE = 1",
- NULL
- };
+ notifiers = &busdev_notifiers;
- if (!dev_info)
- return;
+ if (!dev_info) {
+ /* relying on a valid passed in response code */
+ /* be lazy and re-use msg_hdr for this failure, is this ok?? */
+ pmsg_hdr = msg_hdr;
+ goto away;
+ }
+
+ if (dev_info->pending_msg_hdr) {
+ /* only non-NULL if dev is still waiting on a response */
+ response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
+ pmsg_hdr = dev_info->pending_msg_hdr;
+ goto away;
+ }
- if (for_visorbus)
- notifiers = &busdev_server_notifiers;
- else
- notifiers = &busdev_client_notifiers;
if (need_response) {
- memcpy(&dev_info->pending_msg_hdr, msg_hdr,
+ pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
+ if (!pmsg_hdr) {
+ response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+ goto away;
+ }
+
+ memcpy(pmsg_hdr, msg_hdr,
sizeof(struct controlvm_message_header));
- } else {
- dev_info->pending_msg_hdr.id = CONTROLVM_INVALID;
+ dev_info->pending_msg_hdr = pmsg_hdr;
}
down(¬ifier_lock);
switch (cmd) {
case CONTROLVM_DEVICE_CREATE:
if (notifiers->device_create) {
- (*notifiers->device_create) (bus_no, dev_no);
- notified = TRUE;
+ (*notifiers->device_create) (dev_info);
+ notified = true;
}
break;
case CONTROLVM_DEVICE_CHANGESTATE:
state.operating ==
segment_state_running.operating) {
if (notifiers->device_resume) {
- (*notifiers->device_resume) (bus_no,
- dev_no);
- notified = TRUE;
+ (*notifiers->device_resume) (dev_info);
+ notified = true;
}
}
/* ServerNotReady / ServerLost / SegmentStateStandby */
* where server is lost
*/
if (notifiers->device_pause) {
- (*notifiers->device_pause) (bus_no,
- dev_no);
- notified = TRUE;
- }
- } else if (state.alive == segment_state_paused.alive &&
- state.operating ==
- segment_state_paused.operating) {
- /* this is lite pause where channel is
- * still valid just 'pause' of it
- */
- if (bus_no == g_diagpool_bus_no &&
- dev_no == g_diagpool_dev_no) {
- /* this will trigger the
- * diag_shutdown.sh script in
- * the visorchipset hotplug */
- kobject_uevent_env
- (&visorchipset_platform_device.dev.
- kobj, KOBJ_ONLINE, envp);
+ (*notifiers->device_pause) (dev_info);
+ notified = true;
}
}
break;
case CONTROLVM_DEVICE_DESTROY:
if (notifiers->device_destroy) {
- (*notifiers->device_destroy) (bus_no, dev_no);
- notified = TRUE;
+ (*notifiers->device_destroy) (dev_info);
+ notified = true;
}
break;
}
}
+away:
if (notified)
/* The callback function just called above is responsible
* for calling the appropriate visorchipset_busdev_responders
*/
;
else
- device_responder(cmd, bus_no, dev_no, response);
+ /*
+ * Do not kfree(pmsg_hdr) as this is the failure path.
+ * The success path ('notified') will call the responder
+ * directly and kfree() there.
+ */
+ device_responder(cmd, pmsg_hdr, response);
up(¬ifier_lock);
}
bus_create(struct controlvm_message *inmsg)
{
struct controlvm_message_packet *cmd = &inmsg->cmd;
- ulong bus_no = cmd->create_bus.bus_no;
+ u32 bus_no = cmd->create_bus.bus_no;
int rc = CONTROLVM_RESP_SUCCESS;
- struct visorchipset_bus_info *bus_info = NULL;
+ struct visor_device *bus_info;
+ struct visorchannel *visorchannel;
- bus_info = findbus(&bus_info_list, bus_no);
+ bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
if (bus_info && (bus_info->state.created == 1)) {
POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
POSTCODE_SEVERITY_ERR);
goto cleanup;
}
- INIT_LIST_HEAD(&bus_info->entry);
- bus_info->bus_no = bus_no;
- bus_info->dev_no = cmd->create_bus.dev_count;
+ INIT_LIST_HEAD(&bus_info->list_all);
+ bus_info->chipset_bus_no = bus_no;
+ bus_info->chipset_dev_no = BUS_ROOT_DEVICE;
POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);
- if (inmsg->hdr.flags.test_message == 1)
- bus_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
- else
- bus_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
-
- bus_info->flags.server = inmsg->hdr.flags.server;
- bus_info->chan_info.channel_addr = cmd->create_bus.channel_addr;
- bus_info->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
- bus_info->chan_info.channel_type_uuid =
- cmd->create_bus.bus_data_type_uuid;
- bus_info->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;
+ visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
+ cmd->create_bus.channel_bytes,
+ GFP_KERNEL,
+ cmd->create_bus.bus_data_type_uuid);
- list_add(&bus_info->entry, &bus_info_list);
+ if (!visorchannel) {
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+ kfree(bus_info);
+ bus_info = NULL;
+ goto cleanup;
+ }
+ bus_info->visorchannel = visorchannel;
POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
cleanup:
- bus_epilog(bus_no, CONTROLVM_BUS_CREATE, &inmsg->hdr,
+ bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr,
rc, inmsg->hdr.flags.response_expected == 1);
}
bus_destroy(struct controlvm_message *inmsg)
{
struct controlvm_message_packet *cmd = &inmsg->cmd;
- ulong bus_no = cmd->destroy_bus.bus_no;
- struct visorchipset_bus_info *bus_info;
+ u32 bus_no = cmd->destroy_bus.bus_no;
+ struct visor_device *bus_info;
int rc = CONTROLVM_RESP_SUCCESS;
- bus_info = findbus(&bus_info_list, bus_no);
+ bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
if (!bus_info)
rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
else if (bus_info->state.created == 0)
rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
- bus_epilog(bus_no, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
+ bus_epilog(bus_info, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
rc, inmsg->hdr.flags.response_expected == 1);
+
+ /* bus_info is freed as part of the busdevice_release function */
}
static void
struct parser_context *parser_ctx)
{
struct controlvm_message_packet *cmd = &inmsg->cmd;
- ulong bus_no = cmd->configure_bus.bus_no;
- struct visorchipset_bus_info *bus_info = NULL;
+ u32 bus_no;
+ struct visor_device *bus_info;
int rc = CONTROLVM_RESP_SUCCESS;
- char s[99];
bus_no = cmd->configure_bus.bus_no;
POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
POSTCODE_SEVERITY_INFO);
- bus_info = findbus(&bus_info_list, bus_no);
+ bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
if (!bus_info) {
POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
POSTCODE_SEVERITY_ERR);
POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
POSTCODE_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
- } else if (bus_info->pending_msg_hdr.id != CONTROLVM_INVALID) {
+ } else if (bus_info->pending_msg_hdr != NULL) {
POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
POSTCODE_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
} else {
- bus_info->partition_handle = cmd->configure_bus.guest_handle;
+ visorchannel_set_clientpartition(bus_info->visorchannel,
+ cmd->configure_bus.guest_handle);
bus_info->partition_uuid = parser_id_get(parser_ctx);
parser_param_start(parser_ctx, PARSERSTRING_NAME);
bus_info->name = parser_string_get(parser_ctx);
- visorchannel_uuid_id(&bus_info->partition_uuid, s);
POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
POSTCODE_SEVERITY_INFO);
}
- bus_epilog(bus_no, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
+ bus_epilog(bus_info, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
rc, inmsg->hdr.flags.response_expected == 1);
}
my_device_create(struct controlvm_message *inmsg)
{
struct controlvm_message_packet *cmd = &inmsg->cmd;
- ulong bus_no = cmd->create_device.bus_no;
- ulong dev_no = cmd->create_device.dev_no;
- struct visorchipset_device_info *dev_info = NULL;
- struct visorchipset_bus_info *bus_info = NULL;
+ u32 bus_no = cmd->create_device.bus_no;
+ u32 dev_no = cmd->create_device.dev_no;
+ struct visor_device *dev_info = NULL;
+ struct visor_device *bus_info;
+ struct visorchannel *visorchannel;
int rc = CONTROLVM_RESP_SUCCESS;
- dev_info = finddevice(&dev_info_list, bus_no, dev_no);
- if (dev_info && (dev_info->state.created == 1)) {
+ bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
+ if (!bus_info) {
POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
POSTCODE_SEVERITY_ERR);
- rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
goto cleanup;
}
- bus_info = findbus(&bus_info_list, bus_no);
- if (!bus_info) {
+
+ if (bus_info->state.created == 0) {
POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
POSTCODE_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
goto cleanup;
}
- if (bus_info->state.created == 0) {
+
+ dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
+ if (dev_info && (dev_info->state.created == 1)) {
POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
POSTCODE_SEVERITY_ERR);
- rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
+ rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
goto cleanup;
}
+
dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
if (!dev_info) {
POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
goto cleanup;
}
- INIT_LIST_HEAD(&dev_info->entry);
- dev_info->bus_no = bus_no;
- dev_info->dev_no = dev_no;
- dev_info->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
+ dev_info->chipset_bus_no = bus_no;
+ dev_info->chipset_dev_no = dev_no;
+ dev_info->inst = cmd->create_device.dev_inst_uuid;
+
+ /* not sure where the best place to set the 'parent' */
+ dev_info->device.parent = &bus_info->device;
+
POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
POSTCODE_SEVERITY_INFO);
- if (inmsg->hdr.flags.test_message == 1)
- dev_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
- else
- dev_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
- dev_info->chan_info.channel_addr = cmd->create_device.channel_addr;
- dev_info->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
- dev_info->chan_info.channel_type_uuid =
- cmd->create_device.data_type_uuid;
- dev_info->chan_info.intr = cmd->create_device.intr;
- list_add(&dev_info->entry, &dev_info_list);
+ visorchannel =
+ visorchannel_create_with_lock(cmd->create_device.channel_addr,
+ cmd->create_device.channel_bytes,
+ GFP_KERNEL,
+ cmd->create_device.data_type_uuid);
+
+ if (!visorchannel) {
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+ kfree(dev_info);
+ dev_info = NULL;
+ goto cleanup;
+ }
+ dev_info->visorchannel = visorchannel;
+ dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
POSTCODE_SEVERITY_INFO);
cleanup:
- /* get the bus and devNo for DiagPool channel */
- if (dev_info &&
- is_diagpool_channel(dev_info->chan_info.channel_type_uuid)) {
- g_diagpool_bus_no = bus_no;
- g_diagpool_dev_no = dev_no;
- }
- device_epilog(bus_no, dev_no, segment_state_running,
+ device_epilog(dev_info, segment_state_running,
CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
- inmsg->hdr.flags.response_expected == 1,
- FOR_VISORBUS(dev_info->chan_info.channel_type_uuid));
+ inmsg->hdr.flags.response_expected == 1, 1);
}
static void
my_device_changestate(struct controlvm_message *inmsg)
{
struct controlvm_message_packet *cmd = &inmsg->cmd;
- ulong bus_no = cmd->device_change_state.bus_no;
- ulong dev_no = cmd->device_change_state.dev_no;
+ u32 bus_no = cmd->device_change_state.bus_no;
+ u32 dev_no = cmd->device_change_state.dev_no;
struct spar_segment_state state = cmd->device_change_state.state;
- struct visorchipset_device_info *dev_info = NULL;
+ struct visor_device *dev_info;
int rc = CONTROLVM_RESP_SUCCESS;
- dev_info = finddevice(&dev_info_list, bus_no, dev_no);
+ dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
if (!dev_info) {
POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
POSTCODE_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
}
if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
- device_epilog(bus_no, dev_no, state,
+ device_epilog(dev_info, state,
CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
- inmsg->hdr.flags.response_expected == 1,
- FOR_VISORBUS(
- dev_info->chan_info.channel_type_uuid));
+ inmsg->hdr.flags.response_expected == 1, 1);
}
static void
my_device_destroy(struct controlvm_message *inmsg)
{
struct controlvm_message_packet *cmd = &inmsg->cmd;
- ulong bus_no = cmd->destroy_device.bus_no;
- ulong dev_no = cmd->destroy_device.dev_no;
- struct visorchipset_device_info *dev_info = NULL;
+ u32 bus_no = cmd->destroy_device.bus_no;
+ u32 dev_no = cmd->destroy_device.dev_no;
+ struct visor_device *dev_info;
int rc = CONTROLVM_RESP_SUCCESS;
- dev_info = finddevice(&dev_info_list, bus_no, dev_no);
+ dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
if (!dev_info)
rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
else if (dev_info->state.created == 0)
rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
- device_epilog(bus_no, dev_no, segment_state_running,
+ device_epilog(dev_info, segment_state_running,
CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
- inmsg->hdr.flags.response_expected == 1,
- FOR_VISORBUS(
- dev_info->chan_info.channel_type_uuid));
+ inmsg->hdr.flags.response_expected == 1, 1);
}
/* When provided with the physical address of the controlvm channel
* (phys_addr), the offset to the payload area we need to manage
* (offset), and the size of this payload area (bytes), fills in the
- * controlvm_payload_info struct. Returns TRUE for success or FALSE
+ * controlvm_payload_info struct. Returns true for success or false
* for failure.
*/
static int
-initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes,
- struct controlvm_payload_info *info)
+initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
+ struct visor_controlvm_payload_info *info)
{
- u8 __iomem *payload = NULL;
+ u8 *payload = NULL;
int rc = CONTROLVM_RESP_SUCCESS;
if (!info) {
rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
goto cleanup;
}
- memset(info, 0, sizeof(struct controlvm_payload_info));
+ memset(info, 0, sizeof(struct visor_controlvm_payload_info));
if ((offset == 0) || (bytes == 0)) {
rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
goto cleanup;
}
- payload = ioremap_cache(phys_addr + offset, bytes);
+ payload = memremap(phys_addr + offset, bytes, MEMREMAP_WB);
if (!payload) {
rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
goto cleanup;
cleanup:
if (rc < 0) {
if (payload) {
- iounmap(payload);
+ memunmap(payload);
payload = NULL;
}
}
}
static void
-destroy_controlvm_payload_info(struct controlvm_payload_info *info)
+destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
{
if (info->ptr) {
- iounmap(info->ptr);
+ memunmap(info->ptr);
info->ptr = NULL;
}
- memset(info, 0, sizeof(struct controlvm_payload_info));
+ memset(info, 0, sizeof(struct visor_controlvm_payload_info));
}
static void
initialize_controlvm_payload(void)
{
- HOSTADDRESS phys_addr = visorchannel_get_physaddr(controlvm_channel);
+ u64 phys_addr = visorchannel_get_physaddr(controlvm_channel);
u64 payload_offset = 0;
u32 payload_bytes = 0;
/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
* Returns CONTROLVM_RESP_xxx code.
*/
-int
+static int
visorchipset_chipset_ready(void)
{
kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
return CONTROLVM_RESP_SUCCESS;
}
-EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);
-int
+static int
visorchipset_chipset_selftest(void)
{
char env_selftest[20];
envp);
return CONTROLVM_RESP_SUCCESS;
}
-EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);
/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
* Returns CONTROLVM_RESP_xxx code.
*/
-int
+static int
visorchipset_chipset_notready(void)
{
kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
return CONTROLVM_RESP_SUCCESS;
}
-EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);
static void
chipset_ready(struct controlvm_message_header *msg_hdr)
/* This is your "one-stop" shop for grabbing the next message from the
* CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
*/
-static BOOL
+static bool
read_controlvm_event(struct controlvm_message *msg)
{
if (visorchannel_signalremove(controlvm_channel,
CONTROLVM_QUEUE_EVENT, msg)) {
/* got a message */
if (msg->hdr.flags.test_message == 1)
- return FALSE;
- return TRUE;
+ return false;
+ return true;
}
- return FALSE;
+ return false;
}
/*
static void
parahotplug_process_list(void)
{
- struct list_head *pos = NULL;
- struct list_head *tmp = NULL;
+ struct list_head *pos;
+ struct list_head *tmp;
spin_lock(¶hotplug_request_list_lock);
static int
parahotplug_request_complete(int id, u16 active)
{
- struct list_head *pos = NULL;
- struct list_head *tmp = NULL;
+ struct list_head *pos;
+ struct list_head *tmp;
spin_lock(¶hotplug_request_list_lock);
/* Process a controlvm message.
* Return result:
- * FALSE - this function will return FALSE only in the case where the
+ * false - this function will return false only in the case where the
* controlvm message was NOT processed, but processing must be
* retried before reading the next controlvm message; a
* scenario where this can occur is when we need to throttle
* the allocation of memory in which to copy out controlvm
* payload data
- * TRUE - processing of the controlvm message completed,
+ * true - processing of the controlvm message completed,
* either successfully or with an error.
*/
-static BOOL
-handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr)
+static bool
+handle_command(struct controlvm_message inmsg, u64 channel_addr)
{
struct controlvm_message_packet *cmd = &inmsg.cmd;
- u64 parm_addr = 0;
- u32 parm_bytes = 0;
+ u64 parm_addr;
+ u32 parm_bytes;
struct parser_context *parser_ctx = NULL;
- bool local_addr = false;
+ bool local_addr;
struct controlvm_message ackmsg;
/* create parsing context if necessary */
local_addr = (inmsg.hdr.flags.test_message == 1);
if (channel_addr == 0)
- return TRUE;
+ return true;
parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
parm_bytes = inmsg.hdr.payload_bytes;
* within our OS-controlled memory. We need to know that, because it
* makes a difference in how we compute the virtual address.
*/
- if (parm_addr != 0 && parm_bytes != 0) {
- BOOL retry = FALSE;
+ if (parm_addr && parm_bytes) {
+ bool retry = false;
parser_ctx =
parser_init_byte_stream(parm_addr, parm_bytes,
local_addr, &retry);
if (!parser_ctx && retry)
- return FALSE;
+ return false;
}
if (!local_addr) {
/* save the hdr and cmd structures for later use */
/* when sending back the response to Command */
my_device_changestate(&inmsg);
- g_diag_msg_hdr = inmsg.hdr;
g_devicechangestate_packet = inmsg.cmd;
break;
}
parser_done(parser_ctx);
parser_ctx = NULL;
}
- return TRUE;
+ return true;
+}
+
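+/* Ask the hypervisor, via VMCALL_IO_CONTROLVM_ADDR, for the guest-physical
+ * address and size of the controlvm channel.
+ */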
+static inline unsigned int
+issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
+{
+ struct vmcall_io_controlvm_addr_params params;
+ int result = VMCALL_SUCCESS;
+ u64 physaddr;
+
+ physaddr = virt_to_phys(¶ms);
+ ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
+ if (VMCALL_SUCCESSFUL(result)) {
+ *control_addr = params.address;
+ *control_bytes = params.channel_bytes;
+ }
+ return result;
}
-static HOSTADDRESS controlvm_get_channel_address(void)
+static u64 controlvm_get_channel_address(void)
{
u64 addr = 0;
u32 size = 0;
controlvm_periodic_work(struct work_struct *work)
{
struct controlvm_message inmsg;
- BOOL got_command = FALSE;
- BOOL handle_command_failed = FALSE;
+ bool got_command = false;
+ bool handle_command_failed = false;
static u64 poll_count;
/* make sure visorbus server is registered for controlvm callbacks */
- if (visorchipset_serverregwait && !serverregistered)
- goto cleanup;
- /* make sure visorclientbus server is regsitered for controlvm
- * callbacks
- */
- if (visorchipset_clientregwait && !clientregistered)
+ if (visorchipset_visorbusregwait && !visorbusregistered)
goto cleanup;
poll_count++;
* rather than reading a new one
*/
inmsg = controlvm_pending_msg;
- controlvm_pending_msg_valid = FALSE;
+ controlvm_pending_msg_valid = false;
got_command = true;
} else {
got_command = read_controlvm_event(&inmsg);
}
}
- handle_command_failed = FALSE;
+ handle_command_failed = false;
while (got_command && (!handle_command_failed)) {
most_recent_message_jiffies = jiffies;
if (handle_command(inmsg,
* controlvm msg so we will attempt to
* reprocess it on our next loop
*/
- handle_command_failed = TRUE;
+ handle_command_failed = true;
controlvm_pending_msg = inmsg;
- controlvm_pending_msg_valid = TRUE;
+ controlvm_pending_msg_valid = true;
}
}
u32 local_crash_msg_offset;
u16 local_crash_msg_count;
- /* make sure visorbus server is registered for controlvm callbacks */
- if (visorchipset_serverregwait && !serverregistered)
- goto cleanup;
-
- /* make sure visorclientbus server is regsitered for controlvm
- * callbacks
- */
- if (visorchipset_clientregwait && !clientregistered)
+ /* make sure visorbus is registered for controlvm callbacks */
+ if (visorchipset_visorbusregwait && !visorbusregistered)
goto cleanup;
POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
}
/* reuse IOVM create bus message */
- if (local_crash_bus_msg.cmd.create_bus.channel_addr != 0) {
+ if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
bus_create(&local_crash_bus_msg);
} else {
POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
}
/* reuse create device message for storage device */
- if (local_crash_dev_msg.cmd.create_device.channel_addr != 0) {
+ if (local_crash_dev_msg.cmd.create_device.channel_addr) {
my_device_create(&local_crash_dev_msg);
} else {
POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
}
static void
-bus_create_response(ulong bus_no, int response)
+bus_create_response(struct visor_device *bus_info, int response)
{
- bus_responder(CONTROLVM_BUS_CREATE, bus_no, response);
-}
+ if (response >= 0)
+ bus_info->state.created = 1;
-static void
-bus_destroy_response(ulong bus_no, int response)
-{
- bus_responder(CONTROLVM_BUS_DESTROY, bus_no, response);
-}
+ bus_responder(CONTROLVM_BUS_CREATE, bus_info->pending_msg_hdr,
+ response);
-static void
-device_create_response(ulong bus_no, ulong dev_no, int response)
-{
- device_responder(CONTROLVM_DEVICE_CREATE, bus_no, dev_no, response);
+ kfree(bus_info->pending_msg_hdr);
+ bus_info->pending_msg_hdr = NULL;
}
static void
-device_destroy_response(ulong bus_no, ulong dev_no, int response)
+bus_destroy_response(struct visor_device *bus_info, int response)
{
- device_responder(CONTROLVM_DEVICE_DESTROY, bus_no, dev_no, response);
-}
+ bus_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,
+ response);
-void
-visorchipset_device_pause_response(ulong bus_no, ulong dev_no, int response)
-{
- device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
- bus_no, dev_no, response,
- segment_state_standby);
+ kfree(bus_info->pending_msg_hdr);
+ bus_info->pending_msg_hdr = NULL;
}
-EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
static void
-device_resume_response(ulong bus_no, ulong dev_no, int response)
-{
- device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
- bus_no, dev_no, response,
- segment_state_running);
-}
-
-BOOL
-visorchipset_get_bus_info(ulong bus_no, struct visorchipset_bus_info *bus_info)
-{
- void *p = findbus(&bus_info_list, bus_no);
-
- if (!p)
- return FALSE;
- memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
- return TRUE;
-}
-EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
-
-BOOL
-visorchipset_set_bus_context(ulong bus_no, void *context)
+device_create_response(struct visor_device *dev_info, int response)
{
- struct visorchipset_bus_info *p = findbus(&bus_info_list, bus_no);
-
- if (!p)
- return FALSE;
- p->bus_driver_context = context;
- return TRUE;
-}
-EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
+ if (response >= 0)
+ dev_info->state.created = 1;
-BOOL
-visorchipset_get_device_info(ulong bus_no, ulong dev_no,
- struct visorchipset_device_info *dev_info)
-{
- void *p = finddevice(&dev_info_list, bus_no, dev_no);
+ device_responder(CONTROLVM_DEVICE_CREATE, dev_info->pending_msg_hdr,
+ response);
- if (!p)
- return FALSE;
- memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
- return TRUE;
+ kfree(dev_info->pending_msg_hdr);
+ dev_info->pending_msg_hdr = NULL;
}
-EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
-BOOL
-visorchipset_set_device_context(ulong bus_no, ulong dev_no, void *context)
+static void
+device_destroy_response(struct visor_device *dev_info, int response)
{
- struct visorchipset_device_info *p =
- finddevice(&dev_info_list, bus_no, dev_no);
+ device_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
+ response);
- if (!p)
- return FALSE;
- p->bus_driver_context = context;
- return TRUE;
+ kfree(dev_info->pending_msg_hdr);
+ dev_info->pending_msg_hdr = NULL;
}
-EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
-/* Generic wrapper function for allocating memory from a kmem_cache pool.
- */
-void *
-visorchipset_cache_alloc(struct kmem_cache *pool, BOOL ok_to_block,
- char *fn, int ln)
+static void
+visorchipset_device_pause_response(struct visor_device *dev_info,
+ int response)
{
- gfp_t gfp;
- void *p;
-
- if (ok_to_block)
- gfp = GFP_KERNEL;
- else
- gfp = GFP_ATOMIC;
- /* __GFP_NORETRY means "ok to fail", meaning
- * kmem_cache_alloc() can return NULL, implying the caller CAN
- * cope with failure. If you do NOT specify __GFP_NORETRY,
- * Linux will go to extreme measures to get memory for you
- * (like, invoke oom killer), which will probably cripple the
- * system.
- */
- gfp |= __GFP_NORETRY;
- p = kmem_cache_alloc(pool, gfp);
- if (!p)
- return NULL;
+ device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
+ dev_info, response,
+ segment_state_standby);
- atomic_inc(&visorchipset_cache_buffers_in_use);
- return p;
+ kfree(dev_info->pending_msg_hdr);
+ dev_info->pending_msg_hdr = NULL;
}
-/* Generic wrapper function for freeing memory from a kmem_cache pool.
- */
-void
-visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln)
+static void
+device_resume_response(struct visor_device *dev_info, int response)
{
- if (!p)
- return;
+ device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
+ dev_info, response,
+ segment_state_running);
- atomic_dec(&visorchipset_cache_buffers_in_use);
- kmem_cache_free(pool, p);
+ kfree(dev_info->pending_msg_hdr);
+ dev_info->pending_msg_hdr = NULL;
}
static ssize_t chipsetready_store(struct device *dev,
if (sscanf(buf, "%63s", msgtype) != 1)
return -EINVAL;
- if (strcmp(msgtype, "CALLHOMEDISK_MOUNTED") == 0) {
+ if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
chipset_events[0] = 1;
return count;
- } else if (strcmp(msgtype, "MODULES_LOADED") == 0) {
+ } else if (!strcmp(msgtype, "MODULES_LOADED")) {
chipset_events[1] = 1;
return count;
}
struct device_attribute *attr,
const char *buf, size_t count)
{
- uint id;
+ unsigned int id;
- if (kstrtouint(buf, 10, &id) != 0)
+ if (kstrtouint(buf, 10, &id))
return -EINVAL;
parahotplug_request_complete(id, 0);
struct device_attribute *attr,
const char *buf, size_t count)
{
- uint id;
+ unsigned int id;
- if (kstrtouint(buf, 10, &id) != 0)
+ if (kstrtouint(buf, 10, &id))
return -EINVAL;
parahotplug_request_complete(id, 1);
return count;
}
-static int __init
-visorchipset_init(void)
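+/* Allow userspace to map the GP control channel: only the page-aligned
+ * offset VISORCHIPSET_MMAP_CONTROLCHANOFFSET is accepted; the physical
+ * address is read from the controlvm channel's gp_control_channel field and
+ * handed to remap_pfn_range().
+ */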
+static int
+visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
{
- int rc = 0, x = 0;
- HOSTADDRESS addr;
+ unsigned long physaddr = 0;
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ u64 addr = 0;
- if (!unisys_spar_platform)
- return -ENODEV;
+ /* sv_enable_dfp(); */
+ if (offset & (PAGE_SIZE - 1))
+ return -ENXIO; /* need aligned offsets */
- memset(&busdev_server_notifiers, 0, sizeof(busdev_server_notifiers));
- memset(&busdev_client_notifiers, 0, sizeof(busdev_client_notifiers));
- memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
- memset(&livedump_info, 0, sizeof(livedump_info));
- atomic_set(&livedump_info.buffers_in_use, 0);
+ switch (offset) {
+ case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
+ vma->vm_flags |= VM_IO;
+ if (!*file_controlvm_channel)
+ return -ENXIO;
- if (visorchipset_testvnic) {
- POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, x, DIAG_SEVERITY_ERR);
- rc = x;
- goto cleanup;
+ visorchannel_read(*file_controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ gp_control_channel),
+ &addr, sizeof(addr));
+ if (!addr)
+ return -ENXIO;
+
+ physaddr = (unsigned long)addr;
+ if (remap_pfn_range(vma, vma->vm_start,
+ physaddr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ /*pgprot_noncached */
+ (vma->vm_page_prot))) {
+ return -EAGAIN;
+ }
+ break;
+ default:
+ return -ENXIO;
}
+ return 0;
+}
- addr = controlvm_get_channel_address();
- if (addr != 0) {
- controlvm_channel =
- visorchannel_create_with_lock
- (addr,
- sizeof(struct spar_controlvm_channel_protocol),
- spar_controlvm_channel_protocol_uuid);
- if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
- visorchannel_get_header(controlvm_channel))) {
- initialize_controlvm_payload();
- } else {
- visorchannel_destroy(controlvm_channel);
- controlvm_channel = NULL;
- return -ENODEV;
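+/* Query the guest's virtual RTC time offset from the hypervisor. */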
+static inline s64 issue_vmcall_query_guest_virtual_time_offset(void)
+{
+ u64 result = VMCALL_SUCCESS;
+ u64 physaddr = 0;
+
+ ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr,
+ result);
+ return result;
+}
+
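+/* Ask the hypervisor to apply 'adjustment' to the physical wall-clock time. */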
+static inline int issue_vmcall_update_physical_time(u64 adjustment)
+{
+ int result = VMCALL_SUCCESS;
+
+ ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result);
+ return result;
+}
+
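+/* The only ioctls supported are the raw time vmcalls: reading the guest's
+ * virtual RTC offset and applying a physical time adjustment.
+ */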
+static long visorchipset_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ s64 adjustment;
+ s64 vrtc_offset;
+
+ switch (cmd) {
+ case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
+ /* get the physical rtc offset */
+ vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
+ if (copy_to_user((void __user *)arg, &vrtc_offset,
+ sizeof(vrtc_offset))) {
+ return -EFAULT;
}
+ return 0;
+ case VMCALL_UPDATE_PHYSICAL_TIME:
+ if (copy_from_user(&adjustment, (void __user *)arg,
+ sizeof(adjustment))) {
+ return -EFAULT;
+ }
+ return issue_vmcall_update_physical_time(adjustment);
+ default:
+ return -EFAULT;
+ }
+}
+
+static const struct file_operations visorchipset_fops = {
+ .owner = THIS_MODULE,
+ .open = visorchipset_open,
+ .read = NULL,
+ .write = NULL,
+ .unlocked_ioctl = visorchipset_ioctl,
+ .release = visorchipset_release,
+ .mmap = visorchipset_mmap,
+};
+
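+/* Register the visorchipset character device.  A major of 0 requests
+ * dynamic allocation via alloc_chrdev_region(); otherwise the given major is
+ * registered statically.  The controlvm channel pointer is stashed for use
+ * by mmap().
+ */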
+static int
+visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
+{
+ int rc = 0;
+
+ file_controlvm_channel = controlvm_channel;
+ cdev_init(&file_cdev, &visorchipset_fops);
+ file_cdev.owner = THIS_MODULE;
+ if (MAJOR(major_dev) == 0) {
+ rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
+ /* dynamic major device number registration required */
+ if (rc < 0)
+ return rc;
+ } else {
+ /* static major device number registration required */
+ rc = register_chrdev_region(major_dev, 1, "visorchipset");
+ if (rc < 0)
+ return rc;
+ }
+ rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
+ if (rc < 0) {
+ unregister_chrdev_region(major_dev, 1);
+ return rc;
+ }
+ return 0;
+}
+
+static int
+visorchipset_init(struct acpi_device *acpi_device)
+{
+ int rc = 0;
+ u64 addr;
+ int tmp_sz = sizeof(struct spar_controlvm_channel_protocol);
+ uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
+
+ addr = controlvm_get_channel_address();
+ if (!addr)
+ return -ENODEV;
+
+ memset(&busdev_notifiers, 0, sizeof(busdev_notifiers));
+ memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
+
+ controlvm_channel = visorchannel_create_with_lock(addr, tmp_sz,
+ GFP_KERNEL, uuid);
+ if (!controlvm_channel)
+ return -ENODEV;
+
+ if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
+ visorchannel_get_header(controlvm_channel))) {
+ initialize_controlvm_payload();
} else {
+ visorchannel_destroy(controlvm_channel);
+ controlvm_channel = NULL;
return -ENODEV;
}
- memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));
-
memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
- memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));
-
- putfile_buffer_list_pool =
- kmem_cache_create(putfile_buffer_list_pool_name,
- sizeof(struct putfile_buffer_entry),
- 0, SLAB_HWCACHE_ALIGN, NULL);
- if (!putfile_buffer_list_pool) {
- POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
- rc = -1;
+ /* if booting in a crash kernel */
+ if (is_kdump_kernel())
+ INIT_DELAYED_WORK(&periodic_controlvm_work,
+ setup_crash_devices_work_queue);
+ else
+ INIT_DELAYED_WORK(&periodic_controlvm_work,
+ controlvm_periodic_work);
+ periodic_controlvm_workqueue =
+ create_singlethread_workqueue("visorchipset_controlvm");
+
+ if (!periodic_controlvm_workqueue) {
+ POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
+ DIAG_SEVERITY_ERR);
+ rc = -ENOMEM;
goto cleanup;
}
- if (!visorchipset_disable_controlvm) {
- /* if booting in a crash kernel */
- if (visorchipset_crash_kernel)
- INIT_DELAYED_WORK(&periodic_controlvm_work,
- setup_crash_devices_work_queue);
- else
- INIT_DELAYED_WORK(&periodic_controlvm_work,
- controlvm_periodic_work);
- periodic_controlvm_workqueue =
- create_singlethread_workqueue("visorchipset_controlvm");
-
- if (!periodic_controlvm_workqueue) {
- POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
- DIAG_SEVERITY_ERR);
- rc = -ENOMEM;
- goto cleanup;
- }
- most_recent_message_jiffies = jiffies;
- poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
- rc = queue_delayed_work(periodic_controlvm_workqueue,
- &periodic_controlvm_work, poll_jiffies);
- if (rc < 0) {
- POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
- DIAG_SEVERITY_ERR);
- goto cleanup;
- }
+ most_recent_message_jiffies = jiffies;
+ poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
+ rc = queue_delayed_work(periodic_controlvm_workqueue,
+ &periodic_controlvm_work, poll_jiffies);
+ if (rc < 0) {
+ POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
+ DIAG_SEVERITY_ERR);
+ goto cleanup;
}
visorchipset_platform_device.dev.devt = major_dev;
 if (platform_device_register(&visorchipset_platform_device) < 0) {
 rc = -1;
 goto cleanup;
 }
POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
- rc = 0;
+
+ rc = visorbus_init();
cleanup:
 if (rc) {
 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
 DIAG_SEVERITY_ERR);
 }
 return rc;
 }
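+/* undo visorchipset_file_init(): remove the character device and release its
+ * device number region
+ */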
static void
-visorchipset_exit(void)
+visorchipset_file_cleanup(dev_t major_dev)
{
- POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
+ if (file_cdev.ops)
+ cdev_del(&file_cdev);
+ file_cdev.ops = NULL;
+ unregister_chrdev_region(major_dev, 1);
+}
- if (visorchipset_disable_controlvm) {
- ;
- } else {
- cancel_delayed_work(&periodic_controlvm_work);
- flush_workqueue(periodic_controlvm_workqueue);
- destroy_workqueue(periodic_controlvm_workqueue);
- periodic_controlvm_workqueue = NULL;
- destroy_controlvm_payload_info(&controlvm_payload_info);
- }
- if (putfile_buffer_list_pool) {
- kmem_cache_destroy(putfile_buffer_list_pool);
- putfile_buffer_list_pool = NULL;
- }
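+/* ACPI remove callback: tear down visorbus, the controlvm work queue and
+ * payload, the controlvm channel, and the character and platform devices
+ */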
+static int
+visorchipset_exit(struct acpi_device *acpi_device)
+{
+ POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
- cleanup_controlvm_structures();
+ visorbus_exit();
- memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));
+ cancel_delayed_work(&periodic_controlvm_work);
+ flush_workqueue(periodic_controlvm_workqueue);
+ destroy_workqueue(periodic_controlvm_workqueue);
+ periodic_controlvm_workqueue = NULL;
+ destroy_controlvm_payload_info(&controlvm_payload_info);
memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
- memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));
-
visorchannel_destroy(controlvm_channel);
visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
+ platform_device_unregister(&visorchipset_platform_device);
POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
+
+ return 0;
}
-module_param_named(testvnic, visorchipset_testvnic, int, S_IRUGO);
-MODULE_PARM_DESC(visorchipset_testvnic, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet");
-int visorchipset_testvnic = 0;
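+/* ACPI device IDs that this driver binds to */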
+static const struct acpi_device_id unisys_device_ids[] = {
+ {"PNP0A07", 0},
+ {"", 0},
+};
-module_param_named(testvnicclient, visorchipset_testvnicclient, int, S_IRUGO);
-MODULE_PARM_DESC(visorchipset_testvnicclient, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest");
-int visorchipset_testvnicclient = 0;
+static struct acpi_driver unisys_acpi_driver = {
+ .name = "unisys_acpi",
+ .class = "unisys_acpi_class",
+ .owner = THIS_MODULE,
+ .ids = unisys_device_ids,
+ .ops = {
+ .add = visorchipset_init,
+ .remove = visorchipset_exit,
+ },
+};
-module_param_named(testmsg, visorchipset_testmsg, int, S_IRUGO);
-MODULE_PARM_DESC(visorchipset_testmsg,
- "1 to manufacture the chipset, bus, and switch messages");
-int visorchipset_testmsg = 0;
+MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
-module_param_named(major, visorchipset_major, int, S_IRUGO);
-MODULE_PARM_DESC(visorchipset_major, "major device number to use for the device node");
-int visorchipset_major = 0;
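+/* returns nonzero when the hypervisor CPUID leaf reports the Unisys s-Par
+ * identification values
+ */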
+static __init uint32_t visorutil_spar_detect(void)
+{
+ unsigned int eax, ebx, ecx, edx;
+
+ if (cpu_has_hypervisor) {
+ /* check the ID */
+ cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
+ return (ebx == UNISYS_SPAR_ID_EBX) &&
+ (ecx == UNISYS_SPAR_ID_ECX) &&
+ (edx == UNISYS_SPAR_ID_EDX);
+ } else {
+ return 0;
+ }
+}
+
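+/* module entry point: register the ACPI driver only when running on s-Par */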
+static int init_unisys(void)
+{
+ int result;
+
+ if (!visorutil_spar_detect())
+ return -ENODEV;
+
+ result = acpi_bus_register_driver(&unisys_acpi_driver);
+ if (result)
+ return -ENODEV;
+
+ pr_info("Unisys Visorchipset Driver Loaded.\n");
+ return 0;
+}
+
+static void exit_unisys(void)
+{
+ acpi_bus_unregister_driver(&unisys_acpi_driver);
+}
-module_param_named(serverregwait, visorchipset_serverregwait, int, S_IRUGO);
-MODULE_PARM_DESC(visorchipset_serverreqwait,
+module_param_named(major, visorchipset_major, int, S_IRUGO);
+MODULE_PARM_DESC(visorchipset_major,
+ "major device number to use for the device node");
+module_param_named(visorbusregwait, visorchipset_visorbusregwait, int, S_IRUGO);
+MODULE_PARM_DESC(visorchipset_visorbusregwait,
"1 to have the module wait for the visor bus to register");
-int visorchipset_serverregwait = 0; /* default is off */
-module_param_named(clientregwait, visorchipset_clientregwait, int, S_IRUGO);
-MODULE_PARM_DESC(visorchipset_clientregwait, "1 to have the module wait for the visorclientbus to register");
-int visorchipset_clientregwait = 1; /* default is on */
-module_param_named(testteardown, visorchipset_testteardown, int, S_IRUGO);
-MODULE_PARM_DESC(visorchipset_testteardown,
- "1 to test teardown of the chipset, bus, and switch");
-int visorchipset_testteardown = 0; /* default is off */
-module_param_named(disable_controlvm, visorchipset_disable_controlvm, int,
- S_IRUGO);
-MODULE_PARM_DESC(visorchipset_disable_controlvm,
- "1 to disable polling of controlVm channel");
-int visorchipset_disable_controlvm = 0; /* default is off */
-module_param_named(crash_kernel, visorchipset_crash_kernel, int, S_IRUGO);
-MODULE_PARM_DESC(visorchipset_crash_kernel,
- "1 means we are running in crash kernel");
-int visorchipset_crash_kernel = 0; /* default is running in non-crash kernel */
module_param_named(holdchipsetready, visorchipset_holdchipsetready,
int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_holdchipsetready,
"1 to hold response to CHIPSET_READY");
-int visorchipset_holdchipsetready = 0; /* default is to send CHIPSET_READY
- * response immediately */
-module_init(visorchipset_init);
-module_exit(visorchipset_exit);
+
+module_init(init_unisys);
+module_exit(exit_unisys);
MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");