3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
20 /* if you want to turn on some debugging of write device data or read
21 * device data, define these two undefs. You will probably want to
22 * customize the code which is here since it was written assuming
23 * reading and writing a specific data file df.64M.txt which is a
24 * 64 megabyte file created by Art Nilson using a script I wrote called
25 * cr_test_data.pl. The data file consists of 256 byte lines of text
26 * which start with an 8 digit sequence number, a colon, and then
27 * letters after that */
29 #include <linux/kernel.h>
30 #ifdef CONFIG_MODVERSIONS
31 #include <config/modversions.h>
34 #include "diagnostics/appos_subsystems.h"
37 #include "uisthread.h"
39 #include <linux/module.h>
40 #include <linux/init.h>
41 #include <linux/pci.h>
42 #include <linux/spinlock.h>
43 #include <linux/device.h>
44 #include <linux/slab.h>
45 #include <scsi/scsi.h>
46 #include <scsi/scsi_host.h>
47 #include <scsi/scsi_cmnd.h>
48 #include <scsi/scsi_device.h>
49 #include <asm/param.h>
50 #include <linux/debugfs.h>
51 #include <linux/types.h>
55 #include "visorchipset.h"
57 #include "guestlinuxdebug.h"
58 /* this is shorter than using __FILE__ (full path name) in
59 * debug/info/error messages
61 #define CURRENT_FILE_PC VIRT_HBA_PC_virthba_c
62 #define __MYFILE__ "virthba.c"
64 /* NOTE: L1_CACHE_BYTES >=128 */
65 #define DEVICE_ATTRIBUTE struct device_attribute
67 /* MAX_BUF = 6 lines x 10 MAXVHBA x 80 characters
68 * = 4800 bytes ~ 2^13 = 8192 bytes
72 /*****************************************************/
73 /* Forward declarations */
74 /*****************************************************/
75 static int virthba_probe(struct virtpci_dev *dev,
76 const struct pci_device_id *id);
77 static void virthba_remove(struct virtpci_dev *dev);
78 static int virthba_abort_handler(struct scsi_cmnd *scsicmd);
79 static int virthba_bus_reset_handler(struct scsi_cmnd *scsicmd);
80 static int virthba_device_reset_handler(struct scsi_cmnd *scsicmd);
81 static int virthba_host_reset_handler(struct scsi_cmnd *scsicmd);
82 static const char *virthba_get_info(struct Scsi_Host *shp);
83 static int virthba_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
84 static int virthba_queue_command_lck(struct scsi_cmnd *scsicmd,
85 void (*virthba_cmnd_done)
86 (struct scsi_cmnd *));
/* CPU match table (Intel family 6, model 62): lets udev auto-load this
 * module on the CPUs that sPAR guest partitions report. */
88 static const struct x86_cpu_id unisys_spar_ids[] = {
89 { X86_VENDOR_INTEL, 6, 62, X86_FEATURE_ANY },
94 MODULE_DEVICE_TABLE(x86cpu, unisys_spar_ids);
97 static DEF_SCSI_QCMD(virthba_queue_command)
99 #define virthba_queue_command virthba_queue_command_lck
102 static int virthba_slave_alloc(struct scsi_device *scsidev);
103 static int virthba_slave_configure(struct scsi_device *scsidev);
104 static void virthba_slave_destroy(struct scsi_device *scsidev);
105 static int process_incoming_rsps(void *);
106 static int virthba_serverup(struct virtpci_dev *virtpcidev);
107 static int virthba_serverdown(struct virtpci_dev *virtpcidev, u32 state);
108 static void do_disk_add_remove(struct work_struct *work);
109 static void virthba_serverdown_complete(struct work_struct *work);
110 static ssize_t info_debugfs_read(struct file *file, char __user *buf,
111 size_t len, loff_t *offset);
112 static ssize_t enable_ints_write(struct file *file,
113 const char __user *buffer, size_t count,
116 /*****************************************************/
118 /*****************************************************/
120 static int rsltq_wait_usecs = 4000; /* Default 4ms */
121 static unsigned int max_buff_len;
124 static char *virthba_options = "NONE";
/* Virtual PCI IDs this driver binds to; exported so the module can be
 * auto-loaded when the matching virtual device appears. */
126 static const struct pci_device_id virthba_id_table[] = {
127 {PCI_DEVICE(PCI_VENDOR_ID_UNISYS, PCI_DEVICE_ID_VIRTHBA)},
131 /* export virthba_id_table */
132 MODULE_DEVICE_TABLE(pci, virthba_id_table);
134 static struct workqueue_struct *virthba_serverdown_workqueue;
/* virtpci driver glue.  resume/suspend are mapped to server-up /
 * server-down handling: they fire when the IO service partition comes
 * back or goes away, not on host power transitions. */
136 static struct virtpci_driver virthba_driver = {
137 .name = "uisvirthba",
140 .id_table = virthba_id_table,
141 .probe = virthba_probe,
142 .remove = virthba_remove,
143 .resume = virthba_serverup,
144 .suspend = virthba_serverdown
147 /* The Send and Receive Buffers of the IO Queue may both be full */
148 #define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS*2)
149 #define INTERRUPT_VECTOR_MASK 0x3F
/* One slot of the per-adapter pending[] table: a request that has been
 * forwarded to the IOVM and has not yet been completed.  The slot index
 * is used as a compact handle across the channel. */
152 char cmdtype; /* Type of pointer that is being stored */
153 void *sent; /* The Data being tracked */
154 /* struct scsi_cmnd *type for virthba_queue_command */
155 /* struct uiscmdrsp *type for management commands */
158 #define VIRTHBA_ERROR_COUNT 30
159 #define IOS_ERROR_THRESHOLD 1000
/* Per-disk node in the singly linked list hanging off virthba_info.head.
 * error_count is capped at VIRTHBA_ERROR_COUNT; ios_threshold counts
 * down successful IOs and, on reaching zero, error_count is cleared
 * (see do_scsi_nolinuxstat()). */
160 struct virtdisk_info {
162 u32 channel, id, lun; /* Disk Path */
163 atomic_t ios_threshold;
164 atomic_t error_count;
165 struct virtdisk_info *next;
168 /* Each Scsi_Host has a host_data area that contains this struct. */
/* Per-adapter state, carved out of the Scsi_Host host_data area. */
169 struct virthba_info {
170 struct Scsi_Host *scsihost;
171 struct virtpci_dev *virtpcidev;
172 struct list_head dev_info_list;
173 struct chaninfo chinfo;
174 struct irq_info intr; /* use recvInterrupt info to receive
175 interrupts when IOs complete */
176 int interrupt_vector;
177 struct scsipending pending[MAX_PENDING_REQUESTS]; /* Tracks the requests
179 /* forwarded to the IOVM and haven't returned yet */
180 unsigned int nextinsert; /* Start search for next pending
184 bool serverchangingstate;
185 unsigned long long acquire_failed_cnt;
186 unsigned long long interrupts_rcvd;
187 unsigned long long interrupts_notme;
188 unsigned long long interrupts_disabled;
189 struct work_struct serverdown_completion;
/* points at the FROM_IOPART signal-queue header's features word; used
 * by the ISR to mask/unmask channel interrupts (set up in probe) */
190 u64 __iomem *flags_addr;
191 atomic_t interrupt_rcvd;
192 wait_queue_head_t rsp_queue;
193 struct virtdisk_info head;
196 /* Work Data for dar_work_queue */
/* One queued hot-plug event; nodes are pushed LIFO onto
 * dar_work_queue_head and drained by do_disk_add_remove(). */
197 struct diskaddremove {
198 u8 add; /* 0-remove, 1-add */
199 struct Scsi_Host *shost; /* Scsi Host for this virthba instance */
200 u32 channel, id, lun; /* Disk Path */
201 struct diskaddremove *next;
204 #define virtpci_dev_to_virthba_virthba_get_info(d) \
205 container_of(d, struct virthba_info, virtpcidev)
207 static DEVICE_ATTRIBUTE *virthba_shost_attrs[];
/* SCSI mid-layer host template: wires this driver's queuecommand,
 * error handlers and slave_* callbacks into the SCSI stack.  Note the
 * #define for the queue depth is (unconventionally) embedded inside the
 * initializer. */
208 static struct scsi_host_template virthba_driver_template = {
209 .name = "Unisys Virtual HBA",
210 .info = virthba_get_info,
211 .ioctl = virthba_ioctl,
212 .queuecommand = virthba_queue_command,
213 .eh_abort_handler = virthba_abort_handler,
214 .eh_device_reset_handler = virthba_device_reset_handler,
215 .eh_bus_reset_handler = virthba_bus_reset_handler,
216 .eh_host_reset_handler = virthba_host_reset_handler,
217 .shost_attrs = virthba_shost_attrs,
219 #define VIRTHBA_MAX_CMNDS 128
220 .can_queue = VIRTHBA_MAX_CMNDS,
221 .sg_tablesize = 64, /* largest number of address/length pairs */
223 .slave_alloc = virthba_slave_alloc,
224 .slave_configure = virthba_slave_configure,
225 .slave_destroy = virthba_slave_destroy,
226 .use_clustering = ENABLE_CLUSTERING,
/* Entry in the virthbas_open[] registry of adapters created by probe. */
229 struct virthba_devices_open {
230 struct virthba_info *virthbainfo;
/* debugfs "info" node: read-only stats via info_debugfs_read(). */
233 static const struct file_operations debugfs_info_fops = {
234 .read = info_debugfs_read,
/* debugfs "enable_ints" node: write-only control via enable_ints_write(). */
237 static const struct file_operations debugfs_enable_ints_fops = {
238 .write = enable_ints_write,
241 /*****************************************************/
243 /*****************************************************/
245 #define VIRTHBASOPENMAX 1
246 /* array of open devices maintained by open() and close(); */
247 static struct virthba_devices_open virthbas_open[VIRTHBASOPENMAX];
248 static struct dentry *virthba_debugfs_dir;
250 /*****************************************************/
251 /* Local Functions */
252 /*****************************************************/
/* Claim a free slot in the pending[] ring and record the in-flight
 * request (cmdtype + pointer), scanning circularly from nextinsert under
 * privlock.  Returns the slot index, which is handed to the IOVM as a
 * compact handle instead of the raw pointer.
 * NOTE(review): the table-full exit path is not visible in this excerpt;
 * callers treat -1 as "table full" -- confirm against the full source. */
254 add_scsipending_entry(struct virthba_info *vhbainfo, char cmdtype, void *new)
259 spin_lock_irqsave(&vhbainfo->privlock, flags);
260 insert_location = vhbainfo->nextinsert;
261 while (vhbainfo->pending[insert_location].sent) {
262 insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
/* wrapped all the way around: every slot is busy */
263 if (insert_location == (int)vhbainfo->nextinsert) {
264 spin_unlock_irqrestore(&vhbainfo->privlock, flags);
269 vhbainfo->pending[insert_location].cmdtype = cmdtype;
270 vhbainfo->pending[insert_location].sent = new;
271 vhbainfo->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
272 spin_unlock_irqrestore(&vhbainfo->privlock, flags);
274 return insert_location;
/* Like add_scsipending_entry(), but sleeps in 10 ms interruptible naps
 * until a pending[] slot frees up; never reports failure to the caller. */
278 add_scsipending_entry_with_wait(struct virthba_info *vhbainfo, char cmdtype,
281 int insert_location = add_scsipending_entry(vhbainfo, cmdtype, new);
283 while (insert_location == -1) {
284 set_current_state(TASK_INTERRUPTIBLE);
285 schedule_timeout(msecs_to_jiffies(10));
286 insert_location = add_scsipending_entry(vhbainfo, cmdtype, new);
289 return (unsigned int)insert_location;
/* Release pending[] slot 'del' under privlock and hand back the pointer
 * that was tracked there; out-of-range handles are ignored.
 * NOTE(review): the return statement is outside this excerpt --
 * presumably returns 'sent' (NULL for a bad/empty slot); confirm. */
293 del_scsipending_entry(struct virthba_info *vhbainfo, uintptr_t del)
298 if (del < MAX_PENDING_REQUESTS) {
299 spin_lock_irqsave(&vhbainfo->privlock, flags);
300 sent = vhbainfo->pending[del].sent;
302 vhbainfo->pending[del].cmdtype = 0;
303 vhbainfo->pending[del].sent = NULL;
304 spin_unlock_irqrestore(&vhbainfo->privlock, flags);
310 /* dar_work_queue (Disk Add/Remove) */
311 static struct work_struct dar_work_queue;
312 static struct diskaddremove *dar_work_queue_head;
313 static spinlock_t dar_work_queue_lock;
314 static unsigned short dar_work_queue_sched;
/* Push a diskaddremove node onto the LIFO list and schedule
 * dar_work_queue at most once until the handler drains it.  Expects an
 * 'unsigned long flags' variable in the expanding scope (irqsave). */
315 #define QUEUE_DISKADDREMOVE(dar) { \
316 spin_lock_irqsave(&dar_work_queue_lock, flags); \
317 if (!dar_work_queue_head) { \
318 dar_work_queue_head = dar; \
322 dar->next = dar_work_queue_head; \
323 dar_work_queue_head = dar; \
325 if (!dar_work_queue_sched) { \
326 schedule_work(&dar_work_queue); \
327 dar_work_queue_sched = 1; \
329 spin_unlock_irqrestore(&dar_work_queue_lock, flags); \
/* Carry out one queued hot-plug event: look the channel/id/lun up in the
 * SCSI midlayer, then remove the device if found, or add it if this is
 * an add event. */
333 send_disk_add_remove(struct diskaddremove *dar)
335 struct scsi_device *sdev;
338 sdev = scsi_device_lookup(dar->shost, dar->channel, dar->id, dar->lun);
341 scsi_remove_device(sdev);
342 } else if (dar->add) {
344 scsi_add_device(dar->shost, dar->channel, dar->id,
350 /*****************************************************/
351 /* dar_work_queue Handler Thread */
352 /*****************************************************/
/* Workqueue handler: atomically detach the whole pending hot-plug list
 * and clear the "scheduled" flag, then process entries outside the lock
 * (send_disk_add_remove() may sleep in the SCSI midlayer). */
354 do_disk_add_remove(struct work_struct *work)
356 struct diskaddremove *dar;
357 struct diskaddremove *tmphead;
361 spin_lock_irqsave(&dar_work_queue_lock, flags);
362 tmphead = dar_work_queue_head;
363 dar_work_queue_head = NULL;
364 dar_work_queue_sched = 0;
365 spin_unlock_irqrestore(&dar_work_queue_lock, flags);
369 send_disk_add_remove(dar);
374 /*****************************************************/
375 /* Routine to add entry to dar_work_queue */
376 /*****************************************************/
/* Convert an IOVM disk-change notification (cmdrsp->disknotify) into a
 * diskaddremove node and queue it for the workqueue thread.  GFP_ATOMIC
 * because the caller may not be allowed to sleep. */
378 process_disk_notify(struct Scsi_Host *shost, struct uiscmdrsp *cmdrsp)
380 struct diskaddremove *dar;
383 dar = kzalloc(sizeof(*dar), GFP_ATOMIC);
385 dar->add = cmdrsp->disknotify.add;
387 dar->channel = cmdrsp->disknotify.channel;
388 dar->id = cmdrsp->disknotify.id;
389 dar->lun = cmdrsp->disknotify.lun;
390 QUEUE_DISKADDREMOVE(dar);
394 /*****************************************************/
395 /* Probe Remove Functions */
396 /*****************************************************/
/* Shared-IRQ handler for IOpart->guest notifications: counts receipts,
 * masks further channel interrupts when the IOVM advertises that the
 * driver may do so, and wakes the response-draining thread when the
 * FROM_IOPART signal queue is non-empty. */
398 virthba_isr(int irq, void *dev_id)
400 struct virthba_info *virthbainfo = (struct virthba_info *)dev_id;
401 struct channel_header __iomem *channel_header;
402 struct signal_queue_header __iomem *pqhdr;
404 unsigned long long rc1;
408 virthbainfo->interrupts_rcvd++;
409 channel_header = virthbainfo->chinfo.queueinfo->chan;
410 if (((readq(&channel_header->features)
411 & ULTRA_IO_IOVM_IS_OK_WITH_DRIVER_DISABLING_INTS) != 0) &&
412 ((readq(&channel_header->features) &
413 ULTRA_IO_DRIVER_DISABLES_INTS) !=
/* IOVM permits masking: quiet the channel while we drain responses */
415 virthbainfo->interrupts_disabled++;
416 mask = ~ULTRA_CHANNEL_ENABLE_INTS;
417 rc1 = uisqueue_interlocked_and(virthbainfo->flags_addr, mask);
/* empty queue: the shared IRQ probably fired for another device */
419 if (spar_signalqueue_empty(channel_header, IOCHAN_FROM_IOPART)) {
420 virthbainfo->interrupts_notme++;
423 pqhdr = (struct signal_queue_header __iomem *)
424 ((char __iomem *)channel_header +
425 readq(&channel_header->ch_space_offset)) + IOCHAN_FROM_IOPART;
426 writeq(readq(&pqhdr->num_irq_received) + 1,
427 &pqhdr->num_irq_received);
/* flag the event and wake process_incoming_rsps() to drain the queue */
428 atomic_set(&virthbainfo->interrupt_rcvd, 1);
429 wake_up_interruptible(&virthbainfo->rsp_queue);
/* virtpci probe: allocate a Scsi_Host with a virthba_info host_data
 * area, size its limits from what the IOVM advertised
 * (virtpcidev->scsi.max), register it with the midlayer, start the
 * response-draining thread, hook the receive IRQ and -- when that
 * succeeds -- switch the channel from polling to interrupt-driven
 * operation, then scan for disks. */
434 virthba_probe(struct virtpci_dev *virtpcidev, const struct pci_device_id *id)
437 struct Scsi_Host *scsihost;
438 struct virthba_info *virthbainfo;
441 irq_handler_t handler = virthba_isr;
442 struct channel_header __iomem *channel_header;
443 struct signal_queue_header __iomem *pqhdr;
446 POSTCODE_LINUX_2(VHBA_PROBE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
447 /* call scsi_host_alloc to register a scsi host adapter
448 * instance - this virthba that has just been created is an
449 * instance of a scsi host adapter. This scsi_host_alloc
450 * function allocates a new Scsi_Host struct & performs basic
451 * initialization. The host is not published to the scsi
452 * midlayer until scsi_add_host is called.
455 /* arg 2 passed in length of extra space we want allocated
456 * with scsi_host struct for our own use scsi_host_alloc
459 scsihost = scsi_host_alloc(&virthba_driver_template,
460 sizeof(struct virthba_info));
464 scsihost->this_id = UIS_MAGIC_VHBA;
465 /* linux treats max-channel differently than max-id & max-lun.
466 * In the latter cases, those two values result in 0 to max-1
467 * (inclusive) being scanned. But in the case of channels, the
468 * scan is 0 to max (inclusive); so we will subtract one from
469 * the max-channel value.
471 scsihost->max_channel = (unsigned)virtpcidev->scsi.max.max_channel;
472 scsihost->max_id = (unsigned)virtpcidev->scsi.max.max_id;
473 scsihost->max_lun = (unsigned)virtpcidev->scsi.max.max_lun;
474 scsihost->cmd_per_lun = (unsigned)virtpcidev->scsi.max.cmd_per_lun;
475 scsihost->max_sectors =
476 (unsigned short)(virtpcidev->scsi.max.max_io_size >> 9);
477 scsihost->sg_tablesize =
478 (unsigned short)(virtpcidev->scsi.max.max_io_size / PAGE_SIZE);
479 if (scsihost->sg_tablesize > MAX_PHYS_INFO)
480 scsihost->sg_tablesize = MAX_PHYS_INFO;
482 /* this creates "host%d" in sysfs. If 2nd argument is NULL,
483 * then this generic /sys/devices/platform/host? device is
484 * created and /sys/scsi_host/host? ->
485 * /sys/devices/platform/host? If 2nd argument is not NULL,
486 * then this generic /sys/devices/<path>/host? is created and
487 * host? points to that device instead.
489 error = scsi_add_host(scsihost, &virtpcidev->generic_dev);
491 POSTCODE_LINUX_2(VHBA_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
492 /* decr refcount on scsihost which was incremented by
493 * scsi_add_host so the scsi_host gets deleted
495 scsi_host_put(scsihost);
499 virthbainfo = (struct virthba_info *)scsihost->hostdata;
500 memset(virthbainfo, 0, sizeof(struct virthba_info));
/* record this adapter in the module-wide open-devices registry */
501 for (i = 0; i < VIRTHBASOPENMAX; i++) {
502 if (!virthbas_open[i].virthbainfo) {
503 virthbas_open[i].virthbainfo = virthbainfo;
507 virthbainfo->interrupt_vector = -1;
508 virthbainfo->chinfo.queueinfo = &virtpcidev->queueinfo;
509 virthbainfo->virtpcidev = virtpcidev;
510 spin_lock_init(&virthbainfo->chinfo.insertlock);
512 init_waitqueue_head(&virthbainfo->rsp_queue);
513 spin_lock_init(&virthbainfo->privlock);
514 memset(&virthbainfo->pending, 0, sizeof(virthbainfo->pending));
515 virthbainfo->serverdown = false;
516 virthbainfo->serverchangingstate = false;
518 virthbainfo->intr = virtpcidev->intr;
519 /* save of host within virthba_info */
520 virthbainfo->scsihost = scsihost;
522 /* save of host within virtpci_dev */
523 virtpcidev->scsi.scsihost = scsihost;
525 /* Setup workqueue for serverdown messages */
526 INIT_WORK(&virthbainfo->serverdown_completion,
527 virthba_serverdown_complete);
/* start in polling mode; switched to interrupts below if the IRQ hooks */
529 writeq(readq(&virthbainfo->chinfo.queueinfo->chan->features) |
530 ULTRA_IO_CHANNEL_IS_POLLING,
531 &virthbainfo->chinfo.queueinfo->chan->features);
532 /* start thread that will receive scsicmnd responses */
534 channel_header = virthbainfo->chinfo.queueinfo->chan;
535 pqhdr = (struct signal_queue_header __iomem *)
536 ((char __iomem *)channel_header +
537 readq(&channel_header->ch_space_offset)) + IOCHAN_FROM_IOPART;
538 virthbainfo->flags_addr = &pqhdr->features;
540 if (!uisthread_start(&virthbainfo->chinfo.threadinfo,
541 process_incoming_rsps,
542 virthbainfo, "vhba_incoming")) {
543 /* decr refcount on scsihost which was incremented by
544 * scsi_add_host so the scsi_host gets deleted
546 POSTCODE_LINUX_2(VHBA_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
547 scsi_host_put(scsihost);
/* the low 6 bits of the recv IRQ handle encode the vector number */
550 virthbainfo->interrupt_vector =
551 virthbainfo->intr.recv_irq_handle & INTERRUPT_VECTOR_MASK;
552 rsp = request_irq(virthbainfo->interrupt_vector, handler, IRQF_SHARED,
553 scsihost->hostt->name, virthbainfo);
555 virthbainfo->interrupt_vector = -1;
556 POSTCODE_LINUX_2(VHBA_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
/* IRQ acquired: advertise interrupt-driven operation to the IOVM */
558 u64 __iomem *features_addr =
559 &virthbainfo->chinfo.queueinfo->chan->features;
560 mask = ~(ULTRA_IO_CHANNEL_IS_POLLING |
561 ULTRA_IO_DRIVER_DISABLES_INTS);
562 uisqueue_interlocked_and(features_addr, mask);
563 mask = ULTRA_IO_DRIVER_ENABLES_INTS;
564 uisqueue_interlocked_or(features_addr, mask);
/* interrupts will wake the thread, so it can sleep far longer */
565 rsltq_wait_usecs = 4000000;
568 scsi_scan_host(scsihost);
570 POSTCODE_LINUX_2(VHBA_PROBE_EXIT_PC, POSTCODE_SEVERITY_INFO);
/* virtpci remove: tear down in reverse-probe order -- free the IRQ (if
 * one was hooked), unregister from the SCSI midlayer, stop the response
 * thread, then drop the final Scsi_Host reference. */
575 virthba_remove(struct virtpci_dev *virtpcidev)
577 struct virthba_info *virthbainfo;
578 struct Scsi_Host *scsihost =
579 (struct Scsi_Host *)virtpcidev->scsi.scsihost;
581 virthbainfo = (struct virthba_info *)scsihost->hostdata;
582 if (virthbainfo->interrupt_vector != -1)
583 free_irq(virthbainfo->interrupt_vector, virthbainfo);
585 scsi_remove_host(scsihost);
587 uisthread_stop(&virthbainfo->chinfo.threadinfo);
589 /* decr refcount on scsihost which was incremented by
590 * scsi_add_host so the scsi_host gets deleted
592 scsi_host_put(scsihost);
/* Build a CMD_VDISKMGMT_TYPE cmdrsp for vdest, submit it to the IOpart
 * queue, and block until complete_vdiskmgmt_command() stores a real
 * result over the 0xffff sentinel and wakes notifyevent.  Rejected
 * (FAILED) when the server is down/changing state or when the cmdrsp
 * cannot be allocated.
 * FIX: "&notifyevent"/"&notifyresult" had been corrupted to
 * "¬ifyevent"/"¬ifyresult" (HTML-entity mangling of "&not");
 * restored the address-of expressions. */
596 forward_vdiskmgmt_command(enum vdisk_mgmt_types vdiskcmdtype,
597 struct Scsi_Host *scsihost,
598 struct uisscsi_dest *vdest)
600 struct uiscmdrsp *cmdrsp;
601 struct virthba_info *virthbainfo =
602 (struct virthba_info *)scsihost->hostdata;
603 int notifyresult = 0xffff;
604 wait_queue_head_t notifyevent;
606 if (virthbainfo->serverdown || virthbainfo->serverchangingstate)
609 cmdrsp = kzalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
611 return FAILED; /* reject */
613 init_waitqueue_head(&notifyevent);
615 /* issue VDISK_MGMT_CMD
616 * set type to command - as opposed to task mgmt
618 cmdrsp->cmdtype = CMD_VDISKMGMT_TYPE;
619 /* specify the event that has to be triggered when this cmd is
622 cmdrsp->vdiskmgmt.notify = (void *)&notifyevent;
623 cmdrsp->vdiskmgmt.notifyresult = (void *)&notifyresult;
625 /* save destination */
626 cmdrsp->vdiskmgmt.vdisktype = vdiskcmdtype;
627 cmdrsp->vdiskmgmt.vdest.channel = vdest->channel;
628 cmdrsp->vdiskmgmt.vdest.id = vdest->id;
629 cmdrsp->vdiskmgmt.vdest.lun = vdest->lun;
630 cmdrsp->vdiskmgmt.scsicmd =
632 add_scsipending_entry_with_wait(virthbainfo, CMD_VDISKMGMT_TYPE,
635 uisqueue_put_cmdrsp_with_lock_client(virthbainfo->chinfo.queueinfo,
636 cmdrsp, IOCHAN_TO_IOPART,
637 &virthbainfo->chinfo.insertlock,
638 DONT_ISSUE_INTERRUPT, (u64)NULL,
640 wait_event(notifyevent, notifyresult != 0xffff);
645 /*****************************************************/
646 /* Scsi Host support functions */
647 /*****************************************************/
/* Build a CMD_SCSITASKMGMT_TYPE cmdrsp for the addressed device, submit
 * it to the IOpart queue, and block until complete_taskmgmt_command()
 * stores a real result over the 0xffff sentinel and wakes notifyevent.
 * Rejected (FAILED) when the server is down/changing state or when the
 * cmdrsp cannot be allocated.
 * FIX: "&notifyevent"/"&notifyresult" had been corrupted to
 * "¬ifyevent"/"¬ifyresult" (HTML-entity mangling of "&not");
 * restored the address-of expressions. */
650 forward_taskmgmt_command(enum task_mgmt_types tasktype,
651 struct scsi_device *scsidev)
653 struct uiscmdrsp *cmdrsp;
654 struct virthba_info *virthbainfo =
655 (struct virthba_info *)scsidev->host->hostdata;
656 int notifyresult = 0xffff;
657 wait_queue_head_t notifyevent;
659 if (virthbainfo->serverdown || virthbainfo->serverchangingstate)
662 cmdrsp = kzalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
664 return FAILED; /* reject */
666 init_waitqueue_head(&notifyevent);
668 /* issue TASK_MGMT_ABORT_TASK */
669 /* set type to command - as opposed to task mgmt */
670 cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
671 /* specify the event that has to be triggered when this */
672 /* cmd is complete */
673 cmdrsp->scsitaskmgmt.notify = (void *)&notifyevent;
674 cmdrsp->scsitaskmgmt.notifyresult = (void *)&notifyresult;
676 /* save destination */
677 cmdrsp->scsitaskmgmt.tasktype = tasktype;
678 cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel;
679 cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
680 cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
681 cmdrsp->scsitaskmgmt.scsicmd =
683 add_scsipending_entry_with_wait(virthbainfo,
684 CMD_SCSITASKMGMT_TYPE,
687 uisqueue_put_cmdrsp_with_lock_client(virthbainfo->chinfo.queueinfo,
688 cmdrsp, IOCHAN_TO_IOPART,
689 &virthbainfo->chinfo.insertlock,
690 DONT_ISSUE_INTERRUPT, (u64)NULL,
692 wait_event(notifyevent, notifyresult != 0xffff);
697 /* The abort handler returns SUCCESS if it has succeeded to make LLDD
698 * and all related hardware forget about the scmd.
/* eh_abort_handler: bump the (capped) error count on the addressed
 * vdisk and arm its ios_threshold countdown, then forward
 * TASK_MGMT_ABORT_TASK to the IO partition. */
701 virthba_abort_handler(struct scsi_cmnd *scsicmd)
703 /* issue TASK_MGMT_ABORT_TASK */
704 struct scsi_device *scsidev;
705 struct virtdisk_info *vdisk;
707 scsidev = scsicmd->device;
708 for (vdisk = &((struct virthba_info *)scsidev->host->hostdata)->head;
709 vdisk->next; vdisk = vdisk->next) {
710 if ((scsidev->channel == vdisk->channel) &&
711 (scsidev->id == vdisk->id) &&
712 (scsidev->lun == vdisk->lun)) {
713 if (atomic_read(&vdisk->error_count) <
714 VIRTHBA_ERROR_COUNT) {
715 atomic_inc(&vdisk->error_count);
716 POSTCODE_LINUX_2(VHBA_COMMAND_HANDLER_PC,
717 POSTCODE_SEVERITY_INFO);
719 atomic_set(&vdisk->ios_threshold,
720 IOS_ERROR_THRESHOLD);
723 return forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsicmd->device);
/* eh_bus_reset_handler: same per-disk error bookkeeping as the abort
 * handler, then forward TASK_MGMT_BUS_RESET to the IO partition. */
727 virthba_bus_reset_handler(struct scsi_cmnd *scsicmd)
729 /* issue TASK_MGMT_TARGET_RESET for each target on the bus */
730 struct scsi_device *scsidev;
731 struct virtdisk_info *vdisk;
733 scsidev = scsicmd->device;
734 for (vdisk = &((struct virthba_info *)scsidev->host->hostdata)->head;
735 vdisk->next; vdisk = vdisk->next) {
736 if ((scsidev->channel == vdisk->channel) &&
737 (scsidev->id == vdisk->id) &&
738 (scsidev->lun == vdisk->lun)) {
739 if (atomic_read(&vdisk->error_count) <
740 VIRTHBA_ERROR_COUNT) {
741 atomic_inc(&vdisk->error_count);
742 POSTCODE_LINUX_2(VHBA_COMMAND_HANDLER_PC,
743 POSTCODE_SEVERITY_INFO);
745 atomic_set(&vdisk->ios_threshold,
746 IOS_ERROR_THRESHOLD);
749 return forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsicmd->device);
/* eh_device_reset_handler: same per-disk error bookkeeping as the abort
 * handler, then forward TASK_MGMT_LUN_RESET to the IO partition. */
753 virthba_device_reset_handler(struct scsi_cmnd *scsicmd)
755 /* issue TASK_MGMT_LUN_RESET */
756 struct scsi_device *scsidev;
757 struct virtdisk_info *vdisk;
759 scsidev = scsicmd->device;
760 for (vdisk = &((struct virthba_info *)scsidev->host->hostdata)->head;
761 vdisk->next; vdisk = vdisk->next) {
762 if ((scsidev->channel == vdisk->channel) &&
763 (scsidev->id == vdisk->id) &&
764 (scsidev->lun == vdisk->lun)) {
765 if (atomic_read(&vdisk->error_count) <
766 VIRTHBA_ERROR_COUNT) {
767 atomic_inc(&vdisk->error_count);
768 POSTCODE_LINUX_2(VHBA_COMMAND_HANDLER_PC,
769 POSTCODE_SEVERITY_INFO);
771 atomic_set(&vdisk->ios_threshold,
772 IOS_ERROR_THRESHOLD);
775 return forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsicmd->device);
/* eh_host_reset_handler.  Body not visible in this excerpt; per the
 * in-line comment it would reset every target on every bus -- confirm
 * behavior against the full source. */
779 virthba_host_reset_handler(struct scsi_cmnd *scsicmd)
781 /* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */
/* scsi_host_template.info hook: format "virthba, version ..." into a
 * shared static buffer and return it.  Not reentrant (single static
 * buffer, unsynchronized sprintf). */
785 static char virthba_get_info_str[256];
788 virthba_get_info(struct Scsi_Host *shp)
790 /* Return version string */
791 sprintf(virthba_get_info_str, "virthba, version %s\n", VIRTHBA_VERSION);
792 return virthba_get_info_str;
/* scsi_host_template.ioctl hook.  Implementation is not visible in this
 * excerpt -- presumably rejects unsupported ioctls; confirm against the
 * full source. */
796 virthba_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
801 /* This returns SCSI_MLQUEUE_DEVICE_BUSY if the signal queue to IOpart
/* Translate a midlayer scsi_cmnd into a uiscmdrsp, register it in the
 * pending[] table (the slot index travels across the channel in place of
 * the pointer), copy the CDB/destination and the scatter-gather list as
 * physical address/length pairs, then insert it into the TO_IOPART
 * queue.  Returns SCSI_MLQUEUE_DEVICE_BUSY for transient full/down
 * conditions so the midlayer retries, 1 to hard-reject. */
805 virthba_queue_command_lck(struct scsi_cmnd *scsicmd,
806 void (*virthba_cmnd_done)(struct scsi_cmnd *))
808 struct scsi_device *scsidev = scsicmd->device;
811 unsigned char *cdb = scsicmd->cmnd;
812 struct Scsi_Host *scsihost = scsidev->host;
813 struct uiscmdrsp *cmdrsp;
815 struct virthba_info *virthbainfo =
816 (struct virthba_info *)scsihost->hostdata;
817 struct scatterlist *sg = NULL;
818 struct scatterlist *sgl = NULL;
821 if (virthbainfo->serverdown || virthbainfo->serverchangingstate)
822 return SCSI_MLQUEUE_DEVICE_BUSY;
823 cmdrsp = kzalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
825 return 1; /* reject the command */
827 /* now saving everything we need from scsi_cmd into cmdrsp
828 * before we queue cmdrsp set type to command - as opposed to
831 cmdrsp->cmdtype = CMD_SCSI_TYPE;
832 /* save the pending insertion location. Deletion from pending
833 * will return the scsicmd pointer for completion
836 add_scsipending_entry(virthbainfo, CMD_SCSI_TYPE, (void *)scsicmd);
837 if (insert_location != -1) {
838 cmdrsp->scsi.scsicmd = (void *)(uintptr_t)insert_location;
841 return SCSI_MLQUEUE_DEVICE_BUSY;
843 /* save done function that we have call when cmd is complete */
844 scsicmd->scsi_done = virthba_cmnd_done;
845 /* save destination */
846 cmdrsp->scsi.vdest.channel = scsidev->channel;
847 cmdrsp->scsi.vdest.id = scsidev->id;
848 cmdrsp->scsi.vdest.lun = scsidev->lun;
850 cmdrsp->scsi.data_dir = scsicmd->sc_data_direction;
851 memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);
853 cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);
855 /* keep track of the max buffer length so far. */
856 if (cmdrsp->scsi.bufflen > max_buff_len)
857 max_buff_len = cmdrsp->scsi.bufflen;
/* more SG fragments than gpi_list can describe -- reject outright */
859 if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO) {
860 del_scsipending_entry(virthbainfo, (uintptr_t)insert_location);
862 return 1; /* reject the command */
865 /* This is what we USED to do when we assumed we were running */
866 /* uissd & virthba on the same Linux system. */
867 /* cmdrsp->scsi.buffer = scsicmd->request_buffer; */
868 /* The following code does NOT make that assumption. */
869 /* convert buffer to phys information */
870 if (scsi_sg_count(scsicmd) == 0) {
871 if (scsi_bufflen(scsicmd) > 0) {
872 BUG_ON(scsi_sg_count(scsicmd) == 0);
875 /* buffer is scatterlist - copy it out */
876 sgl = scsi_sglist(scsicmd);
878 for_each_sg(sgl, sg, scsi_sg_count(scsicmd), i) {
879 cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
880 cmdrsp->scsi.gpi_list[i].length = sg->length;
884 /* BUG(); ***** For now, let it fail in uissd
885 * if it is a problem, as it might just
890 cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);
894 i = uisqueue_put_cmdrsp_with_lock_client(virthbainfo->chinfo.queueinfo,
895 cmdrsp, IOCHAN_TO_IOPART,
896 &virthbainfo->chinfo.
898 DONT_ISSUE_INTERRUPT,
899 (u64)NULL, DONT_WAIT, "vhba");
901 /* queue must be full - and we said don't wait - return busy */
903 del_scsipending_entry(virthbainfo, (uintptr_t)insert_location);
904 return SCSI_MLQUEUE_DEVICE_BUSY;
907 /* we're done with cmdrsp space - data from it has been copied
908 * into channel - free it now.
911 return 0; /* non-zero implies host/device is busy */
/* Midlayer callback before scanning a new device: record the disk in
 * the adapter's virtdisk_info list unless an entry for this
 * channel/id/lun already exists.  Always returns 0 -- allocation
 * failure is deliberately treated as success. */
915 virthba_slave_alloc(struct scsi_device *scsidev)
917 /* this called by the midlayer before scan for new devices -
918 * LLD can alloc any struct & do init if needed.
920 struct virtdisk_info *vdisk;
921 struct virtdisk_info *tmpvdisk;
922 struct virthba_info *virthbainfo;
923 struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
925 virthbainfo = (struct virthba_info *)scsihost->hostdata;
927 return 0; /* even though we errored, treat as success */
929 for (vdisk = &virthbainfo->head; vdisk->next; vdisk = vdisk->next) {
930 if (vdisk->next->valid &&
931 (vdisk->next->channel == scsidev->channel) &&
932 (vdisk->next->id == scsidev->id) &&
933 (vdisk->next->lun == scsidev->lun))
936 tmpvdisk = kzalloc(sizeof(*tmpvdisk), GFP_ATOMIC);
940 tmpvdisk->channel = scsidev->channel;
941 tmpvdisk->id = scsidev->id;
942 tmpvdisk->lun = scsidev->lun;
/* append at the tail (vdisk points at the last node after the loop) */
944 vdisk->next = tmpvdisk;
945 return 0; /* success */
/* Midlayer configure callback: nothing to configure per-device. */
949 virthba_slave_configure(struct scsi_device *scsidev)
951 return 0; /* success */
/* Midlayer teardown callback: unlink this device's virtdisk_info entry
 * from the adapter list (the kfree of delvdisk is outside this excerpt). */
955 virthba_slave_destroy(struct scsi_device *scsidev)
957 /* midlevel calls this after device has been quiesced and
958 * before it is to be deleted.
960 struct virtdisk_info *vdisk, *delvdisk;
961 struct virthba_info *virthbainfo;
962 struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
964 virthbainfo = (struct virthba_info *)scsihost->hostdata;
965 for (vdisk = &virthbainfo->head; vdisk->next; vdisk = vdisk->next) {
966 if (vdisk->next->valid &&
967 (vdisk->next->channel == scsidev->channel) &&
968 (vdisk->next->id == scsidev->id) &&
969 (vdisk->next->lun == scsidev->lun)) {
970 delvdisk = vdisk->next;
971 vdisk->next = vdisk->next->next;
978 /*****************************************************/
979 /* Scsi Cmnd support thread */
980 /*****************************************************/
/* Error path of command completion: copy the sense data back for the
 * midlayer, skip bookkeeping for expected disk-not-present INQUIRY
 * failures, and bump the addressed disk's (capped) error counter while
 * arming its ios_threshold countdown. */
983 do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
985 struct virtdisk_info *vdisk;
986 struct scsi_device *scsidev;
987 struct sense_data *sd;
989 scsidev = scsicmd->device;
990 memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);
991 sd = (struct sense_data *)scsicmd->sense_buffer;
993 /* Do not log errors for disk-not-present inquiries */
994 if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
995 (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
996 (cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT))
999 /* Okay see what our error_count is here.... */
1000 for (vdisk = &((struct virthba_info *)scsidev->host->hostdata)->head;
1001 vdisk->next; vdisk = vdisk->next) {
1002 if ((scsidev->channel != vdisk->channel) ||
1003 (scsidev->id != vdisk->id) ||
1004 (scsidev->lun != vdisk->lun))
1007 if (atomic_read(&vdisk->error_count) < VIRTHBA_ERROR_COUNT) {
1008 atomic_inc(&vdisk->error_count);
1009 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
/* Success path of command completion.  For an INQUIRY against an absent
 * disk, fabricate a "device not present" inquiry result (so Linux still
 * has a LUN 0 to send REPORT LUNS to) and copy it into the caller's
 * flat buffer or SG list.  Also counts down the addressed disk's
 * ios_threshold, clearing error_count when it reaches zero. */
1015 do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
1017 struct scsi_device *scsidev;
1018 unsigned char buf[36];
1019 struct scatterlist *sg;
1022 char *thispage_orig;
1024 struct virtdisk_info *vdisk;
1026 scsidev = scsicmd->device;
1027 if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
1028 (cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN)) {
1029 if (cmdrsp->scsi.no_disk_result == 0)
1032 /* Linux scsi code is weird; it wants
1033 * a device at Lun 0 to issue report
1034 * luns, but we don't want a disk
1035 * there so we'll present a processor
1037 SET_NO_DISK_INQUIRY_RESULT(buf, cmdrsp->scsi.bufflen,
1039 DEV_DISK_CAPABLE_NOT_PRESENT,
1042 if (scsi_sg_count(scsicmd) == 0) {
1043 if (scsi_bufflen(scsicmd) > 0) {
1044 BUG_ON(scsi_sg_count(scsicmd) ==
1047 memcpy(scsi_sglist(scsicmd), buf,
1048 cmdrsp->scsi.bufflen);
/* copy the fabricated inquiry data fragment-by-fragment via kmap */
1052 sg = scsi_sglist(scsicmd);
1053 for (i = 0; i < scsi_sg_count(scsicmd); i++) {
1054 thispage_orig = kmap_atomic(sg_page(sg + i));
1055 thispage = (void *)((unsigned long)thispage_orig |
1057 memcpy(thispage, buf + bufind, sg[i].length);
1058 kunmap_atomic(thispage_orig);
1059 bufind += sg[i].length;
1062 vdisk = &((struct virthba_info *)scsidev->host->hostdata)->head;
1063 for ( ; vdisk->next; vdisk = vdisk->next) {
1064 if ((scsidev->channel != vdisk->channel) ||
1065 (scsidev->id != vdisk->id) ||
1066 (scsidev->lun != vdisk->lun))
1069 if (atomic_read(&vdisk->ios_threshold) > 0) {
1070 atomic_dec(&vdisk->ios_threshold);
/* enough clean IOs in a row: forgive past errors */
1071 if (atomic_read(&vdisk->ios_threshold) == 0) {
1072 atomic_set(&vdisk->error_count, 0);
/* Finish a SCSI command returned by the IOVM: record its linuxstat as
 * the midlayer result, run the error/success bookkeeping, then invoke
 * the scsi_done callback saved at queuecommand time. */
1080 complete_scsi_command(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
1082 /* take what we need out of cmdrsp and complete the scsicmd */
1083 scsicmd->result = cmdrsp->scsi.linuxstat;
1084 if (cmdrsp->scsi.linuxstat)
1085 do_scsi_linuxstat(cmdrsp, scsicmd);
1087 do_scsi_nolinuxstat(cmdrsp, scsicmd);
1089 if (scsicmd->scsi_done)
1090 scsicmd->scsi_done(scsicmd);
/* Publish a vdiskmgmt result through the notifyresult pointer and wake
 * the forward_vdiskmgmt_command() caller sleeping on notify. */
1094 complete_vdiskmgmt_command(struct uiscmdrsp *cmdrsp)
1096 /* copy the result of the taskmgmt and */
1097 /* wake up the error handler that is waiting for this */
1098 *(int *)cmdrsp->vdiskmgmt.notifyresult = cmdrsp->vdiskmgmt.result;
1099 wake_up_all((wait_queue_head_t *)cmdrsp->vdiskmgmt.notify);
/* Complete a SCSI task-management command: store the result where the
 * originator (the error handler) can read it, then wake all waiters on
 * the associated wait-queue.
 */
1103 complete_taskmgmt_command(struct uiscmdrsp *cmdrsp)
1105 /* copy the result of the taskmgmt and */
1106 /* wake up the error handler that is waiting for this */
1107 *(int *)cmdrsp->scsitaskmgmt.notifyresult =
1108 cmdrsp->scsitaskmgmt.result;
1109 wake_up_all((wait_queue_head_t *)cmdrsp->scsitaskmgmt.notify);
/* Drain the response queue from the IO partition: under the insert lock,
 * pull cmdrsp entries off the channel and dispatch each by cmdtype —
 * SCSI completion, task-management completion, guest disk add/remove
 * notification, or vdisk-management completion.
 * NOTE(review): the surrounding loop construct and early-return lines are
 * missing from this view; dispatch presumably repeats until the queue is
 * empty.
 */
1113 drain_queue(struct virthba_info *virthbainfo, struct chaninfo *dc,
1114 struct uiscmdrsp *cmdrsp)
1116 unsigned long flags;
1118 struct scsi_cmnd *scsicmd;
1119 struct Scsi_Host *shost = virthbainfo->scsihost;
/* Serialize channel access with command insertion */
1122 spin_lock_irqsave(&virthbainfo->chinfo.insertlock, flags);
1123 if (!spar_channel_client_acquire_os(dc->queueinfo->chan,
/* Channel not acquirable: drop the lock and count the failure */
1125 spin_unlock_irqrestore(&virthbainfo->chinfo.insertlock,
1127 virthbainfo->acquire_failed_cnt++;
1130 qrslt = uisqueue_get_cmdrsp(dc->queueinfo, cmdrsp,
1131 IOCHAN_FROM_IOPART);
1132 spar_channel_client_release_os(dc->queueinfo->chan, "vhba");
1133 spin_unlock_irqrestore(&virthbainfo->chinfo.insertlock, flags);
/* Dispatch on the response type pulled from the channel */
1136 if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
1137 /* scsicmd location is returned by the
/* Look up and remove the pending entry keyed by the echoed scsicmd handle */
1140 scsicmd = del_scsipending_entry(virthbainfo,
1142 cmdrsp->scsi.scsicmd);
1145 /* complete the orig cmd */
1146 complete_scsi_command(cmdrsp, scsicmd);
1147 } else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
/* Ignore responses whose pending entry has already been removed */
1148 if (!del_scsipending_entry(virthbainfo,
1149 (uintptr_t)cmdrsp->scsitaskmgmt.scsicmd))
1151 complete_taskmgmt_command(cmdrsp);
1152 } else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE) {
1153 /* The vHba pointer has no meaning in
1154 * a Client/Guest Partition. Let's be
1155 * safe and set it to NULL now. Do
1156 * not use it here! */
1157 cmdrsp->disknotify.v_hba = NULL;
1158 process_disk_notify(shost, cmdrsp);
1159 } else if (cmdrsp->cmdtype == CMD_VDISKMGMT_TYPE) {
1160 if (!del_scsipending_entry(virthbainfo,
1162 cmdrsp->vdiskmgmt.scsicmd))
1164 complete_vdiskmgmt_command(cmdrsp);
1166 /* cmdrsp is now available for reuse */
1170 /* main function for the thread that waits for scsi commands to arrive
1171 * in a specified queue
/* Kernel-thread body: allocate one reusable cmdrsp, then repeatedly wait
 * (interrupt or poll timeout) for responses and drain the channel queue,
 * re-enabling interrupts after each drain.
 * NOTE(review): the loop construct and cleanup path lines are missing from
 * this view.
 */
1174 process_incoming_rsps(void *v)
1176 struct virthba_info *virthbainfo = v;
1177 struct chaninfo *dc = &virthbainfo->chinfo;
1178 struct uiscmdrsp *cmdrsp = NULL;
1179 const int SZ = sizeof(struct uiscmdrsp);
1181 unsigned long long rc1;
1183 UIS_DAEMONIZE("vhba_incoming");
1184 /* alloc once and reuse */
/* GFP_ATOMIC: no-sleep allocation; on failure the thread exits immediately */
1185 cmdrsp = kmalloc(SZ, GFP_ATOMIC);
1187 complete_and_exit(&dc->threadinfo.has_stopped, 0);
1190 mask = ULTRA_CHANNEL_ENABLE_INTS;
/* Exit the service loop when the thread is asked to stop */
1192 if (kthread_should_stop())
/* Sleep until the interrupt handler sets interrupt_rcvd, or the poll
 * interval (rsltq_wait_usecs) elapses — whichever comes first.
 */
1194 wait_event_interruptible_timeout(virthbainfo->rsp_queue,
1195 (atomic_read(&virthbainfo->interrupt_rcvd) == 1),
1196 usecs_to_jiffies(rsltq_wait_usecs));
1197 atomic_set(&virthbainfo->interrupt_rcvd, 0);
1199 drain_queue(virthbainfo, dc, cmdrsp);
/* Atomically re-enable channel interrupts after draining */
1200 rc1 = uisqueue_interlocked_or(virthbainfo->flags_addr, mask);
/* Signal thread termination to whoever is waiting in uisthread_stop() */
1205 complete_and_exit(&dc->threadinfo.has_stopped, 0);
1208 /*****************************************************/
1209 /* Debugfs filesystem functions */
1210 /*****************************************************/
/* debugfs "info" read handler: format per-HBA statistics (buffer limits,
 * poll interval, interrupt counters, channel feature flags, acquire
 * failures) into a kernel buffer and copy the requested window to
 * userspace via simple_read_from_buffer().
 * NOTE(review): declarations of i/str_pos/vbuf and the kfree/return lines
 * are missing from this view.
 */
1212 static ssize_t info_debugfs_read(struct file *file,
1213 char __user *buf, size_t len, loff_t *offset)
1215 ssize_t bytes_read = 0;
1217 u64 phys_flags_addr;
1219 struct virthba_info *virthbainfo;
/* Scratch buffer sized by the caller's read length */
1224 vbuf = kzalloc(len, GFP_KERNEL);
/* Walk every open vHBA slot and append its stats */
1228 for (i = 0; i < VIRTHBASOPENMAX; i++) {
1229 if (!virthbas_open[i].virthbainfo)
1232 virthbainfo = virthbas_open[i].virthbainfo;
/* scnprintf never overruns: each call is bounded by the space remaining */
1234 str_pos += scnprintf(vbuf + str_pos,
1235 len - str_pos, "max_buff_len:%u\n",
1238 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1239 "\nvirthba result queue poll wait:%d usecs.\n",
1241 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1242 "\ninterrupts_rcvd = %llu, interrupts_disabled = %llu\n",
1243 virthbainfo->interrupts_rcvd,
1244 virthbainfo->interrupts_disabled);
1245 str_pos += scnprintf(vbuf + str_pos,
1246 len - str_pos, "\ninterrupts_notme = %llu,\n",
1247 virthbainfo->interrupts_notme);
/* Report the physical address behind the ioremapped flags pointer */
1248 phys_flags_addr = virt_to_phys((__force void *)
1249 virthbainfo->flags_addr);
1250 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1251 "flags_addr = %p, phys_flags_addr=0x%016llx, FeatureFlags=%llu\n",
1252 virthbainfo->flags_addr, phys_flags_addr,
1253 (__le64)readq(virthbainfo->flags_addr));
1254 str_pos += scnprintf(vbuf + str_pos,
1255 len - str_pos, "acquire_failed_cnt:%llu\n",
1256 virthbainfo->acquire_failed_cnt);
1257 str_pos += scnprintf(vbuf + str_pos, len - str_pos, "\n");
/* Copy the formatted text out, honoring the caller's offset for seeks */
1260 bytes_read = simple_read_from_buffer(buf, len, offset, vbuf, str_pos);
/* debugfs "enable_ints" write handler: parse an integer from userspace and
 * switch every open vHBA between interrupt-driven mode (value 1: driver
 * enables ints, 4 s poll fallback) and polling mode (any other value:
 * channel polls, 4 ms poll interval), by atomically updating the channel
 * feature-flag bits.
 * NOTE(review): declarations of buf/new_value/mask and the return lines
 * are missing from this view.
 */
1265 static ssize_t enable_ints_write(struct file *file, const char __user *buffer,
1266 size_t count, loff_t *ppos)
1270 struct virthba_info *virthbainfo;
1272 u64 __iomem *features_addr;
/* Reject input too large for the local buffer (leaves room for the NUL) */
1275 if (count >= ARRAY_SIZE(buf))
1279 if (copy_from_user(buf, buffer, count))
/* Parse the decimal mode value; non-numeric input is rejected */
1282 i = kstrtoint(buf, 10, &new_value);
1287 /* set all counts to new_value usually 0 */
1288 for (i = 0; i < VIRTHBASOPENMAX; i++) {
1289 if (virthbas_open[i].virthbainfo) {
1290 virthbainfo = virthbas_open[i].virthbainfo;
1292 &virthbainfo->chinfo.queueinfo->chan->features;
1293 if (new_value == 1) {
/* Interrupt mode: clear polling bits, set driver-enables-ints */
1294 mask = ~(ULTRA_IO_CHANNEL_IS_POLLING |
1295 ULTRA_IO_DRIVER_DISABLES_INTS);
1296 uisqueue_interlocked_and(features_addr, mask);
1297 mask = ULTRA_IO_DRIVER_ENABLES_INTS;
1298 uisqueue_interlocked_or(features_addr, mask);
/* Long 4 s poll timeout — interrupts are expected to wake us */
1299 rsltq_wait_usecs = 4000000;
/* Polling mode: clear interrupt bits, set channel-is-polling */
1301 mask = ~(ULTRA_IO_DRIVER_ENABLES_INTS |
1302 ULTRA_IO_DRIVER_DISABLES_INTS);
1303 uisqueue_interlocked_and(features_addr, mask);
1304 mask = ULTRA_IO_CHANNEL_IS_POLLING;
1305 uisqueue_interlocked_or(features_addr, mask);
/* Short 4 ms poll interval drives the response loop */
1306 rsltq_wait_usecs = 4000;
1313 /* As per VirtpciFunc returns 1 for success and 0 for failure */
/* Resume a vHBA after the IO partition (server) comes back: transition the
 * channel to ATTACHED, restart the response-processing thread, and clear
 * the serverdown/changing-state flags.
 * NOTE(review): the early-return lines for the guard conditions are missing
 * from this view.
 */
1315 virthba_serverup(struct virtpci_dev *virtpcidev)
/* hostdata of the Scsi_Host holds our per-HBA state */
1317 struct virthba_info *virthbainfo =
1318 (struct virthba_info *)((struct Scsi_Host *)virtpcidev->scsi.
1319 scsihost)->hostdata;
/* Nothing to do unless the server was actually down */
1321 if (!virthbainfo->serverdown)
/* Avoid racing an in-progress up/down transition */
1324 if (virthbainfo->serverchangingstate)
1327 virthbainfo->serverchangingstate = true;
1328 /* Must transition channel to ATTACHED state BEFORE we
1329 * can start using the device again
1331 SPAR_CHANNEL_CLIENT_TRANSITION(virthbainfo->chinfo.queueinfo->chan,
1332 dev_name(&virtpcidev->generic_dev),
1333 CHANNELCLI_ATTACHED, NULL);
1335 /* Start Processing the IOVM Response Queue Again */
1336 if (!uisthread_start(&virthbainfo->chinfo.threadinfo,
1337 process_incoming_rsps,
1338 virthbainfo, "vhba_incoming")) {
/* Back in service: clear both state flags */
1341 virthbainfo->serverdown = false;
1342 virthbainfo->serverchangingstate = false;
/* Workqueue handler run when the IO partition goes down: stop the response
 * thread, fail every still-pending request (SCSI commands get DID_RESET;
 * task-mgmt and vdisk-mgmt waiters are woken with a failure result), mark
 * the HBA serverdown, and acknowledge the pause to visorchipset.
 * NOTE(review): some case labels and the notifyresult failure values are
 * missing from this view.
 */
1348 virthba_serverdown_complete(struct work_struct *work)
1350 struct virthba_info *virthbainfo;
1351 struct virtpci_dev *virtpcidev;
1353 struct scsipending *pendingdel = NULL;
1354 struct scsi_cmnd *scsicmd = NULL;
1355 struct uiscmdrsp *cmdrsp;
1356 unsigned long flags;
/* Recover our per-HBA state from the embedded work_struct */
1358 virthbainfo = container_of(work, struct virthba_info,
1359 serverdown_completion);
1361 /* Stop Using the IOVM Response Queue (queue should be drained
/* Blocks until process_incoming_rsps() has exited */
1364 uisthread_stop(&virthbainfo->chinfo.threadinfo);
1366 /* Fail Commands that weren't completed */
1367 spin_lock_irqsave(&virthbainfo->privlock, flags);
1368 for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
1369 pendingdel = &virthbainfo->pending[i];
1370 switch (pendingdel->cmdtype) {
/* Pending SCSI command: complete it back to the midlayer as DID_RESET */
1372 scsicmd = (struct scsi_cmnd *)pendingdel->sent;
1373 scsicmd->result = DID_RESET << 16;
1374 if (scsicmd->scsi_done)
1375 scsicmd->scsi_done(scsicmd);
1377 case CMD_SCSITASKMGMT_TYPE:
/* Wake the error handler blocked on this task-mgmt request */
1378 cmdrsp = (struct uiscmdrsp *)pendingdel->sent;
1379 wake_up_all((wait_queue_head_t *)
1380 cmdrsp->scsitaskmgmt.notify);
1381 *(int *)cmdrsp->scsitaskmgmt.notifyresult =
1384 case CMD_VDISKMGMT_TYPE:
/* Publish the failure result, then wake the vdisk-mgmt waiter */
1385 cmdrsp = (struct uiscmdrsp *)pendingdel->sent;
1386 *(int *)cmdrsp->vdiskmgmt.notifyresult =
1388 wake_up_all((wait_queue_head_t *)
1389 cmdrsp->vdiskmgmt.notify);
/* Clear the slot so it can be reused */
1394 pendingdel->cmdtype = 0;
1395 pendingdel->sent = NULL;
1397 spin_unlock_irqrestore(&virthbainfo->privlock, flags);
1399 virtpcidev = virthbainfo->virtpcidev;
1401 virthbainfo->serverdown = true;
1402 virthbainfo->serverchangingstate = false;
1403 /* Return the ServerDown response to Command */
1404 visorchipset_device_pause_response(virtpcidev->bus_no,
1405 virtpcidev->device_no, 0);
1408 /* As per VirtpciFunc returns 1 for success and 0 for failure */
/* Entry point invoked when the IO partition pauses: if the HBA is running
 * and not already mid-transition, queue the serverdown completion work;
 * if a transition is already in flight, the call is rejected.
 * NOTE(review): the return statements and the state parameter's use are
 * missing from this view.
 */
1410 virthba_serverdown(struct virtpci_dev *virtpcidev, u32 state)
1414 struct virthba_info *virthbainfo =
1415 (struct virthba_info *)((struct Scsi_Host *)virtpcidev->scsi.
1416 scsihost)->hostdata;
/* Only initiate a shutdown when the HBA is up and stable */
1418 if (!virthbainfo->serverdown && !virthbainfo->serverchangingstate) {
1419 virthbainfo->serverchangingstate = true;
/* The heavy teardown runs in process context on the dedicated workqueue */
1420 queue_work(virthba_serverdown_workqueue,
1421 &virthbainfo->serverdown_completion);
1422 } else if (virthbainfo->serverchangingstate) {
1429 /*****************************************************/
1430 /* Module Init & Exit functions */
1431 /*****************************************************/
/* Parse a single option token from the module's option string.
 * NOTE(review): the body of virthba_parse_line is entirely absent from
 * this view; only its signature is visible.
 */
1434 virthba_parse_line(char *str)
/* Split the module option string on spaces and parse each token in turn. */
1440 virthba_parse_options(char *line)
1444 POSTCODE_LINUX_2(VHBA_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
/* Nothing to do for a NULL or empty option string */
1445 if (!line || !*line)
/* Walk space-separated tokens; next is advanced inside the loop */
1447 while ((line = next)) {
1448 next = strchr(line, ' ');
1451 virthba_parse_line(line);
1454 POSTCODE_LINUX_2(VHBA_CREATE_EXIT_PC, POSTCODE_SEVERITY_INFO);
/* Module init: parse options, register the virtpci driver, create the
 * debugfs entries ("info", "rqwait_usecs", "enable_ints"), initialize the
 * disk add/remove work item and lock, clear the open-HBA table, and create
 * the single-threaded serverdown workqueue.
 * NOTE(review): error-handling/return lines are missing from this view.
 */
1458 virthba_mod_init(void)
/* Refuse to load on non-s-Par platforms */
1463 if (!unisys_spar_platform)
1466 POSTCODE_LINUX_2(VHBA_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1467 virthba_parse_options(virthba_options);
1469 error = virtpci_register_driver(&virthba_driver);
1471 POSTCODE_LINUX_3(VHBA_CREATE_FAILURE_PC, error,
1472 POSTCODE_SEVERITY_ERR);
1474 /* create the debugfs directories and entries */
/* debugfs_create_* failures are intentionally not checked — debugfs is
 * best-effort and the driver works without it.
 */
1475 virthba_debugfs_dir = debugfs_create_dir("virthba", NULL);
1476 debugfs_create_file("info", S_IRUSR, virthba_debugfs_dir,
1477 NULL, &debugfs_info_fops);
1478 debugfs_create_u32("rqwait_usecs", S_IRUSR | S_IWUSR,
1479 virthba_debugfs_dir, &rsltq_wait_usecs);
1480 debugfs_create_file("enable_ints", S_IWUSR,
1481 virthba_debugfs_dir, NULL,
1482 &debugfs_enable_ints_fops);
1483 /* Initialize dar_work_queue */
1484 INIT_WORK(&dar_work_queue, do_disk_add_remove);
1485 spin_lock_init(&dar_work_queue_lock);
1487 /* clear out array */
1488 for (i = 0; i < VIRTHBASOPENMAX; i++)
1489 virthbas_open[i].virthbainfo = NULL;
1490 /* Initialize the serverdown workqueue */
/* Single-threaded: serverdown completions for all HBAs run serially */
1491 virthba_serverdown_workqueue =
1492 create_singlethread_workqueue("virthba_serverdown");
1493 if (!virthba_serverdown_workqueue) {
1494 POSTCODE_LINUX_2(VHBA_CREATE_FAILURE_PC,
1495 POSTCODE_SEVERITY_ERR);
1500 POSTCODE_LINUX_2(VHBA_CREATE_EXIT_PC, POSTCODE_SEVERITY_INFO);
/* sysfs store handler: parse a "channel-id-lun" triple from the user and
 * forward a VDISK_MGMT_ACQUIRE command for that destination to the IO
 * partition.
 * NOTE(review): the sscanf result-validation lines are missing from this
 * view.
 */
1505 virthba_acquire_lun(struct device *cdev, struct device_attribute *attr,
1506 const char *buf, size_t count)
1508 struct uisscsi_dest vdest;
1509 struct Scsi_Host *shost = class_to_shost(cdev);
/* Expected format: "<channel>-<id>-<lun>" */
1512 i = sscanf(buf, "%d-%d-%d", &vdest.channel, &vdest.id, &vdest.lun);
1516 return forward_vdiskmgmt_command(VDISK_MGMT_ACQUIRE, shost, &vdest);
/* sysfs store handler: parse a "channel-id-lun" triple from the user and
 * forward a VDISK_MGMT_RELEASE command for that destination to the IO
 * partition.  Mirror image of virthba_acquire_lun.
 * NOTE(review): the sscanf result-validation lines are missing from this
 * view.
 */
1520 virthba_release_lun(struct device *cdev, struct device_attribute *attr,
1521 const char *buf, size_t count)
1523 struct uisscsi_dest vdest;
1524 struct Scsi_Host *shost = class_to_shost(cdev);
/* Expected format: "<channel>-<id>-<lun>" */
1527 i = sscanf(buf, "%d-%d-%d", &vdest.channel, &vdest.id, &vdest.lun);
1531 return forward_vdiskmgmt_command(VDISK_MGMT_RELEASE, shost, &vdest);
/* Convenience macro: declare a device_attribute named
 * class_device_attr_<name> via the standard __ATTR initializer.
 */
1534 #define CLASS_DEVICE_ATTR(_name, _mode, _show, _store) \
1535 struct device_attribute class_device_attr_##_name = \
1536 __ATTR(_name, _mode, _show, _store)
/* Write-only (root) attributes for acquiring/releasing LUNs via sysfs */
1538 static CLASS_DEVICE_ATTR(acquire_lun, S_IWUSR, NULL, virthba_acquire_lun);
1539 static CLASS_DEVICE_ATTR(release_lun, S_IWUSR, NULL, virthba_release_lun);
/* Attribute table published on every Scsi_Host created by this driver */
1541 static DEVICE_ATTRIBUTE *virthba_shost_attrs[] = {
1542 &class_device_attr_acquire_lun,
1543 &class_device_attr_release_lun,
/* Module exit: unregister the virtpci driver (which removes each vHBA),
 * destroy the serverdown workqueue, and tear down the debugfs tree.
 */
1548 virthba_mod_exit(void)
1550 virtpci_unregister_driver(&virthba_driver);
1551 /* unregister is going to call virthba_remove */
1552 /* destroy serverdown completion workqueue */
/* NULL the pointer after destroy to guard against accidental reuse */
1553 if (virthba_serverdown_workqueue) {
1554 destroy_workqueue(virthba_serverdown_workqueue);
1555 virthba_serverdown_workqueue = NULL;
/* Removes the "virthba" directory and all files created under it */
1558 debugfs_remove_recursive(virthba_debugfs_dir);
1561 /* specify function to be run at module insertion time */
1562 module_init(virthba_mod_init);
1564 /* specify function to be run when module is removed */
1565 module_exit(virthba_mod_exit);
1567 MODULE_LICENSE("GPL");
1568 MODULE_AUTHOR("Usha Srinivasan");
1569 MODULE_ALIAS("uisvirthba");
1570 /* this is extracted during depmod and kept in modules.dep */
1571 /* module parameter */
/* virthba_options: space-separated option string, world-readable in sysfs */
1572 module_param(virthba_options, charp, S_IRUGO);