[Xen-devel] [PATCH V2 3/4] Introduce XEN scsiback module
From: Juergen Gross <jgross@xxxxxxxx> Introduces the XEN pvSCSI backend. With pvSCSI it is possible for a XEN domU to issue SCSI commands to a SCSI LUN assigned to that domU. The SCSI commands are passed to the pvSCSI backend in a driver domain (usually Dom0) which owns the physical device. This allows, e.g., using SCSI tape drives in a XEN domU. The code is taken from the pvSCSI implementation in XEN done by Fujitsu based on Linux kernel 2.6.18. Changes from the original version are: - port to upstream kernel - put all code in just one source file - adapt to Linux style guide - use target core infrastructure instead of doing pure pass-through - enable module unloading - support SG-list in grant page(s) - support task abort - remove redundant struct backend - allocate resources dynamically - correct minor error in scsiback_fast_flush_area - free allocated resources in case of error during I/O preparation - initialize SCSI emulation table statically - fix emulation of "report LUNs" to not need a retry Signed-off-by: Juergen Gross <jgross@xxxxxxxx> --- drivers/xen/Kconfig | 9 + drivers/xen/Makefile | 1 + drivers/xen/xen-scsiback.c | 2655 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 2665 insertions(+) create mode 100644 drivers/xen/xen-scsiback.c diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index 38fb36e..fc8f420 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -172,6 +172,15 @@ config XEN_PCIDEV_BACKEND If in doubt, say m. +config XEN_SCSI_BACKEND + tristate "XEN SCSI backend driver" + depends on SCSI && XEN && XEN_BACKEND && TARGET_CORE + help + The SCSI backend driver allows the kernel to export its SCSI devices + to other guests via a high-performance shared-memory interface. + Only needed for systems running as XEN driver domains (e.g. Dom0) and + if guests need generic access to SCSI devices. + config XEN_PRIVCMD tristate depends on XEN diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index 45e00af..b42ee75 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile @@ -33,6 +33,7 @@ obj-$(CONFIG_XEN_STUB) += xen-stub.o obj-$(CONFIG_XEN_ACPI_HOTPLUG_MEMORY) += xen-acpi-memhotplug.o obj-$(CONFIG_XEN_ACPI_HOTPLUG_CPU) += xen-acpi-cpuhotplug.o obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o +obj-$(CONFIG_XEN_SCSI_BACKEND) += xen-scsiback.o xen-evtchn-y := evtchn.o xen-gntdev-y := gntdev.o xen-gntalloc-y := gntalloc.o diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c new file mode 100644 index 0000000..6eed255 --- /dev/null +++ b/drivers/xen/xen-scsiback.c @@ -0,0 +1,2655 @@ +/* + * Xen SCSI backend driver + * + * Copyright (c) 2008, FUJITSU Limited + * + * Based on the blkback driver code.
+ * Adaptation to kernel target core infrastructure taken from vhost/scsi.c + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation; or, when distributed + * separately from the Linux kernel or incorporated into other + * software packages, subject to the following license: + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this source file (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +/* + * Patched to support >2TB drives + allow tape & autoloader operations + * 2010, Samuel Kvasnica, IMS Nanofabrication AG + */ + +#include <stdarg.h> + +#include <linux/module.h> +#include <linux/utsname.h> +#include <linux/interrupt.h> +#include <linux/slab.h> +#include <linux/wait.h> +#include <linux/sched.h> +#include <linux/blkdev.h> +#include <linux/list.h> +#include <linux/gfp.h> +#include <linux/kthread.h> +#include <linux/delay.h> +#include <linux/vmalloc.h> +#include <linux/spinlock.h> +#include <linux/configfs.h> + +#include <generated/utsrelease.h> + +#include <scsi/scsi.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_dbg.h> +#include <scsi/scsi_eh.h> +#include <scsi/scsi_tcq.h> +#include <scsi/osd_sense.h> + +#include <target/target_core_base.h> +#include <target/target_core_fabric.h> +#include <target/target_core_configfs.h> +#include <target/target_core_fabric_configfs.h> + +#include <asm/hypervisor.h> + +#include <xen/xen.h> +#include <xen/balloon.h> +#include <xen/events.h> +#include <xen/xenbus.h> +#include <xen/grant_table.h> +#include <xen/page.h> + +#include <xen/interface/grant_table.h> +#include <xen/interface/io/vscsiif.h> + +#define DPRINTK(_f, _a...)
\ + pr_debug("(file=%s, line=%d) " _f, __FILE__ , __LINE__ , ## _a) + +#define VSCSI_VERSION "v0.1" +#define VSCSI_NAMELEN 32 + +struct ids_tuple { + unsigned int hst; /* host */ + unsigned int chn; /* channel */ + unsigned int tgt; /* target */ + unsigned int lun; /* LUN */ +}; + +struct v2p_entry { + struct ids_tuple v; /* translate from */ + struct scsiback_tpg *tpg; /* translate to */ + unsigned int lun; + struct kref kref; + struct list_head l; +}; + +struct vscsibk_info { + struct xenbus_device *dev; + + domid_t domid; + unsigned int evtchn; + unsigned int irq; + + int feature; + + struct vscsiif_back_ring ring; + + spinlock_t ring_lock; + atomic_t nr_unreplied_reqs; + + spinlock_t v2p_lock; + struct list_head v2p_entry_lists; + + struct task_struct *kthread; + wait_queue_head_t waiting_to_free; + wait_queue_head_t wq; + wait_queue_head_t shutdown_wq; + unsigned int waiting_reqs; +}; + +/* theoretical maximum of grants for one request */ +#define VSCSI_MAX_GRANTS (SG_ALL + VSCSIIF_SG_TABLESIZE) + +/* + * VSCSI_GRANT_BATCH is the maximum number of grants to be processed in one + * call to map/unmap grants. Don't choose it too large, as there are arrays + * with VSCSI_GRANT_BATCH elements allocated on the stack. + */ +#define VSCSI_GRANT_BATCH 16 + +struct vscsibk_pend { + uint16_t rqid; + + uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE]; + uint8_t cmd_len; + + uint8_t sc_data_direction; + uint16_t n_sg; /* real length of SG list */ + uint16_t n_grants; /* SG pages and potentially SG list */ + uint32_t data_len; + uint32_t result; + + struct vscsibk_info *info; + struct v2p_entry *v2p; + struct scatterlist *sgl; + + uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE]; + + grant_handle_t grant_handles[VSCSI_MAX_GRANTS]; + struct page *pages[VSCSI_MAX_GRANTS]; + + struct se_cmd se_cmd; +}; + +struct scsiback_tmr { + atomic_t tmr_complete; + wait_queue_head_t tmr_wait; +}; + +struct vscsibk_emulate { + void (*pre_function)(struct vscsibk_pend *, void *); + void (*post_function)(struct vscsibk_pend *, void *); +}; + +struct scsiback_nexus { + /* Pointer to TCM session for I_T Nexus */ + struct se_session *tvn_se_sess; +}; + +struct scsiback_nacl { + /* Binary World Wide unique Port Name for pvscsi Initiator port */ + u64 iport_wwpn; + /* ASCII formatted WWPN for SAS Initiator port */ + char iport_name[VSCSI_NAMELEN]; + /* Returned by scsiback_make_nodeacl() */ + struct se_node_acl se_node_acl; +}; + +struct scsiback_tport { + /* SCSI protocol the tport is providing */ + u8 tport_proto_id; + /* Binary World Wide unique Port Name for pvscsi Target port */ + u64 tport_wwpn; + /* ASCII formatted WWPN for pvscsi Target port */ + char tport_name[VSCSI_NAMELEN]; + /* Returned by scsiback_make_tport() */ + struct se_wwn tport_wwn; +}; + +struct scsiback_tpg { + /* scsiback port target portal group tag for TCM */ + u16 tport_tpgt; + /* track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */ + int tv_tpg_port_count; + /* xen-pvscsi references to tpg_nexus, protected by tv_tpg_mutex */ + int tv_tpg_fe_count; + /* list for scsiback_list */ + struct list_head tv_tpg_list; + /* Used to protect access for tpg_nexus */ + struct mutex tv_tpg_mutex; + /* Pointer to the TCM pvscsi I_T Nexus for this TPG endpoint */ + struct scsiback_nexus *tpg_nexus; + /* Pointer back to scsiback_tport */ + struct scsiback_tport *tport; + /* Returned by scsiback_make_tpg() */ + struct se_portal_group se_tpg; + /* alias used in xenstore */ + char param_alias[VSCSI_NAMELEN]; + /* list of info structures related to this target
portal group */ + struct list_head info_list; +}; + +#define scsiback_get(_b) (atomic_inc(&(_b)->nr_unreplied_reqs)) +#define scsiback_put(_b) \ + do { \ + if (atomic_dec_and_test(&(_b)->nr_unreplied_reqs)) \ + wake_up(&(_b)->waiting_to_free);\ + } while (0) + +#define VSCSI_TIMEOUT (900*HZ) + +#define VSCSI_TYPE_HOST 1 + +/* Number of SCSI op_code */ +#define VSCSI_MAX_SCSI_OP_CODE 256 + +#define SCSIBACK_INVALID_HANDLE (~0) + +static bool log_print_stat; +module_param(log_print_stat, bool, 0644); + +static int scsiback_max_buffer_pages = 1024; +module_param_named(max_buffer_pages, scsiback_max_buffer_pages, int, 0644); +MODULE_PARM_DESC(max_buffer_pages, +"Maximum number of free pages to keep in backend buffer"); + +static struct kmem_cache *scsiback_cachep; +static DEFINE_SPINLOCK(free_pages_lock); +static int free_pages_num; +static LIST_HEAD(scsiback_free_pages); + +/* Global mutex to protect scsiback TPG list */ +static DEFINE_MUTEX(scsiback_mutex); +static LIST_HEAD(scsiback_list); + +/* Local pointer to allocated TCM configfs fabric module */ +static struct target_fabric_configfs *scsiback_fabric_configfs; + +static void put_free_pages(struct page **page, int num) +{ + unsigned long flags; + int i = free_pages_num + num, n = num; + + if (num == 0) + return; + if (i > scsiback_max_buffer_pages) { + n = min(num, i - scsiback_max_buffer_pages); + free_xenballooned_pages(n, page + num - n); + n = num - n; + } + spin_lock_irqsave(&free_pages_lock, flags); + for (i = 0; i < n; i++) + list_add(&page[i]->lru, &scsiback_free_pages); + free_pages_num += n; + spin_unlock_irqrestore(&free_pages_lock, flags); +} + +static int get_free_page(struct page **page) +{ + unsigned long flags; + + spin_lock_irqsave(&free_pages_lock, flags); + if (list_empty(&scsiback_free_pages)) { + spin_unlock_irqrestore(&free_pages_lock, flags); + return alloc_xenballooned_pages(1, page, false); + } + page[0] = list_first_entry(&scsiback_free_pages, struct page, lru); + list_del(&page[0]->lru); + free_pages_num--; + spin_unlock_irqrestore(&free_pages_lock, flags); + return 0; +} + +static unsigned long vaddr_page(struct page *page) +{ + unsigned long pfn = page_to_pfn(page); + + return (unsigned long)pfn_to_kaddr(pfn); +} + +static unsigned long vaddr(struct vscsibk_pend *req, int seg) +{ + return vaddr_page(req->pages[seg]); +} + +static void scsiback_resp_err(struct vscsibk_pend *pending_req, uint16_t asc) +{ + pending_req->sense_buffer[0] = 0x70; /* fixed, current */ + pending_req->sense_buffer[2] = ILLEGAL_REQUEST; + pending_req->sense_buffer[7] = 0xa; /* implies 18 bytes sense */ + pending_req->sense_buffer[12] = asc >> 8; + pending_req->sense_buffer[13] = asc & 0xff; + pending_req->se_cmd.residual_count = 0; + pending_req->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; +} + +static void resp_not_supported_cmd(struct vscsibk_pend *pending_req, void *data) +{ + scsiback_resp_err(pending_req, scsi_invalid_command_operation_code); +} + +static int __copy_to_sg(struct scatterlist *sgl, unsigned int nr_sg, + void *buf, unsigned int buflen, unsigned int off) +{ + struct scatterlist *sg; + void *from = buf; + void *to; + unsigned int from_rest = buflen; + unsigned int off_rest = off; + unsigned int to_capa; + unsigned int copy_size = 0; + unsigned int i; + unsigned long pfn; + + for_each_sg(sgl, sg, nr_sg, i) { + if (sg_page(sg) == NULL) { + pr_warn("%s: inconsistent length field in scatterlist\n", + __func__); + return -ENOMEM; + } + + if (off_rest >= sg->length) { + off_rest -= sg->length; + continue;
} + + to_capa = sg->length - off_rest; + copy_size = min_t(unsigned int, to_capa, from_rest); + + pfn = page_to_pfn(sg_page(sg)); + to = pfn_to_kaddr(pfn) + (sg->offset) + off_rest; + memcpy(to, from, copy_size); + + off_rest = 0; + from_rest -= copy_size; + if (from_rest == 0) + return 0; + + from += copy_size; + } + + return -ENOMEM; +} + +static int __maybe_unused __copy_from_sg(struct scatterlist *sgl, + unsigned int nr_sg, void *buf, + unsigned int buflen) +{ + struct scatterlist *sg; + void *from; + void *to = buf; + unsigned int from_rest; + unsigned int to_capa = buflen; + unsigned int copy_size; + unsigned int i; + unsigned long pfn; + + for_each_sg(sgl, sg, nr_sg, i) { + if (sg_page(sg) == NULL) { + pr_warn("%s: inconsistent length field in scatterlist\n", + __func__); + return -ENOMEM; + } + + from_rest = sg->length; + if ((from_rest > 0) && (to_capa < from_rest)) { + pr_warn("%s: no space in destination buffer\n", + __func__); + return -ENOMEM; + } + copy_size = from_rest; + + pfn = page_to_pfn(sg_page(sg)); + from = pfn_to_kaddr(pfn) + (sg->offset); + memcpy(to, from, copy_size); + + to_capa -= copy_size; + to += copy_size; + } + + return 0; +} + +/* REPORT LUNS Define*/ +#define VSCSI_REPORT_LUNS_HEADER 8 + +/* quoted scsi_debug.c/resp_report_luns() */ +static void __report_luns(struct vscsibk_pend *pending_req, void *data) +{ + struct vscsibk_info *info = pending_req->info; + unsigned int nr_seg = pending_req->n_sg; + uint8_t *cmd = pending_req->cmnd; + unsigned char buff[VSCSI_REPORT_LUNS_HEADER]; + unsigned int req_bufflen; + unsigned int actual_len = VSCSI_REPORT_LUNS_HEADER; + uint8_t select_report = cmd[2]; + int i, lun_cnt = 0, lun; + struct v2p_entry *entry; + struct list_head *head = &(info->v2p_entry_lists); + unsigned long flags; + struct scsi_lun one_lun; + uint16_t v_chn, v_tgt; + + req_bufflen = be32_to_cpup((const __be32 *)(cmd + 6)); + if ((req_bufflen < 4) || (select_report != 0)) { + scsiback_resp_err(pending_req, scsi_invalid_field_in_cdb); + return; + } + + memset(buff, 0, VSCSI_REPORT_LUNS_HEADER); + memset(&one_lun, 0, sizeof(one_lun)); + + v_chn = pending_req->v2p->v.chn; + v_tgt = pending_req->v2p->v.tgt; + + spin_lock_irqsave(&info->v2p_lock, flags); + list_for_each_entry(entry, head, l) { + if ((entry->v.chn == v_chn) && (entry->v.tgt == v_tgt)) { + lun = entry->v.lun; + one_lun.scsi_lun[0] = (lun >> 8) & 0x3f; + one_lun.scsi_lun[1] = lun & 0xff; + lun_cnt++; + __copy_to_sg(pending_req->sgl, nr_seg, &one_lun, + sizeof(one_lun), actual_len); + actual_len += sizeof(one_lun); + } + } + + spin_unlock_irqrestore(&info->v2p_lock, flags); + + buff[2] = ((sizeof(struct scsi_lun) * lun_cnt) >> 8) & 0xff; + buff[3] = (sizeof(struct scsi_lun) * lun_cnt) & 0xff; + + __copy_to_sg(pending_req->sgl, nr_seg, buff, + VSCSI_REPORT_LUNS_HEADER, 0); + + req_bufflen = 0; + for (i = 0; i < nr_seg; i++) + req_bufflen += pending_req->sgl[i].length; + + memset(pending_req->sense_buffer, 0, VSCSIIF_SENSE_BUFFERSIZE); + pending_req->result = 0x00; + pending_req->se_cmd.residual_count = req_bufflen - + min(req_bufflen, actual_len); +} + +/* + Emulation routines for each SCSI op_code. +*/ +static struct vscsibk_emulate emul[VSCSI_MAX_SCSI_OP_CODE] = { + /* + Register appropriate functions below as you need. + (See scsi/scsi.h for definition of SCSI op_code.) 
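+ A NULL pre_function hands the command unchanged to the target core; + resp_not_supported_cmd completes it in the backend with ILLEGAL_REQUEST + sense data, and a post_function (if set) runs on the response path after + the command completes.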
+ */ + [TEST_UNIT_READY] = { NULL, NULL }, /*0x00 sd,st */ + [REZERO_UNIT] = { NULL, NULL }, /*0x01 st */ + [0x02] = { resp_not_supported_cmd, NULL }, + [REQUEST_SENSE] = { NULL, NULL }, /*0x03*/ + [FORMAT_UNIT] = { NULL, NULL }, /*0x04*/ + [READ_BLOCK_LIMITS] = { NULL, NULL }, /*0x05 st */ + [0x06] = { resp_not_supported_cmd, NULL }, + [INITIALIZE_ELEMENT_STATUS] = { NULL, NULL }, /*0x07 ch */ + [READ_6] = { NULL, NULL }, /*0x08 sd,st */ + [0x09] = { resp_not_supported_cmd, NULL }, + [WRITE_6] = { NULL, NULL }, /*0x0a sd,st */ + [SEEK_6] = { NULL, NULL }, /*0x0b*/ + [0x0c ... 0x0e] = { resp_not_supported_cmd, NULL }, + [READ_REVERSE] = { resp_not_supported_cmd, NULL }, + [WRITE_FILEMARKS] = { NULL, NULL }, /*0x10 st */ + [SPACE] = { NULL, NULL }, /*0x11 st */ + [INQUIRY] = { NULL, NULL }, /*0x12*/ + [0x13] = { resp_not_supported_cmd, NULL }, + [RECOVER_BUFFERED_DATA] = { resp_not_supported_cmd, NULL }, + [MODE_SELECT] = { NULL, NULL }, /*0x15 st */ + [RESERVE] = { NULL, NULL }, /*0x16*/ + [RELEASE] = { NULL, NULL }, /*0x17*/ + [COPY] = { resp_not_supported_cmd, NULL }, + [ERASE] = { NULL, NULL }, /*0x19 st */ + [MODE_SENSE] = { NULL, NULL }, /*0x1a st */ + [START_STOP] = { NULL, NULL }, /*0x1b sd,st */ + [RECEIVE_DIAGNOSTIC] = { NULL, NULL }, /*0x1c*/ + [SEND_DIAGNOSTIC] = { NULL, NULL }, /*0x1d*/ + [ALLOW_MEDIUM_REMOVAL] = { NULL, NULL }, /*0x1e*/ + [0x1f ... 0x22] = { resp_not_supported_cmd, NULL }, + [READ_FORMAT_CAPACITIES] = { resp_not_supported_cmd, NULL }, + [SET_WINDOW] = { resp_not_supported_cmd, NULL }, + [READ_CAPACITY] = { NULL, NULL }, /*0x25 sd */ + [0x26 ... 0x27] = { resp_not_supported_cmd, NULL }, + [READ_10] = { NULL, NULL }, /*0x28 sd */ + [0x29] = { resp_not_supported_cmd, NULL }, + [WRITE_10] = { NULL, NULL }, /*0x2a sd */ + [SEEK_10] = { NULL, NULL }, /*0x2b st */ + /*[POSITION_TO_ELEMENT] = { NULL, NULL }, 0x2b ch */ + [0x2c ... 0x2d] = { resp_not_supported_cmd, NULL }, + [WRITE_VERIFY] = { resp_not_supported_cmd, NULL }, + [VERIFY] = { resp_not_supported_cmd, NULL }, + [SEARCH_HIGH] = { resp_not_supported_cmd, NULL }, + [SEARCH_EQUAL] = { resp_not_supported_cmd, NULL }, + [SEARCH_LOW] = { resp_not_supported_cmd, NULL }, + [SET_LIMITS] = { NULL, NULL }, /*0x33*/ + [PRE_FETCH] = { NULL, NULL }, /*0x34 st */ + /*[READ_POSITION] = { NULL, NULL }, 0x34 st */ + [SYNCHRONIZE_CACHE] = { NULL, NULL }, /*0x35 sd */ + [LOCK_UNLOCK_CACHE] = { NULL, NULL }, /*0x36*/ + [READ_DEFECT_DATA] = { NULL, NULL }, /*0x37*/ + [MEDIUM_SCAN] = { NULL, NULL }, /*0x38*/ + [COMPARE] = { resp_not_supported_cmd, NULL }, + [COPY_VERIFY] = { resp_not_supported_cmd, NULL }, + [WRITE_BUFFER] = { NULL, NULL }, /*0x3b*/ + [READ_BUFFER] = { NULL, NULL }, /*0x3c osst */ + [UPDATE_BLOCK] = { resp_not_supported_cmd, NULL }, + [READ_LONG] = { resp_not_supported_cmd, NULL }, + [WRITE_LONG] = { resp_not_supported_cmd, NULL }, + [CHANGE_DEFINITION] = { resp_not_supported_cmd, NULL }, + [WRITE_SAME] = { resp_not_supported_cmd, NULL }, + [UNMAP] = { resp_not_supported_cmd, NULL }, + [READ_TOC] = { NULL, NULL }, /*0x43 sr */ + [READ_HEADER] = { resp_not_supported_cmd, NULL }, + [0x45 ... 0x49] = { resp_not_supported_cmd, NULL }, + [GET_EVENT_STATUS_NOTIFICATION] = { resp_not_supported_cmd, NULL }, + [0x4b] = { resp_not_supported_cmd, NULL }, + [LOG_SELECT] = { NULL, NULL }, /*0x4c*/ + [LOG_SENSE] = { NULL, NULL }, /*0x4d st */ + [0x4e ... 
0x52] = { resp_not_supported_cmd, NULL }, + [XDWRITEREAD_10] = { resp_not_supported_cmd, NULL }, + [0x54] = { resp_not_supported_cmd, NULL }, + [MODE_SELECT_10] = { resp_not_supported_cmd, NULL }, + [RESERVE_10] = { resp_not_supported_cmd, NULL }, + [RELEASE_10] = { resp_not_supported_cmd, NULL }, + [0x58 ... 0x59] = { resp_not_supported_cmd, NULL }, + [MODE_SENSE_10] = { NULL, NULL }, /*0x5a scsi_lib */ + [0x5b ... 0x5d] = { resp_not_supported_cmd, NULL }, + [PERSISTENT_RESERVE_IN] = { resp_not_supported_cmd, NULL }, + [PERSISTENT_RESERVE_OUT] = { resp_not_supported_cmd, NULL }, + [0x60 ... 0x7e] = { resp_not_supported_cmd, NULL }, + [VARIABLE_LENGTH_CMD] = { resp_not_supported_cmd, NULL }, + [0x80 ... 0x82] = { resp_not_supported_cmd, NULL }, + [EXTENDED_COPY] = { resp_not_supported_cmd, NULL }, + [RECEIVE_COPY_RESULTS] = { resp_not_supported_cmd, NULL }, + [0x85] = { resp_not_supported_cmd, NULL }, + [ACCESS_CONTROL_IN] = { resp_not_supported_cmd, NULL }, + [ACCESS_CONTROL_OUT] = { resp_not_supported_cmd, NULL }, + [READ_16] = { NULL, NULL }, /*0x88 sd >2TB */ + [COMPARE_AND_WRITE] = { resp_not_supported_cmd, NULL }, + [WRITE_16] = { NULL, NULL }, /*0x8a sd >2TB */ + [0x8b] = { resp_not_supported_cmd, NULL }, + [READ_ATTRIBUTE] = { resp_not_supported_cmd, NULL }, + [WRITE_ATTRIBUTE] = { resp_not_supported_cmd, NULL }, + [0x8e] = { resp_not_supported_cmd, NULL }, + [VERIFY_16] = { NULL, NULL }, /*0x8f*/ + [0x90] = { resp_not_supported_cmd, NULL }, + [SYNCHRONIZE_CACHE_16] = { resp_not_supported_cmd, NULL }, + [0x92] = { resp_not_supported_cmd, NULL }, + [WRITE_SAME_16] = { resp_not_supported_cmd, NULL }, + [0x94 ... 0x9d] = { resp_not_supported_cmd, NULL }, + [SERVICE_ACTION_IN] = { NULL, NULL }, /*0x9e sd >2TB */ + [0x9f] = { resp_not_supported_cmd, NULL }, + [REPORT_LUNS] = { __report_luns, NULL }, /* emulate */ + [0xa1] = { resp_not_supported_cmd, NULL }, + [SECURITY_PROTOCOL_IN] = { resp_not_supported_cmd, NULL }, + [MAINTENANCE_IN] = { NULL, NULL }, /*0xa3 IFT alua */ + [MAINTENANCE_OUT] = { NULL, NULL }, /*0xa4 IFT alua */ + [MOVE_MEDIUM] = { NULL, NULL }, /*0xa5 ch */ + [EXCHANGE_MEDIUM] = { NULL, NULL }, /*0xa6 ch */ + [0xa7] = { resp_not_supported_cmd, NULL }, + [READ_12] = { resp_not_supported_cmd, NULL }, + [0xa9] = { resp_not_supported_cmd, NULL }, + [WRITE_12] = { resp_not_supported_cmd, NULL }, + [READ_MEDIA_SERIAL_NUMBER] = { resp_not_supported_cmd, NULL }, + [0xac ... 0xad] = { resp_not_supported_cmd, NULL }, + [WRITE_VERIFY_12] = { resp_not_supported_cmd, NULL }, + [VERIFY_12] = { resp_not_supported_cmd, NULL }, + [SEARCH_HIGH_12] = { resp_not_supported_cmd, NULL }, + [SEARCH_EQUAL_12] = { resp_not_supported_cmd, NULL }, + [SEARCH_LOW_12] = { resp_not_supported_cmd, NULL }, + [0xb3 ... 0xb4] = { resp_not_supported_cmd, NULL }, + [SECURITY_PROTOCOL_OUT] = { resp_not_supported_cmd, NULL }, + [SEND_VOLUME_TAG] = { NULL, NULL }, /*0xb6 ch */ + [0xb7] = { resp_not_supported_cmd, NULL }, + [READ_ELEMENT_STATUS] = { NULL, NULL }, /*0xb8 ch */ + [0xb9 ... 0xe9] = { resp_not_supported_cmd, NULL }, + [WRITE_LONG_2] = { resp_not_supported_cmd, NULL }, + [0xeb ... 0xff] = { resp_not_supported_cmd, NULL }, + +/* st: QFA_REQUEST_BLOCK, QFA_SEEK_BLOCK might be needed?
*/ + +}; + +static void scsiback_rsp_emulation(struct vscsibk_pend *pending_req) +{ + uint8_t op_code = pending_req->cmnd[0]; + + if (emul[op_code].post_function != NULL) + emul[op_code].post_function(pending_req, NULL); +} + +static void scsiback_print_status(char *sense_buffer, int errors, + struct vscsibk_pend *pending_req) +{ + struct scsiback_tpg *tpg = pending_req->v2p->tpg; + + pr_err("xen-pvscsi[%s:%d] cmnd[0]=%02x -> st=%02x msg=%02x host=%02x drv=%02x\n", + tpg->tport->tport_name, pending_req->v2p->lun, + pending_req->cmnd[0], status_byte(errors), msg_byte(errors), + host_byte(errors), driver_byte(errors)); + + if (CHECK_CONDITION & status_byte(errors)) + __scsi_print_sense("xen-pvscsi", sense_buffer, + SCSI_SENSE_BUFFERSIZE); +} + +static void scsiback_fast_flush_area(struct vscsibk_pend *req) +{ + struct gnttab_unmap_grant_ref unmap[VSCSI_GRANT_BATCH]; + struct page *pages[VSCSI_GRANT_BATCH]; + unsigned int i, invcount = 0; + grant_handle_t handle; + int err; + + kfree(req->sgl); + req->sgl = NULL; + req->n_sg = 0; + + if (!req->n_grants) + return; + + for (i = 0; i < req->n_grants; i++) { + handle = req->grant_handles[i]; + if (handle == SCSIBACK_INVALID_HANDLE) + continue; + gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i), + GNTMAP_host_map, handle); + req->grant_handles[i] = SCSIBACK_INVALID_HANDLE; + pages[invcount] = req->pages[i]; + put_page(pages[invcount]); + invcount++; + if (invcount < VSCSI_GRANT_BATCH) + continue; + err = gnttab_unmap_refs(unmap, NULL, pages, invcount); + BUG_ON(err); + invcount = 0; + } + + if (invcount) { + err = gnttab_unmap_refs(unmap, NULL, pages, invcount); + BUG_ON(err); + } + + put_free_pages(req->pages, req->n_grants); + req->n_grants = 0; +} + +static void scsiback_free_translation_entry(struct kref *kref) +{ + struct v2p_entry *entry = container_of(kref, struct v2p_entry, kref); + struct scsiback_tpg *tpg = entry->tpg; + + mutex_lock(&tpg->tv_tpg_mutex); + tpg->tv_tpg_fe_count--; + mutex_unlock(&tpg->tv_tpg_mutex); + + kfree(entry); +} + +static void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result, + uint32_t resid, struct vscsibk_pend *pending_req) +{ + struct vscsiif_response *ring_res; + struct vscsibk_info *info = pending_req->info; + int notify; + struct scsi_sense_hdr sshdr; + unsigned long flags; + unsigned len; + + DPRINTK("%s\n", __func__); + + spin_lock_irqsave(&info->ring_lock, flags); + + ring_res = RING_GET_RESPONSE(&info->ring, info->ring.rsp_prod_pvt); + info->ring.rsp_prod_pvt++; + + ring_res->rslt = result; + ring_res->rqid = pending_req->rqid; + + if (sense_buffer != NULL && + scsi_normalize_sense(sense_buffer, VSCSIIF_SENSE_BUFFERSIZE, + &sshdr)) { + len = min_t(unsigned, 8 + sense_buffer[7], + VSCSIIF_SENSE_BUFFERSIZE); + memcpy(ring_res->sense_buffer, sense_buffer, len); + ring_res->sense_len = len; + } else { + ring_res->sense_len = 0; + } + + ring_res->residual_len = resid; + + RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info->ring, notify); + spin_unlock_irqrestore(&info->ring_lock, flags); + + if (notify) + notify_remote_via_irq(info->irq); + + if (pending_req->v2p) + kref_put(&pending_req->v2p->kref, + scsiback_free_translation_entry); + + kmem_cache_free(scsiback_cachep, pending_req); +} + +static void scsiback_cmd_done(struct vscsibk_pend *pending_req) +{ + struct vscsibk_info *info = pending_req->info; + unsigned char *sense_buffer; + unsigned int resid; + int errors; + + sense_buffer = pending_req->sense_buffer; + resid = pending_req->se_cmd.residual_count; + errors = pending_req->result; + + if 
(errors && log_print_stat) + scsiback_print_status(sense_buffer, errors, pending_req); + + /* In host mode the response is passed through without emulation. */ + if (info->feature != VSCSI_TYPE_HOST) + scsiback_rsp_emulation(pending_req); + + scsiback_fast_flush_area(pending_req); + scsiback_do_resp_with_sense(sense_buffer, errors, resid, pending_req); + scsiback_put(info); + + transport_generic_free_cmd(&pending_req->se_cmd, 0); +} + +static void scsiback_cmd_exec(struct vscsibk_pend *pending_req) +{ + struct se_cmd *se_cmd = &pending_req->se_cmd; + struct se_session *sess = pending_req->v2p->tpg->tpg_nexus->tvn_se_sess; + int rc; + + DPRINTK("%s\n", __func__); + + memset(pending_req->sense_buffer, 0, VSCSIIF_SENSE_BUFFERSIZE); + + memset(se_cmd, 0, sizeof(*se_cmd)); + se_cmd->prot_pto = true; + + scsiback_get(pending_req->info); + rc = target_submit_cmd_map_sgls(se_cmd, sess, pending_req->cmnd, + pending_req->sense_buffer, pending_req->v2p->lun, + pending_req->data_len, 0, + pending_req->sc_data_direction, 0, + pending_req->sgl, pending_req->n_sg, + NULL, 0, NULL, 0); + if (rc < 0) { + transport_send_check_condition_and_sense(se_cmd, + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); + transport_generic_free_cmd(se_cmd, 0); + } +} + +static int scsiback_gnttab_data_map_batch(struct gnttab_map_grant_ref *map, + struct page **pg, grant_handle_t *grant, int cnt) +{ + int err, i; + + if (!cnt) + return 0; + + err = gnttab_map_refs(map, NULL, pg, cnt); + BUG_ON(err); + for (i = 0; i < cnt; i++) { + if (unlikely(map[i].status != GNTST_okay)) { + pr_err("xen-pvscsi: invalid buffer -- could not remap it\n"); + map[i].handle = SCSIBACK_INVALID_HANDLE; + err = -ENOMEM; + } else { + get_page(pg[i]); + } + grant[i] = map[i].handle; + } + return err; +} + +static int scsiback_gnttab_data_map_list(struct vscsibk_pend *pending_req, + struct scsiif_request_segment *seg, struct page **pg, + grant_handle_t *grant, int cnt, u32 flags) +{ + int mapcount = 0, i, err = 0; + struct gnttab_map_grant_ref map[VSCSI_GRANT_BATCH]; + struct vscsibk_info *info = pending_req->info; + + for (i = 0; i < cnt; i++) { + if (get_free_page(pg + mapcount)) { + put_free_pages(pg, mapcount); + pr_err("xen-pvscsi: no grant page\n"); + return -ENOMEM; + } + gnttab_set_map_op(&map[mapcount], vaddr_page(pg[mapcount]), + flags, seg[i].gref, info->domid); + mapcount++; + if (mapcount < VSCSI_GRANT_BATCH) + continue; + err = scsiback_gnttab_data_map_batch(map, pg, grant, mapcount); + pg += mapcount; + grant += mapcount; + pending_req->n_grants += mapcount; + if (err) + return err; + mapcount = 0; + } + err = scsiback_gnttab_data_map_batch(map, pg, grant, mapcount); + pending_req->n_grants += mapcount; + return err; +} + +static int scsiback_gnttab_data_map(struct vscsiif_request *ring_req, + struct vscsibk_pend *pending_req) +{ + u32 flags; + int i, err, n_segs, i_seg = 0; + struct page **pg; + struct scsiif_request_segment *seg; + unsigned long end_seg = 0; + unsigned int nr_segments = (unsigned int)ring_req->nr_segments; + unsigned int nr_sgl = 0; + struct scatterlist *sg; + grant_handle_t *grant; + + pending_req->n_sg = 0; + pending_req->n_grants = 0; + pending_req->data_len = 0; + + nr_segments &= ~VSCSIIF_SG_GRANT; + if (!nr_segments) + return 0; + + if (nr_segments > VSCSIIF_SG_TABLESIZE) { + DPRINTK("xen-pvscsi: invalid parameter nr_seg = %d\n", + ring_req->nr_segments); + return -EINVAL; + } + + if (ring_req->nr_segments & VSCSIIF_SG_GRANT) { + err = scsiback_gnttab_data_map_list(pending_req, ring_req->seg, + pending_req->pages,
pending_req->grant_handles, + nr_segments, GNTMAP_host_map | GNTMAP_readonly); + if (err) + return err; + nr_sgl = nr_segments; + nr_segments = 0; + for (i = 0; i < nr_sgl; i++) { + n_segs = ring_req->seg[i].length / + sizeof(struct scsiif_request_segment); + if ((unsigned)ring_req->seg[i].offset + + (unsigned)ring_req->seg[i].length > PAGE_SIZE || + n_segs * sizeof(struct scsiif_request_segment) != + ring_req->seg[i].length) + return -EINVAL; + nr_segments += n_segs; + } + if (nr_segments > SG_ALL) { + DPRINTK("xen-pvscsi: invalid nr_seg = %d\n", + nr_segments); + return -EINVAL; + } + } + + /* sgl is freed in scsiback_fast_flush_area() */ + pending_req->sgl = kmalloc_array(nr_segments, + sizeof(struct scatterlist), GFP_KERNEL); + if (!pending_req->sgl) { + pr_err("xen-pvscsi: %s: kmalloc() error\n", __func__); + return -ENOMEM; + } + + sg_init_table(pending_req->sgl, nr_segments); + pending_req->n_sg = nr_segments; + + flags = GNTMAP_host_map; + if (pending_req->sc_data_direction == DMA_TO_DEVICE) + flags |= GNTMAP_readonly; + + pg = pending_req->pages + nr_sgl; + grant = pending_req->grant_handles + nr_sgl; + if (!nr_sgl) { + seg = ring_req->seg; + err = scsiback_gnttab_data_map_list(pending_req, seg, + pg, grant, nr_segments, flags); + if (err) + return err; + } else { + for (i = 0; i < nr_sgl; i++) { + seg = (struct scsiif_request_segment *)( + vaddr(pending_req, i) + ring_req->seg[i].offset); + n_segs = ring_req->seg[i].length / + sizeof(struct scsiif_request_segment); + err = scsiback_gnttab_data_map_list(pending_req, seg, + pg, grant, n_segs, flags); + if (err) + return err; + pg += n_segs; + grant += n_segs; + } + end_seg = vaddr(pending_req, 0) + ring_req->seg[0].offset; + seg = (struct scsiif_request_segment *)end_seg; + end_seg += ring_req->seg[0].length; + pg = pending_req->pages + nr_sgl; + } + + for_each_sg(pending_req->sgl, sg, nr_segments, i) { + sg_set_page(sg, pg[i], seg->length, seg->offset); + pending_req->data_len += seg->length; + seg++; + if (nr_sgl && (unsigned long)seg >= end_seg) { + i_seg++; + end_seg = vaddr(pending_req, i_seg) + + ring_req->seg[i_seg].offset; + seg = (struct scsiif_request_segment *)end_seg; + end_seg += ring_req->seg[i_seg].length; + } + barrier(); + if (sg->offset >= PAGE_SIZE || + sg->length > PAGE_SIZE || + sg->offset + sg->length > PAGE_SIZE) + return -EINVAL; + } + + return 0; +} + +static void scsiback_req_emulation_or_cmdexec(struct vscsiif_request *ring_req, + struct vscsibk_pend *pending_req) +{ + struct vscsibk_info *info = pending_req->info; + uint8_t op_code = pending_req->cmnd[0]; + + if (scsiback_gnttab_data_map(ring_req, pending_req)) + goto out; + + if (info->feature != VSCSI_TYPE_HOST && emul[op_code].pre_function) { + emul[op_code].pre_function(pending_req, NULL); + scsiback_fast_flush_area(pending_req); + scsiback_do_resp_with_sense(pending_req->sense_buffer, + pending_req->result, + pending_req->se_cmd.residual_count, + pending_req); + } else { + scsiback_cmd_exec(pending_req); + } + return; + +out: + scsiback_fast_flush_area(pending_req); + scsiback_do_resp_with_sense(NULL, DRIVER_ERROR << 24, 0, pending_req); +} + +static void scsiback_notify_work(struct vscsibk_info *info) +{ + info->waiting_reqs = 1; + wake_up(&info->wq); +} + +static irqreturn_t scsiback_intr(int irq, void *dev_id) +{ + scsiback_notify_work((struct vscsibk_info *)dev_id); + return IRQ_HANDLED; +} + +static int scsiback_init_sring(struct vscsibk_info *info, grant_ref_t ring_ref, + evtchn_port_t evtchn) +{ + void *area; + struct vscsiif_sring *sring; + int err;
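+ /* A nonzero irq means a ring is already mapped for this backend. */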
+ + if (info->irq) { + pr_err("xen-pvscsi: Already connected through?\n"); + return -1; + } + + err = xenbus_map_ring_valloc(info->dev, ring_ref, &area); + if (err) + return err; + + sring = (struct vscsiif_sring *)area; + BACK_RING_INIT(&info->ring, sring, PAGE_SIZE); + + err = bind_interdomain_evtchn_to_irqhandler( + info->domid, evtchn, + scsiback_intr, 0, "vscsiif-backend", info); + + if (err < 0) + goto unmap_page; + + info->irq = err; + + return 0; + +unmap_page: + xenbus_unmap_ring_vfree(info->dev, area); + + return err; +} + +static void scsiback_disconnect(struct vscsibk_info *info) +{ + if (info->kthread) { + kthread_stop(info->kthread); + info->kthread = NULL; + wake_up(&info->shutdown_wq); + } + + wait_event(info->waiting_to_free, + atomic_read(&info->nr_unreplied_reqs) == 0); + + if (info->irq) { + unbind_from_irqhandler(info->irq, info); + info->irq = 0; + } + + if (info->ring.sring) { + xenbus_unmap_ring_vfree(info->dev, info->ring.sring); + info->ring.sring = NULL; + } +} + +static void scsiback_device_action(struct vscsibk_pend *pending_req, + enum tcm_tmreq_table act) +{ + int rc, err = FAILED; + struct scsiback_tpg *tpg = pending_req->v2p->tpg; + struct se_cmd *se_cmd = &pending_req->se_cmd; + struct scsiback_tmr *tmr; + + tmr = kzalloc(sizeof(struct scsiback_tmr), GFP_KERNEL); + if (!tmr) { + pr_err("xen-pvscsi: %s: kmalloc() error\n", __func__); + goto out; + } + init_waitqueue_head(&tmr->tmr_wait); + + transport_init_se_cmd(se_cmd, tpg->se_tpg.se_tpg_tfo, + tpg->tpg_nexus->tvn_se_sess, 0, DMA_NONE, MSG_SIMPLE_TAG, + &pending_req->sense_buffer[0]); + + rc = core_tmr_alloc_req(se_cmd, tmr, act, GFP_KERNEL); + if (rc < 0) + goto out; + + se_cmd->se_tmr_req->ref_task_tag = 0; + + if (transport_lookup_tmr_lun(se_cmd, pending_req->v2p->lun) < 0) + goto out; + + transport_generic_handle_tmr(se_cmd); + wait_event(tmr->tmr_wait, atomic_read(&tmr->tmr_complete)); + + err = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ? 
+ SUCCESS : FAILED; + +out: + if (tmr) { + transport_generic_free_cmd(&pending_req->se_cmd, 1); + kfree(tmr); + } + + scsiback_do_resp_with_sense(NULL, err, 0, pending_req); +} + +/* + Perform virtual to physical translation +*/ +static struct v2p_entry *scsiback_do_translation(struct vscsibk_info *info, + struct ids_tuple *v) +{ + struct v2p_entry *entry; + struct list_head *head = &(info->v2p_entry_lists); + unsigned long flags; + + spin_lock_irqsave(&info->v2p_lock, flags); + list_for_each_entry(entry, head, l) { + if ((entry->v.chn == v->chn) && + (entry->v.tgt == v->tgt) && + (entry->v.lun == v->lun)) { + kref_get(&entry->kref); + goto out; + } + } + entry = NULL; + +out: + spin_unlock_irqrestore(&info->v2p_lock, flags); + return entry; +} + +static int prepare_pending_reqs(struct vscsibk_info *info, + struct vscsiif_request *ring_req, + struct vscsibk_pend *pending_req) +{ + struct v2p_entry *v2p; + struct ids_tuple vir; + + DPRINTK("%s\n", __func__); + + pending_req->rqid = ring_req->rqid; + pending_req->info = info; + + vir.chn = ring_req->channel; + vir.tgt = ring_req->id; + vir.lun = ring_req->lun; + + v2p = scsiback_do_translation(info, &vir); + if (!v2p) { + pending_req->v2p = NULL; + DPRINTK("xen-pvscsi: doesn't exist.\n"); + return -ENODEV; + } + pending_req->v2p = v2p; + + /* request range check from frontend */ + pending_req->sc_data_direction = ring_req->sc_data_direction; + if ((pending_req->sc_data_direction != DMA_BIDIRECTIONAL) && + (pending_req->sc_data_direction != DMA_TO_DEVICE) && + (pending_req->sc_data_direction != DMA_FROM_DEVICE) && + (pending_req->sc_data_direction != DMA_NONE)) { + DPRINTK("xen-pvscsi: invalid parameter data_dir = %d\n", + pending_req->sc_data_direction); + return -EINVAL; + } + + pending_req->cmd_len = ring_req->cmd_len; + if (pending_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) { + DPRINTK("xen-pvscsi: invalid parameter cmd_len = %d\n", + pending_req->cmd_len); + return -EINVAL; + } + memcpy(pending_req->cmnd, ring_req->cmnd, pending_req->cmd_len); + + return 0; +} + +static int _scsiback_do_cmd_fn(struct vscsibk_info *info) +{ + struct vscsiif_back_ring *ring = &info->ring; + struct vscsiif_request *ring_req; + struct vscsibk_pend *pending_req; + RING_IDX rc, rp; + int err, more_to_do = 0; + uint32_t result; + uint8_t act; + + DPRINTK("%s\n", __func__); + + rc = ring->req_cons; + rp = ring->sring->req_prod; + rmb(); /* guest system is accessing ring, too */ + + if (RING_REQUEST_PROD_OVERFLOW(ring, rp)) { + rc = ring->rsp_prod_pvt; + pr_warn("xen-pvscsi: Dom%d provided bogus ring requests (%#x - %#x = %u). 
Halting ring processing\n", + info->domid, rp, rc, rp - rc); + return -EACCES; + } + + while ((rc != rp)) { + if (RING_REQUEST_CONS_OVERFLOW(ring, rc)) + break; + pending_req = kmem_cache_alloc(scsiback_cachep, GFP_KERNEL); + if (NULL == pending_req) { + more_to_do = 1; + break; + } + + ring_req = RING_GET_REQUEST(ring, rc); + ring->req_cons = ++rc; + + act = ring_req->act; + err = prepare_pending_reqs(info, ring_req, pending_req); + if (err) { + switch (err) { + case -ENODEV: + result = DID_NO_CONNECT; + break; + default: + result = DRIVER_ERROR; + break; + } + scsiback_do_resp_with_sense(NULL, result << 24, 0, + pending_req); + more_to_do = 1; + break; + } + + switch (act) { + case VSCSIIF_ACT_SCSI_CDB: + scsiback_req_emulation_or_cmdexec(ring_req, + pending_req); + break; + case VSCSIIF_ACT_SCSI_ABORT: + scsiback_device_action(pending_req, TMR_ABORT_TASK); + break; + case VSCSIIF_ACT_SCSI_RESET: + scsiback_device_action(pending_req, TMR_LUN_RESET); + break; + default: + pr_err_ratelimited("xen-pvscsi: invalid request\n"); + scsiback_do_resp_with_sense(NULL, DRIVER_ERROR << 24, + 0, pending_req); + break; + } + + /* Yield point for this unbounded loop. */ + cond_resched(); + } + + if (RING_HAS_UNCONSUMED_REQUESTS(ring)) + more_to_do = 1; + + return more_to_do; +} + +static int scsiback_do_cmd_fn(struct vscsibk_info *info) +{ + int more_to_do; + + do { + more_to_do = _scsiback_do_cmd_fn(info); + if (more_to_do) + break; + + RING_FINAL_CHECK_FOR_REQUESTS(&info->ring, more_to_do); + } while (more_to_do); + + return more_to_do; +} + +static int scsiback_schedule(void *data) +{ + struct vscsibk_info *info = (struct vscsibk_info *)data; + + DPRINTK("%s\n", __func__); + + while (!kthread_should_stop()) { + wait_event_interruptible(info->wq, + info->waiting_reqs || kthread_should_stop()); + + info->waiting_reqs = 0; + smp_mb(); /* waiting_reqs used by other thread */ + + switch (scsiback_do_cmd_fn(info)) { + case 1: + info->waiting_reqs = 1; + case 0: + break; + case -EACCES: + wait_event_interruptible(info->shutdown_wq, + kthread_should_stop()); + break; + default: + BUG(); + } + } + + return 0; +} + +static void __vscsiif_name(struct vscsibk_info *info, char *buf) +{ + struct xenbus_device *dev = info->dev; + unsigned int domid, id; + + if (sscanf(dev->nodename, "backend/vscsi/%u/%u", &domid, &id) == 2) + snprintf(buf, TASK_COMM_LEN, "vscsi.%u.%u", info->domid, id); + else + snprintf(buf, TASK_COMM_LEN, "vscsi.%u..", info->domid); +} + +static int scsiback_map(struct vscsibk_info *info) +{ + struct xenbus_device *dev = info->dev; + unsigned int ring_ref, evtchn; + int err; + char name[TASK_COMM_LEN]; + + err = xenbus_gather(XBT_NIL, dev->otherend, + "ring-ref", "%u", &ring_ref, + "event-channel", "%u", &evtchn, NULL); + if (err) { + xenbus_dev_fatal(dev, err, "reading %s ring", dev->otherend); + return err; + } + + err = scsiback_init_sring(info, ring_ref, evtchn); + if (err) + return err; + + __vscsiif_name(info, name); + + info->kthread = kthread_run(scsiback_schedule, info, name); + if (IS_ERR(info->kthread)) { + err = PTR_ERR(info->kthread); + info->kthread = NULL; + xenbus_dev_error(info->dev, err, "start vscsiif"); + return err; + } + + return 0; +} + +/* + Add a new translation entry +*/ +static int scsiback_add_translation_entry(struct vscsibk_info *info, + char *phy, struct ids_tuple *v) +{ + int err = 0; + struct v2p_entry *entry; + struct v2p_entry *new; + struct list_head *head = &(info->v2p_entry_lists); + unsigned long flags; + char *lunp; + unsigned int lun; + struct 
scsiback_tpg *tpg_entry, *tpg = NULL; + char *error = "doesn't exist"; + + lunp = strrchr(phy, ':'); + if (!lunp) { + pr_err("xen-pvscsi: illegal format of physical device %s\n", + phy); + return -EINVAL; + } + *lunp = 0; + lunp++; + if (kstrtouint(lunp, 10, &lun) || lun >= TRANSPORT_MAX_LUNS_PER_TPG) { + pr_err("xen-pvscsi: lun number not valid: %s\n", lunp); + return -EINVAL; + } + + mutex_lock(&scsiback_mutex); + list_for_each_entry(tpg_entry, &scsiback_list, tv_tpg_list) { + if (!strcmp(phy, tpg_entry->tport->tport_name) || + !strcmp(phy, tpg_entry->param_alias)) { + spin_lock(&tpg_entry->se_tpg.tpg_lun_lock); + if (tpg_entry->se_tpg.tpg_lun_list[lun]->lun_status == + TRANSPORT_LUN_STATUS_ACTIVE) { + if (!tpg_entry->tpg_nexus) + error = "nexus undefined"; + else + tpg = tpg_entry; + } + spin_unlock(&tpg_entry->se_tpg.tpg_lun_lock); + break; + } + } + if (tpg) { + mutex_lock(&tpg->tv_tpg_mutex); + tpg->tv_tpg_fe_count++; + mutex_unlock(&tpg->tv_tpg_mutex); + } + mutex_unlock(&scsiback_mutex); + + if (!tpg) { + pr_err("xen-pvscsi: %s:%d %s\n", phy, lun, error); + return -ENODEV; + } + + new = kmalloc(sizeof(struct v2p_entry), GFP_KERNEL); + if (new == NULL) { + pr_err("xen-pvscsi: %s: kmalloc() error\n", __func__); + err = -ENOMEM; + goto out_free; + } + + spin_lock_irqsave(&info->v2p_lock, flags); + + /* Check double assignment to identical virtual ID */ + list_for_each_entry(entry, head, l) { + if ((entry->v.chn == v->chn) && + (entry->v.tgt == v->tgt) && + (entry->v.lun == v->lun)) { + pr_warn("xen-pvscsi: Virtual ID is already used. Assignment was not performed.\n"); + err = -EEXIST; + goto out; + } + + } + + /* Create a new translation entry and add to the list */ + kref_init(&new->kref); + new->v = *v; + new->tpg = tpg; + new->lun = lun; + list_add_tail(&new->l, head); + +out: + spin_unlock_irqrestore(&info->v2p_lock, flags); + +out_free: + if (err) { + mutex_lock(&tpg->tv_tpg_mutex); + tpg->tv_tpg_fe_count--; + mutex_unlock(&tpg->tv_tpg_mutex); + kfree(new); + } + + return err; +} + +static void __scsiback_del_translation_entry(struct v2p_entry *entry) +{ + list_del(&entry->l); + kref_put(&entry->kref, scsiback_free_translation_entry); +} + +/* + Delete the translation entry specified +*/ +static int scsiback_del_translation_entry(struct vscsibk_info *info, + struct ids_tuple *v) +{ + struct v2p_entry *entry; + struct list_head *head = &(info->v2p_entry_lists); + unsigned long flags; + + spin_lock_irqsave(&info->v2p_lock, flags); + /* Find out the translation entry specified */ + list_for_each_entry(entry, head, l) { + if ((entry->v.chn == v->chn) && + (entry->v.tgt == v->tgt) && + (entry->v.lun == v->lun)) { + goto found; + } + } + + spin_unlock_irqrestore(&info->v2p_lock, flags); + return 1; + +found: + /* Delete the translation entry specified */ + __scsiback_del_translation_entry(entry); + + spin_unlock_irqrestore(&info->v2p_lock, flags); + return 0; +} + +static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state, + char *phy, struct ids_tuple *vir) +{ + if (!scsiback_add_translation_entry(info, phy, vir)) { + if (xenbus_printf(XBT_NIL, info->dev->nodename, state, + "%d", XenbusStateInitialised)) { + pr_err("xen-pvscsi: xenbus_printf error %s\n", state); + scsiback_del_translation_entry(info, vir); + } + } else { + xenbus_printf(XBT_NIL, info->dev->nodename, state, + "%d", XenbusStateClosed); + } +} + +static void scsiback_do_del_lun(struct vscsibk_info *info, const char *state, + struct ids_tuple *vir) +{ + if
(!scsiback_del_translation_entry(info, vir)) { + if (xenbus_printf(XBT_NIL, info->dev->nodename, state, + "%d", XenbusStateClosed)) + pr_err("xen-pvscsi: xenbus_printf error %s\n", state); + } +} + +#define VSCSIBACK_OP_ADD_OR_DEL_LUN 1 +#define VSCSIBACK_OP_UPDATEDEV_STATE 2 + +static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op, + char *ent) +{ + int err; + struct ids_tuple vir; + char *val; + int device_state; + char phy[VSCSI_NAMELEN]; + char str[64]; + char state[64]; + struct xenbus_device *dev = info->dev; + + /* read status */ + snprintf(state, sizeof(state), "vscsi-devs/%s/state", ent); + err = xenbus_scanf(XBT_NIL, dev->nodename, state, "%u", &device_state); + if (XENBUS_EXIST_ERR(err)) + return; + + /* physical SCSI device */ + snprintf(str, sizeof(str), "vscsi-devs/%s/p-dev", ent); + val = xenbus_read(XBT_NIL, dev->nodename, str, NULL); + if (IS_ERR(val)) { + xenbus_printf(XBT_NIL, dev->nodename, state, + "%d", XenbusStateClosed); + return; + } + strlcpy(phy, val, VSCSI_NAMELEN); + kfree(val); + + /* virtual SCSI device */ + snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", ent); + err = xenbus_scanf(XBT_NIL, dev->nodename, str, "%u:%u:%u:%u", + &vir.hst, &vir.chn, &vir.tgt, &vir.lun); + if (XENBUS_EXIST_ERR(err)) { + xenbus_printf(XBT_NIL, dev->nodename, state, + "%d", XenbusStateClosed); + return; + } + + switch (op) { + case VSCSIBACK_OP_ADD_OR_DEL_LUN: + if (device_state == XenbusStateInitialising) + scsiback_do_add_lun(info, state, phy, &vir); + if (device_state == XenbusStateClosing) + scsiback_do_del_lun(info, state, &vir); + break; + + case VSCSIBACK_OP_UPDATEDEV_STATE: + if (device_state == XenbusStateInitialised) { + /* modify vscsi-devs/dev-x/state */ + if (xenbus_printf(XBT_NIL, dev->nodename, state, + "%d", XenbusStateConnected)) { + pr_err("xen-pvscsi: xenbus_printf error %s\n", + state); + scsiback_del_translation_entry(info, &vir); + xenbus_printf(XBT_NIL, dev->nodename, state, + "%d", XenbusStateClosed); + } + } + break; + /* Add further processing here when necessary. */ + default: + break; + } +} + +static void scsiback_do_lun_hotplug(struct vscsibk_info *info, int op) +{ + int i; + char **dir; + unsigned int ndir = 0; + + dir = xenbus_directory(XBT_NIL, info->dev->nodename, "vscsi-devs", + &ndir); + if (IS_ERR(dir)) + return; + + for (i = 0; i < ndir; i++) + scsiback_do_1lun_hotplug(info, op, dir[i]); + + kfree(dir); +} + +static void scsiback_frontend_changed(struct xenbus_device *dev, + enum xenbus_state frontend_state) +{ + struct vscsibk_info *info = dev_get_drvdata(&dev->dev); + int err; + + switch (frontend_state) { + case XenbusStateInitialising: + break; + case XenbusStateInitialised: + err = scsiback_map(info); + if (err) + break; + + scsiback_do_lun_hotplug(info, VSCSIBACK_OP_ADD_OR_DEL_LUN); + xenbus_switch_state(dev, XenbusStateConnected); + + break; + case XenbusStateConnected: + + scsiback_do_lun_hotplug(info, VSCSIBACK_OP_UPDATEDEV_STATE); + + if (dev->state == XenbusStateConnected) + break; + + xenbus_switch_state(dev, XenbusStateConnected); + + break; + + case XenbusStateClosing: + scsiback_disconnect(info); + xenbus_switch_state(dev, XenbusStateClosing); + break; + + case XenbusStateClosed: + xenbus_switch_state(dev, XenbusStateClosed); + if (xenbus_dev_is_online(dev)) + break; + /* fall through if not online */ + case XenbusStateUnknown: + device_unregister(&dev->dev); + break; + + case XenbusStateReconfiguring: + scsiback_do_lun_hotplug(info, VSCSIBACK_OP_ADD_OR_DEL_LUN); + + xenbus_switch_state(dev,
XenbusStateReconfigured); + + break; + + default: + xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend", + frontend_state); + break; + } +} + +/* + Release the translation entry specified +*/ +static void scsiback_release_translation_entry(struct vscsibk_info *info) +{ + struct v2p_entry *entry, *tmp; + struct list_head *head = &(info->v2p_entry_lists); + unsigned long flags; + + spin_lock_irqsave(&info->v2p_lock, flags); + + list_for_each_entry_safe(entry, tmp, head, l) + __scsiback_del_translation_entry(entry); + + spin_unlock_irqrestore(&info->v2p_lock, flags); +} + +static int scsiback_remove(struct xenbus_device *dev) +{ + struct vscsibk_info *info = dev_get_drvdata(&dev->dev); + + scsiback_disconnect(info); + scsiback_release_translation_entry(info); + + dev_set_drvdata(&dev->dev, NULL); + + return 0; +} + +static int scsiback_probe(struct xenbus_device *dev, + const struct xenbus_device_id *id) +{ + int err; + unsigned val = 0; + + struct vscsibk_info *info = kzalloc(sizeof(struct vscsibk_info), + GFP_KERNEL); + + DPRINTK("%p %d\n", dev, dev->otherend_id); + + if (!info) { + xenbus_dev_fatal(dev, -ENOMEM, "allocating backend structure"); + return -ENOMEM; + } + info->dev = dev; + dev_set_drvdata(&dev->dev, info); + + info->domid = dev->otherend_id; + spin_lock_init(&info->ring_lock); + atomic_set(&info->nr_unreplied_reqs, 0); + init_waitqueue_head(&info->wq); + init_waitqueue_head(&info->shutdown_wq); + init_waitqueue_head(&info->waiting_to_free); + info->irq = 0; + info->feature = 0; /* default: not host mode */ + INIT_LIST_HEAD(&info->v2p_entry_lists); + spin_lock_init(&info->v2p_lock); + + err = xenbus_scanf(XBT_NIL, dev->nodename, "feature-host", "%d", &val); + if (XENBUS_EXIST_ERR(err)) + val = 0; + + if (val) + info->feature = VSCSI_TYPE_HOST; + + err = xenbus_printf(XBT_NIL, dev->nodename, "feature-sg-grant", "%u", + SG_ALL); + if (err) + xenbus_dev_error(dev, err, "writing feature-sg-grant"); + + err = xenbus_switch_state(dev, XenbusStateInitWait); + if (err) + goto fail; + + return 0; + +fail: + pr_warn("xen-pvscsi: %s failed\n", __func__); + scsiback_remove(dev); + + return err; +} + +static char *scsiback_dump_proto_id(struct scsiback_tport *tport) +{ + switch (tport->tport_proto_id) { + case SCSI_PROTOCOL_SAS: + return "SAS"; + case SCSI_PROTOCOL_FCP: + return "FCP"; + case SCSI_PROTOCOL_ISCSI: + return "iSCSI"; + default: + break; + } + + return "Unknown"; +} + +static u8 scsiback_get_fabric_proto_ident(struct se_portal_group *se_tpg) +{ + struct scsiback_tpg *tpg = container_of(se_tpg, + struct scsiback_tpg, se_tpg); + struct scsiback_tport *tport = tpg->tport; + + switch (tport->tport_proto_id) { + case SCSI_PROTOCOL_SAS: + return sas_get_fabric_proto_ident(se_tpg); + case SCSI_PROTOCOL_FCP: + return fc_get_fabric_proto_ident(se_tpg); + case SCSI_PROTOCOL_ISCSI: + return iscsi_get_fabric_proto_ident(se_tpg); + default: + pr_err("Unknown tport_proto_id: 0x%02x, using SAS emulation\n", + tport->tport_proto_id); + break; + } + + return sas_get_fabric_proto_ident(se_tpg); +} + +static char *scsiback_get_fabric_wwn(struct se_portal_group *se_tpg) +{ + struct scsiback_tpg *tpg = container_of(se_tpg, + struct scsiback_tpg, se_tpg); + struct scsiback_tport *tport = tpg->tport; + + return &tport->tport_name[0]; +} + +static u16 scsiback_get_tag(struct se_portal_group *se_tpg) +{ + struct scsiback_tpg *tpg = container_of(se_tpg, + struct scsiback_tpg, se_tpg); + return tpg->tport_tpgt; +} + +static u32 scsiback_get_default_depth(struct se_portal_group
*se_tpg) +{ + return 1; +} + +static u32 +scsiback_get_pr_transport_id(struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl, + struct t10_pr_registration *pr_reg, + int *format_code, + unsigned char *buf) +{ + struct scsiback_tpg *tpg = container_of(se_tpg, + struct scsiback_tpg, se_tpg); + struct scsiback_tport *tport = tpg->tport; + + switch (tport->tport_proto_id) { + case SCSI_PROTOCOL_SAS: + return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, + format_code, buf); + case SCSI_PROTOCOL_FCP: + return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg, + format_code, buf); + case SCSI_PROTOCOL_ISCSI: + return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg, + format_code, buf); + default: + pr_err("Unknown tport_proto_id: 0x%02x, using SAS emulation\n", + tport->tport_proto_id); + break; + } + + return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, + format_code, buf); +} + +static u32 +scsiback_get_pr_transport_id_len(struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl, + struct t10_pr_registration *pr_reg, + int *format_code) +{ + struct scsiback_tpg *tpg = container_of(se_tpg, + struct scsiback_tpg, se_tpg); + struct scsiback_tport *tport = tpg->tport; + + switch (tport->tport_proto_id) { + case SCSI_PROTOCOL_SAS: + return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, + format_code); + case SCSI_PROTOCOL_FCP: + return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, + format_code); + case SCSI_PROTOCOL_ISCSI: + return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, + format_code); + default: + pr_err("Unknown tport_proto_id: 0x%02x, using SAS emulation\n", + tport->tport_proto_id); + break; + } + + return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, + format_code); +} + +static char * +scsiback_parse_pr_out_transport_id(struct se_portal_group *se_tpg, + const char *buf, + u32 *out_tid_len, + char **port_nexus_ptr) +{ + struct scsiback_tpg *tpg = container_of(se_tpg, + struct scsiback_tpg, se_tpg); + struct scsiback_tport *tport = tpg->tport; + + switch (tport->tport_proto_id) { + case SCSI_PROTOCOL_SAS: + return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, + port_nexus_ptr); + case SCSI_PROTOCOL_FCP: + return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, + port_nexus_ptr); + case SCSI_PROTOCOL_ISCSI: + return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, + port_nexus_ptr); + default: + pr_err("Unknown tport_proto_id: 0x%02x, using SAS emulation\n", + tport->tport_proto_id); + break; + } + + return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, + port_nexus_ptr); +} + +static struct se_wwn * +scsiback_make_tport(struct target_fabric_configfs *tf, + struct config_group *group, + const char *name) +{ + struct scsiback_tport *tport; + char *ptr; + u64 wwpn = 0; + int off = 0; + + tport = kzalloc(sizeof(struct scsiback_tport), GFP_KERNEL); + if (!tport) { + pr_err("Unable to allocate struct scsiback_tport"); + return ERR_PTR(-ENOMEM); + } + tport->tport_wwpn = wwpn; + /* + * Determine the emulated Protocol Identifier and Target Port Name + * based on the incoming configfs directory name. + */ + ptr = strstr(name, "naa."); + if (ptr) { + tport->tport_proto_id = SCSI_PROTOCOL_SAS; + goto check_len; + } + ptr = strstr(name, "fc."); + if (ptr) { + tport->tport_proto_id = SCSI_PROTOCOL_FCP; + off = 3; /* Skip over "fc." 
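+ prefix, so that tport_name keeps only the WWN part of the name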
*/ + goto check_len; + } + ptr = strstr(name, "iqn."); + if (ptr) { + tport->tport_proto_id = SCSI_PROTOCOL_ISCSI; + goto check_len; + } + + pr_err("Unable to locate prefix for emulated Target Port: %s\n", name); + kfree(tport); + return ERR_PTR(-EINVAL); + +check_len: + if (strlen(name) >= VSCSI_NAMELEN) { + pr_err("Emulated %s Address: %s, exceeds max: %d\n", name, + scsiback_dump_proto_id(tport), VSCSI_NAMELEN); + kfree(tport); + return ERR_PTR(-EINVAL); + } + snprintf(&tport->tport_name[0], VSCSI_NAMELEN, "%s", &name[off]); + + pr_debug("xen-pvscsi: Allocated emulated Target %s Address: %s\n", + scsiback_dump_proto_id(tport), name); + + return &tport->tport_wwn; +} + +static void scsiback_drop_tport(struct se_wwn *wwn) +{ + struct scsiback_tport *tport = container_of(wwn, + struct scsiback_tport, tport_wwn); + + pr_debug("xen-pvscsi: Deallocating emulated Target %s Address: %s\n", + scsiback_dump_proto_id(tport), tport->tport_name); + + kfree(tport); +} + +static struct se_node_acl * +scsiback_alloc_fabric_acl(struct se_portal_group *se_tpg) +{ + struct scsiback_nacl *nacl; + + nacl = kzalloc(sizeof(struct scsiback_nacl), GFP_KERNEL); + if (!nacl) { + pr_err("Unable to allocate struct scsiback_nacl\n"); + return NULL; + } + + return &nacl->se_node_acl; +} + +static void +scsiback_release_fabric_acl(struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl) +{ + struct scsiback_nacl *nacl = container_of(se_nacl, + struct scsiback_nacl, se_node_acl); + kfree(nacl); +} + +static u32 scsiback_tpg_get_inst_index(struct se_portal_group *se_tpg) +{ + return 1; +} + +static struct se_node_acl * +scsiback_make_nodeacl(struct se_portal_group *se_tpg, + struct config_group *group, + const char *name) +{ + struct se_node_acl *se_nacl, *se_nacl_new; + struct scsiback_nacl *nacl; + u64 wwpn = 0; + u32 nexus_depth; + + se_nacl_new = scsiback_alloc_fabric_acl(se_tpg); + if (!se_nacl_new) + return ERR_PTR(-ENOMEM); + + nexus_depth = 1; + /* + * se_nacl_new may be released by core_tpg_add_initiator_node_acl() + * when converting a NodeACL from demo mode -> explicit + */ + se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new, + name, nexus_depth); + if (IS_ERR(se_nacl)) { + scsiback_release_fabric_acl(se_tpg, se_nacl_new); + return se_nacl; + } + /* + * Locate our struct scsiback_nacl and set the FC Nport WWPN + */ + nacl = container_of(se_nacl, struct scsiback_nacl, se_node_acl); + nacl->iport_wwpn = wwpn; + + return se_nacl; +} + +static void scsiback_drop_nodeacl(struct se_node_acl *se_acl) +{ + struct scsiback_nacl *nacl = container_of(se_acl, + struct scsiback_nacl, se_node_acl); + core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1); + kfree(nacl); +} + +static int scsiback_check_stop_free(struct se_cmd *se_cmd) +{ + return target_put_sess_cmd(se_cmd->se_sess, se_cmd); +} + +static void scsiback_release_cmd(struct se_cmd *se_cmd) +{ +} + +static int scsiback_shutdown_session(struct se_session *se_sess) +{ + return 0; +} + +static void scsiback_close_session(struct se_session *se_sess) +{ +} + +static u32 scsiback_sess_get_index(struct se_session *se_sess) +{ + return 0; +} + +static int scsiback_write_pending(struct se_cmd *se_cmd) +{ + /* Go ahead and process the write immediately */ + target_execute_cmd(se_cmd); + + return 0; +} + +static int scsiback_write_pending_status(struct se_cmd *se_cmd) +{ + return 0; +} + +static void scsiback_set_default_node_attrs(struct se_node_acl *nacl) +{ +} + +static u32 scsiback_get_task_tag(struct se_cmd *se_cmd) +{ + return 0; +} + +static int
+
+static int scsiback_get_cmd_state(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+static int scsiback_queue_data_in(struct se_cmd *se_cmd)
+{
+	struct vscsibk_pend *pending_req = container_of(se_cmd,
+				struct vscsibk_pend, se_cmd);
+
+	pending_req->result = SAM_STAT_GOOD;
+	scsiback_cmd_done(pending_req);
+	return 0;
+}
+
+static int scsiback_queue_status(struct se_cmd *se_cmd)
+{
+	struct vscsibk_pend *pending_req = container_of(se_cmd,
+				struct vscsibk_pend, se_cmd);
+
+	if (se_cmd->sense_buffer &&
+	    ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
+	     (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE)))
+		pending_req->result = (DRIVER_SENSE << 24) |
+				      SAM_STAT_CHECK_CONDITION;
+	else
+		pending_req->result = se_cmd->scsi_status;
+
+	scsiback_cmd_done(pending_req);
+	return 0;
+}
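scsiback_queue_status() packs the completion into the 32-bit result word Linux uses throughout the SCSI midlayer: status byte in bits 0-7, message byte in bits 8-15, host byte in bits 16-23, driver byte in bits 24-31, which is why valid sense data is reported as (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION. A small standalone sketch (constants copied from the kernel's SCSI headers):

    #include <stdint.h>
    #include <stdio.h>

    #define SAM_STAT_GOOD                   0x00
    #define SAM_STAT_CHECK_CONDITION        0x02
    #define DRIVER_SENSE                    0x08

    /* Linux packs the result as: driver<<24 | host<<16 | msg<<8 | status. */
    static uint32_t make_result(uint8_t driver, uint8_t host,
                                uint8_t msg, uint8_t status)
    {
            return (uint32_t)driver << 24 | (uint32_t)host << 16 |
                   (uint32_t)msg << 8 | status;
    }

    int main(void)
    {
            /* What scsiback_queue_status() reports when sense data is valid. */
            uint32_t res = make_result(DRIVER_SENSE, 0, 0,
                                       SAM_STAT_CHECK_CONDITION);

            printf("result = 0x%08x\n", (unsigned int)res);  /* 0x08000002 */
            return 0;
    }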
+
+static void scsiback_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+	struct scsiback_tmr *tmr = se_tmr->fabric_tmr_ptr;
+
+	atomic_set(&tmr->tmr_complete, 1);
+	wake_up(&tmr->tmr_wait);
+}
+
+static void scsiback_aborted_task(struct se_cmd *se_cmd)
+{
+	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+	struct scsiback_tmr *tmr = se_tmr->fabric_tmr_ptr;
+
+	atomic_set(&tmr->tmr_complete, 1);
+	wake_up(&tmr->tmr_wait);
+}
+
+static ssize_t scsiback_tpg_param_show_alias(struct se_portal_group *se_tpg,
+					     char *page)
+{
+	struct scsiback_tpg *tpg = container_of(se_tpg, struct scsiback_tpg,
+						se_tpg);
+	ssize_t rb;
+
+	mutex_lock(&tpg->tv_tpg_mutex);
+	rb = snprintf(page, PAGE_SIZE, "%s\n", tpg->param_alias);
+	mutex_unlock(&tpg->tv_tpg_mutex);
+
+	return rb;
+}
+
+static ssize_t scsiback_tpg_param_store_alias(struct se_portal_group *se_tpg,
+					      const char *page, size_t count)
+{
+	struct scsiback_tpg *tpg = container_of(se_tpg, struct scsiback_tpg,
+						se_tpg);
+	int len;
+
+	if (strlen(page) >= VSCSI_NAMELEN) {
+		pr_err("param alias: %s, exceeds max: %d\n", page,
+		       VSCSI_NAMELEN);
+		return -EINVAL;
+	}
+
+	mutex_lock(&tpg->tv_tpg_mutex);
+	len = snprintf(tpg->param_alias, VSCSI_NAMELEN, "%s", page);
+	if (tpg->param_alias[len - 1] == '\n')
+		tpg->param_alias[len - 1] = '\0';
+	mutex_unlock(&tpg->tv_tpg_mutex);
+
+	return count;
+}
+
+TF_TPG_PARAM_ATTR(scsiback, alias, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *scsiback_param_attrs[] = {
+	&scsiback_tpg_param_alias.attr,
+	NULL,
+};
+
+static int scsiback_make_nexus(struct scsiback_tpg *tpg,
+			       const char *name)
+{
+	struct se_portal_group *se_tpg;
+	struct se_session *se_sess;
+	struct scsiback_nexus *tv_nexus;
+
+	mutex_lock(&tpg->tv_tpg_mutex);
+	if (tpg->tpg_nexus) {
+		mutex_unlock(&tpg->tv_tpg_mutex);
+		pr_debug("tpg->tpg_nexus already exists\n");
+		return -EEXIST;
+	}
+	se_tpg = &tpg->se_tpg;
+
+	tv_nexus = kzalloc(sizeof(struct scsiback_nexus), GFP_KERNEL);
+	if (!tv_nexus) {
+		mutex_unlock(&tpg->tv_tpg_mutex);
+		pr_err("Unable to allocate struct scsiback_nexus\n");
+		return -ENOMEM;
+	}
+	/*
+	 * Initialize the struct se_session pointer
+	 */
+	tv_nexus->tvn_se_sess = transport_init_session(TARGET_PROT_DIN_PASS |
+						       TARGET_PROT_DOUT_PASS);
+	if (IS_ERR(tv_nexus->tvn_se_sess)) {
+		mutex_unlock(&tpg->tv_tpg_mutex);
+		kfree(tv_nexus);
+		return -ENOMEM;
+	}
+	se_sess = tv_nexus->tvn_se_sess;
+	/*
+	 * Since we are running in 'demo mode' this call will generate a
+	 * struct se_node_acl for the scsiback struct se_portal_group with
+	 * the SCSI Initiator port name of the passed configfs group 'name'.
+	 */
+	tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
+				se_tpg, (unsigned char *)name);
+	if (!tv_nexus->tvn_se_sess->se_node_acl) {
+		mutex_unlock(&tpg->tv_tpg_mutex);
+		pr_debug("core_tpg_check_initiator_node_acl() failed for %s\n",
+			 name);
+		goto out;
+	}
+	/*
+	 * Now register the TCM pvscsi virtual I_T Nexus as active with the
+	 * call to __transport_register_session()
+	 */
+	__transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
+				     tv_nexus->tvn_se_sess, tv_nexus);
+	tpg->tpg_nexus = tv_nexus;
+
+	mutex_unlock(&tpg->tv_tpg_mutex);
+	return 0;
+
+out:
+	transport_free_session(se_sess);
+	kfree(tv_nexus);
+	return -ENOMEM;
+}
+
+static int scsiback_drop_nexus(struct scsiback_tpg *tpg)
+{
+	struct se_session *se_sess;
+	struct scsiback_nexus *tv_nexus;
+
+	mutex_lock(&tpg->tv_tpg_mutex);
+	tv_nexus = tpg->tpg_nexus;
+	if (!tv_nexus) {
+		mutex_unlock(&tpg->tv_tpg_mutex);
+		return -ENODEV;
+	}
+
+	se_sess = tv_nexus->tvn_se_sess;
+	if (!se_sess) {
+		mutex_unlock(&tpg->tv_tpg_mutex);
+		return -ENODEV;
+	}
+
+	if (tpg->tv_tpg_port_count != 0) {
+		mutex_unlock(&tpg->tv_tpg_mutex);
+		pr_err("Unable to remove xen-pvscsi I_T Nexus with active TPG port count: %d\n",
+		       tpg->tv_tpg_port_count);
+		return -EBUSY;
+	}
+
+	if (tpg->tv_tpg_fe_count != 0) {
+		mutex_unlock(&tpg->tv_tpg_mutex);
+		pr_err("Unable to remove xen-pvscsi I_T Nexus with active TPG frontend count: %d\n",
+		       tpg->tv_tpg_fe_count);
+		return -EBUSY;
+	}
+
+	pr_debug("xen-pvscsi: Removing I_T Nexus to emulated %s Initiator Port: %s\n",
+		 scsiback_dump_proto_id(tpg->tport),
+		 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
+
+	/*
+	 * Release the SCSI I_T Nexus to the emulated xen-pvscsi Target Port
+	 */
+	transport_deregister_session(tv_nexus->tvn_se_sess);
+	tpg->tpg_nexus = NULL;
+	mutex_unlock(&tpg->tv_tpg_mutex);
+
+	kfree(tv_nexus);
+	return 0;
+}
+
+static ssize_t scsiback_tpg_show_nexus(struct se_portal_group *se_tpg,
+				       char *page)
+{
+	struct scsiback_tpg *tpg = container_of(se_tpg,
+				struct scsiback_tpg, se_tpg);
+	struct scsiback_nexus *tv_nexus;
+	ssize_t ret;
+
+	mutex_lock(&tpg->tv_tpg_mutex);
+	tv_nexus = tpg->tpg_nexus;
+	if (!tv_nexus) {
+		mutex_unlock(&tpg->tv_tpg_mutex);
+		return -ENODEV;
+	}
+	ret = snprintf(page, PAGE_SIZE, "%s\n",
+		       tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
+	mutex_unlock(&tpg->tv_tpg_mutex);
+
+	return ret;
+}
+
+static ssize_t scsiback_tpg_store_nexus(struct se_portal_group *se_tpg,
+					const char *page,
+					size_t count)
+{
+	struct scsiback_tpg *tpg = container_of(se_tpg,
+				struct scsiback_tpg, se_tpg);
+	struct scsiback_tport *tport_wwn = tpg->tport;
+	unsigned char i_port[VSCSI_NAMELEN], *ptr, *port_ptr;
+	int ret;
+	/*
+	 * Shutdown the active I_T nexus if 'NULL' is passed.
+	 */
+	if (!strncmp(page, "NULL", 4)) {
+		ret = scsiback_drop_nexus(tpg);
+		return (!ret) ? count : ret;
+	}
+	/*
+	 * Otherwise make sure the passed virtual Initiator port WWN matches
+	 * the fabric protocol_id set in scsiback_make_tport(), and call
+	 * scsiback_make_nexus().
+	 */
+	if (strlen(page) >= VSCSI_NAMELEN) {
+		pr_err("Emulated NAA SAS Address: %s, exceeds max: %d\n",
+		       page, VSCSI_NAMELEN);
+		return -EINVAL;
+	}
+	snprintf(&i_port[0], VSCSI_NAMELEN, "%s", page);
+
+	ptr = strstr(i_port, "naa.");
+	if (ptr) {
+		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
+			pr_err("Passed SAS Initiator Port %s does not match target port protoid: %s\n",
+			       i_port, scsiback_dump_proto_id(tport_wwn));
+			return -EINVAL;
+		}
+		port_ptr = &i_port[0];
+		goto check_newline;
+	}
+	ptr = strstr(i_port, "fc.");
+	if (ptr) {
+		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
+			pr_err("Passed FCP Initiator Port %s does not match target port protoid: %s\n",
+			       i_port, scsiback_dump_proto_id(tport_wwn));
+			return -EINVAL;
+		}
+		port_ptr = &i_port[3]; /* Skip over "fc." */
+		goto check_newline;
+	}
+	ptr = strstr(i_port, "iqn.");
+	if (ptr) {
+		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
+			pr_err("Passed iSCSI Initiator Port %s does not match target port protoid: %s\n",
+			       i_port, scsiback_dump_proto_id(tport_wwn));
+			return -EINVAL;
+		}
+		port_ptr = &i_port[0];
+		goto check_newline;
+	}
+	pr_err("Unable to locate prefix for emulated Initiator Port: %s\n",
+	       i_port);
+	return -EINVAL;
+	/*
+	 * Clear any trailing newline for the NAA WWN
+	 */
+check_newline:
+	if (i_port[strlen(i_port) - 1] == '\n')
+		i_port[strlen(i_port) - 1] = '\0';
+
+	ret = scsiback_make_nexus(tpg, port_ptr);
+	if (ret < 0)
+		return ret;
+
+	return count;
+}
+
+TF_TPG_BASE_ATTR(scsiback, nexus, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *scsiback_tpg_attrs[] = {
+	&scsiback_tpg_nexus.attr,
+	NULL,
+};
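The nexus attribute just defined is what userspace pokes to bring a TPG up: writing an initiator WWN creates the I_T nexus via scsiback_tpg_store_nexus(), and writing "NULL" drops it again. An illustration in C; the configfs mount point is the standard one, but the WWN and tpgt path components are made-up examples for an assumed pre-existing xen-pvscsi TPG:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            /* Illustrative path: wwn and tpgt must match an existing TPG. */
            const char *attr = "/sys/kernel/config/target/xen-pvscsi/"
                               "naa.600140512345678/tpgt_1/nexus";
            const char *initiator = "naa.600140598765432\n";

            int fd = open(attr, O_WRONLY);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* Triggers scsiback_tpg_store_nexus(); writing "NULL" instead
             * would tear the nexus down via scsiback_drop_nexus(). */
            if (write(fd, initiator, strlen(initiator)) < 0)
                    perror("write");
            close(fd);
            return 0;
    }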
+
+static ssize_t
+scsiback_wwn_show_attr_version(struct target_fabric_configfs *tf,
+			       char *page)
+{
+	return sprintf(page, "xen-pvscsi fabric module %s on %s/%s on "
+		       UTS_RELEASE"\n",
+		       VSCSI_VERSION, utsname()->sysname, utsname()->machine);
+}
+
+TF_WWN_ATTR_RO(scsiback, version);
+
+static struct configfs_attribute *scsiback_wwn_attrs[] = {
+	&scsiback_wwn_version.attr,
+	NULL,
+};
+
+static char *scsiback_get_fabric_name(void)
+{
+	return "xen-pvscsi";
+}
+
+static int scsiback_port_link(struct se_portal_group *se_tpg,
+			      struct se_lun *lun)
+{
+	struct scsiback_tpg *tpg = container_of(se_tpg,
+				struct scsiback_tpg, se_tpg);
+
+	mutex_lock(&scsiback_mutex);
+
+	mutex_lock(&tpg->tv_tpg_mutex);
+	tpg->tv_tpg_port_count++;
+	mutex_unlock(&tpg->tv_tpg_mutex);
+
+	mutex_unlock(&scsiback_mutex);
+
+	return 0;
+}
+
+static void scsiback_port_unlink(struct se_portal_group *se_tpg,
+				 struct se_lun *lun)
+{
+	struct scsiback_tpg *tpg = container_of(se_tpg,
+				struct scsiback_tpg, se_tpg);
+
+	mutex_lock(&scsiback_mutex);
+
+	mutex_lock(&tpg->tv_tpg_mutex);
+	tpg->tv_tpg_port_count--;
+	mutex_unlock(&tpg->tv_tpg_mutex);
+
+	mutex_unlock(&scsiback_mutex);
+}
+
+static struct se_portal_group *
+scsiback_make_tpg(struct se_wwn *wwn,
+		  struct config_group *group,
+		  const char *name)
+{
+	struct scsiback_tport *tport = container_of(wwn,
+				struct scsiback_tport, tport_wwn);
+	struct scsiback_tpg *tpg;
+	unsigned long tpgt;
+	int ret;
+
+	if (strstr(name, "tpgt_") != name)
+		return ERR_PTR(-EINVAL);
+	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
+		return ERR_PTR(-EINVAL);
+
+	tpg = kzalloc(sizeof(struct scsiback_tpg), GFP_KERNEL);
+	if (!tpg) {
+		pr_err("Unable to allocate struct scsiback_tpg");
+		return ERR_PTR(-ENOMEM);
+	}
+	mutex_init(&tpg->tv_tpg_mutex);
+	INIT_LIST_HEAD(&tpg->tv_tpg_list);
+	INIT_LIST_HEAD(&tpg->info_list);
+	tpg->tport = tport;
+	tpg->tport_tpgt = tpgt;
+
+	ret = core_tpg_register(&scsiback_fabric_configfs->tf_ops, wwn,
+				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+	if (ret < 0) {
+		kfree(tpg);
+		return NULL;
+	}
+	mutex_lock(&scsiback_mutex);
+	list_add_tail(&tpg->tv_tpg_list, &scsiback_list);
+	mutex_unlock(&scsiback_mutex);
+
+	return &tpg->se_tpg;
+}
+
+static void scsiback_drop_tpg(struct se_portal_group *se_tpg)
+{
+	struct scsiback_tpg *tpg = container_of(se_tpg,
+				struct scsiback_tpg, se_tpg);
+
+	mutex_lock(&scsiback_mutex);
+	list_del(&tpg->tv_tpg_list);
+	mutex_unlock(&scsiback_mutex);
+	/*
+	 * Release the virtual I_T Nexus for this xen-pvscsi TPG
+	 */
+	scsiback_drop_nexus(tpg);
+	/*
+	 * Deregister the se_tpg from TCM.
+	 */
+	core_tpg_deregister(se_tpg);
+	kfree(tpg);
+}
+
+static int scsiback_check_true(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+static int scsiback_check_false(struct se_portal_group *se_tpg)
+{
+	return 0;
+}
+
+static struct target_core_fabric_ops scsiback_ops = {
+	.get_fabric_name		= scsiback_get_fabric_name,
+	.get_fabric_proto_ident		= scsiback_get_fabric_proto_ident,
+	.tpg_get_wwn			= scsiback_get_fabric_wwn,
+	.tpg_get_tag			= scsiback_get_tag,
+	.tpg_get_default_depth		= scsiback_get_default_depth,
+	.tpg_get_pr_transport_id	= scsiback_get_pr_transport_id,
+	.tpg_get_pr_transport_id_len	= scsiback_get_pr_transport_id_len,
+	.tpg_parse_pr_out_transport_id	= scsiback_parse_pr_out_transport_id,
+	.tpg_check_demo_mode		= scsiback_check_true,
+	.tpg_check_demo_mode_cache	= scsiback_check_true,
+	.tpg_check_demo_mode_write_protect = scsiback_check_false,
+	.tpg_check_prod_mode_write_protect = scsiback_check_false,
+	.tpg_alloc_fabric_acl		= scsiback_alloc_fabric_acl,
+	.tpg_release_fabric_acl		= scsiback_release_fabric_acl,
+	.tpg_get_inst_index		= scsiback_tpg_get_inst_index,
+	.check_stop_free		= scsiback_check_stop_free,
+	.release_cmd			= scsiback_release_cmd,
+	.put_session			= NULL,
+	.shutdown_session		= scsiback_shutdown_session,
+	.close_session			= scsiback_close_session,
+	.sess_get_index			= scsiback_sess_get_index,
+	.sess_get_initiator_sid		= NULL,
+	.write_pending			= scsiback_write_pending,
+	.write_pending_status		= scsiback_write_pending_status,
+	.set_default_node_attributes	= scsiback_set_default_node_attrs,
+	.get_task_tag			= scsiback_get_task_tag,
+	.get_cmd_state			= scsiback_get_cmd_state,
+	.queue_data_in			= scsiback_queue_data_in,
+	.queue_status			= scsiback_queue_status,
+	.queue_tm_rsp			= scsiback_queue_tm_rsp,
+	.aborted_task			= scsiback_aborted_task,
+	/*
+	 * Setup callbacks for generic logic in target_core_fabric_configfs.c
+	 */
+	.fabric_make_wwn		= scsiback_make_tport,
+	.fabric_drop_wwn		= scsiback_drop_tport,
+	.fabric_make_tpg		= scsiback_make_tpg,
+	.fabric_drop_tpg		= scsiback_drop_tpg,
+	.fabric_post_link		= scsiback_port_link,
+	.fabric_pre_unlink		= scsiback_port_unlink,
+	.fabric_make_np			= NULL,
+	.fabric_drop_np			= NULL,
+	.fabric_make_nodeacl		= scsiback_make_nodeacl,
+	.fabric_drop_nodeacl		= scsiback_drop_nodeacl,
+};
+
+static int scsiback_register_configfs(void)
+{
+	struct target_fabric_configfs *fabric;
+	int ret;
+
+	pr_debug("xen-pvscsi: fabric module %s on %s/%s on "UTS_RELEASE"\n",
+		 VSCSI_VERSION, utsname()->sysname, utsname()->machine);
+	/*
+	 * Register the top level struct config_item_type with TCM core
+	 */
+	fabric = target_fabric_configfs_init(THIS_MODULE, "xen-pvscsi");
+	if (IS_ERR(fabric)) {
+		pr_err("target_fabric_configfs_init() failed\n");
+		return PTR_ERR(fabric);
+	}
+	/*
+	 * Setup fabric->tf_ops from our local scsiback_ops
+	 */
+	fabric->tf_ops = scsiback_ops;
+	/*
+	 * Setup default attribute lists for various fabric->tf_cit_tmpl
+	 */
+	fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = scsiback_wwn_attrs;
+	fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = scsiback_tpg_attrs;
+	fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
+	fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = scsiback_param_attrs;
+	fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
+	fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+	fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+	fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+	fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+	/*
+	 * Register the fabric for use within TCM
+	 */
+	ret = target_fabric_configfs_register(fabric);
+	if (ret < 0) {
+		pr_err("xen-pvscsi: target_fabric_configfs_register() failed\n");
+		return ret;
+	}
+	/*
+	 * Setup our local pointer to *fabric
+	 */
+	scsiback_fabric_configfs = fabric;
+	pr_debug("xen-pvscsi: Set fabric -> scsiback_fabric_configfs\n");
+	return 0;
+}
+
+static void scsiback_deregister_configfs(void)
+{
+	if (!scsiback_fabric_configfs)
+		return;
+
+	target_fabric_configfs_deregister(scsiback_fabric_configfs);
+	scsiback_fabric_configfs = NULL;
+	pr_debug("xen-pvscsi: Cleared scsiback_fabric_configfs\n");
+}
+
+static const struct xenbus_device_id scsiback_ids[] = {
+	{ "vscsi" },
+	{ "" }
+};
+
+static DEFINE_XENBUS_DRIVER(scsiback, ,
+	.probe			= scsiback_probe,
+	.remove			= scsiback_remove,
+	.otherend_changed	= scsiback_frontend_changed
+);
+
+static void scsiback_init_pend(void *p)
+{
+	struct vscsibk_pend *pend = p;
+	int i;
+
+	memset(pend, 0, sizeof(*pend));
+	for (i = 0; i < VSCSI_MAX_GRANTS; i++)
+		pend->grant_handles[i] = SCSIBACK_INVALID_HANDLE;
+}
+
+static int __init scsiback_init(void)
+{
+	int ret;
+
+	if (!xen_domain())
+		return -ENODEV;
+
+	scsiback_cachep = kmem_cache_create("vscsiif_cache",
+			sizeof(struct vscsibk_pend), 0, 0, scsiback_init_pend);
+	if (!scsiback_cachep) {
+		pr_err("xen-pvscsi: can't init scsi cache\n");
+		return -ENOMEM;
+	}
+
+	ret = xenbus_register_backend(&scsiback_driver);
+	if (ret)
+		goto out_cache_destroy;
+
+	ret = scsiback_register_configfs();
+	if (ret)
+		goto out_unregister_xenbus;
+
+	return 0;
+
+out_unregister_xenbus:
+	xenbus_unregister_driver(&scsiback_driver);
+out_cache_destroy:
+	kmem_cache_destroy(scsiback_cachep);
+	pr_err("xen-pvscsi: %s: error %d\n", __func__, ret);
+	return ret;
+}
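scsiback_init() passes scsiback_init_pend() as the slab constructor, so pending-request objects come pre-initialized with invalid grant handles. Note the constructor runs when the allocator first sets up an object in a slab page, not on every kmem_cache_alloc(). A minimal module sketch of the same pattern, with demo names that are not part of this patch:

    #include <linux/module.h>
    #include <linux/slab.h>

    struct demo_req {
            int handle;
    };

    static struct kmem_cache *demo_cachep;

    /* Runs once per object when a slab is populated, like scsiback_init_pend(). */
    static void demo_ctor(void *p)
    {
            struct demo_req *req = p;

            req->handle = -1;       /* mirrors SCSIBACK_INVALID_HANDLE-style init */
    }

    static int __init demo_init(void)
    {
            struct demo_req *req;

            demo_cachep = kmem_cache_create("demo_cache",
                            sizeof(struct demo_req), 0, 0, demo_ctor);
            if (!demo_cachep)
                    return -ENOMEM;

            req = kmem_cache_alloc(demo_cachep, GFP_KERNEL);
            if (req) {
                    pr_info("demo: ctor preset handle = %d\n", req->handle);
                    /* Objects should be returned in constructed state. */
                    req->handle = -1;
                    kmem_cache_free(demo_cachep, req);
            }
            return 0;
    }

    static void __exit demo_exit(void)
    {
            kmem_cache_destroy(demo_cachep);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");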
+
+static void __exit scsiback_exit(void)
+{
+	struct page *page;
+
+	while (free_pages_num) {
+		if (get_free_page(&page))
+			BUG();
+		free_xenballooned_pages(1, &page);
+	}
+	scsiback_deregister_configfs();
+	xenbus_unregister_driver(&scsiback_driver);
+	kmem_cache_destroy(scsiback_cachep);
+}
+
+module_init(scsiback_init);
+module_exit(scsiback_exit);
+
+MODULE_DESCRIPTION("Xen SCSI backend driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("xen-backend:vscsi");
--
1.8.4.5

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel