[Xen-changelog] [qemu-xen master] Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging
=== This changeset includes merge from high-traffic branch ===
Commits on that branch are not reported individually.
commit c2a4b384f5484fed94b4466151c7f9a705414a57
Merge: 4eb28abd52d48657cff6ff45e8dbbbefe4dbb414 53000638f233d6ba1d584a68b74f2cde79615b80
Author: Stefan Hajnoczi <stefanha@xxxxxxxxxx>
AuthorDate: Thu Nov 3 14:41:53 2016 +0000
Commit: Stefan Hajnoczi <stefanha@xxxxxxxxxx>
CommitDate: Thu Nov 3 14:41:53 2016 +0000
Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging
virtio, pc: fixes and features
nvdimm hotplug support
virtio migration and ioeventfd rework
virtio crypto device
ipmi fixes
Signed-off-by: Michael S. Tsirkin <mst@xxxxxxxxxx>
# gpg: Signature made Tue 01 Nov 2016 05:23:40 PM GMT
# gpg: using RSA key 0x281F0DB8D28D5469
# gpg: Good signature from "Michael S. Tsirkin <mst@xxxxxxxxxx>"
# gpg: aka "Michael S. Tsirkin <mst@xxxxxxxxxx>"
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17 0970 C350 3912 AFBE 8E67
# Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA 8A0D 281F 0DB8 D28D 5469
* remotes/mst/tags/for_upstream: (47 commits)
acpi: fix assert failure caused by commit 35c5a52d
acpi/ipmi: Initialize the fwinfo before fetching it
ipmi: Add graceful shutdown handling to the external BMC
ipmi: fix build config variable name for ipmi_bmc_extern.o
ipmi: Implement shutdown via ACPI overtemp
ipmi: chassis poweroff should use qemu_system_shutdown_request()
ipmi_bmc_sim: Remove an unnecessary mutex
ipmi: Remove hotplug from IPMI BMCs
pc: memhp: enable nvdimm device hotplug
nvdimm acpi: introduce _FIT
nvdimm acpi: introduce fit buffer
nvdimm acpi: prebuild nvdimm devices for available slots
nvdimm acpi: use common macros instead of magic names
acpi nvdimm: rename result_size to dsm_out_buf_siz
nvdimm acpi: compile nvdimm acpi code arch-independently
acpi nvdimm: fix Arg6 usage
acpi nvdimm: fix ARG3 conflict
acpi nvdimm: fix device physical address base
acpi nvdimm: fix OperationRegion definition
acpi nvdimm: fix wrong buffer size returned by DSM method
...
Signed-off-by: Stefan Hajnoczi <stefanha@xxxxxxxxxx>
MAINTAINERS | 13 +
backends/Makefile.objs | 3 +
backends/cryptodev-builtin.c | 361 ++++++++++
backends/cryptodev.c | 245 +++++++
docs/specs/acpi_mem_hotplug.txt | 3 +
docs/specs/acpi_nvdimm.txt | 58 +-
hw/acpi/Makefile.objs | 2 +-
hw/acpi/ipmi.c | 1 +
hw/acpi/memory_hotplug.c | 31 +-
hw/acpi/nvdimm.c | 468 ++++++++++---
hw/block/dataplane/virtio-blk.c | 73 +-
hw/block/dataplane/virtio-blk.h | 6 +-
hw/block/virtio-blk.c | 15 +-
hw/core/hotplug.c | 11 +
hw/core/qdev.c | 20 +-
hw/i386/acpi-build.c | 9 +-
hw/i386/pc.c | 31 +
hw/ipmi/Makefile.objs | 2 +-
hw/ipmi/ipmi.c | 10 +-
hw/ipmi/ipmi_bmc_extern.c | 12 +-
hw/ipmi/ipmi_bmc_sim.c | 7 +-
hw/mem/nvdimm.c | 4 -
hw/s390x/virtio-ccw.c | 44 +-
hw/s390x/virtio-ccw.h | 2 -
hw/scsi/virtio-scsi-dataplane.c | 56 +-
hw/scsi/virtio-scsi.c | 24 +-
hw/virtio/Makefile.objs | 2 +
hw/virtio/vhost.c | 5 +-
hw/virtio/virtio-balloon.c | 31 +-
hw/virtio/virtio-bus.c | 154 ++---
hw/virtio/virtio-crypto-pci.c | 77 +++
hw/virtio/virtio-crypto.c | 898 +++++++++++++++++++++++++
hw/virtio/virtio-mmio.c | 35 +-
hw/virtio/virtio-pci.c | 40 +-
hw/virtio/virtio-pci.h | 17 +-
hw/virtio/virtio.c | 153 +++--
include/hw/acpi/acpi_dev_interface.h | 1 +
include/hw/hotplug.h | 10 +
include/hw/mem/nvdimm.h | 27 +-
include/hw/virtio/virtio-bus.h | 27 +-
include/hw/virtio/virtio-crypto.h | 101 +++
include/hw/virtio/virtio-scsi.h | 6 +-
include/hw/virtio/virtio.h | 15 +-
include/standard-headers/linux/virtio_crypto.h | 429 ++++++++++++
include/standard-headers/linux/virtio_ids.h | 2 +-
include/sysemu/cryptodev.h | 298 ++++++++
qemu-options.hx | 18 +
tests/ipmi-bt-test.c | 2 +-
48 files changed, 3352 insertions(+), 507 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 653f52e..82c814a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1016,6 +1016,13 @@ F: include/sysemu/rng*.h
F: backends/rng*.c
F: tests/virtio-rng-test.c
+virtio-crypto
+M: Gonglei <arei.gonglei@xxxxxxxxxx>
+S: Supported
+F: hw/virtio/virtio-crypto.c
+F: hw/virtio/virtio-crypto-pci.c
+F: include/hw/virtio/virtio-crypto.h
+
nvme
M: Keith Busch <keith.busch@xxxxxxxxx>
L: qemu-block@xxxxxxxxxx
@@ -1261,6 +1268,12 @@ S: Maintained
F: backends/hostmem*.c
F: include/sysemu/hostmem.h
+Cryptodev Backends
+M: Gonglei <arei.gonglei@xxxxxxxxxx>
+S: Maintained
+F: include/sysemu/cryptodev*.h
+F: backends/cryptodev*.c
+
QAPI
M: Markus Armbruster <armbru@xxxxxxxxxx>
M: Michael Roth <mdroth@xxxxxxxxxxxxxxxxxx>
diff --git a/backends/Makefile.objs b/backends/Makefile.objs
index 31a3a89..1846998 100644
--- a/backends/Makefile.objs
+++ b/backends/Makefile.objs
@@ -9,3 +9,6 @@ common-obj-$(CONFIG_TPM) += tpm.o
common-obj-y += hostmem.o hostmem-ram.o
common-obj-$(CONFIG_LINUX) += hostmem-file.o
+
+common-obj-y += cryptodev.o
+common-obj-y += cryptodev-builtin.o
diff --git a/backends/cryptodev-builtin.c b/backends/cryptodev-builtin.c
new file mode 100644
index 0000000..eda954b
--- /dev/null
+++ b/backends/cryptodev-builtin.c
@@ -0,0 +1,361 @@
+/*
+ * QEMU Cryptodev backend for QEMU cipher APIs
+ *
+ * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * Authors:
+ * Gonglei <arei.gonglei@xxxxxxxxxx>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "sysemu/cryptodev.h"
+#include "hw/boards.h"
+#include "qapi/error.h"
+#include "standard-headers/linux/virtio_crypto.h"
+#include "crypto/cipher.h"
+
+
+/**
+ * @TYPE_CRYPTODEV_BACKEND_BUILTIN:
+ * name of backend that uses QEMU cipher API
+ */
+#define TYPE_CRYPTODEV_BACKEND_BUILTIN "cryptodev-backend-builtin"
+
+#define CRYPTODEV_BACKEND_BUILTIN(obj) \
+ OBJECT_CHECK(CryptoDevBackendBuiltin, \
+ (obj), TYPE_CRYPTODEV_BACKEND_BUILTIN)
+
+typedef struct CryptoDevBackendBuiltin
+ CryptoDevBackendBuiltin;
+
+typedef struct CryptoDevBackendBuiltinSession {
+ QCryptoCipher *cipher;
+ uint8_t direction; /* encryption or decryption */
+ uint8_t type; /* cipher? hash? aead? */
+ QTAILQ_ENTRY(CryptoDevBackendBuiltinSession) next;
+} CryptoDevBackendBuiltinSession;
+
+/* Max number of symmetric sessions */
+#define MAX_NUM_SESSIONS 256
+
+#define CRYPTODEV_BUITLIN_MAX_AUTH_KEY_LEN 512
+#define CRYPTODEV_BUITLIN_MAX_CIPHER_KEY_LEN 64
+
+struct CryptoDevBackendBuiltin {
+ CryptoDevBackend parent_obj;
+
+ CryptoDevBackendBuiltinSession *sessions[MAX_NUM_SESSIONS];
+};
+
+static void cryptodev_builtin_init(
+ CryptoDevBackend *backend, Error **errp)
+{
+ /* Only support one queue */
+ int queues = backend->conf.peers.queues;
+ CryptoDevBackendClient *cc;
+
+ if (queues != 1) {
+ error_setg(errp,
+ "Only support one queue in cryptdov-builtin backend");
+ return;
+ }
+
+ cc = cryptodev_backend_new_client(
+ "cryptodev-builtin", NULL);
+ cc->info_str = g_strdup_printf("cryptodev-builtin0");
+ cc->queue_index = 0;
+ backend->conf.peers.ccs[0] = cc;
+
+ backend->conf.crypto_services =
+ 1u << VIRTIO_CRYPTO_SERVICE_CIPHER |
+ 1u << VIRTIO_CRYPTO_SERVICE_HASH |
+ 1u << VIRTIO_CRYPTO_SERVICE_MAC;
+ backend->conf.cipher_algo_l = 1u << VIRTIO_CRYPTO_CIPHER_AES_CBC;
+ backend->conf.hash_algo = 1u << VIRTIO_CRYPTO_HASH_SHA1;
+ /*
+ * Set the maximum length of a crypto request.
+ * Why this value? It simply avoids overflow when allocating
+ * memory for each crypto request.
+ */
+ backend->conf.max_size = LONG_MAX - sizeof(CryptoDevBackendSymOpInfo);
+ backend->conf.max_cipher_key_len = CRYPTODEV_BUITLIN_MAX_CIPHER_KEY_LEN;
+ backend->conf.max_auth_key_len = CRYPTODEV_BUITLIN_MAX_AUTH_KEY_LEN;
+}
+
+static int
+cryptodev_builtin_get_unused_session_index(
+ CryptoDevBackendBuiltin *builtin)
+{
+ size_t i;
+
+ for (i = 0; i < MAX_NUM_SESSIONS; i++) {
+ if (builtin->sessions[i] == NULL) {
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+static int
+cryptodev_builtin_get_aes_algo(uint32_t key_len, Error **errp)
+{
+ int algo;
+
+ if (key_len == 128 / 8) {
+ algo = QCRYPTO_CIPHER_ALG_AES_128;
+ } else if (key_len == 192 / 8) {
+ algo = QCRYPTO_CIPHER_ALG_AES_192;
+ } else if (key_len == 256 / 8) {
+ algo = QCRYPTO_CIPHER_ALG_AES_256;
+ } else {
+ error_setg(errp, "Unsupported key length :%u", key_len);
+ return -1;
+ }
+
+ return algo;
+}
+
+static int cryptodev_builtin_create_cipher_session(
+ CryptoDevBackendBuiltin *builtin,
+ CryptoDevBackendSymSessionInfo *sess_info,
+ Error **errp)
+{
+ int algo;
+ int mode;
+ QCryptoCipher *cipher;
+ int index;
+ CryptoDevBackendBuiltinSession *sess;
+
+ if (sess_info->op_type != VIRTIO_CRYPTO_SYM_OP_CIPHER) {
+ error_setg(errp, "Unsupported optype :%u", sess_info->op_type);
+ return -1;
+ }
+
+ index = cryptodev_builtin_get_unused_session_index(builtin);
+ if (index < 0) {
+ error_setg(errp, "Total number of sessions created exceeds %u",
+ MAX_NUM_SESSIONS);
+ return -1;
+ }
+
+ switch (sess_info->cipher_alg) {
+ case VIRTIO_CRYPTO_CIPHER_AES_ECB:
+ algo = cryptodev_builtin_get_aes_algo(sess_info->key_len,
+ errp);
+ if (algo < 0) {
+ return -1;
+ }
+ mode = QCRYPTO_CIPHER_MODE_ECB;
+ break;
+ case VIRTIO_CRYPTO_CIPHER_AES_CBC:
+ algo = cryptodev_builtin_get_aes_algo(sess_info->key_len,
+ errp);
+ if (algo < 0) {
+ return -1;
+ }
+ mode = QCRYPTO_CIPHER_MODE_CBC;
+ break;
+ case VIRTIO_CRYPTO_CIPHER_AES_CTR:
+ algo = cryptodev_builtin_get_aes_algo(sess_info->key_len,
+ errp);
+ if (algo < 0) {
+ return -1;
+ }
+ mode = QCRYPTO_CIPHER_MODE_CTR;
+ break;
+ case VIRTIO_CRYPTO_CIPHER_DES_ECB:
+ algo = QCRYPTO_CIPHER_ALG_DES_RFB;
+ mode = QCRYPTO_CIPHER_MODE_ECB;
+ break;
+ default:
+ error_setg(errp, "Unsupported cipher alg :%u",
+ sess_info->cipher_alg);
+ return -1;
+ }
+
+ cipher = qcrypto_cipher_new(algo, mode,
+ sess_info->cipher_key,
+ sess_info->key_len,
+ errp);
+ if (!cipher) {
+ return -1;
+ }
+
+ sess = g_new0(CryptoDevBackendBuiltinSession, 1);
+ sess->cipher = cipher;
+ sess->direction = sess_info->direction;
+ sess->type = sess_info->op_type;
+
+ builtin->sessions[index] = sess;
+
+ return index;
+}
+
+static int64_t cryptodev_builtin_sym_create_session(
+ CryptoDevBackend *backend,
+ CryptoDevBackendSymSessionInfo *sess_info,
+ uint32_t queue_index, Error **errp)
+{
+ CryptoDevBackendBuiltin *builtin =
+ CRYPTODEV_BACKEND_BUILTIN(backend);
+ int64_t session_id = -1;
+ int ret;
+
+ switch (sess_info->op_code) {
+ case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION:
+ ret = cryptodev_builtin_create_cipher_session(
+ builtin, sess_info, errp);
+ if (ret < 0) {
+ return ret;
+ } else {
+ session_id = ret;
+ }
+ break;
+ case VIRTIO_CRYPTO_HASH_CREATE_SESSION:
+ case VIRTIO_CRYPTO_MAC_CREATE_SESSION:
+ default:
+ error_setg(errp, "Unsupported opcode :%" PRIu32 "",
+ sess_info->op_code);
+ return -1;
+ }
+
+ return session_id;
+}
+
+static int cryptodev_builtin_sym_close_session(
+ CryptoDevBackend *backend,
+ uint64_t session_id,
+ uint32_t queue_index, Error **errp)
+{
+ CryptoDevBackendBuiltin *builtin =
+ CRYPTODEV_BACKEND_BUILTIN(backend);
+
+ if (session_id >= MAX_NUM_SESSIONS ||
+ builtin->sessions[session_id] == NULL) {
+ error_setg(errp, "Cannot find a valid session id: %" PRIu64 "",
+ session_id);
+ return -1;
+ }
+
+ qcrypto_cipher_free(builtin->sessions[session_id]->cipher);
+ g_free(builtin->sessions[session_id]);
+ builtin->sessions[session_id] = NULL;
+ return 0;
+}
+
+static int cryptodev_builtin_sym_operation(
+ CryptoDevBackend *backend,
+ CryptoDevBackendSymOpInfo *op_info,
+ uint32_t queue_index, Error **errp)
+{
+ CryptoDevBackendBuiltin *builtin =
+ CRYPTODEV_BACKEND_BUILTIN(backend);
+ CryptoDevBackendBuiltinSession *sess;
+ int ret;
+
+ if (op_info->session_id >= MAX_NUM_SESSIONS ||
+ builtin->sessions[op_info->session_id] == NULL) {
+ error_setg(errp, "Cannot find a valid session id: %" PRIu64 "",
+ op_info->session_id);
+ return -VIRTIO_CRYPTO_INVSESS;
+ }
+
+ if (op_info->op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
+ error_setg(errp,
+ "Algorithm chain is unsupported for cryptdoev-builtin");
+ return -VIRTIO_CRYPTO_NOTSUPP;
+ }
+
+ sess = builtin->sessions[op_info->session_id];
+
+ ret = qcrypto_cipher_setiv(sess->cipher, op_info->iv,
+ op_info->iv_len, errp);
+ if (ret < 0) {
+ return -VIRTIO_CRYPTO_ERR;
+ }
+
+ if (sess->direction == VIRTIO_CRYPTO_OP_ENCRYPT) {
+ ret = qcrypto_cipher_encrypt(sess->cipher, op_info->src,
+ op_info->dst, op_info->src_len, errp);
+ if (ret < 0) {
+ return -VIRTIO_CRYPTO_ERR;
+ }
+ } else {
+ ret = qcrypto_cipher_decrypt(sess->cipher, op_info->src,
+ op_info->dst, op_info->src_len, errp);
+ if (ret < 0) {
+ return -VIRTIO_CRYPTO_ERR;
+ }
+ }
+ return VIRTIO_CRYPTO_OK;
+}
+
+static void cryptodev_builtin_cleanup(
+ CryptoDevBackend *backend,
+ Error **errp)
+{
+ CryptoDevBackendBuiltin *builtin =
+ CRYPTODEV_BACKEND_BUILTIN(backend);
+ size_t i;
+ int queues = backend->conf.peers.queues;
+ CryptoDevBackendClient *cc;
+
+ for (i = 0; i < MAX_NUM_SESSIONS; i++) {
+ if (builtin->sessions[i] != NULL) {
+ cryptodev_builtin_sym_close_session(
+ backend, i, 0, errp);
+ }
+ }
+
+ assert(queues == 1);
+
+ for (i = 0; i < queues; i++) {
+ cc = backend->conf.peers.ccs[i];
+ if (cc) {
+ cryptodev_backend_free_client(cc);
+ backend->conf.peers.ccs[i] = NULL;
+ }
+ }
+}
+
+static void
+cryptodev_builtin_class_init(ObjectClass *oc, void *data)
+{
+ CryptoDevBackendClass *bc = CRYPTODEV_BACKEND_CLASS(oc);
+
+ bc->init = cryptodev_builtin_init;
+ bc->cleanup = cryptodev_builtin_cleanup;
+ bc->create_session = cryptodev_builtin_sym_create_session;
+ bc->close_session = cryptodev_builtin_sym_close_session;
+ bc->do_sym_op = cryptodev_builtin_sym_operation;
+}
+
+static const TypeInfo cryptodev_builtin_info = {
+ .name = TYPE_CRYPTODEV_BACKEND_BUILTIN,
+ .parent = TYPE_CRYPTODEV_BACKEND,
+ .class_init = cryptodev_builtin_class_init,
+ .instance_size = sizeof(CryptoDevBackendBuiltin),
+};
+
+static void
+cryptodev_builtin_register_types(void)
+{
+ type_register_static(&cryptodev_builtin_info);
+}
+
+type_init(cryptodev_builtin_register_types);
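A minimal usage sketch for the new backend (assuming the command-line wiring
added by the qemu-options.hx hunk in this series; the "cryptodev" property
name is taken from the virtio-crypto frontend added alongside):

    qemu-system-x86_64 ... \
        -object cryptodev-backend-builtin,id=cryptodev0 \
        -device virtio-crypto-pci,id=crypto0,cryptodev=cryptodev0

The object type name matches TYPE_CRYPTODEV_BACKEND_BUILTIN above; the
builtin backend accepts only a single queue, so no extra queue option is
needed.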
diff --git a/backends/cryptodev.c b/backends/cryptodev.c
new file mode 100644
index 0000000..4a49f97
--- /dev/null
+++ b/backends/cryptodev.c
@@ -0,0 +1,245 @@
+/*
+ * QEMU Crypto Device Implementation
+ *
+ * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * Authors:
+ * Gonglei <arei.gonglei@xxxxxxxxxx>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "sysemu/cryptodev.h"
+#include "hw/boards.h"
+#include "qapi/error.h"
+#include "qapi/visitor.h"
+#include "qapi-types.h"
+#include "qapi-visit.h"
+#include "qemu/config-file.h"
+#include "qom/object_interfaces.h"
+#include "hw/virtio/virtio-crypto.h"
+
+
+static QTAILQ_HEAD(, CryptoDevBackendClient) crypto_clients;
+
+
+CryptoDevBackendClient *
+cryptodev_backend_new_client(const char *model,
+ const char *name)
+{
+ CryptoDevBackendClient *cc;
+
+ cc = g_malloc0(sizeof(CryptoDevBackendClient));
+ cc->model = g_strdup(model);
+ if (name) {
+ cc->name = g_strdup(name);
+ }
+
+ QTAILQ_INSERT_TAIL(&crypto_clients, cc, next);
+
+ return cc;
+}
+
+void cryptodev_backend_free_client(
+ CryptoDevBackendClient *cc)
+{
+ QTAILQ_REMOVE(&crypto_clients, cc, next);
+ g_free(cc->name);
+ g_free(cc->model);
+ g_free(cc->info_str);
+ g_free(cc);
+}
+
+void cryptodev_backend_cleanup(
+ CryptoDevBackend *backend,
+ Error **errp)
+{
+ CryptoDevBackendClass *bc =
+ CRYPTODEV_BACKEND_GET_CLASS(backend);
+
+ if (bc->cleanup) {
+ bc->cleanup(backend, errp);
+ }
+
+ backend->ready = false;
+}
+
+int64_t cryptodev_backend_sym_create_session(
+ CryptoDevBackend *backend,
+ CryptoDevBackendSymSessionInfo *sess_info,
+ uint32_t queue_index, Error **errp)
+{
+ CryptoDevBackendClass *bc =
+ CRYPTODEV_BACKEND_GET_CLASS(backend);
+
+ if (bc->create_session) {
+ return bc->create_session(backend, sess_info, queue_index, errp);
+ }
+
+ return -1;
+}
+
+int cryptodev_backend_sym_close_session(
+ CryptoDevBackend *backend,
+ uint64_t session_id,
+ uint32_t queue_index, Error **errp)
+{
+ CryptoDevBackendClass *bc =
+ CRYPTODEV_BACKEND_GET_CLASS(backend);
+
+ if (bc->close_session) {
+ return bc->close_session(backend, session_id, queue_index, errp);
+ }
+
+ return -1;
+}
+
+static int cryptodev_backend_sym_operation(
+ CryptoDevBackend *backend,
+ CryptoDevBackendSymOpInfo *op_info,
+ uint32_t queue_index, Error **errp)
+{
+ CryptoDevBackendClass *bc =
+ CRYPTODEV_BACKEND_GET_CLASS(backend);
+
+ if (bc->do_sym_op) {
+ return bc->do_sym_op(backend, op_info, queue_index, errp);
+ }
+
+ return -VIRTIO_CRYPTO_ERR;
+}
+
+int cryptodev_backend_crypto_operation(
+ CryptoDevBackend *backend,
+ void *opaque,
+ uint32_t queue_index, Error **errp)
+{
+ VirtIOCryptoReq *req = opaque;
+
+ if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
+ CryptoDevBackendSymOpInfo *op_info;
+ op_info = req->u.sym_op_info;
+
+ return cryptodev_backend_sym_operation(backend,
+ op_info, queue_index, errp);
+ } else {
+ error_setg(errp, "Unsupported cryptodev alg type: %" PRIu32 "",
+ req->flags);
+ return -VIRTIO_CRYPTO_NOTSUPP;
+ }
+
+ return -VIRTIO_CRYPTO_ERR;
+}
+
+static void
+cryptodev_backend_get_queues(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj);
+ uint32_t value = backend->conf.peers.queues;
+
+ visit_type_uint32(v, name, &value, errp);
+}
+
+static void
+cryptodev_backend_set_queues(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj);
+ Error *local_err = NULL;
+ uint32_t value;
+
+ visit_type_uint32(v, name, &value, &local_err);
+ if (local_err) {
+ goto out;
+ }
+ if (!value) {
+ error_setg(&local_err, "Property '%s.%s' doesn't take value '%"
+ PRIu32 "'", object_get_typename(obj), name, value);
+ goto out;
+ }
+ backend->conf.peers.queues = value;
+out:
+ error_propagate(errp, local_err);
+}
+
+static void
+cryptodev_backend_complete(UserCreatable *uc, Error **errp)
+{
+ CryptoDevBackend *backend = CRYPTODEV_BACKEND(uc);
+ CryptoDevBackendClass *bc = CRYPTODEV_BACKEND_GET_CLASS(uc);
+ Error *local_err = NULL;
+
+ if (bc->init) {
+ bc->init(backend, &local_err);
+ if (local_err) {
+ goto out;
+ }
+ }
+ backend->ready = true;
+ return;
+
+out:
+ backend->ready = false;
+ error_propagate(errp, local_err);
+}
+
+static void cryptodev_backend_instance_init(Object *obj)
+{
+ object_property_add(obj, "queues", "int",
+ cryptodev_backend_get_queues,
+ cryptodev_backend_set_queues,
+ NULL, NULL, NULL);
+ /* Initialize devices' queues property to 1 */
+ object_property_set_int(obj, 1, "queues", NULL);
+}
+
+static void cryptodev_backend_finalize(Object *obj)
+{
+
+}
+
+static void
+cryptodev_backend_class_init(ObjectClass *oc, void *data)
+{
+ UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
+
+ ucc->complete = cryptodev_backend_complete;
+
+ QTAILQ_INIT(&crypto_clients);
+}
+
+static const TypeInfo cryptodev_backend_info = {
+ .name = TYPE_CRYPTODEV_BACKEND,
+ .parent = TYPE_OBJECT,
+ .instance_size = sizeof(CryptoDevBackend),
+ .instance_init = cryptodev_backend_instance_init,
+ .instance_finalize = cryptodev_backend_finalize,
+ .class_size = sizeof(CryptoDevBackendClass),
+ .class_init = cryptodev_backend_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_USER_CREATABLE },
+ { }
+ }
+};
+
+static void
+cryptodev_backend_register_types(void)
+{
+ type_register_static(&cryptodev_backend_info);
+}
+
+type_init(cryptodev_backend_register_types);
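A rough sketch of how a frontend is expected to drive this API (the
VirtIOCryptoReq layout is assumed from the dispatch code above and from
include/hw/virtio/virtio-crypto.h in the diffstat; error handling trimmed):

    /* Create a session, run one symmetric operation, then tear it down. */
    static int example_sym_roundtrip(CryptoDevBackend *backend,
                                     CryptoDevBackendSymSessionInfo *sess_info,
                                     VirtIOCryptoReq *req, Error **errp)
    {
        int64_t session_id;
        int ret;

        session_id = cryptodev_backend_sym_create_session(backend, sess_info,
                                                           0 /* queue */, errp);
        if (session_id < 0) {
            return -1;
        }

        /* req->u.sym_op_info is assumed to be filled in by the caller. */
        req->flags = CRYPTODEV_BACKEND_ALG_SYM;
        req->u.sym_op_info->session_id = session_id;
        ret = cryptodev_backend_crypto_operation(backend, req, 0, errp);

        cryptodev_backend_sym_close_session(backend, session_id, 0, NULL);
        return ret == VIRTIO_CRYPTO_OK ? 0 : -1;
    }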
diff --git a/docs/specs/acpi_mem_hotplug.txt b/docs/specs/acpi_mem_hotplug.txt
index 3df3620..cb26dd2 100644
--- a/docs/specs/acpi_mem_hotplug.txt
+++ b/docs/specs/acpi_mem_hotplug.txt
@@ -4,6 +4,9 @@ QEMU<->ACPI BIOS memory hotplug interface
ACPI BIOS GPE.3 handler is dedicated for notifying OS about memory hot-add
and hot-remove events.
+ACPI BIOS GPE.4 handler is dedicated for notifying OS about nvdimm device
+hot-add and hot-remove events.
+
Memory hot-plug interface (IO port 0xa00-0xa17, 1-4 byte access):
---------------------------------------------------------------
0xa00:
diff --git a/docs/specs/acpi_nvdimm.txt b/docs/specs/acpi_nvdimm.txt
index 0fdd251..4aa5e3d 100644
--- a/docs/specs/acpi_nvdimm.txt
+++ b/docs/specs/acpi_nvdimm.txt
@@ -127,6 +127,58 @@ _DSM process diagram:
| result from the page | | |
+--------------------------+ +--------------+
- _FIT implementation
- -------------------
- TODO (will fill it when nvdimm hotplug is introduced)
+Device Handle Reservation
+-------------------------
+As mentioned above, bytes 0 ~ 3 in the DSM memory hold the NVDIMM device
+handle. The handle is purely a QEMU-internal value: handles in the range
+[0, 0xFFFF] identify an NVDIMM device (0 means the NVDIMM root device,
+named NVDR); all other values are reserved for other purposes.
+
+Current reserved handle:
+0x10000 is reserved for the QEMU-internal DSM function called on the root
+device.
+
+QEMU internal use only _DSM function
+------------------------------------
+The UUID 648B9CF2-CDA1-4312-8AD9-49C4AF32BD62 is reserved for the
+QEMU-internal DSM function.
+
+The following function is introduced by QEMU and is used only internally by
+QEMU.
+
+1) Read FIT
+ As only one page is reserved for NVDIMM ACPI, it is impossible to map the
+ whole FIT data into the guest's address space. This function is used by the
+ _FIT method to read a piece of FIT data from QEMU.
+
+ Input parameters:
+ Arg0 - UUID {set to 648B9CF2-CDA1-4312-8AD9-49C4AF32BD62}
+ Arg1 - Revision ID (set to 1)
+ Arg2 - Function Index, 0x1
+ Arg3 - A package containing a buffer whose layout is as follows:
+
+ +----------+-------------+-------------+-----------------------------------+
| Field | Byte Length | Byte Offset | Description |
+ +----------+-------------+-------------+-----------------------------------+
+ | offset | 4 | 0 | the offset of FIT buffer |
+ +----------+-------------+-------------+-----------------------------------+
+
+ Output:
+ +----------+-------------+-------------+-----------------------------------+
| Field | Byte Length | Byte Offset | Description |
+ +----------+-------------+-------------+-----------------------------------+
+ | | | | return status codes |
+ | | | | 0x100 indicates fit has been |
+ | status | 4 | 0 | updated |
| | | | others follow Chapter 3 in DSM |
+ | | | | Spec Rev1 |
+ +----------+-------------+-------------+-----------------------------------+
+ | fit data | Varies | 4 | FIT data |
+ | | | | |
+ +----------+-------------+-------------+-----------------------------------+
+
+ The FIT offset is maintained by the caller itself; the current offset plus
+ the length returned by the function is the next offset to read. When all
+ the FIT data has been read out, zero length is returned.
+
+ If it returns 0x100, OSPM should restart reading the FIT (read from
+ offset 0 again).
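Illustrative sketch of the read loop that the guest-side _FIT method
implements on top of this function (read_fit() here is a hypothetical
wrapper around the QEMU-internal _DSM call described above):

    /* Accumulate the whole FIT by reading it piecewise from QEMU. */
    static int read_whole_fit(GArray *fit)
    {
        uint32_t offset = 0;

        for (;;) {
            uint32_t status, len;
            const uint8_t *data = read_fit(offset, &status, &len); /* assumed */

            if (status == 0x100) {          /* FIT changed: restart from 0 */
                g_array_set_size(fit, 0);
                offset = 0;
                continue;
            }
            if (status != 0) {
                return -1;                  /* error codes per DSM Spec Rev1 */
            }
            if (len == 0) {
                return 0;                   /* whole FIT has been read */
            }
            g_array_append_vals(fit, data, len);
            offset += len;
        }
    }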
diff --git a/hw/acpi/Makefile.objs b/hw/acpi/Makefile.objs
index 4b7da66..489e63b 100644
--- a/hw/acpi/Makefile.objs
+++ b/hw/acpi/Makefile.objs
@@ -3,7 +3,7 @@ common-obj-$(CONFIG_ACPI_X86_ICH) += ich9.o tco.o
common-obj-$(CONFIG_ACPI_CPU_HOTPLUG) += cpu_hotplug.o
common-obj-$(CONFIG_ACPI_MEMORY_HOTPLUG) += memory_hotplug.o memory_hotplug_acpi_table.o
common-obj-$(CONFIG_ACPI_CPU_HOTPLUG) += cpu.o
-obj-$(CONFIG_ACPI_NVDIMM) += nvdimm.o
+common-obj-$(CONFIG_ACPI_NVDIMM) += nvdimm.o
common-obj-$(CONFIG_ACPI) += acpi_interface.o
common-obj-$(CONFIG_ACPI) += bios-linker-loader.o
common-obj-$(CONFIG_ACPI) += aml-build.o
diff --git a/hw/acpi/ipmi.c b/hw/acpi/ipmi.c
index 7e74ce4..651e2e9 100644
--- a/hw/acpi/ipmi.c
+++ b/hw/acpi/ipmi.c
@@ -99,6 +99,7 @@ void build_acpi_ipmi_devices(Aml *scope, BusState *bus)
ii = IPMI_INTERFACE(obj);
iic = IPMI_INTERFACE_GET_CLASS(obj);
+ memset(&info, 0, sizeof(info));
iic->get_fwinfo(ii, &info);
aml_append(scope, aml_ipmi_device(&info));
}
diff --git a/hw/acpi/memory_hotplug.c b/hw/acpi/memory_hotplug.c
index ec4e64b..70f6451 100644
--- a/hw/acpi/memory_hotplug.c
+++ b/hw/acpi/memory_hotplug.c
@@ -2,6 +2,7 @@
#include "hw/acpi/memory_hotplug.h"
#include "hw/acpi/pc-hotplug.h"
#include "hw/mem/pc-dimm.h"
+#include "hw/mem/nvdimm.h"
#include "hw/boards.h"
#include "hw/qdev-core.h"
#include "trace.h"
@@ -232,11 +233,8 @@ void acpi_memory_plug_cb(HotplugHandler *hotplug_dev, MemHotplugState *mem_st,
DeviceState *dev, Error **errp)
{
MemStatus *mdev;
- DeviceClass *dc = DEVICE_GET_CLASS(dev);
-
- if (!dc->hotpluggable) {
- return;
- }
+ AcpiEventStatusBits event;
+ bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM);
mdev = acpi_memory_slot_status(mem_st, dev, errp);
if (!mdev) {
@@ -244,10 +242,23 @@ void acpi_memory_plug_cb(HotplugHandler *hotplug_dev, MemHotplugState *mem_st,
}
mdev->dimm = dev;
- mdev->is_enabled = true;
+
+ /*
+ * do not set is_enabled and is_inserting if the slot is plugged with
+ * an nvdimm device, to stop OSPM from inquiring about the memory region
+ * of the slot.
+ */
+ if (is_nvdimm) {
+ event = ACPI_NVDIMM_HOTPLUG_STATUS;
+ } else {
+ mdev->is_enabled = true;
+ event = ACPI_MEMORY_HOTPLUG_STATUS;
+ }
+
if (dev->hotplugged) {
- mdev->is_inserting = true;
- acpi_send_event(DEVICE(hotplug_dev), ACPI_MEMORY_HOTPLUG_STATUS);
+ if (!is_nvdimm) {
+ mdev->is_inserting = true;
+ }
+ acpi_send_event(DEVICE(hotplug_dev), event);
}
}
@@ -262,6 +273,8 @@ void acpi_memory_unplug_request_cb(HotplugHandler *hotplug_dev,
return;
}
+ /* nvdimm device hot unplug is not supported yet. */
+ assert(!object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM));
mdev->is_removing = true;
acpi_send_event(DEVICE(hotplug_dev), ACPI_MEMORY_HOTPLUG_STATUS);
}
@@ -276,6 +289,8 @@ void acpi_memory_unplug_cb(MemHotplugState *mem_st,
return;
}
+ /* nvdimm device hot unplug is not supported yet. */
+ assert(!object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM));
mdev->is_enabled = false;
mdev->dimm = NULL;
}
diff --git a/hw/acpi/nvdimm.c b/hw/acpi/nvdimm.c
index e486128..602ec54 100644
--- a/hw/acpi/nvdimm.c
+++ b/hw/acpi/nvdimm.c
@@ -289,8 +289,6 @@ static void
nvdimm_build_structure_memdev(GArray *structures, DeviceState *dev)
{
NvdimmNfitMemDev *nfit_memdev;
- uint64_t addr = object_property_get_int(OBJECT(dev), PC_DIMM_ADDR_PROP,
- NULL);
uint64_t size = object_property_get_int(OBJECT(dev), PC_DIMM_SIZE_PROP,
NULL);
int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
@@ -314,7 +312,8 @@ nvdimm_build_structure_memdev(GArray *structures, DeviceState *dev)
/* The memory region on the device. */
nfit_memdev->region_len = cpu_to_le64(size);
- nfit_memdev->region_dpa = cpu_to_le64(addr);
+ /* The device address starts from 0. */
+ nfit_memdev->region_dpa = cpu_to_le64(0);
/* Only one interleave for PMEM. */
nfit_memdev->interleave_ways = cpu_to_le16(1);
@@ -349,8 +348,9 @@ static void nvdimm_build_structure_dcr(GArray *structures, DeviceState *dev)
(DSM) in DSM Spec Rev1.*/);
}
-static GArray *nvdimm_build_device_structure(GSList *device_list)
+static GArray *nvdimm_build_device_structure(void)
{
+ GSList *device_list = nvdimm_get_plugged_device_list();
GArray *structures = g_array_new(false, true /* clear */, 1);
for (; device_list; device_list = device_list->next) {
@@ -368,28 +368,58 @@ static GArray *nvdimm_build_device_structure(GSList *device_list)
/* build NVDIMM Control Region Structure. */
nvdimm_build_structure_dcr(structures, dev);
}
+ g_slist_free(device_list);
return structures;
}
-static void nvdimm_build_nfit(GSList *device_list, GArray *table_offsets,
+static void nvdimm_init_fit_buffer(NvdimmFitBuffer *fit_buf)
+{
+ qemu_mutex_init(&fit_buf->lock);
+ fit_buf->fit = g_array_new(false, true /* clear */, 1);
+}
+
+static void nvdimm_build_fit_buffer(NvdimmFitBuffer *fit_buf)
+{
+ qemu_mutex_lock(&fit_buf->lock);
+ g_array_free(fit_buf->fit, true);
+ fit_buf->fit = nvdimm_build_device_structure();
+ fit_buf->dirty = true;
+ qemu_mutex_unlock(&fit_buf->lock);
+}
+
+void nvdimm_acpi_hotplug(AcpiNVDIMMState *state)
+{
+ nvdimm_build_fit_buffer(&state->fit_buf);
+}
+
+static void nvdimm_build_nfit(AcpiNVDIMMState *state, GArray *table_offsets,
GArray *table_data, BIOSLinker *linker)
{
- GArray *structures = nvdimm_build_device_structure(device_list);
+ NvdimmFitBuffer *fit_buf = &state->fit_buf;
unsigned int header;
+ qemu_mutex_lock(&fit_buf->lock);
+
+ /* NVDIMM device is not plugged? */
+ if (!fit_buf->fit->len) {
+ goto exit;
+ }
+
acpi_add_table(table_offsets, table_data);
/* NFIT header. */
header = table_data->len;
acpi_data_push(table_data, sizeof(NvdimmNfitHeader));
/* NVDIMM device structures. */
- g_array_append_vals(table_data, structures->data, structures->len);
+ g_array_append_vals(table_data, fit_buf->fit->data, fit_buf->fit->len);
build_header(linker, table_data,
(void *)(table_data->data + header), "NFIT",
- sizeof(NvdimmNfitHeader) + structures->len, 1, NULL, NULL);
- g_array_free(structures, true);
+ sizeof(NvdimmNfitHeader) + fit_buf->fit->len, 1, NULL, NULL);
+
+exit:
+ qemu_mutex_unlock(&fit_buf->lock);
}
struct NvdimmDsmIn {
@@ -466,6 +496,22 @@ typedef struct NvdimmFuncSetLabelDataIn NvdimmFuncSetLabelDataIn;
QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncSetLabelDataIn) +
offsetof(NvdimmDsmIn, arg3) > 4096);
+struct NvdimmFuncReadFITIn {
+ uint32_t offset; /* the offset of FIT buffer. */
+} QEMU_PACKED;
+typedef struct NvdimmFuncReadFITIn NvdimmFuncReadFITIn;
+QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncReadFITIn) +
+ offsetof(NvdimmDsmIn, arg3) > 4096);
+
+struct NvdimmFuncReadFITOut {
+ /* the size of buffer filled by QEMU. */
+ uint32_t len;
+ uint32_t func_ret_status; /* return status code. */
+ uint8_t fit[0]; /* the FIT data. */
+} QEMU_PACKED;
+typedef struct NvdimmFuncReadFITOut NvdimmFuncReadFITOut;
+QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncReadFITOut) > 4096);
+
static void
nvdimm_dsm_function0(uint32_t supported_func, hwaddr dsm_mem_addr)
{
@@ -486,6 +532,74 @@ nvdimm_dsm_no_payload(uint32_t func_ret_status, hwaddr dsm_mem_addr)
cpu_physical_memory_write(dsm_mem_addr, &out, sizeof(out));
}
+#define NVDIMM_QEMU_RSVD_HANDLE_ROOT 0x10000
+
+/* Read FIT data, defined in docs/specs/acpi_nvdimm.txt. */
+static void nvdimm_dsm_func_read_fit(AcpiNVDIMMState *state, NvdimmDsmIn *in,
+ hwaddr dsm_mem_addr)
+{
+ NvdimmFitBuffer *fit_buf = &state->fit_buf;
+ NvdimmFuncReadFITIn *read_fit;
+ NvdimmFuncReadFITOut *read_fit_out;
+ GArray *fit;
+ uint32_t read_len = 0, func_ret_status;
+ int size;
+
+ read_fit = (NvdimmFuncReadFITIn *)in->arg3;
+ le32_to_cpus(&read_fit->offset);
+
+ qemu_mutex_lock(&fit_buf->lock);
+ fit = fit_buf->fit;
+
+ nvdimm_debug("Read FIT: offset %#x FIT size %#x Dirty %s.\n",
+ read_fit->offset, fit->len, fit_buf->dirty ? "Yes" : "No");
+
+ if (read_fit->offset > fit->len) {
+ func_ret_status = 3 /* Invalid Input Parameters */;
+ goto exit;
+ }
+
+ /* This is the first read of the FIT. */
+ if (!read_fit->offset) {
+ fit_buf->dirty = false;
+ } else if (fit_buf->dirty) { /* FIT has been changed during RFIT. */
+ func_ret_status = 0x100 /* fit changed */;
+ goto exit;
+ }
+
+ func_ret_status = 0 /* Success */;
+ read_len = MIN(fit->len - read_fit->offset,
+ 4096 - sizeof(NvdimmFuncReadFITOut));
+
+exit:
+ size = sizeof(NvdimmFuncReadFITOut) + read_len;
+ read_fit_out = g_malloc(size);
+
+ read_fit_out->len = cpu_to_le32(size);
+ read_fit_out->func_ret_status = cpu_to_le32(func_ret_status);
+ memcpy(read_fit_out->fit, fit->data + read_fit->offset, read_len);
+
+ cpu_physical_memory_write(dsm_mem_addr, read_fit_out, size);
+
+ g_free(read_fit_out);
+ qemu_mutex_unlock(&fit_buf->lock);
+}
+
+static void nvdimm_dsm_reserved_root(AcpiNVDIMMState *state, NvdimmDsmIn *in,
+ hwaddr dsm_mem_addr)
+{
+ switch (in->function) {
+ case 0x0:
+ nvdimm_dsm_function0(0x1 | 1 << 1 /* Read FIT */, dsm_mem_addr);
+ return;
+ case 0x1 /*Read FIT */:
+ nvdimm_dsm_func_read_fit(state, in, dsm_mem_addr);
+ return;
+ }
+
+ nvdimm_dsm_no_payload(1 /* Not Supported */, dsm_mem_addr);
+}
+
static void nvdimm_dsm_root(NvdimmDsmIn *in, hwaddr dsm_mem_addr)
{
/*
@@ -643,8 +757,8 @@ static void nvdimm_dsm_set_label_data(NVDIMMDevice *nvdimm, NvdimmDsmIn *in,
return;
}
- assert(sizeof(*in) + sizeof(*set_label_data) + set_label_data->length <=
- 4096);
+ assert(offsetof(NvdimmDsmIn, arg3) +
+ sizeof(*set_label_data) + set_label_data->length <= 4096);
nvc->write_label_data(nvdimm, set_label_data->in_buf,
set_label_data->length, set_label_data->offset);
@@ -712,6 +826,7 @@ nvdimm_dsm_read(void *opaque, hwaddr addr, unsigned size)
static void
nvdimm_dsm_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
{
+ AcpiNVDIMMState *state = opaque;
NvdimmDsmIn *in;
hwaddr dsm_mem_addr = val;
@@ -739,6 +854,11 @@ nvdimm_dsm_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
goto exit;
}
+ if (in->handle == NVDIMM_QEMU_RSVD_HANDLE_ROOT) {
+ nvdimm_dsm_reserved_root(state, in, dsm_mem_addr);
+ goto exit;
+ }
+
/* Handle 0 is reserved for NVDIMM Root Device. */
if (!in->handle) {
nvdimm_dsm_root(in, dsm_mem_addr);
@@ -772,23 +892,105 @@ void nvdimm_init_acpi_state(AcpiNVDIMMState *state, MemoryRegion *io,
acpi_data_push(state->dsm_mem, sizeof(NvdimmDsmIn));
fw_cfg_add_file(fw_cfg, NVDIMM_DSM_MEM_FILE, state->dsm_mem->data,
state->dsm_mem->len);
+
+ nvdimm_init_fit_buffer(&state->fit_buf);
}
-#define NVDIMM_COMMON_DSM "NCAL"
-#define NVDIMM_ACPI_MEM_ADDR "MEMA"
+#define NVDIMM_COMMON_DSM "NCAL"
+#define NVDIMM_ACPI_MEM_ADDR "MEMA"
+
+#define NVDIMM_DSM_MEMORY "NRAM"
+#define NVDIMM_DSM_IOPORT "NPIO"
+
+#define NVDIMM_DSM_NOTIFY "NTFI"
+#define NVDIMM_DSM_HANDLE "HDLE"
+#define NVDIMM_DSM_REVISION "REVS"
+#define NVDIMM_DSM_FUNCTION "FUNC"
+#define NVDIMM_DSM_ARG3 "FARG"
+
+#define NVDIMM_DSM_OUT_BUF_SIZE "RLEN"
+#define NVDIMM_DSM_OUT_BUF "ODAT"
+
+#define NVDIMM_DSM_RFIT_STATUS "RSTA"
+
+#define NVDIMM_QEMU_RSVD_UUID "648B9CF2-CDA1-4312-8AD9-49C4AF32BD62"
static void nvdimm_build_common_dsm(Aml *dev)
{
- Aml *method, *ifctx, *function, *handle, *uuid, *dsm_mem, *result_size;
+ Aml *method, *ifctx, *function, *handle, *uuid, *dsm_mem, *elsectx2;
Aml *elsectx, *unsupport, *unpatched, *expected_uuid, *uuid_invalid;
- Aml *pckg, *pckg_index, *pckg_buf;
+ Aml *pckg, *pckg_index, *pckg_buf, *field, *dsm_out_buf, *dsm_out_buf_size;
uint8_t byte_list[1];
method = aml_method(NVDIMM_COMMON_DSM, 5, AML_SERIALIZED);
uuid = aml_arg(0);
function = aml_arg(2);
handle = aml_arg(4);
- dsm_mem = aml_name(NVDIMM_ACPI_MEM_ADDR);
+ dsm_mem = aml_local(6);
+ dsm_out_buf = aml_local(7);
+
+ aml_append(method, aml_store(aml_name(NVDIMM_ACPI_MEM_ADDR), dsm_mem));
+
+ /* map DSM memory and IO into ACPI namespace. */
+ aml_append(method, aml_operation_region(NVDIMM_DSM_IOPORT, AML_SYSTEM_IO,
+ aml_int(NVDIMM_ACPI_IO_BASE), NVDIMM_ACPI_IO_LEN));
+ aml_append(method, aml_operation_region(NVDIMM_DSM_MEMORY,
+ AML_SYSTEM_MEMORY, dsm_mem, sizeof(NvdimmDsmIn)));
+
+ /*
+ * DSM notifier:
+ * NVDIMM_DSM_NOTIFY: write the address of DSM memory and notify QEMU to
+ * emulate the access.
+ *
+ * It is the IO port so that accessing them will cause VM-exit, the
+ * control will be transferred to QEMU.
+ */
+ field = aml_field(NVDIMM_DSM_IOPORT, AML_DWORD_ACC, AML_NOLOCK,
+ AML_PRESERVE);
+ aml_append(field, aml_named_field(NVDIMM_DSM_NOTIFY,
+ sizeof(uint32_t) * BITS_PER_BYTE));
+ aml_append(method, field);
+
+ /*
+ * DSM input:
+ * NVDIMM_DSM_HANDLE: store device's handle, it's zero if the _DSM call
+ * happens on NVDIMM Root Device.
+ * NVDIMM_DSM_REVISION: store the Arg1 of _DSM call.
+ * NVDIMM_DSM_FUNCTION: store the Arg2 of _DSM call.
+ * NVDIMM_DSM_ARG3: store the Arg3 of _DSM call which is a Package
+ * containing function-specific arguments.
+ *
+ * They are RAM mapping on host so that these accesses never cause
+ * VM-EXIT.
+ */
+ field = aml_field(NVDIMM_DSM_MEMORY, AML_DWORD_ACC, AML_NOLOCK,
+ AML_PRESERVE);
+ aml_append(field, aml_named_field(NVDIMM_DSM_HANDLE,
+ sizeof(typeof_field(NvdimmDsmIn, handle)) * BITS_PER_BYTE));
+ aml_append(field, aml_named_field(NVDIMM_DSM_REVISION,
+ sizeof(typeof_field(NvdimmDsmIn, revision)) * BITS_PER_BYTE));
+ aml_append(field, aml_named_field(NVDIMM_DSM_FUNCTION,
+ sizeof(typeof_field(NvdimmDsmIn, function)) * BITS_PER_BYTE));
+ aml_append(field, aml_named_field(NVDIMM_DSM_ARG3,
+ (sizeof(NvdimmDsmIn) - offsetof(NvdimmDsmIn, arg3)) * BITS_PER_BYTE));
+ aml_append(method, field);
+
+ /*
+ * DSM output:
+ * NVDIMM_DSM_OUT_BUF_SIZE: the size of the buffer filled by QEMU.
+ * NVDIMM_DSM_OUT_BUF: the buffer QEMU uses to store the result.
+ *
+ * Since the page is reused by both input and out, the input data
+ * will be lost after storing new result into ODAT so we should fetch
+ * all the input data before writing the result.
+ */
+ field = aml_field(NVDIMM_DSM_MEMORY, AML_DWORD_ACC, AML_NOLOCK,
+ AML_PRESERVE);
+ aml_append(field, aml_named_field(NVDIMM_DSM_OUT_BUF_SIZE,
+ sizeof(typeof_field(NvdimmDsmOut, len)) * BITS_PER_BYTE));
+ aml_append(field, aml_named_field(NVDIMM_DSM_OUT_BUF,
+ (sizeof(NvdimmDsmOut) - offsetof(NvdimmDsmOut, data)) * BITS_PER_BYTE));
+ aml_append(method, field);
/*
* do not support any method if DSM memory address has not been
@@ -804,9 +1006,15 @@ static void nvdimm_build_common_dsm(Aml *dev)
/* UUID for NVDIMM Root Device */, expected_uuid));
aml_append(method, ifctx);
elsectx = aml_else();
- aml_append(elsectx, aml_store(
+ ifctx = aml_if(aml_equal(handle, aml_int(NVDIMM_QEMU_RSVD_HANDLE_ROOT)));
+ aml_append(ifctx, aml_store(aml_touuid(NVDIMM_QEMU_RSVD_UUID
+ /* UUID for QEMU internal use */), expected_uuid));
+ aml_append(elsectx, ifctx);
+ elsectx2 = aml_else();
+ aml_append(elsectx2, aml_store(
aml_touuid("4309AC30-0D11-11E4-9191-0800200C9A66")
/* UUID for NVDIMM Devices */, expected_uuid));
+ aml_append(elsectx, elsectx2);
aml_append(method, elsectx);
uuid_invalid = aml_lnot(aml_equal(uuid, expected_uuid));
@@ -832,9 +1040,9 @@ static void nvdimm_build_common_dsm(Aml *dev)
* it reserves 0 for root device and is the handle for NVDIMM devices.
* See the comments in nvdimm_slot_to_handle().
*/
- aml_append(method, aml_store(handle, aml_name("HDLE")));
- aml_append(method, aml_store(aml_arg(1), aml_name("REVS")));
- aml_append(method, aml_store(aml_arg(2), aml_name("FUNC")));
+ aml_append(method, aml_store(handle, aml_name(NVDIMM_DSM_HANDLE)));
+ aml_append(method, aml_store(aml_arg(1), aml_name(NVDIMM_DSM_REVISION)));
+ aml_append(method, aml_store(aml_arg(2), aml_name(NVDIMM_DSM_FUNCTION)));
/*
* The fourth parameter (Arg3) of _DSM is a package which contains
@@ -852,24 +1060,26 @@ static void nvdimm_build_common_dsm(Aml *dev)
pckg_buf = aml_local(3);
aml_append(ifctx, aml_store(aml_index(pckg, aml_int(0)), pckg_index));
aml_append(ifctx, aml_store(aml_derefof(pckg_index), pckg_buf));
- aml_append(ifctx, aml_store(pckg_buf, aml_name("ARG3")));
+ aml_append(ifctx, aml_store(pckg_buf, aml_name(NVDIMM_DSM_ARG3)));
aml_append(method, ifctx);
/*
* tell QEMU about the real address of DSM memory, then QEMU
* gets the control and fills the result in DSM memory.
*/
- aml_append(method, aml_store(dsm_mem, aml_name("NTFI")));
-
- result_size = aml_local(1);
- aml_append(method, aml_store(aml_name("RLEN"), result_size));
- aml_append(method, aml_store(aml_shiftleft(result_size, aml_int(3)),
- result_size));
- aml_append(method, aml_create_field(aml_name("ODAT"), aml_int(0),
- result_size, "OBUF"));
+ aml_append(method, aml_store(dsm_mem, aml_name(NVDIMM_DSM_NOTIFY)));
+
+ dsm_out_buf_size = aml_local(1);
+ /* RLEN is not included in the payload returned to guest. */
+ aml_append(method, aml_subtract(aml_name(NVDIMM_DSM_OUT_BUF_SIZE),
+ aml_int(4), dsm_out_buf_size));
+ aml_append(method, aml_store(aml_shiftleft(dsm_out_buf_size, aml_int(3)),
+ dsm_out_buf_size));
+ aml_append(method, aml_create_field(aml_name(NVDIMM_DSM_OUT_BUF),
+ aml_int(0), dsm_out_buf_size, "OBUF"));
aml_append(method, aml_concatenate(aml_buffer(0, NULL), aml_name("OBUF"),
- aml_arg(6)));
- aml_append(method, aml_return(aml_arg(6)));
+ dsm_out_buf));
+ aml_append(method, aml_return(dsm_out_buf));
aml_append(dev, method);
}
@@ -884,12 +1094,110 @@ static void nvdimm_build_device_dsm(Aml *dev, uint32_t handle)
aml_append(dev, method);
}
-static void nvdimm_build_nvdimm_devices(GSList *device_list, Aml *root_dev)
+static void nvdimm_build_fit(Aml *dev)
{
- for (; device_list; device_list = device_list->next) {
- DeviceState *dev = device_list->data;
- int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
- NULL);
+ Aml *method, *pkg, *buf, *buf_size, *offset, *call_result;
+ Aml *whilectx, *ifcond, *ifctx, *elsectx, *fit;
+
+ buf = aml_local(0);
+ buf_size = aml_local(1);
+ fit = aml_local(2);
+
+ aml_append(dev, aml_create_dword_field(aml_buffer(4, NULL),
+ aml_int(0), NVDIMM_DSM_RFIT_STATUS));
+
+ /* build helper function, RFIT. */
+ method = aml_method("RFIT", 1, AML_SERIALIZED);
+ aml_append(method, aml_create_dword_field(aml_buffer(4, NULL),
+ aml_int(0), "OFST"));
+
+ /* prepare input package. */
+ pkg = aml_package(1);
+ aml_append(method, aml_store(aml_arg(0), aml_name("OFST")));
+ aml_append(pkg, aml_name("OFST"));
+
+ /* call Read_FIT function. */
+ call_result = aml_call5(NVDIMM_COMMON_DSM,
+ aml_touuid(NVDIMM_QEMU_RSVD_UUID),
+ aml_int(1) /* Revision 1 */,
+ aml_int(0x1) /* Read FIT */,
+ pkg, aml_int(NVDIMM_QEMU_RSVD_HANDLE_ROOT));
+ aml_append(method, aml_store(call_result, buf));
+
+ /* handle _DSM result. */
+ aml_append(method, aml_create_dword_field(buf,
+ aml_int(0) /* offset at byte 0 */, "STAU"));
+
+ aml_append(method, aml_store(aml_name("STAU"),
+ aml_name(NVDIMM_DSM_RFIT_STATUS)));
+
+ /* if something is wrong during _DSM. */
+ ifcond = aml_equal(aml_int(0 /* Success */), aml_name("STAU"));
+ ifctx = aml_if(aml_lnot(ifcond));
+ aml_append(ifctx, aml_return(aml_buffer(0, NULL)));
+ aml_append(method, ifctx);
+
+ aml_append(method, aml_store(aml_sizeof(buf), buf_size));
+ aml_append(method, aml_subtract(buf_size,
+ aml_int(4) /* the size of "STAU" */,
+ buf_size));
+
+ /* if we read the end of fit. */
+ ifctx = aml_if(aml_equal(buf_size, aml_int(0)));
+ aml_append(ifctx, aml_return(aml_buffer(0, NULL)));
+ aml_append(method, ifctx);
+
+ aml_append(method, aml_store(aml_shiftleft(buf_size, aml_int(3)),
+ buf_size));
+ aml_append(method, aml_create_field(buf,
+ aml_int(4 * BITS_PER_BYTE), /* offset at byte 4.*/
+ buf_size, "BUFF"));
+ aml_append(method, aml_return(aml_name("BUFF")));
+ aml_append(dev, method);
+
+ /* build _FIT. */
+ method = aml_method("_FIT", 0, AML_SERIALIZED);
+ offset = aml_local(3);
+
+ aml_append(method, aml_store(aml_buffer(0, NULL), fit));
+ aml_append(method, aml_store(aml_int(0), offset));
+
+ whilectx = aml_while(aml_int(1));
+ aml_append(whilectx, aml_store(aml_call1("RFIT", offset), buf));
+ aml_append(whilectx, aml_store(aml_sizeof(buf), buf_size));
+
+ /*
+ * if fit buffer was changed during RFIT, read from the beginning
+ * again.
+ */
+ ifctx = aml_if(aml_equal(aml_name(NVDIMM_DSM_RFIT_STATUS),
+ aml_int(0x100 /* fit changed */)));
+ aml_append(ifctx, aml_store(aml_buffer(0, NULL), fit));
+ aml_append(ifctx, aml_store(aml_int(0), offset));
+ aml_append(whilectx, ifctx);
+
+ elsectx = aml_else();
+
+ /* finish fit read if no data is read out. */
+ ifctx = aml_if(aml_equal(buf_size, aml_int(0)));
+ aml_append(ifctx, aml_return(fit));
+ aml_append(elsectx, ifctx);
+
+ /* update the offset. */
+ aml_append(elsectx, aml_add(offset, buf_size, offset));
+ /* append the data we read out to the fit buffer. */
+ aml_append(elsectx, aml_concatenate(fit, buf, fit));
+ aml_append(whilectx, elsectx);
+ aml_append(method, whilectx);
+
+ aml_append(dev, method);
+}
+
+static void nvdimm_build_nvdimm_devices(Aml *root_dev, uint32_t ram_slots)
+{
+ uint32_t slot;
+
+ for (slot = 0; slot < ram_slots; slot++) {
uint32_t handle = nvdimm_slot_to_handle(slot);
Aml *nvdimm_dev;
@@ -910,11 +1218,11 @@ static void nvdimm_build_nvdimm_devices(GSList *device_list, Aml *root_dev)
}
}
-static void nvdimm_build_ssdt(GSList *device_list, GArray *table_offsets,
- GArray *table_data, BIOSLinker *linker,
- GArray *dsm_dma_arrea)
+static void nvdimm_build_ssdt(GArray *table_offsets, GArray *table_data,
+ BIOSLinker *linker, GArray *dsm_dma_arrea,
+ uint32_t ram_slots)
{
- Aml *ssdt, *sb_scope, *dev, *field;
+ Aml *ssdt, *sb_scope, *dev;
int mem_addr_offset, nvdimm_ssdt;
acpi_add_table(table_offsets, table_data);
@@ -939,69 +1247,13 @@ static void nvdimm_build_ssdt(GSList *device_list, GArray *table_offsets,
*/
aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0012")));
- /* map DSM memory and IO into ACPI namespace. */
- aml_append(dev, aml_operation_region("NPIO", AML_SYSTEM_IO,
- aml_int(NVDIMM_ACPI_IO_BASE), NVDIMM_ACPI_IO_LEN));
- aml_append(dev, aml_operation_region("NRAM", AML_SYSTEM_MEMORY,
- aml_name(NVDIMM_ACPI_MEM_ADDR), sizeof(NvdimmDsmIn)));
-
- /*
- * DSM notifier:
- * NTFI: write the address of DSM memory and notify QEMU to emulate
- * the access.
- *
- * It is the IO port so that accessing them will cause VM-exit, the
- * control will be transferred to QEMU.
- */
- field = aml_field("NPIO", AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE);
- aml_append(field, aml_named_field("NTFI",
- sizeof(uint32_t) * BITS_PER_BYTE));
- aml_append(dev, field);
-
- /*
- * DSM input:
- * HDLE: store device's handle, it's zero if the _DSM call happens
- * on NVDIMM Root Device.
- * REVS: store the Arg1 of _DSM call.
- * FUNC: store the Arg2 of _DSM call.
- * ARG3: store the Arg3 of _DSM call.
- *
- * They are RAM mapping on host so that these accesses never cause
- * VM-EXIT.
- */
- field = aml_field("NRAM", AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE);
- aml_append(field, aml_named_field("HDLE",
- sizeof(typeof_field(NvdimmDsmIn, handle)) * BITS_PER_BYTE));
- aml_append(field, aml_named_field("REVS",
- sizeof(typeof_field(NvdimmDsmIn, revision)) * BITS_PER_BYTE));
- aml_append(field, aml_named_field("FUNC",
- sizeof(typeof_field(NvdimmDsmIn, function)) * BITS_PER_BYTE));
- aml_append(field, aml_named_field("ARG3",
- (sizeof(NvdimmDsmIn) - offsetof(NvdimmDsmIn, arg3)) * BITS_PER_BYTE));
- aml_append(dev, field);
-
- /*
- * DSM output:
- * RLEN: the size of the buffer filled by QEMU.
- * ODAT: the buffer QEMU uses to store the result.
- *
- * Since the page is reused by both input and out, the input data
- * will be lost after storing new result into ODAT so we should fetch
- * all the input data before writing the result.
- */
- field = aml_field("NRAM", AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE);
- aml_append(field, aml_named_field("RLEN",
- sizeof(typeof_field(NvdimmDsmOut, len)) * BITS_PER_BYTE));
- aml_append(field, aml_named_field("ODAT",
- (sizeof(NvdimmDsmOut) - offsetof(NvdimmDsmOut, data)) * BITS_PER_BYTE));
- aml_append(dev, field);
-
nvdimm_build_common_dsm(dev);
/* 0 is reserved for root device. */
nvdimm_build_device_dsm(dev, 0);
+ nvdimm_build_fit(dev);
- nvdimm_build_nvdimm_devices(device_list, dev);
+ nvdimm_build_nvdimm_devices(dev, ram_slots);
aml_append(sb_scope, dev);
aml_append(ssdt, sb_scope);
@@ -1026,17 +1278,17 @@ static void nvdimm_build_ssdt(GSList *device_list, GArray *table_offsets,
}
void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data,
- BIOSLinker *linker, GArray *dsm_dma_arrea)
+ BIOSLinker *linker, AcpiNVDIMMState *state,
+ uint32_t ram_slots)
{
- GSList *device_list;
+ nvdimm_build_nfit(state, table_offsets, table_data, linker);
- /* no NVDIMM device is plugged. */
- device_list = nvdimm_get_plugged_device_list();
- if (!device_list) {
- return;
+ /*
+ * An NVDIMM device is allowed to be plugged only if there is an
+ * available slot.
+ */
+ if (ram_slots) {
+ nvdimm_build_ssdt(table_offsets, table_data, linker, state->dsm_mem,
+ ram_slots);
}
- nvdimm_build_nfit(device_list, table_offsets, table_data, linker);
- nvdimm_build_ssdt(device_list, table_offsets, table_data, linker,
- dsm_dma_arrea);
- g_slist_free(device_list);
}
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index 704a763..90ef557 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -88,23 +88,28 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
*dataplane = NULL;
- if (!conf->iothread) {
- return;
- }
+ if (conf->iothread) {
+ if (!k->set_guest_notifiers || !k->ioeventfd_assign) {
+ error_setg(errp,
+ "device is incompatible with iothread "
+ "(transport does not support notifiers)");
+ return;
+ }
+ if (!virtio_device_ioeventfd_enabled(vdev)) {
+ error_setg(errp, "ioeventfd is required for iothread");
+ return;
+ }
- /* Don't try if transport does not support notifiers. */
- if (!k->set_guest_notifiers || !k->ioeventfd_started) {
- error_setg(errp,
- "device is incompatible with dataplane "
- "(transport does not support notifiers)");
- return;
+ /* If dataplane is (re-)enabled while the guest is running there could
+ * be block jobs that can conflict.
+ */
+ if (blk_op_is_blocked(conf->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
+ error_prepend(errp, "cannot start virtio-blk dataplane: ");
+ return;
+ }
}
-
- /* If dataplane is (re-)enabled while the guest is running there could be
- * block jobs that can conflict.
- */
- if (blk_op_is_blocked(conf->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
- error_prepend(errp, "cannot start dataplane thread: ");
+ /* Don't try if transport does not support notifiers. */
+ if (!virtio_device_ioeventfd_enabled(vdev)) {
return;
}
@@ -112,9 +117,13 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
s->vdev = vdev;
s->conf = conf;
- s->iothread = conf->iothread;
- object_ref(OBJECT(s->iothread));
- s->ctx = iothread_get_aio_context(s->iothread);
+ if (conf->iothread) {
+ s->iothread = conf->iothread;
+ object_ref(OBJECT(s->iothread));
+ s->ctx = iothread_get_aio_context(s->iothread);
+ } else {
+ s->ctx = qemu_get_aio_context();
+ }
s->bh = aio_bh_new(s->ctx, notify_guest_bh, s);
s->batch_notify_vqs = bitmap_new(conf->num_queues);
@@ -124,14 +133,19 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
/* Context: QEMU global mutex held */
void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
{
+ VirtIOBlock *vblk;
+
if (!s) {
return;
}
- virtio_blk_data_plane_stop(s);
+ vblk = VIRTIO_BLK(s->vdev);
+ assert(!vblk->dataplane_started);
g_free(s->batch_notify_vqs);
qemu_bh_delete(s->bh);
- object_unref(OBJECT(s->iothread));
+ if (s->iothread) {
+ object_unref(OBJECT(s->iothread));
+ }
g_free(s);
}
@@ -147,17 +161,18 @@ static void virtio_blk_data_plane_handle_output(VirtIODevice *vdev,
}
/* Context: QEMU global mutex held */
-void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
+int virtio_blk_data_plane_start(VirtIODevice *vdev)
{
- BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev)));
+ VirtIOBlock *vblk = VIRTIO_BLK(vdev);
+ VirtIOBlockDataPlane *s = vblk->dataplane;
+ BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vblk)));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
- VirtIOBlock *vblk = VIRTIO_BLK(s->vdev);
unsigned i;
unsigned nvqs = s->conf->num_queues;
int r;
if (vblk->dataplane_started || s->starting) {
- return;
+ return 0;
}
s->starting = true;
@@ -204,20 +219,22 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
virtio_blk_data_plane_handle_output);
}
aio_context_release(s->ctx);
- return;
+ return 0;
fail_guest_notifiers:
vblk->dataplane_disabled = true;
s->starting = false;
vblk->dataplane_started = true;
+ return -ENOSYS;
}
/* Context: QEMU global mutex held */
-void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
+void virtio_blk_data_plane_stop(VirtIODevice *vdev)
{
- BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev)));
+ VirtIOBlock *vblk = VIRTIO_BLK(vdev);
+ VirtIOBlockDataPlane *s = vblk->dataplane;
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vblk));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
- VirtIOBlock *vblk = VIRTIO_BLK(s->vdev);
unsigned i;
unsigned nvqs = s->conf->num_queues;
diff --git a/hw/block/dataplane/virtio-blk.h b/hw/block/dataplane/virtio-blk.h
index b1f0b95..db3f47b 100644
--- a/hw/block/dataplane/virtio-blk.h
+++ b/hw/block/dataplane/virtio-blk.h
@@ -23,9 +23,9 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
VirtIOBlockDataPlane **dataplane,
Error **errp);
void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s);
-void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s);
-void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s);
-void virtio_blk_data_plane_drain(VirtIOBlockDataPlane *s);
void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s, VirtQueue *vq);
+int virtio_blk_data_plane_start(VirtIODevice *vdev);
+void virtio_blk_data_plane_stop(VirtIODevice *vdev);
+
#endif /* HW_DATAPLANE_VIRTIO_BLK_H */
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 37fe72b..0c5fd27 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -611,7 +611,7 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
/* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
* dataplane here instead of waiting for .set_status().
*/
- virtio_blk_data_plane_start(s->dataplane);
+ virtio_device_start_ioeventfd(vdev);
if (!s->dataplane_disabled) {
return;
}
@@ -687,11 +687,9 @@ static void virtio_blk_reset(VirtIODevice *vdev)
virtio_blk_free_request(req);
}
- if (s->dataplane) {
- virtio_blk_data_plane_stop(s->dataplane);
- }
aio_context_release(ctx);
+ assert(!s->dataplane_started);
blk_set_enable_write_cache(s->blk, s->original_wce);
}
@@ -789,9 +787,8 @@ static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
{
VirtIOBlock *s = VIRTIO_BLK(vdev);
- if (s->dataplane && !(status & (VIRTIO_CONFIG_S_DRIVER |
- VIRTIO_CONFIG_S_DRIVER_OK))) {
- virtio_blk_data_plane_stop(s->dataplane);
+ if (!(status & (VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK))) {
+ assert(!s->dataplane_started);
}
if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
@@ -919,7 +916,7 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1;
for (i = 0; i < conf->num_queues; i++) {
- virtio_add_queue_aio(vdev, 128, virtio_blk_handle_output);
+ virtio_add_queue(vdev, 128, virtio_blk_handle_output);
}
virtio_blk_data_plane_create(vdev, conf, &s->dataplane, &err);
if (err != NULL) {
@@ -1002,6 +999,8 @@ static void virtio_blk_class_init(ObjectClass *klass, void *data)
vdc->reset = virtio_blk_reset;
vdc->save = virtio_blk_save_device;
vdc->load = virtio_blk_load_device;
+ vdc->start_ioeventfd = virtio_blk_data_plane_start;
+ vdc->stop_ioeventfd = virtio_blk_data_plane_stop;
}
static const TypeInfo virtio_blk_info = {
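The ioeventfd rework generalizes this pattern: any virtio device can expose
dataplane-style start/stop through the new VirtioDeviceClass hooks. A
minimal sketch for a hypothetical "virtio-foo" device (hook signatures as
introduced by this series; a real device would move its host-notifier
handling into an AioContext here, as virtio-blk does above):

    static int virtio_foo_start_ioeventfd(VirtIODevice *vdev)
    {
        /* Attach host notifier handlers in the desired AioContext. */
        return 0;
    }

    static void virtio_foo_stop_ioeventfd(VirtIODevice *vdev)
    {
        /* Detach host notifier handlers again. */
    }

    static void virtio_foo_class_init(ObjectClass *klass, void *data)
    {
        VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

        vdc->start_ioeventfd = virtio_foo_start_ioeventfd;
        vdc->stop_ioeventfd  = virtio_foo_stop_ioeventfd;
    }

A guest may kick a queue before setting DRIVER_OK, so the handle_output
callback can trigger a late start with virtio_device_start_ioeventfd(vdev),
exactly as virtio_blk_handle_output does in the hunk above.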
diff --git a/hw/core/hotplug.c b/hw/core/hotplug.c
index 17ac986..ab34c19 100644
--- a/hw/core/hotplug.c
+++ b/hw/core/hotplug.c
@@ -35,6 +35,17 @@ void hotplug_handler_plug(HotplugHandler *plug_handler,
}
}
+void hotplug_handler_post_plug(HotplugHandler *plug_handler,
+ DeviceState *plugged_dev,
+ Error **errp)
+{
+ HotplugHandlerClass *hdc = HOTPLUG_HANDLER_GET_CLASS(plug_handler);
+
+ if (hdc->post_plug) {
+ hdc->post_plug(plug_handler, plugged_dev, errp);
+ }
+}
+
void hotplug_handler_unplug_request(HotplugHandler *plug_handler,
DeviceState *plugged_dev,
Error **errp)
diff --git a/hw/core/qdev.c b/hw/core/qdev.c
index 5783442..d835e62 100644
--- a/hw/core/qdev.c
+++ b/hw/core/qdev.c
@@ -945,10 +945,21 @@ static void device_set_realized(Object *obj, bool value, Error **errp)
goto child_realize_fail;
}
}
+
if (dev->hotplugged) {
device_reset(dev);
}
dev->pending_deleted_event = false;
+ dev->realized = value;
+
+ if (hotplug_ctrl) {
+ hotplug_handler_post_plug(hotplug_ctrl, dev, &local_err);
+ }
+
+ if (local_err != NULL) {
+ dev->realized = value;
+ goto post_realize_fail;
+ }
} else if (!value && dev->realized) {
Error **local_errp = NULL;
QLIST_FOREACH(bus, &dev->child_bus, sibling) {
@@ -965,13 +976,14 @@ static void device_set_realized(Object *obj, bool value, Error **errp)
}
dev->pending_deleted_event = true;
DEVICE_LISTENER_CALL(unrealize, Reverse, dev);
- }
- if (local_err != NULL) {
- goto fail;
+ if (local_err != NULL) {
+ goto fail;
+ }
+
+ dev->realized = value;
}
- dev->realized = value;
return;
child_realize_fail:
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index 5cd1da9..ce9cc93 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -2068,6 +2068,13 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
method = aml_method("_E03", 0, AML_NOTSERIALIZED);
aml_append(method, aml_call0(MEMORY_HOTPLUG_HANDLER_PATH));
aml_append(scope, method);
+
+ if (pcms->acpi_nvdimm_state.is_enabled) {
+ method = aml_method("_E04", 0, AML_NOTSERIALIZED);
+ aml_append(method, aml_notify(aml_name("\\_SB.NVDR"),
+ aml_int(0x80)));
+ aml_append(scope, method);
+ }
}
aml_append(dsdt, scope);
@@ -2810,7 +2817,7 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine)
}
if (pcms->acpi_nvdimm_state.is_enabled) {
nvdimm_build_acpi(table_offsets, tables_blob, tables->linker,
- pcms->acpi_nvdimm_state.dsm_mem);
+ &pcms->acpi_nvdimm_state, machine->ram_slots);
}
/* Add tables supplied by user (if any) */
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index f56ea0f..c011552 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -1721,6 +1721,16 @@ out:
error_propagate(errp, local_err);
}
+static void pc_dimm_post_plug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ PCMachineState *pcms = PC_MACHINE(hotplug_dev);
+
+ if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) {
+ nvdimm_acpi_hotplug(&pcms->acpi_nvdimm_state);
+ }
+}
+
static void pc_dimm_unplug_request(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
@@ -1734,6 +1744,12 @@ static void pc_dimm_unplug_request(HotplugHandler *hotplug_dev,
goto out;
}
+ if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) {
+ error_setg(&local_err,
+ "nvdimm device hot unplug is not supported yet.");
+ goto out;
+ }
+
hhc = HOTPLUG_HANDLER_GET_CLASS(pcms->acpi_dev);
hhc->unplug_request(HOTPLUG_HANDLER(pcms->acpi_dev), dev, &local_err);
@@ -1751,6 +1767,12 @@ static void pc_dimm_unplug(HotplugHandler *hotplug_dev,
HotplugHandlerClass *hhc;
Error *local_err = NULL;
+ if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) {
+ error_setg(&local_err,
+ "nvdimm device hot unplug is not supported yet.");
+ goto out;
+ }
+
hhc = HOTPLUG_HANDLER_GET_CLASS(pcms->acpi_dev);
hhc->unplug(HOTPLUG_HANDLER(pcms->acpi_dev), dev, &local_err);
@@ -1986,6 +2008,14 @@ static void pc_machine_device_plug_cb(HotplugHandler *hotplug_dev,
}
}
+static void pc_machine_device_post_plug_cb(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
+ pc_dimm_post_plug(hotplug_dev, dev, errp);
+ }
+}
+
static void pc_machine_device_unplug_request_cb(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
@@ -2290,6 +2320,7 @@ static void pc_machine_class_init(ObjectClass *oc, void *data)
mc->reset = pc_machine_reset;
hc->pre_plug = pc_machine_device_pre_plug_cb;
hc->plug = pc_machine_device_plug_cb;
+ hc->post_plug = pc_machine_device_post_plug_cb;
hc->unplug_request = pc_machine_device_unplug_request_cb;
hc->unplug = pc_machine_device_unplug_cb;
nc->nmi_monitor_handler = x86_nmi;
diff --git a/hw/ipmi/Makefile.objs b/hw/ipmi/Makefile.objs
index a90318d..1b422bb 100644
--- a/hw/ipmi/Makefile.objs
+++ b/hw/ipmi/Makefile.objs
@@ -1,5 +1,5 @@
common-obj-$(CONFIG_IPMI) += ipmi.o
common-obj-$(CONFIG_IPMI_LOCAL) += ipmi_bmc_sim.o
-common-obj-$(CONFIG_IPMI_LOCAL) += ipmi_bmc_extern.o
+common-obj-$(CONFIG_IPMI_EXTERN) += ipmi_bmc_extern.o
common-obj-$(CONFIG_ISA_IPMI_KCS) += isa_ipmi_kcs.o
common-obj-$(CONFIG_ISA_IPMI_BT) += isa_ipmi_bt.o
diff --git a/hw/ipmi/ipmi.c b/hw/ipmi/ipmi.c
index f09f217..5cf1caa 100644
--- a/hw/ipmi/ipmi.c
+++ b/hw/ipmi/ipmi.c
@@ -51,7 +51,7 @@ static int ipmi_do_hw_op(IPMIInterface *s, enum ipmi_op op, int checkonly)
if (checkonly) {
return 0;
}
- qemu_system_powerdown_request();
+ qemu_system_shutdown_request();
return 0;
case IPMI_SEND_NMI:
@@ -61,9 +61,15 @@ static int ipmi_do_hw_op(IPMIInterface *s, enum ipmi_op op, int checkonly)
qmp_inject_nmi(NULL);
return 0;
+ case IPMI_SHUTDOWN_VIA_ACPI_OVERTEMP:
+ if (checkonly) {
+ return 0;
+ }
+ qemu_system_powerdown_request();
+ return 0;
+
case IPMI_POWERCYCLE_CHASSIS:
case IPMI_PULSE_DIAG_IRQ:
- case IPMI_SHUTDOWN_VIA_ACPI_OVERTEMP:
case IPMI_POWERON_CHASSIS:
default:
return IPMI_CC_COMMAND_NOT_SUPPORTED;
diff --git a/hw/ipmi/ipmi_bmc_extern.c b/hw/ipmi/ipmi_bmc_extern.c
index 4b310e5..e8e3d25 100644
--- a/hw/ipmi/ipmi_bmc_extern.c
+++ b/hw/ipmi/ipmi_bmc_extern.c
@@ -54,7 +54,8 @@
#define VM_CAPABILITIES_IRQ 0x04
#define VM_CAPABILITIES_NMI 0x08
#define VM_CAPABILITIES_ATTN 0x10
-#define VM_CMD_FORCEOFF 0x09
+#define VM_CAPABILITIES_GRACEFUL_SHUTDOWN 0x20
+#define VM_CMD_GRACEFUL_SHUTDOWN 0x09
#define TYPE_IPMI_BMC_EXTERN "ipmi-bmc-extern"
#define IPMI_BMC_EXTERN(obj) OBJECT_CHECK(IPMIBmcExtern, (obj), \
@@ -276,8 +277,8 @@ static void handle_hw_op(IPMIBmcExtern *ibe, unsigned char hw_op)
k->do_hw_op(s, IPMI_SEND_NMI, 0);
break;
- case VM_CMD_FORCEOFF:
- qemu_system_shutdown_request();
+ case VM_CMD_GRACEFUL_SHUTDOWN:
+ k->do_hw_op(s, IPMI_SHUTDOWN_VIA_ACPI_OVERTEMP, 0);
break;
}
}
@@ -401,6 +402,10 @@ static void chr_event(void *opaque, int event)
if (k->do_hw_op(ibe->parent.intf, IPMI_POWEROFF_CHASSIS, 1) == 0) {
v |= VM_CAPABILITIES_POWER;
}
+ if (k->do_hw_op(ibe->parent.intf, IPMI_SHUTDOWN_VIA_ACPI_OVERTEMP, 1)
+ == 0) {
+ v |= VM_CAPABILITIES_GRACEFUL_SHUTDOWN;
+ }
if (k->do_hw_op(ibe->parent.intf, IPMI_RESET_CHASSIS, 1) == 0) {
v |= VM_CAPABILITIES_RESET;
}
@@ -512,6 +517,7 @@ static void ipmi_bmc_extern_class_init(ObjectClass *oc, void *data)
bk->handle_command = ipmi_bmc_extern_handle_command;
bk->handle_reset = ipmi_bmc_extern_handle_reset;
+ dc->hotpluggable = false;
dc->realize = ipmi_bmc_extern_realize;
dc->props = ipmi_bmc_extern_properties;
}
diff --git a/hw/ipmi/ipmi_bmc_sim.c b/hw/ipmi/ipmi_bmc_sim.c
index 17c7c0e..c7883d6 100644
--- a/hw/ipmi/ipmi_bmc_sim.c
+++ b/hw/ipmi/ipmi_bmc_sim.c
@@ -217,7 +217,6 @@ struct IPMIBmcSim {
/* Odd netfns are for responses, so we only need the even ones. */
const IPMINetfn *netfns[MAX_NETFNS / 2];
- QemuMutex lock;
/* We allow one event in the buffer */
uint8_t evtbuf[16];
@@ -940,7 +939,6 @@ static void get_msg(IPMIBmcSim *ibs,
{
IPMIRcvBufEntry *msg;
- qemu_mutex_lock(&ibs->lock);
if (QTAILQ_EMPTY(&ibs->rcvbufs)) {
rsp_buffer_set_error(rsp, 0x80); /* Queue empty */
goto out;
@@ -960,7 +958,6 @@ static void get_msg(IPMIBmcSim *ibs,
}
out:
- qemu_mutex_unlock(&ibs->lock);
return;
}
@@ -1055,11 +1052,9 @@ static void send_msg(IPMIBmcSim *ibs,
end_msg:
msg->buf[msg->len] = ipmb_checksum(msg->buf, msg->len, 0);
msg->len++;
- qemu_mutex_lock(&ibs->lock);
QTAILQ_INSERT_TAIL(&ibs->rcvbufs, msg, entry);
ibs->msg_flags |= IPMI_BMC_MSG_FLAG_RCV_MSG_QUEUE;
k->set_atn(s, 1, attn_irq_enabled(ibs));
- qemu_mutex_unlock(&ibs->lock);
}
static void do_watchdog_reset(IPMIBmcSim *ibs)
@@ -1753,7 +1748,6 @@ static void ipmi_sim_realize(DeviceState *dev, Error **errp)
unsigned int i;
IPMIBmcSim *ibs = IPMI_BMC_SIMULATOR(b);
- qemu_mutex_init(&ibs->lock);
QTAILQ_INIT(&ibs->rcvbufs);
ibs->bmc_global_enables = (1 << IPMI_BMC_EVENT_LOG_BIT);
@@ -1791,6 +1785,7 @@ static void ipmi_sim_class_init(ObjectClass *oc, void *data)
DeviceClass *dc = DEVICE_CLASS(oc);
IPMIBmcClass *bk = IPMI_BMC_CLASS(oc);
+ dc->hotpluggable = false;
dc->realize = ipmi_sim_realize;
bk->handle_command = ipmi_sim_handle_command;
}
diff --git a/hw/mem/nvdimm.c b/hw/mem/nvdimm.c
index 7895805..db896b0 100644
--- a/hw/mem/nvdimm.c
+++ b/hw/mem/nvdimm.c
@@ -148,13 +148,9 @@ static MemoryRegion *nvdimm_get_vmstate_memory_region(PCDIMMDevice *dimm)
static void nvdimm_class_init(ObjectClass *oc, void *data)
{
- DeviceClass *dc = DEVICE_CLASS(oc);
PCDIMMDeviceClass *ddc = PC_DIMM_CLASS(oc);
NVDIMMClass *nvc = NVDIMM_CLASS(oc);
- /* nvdimm hotplug has not been supported yet. */
- dc->hotpluggable = false;
-
ddc->realize = nvdimm_realize;
ddc->get_memory_region = nvdimm_get_memory_region;
ddc->get_vmstate_memory_region = nvdimm_get_vmstate_memory_region;
diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
index ee136ab..7d7f8f6 100644
--- a/hw/s390x/virtio-ccw.c
+++ b/hw/s390x/virtio-ccw.c
@@ -59,38 +59,11 @@ static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev)
virtio_bus_stop_ioeventfd(&dev->bus);
}
-static bool virtio_ccw_ioeventfd_started(DeviceState *d)
+static bool virtio_ccw_ioeventfd_enabled(DeviceState *d)
{
VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
- return dev->ioeventfd_started;
-}
-
-static void virtio_ccw_ioeventfd_set_started(DeviceState *d, bool started,
- bool err)
-{
- VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
-
- dev->ioeventfd_started = started;
- if (err) {
- /* Disable ioeventfd for this device. */
- dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
- }
-}
-
-static bool virtio_ccw_ioeventfd_disabled(DeviceState *d)
-{
- VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
-
- return dev->ioeventfd_disabled ||
- !(dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD);
-}
-
-static void virtio_ccw_ioeventfd_set_disabled(DeviceState *d, bool disabled)
-{
- VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
-
- dev->ioeventfd_disabled = disabled;
+ return (dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD) != 0;
}
static int virtio_ccw_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
@@ -709,6 +682,10 @@ static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp)
sch->cssid, sch->ssid, sch->schid, sch->devno,
ccw_dev->bus_id.valid ? "user-configured" : "auto-configured");
+ if (!kvm_eventfds_enabled()) {
+ dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
+ }
+
if (k->realize) {
k->realize(dev, &err);
}
@@ -1311,10 +1288,6 @@ static void virtio_ccw_device_plugged(DeviceState *d, Error **errp)
return;
}
- if (!kvm_eventfds_enabled()) {
- dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
- }
-
sch->id.cu_model = virtio_bus_get_vdev_id(&dev->bus);
@@ -1616,10 +1589,7 @@ static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
k->pre_plugged = virtio_ccw_pre_plugged;
k->device_plugged = virtio_ccw_device_plugged;
k->device_unplugged = virtio_ccw_device_unplugged;
- k->ioeventfd_started = virtio_ccw_ioeventfd_started;
- k->ioeventfd_set_started = virtio_ccw_ioeventfd_set_started;
- k->ioeventfd_disabled = virtio_ccw_ioeventfd_disabled;
- k->ioeventfd_set_disabled = virtio_ccw_ioeventfd_set_disabled;
+ k->ioeventfd_enabled = virtio_ccw_ioeventfd_enabled;
k->ioeventfd_assign = virtio_ccw_ioeventfd_assign;
}
diff --git a/hw/s390x/virtio-ccw.h b/hw/s390x/virtio-ccw.h
index 565094e..77d10f1 100644
--- a/hw/s390x/virtio-ccw.h
+++ b/hw/s390x/virtio-ccw.h
@@ -86,8 +86,6 @@ struct VirtioCcwDevice {
int revision;
uint32_t max_rev;
VirtioBusState bus;
- bool ioeventfd_started;
- bool ioeventfd_disabled;
uint32_t flags;
uint8_t thinint_isc;
AdapterRoutes routes;
diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
index 9424f0e..f2ea29d 100644
--- a/hw/scsi/virtio-scsi-dataplane.c
+++ b/hw/scsi/virtio-scsi-dataplane.c
@@ -12,6 +12,7 @@
*/
#include "qemu/osdep.h"
+#include "qapi/error.h"
#include "hw/virtio/virtio-scsi.h"
#include "qemu/error-report.h"
#include "sysemu/block-backend.h"
@@ -21,20 +22,30 @@
#include "hw/virtio/virtio-access.h"
/* Context: QEMU global mutex held */
-void virtio_scsi_set_iothread(VirtIOSCSI *s, IOThread *iothread)
+void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp)
{
- BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
- VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
+ VirtIODevice *vdev = VIRTIO_DEVICE(s);
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
- assert(!s->ctx);
- s->ctx = iothread_get_aio_context(vs->conf.iothread);
-
- /* Don't try if transport does not support notifiers. */
- if (!k->set_guest_notifiers || !k->ioeventfd_started) {
- fprintf(stderr, "virtio-scsi: Failed to set iothread "
- "(transport does not support notifiers)");
- exit(1);
+ if (vs->conf.iothread) {
+ if (!k->set_guest_notifiers || !k->ioeventfd_assign) {
+ error_setg(errp,
+ "device is incompatible with iothread "
+ "(transport does not support notifiers)");
+ return;
+ }
+ if (!virtio_device_ioeventfd_enabled(vdev)) {
+ error_setg(errp, "ioeventfd is required for iothread");
+ return;
+ }
+ s->ctx = iothread_get_aio_context(vs->conf.iothread);
+ } else {
+ if (!virtio_device_ioeventfd_enabled(vdev)) {
+ return;
+ }
+ s->ctx = qemu_get_aio_context();
}
}
@@ -105,19 +116,19 @@ static void virtio_scsi_clear_aio(VirtIOSCSI *s)
}
/* Context: QEMU global mutex held */
-void virtio_scsi_dataplane_start(VirtIOSCSI *s)
+int virtio_scsi_dataplane_start(VirtIODevice *vdev)
{
int i;
int rc;
- BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
- VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
+ VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
+ VirtIOSCSI *s = VIRTIO_SCSI(vdev);
if (s->dataplane_started ||
s->dataplane_starting ||
- s->dataplane_fenced ||
- s->ctx != iothread_get_aio_context(vs->conf.iothread)) {
- return;
+ s->dataplane_fenced) {
+ return 0;
}
s->dataplane_starting = true;
@@ -152,7 +163,7 @@ void virtio_scsi_dataplane_start(VirtIOSCSI *s)
s->dataplane_starting = false;
s->dataplane_started = true;
aio_context_release(s->ctx);
- return;
+ return 0;
fail_vrings:
virtio_scsi_clear_aio(s);
@@ -165,14 +176,16 @@ fail_guest_notifiers:
s->dataplane_fenced = true;
s->dataplane_starting = false;
s->dataplane_started = true;
+ return -ENOSYS;
}
/* Context: QEMU global mutex held */
-void virtio_scsi_dataplane_stop(VirtIOSCSI *s)
+void virtio_scsi_dataplane_stop(VirtIODevice *vdev)
{
- BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
- VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
+ VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
+ VirtIOSCSI *s = VIRTIO_SCSI(vdev);
int i;
if (!s->dataplane_started || s->dataplane_stopping) {
@@ -186,7 +199,6 @@ void virtio_scsi_dataplane_stop(VirtIOSCSI *s)
return;
}
s->dataplane_stopping = true;
- assert(s->ctx == iothread_get_aio_context(vs->conf.iothread));
aio_context_acquire(s->ctx);
virtio_scsi_clear_aio(s);
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index 4762f05..3e5ae6a 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -434,7 +434,7 @@ static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
VirtIOSCSI *s = (VirtIOSCSI *)vdev;
if (s->ctx) {
- virtio_scsi_dataplane_start(s);
+ virtio_device_start_ioeventfd(vdev);
if (!s->dataplane_fenced) {
return;
}
@@ -610,7 +610,7 @@ static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
VirtIOSCSI *s = (VirtIOSCSI *)vdev;
if (s->ctx) {
- virtio_scsi_dataplane_start(s);
+ virtio_device_start_ioeventfd(vdev);
if (!s->dataplane_fenced) {
return;
}
@@ -669,9 +669,7 @@ static void virtio_scsi_reset(VirtIODevice *vdev)
VirtIOSCSI *s = VIRTIO_SCSI(vdev);
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
- if (s->ctx) {
- virtio_scsi_dataplane_stop(s);
- }
+ assert(!s->dataplane_started);
s->resetting++;
qbus_reset_all(&s->bus.qbus);
s->resetting--;
@@ -749,7 +747,7 @@ static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq)
VirtIOSCSI *s = VIRTIO_SCSI(vdev);
if (s->ctx) {
- virtio_scsi_dataplane_start(s);
+ virtio_device_start_ioeventfd(vdev);
if (!s->dataplane_fenced) {
return;
}
@@ -848,14 +846,10 @@ void virtio_scsi_common_realize(DeviceState *dev, Error **errp,
s->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
s->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;
- s->ctrl_vq = virtio_add_queue_aio(vdev, VIRTIO_SCSI_VQ_SIZE, ctrl);
- s->event_vq = virtio_add_queue_aio(vdev, VIRTIO_SCSI_VQ_SIZE, evt);
+ s->ctrl_vq = virtio_add_queue(vdev, VIRTIO_SCSI_VQ_SIZE, ctrl);
+ s->event_vq = virtio_add_queue(vdev, VIRTIO_SCSI_VQ_SIZE, evt);
for (i = 0; i < s->conf.num_queues; i++) {
- s->cmd_vqs[i] = virtio_add_queue_aio(vdev, VIRTIO_SCSI_VQ_SIZE, cmd);
- }
-
- if (s->conf.iothread) {
- virtio_scsi_set_iothread(VIRTIO_SCSI(s), s->conf.iothread);
+ s->cmd_vqs[i] = virtio_add_queue(vdev, VIRTIO_SCSI_VQ_SIZE, cmd);
}
}
@@ -885,6 +879,8 @@ static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
return;
}
}
+
+ virtio_scsi_dataplane_setup(s, errp);
}
static void virtio_scsi_instance_init(Object *obj)
@@ -957,6 +953,8 @@ static void virtio_scsi_class_init(ObjectClass *klass, void *data)
vdc->set_config = virtio_scsi_set_config;
vdc->get_features = virtio_scsi_get_features;
vdc->reset = virtio_scsi_reset;
+ vdc->start_ioeventfd = virtio_scsi_dataplane_start;
+ vdc->stop_ioeventfd = virtio_scsi_dataplane_stop;
hc->plug = virtio_scsi_hotplug;
hc->unplug = virtio_scsi_hotunplug;
}
diff --git a/hw/virtio/Makefile.objs b/hw/virtio/Makefile.objs
index e716308..95c4c30 100644
--- a/hw/virtio/Makefile.objs
+++ b/hw/virtio/Makefile.objs
@@ -7,3 +7,5 @@ obj-y += virtio.o virtio-balloon.o
obj-$(CONFIG_LINUX) += vhost.o vhost-backend.o vhost-user.o
obj-$(CONFIG_VHOST_VSOCK) += vhost-vsock.o
+obj-y += virtio-crypto.o
+obj-$(CONFIG_VIRTIO_PCI) += virtio-crypto-pci.o
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index bd051ab..131f164 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -1190,12 +1190,13 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
int i, r, e;
- if (!k->ioeventfd_started) {
+ if (!k->ioeventfd_assign) {
error_report("binding does not support host notifiers");
r = -ENOSYS;
goto fail;
}
+ virtio_device_stop_ioeventfd(vdev);
for (i = 0; i < hdev->nvqs; ++i) {
r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
true);
@@ -1215,6 +1216,7 @@ fail_vq:
}
assert (e >= 0);
}
+ virtio_device_start_ioeventfd(vdev);
fail:
return r;
}
@@ -1237,6 +1239,7 @@ void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
}
assert (r >= 0);
}
+ virtio_device_start_ioeventfd(vdev);
}
/* Test and clear event pending status.
diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c
index 1d77028..cfba053 100644
--- a/hw/virtio/virtio-balloon.c
+++ b/hw/virtio/virtio-balloon.c
@@ -394,21 +394,9 @@ static void virtio_balloon_to_target(void *opaque, ram_addr_t target)
trace_virtio_balloon_to_target(target, dev->num_pages);
}
-static void virtio_balloon_save_device(VirtIODevice *vdev, QEMUFile *f)
+static int virtio_balloon_post_load_device(void *opaque, int version_id)
{
- VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
-
- qemu_put_be32(f, s->num_pages);
- qemu_put_be32(f, s->actual);
-}
-
-static int virtio_balloon_load_device(VirtIODevice *vdev, QEMUFile *f,
- int version_id)
-{
- VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
-
- s->num_pages = qemu_get_be32(f);
- s->actual = qemu_get_be32(f);
+ VirtIOBalloon *s = VIRTIO_BALLOON(opaque);
if (balloon_stats_enabled(s)) {
balloon_stats_change_timer(s, s->stats_poll_interval);
@@ -416,6 +404,18 @@ static int virtio_balloon_load_device(VirtIODevice *vdev, QEMUFile *f,
return 0;
}
+static const VMStateDescription vmstate_virtio_balloon_device = {
+ .name = "virtio-balloon-device",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .post_load = virtio_balloon_post_load_device,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(num_pages, VirtIOBalloon),
+ VMSTATE_UINT32(actual, VirtIOBalloon),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
static void virtio_balloon_device_realize(DeviceState *dev, Error **errp)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
@@ -517,9 +517,8 @@ static void virtio_balloon_class_init(ObjectClass *klass, void *data)
vdc->get_config = virtio_balloon_get_config;
vdc->set_config = virtio_balloon_set_config;
vdc->get_features = virtio_balloon_get_features;
- vdc->save = virtio_balloon_save_device;
- vdc->load = virtio_balloon_load_device;
vdc->set_status = virtio_balloon_set_status;
+ vdc->vmsd = &vmstate_virtio_balloon_device;
}
static const TypeInfo virtio_balloon_info = {
diff --git a/hw/virtio/virtio-bus.c b/hw/virtio/virtio-bus.c
index 11f65bd..bf61f66 100644
--- a/hw/virtio/virtio-bus.c
+++ b/hw/virtio/virtio-bus.c
@@ -147,131 +147,97 @@ void virtio_bus_set_vdev_config(VirtioBusState *bus, uint8_t *config)
}
}
-/*
- * This function handles both assigning the ioeventfd handler and
- * registering it with the kernel.
- * assign: register/deregister ioeventfd with the kernel
- * set_handler: use the generic ioeventfd handler
- */
-static int set_host_notifier_internal(DeviceState *proxy, VirtioBusState *bus,
- int n, bool assign, bool set_handler)
+int virtio_bus_start_ioeventfd(VirtioBusState *bus)
{
- VirtIODevice *vdev = virtio_bus_get_device(bus);
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
- VirtQueue *vq = virtio_get_queue(vdev, n);
- EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
- int r = 0;
+ DeviceState *proxy = DEVICE(BUS(bus)->parent);
+ VirtIODevice *vdev = virtio_bus_get_device(bus);
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
+ int r;
- if (assign) {
- r = event_notifier_init(notifier, 1);
- if (r < 0) {
- error_report("%s: unable to init event notifier: %s (%d)",
- __func__, strerror(-r), r);
- return r;
- }
- virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
- r = k->ioeventfd_assign(proxy, notifier, n, assign);
- if (r < 0) {
- error_report("%s: unable to assign ioeventfd: %d", __func__, r);
- virtio_queue_set_host_notifier_fd_handler(vq, false, false);
- event_notifier_cleanup(notifier);
- return r;
- }
- } else {
- k->ioeventfd_assign(proxy, notifier, n, assign);
- virtio_queue_set_host_notifier_fd_handler(vq, false, false);
- event_notifier_cleanup(notifier);
+ if (!k->ioeventfd_assign || !k->ioeventfd_enabled(proxy)) {
+ return -ENOSYS;
}
- return r;
+ if (bus->ioeventfd_started) {
+ return 0;
+ }
+ r = vdc->start_ioeventfd(vdev);
+ if (r < 0) {
+ error_report("%s: failed. Fallback to userspace (slower).", __func__);
+ return r;
+ }
+ bus->ioeventfd_started = true;
+ return 0;
}
-void virtio_bus_start_ioeventfd(VirtioBusState *bus)
+void virtio_bus_stop_ioeventfd(VirtioBusState *bus)
{
- VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
- DeviceState *proxy = DEVICE(BUS(bus)->parent);
VirtIODevice *vdev;
- int n, r;
+ VirtioDeviceClass *vdc;
- if (!k->ioeventfd_started || k->ioeventfd_started(proxy)) {
+ if (!bus->ioeventfd_started) {
return;
}
- if (k->ioeventfd_disabled(proxy)) {
- return;
- }
- vdev = virtio_bus_get_device(bus);
- for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
- if (!virtio_queue_get_num(vdev, n)) {
- continue;
- }
- r = set_host_notifier_internal(proxy, bus, n, true, true);
- if (r < 0) {
- goto assign_error;
- }
- }
- k->ioeventfd_set_started(proxy, true, false);
- return;
-
-assign_error:
- while (--n >= 0) {
- if (!virtio_queue_get_num(vdev, n)) {
- continue;
- }
- r = set_host_notifier_internal(proxy, bus, n, false, false);
- assert(r >= 0);
- }
- k->ioeventfd_set_started(proxy, false, true);
- error_report("%s: failed. Fallback to userspace (slower).", __func__);
+ vdev = virtio_bus_get_device(bus);
+ vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
+ vdc->stop_ioeventfd(vdev);
+ bus->ioeventfd_started = false;
}
-void virtio_bus_stop_ioeventfd(VirtioBusState *bus)
+bool virtio_bus_ioeventfd_enabled(VirtioBusState *bus)
{
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
DeviceState *proxy = DEVICE(BUS(bus)->parent);
- VirtIODevice *vdev;
- int n, r;
- if (!k->ioeventfd_started || !k->ioeventfd_started(proxy)) {
- return;
- }
- vdev = virtio_bus_get_device(bus);
- for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
- if (!virtio_queue_get_num(vdev, n)) {
- continue;
- }
- r = set_host_notifier_internal(proxy, bus, n, false, false);
- assert(r >= 0);
- }
- k->ioeventfd_set_started(proxy, false, false);
+ return k->ioeventfd_assign && k->ioeventfd_enabled(proxy);
}
/*
- * This function switches from/to the generic ioeventfd handler.
- * assign==false means 'use generic ioeventfd handler'.
+ * This function switches ioeventfd on/off in the device.
+ * The caller must set or clear the handlers for the EventNotifier.
*/
int virtio_bus_set_host_notifier(VirtioBusState *bus, int n, bool assign)
{
+ VirtIODevice *vdev = virtio_bus_get_device(bus);
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
DeviceState *proxy = DEVICE(BUS(bus)->parent);
+ VirtQueue *vq = virtio_get_queue(vdev, n);
+ EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
+ int r = 0;
- if (!k->ioeventfd_started) {
+ if (!k->ioeventfd_assign) {
return -ENOSYS;
}
- k->ioeventfd_set_disabled(proxy, assign);
+
if (assign) {
- /*
- * Stop using the generic ioeventfd, we are doing eventfd handling
- * ourselves below
- *
- * FIXME: We should just switch the handler and not deassign the
- * ioeventfd.
- * Otherwise, there's a window where we don't have an
- * ioeventfd and we may end up with a notification where
- * we don't expect one.
- */
- virtio_bus_stop_ioeventfd(bus);
+ assert(!bus->ioeventfd_started);
+ r = event_notifier_init(notifier, 1);
+ if (r < 0) {
+ error_report("%s: unable to init event notifier: %s (%d)",
+ __func__, strerror(-r), r);
+ return r;
+ }
+ r = k->ioeventfd_assign(proxy, notifier, n, true);
+ if (r < 0) {
+ error_report("%s: unable to assign ioeventfd: %d", __func__, r);
+ goto cleanup_event_notifier;
+ }
+ return 0;
+ } else {
+ if (!bus->ioeventfd_started) {
+ return 0;
+ }
+ k->ioeventfd_assign(proxy, notifier, n, false);
}
- return set_host_notifier_internal(proxy, bus, n, assign, false);
+
+cleanup_event_notifier:
+ /* Test and clear notifier after disabling event,
+ * in case poll callback didn't have time to run.
+ */
+ virtio_queue_host_notifier_read(notifier);
+ event_notifier_cleanup(notifier);
+ return r;
}
static char *virtio_bus_get_dev_path(DeviceState *dev)
diff --git a/hw/virtio/virtio-crypto-pci.c b/hw/virtio/virtio-crypto-pci.c
new file mode 100644
index 0000000..21d9984
--- /dev/null
+++ b/hw/virtio/virtio-crypto-pci.c
@@ -0,0 +1,77 @@
+/*
+ * Virtio crypto device
+ *
+ * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * Authors:
+ * Gonglei <arei.gonglei@xxxxxxxxxx>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ *
+ */
+#include "qemu/osdep.h"
+#include "hw/pci/pci.h"
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/virtio-bus.h"
+#include "hw/virtio/virtio-pci.h"
+#include "hw/virtio/virtio-crypto.h"
+#include "qapi/error.h"
+
+static Property virtio_crypto_pci_properties[] = {
+ DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
+ VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
+ DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_crypto_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
+{
+ VirtIOCryptoPCI *vcrypto = VIRTIO_CRYPTO_PCI(vpci_dev);
+ DeviceState *vdev = DEVICE(&vcrypto->vdev);
+
+ qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
+ virtio_pci_force_virtio_1(vpci_dev);
+ object_property_set_bool(OBJECT(vdev), true, "realized", errp);
+ object_property_set_link(OBJECT(vcrypto),
+ OBJECT(vcrypto->vdev.conf.cryptodev), "cryptodev",
+ NULL);
+}
+
+static void virtio_crypto_pci_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
+ PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
+
+ k->realize = virtio_crypto_pci_realize;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ dc->props = virtio_crypto_pci_properties;
+
+ pcidev_k->class_id = PCI_CLASS_OTHERS;
+}
+
+static void virtio_crypto_initfn(Object *obj)
+{
+ VirtIOCryptoPCI *dev = VIRTIO_CRYPTO_PCI(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_CRYPTO);
+ object_property_add_alias(obj, "cryptodev", OBJECT(&dev->vdev),
+ "cryptodev", &error_abort);
+}
+
+static const TypeInfo virtio_crypto_pci_info = {
+ .name = TYPE_VIRTIO_CRYPTO_PCI,
+ .parent = TYPE_VIRTIO_PCI,
+ .instance_size = sizeof(VirtIOCryptoPCI),
+ .instance_init = virtio_crypto_initfn,
+ .class_init = virtio_crypto_pci_class_init,
+};
+
+static void virtio_crypto_pci_register_types(void)
+{
+ type_register_static(&virtio_crypto_pci_info);
+}
+type_init(virtio_crypto_pci_register_types)
diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
new file mode 100644
index 0000000..170114f
--- /dev/null
+++ b/hw/virtio/virtio-crypto.c
@@ -0,0 +1,898 @@
+/*
+ * Virtio crypto Support
+ *
+ * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * Authors:
+ * Gonglei <arei.gonglei@xxxxxxxxxx>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+#include "qemu/osdep.h"
+#include "qemu/iov.h"
+#include "hw/qdev.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/virtio-crypto.h"
+#include "hw/virtio/virtio-access.h"
+#include "standard-headers/linux/virtio_ids.h"
+
+#define VIRTIO_CRYPTO_VM_VERSION 1
+
+/*
+ * Transfer virtqueue index to crypto queue index.
+ * The control virtqueue is after the data virtqueues
+ * so the input value doesn't need to be adjusted
+ */
+static inline int virtio_crypto_vq2q(int queue_index)
+{
+ return queue_index;
+}
+
+static int
+virtio_crypto_cipher_session_helper(VirtIODevice *vdev,
+ CryptoDevBackendSymSessionInfo *info,
+ struct virtio_crypto_cipher_session_para *cipher_para,
+ struct iovec **iov, unsigned int *out_num)
+{
+ VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
+ unsigned int num = *out_num;
+
+ info->cipher_alg = ldl_le_p(&cipher_para->algo);
+ info->key_len = ldl_le_p(&cipher_para->keylen);
+ info->direction = ldl_le_p(&cipher_para->op);
+ DPRINTF("cipher_alg=%" PRIu32 ", info->direction=%" PRIu32 "\n",
+ info->cipher_alg, info->direction);
+
+ if (info->key_len > vcrypto->conf.max_cipher_key_len) {
+ error_report("virtio-crypto length of cipher key is too big: %u",
+ info->key_len);
+ return -VIRTIO_CRYPTO_ERR;
+ }
+ /* Get cipher key */
+ if (info->key_len > 0) {
+ size_t s;
+ DPRINTF("keylen=%" PRIu32 "\n", info->key_len);
+
+ info->cipher_key = g_malloc(info->key_len);
+ s = iov_to_buf(*iov, num, 0, info->cipher_key, info->key_len);
+ if (unlikely(s != info->key_len)) {
+ virtio_error(vdev, "virtio-crypto cipher key incorrect");
+ return -EFAULT;
+ }
+ iov_discard_front(iov, &num, info->key_len);
+ *out_num = num;
+ }
+
+ return 0;
+}
+
+static int64_t
+virtio_crypto_create_sym_session(VirtIOCrypto *vcrypto,
+ struct virtio_crypto_sym_create_session_req *sess_req,
+ uint32_t queue_id,
+ uint32_t opcode,
+ struct iovec *iov, unsigned int out_num)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
+ CryptoDevBackendSymSessionInfo info;
+ int64_t session_id;
+ int queue_index;
+ uint32_t op_type;
+ Error *local_err = NULL;
+ int ret;
+
+ memset(&info, 0, sizeof(info));
+ op_type = ldl_le_p(&sess_req->op_type);
+ info.op_type = op_type;
+ info.op_code = opcode;
+
+ if (op_type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
+ ret = virtio_crypto_cipher_session_helper(vdev, &info,
+ &sess_req->u.cipher.para,
+ &iov, &out_num);
+ if (ret < 0) {
+ goto err;
+ }
+ } else if (op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
+ size_t s;
+ /* cipher part */
+ ret = virtio_crypto_cipher_session_helper(vdev, &info,
+ &sess_req->u.chain.para.cipher_param,
+ &iov, &out_num);
+ if (ret < 0) {
+ goto err;
+ }
+ /* hash part */
+ info.alg_chain_order = ldl_le_p(
+ &sess_req->u.chain.para.alg_chain_order);
+ info.add_len = ldl_le_p(&sess_req->u.chain.para.aad_len);
+ info.hash_mode = ldl_le_p(&sess_req->u.chain.para.hash_mode);
+ if (info.hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH) {
+ info.hash_alg = ldl_le_p(&sess_req->u.chain.para.u.mac_param.algo);
+ info.auth_key_len = ldl_le_p(
+ &sess_req->u.chain.para.u.mac_param.auth_key_len);
+ info.hash_result_len = ldl_le_p(
+ &sess_req->u.chain.para.u.mac_param.hash_result_len);
+ if (info.auth_key_len > vcrypto->conf.max_auth_key_len) {
+ error_report("virtio-crypto length of auth key is too big: %u",
+ info.auth_key_len);
+ ret = -VIRTIO_CRYPTO_ERR;
+ goto err;
+ }
+ /* get auth key */
+ if (info.auth_key_len > 0) {
+ DPRINTF("auth_keylen=%" PRIu32 "\n", info.auth_key_len);
+ info.auth_key = g_malloc(info.auth_key_len);
+ s = iov_to_buf(iov, out_num, 0, info.auth_key,
+ info.auth_key_len);
+ if (unlikely(s != info.auth_key_len)) {
+ virtio_error(vdev,
+ "virtio-crypto authenticated key incorrect");
+ ret = -EFAULT;
+ goto err;
+ }
+ iov_discard_front(&iov, &out_num, info.auth_key_len);
+ }
+ } else if (info.hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN) {
+ info.hash_alg = ldl_le_p(
+ &sess_req->u.chain.para.u.hash_param.algo);
+ info.hash_result_len = ldl_le_p(
+ &sess_req->u.chain.para.u.hash_param.hash_result_len);
+ } else {
+ /* VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */
+ error_report("unsupported hash mode");
+ ret = -VIRTIO_CRYPTO_NOTSUPP;
+ goto err;
+ }
+ } else {
+ /* VIRTIO_CRYPTO_SYM_OP_NONE */
+ error_report("unsupported cipher op_type: VIRTIO_CRYPTO_SYM_OP_NONE");
+ ret = -VIRTIO_CRYPTO_NOTSUPP;
+ goto err;
+ }
+
+ queue_index = virtio_crypto_vq2q(queue_id);
+ session_id = cryptodev_backend_sym_create_session(
+ vcrypto->cryptodev,
+ &info, queue_index, &local_err);
+ if (session_id >= 0) {
+ DPRINTF("create session_id=%" PRIu64 " successfully\n",
+ session_id);
+
+ ret = session_id;
+ } else {
+ if (local_err) {
+ error_report_err(local_err);
+ }
+ ret = -VIRTIO_CRYPTO_ERR;
+ }
+
+err:
+ g_free(info.cipher_key);
+ g_free(info.auth_key);
+ return ret;
+}
+
+static uint8_t
+virtio_crypto_handle_close_session(VirtIOCrypto *vcrypto,
+ struct virtio_crypto_destroy_session_req *close_sess_req,
+ uint32_t queue_id)
+{
+ int ret;
+ uint64_t session_id;
+ uint32_t status;
+ Error *local_err = NULL;
+
+ session_id = ldq_le_p(&close_sess_req->session_id);
+ DPRINTF("close session, id=%" PRIu64 "\n", session_id);
+
+ ret = cryptodev_backend_sym_close_session(
+ vcrypto->cryptodev, session_id, queue_id, &local_err);
+ if (ret == 0) {
+ status = VIRTIO_CRYPTO_OK;
+ } else {
+ if (local_err) {
+ error_report_err(local_err);
+ } else {
+ error_report("destroy session failed");
+ }
+ status = VIRTIO_CRYPTO_ERR;
+ }
+
+ return status;
+}
+
+static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
+{