|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [linux-2.6.18-xen] backends: guarantee one time reads of shared ring contents
# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1450362767 -3600
# Node ID 52a7179e39e70f9095cd93fee508116a342717c4
# Parent a0a79976ffebcfa5ff55feb09f833fc1adbeb2e8
backends: guarantee one time reads of shared ring contents
Address double fetch vulnerabilities, occurring when a variable is
fetched twice from shared memory but a security check is only
performed on the first fetched value.
This is part of CVE-2015-8550 / XSA-155.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
diff -r a0a79976ffeb -r 52a7179e39e7 drivers/xen/blkback/blkback.c
--- a/drivers/xen/blkback/blkback.c Mon Nov 16 13:40:35 2015 +0100
+++ b/drivers/xen/blkback/blkback.c Thu Dec 17 15:32:47 2015 +0100
@@ -363,6 +363,9 @@ static int _do_block_io_op(blkif_t *blki
++rc;
+ /* Apply all sanity checks to /private copy/ of request. */
+ barrier();
+
switch (req.operation) {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
@@ -375,10 +378,6 @@ static int _do_block_io_op(blkif_t *blki
/* before make_response() */
blk_rings->common.req_cons = rc;
-
- /* Apply all sanity checks to /private copy/ of request. */
- barrier();
-
dispatch_rw_block_io(blkif, &req, pending_req);
break;
default:
@@ -386,7 +385,6 @@ static int _do_block_io_op(blkif_t *blki
* avoid excessive CPU consumption by a bad guest. */
msleep(1);
blk_rings->common.req_cons = rc;
- barrier();
DPRINTK("error: unknown block io operation [%d]\n",
req.operation);
make_response(blkif, req.id, req.operation,
diff -r a0a79976ffeb -r 52a7179e39e7 drivers/xen/netback/netback.c
--- a/drivers/xen/netback/netback.c Mon Nov 16 13:40:35 2015 +0100
+++ b/drivers/xen/netback/netback.c Thu Dec 17 15:32:47 2015 +0100
@@ -1057,6 +1057,7 @@ static int netbk_count_requests(netif_t
memcpy(txp, RING_GET_REQUEST(&netif->tx, cons + frags),
sizeof(*txp));
+ barrier();
/*
* If the guest submitted a frame >= 64 KiB then first->size
@@ -1237,6 +1238,7 @@ int netbk_get_extras(netif_t *netif, str
memcpy(&extra, RING_GET_REQUEST(&netif->tx, cons),
sizeof(extra));
+ barrier();
if (unlikely(!extra.type ||
extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
netif->tx.req_cons = ++cons;
@@ -1335,6 +1337,7 @@ static void net_tx_action(unsigned long
i = netif->tx.req_cons;
rmb(); /* Ensure that we see the request before we copy it. */
memcpy(&txreq, RING_GET_REQUEST(&netif->tx, i), sizeof(txreq));
+ barrier();
/* Credit-based scheduling. */
if (txreq.size > netif->remaining_credit) {
diff -r a0a79976ffeb -r 52a7179e39e7 drivers/xen/pciback/pciback.h
--- a/drivers/xen/pciback/pciback.h Mon Nov 16 13:40:35 2015 +0100
+++ b/drivers/xen/pciback/pciback.h Thu Dec 17 15:32:47 2015 +0100
@@ -42,6 +42,7 @@ struct pciback_device {
unsigned long flags;
struct work_struct op_work;
+ struct xen_pci_op op;
};
struct pciback_dev_data {
diff -r a0a79976ffeb -r 52a7179e39e7 drivers/xen/pciback/pciback_ops.c
--- a/drivers/xen/pciback/pciback_ops.c Mon Nov 16 13:40:35 2015 +0100
+++ b/drivers/xen/pciback/pciback_ops.c Thu Dec 17 15:32:47 2015 +0100
@@ -79,8 +79,10 @@ void pciback_do_op(void *data)
{
struct pciback_device *pdev = data;
struct pci_dev *dev;
- struct xen_pci_op *op = &pdev->sh_info->op;
+ struct xen_pci_op *op = &pdev->op;
+ *op = pdev->sh_info->op;
+ barrier();
dev = pciback_get_pci_dev(pdev, op->domain, op->bus, op->devfn);
if (dev == NULL)
@@ -116,6 +118,17 @@ void pciback_do_op(void *data)
break;
}
}
+ pdev->sh_info->op.err = op->err;
+ pdev->sh_info->op.value = op->value;
+#ifdef CONFIG_PCI_MSI
+ if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
+ unsigned int i;
+
+ for (i = 0; i < op->value; i++)
+ pdev->sh_info->op.msix_entries[i].vector =
+ op->msix_entries[i].vector;
+ }
+#endif
/* Tell the driver domain that we're done. */
wmb();
clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
diff -r a0a79976ffeb -r 52a7179e39e7 drivers/xen/scsiback/scsiback.c
--- a/drivers/xen/scsiback/scsiback.c Mon Nov 16 13:40:35 2015 +0100
+++ b/drivers/xen/scsiback/scsiback.c Thu Dec 17 15:32:47 2015 +0100
@@ -515,11 +515,13 @@ static int prepare_pending_reqs(struct v
pending_req->info = info;
- pending_req->v_chn = vir.chn = ring_req->channel;
- pending_req->v_tgt = vir.tgt = ring_req->id;
+ vir.chn = ring_req->channel;
+ vir.tgt = ring_req->id;
vir.lun = ring_req->lun;
+ rmb();
+ pending_req->v_chn = vir.chn;
+ pending_req->v_tgt = vir.tgt;
- rmb();
sdev = scsiback_do_translation(info, &vir);
if (!sdev) {
pending_req->sdev = NULL;
diff -r a0a79976ffeb -r 52a7179e39e7 drivers/xen/tpmback/tpmback.c
--- a/drivers/xen/tpmback/tpmback.c Mon Nov 16 13:40:35 2015 +0100
+++ b/drivers/xen/tpmback/tpmback.c Thu Dec 17 15:32:47 2015 +0100
@@ -245,17 +245,16 @@ int _packet_write(struct packet *pak,
unsigned int tocopy;
struct gnttab_map_grant_ref map_op;
struct gnttab_unmap_grant_ref unmap_op;
- tpmif_tx_request_t *tx;
+ tpmif_tx_request_t tx = tpmif->tx->ring[i].req;
- tx = &tpmif->tx->ring[i].req;
-
- if (0 == tx->addr) {
+ rmb();
+ if (0 == tx.addr) {
DPRINTK("ERROR: Buffer for outgoing packet NULL?! i=%d\n", i);
return 0;
}
gnttab_set_map_op(&map_op, idx_to_kaddr(tpmif, i),
- GNTMAP_host_map, tx->ref, tpmif->domid);
+ GNTMAP_host_map, tx.ref, tpmif->domid);
gnttab_check_GNTST_eagain_do_while(GNTTABOP_map_grant_ref,
&map_op);
@@ -269,12 +268,12 @@ int _packet_write(struct packet *pak,
tocopy = min_t(size_t, size - offset, PAGE_SIZE);
if (copy_from_buffer((void *)(idx_to_kaddr(tpmif, i) |
- (tx->addr & ~PAGE_MASK)),
+ (tx.addr & ~PAGE_MASK)),
&data[offset], tocopy, isuserbuffer)) {
tpmif_put(tpmif);
return -EFAULT;
}
- tx->size = tocopy;
+ tpmif->tx->ring[i].req.size = tocopy;
gnttab_set_unmap_op(&unmap_op, idx_to_kaddr(tpmif, i),
GNTMAP_host_map, handle);
@@ -373,9 +372,6 @@ static int packet_read_shmem(struct pack
u32 to_copy;
grant_handle_t handle;
- tpmif_tx_request_t *tx;
-
- tx = &tpmif->tx->ring[0].req;
/*
* Start copying data at the page with index 'index'
* and within that page at offset 'offset'.
@@ -386,11 +382,11 @@ static int packet_read_shmem(struct pack
void *src;
struct gnttab_map_grant_ref map_op;
struct gnttab_unmap_grant_ref unmap_op;
+ tpmif_tx_request_t tx = tpmif->tx->ring[i].req;
- tx = &tpmif->tx->ring[i].req;
-
+ rmb();
gnttab_set_map_op(&map_op, idx_to_kaddr(tpmif, i),
- GNTMAP_host_map, tx->ref, tpmif->domid);
+ GNTMAP_host_map, tx.ref, tpmif->domid);
gnttab_check_GNTST_eagain_do_while(GNTTABOP_map_grant_ref,
&map_op);
@@ -401,19 +397,19 @@ static int packet_read_shmem(struct pack
handle = map_op.handle;
- if (to_copy > tx->size) {
+ if (to_copy > tx.size) {
/*
* User requests more than what's available
*/
- to_copy = min_t(u32, tx->size, to_copy);
+ to_copy = min_t(u32, tx.size, to_copy);
}
DPRINTK("Copying from mapped memory at %08lx\n",
(unsigned long)(idx_to_kaddr(tpmif, i) |
- (tx->addr & ~PAGE_MASK)));
+ (tx.addr & ~PAGE_MASK)));
src = (void *)(idx_to_kaddr(tpmif, i) |
- ((tx->addr & ~PAGE_MASK) + pg_offset));
+ ((tx.addr & ~PAGE_MASK) + pg_offset));
if (copy_to_buffer(&buffer[offset],
src, to_copy, isuserbuffer)) {
return -EFAULT;
@@ -874,21 +870,23 @@ static void tpm_tx_action(unsigned long
{
struct list_head *ent;
tpmif_t *tpmif;
- tpmif_tx_request_t *tx;
DPRINTK("%s: Getting data from front-end(s)!\n", __FUNCTION__);
while (!list_empty(&tpm_schedule_list)) {
+ tpmif_tx_request_t tx;
+
/* Get a tpmif from the list with work to do. */
ent = tpm_schedule_list.next;
tpmif = list_entry(ent, tpmif_t, list);
tpmif_get(tpmif);
remove_from_tpm_schedule_list(tpmif);
- tx = &tpmif->tx->ring[0].req;
+ tx = tpmif->tx->ring[0].req;
+ rmb();
/* pass it up */
- vtpm_receive(tpmif, tx->size);
+ vtpm_receive(tpmif, tx.size);
tpmif_put(tpmif);
}
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |