Diffstat (limited to 'drivers/vhost')
-rw-r--r--  drivers/vhost/scsi.c    |  9
-rw-r--r--  drivers/vhost/vhost.c   | 53
-rw-r--r--  drivers/vhost/vhost.h   |  3
-rw-r--r--  drivers/vhost/vringh.c  |  5
-rw-r--r--  drivers/vhost/vsock.c   | 38
5 files changed, 68 insertions(+), 40 deletions(-)
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 6e29d053843d..fd6c8b66f06f 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -843,7 +843,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
struct iov_iter out_iter, in_iter, prot_iter, data_iter;
u64 tag;
u32 exp_data_len, data_direction;
- unsigned out, in;
+ unsigned int out = 0, in = 0;
int head, ret, prot_bytes;
size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
size_t out_size, in_size;
@@ -922,8 +922,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
*/
iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);
- ret = copy_from_iter(req, req_size, &out_iter);
- if (unlikely(ret != req_size)) {
+ if (unlikely(!copy_from_iter_full(req, req_size, &out_iter))) {
vq_err(vq, "Faulted on copy_from_iter\n");
vhost_scsi_send_bad_target(vs, vq, head, out);
continue;
@@ -1749,7 +1748,6 @@ out:
static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
const char *name)
{
- struct se_portal_group *se_tpg;
struct vhost_scsi_nexus *tv_nexus;
mutex_lock(&tpg->tv_tpg_mutex);
@@ -1758,7 +1756,6 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
pr_debug("tpg->tpg_nexus already exists\n");
return -EEXIST;
}
- se_tpg = &tpg->se_tpg;
tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
if (!tv_nexus) {
@@ -2090,7 +2087,7 @@ static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
NULL,
};
-static struct target_core_fabric_ops vhost_scsi_ops = {
+static const struct target_core_fabric_ops vhost_scsi_ops = {
.module = THIS_MODULE,
.name = "vhost",
.get_fabric_name = vhost_scsi_get_fabric_name,
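
Aside on the copy_from_iter_full() change above: the old code had to compare the byte count returned by copy_from_iter() against the requested size, whereas copy_from_iter_full() folds that check into a single boolean, so the caller simply bails out on a short copy. The snippet below is a hypothetical userspace analogue of that "all-or-nothing" pattern; copy_full() and the buffer names are invented for illustration and are not kernel API.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical analogue of copy_from_iter_full(): either copy exactly
 * 'len' bytes or report failure, so callers need only one check. */
static bool copy_full(void *dst, size_t len, const char **src, size_t *avail)
{
	if (*avail < len)
		return false;          /* short copy: caller treats it as an error */
	memcpy(dst, *src, len);
	*src += len;                   /* advance the "iterator" */
	*avail -= len;
	return true;
}

int main(void)
{
	const char *buf = "request-header";
	size_t avail = strlen(buf);
	char req[8];

	if (!copy_full(req, sizeof(req), &buf, &avail)) {
		fprintf(stderr, "Faulted on copy\n");
		return 1;
	}
	printf("copied %zu bytes, %zu left\n", sizeof(req), avail);
	return 0;
}
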
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 266354390c8f..8f99fe08de02 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -49,7 +49,7 @@ enum {
INTERVAL_TREE_DEFINE(struct vhost_umem_node,
rb, __u64, __subtree_last,
- START, LAST, , vhost_umem_interval_tree);
+ START, LAST, static inline, vhost_umem_interval_tree);
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
@@ -130,14 +130,14 @@ static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
- if (vhost_has_feature(vq, VIRTIO_F_VERSION_1))
- vq->is_le = true;
+ vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
+ || virtio_legacy_is_little_endian();
}
#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
static void vhost_reset_is_le(struct vhost_virtqueue *vq)
{
- vq->is_le = virtio_legacy_is_little_endian();
+ vhost_init_is_le(vq);
}
struct vhost_flush_struct {
@@ -290,6 +290,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->avail = NULL;
vq->used = NULL;
vq->last_avail_idx = 0;
+ vq->last_used_event = 0;
vq->avail_idx = 0;
vq->last_used_idx = 0;
vq->signalled_used = 0;
@@ -719,7 +720,7 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
struct iovec iov[], int iov_size, int access);
-static int vhost_copy_to_user(struct vhost_virtqueue *vq, void *to,
+static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
const void *from, unsigned size)
{
int ret;
@@ -749,7 +750,7 @@ out:
}
static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
- void *from, unsigned size)
+ void __user *from, unsigned size)
{
int ret;
@@ -783,7 +784,7 @@ out:
}
static void __user *__vhost_get_user(struct vhost_virtqueue *vq,
- void *addr, unsigned size)
+ void __user *addr, unsigned size)
{
int ret;
@@ -934,8 +935,8 @@ static int umem_access_ok(u64 uaddr, u64 size, int access)
return 0;
}
-int vhost_process_iotlb_msg(struct vhost_dev *dev,
- struct vhost_iotlb_msg *msg)
+static int vhost_process_iotlb_msg(struct vhost_dev *dev,
+ struct vhost_iotlb_msg *msg)
{
int ret = 0;
@@ -1324,7 +1325,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
r = -EINVAL;
break;
}
- vq->last_avail_idx = s.num;
+ vq->last_avail_idx = vq->last_used_event = s.num;
/* Forget the cached index value. */
vq->avail_idx = vq->last_avail_idx;
break;
@@ -1713,10 +1714,8 @@ int vhost_vq_init_access(struct vhost_virtqueue *vq)
int r;
bool is_le = vq->is_le;
- if (!vq->private_data) {
- vhost_reset_is_le(vq);
+ if (!vq->private_data)
return 0;
- }
vhost_init_is_le(vq);
@@ -1862,8 +1861,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
i, count);
return -EINVAL;
}
- if (unlikely(copy_from_iter(&desc, sizeof(desc), &from) !=
- sizeof(desc))) {
+ if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
return -EINVAL;
@@ -2159,10 +2157,6 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
__u16 old, new;
__virtio16 event;
bool v;
- /* Flush out used index updates. This is paired
- * with the barrier that the Guest executes when enabling
- * interrupts. */
- smp_mb();
if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
unlikely(vq->avail_idx == vq->last_avail_idx))
@@ -2170,6 +2164,10 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
__virtio16 flags;
+ /* Flush out used index updates. This is paired
+ * with the barrier that the Guest executes when enabling
+ * interrupts. */
+ smp_mb();
if (vhost_get_user(vq, flags, &vq->avail->flags)) {
vq_err(vq, "Failed to get flags");
return true;
@@ -2184,11 +2182,26 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
if (unlikely(!v))
return true;
+ /* We're sure if the following conditions are met, there's no
+ * need to notify guest:
+ * 1) cached used event is ahead of new
+ * 2) old to new updating does not cross cached used event. */
+ if (vring_need_event(vq->last_used_event, new + vq->num, new) &&
+ !vring_need_event(vq->last_used_event, new, old))
+ return false;
+
+ /* Flush out used index updates. This is paired
+ * with the barrier that the Guest executes when enabling
+ * interrupts. */
+ smp_mb();
+
if (vhost_get_user(vq, event, vhost_used_event(vq))) {
vq_err(vq, "Failed to get used event idx");
return true;
}
- return vring_need_event(vhost16_to_cpu(vq, event), new, old);
+ vq->last_used_event = vhost16_to_cpu(vq, event);
+
+ return vring_need_event(vq->last_used_event, new, old);
}
/* This actually signals the guest, using eventfd. */
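
The vhost.c hunks above add a cached copy of the guest's used event index (vq->last_used_event). Before issuing the smp_mb() and re-reading used_event from guest memory, vhost_notify() first checks whether the cached value already proves that the old -> new used-index update cannot require a notification. The check reuses the vring_need_event() arithmetic from include/uapi/linux/virtio_ring.h. Below is a minimal userspace sketch of that fast path; the ring size and index values are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as vring_need_event() in include/uapi/linux/virtio_ring.h:
 * notify iff the event index falls in the half-open window (old, new]. */
static int vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}

int main(void)
{
	uint16_t old = 10, new_idx = 12;       /* used idx moved 10 -> 12 */
	uint16_t cached_event = 20;            /* last_used_event seen earlier */
	uint16_t num = 256;                    /* ring size, illustrative */

	/* The patch's fast path: if the cached event is ahead of 'new'
	 * (checked as "would still need an event even num slots later")
	 * and old -> new did not cross it, skip the barrier and the
	 * userspace read of used_event entirely. */
	if (vring_need_event(cached_event, (uint16_t)(new_idx + num), new_idx) &&
	    !vring_need_event(cached_event, new_idx, old))
		printf("cached index proves no notification needed\n");
	else
		printf("must re-read used_event from the guest\n");
	return 0;
}
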
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 78f3c5fc02e4..a9cbbb148f46 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -107,6 +107,9 @@ struct vhost_virtqueue {
/* Last index we used. */
u16 last_used_idx;
+ /* Last used event we've seen */
+ u16 last_used_event;
+
/* Used flags */
u16 used_flags;
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index 3bb02c60a2f5..bb8971f2a634 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -3,6 +3,7 @@
*
* Since these may be in userspace, we use (inline) accessors.
*/
+#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/vringh.h>
#include <linux/virtio_ring.h>
@@ -820,13 +821,13 @@ EXPORT_SYMBOL(vringh_need_notify_user);
static inline int getu16_kern(const struct vringh *vrh,
u16 *val, const __virtio16 *p)
{
- *val = vringh16_to_cpu(vrh, ACCESS_ONCE(*p));
+ *val = vringh16_to_cpu(vrh, READ_ONCE(*p));
return 0;
}
static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
{
- ACCESS_ONCE(*p) = cpu_to_vringh16(vrh, val);
+ WRITE_ONCE(*p, cpu_to_vringh16(vrh, val));
return 0;
}
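
The vringh.c hunk is a mechanical conversion from the retired ACCESS_ONCE() macro to READ_ONCE()/WRITE_ONCE(). For scalar types the effect is roughly a volatile access that the compiler may not tear, fuse, or repeat; the kernel's real macros are more elaborate (they also cope with non-scalar types), so treat the sketch below as a simplified, scalar-only illustration rather than the actual definitions.

#include <stdint.h>
#include <stdio.h>

/* Simplified, scalar-only illustration of what READ_ONCE()/WRITE_ONCE()
 * guarantee: exactly one load/store, no tearing, fusing, or re-reading.
 * This is not the kernel's real definition. */
#define read_once(x)      (*(const volatile typeof(x) *)&(x))
#define write_once(x, v)  (*(volatile typeof(x) *)&(x) = (v))

static uint16_t shared_idx;

int main(void)
{
	write_once(shared_idx, 42);
	printf("%u\n", (unsigned)read_once(shared_idx));
	return 0;
}
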
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index e6b70966c19d..ce5e63d2c66a 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -50,11 +50,10 @@ static u32 vhost_transport_get_local_cid(void)
return VHOST_VSOCK_DEFAULT_HOST_CID;
}
-static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
+static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
{
struct vhost_vsock *vsock;
- spin_lock_bh(&vhost_vsock_lock);
list_for_each_entry(vsock, &vhost_vsock_list, list) {
u32 other_cid = vsock->guest_cid;
@@ -63,15 +62,24 @@ static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
continue;
if (other_cid == guest_cid) {
- spin_unlock_bh(&vhost_vsock_lock);
return vsock;
}
}
- spin_unlock_bh(&vhost_vsock_lock);
return NULL;
}
+static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
+{
+ struct vhost_vsock *vsock;
+
+ spin_lock_bh(&vhost_vsock_lock);
+ vsock = __vhost_vsock_get(guest_cid);
+ spin_unlock_bh(&vhost_vsock_lock);
+
+ return vsock;
+}
+
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
struct vhost_virtqueue *vq)
@@ -365,6 +373,7 @@ static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
+ struct vhost_virtqueue *vq;
size_t i;
int ret;
@@ -375,19 +384,20 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
goto err;
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
- struct vhost_virtqueue *vq = &vsock->vqs[i];
+ vq = &vsock->vqs[i];
mutex_lock(&vq->mutex);
if (!vhost_vq_access_ok(vq)) {
ret = -EFAULT;
- mutex_unlock(&vq->mutex);
goto err_vq;
}
if (!vq->private_data) {
vq->private_data = vsock;
- vhost_vq_init_access(vq);
+ ret = vhost_vq_init_access(vq);
+ if (ret)
+ goto err_vq;
}
mutex_unlock(&vq->mutex);
@@ -397,8 +407,11 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
return 0;
err_vq:
+ vq->private_data = NULL;
+ mutex_unlock(&vq->mutex);
+
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
- struct vhost_virtqueue *vq = &vsock->vqs[i];
+ vq = &vsock->vqs[i];
mutex_lock(&vq->mutex);
vq->private_data = NULL;
@@ -559,11 +572,12 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
return -EINVAL;
/* Refuse if CID is already in use */
- other = vhost_vsock_get(guest_cid);
- if (other && other != vsock)
- return -EADDRINUSE;
-
spin_lock_bh(&vhost_vsock_lock);
+ other = __vhost_vsock_get(guest_cid);
+ if (other && other != vsock) {
+ spin_unlock_bh(&vhost_vsock_lock);
+ return -EADDRINUSE;
+ }
vsock->guest_cid = guest_cid;
spin_unlock_bh(&vhost_vsock_lock);
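
Finally, the vsock.c change splits the CID lookup into __vhost_vsock_get(), which requires the caller to hold vhost_vsock_lock, plus a small self-locking wrapper. That lets vhost_vsock_set_cid() perform the "is this CID already in use?" check and the assignment inside one critical section instead of dropping the lock between them. The sketch below shows the same double-underscore locking convention with a generic pthread-protected table; all names (__find_entry, find_entry, claim_entry) are made up for illustration.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static const char *table[4];

/* Caller must hold table_lock (the "__" convention). */
static int __find_entry(const char *name)
{
	for (int i = 0; i < 4; i++)
		if (table[i] && strcmp(table[i], name) == 0)
			return i;
	return -1;
}

/* Self-locking wrapper for callers that do not hold the lock. */
static int find_entry(const char *name)
{
	pthread_mutex_lock(&table_lock);
	int idx = __find_entry(name);
	pthread_mutex_unlock(&table_lock);
	return idx;
}

/* Check-then-insert under a single critical section, mirroring how
 * vhost_vsock_set_cid() now looks up the CID and assigns it while
 * holding vhost_vsock_lock instead of releasing it in between. */
static int claim_entry(const char *name)
{
	pthread_mutex_lock(&table_lock);
	if (__find_entry(name) >= 0) {
		pthread_mutex_unlock(&table_lock);
		return -1;      /* already in use, like -EADDRINUSE */
	}
	for (int i = 0; i < 4; i++) {
		if (!table[i]) {
			table[i] = name;
			break;
		}
	}
	pthread_mutex_unlock(&table_lock);
	return 0;
}

int main(void)
{
	claim_entry("cid-3");
	printf("lookup cid-3 -> slot %d\n", find_entry("cid-3"));
	printf("claim again -> %d\n", claim_entry("cid-3"));
	return 0;
}
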