author     Roger Pau Monné <royger@FreeBSD.org>    2020-11-25 11:34:38 +0000
committer  Ed Maste <emaste@FreeBSD.org>           2021-01-29 00:08:11 +0000
commit     0279031f358307c4c807dde6be6c08bbb5ec515c
tree       fbe359bb41f1e003d5e960335ed988d56beadbc5
parent     60417799b7bb90dc326867ba0af9bd245e03944c
xen: allow limiting the amount of duplicated pending xenstore watches
Xenstore watches received are queued in a list and processed in a deferred thread. Such queuing was done without any checking, so a guest could potentially trigger resource starvation against the FreeBSD kernel if that kernel is watching any user-controlled xenstore path.

Allow limiting the amount of pending events a watch can accumulate to prevent a remote guest from triggering this resource starvation issue.

For the PV device backends and frontends this limitation is only applied to the other end's /state node, which is limited to 1 pending event; the rest of the watched paths can still have unlimited pending watches because they are either local or controlled by a privileged domain.

The xenstore user-space device gets special treatment, as it's not possible for the kernel to know whether the paths being watched by user-space processes are controlled by a guest domain. For this reason watches set by the xenstore user-space device are limited to 1000 pending events. Note this limit can be modified using the max_pending_watch_events sysctl of the device.

This is XSA-349.

Sponsored by:	Citrix Systems R&D
MFC after:	3 days

(cherry picked from commit 4e4e43dc9e1afc863670a031cc5cc75eb5e668d6)

Note the xenstore user-space device part of this backport is dropped, as in stable/11 the device doesn't support setting up watches.

(cherry picked from commit d9bd043f93df1a31ef16d2198d720a0a0831357f)

Approved by:	so
Security:	XSA-349, CVE-2020-29568
-rw-r--r--  sys/dev/xen/balloon/balloon.c    |  3
-rw-r--r--  sys/dev/xen/blkback/blkback.c    |  6
-rw-r--r--  sys/dev/xen/control/control.c    |  6
-rw-r--r--  sys/dev/xen/xenstore/xenstore.c  | 14
-rw-r--r--  sys/xen/xenbus/xenbusb.c         | 17
-rw-r--r--  sys/xen/xenstore/xenstorevar.h   |  9
6 files changed, 51 insertions(+), 4 deletions(-)
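For illustration, here is a minimal sketch (hypothetical driver code, not part of this commit) of how a kernel consumer might use the new max_pending field when registering a watch on a guest-writable node. The names example_watch_cb, example_watch and example_setup_watch are invented for this sketch; struct xs_watch and xs_register_watch() are the existing xenstore API touched by the patch below, and the callback signature is assumed to match the existing xs_watch_cb_t.

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <xen/xenstore/xenstorevar.h>

    /* Hypothetical callback, using the existing xs_watch_cb_t signature. */
    static void
    example_watch_cb(struct xs_watch *watch, const char **vec, unsigned int len)
    {
            /* React to a change of the watched node. */
    }

    static struct xs_watch example_watch = {
            .node        = "backend/example/0/state",  /* hypothetical node */
            .callback    = example_watch_cb,
            /*
             * The node can be written by the other (untrusted) end, so cap
             * the queue at a single pending event; 0 would mean unlimited.
             */
            .max_pending = 1,
    };

    static int
    example_setup_watch(void)
    {
            return (xs_register_watch(&example_watch));
    }

This mirrors what the balloon, blkback, control and xenbus changes below do for their own watches: guest-influenced nodes get max_pending = 1, trusted or local paths keep max_pending = 0.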
diff --git a/sys/dev/xen/balloon/balloon.c b/sys/dev/xen/balloon/balloon.c
index aefa5cdb9250..87163000d4a8 100644
--- a/sys/dev/xen/balloon/balloon.c
+++ b/sys/dev/xen/balloon/balloon.c
@@ -310,7 +310,8 @@ set_new_target(unsigned long target)
static struct xs_watch target_watch =
{
- .node = "memory/target"
+ .node = "memory/target",
+ .max_pending = 1,
};
/* React to a change in the target key */
diff --git a/sys/dev/xen/blkback/blkback.c b/sys/dev/xen/blkback/blkback.c
index a5f9cc101582..4b6eeb0cf62f 100644
--- a/sys/dev/xen/blkback/blkback.c
+++ b/sys/dev/xen/blkback/blkback.c
@@ -3767,6 +3767,12 @@ xbb_attach(device_t dev)
xbb->hotplug_watch.callback = xbb_attach_disk;
KASSERT(xbb->hotplug_watch.node == NULL, ("watch node already setup"));
xbb->hotplug_watch.node = strdup(sbuf_data(watch_path), M_XENBLOCKBACK);
+ /*
+ * We don't care about which path was updated, just about the value
+ * changes on that single node, hence there's no need to queue more
+ * than one event.
+ */
+ xbb->hotplug_watch.max_pending = 1;
sbuf_delete(watch_path);
error = xs_register_watch(&xbb->hotplug_watch);
if (error != 0) {
diff --git a/sys/dev/xen/control/control.c b/sys/dev/xen/control/control.c
index d5aa97e03b73..e1739419826b 100644
--- a/sys/dev/xen/control/control.c
+++ b/sys/dev/xen/control/control.c
@@ -432,6 +432,12 @@ xctrl_attach(device_t dev)
xctrl->xctrl_watch.node = "control/shutdown";
xctrl->xctrl_watch.callback = xctrl_on_watch_event;
xctrl->xctrl_watch.callback_data = (uintptr_t)xctrl;
+ /*
+ * We don't care about which path was updated, just about the value
+ * changes on that single node, hence there's no need to queue more
+ * than one event.
+ */
+ xctrl->xctrl_watch.max_pending = 1;
xs_register_watch(&xctrl->xctrl_watch);
if (xen_pv_domain())
diff --git a/sys/dev/xen/xenstore/xenstore.c b/sys/dev/xen/xenstore/xenstore.c
index 9289adfc1a79..7cba2a55f973 100644
--- a/sys/dev/xen/xenstore/xenstore.c
+++ b/sys/dev/xen/xenstore/xenstore.c
@@ -668,12 +668,17 @@ xs_process_msg(enum xsd_sockmsg_type *type)
mtx_lock(&xs.registered_watches_lock);
msg->u.watch.handle = find_watch(
msg->u.watch.vec[XS_WATCH_TOKEN]);
- if (msg->u.watch.handle != NULL) {
- mtx_lock(&xs.watch_events_lock);
+ mtx_lock(&xs.watch_events_lock);
+ if (msg->u.watch.handle != NULL &&
+ (!msg->u.watch.handle->max_pending ||
+ msg->u.watch.handle->pending <
+ msg->u.watch.handle->max_pending)) {
+ msg->u.watch.handle->pending++;
TAILQ_INSERT_TAIL(&xs.watch_events, msg, list);
wakeup(&xs.watch_events);
mtx_unlock(&xs.watch_events_lock);
} else {
+ mtx_unlock(&xs.watch_events_lock);
free(msg->u.watch.vec, M_XENSTORE);
free(msg, M_XENSTORE);
}
@@ -1045,8 +1050,10 @@ xenwatch_thread(void *unused)
mtx_lock(&xs.watch_events_lock);
msg = TAILQ_FIRST(&xs.watch_events);
- if (msg)
+ if (msg) {
TAILQ_REMOVE(&xs.watch_events, msg, list);
+ msg->u.watch.handle->pending--;
+ }
mtx_unlock(&xs.watch_events_lock);
if (msg != NULL) {
@@ -1629,6 +1636,7 @@ xs_register_watch(struct xs_watch *watch)
char token[sizeof(watch) * 2 + 1];
int error;
+ watch->pending = 0;
sprintf(token, "%lX", (long)watch);
sx_slock(&xs.suspend_mutex);
diff --git a/sys/xen/xenbus/xenbusb.c b/sys/xen/xenbus/xenbusb.c
index 241efaa8fb4e..f191c7f62311 100644
--- a/sys/xen/xenbus/xenbusb.c
+++ b/sys/xen/xenbus/xenbusb.c
@@ -702,10 +702,21 @@ xenbusb_add_device(device_t dev, const char *type, const char *id)
ivars->xd_otherend_watch.node = statepath;
ivars->xd_otherend_watch.callback = xenbusb_otherend_watch_cb;
ivars->xd_otherend_watch.callback_data = (uintptr_t)ivars;
+ /*
+ * Other end state node watch, limit to one pending event
+ * to prevent frontends from queuing too many events that
+ * could cause resource starvation.
+ */
+ ivars->xd_otherend_watch.max_pending = 1;
ivars->xd_local_watch.node = ivars->xd_node;
ivars->xd_local_watch.callback = xenbusb_local_watch_cb;
ivars->xd_local_watch.callback_data = (uintptr_t)ivars;
+ /*
+ * Watch our local path, only writable by us or a privileged
+ * domain, no need to limit.
+ */
+ ivars->xd_local_watch.max_pending = 0;
mtx_lock(&xbs->xbs_lock);
xbs->xbs_connecting_children++;
@@ -764,6 +775,12 @@ xenbusb_attach(device_t dev, char *bus_node, u_int id_components)
xbs->xbs_device_watch.node = bus_node;
xbs->xbs_device_watch.callback = xenbusb_devices_changed;
xbs->xbs_device_watch.callback_data = (uintptr_t)xbs;
+ /*
+ * Allow for unlimited pending watches, as those are local paths
+ * either controlled by the guest or only writable by privileged
+ * domains.
+ */
+ xbs->xbs_device_watch.max_pending = 0;
TASK_INIT(&xbs->xbs_probe_children, 0, xenbusb_probe_children_cb, dev);
diff --git a/sys/xen/xenstore/xenstorevar.h b/sys/xen/xenstore/xenstorevar.h
index 4c612b4b6881..e58d4e1d5721 100644
--- a/sys/xen/xenstore/xenstorevar.h
+++ b/sys/xen/xenstore/xenstorevar.h
@@ -72,6 +72,15 @@ struct xs_watch
/* Callback client data untouched by the XenStore watch mechanism. */
uintptr_t callback_data;
+
+ /* Maximum number of pending watch events to be delivered. */
+ unsigned int max_pending;
+
+ /*
+ * Private counter used by xenstore to keep track of the pending
+ * watches. Protected by xs.watch_events_lock.
+ */
+ unsigned int pending;
};
LIST_HEAD(xs_watch_list, xs_watch);
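As an aside, the enqueue-side rule that these two fields implement (see the xs_process_msg() hunk above) can be summarized by the following sketch; watch_can_queue() is an invented helper name, not a function added by this patch:

    /*
     * Sketch of the condition checked in xs_process_msg() before queuing a
     * watch event: a max_pending of 0 means "no limit", otherwise the event
     * is dropped once the per-watch cap is reached. Caller must hold
     * xs.watch_events_lock, which protects the pending counter.
     */
    static int
    watch_can_queue(const struct xs_watch *w)
    {
            return (w->max_pending == 0 || w->pending < w->max_pending);
    }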