Skip to content

Commit d32e097

Browse files
jbeulich authored and Sasha Levin committed
xen-netback: properly sync TX responses
commit 7b55984 upstream.

Invoking the make_tx_response() / push_tx_responses() pair with no lock held would be acceptable only if all such invocations happened from the same context (NAPI instance or dealloc thread). Since this isn't the case, and since the interface "spec" also doesn't demand that multicast operations may only be performed with no in-flight transmits, MCAST_{ADD,DEL} processing also needs to acquire the response lock around the invocations.

To prevent similar mistakes going forward, "downgrade" the present functions to private helpers of just the two remaining ones using them directly, with no forward declarations anymore. This involves renaming what so far was make_tx_response(), for the new function of that name to serve the new (wrapper) purpose.

While there,
- constify the txp parameters,
- correct xenvif_idx_release()'s status parameter's type,
- rename {,_}make_tx_response()'s status parameters for consistency with xenvif_idx_release()'s.

Fixes: 210c34d ("xen-netback: add support for multicast control")
Cc: [email protected]
Signed-off-by: Jan Beulich <[email protected]>
Reviewed-by: Paul Durrant <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent 2ab0786 commit d32e097

File tree

1 file changed

+40
-44
lines changed

1 file changed

+40
-44
lines changed

drivers/net/xen-netback/netback.c

Lines changed: 40 additions & 44 deletions
Original file line number | Diff line number | Diff line change
@@ -104,13 +104,12 @@ bool provides_xdp_headroom = true;
104104
module_param(provides_xdp_headroom, bool, 0644);
105105

106106
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
107-
u8 status);
107+
s8 status);
108108

109109
static void make_tx_response(struct xenvif_queue *queue,
110-
struct xen_netif_tx_request *txp,
110+
const struct xen_netif_tx_request *txp,
111111
unsigned int extra_count,
112-
s8 st);
113-
static void push_tx_responses(struct xenvif_queue *queue);
112+
s8 status);
114113

115114
static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
116115

@@ -208,13 +207,9 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
208207
unsigned int extra_count, RING_IDX end)
209208
{
210209
RING_IDX cons = queue->tx.req_cons;
211-
unsigned long flags;
212210

213211
do {
214-
spin_lock_irqsave(&queue->response_lock, flags);
215212
make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
216-
push_tx_responses(queue);
217-
spin_unlock_irqrestore(&queue->response_lock, flags);
218213
if (cons == end)
219214
break;
220215
RING_COPY_REQUEST(&queue->tx, cons++, txp);
@@ -465,12 +460,7 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
465460
for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
466461
nr_slots--) {
467462
if (unlikely(!txp->size)) {
468-
unsigned long flags;
469-
470-
spin_lock_irqsave(&queue->response_lock, flags);
471463
make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY);
472-
push_tx_responses(queue);
473-
spin_unlock_irqrestore(&queue->response_lock, flags);
474464
++txp;
475465
continue;
476466
}
@@ -496,14 +486,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
496486

497487
for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) {
498488
if (unlikely(!txp->size)) {
499-
unsigned long flags;
500-
501-
spin_lock_irqsave(&queue->response_lock, flags);
502489
make_tx_response(queue, txp, 0,
503490
XEN_NETIF_RSP_OKAY);
504-
push_tx_responses(queue);
505-
spin_unlock_irqrestore(&queue->response_lock,
506-
flags);
507491
continue;
508492
}
509493

@@ -995,7 +979,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
995979
(ret == 0) ?
996980
XEN_NETIF_RSP_OKAY :
997981
XEN_NETIF_RSP_ERROR);
998-
push_tx_responses(queue);
999982
continue;
1000983
}
1001984

@@ -1007,7 +990,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1007990

1008991
make_tx_response(queue, &txreq, extra_count,
1009992
XEN_NETIF_RSP_OKAY);
1010-
push_tx_responses(queue);
1011993
continue;
1012994
}
1013995

@@ -1433,8 +1415,35 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget)
14331415
return work_done;
14341416
}
14351417

1418+
static void _make_tx_response(struct xenvif_queue *queue,
1419+
const struct xen_netif_tx_request *txp,
1420+
unsigned int extra_count,
1421+
s8 status)
1422+
{
1423+
RING_IDX i = queue->tx.rsp_prod_pvt;
1424+
struct xen_netif_tx_response *resp;
1425+
1426+
resp = RING_GET_RESPONSE(&queue->tx, i);
1427+
resp->id = txp->id;
1428+
resp->status = status;
1429+
1430+
while (extra_count-- != 0)
1431+
RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1432+
1433+
queue->tx.rsp_prod_pvt = ++i;
1434+
}
1435+
1436+
static void push_tx_responses(struct xenvif_queue *queue)
1437+
{
1438+
int notify;
1439+
1440+
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1441+
if (notify)
1442+
notify_remote_via_irq(queue->tx_irq);
1443+
}
1444+
14361445
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1437-
u8 status)
1446+
s8 status)
14381447
{
14391448
struct pending_tx_info *pending_tx_info;
14401449
pending_ring_idx_t index;
@@ -1444,8 +1453,8 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
14441453

14451454
spin_lock_irqsave(&queue->response_lock, flags);
14461455

1447-
make_tx_response(queue, &pending_tx_info->req,
1448-
pending_tx_info->extra_count, status);
1456+
_make_tx_response(queue, &pending_tx_info->req,
1457+
pending_tx_info->extra_count, status);
14491458

14501459
/* Release the pending index before pusing the Tx response so
14511460
* its available before a new Tx request is pushed by the
@@ -1459,32 +1468,19 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
14591468
spin_unlock_irqrestore(&queue->response_lock, flags);
14601469
}
14611470

1462-
14631471
static void make_tx_response(struct xenvif_queue *queue,
1464-
struct xen_netif_tx_request *txp,
1472+
const struct xen_netif_tx_request *txp,
14651473
unsigned int extra_count,
1466-
s8 st)
1474+
s8 status)
14671475
{
1468-
RING_IDX i = queue->tx.rsp_prod_pvt;
1469-
struct xen_netif_tx_response *resp;
1470-
1471-
resp = RING_GET_RESPONSE(&queue->tx, i);
1472-
resp->id = txp->id;
1473-
resp->status = st;
1474-
1475-
while (extra_count-- != 0)
1476-
RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1476+
unsigned long flags;
14771477

1478-
queue->tx.rsp_prod_pvt = ++i;
1479-
}
1478+
spin_lock_irqsave(&queue->response_lock, flags);
14801479

1481-
static void push_tx_responses(struct xenvif_queue *queue)
1482-
{
1483-
int notify;
1480+
_make_tx_response(queue, txp, extra_count, status);
1481+
push_tx_responses(queue);
14841482

1485-
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1486-
if (notify)
1487-
notify_remote_via_irq(queue->tx_irq);
1483+
spin_unlock_irqrestore(&queue->response_lock, flags);
14881484
}
14891485

14901486
static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)

0 commit comments

Comments (0)