From: Peter Zijlstra <a.p.zijlstra@chello.nl>
Subject: [PATCH 20/31] netvm: prevent a stream specific deadlock
Patch-mainline: not yet

It could happen that all !SOCK_MEMALLOC sockets have buffered so much data
that we're over the global rmem limit. This will prevent SOCK_MEMALLOC buffers
from receiving data, which will prevent userspace from running, which is needed
to reduce the buffered data.

Fix this by exempting the SOCK_MEMALLOC sockets from the rmem limit.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Suresh Jayaraman <sjayaraman@suse.de>
---
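Note (not part of the change itself): the sketch below is a self-contained
userspace model of the admission check that sk_rmem_schedule() performs after
this patch. The mock_* names and the numeric budget are illustrative stand-ins,
and skb_emergency() from earlier in this series is modelled by a plain boolean
flag; it only demonstrates that an emergency skb is still admitted once the
ordinary rmem budget is exhausted, which is the deadlock this patch avoids.

/*
 * Userspace sketch, not kernel code: models the patched sk_rmem_schedule().
 * An skb is admitted if it fits in the forward allocation, if more memory
 * can be scheduled, or -- new with this patch -- if it is an emergency
 * (SOCK_MEMALLOC) skb, which is exempt from the rmem limit.
 */
#include <stdbool.h>
#include <stdio.h>

struct mock_sock {
	int sk_forward_alloc;	/* bytes already reserved for this socket */
	int rmem_budget;	/* stand-in for the global rmem limit */
};

struct mock_skb {
	int truesize;		/* memory charged for this buffer */
	bool emergency;		/* allocated from the memalloc reserves? */
};

/* stand-in for __sk_mem_schedule(sk, size, SK_MEM_RECV) */
static bool mock_mem_schedule(struct mock_sock *sk, int size)
{
	if (size > sk->rmem_budget)
		return false;
	sk->rmem_budget -= size;
	sk->sk_forward_alloc += size;
	return true;
}

/* mirrors the patched sk_rmem_schedule(): emergency skbs always pass */
static bool mock_rmem_schedule(struct mock_sock *sk, struct mock_skb *skb)
{
	return skb->truesize <= sk->sk_forward_alloc ||
	       mock_mem_schedule(sk, skb->truesize) ||
	       skb->emergency;
}

int main(void)
{
	struct mock_sock sk = { .sk_forward_alloc = 0, .rmem_budget = 0 };
	struct mock_skb normal = { .truesize = 2048, .emergency = false };
	struct mock_skb swap_io = { .truesize = 2048, .emergency = true };

	/* budget exhausted: the ordinary skb is refused ... */
	printf("normal skb admitted:    %d\n", mock_rmem_schedule(&sk, &normal));
	/* ... but the emergency skb is still admitted, avoiding the deadlock */
	printf("emergency skb admitted: %d\n", mock_rmem_schedule(&sk, &swap_io));
	return 0;
}
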
 include/net/sock.h     |    7 ++++---
 net/caif/caif_socket.c |    2 +-
 net/core/sock.c        |    2 +-
 net/ipv4/tcp_input.c   |   12 ++++++------
 net/sctp/ulpevent.c    |    2 +-
 5 files changed, 13 insertions(+), 12 deletions(-)

--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -980,12 +980,13 @@ static inline int sk_wmem_schedule(struc
 		__sk_mem_schedule(sk, size, SK_MEM_SEND);
 }
 
-static inline int sk_rmem_schedule(struct sock *sk, int size)
+static inline int sk_rmem_schedule(struct sock *sk, struct sk_buff *skb)
 {
 	if (!sk_has_account(sk))
 		return 1;
-	return size <= sk->sk_forward_alloc ||
-		__sk_mem_schedule(sk, size, SK_MEM_RECV);
+	return skb->truesize <= sk->sk_forward_alloc ||
+		__sk_mem_schedule(sk, skb->truesize, SK_MEM_RECV) ||
+		skb_emergency(skb);
 }
 
 static inline void sk_mem_reclaim(struct sock *sk)
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -399,7 +399,7 @@ int sock_queue_rcv_skb(struct sock *sk,
 	if (err)
 		return err;
 
-	if (!sk_rmem_schedule(sk, skb->truesize)) {
+	if (!sk_rmem_schedule(sk, skb)) {
 		atomic_inc(&sk->sk_drops);
 		return -ENOBUFS;
 	}
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4347,19 +4347,19 @@ static void tcp_ofo_queue(struct sock *s
 static int tcp_prune_ofo_queue(struct sock *sk);
 static int tcp_prune_queue(struct sock *sk);
 
-static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
+static inline int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb)
 {
 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-	    !sk_rmem_schedule(sk, size)) {
+	    !sk_rmem_schedule(sk, skb)) {
 
 		if (tcp_prune_queue(sk) < 0)
 			return -1;
 
-		if (!sk_rmem_schedule(sk, size)) {
+		if (!sk_rmem_schedule(sk, skb)) {
 			if (!tcp_prune_ofo_queue(sk))
 				return -1;
 
-			if (!sk_rmem_schedule(sk, size))
+			if (!sk_rmem_schedule(sk, skb))
 				return -1;
 		}
 	}
@@ -4412,7 +4412,7 @@ static void tcp_data_queue(struct sock *
 	if (eaten <= 0) {
 queue_and_out:
 		if (eaten < 0 &&
-		    tcp_try_rmem_schedule(sk, skb->truesize))
+		    tcp_try_rmem_schedule(sk, skb))
 			goto drop;
 
 		skb_set_owner_r(skb, sk);
@@ -4483,7 +4483,7 @@ drop:
 
 	TCP_ECN_check_ce(tp, skb);
 
-	if (tcp_try_rmem_schedule(sk, skb->truesize))
+	if (tcp_try_rmem_schedule(sk, skb))
 		goto drop;
 
 	/* Disable header prediction. */
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -702,7 +702,7 @@ struct sctp_ulpevent *sctp_ulpevent_make
 	if (rx_count >= asoc->base.sk->sk_rcvbuf) {
 
 		if ((asoc->base.sk->sk_userlocks & SOCK_RCVBUF_LOCK) ||
-		    (!sk_rmem_schedule(asoc->base.sk, chunk->skb->truesize)))
+		    (!sk_rmem_schedule(asoc->base.sk, chunk->skb)))
 			goto fail;
 	}
 
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -170,7 +170,7 @@ static int caif_queue_rcv_skb(struct soc
 	err = sk_filter(sk, skb);
 	if (err)
 		return err;
-	if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
+	if (!sk_rmem_schedule(sk, skb) && rx_flow_is_on(cf_sk)) {
 		set_rx_flow_off(cf_sk);
 		pr_debug("sending flow OFF due to rmem_schedule\n");
 		dbfs_atomic_inc(&cnt.num_rx_flow_off);