Refreshed all patches.
Fixes:
- CVE-2019-11479 (TCP excess resource consumption due to low MSS values)
- CVE-2019-11478 (TCP SACK slowness from a fragmented retransmission queue)
- CVE-2019-11477 (TCP SACK panic: 16-bit gso_segs overflow; see the note below)
Compile-tested on: ar71xx
Runtime-tested on: ar71xx
Signed-off-by: Koen Vandeputte <koen.vandeputte@ncentric.com>
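Background on the most severe of the three, CVE-2019-11477 ("SACK panic"): skb_shinfo()->gso_segs is a 16-bit counter, and a peer that negotiates the minimum MSS (leaving roughly 8 bytes of payload per segment) can push the segment count of a coalesced retransmit-queue skb past 65535 during SACK processing. The sketch below only illustrates that arithmetic; it is not the upstream fix, and the byte counts are assumptions chosen for the example.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t coalesced_bytes = 9 * 64 * 1024; /* hypothetical merged payload on the retransmit queue */
	uint32_t mss = 8;                         /* minimum MSS leaves ~8 data bytes per segment */
	uint32_t segs_needed = coalesced_bytes / mss;  /* 73728 segments */
	uint16_t gso_segs = (uint16_t)segs_needed;     /* skb_shinfo()->gso_segs is only 16 bits wide */

	printf("segments needed: %u, value stored in u16: %u\n",
	       (unsigned)segs_needed, (unsigned)gso_segs);
	/* 73728 truncates to 8192 here; in the kernel the mismatch can trip a
	 * BUG_ON() during SACK processing.  The 4.9.182 stable release avoids
	 * this by enforcing a sane minimum sender MSS and limiting the payload
	 * size of SACKed skbs.
	 */
	return 0;
}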
LINUX_RELEASE?=1
-LINUX_VERSION-4.9 = .181
+LINUX_VERSION-4.9 = .182
LINUX_VERSION-4.14 = .126
-LINUX_KERNEL_HASH-4.9.181 = 8fcd223e11cba322801bc38cdb8b581d64c0115f585dcb6604de8561b574fced
+LINUX_KERNEL_HASH-4.9.182 = b16e12681a0638368479d73a9b1b8e9407c1ae4b7ae52fdf236d9e5657999695
LINUX_KERNEL_HASH-4.14.126 = 6a2e89504d8560b132ab743a0206ffce026bff2697b705819421c5f125633970
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
-@@ -3910,14 +3910,16 @@ static bool tcp_parse_aligned_timestamp(
+@@ -3926,14 +3926,16 @@ static bool tcp_parse_aligned_timestamp(
{
const __be32 *ptr = (const __be32 *)(th + 1);
nval = cmpxchg(&tp->tsq_flags, oval, nval);
if (nval != oval)
continue;
-@@ -2222,6 +2222,8 @@ static bool tcp_write_xmit(struct sock *
+@@ -2226,6 +2226,8 @@ static bool tcp_write_xmit(struct sock *
unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
break;
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
-@@ -2127,6 +2127,15 @@ static bool tcp_small_queue_check(struct
+@@ -2131,6 +2131,15 @@ static bool tcp_small_queue_check(struct
limit <<= factor;
if (atomic_read(&sk->sk_wmem_alloc) > limit) {
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
-@@ -1960,26 +1960,26 @@ static bool tcp_can_coalesce_send_queue_
+@@ -1964,26 +1964,26 @@ static bool tcp_can_coalesce_send_queue_
*/
static int tcp_mtu_probe(struct sock *sk)
{
if (nval != oval)
continue;
-@@ -2136,7 +2136,7 @@ static bool tcp_small_queue_check(struct
+@@ -2140,7 +2140,7 @@ static bool tcp_small_queue_check(struct
skb->prev == sk->sk_write_queue.next)
return false;
/* It is possible TX completion already happened
* before we set TSQ_THROTTLED, so we must
* test again the condition.
-@@ -2234,8 +2234,8 @@ static bool tcp_write_xmit(struct sock *
+@@ -2238,8 +2238,8 @@ static bool tcp_write_xmit(struct sock *
unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
break;
if (tcp_small_queue_check(sk, skb, 0))
break;
-@@ -3546,8 +3546,6 @@ void __tcp_send_ack(struct sock *sk, u32
+@@ -3550,8 +3550,6 @@ void __tcp_send_ack(struct sock *sk, u32
/* We do not want pure acks influencing TCP Small Queues or fq/pacing
* too much.
* SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
-@@ -326,7 +326,7 @@ static void tcp_delack_timer(unsigned lo
+@@ -327,7 +327,7 @@ static void tcp_delack_timer(unsigned lo
inet_csk(sk)->icsk_ack.blocked = 1;
__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
/* deleguate our work to tcp_release_cb() */
sock_hold(sk);
}
bh_unlock_sock(sk);
-@@ -609,7 +609,7 @@ static void tcp_write_timer(unsigned lon
+@@ -610,7 +610,7 @@ static void tcp_write_timer(unsigned lon
tcp_write_timer_handler(sk);
} else {
/* delegate our work to tcp_release_cb() */
* Before updating sk_refcnt, we must commit prior changes to memory
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
-@@ -1593,7 +1593,7 @@ u32 tcp_tso_autosize(const struct sock *
+@@ -1597,7 +1597,7 @@ u32 tcp_tso_autosize(const struct sock *
{
u32 bytes, segs;
sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);
/* Goal is to send at least one packet per ms,
-@@ -2123,7 +2123,7 @@ static bool tcp_small_queue_check(struct
+@@ -2127,7 +2127,7 @@ static bool tcp_small_queue_check(struct
{
unsigned int limit;
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
-@@ -565,6 +565,9 @@ static __net_initdata struct pernet_oper
+@@ -566,6 +566,9 @@ static __net_initdata struct pernet_oper
int __init ip_misc_proc_init(void)
{