/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              ROUTE - implementation of the IP router.
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *              Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *              Alan Cox        :       Verify area fixes.
 *              Alan Cox        :       cli() protects routing changes
 *              Rui Oliveira    :       ICMP routing table updates
 *              (rco@di.uminho.pt)      Routing table insertion and update
 *              Linus Torvalds  :       Rewrote bits to be sensible
 *              Alan Cox        :       Added BSD route gw semantics
 *              Alan Cox        :       Super /proc >4K
 *              Alan Cox        :       MTU in route table
 *              Alan Cox        :       MSS actually. Also added the window
 *                                      clamper.
 *              Sam Lantinga    :       Fixed route matching in rt_del()
 *              Alan Cox        :       Routing cache support.
 *              Alan Cox        :       Removed compatibility cruft.
 *              Alan Cox        :       RTF_REJECT support.
 *              Alan Cox        :       TCP irtt support.
 *              Jonathan Naylor :       Added Metric support.
 *      Miquel van Smoorenburg  :       BSD API fixes.
 *      Miquel van Smoorenburg  :       Metrics.
 *              Alan Cox        :       Use __u32 properly
 *              Alan Cox        :       Aligned routing errors more closely with BSD
 *                                      our system is still very different.
 *              Alan Cox        :       Faster /proc handling
 *      Alexey Kuznetsov        :       Massive rework to support tree based routing,
 *                                      routing caches and better behaviour.
 *
 *              Olaf Erb        :       irtt wasn't being copied right.
 *              Bjorn Ekwall    :       Kerneld route support.
 *              Alan Cox        :       Multicast fixed (I hope)
 *              Pavel Krauz     :       Limited broadcast fixed
 *              Mike McLagan    :       Routing by source
 *      Alexey Kuznetsov        :       End of old history. Split to fib.c and
 *                                      route.c and rewritten from scratch.
 *              Andi Kleen      :       Load-limit warning messages.
 *      Vitaly E. Lavrov        :       Transparent proxy revived after year coma.
 *      Vitaly E. Lavrov        :       Race condition in ip_route_input_slow.
 *      Tobias Ringstrom        :       Uninitialized res.type in ip_route_output_slow.
 *      Vladimir V. Ivanov      :       IP rule info (flowid) is really useful.
 *              Marc Boucher    :       routing by fwmark
 *      Robert Olsson           :       Added rt_cache statistics
 *      Arnaldo C. Melo         :       Convert proc stuff to seq_file
 *      Eric Dumazet            :       hashed spinlocks and rt_check_expire() fixes.
 *      Ilia Sotnikov           :       Ignore TOS on PMTUD and Redirect
 *      Ilia Sotnikov           :       Removed TOS from hash calculations
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/dst.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#include <linux/kmemleak.h>
#endif
#include <net/secure_seq.h>

#define RT_FL_TOS(oldflp4) \
        ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))

#define IP_MAX_MTU      0xFFF0

#define RT_GC_TIMEOUT   (300*HZ)

static int ip_rt_max_size;
static int ip_rt_gc_timeout __read_mostly       = RT_GC_TIMEOUT;
static int ip_rt_gc_interval __read_mostly      = 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly  = HZ / 2;
static int ip_rt_redirect_number __read_mostly  = 9;
static int ip_rt_redirect_load __read_mostly    = HZ / 50;
static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly       = HZ;
static int ip_rt_error_burst __read_mostly      = 5 * HZ;
static int ip_rt_gc_elasticity __read_mostly    = 8;
static int ip_rt_mtu_expires __read_mostly      = 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly         = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly       = 256;

/*
 *      Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int      ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int      ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void              ipv4_link_failure(struct sk_buff *skb);
static void              ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
                                           struct sk_buff *skb, u32 mtu);
static void              ip_do_redirect(struct dst_entry *dst, struct sock *sk,
                                        struct sk_buff *skb);

static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
                            int how)
{
}

static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
        WARN_ON(1);
        return NULL;
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
                                           struct sk_buff *skb,
                                           const void *daddr);

static struct dst_ops ipv4_dst_ops = {
        .family =               AF_INET,
        .protocol =             cpu_to_be16(ETH_P_IP),
        .check =                ipv4_dst_check,
        .default_advmss =       ipv4_default_advmss,
        .mtu =                  ipv4_mtu,
        .cow_metrics =          ipv4_cow_metrics,
        .ifdown =               ipv4_dst_ifdown,
        .negative_advice =      ipv4_negative_advice,
        .link_failure =         ipv4_link_failure,
        .update_pmtu =          ip_rt_update_pmtu,
        .redirect =             ip_do_redirect,
        .local_out =            __ip_local_out,
        .neigh_lookup =         ipv4_neigh_lookup,
};

#define ECN_OR_COST(class)      TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
        TC_PRIO_BESTEFFORT,
        ECN_OR_COST(BESTEFFORT),
        TC_PRIO_BESTEFFORT,
        ECN_OR_COST(BESTEFFORT),
        TC_PRIO_BULK,
        ECN_OR_COST(BULK),
        TC_PRIO_BULK,
        ECN_OR_COST(BULK),
        TC_PRIO_INTERACTIVE,
        ECN_OR_COST(INTERACTIVE),
        TC_PRIO_INTERACTIVE,
        ECN_OR_COST(INTERACTIVE),
        TC_PRIO_INTERACTIVE_BULK,
        ECN_OR_COST(INTERACTIVE_BULK),
        TC_PRIO_INTERACTIVE_BULK,
        ECN_OR_COST(INTERACTIVE_BULK)
};
EXPORT_SYMBOL(ip_tos2prio);
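
/* Worked example (added for illustration, not part of the original source):
 * this table is indexed by the four TOS bits shifted right by one, as done
 * by rt_tos2priority() in <net/route.h>.  For instance IPTOS_LOWDELAY (0x10)
 * gives index 8, i.e. TC_PRIO_INTERACTIVE, while IPTOS_THROUGHPUT (0x08)
 * gives index 4, i.e. TC_PRIO_BULK.
 */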

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)

static inline int rt_genid(struct net *net)
{
        return atomic_read(&net->ipv4.rt_genid);
}

#ifdef CONFIG_PROC_FS
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
        if (*pos)
                return NULL;
        return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN)
                seq_printf(seq, "%-127s\n",
                           "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
                           "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
                           "HHUptod\tSpecDst");
        return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
        .start  = rt_cache_seq_start,
        .next   = rt_cache_seq_next,
        .stop   = rt_cache_seq_stop,
        .show   = rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &rt_cache_seq_ops);
}

static const struct file_operations rt_cache_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = rt_cache_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};


static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
        int cpu;

        if (*pos == 0)
                return SEQ_START_TOKEN;

        for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu+1;
                return &per_cpu(rt_cache_stat, cpu);
        }
        return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        int cpu;

        for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu+1;
                return &per_cpu(rt_cache_stat, cpu);
        }
        return NULL;

}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{

}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
        struct rt_cache_stat *st = v;

        if (v == SEQ_START_TOKEN) {
                seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
                return 0;
        }

        seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
                   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
                   dst_entries_get_slow(&ipv4_dst_ops),
                   st->in_hit,
                   st->in_slow_tot,
                   st->in_slow_mc,
                   st->in_no_route,
                   st->in_brd,
                   st->in_martian_dst,
                   st->in_martian_src,

                   st->out_hit,
                   st->out_slow_tot,
                   st->out_slow_mc,

                   st->gc_total,
                   st->gc_ignored,
                   st->gc_goal_miss,
                   st->gc_dst_overflow,
                   st->in_hlist_search,
                   st->out_hlist_search
                );
        return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
        .start  = rt_cpu_seq_start,
        .next   = rt_cpu_seq_next,
        .stop   = rt_cpu_seq_stop,
        .show   = rt_cpu_seq_show,
};


static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &rt_cpu_seq_ops);
}

static const struct file_operations rt_cpu_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = rt_cpu_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
        struct ip_rt_acct *dst, *src;
        unsigned int i, j;

        dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
        if (!dst)
                return -ENOMEM;

        for_each_possible_cpu(i) {
                src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
                for (j = 0; j < 256; j++) {
                        dst[j].o_bytes   += src[j].o_bytes;
                        dst[j].o_packets += src[j].o_packets;
                        dst[j].i_bytes   += src[j].i_bytes;
                        dst[j].i_packets += src[j].i_packets;
                }
        }

        seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
        kfree(dst);
        return 0;
}

static int rt_acct_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, rt_acct_proc_show, NULL);
}

static const struct file_operations rt_acct_proc_fops = {
        .owner          = THIS_MODULE,
        .open           = rt_acct_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
#endif

static int __net_init ip_rt_do_proc_init(struct net *net)
{
        struct proc_dir_entry *pde;

        pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
                        &rt_cache_seq_fops);
        if (!pde)
                goto err1;

        pde = proc_create("rt_cache", S_IRUGO,
                          net->proc_net_stat, &rt_cpu_seq_fops);
        if (!pde)
                goto err2;

#ifdef CONFIG_IP_ROUTE_CLASSID
        pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
        if (!pde)
                goto err3;
#endif
        return 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
        remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
        remove_proc_entry("rt_cache", net->proc_net);
err1:
        return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
        remove_proc_entry("rt_cache", net->proc_net_stat);
        remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
        remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata = {
        .init = ip_rt_do_proc_init,
        .exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
        return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
        return 0;
}
#endif /* CONFIG_PROC_FS */

static inline bool rt_is_expired(const struct rtable *rth)
{
        return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
}

/*
 * Perturbation of rt_genid by a small quantity [1..256].
 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
 * many times (2^24) without reusing a recent rt_genid.
 * Jenkins hash is strong enough that little changes of rt_genid are OK.
 */
static void rt_cache_invalidate(struct net *net)
{
        unsigned char shuffle;

        get_random_bytes(&shuffle, sizeof(shuffle));
        atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
}
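
/* Illustrative note (added, not part of the original source): each call to
 * rt_cache_invalidate() bumps the 32-bit rt_genid by a random value in
 * [1, 256], so on the order of 2^32 / 2^8 = 2^24 invalidations are needed
 * before the counter can wrap back to a recently used value, which is where
 * the "many times (2^24)" figure in the comment above comes from.
 */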

/*
 * delay < 0  : invalidate cache (fast : entries will be deleted later)
 * delay >= 0 : invalidate & flush cache (can be long)
 */
void rt_cache_flush(struct net *net, int delay)
{
        rt_cache_invalidate(net);
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
                                           struct sk_buff *skb,
                                           const void *daddr)
{
        struct net_device *dev = dst->dev;
        const __be32 *pkey = daddr;
        const struct rtable *rt;
        struct neighbour *n;

        rt = (const struct rtable *) dst;
        if (rt->rt_gateway)
                pkey = (const __be32 *) &rt->rt_gateway;
        else if (skb)
                pkey = &ip_hdr(skb)->daddr;

        n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
        if (n)
                return n;
        return neigh_create(&arp_tbl, pkey, dev);
}

/*
 * Peer allocation may fail only in serious out-of-memory conditions.  However
 * we can still generate some output.
 * Random ID selection looks a bit dangerous because we have no chance of
 * selecting an ID that is unique within a reasonable period of time.
 * But a broken packet identifier may be better than no packet at all.
 */
static void ip_select_fb_ident(struct iphdr *iph)
{
        static DEFINE_SPINLOCK(ip_fb_id_lock);
        static u32 ip_fallback_id;
        u32 salt;

        spin_lock_bh(&ip_fb_id_lock);
        salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
        iph->id = htons(salt & 0xFFFF);
        ip_fallback_id = salt;
        spin_unlock_bh(&ip_fb_id_lock);
}

void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
{
        struct net *net = dev_net(dst->dev);
        struct inet_peer *peer;

        peer = inet_getpeer_v4(net->ipv4.peers, iph->daddr, 1);
        if (peer) {
                iph->id = htons(inet_getid(peer, more));
                inet_putpeer(peer);
                return;
        }

        ip_select_fb_ident(iph);
}
EXPORT_SYMBOL(__ip_select_ident);

static void __build_flow_key(struct flowi4 *fl4, const struct sock *sk,
                             const struct iphdr *iph,
                             int oif, u8 tos,
                             u8 prot, u32 mark, int flow_flags)
{
        if (sk) {
                const struct inet_sock *inet = inet_sk(sk);

                oif = sk->sk_bound_dev_if;
                mark = sk->sk_mark;
                tos = RT_CONN_FLAGS(sk);
                prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
        }
        flowi4_init_output(fl4, oif, mark, tos,
                           RT_SCOPE_UNIVERSE, prot,
                           flow_flags,
                           iph->daddr, iph->saddr, 0, 0);
}

static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
                               const struct sock *sk)
{
        const struct iphdr *iph = ip_hdr(skb);
        int oif = skb->dev->ifindex;
        u8 tos = RT_TOS(iph->tos);
        u8 prot = iph->protocol;
        u32 mark = skb->mark;

        __build_flow_key(fl4, sk, iph, oif, tos, prot, mark, 0);
}

static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
{
        const struct inet_sock *inet = inet_sk(sk);
        const struct ip_options_rcu *inet_opt;
        __be32 daddr = inet->inet_daddr;

        rcu_read_lock();
        inet_opt = rcu_dereference(inet->inet_opt);
        if (inet_opt && inet_opt->opt.srr)
                daddr = inet_opt->opt.faddr;
        flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
                           inet_sk_flowi_flags(sk),
                           daddr, inet->inet_saddr, 0, 0);
        rcu_read_unlock();
}

static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
                                 const struct sk_buff *skb)
{
        if (skb)
                build_skb_flow_key(fl4, skb, sk);
        else
                build_sk_flow_key(fl4, sk);
}

static DEFINE_SEQLOCK(fnhe_seqlock);

static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
{
        struct fib_nh_exception *fnhe, *oldest;

        oldest = rcu_dereference(hash->chain);
        for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
             fnhe = rcu_dereference(fnhe->fnhe_next)) {
                if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
                        oldest = fnhe;
        }
        return oldest;
}

static inline u32 fnhe_hashfun(__be32 daddr)
{
        u32 hval;

        hval = (__force u32) daddr;
        hval ^= (hval >> 11) ^ (hval >> 22);

        return hval & (FNHE_HASH_SIZE - 1);
}

static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
                                  u32 pmtu, unsigned long expires)
{
        struct fnhe_hash_bucket *hash;
        struct fib_nh_exception *fnhe;
        int depth;
        u32 hval = fnhe_hashfun(daddr);

        write_seqlock_bh(&fnhe_seqlock);

        hash = nh->nh_exceptions;
        if (!hash) {
                hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC);
                if (!hash)
                        goto out_unlock;
                nh->nh_exceptions = hash;
        }

        hash += hval;

        depth = 0;
        for (fnhe = rcu_dereference(hash->chain); fnhe;
             fnhe = rcu_dereference(fnhe->fnhe_next)) {
                if (fnhe->fnhe_daddr == daddr)
                        break;
                depth++;
        }

        if (fnhe) {
                if (gw)
                        fnhe->fnhe_gw = gw;
                if (pmtu) {
                        fnhe->fnhe_pmtu = pmtu;
                        fnhe->fnhe_expires = expires;
                }
        } else {
                if (depth > FNHE_RECLAIM_DEPTH)
                        fnhe = fnhe_oldest(hash);
                else {
                        fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
                        if (!fnhe)
                                goto out_unlock;

                        fnhe->fnhe_next = hash->chain;
                        rcu_assign_pointer(hash->chain, fnhe);
                }
                fnhe->fnhe_daddr = daddr;
                fnhe->fnhe_gw = gw;
                fnhe->fnhe_pmtu = pmtu;
                fnhe->fnhe_expires = expires;
        }

        fnhe->fnhe_stamp = jiffies;

out_unlock:
        write_sequnlock_bh(&fnhe_seqlock);
        return;
}

static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
                             bool kill_route)
{
        __be32 new_gw = icmp_hdr(skb)->un.gateway;
        __be32 old_gw = ip_hdr(skb)->saddr;
        struct net_device *dev = skb->dev;
        struct in_device *in_dev;
        struct fib_result res;
        struct neighbour *n;
        struct net *net;

        switch (icmp_hdr(skb)->code & 7) {
        case ICMP_REDIR_NET:
        case ICMP_REDIR_NETTOS:
        case ICMP_REDIR_HOST:
        case ICMP_REDIR_HOSTTOS:
                break;

        default:
                return;
        }

        if (rt->rt_gateway != old_gw)
                return;

        in_dev = __in_dev_get_rcu(dev);
        if (!in_dev)
                return;

        net = dev_net(dev);
        if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
            ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
            ipv4_is_zeronet(new_gw))
                goto reject_redirect;

        if (!IN_DEV_SHARED_MEDIA(in_dev)) {
                if (!inet_addr_onlink(in_dev, new_gw, old_gw))
                        goto reject_redirect;
                if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
                        goto reject_redirect;
        } else {
                if (inet_addr_type(net, new_gw) != RTN_UNICAST)
                        goto reject_redirect;
        }

        n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw);
        if (n) {
                if (!(n->nud_state & NUD_VALID)) {
                        neigh_event_send(n, NULL);
                } else {
                        if (fib_lookup(net, fl4, &res) == 0) {
                                struct fib_nh *nh = &FIB_RES_NH(res);

                                update_or_create_fnhe(nh, fl4->daddr, new_gw,
                                                      0, 0);
                        }
                        if (kill_route)
                                rt->dst.obsolete = DST_OBSOLETE_KILL;
                        call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
                }
                neigh_release(n);
        }
        return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
        if (IN_DEV_LOG_MARTIANS(in_dev)) {
                const struct iphdr *iph = (const struct iphdr *) skb->data;
                __be32 daddr = iph->daddr;
                __be32 saddr = iph->saddr;

                net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
                                     "  Advised path = %pI4 -> %pI4\n",
                                     &old_gw, dev->name, &new_gw,
                                     &saddr, &daddr);
        }
#endif
        ;
}

static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
        struct rtable *rt;
        struct flowi4 fl4;

        rt = (struct rtable *) dst;

        ip_rt_build_flow_key(&fl4, sk, skb);
        __ip_do_redirect(rt, skb, &fl4, true);
}

static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
        struct rtable *rt = (struct rtable *)dst;
        struct dst_entry *ret = dst;

        if (rt) {
                if (dst->obsolete > 0) {
                        ip_rt_put(rt);
                        ret = NULL;
                } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
                           rt->dst.expires) {
                        ip_rt_put(rt);
                        ret = NULL;
                }
        }
        return ret;
}

/*
 * Algorithm:
 *      1. The first ip_rt_redirect_number redirects are sent
 *         with exponential backoff, then we stop sending them at all,
 *         assuming that the host ignores our redirects.
 *      2. If we did not see packets requiring redirects
 *         during ip_rt_redirect_silence, we assume that the host
 *         forgot the redirected route and start sending redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */
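
/* Worked example (added for illustration, not part of the original source),
 * assuming HZ = 1000 and the defaults above: ip_rt_redirect_load = HZ/50 =
 * 20 jiffies, so once k redirects have been sent to a peer the next one is
 * delayed by at least 20 << k jiffies (40ms, 80ms, ..., ~5.1s after the 8th).
 * Once ip_rt_redirect_number (9) redirects have gone unanswered we stop
 * completely, and start again only after ip_rt_redirect_silence = 20 << 10
 * jiffies (~20.5s) without seeing packets that would require a redirect.
 */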

void ip_rt_send_redirect(struct sk_buff *skb)
{
        struct rtable *rt = skb_rtable(skb);
        struct in_device *in_dev;
        struct inet_peer *peer;
        struct net *net;
        int log_martians;

        rcu_read_lock();
        in_dev = __in_dev_get_rcu(rt->dst.dev);
        if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
                rcu_read_unlock();
                return;
        }
        log_martians = IN_DEV_LOG_MARTIANS(in_dev);
        rcu_read_unlock();

        net = dev_net(rt->dst.dev);
        peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
        if (!peer) {
                icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
                return;
        }

        /* No redirected packets during ip_rt_redirect_silence;
         * reset the algorithm.
         */
        if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
                peer->rate_tokens = 0;

        /* Too many ignored redirects; do not send anything
         * set dst.rate_last to the last seen redirected packet.
         */
        if (peer->rate_tokens >= ip_rt_redirect_number) {
                peer->rate_last = jiffies;
                goto out_put_peer;
        }

        /* Check for load limit; set rate_last to the latest sent
         * redirect.
         */
        if (peer->rate_tokens == 0 ||
            time_after(jiffies,
                       (peer->rate_last +
                        (ip_rt_redirect_load << peer->rate_tokens)))) {
                icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
                peer->rate_last = jiffies;
                ++peer->rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
                if (log_martians &&
                    peer->rate_tokens == ip_rt_redirect_number)
                        net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
                                             &ip_hdr(skb)->saddr, inet_iif(skb),
                                             &ip_hdr(skb)->daddr, &rt->rt_gateway);
#endif
        }
out_put_peer:
        inet_putpeer(peer);
}

static int ip_error(struct sk_buff *skb)
{
        struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
        struct rtable *rt = skb_rtable(skb);
        struct inet_peer *peer;
        unsigned long now;
        struct net *net;
        bool send;
        int code;

        net = dev_net(rt->dst.dev);
        if (!IN_DEV_FORWARD(in_dev)) {
                switch (rt->dst.error) {
                case EHOSTUNREACH:
                        IP_INC_STATS_BH(net, IPSTATS_MIB_INADDRERRORS);
                        break;

                case ENETUNREACH:
                        IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
                        break;
                }
                goto out;
        }

        switch (rt->dst.error) {
        case EINVAL:
        default:
                goto out;
        case EHOSTUNREACH:
                code = ICMP_HOST_UNREACH;
                break;
        case ENETUNREACH:
                code = ICMP_NET_UNREACH;
                IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
                break;
        case EACCES:
                code = ICMP_PKT_FILTERED;
                break;
        }

        peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);

        send = true;
        if (peer) {
                now = jiffies;
                peer->rate_tokens += now - peer->rate_last;
                if (peer->rate_tokens > ip_rt_error_burst)
                        peer->rate_tokens = ip_rt_error_burst;
                peer->rate_last = now;
                if (peer->rate_tokens >= ip_rt_error_cost)
                        peer->rate_tokens -= ip_rt_error_cost;
                else
                        send = false;
                inet_putpeer(peer);
        }
        if (send)
                icmp_send(skb, ICMP_DEST_UNREACH, code, 0);

out:    kfree_skb(skb);
        return 0;
}

static u32 __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
        struct fib_result res;

        if (mtu < ip_rt_min_pmtu)
                mtu = ip_rt_min_pmtu;

        if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) {
                struct fib_nh *nh = &FIB_RES_NH(res);

                update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
                                      jiffies + ip_rt_mtu_expires);
        }
        return mtu;
}

static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
                              struct sk_buff *skb, u32 mtu)
{
        struct rtable *rt = (struct rtable *) dst;
        struct flowi4 fl4;

        ip_rt_build_flow_key(&fl4, sk, skb);
        mtu = __ip_rt_update_pmtu(rt, &fl4, mtu);

        if (!rt->rt_pmtu) {
                dst->obsolete = DST_OBSOLETE_KILL;
        } else {
                rt->rt_pmtu = mtu;
                dst_set_expires(&rt->dst, ip_rt_mtu_expires);
        }
}

void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
                      int oif, u32 mark, u8 protocol, int flow_flags)
{
        const struct iphdr *iph = (const struct iphdr *) skb->data;
        struct flowi4 fl4;
        struct rtable *rt;

        __build_flow_key(&fl4, NULL, iph, oif,
                         RT_TOS(iph->tos), protocol, mark, flow_flags);
        rt = __ip_route_output_key(net, &fl4);
        if (!IS_ERR(rt)) {
                __ip_rt_update_pmtu(rt, &fl4, mtu);
                ip_rt_put(rt);
        }
}
EXPORT_SYMBOL_GPL(ipv4_update_pmtu);

void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
        const struct iphdr *iph = (const struct iphdr *) skb->data;
        struct flowi4 fl4;
        struct rtable *rt;

        __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
        rt = __ip_route_output_key(sock_net(sk), &fl4);
        if (!IS_ERR(rt)) {
                __ip_rt_update_pmtu(rt, &fl4, mtu);
                ip_rt_put(rt);
        }
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);

void ipv4_redirect(struct sk_buff *skb, struct net *net,
                   int oif, u32 mark, u8 protocol, int flow_flags)
{
        const struct iphdr *iph = (const struct iphdr *) skb->data;
        struct flowi4 fl4;
        struct rtable *rt;

        __build_flow_key(&fl4, NULL, iph, oif,
                         RT_TOS(iph->tos), protocol, mark, flow_flags);
        rt = __ip_route_output_key(net, &fl4);
        if (!IS_ERR(rt)) {
                __ip_do_redirect(rt, skb, &fl4, false);
                ip_rt_put(rt);
        }
}
EXPORT_SYMBOL_GPL(ipv4_redirect);

void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
        const struct iphdr *iph = (const struct iphdr *) skb->data;
        struct flowi4 fl4;
        struct rtable *rt;

        __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
        rt = __ip_route_output_key(sock_net(sk), &fl4);
        if (!IS_ERR(rt)) {
                __ip_do_redirect(rt, skb, &fl4, false);
                ip_rt_put(rt);
        }
}
EXPORT_SYMBOL_GPL(ipv4_sk_redirect);

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
        struct rtable *rt = (struct rtable *) dst;

        /* All IPV4 dsts are created with ->obsolete set to the value
         * DST_OBSOLETE_FORCE_CHK which forces validation calls down
         * into this function always.
         *
         * When a PMTU/redirect information update invalidates a
         * route, this is indicated by setting obsolete to
         * DST_OBSOLETE_KILL.
         */
        if (dst->obsolete == DST_OBSOLETE_KILL || rt_is_expired(rt))
                return NULL;
        return dst;
}

static void ipv4_link_failure(struct sk_buff *skb)
{
        struct rtable *rt;

        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);

        rt = skb_rtable(skb);
        if (rt)
                dst_set_expires(&rt->dst, 0);
}

static int ip_rt_bug(struct sk_buff *skb)
{
        pr_debug("%s: %pI4 -> %pI4, %s\n",
                 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
                 skb->dev ? skb->dev->name : "?");
        kfree_skb(skb);
        WARN_ON(1);
        return 0;
}

/*
   We do not cache the source address of the outgoing interface,
   because it is used only by IP RR, TS and SRR options,
   so it is out of the fast path.

   BTW remember: "addr" is allowed to be not aligned
   in IP options!
 */

void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
{
        __be32 src;

        if (rt_is_output_route(rt))
                src = ip_hdr(skb)->saddr;
        else {
                struct fib_result res;
                struct flowi4 fl4;
                struct iphdr *iph;

                iph = ip_hdr(skb);

                memset(&fl4, 0, sizeof(fl4));
                fl4.daddr = iph->daddr;
                fl4.saddr = iph->saddr;
                fl4.flowi4_tos = RT_TOS(iph->tos);
                fl4.flowi4_oif = rt->dst.dev->ifindex;
                fl4.flowi4_iif = skb->dev->ifindex;
                fl4.flowi4_mark = skb->mark;

                rcu_read_lock();
                if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
                        src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
                else
                        src = inet_select_addr(rt->dst.dev,
                                               rt_nexthop(rt, iph->daddr),
                                               RT_SCOPE_UNIVERSE);
                rcu_read_unlock();
        }
        memcpy(addr, &src, 4);
}

#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
{
        if (!(rt->dst.tclassid & 0xFFFF))
                rt->dst.tclassid |= tag & 0xFFFF;
        if (!(rt->dst.tclassid & 0xFFFF0000))
                rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif

static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
{
        unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);

        if (advmss == 0) {
                advmss = max_t(unsigned int, dst->dev->mtu - 40,
                               ip_rt_min_advmss);
                if (advmss > 65535 - 40)
                        advmss = 65535 - 40;
        }
        return advmss;
}

static unsigned int ipv4_mtu(const struct dst_entry *dst)
{
        const struct rtable *rt = (const struct rtable *) dst;
        unsigned int mtu = rt->rt_pmtu;

        if (mtu && time_after_eq(jiffies, rt->dst.expires))
                mtu = 0;

        if (!mtu)
                mtu = dst_metric_raw(dst, RTAX_MTU);

        if (mtu && rt_is_output_route(rt))
                return mtu;

        mtu = dst->dev->mtu;

        if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
                if (rt->rt_gateway && mtu > 576)
                        mtu = 576;
        }

        if (mtu > IP_MAX_MTU)
                mtu = IP_MAX_MTU;

        return mtu;
}

static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
{
        struct fnhe_hash_bucket *hash = nh->nh_exceptions;
        struct fib_nh_exception *fnhe;
        u32 hval;

        if (!hash)
                return NULL;

        hval = fnhe_hashfun(daddr);

        for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
             fnhe = rcu_dereference(fnhe->fnhe_next)) {
                if (fnhe->fnhe_daddr == daddr)
                        return fnhe;
        }
        return NULL;
}

static void rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
                              __be32 daddr)
{
        __be32 fnhe_daddr, gw;
        unsigned long expires;
        unsigned int seq;
        u32 pmtu;

restart:
        seq = read_seqbegin(&fnhe_seqlock);
        fnhe_daddr = fnhe->fnhe_daddr;
        gw = fnhe->fnhe_gw;
        pmtu = fnhe->fnhe_pmtu;
        expires = fnhe->fnhe_expires;
        if (read_seqretry(&fnhe_seqlock, seq))
                goto restart;

        if (daddr != fnhe_daddr)
                return;

        if (pmtu) {
                unsigned long diff = expires - jiffies;

                if (time_before(jiffies, expires)) {
                        rt->rt_pmtu = pmtu;
                        dst_set_expires(&rt->dst, diff);
                }
        }
        if (gw) {
                rt->rt_flags |= RTCF_REDIRECTED;
                rt->rt_gateway = gw;
        }
        fnhe->fnhe_stamp = jiffies;
}

static inline void rt_free(struct rtable *rt)
{
        call_rcu(&rt->dst.rcu_head, dst_rcu_free);
}

static void rt_cache_route(struct fib_nh *nh, struct rtable *rt)
{
        struct rtable *orig, *prev, **p;

        if (rt_is_input_route(rt)) {
                p = (struct rtable **)&nh->nh_rth_input;
        } else {
                if (!nh->nh_pcpu_rth_output)
                        goto nocache;
                p = (struct rtable **)__this_cpu_ptr(nh->nh_pcpu_rth_output);
        }
        orig = *p;

        prev = cmpxchg(p, orig, rt);
        if (prev == orig) {
                if (orig)
                        rt_free(orig);
        } else {
                /* Routes we intend to cache in the FIB nexthop have
                 * the DST_NOCACHE bit clear.  However, if we are
                 * unsuccessful at storing this route into the cache
                 * we really need to set it.
                 */
nocache:
                rt->dst.flags |= DST_NOCACHE;
        }
}

static bool rt_cache_valid(const struct rtable *rt)
{
        return  rt &&
                rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
                !rt_is_expired(rt);
}

static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
                           const struct fib_result *res,
                           struct fib_nh_exception *fnhe,
                           struct fib_info *fi, u16 type, u32 itag)
{
        if (fi) {
                struct fib_nh *nh = &FIB_RES_NH(*res);

                if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK)
                        rt->rt_gateway = nh->nh_gw;
                if (unlikely(fnhe))
                        rt_bind_exception(rt, fnhe, daddr);
                dst_init_metrics(&rt->dst, fi->fib_metrics, true);
#ifdef CONFIG_IP_ROUTE_CLASSID
                rt->dst.tclassid = nh->nh_tclassid;
#endif
                if (!(rt->dst.flags & DST_NOCACHE))
                        rt_cache_route(nh, rt);
        }

#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
        set_class_tag(rt, res->tclassid);
#endif
        set_class_tag(rt, itag);
#endif
}

static struct rtable *rt_dst_alloc(struct net_device *dev,
                                   bool nopolicy, bool noxfrm, bool will_cache)
{
        return dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
                         (will_cache ? 0 : (DST_HOST | DST_NOCACHE)) |
                         (nopolicy ? DST_NOPOLICY : 0) |
                         (noxfrm ? DST_NOXFRM : 0));
}

/* called in rcu_read_lock() section */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                             u8 tos, struct net_device *dev, int our)
{
        struct rtable *rth;
        struct in_device *in_dev = __in_dev_get_rcu(dev);
        u32 itag = 0;
        int err;

        /* Primary sanity checks. */

        if (in_dev == NULL)
                return -EINVAL;

        if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
            skb->protocol != htons(ETH_P_IP))
                goto e_inval;

        if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
                if (ipv4_is_loopback(saddr))
                        goto e_inval;

        if (ipv4_is_zeronet(saddr)) {
                if (!ipv4_is_local_multicast(daddr))
                        goto e_inval;
        } else {
                err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
                                          in_dev, &itag);
                if (err < 0)
                        goto e_err;
        }
        rth = rt_dst_alloc(dev_net(dev)->loopback_dev,
                           IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
        if (!rth)
                goto e_nobufs;

#ifdef CONFIG_IP_ROUTE_CLASSID
        rth->dst.tclassid = itag;
#endif
        rth->dst.output = ip_rt_bug;

        rth->rt_genid   = rt_genid(dev_net(dev));
        rth->rt_flags   = RTCF_MULTICAST;
        rth->rt_type    = RTN_MULTICAST;
        rth->rt_is_input= 1;
        rth->rt_iif     = 0;
        rth->rt_pmtu    = 0;
        rth->rt_gateway = 0;
        if (our) {
                rth->dst.input= ip_local_deliver;
                rth->rt_flags |= RTCF_LOCAL;
        }

#ifdef CONFIG_IP_MROUTE
        if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
                rth->dst.input = ip_mr_input;
#endif
        RT_CACHE_STAT_INC(in_slow_mc);

        skb_dst_set(skb, &rth->dst);
        return 0;

e_nobufs:
        return -ENOBUFS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343e_inval:
Eric Dumazet96d36222010-06-02 19:21:31 +00001344 return -EINVAL;
Eric Dumazetb5f7e752010-06-02 12:05:27 +00001345e_err:
Eric Dumazetb5f7e752010-06-02 12:05:27 +00001346 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001347}
1348
1349
1350static void ip_handle_martian_source(struct net_device *dev,
1351 struct in_device *in_dev,
1352 struct sk_buff *skb,
Al Viro9e12bb22006-09-26 21:25:20 -07001353 __be32 daddr,
1354 __be32 saddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001355{
1356 RT_CACHE_STAT_INC(in_martian_src);
1357#ifdef CONFIG_IP_ROUTE_VERBOSE
1358 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1359 /*
 1360		 *	RFC1812 recommendation: if the source is martian,
 1361		 *	the only hint is the MAC header.
1362 */
Joe Perches058bd4d2012-03-11 18:36:11 +00001363 pr_warn("martian source %pI4 from %pI4, on dev %s\n",
Harvey Harrison673d57e2008-10-31 00:53:57 -07001364 &daddr, &saddr, dev->name);
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001365 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
Joe Perches058bd4d2012-03-11 18:36:11 +00001366 print_hex_dump(KERN_WARNING, "ll header: ",
1367 DUMP_PREFIX_OFFSET, 16, 1,
1368 skb_mac_header(skb),
1369 dev->hard_header_len, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001370 }
1371 }
1372#endif
1373}
1374
Eric Dumazet47360222010-06-03 04:13:21 +00001375/* called in rcu_read_lock() section */
Stephen Hemminger5969f712008-04-10 01:52:09 -07001376static int __mkroute_input(struct sk_buff *skb,
David S. Miller982721f2011-02-16 21:44:24 -08001377 const struct fib_result *res,
Stephen Hemminger5969f712008-04-10 01:52:09 -07001378 struct in_device *in_dev,
David S. Millerc6cffba2012-07-26 11:14:38 +00001379 __be32 daddr, __be32 saddr, u32 tos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381 struct rtable *rth;
1382 int err;
1383 struct in_device *out_dev;
Eric Dumazet47360222010-06-03 04:13:21 +00001384 unsigned int flags = 0;
David S. Millerd2d68ba2012-07-17 12:58:50 -07001385 bool do_cache;
Al Virod9c9df82006-09-26 21:28:14 -07001386 u32 itag;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001387
1388 /* get a working reference to the output device */
Eric Dumazet47360222010-06-03 04:13:21 +00001389 out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390 if (out_dev == NULL) {
Joe Perchese87cc472012-05-13 21:56:26 +00001391 net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392 return -EINVAL;
1393 }
1394
1395
Michael Smith5c04c812011-04-07 04:51:50 +00001396 err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
David S. Miller9e56e382012-06-28 18:54:02 -07001397 in_dev->dev, in_dev, &itag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398 if (err < 0) {
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001399 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001400 saddr);
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001401
Linus Torvalds1da177e2005-04-16 15:20:36 -07001402 goto cleanup;
1403 }
1404
Thomas Graf51b77ca2008-06-03 16:36:01 -07001405 if (out_dev == in_dev && err &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001406 (IN_DEV_SHARED_MEDIA(out_dev) ||
1407 inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
1408 flags |= RTCF_DOREDIRECT;
1409
1410 if (skb->protocol != htons(ETH_P_IP)) {
 1411		/* Not IP (i.e. ARP). Do not create a route if it is
1412 * invalid for proxy arp. DNAT routes are always valid.
Jesper Dangaard Brouer65324142010-01-05 05:50:47 +00001413 *
 1414		 * The proxy arp feature has been extended to allow ARP
1415 * replies back to the same interface, to support
1416 * Private VLAN switch technologies. See arp.c.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001417 */
Jesper Dangaard Brouer65324142010-01-05 05:50:47 +00001418 if (out_dev == in_dev &&
1419 IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001420 err = -EINVAL;
1421 goto cleanup;
1422 }
1423 }
1424
David S. Millerd2d68ba2012-07-17 12:58:50 -07001425 do_cache = false;
1426 if (res->fi) {
David S. Millerfe3edf42012-07-23 13:22:20 -07001427 if (!itag) {
Eric Dumazet54764bb2012-07-31 01:08:23 +00001428 rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
David S. Millerd2d68ba2012-07-17 12:58:50 -07001429 if (rt_cache_valid(rth)) {
David S. Millerc6cffba2012-07-26 11:14:38 +00001430 skb_dst_set_noref(skb, &rth->dst);
David S. Millerd2d68ba2012-07-17 12:58:50 -07001431 goto out;
1432 }
1433 do_cache = true;
1434 }
1435 }
David S. Millerf2bb4be2012-07-17 12:20:47 -07001436
David S. Miller5c1e6aa2011-04-28 14:13:38 -07001437 rth = rt_dst_alloc(out_dev->dev,
1438 IN_DEV_CONF_GET(in_dev, NOPOLICY),
David S. Millerd2d68ba2012-07-17 12:58:50 -07001439 IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440 if (!rth) {
1441 err = -ENOBUFS;
1442 goto cleanup;
1443 }
1444
David S. Millercf911662011-04-28 14:31:47 -07001445 rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
1446 rth->rt_flags = flags;
1447 rth->rt_type = res->type;
David S. Miller9917e1e82012-07-17 14:44:26 -07001448 rth->rt_is_input = 1;
David S. Miller13378ca2012-07-23 13:57:45 -07001449 rth->rt_iif = 0;
David S. Miller59436342012-07-10 06:58:42 -07001450 rth->rt_pmtu = 0;
David S. Millerf8126f12012-07-13 05:03:45 -07001451 rth->rt_gateway = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452
Changli Gaod8d1f302010-06-10 23:31:35 -07001453 rth->dst.input = ip_forward;
1454 rth->dst.output = ip_output;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455
David S. Millerd2d68ba2012-07-17 12:58:50 -07001456 rt_set_nexthop(rth, daddr, res, NULL, res->fi, res->type, itag);
David S. Millerc6cffba2012-07-26 11:14:38 +00001457 skb_dst_set(skb, &rth->dst);
David S. Millerd2d68ba2012-07-17 12:58:50 -07001458out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459 err = 0;
1460 cleanup:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461 return err;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001462}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463
Stephen Hemminger5969f712008-04-10 01:52:09 -07001464static int ip_mkroute_input(struct sk_buff *skb,
1465 struct fib_result *res,
David S. Miller68a5e3d2011-03-11 20:07:33 -05001466 const struct flowi4 *fl4,
Stephen Hemminger5969f712008-04-10 01:52:09 -07001467 struct in_device *in_dev,
1468 __be32 daddr, __be32 saddr, u32 tos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001469{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001470#ifdef CONFIG_IP_ROUTE_MULTIPATH
David S. Millerff3fccb2011-03-10 16:23:24 -08001471 if (res->fi && res->fi->fib_nhs > 1)
David S. Miller1b7fe5932011-03-10 17:01:16 -08001472 fib_select_multipath(res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473#endif
1474
1475 /* create a routing cache entry */
David S. Millerc6cffba2012-07-26 11:14:38 +00001476 return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477}
1478
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479/*
 1480 *	NOTE. We drop all packets that have local source
 1481 *	addresses, because every properly looped-back packet
 1482 *	must already have the correct destination attached by the output routine.
 1483 *
 1484 *	This approach solves two big problems:
 1485 *	1. Non-simplex devices are handled properly.
 1486 *	2. IP spoofing attempts are filtered with a 100% guarantee.
Eric Dumazetebc0ffa2010-10-05 10:41:36 +00001487 * called with rcu_read_lock()
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488 */
1489
Al Viro9e12bb22006-09-26 21:25:20 -07001490static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
David S. Millerc10237e2012-06-27 17:05:06 -07001491 u8 tos, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001492{
1493 struct fib_result res;
Eric Dumazet96d36222010-06-02 19:21:31 +00001494 struct in_device *in_dev = __in_dev_get_rcu(dev);
David S. Miller68a5e3d2011-03-11 20:07:33 -05001495 struct flowi4 fl4;
Eric Dumazet95c96172012-04-15 05:58:06 +00001496 unsigned int flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497 u32 itag = 0;
Eric Dumazet95c96172012-04-15 05:58:06 +00001498 struct rtable *rth;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499 int err = -EINVAL;
Daniel Baluta5e73ea12012-04-15 01:34:41 +00001500 struct net *net = dev_net(dev);
David S. Millerd2d68ba2012-07-17 12:58:50 -07001501 bool do_cache;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502
1503 /* IP on this device is disabled. */
1504
1505 if (!in_dev)
1506 goto out;
1507
 1508	/* Check for the most weird martians, which cannot be detected
1509 by fib_lookup.
1510 */
1511
Thomas Grafd0daebc32012-06-12 00:44:01 +00001512 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513 goto martian_source;
1514
David S. Millerd2d68ba2012-07-17 12:58:50 -07001515 res.fi = NULL;
Andy Walls27a954b2010-10-17 15:11:22 +00001516 if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517 goto brd_input;
1518
 1519	/* Accept zero addresses only for limited broadcast;
 1520	 * I do not even know whether to fix it or not. Waiting for complaints :-)
1521 */
Joe Perchesf97c1e02007-12-16 13:45:43 -08001522 if (ipv4_is_zeronet(saddr))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523 goto martian_source;
1524
Thomas Grafd0daebc32012-06-12 00:44:01 +00001525 if (ipv4_is_zeronet(daddr))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 goto martian_destination;
1527
Thomas Grafd0daebc32012-06-12 00:44:01 +00001528 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev))) {
1529 if (ipv4_is_loopback(daddr))
1530 goto martian_destination;
1531
1532 if (ipv4_is_loopback(saddr))
1533 goto martian_source;
1534 }
1535
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536 /*
 1537	 *	Now we are ready to route the packet.
1538 */
David S. Miller68a5e3d2011-03-11 20:07:33 -05001539 fl4.flowi4_oif = 0;
1540 fl4.flowi4_iif = dev->ifindex;
1541 fl4.flowi4_mark = skb->mark;
1542 fl4.flowi4_tos = tos;
1543 fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
1544 fl4.daddr = daddr;
1545 fl4.saddr = saddr;
1546 err = fib_lookup(net, &fl4, &res);
David S. Miller251da412012-06-26 16:27:09 -07001547 if (err != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548 goto no_route;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549
1550 RT_CACHE_STAT_INC(in_slow_tot);
1551
1552 if (res.type == RTN_BROADCAST)
1553 goto brd_input;
1554
1555 if (res.type == RTN_LOCAL) {
Michael Smith5c04c812011-04-07 04:51:50 +00001556 err = fib_validate_source(skb, saddr, daddr, tos,
Eric Dumazetebc0ffa2010-10-05 10:41:36 +00001557 net->loopback_dev->ifindex,
David S. Miller9e56e382012-06-28 18:54:02 -07001558 dev, in_dev, &itag);
Eric Dumazetb5f7e752010-06-02 12:05:27 +00001559 if (err < 0)
1560 goto martian_source_keep_err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561 goto local_input;
1562 }
1563
1564 if (!IN_DEV_FORWARD(in_dev))
David S. Miller251da412012-06-26 16:27:09 -07001565 goto no_route;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566 if (res.type != RTN_UNICAST)
1567 goto martian_destination;
1568
David S. Miller68a5e3d2011-03-11 20:07:33 -05001569 err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570out: return err;
1571
1572brd_input:
1573 if (skb->protocol != htons(ETH_P_IP))
1574 goto e_inval;
1575
David S. Miller41347dc2012-06-28 04:05:27 -07001576 if (!ipv4_is_zeronet(saddr)) {
David S. Miller9e56e382012-06-28 18:54:02 -07001577 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
1578 in_dev, &itag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579 if (err < 0)
Eric Dumazetb5f7e752010-06-02 12:05:27 +00001580 goto martian_source_keep_err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581 }
1582 flags |= RTCF_BROADCAST;
1583 res.type = RTN_BROADCAST;
1584 RT_CACHE_STAT_INC(in_brd);
1585
1586local_input:
David S. Millerd2d68ba2012-07-17 12:58:50 -07001587 do_cache = false;
1588 if (res.fi) {
David S. Millerfe3edf42012-07-23 13:22:20 -07001589 if (!itag) {
Eric Dumazet54764bb2012-07-31 01:08:23 +00001590 rth = rcu_dereference(FIB_RES_NH(res).nh_rth_input);
David S. Millerd2d68ba2012-07-17 12:58:50 -07001591 if (rt_cache_valid(rth)) {
David S. Millerc6cffba2012-07-26 11:14:38 +00001592 skb_dst_set_noref(skb, &rth->dst);
1593 err = 0;
1594 goto out;
David S. Millerd2d68ba2012-07-17 12:58:50 -07001595 }
1596 do_cache = true;
1597 }
1598 }
1599
David S. Miller5c1e6aa2011-04-28 14:13:38 -07001600 rth = rt_dst_alloc(net->loopback_dev,
David S. Millerd2d68ba2012-07-17 12:58:50 -07001601 IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001602 if (!rth)
1603 goto e_nobufs;
1604
David S. Millercf911662011-04-28 14:31:47 -07001605 rth->dst.input= ip_local_deliver;
Changli Gaod8d1f302010-06-10 23:31:35 -07001606 rth->dst.output= ip_rt_bug;
David S. Millercf911662011-04-28 14:31:47 -07001607#ifdef CONFIG_IP_ROUTE_CLASSID
1608 rth->dst.tclassid = itag;
1609#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610
David S. Millercf911662011-04-28 14:31:47 -07001611 rth->rt_genid = rt_genid(net);
1612 rth->rt_flags = flags|RTCF_LOCAL;
1613 rth->rt_type = res.type;
David S. Miller9917e1e82012-07-17 14:44:26 -07001614 rth->rt_is_input = 1;
David S. Miller13378ca2012-07-23 13:57:45 -07001615 rth->rt_iif = 0;
David S. Miller59436342012-07-10 06:58:42 -07001616 rth->rt_pmtu = 0;
David S. Millerf8126f12012-07-13 05:03:45 -07001617 rth->rt_gateway = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001618 if (res.type == RTN_UNREACHABLE) {
Changli Gaod8d1f302010-06-10 23:31:35 -07001619 rth->dst.input= ip_error;
1620 rth->dst.error= -err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001621 rth->rt_flags &= ~RTCF_LOCAL;
1622 }
David S. Millerd2d68ba2012-07-17 12:58:50 -07001623 if (do_cache)
1624 rt_cache_route(&FIB_RES_NH(res), rth);
David S. Miller89aef892012-07-17 11:00:09 -07001625 skb_dst_set(skb, &rth->dst);
David S. Millerb23dd4f2011-03-02 14:31:35 -08001626 err = 0;
Eric Dumazetebc0ffa2010-10-05 10:41:36 +00001627 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628
1629no_route:
1630 RT_CACHE_STAT_INC(in_no_route);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631 res.type = RTN_UNREACHABLE;
Mitsuru Chinen7f538782007-12-07 01:07:24 -08001632 if (err == -ESRCH)
1633 err = -ENETUNREACH;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634 goto local_input;
1635
1636 /*
1637 * Do not cache martian addresses: they should be logged (RFC1812)
1638 */
1639martian_destination:
1640 RT_CACHE_STAT_INC(in_martian_dst);
1641#ifdef CONFIG_IP_ROUTE_VERBOSE
Joe Perchese87cc472012-05-13 21:56:26 +00001642 if (IN_DEV_LOG_MARTIANS(in_dev))
1643 net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
1644 &daddr, &saddr, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645#endif
Dietmar Eggemann2c2910a2005-06-28 13:06:23 -07001646
Linus Torvalds1da177e2005-04-16 15:20:36 -07001647e_inval:
1648 err = -EINVAL;
Eric Dumazetebc0ffa2010-10-05 10:41:36 +00001649 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650
1651e_nobufs:
1652 err = -ENOBUFS;
Eric Dumazetebc0ffa2010-10-05 10:41:36 +00001653 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654
1655martian_source:
Eric Dumazetb5f7e752010-06-02 12:05:27 +00001656 err = -EINVAL;
1657martian_source_keep_err:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
Eric Dumazetebc0ffa2010-10-05 10:41:36 +00001659 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660}
1661
David S. Millerc6cffba2012-07-26 11:14:38 +00001662int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1663 u8 tos, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664{
Eric Dumazet96d36222010-06-02 19:21:31 +00001665 int res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666
Eric Dumazet96d36222010-06-02 19:21:31 +00001667 rcu_read_lock();
1668
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669	/* Multicast recognition logic was moved from the route cache to here.
 1670	   The problem was that too many Ethernet cards have broken/missing
 1671	   hardware multicast filters :-( As a result, a host on a multicast
 1672	   network acquires a lot of useless route cache entries, sort of
 1673	   SDR messages from all over the world. Now we try to get rid of them.
 1674	   Really, provided the software IP multicast filter is organized
 1675	   reasonably (at least, hashed), it does not result in a slowdown
 1676	   compared with route cache reject entries.
 1677	   Note that multicast routers are not affected, because
 1678	   a route cache entry is created eventually.
1679 */
Joe Perchesf97c1e02007-12-16 13:45:43 -08001680 if (ipv4_is_multicast(daddr)) {
Eric Dumazet96d36222010-06-02 19:21:31 +00001681 struct in_device *in_dev = __in_dev_get_rcu(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682
Eric Dumazet96d36222010-06-02 19:21:31 +00001683 if (in_dev) {
David S. Millerdbdd9a52011-03-10 16:34:38 -08001684 int our = ip_check_mc_rcu(in_dev, daddr, saddr,
1685 ip_hdr(skb)->protocol);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686 if (our
1687#ifdef CONFIG_IP_MROUTE
Joe Perches9d4fb272009-11-23 10:41:23 -08001688 ||
1689 (!ipv4_is_local_multicast(daddr) &&
1690 IN_DEV_MFORWARD(in_dev))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691#endif
Joe Perches9d4fb272009-11-23 10:41:23 -08001692 ) {
Eric Dumazet96d36222010-06-02 19:21:31 +00001693 int res = ip_route_input_mc(skb, daddr, saddr,
1694 tos, dev, our);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695 rcu_read_unlock();
Eric Dumazet96d36222010-06-02 19:21:31 +00001696 return res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697 }
1698 }
1699 rcu_read_unlock();
1700 return -EINVAL;
1701 }
David S. Millerc10237e2012-06-27 17:05:06 -07001702 res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
Eric Dumazet96d36222010-06-02 19:21:31 +00001703 rcu_read_unlock();
1704 return res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705}
David S. Millerc6cffba2012-07-26 11:14:38 +00001706EXPORT_SYMBOL(ip_route_input_noref);
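/*
 * Usage sketch (a minimal, illustrative example; it assumes a caller
 * running in the receive path under RCU/BH protection, the way
 * ip_rcv_finish() invokes this):
 *
 *	const struct iphdr *iph = ip_hdr(skb);
 *	int err;
 *
 *	err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
 *				   iph->tos, skb->dev);
 *	if (err)
 *		goto drop;
 *
 * On success skb_dst(skb) is set and dst_input(skb) either delivers
 * locally or forwards.  The "noref" in the name means the dst may have
 * been attached with skb_dst_set_noref(), i.e. without taking a
 * reference, so the skb should be consumed within the same RCU/BH
 * section unless the caller grabs its own reference (skb_dst_force()).
 */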
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707
Eric Dumazetebc0ffa2010-10-05 10:41:36 +00001708/* called with rcu_read_lock() */
David S. Miller982721f2011-02-16 21:44:24 -08001709static struct rtable *__mkroute_output(const struct fib_result *res,
David Miller1a00fee2012-07-01 02:02:56 +00001710 const struct flowi4 *fl4, int orig_oif,
Julian Anastasovf61759e2011-12-02 11:39:42 +00001711 struct net_device *dev_out,
David S. Miller5ada5522011-02-17 15:29:00 -08001712 unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713{
David S. Miller982721f2011-02-16 21:44:24 -08001714 struct fib_info *fi = res->fi;
David S. Millerf2bb4be2012-07-17 12:20:47 -07001715 struct fib_nh_exception *fnhe;
David S. Miller5ada5522011-02-17 15:29:00 -08001716 struct in_device *in_dev;
David S. Miller982721f2011-02-16 21:44:24 -08001717 u16 type = res->type;
David S. Miller5ada5522011-02-17 15:29:00 -08001718 struct rtable *rth;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719
Thomas Grafd0daebc32012-06-12 00:44:01 +00001720 in_dev = __in_dev_get_rcu(dev_out);
1721 if (!in_dev)
David S. Miller5ada5522011-02-17 15:29:00 -08001722 return ERR_PTR(-EINVAL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723
Thomas Grafd0daebc32012-06-12 00:44:01 +00001724 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
1725 if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
1726 return ERR_PTR(-EINVAL);
1727
David S. Miller68a5e3d2011-03-11 20:07:33 -05001728 if (ipv4_is_lbcast(fl4->daddr))
David S. Miller982721f2011-02-16 21:44:24 -08001729 type = RTN_BROADCAST;
David S. Miller68a5e3d2011-03-11 20:07:33 -05001730 else if (ipv4_is_multicast(fl4->daddr))
David S. Miller982721f2011-02-16 21:44:24 -08001731 type = RTN_MULTICAST;
David S. Miller68a5e3d2011-03-11 20:07:33 -05001732 else if (ipv4_is_zeronet(fl4->daddr))
David S. Miller5ada5522011-02-17 15:29:00 -08001733 return ERR_PTR(-EINVAL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734
1735 if (dev_out->flags & IFF_LOOPBACK)
1736 flags |= RTCF_LOCAL;
1737
David S. Miller982721f2011-02-16 21:44:24 -08001738 if (type == RTN_BROADCAST) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739 flags |= RTCF_BROADCAST | RTCF_LOCAL;
David S. Miller982721f2011-02-16 21:44:24 -08001740 fi = NULL;
1741 } else if (type == RTN_MULTICAST) {
Eric Dumazetdd28d1a2010-09-29 11:53:50 +00001742 flags |= RTCF_MULTICAST | RTCF_LOCAL;
David S. Miller813b3b52011-04-28 14:48:42 -07001743 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
1744 fl4->flowi4_proto))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745 flags &= ~RTCF_LOCAL;
 1746		/* If a multicast route does not exist, use the
Eric Dumazetdd28d1a2010-09-29 11:53:50 +00001747 * default one, but do not gateway in this case.
 1748		 * Yes, it is a hack.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001749 */
David S. Miller982721f2011-02-16 21:44:24 -08001750 if (fi && res->prefixlen < 4)
1751 fi = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752 }
1753
David S. Millerf2bb4be2012-07-17 12:20:47 -07001754 fnhe = NULL;
1755 if (fi) {
1756 fnhe = find_exception(&FIB_RES_NH(*res), fl4->daddr);
Eric Dumazetd26b3a72012-07-31 05:45:30 +00001757 if (!fnhe && FIB_RES_NH(*res).nh_pcpu_rth_output) {
1758 struct rtable __rcu **prth;
1759
1760 prth = __this_cpu_ptr(FIB_RES_NH(*res).nh_pcpu_rth_output);
1761 rth = rcu_dereference(*prth);
David S. Millerd2d68ba2012-07-17 12:58:50 -07001762 if (rt_cache_valid(rth)) {
David S. Miller93ac5342012-07-17 14:09:39 -07001763 dst_hold(&rth->dst);
David S. Millerf2bb4be2012-07-17 12:20:47 -07001764 return rth;
1765 }
1766 }
1767 }
David S. Miller5c1e6aa2011-04-28 14:13:38 -07001768 rth = rt_dst_alloc(dev_out,
1769 IN_DEV_CONF_GET(in_dev, NOPOLICY),
David S. Millerf2bb4be2012-07-17 12:20:47 -07001770 IN_DEV_CONF_GET(in_dev, NOXFRM),
1771 fi && !fnhe);
Dimitris Michailidis8391d072010-10-07 14:48:38 +00001772 if (!rth)
David S. Miller5ada5522011-02-17 15:29:00 -08001773 return ERR_PTR(-ENOBUFS);
Dimitris Michailidis8391d072010-10-07 14:48:38 +00001774
David S. Millercf911662011-04-28 14:31:47 -07001775 rth->dst.output = ip_output;
1776
David S. Millercf911662011-04-28 14:31:47 -07001777 rth->rt_genid = rt_genid(dev_net(dev_out));
1778 rth->rt_flags = flags;
1779 rth->rt_type = type;
David S. Miller9917e1e82012-07-17 14:44:26 -07001780 rth->rt_is_input = 0;
David S. Miller13378ca2012-07-23 13:57:45 -07001781 rth->rt_iif = orig_oif ? : 0;
David S. Miller59436342012-07-10 06:58:42 -07001782 rth->rt_pmtu = 0;
David S. Millerf8126f12012-07-13 05:03:45 -07001783 rth->rt_gateway = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784
1785 RT_CACHE_STAT_INC(out_slow_tot);
1786
David S. Miller41347dc2012-06-28 04:05:27 -07001787 if (flags & RTCF_LOCAL)
Changli Gaod8d1f302010-06-10 23:31:35 -07001788 rth->dst.input = ip_local_deliver;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001790 if (flags & RTCF_LOCAL &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791 !(dev_out->flags & IFF_LOOPBACK)) {
Changli Gaod8d1f302010-06-10 23:31:35 -07001792 rth->dst.output = ip_mc_output;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 RT_CACHE_STAT_INC(out_slow_mc);
1794 }
1795#ifdef CONFIG_IP_MROUTE
David S. Miller982721f2011-02-16 21:44:24 -08001796 if (type == RTN_MULTICAST) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797 if (IN_DEV_MFORWARD(in_dev) &&
David S. Miller813b3b52011-04-28 14:48:42 -07001798 !ipv4_is_local_multicast(fl4->daddr)) {
Changli Gaod8d1f302010-06-10 23:31:35 -07001799 rth->dst.input = ip_mr_input;
1800 rth->dst.output = ip_mc_output;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 }
1802 }
1803#endif
1804 }
1805
David S. Millerf2bb4be2012-07-17 12:20:47 -07001806 rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807
David S. Miller5ada5522011-02-17 15:29:00 -08001808 return rth;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809}
1810
Linus Torvalds1da177e2005-04-16 15:20:36 -07001811/*
1812 * Major route resolver routine.
1813 */
1814
David S. Miller89aef892012-07-17 11:00:09 -07001815struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817 struct net_device *dev_out = NULL;
Julian Anastasovf61759e2011-12-02 11:39:42 +00001818 __u8 tos = RT_FL_TOS(fl4);
David S. Miller813b3b52011-04-28 14:48:42 -07001819 unsigned int flags = 0;
1820 struct fib_result res;
David S. Miller5ada5522011-02-17 15:29:00 -08001821 struct rtable *rth;
David S. Miller813b3b52011-04-28 14:48:42 -07001822 int orig_oif;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823
David S. Miller85b91b02012-07-13 08:21:29 -07001824 res.tclassid = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825 res.fi = NULL;
David S. Miller8b96d222012-06-11 02:01:56 -07001826 res.table = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827
David S. Miller813b3b52011-04-28 14:48:42 -07001828 orig_oif = fl4->flowi4_oif;
1829
1830 fl4->flowi4_iif = net->loopback_dev->ifindex;
1831 fl4->flowi4_tos = tos & IPTOS_RT_MASK;
1832 fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
1833 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
David S. Miller44713b62011-03-04 21:24:47 -08001834
David S. Miller010c2702011-02-17 15:37:09 -08001835 rcu_read_lock();
David S. Miller813b3b52011-04-28 14:48:42 -07001836 if (fl4->saddr) {
David S. Millerb23dd4f2011-03-02 14:31:35 -08001837 rth = ERR_PTR(-EINVAL);
David S. Miller813b3b52011-04-28 14:48:42 -07001838 if (ipv4_is_multicast(fl4->saddr) ||
1839 ipv4_is_lbcast(fl4->saddr) ||
1840 ipv4_is_zeronet(fl4->saddr))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001841 goto out;
1842
Linus Torvalds1da177e2005-04-16 15:20:36 -07001843		/* I removed the check for oif == dev_out->oif here.
1844 It was wrong for two reasons:
Denis V. Lunev1ab35272008-01-22 22:04:30 -08001845		   1. ip_dev_find(net, saddr) can return the wrong iface if saddr
1846 is assigned to multiple interfaces.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847 2. Moreover, we are allowed to send packets with saddr
1848 of another iface. --ANK
1849 */
1850
David S. Miller813b3b52011-04-28 14:48:42 -07001851 if (fl4->flowi4_oif == 0 &&
1852 (ipv4_is_multicast(fl4->daddr) ||
1853 ipv4_is_lbcast(fl4->daddr))) {
Julian Anastasova210d012008-10-01 07:28:28 -07001854 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
David S. Miller813b3b52011-04-28 14:48:42 -07001855 dev_out = __ip_dev_find(net, fl4->saddr, false);
Julian Anastasova210d012008-10-01 07:28:28 -07001856 if (dev_out == NULL)
1857 goto out;
1858
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859			/* Special hack: the user can direct multicasts
 1860			   and limited broadcast via the necessary interface
 1861			   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
 1862			   This hack is not just for fun, it allows
 1863			   vic, vat and friends to work.
 1864			   They bind a socket to loopback, set the ttl to zero
 1865			   and expect that it will work.
 1866			   From the viewpoint of the routing cache they are broken,
 1867			   because we are not allowed to build a multicast path
 1868			   with a loopback source addr (look, the routing cache
 1869			   cannot know that the ttl is zero, so that the packet
 1870			   will not leave this host and the route is valid).
 1871			   Luckily, this hack is a good workaround.
1872 */
1873
David S. Miller813b3b52011-04-28 14:48:42 -07001874 fl4->flowi4_oif = dev_out->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001875 goto make_route;
1876 }
Julian Anastasova210d012008-10-01 07:28:28 -07001877
David S. Miller813b3b52011-04-28 14:48:42 -07001878 if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
Julian Anastasova210d012008-10-01 07:28:28 -07001879 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
David S. Miller813b3b52011-04-28 14:48:42 -07001880 if (!__ip_dev_find(net, fl4->saddr, false))
Julian Anastasova210d012008-10-01 07:28:28 -07001881 goto out;
Julian Anastasova210d012008-10-01 07:28:28 -07001882 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001883 }
1884
1885
David S. Miller813b3b52011-04-28 14:48:42 -07001886 if (fl4->flowi4_oif) {
1887 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
David S. Millerb23dd4f2011-03-02 14:31:35 -08001888 rth = ERR_PTR(-ENODEV);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889 if (dev_out == NULL)
1890 goto out;
Herbert Xue5ed6392005-10-03 14:35:55 -07001891
1892 /* RACE: Check return value of inet_select_addr instead. */
Eric Dumazetfc75fc82010-12-22 04:39:39 +00001893 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
David S. Millerb23dd4f2011-03-02 14:31:35 -08001894 rth = ERR_PTR(-ENETUNREACH);
Eric Dumazetfc75fc82010-12-22 04:39:39 +00001895 goto out;
1896 }
David S. Miller813b3b52011-04-28 14:48:42 -07001897 if (ipv4_is_local_multicast(fl4->daddr) ||
1898 ipv4_is_lbcast(fl4->daddr)) {
1899 if (!fl4->saddr)
1900 fl4->saddr = inet_select_addr(dev_out, 0,
1901 RT_SCOPE_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001902 goto make_route;
1903 }
David S. Miller813b3b52011-04-28 14:48:42 -07001904 if (fl4->saddr) {
1905 if (ipv4_is_multicast(fl4->daddr))
1906 fl4->saddr = inet_select_addr(dev_out, 0,
1907 fl4->flowi4_scope);
1908 else if (!fl4->daddr)
1909 fl4->saddr = inet_select_addr(dev_out, 0,
1910 RT_SCOPE_HOST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911 }
1912 }
1913
David S. Miller813b3b52011-04-28 14:48:42 -07001914 if (!fl4->daddr) {
1915 fl4->daddr = fl4->saddr;
1916 if (!fl4->daddr)
1917 fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
Denis V. Lunevb40afd02008-01-22 22:06:19 -08001918 dev_out = net->loopback_dev;
David S. Miller813b3b52011-04-28 14:48:42 -07001919 fl4->flowi4_oif = net->loopback_dev->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920 res.type = RTN_LOCAL;
1921 flags |= RTCF_LOCAL;
1922 goto make_route;
1923 }
1924
David S. Miller813b3b52011-04-28 14:48:42 -07001925 if (fib_lookup(net, fl4, &res)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926 res.fi = NULL;
David S. Miller8b96d222012-06-11 02:01:56 -07001927 res.table = NULL;
David S. Miller813b3b52011-04-28 14:48:42 -07001928 if (fl4->flowi4_oif) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929			/* Apparently, the routing tables are wrong. Assume
1930 that the destination is on link.
1931
1932 WHY? DW.
 1933			   Because we are allowed to send to an iface
 1934			   even if it has NO routes and NO assigned
 1935			   addresses. When oif is specified, the routing
 1936			   tables are looked up with only one purpose:
 1937			   to catch whether the destination is gatewayed, rather than
 1938			   direct. Moreover, if MSG_DONTROUTE is set,
 1939			   we send the packet, ignoring both the routing tables
 1940			   and the ifaddr state. --ANK
1941
1942
 1943			   We could make this work even if oif is unknown,
 1944			   likely as IPv6 does, but we do not.
1945 */
1946
David S. Miller813b3b52011-04-28 14:48:42 -07001947 if (fl4->saddr == 0)
1948 fl4->saddr = inet_select_addr(dev_out, 0,
1949 RT_SCOPE_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950 res.type = RTN_UNICAST;
1951 goto make_route;
1952 }
David S. Millerb23dd4f2011-03-02 14:31:35 -08001953 rth = ERR_PTR(-ENETUNREACH);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954 goto out;
1955 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956
1957 if (res.type == RTN_LOCAL) {
David S. Miller813b3b52011-04-28 14:48:42 -07001958 if (!fl4->saddr) {
Joel Sing9fc3bbb2011-01-03 20:24:20 +00001959 if (res.fi->fib_prefsrc)
David S. Miller813b3b52011-04-28 14:48:42 -07001960 fl4->saddr = res.fi->fib_prefsrc;
Joel Sing9fc3bbb2011-01-03 20:24:20 +00001961 else
David S. Miller813b3b52011-04-28 14:48:42 -07001962 fl4->saddr = fl4->daddr;
Joel Sing9fc3bbb2011-01-03 20:24:20 +00001963 }
Denis V. Lunevb40afd02008-01-22 22:06:19 -08001964 dev_out = net->loopback_dev;
David S. Miller813b3b52011-04-28 14:48:42 -07001965 fl4->flowi4_oif = dev_out->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966 res.fi = NULL;
1967 flags |= RTCF_LOCAL;
1968 goto make_route;
1969 }
1970
1971#ifdef CONFIG_IP_ROUTE_MULTIPATH
David S. Miller813b3b52011-04-28 14:48:42 -07001972 if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0)
David S. Miller1b7fe5932011-03-10 17:01:16 -08001973 fib_select_multipath(&res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974 else
1975#endif
David S. Miller21d8c492011-04-14 14:49:37 -07001976 if (!res.prefixlen &&
1977 res.table->tb_num_default > 1 &&
David S. Miller813b3b52011-04-28 14:48:42 -07001978 res.type == RTN_UNICAST && !fl4->flowi4_oif)
David S. Miller0c838ff2011-01-31 16:16:50 -08001979 fib_select_default(&res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980
David S. Miller813b3b52011-04-28 14:48:42 -07001981 if (!fl4->saddr)
1982 fl4->saddr = FIB_RES_PREFSRC(net, res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984 dev_out = FIB_RES_DEV(res);
David S. Miller813b3b52011-04-28 14:48:42 -07001985 fl4->flowi4_oif = dev_out->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986
1987
1988make_route:
David Miller1a00fee2012-07-01 02:02:56 +00001989 rth = __mkroute_output(&res, fl4, orig_oif, dev_out, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990
David S. Miller010c2702011-02-17 15:37:09 -08001991out:
1992 rcu_read_unlock();
David S. Millerb23dd4f2011-03-02 14:31:35 -08001993 return rth;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994}
Arnaldo Carvalho de Melod8c97a92005-08-09 20:12:12 -07001995EXPORT_SYMBOL_GPL(__ip_route_output_key);
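/*
 * Usage sketch (a minimal output-route lookup; real callers such as
 * inet_rtm_getroute() below follow the same pattern):
 *
 *	struct flowi4 fl4;
 *	struct rtable *rt;
 *
 *	memset(&fl4, 0, sizeof(fl4));
 *	fl4.daddr = daddr;
 *	fl4.saddr = saddr;
 *	fl4.flowi4_tos = tos;
 *	fl4.flowi4_oif = oif;
 *	fl4.flowi4_mark = mark;
 *
 *	rt = ip_route_output_key(net, &fl4);
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *	...
 *	ip_rt_put(rt);
 *
 * saddr and oif may be zero; the lookup then chooses them, and on
 * return fl4.saddr/fl4.daddr hold the addresses that were actually
 * selected, which is why the flowi4 is passed by pointer.
 */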
1996
Jianzhao Wangae2688d2010-09-08 14:35:43 -07001997static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
1998{
1999 return NULL;
2000}
2001
Steffen Klassertebb762f2011-11-23 02:12:51 +00002002static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
Roland Dreierec831ea2011-01-31 13:16:00 -08002003{
Steffen Klassert618f9bc2011-11-23 02:13:31 +00002004 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2005
2006 return mtu ? : dst->dev->mtu;
Roland Dreierec831ea2011-01-31 13:16:00 -08002007}
2008
David S. Miller6700c272012-07-17 03:29:28 -07002009static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
2010 struct sk_buff *skb, u32 mtu)
David S. Miller14e50e52007-05-24 18:17:54 -07002011{
2012}
2013
David S. Miller6700c272012-07-17 03:29:28 -07002014static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
2015 struct sk_buff *skb)
David S. Millerb587ee32012-07-12 00:39:24 -07002016{
2017}
2018
Held Bernhard0972ddb2011-04-24 22:07:32 +00002019static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
2020 unsigned long old)
2021{
2022 return NULL;
2023}
2024
David S. Miller14e50e52007-05-24 18:17:54 -07002025static struct dst_ops ipv4_dst_blackhole_ops = {
2026 .family = AF_INET,
Harvey Harrison09640e62009-02-01 00:45:17 -08002027 .protocol = cpu_to_be16(ETH_P_IP),
Jianzhao Wangae2688d2010-09-08 14:35:43 -07002028 .check = ipv4_blackhole_dst_check,
Steffen Klassertebb762f2011-11-23 02:12:51 +00002029 .mtu = ipv4_blackhole_mtu,
Eric Dumazet214f45c2011-02-18 11:39:01 -08002030 .default_advmss = ipv4_default_advmss,
David S. Miller14e50e52007-05-24 18:17:54 -07002031 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
David S. Millerb587ee32012-07-12 00:39:24 -07002032 .redirect = ipv4_rt_blackhole_redirect,
Held Bernhard0972ddb2011-04-24 22:07:32 +00002033 .cow_metrics = ipv4_rt_blackhole_cow_metrics,
David S. Millerd3aaeb32011-07-18 00:40:17 -07002034 .neigh_lookup = ipv4_neigh_lookup,
David S. Miller14e50e52007-05-24 18:17:54 -07002035};
2036
David S. Miller2774c132011-03-01 14:59:04 -08002037struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
David S. Miller14e50e52007-05-24 18:17:54 -07002038{
David S. Miller2774c132011-03-01 14:59:04 -08002039 struct rtable *ort = (struct rtable *) dst_orig;
David S. Millerf5b0a872012-07-19 12:31:33 -07002040 struct rtable *rt;
David S. Miller14e50e52007-05-24 18:17:54 -07002041
David S. Millerf5b0a872012-07-19 12:31:33 -07002042 rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
David S. Miller14e50e52007-05-24 18:17:54 -07002043 if (rt) {
Changli Gaod8d1f302010-06-10 23:31:35 -07002044 struct dst_entry *new = &rt->dst;
David S. Miller14e50e52007-05-24 18:17:54 -07002045
David S. Miller14e50e52007-05-24 18:17:54 -07002046 new->__use = 1;
Herbert Xu352e5122007-11-13 21:34:06 -08002047 new->input = dst_discard;
2048 new->output = dst_discard;
David S. Miller14e50e52007-05-24 18:17:54 -07002049
Changli Gaod8d1f302010-06-10 23:31:35 -07002050 new->dev = ort->dst.dev;
David S. Miller14e50e52007-05-24 18:17:54 -07002051 if (new->dev)
2052 dev_hold(new->dev);
2053
David S. Miller9917e1e82012-07-17 14:44:26 -07002054 rt->rt_is_input = ort->rt_is_input;
David S. Miller5e2b61f2011-03-04 21:47:09 -08002055 rt->rt_iif = ort->rt_iif;
David S. Miller59436342012-07-10 06:58:42 -07002056 rt->rt_pmtu = ort->rt_pmtu;
David S. Miller14e50e52007-05-24 18:17:54 -07002057
Denis V. Luneve84f84f2008-07-05 19:04:32 -07002058 rt->rt_genid = rt_genid(net);
David S. Miller14e50e52007-05-24 18:17:54 -07002059 rt->rt_flags = ort->rt_flags;
2060 rt->rt_type = ort->rt_type;
David S. Miller14e50e52007-05-24 18:17:54 -07002061 rt->rt_gateway = ort->rt_gateway;
David S. Miller14e50e52007-05-24 18:17:54 -07002062
2063 dst_free(new);
2064 }
2065
David S. Miller2774c132011-03-01 14:59:04 -08002066 dst_release(dst_orig);
2067
2068 return rt ? &rt->dst : ERR_PTR(-ENOMEM);
David S. Miller14e50e52007-05-24 18:17:54 -07002069}
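/*
 * ipv4_blackhole_route() above turns an existing route into a
 * "blackhole" dst: both input and output point at dst_discard, the
 * blackhole dst_ops are all no-ops, and only the fields a holder of the
 * dst may still look at (device, iif, pmtu, flags, type, gateway) are
 * copied from the original, which is then released.  This lets a
 * caller, for example the xfrm code while an IPsec state is still being
 * resolved, hand out a dst that silently drops packets instead of
 * failing the lookup.
 */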
2070
David S. Miller9d6ec932011-03-12 01:12:47 -05002071struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
David S. Millerb23dd4f2011-03-02 14:31:35 -08002072 struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073{
David S. Miller9d6ec932011-03-12 01:12:47 -05002074 struct rtable *rt = __ip_route_output_key(net, flp4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075
David S. Millerb23dd4f2011-03-02 14:31:35 -08002076 if (IS_ERR(rt))
2077 return rt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078
David S. Miller56157872011-05-02 14:37:45 -07002079 if (flp4->flowi4_proto)
David S. Miller9d6ec932011-03-12 01:12:47 -05002080 rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
2081 flowi4_to_flowi(flp4),
2082 sk, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083
David S. Millerb23dd4f2011-03-02 14:31:35 -08002084 return rt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085}
Arnaldo Carvalho de Melod8c97a92005-08-09 20:12:12 -07002086EXPORT_SYMBOL_GPL(ip_route_output_flow);
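/*
 * ip_route_output_flow() is the lookup used when the flow may be
 * subject to transformation: after the plain __ip_route_output_key()
 * lookup it hands the result to xfrm_lookup() whenever a transport
 * protocol is set in the flow, so that a matching IPsec policy can
 * replace the route with a transformed dst.
 */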
2087
David S. Millerf1ce3062012-07-12 10:10:17 -07002088static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2089 struct flowi4 *fl4, struct sk_buff *skb, u32 pid,
2090 u32 seq, int event, int nowait, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091{
Eric Dumazet511c3f92009-06-02 05:14:27 +00002092 struct rtable *rt = skb_rtable(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093 struct rtmsg *r;
Thomas Grafbe403ea2006-08-17 18:15:17 -07002094 struct nlmsghdr *nlh;
Steffen Klassert2bc8ca42011-10-11 01:12:02 +00002095 unsigned long expires = 0;
David S. Millerf1850712012-07-10 07:26:01 -07002096 u32 error;
Julian Anastasov521f5492012-07-20 12:02:08 +03002097 u32 metrics[RTAX_MAX];
Thomas Grafbe403ea2006-08-17 18:15:17 -07002098
2099 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
2100 if (nlh == NULL)
Patrick McHardy26932562007-01-31 23:16:40 -08002101 return -EMSGSIZE;
Thomas Grafbe403ea2006-08-17 18:15:17 -07002102
2103 r = nlmsg_data(nlh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104 r->rtm_family = AF_INET;
2105 r->rtm_dst_len = 32;
2106 r->rtm_src_len = 0;
David Millerd6c0a4f2012-07-01 02:02:59 +00002107 r->rtm_tos = fl4->flowi4_tos;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108 r->rtm_table = RT_TABLE_MAIN;
David S. Millerf3756b72012-04-01 20:39:02 -04002109 if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN))
2110 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111 r->rtm_type = rt->rt_type;
2112 r->rtm_scope = RT_SCOPE_UNIVERSE;
2113 r->rtm_protocol = RTPROT_UNSPEC;
2114 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2115 if (rt->rt_flags & RTCF_NOTIFY)
2116 r->rtm_flags |= RTM_F_NOTIFY;
Thomas Grafbe403ea2006-08-17 18:15:17 -07002117
David S. Millerf1ce3062012-07-12 10:10:17 -07002118 if (nla_put_be32(skb, RTA_DST, dst))
David S. Millerf3756b72012-04-01 20:39:02 -04002119 goto nla_put_failure;
David Miller1a00fee2012-07-01 02:02:56 +00002120 if (src) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121 r->rtm_src_len = 32;
David Miller1a00fee2012-07-01 02:02:56 +00002122 if (nla_put_be32(skb, RTA_SRC, src))
David S. Millerf3756b72012-04-01 20:39:02 -04002123 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124 }
David S. Millerf3756b72012-04-01 20:39:02 -04002125 if (rt->dst.dev &&
2126 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2127 goto nla_put_failure;
Patrick McHardyc7066f72011-01-14 13:36:42 +01002128#ifdef CONFIG_IP_ROUTE_CLASSID
David S. Millerf3756b72012-04-01 20:39:02 -04002129 if (rt->dst.tclassid &&
2130 nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2131 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132#endif
David S. Miller41347dc2012-06-28 04:05:27 -07002133 if (!rt_is_input_route(rt) &&
David Millerd6c0a4f2012-07-01 02:02:59 +00002134 fl4->saddr != src) {
2135 if (nla_put_be32(skb, RTA_PREFSRC, fl4->saddr))
David S. Millerf3756b72012-04-01 20:39:02 -04002136 goto nla_put_failure;
2137 }
David S. Millerf8126f12012-07-13 05:03:45 -07002138 if (rt->rt_gateway &&
David S. Millerf3756b72012-04-01 20:39:02 -04002139 nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway))
2140 goto nla_put_failure;
Thomas Grafbe403ea2006-08-17 18:15:17 -07002141
Julian Anastasov521f5492012-07-20 12:02:08 +03002142 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
2143 if (rt->rt_pmtu)
2144 metrics[RTAX_MTU - 1] = rt->rt_pmtu;
2145 if (rtnetlink_put_metrics(skb, metrics) < 0)
Thomas Grafbe403ea2006-08-17 18:15:17 -07002146 goto nla_put_failure;
2147
David Millerb4869882012-07-01 02:03:01 +00002148 if (fl4->flowi4_mark &&
2149 nla_put_be32(skb, RTA_MARK, fl4->flowi4_mark))
David S. Millerf3756b72012-04-01 20:39:02 -04002150 goto nla_put_failure;
Eric Dumazet963bfee2010-07-20 22:03:14 +00002151
Changli Gaod8d1f302010-06-10 23:31:35 -07002152 error = rt->dst.error;
David S. Miller59436342012-07-10 06:58:42 -07002153 expires = rt->dst.expires;
2154 if (expires) {
2155 if (time_before(jiffies, expires))
2156 expires -= jiffies;
2157 else
2158 expires = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159 }
Thomas Grafbe403ea2006-08-17 18:15:17 -07002160
David S. Millerc7537962010-11-11 17:07:48 -08002161 if (rt_is_input_route(rt)) {
David S. Millerf1ce3062012-07-12 10:10:17 -07002162 if (nla_put_u32(skb, RTA_IIF, rt->rt_iif))
2163 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164 }
2165
David S. Millerf1850712012-07-10 07:26:01 -07002166 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
Thomas Grafe3703b32006-11-27 09:27:07 -08002167 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168
Thomas Grafbe403ea2006-08-17 18:15:17 -07002169 return nlmsg_end(skb, nlh);
2170
2171nla_put_failure:
Patrick McHardy26932562007-01-31 23:16:40 -08002172 nlmsg_cancel(skb, nlh);
2173 return -EMSGSIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174}
2175
Daniel Baluta5e73ea12012-04-15 01:34:41 +00002176static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002177{
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002178 struct net *net = sock_net(in_skb->sk);
Thomas Grafd889ce32006-08-17 18:15:44 -07002179 struct rtmsg *rtm;
2180 struct nlattr *tb[RTA_MAX+1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181 struct rtable *rt = NULL;
David Millerd6c0a4f2012-07-01 02:02:59 +00002182 struct flowi4 fl4;
Al Viro9e12bb22006-09-26 21:25:20 -07002183 __be32 dst = 0;
2184 __be32 src = 0;
2185 u32 iif;
Thomas Grafd889ce32006-08-17 18:15:44 -07002186 int err;
Eric Dumazet963bfee2010-07-20 22:03:14 +00002187 int mark;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188 struct sk_buff *skb;
2189
Thomas Grafd889ce32006-08-17 18:15:44 -07002190 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
2191 if (err < 0)
2192 goto errout;
2193
2194 rtm = nlmsg_data(nlh);
2195
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
Thomas Grafd889ce32006-08-17 18:15:44 -07002197 if (skb == NULL) {
2198 err = -ENOBUFS;
2199 goto errout;
2200 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201
 2202	/* Reserve room for dummy headers; this skb can pass
 2203	   through a good chunk of the routing engine.
2204 */
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07002205 skb_reset_mac_header(skb);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07002206 skb_reset_network_header(skb);
Stephen Hemmingerd2c962b2006-04-17 17:27:11 -07002207
2208 /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07002209 ip_hdr(skb)->protocol = IPPROTO_ICMP;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2211
Al Viro17fb2c62006-09-26 22:15:25 -07002212 src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
2213 dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
Thomas Grafd889ce32006-08-17 18:15:44 -07002214 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
Eric Dumazet963bfee2010-07-20 22:03:14 +00002215 mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216
David Millerd6c0a4f2012-07-01 02:02:59 +00002217 memset(&fl4, 0, sizeof(fl4));
2218 fl4.daddr = dst;
2219 fl4.saddr = src;
2220 fl4.flowi4_tos = rtm->rtm_tos;
2221 fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
2222 fl4.flowi4_mark = mark;
2223
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224 if (iif) {
Thomas Grafd889ce32006-08-17 18:15:44 -07002225 struct net_device *dev;
2226
Denis V. Lunev19375042008-02-28 20:52:04 -08002227 dev = __dev_get_by_index(net, iif);
Thomas Grafd889ce32006-08-17 18:15:44 -07002228 if (dev == NULL) {
2229 err = -ENODEV;
2230 goto errout_free;
2231 }
2232
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233 skb->protocol = htons(ETH_P_IP);
2234 skb->dev = dev;
Eric Dumazet963bfee2010-07-20 22:03:14 +00002235 skb->mark = mark;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236 local_bh_disable();
2237 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
2238 local_bh_enable();
Thomas Grafd889ce32006-08-17 18:15:44 -07002239
Eric Dumazet511c3f92009-06-02 05:14:27 +00002240 rt = skb_rtable(skb);
Changli Gaod8d1f302010-06-10 23:31:35 -07002241 if (err == 0 && rt->dst.error)
2242 err = -rt->dst.error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243 } else {
David S. Miller9d6ec932011-03-12 01:12:47 -05002244 rt = ip_route_output_key(net, &fl4);
David S. Millerb23dd4f2011-03-02 14:31:35 -08002245
2246 err = 0;
2247 if (IS_ERR(rt))
2248 err = PTR_ERR(rt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249 }
Thomas Grafd889ce32006-08-17 18:15:44 -07002250
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251 if (err)
Thomas Grafd889ce32006-08-17 18:15:44 -07002252 goto errout_free;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002253
Changli Gaod8d1f302010-06-10 23:31:35 -07002254 skb_dst_set(skb, &rt->dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255 if (rtm->rtm_flags & RTM_F_NOTIFY)
2256 rt->rt_flags |= RTCF_NOTIFY;
2257
David S. Millerf1ce3062012-07-12 10:10:17 -07002258 err = rt_fill_info(net, dst, src, &fl4, skb,
David Miller1a00fee2012-07-01 02:02:56 +00002259 NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
Denis V. Lunev19375042008-02-28 20:52:04 -08002260 RTM_NEWROUTE, 0, 0);
Thomas Grafd889ce32006-08-17 18:15:44 -07002261 if (err <= 0)
2262 goto errout_free;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263
Denis V. Lunev19375042008-02-28 20:52:04 -08002264 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
Thomas Grafd889ce32006-08-17 18:15:44 -07002265errout:
Thomas Graf2942e902006-08-15 00:30:25 -07002266 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267
Thomas Grafd889ce32006-08-17 18:15:44 -07002268errout_free:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269 kfree_skb(skb);
Thomas Grafd889ce32006-08-17 18:15:44 -07002270 goto errout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271}
2272
2273int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
2274{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275 return skb->len;
2276}
2277
2278void ip_rt_multicast_event(struct in_device *in_dev)
2279{
Denis V. Lunev76e6ebf2008-07-05 19:00:44 -07002280 rt_cache_flush(dev_net(in_dev->dev), 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002281}
2282
2283#ifdef CONFIG_SYSCTL
Denis V. Lunev81c684d2008-07-08 03:05:28 -07002284static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07002285 void __user *buffer,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286 size_t *lenp, loff_t *ppos)
2287{
2288 if (write) {
Denis V. Lunev639e1042008-07-05 19:02:06 -07002289 int flush_delay;
Denis V. Lunev81c684d2008-07-08 03:05:28 -07002290 ctl_table ctl;
Denis V. Lunev39a23e72008-07-05 19:02:33 -07002291 struct net *net;
Denis V. Lunev639e1042008-07-05 19:02:06 -07002292
Denis V. Lunev81c684d2008-07-08 03:05:28 -07002293 memcpy(&ctl, __ctl, sizeof(ctl));
2294 ctl.data = &flush_delay;
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07002295 proc_dointvec(&ctl, write, buffer, lenp, ppos);
Denis V. Lunev639e1042008-07-05 19:02:06 -07002296
Denis V. Lunev81c684d2008-07-08 03:05:28 -07002297 net = (struct net *)__ctl->extra1;
Denis V. Lunev39a23e72008-07-05 19:02:33 -07002298 rt_cache_flush(net, flush_delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299 return 0;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09002300 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301
2302 return -EINVAL;
2303}
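/*
 * The handler above backs the write-only "flush" entry registered
 * further down under net/ipv4/route.  Writing an integer to it, e.g.
 * "echo -1 > /proc/sys/net/ipv4/route/flush" with procfs mounted in the
 * usual place, triggers a cache flush; the written value is passed to
 * rt_cache_flush() as the flush delay.
 */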
2304
static ctl_table ipv4_route_table[] = {
	{
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/* Deprecated. Use gc_min_interval_ms */

		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mtu_expires",
		.data		= &ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_pmtu",
		.data		= &ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

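/*
 * Per-namespace "flush" entry: writing an integer delay to
 * /proc/sys/net/ipv4/route/flush invokes ipv4_sysctl_rtcache_flush(),
 * which finds the owning namespace through ->extra1 and calls
 * rt_cache_flush().  The entry is write-only (mode 0200) and has no
 * backing .data of its own.
 */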
static struct ctl_table ipv4_route_flush_table[] = {
	{
		.procname	= "flush",
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= ipv4_sysctl_rtcache_flush,
	},
	{ },
};

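/*
 * Each network namespace registers its own copy of the flush table so
 * that tbl[0].extra1 can point back at that namespace; only init_net
 * uses the static table directly.
 */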
static __net_init int sysctl_route_net_init(struct net *net)
{
	struct ctl_table *tbl;

	tbl = ipv4_route_flush_table;
	if (!net_eq(net, &init_net)) {
		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
		if (tbl == NULL)
			goto err_dup;
	}
	tbl[0].extra1 = net;

	net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
	if (net->ipv4.route_hdr == NULL)
		goto err_reg;
	return 0;

err_reg:
	if (tbl != ipv4_route_flush_table)
		kfree(tbl);
err_dup:
	return -ENOMEM;
}

static __net_exit void sysctl_route_net_exit(struct net *net)
{
	struct ctl_table *tbl;

	tbl = net->ipv4.route_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.route_hdr);
	BUG_ON(tbl == ipv4_route_flush_table);
	kfree(tbl);
}

static __net_initdata struct pernet_operations sysctl_route_ops = {
	.init = sysctl_route_net_init,
	.exit = sysctl_route_net_exit,
};
#endif

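/*
 * Give every new namespace random initial values for its route and
 * device-address generation counters (rt_genid, dev_addr_genid).
 * rt_genid is bumped when the cache is flushed (see rt_cache_flush()
 * above) so that stale cached routes can be detected and discarded.
 */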
static __net_init int rt_genid_init(struct net *net)
{
	get_random_bytes(&net->ipv4.rt_genid,
			 sizeof(net->ipv4.rt_genid));
	get_random_bytes(&net->ipv4.dev_addr_genid,
			 sizeof(net->ipv4.dev_addr_genid));
	return 0;
}

static __net_initdata struct pernet_operations rt_genid_ops = {
	.init = rt_genid_init,
};

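/*
 * Per-namespace inet_peer storage: allocate an inet_peer_base when the
 * namespace is created, and invalidate and free the whole peer tree on
 * teardown.
 */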
static int __net_init ipv4_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv4.peers = bp;
	return 0;
}

static void __net_exit ipv4_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv4.peers;

	net->ipv4.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}

static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
	.init = ipv4_inetpeer_init,
	.exit = ipv4_inetpeer_exit,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_IP_ROUTE_CLASSID */

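/*
 * Boot-time initialisation: set up the dst slab cache and entry
 * counters, effectively disable the dst gc limits (gc_thresh = ~0,
 * max_size = INT_MAX), initialise the devinet/FIB/proc/netlink hooks
 * and register the per-namespace operations defined above.
 */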
int __init ip_rt_init(void)
{
	int rc = 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	if (dst_entries_init(&ipv4_dst_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_ops counter\n");

	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");

	ipv4_dst_ops.gc_thresh = ~0;
	ip_rt_max_size = INT_MAX;

	devinet_init();
	ip_fib_init();

	if (ip_rt_proc_init())
		pr_err("Unable to create route proc files\n");
#ifdef CONFIG_XFRM
	xfrm_init();
	xfrm4_init(ip_rt_max_size);
#endif
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);

#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
	register_pernet_subsys(&rt_genid_ops);
	register_pernet_subsys(&ipv4_inetpeer_ops);
	return rc;
}

#ifdef CONFIG_SYSCTL
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
void __init ip_static_sysctl_init(void)
{
	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
}
#endif
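
/*
 * Illustrative sketch, not part of route.c: the per-namespace pattern
 * used above by sysctl_route_ops, rt_genid_ops and ipv4_inetpeer_ops.
 * The names example_net_init/example_net_exit/example_ops below are
 * hypothetical and the block is kept under #if 0 so it is never built.
 */
#if 0
static int __net_init example_net_init(struct net *net)
{
	/* allocate or seed per-namespace state here */
	return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
	/* tear down the per-namespace state allocated in ->init */
}

static struct pernet_operations example_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
};

/*
 * Registered once at boot from an __init function, after which ->init
 * runs for every existing and future namespace and ->exit on teardown:
 *
 *	register_pernet_subsys(&example_ops);
 */
#endif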