author    Pablo Neira Ayuso <pablo@netfilter.org>    2012-09-03 15:28:30 +0200
committer Pablo Neira Ayuso <pablo@netfilter.org>    2012-09-03 15:34:51 +0200
commit    ace1fe1231bdfffd60b5e703aa5b7283fbf98dbd (patch)
tree      06c7492a8f3cc65f916768616ca24c6bc7171761 /net
parent    ce9f3f31efb88841e4df98794b13dbac8c4901da (diff)
parent    a2dc375e12334b3d8f787a48b2fb6172ccfb80ae (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
This merges commit 3f509c6 ("netfilter: nf_nat_sip: fix incorrect handling of EBUSY for RTCP expectation") into Patrick McHardy's IPv6 NAT changes.
Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_core.c | 6
-rw-r--r--  net/appletalk/atalk_proc.c | 3
-rw-r--r--  net/atm/resources.c | 2
-rw-r--r--  net/ax25/ax25_uid.c | 21
-rw-r--r--  net/batman-adv/bat_iv_ogm.c | 96
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c | 214
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.h | 11
-rw-r--r--  net/batman-adv/debugfs.c | 12
-rw-r--r--  net/batman-adv/gateway_client.c | 53
-rw-r--r--  net/batman-adv/hard-interface.c | 13
-rw-r--r--  net/batman-adv/main.c | 27
-rw-r--r--  net/batman-adv/main.h | 29
-rw-r--r--  net/batman-adv/packet.h | 35
-rw-r--r--  net/batman-adv/routing.c | 85
-rw-r--r--  net/batman-adv/send.c | 8
-rw-r--r--  net/batman-adv/soft-interface.c | 79
-rw-r--r--  net/batman-adv/soft-interface.h | 5
-rw-r--r--  net/batman-adv/translation-table.c | 416
-rw-r--r--  net/batman-adv/translation-table.h | 4
-rw-r--r--  net/batman-adv/types.h | 120
-rw-r--r--  net/batman-adv/unicast.c | 16
-rw-r--r--  net/batman-adv/vis.c | 144
-rw-r--r--  net/batman-adv/vis.h | 2
-rw-r--r--  net/ceph/ceph_common.c | 1
-rw-r--r--  net/ceph/debugfs.c | 4
-rw-r--r--  net/ceph/messenger.c | 11
-rw-r--r--  net/ceph/mon_client.c | 51
-rw-r--r--  net/core/dev.c | 24
-rw-r--r--  net/core/fib_rules.c | 3
-rw-r--r--  net/core/link_watch.c | 8
-rw-r--r--  net/core/netpoll.c | 13
-rw-r--r--  net/core/request_sock.c | 95
-rw-r--r--  net/core/scm.c | 31
-rw-r--r--  net/core/sock.c | 12
-rw-r--r--  net/decnet/af_decnet.c | 4
-rw-r--r--  net/ieee802154/6lowpan.c | 53
-rw-r--r--  net/ipv4/af_inet.c | 28
-rw-r--r--  net/ipv4/devinet.c | 6
-rw-r--r--  net/ipv4/fib_frontend.c | 7
-rw-r--r--  net/ipv4/inet_connection_sock.c | 57
-rw-r--r--  net/ipv4/inet_diag.c | 21
-rw-r--r--  net/ipv4/ipmr.c | 14
-rw-r--r--  net/ipv4/ping.c | 22
-rw-r--r--  net/ipv4/proc.c | 4
-rw-r--r--  net/ipv4/raw.c | 4
-rw-r--r--  net/ipv4/route.c | 11
-rw-r--r--  net/ipv4/syncookies.c | 1
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 87
-rw-r--r--  net/ipv4/tcp.c | 49
-rw-r--r--  net/ipv4/tcp_fastopen.c | 83
-rw-r--r--  net/ipv4/tcp_input.c | 90
-rw-r--r--  net/ipv4/tcp_ipv4.c | 275
-rw-r--r--  net/ipv4/tcp_minisocks.c | 61
-rw-r--r--  net/ipv4/tcp_output.c | 21
-rw-r--r--  net/ipv4/tcp_timer.c | 39
-rw-r--r--  net/ipv4/udp.c | 4
-rw-r--r--  net/ipv4/udp_diag.c | 5
-rw-r--r--  net/ipv6/addrconf.c | 35
-rw-r--r--  net/ipv6/ip6_flowlabel.c | 47
-rw-r--r--  net/ipv6/raw.c | 3
-rw-r--r--  net/ipv6/syncookies.c | 1
-rw-r--r--  net/ipv6/tcp_ipv6.c | 11
-rw-r--r--  net/ipv6/udp.c | 3
-rw-r--r--  net/ipx/ipx_proc.c | 3
-rw-r--r--  net/key/af_key.c | 2
-rw-r--r--  net/l2tp/l2tp_core.c | 3
-rw-r--r--  net/l2tp/l2tp_core.h | 1
-rw-r--r--  net/llc/llc_proc.c | 2
-rw-r--r--  net/mac80211/aes_cmac.c | 6
-rw-r--r--  net/mac80211/cfg.c | 66
-rw-r--r--  net/mac80211/debugfs.c | 32
-rw-r--r--  net/mac80211/driver-ops.h | 11
-rw-r--r--  net/mac80211/ibss.c | 15
-rw-r--r--  net/mac80211/ieee80211_i.h | 30
-rw-r--r--  net/mac80211/iface.c | 289
-rw-r--r--  net/mac80211/main.c | 21
-rw-r--r--  net/mac80211/mesh.c | 28
-rw-r--r--  net/mac80211/mesh.h | 3
-rw-r--r--  net/mac80211/mesh_hwmp.c | 2
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 44
-rw-r--r--  net/mac80211/mesh_plink.c | 38
-rw-r--r--  net/mac80211/mlme.c | 240
-rw-r--r--  net/mac80211/offchannel.c | 6
-rw-r--r--  net/mac80211/rate.h | 2
-rw-r--r--  net/mac80211/rx.c | 58
-rw-r--r--  net/mac80211/scan.c | 12
-rw-r--r--  net/mac80211/status.c | 22
-rw-r--r--  net/mac80211/trace.h | 11
-rw-r--r--  net/mac80211/tx.c | 109
-rw-r--r--  net/mac80211/util.c | 57
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 4
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 16
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 3
-rw-r--r--  net/netfilter/nf_nat_sip.c | 5
-rw-r--r--  net/netfilter/nfnetlink_log.c | 20
-rw-r--r--  net/netfilter/xt_LOG.c | 16
-rw-r--r--  net/netfilter/xt_owner.c | 30
-rw-r--r--  net/netfilter/xt_recent.c | 13
-rw-r--r--  net/netlink/af_netlink.c | 10
-rw-r--r--  net/openvswitch/flow.c | 10
-rw-r--r--  net/packet/af_packet.c | 4
-rw-r--r--  net/phonet/socket.c | 6
-rw-r--r--  net/rfkill/core.c | 14
-rw-r--r--  net/sched/cls_api.c | 2
-rw-r--r--  net/sched/cls_basic.c | 3
-rw-r--r--  net/sched/cls_cgroup.c | 3
-rw-r--r--  net/sched/cls_flow.c | 19
-rw-r--r--  net/sched/cls_fw.c | 3
-rw-r--r--  net/sched/cls_route.c | 3
-rw-r--r--  net/sched/cls_rsvp.h | 3
-rw-r--r--  net/sched/cls_tcindex.c | 3
-rw-r--r--  net/sched/cls_u32.c | 3
-rw-r--r--  net/sctp/proc.c | 6
-rw-r--r--  net/unix/af_unix.c | 12
-rw-r--r--  net/wireless/chan.c | 7
-rw-r--r--  net/wireless/core.c | 53
-rw-r--r--  net/wireless/mlme.c | 10
-rw-r--r--  net/wireless/nl80211.c | 122
-rw-r--r--  net/wireless/radiotap.c | 2
-rw-r--r--  net/wireless/util.c | 36
120 files changed, 2941 insertions(+), 1313 deletions(-)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 8ca533c95de..b258da88f67 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -368,3 +368,9 @@ void vlan_vids_del_by_dev(struct net_device *dev,
vlan_vid_del(dev, vid_info->vid);
}
EXPORT_SYMBOL(vlan_vids_del_by_dev);
+
+bool vlan_uses_dev(const struct net_device *dev)
+{
+ return rtnl_dereference(dev->vlan_info) ? true : false;
+}
+EXPORT_SYMBOL(vlan_uses_dev);
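
The hunk above exports a new helper, vlan_uses_dev(), so code outside the 8021q module can ask whether a device currently has VLAN devices stacked on it. A minimal caller sketch (hypothetical function name, not part of this patch; vlan_uses_dev() reads dev->vlan_info via rtnl_dereference(), so RTNL must be held):

/* Hypothetical caller -- illustrative only.  vlan_uses_dev() reads
 * dev->vlan_info under rtnl_dereference(), so RTNL must be held.
 */
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>

static int example_reconfigure(struct net_device *dev)
{
        ASSERT_RTNL();

        if (vlan_uses_dev(dev))
                return -EBUSY;  /* refuse while VLAN devices sit on top */

        /* ... safe to reconfigure the device here ... */
        return 0;
}
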
diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
index b5b1a221c24..c30f3a0717f 100644
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -183,7 +183,8 @@ static int atalk_seq_socket_show(struct seq_file *seq, void *v)
ntohs(at->dest_net), at->dest_node, at->dest_port,
sk_wmem_alloc_get(s),
sk_rmem_alloc_get(s),
- s->sk_state, SOCK_INODE(s->sk_socket)->i_uid);
+ s->sk_state,
+ from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)));
out:
return 0;
}
diff --git a/net/atm/resources.c b/net/atm/resources.c
index 23f45ce6f35..0447d5d0b63 100644
--- a/net/atm/resources.c
+++ b/net/atm/resources.c
@@ -432,7 +432,7 @@ int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat)
size = dev->ops->ioctl(dev, cmd, buf);
}
if (size < 0) {
- error = (size == -ENOIOCTLCMD ? -EINVAL : size);
+ error = (size == -ENOIOCTLCMD ? -ENOTTY : size);
goto done;
}
}
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index e3c579ba632..957999e43ff 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -51,14 +51,14 @@ int ax25_uid_policy;
EXPORT_SYMBOL(ax25_uid_policy);
-ax25_uid_assoc *ax25_findbyuid(uid_t uid)
+ax25_uid_assoc *ax25_findbyuid(kuid_t uid)
{
ax25_uid_assoc *ax25_uid, *res = NULL;
struct hlist_node *node;
read_lock(&ax25_uid_lock);
ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
- if (ax25_uid->uid == uid) {
+ if (uid_eq(ax25_uid->uid, uid)) {
ax25_uid_hold(ax25_uid);
res = ax25_uid;
break;
@@ -84,7 +84,7 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
read_lock(&ax25_uid_lock);
ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0) {
- res = ax25_uid->uid;
+ res = from_kuid_munged(current_user_ns(), ax25_uid->uid);
break;
}
}
@@ -93,9 +93,14 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
return res;
case SIOCAX25ADDUID:
+ {
+ kuid_t sax25_kuid;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- user = ax25_findbyuid(sax->sax25_uid);
+ sax25_kuid = make_kuid(current_user_ns(), sax->sax25_uid);
+ if (!uid_valid(sax25_kuid))
+ return -EINVAL;
+ user = ax25_findbyuid(sax25_kuid);
if (user) {
ax25_uid_put(user);
return -EEXIST;
@@ -106,7 +111,7 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
return -ENOMEM;
atomic_set(&ax25_uid->refcount, 1);
- ax25_uid->uid = sax->sax25_uid;
+ ax25_uid->uid = sax25_kuid;
ax25_uid->call = sax->sax25_call;
write_lock(&ax25_uid_lock);
@@ -114,7 +119,7 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
write_unlock(&ax25_uid_lock);
return 0;
-
+ }
case SIOCAX25DELUID:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
@@ -172,7 +177,9 @@ static int ax25_uid_seq_show(struct seq_file *seq, void *v)
struct ax25_uid_assoc *pt;
pt = hlist_entry(v, struct ax25_uid_assoc, uid_node);
- seq_printf(seq, "%6d %s\n", pt->uid, ax2asc(buf, &pt->call));
+ seq_printf(seq, "%6d %s\n",
+ from_kuid_munged(seq_user_ns(seq), pt->uid),
+ ax2asc(buf, &pt->call));
}
return 0;
}
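
The ax25 hunks follow the generic kuid_t conversion pattern: a uid_t from userspace is mapped into a kernel-internal kuid_t with make_kuid() and rejected if it has no mapping in the caller's namespace, while values reported back out are converted with from_kuid_munged(). A self-contained sketch of that round trip (illustrative names, not part of the patch):

/* Illustrative round trip for the kuid_t pattern used above. */
#include <linux/uidgid.h>
#include <linux/cred.h>         /* current_user_ns() */

static int store_uid(kuid_t *slot, uid_t user_value)
{
        kuid_t kuid = make_kuid(current_user_ns(), user_value);

        if (!uid_valid(kuid))   /* no mapping in the caller's namespace */
                return -EINVAL;
        *slot = kuid;           /* only the kernel-internal kuid_t is stored */
        return 0;
}

static uid_t report_uid(kuid_t kuid)
{
        /* munged variant falls back to the overflow uid if unmappable */
        return from_kuid_munged(current_user_ns(), kuid);
}
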
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index e877af8bdd1..df79300dcb7 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -166,13 +166,15 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
int16_t buff_pos;
struct batadv_ogm_packet *batadv_ogm_packet;
struct sk_buff *skb;
+ uint8_t *packet_pos;
if (hard_iface->if_status != BATADV_IF_ACTIVE)
return;
packet_num = 0;
buff_pos = 0;
- batadv_ogm_packet = (struct batadv_ogm_packet *)forw_packet->skb->data;
+ packet_pos = forw_packet->skb->data;
+ batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
/* adjust all flags and log packets */
while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
@@ -181,15 +183,17 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
/* we might have aggregated direct link packets with an
* ordinary base packet
*/
- if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
- (forw_packet->if_incoming == hard_iface))
+ if (forw_packet->direct_link_flags & BIT(packet_num) &&
+ forw_packet->if_incoming == hard_iface)
batadv_ogm_packet->flags |= BATADV_DIRECTLINK;
else
batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK;
- fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
- "Sending own" :
- "Forwarding"));
+ if (packet_num > 0 || !forw_packet->own)
+ fwd_str = "Forwarding";
+ else
+ fwd_str = "Sending own";
+
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
fwd_str, (packet_num > 0 ? "aggregated " : ""),
@@ -204,8 +208,8 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
buff_pos += BATADV_OGM_HLEN;
buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
packet_num++;
- batadv_ogm_packet = (struct batadv_ogm_packet *)
- (forw_packet->skb->data + buff_pos);
+ packet_pos = forw_packet->skb->data + buff_pos;
+ batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
}
/* create clone because function is called more than once */
@@ -227,9 +231,10 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
struct batadv_hard_iface *primary_if = NULL;
struct batadv_ogm_packet *batadv_ogm_packet;
unsigned char directlink;
+ uint8_t *packet_pos;
- batadv_ogm_packet = (struct batadv_ogm_packet *)
- (forw_packet->skb->data);
+ packet_pos = forw_packet->skb->data;
+ batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
directlink = (batadv_ogm_packet->flags & BATADV_DIRECTLINK ? 1 : 0);
if (!forw_packet->if_incoming) {
@@ -454,6 +459,7 @@ static void batadv_iv_ogm_aggregate(struct batadv_forw_packet *forw_packet_aggr,
int packet_len, bool direct_link)
{
unsigned char *skb_buff;
+ unsigned long new_direct_link_flag;
skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
memcpy(skb_buff, packet_buff, packet_len);
@@ -461,9 +467,10 @@ static void batadv_iv_ogm_aggregate(struct batadv_forw_packet *forw_packet_aggr,
forw_packet_aggr->num_packets++;
/* save packet direct link flag status */
- if (direct_link)
- forw_packet_aggr->direct_link_flags |=
- (1 << forw_packet_aggr->num_packets);
+ if (direct_link) {
+ new_direct_link_flag = BIT(forw_packet_aggr->num_packets);
+ forw_packet_aggr->direct_link_flags |= new_direct_link_flag;
+ }
}
static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv,
@@ -586,6 +593,8 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
struct batadv_ogm_packet *batadv_ogm_packet;
struct batadv_hard_iface *primary_if;
int vis_server, tt_num_changes = 0;
+ uint32_t seqno;
+ uint8_t bandwidth;
vis_server = atomic_read(&bat_priv->vis_mode);
primary_if = batadv_primary_if_get_selected(bat_priv);
@@ -599,12 +608,12 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
/* change sequence number to network order */
- batadv_ogm_packet->seqno =
- htonl((uint32_t)atomic_read(&hard_iface->seqno));
+ seqno = (uint32_t)atomic_read(&hard_iface->seqno);
+ batadv_ogm_packet->seqno = htonl(seqno);
atomic_inc(&hard_iface->seqno);
- batadv_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
- batadv_ogm_packet->tt_crc = htons(bat_priv->tt_crc);
+ batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
+ batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
if (tt_num_changes >= 0)
batadv_ogm_packet->tt_num_changes = tt_num_changes;
@@ -613,12 +622,13 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
else
batadv_ogm_packet->flags &= ~BATADV_VIS_SERVER;
- if ((hard_iface == primary_if) &&
- (atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_SERVER))
- batadv_ogm_packet->gw_flags =
- (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
- else
+ if (hard_iface == primary_if &&
+ atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_SERVER) {
+ bandwidth = (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
+ batadv_ogm_packet->gw_flags = bandwidth;
+ } else {
batadv_ogm_packet->gw_flags = BATADV_NO_FLAGS;
+ }
batadv_slide_own_bcast_window(hard_iface);
batadv_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
@@ -642,8 +652,9 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
struct batadv_neigh_node *router = NULL;
struct batadv_orig_node *orig_node_tmp;
struct hlist_node *node;
- uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
+ uint8_t sum_orig, sum_neigh;
uint8_t *neigh_addr;
+ uint8_t tq_avg;
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"update_originator(): Searching and updating originator entry of received packet\n");
@@ -667,8 +678,8 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
spin_lock_bh(&tmp_neigh_node->lq_update_lock);
batadv_ring_buffer_set(tmp_neigh_node->tq_recv,
&tmp_neigh_node->tq_index, 0);
- tmp_neigh_node->tq_avg =
- batadv_ring_buffer_avg(tmp_neigh_node->tq_recv);
+ tq_avg = batadv_ring_buffer_avg(tmp_neigh_node->tq_recv);
+ tmp_neigh_node->tq_avg = tq_avg;
spin_unlock_bh(&tmp_neigh_node->lq_update_lock);
}
@@ -727,17 +738,15 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
if (router && (neigh_node->tq_avg == router->tq_avg)) {
orig_node_tmp = router->orig_node;
spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
- bcast_own_sum_orig =
- orig_node_tmp->bcast_own_sum[if_incoming->if_num];
+ sum_orig = orig_node_tmp->bcast_own_sum[if_incoming->if_num];
spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
orig_node_tmp = neigh_node->orig_node;
spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
- bcast_own_sum_neigh =
- orig_node_tmp->bcast_own_sum[if_incoming->if_num];
+ sum_neigh = orig_node_tmp->bcast_own_sum[if_incoming->if_num];
spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
- if (bcast_own_sum_orig >= bcast_own_sum_neigh)
+ if (sum_orig >= sum_neigh)
goto update_tt;
}
@@ -835,8 +844,10 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
spin_unlock_bh(&orig_node->ogm_cnt_lock);
/* pay attention to not get a value bigger than 100 % */
- total_count = (orig_eq_count > neigh_rq_count ?
- neigh_rq_count : orig_eq_count);
+ if (orig_eq_count > neigh_rq_count)
+ total_count = neigh_rq_count;
+ else
+ total_count = orig_eq_count;
/* if we have too few packets (too less data) we set tq_own to zero
* if we receive too few packets it is not considered bidirectional
@@ -910,6 +921,7 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
int set_mark, ret = -1;
uint32_t seqno = ntohl(batadv_ogm_packet->seqno);
uint8_t *neigh_addr;
+ uint8_t packet_count;
orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig);
if (!orig_node)
@@ -944,9 +956,9 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
tmp_neigh_node->real_bits,
seq_diff, set_mark);
- tmp_neigh_node->real_packet_count =
- bitmap_weight(tmp_neigh_node->real_bits,
- BATADV_TQ_LOCAL_WINDOW_SIZE);
+ packet_count = bitmap_weight(tmp_neigh_node->real_bits,
+ BATADV_TQ_LOCAL_WINDOW_SIZE);
+ tmp_neigh_node->real_packet_count = packet_count;
}
rcu_read_unlock();
@@ -1163,9 +1175,12 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
/* if sender is a direct neighbor the sender mac equals
* originator mac
*/
- orig_neigh_node = (is_single_hop_neigh ?
- orig_node :
- batadv_get_orig_node(bat_priv, ethhdr->h_source));
+ if (is_single_hop_neigh)
+ orig_neigh_node = orig_node;
+ else
+ orig_neigh_node = batadv_get_orig_node(bat_priv,
+ ethhdr->h_source);
+
if (!orig_neigh_node)
goto out;
@@ -1251,6 +1266,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
int buff_pos = 0, packet_len;
unsigned char *tt_buff, *packet_buff;
bool ret;
+ uint8_t *packet_pos;
ret = batadv_check_management_packet(skb, if_incoming, BATADV_OGM_HLEN);
if (!ret)
@@ -1281,8 +1297,8 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
buff_pos += BATADV_OGM_HLEN;
buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
- batadv_ogm_packet = (struct batadv_ogm_packet *)
- (packet_buff + buff_pos);
+ packet_pos = packet_buff + buff_pos;
+ batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
} while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len,
batadv_ogm_packet->tt_num_changes));
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 6705d35b17c..0a9084ad19a 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -133,7 +133,7 @@ static void batadv_claim_free_ref(struct batadv_claim *claim)
static struct batadv_claim *batadv_claim_hash_find(struct batadv_priv *bat_priv,
struct batadv_claim *data)
{
- struct batadv_hashtable *hash = bat_priv->claim_hash;
+ struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
struct hlist_head *head;
struct hlist_node *node;
struct batadv_claim *claim;
@@ -174,7 +174,7 @@ static struct batadv_backbone_gw *
batadv_backbone_hash_find(struct batadv_priv *bat_priv,
uint8_t *addr, short vid)
{
- struct batadv_hashtable *hash = bat_priv->backbone_hash;
+ struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
struct hlist_head *head;
struct hlist_node *node;
struct batadv_backbone_gw search_entry, *backbone_gw;
@@ -218,7 +218,7 @@ batadv_bla_del_backbone_claims(struct batadv_backbone_gw *backbone_gw)
int i;
spinlock_t *list_lock; /* protects write access to the hash lists */
- hash = backbone_gw->bat_priv->claim_hash;
+ hash = backbone_gw->bat_priv->bla.claim_hash;
if (!hash)
return;
@@ -265,7 +265,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
if (!primary_if)
return;
- memcpy(&local_claim_dest, &bat_priv->claim_dest,
+ memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
sizeof(local_claim_dest));
local_claim_dest.type = claimtype;
@@ -281,7 +281,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
NULL,
/* Ethernet SRC/HW SRC: originator mac */
primary_if->net_dev->dev_addr,
- /* HW DST: FF:43:05:XX:00:00
+ /* HW DST: FF:43:05:XX:YY:YY
* with XX = claim type
* and YY:YY = group id
*/
@@ -295,7 +295,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
/* now we pretend that the client would have sent this ... */
switch (claimtype) {
- case BATADV_CLAIM_TYPE_ADD:
+ case BATADV_CLAIM_TYPE_CLAIM:
/* normal claim frame
* set Ethernet SRC to the clients mac
*/
@@ -303,7 +303,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
batadv_dbg(BATADV_DBG_BLA, bat_priv,
"bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
break;
- case BATADV_CLAIM_TYPE_DEL:
+ case BATADV_CLAIM_TYPE_UNCLAIM:
/* unclaim frame
* set HW SRC to the clients mac
*/
@@ -323,7 +323,8 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
break;
case BATADV_CLAIM_TYPE_REQUEST:
/* request frame
- * set HW SRC to the special mac containg the crc
+ * set HW SRC and header destination to the receiving backbone
+ * gws mac
*/
memcpy(hw_src, mac, ETH_ALEN);
memcpy(ethhdr->h_dest, mac, ETH_ALEN);
@@ -339,8 +340,9 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
skb_reset_mac_header(skb);
skb->protocol = eth_type_trans(skb, soft_iface);
- bat_priv->stats.rx_packets++;
- bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
+ batadv_inc_counter(bat_priv, BATADV_CNT_RX);
+ batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
+ skb->len + ETH_HLEN);
soft_iface->last_rx = jiffies;
netif_rx(skb);
@@ -389,7 +391,7 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
/* one for the hash, one for returning */
atomic_set(&entry->refcount, 2);
- hash_added = batadv_hash_add(bat_priv->backbone_hash,
+ hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
batadv_compare_backbone_gw,
batadv_choose_backbone_gw, entry,
&entry->hash_entry);
@@ -456,7 +458,7 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
if (!backbone_gw)
return;
- hash = bat_priv->claim_hash;
+ hash = bat_priv->bla.claim_hash;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -467,7 +469,7 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
continue;
batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
- BATADV_CLAIM_TYPE_ADD);
+ BATADV_CLAIM_TYPE_CLAIM);
}
rcu_read_unlock();
}
@@ -497,7 +499,7 @@ static void batadv_bla_send_request(struct batadv_backbone_gw *backbone_gw)
/* no local broadcasts should be sent or received, for now. */
if (!atomic_read(&backbone_gw->request_sent)) {
- atomic_inc(&backbone_gw->bat_priv->bla_num_requests);
+ atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
atomic_set(&backbone_gw->request_sent, 1);
}
}
@@ -557,7 +559,7 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
batadv_dbg(BATADV_DBG_BLA, bat_priv,
"bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
mac, vid);
- hash_added = batadv_hash_add(bat_priv->claim_hash,
+ hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
batadv_compare_claim,
batadv_choose_claim, claim,
&claim->hash_entry);
@@ -577,8 +579,7 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
"bla_add_claim(): changing ownership for %pM, vid %d\n",
mac, vid);
- claim->backbone_gw->crc ^=
- crc16(0, claim->addr, ETH_ALEN);
+ claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
batadv_backbone_gw_free_ref(claim->backbone_gw);
}
@@ -610,7 +611,7 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
mac, vid);
- batadv_hash_remove(bat_priv->claim_hash, batadv_compare_claim,
+ batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
batadv_choose_claim, claim);
batadv_claim_free_ref(claim); /* reference from the hash is gone */
@@ -657,7 +658,7 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv,
* we can allow traffic again.
*/
if (atomic_read(&backbone_gw->request_sent)) {
- atomic_dec(&backbone_gw->bat_priv->bla_num_requests);
+ atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
atomic_set(&backbone_gw->request_sent, 0);
}
}
@@ -702,7 +703,7 @@ static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
if (primary_if && batadv_compare_eth(backbone_addr,
primary_if->net_dev->dev_addr))
batadv_bla_send_claim(bat_priv, claim_addr, vid,
- BATADV_CLAIM_TYPE_DEL);
+ BATADV_CLAIM_TYPE_UNCLAIM);
backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);
@@ -738,7 +739,7 @@ static int batadv_handle_claim(struct batadv_priv *bat_priv,
batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
batadv_bla_send_claim(bat_priv, claim_addr, vid,
- BATADV_CLAIM_TYPE_ADD);
+ BATADV_CLAIM_TYPE_CLAIM);
/* TODO: we could call something like tt_local_del() here. */
@@ -772,7 +773,7 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
- bla_dst_own = &bat_priv->claim_dest;
+ bla_dst_own = &bat_priv->bla.claim_dest;
/* check if it is a claim packet in general */
if (memcmp(bla_dst->magic, bla_dst_own->magic,
@@ -783,12 +784,12 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
* otherwise assume it is in the hw_src
*/
switch (bla_dst->type) {
- case BATADV_CLAIM_TYPE_ADD:
+ case BATADV_CLAIM_TYPE_CLAIM:
backbone_addr = hw_src;
break;
case BATADV_CLAIM_TYPE_REQUEST:
case BATADV_CLAIM_TYPE_ANNOUNCE:
- case BATADV_CLAIM_TYPE_DEL:
+ case BATADV_CLAIM_TYPE_UNCLAIM:
backbone_addr = ethhdr->h_source;
break;
default:
@@ -904,12 +905,12 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
/* check for the different types of claim frames ... */
switch (bla_dst->type) {
- case BATADV_CLAIM_TYPE_ADD:
+ case BATADV_CLAIM_TYPE_CLAIM:
if (batadv_handle_claim(bat_priv, primary_if, hw_src,
ethhdr->h_source, vid))
return 1;
break;
- case BATADV_CLAIM_TYPE_DEL:
+ case BATADV_CLAIM_TYPE_UNCLAIM:
if (batadv_handle_unclaim(bat_priv, primary_if,
ethhdr->h_source, hw_src, vid))
return 1;
@@ -945,7 +946,7 @@ static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
spinlock_t *list_lock; /* protects write access to the hash lists */
int i;
- hash = bat_priv->backbone_hash;
+ hash = bat_priv->bla.backbone_hash;
if (!hash)
return;
@@ -969,7 +970,7 @@ static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
purge_now:
/* don't wait for the pending request anymore */
if (atomic_read(&backbone_gw->request_sent))
- atomic_dec(&bat_priv->bla_num_requests);
+ atomic_dec(&bat_priv->bla.num_requests);
batadv_bla_del_backbone_claims(backbone_gw);
@@ -999,7 +1000,7 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
struct batadv_hashtable *hash;
int i;
- hash = bat_priv->claim_hash;
+ hash = bat_priv->bla.claim_hash;
if (!hash)
return;
@@ -1046,11 +1047,12 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
struct hlist_node *node;
struct hlist_head *head;
struct batadv_hashtable *hash;
+ __be16 group;
int i;
/* reset bridge loop avoidance group id */
- bat_priv->claim_dest.group =
- htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
+ group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
+ bat_priv->bla.claim_dest.group = group;
if (!oldif) {
batadv_bla_purge_claims(bat_priv, NULL, 1);
@@ -1058,7 +1060,7 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
return;
}
- hash = bat_priv->backbone_hash;
+ hash = bat_priv->bla.backbone_hash;
if (!hash)
return;
@@ -1088,8 +1090,8 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
/* (re)start the timer */
static void batadv_bla_start_timer(struct batadv_priv *bat_priv)
{
- INIT_DELAYED_WORK(&bat_priv->bla_work, batadv_bla_periodic_work);
- queue_delayed_work(batadv_event_workqueue, &bat_priv->bla_work,
+ INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);
+ queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
}
@@ -1099,9 +1101,9 @@ static void batadv_bla_start_timer(struct batadv_priv *bat_priv)
*/
static void batadv_bla_periodic_work(struct work_struct *work)
{
- struct delayed_work *delayed_work =
- container_of(work, struct delayed_work, work);
+ struct delayed_work *delayed_work;
struct batadv_priv *bat_priv;
+ struct batadv_priv_bla *priv_bla;
struct hlist_node *node;
struct hlist_head *head;
struct batadv_backbone_gw *backbone_gw;
@@ -1109,7 +1111,9 @@ static void batadv_bla_periodic_work(struct work_struct *work)
struct batadv_hard_iface *primary_if;
int i;
- bat_priv = container_of(delayed_work, struct batadv_priv, bla_work);
+ delayed_work = container_of(work, struct delayed_work, work);
+ priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
+ bat_priv = container_of(priv_bla, struct batadv_priv, bla);
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
@@ -1120,7 +1124,7 @@ static void batadv_bla_periodic_work(struct work_struct *work)
if (!atomic_read(&bat_priv->bridge_loop_avoidance))
goto out;
- hash = bat_priv->backbone_hash;
+ hash = bat_priv->bla.backbone_hash;
if (!hash)
goto out;
@@ -1160,40 +1164,41 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
int i;
uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
struct batadv_hard_iface *primary_if;
+ uint16_t crc;
+ unsigned long entrytime;
batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");
/* setting claim destination address */
- memcpy(&bat_priv->claim_dest.magic, claim_dest, 3);
- bat_priv->claim_dest.type = 0;
+ memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
+ bat_priv->bla.claim_dest.type = 0;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (primary_if) {
- bat_priv->claim_dest.group =
- htons(crc16(0, primary_if->net_dev->dev_addr,
- ETH_ALEN));
+ crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
+ bat_priv->bla.claim_dest.group = htons(crc);
batadv_hardif_free_ref(primary_if);
} else {
- bat_priv->claim_dest.group = 0; /* will be set later */
+ bat_priv->bla.claim_dest.group = 0; /* will be set later */
}
/* initialize the duplicate list */
+ entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
- bat_priv->bcast_duplist[i].entrytime =
- jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
- bat_priv->bcast_duplist_curr = 0;
+ bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
+ bat_priv->bla.bcast_duplist_curr = 0;
- if (bat_priv->claim_hash)
+ if (bat_priv->bla.claim_hash)
return 0;
- bat_priv->claim_hash = batadv_hash_new(128);
- bat_priv->backbone_hash = batadv_hash_new(32);
+ bat_priv->bla.claim_hash = batadv_hash_new(128);
+ bat_priv->bla.backbone_hash = batadv_hash_new(32);
- if (!bat_priv->claim_hash || !bat_priv->backbone_hash)
+ if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
return -ENOMEM;
- batadv_hash_set_lock_class(bat_priv->claim_hash,
+ batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
&batadv_claim_hash_lock_class_key);
- batadv_hash_set_lock_class(bat_priv->backbone_hash,
+ batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
&batadv_backbone_hash_lock_class_key);
batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");
@@ -1234,8 +1239,9 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
crc = crc16(0, content, length);
for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
- curr = (bat_priv->bcast_duplist_curr + i) % BATADV_DUPLIST_SIZE;
- entry = &bat_priv->bcast_duplist[curr];
+ curr = (bat_priv->bla.bcast_duplist_curr + i);
+ curr %= BATADV_DUPLIST_SIZE;
+ entry = &bat_priv->bla.bcast_duplist[curr];
/* we can stop searching if the entry is too old ;
* later entries will be even older
@@ -1256,13 +1262,13 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
return 1;
}
/* not found, add a new entry (overwrite the oldest entry) */
- curr = (bat_priv->bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
+ curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
curr %= BATADV_DUPLIST_SIZE;
- entry = &bat_priv->bcast_duplist[curr];
+ entry = &bat_priv->bla.bcast_duplist[curr];
entry->crc = crc;
entry->entrytime = jiffies;
memcpy(entry->orig, bcast_packet->orig, ETH_ALEN);
- bat_priv->bcast_duplist_curr = curr;
+ bat_priv->bla.bcast_duplist_curr = curr;
/* allow it, its the first occurence. */
return 0;
@@ -1279,7 +1285,7 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
*/
int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
{
- struct batadv_hashtable *hash = bat_priv->backbone_hash;
+ struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
struct hlist_head *head;
struct hlist_node *node;
struct batadv_backbone_gw *backbone_gw;
@@ -1339,8 +1345,7 @@ int batadv_bla_is_backbone_gw(struct sk_buff *skb,
if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr)))
return 0;
- vhdr = (struct vlan_ethhdr *)(((uint8_t *)skb->data) +
- hdr_size);
+ vhdr = (struct vlan_ethhdr *)(skb->data + hdr_size);
vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
}
@@ -1359,18 +1364,18 @@ void batadv_bla_free(struct batadv_priv *bat_priv)
{
struct batadv_hard_iface *primary_if;
- cancel_delayed_work_sync(&bat_priv->bla_work);
+ cancel_delayed_work_sync(&bat_priv->bla.work);
primary_if = batadv_primary_if_get_selected(bat_priv);
- if (bat_priv->claim_hash) {
+ if (bat_priv->bla.claim_hash) {
batadv_bla_purge_claims(bat_priv, primary_if, 1);
- batadv_hash_destroy(bat_priv->claim_hash);
- bat_priv->claim_hash = NULL;
+ batadv_hash_destroy(bat_priv->bla.claim_hash);
+ bat_priv->bla.claim_hash = NULL;
}
- if (bat_priv->backbone_hash) {
+ if (bat_priv->bla.backbone_hash) {
batadv_bla_purge_backbone_gw(bat_priv, 1);
- batadv_hash_destroy(bat_priv->backbone_hash);
- bat_priv->backbone_hash = NULL;
+ batadv_hash_destroy(bat_priv->bla.backbone_hash);
+ bat_priv->bla.backbone_hash = NULL;
}
if (primary_if)
batadv_hardif_free_ref(primary_if);
@@ -1409,7 +1414,7 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
goto allow;
- if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
+ if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
/* don't allow broadcasts while requests are in flight */
if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
goto handled;
@@ -1508,7 +1513,7 @@ int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
ethhdr = (struct ethhdr *)skb_mac_header(skb);
- if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
+ if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
/* don't allow broadcasts while requests are in flight */
if (is_multicast_ether_addr(ethhdr->h_dest))
goto handled;
@@ -1564,7 +1569,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
{
struct net_device *net_dev = (struct net_device *)seq->private;
struct batadv_priv *bat_priv = netdev_priv(net_dev);
- struct batadv_hashtable *hash = bat_priv->claim_hash;
+ struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
struct batadv_claim *claim;
struct batadv_hard_iface *primary_if;
struct hlist_node *node;
@@ -1593,7 +1598,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
seq_printf(seq,
"Claims announced for the mesh %s (orig %pM, group id %04x)\n",
net_dev->name, primary_addr,
- ntohs(bat_priv->claim_dest.group));
+ ntohs(bat_priv->bla.claim_dest.group));
seq_printf(seq, " %-17s %-5s %-17s [o] (%-4s)\n",
"Client", "VID", "Originator", "CRC");
for (i = 0; i < hash->size; i++) {
@@ -1616,3 +1621,68 @@ out:
batadv_hardif_free_ref(primary_if);
return ret;
}
+
+int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
+{
+ struct net_device *net_dev = (struct net_device *)seq->private;
+ struct batadv_priv *bat_priv = netdev_priv(net_dev);
+ struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
+ struct batadv_backbone_gw *backbone_gw;
+ struct batadv_hard_iface *primary_if;
+ struct hlist_node *node;
+ struct hlist_head *head;
+ int secs, msecs;
+ uint32_t i;
+ bool is_own;
+ int ret = 0;
+ uint8_t *primary_addr;
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if) {
+ ret = seq_printf(seq,
+ "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
+ net_dev->name);
+ goto out;
+ }
+
+ if (primary_if->if_status != BATADV_IF_ACTIVE) {
+ ret = seq_printf(seq,
+ "BATMAN mesh %s disabled - primary interface not active\n",
+ net_dev->name);
+ goto out;
+ }
+
+ primary_addr = primary_if->net_dev->dev_addr;
+ seq_printf(seq,
+ "Backbones announced for the mesh %s (orig %pM, group id %04x)\n",
+ net_dev->name, primary_addr,
+ ntohs(bat_priv->bla.claim_dest.group));
+ seq_printf(seq, " %-17s %-5s %-9s (%-4s)\n",
+ "Originator", "VID", "last seen", "CRC");
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+ msecs = jiffies_to_msecs(jiffies -
+ backbone_gw->lasttime);
+ secs = msecs / 1000;
+ msecs = msecs % 1000;
+
+ is_own = batadv_compare_eth(backbone_gw->orig,
+ primary_addr);
+ if (is_own)
+ continue;
+
+ seq_printf(seq,
+ " * %pM on % 5d % 4i.%03is (%04x)\n",
+ backbone_gw->orig, backbone_gw->vid,
+ secs, msecs, backbone_gw->crc);
+ }
+ rcu_read_unlock();
+ }
+out:
+ if (primary_if)
+ batadv_hardif_free_ref(primary_if);
+ return ret;
+}
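
Because the delayed work item has moved from struct batadv_priv into the embedded struct batadv_priv_bla, batadv_bla_periodic_work() above recovers its private data with a chain of container_of() calls. A standalone sketch of the pattern with hypothetical inner/outer type names:

/* Sketch of the nested container_of() walk (hypothetical types). */
#include <linux/workqueue.h>

struct inner_part {
        struct delayed_work work;
};

struct outer_priv {
        struct inner_part bla;
};

static void periodic_worker(struct work_struct *work)
{
        struct delayed_work *dw;
        struct inner_part *inner;
        struct outer_priv *priv;

        dw = container_of(work, struct delayed_work, work);
        inner = container_of(dw, struct inner_part, work);
        priv = container_of(inner, struct outer_priv, bla);

        /* priv now points at the structure embedding the whole chain */
        (void)priv;
}
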
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index 563cfbf94a7..789cb73bde6 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -27,6 +27,8 @@ int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid);
int batadv_bla_is_backbone_gw(struct sk_buff *skb,
struct batadv_orig_node *orig_node, int hdr_size);
int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
+int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
+ void *offset);
int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig);
int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
struct batadv_bcast_packet *bcast_packet,
@@ -41,8 +43,7 @@ void batadv_bla_free(struct batadv_priv *bat_priv);
#else /* ifdef CONFIG_BATMAN_ADV_BLA */
static inline int batadv_bla_rx(struct batadv_priv *bat_priv,
- struct sk_buff *skb, short vid,
- bool is_bcast)
+ struct sk_buff *skb, short vid, bool is_bcast)
{
return 0;
}
@@ -66,6 +67,12 @@ static inline int batadv_bla_claim_table_seq_print_text(struct seq_file *seq,
return 0;
}
+static inline int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
+ void *offset)
+{
+ return 0;
+}
+
static inline int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv,
uint8_t *orig)
{
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index 34fbb1667bc..391d4fb2026 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -267,6 +267,15 @@ static int batadv_bla_claim_table_open(struct inode *inode, struct file *file)
return single_open(file, batadv_bla_claim_table_seq_print_text,
net_dev);
}
+
+static int batadv_bla_backbone_table_open(struct inode *inode,
+ struct file *file)
+{
+ struct net_device *net_dev = (struct net_device *)inode->i_private;
+ return single_open(file, batadv_bla_backbone_table_seq_print_text,
+ net_dev);
+}
+
#endif
static int batadv_transtable_local_open(struct inode *inode, struct file *file)
@@ -305,6 +314,8 @@ static BATADV_DEBUGINFO(transtable_global, S_IRUGO,
batadv_transtable_global_open);
#ifdef CONFIG_BATMAN_ADV_BLA
static BATADV_DEBUGINFO(bla_claim_table, S_IRUGO, batadv_bla_claim_table_open);
+static BATADV_DEBUGINFO(bla_backbone_table, S_IRUGO,
+ batadv_bla_backbone_table_open);
#endif
static BATADV_DEBUGINFO(transtable_local, S_IRUGO,
batadv_transtable_local_open);
@@ -316,6 +327,7 @@ static struct batadv_debuginfo *batadv_mesh_debuginfos[] = {
&batadv_debuginfo_transtable_global,
#ifdef CONFIG_BATMAN_ADV_BLA
&batadv_debuginfo_bla_claim_table,
+ &batadv_debuginfo_bla_backbone_table,
#endif
&batadv_debuginfo_transtable_local,
&batadv_debuginfo_vis_data,
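
The new debugfs entry reuses the usual seq_file idiom: the open callback hands single_open() a show function plus the net_device pointer stashed in inode->i_private, and single_open() exposes that pointer as seq->private. A generic sketch of the idiom (hypothetical my_* names):

/* Generic single_open() idiom (hypothetical my_* names). */
#include <linux/fs.h>
#include <linux/netdevice.h>
#include <linux/seq_file.h>

static int my_table_show(struct seq_file *seq, void *offset)
{
        struct net_device *net_dev = seq->private;

        seq_printf(seq, "table for %s\n", net_dev->name);
        return 0;
}

static int my_table_open(struct inode *inode, struct file *file)
{
        /* i_private was set when the debugfs file was created */
        return single_open(file, my_table_show, inode->i_private);
}
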
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index fc866f2e452..15d67abc10a 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -48,7 +48,7 @@ batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv)
struct batadv_gw_node *gw_node;
rcu_read_lock();
- gw_node = rcu_dereference(bat_priv->curr_gw);
+ gw_node = rcu_dereference(bat_priv->gw.curr_gw);
if (!gw_node)
goto out;
@@ -91,23 +91,23 @@ static void batadv_gw_select(struct batadv_priv *bat_priv,
{
struct batadv_gw_node *curr_gw_node;
- spin_lock_bh(&bat_priv->gw_list_lock);
+ spin_lock_bh(&bat_priv->gw.list_lock);
if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
new_gw_node = NULL;
- curr_gw_node = rcu_dereference_protected(bat_priv->curr_gw, 1);
- rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);
+ curr_gw_node = rcu_dereference_protected(bat_priv->gw.curr_gw, 1);
+ rcu_assign_pointer(bat_priv->gw.curr_gw, new_gw_node);
if (curr_gw_node)
batadv_gw_node_free_ref(curr_gw_node);
- spin_unlock_bh(&bat_priv->gw_list_lock);
+ spin_unlock_bh(&bat_priv->gw.list_lock);
}
void batadv_gw_deselect(struct batadv_priv *bat_priv)
{
- atomic_set(&bat_priv->gw_reselect, 1);
+ atomic_set(&bat_priv->gw.reselect, 1);
}
static struct batadv_gw_node *
@@ -117,12 +117,17 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
struct hlist_node *node;
struct batadv_gw_node *gw_node, *curr_gw = NULL;
uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
+ uint32_t gw_divisor;
uint8_t max_tq = 0;
int down, up;
+ uint8_t tq_avg;
struct batadv_orig_node *orig_node;
+ gw_divisor = BATADV_TQ_LOCAL_WINDOW_SIZE * BATADV_TQ_LOCAL_WINDOW_SIZE;
+ gw_divisor *= 64;
+
rcu_read_lock();
- hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
+ hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
if (gw_node->deleted)
continue;
@@ -134,19 +139,19 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
if (!atomic_inc_not_zero(&gw_node->refcount))
goto next;
+ tq_avg = router->tq_avg;
+
switch (atomic_read(&bat_priv->gw_sel_class)) {
case 1: /* fast connection */
batadv_gw_bandwidth_to_kbit(orig_node->gw_flags,
&down, &up);
- tmp_gw_factor = (router->tq_avg * router->tq_avg *
- down * 100 * 100) /
- (BATADV_TQ_LOCAL_WINDOW_SIZE *
- BATADV_TQ_LOCAL_WINDOW_SIZE * 64);
+ tmp_gw_factor = tq_avg * tq_avg * down * 100 * 100;
+ tmp_gw_factor /= gw_divisor;
if ((tmp_gw_factor > max_gw_factor) ||
((tmp_gw_factor == max_gw_factor) &&
- (router->tq_avg > max_tq))) {
+ (tq_avg > max_tq))) {
if (curr_gw)
batadv_gw_node_free_ref(curr_gw);
curr_gw = gw_node;
@@ -161,7 +166,7 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
* soon as a better gateway appears which has
* $routing_class more tq points)
*/
- if (router->tq_avg > max_tq) {
+ if (tq_avg > max_tq) {
if (curr_gw)
batadv_gw_node_free_ref(curr_gw);
curr_gw = gw_node;
@@ -170,8 +175,8 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
break;
}
- if (router->tq_avg > max_tq)
- max_tq = router->tq_avg;
+ if (tq_avg > max_tq)
+ max_tq = tq_avg;
if (tmp_gw_factor > max_gw_factor)
max_gw_factor = tmp_gw_factor;
@@ -202,7 +207,7 @@ void batadv_gw_election(struct batadv_priv *bat_priv)
curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
- if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect) && curr_gw)
+ if (!batadv_atomic_dec_not_zero(&bat_priv->gw.reselect) && curr_gw)
goto out;
next_gw = batadv_gw_get_best_gw_node(bat_priv);
@@ -321,9 +326,9 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
gw_node->orig_node = orig_node;
atomic_set(&gw_node->refcount, 1);
- spin_lock_bh(&bat_priv->gw_list_lock);
- hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list);
- spin_unlock_bh(&bat_priv->gw_list_lock);
+ spin_lock_bh(&bat_priv->gw.list_lock);
+ hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.list);
+ spin_unlock_bh(&bat_priv->gw.list_lock);
batadv_gw_bandwidth_to_kbit(new_gwflags, &down, &up);
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -350,7 +355,7 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
rcu_read_lock();
- hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
+ hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
if (gw_node->orig_node != orig_node)
continue;
@@ -404,10 +409,10 @@ void batadv_gw_node_purge(struct batadv_priv *bat_priv)
curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
- spin_lock_bh(&bat_priv->gw_list_lock);
+ spin_lock_bh(&bat_priv->gw.list_lock);
hlist_for_each_entry_safe(gw_node, node, node_tmp,
- &bat_priv->gw_list, list) {
+ &bat_priv->gw.list, list) {
if (((!gw_node->deleted) ||
(time_before(jiffies, gw_node->deleted + timeout))) &&
atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE)
@@ -420,7 +425,7 @@ void batadv_gw_node_purge(struct batadv_priv *bat_priv)
batadv_gw_node_free_ref(gw_node);
}
- spin_unlock_bh(&bat_priv->gw_list_lock);
+ spin_unlock_bh(&bat_priv->gw.list_lock);
/* gw_deselect() needs to acquire the gw_list_lock */
if (do_deselect)
@@ -496,7 +501,7 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
primary_if->net_dev->dev_addr, net_dev->name);
rcu_read_lock();
- hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
+ hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
if (gw_node->deleted)
continue;
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 282bf6e9353..d112fd6750b 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -103,13 +103,14 @@ static void batadv_primary_if_update_addr(struct batadv_priv *bat_priv,
{
struct batadv_vis_packet *vis_packet;
struct batadv_hard_iface *primary_if;
+ struct sk_buff *skb;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
- vis_packet = (struct batadv_vis_packet *)
- bat_priv->my_vis_info->skb_packet->data;
+ skb = bat_priv->vis.my_info->skb_packet;
+ vis_packet = (struct batadv_vis_packet *)skb->data;
memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
memcpy(vis_packet->sender_orig,
primary_if->net_dev->dev_addr, ETH_ALEN);
@@ -313,7 +314,13 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
hard_iface->if_num = bat_priv->num_ifaces;
bat_priv->num_ifaces++;
hard_iface->if_status = BATADV_IF_INACTIVE;
- batadv_orig_hash_add_if(hard_iface, bat_priv->num_ifaces);
+ ret = batadv_orig_hash_add_if(hard_iface, bat_priv->num_ifaces);
+ if (ret < 0) {
+ bat_priv->bat_algo_ops->bat_iface_disable(hard_iface);
+ bat_priv->num_ifaces--;
+ hard_iface->if_status = BATADV_IF_NOT_IN_USE;
+ goto err_dev;
+ }
hard_iface->batman_adv_ptype.type = ethertype;
hard_iface->batman_adv_ptype.func = batadv_batman_skb_recv;
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 13c88b25ab3..b4aa470bc4a 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -58,9 +58,6 @@ static int __init batadv_init(void)
batadv_iv_init();
- /* the name should not be longer than 10 chars - see
- * http://lwn.net/Articles/23634/
- */
batadv_event_workqueue = create_singlethread_workqueue("bat_events");
if (!batadv_event_workqueue)
@@ -97,20 +94,20 @@ int batadv_mesh_init(struct net_device *soft_iface)
spin_lock_init(&bat_priv->forw_bat_list_lock);
spin_lock_init(&bat_priv->forw_bcast_list_lock);
- spin_lock_init(&bat_priv->tt_changes_list_lock);
- spin_lock_init(&bat_priv->tt_req_list_lock);
- spin_lock_init(&bat_priv->tt_roam_list_lock);
- spin_lock_init(&bat_priv->tt_buff_lock);
- spin_lock_init(&bat_priv->gw_list_lock);
- spin_lock_init(&bat_priv->vis_hash_lock);
- spin_lock_init(&bat_priv->vis_list_lock);
+ spin_lock_init(&bat_priv->tt.changes_list_lock);
+ spin_lock_init(&bat_priv->tt.req_list_lock);
+ spin_lock_init(&bat_priv->tt.roam_list_lock);
+ spin_lock_init(&bat_priv->tt.last_changeset_lock);
+ spin_lock_init(&bat_priv->gw.list_lock);
+ spin_lock_init(&bat_priv->vis.hash_lock);
+ spin_lock_init(&bat_priv->vis.list_lock);
INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
- INIT_HLIST_HEAD(&bat_priv->gw_list);
- INIT_LIST_HEAD(&bat_priv->tt_changes_list);
- INIT_LIST_HEAD(&bat_priv->tt_req_list);
- INIT_LIST_HEAD(&bat_priv->tt_roam_list);
+ INIT_HLIST_HEAD(&bat_priv->gw.list);
+ INIT_LIST_HEAD(&bat_priv->tt.changes_list);
+ INIT_LIST_HEAD(&bat_priv->tt.req_list);
+ INIT_LIST_HEAD(&bat_priv->tt.roam_list);
ret = batadv_originator_init(bat_priv);
if (ret < 0)
@@ -131,7 +128,7 @@ int batadv_mesh_init(struct net_device *soft_iface)
if (ret < 0)
goto err;
- atomic_set(&bat_priv->gw_reselect, 0);
+ atomic_set(&bat_priv->gw.reselect, 0);
atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);
return 0;
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 5d8fa075794..d57b746219d 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -26,7 +26,7 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2012.3.0"
+#define BATADV_SOURCE_VERSION "2012.4.0"
#endif
/* B.A.T.M.A.N. parameters */
@@ -41,13 +41,14 @@
* -> TODO: check influence on BATADV_TQ_LOCAL_WINDOW_SIZE
*/
#define BATADV_PURGE_TIMEOUT 200000 /* 200 seconds */
-#define BATADV_TT_LOCAL_TIMEOUT 3600000 /* in miliseconds */
-#define BATADV_TT_CLIENT_ROAM_TIMEOUT 600000 /* in miliseconds */
+#define BATADV_TT_LOCAL_TIMEOUT 3600000 /* in milliseconds */
+#define BATADV_TT_CLIENT_ROAM_TIMEOUT 600000 /* in milliseconds */
+#define BATADV_TT_CLIENT_TEMP_TIMEOUT 600000 /* in milliseconds */
/* sliding packet range of received originator messages in sequence numbers
* (should be a multiple of our word size)
*/
#define BATADV_TQ_LOCAL_WINDOW_SIZE 64
-/* miliseconds we have to keep pending tt_req */
+/* milliseconds we have to keep pending tt_req */
#define BATADV_TT_REQUEST_TIMEOUT 3000
#define BATADV_TQ_GLOBAL_WINDOW_SIZE 5
@@ -59,7 +60,7 @@
#define BATADV_TT_OGM_APPEND_MAX 3
/* Time in which a client can roam at most ROAMING_MAX_COUNT times in
- * miliseconds
+ * milliseconds
*/
#define BATADV_ROAMING_MAX_TIME 20000
#define BATADV_ROAMING_MAX_COUNT 5
@@ -123,15 +124,6 @@ enum batadv_uev_type {
/* Append 'batman-adv: ' before kernel messages */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-/* all messages related to routing / flooding / broadcasting / etc */
-enum batadv_dbg_level {
- BATADV_DBG_BATMAN = 1 << 0,
- BATADV_DBG_ROUTES = 1 << 1, /* route added / changed / deleted */
- BATADV_DBG_TT = 1 << 2, /* translation table operations */
- BATADV_DBG_BLA = 1 << 3, /* bridge loop avoidance */
- BATADV_DBG_ALL = 15,
-};
-
/* Kernel headers */
#include <linux/mutex.h> /* mutex */
@@ -173,6 +165,15 @@ int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops);
int batadv_algo_select(struct batadv_priv *bat_priv, char *name);
int batadv_algo_seq_print_text(struct seq_file *seq, void *offset);
+/* all messages related to routing / flooding / broadcasting / etc */
+enum batadv_dbg_level {
+ BATADV_DBG_BATMAN = BIT(0),
+ BATADV_DBG_ROUTES = BIT(1), /* route added / changed / deleted */
+ BATADV_DBG_TT = BIT(2), /* translation table operations */
+ BATADV_DBG_BLA = BIT(3), /* bridge loop avoidance */
+ BATADV_DBG_ALL = 15,
+};
+
#ifdef CONFIG_BATMAN_ADV_DEBUG
int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
__printf(2, 3);
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 8d3e55a96ad..2d23a14c220 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -37,10 +37,10 @@ enum batadv_packettype {
#define BATADV_COMPAT_VERSION 14
enum batadv_iv_flags {
- BATADV_NOT_BEST_NEXT_HOP = 1 << 3,
- BATADV_PRIMARIES_FIRST_HOP = 1 << 4,
- BATADV_VIS_SERVER = 1 << 5,
- BATADV_DIRECTLINK = 1 << 6,
+ BATADV_NOT_BEST_NEXT_HOP = BIT(3),
+ BATADV_PRIMARIES_FIRST_HOP = BIT(4),
+ BATADV_VIS_SERVER = BIT(5),
+ BATADV_DIRECTLINK = BIT(6),
};
/* ICMP message types */
@@ -60,8 +60,8 @@ enum batadv_vis_packettype {
/* fragmentation defines */
enum batadv_unicast_frag_flags {
- BATADV_UNI_FRAG_HEAD = 1 << 0,
- BATADV_UNI_FRAG_LARGETAIL = 1 << 1,
+ BATADV_UNI_FRAG_HEAD = BIT(0),
+ BATADV_UNI_FRAG_LARGETAIL = BIT(1),
};
/* TT_QUERY subtypes */
@@ -74,26 +74,27 @@ enum batadv_tt_query_packettype {
/* TT_QUERY flags */
enum batadv_tt_query_flags {
- BATADV_TT_FULL_TABLE = 1 << 2,
+ BATADV_TT_FULL_TABLE = BIT(2),
};
/* BATADV_TT_CLIENT flags.
- * Flags from 1 to 1 << 7 are sent on the wire, while flags from 1 << 8 to
- * 1 << 15 are used for local computation only
+ * Flags from BIT(0) to BIT(7) are sent on the wire, while flags from BIT(8) to
+ * BIT(15) are used for local computation only
*/
enum batadv_tt_client_flags {
- BATADV_TT_CLIENT_DEL = 1 << 0,
- BATADV_TT_CLIENT_ROAM = 1 << 1,
- BATADV_TT_CLIENT_WIFI = 1 << 2,
- BATADV_TT_CLIENT_NOPURGE = 1 << 8,
- BATADV_TT_CLIENT_NEW = 1 << 9,
- BATADV_TT_CLIENT_PENDING = 1 << 10,
+ BATADV_TT_CLIENT_DEL = BIT(0),
+ BATADV_TT_CLIENT_ROAM = BIT(1),
+ BATADV_TT_CLIENT_WIFI = BIT(2),
+ BATADV_TT_CLIENT_TEMP = BIT(3),
+ BATADV_TT_CLIENT_NOPURGE = BIT(8),
+ BATADV_TT_CLIENT_NEW = BIT(9),
+ BATADV_TT_CLIENT_PENDING = BIT(10),
};
/* claim frame types for the bridge loop avoidance */
enum batadv_bla_claimframe {
- BATADV_CLAIM_TYPE_ADD = 0x00,
- BATADV_CLAIM_TYPE_DEL = 0x01,
+ BATADV_CLAIM_TYPE_CLAIM = 0x00,
+ BATADV_CLAIM_TYPE_UNCLAIM = 0x01,
BATADV_CLAIM_TYPE_ANNOUNCE = 0x02,
BATADV_CLAIM_TYPE_REQUEST = 0x03,
};
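
The conversions above replace open-coded "1 << n" shifts with the kernel's BIT() macro from <linux/bitops.h>, which expands to an unsigned long shift: BIT(3) == (1UL << 3). A minimal illustration with hypothetical flag names:

#include <linux/bitops.h>       /* provides: #define BIT(nr) (1UL << (nr)) */

enum example_flags {
        EXAMPLE_FLAG_A = BIT(0), /* 0x01 */
        EXAMPLE_FLAG_B = BIT(1), /* 0x02 */
        EXAMPLE_FLAG_C = BIT(3), /* 0x08 */
};
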
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index bc2b88bbea1..939fc01371d 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -579,32 +579,45 @@ batadv_find_ifalter_router(struct batadv_orig_node *primary_orig,
return router;
}
-int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
+static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size)
{
- struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
- struct batadv_tt_query_packet *tt_query;
- uint16_t tt_size;
struct ethhdr *ethhdr;
- char tt_flag;
- size_t packet_size;
/* drop packet if it has not necessary minimum size */
- if (unlikely(!pskb_may_pull(skb,
- sizeof(struct batadv_tt_query_packet))))
- goto out;
-
- /* I could need to modify it */
- if (skb_cow(skb, sizeof(struct batadv_tt_query_packet)) < 0)
- goto out;
+ if (unlikely(!pskb_may_pull(skb, hdr_size)))
+ return -1;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* packet with unicast indication but broadcast recipient */
if (is_broadcast_ether_addr(ethhdr->h_dest))
- goto out;
+ return -1;
/* packet with broadcast sender address */
if (is_broadcast_ether_addr(ethhdr->h_source))
+ return -1;
+
+ /* not for me */
+ if (!batadv_is_my_mac(ethhdr->h_dest))
+ return -1;
+
+ return 0;
+}
+
+int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
+{
+ struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct batadv_tt_query_packet *tt_query;
+ uint16_t tt_size;
+ int hdr_size = sizeof(*tt_query);
+ char tt_flag;
+ size_t packet_size;
+
+ if (batadv_check_unicast_packet(skb, hdr_size) < 0)
+ return NET_RX_DROP;
+
+ /* I could need to modify it */
+ if (skb_cow(skb, sizeof(struct batadv_tt_query_packet)) < 0)
goto out;
tt_query = (struct batadv_tt_query_packet *)skb->data;
@@ -721,7 +734,7 @@ int batadv_recv_roam_adv(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
* been incremented yet. This flag will make me check all the incoming
* packets for the correct destination.
*/
- bat_priv->tt_poss_change = true;
+ bat_priv->tt.poss_change = true;
batadv_orig_node_free_ref(orig_node);
out:
@@ -819,31 +832,6 @@ err:
return NULL;
}
-static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size)
-{
- struct ethhdr *ethhdr;
-
- /* drop packet if it has not necessary minimum size */
- if (unlikely(!pskb_may_pull(skb, hdr_size)))
- return -1;
-
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
- /* packet with unicast indication but broadcast recipient */
- if (is_broadcast_ether_addr(ethhdr->h_dest))
- return -1;
-
- /* packet with broadcast sender address */
- if (is_broadcast_ether_addr(ethhdr->h_source))
- return -1;
-
- /* not for me */
- if (!batadv_is_my_mac(ethhdr->h_dest))
- return -1;
-
- return 0;
-}
-
static int batadv_route_unicast_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if)
{
@@ -947,8 +935,8 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
unicast_packet = (struct batadv_unicast_packet *)skb->data;
if (batadv_is_my_mac(unicast_packet->dest)) {
- tt_poss_change = bat_priv->tt_poss_change;
- curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
+ tt_poss_change = bat_priv->tt.poss_change;
+ curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
} else {
orig_node = batadv_orig_hash_find(bat_priv,
unicast_packet->dest);
@@ -993,8 +981,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
} else {
memcpy(unicast_packet->dest, orig_node->orig,
ETH_ALEN);
- curr_ttvn = (uint8_t)
- atomic_read(&orig_node->last_ttvn);
+ curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
batadv_orig_node_free_ref(orig_node);
}
@@ -1025,8 +1012,9 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
/* packet for me */
if (batadv_is_my_mac(unicast_packet->dest)) {
- batadv_interface_rx(recv_if->soft_iface, skb, recv_if,
- hdr_size);
+ batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size,
+ NULL);
+
return NET_RX_SUCCESS;
}
@@ -1063,7 +1051,7 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
return NET_RX_SUCCESS;
batadv_interface_rx(recv_if->soft_iface, new_skb, recv_if,
- sizeof(struct batadv_unicast_packet));
+ sizeof(struct batadv_unicast_packet), NULL);
return NET_RX_SUCCESS;
}
@@ -1150,7 +1138,8 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
goto out;
/* broadcast for me */
- batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
+ batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size,
+ orig_node);
ret = NET_RX_SUCCESS;
goto out;
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 3b4b2daa3b3..570a8bce036 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -190,13 +190,13 @@ out:
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
struct batadv_hard_iface *hard_iface;
- struct delayed_work *delayed_work =
- container_of(work, struct delayed_work, work);
+ struct delayed_work *delayed_work;
struct batadv_forw_packet *forw_packet;
struct sk_buff *skb1;
struct net_device *soft_iface;
struct batadv_priv *bat_priv;
+ delayed_work = container_of(work, struct delayed_work, work);
forw_packet = container_of(delayed_work, struct batadv_forw_packet,
delayed_work);
soft_iface = forw_packet->if_incoming->soft_iface;
@@ -239,11 +239,11 @@ out:
void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
- struct delayed_work *delayed_work =
- container_of(work, struct delayed_work, work);
+ struct delayed_work *delayed_work;
struct batadv_forw_packet *forw_packet;
struct batadv_priv *bat_priv;
+ delayed_work = container_of(work, struct delayed_work, work);
forw_packet = container_of(delayed_work, struct batadv_forw_packet,
delayed_work);
bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 109ea2aae96..7b683e0bd66 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -93,7 +93,14 @@ static int batadv_interface_release(struct net_device *dev)
static struct net_device_stats *batadv_interface_stats(struct net_device *dev)
{
struct batadv_priv *bat_priv = netdev_priv(dev);
- return &bat_priv->stats;
+ struct net_device_stats *stats = &bat_priv->stats;
+
+ stats->tx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_TX);
+ stats->tx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_TX_BYTES);
+ stats->tx_dropped = batadv_sum_counter(bat_priv, BATADV_CNT_TX_DROPPED);
+ stats->rx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_RX);
+ stats->rx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_RX_BYTES);
+ return stats;
}
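The rewritten stats callback folds per-CPU counters on demand instead of reading fields updated under contention; on the hot path each CPU bumps only its own slot. A rough userspace model of the batadv_add_counter()/batadv_sum_counter() pair (a plain array per CPU replaces the kernel's __alloc_percpu() area; names are illustrative):

#include <stdint.h>

enum { CNT_TX, CNT_TX_BYTES, CNT_RX, CNT_NUM };  /* trimmed counter set */
#define NR_CPUS 4

/* one uint64_t slot per counter per CPU, like bat_priv->bat_counters */
static uint64_t counters[NR_CPUS][CNT_NUM];

/* writers touch only their own CPU's slot: no shared cacheline, no lock */
static void add_counter(int cpu, int idx, uint64_t delta)
{
	counters[cpu][idx] += delta;
}

/* readers fold all slots on demand, like batadv_sum_counter() */
static uint64_t sum_counter(int idx)
{
	uint64_t sum = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sum += counters[cpu][idx];
	return sum;
}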
static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
@@ -142,6 +149,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
int data_len = skb->len, ret;
short vid __maybe_unused = -1;
bool do_bcast = false;
+ uint32_t seqno;
if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
goto dropped;
@@ -223,8 +231,8 @@ static int batadv_interface_tx(struct sk_buff *skb,
primary_if->net_dev->dev_addr, ETH_ALEN);
/* set broadcast sequence number */
- bcast_packet->seqno =
- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
+ seqno = atomic_inc_return(&bat_priv->bcast_seqno);
+ bcast_packet->seqno = htonl(seqno);
batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
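Pulling the atomic_inc_return() out of the htonl() argument looks like a pure readability/checkpatch change: the side-effecting expression gets its own statement, the line fits in 80 columns, and no side effect sits inside a macro argument (htonl() can expand to a macro depending on configuration). The shape of the pattern in a standalone C sketch:

#include <arpa/inet.h>          /* htonl(); may be a macro */
#include <stdatomic.h>
#include <stdint.h>

static atomic_uint bcast_seqno;

static uint32_t next_seqno_wire(void)
{
	/* do the side effect in its own statement; fetch_add returns the
	 * old value, so +1 mirrors the kernel's atomic_inc_return()
	 */
	uint32_t seqno = atomic_fetch_add(&bcast_seqno, 1) + 1;

	/* the (possibly macro) htonl() now sees a plain value */
	return htonl(seqno);
}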
@@ -246,14 +254,14 @@ static int batadv_interface_tx(struct sk_buff *skb,
goto dropped_freed;
}
- bat_priv->stats.tx_packets++;
- bat_priv->stats.tx_bytes += data_len;
+ batadv_inc_counter(bat_priv, BATADV_CNT_TX);
+ batadv_add_counter(bat_priv, BATADV_CNT_TX_BYTES, data_len);
goto end;
dropped:
kfree_skb(skb);
dropped_freed:
- bat_priv->stats.tx_dropped++;
+ batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED);
end:
if (primary_if)
batadv_hardif_free_ref(primary_if);
@@ -262,7 +270,7 @@ end:
void batadv_interface_rx(struct net_device *soft_iface,
struct sk_buff *skb, struct batadv_hard_iface *recv_if,
- int hdr_size)
+ int hdr_size, struct batadv_orig_node *orig_node)
{
struct batadv_priv *bat_priv = netdev_priv(soft_iface);
struct ethhdr *ethhdr;
@@ -308,11 +316,16 @@ void batadv_interface_rx(struct net_device *soft_iface,
/* skb->ip_summed = CHECKSUM_UNNECESSARY; */
- bat_priv->stats.rx_packets++;
- bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
+ batadv_inc_counter(bat_priv, BATADV_CNT_RX);
+ batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
+ skb->len + ETH_HLEN);
soft_iface->last_rx = jiffies;
+ if (orig_node)
+ batadv_tt_add_temporary_global_entry(bat_priv, orig_node,
+ ethhdr->h_source);
+
if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
goto dropped;
@@ -379,15 +392,22 @@ struct net_device *batadv_softif_create(const char *name)
if (!soft_iface)
goto out;
+ bat_priv = netdev_priv(soft_iface);
+
+ /* batadv_interface_stats() needs to be available as soon as
+ * register_netdevice() has been called
+ */
+ bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t));
+ if (!bat_priv->bat_counters)
+ goto free_soft_iface;
+
ret = register_netdevice(soft_iface);
if (ret < 0) {
pr_err("Unable to register the batman interface '%s': %i\n",
name, ret);
- goto free_soft_iface;
+ goto free_bat_counters;
}
- bat_priv = netdev_priv(soft_iface);
-
atomic_set(&bat_priv->aggregated_ogms, 1);
atomic_set(&bat_priv->bonding, 0);
atomic_set(&bat_priv->bridge_loop_avoidance, 0);
@@ -405,29 +425,26 @@ struct net_device *batadv_softif_create(const char *name)
atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
atomic_set(&bat_priv->bcast_seqno, 1);
- atomic_set(&bat_priv->ttvn, 0);
- atomic_set(&bat_priv->tt_local_changes, 0);
- atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
- atomic_set(&bat_priv->bla_num_requests, 0);
-
- bat_priv->tt_buff = NULL;
- bat_priv->tt_buff_len = 0;
- bat_priv->tt_poss_change = false;
+ atomic_set(&bat_priv->tt.vn, 0);
+ atomic_set(&bat_priv->tt.local_changes, 0);
+ atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
+#ifdef CONFIG_BATMAN_ADV_BLA
+ atomic_set(&bat_priv->bla.num_requests, 0);
+#endif
+ bat_priv->tt.last_changeset = NULL;
+ bat_priv->tt.last_changeset_len = 0;
+ bat_priv->tt.poss_change = false;
bat_priv->primary_if = NULL;
bat_priv->num_ifaces = 0;
- bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t));
- if (!bat_priv->bat_counters)
- goto unreg_soft_iface;
-
ret = batadv_algo_select(bat_priv, batadv_routing_algo);
if (ret < 0)
- goto free_bat_counters;
+ goto unreg_soft_iface;
ret = batadv_sysfs_add_meshif(soft_iface);
if (ret < 0)
- goto free_bat_counters;
+ goto unreg_soft_iface;
ret = batadv_debugfs_add_meshif(soft_iface);
if (ret < 0)
@@ -443,12 +460,13 @@ unreg_debugfs:
batadv_debugfs_del_meshif(soft_iface);
unreg_sysfs:
batadv_sysfs_del_meshif(soft_iface);
-free_bat_counters:
- free_percpu(bat_priv->bat_counters);
unreg_soft_iface:
+ free_percpu(bat_priv->bat_counters);
unregister_netdevice(soft_iface);
return NULL;
+free_bat_counters:
+ free_percpu(bat_priv->bat_counters);
free_soft_iface:
free_netdev(soft_iface);
out:
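The relabelled error path follows the usual kernel unwind idiom: each label releases one resource, the labels run in reverse order of acquisition, and every failure site jumps to the first label whose resource already exists. Because the counters are now allocated before register_netdevice(), a failed registration has to free them as well, which is what the moved free_percpu() does. A self-contained miniature of the idiom (malloc stands in for the percpu allocation and the netdevice):

#include <stdlib.h>

struct thing { int *a; int *b; };

static struct thing *thing_create(void)
{
	struct thing *t = malloc(sizeof(*t));

	if (!t)
		goto out;
	t->a = malloc(64);              /* acquired first ... */
	if (!t->a)
		goto free_thing;
	t->b = malloc(64);              /* ... acquired second */
	if (!t->b)
		goto free_a;
	return t;

	/* tear down in reverse order of acquisition */
free_a:
	free(t->a);
free_thing:
	free(t);
out:
	return NULL;
}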
@@ -518,6 +536,11 @@ static u32 batadv_get_link(struct net_device *dev)
static const struct {
const char name[ETH_GSTRING_LEN];
} batadv_counters_strings[] = {
+ { "tx" },
+ { "tx_bytes" },
+ { "tx_dropped" },
+ { "rx" },
+ { "rx_bytes" },
{ "forward" },
{ "forward_bytes" },
{ "mgmt_tx" },
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 852c683b06a..07a08fed28b 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -21,8 +21,9 @@
#define _NET_BATMAN_ADV_SOFT_INTERFACE_H_
int batadv_skb_head_push(struct sk_buff *skb, unsigned int len);
-void batadv_interface_rx(struct net_device *soft_iface, struct sk_buff *skb,
- struct batadv_hard_iface *recv_if, int hdr_size);
+void batadv_interface_rx(struct net_device *soft_iface,
+ struct sk_buff *skb, struct batadv_hard_iface *recv_if,
+ int hdr_size, struct batadv_orig_node *orig_node);
struct net_device *batadv_softif_create(const char *name);
void batadv_softif_destroy(struct net_device *soft_iface);
int batadv_softif_is_valid(const struct net_device *net_dev);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 99dd8f75b3f..112edd371b2 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -34,6 +34,10 @@ static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
static void batadv_tt_purge(struct work_struct *work);
static void
batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry);
+static void batadv_tt_global_del(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const unsigned char *addr,
+ const char *message, bool roaming);
/* returns 1 if they are the same mac addr */
static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
@@ -46,8 +50,8 @@ static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
static void batadv_tt_start_timer(struct batadv_priv *bat_priv)
{
- INIT_DELAYED_WORK(&bat_priv->tt_work, batadv_tt_purge);
- queue_delayed_work(batadv_event_workqueue, &bat_priv->tt_work,
+ INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge);
+ queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
msecs_to_jiffies(5000));
}
@@ -88,7 +92,7 @@ batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const void *data)
struct batadv_tt_common_entry *tt_common_entry;
struct batadv_tt_local_entry *tt_local_entry = NULL;
- tt_common_entry = batadv_tt_hash_find(bat_priv->tt_local_hash, data);
+ tt_common_entry = batadv_tt_hash_find(bat_priv->tt.local_hash, data);
if (tt_common_entry)
tt_local_entry = container_of(tt_common_entry,
struct batadv_tt_local_entry,
@@ -102,7 +106,7 @@ batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const void *data)
struct batadv_tt_common_entry *tt_common_entry;
struct batadv_tt_global_entry *tt_global_entry = NULL;
- tt_common_entry = batadv_tt_hash_find(bat_priv->tt_global_hash, data);
+ tt_common_entry = batadv_tt_hash_find(bat_priv->tt.global_hash, data);
if (tt_common_entry)
tt_global_entry = container_of(tt_common_entry,
struct batadv_tt_global_entry,
@@ -152,6 +156,8 @@ static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
static void
batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
{
+ if (!atomic_dec_and_test(&orig_entry->refcount))
+ return;
/* to avoid race conditions, immediately decrease the tt counter */
atomic_dec(&orig_entry->orig_node->tt_size);
call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
@@ -175,8 +181,8 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
del_op_requested = flags & BATADV_TT_CLIENT_DEL;
/* check for ADD+DEL or DEL+ADD events */
- spin_lock_bh(&bat_priv->tt_changes_list_lock);
- list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
+ spin_lock_bh(&bat_priv->tt.changes_list_lock);
+ list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
list) {
if (!batadv_compare_eth(entry->change.addr, addr))
continue;
@@ -203,15 +209,15 @@ del:
}
/* track the change in the OGMinterval list */
- list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
+ list_add_tail(&tt_change_node->list, &bat_priv->tt.changes_list);
unlock:
- spin_unlock_bh(&bat_priv->tt_changes_list_lock);
+ spin_unlock_bh(&bat_priv->tt.changes_list_lock);
if (event_removed)
- atomic_dec(&bat_priv->tt_local_changes);
+ atomic_dec(&bat_priv->tt.local_changes);
else
- atomic_inc(&bat_priv->tt_local_changes);
+ atomic_inc(&bat_priv->tt.local_changes);
}
int batadv_tt_len(int changes_num)
@@ -221,12 +227,12 @@ int batadv_tt_len(int changes_num)
static int batadv_tt_local_init(struct batadv_priv *bat_priv)
{
- if (bat_priv->tt_local_hash)
+ if (bat_priv->tt.local_hash)
return 0;
- bat_priv->tt_local_hash = batadv_hash_new(1024);
+ bat_priv->tt.local_hash = batadv_hash_new(1024);
- if (!bat_priv->tt_local_hash)
+ if (!bat_priv->tt.local_hash)
return -ENOMEM;
return 0;
@@ -258,7 +264,7 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Creating new local tt entry: %pM (ttvn: %d)\n", addr,
- (uint8_t)atomic_read(&bat_priv->ttvn));
+ (uint8_t)atomic_read(&bat_priv->tt.vn));
memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
tt_local_entry->common.flags = BATADV_NO_FLAGS;
@@ -266,6 +272,7 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
tt_local_entry->common.flags |= BATADV_TT_CLIENT_WIFI;
atomic_set(&tt_local_entry->common.refcount, 2);
tt_local_entry->last_seen = jiffies;
+ tt_local_entry->common.added_at = tt_local_entry->last_seen;
/* the batman interface mac address should never be purged */
if (batadv_compare_eth(addr, soft_iface->dev_addr))
@@ -277,7 +284,7 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
*/
tt_local_entry->common.flags |= BATADV_TT_CLIENT_NEW;
- hash_added = batadv_hash_add(bat_priv->tt_local_hash, batadv_compare_tt,
+ hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt,
batadv_choose_orig,
&tt_local_entry->common,
&tt_local_entry->common.hash_entry);
@@ -348,7 +355,7 @@ static void batadv_tt_prepare_packet_buff(struct batadv_priv *bat_priv,
primary_if = batadv_primary_if_get_selected(bat_priv);
req_len = min_packet_len;
- req_len += batadv_tt_len(atomic_read(&bat_priv->tt_local_changes));
+ req_len += batadv_tt_len(atomic_read(&bat_priv->tt.local_changes));
/* if we have too many changes for one packet don't send any
* and wait for the tt table request which will be fragmented
@@ -381,10 +388,10 @@ static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
if (new_len > 0)
tot_changes = new_len / batadv_tt_len(1);
- spin_lock_bh(&bat_priv->tt_changes_list_lock);
- atomic_set(&bat_priv->tt_local_changes, 0);
+ spin_lock_bh(&bat_priv->tt.changes_list_lock);
+ atomic_set(&bat_priv->tt.local_changes, 0);
- list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
+ list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
list) {
if (count < tot_changes) {
memcpy(tt_buff + batadv_tt_len(count),
@@ -394,25 +401,25 @@ static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
list_del(&entry->list);
kfree(entry);
}
- spin_unlock_bh(&bat_priv->tt_changes_list_lock);
+ spin_unlock_bh(&bat_priv->tt.changes_list_lock);
/* Keep the buffer for possible tt_request */
- spin_lock_bh(&bat_priv->tt_buff_lock);
- kfree(bat_priv->tt_buff);
- bat_priv->tt_buff_len = 0;
- bat_priv->tt_buff = NULL;
+ spin_lock_bh(&bat_priv->tt.last_changeset_lock);
+ kfree(bat_priv->tt.last_changeset);
+ bat_priv->tt.last_changeset_len = 0;
+ bat_priv->tt.last_changeset = NULL;
/* check whether this new OGM has no changes due to size problems */
if (new_len > 0) {
/* if kmalloc() fails we will reply with the full table
* instead of providing the diff
*/
- bat_priv->tt_buff = kmalloc(new_len, GFP_ATOMIC);
- if (bat_priv->tt_buff) {
- memcpy(bat_priv->tt_buff, tt_buff, new_len);
- bat_priv->tt_buff_len = new_len;
+ bat_priv->tt.last_changeset = kmalloc(new_len, GFP_ATOMIC);
+ if (bat_priv->tt.last_changeset) {
+ memcpy(bat_priv->tt.last_changeset, tt_buff, new_len);
+ bat_priv->tt.last_changeset_len = new_len;
}
}
- spin_unlock_bh(&bat_priv->tt_buff_lock);
+ spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
return count;
}
@@ -421,7 +428,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
{
struct net_device *net_dev = (struct net_device *)seq->private;
struct batadv_priv *bat_priv = netdev_priv(net_dev);
- struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+ struct batadv_hashtable *hash = bat_priv->tt.local_hash;
struct batadv_tt_common_entry *tt_common_entry;
struct batadv_hard_iface *primary_if;
struct hlist_node *node;
@@ -446,7 +453,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
seq_printf(seq,
"Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
- net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));
+ net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn));
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -544,7 +551,7 @@ static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
static void batadv_tt_local_purge(struct batadv_priv *bat_priv)
{
- struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+ struct batadv_hashtable *hash = bat_priv->tt.local_hash;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
uint32_t i;
@@ -570,10 +577,10 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
struct hlist_head *head;
uint32_t i;
- if (!bat_priv->tt_local_hash)
+ if (!bat_priv->tt.local_hash)
return;
- hash = bat_priv->tt_local_hash;
+ hash = bat_priv->tt.local_hash;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -593,17 +600,17 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
batadv_hash_destroy(hash);
- bat_priv->tt_local_hash = NULL;
+ bat_priv->tt.local_hash = NULL;
}
static int batadv_tt_global_init(struct batadv_priv *bat_priv)
{
- if (bat_priv->tt_global_hash)
+ if (bat_priv->tt.global_hash)
return 0;
- bat_priv->tt_global_hash = batadv_hash_new(1024);
+ bat_priv->tt.global_hash = batadv_hash_new(1024);
- if (!bat_priv->tt_global_hash)
+ if (!bat_priv->tt.global_hash)
return -ENOMEM;
return 0;
@@ -613,62 +620,99 @@ static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
{
struct batadv_tt_change_node *entry, *safe;
- spin_lock_bh(&bat_priv->tt_changes_list_lock);
+ spin_lock_bh(&bat_priv->tt.changes_list_lock);
- list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
+ list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
list) {
list_del(&entry->list);
kfree(entry);
}
- atomic_set(&bat_priv->tt_local_changes, 0);
- spin_unlock_bh(&bat_priv->tt_changes_list_lock);
+ atomic_set(&bat_priv->tt.local_changes, 0);
+ spin_unlock_bh(&bat_priv->tt.changes_list_lock);
}
-/* find out if an orig_node is already in the list of a tt_global_entry.
- * returns 1 if found, 0 otherwise
+/* retrieves the orig_tt_list_entry belonging to orig_node from the
+ * batadv_tt_global_entry list
+ *
+ * returns it with an increased refcounter, NULL if not found
*/
-static bool
-batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
- const struct batadv_orig_node *orig_node)
+static struct batadv_tt_orig_list_entry *
+batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
+ const struct batadv_orig_node *orig_node)
{
- struct batadv_tt_orig_list_entry *tmp_orig_entry;
+ struct batadv_tt_orig_list_entry *tmp_orig_entry, *orig_entry = NULL;
const struct hlist_head *head;
struct hlist_node *node;
- bool found = false;
rcu_read_lock();
head = &entry->orig_list;
hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
- if (tmp_orig_entry->orig_node == orig_node) {
- found = true;
- break;
- }
+ if (tmp_orig_entry->orig_node != orig_node)
+ continue;
+ if (!atomic_inc_not_zero(&tmp_orig_entry->refcount))
+ continue;
+
+ orig_entry = tmp_orig_entry;
+ break;
}
rcu_read_unlock();
+
+ return orig_entry;
+}
+
+/* find out if an orig_node is already in the list of a tt_global_entry.
+ * returns true if found, false otherwise
+ */
+static bool
+batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
+ const struct batadv_orig_node *orig_node)
+{
+ struct batadv_tt_orig_list_entry *orig_entry;
+ bool found = false;
+
+ orig_entry = batadv_tt_global_orig_entry_find(entry, orig_node);
+ if (orig_entry) {
+ found = true;
+ batadv_tt_orig_list_entry_free_ref(orig_entry);
+ }
+
return found;
}
static void
-batadv_tt_global_add_orig_entry(struct batadv_tt_global_entry *tt_global_entry,
+batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
struct batadv_orig_node *orig_node, int ttvn)
{
struct batadv_tt_orig_list_entry *orig_entry;
+ orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node);
+ if (orig_entry) {
+ /* refresh the ttvn: the current value could be a bogus one that
+ * was added during a "temporary client detection"
+ */
+ orig_entry->ttvn = ttvn;
+ goto out;
+ }
+
orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
if (!orig_entry)
- return;
+ goto out;
INIT_HLIST_NODE(&orig_entry->list);
atomic_inc(&orig_node->refcount);
atomic_inc(&orig_node->tt_size);
orig_entry->orig_node = orig_node;
orig_entry->ttvn = ttvn;
+ atomic_set(&orig_entry->refcount, 2);
- spin_lock_bh(&tt_global_entry->list_lock);
+ spin_lock_bh(&tt_global->list_lock);
hlist_add_head_rcu(&orig_entry->list,
- &tt_global_entry->orig_list);
- spin_unlock_bh(&tt_global_entry->list_lock);
+ &tt_global->orig_list);
+ spin_unlock_bh(&tt_global->list_lock);
+out:
+ if (orig_entry)
+ batadv_tt_orig_list_entry_free_ref(orig_entry);
}
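The new find/add pair leans on the refcount added to batadv_tt_orig_list_entry: a lookup may hand out an entry only if atomic_inc_not_zero() succeeds, since an entry whose count already dropped to zero is being torn down via call_rcu() and must not be revived. Stripped of the RCU list walk, the take/put protocol looks roughly like this (C11 atomics standing in for the kernel's atomic_t):

#include <stdatomic.h>
#include <stdbool.h>

struct entry { atomic_int refcount; /* starts at 2: list ref + creator */ };

/* reader side: refuse dying objects, like atomic_inc_not_zero() */
static bool entry_get(struct entry *e)
{
	int old = atomic_load(&e->refcount);

	while (old != 0)
		if (atomic_compare_exchange_weak(&e->refcount, &old, old + 1))
			return true;
	return false;                   /* already zero: being freed */
}

/* release side: the last put schedules the deferred free */
static void entry_put(struct entry *e)
{
	if (atomic_fetch_sub(&e->refcount, 1) == 1) {
		/* kernel code would call_rcu() here, deferring the free
		 * until all concurrent RCU readers are done
		 */
	}
}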
/* caller must hold orig_node refcount */
@@ -695,11 +739,12 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
common->flags = flags;
tt_global_entry->roam_at = 0;
atomic_set(&common->refcount, 2);
+ common->added_at = jiffies;
INIT_HLIST_HEAD(&tt_global_entry->orig_list);
spin_lock_init(&tt_global_entry->list_lock);
- hash_added = batadv_hash_add(bat_priv->tt_global_hash,
+ hash_added = batadv_hash_add(bat_priv->tt.global_hash,
batadv_compare_tt,
batadv_choose_orig, common,
&common->hash_entry);
@@ -709,11 +754,20 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
batadv_tt_global_entry_free_ref(tt_global_entry);
goto out_remove;
}
-
- batadv_tt_global_add_orig_entry(tt_global_entry, orig_node,
- ttvn);
} else {
- /* there is already a global entry, use this one. */
+ /* If there is already a global entry, we can use this one for
+ * our processing.
+ * But if we are trying to add a temporary client we can exit
+ * directly because the temporary information should never
+ * override any already known client state (whatever it is)
+ */
+ if (flags & BATADV_TT_CLIENT_TEMP)
+ goto out;
+
+ /* if the client was temporarily added before receiving the first
+ * OGM announcing it, we have to clear the TEMP flag
+ */
+ tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_TEMP;
/* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
* one originator left in the list and we previously received a
@@ -727,12 +781,9 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM;
tt_global_entry->roam_at = 0;
}
-
- if (!batadv_tt_global_entry_has_orig(tt_global_entry,
- orig_node))
- batadv_tt_global_add_orig_entry(tt_global_entry,
- orig_node, ttvn);
}
+ /* add the new orig_entry (if needed) or update it */
+ batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn);
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Creating new global tt entry: %pM (via %pM)\n",
@@ -771,11 +822,12 @@ batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
hlist_for_each_entry_rcu(orig_entry, node, head, list) {
flags = tt_common_entry->flags;
last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
- seq_printf(seq, " * %pM (%3u) via %pM (%3u) [%c%c]\n",
+ seq_printf(seq, " * %pM (%3u) via %pM (%3u) [%c%c%c]\n",
tt_global_entry->common.addr, orig_entry->ttvn,
orig_entry->orig_node->orig, last_ttvn,
(flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
- (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'));
+ (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
+ (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.'));
}
}
@@ -783,7 +835,7 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
{
struct net_device *net_dev = (struct net_device *)seq->private;
struct batadv_priv *bat_priv = netdev_priv(net_dev);
- struct batadv_hashtable *hash = bat_priv->tt_global_hash;
+ struct batadv_hashtable *hash = bat_priv->tt.global_hash;
struct batadv_tt_common_entry *tt_common_entry;
struct batadv_tt_global_entry *tt_global;
struct batadv_hard_iface *primary_if;
@@ -884,7 +936,7 @@ batadv_tt_global_del_struct(struct batadv_priv *bat_priv,
"Deleting global tt entry %pM: %s\n",
tt_global_entry->common.addr, message);
- batadv_hash_remove(bat_priv->tt_global_hash, batadv_compare_tt,
+ batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
batadv_choose_orig, tt_global_entry->common.addr);
batadv_tt_global_entry_free_ref(tt_global_entry);
@@ -995,7 +1047,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
struct batadv_tt_global_entry *tt_global;
struct batadv_tt_common_entry *tt_common_entry;
uint32_t i;
- struct batadv_hashtable *hash = bat_priv->tt_global_hash;
+ struct batadv_hashtable *hash = bat_priv->tt.global_hash;
struct hlist_node *node, *safe;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -1030,49 +1082,63 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
orig_node->tt_initialised = false;
}
-static void batadv_tt_global_roam_purge_list(struct batadv_priv *bat_priv,
- struct hlist_head *head)
+static bool batadv_tt_global_to_purge(struct batadv_tt_global_entry *tt_global,
+ char **msg)
{
- struct batadv_tt_common_entry *tt_common_entry;
- struct batadv_tt_global_entry *tt_global_entry;
- struct hlist_node *node, *node_tmp;
-
- hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head,
- hash_entry) {
- tt_global_entry = container_of(tt_common_entry,
- struct batadv_tt_global_entry,
- common);
- if (!(tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM))
- continue;
- if (!batadv_has_timed_out(tt_global_entry->roam_at,
- BATADV_TT_CLIENT_ROAM_TIMEOUT))
- continue;
+ bool purge = false;
+ unsigned long roam_timeout = BATADV_TT_CLIENT_ROAM_TIMEOUT;
+ unsigned long temp_timeout = BATADV_TT_CLIENT_TEMP_TIMEOUT;
- batadv_dbg(BATADV_DBG_TT, bat_priv,
- "Deleting global tt entry (%pM): Roaming timeout\n",
- tt_global_entry->common.addr);
+ if ((tt_global->common.flags & BATADV_TT_CLIENT_ROAM) &&
+ batadv_has_timed_out(tt_global->roam_at, roam_timeout)) {
+ purge = true;
+ *msg = "Roaming timeout\n";
+ }
- hlist_del_rcu(node);
- batadv_tt_global_entry_free_ref(tt_global_entry);
+ if ((tt_global->common.flags & BATADV_TT_CLIENT_TEMP) &&
+ batadv_has_timed_out(tt_global->common.added_at, temp_timeout)) {
+ purge = true;
+ *msg = "Temporary client timeout\n";
}
+
+ return purge;
}
-static void batadv_tt_global_roam_purge(struct batadv_priv *bat_priv)
+static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
{
- struct batadv_hashtable *hash = bat_priv->tt_global_hash;
+ struct batadv_hashtable *hash = bat_priv->tt.global_hash;
struct hlist_head *head;
+ struct hlist_node *node, *node_tmp;
spinlock_t *list_lock; /* protects write access to the hash lists */
uint32_t i;
+ char *msg = NULL;
+ struct batadv_tt_common_entry *tt_common;
+ struct batadv_tt_global_entry *tt_global;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
- batadv_tt_global_roam_purge_list(bat_priv, head);
+ hlist_for_each_entry_safe(tt_common, node, node_tmp, head,
+ hash_entry) {
+ tt_global = container_of(tt_common,
+ struct batadv_tt_global_entry,
+ common);
+
+ if (!batadv_tt_global_to_purge(tt_global, &msg))
+ continue;
+
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Deleting global tt entry (%pM): %s\n",
+ tt_global->common.addr, msg);
+
+ hlist_del_rcu(node);
+
+ batadv_tt_global_entry_free_ref(tt_global);
+ }
spin_unlock_bh(list_lock);
}
-
}
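Merging the roaming and temporary-client timeouts into batadv_tt_global_to_purge() keeps the hash walk generic: the predicate only answers whether to purge and reports the reason through an out-parameter, while the caller keeps all the list surgery. Schematically, with made-up flag names and timeouts:

#include <stdbool.h>

struct gentry {
	unsigned int flag_roam : 1;      /* placeholder for the ROAM flag */
	unsigned int flag_temp : 1;      /* placeholder for the TEMP flag */
	unsigned long roam_at, added_at;
};

static bool timed_out(unsigned long stamp, unsigned long timeout,
		      unsigned long now)
{
	return now - stamp > timeout;    /* batadv_has_timed_out() analogue */
}

/* decide only; the reason travels back through *msg for the debug log */
static bool entry_to_purge(const struct gentry *e, const char **msg,
			   unsigned long now)
{
	bool purge = false;

	if (e->flag_roam && timed_out(e->roam_at, 100, now)) {
		purge = true;
		*msg = "Roaming timeout";
	}
	if (e->flag_temp && timed_out(e->added_at, 50, now)) {
		purge = true;
		*msg = "Temporary client timeout";
	}
	return purge;
}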
static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
@@ -1085,10 +1151,10 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
struct hlist_head *head;
uint32_t i;
- if (!bat_priv->tt_global_hash)
+ if (!bat_priv->tt.global_hash)
return;
- hash = bat_priv->tt_global_hash;
+ hash = bat_priv->tt.global_hash;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -1108,7 +1174,7 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
batadv_hash_destroy(hash);
- bat_priv->tt_global_hash = NULL;
+ bat_priv->tt.global_hash = NULL;
}
static bool
@@ -1187,7 +1253,7 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node)
{
uint16_t total = 0, total_one;
- struct batadv_hashtable *hash = bat_priv->tt_global_hash;
+ struct batadv_hashtable *hash = bat_priv->tt.global_hash;
struct batadv_tt_common_entry *tt_common;
struct batadv_tt_global_entry *tt_global;
struct hlist_node *node;
@@ -1210,6 +1276,12 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
*/
if (tt_common->flags & BATADV_TT_CLIENT_ROAM)
continue;
+ /* Temporary clients have not been announced yet, so
+ * they have to be skipped while computing the global
+ * crc
+ */
+ if (tt_common->flags & BATADV_TT_CLIENT_TEMP)
+ continue;
/* find out if this global entry is announced by this
* originator
@@ -1234,7 +1306,7 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv)
{
uint16_t total = 0, total_one;
- struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+ struct batadv_hashtable *hash = bat_priv->tt.local_hash;
struct batadv_tt_common_entry *tt_common;
struct hlist_node *node;
struct hlist_head *head;
@@ -1267,14 +1339,14 @@ static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
{
struct batadv_tt_req_node *node, *safe;
- spin_lock_bh(&bat_priv->tt_req_list_lock);
+ spin_lock_bh(&bat_priv->tt.req_list_lock);
- list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
+ list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
list_del(&node->list);
kfree(node);
}
- spin_unlock_bh(&bat_priv->tt_req_list_lock);
+ spin_unlock_bh(&bat_priv->tt.req_list_lock);
}
static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv,
@@ -1304,15 +1376,15 @@ static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
{
struct batadv_tt_req_node *node, *safe;
- spin_lock_bh(&bat_priv->tt_req_list_lock);
- list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
+ spin_lock_bh(&bat_priv->tt.req_list_lock);
+ list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
if (batadv_has_timed_out(node->issued_at,
BATADV_TT_REQUEST_TIMEOUT)) {
list_del(&node->list);
kfree(node);
}
}
- spin_unlock_bh(&bat_priv->tt_req_list_lock);
+ spin_unlock_bh(&bat_priv->tt.req_list_lock);
}
/* returns the pointer to the new tt_req_node struct if no request
@@ -1324,8 +1396,8 @@ batadv_new_tt_req_node(struct batadv_priv *bat_priv,
{
struct batadv_tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
- spin_lock_bh(&bat_priv->tt_req_list_lock);
- list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
+ spin_lock_bh(&bat_priv->tt.req_list_lock);
+ list_for_each_entry(tt_req_node_tmp, &bat_priv->tt.req_list, list) {
if (batadv_compare_eth(tt_req_node_tmp, orig_node) &&
!batadv_has_timed_out(tt_req_node_tmp->issued_at,
BATADV_TT_REQUEST_TIMEOUT))
@@ -1339,9 +1411,9 @@ batadv_new_tt_req_node(struct batadv_priv *bat_priv,
memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
tt_req_node->issued_at = jiffies;
- list_add(&tt_req_node->list, &bat_priv->tt_req_list);
+ list_add(&tt_req_node->list, &bat_priv->tt.req_list);
unlock:
- spin_unlock_bh(&bat_priv->tt_req_list_lock);
+ spin_unlock_bh(&bat_priv->tt.req_list_lock);
return tt_req_node;
}
@@ -1363,7 +1435,8 @@ static int batadv_tt_global_valid(const void *entry_ptr,
const struct batadv_tt_global_entry *tt_global_entry;
const struct batadv_orig_node *orig_node = data_ptr;
- if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM)
+ if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM ||
+ tt_common_entry->flags & BATADV_TT_CLIENT_TEMP)
return 0;
tt_global_entry = container_of(tt_common_entry,
@@ -1507,9 +1580,9 @@ out:
if (ret)
kfree_skb(skb);
if (ret && tt_req_node) {
- spin_lock_bh(&bat_priv->tt_req_list_lock);
+ spin_lock_bh(&bat_priv->tt.req_list_lock);
list_del(&tt_req_node->list);
- spin_unlock_bh(&bat_priv->tt_req_list_lock);
+ spin_unlock_bh(&bat_priv->tt.req_list_lock);
kfree(tt_req_node);
}
return ret;
@@ -1530,6 +1603,7 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
uint16_t tt_len, tt_tot;
struct sk_buff *skb = NULL;
struct batadv_tt_query_packet *tt_response;
+ uint8_t *packet_pos;
size_t len;
batadv_dbg(BATADV_DBG_TT, bat_priv,
@@ -1583,8 +1657,8 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
goto unlock;
skb_reserve(skb, ETH_HLEN);
- tt_response = (struct batadv_tt_query_packet *)skb_put(skb,
- len);
+ packet_pos = skb_put(skb, len);
+ tt_response = (struct batadv_tt_query_packet *)packet_pos;
tt_response->ttvn = req_ttvn;
tt_response->tt_data = htons(tt_tot);
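skb_put() extends the tail of the buffer and returns a pointer to the start of the new area; routing that pointer through a packet_pos temporary before the cast changes nothing functionally and appears to be an 80-column cleanup repeated throughout this series. The same grow-then-cast move in toy form (buf_put() is a stand-in, the header struct is hypothetical):

#include <stddef.h>
#include <stdint.h>

struct response_hdr {            /* hypothetical wire header */
	uint8_t  ttvn;
	uint16_t tt_data;
} __attribute__((packed));

/* stand-in for skb_put(): extend the used region, return its old end */
static uint8_t *buf_put(uint8_t *buf, size_t *used, size_t len)
{
	uint8_t *pos = buf + *used;

	*used += len;
	return pos;
}

static void build_response(uint8_t *buf, size_t *used)
{
	/* take the pointer first, cast on its own line */
	uint8_t *packet_pos = buf_put(buf, used, sizeof(struct response_hdr));
	struct response_hdr *resp = (struct response_hdr *)packet_pos;

	resp->ttvn = 0;
	resp->tt_data = 0;
}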
@@ -1600,7 +1674,7 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
skb = batadv_tt_response_fill_table(tt_len, ttvn,
- bat_priv->tt_global_hash,
+ bat_priv->tt.global_hash,
primary_if,
batadv_tt_global_valid,
req_dst_orig_node);
@@ -1663,6 +1737,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
uint16_t tt_len, tt_tot;
struct sk_buff *skb = NULL;
struct batadv_tt_query_packet *tt_response;
+ uint8_t *packet_pos;
size_t len;
batadv_dbg(BATADV_DBG_TT, bat_priv,
@@ -1671,7 +1746,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
(tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
- my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
+ my_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
req_ttvn = tt_request->ttvn;
orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
@@ -1690,7 +1765,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
* is too big send the whole local translation table
*/
if (tt_request->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn ||
- !bat_priv->tt_buff)
+ !bat_priv->tt.last_changeset)
full_table = true;
else
full_table = false;
@@ -1699,8 +1774,8 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
* I'll send only one packet with as many TT entries as I can
*/
if (!full_table) {
- spin_lock_bh(&bat_priv->tt_buff_lock);
- tt_len = bat_priv->tt_buff_len;
+ spin_lock_bh(&bat_priv->tt.last_changeset_lock);
+ tt_len = bat_priv->tt.last_changeset_len;
tt_tot = tt_len / sizeof(struct batadv_tt_change);
len = sizeof(*tt_response) + tt_len;
@@ -1709,22 +1784,22 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
goto unlock;
skb_reserve(skb, ETH_HLEN);
- tt_response = (struct batadv_tt_query_packet *)skb_put(skb,
- len);
+ packet_pos = skb_put(skb, len);
+ tt_response = (struct batadv_tt_query_packet *)packet_pos;
tt_response->ttvn = req_ttvn;
tt_response->tt_data = htons(tt_tot);
tt_buff = skb->data + sizeof(*tt_response);
- memcpy(tt_buff, bat_priv->tt_buff,
- bat_priv->tt_buff_len);
- spin_unlock_bh(&bat_priv->tt_buff_lock);
+ memcpy(tt_buff, bat_priv->tt.last_changeset,
+ bat_priv->tt.last_changeset_len);
+ spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
} else {
- tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt);
+ tt_len = (uint16_t)atomic_read(&bat_priv->tt.local_entry_num);
tt_len *= sizeof(struct batadv_tt_change);
- ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
+ ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
skb = batadv_tt_response_fill_table(tt_len, ttvn,
- bat_priv->tt_local_hash,
+ bat_priv->tt.local_hash,
primary_if,
batadv_tt_local_valid_entry,
NULL);
@@ -1756,7 +1831,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
goto out;
unlock:
- spin_unlock_bh(&bat_priv->tt_buff_lock);
+ spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
out:
if (orig_node)
batadv_orig_node_free_ref(orig_node);
@@ -1909,14 +1984,14 @@ void batadv_handle_tt_response(struct batadv_priv *bat_priv,
}
/* Delete the tt_req_node from pending tt_requests list */
- spin_lock_bh(&bat_priv->tt_req_list_lock);
- list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
+ spin_lock_bh(&bat_priv->tt.req_list_lock);
+ list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
if (!batadv_compare_eth(node->addr, tt_response->src))
continue;
list_del(&node->list);
kfree(node);
}
- spin_unlock_bh(&bat_priv->tt_req_list_lock);
+ spin_unlock_bh(&bat_priv->tt.req_list_lock);
/* Recalculate the CRC for this orig_node and store it */
orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
@@ -1950,22 +2025,22 @@ static void batadv_tt_roam_list_free(struct batadv_priv *bat_priv)
{
struct batadv_tt_roam_node *node, *safe;
- spin_lock_bh(&bat_priv->tt_roam_list_lock);
+ spin_lock_bh(&bat_priv->tt.roam_list_lock);
- list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
+ list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
list_del(&node->list);
kfree(node);
}
- spin_unlock_bh(&bat_priv->tt_roam_list_lock);
+ spin_unlock_bh(&bat_priv->tt.roam_list_lock);
}
static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
{
struct batadv_tt_roam_node *node, *safe;
- spin_lock_bh(&bat_priv->tt_roam_list_lock);
- list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
+ spin_lock_bh(&bat_priv->tt.roam_list_lock);
+ list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
if (!batadv_has_timed_out(node->first_time,
BATADV_ROAMING_MAX_TIME))
continue;
@@ -1973,7 +2048,7 @@ static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
list_del(&node->list);
kfree(node);
}
- spin_unlock_bh(&bat_priv->tt_roam_list_lock);
+ spin_unlock_bh(&bat_priv->tt.roam_list_lock);
}
/* This function checks whether the client already reached the
@@ -1988,11 +2063,11 @@ static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv,
struct batadv_tt_roam_node *tt_roam_node;
bool ret = false;
- spin_lock_bh(&bat_priv->tt_roam_list_lock);
+ spin_lock_bh(&bat_priv->tt.roam_list_lock);
/* The new tt_req will be issued only if I'm not waiting for a
* reply from the same orig_node yet
*/
- list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
+ list_for_each_entry(tt_roam_node, &bat_priv->tt.roam_list, list) {
if (!batadv_compare_eth(tt_roam_node->addr, client))
continue;
@@ -2017,12 +2092,12 @@ static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv,
BATADV_ROAMING_MAX_COUNT - 1);
memcpy(tt_roam_node->addr, client, ETH_ALEN);
- list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
+ list_add(&tt_roam_node->list, &bat_priv->tt.roam_list);
ret = true;
}
unlock:
- spin_unlock_bh(&bat_priv->tt_roam_list_lock);
+ spin_unlock_bh(&bat_priv->tt.roam_list_lock);
return ret;
}
@@ -2086,13 +2161,15 @@ out:
static void batadv_tt_purge(struct work_struct *work)
{
struct delayed_work *delayed_work;
+ struct batadv_priv_tt *priv_tt;
struct batadv_priv *bat_priv;
delayed_work = container_of(work, struct delayed_work, work);
- bat_priv = container_of(delayed_work, struct batadv_priv, tt_work);
+ priv_tt = container_of(delayed_work, struct batadv_priv_tt, work);
+ bat_priv = container_of(priv_tt, struct batadv_priv, tt);
batadv_tt_local_purge(bat_priv);
- batadv_tt_global_roam_purge(bat_priv);
+ batadv_tt_global_purge(bat_priv);
batadv_tt_req_purge(bat_priv);
batadv_tt_roam_purge(bat_priv);
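Since the delayed work item now lives inside the new batadv_priv_tt sub-struct rather than directly in batadv_priv, the purge worker needs a chain of container_of() calls: work_struct to delayed_work, delayed_work to batadv_priv_tt, then batadv_priv_tt to batadv_priv. Each hop is a compile-time offset subtraction. A runnable miniature showing two hops:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };
struct priv_tt { int vn; struct work work; };   /* mirrors batadv_priv_tt */
struct priv { int mesh_state; struct priv_tt tt; };

static void purge(struct work *w)
{
	/* hop 1: the embedded work item back to its sub-struct */
	struct priv_tt *tt = container_of(w, struct priv_tt, work);
	/* hop 2: the sub-struct back to the enclosing private data */
	struct priv *p = container_of(tt, struct priv, tt);

	printf("vn=%d mesh_state=%d\n", tt->vn, p->mesh_state);
}

int main(void)
{
	struct priv p = { .mesh_state = 1, .tt = { .vn = 7 } };

	purge(&p.tt.work);
	return 0;
}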
@@ -2101,7 +2178,7 @@ static void batadv_tt_purge(struct work_struct *work)
void batadv_tt_free(struct batadv_priv *bat_priv)
{
- cancel_delayed_work_sync(&bat_priv->tt_work);
+ cancel_delayed_work_sync(&bat_priv->tt.work);
batadv_tt_local_table_free(bat_priv);
batadv_tt_global_table_free(bat_priv);
@@ -2109,7 +2186,7 @@ void batadv_tt_free(struct batadv_priv *bat_priv)
batadv_tt_changes_list_free(bat_priv);
batadv_tt_roam_list_free(bat_priv);
- kfree(bat_priv->tt_buff);
+ kfree(bat_priv->tt.last_changeset);
}
/* This function will enable or disable the specified flags for all the entries
@@ -2153,7 +2230,7 @@ out:
/* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */
static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
{
- struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+ struct batadv_hashtable *hash = bat_priv->tt.local_hash;
struct batadv_tt_common_entry *tt_common;
struct batadv_tt_local_entry *tt_local;
struct hlist_node *node, *node_tmp;
@@ -2178,7 +2255,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
"Deleting local tt entry (%pM): pending\n",
tt_common->addr);
- atomic_dec(&bat_priv->num_local_tt);
+ atomic_dec(&bat_priv->tt.local_entry_num);
hlist_del_rcu(node);
tt_local = container_of(tt_common,
struct batadv_tt_local_entry,
@@ -2196,26 +2273,26 @@ static int batadv_tt_commit_changes(struct batadv_priv *bat_priv,
{
uint16_t changed_num = 0;
- if (atomic_read(&bat_priv->tt_local_changes) < 1)
+ if (atomic_read(&bat_priv->tt.local_changes) < 1)
return -ENOENT;
- changed_num = batadv_tt_set_flags(bat_priv->tt_local_hash,
+ changed_num = batadv_tt_set_flags(bat_priv->tt.local_hash,
BATADV_TT_CLIENT_NEW, false);
/* all reset entries have to be counted as local entries */
- atomic_add(changed_num, &bat_priv->num_local_tt);
+ atomic_add(changed_num, &bat_priv->tt.local_entry_num);
batadv_tt_local_purge_pending_clients(bat_priv);
- bat_priv->tt_crc = batadv_tt_local_crc(bat_priv);
+ bat_priv->tt.local_crc = batadv_tt_local_crc(bat_priv);
/* Increment the TTVN only once per OGM interval */
- atomic_inc(&bat_priv->ttvn);
+ atomic_inc(&bat_priv->tt.vn);
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Local changes committed, updating to ttvn %u\n",
- (uint8_t)atomic_read(&bat_priv->ttvn));
- bat_priv->tt_poss_change = false;
+ (uint8_t)atomic_read(&bat_priv->tt.vn));
+ bat_priv->tt.poss_change = false;
/* reset the sending counter */
- atomic_set(&bat_priv->tt_ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
+ atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
return batadv_tt_changes_fill_buff(bat_priv, packet_buff,
packet_buff_len, packet_min_len);
@@ -2235,7 +2312,7 @@ int batadv_tt_append_diff(struct batadv_priv *bat_priv,
/* if the changes have been sent often enough */
if ((tt_num_changes < 0) &&
- (!batadv_atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))) {
+ (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))) {
batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
packet_min_len, packet_min_len);
tt_num_changes = 0;
@@ -2366,3 +2443,22 @@ bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
out:
return ret;
}
+
+bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const unsigned char *addr)
+{
+ bool ret = false;
+
+ if (!batadv_tt_global_add(bat_priv, orig_node, addr,
+ BATADV_TT_CLIENT_TEMP,
+ atomic_read(&orig_node->last_ttvn)))
+ goto out;
+
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Added temporary global client (addr: %pM orig: %pM)\n",
+ addr, orig_node->orig);
+ ret = true;
+out:
+ return ret;
+}
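Tying the series together: when batadv_interface_rx() sees traffic from a client no OGM has announced yet, the new batadv_tt_add_temporary_global_entry() installs a global entry flagged BATADV_TT_CLIENT_TEMP. Such entries are deliberately second-class: they never override known state, are skipped by the CRC and by full-table responses, show up with a 'T' marker in debugfs, and expire after BATADV_TT_CLIENT_TEMP_TIMEOUT unless a real announcement clears the flag. A compact sketch of that lifecycle (flag values and timeouts are placeholders):

#include <stdbool.h>

#define TT_CLIENT_TEMP  (1 << 0)    /* placeholder flag values */
#define TT_CLIENT_ROAM  (1 << 1)

struct global_entry { unsigned int flags; unsigned long added_at; };

/* data frame seen before any OGM: add a provisional entry */
static void on_unannounced_frame(struct global_entry *e, unsigned long now)
{
	e->flags |= TT_CLIENT_TEMP;
	e->added_at = now;
}

/* first OGM announcing the client: promote the entry */
static void on_ogm_announcement(struct global_entry *e)
{
	e->flags &= ~TT_CLIENT_TEMP;
}

/* TEMP entries are invisible to CRC and table responses ... */
static bool counts_for_crc(const struct global_entry *e)
{
	return !(e->flags & (TT_CLIENT_TEMP | TT_CLIENT_ROAM));
}

/* ... and expire if never confirmed */
static bool temp_expired(const struct global_entry *e, unsigned long now,
			 unsigned long timeout)
{
	return (e->flags & TT_CLIENT_TEMP) && now - e->added_at > timeout;
}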
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index ffa87355096..811fffd4760 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -59,6 +59,8 @@ int batadv_tt_append_diff(struct batadv_priv *bat_priv,
int packet_min_len);
bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
uint8_t *addr);
-
+bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const unsigned char *addr);
#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 12635fd2c3d..2ed82caacdc 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -145,6 +145,11 @@ struct batadv_bcast_duplist_entry {
#endif
enum batadv_counters {
+ BATADV_CNT_TX,
+ BATADV_CNT_TX_BYTES,
+ BATADV_CNT_TX_DROPPED,
+ BATADV_CNT_RX,
+ BATADV_CNT_RX_BYTES,
BATADV_CNT_FORWARD,
BATADV_CNT_FORWARD_BYTES,
BATADV_CNT_MGMT_TX,
@@ -160,6 +165,67 @@ enum batadv_counters {
BATADV_CNT_NUM,
};
+/**
+ * struct batadv_priv_tt - per mesh interface translation table data
+ * @vn: translation table version number
+ * @ogm_append_cnt: counter of number of OGMs containing the local tt diff
+ * @local_changes: changes registered in an originator interval
+ * @poss_change: Detect an ongoing roaming phase. If true, then this node
+ * received a roaming_adv and has to inspect every packet directed to it to
+ * check whether it still is the true destination or not. This flag will be
+ * reset to false as soon as this node's ttvn is increased
+ * @changes_list: tracks tt local changes within an originator interval
+ * @local_hash: local translation table hash table
+ * @global_hash: global translation table hash table
+ * @req_list: list of pending & unanswered tt_requests
+ * @roam_list: list of the last roaming events of each client
+ * @local_entry_num: number of entries in the local translation table
+ * @local_crc: Checksum of the local table, recomputed before sending a new OGM
+ * @last_changeset: last tt changeset this host has generated
+ * @last_changeset_len: length of last tt changeset this host has generated
+ * @work: work queue callback item for translation table purging
+ */
+struct batadv_priv_tt {
+ atomic_t vn;
+ atomic_t ogm_append_cnt;
+ atomic_t local_changes;
+ bool poss_change;
+ struct list_head changes_list;
+ struct batadv_hashtable *local_hash;
+ struct batadv_hashtable *global_hash;
+ struct list_head req_list;
+ struct list_head roam_list;
+ spinlock_t changes_list_lock; /* protects changes_list */
+ spinlock_t req_list_lock; /* protects req_list */
+ spinlock_t roam_list_lock; /* protects roam_list */
+ atomic_t local_entry_num;
+ uint16_t local_crc;
+ unsigned char *last_changeset;
+ int16_t last_changeset_len;
+ spinlock_t last_changeset_lock; /* protects last_changeset */
+ struct delayed_work work;
+};
+
+#ifdef CONFIG_BATMAN_ADV_BLA
+struct batadv_priv_bla {
+ atomic_t num_requests; /* number of bla requests in flight */
+ struct batadv_hashtable *claim_hash;
+ struct batadv_hashtable *backbone_hash;
+ struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
+ int bcast_duplist_curr;
+ struct batadv_bla_claim_dst claim_dest;
+ struct delayed_work work;
+};
+#endif
+
+struct batadv_priv_gw {
+ struct hlist_head list;
+ spinlock_t list_lock; /* protects gw_list and curr_gw */
+ struct batadv_gw_node __rcu *curr_gw; /* rcu protected pointer */
+ atomic_t reselect;
+};
+
+struct batadv_priv_vis {
+ struct list_head send_list;
+ struct batadv_hashtable *hash;
+ spinlock_t hash_lock; /* protects hash */
+ spinlock_t list_lock; /* protects info::recv_list */
+ struct delayed_work work;
+ struct batadv_vis_info *my_info;
+};
+
struct batadv_priv {
atomic_t mesh_state;
struct net_device_stats stats;
@@ -179,64 +245,24 @@ struct batadv_priv {
atomic_t bcast_seqno;
atomic_t bcast_queue_left;
atomic_t batman_queue_left;
- atomic_t ttvn; /* translation table version number */
- atomic_t tt_ogm_append_cnt;
- atomic_t tt_local_changes; /* changes registered in a OGM interval */
- atomic_t bla_num_requests; /* number of bla requests in flight */
- /* The tt_poss_change flag is used to detect an ongoing roaming phase.
- * If true, then I received a Roaming_adv and I have to inspect every
- * packet directed to me to check whether I am still the true
- * destination or not. This flag will be reset to false as soon as I
- * increase my TTVN
- */
- bool tt_poss_change;
char num_ifaces;
struct batadv_debug_log *debug_log;
struct kobject *mesh_obj;
struct dentry *debug_dir;
struct hlist_head forw_bat_list;
struct hlist_head forw_bcast_list;
- struct hlist_head gw_list;
- struct list_head tt_changes_list; /* tracks changes in a OGM int */
- struct list_head vis_send_list;
struct batadv_hashtable *orig_hash;
- struct batadv_hashtable *tt_local_hash;
- struct batadv_hashtable *tt_global_hash;
-#ifdef CONFIG_BATMAN_ADV_BLA
- struct batadv_hashtable *claim_hash;
- struct batadv_hashtable *backbone_hash;
-#endif
- struct list_head tt_req_list; /* list of pending tt_requests */
- struct list_head tt_roam_list;
- struct batadv_hashtable *vis_hash;
-#ifdef CONFIG_BATMAN_ADV_BLA
- struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
- int bcast_duplist_curr;
- struct batadv_bla_claim_dst claim_dest;
-#endif
spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */
- spinlock_t tt_changes_list_lock; /* protects tt_changes */
- spinlock_t tt_req_list_lock; /* protects tt_req_list */
- spinlock_t tt_roam_list_lock; /* protects tt_roam_list */
- spinlock_t gw_list_lock; /* protects gw_list and curr_gw */
- spinlock_t vis_hash_lock; /* protects vis_hash */
- spinlock_t vis_list_lock; /* protects vis_info::recv_list */
- atomic_t num_local_tt;
- /* Checksum of the local table, recomputed before sending a new OGM */
- uint16_t tt_crc;
- unsigned char *tt_buff;
- int16_t tt_buff_len;
- spinlock_t tt_buff_lock; /* protects tt_buff */
- struct delayed_work tt_work;
struct delayed_work orig_work;
- struct delayed_work vis_work;
- struct delayed_work bla_work;
- struct batadv_gw_node __rcu *curr_gw; /* rcu protected pointer */
- atomic_t gw_reselect;
struct batadv_hard_iface __rcu *primary_if; /* rcu protected pointer */
- struct batadv_vis_info *my_vis_info;
struct batadv_algo_ops *bat_algo_ops;
+#ifdef CONFIG_BATMAN_ADV_BLA
+ struct batadv_priv_bla bla;
+#endif
+ struct batadv_priv_gw gw;
+ struct batadv_priv_tt tt;
+ struct batadv_priv_vis vis;
};
struct batadv_socket_client {
@@ -258,6 +284,7 @@ struct batadv_tt_common_entry {
uint8_t addr[ETH_ALEN];
struct hlist_node hash_entry;
uint16_t flags;
+ unsigned long added_at;
atomic_t refcount;
struct rcu_head rcu;
};
@@ -277,6 +304,7 @@ struct batadv_tt_global_entry {
struct batadv_tt_orig_list_entry {
struct batadv_orig_node *orig_node;
uint8_t ttvn;
+ atomic_t refcount;
struct rcu_head rcu;
struct hlist_node list;
};
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 00164645b3f..f39723281ca 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -39,6 +39,7 @@ batadv_frag_merge_packet(struct list_head *head,
struct batadv_unicast_packet *unicast_packet;
int hdr_len = sizeof(*unicast_packet);
int uni_diff = sizeof(*up) - hdr_len;
+ uint8_t *packet_pos;
up = (struct batadv_unicast_frag_packet *)skb->data;
/* set skb to the first part and tmp_skb to the second part */
@@ -65,8 +66,8 @@ batadv_frag_merge_packet(struct list_head *head,
kfree_skb(tmp_skb);
memmove(skb->data + uni_diff, skb->data, hdr_len);
- unicast_packet = (struct batadv_unicast_packet *)skb_pull(skb,
- uni_diff);
+ packet_pos = skb_pull(skb, uni_diff);
+ unicast_packet = (struct batadv_unicast_packet *)packet_pos;
unicast_packet->header.packet_type = BATADV_UNICAST;
return skb;
@@ -121,6 +122,7 @@ batadv_frag_search_packet(struct list_head *head,
{
struct batadv_frag_packet_list_entry *tfp;
struct batadv_unicast_frag_packet *tmp_up = NULL;
+ int is_head_tmp, is_head;
uint16_t search_seqno;
if (up->flags & BATADV_UNI_FRAG_HEAD)
@@ -128,6 +130,8 @@ batadv_frag_search_packet(struct list_head *head,
else
search_seqno = ntohs(up->seqno)-1;
+ is_head = !!(up->flags & BATADV_UNI_FRAG_HEAD);
+
list_for_each_entry(tfp, head, list) {
if (!tfp->skb)
@@ -139,9 +143,8 @@ batadv_frag_search_packet(struct list_head *head,
tmp_up = (struct batadv_unicast_frag_packet *)tfp->skb->data;
if (tfp->seqno == search_seqno) {
-
- if ((tmp_up->flags & BATADV_UNI_FRAG_HEAD) !=
- (up->flags & BATADV_UNI_FRAG_HEAD))
+ is_head_tmp = !!(tmp_up->flags & BATADV_UNI_FRAG_HEAD);
+ if (is_head_tmp != is_head)
return tfp;
else
goto mov_tail;
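The !! double negation collapses any non-zero masked value to exactly 1, so the head/non-head status of the incoming fragment can be computed once before the loop and compared against each candidate as a clean boolean. In isolation:

#include <stdbool.h>

#define UNI_FRAG_HEAD 0x01      /* placeholder flag bit */

/* !! maps any non-zero masked result to exactly 1 */
static bool frag_is_head(unsigned int flags)
{
	return !!(flags & UNI_FRAG_HEAD);
}

/* a head fragment pairs with a non-head fragment of the same sequence */
static bool frags_pair(unsigned int flags_a, unsigned int flags_b)
{
	return frag_is_head(flags_a) != frag_is_head(flags_b);
}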
@@ -334,8 +337,7 @@ find_router:
/* copy the destination for faster routing */
memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
/* set the destination tt version number */
- unicast_packet->ttvn =
- (uint8_t)atomic_read(&orig_node->last_ttvn);
+ unicast_packet->ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
/* inform the destination node that we are still missing a correct route
* for this client. The destination will receive this packet and will
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index 2a2ea068146..5abd1454fb0 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -41,13 +41,13 @@ static void batadv_free_info(struct kref *ref)
bat_priv = info->bat_priv;
list_del_init(&info->send_list);
- spin_lock_bh(&bat_priv->vis_list_lock);
+ spin_lock_bh(&bat_priv->vis.list_lock);
list_for_each_entry_safe(entry, tmp, &info->recv_list, list) {
list_del(&entry->list);
kfree(entry);
}
- spin_unlock_bh(&bat_priv->vis_list_lock);
+ spin_unlock_bh(&bat_priv->vis.list_lock);
kfree_skb(info->skb_packet);
kfree(info);
}
@@ -94,7 +94,7 @@ static uint32_t batadv_vis_info_choose(const void *data, uint32_t size)
static struct batadv_vis_info *
batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data)
{
- struct batadv_hashtable *hash = bat_priv->vis_hash;
+ struct batadv_hashtable *hash = bat_priv->vis.hash;
struct hlist_head *head;
struct hlist_node *node;
struct batadv_vis_info *vis_info, *vis_info_tmp = NULL;
@@ -252,7 +252,7 @@ int batadv_vis_seq_print_text(struct seq_file *seq, void *offset)
struct hlist_head *head;
struct net_device *net_dev = (struct net_device *)seq->private;
struct batadv_priv *bat_priv = netdev_priv(net_dev);
- struct batadv_hashtable *hash = bat_priv->vis_hash;
+ struct batadv_hashtable *hash = bat_priv->vis.hash;
uint32_t i;
int ret = 0;
int vis_server = atomic_read(&bat_priv->vis_mode);
@@ -264,12 +264,12 @@ int batadv_vis_seq_print_text(struct seq_file *seq, void *offset)
if (vis_server == BATADV_VIS_TYPE_CLIENT_UPDATE)
goto out;
- spin_lock_bh(&bat_priv->vis_hash_lock);
+ spin_lock_bh(&bat_priv->vis.hash_lock);
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
batadv_vis_seq_print_text_bucket(seq, head);
}
- spin_unlock_bh(&bat_priv->vis_hash_lock);
+ spin_unlock_bh(&bat_priv->vis.hash_lock);
out:
if (primary_if)
@@ -285,7 +285,7 @@ static void batadv_send_list_add(struct batadv_priv *bat_priv,
{
if (list_empty(&info->send_list)) {
kref_get(&info->refcount);
- list_add_tail(&info->send_list, &bat_priv->vis_send_list);
+ list_add_tail(&info->send_list, &bat_priv->vis.send_list);
}
}
@@ -311,9 +311,9 @@ static void batadv_recv_list_add(struct batadv_priv *bat_priv,
return;
memcpy(entry->mac, mac, ETH_ALEN);
- spin_lock_bh(&bat_priv->vis_list_lock);
+ spin_lock_bh(&bat_priv->vis.list_lock);
list_add_tail(&entry->list, recv_list);
- spin_unlock_bh(&bat_priv->vis_list_lock);
+ spin_unlock_bh(&bat_priv->vis.list_lock);
}
/* returns 1 if this mac is in the recv_list */
@@ -323,14 +323,14 @@ static int batadv_recv_list_is_in(struct batadv_priv *bat_priv,
{
const struct batadv_recvlist_node *entry;
- spin_lock_bh(&bat_priv->vis_list_lock);
+ spin_lock_bh(&bat_priv->vis.list_lock);
list_for_each_entry(entry, recv_list, list) {
if (batadv_compare_eth(entry->mac, mac)) {
- spin_unlock_bh(&bat_priv->vis_list_lock);
+ spin_unlock_bh(&bat_priv->vis.list_lock);
return 1;
}
}
- spin_unlock_bh(&bat_priv->vis_list_lock);
+ spin_unlock_bh(&bat_priv->vis.list_lock);
return 0;
}
@@ -354,7 +354,7 @@ batadv_add_packet(struct batadv_priv *bat_priv,
*is_new = 0;
/* sanity check */
- if (!bat_priv->vis_hash)
+ if (!bat_priv->vis.hash)
return NULL;
/* see if the packet is already in vis_hash */
@@ -385,7 +385,7 @@ batadv_add_packet(struct batadv_priv *bat_priv,
}
}
/* remove old entry */
- batadv_hash_remove(bat_priv->vis_hash, batadv_vis_info_cmp,
+ batadv_hash_remove(bat_priv->vis.hash, batadv_vis_info_cmp,
batadv_vis_info_choose, old_info);
batadv_send_list_del(old_info);
kref_put(&old_info->refcount, batadv_free_info);
@@ -426,7 +426,7 @@ batadv_add_packet(struct batadv_priv *bat_priv,
batadv_recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);
/* try to add it */
- hash_added = batadv_hash_add(bat_priv->vis_hash, batadv_vis_info_cmp,
+ hash_added = batadv_hash_add(bat_priv->vis.hash, batadv_vis_info_cmp,
batadv_vis_info_choose, info,
&info->hash_entry);
if (hash_added != 0) {
@@ -449,7 +449,7 @@ void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv,
make_broadcast = (vis_server == BATADV_VIS_TYPE_SERVER_SYNC);
- spin_lock_bh(&bat_priv->vis_hash_lock);
+ spin_lock_bh(&bat_priv->vis.hash_lock);
info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
&is_new, make_broadcast);
if (!info)
@@ -461,7 +461,7 @@ void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv,
if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC && is_new)
batadv_send_list_add(bat_priv, info);
end:
- spin_unlock_bh(&bat_priv->vis_hash_lock);
+ spin_unlock_bh(&bat_priv->vis.hash_lock);
}
/* handle an incoming client update packet and schedule forward if needed. */
@@ -484,7 +484,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
batadv_is_my_mac(vis_packet->target_orig))
are_target = 1;
- spin_lock_bh(&bat_priv->vis_hash_lock);
+ spin_lock_bh(&bat_priv->vis.hash_lock);
info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
&is_new, are_target);
@@ -505,7 +505,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
}
end:
- spin_unlock_bh(&bat_priv->vis_hash_lock);
+ spin_unlock_bh(&bat_priv->vis.hash_lock);
}
/* Walk the originators and find the VIS server with the best tq. Set the packet
@@ -574,10 +574,11 @@ static int batadv_generate_vis_packet(struct batadv_priv *bat_priv)
struct hlist_head *head;
struct batadv_orig_node *orig_node;
struct batadv_neigh_node *router;
- struct batadv_vis_info *info = bat_priv->my_vis_info;
+ struct batadv_vis_info *info = bat_priv->vis.my_info;
struct batadv_vis_packet *packet;
struct batadv_vis_info_entry *entry;
struct batadv_tt_common_entry *tt_common_entry;
+ uint8_t *packet_pos;
int best_tq = -1;
uint32_t i;
@@ -618,8 +619,8 @@ static int batadv_generate_vis_packet(struct batadv_priv *bat_priv)
goto next;
/* fill one entry into buffer. */
- entry = (struct batadv_vis_info_entry *)
- skb_put(info->skb_packet, sizeof(*entry));
+ packet_pos = skb_put(info->skb_packet, sizeof(*entry));
+ entry = (struct batadv_vis_info_entry *)packet_pos;
memcpy(entry->src,
router->if_incoming->net_dev->dev_addr,
ETH_ALEN);
@@ -636,7 +637,7 @@ next:
rcu_read_unlock();
}
- hash = bat_priv->tt_local_hash;
+ hash = bat_priv->tt.local_hash;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -644,9 +645,8 @@ next:
rcu_read_lock();
hlist_for_each_entry_rcu(tt_common_entry, node, head,
hash_entry) {
- entry = (struct batadv_vis_info_entry *)
- skb_put(info->skb_packet,
- sizeof(*entry));
+ packet_pos = skb_put(info->skb_packet, sizeof(*entry));
+ entry = (struct batadv_vis_info_entry *)packet_pos;
memset(entry->src, 0, ETH_ALEN);
memcpy(entry->dest, tt_common_entry->addr, ETH_ALEN);
entry->quality = 0; /* 0 means TT */
@@ -671,7 +671,7 @@ unlock:
static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
{
uint32_t i;
- struct batadv_hashtable *hash = bat_priv->vis_hash;
+ struct batadv_hashtable *hash = bat_priv->vis.hash;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
struct batadv_vis_info *info;
@@ -682,7 +682,7 @@ static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
hlist_for_each_entry_safe(info, node, node_tmp,
head, hash_entry) {
/* never purge own data. */
- if (info == bat_priv->my_vis_info)
+ if (info == bat_priv->vis.my_info)
continue;
if (batadv_has_timed_out(info->first_seen,
@@ -814,34 +814,36 @@ out:
/* called from timer; send (and maybe generate) vis packet. */
static void batadv_send_vis_packets(struct work_struct *work)
{
- struct delayed_work *delayed_work =
- container_of(work, struct delayed_work, work);
+ struct delayed_work *delayed_work;
struct batadv_priv *bat_priv;
+ struct batadv_priv_vis *priv_vis;
struct batadv_vis_info *info;
- bat_priv = container_of(delayed_work, struct batadv_priv, vis_work);
- spin_lock_bh(&bat_priv->vis_hash_lock);
+ delayed_work = container_of(work, struct delayed_work, work);
+ priv_vis = container_of(delayed_work, struct batadv_priv_vis, work);
+ bat_priv = container_of(priv_vis, struct batadv_priv, vis);
+ spin_lock_bh(&bat_priv->vis.hash_lock);
batadv_purge_vis_packets(bat_priv);
if (batadv_generate_vis_packet(bat_priv) == 0) {
/* schedule if generation was successful */
- batadv_send_list_add(bat_priv, bat_priv->my_vis_info);
+ batadv_send_list_add(bat_priv, bat_priv->vis.my_info);
}
- while (!list_empty(&bat_priv->vis_send_list)) {
- info = list_first_entry(&bat_priv->vis_send_list,
+ while (!list_empty(&bat_priv->vis.send_list)) {
+ info = list_first_entry(&bat_priv->vis.send_list,
typeof(*info), send_list);
kref_get(&info->refcount);
- spin_unlock_bh(&bat_priv->vis_hash_lock);
+ spin_unlock_bh(&bat_priv->vis.hash_lock);
batadv_send_vis_packet(bat_priv, info);
- spin_lock_bh(&bat_priv->vis_hash_lock);
+ spin_lock_bh(&bat_priv->vis.hash_lock);
batadv_send_list_del(info);
kref_put(&info->refcount, batadv_free_info);
}
- spin_unlock_bh(&bat_priv->vis_hash_lock);
+ spin_unlock_bh(&bat_priv->vis.hash_lock);
batadv_start_vis_timer(bat_priv);
}
@@ -856,37 +858,37 @@ int batadv_vis_init(struct batadv_priv *bat_priv)
unsigned long first_seen;
struct sk_buff *tmp_skb;
- if (bat_priv->vis_hash)
+ if (bat_priv->vis.hash)
return 0;
- spin_lock_bh(&bat_priv->vis_hash_lock);
+ spin_lock_bh(&bat_priv->vis.hash_lock);
- bat_priv->vis_hash = batadv_hash_new(256);
- if (!bat_priv->vis_hash) {
+ bat_priv->vis.hash = batadv_hash_new(256);
+ if (!bat_priv->vis.hash) {
pr_err("Can't initialize vis_hash\n");
goto err;
}
- bat_priv->my_vis_info = kmalloc(BATADV_MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
- if (!bat_priv->my_vis_info)
+ bat_priv->vis.my_info = kmalloc(BATADV_MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
+ if (!bat_priv->vis.my_info)
goto err;
len = sizeof(*packet) + BATADV_MAX_VIS_PACKET_SIZE + ETH_HLEN;
- bat_priv->my_vis_info->skb_packet = dev_alloc_skb(len);
- if (!bat_priv->my_vis_info->skb_packet)
+ bat_priv->vis.my_info->skb_packet = dev_alloc_skb(len);
+ if (!bat_priv->vis.my_info->skb_packet)
goto free_info;
- skb_reserve(bat_priv->my_vis_info->skb_packet, ETH_HLEN);
- tmp_skb = bat_priv->my_vis_info->skb_packet;
+ skb_reserve(bat_priv->vis.my_info->skb_packet, ETH_HLEN);
+ tmp_skb = bat_priv->vis.my_info->skb_packet;
packet = (struct batadv_vis_packet *)skb_put(tmp_skb, sizeof(*packet));
/* prefill the vis info */
first_seen = jiffies - msecs_to_jiffies(BATADV_VIS_INTERVAL);
- bat_priv->my_vis_info->first_seen = first_seen;
- INIT_LIST_HEAD(&bat_priv->my_vis_info->recv_list);
- INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list);
- kref_init(&bat_priv->my_vis_info->refcount);
- bat_priv->my_vis_info->bat_priv = bat_priv;
+ bat_priv->vis.my_info->first_seen = first_seen;
+ INIT_LIST_HEAD(&bat_priv->vis.my_info->recv_list);
+ INIT_LIST_HEAD(&bat_priv->vis.my_info->send_list);
+ kref_init(&bat_priv->vis.my_info->refcount);
+ bat_priv->vis.my_info->bat_priv = bat_priv;
packet->header.version = BATADV_COMPAT_VERSION;
packet->header.packet_type = BATADV_VIS;
packet->header.ttl = BATADV_TTL;
@@ -894,28 +896,28 @@ int batadv_vis_init(struct batadv_priv *bat_priv)
packet->reserved = 0;
packet->entries = 0;
- INIT_LIST_HEAD(&bat_priv->vis_send_list);
+ INIT_LIST_HEAD(&bat_priv->vis.send_list);
- hash_added = batadv_hash_add(bat_priv->vis_hash, batadv_vis_info_cmp,
+ hash_added = batadv_hash_add(bat_priv->vis.hash, batadv_vis_info_cmp,
batadv_vis_info_choose,
- bat_priv->my_vis_info,
- &bat_priv->my_vis_info->hash_entry);
+ bat_priv->vis.my_info,
+ &bat_priv->vis.my_info->hash_entry);
if (hash_added != 0) {
pr_err("Can't add own vis packet into hash\n");
/* not in hash, need to remove it manually. */
- kref_put(&bat_priv->my_vis_info->refcount, batadv_free_info);
+ kref_put(&bat_priv->vis.my_info->refcount, batadv_free_info);
goto err;
}
- spin_unlock_bh(&bat_priv->vis_hash_lock);
+ spin_unlock_bh(&bat_priv->vis.hash_lock);
batadv_start_vis_timer(bat_priv);
return 0;
free_info:
- kfree(bat_priv->my_vis_info);
- bat_priv->my_vis_info = NULL;
+ kfree(bat_priv->vis.my_info);
+ bat_priv->vis.my_info = NULL;
err:
- spin_unlock_bh(&bat_priv->vis_hash_lock);
+ spin_unlock_bh(&bat_priv->vis.hash_lock);
batadv_vis_quit(bat_priv);
return -ENOMEM;
}
@@ -933,23 +935,23 @@ static void batadv_free_info_ref(struct hlist_node *node, void *arg)
/* shutdown vis-server */
void batadv_vis_quit(struct batadv_priv *bat_priv)
{
- if (!bat_priv->vis_hash)
+ if (!bat_priv->vis.hash)
return;
- cancel_delayed_work_sync(&bat_priv->vis_work);
+ cancel_delayed_work_sync(&bat_priv->vis.work);
- spin_lock_bh(&bat_priv->vis_hash_lock);
+ spin_lock_bh(&bat_priv->vis.hash_lock);
/* properly remove, kill timers ... */
- batadv_hash_delete(bat_priv->vis_hash, batadv_free_info_ref, NULL);
- bat_priv->vis_hash = NULL;
- bat_priv->my_vis_info = NULL;
- spin_unlock_bh(&bat_priv->vis_hash_lock);
+ batadv_hash_delete(bat_priv->vis.hash, batadv_free_info_ref, NULL);
+ bat_priv->vis.hash = NULL;
+ bat_priv->vis.my_info = NULL;
+ spin_unlock_bh(&bat_priv->vis.hash_lock);
}
/* schedule packets for (re)transmission */
static void batadv_start_vis_timer(struct batadv_priv *bat_priv)
{
- INIT_DELAYED_WORK(&bat_priv->vis_work, batadv_send_vis_packets);
- queue_delayed_work(batadv_event_workqueue, &bat_priv->vis_work,
+ INIT_DELAYED_WORK(&bat_priv->vis.work, batadv_send_vis_packets);
+ queue_delayed_work(batadv_event_workqueue, &bat_priv->vis.work,
msecs_to_jiffies(BATADV_VIS_INTERVAL));
}
diff --git a/net/batman-adv/vis.h b/net/batman-adv/vis.h
index 84e716ed896..873282fa86d 100644
--- a/net/batman-adv/vis.h
+++ b/net/batman-adv/vis.h
@@ -20,7 +20,7 @@
#ifndef _NET_BATMAN_ADV_VIS_H_
#define _NET_BATMAN_ADV_VIS_H_
-/* timeout of vis packets in miliseconds */
+/* timeout of vis packets in milliseconds */
#define BATADV_VIS_TIMEOUT 200000
int batadv_vis_seq_print_text(struct seq_file *seq, void *offset);
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 69e38db28e5..a8020293f34 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -84,7 +84,6 @@ int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid)
return -1;
}
} else {
- pr_info("client%lld fsid %pU\n", ceph_client_id(client), fsid);
memcpy(&client->fsid, fsid, sizeof(*fsid));
}
return 0;
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
index 54b531a0112..38b5dc1823d 100644
--- a/net/ceph/debugfs.c
+++ b/net/ceph/debugfs.c
@@ -189,6 +189,9 @@ int ceph_debugfs_client_init(struct ceph_client *client)
snprintf(name, sizeof(name), "%pU.client%lld", &client->fsid,
client->monc.auth->global_id);
+ dout("ceph_debugfs_client_init %p %s\n", client, name);
+
+ BUG_ON(client->debugfs_dir);
client->debugfs_dir = debugfs_create_dir(name, ceph_debugfs_dir);
if (!client->debugfs_dir)
goto out;
@@ -234,6 +237,7 @@ out:
void ceph_debugfs_client_cleanup(struct ceph_client *client)
{
+ dout("ceph_debugfs_client_cleanup %p\n", client);
debugfs_remove(client->debugfs_osdmap);
debugfs_remove(client->debugfs_monmap);
debugfs_remove(client->osdc.debugfs_file);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index b9796750034..24c5eea8c45 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -915,7 +915,6 @@ static int prepare_write_connect(struct ceph_connection *con)
con->out_connect.authorizer_len = auth ?
cpu_to_le32(auth->authorizer_buf_len) : 0;
- con_out_kvec_reset(con);
con_out_kvec_add(con, sizeof (con->out_connect),
&con->out_connect);
if (auth && auth->authorizer_buf_len)
@@ -1557,6 +1556,7 @@ static int process_connect(struct ceph_connection *con)
return -1;
}
con->auth_retry = 1;
+ con_out_kvec_reset(con);
ret = prepare_write_connect(con);
if (ret < 0)
return ret;
@@ -1577,6 +1577,7 @@ static int process_connect(struct ceph_connection *con)
ENTITY_NAME(con->peer_name),
ceph_pr_addr(&con->peer_addr.in_addr));
reset_connection(con);
+ con_out_kvec_reset(con);
ret = prepare_write_connect(con);
if (ret < 0)
return ret;
@@ -1601,6 +1602,7 @@ static int process_connect(struct ceph_connection *con)
le32_to_cpu(con->out_connect.connect_seq),
le32_to_cpu(con->in_reply.connect_seq));
con->connect_seq = le32_to_cpu(con->in_reply.connect_seq);
+ con_out_kvec_reset(con);
ret = prepare_write_connect(con);
if (ret < 0)
return ret;
@@ -1617,6 +1619,7 @@ static int process_connect(struct ceph_connection *con)
le32_to_cpu(con->in_reply.global_seq));
get_global_seq(con->msgr,
le32_to_cpu(con->in_reply.global_seq));
+ con_out_kvec_reset(con);
ret = prepare_write_connect(con);
if (ret < 0)
return ret;
@@ -2135,7 +2138,11 @@ more:
BUG_ON(con->state != CON_STATE_CONNECTING);
con->state = CON_STATE_NEGOTIATING;
- /* Banner is good, exchange connection info */
+ /*
+ * Received banner is good, exchange connection info.
+ * Do not reset out_kvec, as sending our banner raced
+ * with receiving peer banner after connect completed.
+ */
ret = prepare_write_connect(con);
if (ret < 0)
goto out;
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 105d533b55f..900ea0f043f 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -311,6 +311,17 @@ int ceph_monc_open_session(struct ceph_mon_client *monc)
EXPORT_SYMBOL(ceph_monc_open_session);
/*
+ * We require the fsid and global_id in order to initialize our
+ * debugfs dir.
+ */
+static bool have_debugfs_info(struct ceph_mon_client *monc)
+{
+ dout("have_debugfs_info fsid %d globalid %lld\n",
+ (int)monc->client->have_fsid, monc->auth->global_id);
+ return monc->client->have_fsid && monc->auth->global_id > 0;
+}
+
+/*
* The monitor responds with a mount ack indicating mount success. The
* included client ticket allows the client to talk to MDSs and OSDs.
*/
@@ -320,9 +331,12 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc,
struct ceph_client *client = monc->client;
struct ceph_monmap *monmap = NULL, *old = monc->monmap;
void *p, *end;
+ int had_debugfs_info, init_debugfs = 0;
mutex_lock(&monc->mutex);
+ had_debugfs_info = have_debugfs_info(monc);
+
dout("handle_monmap\n");
p = msg->front.iov_base;
end = p + msg->front.iov_len;
@@ -344,12 +358,22 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc,
if (!client->have_fsid) {
client->have_fsid = true;
+ if (!had_debugfs_info && have_debugfs_info(monc)) {
+ pr_info("client%lld fsid %pU\n",
+ ceph_client_id(monc->client),
+ &monc->client->fsid);
+ init_debugfs = 1;
+ }
mutex_unlock(&monc->mutex);
- /*
- * do debugfs initialization without mutex to avoid
- * creating a locking dependency
- */
- ceph_debugfs_client_init(client);
+
+ if (init_debugfs) {
+ /*
+ * do debugfs initialization without mutex to avoid
+ * creating a locking dependency
+ */
+ ceph_debugfs_client_init(monc->client);
+ }
+
goto out_unlocked;
}
out:
@@ -865,8 +889,10 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
{
int ret;
int was_auth = 0;
+ int had_debugfs_info, init_debugfs = 0;
mutex_lock(&monc->mutex);
+ had_debugfs_info = have_debugfs_info(monc);
if (monc->auth->ops)
was_auth = monc->auth->ops->is_authenticated(monc->auth);
monc->pending_auth = 0;
@@ -889,7 +915,22 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
__send_subscribe(monc);
__resend_generic_request(monc);
}
+
+ if (!had_debugfs_info && have_debugfs_info(monc)) {
+ pr_info("client%lld fsid %pU\n",
+ ceph_client_id(monc->client),
+ &monc->client->fsid);
+ init_debugfs = 1;
+ }
mutex_unlock(&monc->mutex);
+
+ if (init_debugfs) {
+ /*
+ * do debugfs initialization without mutex to avoid
+ * creating a locking dependency
+ */
+ ceph_debugfs_client_init(monc->client);
+ }
}
static int __validate_auth(struct ceph_mon_client *monc)
diff --git a/net/core/dev.c b/net/core/dev.c
index 0640d2a859c..b1e6d638551 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1466,8 +1466,7 @@ EXPORT_SYMBOL(unregister_netdevice_notifier);
int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
- if (val != NETDEV_UNREGISTER_FINAL)
- ASSERT_RTNL();
+ ASSERT_RTNL();
return raw_notifier_call_chain(&netdev_chain, val, dev);
}
EXPORT_SYMBOL(call_netdevice_notifiers);
@@ -2185,9 +2184,7 @@ EXPORT_SYMBOL(netif_skb_features);
/*
* Returns true if either:
* 1. skb has frag_list and the device doesn't support FRAGLIST, or
- * 2. skb is fragmented and the device does not support SG, or if
- * at least one of fragments is in highmem and device does not
- * support DMA from it.
+ * 2. skb is fragmented and the device does not support SG.
*/
static inline int skb_needs_linearize(struct sk_buff *skb,
int features)
@@ -4521,8 +4518,8 @@ static void dev_change_rx_flags(struct net_device *dev, int flags)
static int __dev_set_promiscuity(struct net_device *dev, int inc)
{
unsigned int old_flags = dev->flags;
- uid_t uid;
- gid_t gid;
+ kuid_t uid;
+ kgid_t gid;
ASSERT_RTNL();
@@ -4554,7 +4551,8 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc)
dev->name, (dev->flags & IFF_PROMISC),
(old_flags & IFF_PROMISC),
audit_get_loginuid(current),
- uid, gid,
+ from_kuid(&init_user_ns, uid),
+ from_kgid(&init_user_ns, gid),
audit_get_sessionid(current));
}
@@ -5649,6 +5647,8 @@ int register_netdevice(struct net_device *dev)
set_bit(__LINK_STATE_PRESENT, &dev->state);
+ linkwatch_init_dev(dev);
+
dev_init_scheduler(dev);
dev_hold(dev);
list_netdevice(dev);
@@ -5782,7 +5782,11 @@ static void netdev_wait_allrefs(struct net_device *dev)
/* Rebroadcast unregister notification */
call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
+
+ __rtnl_unlock();
rcu_barrier();
+ rtnl_lock();
+
call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
&dev->state)) {
@@ -5855,7 +5859,9 @@ void netdev_run_todo(void)
= list_first_entry(&list, struct net_device, todo_list);
list_del(&dev->todo_list);
+ rtnl_lock();
call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
+ __rtnl_unlock();
if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
pr_err("network todo '%s' but state %d\n",
@@ -6251,6 +6257,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
the device is just moving and can keep their slaves up.
*/
call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
+ rcu_barrier();
+ call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
/*
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 585093755c2..ab7db83236c 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -711,16 +711,15 @@ static int fib_rules_event(struct notifier_block *this, unsigned long event,
struct net *net = dev_net(dev);
struct fib_rules_ops *ops;
+ ASSERT_RTNL();
switch (event) {
case NETDEV_REGISTER:
- ASSERT_RTNL();
list_for_each_entry(ops, &net->rules_ops, list)
attach_rules(&ops->rules_list, dev);
break;
case NETDEV_UNREGISTER:
- ASSERT_RTNL();
list_for_each_entry(ops, &net->rules_ops, list)
detach_rules(&ops->rules_list, dev);
break;
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index c3519c6d1b1..a01922219a2 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -76,6 +76,14 @@ static void rfc2863_policy(struct net_device *dev)
}
+void linkwatch_init_dev(struct net_device *dev)
+{
+ /* Handle pre-registration link state changes */
+ if (!netif_carrier_ok(dev) || netif_dormant(dev))
+ rfc2863_policy(dev);
+}
+
+
static bool linkwatch_urgent_event(struct net_device *dev)
{
if (!netif_running(dev))
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 346b1eb83a1..dd67818025d 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -168,24 +168,16 @@ static void poll_napi(struct net_device *dev)
struct napi_struct *napi;
int budget = 16;
- WARN_ON_ONCE(!irqs_disabled());
-
list_for_each_entry(napi, &dev->napi_list, dev_list) {
- local_irq_enable();
if (napi->poll_owner != smp_processor_id() &&
spin_trylock(&napi->poll_lock)) {
- rcu_read_lock_bh();
budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
napi, budget);
- rcu_read_unlock_bh();
spin_unlock(&napi->poll_lock);
- if (!budget) {
- local_irq_disable();
+ if (!budget)
break;
- }
}
- local_irq_disable();
}
}
@@ -388,6 +380,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
struct udphdr *udph;
struct iphdr *iph;
struct ethhdr *eth;
+ static atomic_t ip_ident;
udp_len = len + sizeof(*udph);
ip_len = udp_len + sizeof(*iph);
@@ -423,7 +416,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
put_unaligned(0x45, (unsigned char *)iph);
iph->tos = 0;
put_unaligned(htons(ip_len), &(iph->tot_len));
- iph->id = 0;
+ iph->id = htons(atomic_inc_return(&ip_ident));
iph->frag_off = 0;
iph->ttl = 64;
iph->protocol = IPPROTO_UDP;
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 9b570a6a33c..c31d9e8668c 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -15,6 +15,7 @@
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/tcp.h>
#include <linux/vmalloc.h>
#include <net/request_sock.h>
@@ -130,3 +131,97 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
kfree(lopt);
}
+/*
+ * This function is called to set a Fast Open socket's "fastopen_rsk" field
+ * to NULL when a TFO socket no longer needs to access the request_sock.
+ * This happens only after 3WHS has been either completed or aborted (e.g.,
+ * RST is received).
+ *
+ * Before TFO, a child socket is created only after 3WHS is completed,
+ * hence it never needs to access the request_sock. Things get a lot more
+ * complex with TFO. A child socket, accepted or not, has to access its
+ * request_sock for 3WHS processing, e.g., to retransmit SYN-ACK pkts,
+ * until 3WHS is either completed or aborted. Afterwards the req will stay
+ * until either the child socket is accepted, or in the rare case when the
+ * listener is closed before the child is accepted.
+ *
+ * In short, a request socket is only freed after BOTH the 3WHS has completed
+ * (or been aborted) and the child socket has been accepted (or the listener closed).
+ * When a child socket is accepted, its corresponding req->sk is set to
+ * NULL since it's no longer needed. More importantly, "req->sk == NULL"
+ * will be used by the code below to determine if a child socket has been
+ * accepted or not, and the check is protected by the fastopenq->lock
+ * described below.
+ *
+ * Note that fastopen_rsk is only accessed from the child socket's context
+ * with its socket lock held. But a request_sock (req) can be accessed by
+ * both its child socket through fastopen_rsk, and a listener socket through
+ * icsk_accept_queue.rskq_accept_head. To protect the access, a simple spin
+ * lock per listener, "icsk->icsk_accept_queue.fastopenq->lock", is created.
+ * Only in the rare case when both the listener and the child locks are held,
+ * e.g., in inet_csk_listen_stop(), do we not need to acquire the lock.
+ * The lock also protects other fields such as fastopenq->qlen, which is
+ * decremented by this function when fastopen_rsk is no longer needed.
+ *
+ * Note that another solution was to simply use the existing socket lock
+ * from the listener. But first, the socket lock is difficult to use. It is not
+ * a simple spin lock - one must consider sock_owned_by_user() and arrange
+ * to use sk_add_backlog() stuff. But what really makes it infeasible is the
+ * locking hierarchy violation. E.g., inet_csk_listen_stop() may try to
+ * acquire a child's lock while holding listener's socket lock. A corner
+ * case might also exist in tcp_v4_hnd_req() that will trigger this locking
+ * order.
+ *
+ * When a TFO req is created, it needs to sock_hold its listener to prevent
+ * the latter data structure from going away.
+ *
+ * This function also sets "treq->listener" to NULL and unreferences the
+ * listener socket. treq->listener is used by the listener, so it is protected by the
+ * fastopenq->lock in this function.
+ */
+void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
+ bool reset)
+{
+ struct sock *lsk = tcp_rsk(req)->listener;
+ struct fastopen_queue *fastopenq =
+ inet_csk(lsk)->icsk_accept_queue.fastopenq;
+
+ BUG_ON(!spin_is_locked(&sk->sk_lock.slock) && !sock_owned_by_user(sk));
+
+ tcp_sk(sk)->fastopen_rsk = NULL;
+ spin_lock_bh(&fastopenq->lock);
+ fastopenq->qlen--;
+ tcp_rsk(req)->listener = NULL;
+ if (req->sk) /* the child socket hasn't been accepted yet */
+ goto out;
+
+ if (!reset || lsk->sk_state != TCP_LISTEN) {
+ /* If the listener has been closed, don't bother with the
+ * special RST handling below.
+ */
+ spin_unlock_bh(&fastopenq->lock);
+ sock_put(lsk);
+ reqsk_free(req);
+ return;
+ }
+ /* Wait for 60 secs before removing a req that has triggered an RST.
+ * This is a simple defense against a TFO spoofing attack - by
+ * counting the req against fastopen.max_qlen, and disabling
+ * TFO when the qlen exceeds max_qlen.
+ *
+ * For more details see CoNext'11 "TCP Fast Open" paper.
+ */
+ req->expires = jiffies + 60*HZ;
+ if (fastopenq->rskq_rst_head == NULL)
+ fastopenq->rskq_rst_head = req;
+ else
+ fastopenq->rskq_rst_tail->dl_next = req;
+
+ req->dl_next = NULL;
+ fastopenq->rskq_rst_tail = req;
+ fastopenq->qlen++;
+out:
+ spin_unlock_bh(&fastopenq->lock);
+ sock_put(lsk);
+ return;
+}
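The lifecycle described in the comment block above is driven from userspace by clients that put data in the SYN. As a point of reference, here is a minimal client-side sketch - not part of this patch - that exercises the passive Fast Open path, assuming a kernel with TFO support and a hypothetical server on 127.0.0.1:8080; MSG_FASTOPEN may need to be defined by hand on older headers (0x20000000 is its value).

/* Hypothetical TFO client: the payload rides in the SYN via MSG_FASTOPEN. */
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef MSG_FASTOPEN
#define MSG_FASTOPEN 0x20000000
#endif

int main(void)
{
	struct sockaddr_in addr;
	const char req[] = "hello";
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(8080);	/* example port, an assumption */
	inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);

	/* No connect(): on first contact this SYN requests a cookie; with a
	 * cached cookie the data is delivered during the 3WHS, creating
	 * exactly the request_sock lifetime discussed above. */
	if (sendto(fd, req, sizeof(req) - 1, MSG_FASTOPEN,
		   (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("sendto(MSG_FASTOPEN)");
	close(fd);
	return 0;
}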
diff --git a/net/core/scm.c b/net/core/scm.c
index 040cebeed45..6ab491d6c26 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -45,12 +45,17 @@
static __inline__ int scm_check_creds(struct ucred *creds)
{
const struct cred *cred = current_cred();
+ kuid_t uid = make_kuid(cred->user_ns, creds->uid);
+ kgid_t gid = make_kgid(cred->user_ns, creds->gid);
+
+ if (!uid_valid(uid) || !gid_valid(gid))
+ return -EINVAL;
if ((creds->pid == task_tgid_vnr(current) || capable(CAP_SYS_ADMIN)) &&
- ((creds->uid == cred->uid || creds->uid == cred->euid ||
- creds->uid == cred->suid) || capable(CAP_SETUID)) &&
- ((creds->gid == cred->gid || creds->gid == cred->egid ||
- creds->gid == cred->sgid) || capable(CAP_SETGID))) {
+ ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) ||
+ uid_eq(uid, cred->suid)) || capable(CAP_SETUID)) &&
+ ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) ||
+ gid_eq(gid, cred->sgid)) || capable(CAP_SETGID))) {
return 0;
}
return -EPERM;
@@ -149,6 +154,9 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
goto error;
break;
case SCM_CREDENTIALS:
+ {
+ kuid_t uid;
+ kgid_t gid;
if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct ucred)))
goto error;
memcpy(&p->creds, CMSG_DATA(cmsg), sizeof(struct ucred));
@@ -166,22 +174,29 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
p->pid = pid;
}
+ err = -EINVAL;
+ uid = make_kuid(current_user_ns(), p->creds.uid);
+ gid = make_kgid(current_user_ns(), p->creds.gid);
+ if (!uid_valid(uid) || !gid_valid(gid))
+ goto error;
+
if (!p->cred ||
- (p->cred->euid != p->creds.uid) ||
- (p->cred->egid != p->creds.gid)) {
+ !uid_eq(p->cred->euid, uid) ||
+ !gid_eq(p->cred->egid, gid)) {
struct cred *cred;
err = -ENOMEM;
cred = prepare_creds();
if (!cred)
goto error;
- cred->uid = cred->euid = p->creds.uid;
- cred->gid = cred->egid = p->creds.gid;
+ cred->uid = cred->euid = uid;
+ cred->gid = cred->egid = gid;
if (p->cred)
put_cred(p->cred);
p->cred = cred;
}
break;
+ }
default:
goto error;
}
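For context on what __scm_send() is now validating: the control message arrives from userspace as a struct ucred under SCM_CREDENTIALS, and the new code maps its uid/gid into kuid_t/kgid_t before the permission checks. A hedged sketch of the sending side, assuming an already-connected AF_UNIX socket descriptor:

/* Hypothetical sender of the SCM_CREDENTIALS cmsg checked above. */
#define _GNU_SOURCE
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

static ssize_t send_with_creds(int fd, const void *buf, size_t len)
{
	struct msghdr msg = { 0 };
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	char cbuf[CMSG_SPACE(sizeof(struct ucred))];
	struct cmsghdr *cmsg;
	struct ucred creds = {
		.pid = getpid(),
		.uid = getuid(),
		.gid = getgid(),
	};

	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_CREDENTIALS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(creds));
	memcpy(CMSG_DATA(cmsg), &creds, sizeof(creds));

	/* The kernel verifies these against the caller (the scm_check_creds()
	 * path above); lying requires CAP_SETUID/CAP_SETGID/CAP_SYS_ADMIN. */
	return sendmsg(fd, &msg, 0);
}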
diff --git a/net/core/sock.c b/net/core/sock.c
index 8f67ced8d6a..d765156eab6 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -868,8 +868,8 @@ void cred_to_ucred(struct pid *pid, const struct cred *cred,
if (cred) {
struct user_namespace *current_ns = current_user_ns();
- ucred->uid = from_kuid(current_ns, cred->euid);
- ucred->gid = from_kgid(current_ns, cred->egid);
+ ucred->uid = from_kuid_munged(current_ns, cred->euid);
+ ucred->gid = from_kgid_munged(current_ns, cred->egid);
}
}
EXPORT_SYMBOL_GPL(cred_to_ucred);
@@ -1230,7 +1230,7 @@ void sock_update_classid(struct sock *sk)
rcu_read_lock(); /* doing current task, which cannot vanish. */
classid = task_cls_classid(current);
rcu_read_unlock();
- if (classid && classid != sk->sk_classid)
+ if (classid != sk->sk_classid)
sk->sk_classid = classid;
}
EXPORT_SYMBOL(sock_update_classid);
@@ -1527,12 +1527,12 @@ void sock_edemux(struct sk_buff *skb)
}
EXPORT_SYMBOL(sock_edemux);
-int sock_i_uid(struct sock *sk)
+kuid_t sock_i_uid(struct sock *sk)
{
- int uid;
+ kuid_t uid;
read_lock_bh(&sk->sk_callback_lock);
- uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
+ uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
read_unlock_bh(&sk->sk_callback_lock);
return uid;
}
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 2ba1a2814c2..307c322d53b 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1313,10 +1313,10 @@ static int dn_shutdown(struct socket *sock, int how)
if (scp->state == DN_O)
goto out;
- if (how != SHUTDOWN_MASK)
+ if (how != SHUT_RDWR)
goto out;
- sk->sk_shutdown = how;
+ sk->sk_shutdown = SHUTDOWN_MASK;
dn_destroy_sock(sk);
err = 0;
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 6a095225148..d5291113584 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -1063,12 +1063,6 @@ out:
return (err < 0 ? NETDEV_TX_BUSY : NETDEV_TX_OK);
}
-static void lowpan_dev_free(struct net_device *dev)
-{
- dev_put(lowpan_dev_info(dev)->real_dev);
- free_netdev(dev);
-}
-
static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
{
struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
@@ -1118,7 +1112,7 @@ static void lowpan_setup(struct net_device *dev)
dev->netdev_ops = &lowpan_netdev_ops;
dev->header_ops = &lowpan_header_ops;
dev->ml_priv = &lowpan_mlme;
- dev->destructor = lowpan_dev_free;
+ dev->destructor = free_netdev;
}
static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -1133,6 +1127,8 @@ static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
+ struct sk_buff *local_skb;
+
if (!netif_running(dev))
goto drop;
@@ -1144,7 +1140,12 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */
case LOWPAN_DISPATCH_FRAG1: /* first fragment header */
case LOWPAN_DISPATCH_FRAGN: /* next fragments headers */
- lowpan_process_data(skb);
+ local_skb = skb_clone(skb, GFP_ATOMIC);
+ if (!local_skb)
+ goto drop;
+ lowpan_process_data(local_skb);
+
+ kfree_skb(skb);
break;
default:
break;
@@ -1237,6 +1238,34 @@ static inline void __init lowpan_netlink_fini(void)
rtnl_link_unregister(&lowpan_link_ops);
}
+static int lowpan_device_event(struct notifier_block *unused,
+ unsigned long event,
+ void *ptr)
+{
+ struct net_device *dev = ptr;
+ LIST_HEAD(del_list);
+ struct lowpan_dev_record *entry, *tmp;
+
+ if (dev->type != ARPHRD_IEEE802154)
+ goto out;
+
+ if (event == NETDEV_UNREGISTER) {
+ list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
+ if (lowpan_dev_info(entry->ldev)->real_dev == dev)
+ lowpan_dellink(entry->ldev, &del_list);
+ }
+
+ unregister_netdevice_many(&del_list);
+ }
+
+out:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block lowpan_dev_notifier = {
+ .notifier_call = lowpan_device_event,
+};
+
static struct packet_type lowpan_packet_type = {
.type = __constant_htons(ETH_P_IEEE802154),
.func = lowpan_rcv,
@@ -1251,6 +1280,12 @@ static int __init lowpan_init_module(void)
goto out;
dev_add_pack(&lowpan_packet_type);
+
+ err = register_netdevice_notifier(&lowpan_dev_notifier);
+ if (err < 0) {
+ dev_remove_pack(&lowpan_packet_type);
+ lowpan_netlink_fini();
+ }
out:
return err;
}
@@ -1263,6 +1298,8 @@ static void __exit lowpan_cleanup_module(void)
dev_remove_pack(&lowpan_packet_type);
+ unregister_netdevice_notifier(&lowpan_dev_notifier);
+
/* Now that the 6lowpan packet_type is removed, no new fragments are
* expected on RX; this is therefore the time to clean up incomplete
* fragments.
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 6681ccf5c3e..4f70ef0b946 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -149,6 +149,11 @@ void inet_sock_destruct(struct sock *sk)
pr_err("Attempt to release alive inet socket %p\n", sk);
return;
}
+ if (sk->sk_type == SOCK_STREAM) {
+ struct fastopen_queue *fastopenq =
+ inet_csk(sk)->icsk_accept_queue.fastopenq;
+ kfree(fastopenq);
+ }
WARN_ON(atomic_read(&sk->sk_rmem_alloc));
WARN_ON(atomic_read(&sk->sk_wmem_alloc));
@@ -212,6 +217,26 @@ int inet_listen(struct socket *sock, int backlog)
* we can only allow the backlog to be adjusted.
*/
if (old_state != TCP_LISTEN) {
+ /* Check special setups for testing purposes to enable TFO w/o
+ * requiring the TCP_FASTOPEN sockopt.
+ * Note that only TCP sockets (SOCK_STREAM) will reach here.
+ * Also, fastopenq may already have been allocated because this
+ * socket was in TCP_LISTEN state previously but was
+ * shutdown() (rather than close()).
+ */
+ if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) != 0 &&
+ inet_csk(sk)->icsk_accept_queue.fastopenq == NULL) {
+ if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) != 0)
+ err = fastopen_init_queue(sk, backlog);
+ else if ((sysctl_tcp_fastopen &
+ TFO_SERVER_WO_SOCKOPT2) != 0)
+ err = fastopen_init_queue(sk,
+ ((uint)sysctl_tcp_fastopen) >> 16);
+ else
+ err = 0;
+ if (err)
+ goto out;
+ }
err = inet_csk_listen_start(sk, backlog);
if (err)
goto out;
@@ -701,7 +726,8 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags)
sock_rps_record_flow(sk2);
WARN_ON(!((1 << sk2->sk_state) &
- (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE)));
+ (TCPF_ESTABLISHED | TCPF_SYN_RECV |
+ TCPF_CLOSE_WAIT | TCPF_CLOSE)));
sock_graft(sk2, newsock);
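The inet_listen() hunk above shows the two ways the fastopen queue gets allocated: the TFO_SERVER_WO_SOCKOPT* sysctl test hooks, or the normal route via the TCP_FASTOPEN socket option (see the do_tcp_setsockopt() change in net/ipv4/tcp.c further down). A minimal server-side sketch of the normal route - not part of this patch; TCP_FASTOPEN may be absent from older userspace headers (its value is 23):

/* Hypothetical TFO listener reaching the fastopen_init_queue() path above. */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

#ifndef TCP_FASTOPEN
#define TCP_FASTOPEN 23
#endif

int tfo_listen_socket(unsigned short port)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int qlen = 16;	/* max pending TFO requests, counted against max_qlen */
	struct sockaddr_in addr;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(port);

	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
	listen(fd, 128);
	return fd;	/* error handling elided for brevity */
}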
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 6a5e6e4b142..adf273f8ad2 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1147,12 +1147,8 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *dev = ptr;
- struct in_device *in_dev;
-
- if (event == NETDEV_UNREGISTER_FINAL)
- goto out;
+ struct in_device *in_dev = __in_dev_get_rtnl(dev);
- in_dev = __in_dev_get_rtnl(dev);
ASSERT_RTNL();
if (!in_dev) {
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index fd7d9ae64f1..acdee325d97 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1050,9 +1050,6 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
return NOTIFY_DONE;
}
- if (event == NETDEV_UNREGISTER_FINAL)
- return NOTIFY_DONE;
-
in_dev = __in_dev_get_rtnl(dev);
switch (event) {
@@ -1064,14 +1061,14 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
fib_sync_up(dev);
#endif
atomic_inc(&net->ipv4.dev_addr_genid);
- rt_cache_flush(dev_net(dev), -1);
+ rt_cache_flush(net, -1);
break;
case NETDEV_DOWN:
fib_disable_ip(dev, 0, 0);
break;
case NETDEV_CHANGEMTU:
case NETDEV_CHANGE:
- rt_cache_flush(dev_net(dev), 0);
+ rt_cache_flush(net, 0);
break;
}
return NOTIFY_DONE;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 7f75f21d7b8..8464b79c493 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -283,7 +283,9 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
{
struct inet_connection_sock *icsk = inet_csk(sk);
+ struct request_sock_queue *queue = &icsk->icsk_accept_queue;
struct sock *newsk;
+ struct request_sock *req;
int error;
lock_sock(sk);
@@ -296,7 +298,7 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
goto out_err;
/* Find already established connection */
- if (reqsk_queue_empty(&icsk->icsk_accept_queue)) {
+ if (reqsk_queue_empty(queue)) {
long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
/* If this is a non blocking socket don't sleep */
@@ -308,14 +310,32 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
if (error)
goto out_err;
}
-
- newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
- WARN_ON(newsk->sk_state == TCP_SYN_RECV);
+ req = reqsk_queue_remove(queue);
+ newsk = req->sk;
+
+ sk_acceptq_removed(sk);
+ if (sk->sk_type == SOCK_STREAM && queue->fastopenq != NULL) {
+ spin_lock_bh(&queue->fastopenq->lock);
+ if (tcp_rsk(req)->listener) {
+ /* We are still waiting for the final ACK from 3WHS
+ * so can't free req now. Instead, we set req->sk to
+ * NULL to signify that the child socket is taken
+ * so reqsk_fastopen_remove() will free the req
+ * when 3WHS finishes (or is aborted).
+ */
+ req->sk = NULL;
+ req = NULL;
+ }
+ spin_unlock_bh(&queue->fastopenq->lock);
+ }
out:
release_sock(sk);
+ if (req)
+ __reqsk_free(req);
return newsk;
out_err:
newsk = NULL;
+ req = NULL;
*err = error;
goto out;
}
@@ -720,13 +740,14 @@ EXPORT_SYMBOL_GPL(inet_csk_listen_start);
void inet_csk_listen_stop(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
+ struct request_sock_queue *queue = &icsk->icsk_accept_queue;
struct request_sock *acc_req;
struct request_sock *req;
inet_csk_delete_keepalive_timer(sk);
/* make all the listen_opt local to us */
- acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue);
+ acc_req = reqsk_queue_yank_acceptq(queue);
/* Following specs, it would be better either to send FIN
* (and enter FIN-WAIT-1, it is normal close)
@@ -736,7 +757,7 @@ void inet_csk_listen_stop(struct sock *sk)
* To be honest, we are not able to make either
* of the variants now. --ANK
*/
- reqsk_queue_destroy(&icsk->icsk_accept_queue);
+ reqsk_queue_destroy(queue);
while ((req = acc_req) != NULL) {
struct sock *child = req->sk;
@@ -754,6 +775,19 @@ void inet_csk_listen_stop(struct sock *sk)
percpu_counter_inc(sk->sk_prot->orphan_count);
+ if (sk->sk_type == SOCK_STREAM && tcp_rsk(req)->listener) {
+ BUG_ON(tcp_sk(child)->fastopen_rsk != req);
+ BUG_ON(sk != tcp_rsk(req)->listener);
+
+ /* Paranoid, to prevent race condition if
+ * an inbound pkt destined for child is
+ * blocked by sock lock in tcp_v4_rcv().
+ * Also to satisfy an assertion in
+ * tcp_v4_destroy_sock().
+ */
+ tcp_sk(child)->fastopen_rsk = NULL;
+ sock_put(sk);
+ }
inet_csk_destroy_sock(child);
bh_unlock_sock(child);
@@ -763,6 +797,17 @@ void inet_csk_listen_stop(struct sock *sk)
sk_acceptq_removed(sk);
__reqsk_free(req);
}
+ if (queue->fastopenq != NULL) {
+ /* Free all the reqs queued in rskq_rst_head. */
+ spin_lock_bh(&queue->fastopenq->lock);
+ acc_req = queue->fastopenq->rskq_rst_head;
+ queue->fastopenq->rskq_rst_head = NULL;
+ spin_unlock_bh(&queue->fastopenq->lock);
+ while ((req = acc_req) != NULL) {
+ acc_req = req->dl_next;
+ __reqsk_free(req);
+ }
+ }
WARN_ON(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 570e61f9611..8bc005b1435 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -69,6 +69,7 @@ static inline void inet_diag_unlock_handler(
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
struct sk_buff *skb, struct inet_diag_req_v2 *req,
+ struct user_namespace *user_ns,
u32 pid, u32 seq, u16 nlmsg_flags,
const struct nlmsghdr *unlh)
{
@@ -124,7 +125,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
}
#endif
- r->idiag_uid = sock_i_uid(sk);
+ r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
r->idiag_inode = sock_i_ino(sk);
if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
@@ -199,11 +200,12 @@ EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
static int inet_csk_diag_fill(struct sock *sk,
struct sk_buff *skb, struct inet_diag_req_v2 *req,
+ struct user_namespace *user_ns,
u32 pid, u32 seq, u16 nlmsg_flags,
const struct nlmsghdr *unlh)
{
return inet_sk_diag_fill(sk, inet_csk(sk),
- skb, req, pid, seq, nlmsg_flags, unlh);
+ skb, req, user_ns, pid, seq, nlmsg_flags, unlh);
}
static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
@@ -256,14 +258,16 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
}
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
- struct inet_diag_req_v2 *r, u32 pid, u32 seq, u16 nlmsg_flags,
+ struct inet_diag_req_v2 *r,
+ struct user_namespace *user_ns,
+ u32 pid, u32 seq, u16 nlmsg_flags,
const struct nlmsghdr *unlh)
{
if (sk->sk_state == TCP_TIME_WAIT)
return inet_twsk_diag_fill((struct inet_timewait_sock *)sk,
skb, r, pid, seq, nlmsg_flags,
unlh);
- return inet_csk_diag_fill(sk, skb, r, pid, seq, nlmsg_flags, unlh);
+ return inet_csk_diag_fill(sk, skb, r, user_ns, pid, seq, nlmsg_flags, unlh);
}
int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb,
@@ -311,6 +315,7 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s
}
err = sk_diag_fill(sk, rep, req,
+ sk_user_ns(NETLINK_CB(in_skb).ssk),
NETLINK_CB(in_skb).pid,
nlh->nlmsg_seq, 0, nlh);
if (err < 0) {
@@ -551,6 +556,7 @@ static int inet_csk_diag_dump(struct sock *sk,
return 0;
return inet_csk_diag_fill(sk, skb, r,
+ sk_user_ns(NETLINK_CB(cb->skb).ssk),
NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}
@@ -591,7 +597,9 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
}
static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
- struct request_sock *req, u32 pid, u32 seq,
+ struct request_sock *req,
+ struct user_namespace *user_ns,
+ u32 pid, u32 seq,
const struct nlmsghdr *unlh)
{
const struct inet_request_sock *ireq = inet_rsk(req);
@@ -625,7 +633,7 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
r->idiag_expires = jiffies_to_msecs(tmo);
r->idiag_rqueue = 0;
r->idiag_wqueue = 0;
- r->idiag_uid = sock_i_uid(sk);
+ r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
r->idiag_inode = 0;
#if IS_ENABLED(CONFIG_IPV6)
if (r->idiag_family == AF_INET6) {
@@ -702,6 +710,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
}
err = inet_diag_fill_req(skb, sk, req,
+ sk_user_ns(NETLINK_CB(cb->skb).ssk),
NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, cb->nlh);
if (err < 0) {
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 3a57570c8ee..8aa7a4cf913 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -124,6 +124,8 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
static struct kmem_cache *mrt_cachep __read_mostly;
static struct mr_table *ipmr_new_table(struct net *net, u32 id);
+static void ipmr_free_table(struct mr_table *mrt);
+
static int ip_mr_forward(struct net *net, struct mr_table *mrt,
struct sk_buff *skb, struct mfc_cache *cache,
int local);
@@ -131,6 +133,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
struct mfc_cache *c, struct rtmsg *rtm);
+static void mroute_clean_tables(struct mr_table *mrt);
static void ipmr_expire_process(unsigned long arg);
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -271,7 +274,7 @@ static void __net_exit ipmr_rules_exit(struct net *net)
list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
list_del(&mrt->list);
- kfree(mrt);
+ ipmr_free_table(mrt);
}
fib_rules_unregister(net->ipv4.mr_rules_ops);
}
@@ -299,7 +302,7 @@ static int __net_init ipmr_rules_init(struct net *net)
static void __net_exit ipmr_rules_exit(struct net *net)
{
- kfree(net->ipv4.mrt);
+ ipmr_free_table(net->ipv4.mrt);
}
#endif
@@ -336,6 +339,13 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
return mrt;
}
+static void ipmr_free_table(struct mr_table *mrt)
+{
+ del_timer_sync(&mrt->ipmr_expire_timer);
+ mroute_clean_tables(mrt);
+ kfree(mrt);
+}
+
/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 6232d476f37..8f3d05424a3 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -185,10 +185,10 @@ exit:
return sk;
}
-static void inet_get_ping_group_range_net(struct net *net, gid_t *low,
- gid_t *high)
+static void inet_get_ping_group_range_net(struct net *net, kgid_t *low,
+ kgid_t *high)
{
- gid_t *data = net->ipv4.sysctl_ping_group_range;
+ kgid_t *data = net->ipv4.sysctl_ping_group_range;
unsigned int seq;
do {
@@ -203,19 +203,13 @@ static void inet_get_ping_group_range_net(struct net *net, gid_t *low,
static int ping_init_sock(struct sock *sk)
{
struct net *net = sock_net(sk);
- gid_t group = current_egid();
- gid_t range[2];
+ kgid_t group = current_egid();
struct group_info *group_info = get_current_groups();
int i, j, count = group_info->ngroups;
kgid_t low, high;
- inet_get_ping_group_range_net(net, range, range+1);
- low = make_kgid(&init_user_ns, range[0]);
- high = make_kgid(&init_user_ns, range[1]);
- if (!gid_valid(low) || !gid_valid(high) || gid_lt(high, low))
- return -EACCES;
-
- if (range[0] <= group && group <= range[1])
+ inet_get_ping_group_range_net(net, &low, &high);
+ if (gid_lte(low, group) && gid_lte(group, high))
return 0;
for (i = 0; i < group_info->nblocks; i++) {
@@ -845,7 +839,9 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
bucket, src, srcp, dest, destp, sp->sk_state,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
- 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
+ 0, 0L, 0,
+ from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
+ 0, sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp,
atomic_read(&sp->sk_drops), len);
}
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 957acd12250..8de53e1ddd5 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -263,6 +263,10 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPChallengeACK", LINUX_MIB_TCPCHALLENGEACK),
SNMP_MIB_ITEM("TCPSYNChallenge", LINUX_MIB_TCPSYNCHALLENGE),
SNMP_MIB_ITEM("TCPFastOpenActive", LINUX_MIB_TCPFASTOPENACTIVE),
+ SNMP_MIB_ITEM("TCPFastOpenPassive", LINUX_MIB_TCPFASTOPENPASSIVE),
+ SNMP_MIB_ITEM("TCPFastOpenPassiveFail", LINUX_MIB_TCPFASTOPENPASSIVEFAIL),
+ SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW),
+ SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index ff0f071969e..f2425785d40 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -992,7 +992,9 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
i, src, srcp, dest, destp, sp->sk_state,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
- 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
+ 0, 0L, 0,
+ from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
+ 0, sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 50f6d3adb47..dc9549b5eb1 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -934,12 +934,14 @@ static u32 __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
if (mtu < ip_rt_min_pmtu)
mtu = ip_rt_min_pmtu;
+ rcu_read_lock();
if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) {
struct fib_nh *nh = &FIB_RES_NH(res);
update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
jiffies + ip_rt_mtu_expires);
}
+ rcu_read_unlock();
return mtu;
}
@@ -956,7 +958,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
dst->obsolete = DST_OBSOLETE_KILL;
} else {
rt->rt_pmtu = mtu;
- dst_set_expires(&rt->dst, ip_rt_mtu_expires);
+ rt->dst.expires = max(1UL, jiffies + ip_rt_mtu_expires);
}
}
@@ -1132,10 +1134,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
const struct rtable *rt = (const struct rtable *) dst;
unsigned int mtu = rt->rt_pmtu;
- if (mtu && time_after_eq(jiffies, rt->dst.expires))
- mtu = 0;
-
- if (!mtu)
+ if (!mtu || time_after_eq(jiffies, rt->dst.expires))
mtu = dst_metric_raw(dst, RTAX_MTU);
if (mtu && rt_is_output_route(rt))
@@ -1263,7 +1262,7 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
{
struct rtable *rt = (struct rtable *) dst;
- if (dst->flags & DST_NOCACHE) {
+ if (!list_empty(&rt->rt_uncached)) {
spin_lock_bh(&rt_uncached_lock);
list_del(&rt->rt_uncached);
spin_unlock_bh(&rt_uncached_lock);
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 650e1528e1e..ba48e799b03 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -319,6 +319,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
ireq->tstamp_ok = tcp_opt.saw_tstamp;
req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
treq->snt_synack = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsecr : 0;
+ treq->listener = NULL;
/* We threw the options of the initial SYN away, so we hope
* the ACK carries the same options again (see RFC1122 4.2.3.8)
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 1b5ce96707a..9205e492dc9 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -76,9 +76,9 @@ static int ipv4_local_port_range(ctl_table *table, int write,
}
-static void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t *high)
+static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low, kgid_t *high)
{
- gid_t *data = table->data;
+ kgid_t *data = table->data;
unsigned int seq;
do {
seq = read_seqbegin(&sysctl_local_ports.lock);
@@ -89,12 +89,12 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low,
}
/* Update system visible IP port range */
-static void set_ping_group_range(struct ctl_table *table, gid_t range[2])
+static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t high)
{
- gid_t *data = table->data;
+ kgid_t *data = table->data;
write_seqlock(&sysctl_local_ports.lock);
- data[0] = range[0];
- data[1] = range[1];
+ data[0] = low;
+ data[1] = high;
write_sequnlock(&sysctl_local_ports.lock);
}
@@ -103,21 +103,33 @@ static int ipv4_ping_group_range(ctl_table *table, int write,
void __user *buffer,
size_t *lenp, loff_t *ppos)
{
+ struct user_namespace *user_ns = current_user_ns();
int ret;
- gid_t range[2];
+ gid_t urange[2];
+ kgid_t low, high;
ctl_table tmp = {
- .data = &range,
- .maxlen = sizeof(range),
+ .data = &urange,
+ .maxlen = sizeof(urange),
.mode = table->mode,
.extra1 = &ip_ping_group_range_min,
.extra2 = &ip_ping_group_range_max,
};
- inet_get_ping_group_range_table(table, range, range + 1);
+ inet_get_ping_group_range_table(table, &low, &high);
+ urange[0] = from_kgid_munged(user_ns, low);
+ urange[1] = from_kgid_munged(user_ns, high);
ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
- if (write && ret == 0)
- set_ping_group_range(table, range);
+ if (write && ret == 0) {
+ low = make_kgid(user_ns, urange[0]);
+ high = make_kgid(user_ns, urange[1]);
+ if (!gid_valid(low) || !gid_valid(high) ||
+ (urange[1] < urange[0]) || gid_lt(high, low)) {
+ low = make_kgid(&init_user_ns, 1);
+ high = make_kgid(&init_user_ns, 0);
+ }
+ set_ping_group_range(table, low, high);
+ }
return ret;
}
@@ -220,6 +232,45 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
return 0;
}
+int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
+ struct tcp_fastopen_context *ctxt;
+ int ret;
+ u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
+
+ tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
+ if (!tbl.data)
+ return -ENOMEM;
+
+ rcu_read_lock();
+ ctxt = rcu_dereference(tcp_fastopen_ctx);
+ if (ctxt)
+ memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
+ rcu_read_unlock();
+
+ snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x",
+ user_key[0], user_key[1], user_key[2], user_key[3]);
+ ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
+
+ if (write && ret == 0) {
+ if (sscanf(tbl.data, "%x-%x-%x-%x", user_key, user_key + 1,
+ user_key + 2, user_key + 3) != 4) {
+ ret = -EINVAL;
+ goto bad_key;
+ }
+ tcp_fastopen_reset_cipher(user_key, TCP_FASTOPEN_KEY_LENGTH);
+ }
+
+bad_key:
+ pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n",
+ user_key[0], user_key[1], user_key[2], user_key[3],
+ (char *)tbl.data, ret);
+ kfree(tbl.data);
+ return ret;
+}
+
static struct ctl_table ipv4_table[] = {
{
.procname = "tcp_timestamps",
@@ -374,6 +425,12 @@ static struct ctl_table ipv4_table[] = {
.proc_handler = proc_dointvec,
},
{
+ .procname = "tcp_fastopen_key",
+ .mode = 0600,
+ .maxlen = ((TCP_FASTOPEN_KEY_LENGTH * 2) + 10),
+ .proc_handler = proc_tcp_fastopen_key,
+ },
+ {
.procname = "tcp_tw_recycle",
.data = &tcp_death_row.sysctl_tw_recycle,
.maxlen = sizeof(int),
@@ -786,7 +843,7 @@ static struct ctl_table ipv4_net_table[] = {
{
.procname = "ping_group_range",
.data = &init_net.ipv4.sysctl_ping_group_range,
- .maxlen = sizeof(init_net.ipv4.sysctl_ping_group_range),
+ .maxlen = sizeof(gid_t)*2,
.mode = 0644,
.proc_handler = ipv4_ping_group_range,
},
@@ -830,8 +887,8 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
* Sane defaults - nobody may create ping sockets.
* Boot scripts should set this to a distro-specific group.
*/
- net->ipv4.sysctl_ping_group_range[0] = 1;
- net->ipv4.sysctl_ping_group_range[1] = 0;
+ net->ipv4.sysctl_ping_group_range[0] = make_kgid(&init_user_ns, 1);
+ net->ipv4.sysctl_ping_group_range[1] = make_kgid(&init_user_ns, 0);
tcp_init_mem(net);
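The proc_tcp_fastopen_key() handler above serializes the 16-byte AES key as four dash-separated 32-bit hex words and parses the same format on write. A small userspace sketch of the read side, assuming a kernel with this patch and sufficient privilege (the entry is mode 0600):

/* Hypothetical reader for the "%08x-%08x-%08x-%08x" key format above. */
#include <stdio.h>

int main(void)
{
	unsigned int k[4];
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_fastopen_key", "r");

	if (!f) {
		perror("tcp_fastopen_key");
		return 1;
	}
	if (fscanf(f, "%x-%x-%x-%x", &k[0], &k[1], &k[2], &k[3]) != 4) {
		fprintf(stderr, "unexpected key format\n");
		fclose(f);
		return 1;
	}
	printf("TFO key: %08x-%08x-%08x-%08x\n", k[0], k[1], k[2], k[3]);
	fclose(f);
	return 0;
}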
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 2109ff4a1da..df83d744e38 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -486,8 +486,9 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
if (sk->sk_shutdown & RCV_SHUTDOWN)
mask |= POLLIN | POLLRDNORM | POLLRDHUP;
- /* Connected? */
- if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
+ /* Connected or passive Fast Open socket? */
+ if (sk->sk_state != TCP_SYN_SENT &&
+ (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk != NULL)) {
int target = sock_rcvlowat(sk, 0, INT_MAX);
if (tp->urg_seq == tp->copied_seq &&
@@ -840,10 +841,15 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
ssize_t copied;
long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
- /* Wait for a connection to finish. */
- if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
+ /* Wait for a connection to finish. One exception is TCP Fast Open
+ * (passive side) where data is allowed to be sent before a connection
+ * is fully established.
+ */
+ if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
+ !tcp_passive_fastopen(sk)) {
if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
goto out_err;
+ }
clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
@@ -1042,10 +1048,15 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
- /* Wait for a connection to finish. */
- if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
+ /* Wait for a connection to finish. One exception is TCP Fast Open
+ * (passive side) where data is allowed to be sent before a connection
+ * is fully established.
+ */
+ if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
+ !tcp_passive_fastopen(sk)) {
if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
goto do_error;
+ }
if (unlikely(tp->repair)) {
if (tp->repair_queue == TCP_RECV_QUEUE) {
@@ -2144,6 +2155,10 @@ void tcp_close(struct sock *sk, long timeout)
* they look as CLOSING or LAST_ACK for Linux)
* Probably, I missed some more holelets.
* --ANK
+ * XXX (TFO) - To start off we don't support SYN+ACK+FIN
+ * in a single packet! (May consider it later but will
+ * probably need API support or TCP_CORK SYN-ACK until
+ * data is written and socket is closed.)
*/
tcp_send_fin(sk);
}
@@ -2215,8 +2230,16 @@ adjudge_to_death:
}
}
- if (sk->sk_state == TCP_CLOSE)
+ if (sk->sk_state == TCP_CLOSE) {
+ struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
+ /* We could get here with a non-NULL req if the socket is
+ * aborted (e.g., closed with unread data) before 3WHS
+ * finishes.
+ */
+ if (req != NULL)
+ reqsk_fastopen_remove(sk, req, false);
inet_csk_destroy_sock(sk);
+ }
/* Otherwise, socket is reprieved until protocol close. */
out:
@@ -2688,6 +2711,14 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
else
icsk->icsk_user_timeout = msecs_to_jiffies(val);
break;
+
+ case TCP_FASTOPEN:
+ if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
+ TCPF_LISTEN)))
+ err = fastopen_init_queue(sk, val);
+ else
+ err = -EINVAL;
+ break;
default:
err = -ENOPROTOOPT;
break;
@@ -3501,11 +3532,15 @@ EXPORT_SYMBOL(tcp_cookie_generator);
void tcp_done(struct sock *sk)
{
+ struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
+
if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
tcp_set_state(sk, TCP_CLOSE);
tcp_clear_xmit_timers(sk);
+ if (req != NULL)
+ reqsk_fastopen_remove(sk, req, false);
sk->sk_shutdown = SHUTDOWN_MASK;
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index a7f729c409d..8f7ef0ad80e 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -1,10 +1,91 @@
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/tcp.h>
+#include <linux/rcupdate.h>
+#include <linux/rculist.h>
+#include <net/inetpeer.h>
+#include <net/tcp.h>
-int sysctl_tcp_fastopen;
+int sysctl_tcp_fastopen __read_mostly;
+
+struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
+
+static DEFINE_SPINLOCK(tcp_fastopen_ctx_lock);
+
+static void tcp_fastopen_ctx_free(struct rcu_head *head)
+{
+ struct tcp_fastopen_context *ctx =
+ container_of(head, struct tcp_fastopen_context, rcu);
+ crypto_free_cipher(ctx->tfm);
+ kfree(ctx);
+}
+
+int tcp_fastopen_reset_cipher(void *key, unsigned int len)
+{
+ int err;
+ struct tcp_fastopen_context *ctx, *octx;
+
+ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
+
+ if (IS_ERR(ctx->tfm)) {
+ err = PTR_ERR(ctx->tfm);
+error: kfree(ctx);
+ pr_err("TCP: TFO aes cipher alloc error: %d\n", err);
+ return err;
+ }
+ err = crypto_cipher_setkey(ctx->tfm, key, len);
+ if (err) {
+ pr_err("TCP: TFO cipher key error: %d\n", err);
+ crypto_free_cipher(ctx->tfm);
+ goto error;
+ }
+ memcpy(ctx->key, key, len);
+
+ spin_lock(&tcp_fastopen_ctx_lock);
+
+ octx = rcu_dereference_protected(tcp_fastopen_ctx,
+ lockdep_is_held(&tcp_fastopen_ctx_lock));
+ rcu_assign_pointer(tcp_fastopen_ctx, ctx);
+ spin_unlock(&tcp_fastopen_ctx_lock);
+
+ if (octx)
+ call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
+ return err;
+}
+
+/* Computes the fastopen cookie for the peer.
+ * The peer address is 128 bits long (padded with zeros for IPv4).
+ *
+ * The caller must check foc->len to determine if a valid cookie
+ * has been generated successfully.
+ */
+void tcp_fastopen_cookie_gen(__be32 addr, struct tcp_fastopen_cookie *foc)
+{
+ __be32 peer_addr[4] = { addr, 0, 0, 0 };
+ struct tcp_fastopen_context *ctx;
+
+ rcu_read_lock();
+ ctx = rcu_dereference(tcp_fastopen_ctx);
+ if (ctx) {
+ crypto_cipher_encrypt_one(ctx->tfm,
+ foc->val,
+ (__u8 *)peer_addr);
+ foc->len = TCP_FASTOPEN_COOKIE_SIZE;
+ }
+ rcu_read_unlock();
+}
static int __init tcp_fastopen_init(void)
{
+ __u8 key[TCP_FASTOPEN_KEY_LENGTH];
+
+ get_random_bytes(key, sizeof(key));
+ tcp_fastopen_reset_cipher(key, sizeof(key));
return 0;
}
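For intuition, tcp_fastopen_reset_cipher() plus tcp_fastopen_cookie_gen() amount to: keep an AES-128 key, encrypt the zero-padded peer address as one block, and use the leading cookie-sized prefix of the output. A userspace illustration of that construction using OpenSSL's single-block AES primitives; this is a conceptual model, not the kernel code, and the 8-byte cookie size is taken from the matching header change, which is not shown in this hunk:

#include <stdint.h>
#include <string.h>
#include <openssl/aes.h>

#define TFO_KEY_LEN	16	/* mirrors TCP_FASTOPEN_KEY_LENGTH above */
#define TFO_COOKIE_LEN	8	/* assumed TCP_FASTOPEN_COOKIE_SIZE */

/* Encrypt the 128-bit zero-padded peer address with AES-128 and keep the
 * first TFO_COOKIE_LEN bytes as the cookie. */
static void tfo_cookie(const uint8_t key[TFO_KEY_LEN], uint32_t peer_addr_be,
		       uint8_t cookie[TFO_COOKIE_LEN])
{
	uint8_t in[AES_BLOCK_SIZE] = { 0 };
	uint8_t out[AES_BLOCK_SIZE];
	AES_KEY aes;

	memcpy(in, &peer_addr_be, sizeof(peer_addr_be));  /* IPv4: pad with zeros */
	AES_set_encrypt_key(key, 128, &aes);
	AES_encrypt(in, out, &aes);
	memcpy(cookie, out, TFO_COOKIE_LEN);
}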
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index bcfccc5cb8d..8c304a40079 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -378,7 +378,7 @@ static void tcp_fixup_rcvbuf(struct sock *sk)
/* 4. Try to fixup all. It is made immediately after connection enters
* established state.
*/
-static void tcp_init_buffer_space(struct sock *sk)
+void tcp_init_buffer_space(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
int maxwin;
@@ -2930,13 +2930,14 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
* tcp_xmit_retransmit_queue().
*/
static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
- int newly_acked_sacked, bool is_dupack,
+ int prior_sacked, bool is_dupack,
int flag)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
(tcp_fackets_out(tp) > tp->reordering));
+ int newly_acked_sacked = 0;
int fast_rexmit = 0;
if (WARN_ON(!tp->packets_out && tp->sacked_out))
@@ -2996,6 +2997,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
tcp_add_reno_sack(sk);
} else
do_lost = tcp_try_undo_partial(sk, pkts_acked);
+ newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
break;
case TCP_CA_Loss:
if (flag & FLAG_DATA_ACKED)
@@ -3017,6 +3019,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
if (is_dupack)
tcp_add_reno_sack(sk);
}
+ newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
if (icsk->icsk_ca_state <= TCP_CA_Disorder)
tcp_try_undo_dsack(sk);
@@ -3124,6 +3127,12 @@ void tcp_rearm_rto(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
+ /* If the retrans timer is currently being used by Fast Open
+ * for SYN-ACK retrans purpose, stay put.
+ */
+ if (tp->fastopen_rsk)
+ return;
+
if (!tp->packets_out) {
inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
} else {
@@ -3594,7 +3603,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
int prior_packets;
int prior_sacked = tp->sacked_out;
int pkts_acked = 0;
- int newly_acked_sacked = 0;
bool frto_cwnd = false;
/* If the ack is older than previous acks
@@ -3670,8 +3678,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
pkts_acked = prior_packets - tp->packets_out;
- newly_acked_sacked = (prior_packets - prior_sacked) -
- (tp->packets_out - tp->sacked_out);
if (tp->frto_counter)
frto_cwnd = tcp_process_frto(sk, flag);
@@ -3685,7 +3691,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
tcp_may_raise_cwnd(sk, flag))
tcp_cong_avoid(sk, ack, prior_in_flight);
is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
- tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+ tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
is_dupack, flag);
} else {
if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
@@ -3702,7 +3708,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
no_queue:
/* If data was DSACKed, see if we can undo a cwnd reduction. */
if (flag & FLAG_DSACKING_ACK)
- tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+ tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
is_dupack, flag);
/* If this ack opens up a zero window, clear backoff. It was
* being used to time the probes, and is probably far higher than
@@ -3722,8 +3728,7 @@ old_ack:
*/
if (TCP_SKB_CB(skb)->sacked) {
flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
- newly_acked_sacked = tp->sacked_out - prior_sacked;
- tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+ tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
is_dupack, flag);
}
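This hunk moves the newly_acked_sacked computation out of tcp_ack() and into tcp_fastretrans_alert(), where tp->sacked_out has already been updated by the Reno/SACK bookkeeping; the new expression is algebraically identical to the removed one. A worked example with made-up numbers (mine, for illustration only):

#include <stdio.h>

int main(void)
{
	/* Hypothetical snapshot around one incoming ACK: */
	int prior_sacked = 5;	/* tp->sacked_out before the ACK was processed */
	int sacked_out = 3;	/* tp->sacked_out after: two sacked skbs were cumulatively acked */
	int pkts_acked = 4;	/* prior_packets - tp->packets_out */

	/* As now computed inside tcp_fastretrans_alert(): */
	int newly_acked_sacked = pkts_acked + sacked_out - prior_sacked;

	printf("newly_acked_sacked = %d\n", newly_acked_sacked);	/* prints 2 */
	return 0;
}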
@@ -4039,7 +4044,7 @@ static inline bool tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq)
}
/* When we get a reset we do this. */
-static void tcp_reset(struct sock *sk)
+void tcp_reset(struct sock *sk)
{
/* We want the right error as BSD sees it (and indeed as we do). */
switch (sk->sk_state) {
@@ -5896,7 +5901,9 @@ discard:
tcp_send_synack(sk);
#if 0
/* Note, we could accept data and URG from this segment.
- * There are no obstacles to make this.
+ * There are no obstacles to doing this (except that we must
+ * either change tcp_recvmsg() to prevent it from returning data
+ * before 3WHS completes per RFC793, or employ TCP Fast Open).
*
* However, if we ignore data in ACKless segments sometimes,
* we have no reasons to accept it sometimes.
@@ -5936,6 +5943,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
+ struct request_sock *req;
int queued = 0;
tp->rx_opt.saw_tstamp = 0;
@@ -5991,7 +5999,14 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
return 0;
}
- if (!tcp_validate_incoming(sk, skb, th, 0))
+ req = tp->fastopen_rsk;
+ if (req != NULL) {
+ BUG_ON(sk->sk_state != TCP_SYN_RECV &&
+ sk->sk_state != TCP_FIN_WAIT1);
+
+ if (tcp_check_req(sk, skb, req, NULL, true) == NULL)
+ goto discard;
+ } else if (!tcp_validate_incoming(sk, skb, th, 0))
return 0;
/* step 5: check the ACK field */
@@ -6001,7 +6016,22 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
switch (sk->sk_state) {
case TCP_SYN_RECV:
if (acceptable) {
- tp->copied_seq = tp->rcv_nxt;
+ /* Once we leave TCP_SYN_RECV, we no longer
+ * need req so release it.
+ */
+ if (req) {
+ reqsk_fastopen_remove(sk, req, false);
+ } else {
+ /* Make sure socket is routed, for
+ * correct metrics.
+ */
+ icsk->icsk_af_ops->rebuild_header(sk);
+ tcp_init_congestion_control(sk);
+
+ tcp_mtup_init(sk);
+ tcp_init_buffer_space(sk);
+ tp->copied_seq = tp->rcv_nxt;
+ }
smp_mb();
tcp_set_state(sk, TCP_ESTABLISHED);
sk->sk_state_change(sk);
@@ -6023,23 +6053,27 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
if (tp->rx_opt.tstamp_ok)
tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
- /* Make sure socket is routed, for
- * correct metrics.
- */
- icsk->icsk_af_ops->rebuild_header(sk);
-
- tcp_init_metrics(sk);
-
- tcp_init_congestion_control(sk);
+ if (req) {
+ /* Re-arm the timer because data may
+ * have been sent out. This is similar
+ * to the regular data transmission case
+ * when new data has just been ack'ed.
+ *
+ * (TFO) - we could try to be more
+ * aggressive and retransmit any data
+ * sooner based on when it was sent
+ * out.
+ */
+ tcp_rearm_rto(sk);
+ } else
+ tcp_init_metrics(sk);
/* Prevent spurious tcp_cwnd_restart() on
* first data packet.
*/
tp->lsndtime = tcp_time_stamp;
- tcp_mtup_init(sk);
tcp_initialize_rcv_mss(sk);
- tcp_init_buffer_space(sk);
tcp_fast_path_on(tp);
} else {
return 1;
@@ -6047,6 +6081,16 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
break;
case TCP_FIN_WAIT1:
+ /* If we enter the TCP_FIN_WAIT1 state and we are a
+ * Fast Open socket and this is the first acceptable
+ * ACK we have received, it must have acknowledged
+ * our SYNACK, so stop the SYNACK timer.
+ */
+ if (acceptable && req != NULL) {
+ /* We no longer need the request sock. */
+ reqsk_fastopen_remove(sk, req, false);
+ tcp_rearm_rto(sk);
+ }
if (tp->snd_una == tp->write_seq) {
struct dst_entry *dst;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 1e15c5be04e..e64abed249c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -352,6 +352,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
const int code = icmp_hdr(icmp_skb)->code;
struct sock *sk;
struct sk_buff *skb;
+ struct request_sock *req;
__u32 seq;
__u32 remaining;
int err;
@@ -394,9 +395,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
icsk = inet_csk(sk);
tp = tcp_sk(sk);
+ req = tp->fastopen_rsk;
seq = ntohl(th->seq);
if (sk->sk_state != TCP_LISTEN &&
- !between(seq, tp->snd_una, tp->snd_nxt)) {
+ !between(seq, tp->snd_una, tp->snd_nxt) &&
+ (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
+ /* For a Fast Open socket, allow seq to be snt_isn. */
NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
goto out;
}
@@ -435,6 +439,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
!icsk->icsk_backoff)
break;
+ /* XXX (TFO) - revisit the following logic for TFO */
+
if (sock_owned_by_user(sk))
break;
@@ -466,6 +472,14 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
goto out;
}
+ /* XXX (TFO) - if it's a TFO socket and has been accepted, rather
+ * than following the TCP_SYN_RECV case and closing the socket,
+ * we ignore the ICMP error and keep trying like a fully established
+ * socket. Is this the right thing to do?
+ */
+ if (req && req->sk == NULL)
+ goto out;
+
switch (sk->sk_state) {
struct request_sock *req, **prev;
case TCP_LISTEN:
@@ -498,7 +512,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
case TCP_SYN_SENT:
case TCP_SYN_RECV: /* Cannot happen.
- It can f.e. if SYNs crossed.
+ It can happen, e.g., if SYNs crossed,
+ or with Fast Open.
*/
if (!sock_owned_by_user(sk)) {
sk->sk_err = err;
@@ -809,8 +824,12 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
struct request_sock *req)
{
- tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
- tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
+ /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
+ * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
+ */
+ tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
+ tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
+ tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
req->ts_recent,
0,
tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
@@ -839,7 +858,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
return -1;
- skb = tcp_make_synack(sk, dst, req, rvp);
+ skb = tcp_make_synack(sk, dst, req, rvp, NULL);
if (skb) {
__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
@@ -1272,6 +1291,178 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
};
#endif
+static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req,
+ struct tcp_fastopen_cookie *foc,
+ struct tcp_fastopen_cookie *valid_foc)
+{
+ bool skip_cookie = false;
+ struct fastopen_queue *fastopenq;
+
+ if (likely(!fastopen_cookie_present(foc))) {
+ /* See include/net/tcp.h for the meaning of these knobs */
+ if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
+ ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
+ (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
+ skip_cookie = true; /* no cookie to validate */
+ else
+ return false;
+ }
+ fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
+ /* A FO option is present; bump the counter. */
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);
+
+ /* Make sure the listener has enabled fastopen, and we don't
+ * exceed the max # of pending TFO requests allowed before trying
+ * to validate the cookie, in order to avoid burning CPU cycles
+ * unnecessarily.
+ *
+ * XXX (TFO) - The implication of checking the max_qlen before
+ * processing a cookie request is that clients can't differentiate
+ * between qlen overflow causing Fast Open to be disabled
+ * temporarily vs a server not supporting Fast Open at all.
+ */
+ if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
+ fastopenq == NULL || fastopenq->max_qlen == 0)
+ return false;
+
+ if (fastopenq->qlen >= fastopenq->max_qlen) {
+ struct request_sock *req1;
+ spin_lock(&fastopenq->lock);
+ req1 = fastopenq->rskq_rst_head;
+ if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
+ spin_unlock(&fastopenq->lock);
+ NET_INC_STATS_BH(sock_net(sk),
+ LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
+ /* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL*/
+ foc->len = -1;
+ return false;
+ }
+ fastopenq->rskq_rst_head = req1->dl_next;
+ fastopenq->qlen--;
+ spin_unlock(&fastopenq->lock);
+ reqsk_free(req1);
+ }
+ if (skip_cookie) {
+ tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+ return true;
+ }
+ if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
+ if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
+ tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
+ if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
+ memcmp(&foc->val[0], &valid_foc->val[0],
+ TCP_FASTOPEN_COOKIE_SIZE) != 0)
+ return false;
+ valid_foc->len = -1;
+ }
+ /* Acknowledge the data received from the peer. */
+ tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+ return true;
+ } else if (foc->len == 0) { /* Client requesting a cookie */
+ tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
+ NET_INC_STATS_BH(sock_net(sk),
+ LINUX_MIB_TCPFASTOPENCOOKIEREQD);
+ } else {
+ /* Client sent a cookie of the wrong size. Treat it
+ * as invalid and return a valid one.
+ */
+ tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
+ }
+ return false;
+}
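Condensing the branches above, the three possible outcomes of tcp_fastopen_check() can be summarized as a small pure function (my own simplified sketch, ignoring the sysctl gating and the qlen-overflow path):

enum tfo_verdict {
	TFO_REJECT,		/* fall back to a regular 3WHS, no cookie echoed */
	TFO_ACCEPT_DATA,	/* cookie valid or not required: ack the SYN data */
	TFO_SEND_COOKIE,	/* no/stale/odd-sized cookie: echo a fresh one */
};

/* cookie_len: -1 = no TFO option, 0 = cookie request, >0 = cookie present */
enum tfo_verdict tfo_classify(int cookie_len, int cookie_matches,
			      int cookie_required)
{
	if (cookie_len < 0)	/* no Fast Open option at all */
		return cookie_required ? TFO_REJECT : TFO_ACCEPT_DATA;
	if (cookie_len == 8)	/* TCP_FASTOPEN_COOKIE_SIZE */
		return cookie_matches ? TFO_ACCEPT_DATA : TFO_SEND_COOKIE;
	return TFO_SEND_COOKIE;	/* len 0 (request) or a bogus size */
}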
+
+static int tcp_v4_conn_req_fastopen(struct sock *sk,
+ struct sk_buff *skb,
+ struct sk_buff *skb_synack,
+ struct request_sock *req,
+ struct request_values *rvp)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
+ const struct inet_request_sock *ireq = inet_rsk(req);
+ struct sock *child;
+
+ req->retrans = 0;
+ req->sk = NULL;
+
+ child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
+ if (child == NULL) {
+ NET_INC_STATS_BH(sock_net(sk),
+ LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
+ kfree_skb(skb_synack);
+ return -1;
+ }
+ ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
+ ireq->rmt_addr, ireq->opt);
+ /* XXX (TFO) - is it OK to ignore the error and continue? */
+
+ spin_lock(&queue->fastopenq->lock);
+ queue->fastopenq->qlen++;
+ spin_unlock(&queue->fastopenq->lock);
+
+ /* Initialize the child socket. We have to fix some values to take
+ * into account that the child is a Fast Open socket, created
+ * only out of the bits carried in the SYN packet.
+ */
+ tp = tcp_sk(child);
+
+ tp->fastopen_rsk = req;
+ /* Do a hold on the listener sk so that if the listener is being
+ * closed, the child that has been accepted can live on and still
+ * access listen_lock.
+ */
+ sock_hold(sk);
+ tcp_rsk(req)->listener = sk;
+
+ /* RFC1323: The window in SYN & SYN/ACK segments is never
+ * scaled. So correct it appropriately.
+ */
+ tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
+
+ /* Activate the retrans timer so that SYNACK can be retransmitted.
+ * The request socket is not added to the SYN table of the parent
+ * because it's been added to the accept queue directly.
+ */
+ inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
+ TCP_TIMEOUT_INIT, TCP_RTO_MAX);
+
+ /* Add the child socket directly into the accept queue */
+ inet_csk_reqsk_queue_add(sk, req, child);
+
+ /* Now finish processing the fastopen child socket. */
+ inet_csk(child)->icsk_af_ops->rebuild_header(child);
+ tcp_init_congestion_control(child);
+ tcp_mtup_init(child);
+ tcp_init_buffer_space(child);
+ tcp_init_metrics(child);
+
+ /* Queue the data carried in the SYN packet. We need to first
+ * bump skb's refcnt because the caller will attempt to free it.
+ *
+ * XXX (TFO) - we honor a zero-payload TFO request for now.
+ * (Any reason not to?)
+ */
+ if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
+ /* Don't queue the skb if there is no payload in SYN.
+ * XXX (TFO) - How about SYN+FIN?
+ */
+ tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+ } else {
+ skb = skb_get(skb);
+ skb_dst_drop(skb);
+ __skb_pull(skb, tcp_hdr(skb)->doff * 4);
+ skb_set_owner_r(skb, child);
+ __skb_queue_tail(&child->sk_receive_queue, skb);
+ tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+ }
+ sk->sk_data_ready(sk, 0);
+ bh_unlock_sock(child);
+ sock_put(child);
+ WARN_ON(req->sk == NULL);
+ return 0;
+}
+
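The data this function queues into the child's receive queue is whatever the client put in its SYN. Client-side support is a separate patch series, but for context, a hedged sketch of how such a client transmits data-in-SYN with sendto() and MSG_FASTOPEN (the fallback flag value is my assumption for headers lacking the define):

#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/types.h>

#ifndef MSG_FASTOPEN
#define MSG_FASTOPEN 0x20000000	/* assumed value from the client-side patches */
#endif

/* Send len bytes of buf in the SYN itself; without a cached cookie the
 * kernel falls back to a normal handshake and queues the data. */
ssize_t tfo_send(int fd, const void *buf, size_t len,
		 const struct sockaddr_in *srv)
{
	return sendto(fd, buf, len, MSG_FASTOPEN,
		      (const struct sockaddr *)srv, sizeof(*srv));
}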
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
struct tcp_extend_values tmp_ext;
@@ -1285,6 +1476,11 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
__be32 daddr = ip_hdr(skb)->daddr;
__u32 isn = TCP_SKB_CB(skb)->when;
bool want_cookie = false;
+ struct flowi4 fl4;
+ struct tcp_fastopen_cookie foc = { .len = -1 };
+ struct tcp_fastopen_cookie valid_foc = { .len = -1 };
+ struct sk_buff *skb_synack;
+ int do_fastopen;
/* Never answer to SYNs send to broadcast or multicast */
if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1319,7 +1515,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
tcp_clear_options(&tmp_opt);
tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
tmp_opt.user_mss = tp->rx_opt.user_mss;
- tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);
+ tcp_parse_options(skb, &tmp_opt, &hash_location, 0,
+ want_cookie ? NULL : &foc);
if (tmp_opt.cookie_plus > 0 &&
tmp_opt.saw_tstamp &&
@@ -1377,8 +1574,6 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
isn = cookie_v4_init_sequence(sk, skb, &req->mss);
req->cookie_ts = tmp_opt.tstamp_ok;
} else if (!isn) {
- struct flowi4 fl4;
-
/* VJ's idea. We save last timestamp seen
* from the destination in peer table, when entering
* state TIME-WAIT, and check against it before
@@ -1419,14 +1614,52 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
tcp_rsk(req)->snt_isn = isn;
tcp_rsk(req)->snt_synack = tcp_time_stamp;
- if (tcp_v4_send_synack(sk, dst, req,
- (struct request_values *)&tmp_ext,
- skb_get_queue_mapping(skb),
- want_cookie) ||
- want_cookie)
+ if (dst == NULL) {
+ dst = inet_csk_route_req(sk, &fl4, req);
+ if (dst == NULL)
+ goto drop_and_free;
+ }
+ do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);
+
+ /* We don't call tcp_v4_send_synack() directly because we need
+ * to make sure a child socket can be created successfully before
+ * sending back synack!
+ *
+ * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
+ * (or better yet, call tcp_send_synack() in the child context
+ * directly, but we will have to fix a bunch of other code first)
+ * after syn_recv_sock() except one will need to first fix the
+ * latter to remove its dependency on the current implementation
+ * of tcp_v4_send_synack()->tcp_select_initial_window().
+ */
+ skb_synack = tcp_make_synack(sk, dst, req,
+ (struct request_values *)&tmp_ext,
+ fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
+
+ if (skb_synack) {
+ __tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
+ skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
+ } else
+ goto drop_and_free;
+
+ if (likely(!do_fastopen)) {
+ int err;
+ err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
+ ireq->rmt_addr, ireq->opt);
+ err = net_xmit_eval(err);
+ if (err || want_cookie)
+ goto drop_and_free;
+
+ tcp_rsk(req)->listener = NULL;
+ /* Add the request_sock to the SYN table */
+ inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
+ if (fastopen_cookie_present(&foc) && foc.len != 0)
+ NET_INC_STATS_BH(sock_net(sk),
+ LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
+ } else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req,
+ (struct request_values *)&tmp_ext))
goto drop_and_free;
- inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
return 0;
drop_and_release:
@@ -1554,7 +1787,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
iph->saddr, iph->daddr);
if (req)
- return tcp_check_req(sk, skb, req, prev);
+ return tcp_check_req(sk, skb, req, prev, false);
nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
th->source, iph->daddr, th->dest, inet_iif(skb));
@@ -1977,6 +2210,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
tcp_cookie_values_release);
tp->cookie_values = NULL;
}
+ BUG_ON(tp->fastopen_rsk != NULL);
/* If socket is aborted during connect operation */
tcp_free_fastopen_req(tp);
@@ -2393,7 +2627,7 @@ void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
EXPORT_SYMBOL(tcp_proc_unregister);
static void get_openreq4(const struct sock *sk, const struct request_sock *req,
- struct seq_file *f, int i, int uid, int *len)
+ struct seq_file *f, int i, kuid_t uid, int *len)
{
const struct inet_request_sock *ireq = inet_rsk(req);
long delta = req->expires - jiffies;
@@ -2410,7 +2644,7 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
1, /* timers active (only the expire timer) */
jiffies_delta_to_clock_t(delta),
req->retrans,
- uid,
+ from_kuid_munged(seq_user_ns(f), uid),
0, /* non standard timer */
0, /* open_requests have no inode */
atomic_read(&sk->sk_refcnt),
@@ -2425,6 +2659,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
const struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
const struct inet_sock *inet = inet_sk(sk);
+ struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
__be32 dest = inet->inet_daddr;
__be32 src = inet->inet_rcv_saddr;
__u16 destp = ntohs(inet->inet_dport);
@@ -2461,7 +2696,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
timer_active,
jiffies_delta_to_clock_t(timer_expires - jiffies),
icsk->icsk_retransmits,
- sock_i_uid(sk),
+ from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
icsk->icsk_probes_out,
sock_i_ino(sk),
atomic_read(&sk->sk_refcnt), sk,
@@ -2469,7 +2704,9 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
jiffies_to_clock_t(icsk->icsk_ack.ato),
(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
tp->snd_cwnd,
- tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
+ sk->sk_state == TCP_LISTEN ?
+ (fastopenq ? fastopenq->max_qlen : 0) :
+ (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh),
len);
}
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 6ff7f10dce9..e965319d610 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -507,6 +507,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
newtp->rx_opt.mss_clamp = req->mss;
TCP_ECN_openreq_child(newtp, req);
+ newtp->fastopen_rsk = NULL;
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
}
@@ -515,13 +516,18 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
EXPORT_SYMBOL(tcp_create_openreq_child);
/*
- * Process an incoming packet for SYN_RECV sockets represented
- * as a request_sock.
+ * Process an incoming packet for SYN_RECV sockets represented as a
+ * request_sock. Normally sk is the listener socket but for TFO it
+ * points to the child socket.
+ *
+ * XXX (TFO) - The current implementation contains a special check
+ * for ACK validation here and another inside tcp_v4_reqsk_send_ack().
+ * Can we do better?
*/
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
- struct request_sock **prev)
+ struct request_sock **prev,
+ bool fastopen)
{
struct tcp_options_received tmp_opt;
const u8 *hash_location;
@@ -530,6 +536,8 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
bool paws_reject = false;
+ BUG_ON(fastopen == (sk->sk_state == TCP_LISTEN));
+
tmp_opt.saw_tstamp = 0;
if (th->doff > (sizeof(struct tcphdr)>>2)) {
tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);
@@ -565,6 +573,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
*
* Enforce "SYN-ACK" according to figure 8, figure 6
* of RFC793, fixed by RFC1122.
+ *
+ * Note that even if there is new data in the SYN packet
+ * it will be thrown away too.
*/
req->rsk_ops->rtx_syn_ack(sk, req, NULL);
return NULL;
@@ -622,9 +633,12 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
* sent (the segment carries an unacceptable ACK) ...
* a reset is sent."
*
- * Invalid ACK: reset will be sent by listening socket
+ * Invalid ACK: reset will be sent by listening socket.
+ * Note that for a Fast Open socket the ACK validity check is done
+ * elsewhere, directly against the child socket rather than req,
+ * because user data may have been sent out.
*/
- if ((flg & TCP_FLAG_ACK) &&
+ if ((flg & TCP_FLAG_ACK) && !fastopen &&
(TCP_SKB_CB(skb)->ack_seq !=
tcp_rsk(req)->snt_isn + 1 + tcp_s_data_size(tcp_sk(sk))))
return sk;
@@ -637,7 +651,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
/* RFC793: "first check sequence number". */
if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
- tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
+ tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rcv_wnd)) {
/* Out of window: send ACK and drop. */
if (!(flg & TCP_FLAG_RST))
req->rsk_ops->send_ack(sk, skb, req);
@@ -648,7 +662,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
/* In sequence, PAWS is OK. */
- if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
+ if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
req->ts_recent = tmp_opt.rcv_tsval;
if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
@@ -667,10 +681,19 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
/* ACK sequence verified above, just make sure ACK is
* set. If ACK not set, just silently drop the packet.
+ *
+ * XXX (TFO) - if we ever allow "data after SYN", the
+ * following check needs to be removed.
*/
if (!(flg & TCP_FLAG_ACK))
return NULL;
+ /* For Fast Open no more processing is needed (sk is the
+ * child socket).
+ */
+ if (fastopen)
+ return sk;
+
/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
@@ -706,11 +729,21 @@ listen_overflow:
}
embryonic_reset:
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
- if (!(flg & TCP_FLAG_RST))
+ if (!(flg & TCP_FLAG_RST)) {
+ /* Received a bad SYN pkt - for TFO we try not to reset
+ * the local connection unless it's really necessary, to
+ * avoid becoming vulnerable to outside attacks aiming at
+ * resetting legit local connections.
+ */
req->rsk_ops->send_reset(sk, skb);
-
- inet_csk_reqsk_queue_drop(sk, req, prev);
+ } else if (fastopen) { /* received a valid RST pkt */
+ reqsk_fastopen_remove(sk, req, true);
+ tcp_reset(sk);
+ }
+ if (!fastopen) {
+ inet_csk_reqsk_queue_drop(sk, req, prev);
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
+ }
return NULL;
}
EXPORT_SYMBOL(tcp_check_req);
@@ -719,6 +752,12 @@ EXPORT_SYMBOL(tcp_check_req);
* Queue segment on the new socket if the new socket is active,
* otherwise we just shortcircuit this and continue with
* the new socket.
+ *
+ * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
+ * when entering. But other states are possible due to a race condition:
+ * after __inet_lookup_established() fails but before the listener
+ * lock is obtained, other packets may cause the same connection to
+ * be created.
*/
int tcp_child_process(struct sock *parent, struct sock *child,
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index d04632673a9..9383b51f3ef 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -702,7 +702,8 @@ static unsigned int tcp_synack_options(struct sock *sk,
unsigned int mss, struct sk_buff *skb,
struct tcp_out_options *opts,
struct tcp_md5sig_key **md5,
- struct tcp_extend_values *xvp)
+ struct tcp_extend_values *xvp,
+ struct tcp_fastopen_cookie *foc)
{
struct inet_request_sock *ireq = inet_rsk(req);
unsigned int remaining = MAX_TCP_OPTION_SPACE;
@@ -747,7 +748,15 @@ static unsigned int tcp_synack_options(struct sock *sk,
if (unlikely(!ireq->tstamp_ok))
remaining -= TCPOLEN_SACKPERM_ALIGNED;
}
-
+ if (foc != NULL) {
+ u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
+ need = (need + 3) & ~3U; /* Align to 32 bits */
+ if (remaining >= need) {
+ opts->options |= OPTION_FAST_OPEN_COOKIE;
+ opts->fastopen_cookie = foc;
+ remaining -= need;
+ }
+ }
/* Similar rationale to tcp_syn_options() applies here, too.
* If the <SYN> options fit, the same options should fit now!
*/
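The option-space accounting above rounds the experimental Fast Open option up to a 32-bit boundary before checking that it fits in the 40 bytes of TCP option space. Assuming TCPOLEN_EXP_FASTOPEN_BASE covers kind, length, and a 2-byte magic (4 bytes in total), the arithmetic for an 8-byte cookie works out as:

#include <stdio.h>

int main(void)
{
	unsigned int base = 4;		/* assumed TCPOLEN_EXP_FASTOPEN_BASE */
	unsigned int cookie_len = 8;	/* TCP_FASTOPEN_COOKIE_SIZE */
	unsigned int need = base + cookie_len;

	need = (need + 3) & ~3U;	/* align to 32 bits, as in the hunk */
	printf("fast open option consumes %u of 40 option bytes\n", need);
	return 0;
}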
@@ -2658,7 +2667,8 @@ int tcp_send_synack(struct sock *sk)
*/
struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
struct request_sock *req,
- struct request_values *rvp)
+ struct request_values *rvp,
+ struct tcp_fastopen_cookie *foc)
{
struct tcp_out_options opts;
struct tcp_extend_values *xvp = tcp_xv(rvp);
@@ -2718,7 +2728,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
#endif
TCP_SKB_CB(skb)->when = tcp_time_stamp;
tcp_header_size = tcp_synack_options(sk, req, mss,
- skb, &opts, &md5, xvp)
+ skb, &opts, &md5, xvp, foc)
+ sizeof(*th);
skb_push(skb, tcp_header_size);
@@ -2772,7 +2782,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
}
th->seq = htonl(TCP_SKB_CB(skb)->seq);
- th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
+ /* XXX data is queued and acked as is. No buffer/window check */
+ th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
th->window = htons(min(req->rcv_wnd, 65535U));
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index b774a03bd1d..fc04711e80c 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -305,6 +305,35 @@ static void tcp_probe_timer(struct sock *sk)
}
/*
+ * Timer for Fast Open socket to retransmit SYNACK. Note that the
+ * sk here is the child socket, not the parent (listener) socket.
+ */
+static void tcp_fastopen_synack_timer(struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ int max_retries = icsk->icsk_syn_retries ? :
+ sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
+ struct request_sock *req;
+
+ req = tcp_sk(sk)->fastopen_rsk;
+ req->rsk_ops->syn_ack_timeout(sk, req);
+
+ if (req->retrans >= max_retries) {
+ tcp_write_err(sk);
+ return;
+ }
+ /* XXX (TFO) - Unlike a regular SYN-ACK retransmit, we ignore the error
+ * returned from rtx_syn_ack() to make the retransmit more persistent,
+ * like a regular retransmit, because if the child socket has been
+ * accepted it's not good to give up too easily.
+ */
+ req->rsk_ops->rtx_syn_ack(sk, req, NULL);
+ req->retrans++;
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+ TCP_TIMEOUT_INIT << req->retrans, TCP_RTO_MAX);
+}
+
+/*
* The TCP retransmit timer.
*/
@@ -317,7 +346,15 @@ void tcp_retransmit_timer(struct sock *sk)
tcp_resume_early_retransmit(sk);
return;
}
-
+ if (tp->fastopen_rsk) {
+ BUG_ON(sk->sk_state != TCP_SYN_RECV &&
+ sk->sk_state != TCP_FIN_WAIT1);
+ tcp_fastopen_synack_timer(sk);
+ /* Before we receive the ACK to our SYN-ACK, don't retransmit
+ * anything else (e.g., data or FIN segments).
+ */
+ return;
+ }
if (!tp->packets_out)
goto out;
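The synack timer above re-arms with TCP_TIMEOUT_INIT << req->retrans, so the Fast Open child keeps the usual exponential backoff. Assuming TCP_TIMEOUT_INIT is one second (its conventional value), the schedule is easy to tabulate:

#include <stdio.h>

int main(void)
{
	unsigned int init = 1;	/* seconds; assumes TCP_TIMEOUT_INIT == 1*HZ */
	int retrans;

	/* sysctl_tcp_synack_retries defaults to 5; the code above grants
	 * Fast Open sockets one extra attempt. */
	for (retrans = 0; retrans <= 5; retrans++)
		printf("after retrans %d: rearm for %us\n",
		       retrans, init << retrans);
	return 0;
}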
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 6f6d1aca3c3..c4e64328d8b 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2110,7 +2110,9 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
bucket, src, srcp, dest, destp, sp->sk_state,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
- 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
+ 0, 0L, 0,
+ from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
+ 0, sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp,
atomic_read(&sp->sk_drops), len);
}
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index 16d0960062b..d2f336ea82c 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -24,7 +24,9 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
if (!inet_diag_bc_sk(bc, sk))
return 0;
- return inet_sk_diag_fill(sk, NULL, skb, req, NETLINK_CB(cb->skb).pid,
+ return inet_sk_diag_fill(sk, NULL, skb, req,
+ sk_user_ns(NETLINK_CB(cb->skb).ssk),
+ NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}
@@ -69,6 +71,7 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
goto out;
err = inet_sk_diag_fill(sk, NULL, rep, req,
+ sk_user_ns(NETLINK_CB(in_skb).ssk),
NETLINK_CB(in_skb).pid,
nlh->nlmsg_seq, 0, nlh);
if (err < 0) {
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 19d4bffda9d..572cb660837 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -127,8 +127,8 @@ static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
#endif
#ifdef CONFIG_IPV6_PRIVACY
-static int __ipv6_regen_rndid(struct inet6_dev *idev);
-static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
+static void __ipv6_regen_rndid(struct inet6_dev *idev);
+static void __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
static void ipv6_regen_rndid(unsigned long data);
#endif
@@ -852,16 +852,7 @@ retry:
}
in6_ifa_hold(ifp);
memcpy(addr.s6_addr, ifp->addr.s6_addr, 8);
- if (__ipv6_try_regen_rndid(idev, tmpaddr) < 0) {
- spin_unlock_bh(&ifp->lock);
- write_unlock(&idev->lock);
- pr_warn("%s: regeneration of randomized interface id failed\n",
- __func__);
- in6_ifa_put(ifp);
- in6_dev_put(idev);
- ret = -1;
- goto out;
- }
+ __ipv6_try_regen_rndid(idev, tmpaddr);
memcpy(&addr.s6_addr[8], idev->rndid, 8);
age = (now - ifp->tstamp) / HZ;
tmp_valid_lft = min_t(__u32,
@@ -1600,7 +1591,7 @@ static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
#ifdef CONFIG_IPV6_PRIVACY
/* (re)generation of randomized interface identifier (RFC 3041 3.2, 3.5) */
-static int __ipv6_regen_rndid(struct inet6_dev *idev)
+static void __ipv6_regen_rndid(struct inet6_dev *idev)
{
regen:
get_random_bytes(idev->rndid, sizeof(idev->rndid));
@@ -1627,8 +1618,6 @@ regen:
if ((idev->rndid[2]|idev->rndid[3]|idev->rndid[4]|idev->rndid[5]|idev->rndid[6]|idev->rndid[7]) == 0x00)
goto regen;
}
-
- return 0;
}
static void ipv6_regen_rndid(unsigned long data)
@@ -1642,8 +1631,7 @@ static void ipv6_regen_rndid(unsigned long data)
if (idev->dead)
goto out;
- if (__ipv6_regen_rndid(idev) < 0)
- goto out;
+ __ipv6_regen_rndid(idev);
expires = jiffies +
idev->cnf.temp_prefered_lft * HZ -
@@ -1664,13 +1652,10 @@ out:
in6_dev_put(idev);
}
-static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
+static void __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
{
- int ret = 0;
-
if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0)
- ret = __ipv6_regen_rndid(idev);
- return ret;
+ __ipv6_regen_rndid(idev);
}
#endif
@@ -2566,14 +2551,10 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
void *data)
{
struct net_device *dev = (struct net_device *) data;
- struct inet6_dev *idev;
+ struct inet6_dev *idev = __in6_dev_get(dev);
int run_pending = 0;
int err;
- if (event == NETDEV_UNREGISTER_FINAL)
- return NOTIFY_DONE;
-
- idev = __in6_dev_get(dev);
switch (event) {
case NETDEV_REGISTER:
if (!idev && dev->mtu >= IPV6_MIN_MTU) {
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 9772fbd8a3f..90bbefb5794 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -22,6 +22,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/pid_namespace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
@@ -91,6 +92,8 @@ static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
static void fl_free(struct ip6_flowlabel *fl)
{
if (fl) {
+ if (fl->share == IPV6_FL_S_PROCESS)
+ put_pid(fl->owner.pid);
release_net(fl->fl_net);
kfree(fl->opt);
}
@@ -394,10 +397,10 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
case IPV6_FL_S_ANY:
break;
case IPV6_FL_S_PROCESS:
- fl->owner = current->pid;
+ fl->owner.pid = get_task_pid(current, PIDTYPE_PID);
break;
case IPV6_FL_S_USER:
- fl->owner = current_euid();
+ fl->owner.uid = current_euid();
break;
default:
err = -EINVAL;
@@ -561,7 +564,10 @@ recheck:
err = -EPERM;
if (fl1->share == IPV6_FL_S_EXCL ||
fl1->share != fl->share ||
- fl1->owner != fl->owner)
+ ((fl1->share == IPV6_FL_S_PROCESS) &&
+ (fl1->owner.pid == fl->owner.pid)) ||
+ ((fl1->share == IPV6_FL_S_USER) &&
+ uid_eq(fl1->owner.uid, fl->owner.uid)))
goto release;
err = -EINVAL;
@@ -621,6 +627,7 @@ done:
struct ip6fl_iter_state {
struct seq_net_private p;
+ struct pid_namespace *pid_ns;
int bucket;
};
@@ -699,6 +706,7 @@ static void ip6fl_seq_stop(struct seq_file *seq, void *v)
static int ip6fl_seq_show(struct seq_file *seq, void *v)
{
+ struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
if (v == SEQ_START_TOKEN)
seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n",
"Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt");
@@ -708,7 +716,11 @@ static int ip6fl_seq_show(struct seq_file *seq, void *v)
"%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
(unsigned int)ntohl(fl->label),
fl->share,
- (int)fl->owner,
+ ((fl->share == IPV6_FL_S_PROCESS) ?
+ pid_nr_ns(fl->owner.pid, state->pid_ns) :
+ ((fl->share == IPV6_FL_S_USER) ?
+ from_kuid_munged(seq_user_ns(seq), fl->owner.uid) :
+ 0)),
atomic_read(&fl->users),
fl->linger/HZ,
(long)(fl->expires - jiffies)/HZ,
@@ -727,8 +739,29 @@ static const struct seq_operations ip6fl_seq_ops = {
static int ip6fl_seq_open(struct inode *inode, struct file *file)
{
- return seq_open_net(inode, file, &ip6fl_seq_ops,
- sizeof(struct ip6fl_iter_state));
+ struct seq_file *seq;
+ struct ip6fl_iter_state *state;
+ int err;
+
+ err = seq_open_net(inode, file, &ip6fl_seq_ops,
+ sizeof(struct ip6fl_iter_state));
+
+ if (!err) {
+ seq = file->private_data;
+ state = ip6fl_seq_private(seq);
+ rcu_read_lock();
+ state->pid_ns = get_pid_ns(task_active_pid_ns(current));
+ rcu_read_unlock();
+ }
+ return err;
+}
+
+static int ip6fl_seq_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+ struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
+ put_pid_ns(state->pid_ns);
+ return seq_release_net(inode, file);
}
static const struct file_operations ip6fl_seq_fops = {
@@ -736,7 +769,7 @@ static const struct file_operations ip6fl_seq_fops = {
.open = ip6fl_seq_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release_net,
+ .release = ip6fl_seq_release,
};
static int __net_init ip6_flowlabel_proc_init(struct net *net)
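The owner field becoming a pid/uid union matters for the IPV6_FL_S_PROCESS and IPV6_FL_S_USER share modes, which userspace requests through the IPV6_FLOWLABEL_MGR socket option. A hedged sketch of claiming a process-shared label; the struct layout and constant values below are reproduced from my reading of the uapi header and should be treated as assumptions:

#include <netinet/in.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>

/* Declared locally to avoid <linux/in6.h> vs <netinet/in.h> clashes. */
struct in6_flowlabel_req {
	struct in6_addr	flr_dst;
	uint32_t	flr_label;	/* network byte order */
	uint8_t		flr_action;
	uint8_t		flr_share;
	uint16_t	flr_flags;
	uint16_t	flr_expires;
	uint16_t	flr_linger;
	uint32_t	__flr_pad;
};
#define IPV6_FLOWLABEL_MGR	32
#define IPV6_FL_A_GET		0
#define IPV6_FL_S_PROCESS	2
#define IPV6_FL_F_CREATE	1

/* Claim a flow label shared only within this process; after this patch
 * the kernel tracks the owner as a struct pid, not a numeric pid_t. */
int claim_flowlabel(int fd, const struct in6_addr *dst)
{
	struct in6_flowlabel_req req;

	memset(&req, 0, sizeof(req));
	req.flr_dst = *dst;
	req.flr_label = 0;			/* 0: let the kernel pick */
	req.flr_action = IPV6_FL_A_GET;
	req.flr_share = IPV6_FL_S_PROCESS;
	req.flr_flags = IPV6_FL_F_CREATE;
	return setsockopt(fd, IPPROTO_IPV6, IPV6_FLOWLABEL_MGR,
			  &req, sizeof(req));
}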
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index ef0579d5bca..7af88ef0165 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1251,7 +1251,8 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
0, 0L, 0,
- sock_i_uid(sp), 0,
+ from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
+ 0,
sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
}
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index bb46061c813..182ab9a85d6 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -190,6 +190,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
ireq = inet_rsk(req);
ireq6 = inet6_rsk(req);
treq = tcp_rsk(req);
+ treq->listener = NULL;
if (security_inet_conn_request(sk, skb, req))
goto out_free;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index cd49de3678f..09078b9bc6f 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -475,7 +475,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
goto done;
- skb = tcp_make_synack(sk, dst, req, rvp);
+ skb = tcp_make_synack(sk, dst, req, rvp, NULL);
if (skb) {
__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
@@ -987,7 +987,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, inet6_iif(skb));
if (req)
- return tcp_check_req(sk, skb, req, prev);
+ return tcp_check_req(sk, skb, req, prev, false);
nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
&ipv6_hdr(skb)->saddr, th->source,
@@ -1179,6 +1179,7 @@ have_isn:
want_cookie)
goto drop_and_free;
+ tcp_rsk(req)->listener = NULL;
inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
return 0;
@@ -1828,7 +1829,7 @@ static void tcp_v6_destroy_sock(struct sock *sk)
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
- const struct sock *sk, struct request_sock *req, int i, int uid)
+ const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
{
int ttd = req->expires - jiffies;
const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
@@ -1852,7 +1853,7 @@ static void get_openreq6(struct seq_file *seq,
1, /* timers active (only the expire timer) */
jiffies_to_clock_t(ttd),
req->retrans,
- uid,
+ from_kuid_munged(seq_user_ns(seq), uid),
0, /* non standard timer */
0, /* open_requests have no inode */
0, req);
@@ -1902,7 +1903,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
timer_active,
jiffies_delta_to_clock_t(timer_expires - jiffies),
icsk->icsk_retransmits,
- sock_i_uid(sp),
+ from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
icsk->icsk_probes_out,
sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp,
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 99d0077b56b..bbdff07eebe 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1458,7 +1458,8 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
0, 0L, 0,
- sock_i_uid(sp), 0,
+ from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
+ 0,
sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp,
atomic_read(&sp->sk_drops));
diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
index f8ba30dfeca..02ff7f2f60d 100644
--- a/net/ipx/ipx_proc.c
+++ b/net/ipx/ipx_proc.c
@@ -217,7 +217,8 @@ static int ipx_seq_socket_show(struct seq_file *seq, void *v)
seq_printf(seq, "%08X %08X %02X %03d\n",
sk_wmem_alloc_get(s),
sk_rmem_alloc_get(s),
- s->sk_state, SOCK_INODE(s->sk_socket)->i_uid);
+ s->sk_state,
+ from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)));
out:
return 0;
}
diff --git a/net/key/af_key.c b/net/key/af_key.c
index ec7d161c129..334f93b8cfc 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3661,7 +3661,7 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
atomic_read(&s->sk_refcnt),
sk_rmem_alloc_get(s),
sk_wmem_alloc_get(s),
- sock_i_uid(s),
+ from_kuid_munged(seq_user_ns(f), sock_i_uid(s)),
sock_i_ino(s)
);
return 0;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 393355d37b4..513cab08a98 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1347,11 +1347,10 @@ static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
/* Remove from tunnel list */
spin_lock_bh(&pn->l2tp_tunnel_list_lock);
list_del_rcu(&tunnel->list);
+ kfree_rcu(tunnel, rcu);
spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
- synchronize_rcu();
atomic_dec(&l2tp_tunnel_count);
- kfree(tunnel);
}
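This l2tp hunk, like the probe_resp conversion in net/mac80211/cfg.c further down, is the same cleanup: embed a struct rcu_head in the object and let kfree_rcu() free it after a grace period, instead of blocking the writer in synchronize_rcu(). The pattern, as a minimal kernel-style sketch (builds only in-tree):

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	struct rcu_head rcu;
	int payload;
};

static struct foo __rcu *global_foo;
static DEFINE_SPINLOCK(foo_lock);

static void replace_foo(struct foo *newf)
{
	struct foo *old;

	spin_lock(&foo_lock);
	old = rcu_dereference_protected(global_foo,
					lockdep_is_held(&foo_lock));
	rcu_assign_pointer(global_foo, newf);
	spin_unlock(&foo_lock);

	if (old)
		kfree_rcu(old, rcu);	/* freed once current readers finish */
}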
/* Create a socket for the tunnel, if one isn't set up by
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index a38ec6cdeee..56d583e083a 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -163,6 +163,7 @@ struct l2tp_tunnel_cfg {
struct l2tp_tunnel {
int magic; /* Should be L2TP_TUNNEL_MAGIC */
+ struct rcu_head rcu;
rwlock_t hlist_lock; /* protect session_hlist */
struct hlist_head session_hlist[L2TP_HASH_SIZE];
/* hashed list of sessions,
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index a1839c00435..7b4799cfbf8 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -151,7 +151,7 @@ static int llc_seq_socket_show(struct seq_file *seq, void *v)
sk_wmem_alloc_get(sk),
sk_rmem_alloc_get(sk) - llc->copied_seq,
sk->sk_state,
- sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : -1,
+ from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
llc->link);
out:
return 0;
diff --git a/net/mac80211/aes_cmac.c b/net/mac80211/aes_cmac.c
index 8dfd70d8fcf..a04752e9102 100644
--- a/net/mac80211/aes_cmac.c
+++ b/net/mac80211/aes_cmac.c
@@ -38,14 +38,10 @@ static void gf_mulx(u8 *pad)
static void aes_128_cmac_vector(struct crypto_cipher *tfm, size_t num_elem,
const u8 *addr[], const size_t *len, u8 *mac)
{
- u8 scratch[2 * AES_BLOCK_SIZE];
- u8 *cbc, *pad;
+ u8 cbc[AES_BLOCK_SIZE], pad[AES_BLOCK_SIZE];
const u8 *pos, *end;
size_t i, e, left, total_len;
- cbc = scratch;
- pad = scratch + AES_BLOCK_SIZE;
-
memset(cbc, 0, AES_BLOCK_SIZE);
total_len = 0;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index d41974aacf5..929f897a8de 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -102,6 +102,18 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
return 0;
}
+static int ieee80211_start_p2p_device(struct wiphy *wiphy,
+ struct wireless_dev *wdev)
+{
+ return ieee80211_do_open(wdev, true);
+}
+
+static void ieee80211_stop_p2p_device(struct wiphy *wiphy,
+ struct wireless_dev *wdev)
+{
+ ieee80211_sdata_stop(IEEE80211_WDEV_TO_SUB_IF(wdev));
+}
+
static int ieee80211_set_noack_map(struct wiphy *wiphy,
struct net_device *dev,
u16 noack_map)
@@ -330,7 +342,7 @@ static void rate_idx_to_bitrate(struct rate_info *rate, struct sta_info *sta, in
if (!(rate->flags & RATE_INFO_FLAGS_MCS)) {
struct ieee80211_supported_band *sband;
sband = sta->local->hw.wiphy->bands[
- sta->local->hw.conf.channel->band];
+ sta->local->oper_channel->band];
rate->legacy = sband->bitrates[idx].bitrate;
} else
rate->mcs = idx;
@@ -725,25 +737,23 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
const u8 *resp, size_t resp_len)
{
- struct sk_buff *new, *old;
+ struct probe_resp *new, *old;
if (!resp || !resp_len)
return 1;
old = rtnl_dereference(sdata->u.ap.probe_resp);
- new = dev_alloc_skb(resp_len);
+ new = kzalloc(sizeof(struct probe_resp) + resp_len, GFP_KERNEL);
if (!new)
return -ENOMEM;
- memcpy(skb_put(new, resp_len), resp, resp_len);
+ new->len = resp_len;
+ memcpy(new->data, resp, resp_len);
rcu_assign_pointer(sdata->u.ap.probe_resp, new);
- if (old) {
- /* TODO: use call_rcu() */
- synchronize_rcu();
- dev_kfree_skb(old);
- }
+ if (old)
+ kfree_rcu(old, rcu_head);
return 0;
}
@@ -950,7 +960,7 @@ static void ieee80211_send_layer2_update(struct sta_info *sta)
/* 802.2 Type 1 Logical Link Control (LLC) Exchange Identifier (XID)
* Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */
- memset(msg->da, 0xff, ETH_ALEN);
+ eth_broadcast_addr(msg->da);
memcpy(msg->sa, sta->sta.addr, ETH_ALEN);
msg->len = htons(6);
msg->dsap = 0;
@@ -1285,9 +1295,10 @@ static int ieee80211_change_station(struct wiphy *wiphy,
mutex_unlock(&local->sta_mtx);
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
- params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))
+ params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
ieee80211_recalc_ps(local, -1);
-
+ ieee80211_recalc_ps_vif(sdata);
+ }
return 0;
}
@@ -1661,7 +1672,7 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
}
if (!sdata->vif.bss_conf.use_short_slot &&
- sdata->local->hw.conf.channel->band == IEEE80211_BAND_5GHZ) {
+ sdata->local->oper_channel->band == IEEE80211_BAND_5GHZ) {
sdata->vif.bss_conf.use_short_slot = true;
changed |= BSS_CHANGED_ERP_SLOT;
}
@@ -1775,6 +1786,7 @@ static int ieee80211_scan(struct wiphy *wiphy,
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_DEVICE:
break;
case NL80211_IFTYPE_P2P_GO:
if (sdata->local->ops->hw_scan)
@@ -1927,7 +1939,7 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
enum nl80211_tx_power_setting type, int mbm)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
- struct ieee80211_channel *chan = local->hw.conf.channel;
+ struct ieee80211_channel *chan = local->oper_channel;
u32 changes = 0;
switch (type) {
@@ -2079,6 +2091,7 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
ieee80211_recalc_ps(local, -1);
+ ieee80211_recalc_ps_vif(sdata);
return 0;
}
@@ -2461,6 +2474,9 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
if (!sdata->u.mgd.associated)
need_offchan = true;
break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ need_offchan = true;
+ break;
default:
return -EOPNOTSUPP;
}
@@ -2653,6 +2669,7 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
u16 status_code, struct sk_buff *skb)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
struct ieee80211_tdls_data *tf;
tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
@@ -2672,8 +2689,10 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
tf->u.setup_req.capability =
cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
- ieee80211_add_srates_ie(sdata, skb, false);
- ieee80211_add_ext_srates_ie(sdata, skb, false);
+ ieee80211_add_srates_ie(sdata, skb, false,
+ local->oper_channel->band);
+ ieee80211_add_ext_srates_ie(sdata, skb, false,
+ local->oper_channel->band);
ieee80211_tdls_add_ext_capab(skb);
break;
case WLAN_TDLS_SETUP_RESPONSE:
@@ -2686,8 +2705,10 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
tf->u.setup_resp.capability =
cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
- ieee80211_add_srates_ie(sdata, skb, false);
- ieee80211_add_ext_srates_ie(sdata, skb, false);
+ ieee80211_add_srates_ie(sdata, skb, false,
+ local->oper_channel->band);
+ ieee80211_add_ext_srates_ie(sdata, skb, false,
+ local->oper_channel->band);
ieee80211_tdls_add_ext_capab(skb);
break;
case WLAN_TDLS_SETUP_CONFIRM:
@@ -2725,6 +2746,7 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
u16 status_code, struct sk_buff *skb)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
struct ieee80211_mgmt *mgmt;
mgmt = (void *)skb_put(skb, 24);
@@ -2747,8 +2769,10 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
mgmt->u.action.u.tdls_discover_resp.capability =
cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
- ieee80211_add_srates_ie(sdata, skb, false);
- ieee80211_add_ext_srates_ie(sdata, skb, false);
+ ieee80211_add_srates_ie(sdata, skb, false,
+ local->oper_channel->band);
+ ieee80211_add_ext_srates_ie(sdata, skb, false,
+ local->oper_channel->band);
ieee80211_tdls_add_ext_capab(skb);
break;
default:
@@ -3005,6 +3029,8 @@ struct cfg80211_ops mac80211_config_ops = {
.add_virtual_intf = ieee80211_add_iface,
.del_virtual_intf = ieee80211_del_iface,
.change_virtual_intf = ieee80211_change_iface,
+ .start_p2p_device = ieee80211_start_p2p_device,
+ .stop_p2p_device = ieee80211_stop_p2p_device,
.add_key = ieee80211_add_key,
.del_key = ieee80211_del_key,
.get_key = ieee80211_get_key,
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index b8dfb440c8e..97173f8144d 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -63,8 +63,6 @@ DEBUGFS_READONLY_FILE(user_power, "%d",
local->user_power_level);
DEBUGFS_READONLY_FILE(power, "%d",
local->hw.conf.power_level);
-DEBUGFS_READONLY_FILE(frequency, "%d",
- local->hw.conf.channel->center_freq);
DEBUGFS_READONLY_FILE(total_ps_buffered, "%d",
local->total_ps_buffered);
DEBUGFS_READONLY_FILE(wep_iv, "%#08x",
@@ -91,33 +89,6 @@ static const struct file_operations reset_ops = {
.llseek = noop_llseek,
};
-static ssize_t channel_type_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_local *local = file->private_data;
- const char *buf;
-
- switch (local->hw.conf.channel_type) {
- case NL80211_CHAN_NO_HT:
- buf = "no ht\n";
- break;
- case NL80211_CHAN_HT20:
- buf = "ht20\n";
- break;
- case NL80211_CHAN_HT40MINUS:
- buf = "ht40-\n";
- break;
- case NL80211_CHAN_HT40PLUS:
- buf = "ht40+\n";
- break;
- default:
- buf = "???";
- break;
- }
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
-}
-
static ssize_t hwflags_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -205,7 +176,6 @@ static ssize_t queues_read(struct file *file, char __user *user_buf,
}
DEBUGFS_READONLY_FILE_OPS(hwflags);
-DEBUGFS_READONLY_FILE_OPS(channel_type);
DEBUGFS_READONLY_FILE_OPS(queues);
/* statistics stuff */
@@ -272,12 +242,10 @@ void debugfs_hw_add(struct ieee80211_local *local)
local->debugfs.keys = debugfs_create_dir("keys", phyd);
- DEBUGFS_ADD(frequency);
DEBUGFS_ADD(total_ps_buffered);
DEBUGFS_ADD(wep_iv);
DEBUGFS_ADD(queues);
DEBUGFS_ADD_MODE(reset, 0200);
- DEBUGFS_ADD(channel_type);
DEBUGFS_ADD(hwflags);
DEBUGFS_ADD(user_power);
DEBUGFS_ADD(power);
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index df920319910..da9003b2000 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -9,7 +9,7 @@ static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata)
{
WARN(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER),
"%s: Failed check-sdata-in-driver check, flags: 0x%x\n",
- sdata->dev->name, sdata->flags);
+ sdata->dev ? sdata->dev->name : sdata->name, sdata->flags);
}
static inline struct ieee80211_sub_if_data *
@@ -22,9 +22,11 @@ get_bss_sdata(struct ieee80211_sub_if_data *sdata)
return sdata;
}
-static inline void drv_tx(struct ieee80211_local *local, struct sk_buff *skb)
+static inline void drv_tx(struct ieee80211_local *local,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
{
- local->ops->tx(&local->hw, skb);
+ local->ops->tx(&local->hw, control, skb);
}
static inline void drv_get_et_strings(struct ieee80211_sub_if_data *sdata,
@@ -526,6 +528,9 @@ static inline void drv_sta_rc_update(struct ieee80211_local *local,
sdata = get_bss_sdata(sdata);
check_sdata_in_driver(sdata);
+ WARN_ON(changed & IEEE80211_RC_SUPP_RATES_CHANGED &&
+ sdata->vif.type != NL80211_IFTYPE_ADHOC);
+
trace_drv_sta_rc_update(local, sdata, sta, changed);
if (local->ops->sta_rc_update)
local->ops->sta_rc_update(&local->hw, &sdata->vif,
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 5746d62faba..a9d93285dba 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -109,7 +109,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_PROBE_RESP);
- memset(mgmt->da, 0xff, ETH_ALEN);
+ eth_broadcast_addr(mgmt->da);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN);
mgmt->u.beacon.beacon_int = cpu_to_le16(beacon_int);
@@ -205,7 +205,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
mod_timer(&ifibss->timer,
round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
- bss = cfg80211_inform_bss_frame(local->hw.wiphy, local->hw.conf.channel,
+ bss = cfg80211_inform_bss_frame(local->hw.wiphy, chan,
mgmt, skb->len, 0, GFP_KERNEL);
cfg80211_put_bss(bss);
netif_carrier_on(sdata->dev);
@@ -294,7 +294,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
- int band = local->hw.conf.channel->band;
+ int band = local->oper_channel->band;
/*
* XXX: Consider removing the least recently used entry and
@@ -459,8 +459,11 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
}
}
- if (sta && rates_updated)
+ if (sta && rates_updated) {
+ drv_sta_rc_update(local, sdata, &sta->sta,
+ IEEE80211_RC_SUPP_RATES_CHANGED);
rate_control_rate_init(sta);
+ }
rcu_read_unlock();
}
@@ -561,7 +564,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
- int band = local->hw.conf.channel->band;
+ int band = local->oper_channel->band;
/*
* XXX: Consider removing the least recently used entry and
@@ -759,7 +762,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
return;
}
sdata_info(sdata, "IBSS not allowed on %d MHz\n",
- local->hw.conf.channel->center_freq);
+ local->oper_channel->center_freq);
/* No IBSS found - decrease scan interval and continue
* scanning. */
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index bb61f7718c4..204bfedba30 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -193,8 +193,6 @@ struct ieee80211_tx_data {
struct sta_info *sta;
struct ieee80211_key *key;
- struct ieee80211_channel *channel;
-
unsigned int flags;
};
@@ -274,9 +272,15 @@ struct beacon_data {
struct rcu_head rcu_head;
};
+struct probe_resp {
+ struct rcu_head rcu_head;
+ int len;
+ u8 data[0];
+};
+
struct ieee80211_if_ap {
struct beacon_data __rcu *beacon;
- struct sk_buff __rcu *probe_resp;
+ struct probe_resp __rcu *probe_resp;
struct list_head vlans;
@@ -359,6 +363,7 @@ enum ieee80211_sta_flags {
IEEE80211_STA_NULLFUNC_ACKED = BIT(8),
IEEE80211_STA_RESET_SIGNAL_AVE = BIT(9),
IEEE80211_STA_DISABLE_40MHZ = BIT(10),
+ IEEE80211_STA_DISABLE_VHT = BIT(11),
};
struct ieee80211_mgd_auth_data {
@@ -1075,6 +1080,8 @@ struct ieee80211_local {
struct idr ack_status_frames;
spinlock_t ack_status_lock;
+ struct ieee80211_sub_if_data __rcu *p2p_sdata;
+
/* dummy netdev for use w/ NAPI */
struct net_device napi_dev;
@@ -1131,7 +1138,7 @@ struct ieee802_11_elems {
u8 *prep;
u8 *perr;
struct ieee80211_rann_ie *rann;
- u8 *ch_switch_elem;
+ struct ieee80211_channel_sw_ie *ch_switch_ie;
u8 *country_elem;
u8 *pwr_constr_elem;
u8 *quiet_elem; /* first quite element */
@@ -1157,7 +1164,6 @@ struct ieee802_11_elems {
u8 preq_len;
u8 prep_len;
u8 perr_len;
- u8 ch_switch_elem_len;
u8 country_elem_len;
u8 pwr_constr_elem_len;
u8 quiet_elem_len;
@@ -1202,6 +1208,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
void ieee80211_send_pspoll(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata);
void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency);
+void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata);
int ieee80211_max_network_latency(struct notifier_block *nb,
unsigned long data, void *dummy);
int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata);
@@ -1291,6 +1298,8 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local);
void ieee80211_recalc_idle(struct ieee80211_local *local);
void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,
const int offset);
+int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up);
+void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata);
static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
{
@@ -1425,7 +1434,6 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
struct ieee80211_hdr *hdr);
void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
struct ieee80211_hdr *hdr, bool ack);
-void ieee80211_beacon_connection_loss_work(struct work_struct *work);
void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
enum queue_stop_reason reason);
@@ -1457,13 +1465,15 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
u8 channel);
struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
u8 *dst, u32 ratemask,
+ struct ieee80211_channel *chan,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len,
bool directed);
void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len,
- u32 ratemask, bool directed, bool no_cck);
+ u32 ratemask, bool directed, bool no_cck,
+ struct ieee80211_channel *channel);
void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
const size_t supp_rates_len,
@@ -1487,9 +1497,11 @@ u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
u32 cap);
int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, bool need_basic);
+ struct sk_buff *skb, bool need_basic,
+ enum ieee80211_band band);
int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, bool need_basic);
+ struct sk_buff *skb, bool need_basic,
+ enum ieee80211_band band);
/* channel management */
enum ieee80211_chan_mode {
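
The probe_resp change above swaps the cached sk_buff for a small struct holding an rcu_head, a length, and the frame bytes inline, so a stale response can be dropped with plain kfree() after a grace period. A userspace sketch of that layout, with the rcu_head omitted and the allocation helper name invented for illustration:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct probe_resp {
        /* struct rcu_head rcu_head;  (kernel-only, omitted here) */
        int len;
        unsigned char data[];   /* C99 spelling of the patch's data[0] */
    };

    /* hypothetical helper, for illustration only */
    static struct probe_resp *probe_resp_alloc(const void *frame, int len)
    {
        struct probe_resp *resp = malloc(sizeof(*resp) + len);

        if (!resp)
            return NULL;
        resp->len = len;
        memcpy(resp->data, frame, len);
        return resp;
    }

    int main(void)
    {
        unsigned char frame[16] = { 0x50, 0x00 }; /* mgmt/probe-resp fc */
        struct probe_resp *resp = probe_resp_alloc(frame, sizeof(frame));

        if (resp)
            printf("cached %d byte probe response\n", resp->len);
        free(resp);
        return 0;
    }
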
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index bfb57dcc153..59f8adc2aa5 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -100,6 +100,10 @@ static u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
sdata->vif.bss_conf.idle = true;
continue;
}
+
+ if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
+ continue;
+
/* count everything else */
sdata->vif.bss_conf.idle = false;
count++;
@@ -121,7 +125,8 @@ static u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
list_for_each_entry(sdata, &local->interfaces, list) {
if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
- sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+ sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
continue;
if (sdata->old_idle == sdata->vif.bss_conf.idle)
continue;
@@ -204,6 +209,8 @@ static inline int identical_mac_addr_allowed(int type1, int type2)
{
return type1 == NL80211_IFTYPE_MONITOR ||
type2 == NL80211_IFTYPE_MONITOR ||
+ type1 == NL80211_IFTYPE_P2P_DEVICE ||
+ type2 == NL80211_IFTYPE_P2P_DEVICE ||
(type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_WDS) ||
(type1 == NL80211_IFTYPE_WDS &&
(type2 == NL80211_IFTYPE_WDS ||
@@ -406,9 +413,10 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
* an error on interface type changes that have been pre-checked, so most
* checks should be in ieee80211_check_concurrent_iface.
*/
-static int ieee80211_do_open(struct net_device *dev, bool coming_up)
+int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+ struct net_device *dev = wdev->netdev;
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
u32 changed = 0;
@@ -443,6 +451,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_P2P_DEVICE:
/* no special treatment */
break;
case NL80211_IFTYPE_UNSPECIFIED:
@@ -471,7 +480,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
* Copy the hopefully now-present MAC address to
* this interface, if it has the special null one.
*/
- if (is_zero_ether_addr(dev->dev_addr)) {
+ if (dev && is_zero_ether_addr(dev->dev_addr)) {
memcpy(dev->dev_addr,
local->hw.wiphy->perm_addr,
ETH_ALEN);
@@ -536,15 +545,23 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
local->fif_probe_req++;
}
- changed |= ieee80211_reset_erp_info(sdata);
+ if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE)
+ changed |= ieee80211_reset_erp_info(sdata);
ieee80211_bss_info_change_notify(sdata, changed);
- if (sdata->vif.type == NL80211_IFTYPE_STATION ||
- sdata->vif.type == NL80211_IFTYPE_ADHOC ||
- sdata->vif.type == NL80211_IFTYPE_AP)
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MESH_POINT:
netif_carrier_off(dev);
- else
+ break;
+ case NL80211_IFTYPE_WDS:
+ case NL80211_IFTYPE_P2P_DEVICE:
+ break;
+ default:
netif_carrier_on(dev);
+ }
/*
* set default queue parameters so drivers don't
@@ -576,6 +593,9 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
}
rate_control_rate_init(sta);
+ netif_carrier_on(dev);
+ } else if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
+ rcu_assign_pointer(local->p2p_sdata, sdata);
}
/*
@@ -601,7 +621,8 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
ieee80211_recalc_ps(local, -1);
- netif_tx_start_all_queues(dev);
+ if (dev)
+ netif_tx_start_all_queues(dev);
return 0;
err_del_interface:
@@ -631,7 +652,7 @@ static int ieee80211_open(struct net_device *dev)
if (err)
return err;
- return ieee80211_do_open(dev, true);
+ return ieee80211_do_open(&sdata->wdev, true);
}
static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
@@ -652,7 +673,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
/*
* Stop TX on this interface first.
*/
- netif_tx_stop_all_queues(sdata->dev);
+ if (sdata->dev)
+ netif_tx_stop_all_queues(sdata->dev);
ieee80211_roc_purge(sdata);
@@ -691,14 +713,16 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
local->fif_probe_req--;
}
- netif_addr_lock_bh(sdata->dev);
- spin_lock_bh(&local->filter_lock);
- __hw_addr_unsync(&local->mc_list, &sdata->dev->mc,
- sdata->dev->addr_len);
- spin_unlock_bh(&local->filter_lock);
- netif_addr_unlock_bh(sdata->dev);
+ if (sdata->dev) {
+ netif_addr_lock_bh(sdata->dev);
+ spin_lock_bh(&local->filter_lock);
+ __hw_addr_unsync(&local->mc_list, &sdata->dev->mc,
+ sdata->dev->addr_len);
+ spin_unlock_bh(&local->filter_lock);
+ netif_addr_unlock_bh(sdata->dev);
- ieee80211_configure_filter(local);
+ ieee80211_configure_filter(local);
+ }
del_timer_sync(&local->dynamic_ps_timer);
cancel_work_sync(&local->dynamic_ps_enable_work);
@@ -708,7 +732,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sub_if_data *vlan, *tmpsdata;
struct beacon_data *old_beacon =
rtnl_dereference(sdata->u.ap.beacon);
- struct sk_buff *old_probe_resp =
+ struct probe_resp *old_probe_resp =
rtnl_dereference(sdata->u.ap.probe_resp);
/* sdata_running will return false, so this will disable */
@@ -720,7 +744,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
RCU_INIT_POINTER(sdata->u.ap.probe_resp, NULL);
synchronize_rcu();
kfree(old_beacon);
- kfree_skb(old_probe_resp);
+ kfree(old_probe_resp);
/* down all dependent devices, that is VLANs */
list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
@@ -759,6 +783,10 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
ieee80211_adjust_monitor_flags(sdata, -1);
ieee80211_configure_filter(local);
break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ /* relies on synchronize_rcu() below */
+ rcu_assign_pointer(local->p2p_sdata, NULL);
+ /* fall through */
default:
flush_work(&sdata->work);
/*
@@ -771,14 +799,6 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
skb_queue_purge(&sdata->skb_queue);
/*
- * Disable beaconing here for mesh only, AP and IBSS
- * are already taken care of.
- */
- if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
- ieee80211_bss_info_change_notify(sdata,
- BSS_CHANGED_BEACON_ENABLED);
-
- /*
* Free all remaining keys, there shouldn't be any,
* except maybe group keys in AP mode or WDS?
*/
@@ -877,9 +897,8 @@ static void ieee80211_set_multicast_list(struct net_device *dev)
* Called when the netdev is removed or, by the code below, before
* the interface type changes.
*/
-static void ieee80211_teardown_sdata(struct net_device *dev)
+static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata)
{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
int flushed;
int i;
@@ -900,6 +919,11 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
WARN_ON(flushed);
}
+static void ieee80211_uninit(struct net_device *dev)
+{
+ ieee80211_teardown_sdata(IEEE80211_DEV_TO_SUB_IF(dev));
+}
+
static u16 ieee80211_netdev_select_queue(struct net_device *dev,
struct sk_buff *skb)
{
@@ -909,7 +933,7 @@ static u16 ieee80211_netdev_select_queue(struct net_device *dev,
static const struct net_device_ops ieee80211_dataif_ops = {
.ndo_open = ieee80211_open,
.ndo_stop = ieee80211_stop,
- .ndo_uninit = ieee80211_teardown_sdata,
+ .ndo_uninit = ieee80211_uninit,
.ndo_start_xmit = ieee80211_subif_start_xmit,
.ndo_set_rx_mode = ieee80211_set_multicast_list,
.ndo_change_mtu = ieee80211_change_mtu,
@@ -940,7 +964,7 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev,
static const struct net_device_ops ieee80211_monitorif_ops = {
.ndo_open = ieee80211_open,
.ndo_stop = ieee80211_stop,
- .ndo_uninit = ieee80211_teardown_sdata,
+ .ndo_uninit = ieee80211_uninit,
.ndo_start_xmit = ieee80211_monitor_start_xmit,
.ndo_set_rx_mode = ieee80211_set_multicast_list,
.ndo_change_mtu = ieee80211_change_mtu,
@@ -1099,7 +1123,6 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
/* and set some type-dependent values */
sdata->vif.type = type;
sdata->vif.p2p = false;
- sdata->dev->netdev_ops = &ieee80211_dataif_ops;
sdata->wdev.iftype = type;
sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE);
@@ -1107,8 +1130,11 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
sdata->noack_map = 0;
- /* only monitor differs */
- sdata->dev->type = ARPHRD_ETHER;
+ /* only monitor/p2p-device differ */
+ if (sdata->dev) {
+ sdata->dev->netdev_ops = &ieee80211_dataif_ops;
+ sdata->dev->type = ARPHRD_ETHER;
+ }
skb_queue_head_init(&sdata->skb_queue);
INIT_WORK(&sdata->work, ieee80211_iface_work);
@@ -1146,6 +1172,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
break;
case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_P2P_DEVICE:
break;
case NL80211_IFTYPE_UNSPECIFIED:
case NUM_NL80211_IFTYPES:
@@ -1156,18 +1183,6 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
ieee80211_debugfs_add_netdev(sdata);
}
-static void ieee80211_clean_sdata(struct ieee80211_sub_if_data *sdata)
-{
- switch (sdata->vif.type) {
- case NL80211_IFTYPE_MESH_POINT:
- mesh_path_flush_by_iface(sdata);
- break;
-
- default:
- break;
- }
-}
-
static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
enum nl80211_iftype type)
{
@@ -1225,7 +1240,7 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
ieee80211_do_stop(sdata, false);
- ieee80211_teardown_sdata(sdata->dev);
+ ieee80211_teardown_sdata(sdata);
ret = drv_change_interface(local, sdata, internal_type, p2p);
if (ret)
@@ -1240,7 +1255,7 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
ieee80211_setup_sdata(sdata, type);
- err = ieee80211_do_open(sdata->dev, false);
+ err = ieee80211_do_open(&sdata->wdev, false);
WARN(err, "type change: do_open returned %d", err);
return ret;
@@ -1267,14 +1282,14 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
return ret;
} else {
/* Purge and reset type-dependent state. */
- ieee80211_teardown_sdata(sdata->dev);
+ ieee80211_teardown_sdata(sdata);
ieee80211_setup_sdata(sdata, type);
}
/* reset some values that shouldn't be kept across type changes */
sdata->vif.bss_conf.basic_rates =
ieee80211_mandatory_rates(sdata->local,
- sdata->local->hw.conf.channel->band);
+ sdata->local->oper_channel->band);
sdata->drop_unencrypted = 0;
if (type == NL80211_IFTYPE_STATION)
sdata->u.mgd.use_4addr = false;
@@ -1283,8 +1298,7 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
}
static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
- struct net_device *dev,
- enum nl80211_iftype type)
+ u8 *perm_addr, enum nl80211_iftype type)
{
struct ieee80211_sub_if_data *sdata;
u64 mask, start, addr, val, inc;
@@ -1293,13 +1307,12 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
int i;
/* default ... something at least */
- memcpy(dev->perm_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
+ memcpy(perm_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
if (is_zero_ether_addr(local->hw.wiphy->addr_mask) &&
local->hw.wiphy->n_addresses <= 1)
return;
-
mutex_lock(&local->iflist_mtx);
switch (type) {
@@ -1312,11 +1325,24 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
list_for_each_entry(sdata, &local->interfaces, list) {
if (sdata->vif.type != NL80211_IFTYPE_AP)
continue;
- memcpy(dev->perm_addr, sdata->vif.addr, ETH_ALEN);
+ memcpy(perm_addr, sdata->vif.addr, ETH_ALEN);
break;
}
/* keep default if no AP interface present */
break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ if (local->hw.flags & IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF) {
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE)
+ continue;
+ if (!ieee80211_sdata_running(sdata))
+ continue;
+ memcpy(perm_addr, sdata->vif.addr, ETH_ALEN);
+ goto out_unlock;
+ }
+ }
+ /* otherwise fall through */
default:
/* assign a new address if possible -- try n_addresses first */
for (i = 0; i < local->hw.wiphy->n_addresses; i++) {
@@ -1331,7 +1357,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
}
if (!used) {
- memcpy(dev->perm_addr,
+ memcpy(perm_addr,
local->hw.wiphy->addresses[i].addr,
ETH_ALEN);
break;
@@ -1382,7 +1408,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
}
if (!used) {
- memcpy(dev->perm_addr, tmp_addr, ETH_ALEN);
+ memcpy(perm_addr, tmp_addr, ETH_ALEN);
break;
}
addr = (start & ~mask) | (val & mask);
@@ -1391,6 +1417,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
break;
}
+ out_unlock:
mutex_unlock(&local->iflist_mtx);
}
@@ -1398,49 +1425,68 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
struct wireless_dev **new_wdev, enum nl80211_iftype type,
struct vif_params *params)
{
- struct net_device *ndev;
+ struct net_device *ndev = NULL;
struct ieee80211_sub_if_data *sdata = NULL;
int ret, i;
int txqs = 1;
ASSERT_RTNL();
- if (local->hw.queues >= IEEE80211_NUM_ACS)
- txqs = IEEE80211_NUM_ACS;
-
- ndev = alloc_netdev_mqs(sizeof(*sdata) + local->hw.vif_data_size,
- name, ieee80211_if_setup, txqs, 1);
- if (!ndev)
- return -ENOMEM;
- dev_net_set(ndev, wiphy_net(local->hw.wiphy));
-
- ndev->needed_headroom = local->tx_headroom +
- 4*6 /* four MAC addresses */
- + 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */
- + 6 /* mesh */
- + 8 /* rfc1042/bridge tunnel */
- - ETH_HLEN /* ethernet hard_header_len */
- + IEEE80211_ENCRYPT_HEADROOM;
- ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM;
-
- ret = dev_alloc_name(ndev, ndev->name);
- if (ret < 0)
- goto fail;
-
- ieee80211_assign_perm_addr(local, ndev, type);
- memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN);
- SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy));
-
- /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */
- sdata = netdev_priv(ndev);
- ndev->ieee80211_ptr = &sdata->wdev;
- memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN);
- memcpy(sdata->name, ndev->name, IFNAMSIZ);
+ if (type == NL80211_IFTYPE_P2P_DEVICE) {
+ struct wireless_dev *wdev;
+
+ sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size,
+ GFP_KERNEL);
+ if (!sdata)
+ return -ENOMEM;
+ wdev = &sdata->wdev;
+
+ sdata->dev = NULL;
+ strlcpy(sdata->name, name, IFNAMSIZ);
+ ieee80211_assign_perm_addr(local, wdev->address, type);
+ memcpy(sdata->vif.addr, wdev->address, ETH_ALEN);
+ } else {
+ if (local->hw.queues >= IEEE80211_NUM_ACS)
+ txqs = IEEE80211_NUM_ACS;
+
+ ndev = alloc_netdev_mqs(sizeof(*sdata) +
+ local->hw.vif_data_size,
+ name, ieee80211_if_setup, txqs, 1);
+ if (!ndev)
+ return -ENOMEM;
+ dev_net_set(ndev, wiphy_net(local->hw.wiphy));
+
+ ndev->needed_headroom = local->tx_headroom +
+ 4*6 /* four MAC addresses */
+ + 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */
+ + 6 /* mesh */
+ + 8 /* rfc1042/bridge tunnel */
+ - ETH_HLEN /* ethernet hard_header_len */
+ + IEEE80211_ENCRYPT_HEADROOM;
+ ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM;
+
+ ret = dev_alloc_name(ndev, ndev->name);
+ if (ret < 0) {
+ free_netdev(ndev);
+ return ret;
+ }
+
+ ieee80211_assign_perm_addr(local, ndev->perm_addr, type);
+ memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN);
+ SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy));
+
+ /* don't use IEEE80211_DEV_TO_SUB_IF -- it checks too much */
+ sdata = netdev_priv(ndev);
+ ndev->ieee80211_ptr = &sdata->wdev;
+ memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN);
+ memcpy(sdata->name, ndev->name, IFNAMSIZ);
+
+ sdata->dev = ndev;
+ }
/* initialise type-independent data */
sdata->wdev.wiphy = local->hw.wiphy;
sdata->local = local;
- sdata->dev = ndev;
#ifdef CONFIG_INET
sdata->arp_filter_state = true;
#endif
@@ -1469,17 +1515,21 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
/* setup type-dependent data */
ieee80211_setup_sdata(sdata, type);
- if (params) {
- ndev->ieee80211_ptr->use_4addr = params->use_4addr;
- if (type == NL80211_IFTYPE_STATION)
- sdata->u.mgd.use_4addr = params->use_4addr;
- }
+ if (ndev) {
+ if (params) {
+ ndev->ieee80211_ptr->use_4addr = params->use_4addr;
+ if (type == NL80211_IFTYPE_STATION)
+ sdata->u.mgd.use_4addr = params->use_4addr;
+ }
- ndev->features |= local->hw.netdev_features;
+ ndev->features |= local->hw.netdev_features;
- ret = register_netdevice(ndev);
- if (ret)
- goto fail;
+ ret = register_netdevice(ndev);
+ if (ret) {
+ free_netdev(ndev);
+ return ret;
+ }
+ }
mutex_lock(&local->iflist_mtx);
list_add_tail_rcu(&sdata->list, &local->interfaces);
@@ -1489,10 +1539,6 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
*new_wdev = &sdata->wdev;
return 0;
-
- fail:
- free_netdev(ndev);
- return ret;
}
void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
@@ -1503,11 +1549,22 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
list_del_rcu(&sdata->list);
mutex_unlock(&sdata->local->iflist_mtx);
- /* clean up type-dependent data */
- ieee80211_clean_sdata(sdata);
-
synchronize_rcu();
- unregister_netdevice(sdata->dev);
+
+ if (sdata->dev) {
+ unregister_netdevice(sdata->dev);
+ } else {
+ cfg80211_unregister_wdev(&sdata->wdev);
+ kfree(sdata);
+ }
+}
+
+void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata)
+{
+ if (WARN_ON_ONCE(!test_bit(SDATA_STATE_RUNNING, &sdata->state)))
+ return;
+ ieee80211_do_stop(sdata, true);
+ ieee80211_teardown_sdata(sdata);
}
/*
@@ -1518,6 +1575,7 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata, *tmp;
LIST_HEAD(unreg_list);
+ LIST_HEAD(wdev_list);
ASSERT_RTNL();
@@ -1525,13 +1583,20 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
list_del(&sdata->list);
- ieee80211_clean_sdata(sdata);
-
- unregister_netdevice_queue(sdata->dev, &unreg_list);
+ if (sdata->dev)
+ unregister_netdevice_queue(sdata->dev, &unreg_list);
+ else
+ list_add(&sdata->list, &wdev_list);
}
mutex_unlock(&local->iflist_mtx);
unregister_netdevice_many(&unreg_list);
list_del(&unreg_list);
+
+ list_for_each_entry_safe(sdata, tmp, &wdev_list, list) {
+ list_del(&sdata->list);
+ cfg80211_unregister_wdev(&sdata->wdev);
+ kfree(sdata);
+ }
}
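
ieee80211_remove_interfaces() now splits teardown into two paths: interfaces backed by a netdev are batched through unregister_netdevice_many(), while netdev-less ones (the new P2P device) are unregistered as bare wdevs and kfree()d by hand. A simplified userspace sketch of the split, with the list handling and types reduced to stand-ins:

    #include <stdio.h>
    #include <stdlib.h>

    struct sdata {
        int has_netdev;
        struct sdata *next;
    };

    static void remove_interfaces(struct sdata *head)
    {
        for (struct sdata *s = head, *tmp; s; s = tmp) {
            tmp = s->next;
            if (s->has_netdev)
                printf("queue for batched netdev unregister\n");
            else
                printf("unregister bare wdev + kfree\n");
            free(s); /* both paths end in a free; batching elided */
        }
    }

    int main(void)
    {
        struct sdata *p2p = calloc(1, sizeof(*p2p));
        struct sdata *sta = calloc(1, sizeof(*sta));

        if (!p2p || !sta)
            return 1;
        sta->has_netdev = 1;
        sta->next = p2p;
        remove_interfaces(sta);
        return 0;
    }
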
static int netdev_notify(struct notifier_block *nb,
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index c26e231c733..bd752936319 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -207,6 +207,10 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
sdata->vif.bss_conf.bssid = NULL;
else if (ieee80211_vif_is_mesh(&sdata->vif)) {
sdata->vif.bss_conf.bssid = zero;
+ } else if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
+ sdata->vif.bss_conf.bssid = sdata->vif.addr;
+ WARN_ONCE(changed & ~(BSS_CHANGED_IDLE),
+ "P2P Device BSS changed %#x", changed);
} else {
WARN_ON(1);
return;
@@ -514,6 +518,11 @@ ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
BIT(IEEE80211_STYPE_AUTH >> 4) |
BIT(IEEE80211_STYPE_DEAUTH >> 4),
},
+ [NL80211_IFTYPE_P2P_DEVICE] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
+ },
};
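
The new P2P_DEVICE entry in the default mgmt-frame subtype table lets userspace transmit any management subtype (.tx = 0xffff) while receiving only Action and Probe Request frames. The bitmaps work because the management subtype occupies bits 4-7 of the 802.11 frame control field, so BIT(stype >> 4) maps each subtype onto one bit of a u16. A sketch of the arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    #define BIT(n) (1u << (n))

    /* subtype values as they appear in the frame control field */
    #define IEEE80211_STYPE_PROBE_REQ 0x0040
    #define IEEE80211_STYPE_ACTION    0x00d0

    int main(void)
    {
        uint16_t rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
                      BIT(IEEE80211_STYPE_PROBE_REQ >> 4);

        printf("rx bitmap: 0x%04x\n", rx); /* 0x2010 */
        return 0;
    }
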
static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = {
@@ -536,6 +545,11 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
int priv_size, i;
struct wiphy *wiphy;
+ if (WARN_ON(!ops->tx || !ops->start || !ops->stop || !ops->config ||
+ !ops->add_interface || !ops->remove_interface ||
+ !ops->configure_filter))
+ return NULL;
+
if (WARN_ON(ops->sta_state && (ops->sta_add || ops->sta_remove)))
return NULL;
@@ -588,13 +602,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);
- BUG_ON(!ops->tx);
- BUG_ON(!ops->start);
- BUG_ON(!ops->stop);
- BUG_ON(!ops->config);
- BUG_ON(!ops->add_interface);
- BUG_ON(!ops->remove_interface);
- BUG_ON(!ops->configure_filter);
local->ops = ops;
/* set up some defaults */
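
Replacing the scattered BUG_ON() checks with one early WARN_ON() that returns NULL turns a driver registering incomplete ops from a kernel crash into a failed allocation the caller can handle. A sketch of the validate-up-front pattern with stand-in types:

    #include <stdio.h>

    struct ops { void (*tx)(void); void (*start)(void); };

    /* reject missing mandatory callbacks instead of crashing later */
    static struct ops *hw_alloc(struct ops *ops)
    {
        if (!ops->tx || !ops->start) {
            fprintf(stderr, "missing mandatory op\n");
            return NULL;
        }
        return ops;
    }

    int main(void)
    {
        struct ops broken = { 0 };

        printf("%s\n", hw_alloc(&broken) ? "ok" : "rejected");
        return 0;
    }
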
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 0e2f83e7127..ff0296c7bab 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -109,11 +109,11 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
/* Disallow HT40+/- mismatch */
if (ie->ht_operation &&
- (local->_oper_channel_type == NL80211_CHAN_HT40MINUS ||
- local->_oper_channel_type == NL80211_CHAN_HT40PLUS) &&
+ (sdata->vif.bss_conf.channel_type == NL80211_CHAN_HT40MINUS ||
+ sdata->vif.bss_conf.channel_type == NL80211_CHAN_HT40PLUS) &&
(sta_channel_type == NL80211_CHAN_HT40MINUS ||
sta_channel_type == NL80211_CHAN_HT40PLUS) &&
- local->_oper_channel_type != sta_channel_type)
+ sdata->vif.bss_conf.channel_type != sta_channel_type)
goto mismatch;
return true;
@@ -355,17 +355,18 @@ int mesh_add_ds_params_ie(struct sk_buff *skb,
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *chan = local->oper_channel;
u8 *pos;
if (skb_tailroom(skb) < 3)
return -ENOMEM;
- sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+ sband = local->hw.wiphy->bands[chan->band];
if (sband->band == IEEE80211_BAND_2GHZ) {
pos = skb_put(skb, 2 + 1);
*pos++ = WLAN_EID_DS_PARAMS;
*pos++ = 1;
- *pos++ = ieee80211_frequency_to_channel(local->hw.conf.channel->center_freq);
+ *pos++ = ieee80211_frequency_to_channel(chan->center_freq);
}
return 0;
@@ -380,7 +381,7 @@ int mesh_add_ht_cap_ie(struct sk_buff *skb,
sband = local->hw.wiphy->bands[local->oper_channel->band];
if (!sband->ht_cap.ht_supported ||
- local->_oper_channel_type == NL80211_CHAN_NO_HT)
+ sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT)
return 0;
if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap))
@@ -397,7 +398,8 @@ int mesh_add_ht_oper_ie(struct sk_buff *skb,
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_channel *channel = local->oper_channel;
- enum nl80211_channel_type channel_type = local->_oper_channel_type;
+ enum nl80211_channel_type channel_type =
+ sdata->vif.bss_conf.channel_type;
struct ieee80211_supported_band *sband =
local->hw.wiphy->bands[channel->band];
struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
@@ -608,12 +610,14 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL;
sdata->vif.bss_conf.basic_rates =
ieee80211_mandatory_rates(sdata->local,
- sdata->local->hw.conf.channel->band);
+ sdata->local->oper_channel->band);
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON |
BSS_CHANGED_BEACON_ENABLED |
BSS_CHANGED_HT |
BSS_CHANGED_BASIC_RATES |
BSS_CHANGED_BEACON_INT);
+
+ netif_carrier_on(sdata->dev);
}
void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
@@ -621,9 +625,15 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ netif_carrier_off(sdata->dev);
+
+ /* stop the beacon */
ifmsh->mesh_id_len = 0;
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
- sta_info_flush(local, NULL);
+
+ /* flush STAs and mpaths on this iface */
+ sta_info_flush(sdata->local, sdata);
+ mesh_path_flush_by_iface(sdata);
del_timer_sync(&sdata->u.mesh.housekeeping_timer);
del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 13fd5b5fdb0..25d0f17dec7 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -215,6 +215,9 @@ struct mesh_rmc {
/* Maximum number of paths per interface */
#define MESH_MAX_MPATHS 1024
+/* Number of frames buffered per destination for unresolved destinations */
+#define MESH_FRAME_QUEUE_LEN 10
+
/* Public interfaces */
/* Various */
int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 494bc39f61a..47aeee2d8db 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -17,8 +17,6 @@
#define MAX_METRIC 0xffffffff
#define ARITH_SHIFT 8
-/* Number of frames buffered per destination for unresolved destinations */
-#define MESH_FRAME_QUEUE_LEN 10
#define MAX_PREQ_QUEUE_LEN 64
/* Destination only */
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 075bc535c60..aa749818860 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -203,23 +203,17 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
struct sk_buff *skb;
struct ieee80211_hdr *hdr;
- struct sk_buff_head tmpq;
unsigned long flags;
rcu_assign_pointer(mpath->next_hop, sta);
- __skb_queue_head_init(&tmpq);
-
spin_lock_irqsave(&mpath->frame_queue.lock, flags);
-
- while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
+ skb_queue_walk(&mpath->frame_queue, skb) {
hdr = (struct ieee80211_hdr *) skb->data;
memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
- __skb_queue_tail(&tmpq, skb);
}
- skb_queue_splice(&tmpq, &mpath->frame_queue);
spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
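
mesh_path_assign_nexthop() previously drained the frame queue into a temporary list, patched each header, and spliced the list back; the rewrite walks the queue in place under its lock and rewrites the addresses directly. A simplified sketch using a plain linked list in place of sk_buff_head:

    #include <stdio.h>
    #include <string.h>

    struct frame { unsigned char addr1[6]; struct frame *next; };

    /* rewrite next-hop addresses in place; no dequeue/requeue cycle */
    static void assign_nexthop(struct frame *head, const unsigned char *hop)
    {
        for (struct frame *f = head; f; f = f->next)
            memcpy(f->addr1, hop, 6);
    }

    int main(void)
    {
        struct frame b = { .next = NULL }, a = { .next = &b };
        unsigned char hop[6] = { 0x02, 0, 0, 0, 0, 1 };

        assign_nexthop(&a, hop);
        printf("%02x\n", b.addr1[5]); /* 01: both frames updated */
        return 0;
    }
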
@@ -285,40 +279,42 @@ static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
struct mesh_path *from_mpath,
bool copy)
{
- struct sk_buff *skb, *cp_skb = NULL;
- struct sk_buff_head gateq, failq;
+ struct sk_buff *skb, *fskb, *tmp;
+ struct sk_buff_head failq;
unsigned long flags;
- int num_skbs;
BUG_ON(gate_mpath == from_mpath);
BUG_ON(!gate_mpath->next_hop);
- __skb_queue_head_init(&gateq);
__skb_queue_head_init(&failq);
spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
skb_queue_splice_init(&from_mpath->frame_queue, &failq);
spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
- num_skbs = skb_queue_len(&failq);
-
- while (num_skbs--) {
- skb = __skb_dequeue(&failq);
- if (copy) {
- cp_skb = skb_copy(skb, GFP_ATOMIC);
- if (cp_skb)
- __skb_queue_tail(&failq, cp_skb);
+ skb_queue_walk_safe(&failq, fskb, tmp) {
+ if (skb_queue_len(&gate_mpath->frame_queue) >=
+ MESH_FRAME_QUEUE_LEN) {
+ mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
+ break;
}
+ skb = skb_copy(fskb, GFP_ATOMIC);
+ if (WARN_ON(!skb))
+ break;
+
prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
- __skb_queue_tail(&gateq, skb);
+ skb_queue_tail(&gate_mpath->frame_queue, skb);
+
+ if (copy)
+ continue;
+
+ __skb_unlink(fskb, &failq);
+ kfree_skb(fskb);
}
- spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
- skb_queue_splice(&gateq, &gate_mpath->frame_queue);
mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));
- spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);
if (!copy)
return;
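
With MESH_FRAME_QUEUE_LEN now visible from mesh.h, mesh_path_move_to_queue() can cap the gate path's queue and stop moving frames once it fills instead of splicing everything across. A toy sketch of the bounded move, with counters standing in for skb queues:

    #include <stdio.h>

    #define QUEUE_MAX 10 /* plays the role of MESH_FRAME_QUEUE_LEN */

    /* move frames until the source empties or the destination fills */
    static int move_frames(int *dst_len, int src_len)
    {
        int moved = 0;

        while (src_len-- > 0 && *dst_len < QUEUE_MAX) {
            (*dst_len)++;
            moved++;
        }
        return moved;
    }

    int main(void)
    {
        int gate_q = 7;

        printf("moved %d\n", move_frames(&gate_q, 5)); /* 3, then full */
        return 0;
    }
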
@@ -531,7 +527,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
read_lock_bh(&pathtbl_resize_lock);
memcpy(new_mpath->dst, dst, ETH_ALEN);
- memset(new_mpath->rann_snd_addr, 0xff, ETH_ALEN);
+ eth_broadcast_addr(new_mpath->rann_snd_addr);
new_mpath->is_root = false;
new_mpath->sdata = sdata;
new_mpath->flags = 0;
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index f20e9f26d13..9d7ad366ef0 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -117,7 +117,7 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
u16 ht_opmode;
bool non_ht_sta = false, ht20_sta = false;
- if (local->_oper_channel_type == NL80211_CHAN_NO_HT)
+ if (sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT)
return 0;
rcu_read_lock();
@@ -147,7 +147,8 @@ out:
if (non_ht_sta)
ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED;
- else if (ht20_sta && local->_oper_channel_type > NL80211_CHAN_HT20)
+ else if (ht20_sta &&
+ sdata->vif.bss_conf.channel_type > NL80211_CHAN_HT20)
ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_20MHZ;
else
ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
@@ -215,12 +216,14 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
u8 *da, __le16 llid, __le16 plid, __le16 reason) {
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
+ struct ieee80211_tx_info *info;
struct ieee80211_mgmt *mgmt;
bool include_plid = false;
u16 peering_proto = 0;
u8 *pos, ie_len = 4;
int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.self_prot) +
sizeof(mgmt->u.action.u.self_prot);
+ int err = -ENOMEM;
skb = dev_alloc_skb(local->tx_headroom +
hdr_len +
@@ -236,6 +239,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
sdata->u.mesh.ie_len);
if (!skb)
return -1;
+ info = IEEE80211_SKB_CB(skb);
skb_reserve(skb, local->tx_headroom);
mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
memset(mgmt, 0, hdr_len);
@@ -256,15 +260,18 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
pos = skb_put(skb, 2);
memcpy(pos + 2, &plid, 2);
}
- if (ieee80211_add_srates_ie(sdata, skb, true) ||
- ieee80211_add_ext_srates_ie(sdata, skb, true) ||
+ if (ieee80211_add_srates_ie(sdata, skb, true,
+ local->oper_channel->band) ||
+ ieee80211_add_ext_srates_ie(sdata, skb, true,
+ local->oper_channel->band) ||
mesh_add_rsn_ie(skb, sdata) ||
mesh_add_meshid_ie(skb, sdata) ||
mesh_add_meshconf_ie(skb, sdata))
- return -1;
+ goto free;
} else { /* WLAN_SP_MESH_PEERING_CLOSE */
+ info->flags |= IEEE80211_TX_CTL_NO_ACK;
if (mesh_add_meshid_ie(skb, sdata))
- return -1;
+ goto free;
}
/* Add Mesh Peering Management element */
@@ -283,11 +290,12 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
ie_len += 2; /* reason code */
break;
default:
- return -EINVAL;
+ err = -EINVAL;
+ goto free;
}
if (WARN_ON(skb_tailroom(skb) < 2 + ie_len))
- return -ENOMEM;
+ goto free;
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_PEER_MGMT;
@@ -308,14 +316,17 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
if (action != WLAN_SP_MESH_PEERING_CLOSE) {
if (mesh_add_ht_cap_ie(skb, sdata) ||
mesh_add_ht_oper_ie(skb, sdata))
- return -1;
+ goto free;
}
if (mesh_add_vendor_ies(skb, sdata))
- return -1;
+ goto free;
ieee80211_tx_skb(sdata, skb);
return 0;
+free:
+ kfree_skb(skb);
+ return err;
}
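
The earlier code returned straight out of mesh_plink_frame_tx() on each failure and leaked the allocated skb; the patch funnels every post-allocation failure through a single free label. A sketch of that cleanup pattern, with the steps and error codes purely illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    /* every failure after the allocation reaches one label that frees
     * the buffer, so no early return can leak it */
    static int build_and_send(int fail_at)
    {
        int err = -1;
        char *buf = malloc(64);

        if (!buf)
            return -1;

        if (fail_at == 1)           /* e.g. an IE did not fit */
            goto free;
        if (fail_at == 2) {         /* e.g. unknown action code */
            err = -22;              /* -EINVAL */
            goto free;
        }

        printf("sent\n");
        free(buf);                  /* stand-in for the tx handoff */
        return 0;
    free:
        free(buf);
        return err;
    }

    int main(void)
    {
        build_and_send(0);
        printf("%d\n", build_and_send(2)); /* -22 */
        return 0;
    }
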
/**
@@ -360,9 +371,14 @@ static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata,
spin_lock_bh(&sta->lock);
sta->last_rx = jiffies;
+ if (sta->plink_state == NL80211_PLINK_ESTAB) {
+ spin_unlock_bh(&sta->lock);
+ return sta;
+ }
+
sta->sta.supp_rates[band] = rates;
if (elems->ht_cap_elem &&
- sdata->local->_oper_channel_type != NL80211_CHAN_NO_HT)
+ sdata->vif.bss_conf.channel_type != NL80211_CHAN_NO_HT)
ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
elems->ht_cap_elem,
&sta->sta.ht_cap);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index a4a5acdbaa4..a8cf70bf1cb 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -146,6 +146,9 @@ void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata)
if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)
return;
+ if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
+ return;
+
mod_timer(&sdata->u.mgd.bcn_mon_timer,
round_jiffies_up(jiffies + sdata->u.mgd.beacon_timeout));
}
@@ -182,15 +185,15 @@ static u32 ieee80211_config_ht_tx(struct ieee80211_sub_if_data *sdata,
u16 ht_opmode;
bool disable_40 = false;
- sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+ sband = local->hw.wiphy->bands[local->oper_channel->band];
switch (sdata->vif.bss_conf.channel_type) {
case NL80211_CHAN_HT40PLUS:
- if (local->hw.conf.channel->flags & IEEE80211_CHAN_NO_HT40PLUS)
+ if (local->oper_channel->flags & IEEE80211_CHAN_NO_HT40PLUS)
disable_40 = true;
break;
case NL80211_CHAN_HT40MINUS:
- if (local->hw.conf.channel->flags & IEEE80211_CHAN_NO_HT40MINUS)
+ if (local->oper_channel->flags & IEEE80211_CHAN_NO_HT40MINUS)
disable_40 = true;
break;
default:
@@ -326,6 +329,26 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
ieee80211_ie_build_ht_cap(pos, &ht_cap, cap);
}
+static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb,
+ struct ieee80211_supported_band *sband)
+{
+ u8 *pos;
+ u32 cap;
+ struct ieee80211_sta_vht_cap vht_cap;
+
+ BUILD_BUG_ON(sizeof(vht_cap) != sizeof(sband->vht_cap));
+
+ memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap));
+
+ /* determine capability flags */
+ cap = vht_cap.cap;
+
+ /* reserve and fill IE */
+ pos = skb_put(skb, sizeof(struct ieee80211_vht_capabilities) + 2);
+ ieee80211_ie_build_vht_cap(pos, &vht_cap, cap);
+}
+
static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
@@ -371,6 +394,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
4 + /* power capability */
2 + 2 * sband->n_channels + /* supported channels */
2 + sizeof(struct ieee80211_ht_cap) + /* HT */
+ 2 + sizeof(struct ieee80211_vht_capabilities) + /* VHT */
assoc_data->ie_len + /* extra IEs */
9, /* WMM */
GFP_KERNEL);
@@ -503,6 +527,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
ieee80211_add_ht_ie(sdata, skb, assoc_data->ap_ht_param,
sband, local->oper_channel, ifmgd->ap_smps);
+ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
+ ieee80211_add_vht_ie(sdata, skb, sband);
+
/* if present, add any custom non-vendor IEs that go after HT */
if (assoc_data->ie_len && assoc_data->ie) {
noffset = ieee80211_ie_split_vendor(assoc_data->ie,
@@ -583,8 +610,6 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
IEEE80211_SKB_CB(skb)->flags |=
IEEE80211_TX_INTFL_DONT_ENCRYPT;
- drv_mgd_prepare_tx(local, sdata);
-
ieee80211_tx_skb(sdata, skb);
}
}
@@ -687,6 +712,7 @@ static void ieee80211_chswitch_work(struct work_struct *work)
/* XXX: shouldn't really modify cfg80211-owned data! */
ifmgd->associated->channel = sdata->local->oper_channel;
+ /* XXX: wait for a beacon first? */
ieee80211_wake_queues_by_reason(&sdata->local->hw,
IEEE80211_QUEUE_STOP_REASON_CSA);
out:
@@ -763,36 +789,32 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
sdata->local->csa_channel = new_ch;
+ ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
+
+ if (sw_elem->mode)
+ ieee80211_stop_queues_by_reason(&sdata->local->hw,
+ IEEE80211_QUEUE_STOP_REASON_CSA);
+
if (sdata->local->ops->channel_switch) {
/* use driver's channel switch callback */
- struct ieee80211_channel_switch ch_switch;
- memset(&ch_switch, 0, sizeof(ch_switch));
- ch_switch.timestamp = timestamp;
- if (sw_elem->mode) {
- ch_switch.block_tx = true;
- ieee80211_stop_queues_by_reason(&sdata->local->hw,
- IEEE80211_QUEUE_STOP_REASON_CSA);
- }
- ch_switch.channel = new_ch;
- ch_switch.count = sw_elem->count;
- ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
+ struct ieee80211_channel_switch ch_switch = {
+ .timestamp = timestamp,
+ .block_tx = sw_elem->mode,
+ .channel = new_ch,
+ .count = sw_elem->count,
+ };
+
drv_channel_switch(sdata->local, &ch_switch);
return;
}
/* channel switch handled in software */
- if (sw_elem->count <= 1) {
+ if (sw_elem->count <= 1)
ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work);
- } else {
- if (sw_elem->mode)
- ieee80211_stop_queues_by_reason(&sdata->local->hw,
- IEEE80211_QUEUE_STOP_REASON_CSA);
- ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
+ else
mod_timer(&ifmgd->chswitch_timer,
- jiffies +
- msecs_to_jiffies(sw_elem->count *
- cbss->beacon_interval));
- }
+ TU_TO_EXP_TIME(sw_elem->count *
+ cbss->beacon_interval));
}
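
The mod_timer() call now expresses the channel-switch deadline with TU_TO_EXP_TIME() rather than msecs_to_jiffies(). A beacon interval is counted in 802.11 time units of 1024 microseconds, so the macro (assumed here to fold jiffies plus usecs_to_jiffies(count * 1024) into one expression) avoids the old code's implicit TU-to-millisecond approximation. The underlying arithmetic:

    #include <stdio.h>

    /* one 802.11 time unit (TU) is 1024 microseconds */
    static unsigned long tu_to_usec(unsigned long tu)
    {
        return tu * 1024;
    }

    int main(void)
    {
        unsigned long count = 5, beacon_int = 100; /* both in TU */

        printf("switch in %lu us\n", tu_to_usec(count * beacon_int));
        return 0;
    }
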
static void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
@@ -1007,6 +1029,16 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
ieee80211_change_ps(local);
}
+void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata)
+{
+ bool ps_allowed = ieee80211_powersave_allowed(sdata);
+
+ if (sdata->vif.bss_conf.ps != ps_allowed) {
+ sdata->vif.bss_conf.ps = ps_allowed;
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_PS);
+ }
+}
+
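
ieee80211_recalc_ps_vif() caches the per-vif power-save decision in bss_conf.ps and raises BSS_CHANGED_PS only when that decision actually flips, sparing drivers redundant notifications. A sketch of the recalc-and-notify-on-change pattern, with ps_allowed() as a stand-in for the real policy check:

    #include <stdio.h>
    #include <stdbool.h>

    struct vif { bool ps; };

    static bool ps_allowed(void) { return true; } /* stand-in policy */

    /* notify only when the cached state and the new decision differ */
    static void recalc_ps_vif(struct vif *vif)
    {
        bool allowed = ps_allowed();

        if (vif->ps != allowed) {
            vif->ps = allowed;
            printf("notify driver: ps=%d\n", allowed);
        }
    }

    int main(void)
    {
        struct vif v = { .ps = false };

        recalc_ps_vif(&v); /* notifies */
        recalc_ps_vif(&v); /* no-op */
        return 0;
    }
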
void ieee80211_dynamic_ps_disable_work(struct work_struct *work)
{
struct ieee80211_local *local =
@@ -1239,7 +1271,7 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
}
use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME);
- if (sdata->local->hw.conf.channel->band == IEEE80211_BAND_5GHZ)
+ if (sdata->local->oper_channel->band == IEEE80211_BAND_5GHZ)
use_short_slot = true;
if (use_protection != bss_conf->use_cts_prot) {
@@ -1310,6 +1342,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
ieee80211_recalc_smps(local);
mutex_unlock(&local->iflist_mtx);
+ ieee80211_recalc_ps_vif(sdata);
+
netif_tx_start_all_queues(sdata->dev);
netif_carrier_on(sdata->dev);
}
@@ -1371,6 +1405,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
}
local->ps_sdata = NULL;
+ /* disable per-vif ps */
+ ieee80211_recalc_ps_vif(sdata);
+
/* flush out any pending frame (e.g. DELBA) before deauth/disassoc */
if (tx)
drv_flush(local, false);
@@ -1542,7 +1579,8 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
ssid_len = ssid[1];
ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid_len, NULL,
- 0, (u32) -1, true, false);
+ 0, (u32) -1, true, false,
+ ifmgd->associated->channel);
}
ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
@@ -1645,7 +1683,9 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
ssid_len = ssid[1];
skb = ieee80211_build_probe_req(sdata, cbss->bssid,
- (u32) -1, ssid + 2, ssid_len,
+ (u32) -1,
+ sdata->local->oper_channel,
+ ssid + 2, ssid_len,
NULL, 0, true);
return skb;
@@ -1656,7 +1696,6 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
- u8 bssid[ETH_ALEN];
u8 frame_buf[DEAUTH_DISASSOC_LEN];
mutex_lock(&ifmgd->mtx);
@@ -1665,9 +1704,8 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
return;
}
- memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
-
- sdata_info(sdata, "Connection to AP %pM lost\n", bssid);
+ sdata_info(sdata, "Connection to AP %pM lost\n",
+ ifmgd->associated->bssid);
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
@@ -1685,7 +1723,7 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
mutex_unlock(&local->mtx);
}
-void ieee80211_beacon_connection_loss_work(struct work_struct *work)
+static void ieee80211_beacon_connection_loss_work(struct work_struct *work)
{
struct ieee80211_sub_if_data *sdata =
container_of(work, struct ieee80211_sub_if_data,
@@ -2232,14 +2270,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
mutex_unlock(&local->iflist_mtx);
}
- if (elems->ch_switch_elem && (elems->ch_switch_elem_len == 3) &&
- (memcmp(mgmt->bssid, sdata->u.mgd.associated->bssid,
- ETH_ALEN) == 0)) {
- struct ieee80211_channel_sw_ie *sw_elem =
- (struct ieee80211_channel_sw_ie *)elems->ch_switch_elem;
- ieee80211_sta_process_chanswitch(sdata, sw_elem,
+ if (elems->ch_switch_ie &&
+ memcmp(mgmt->bssid, sdata->u.mgd.associated->bssid, ETH_ALEN) == 0)
+ ieee80211_sta_process_chanswitch(sdata, elems->ch_switch_ie,
bss, rx_status->mactime);
- }
}
@@ -2326,7 +2360,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
if (baselen > len)
return;
- if (rx_status->freq != local->hw.conf.channel->center_freq)
+ if (rx_status->freq != local->oper_channel->center_freq)
return;
if (ifmgd->assoc_data && !ifmgd->assoc_data->have_beacon &&
@@ -2490,7 +2524,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
!(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) {
struct ieee80211_supported_band *sband;
- sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+ sband = local->hw.wiphy->bands[local->oper_channel->band];
changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation,
bssid, true);
@@ -2673,7 +2707,8 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
* will not answer to direct packet in unassociated state.
*/
ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1],
- NULL, 0, (u32) -1, true, false);
+ NULL, 0, (u32) -1, true, false,
+ auth_data->bss->channel);
}
auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
@@ -3000,41 +3035,17 @@ int ieee80211_max_network_latency(struct notifier_block *nb,
return 0;
}
-static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
- struct cfg80211_bss *cbss, bool assoc)
+static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_bss *cbss)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct ieee80211_bss *bss = (void *)cbss->priv;
- struct sta_info *sta = NULL;
- bool have_sta = false;
- int err;
int ht_cfreq;
enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
const u8 *ht_oper_ie;
const struct ieee80211_ht_operation *ht_oper = NULL;
struct ieee80211_supported_band *sband;
- if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data))
- return -EINVAL;
-
- if (assoc) {
- rcu_read_lock();
- have_sta = sta_info_get(sdata, cbss->bssid);
- rcu_read_unlock();
- }
-
- if (!have_sta) {
- sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL);
- if (!sta)
- return -ENOMEM;
- }
-
- mutex_lock(&local->mtx);
- ieee80211_recalc_idle(sdata->local);
- mutex_unlock(&local->mtx);
-
- /* switch to the right channel */
sband = local->hw.wiphy->bands[cbss->channel->band];
ifmgd->flags &= ~IEEE80211_STA_DISABLE_40MHZ;
@@ -3097,10 +3108,51 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
local->oper_channel = cbss->channel;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
- if (sta) {
+ return 0;
+}
+
+static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_bss *cbss, bool assoc)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct ieee80211_bss *bss = (void *)cbss->priv;
+ struct sta_info *new_sta = NULL;
+ bool have_sta = false;
+ int err;
+
+ if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data))
+ return -EINVAL;
+
+ if (assoc) {
+ rcu_read_lock();
+ have_sta = sta_info_get(sdata, cbss->bssid);
+ rcu_read_unlock();
+ }
+
+ if (!have_sta) {
+ new_sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL);
+ if (!new_sta)
+ return -ENOMEM;
+ }
+
+ mutex_lock(&local->mtx);
+ ieee80211_recalc_idle(sdata->local);
+ mutex_unlock(&local->mtx);
+
+ if (new_sta) {
u32 rates = 0, basic_rates = 0;
bool have_higher_than_11mbit;
int min_rate = INT_MAX, min_rate_index = -1;
+ struct ieee80211_supported_band *sband;
+
+ sband = local->hw.wiphy->bands[cbss->channel->band];
+
+ err = ieee80211_prep_channel(sdata, cbss);
+ if (err) {
+ sta_info_free(local, new_sta);
+ return err;
+ }
ieee80211_get_rates(sband, bss->supp_rates,
bss->supp_rates_len,
@@ -3122,7 +3174,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
basic_rates = BIT(min_rate_index);
}
- sta->sta.supp_rates[cbss->channel->band] = rates;
+ new_sta->sta.supp_rates[cbss->channel->band] = rates;
sdata->vif.bss_conf.basic_rates = basic_rates;
/* cf. IEEE 802.11 9.2.12 */
@@ -3145,10 +3197,10 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
BSS_CHANGED_BEACON_INT);
if (assoc)
- sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
+ sta_info_pre_move_state(new_sta, IEEE80211_STA_AUTH);
- err = sta_info_insert(sta);
- sta = NULL;
+ err = sta_info_insert(new_sta);
+ new_sta = NULL;
if (err) {
sdata_info(sdata,
"failed to insert STA entry for the AP (error %d)\n",
@@ -3300,9 +3352,13 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
}
/* prepare assoc data */
-
- ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N;
- ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
+
+ /*
+ * keep only the 40 MHz disable bit set as it might have
+ * been set during authentication already, all other bits
+ * should be reset for a new connection
+ */
+ ifmgd->flags &= IEEE80211_STA_DISABLE_40MHZ;
ifmgd->beacon_crc_valid = false;
@@ -3318,21 +3374,34 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP ||
req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) {
ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
+ ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
netdev_info(sdata->dev,
- "disabling HT due to WEP/TKIP use\n");
+ "disabling HT/VHT due to WEP/TKIP use\n");
}
}
- if (req->flags & ASSOC_REQ_DISABLE_HT)
+ if (req->flags & ASSOC_REQ_DISABLE_HT) {
ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
+ ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+ }
/* Also disable HT if we don't support it or the AP doesn't use WMM */
sband = local->hw.wiphy->bands[req->bss->channel->band];
if (!sband->ht_cap.ht_supported ||
local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) {
ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
- netdev_info(sdata->dev,
- "disabling HT as WMM/QoS is not supported\n");
+ if (!bss->wmm_used)
+ netdev_info(sdata->dev,
+ "disabling HT as WMM/QoS is not supported by the AP\n");
+ }
+
+ /* disable VHT if we don't support it or the AP doesn't use WMM */
+ if (!sband->vht_cap.vht_supported ||
+ local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) {
+ ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+ if (!bss->wmm_used)
+ netdev_info(sdata->dev,
+ "disabling VHT as WMM/QoS is not supported by the AP\n");
}
memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
@@ -3467,14 +3536,17 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
req->bssid, req->reason_code);
if (ifmgd->associated &&
- ether_addr_equal(ifmgd->associated->bssid, req->bssid))
+ ether_addr_equal(ifmgd->associated->bssid, req->bssid)) {
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
req->reason_code, true, frame_buf);
- else
+ } else {
+ drv_mgd_prepare_tx(sdata->local, sdata);
ieee80211_send_deauth_disassoc(sdata, req->bssid,
IEEE80211_STYPE_DEAUTH,
req->reason_code, true,
frame_buf);
+ }
+
mutex_unlock(&ifmgd->mtx);
__cfg80211_send_deauth(sdata->dev, frame_buf, DEAUTH_DISASSOC_LEN);
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 635c3250c66..507121dad08 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -116,6 +116,9 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
if (!ieee80211_sdata_running(sdata))
continue;
+ if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
+ continue;
+
if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
@@ -144,6 +147,9 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
mutex_lock(&local->iflist_mtx);
list_for_each_entry(sdata, &local->interfaces, list) {
+ if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
+ continue;
+
if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 6e4fd32c661..10de668eb9f 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -56,7 +56,7 @@ static inline void rate_control_rate_init(struct sta_info *sta)
if (!ref)
return;
- sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+ sband = local->hw.wiphy->bands[local->oper_channel->band];
ref->ops->rate_init(ref->priv, sband, ista, priv_sta);
set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 0cb4edee6af..b382605c573 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -60,7 +60,9 @@ static inline int should_drop_frame(struct sk_buff *skb,
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
+ if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
+ RX_FLAG_FAILED_PLCP_CRC |
+ RX_FLAG_AMPDU_IS_ZEROLEN))
return 1;
if (unlikely(skb->len < 16 + present_fcs_len))
return 1;
@@ -91,6 +93,13 @@ ieee80211_rx_radiotap_len(struct ieee80211_local *local,
if (status->flag & RX_FLAG_HT) /* HT info */
len += 3;
+ if (status->flag & RX_FLAG_AMPDU_DETAILS) {
+ /* padding */
+ while (len & 3)
+ len++;
+ len += 8;
+ }
+
return len;
}
@@ -215,6 +224,37 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
pos++;
*pos++ = status->rate_idx;
}
+
+ if (status->flag & RX_FLAG_AMPDU_DETAILS) {
+ u16 flags = 0;
+
+ /* ensure 4 byte alignment */
+ while ((pos - (u8 *)rthdr) & 3)
+ pos++;
+ rthdr->it_present |=
+ cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
+ put_unaligned_le32(status->ampdu_reference, pos);
+ pos += 4;
+ if (status->flag & RX_FLAG_AMPDU_REPORT_ZEROLEN)
+ flags |= IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN;
+ if (status->flag & RX_FLAG_AMPDU_IS_ZEROLEN)
+ flags |= IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN;
+ if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
+ flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
+ if (status->flag & RX_FLAG_AMPDU_IS_LAST)
+ flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
+ if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
+ flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
+ if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
+ flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
+ put_unaligned_le16(flags, pos);
+ pos += 2;
+ if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
+ *pos++ = status->ampdu_delimiter_crc;
+ else
+ *pos++ = 0;
+ *pos++ = 0;
+ }
}
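
The added radiotap A-MPDU status field is eight bytes at four-byte alignment: a little-endian u32 A-MPDU reference, a u16 flags word, the u8 delimiter CRC, and a reserved byte, matching the put_unaligned_le32/le16 sequence above. A sketch packing the field into a buffer:

    #include <stdio.h>
    #include <stdint.h>

    static size_t pack_ampdu(uint8_t *buf, size_t pos,
                             uint32_t ref, uint16_t flags, uint8_t crc)
    {
        while (pos & 3)                 /* pad to 4-byte alignment */
            buf[pos++] = 0;
        for (int i = 0; i < 4; i++)     /* A-MPDU reference, LE */
            buf[pos++] = (ref >> (8 * i)) & 0xff;
        for (int i = 0; i < 2; i++)     /* flags, LE */
            buf[pos++] = (flags >> (8 * i)) & 0xff;
        buf[pos++] = crc;               /* delimiter CRC */
        buf[pos++] = 0;                 /* reserved */
        return pos;
    }

    int main(void)
    {
        uint8_t buf[32] = { 0 };
        size_t end = pack_ampdu(buf, 3, 0x12345678, 0x0001, 0xab);

        printf("field ends at offset %zu\n", end); /* 4 + 8 = 12 */
        return 0;
    }
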
/*
@@ -2268,7 +2308,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
goto queue;
case WLAN_CATEGORY_SPECTRUM_MGMT:
- if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
+ if (status->band != IEEE80211_BAND_5GHZ)
break;
if (sdata->vif.type != NL80211_IFTYPE_STATION)
@@ -2772,8 +2812,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
if (!bssid) {
if (!ether_addr_equal(sdata->vif.addr, hdr->addr1))
return 0;
- } else if (!ieee80211_bssid_match(bssid,
- sdata->vif.addr)) {
+ } else if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) {
/*
* Accept public action frames even when the
* BSSID doesn't match, this is used for P2P
@@ -2793,9 +2832,18 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
if (!ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2))
return 0;
break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ if (!ieee80211_is_public_action(hdr, skb->len) &&
+ !ieee80211_is_probe_req(hdr->frame_control) &&
+ !ieee80211_is_probe_resp(hdr->frame_control) &&
+ !ieee80211_is_beacon(hdr->frame_control))
+ return 0;
+ if (!ether_addr_equal(sdata->vif.addr, hdr->addr1))
+ status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
+ break;
default:
/* should never get here */
- WARN_ON(1);
+ WARN_ON_ONCE(1);
break;
}
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 839dd973798..740e414d44f 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -416,7 +416,8 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
local->scan_req->ssids[i].ssid_len,
local->scan_req->ie, local->scan_req->ie_len,
local->scan_req->rates[band], false,
- local->scan_req->no_cck);
+ local->scan_req->no_cck,
+ local->hw.conf.channel);
/*
* After sending probe requests, wait for probe responses
@@ -479,11 +480,10 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
if (local->ops->hw_scan) {
__set_bit(SCAN_HW_SCANNING, &local->scanning);
} else if ((req->n_channels == 1) &&
- (req->channels[0]->center_freq ==
- local->hw.conf.channel->center_freq)) {
-
- /* If we are scanning only on the current channel, then
- * we do not need to stop normal activities
+ (req->channels[0] == local->oper_channel)) {
+ /*
+ * If we are scanning only on the operating channel
+ * then we do not need to stop normal activities
*/
unsigned long next_delay;
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 8cd72914cda..b0801b7d572 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -519,19 +519,27 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
u64 cookie = (unsigned long)skb;
acked = info->flags & IEEE80211_TX_STAT_ACK;
- /*
- * TODO: When we have non-netdev frame TX,
- * we cannot use skb->dev->ieee80211_ptr
- */
-
if (ieee80211_is_nullfunc(hdr->frame_control) ||
- ieee80211_is_qos_nullfunc(hdr->frame_control))
+ ieee80211_is_qos_nullfunc(hdr->frame_control)) {
cfg80211_probe_status(skb->dev, hdr->addr1,
cookie, acked, GFP_ATOMIC);
- else
+ } else if (skb->dev) {
cfg80211_mgmt_tx_status(
skb->dev->ieee80211_ptr, cookie, skb->data,
skb->len, acked, GFP_ATOMIC);
+ } else {
+ struct ieee80211_sub_if_data *p2p_sdata;
+
+ rcu_read_lock();
+
+ p2p_sdata = rcu_dereference(local->p2p_sdata);
+ if (p2p_sdata) {
+ cfg80211_mgmt_tx_status(
+ &p2p_sdata->wdev, cookie, skb->data,
+ skb->len, acked, GFP_ATOMIC);
+ }
+ rcu_read_unlock();
+ }
}
if (unlikely(info->ack_frame_id)) {
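
For frames sent without a netdev the status path above can no longer use skb->dev->ieee80211_ptr, so it falls back to the RCU-protected local->p2p_sdata pointer to find the P2P device's wdev. A sketch of the fallback, with a plain pointer and comments standing in for the RCU primitives:

    #include <stdio.h>

    struct sdata { const char *name; };

    static struct sdata *p2p_sdata; /* __rcu in the kernel */

    static void report_status(struct sdata *dev_sdata)
    {
        struct sdata *target = dev_sdata;

        if (!target) {
            /* rcu_read_lock(); */
            target = p2p_sdata;     /* rcu_dereference() */
            /* rcu_read_unlock(); */
        }
        if (target)
            printf("tx status -> %s\n", target->name);
    }

    int main(void)
    {
        struct sdata p2p = { "p2p-dev" };

        p2p_sdata = &p2p;
        report_status(NULL); /* no netdev: routed to the P2P wdev */
        return 0;
    }
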
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index c6d33b55b2d..18d9c8a52e9 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -24,7 +24,7 @@
__string(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
#define VIF_ASSIGN __entry->vif_type = sdata->vif.type; __entry->sdata = sdata; \
__entry->p2p = sdata->vif.p2p; \
- __assign_str(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
+ __assign_str(vif_name, sdata->dev ? sdata->dev->name : sdata->name)
#define VIF_PR_FMT " vif:%s(%d%s)"
#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : ""
@@ -274,9 +274,12 @@ TRACE_EVENT(drv_config,
__entry->dynamic_ps_timeout = local->hw.conf.dynamic_ps_timeout;
__entry->max_sleep_period = local->hw.conf.max_sleep_period;
__entry->listen_interval = local->hw.conf.listen_interval;
- __entry->long_frame_max_tx_count = local->hw.conf.long_frame_max_tx_count;
- __entry->short_frame_max_tx_count = local->hw.conf.short_frame_max_tx_count;
- __entry->center_freq = local->hw.conf.channel->center_freq;
+ __entry->long_frame_max_tx_count =
+ local->hw.conf.long_frame_max_tx_count;
+ __entry->short_frame_max_tx_count =
+ local->hw.conf.short_frame_max_tx_count;
+ __entry->center_freq = local->hw.conf.channel ?
+ local->hw.conf.channel->center_freq : 0;
__entry->channel_type = local->hw.conf.channel_type;
__entry->smps = local->hw.conf.smps_mode;
),
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index acf712ffb5e..29eb4e67823 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -55,7 +55,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
if (WARN_ON_ONCE(info->control.rates[0].idx < 0))
return 0;
- sband = local->hw.wiphy->bands[tx->channel->band];
+ sband = local->hw.wiphy->bands[info->band];
txrate = &sband->bitrates[info->control.rates[0].idx];
erp = txrate->flags & IEEE80211_RATE_ERP_G;
@@ -615,7 +615,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
memset(&txrc, 0, sizeof(txrc));
- sband = tx->local->hw.wiphy->bands[tx->channel->band];
+ sband = tx->local->hw.wiphy->bands[info->band];
len = min_t(u32, tx->skb->len + FCS_LEN,
tx->local->hw.wiphy->frag_threshold);
@@ -626,13 +626,13 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
txrc.bss_conf = &tx->sdata->vif.bss_conf;
txrc.skb = tx->skb;
txrc.reported_rate.idx = -1;
- txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[tx->channel->band];
+ txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band];
if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
txrc.max_rate_idx = -1;
else
txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
memcpy(txrc.rate_idx_mcs_mask,
- tx->sdata->rc_rateidx_mcs_mask[tx->channel->band],
+ tx->sdata->rc_rateidx_mcs_mask[info->band],
sizeof(txrc.rate_idx_mcs_mask));
txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
@@ -667,7 +667,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
"scanning and associated. Target station: "
"%pM on %d GHz band\n",
tx->sdata->name, hdr->addr1,
- tx->channel->band ? 5 : 2))
+ info->band ? 5 : 2))
return TX_DROP;
/*
@@ -1131,7 +1131,6 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
tx->skb = skb;
tx->local = local;
tx->sdata = sdata;
- tx->channel = local->hw.conf.channel;
__skb_queue_head_init(&tx->skbs);
/*
@@ -1204,6 +1203,7 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
struct sk_buff_head *skbs,
bool txpending)
{
+ struct ieee80211_tx_control control;
struct sk_buff *skb, *tmp;
unsigned long flags;
@@ -1240,10 +1240,10 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
info->control.vif = vif;
- info->control.sta = sta;
+ control.sta = sta;
__skb_unlink(skb, skbs);
- drv_tx(local, skb);
+ drv_tx(local, &control, skb);
}
return true;
@@ -1399,8 +1399,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
goto out;
}
- tx.channel = local->hw.conf.channel;
- info->band = tx.channel->band;
+ info->band = local->hw.conf.channel->band;
/* set up hw_queue value early */
if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
@@ -1720,7 +1719,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
struct ieee80211_tx_info *info;
- int ret = NETDEV_TX_BUSY, head_need;
+ int head_need;
u16 ethertype, hdrlen, meshhdrlen = 0;
__le16 fc;
struct ieee80211_hdr hdr;
@@ -1736,10 +1735,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
u32 info_flags = 0;
u16 info_id = 0;
- if (unlikely(skb->len < ETH_HLEN)) {
- ret = NETDEV_TX_OK;
+ if (unlikely(skb->len < ETH_HLEN))
goto fail;
- }
/* convert Ethernet header to proper 802.11 header (based on
* operation mode) */
@@ -1787,7 +1784,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
if (!sdata->u.mesh.mshcfg.dot11MeshTTL) {
/* Do not send frames with mesh_ttl == 0 */
sdata->u.mesh.mshstats.dropped_frames_ttl++;
- ret = NETDEV_TX_OK;
goto fail;
}
rcu_read_lock();
@@ -1811,37 +1807,31 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
sdata, NULL, NULL);
} else {
- int is_mesh_mcast = 1;
- const u8 *mesh_da;
+ /* DS -> MBSS (802.11-2012 13.11.3.3).
+ * For unicast with unknown forwarding information,
+ * destination might be in the MBSS or if that fails
+ * forwarded to another mesh gate. In either case
+ * resolution will be handled in ieee80211_xmit(), so
+ * leave the original DA. This also works for mcast */
+ const u8 *mesh_da = skb->data;
+
+ if (mppath)
+ mesh_da = mppath->mpp;
+ else if (mpath)
+ mesh_da = mpath->dst;
+ rcu_read_unlock();
- if (is_multicast_ether_addr(skb->data))
- /* DA TA mSA AE:SA */
- mesh_da = skb->data;
- else {
- static const u8 bcast[ETH_ALEN] =
- { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
- if (mppath) {
- /* RA TA mDA mSA AE:DA SA */
- mesh_da = mppath->mpp;
- is_mesh_mcast = 0;
- } else if (mpath) {
- mesh_da = mpath->dst;
- is_mesh_mcast = 0;
- } else {
- /* DA TA mSA AE:SA */
- mesh_da = bcast;
- }
- }
hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
mesh_da, sdata->vif.addr);
- rcu_read_unlock();
- if (is_mesh_mcast)
+ if (is_multicast_ether_addr(mesh_da))
+ /* DA TA mSA AE:SA */
meshhdrlen =
ieee80211_new_mesh_header(&mesh_hdr,
sdata,
skb->data + ETH_ALEN,
NULL);
else
+ /* RA TA mDA mSA AE:DA SA */
meshhdrlen =
ieee80211_new_mesh_header(&mesh_hdr,
sdata,
@@ -1880,10 +1870,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
if (tdls_direct) {
/* link during setup - throw out frames to peer */
- if (!tdls_auth) {
- ret = NETDEV_TX_OK;
+ if (!tdls_auth)
goto fail;
- }
/* DA SA BSSID */
memcpy(hdr.addr1, skb->data, ETH_ALEN);
@@ -1917,7 +1905,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
hdrlen = 24;
break;
default:
- ret = NETDEV_TX_OK;
goto fail;
}
@@ -1962,7 +1949,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
- ret = NETDEV_TX_OK;
goto fail;
}
@@ -2017,10 +2003,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
skb = skb_clone(skb, GFP_ATOMIC);
kfree_skb(tmp_skb);
- if (!skb) {
- ret = NETDEV_TX_OK;
+ if (!skb)
goto fail;
- }
}
hdr.frame_control = fc;
@@ -2123,10 +2107,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
fail:
- if (ret == NETDEV_TX_OK)
- dev_kfree_skb(skb);
-
- return ret;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
}
@@ -2301,12 +2283,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
struct ieee80211_sub_if_data *sdata = NULL;
struct ieee80211_if_ap *ap = NULL;
struct beacon_data *beacon;
- struct ieee80211_supported_band *sband;
- enum ieee80211_band band = local->hw.conf.channel->band;
+ enum ieee80211_band band = local->oper_channel->band;
struct ieee80211_tx_rate_control txrc;
- sband = local->hw.wiphy->bands[band];
-
rcu_read_lock();
sdata = vif_to_sdata(vif);
@@ -2416,7 +2395,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
memset(mgmt, 0, hdr_len);
mgmt->frame_control =
cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
- memset(mgmt->da, 0xff, ETH_ALEN);
+ eth_broadcast_addr(mgmt->da);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
mgmt->u.beacon.beacon_int =
@@ -2428,9 +2407,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
*pos++ = WLAN_EID_SSID;
*pos++ = 0x0;
- if (ieee80211_add_srates_ie(sdata, skb, true) ||
+ if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
mesh_add_ds_params_ie(skb, sdata) ||
- ieee80211_add_ext_srates_ie(sdata, skb, true) ||
+ ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
mesh_add_rsn_ie(skb, sdata) ||
mesh_add_ht_cap_ie(skb, sdata) ||
mesh_add_ht_oper_ie(skb, sdata) ||
@@ -2453,12 +2432,12 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
memset(&txrc, 0, sizeof(txrc));
txrc.hw = hw;
- txrc.sband = sband;
+ txrc.sband = local->hw.wiphy->bands[band];
txrc.bss_conf = &sdata->vif.bss_conf;
txrc.skb = skb;
txrc.reported_rate.idx = -1;
txrc.rate_idx_mask = sdata->rc_rateidx_mask[band];
- if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
+ if (txrc.rate_idx_mask == (1 << txrc.sband->n_bitrates) - 1)
txrc.max_rate_idx = -1;
else
txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
@@ -2482,7 +2461,8 @@ struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ieee80211_if_ap *ap = NULL;
- struct sk_buff *presp = NULL, *skb = NULL;
+ struct sk_buff *skb = NULL;
+ struct probe_resp *presp = NULL;
struct ieee80211_hdr *hdr;
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
@@ -2496,10 +2476,12 @@ struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
if (!presp)
goto out;
- skb = skb_copy(presp, GFP_ATOMIC);
+ skb = dev_alloc_skb(presp->len);
if (!skb)
goto out;
+ memcpy(skb_put(skb, presp->len), presp->data, presp->len);
+
hdr = (struct ieee80211_hdr *) skb->data;
memset(hdr->addr1, 0, sizeof(hdr->addr1));
@@ -2610,9 +2592,9 @@ struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
memset(hdr, 0, sizeof(*hdr));
hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_PROBE_REQ);
- memset(hdr->addr1, 0xff, ETH_ALEN);
+ eth_broadcast_addr(hdr->addr1);
memcpy(hdr->addr2, vif->addr, ETH_ALEN);
- memset(hdr->addr3, 0xff, ETH_ALEN);
+ eth_broadcast_addr(hdr->addr3);
pos = skb_put(skb, ie_ssid_len);
*pos++ = WLAN_EID_SSID;
@@ -2709,8 +2691,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
info = IEEE80211_SKB_CB(skb);
tx.flags |= IEEE80211_TX_PS_BUFFERED;
- tx.channel = local->hw.conf.channel;
- info->band = tx.channel->band;
+ info->band = local->oper_channel->band;
if (invoke_tx_handlers(&tx))
skb = NULL;
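
The two mac80211 hunks above that replace memset(..., 0xff, ETH_ALEN) with eth_broadcast_addr() are purely cosmetic; the helper is a descriptive wrapper around the same memset. A minimal, self-contained userspace restatement (ETH_ALEN redefined locally):

#include <assert.h>
#include <string.h>

#define ETH_ALEN 6

/* Userspace restatement of eth_broadcast_addr(): a descriptive
 * wrapper around memset(addr, 0xff, ETH_ALEN). */
static void broadcast_addr(unsigned char *addr)
{
	memset(addr, 0xff, ETH_ALEN);
}

int main(void)
{
	unsigned char da[ETH_ALEN];
	int i;

	broadcast_addr(da);
	for (i = 0; i < ETH_ALEN; i++)
		assert(da[i] == 0xff);
	return 0;
}
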
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 39b82fee490..471fb0516c9 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -276,6 +276,9 @@ void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
int ac;
+ if (!sdata->dev)
+ continue;
+
if (test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
continue;
@@ -364,6 +367,9 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
int ac;
+ if (!sdata->dev)
+ continue;
+
for (ac = 0; ac < n_acs; ac++) {
if (sdata->vif.hw_queue[ac] == queue ||
sdata->vif.cab_queue == queue)
@@ -768,8 +774,11 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
elem_parse_failed = true;
break;
case WLAN_EID_CHANNEL_SWITCH:
- elems->ch_switch_elem = pos;
- elems->ch_switch_elem_len = elen;
+ if (elen != sizeof(struct ieee80211_channel_sw_ie)) {
+ elem_parse_failed = true;
+ break;
+ }
+ elems->ch_switch_ie = (void *)pos;
break;
case WLAN_EID_QUIET:
if (!elems->quiet_elem) {
@@ -832,7 +841,7 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
memset(&qparam, 0, sizeof(qparam));
- use_11b = (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) &&
+ use_11b = (local->oper_channel->band == IEEE80211_BAND_2GHZ) &&
!(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE);
/*
@@ -899,7 +908,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
drv_conf_tx(local, sdata, ac, &qparam);
}
- if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
+ if (sdata->vif.type != NL80211_IFTYPE_MONITOR &&
+ sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE) {
sdata->vif.bss_conf.qos = enable_qos;
if (bss_notify)
ieee80211_bss_info_change_notify(sdata,
@@ -919,7 +929,7 @@ void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
if ((supp_rates[i] & 0x7f) * 5 > 110)
have_higher_than_11mbit = 1;
- if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ &&
+ if (local->oper_channel->band == IEEE80211_BAND_2GHZ &&
have_higher_than_11mbit)
sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
else
@@ -1100,6 +1110,7 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
u8 *dst, u32 ratemask,
+ struct ieee80211_channel *chan,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len,
bool directed)
@@ -1109,7 +1120,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt;
size_t buf_len;
u8 *buf;
- u8 chan;
+ u8 chan_no;
/* FIXME: come up with a proper value */
buf = kmalloc(200 + ie_len, GFP_KERNEL);
@@ -1122,14 +1133,12 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
* badly-behaved APs don't respond when this parameter is included.
*/
if (directed)
- chan = 0;
+ chan_no = 0;
else
- chan = ieee80211_frequency_to_channel(
- local->hw.conf.channel->center_freq);
+ chan_no = ieee80211_frequency_to_channel(chan->center_freq);
- buf_len = ieee80211_build_preq_ies(local, buf, ie, ie_len,
- local->hw.conf.channel->band,
- ratemask, chan);
+ buf_len = ieee80211_build_preq_ies(local, buf, ie, ie_len, chan->band,
+ ratemask, chan_no);
skb = ieee80211_probereq_get(&local->hw, &sdata->vif,
ssid, ssid_len,
@@ -1154,11 +1163,13 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len,
- u32 ratemask, bool directed, bool no_cck)
+ u32 ratemask, bool directed, bool no_cck,
+ struct ieee80211_channel *channel)
{
struct sk_buff *skb;
- skb = ieee80211_build_probe_req(sdata, dst, ratemask, ssid, ssid_len,
+ skb = ieee80211_build_probe_req(sdata, dst, ratemask, channel,
+ ssid, ssid_len,
ie, ie_len, directed);
if (skb) {
if (no_cck)
@@ -1359,7 +1370,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
switch (sdata->vif.type) {
case NL80211_IFTYPE_STATION:
changed |= BSS_CHANGED_ASSOC |
- BSS_CHANGED_ARP_FILTER;
+ BSS_CHANGED_ARP_FILTER |
+ BSS_CHANGED_PS;
mutex_lock(&sdata->u.mgd.mtx);
ieee80211_bss_info_change_notify(sdata, changed);
mutex_unlock(&sdata->u.mgd.mtx);
@@ -1385,6 +1397,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
case NL80211_IFTYPE_MONITOR:
/* ignore virtual */
break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ changed = BSS_CHANGED_IDLE;
+ break;
case NL80211_IFTYPE_UNSPECIFIED:
case NUM_NL80211_IFTYPES:
case NL80211_IFTYPE_P2P_CLIENT:
@@ -1571,6 +1586,8 @@ void ieee80211_recalc_smps(struct ieee80211_local *local)
list_for_each_entry(sdata, &local->interfaces, list) {
if (!ieee80211_sdata_running(sdata))
continue;
+ if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
+ continue;
if (sdata->vif.type != NL80211_IFTYPE_STATION)
goto set;
@@ -1809,7 +1826,8 @@ ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper)
}
int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, bool need_basic)
+ struct sk_buff *skb, bool need_basic,
+ enum ieee80211_band band)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
@@ -1817,7 +1835,7 @@ int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
u8 i, rates, *pos;
u32 basic_rates = sdata->vif.bss_conf.basic_rates;
- sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+ sband = local->hw.wiphy->bands[band];
rates = sband->n_bitrates;
if (rates > 8)
rates = 8;
@@ -1840,7 +1858,8 @@ int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
}
int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, bool need_basic)
+ struct sk_buff *skb, bool need_basic,
+ enum ieee80211_band band)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
@@ -1848,7 +1867,7 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
u8 i, exrates, *pos;
u32 basic_rates = sdata->vif.bss_conf.basic_rates;
- sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+ sband = local->hw.wiphy->bands[band];
exrates = sband->n_bitrates;
if (exrates > 8)
exrates -= 8;
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 3c601378d27..767cc12da0f 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1171,8 +1171,10 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
goto out_err;
}
svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
- if (!svc->stats.cpustats)
+ if (!svc->stats.cpustats) {
+ ret = -ENOMEM;
goto out_err;
+ }
/* I'm the first user of the service */
atomic_set(&svc->usecnt, 0);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index c9bb994ae9b..dcb27910ab3 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -255,12 +255,15 @@ static void death_by_event(unsigned long ul_conntrack)
{
struct nf_conn *ct = (void *)ul_conntrack;
struct net *net = nf_ct_net(ct);
+ struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
+
+ BUG_ON(ecache == NULL);
if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
/* bad luck, let's retry again */
- ct->timeout.expires = jiffies +
+ ecache->timeout.expires = jiffies +
(random32() % net->ct.sysctl_events_retry_timeout);
- add_timer(&ct->timeout);
+ add_timer(&ecache->timeout);
return;
}
/* we've got the event delivered, now it's dying */
@@ -274,6 +277,9 @@ static void death_by_event(unsigned long ul_conntrack)
void nf_ct_insert_dying_list(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
+ struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
+
+ BUG_ON(ecache == NULL);
/* add this conntrack to the dying list */
spin_lock_bh(&nf_conntrack_lock);
@@ -281,10 +287,10 @@ void nf_ct_insert_dying_list(struct nf_conn *ct)
&net->ct.dying);
spin_unlock_bh(&nf_conntrack_lock);
/* set a new timer to retry event delivery */
- setup_timer(&ct->timeout, death_by_event, (unsigned long)ct);
- ct->timeout.expires = jiffies +
+ setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct);
+ ecache->timeout.expires = jiffies +
(random32() % net->ct.sysctl_events_retry_timeout);
- add_timer(&ct->timeout);
+ add_timer(&ecache->timeout);
}
EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 966f5133a38..a205bd6ce29 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2813,7 +2813,8 @@ static int __init ctnetlink_init(void)
goto err_unreg_subsys;
}
- if (register_pernet_subsys(&ctnetlink_net_ops)) {
+ ret = register_pernet_subsys(&ctnetlink_net_ops);
+ if (ret < 0) {
pr_err("ctnetlink_init: cannot register pernet operations\n");
goto err_unreg_exp_subsys;
}
diff --git a/net/netfilter/nf_nat_sip.c b/net/netfilter/nf_nat_sip.c
index f4db3a7bd28..16303c75221 100644
--- a/net/netfilter/nf_nat_sip.c
+++ b/net/netfilter/nf_nat_sip.c
@@ -542,7 +542,10 @@ static unsigned int nf_nat_sdp_media(struct sk_buff *skb, unsigned int protoff,
ret = nf_ct_expect_related(rtcp_exp);
if (ret == 0)
break;
- else if (ret != -EBUSY) {
+ else if (ret == -EBUSY) {
+ nf_ct_unexpect_related(rtp_exp);
+ continue;
+ } else if (ret < 0) {
nf_ct_unexpect_related(rtp_exp);
port = 0;
break;
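
The hunk above is the EBUSY fix mentioned in the merge description: -EBUSY from the RTCP expectation now backs out the RTP expectation and retries the next port pair instead of aborting. A pseudocode-level sketch of that control flow; expect_rtp(), expect_rtcp() and unexpect_rtp() are hypothetical stand-ins for the nf_ct_expect_related()/nf_ct_unexpect_related() calls on the two expectations:

/* sketch only: helper names are hypothetical */
static u16 pick_media_ports(u16 base)
{
	u16 port;
	int ret;

	for (port = base; port != 0; port += 2) {
		if (expect_rtp(port) < 0)
			continue;		/* RTP slot taken, try next pair */

		ret = expect_rtcp(port + 1);
		if (ret == 0)
			break;			/* both expectations registered */
		if (ret == -EBUSY) {
			unexpect_rtp(port);	/* clash: back out and retry */
			continue;
		}
		unexpect_rtp(port);		/* hard error: give up */
		port = 0;
		break;
	}
	return port;
}
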
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 169ab59ed9d..be194b14429 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -55,6 +55,7 @@ struct nfulnl_instance {
unsigned int qlen; /* number of nlmsgs in skb */
struct sk_buff *skb; /* pre-allocated skb */
struct timer_list timer;
+ struct user_namespace *peer_user_ns; /* User namespace of the peer process */
int peer_pid; /* PID of the peer process */
/* configurable parameters */
@@ -132,7 +133,7 @@ instance_put(struct nfulnl_instance *inst)
static void nfulnl_timer(unsigned long data);
static struct nfulnl_instance *
-instance_create(u_int16_t group_num, int pid)
+instance_create(u_int16_t group_num, int pid, struct user_namespace *user_ns)
{
struct nfulnl_instance *inst;
int err;
@@ -162,6 +163,7 @@ instance_create(u_int16_t group_num, int pid)
setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst);
+ inst->peer_user_ns = user_ns;
inst->peer_pid = pid;
inst->group_num = group_num;
@@ -480,7 +482,7 @@ __build_packet_message(struct nfulnl_instance *inst,
}
if (indev && skb_mac_header_was_set(skb)) {
- if (nla_put_be32(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
+ if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
nla_put_be16(inst->skb, NFULA_HWLEN,
htons(skb->dev->hard_header_len)) ||
nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
@@ -503,8 +505,11 @@ __build_packet_message(struct nfulnl_instance *inst,
read_lock_bh(&skb->sk->sk_callback_lock);
if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
struct file *file = skb->sk->sk_socket->file;
- __be32 uid = htonl(file->f_cred->fsuid);
- __be32 gid = htonl(file->f_cred->fsgid);
+ __be32 uid = htonl(from_kuid_munged(inst->peer_user_ns,
+ file->f_cred->fsuid));
+ __be32 gid = htonl(from_kgid_munged(inst->peer_user_ns,
+ file->f_cred->fsgid));
+ /* need to unlock here since NLA_PUT may goto */
read_unlock_bh(&skb->sk->sk_callback_lock);
if (nla_put_be32(inst->skb, NFULA_UID, uid) ||
nla_put_be32(inst->skb, NFULA_GID, gid))
@@ -783,7 +788,8 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
}
inst = instance_create(group_num,
- NETLINK_CB(skb).pid);
+ NETLINK_CB(skb).pid,
+ sk_user_ns(NETLINK_CB(skb).ssk));
if (IS_ERR(inst)) {
ret = PTR_ERR(inst);
goto out;
@@ -996,8 +1002,10 @@ static int __init nfnetlink_log_init(void)
#ifdef CONFIG_PROC_FS
if (!proc_create("nfnetlink_log", 0440,
- proc_net_netfilter, &nful_file_ops))
+ proc_net_netfilter, &nful_file_ops)) {
+ status = -ENOMEM;
goto cleanup_logger;
+ }
#endif
return status;
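
The nfulnl changes above thread the peer's user namespace through instance_create() so that credentials can be translated with from_kuid_munged()/from_kgid_munged() before they are put on the wire. A short kernel-style sketch of that pattern, assuming the converted kuid_t/kgid_t credential fields:

#include <linux/cred.h>
#include <linux/printk.h>
#include <linux/uidgid.h>

/*
 * Sketch: translate kernel-internal ids into the viewer's user
 * namespace before reporting them, as the logging paths now do.
 */
static void report_owner(const struct cred *cred,
			 struct user_namespace *viewer_ns)
{
	uid_t uid = from_kuid_munged(viewer_ns, cred->fsuid);
	gid_t gid = from_kgid_munged(viewer_ns, cred->fsgid);

	pr_info("UID=%u GID=%u\n", uid, gid);
}
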
diff --git a/net/netfilter/xt_LOG.c b/net/netfilter/xt_LOG.c
index ff5f75fddb1..02a2bf49dcb 100644
--- a/net/netfilter/xt_LOG.c
+++ b/net/netfilter/xt_LOG.c
@@ -363,10 +363,12 @@ static void dump_ipv4_packet(struct sbuff *m,
/* Max length: 15 "UID=4294967295 " */
if ((logflags & XT_LOG_UID) && !iphoff && skb->sk) {
read_lock_bh(&skb->sk->sk_callback_lock);
- if (skb->sk->sk_socket && skb->sk->sk_socket->file)
+ if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
+ const struct cred *cred = skb->sk->sk_socket->file->f_cred;
sb_add(m, "UID=%u GID=%u ",
- skb->sk->sk_socket->file->f_cred->fsuid,
- skb->sk->sk_socket->file->f_cred->fsgid);
+ from_kuid_munged(&init_user_ns, cred->fsuid),
+ from_kgid_munged(&init_user_ns, cred->fsgid));
+ }
read_unlock_bh(&skb->sk->sk_callback_lock);
}
@@ -719,10 +721,12 @@ static void dump_ipv6_packet(struct sbuff *m,
/* Max length: 15 "UID=4294967295 " */
if ((logflags & XT_LOG_UID) && recurse && skb->sk) {
read_lock_bh(&skb->sk->sk_callback_lock);
- if (skb->sk->sk_socket && skb->sk->sk_socket->file)
+ if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
+ const struct cred *cred = skb->sk->sk_socket->file->f_cred;
sb_add(m, "UID=%u GID=%u ",
- skb->sk->sk_socket->file->f_cred->fsuid,
- skb->sk->sk_socket->file->f_cred->fsgid);
+ from_kuid_munged(&init_user_ns, cred->fsuid),
+ from_kgid_munged(&init_user_ns, cred->fsgid));
+ }
read_unlock_bh(&skb->sk->sk_callback_lock);
}
diff --git a/net/netfilter/xt_owner.c b/net/netfilter/xt_owner.c
index 772d7389b33..ca2e577ed8a 100644
--- a/net/netfilter/xt_owner.c
+++ b/net/netfilter/xt_owner.c
@@ -17,6 +17,17 @@
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_owner.h>
+static int owner_check(const struct xt_mtchk_param *par)
+{
+ struct xt_owner_match_info *info = par->matchinfo;
+
+ /* For now only allow adding matches from the initial user namespace */
+ if ((info->match & (XT_OWNER_UID|XT_OWNER_GID)) &&
+ (current_user_ns() != &init_user_ns))
+ return -EINVAL;
+ return 0;
+}
+
static bool
owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
@@ -37,17 +48,23 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
return ((info->match ^ info->invert) &
(XT_OWNER_UID | XT_OWNER_GID)) == 0;
- if (info->match & XT_OWNER_UID)
- if ((filp->f_cred->fsuid >= info->uid_min &&
- filp->f_cred->fsuid <= info->uid_max) ^
+ if (info->match & XT_OWNER_UID) {
+ kuid_t uid_min = make_kuid(&init_user_ns, info->uid_min);
+ kuid_t uid_max = make_kuid(&init_user_ns, info->uid_max);
+ if ((uid_gte(filp->f_cred->fsuid, uid_min) &&
+ uid_lte(filp->f_cred->fsuid, uid_max)) ^
!(info->invert & XT_OWNER_UID))
return false;
+ }
- if (info->match & XT_OWNER_GID)
- if ((filp->f_cred->fsgid >= info->gid_min &&
- filp->f_cred->fsgid <= info->gid_max) ^
+ if (info->match & XT_OWNER_GID) {
+ kgid_t gid_min = make_kgid(&init_user_ns, info->gid_min);
+ kgid_t gid_max = make_kgid(&init_user_ns, info->gid_max);
+ if ((gid_gte(filp->f_cred->fsgid, gid_min) &&
+ gid_lte(filp->f_cred->fsgid, gid_max)) ^
!(info->invert & XT_OWNER_GID))
return false;
+ }
return true;
}
@@ -56,6 +73,7 @@ static struct xt_match owner_mt_reg __read_mostly = {
.name = "owner",
.revision = 1,
.family = NFPROTO_UNSPEC,
+ .checkentry = owner_check,
.match = owner_mt,
.matchsize = sizeof(struct xt_owner_match_info),
.hooks = (1 << NF_INET_LOCAL_OUT) |
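
The reworked owner_mt() above compares credentials in kuid/kgid space: the userspace-supplied bounds are mapped into the initial namespace once with make_kuid()/make_kgid(), then compared with uid_gte()/uid_lte(). A minimal sketch of that range check, assuming the post-conversion kuid_t credential fields:

#include <linux/cred.h>
#include <linux/uidgid.h>

/* Sketch: map the u32 bounds into kuid space, then compare there. */
static bool fsuid_in_range(const struct cred *cred, u32 min, u32 max)
{
	kuid_t uid_min = make_kuid(&init_user_ns, min);
	kuid_t uid_max = make_kuid(&init_user_ns, max);

	return uid_gte(cred->fsuid, uid_min) &&
	       uid_lte(cred->fsuid, uid_max);
}
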
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index ae2ad1eec8d..4635c9b0045 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -317,6 +317,8 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
struct recent_table *t;
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *pde;
+ kuid_t uid;
+ kgid_t gid;
#endif
unsigned int i;
int ret = -EINVAL;
@@ -372,6 +374,13 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
for (i = 0; i < ip_list_hash_size; i++)
INIT_LIST_HEAD(&t->iphash[i]);
#ifdef CONFIG_PROC_FS
+ uid = make_kuid(&init_user_ns, ip_list_uid);
+ gid = make_kgid(&init_user_ns, ip_list_gid);
+ if (!uid_valid(uid) || !gid_valid(gid)) {
+ kfree(t);
+ ret = -EINVAL;
+ goto out;
+ }
pde = proc_create_data(t->name, ip_list_perms, recent_net->xt_recent,
&recent_mt_fops, t);
if (pde == NULL) {
@@ -379,8 +388,8 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
ret = -ENOMEM;
goto out;
}
- pde->uid = ip_list_uid;
- pde->gid = ip_list_gid;
+ pde->uid = uid;
+ pde->gid = gid;
#endif
spin_lock_bh(&recent_lock);
list_add_tail(&t->list, &recent_net->tables);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 1445d73533e..38211991716 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -912,7 +912,8 @@ static void netlink_rcv_wake(struct sock *sk)
wake_up_interruptible(&nlk->wait);
}
-static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
+static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
+ struct sock *ssk)
{
int ret;
struct netlink_sock *nlk = nlk_sk(sk);
@@ -921,6 +922,7 @@ static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
if (nlk->netlink_rcv != NULL) {
ret = skb->len;
skb_set_owner_r(skb, sk);
+ NETLINK_CB(skb).ssk = ssk;
nlk->netlink_rcv(skb);
consume_skb(skb);
} else {
@@ -947,7 +949,7 @@ retry:
return PTR_ERR(sk);
}
if (netlink_is_kernel(sk))
- return netlink_unicast_kernel(sk, skb);
+ return netlink_unicast_kernel(sk, skb, ssk);
if (sk_filter(sk, skb)) {
err = skb->len;
@@ -1373,7 +1375,8 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
dst_pid = addr->nl_pid;
dst_group = ffs(addr->nl_groups);
err = -EPERM;
- if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
+ if ((dst_group || dst_pid) &&
+ !netlink_capable(sock, NL_NONROOT_SEND))
goto out;
} else {
dst_pid = nlk->dst_pid;
@@ -2147,6 +2150,7 @@ static void __init netlink_add_usersock_entry(void)
rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
nl_table[NETLINK_USERSOCK].registered = 1;
+ nl_table[NETLINK_USERSOCK].nl_nonroot = NL_NONROOT_SEND;
netlink_table_ungrab();
}
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index b7f38b16190..c7bf2f26525 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -427,19 +427,11 @@ void ovs_flow_deferred_free(struct sw_flow *flow)
call_rcu(&flow->rcu, rcu_free_flow_callback);
}
-/* RCU callback used by ovs_flow_deferred_free_acts. */
-static void rcu_free_acts_callback(struct rcu_head *rcu)
-{
- struct sw_flow_actions *sf_acts = container_of(rcu,
- struct sw_flow_actions, rcu);
- kfree(sf_acts);
-}
-
/* Schedules 'sf_acts' to be freed after the next RCU grace period.
* The caller must hold rcu_read_lock for this to be sensible. */
void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
{
- call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
+ kfree_rcu(sf_acts, rcu);
}
static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
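
The openvswitch hunk above deletes a callback whose only job was kfree(): kfree_rcu() expresses the same "free after a grace period" semantics inline, given the name of the embedded rcu_head. A minimal sketch with a stand-in struct:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct actions_like {		/* stand-in for struct sw_flow_actions */
	struct rcu_head rcu;
	/* ... flattened action list ... */
};

static void deferred_free(struct actions_like *acts)
{
	kfree_rcu(acts, rcu);	/* second arg names the rcu_head member */
}
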
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f220c5bdb71..94060edbbd7 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1162,7 +1162,7 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
spin_unlock(&f->lock);
}
-bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
+static bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
{
if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout)
return true;
@@ -3749,7 +3749,7 @@ static int packet_seq_show(struct seq_file *seq, void *v)
po->ifindex,
po->running,
atomic_read(&s->sk_rmem_alloc),
- sock_i_uid(s),
+ from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
sock_i_ino(s));
}
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 0acc943f713..b7e98278225 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -612,7 +612,8 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
sk->sk_protocol, pn->sobject, pn->dobject,
pn->resource, sk->sk_state,
sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
- sock_i_uid(sk), sock_i_ino(sk),
+ from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
+ sock_i_ino(sk),
atomic_read(&sk->sk_refcnt), sk,
atomic_read(&sk->sk_drops), &len);
}
@@ -796,7 +797,8 @@ static int pn_res_seq_show(struct seq_file *seq, void *v)
struct sock *sk = *psk;
seq_printf(seq, "%02X %5d %lu%n",
- (int) (psk - pnres.sk), sock_i_uid(sk),
+ (int) (psk - pnres.sk),
+ from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
sock_i_ino(sk), &len);
}
seq_printf(seq, "%*s\n", 63 - len, "");
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 752b72360eb..c275bad1206 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -150,6 +150,20 @@ static void rfkill_led_trigger_activate(struct led_classdev *led)
rfkill_led_trigger_event(rfkill);
}
+const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
+{
+ return rfkill->led_trigger.name;
+}
+EXPORT_SYMBOL(rfkill_get_led_trigger_name);
+
+void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
+{
+ BUG_ON(!rfkill);
+
+ rfkill->ledtrigname = name;
+}
+EXPORT_SYMBOL(rfkill_set_led_trigger_name);
+
static int rfkill_led_trigger_register(struct rfkill *rfkill)
{
rfkill->led_trigger.name = rfkill->ledtrigname
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 6dd1131f2ec..dc3ef5aef35 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -319,7 +319,7 @@ replay:
}
}
- err = tp->ops->change(tp, cl, t->tcm_handle, tca, &fh);
+ err = tp->ops->change(skb, tp, cl, t->tcm_handle, tca, &fh);
if (err == 0) {
if (tp_created) {
spin_lock_bh(root_lock);
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index 590960a22a7..344a11b342e 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -162,7 +162,8 @@ errout:
return err;
}
-static int basic_change(struct tcf_proto *tp, unsigned long base, u32 handle,
+static int basic_change(struct sk_buff *in_skb,
+ struct tcf_proto *tp, unsigned long base, u32 handle,
struct nlattr **tca, unsigned long *arg)
{
int err;
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 7743ea8d1d3..91de66695b4 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -151,7 +151,8 @@ static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
[TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED },
};
-static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base,
+static int cls_cgroup_change(struct sk_buff *in_skb,
+ struct tcf_proto *tp, unsigned long base,
u32 handle, struct nlattr **tca,
unsigned long *arg)
{
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index ccd08c8dc6a..ce82d0cb1b4 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -193,15 +193,19 @@ static u32 flow_get_rtclassid(const struct sk_buff *skb)
static u32 flow_get_skuid(const struct sk_buff *skb)
{
- if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file)
- return skb->sk->sk_socket->file->f_cred->fsuid;
+ if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) {
+ kuid_t skuid = skb->sk->sk_socket->file->f_cred->fsuid;
+ return from_kuid(&init_user_ns, skuid);
+ }
return 0;
}
static u32 flow_get_skgid(const struct sk_buff *skb)
{
- if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file)
- return skb->sk->sk_socket->file->f_cred->fsgid;
+ if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) {
+ kgid_t skgid = skb->sk->sk_socket->file->f_cred->fsgid;
+ return from_kgid(&init_user_ns, skgid);
+ }
return 0;
}
@@ -347,7 +351,8 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
[TCA_FLOW_PERTURB] = { .type = NLA_U32 },
};
-static int flow_change(struct tcf_proto *tp, unsigned long base,
+static int flow_change(struct sk_buff *in_skb,
+ struct tcf_proto *tp, unsigned long base,
u32 handle, struct nlattr **tca,
unsigned long *arg)
{
@@ -386,6 +391,10 @@ static int flow_change(struct tcf_proto *tp, unsigned long base,
if (fls(keymask) - 1 > FLOW_KEY_MAX)
return -EOPNOTSUPP;
+
+ if ((keymask & (FLOW_KEY_SKUID|FLOW_KEY_SKGID)) &&
+ sk_user_ns(NETLINK_CB(in_skb).ssk) != &init_user_ns)
+ return -EOPNOTSUPP;
}
err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &flow_ext_map);
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 8384a479724..4075a0aef2a 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -233,7 +233,8 @@ errout:
return err;
}
-static int fw_change(struct tcf_proto *tp, unsigned long base,
+static int fw_change(struct sk_buff *in_skb,
+ struct tcf_proto *tp, unsigned long base,
u32 handle,
struct nlattr **tca,
unsigned long *arg)
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 44f405cb9aa..c10d57bf98f 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -427,7 +427,8 @@ errout:
return err;
}
-static int route4_change(struct tcf_proto *tp, unsigned long base,
+static int route4_change(struct sk_buff *in_skb,
+ struct tcf_proto *tp, unsigned long base,
u32 handle,
struct nlattr **tca,
unsigned long *arg)
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 18ab93ec8d7..494bbb90924 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -416,7 +416,8 @@ static const struct nla_policy rsvp_policy[TCA_RSVP_MAX + 1] = {
[TCA_RSVP_PINFO] = { .len = sizeof(struct tc_rsvp_pinfo) },
};
-static int rsvp_change(struct tcf_proto *tp, unsigned long base,
+static int rsvp_change(struct sk_buff *in_skb,
+ struct tcf_proto *tp, unsigned long base,
u32 handle,
struct nlattr **tca,
unsigned long *arg)
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index fe29420d0b0..a1293b4ab7a 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -332,7 +332,8 @@ errout:
}
static int
-tcindex_change(struct tcf_proto *tp, unsigned long base, u32 handle,
+tcindex_change(struct sk_buff *in_skb,
+ struct tcf_proto *tp, unsigned long base, u32 handle,
struct nlattr **tca, unsigned long *arg)
{
struct nlattr *opt = tca[TCA_OPTIONS];
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index d45373fb00b..c7c27bc91b5 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -544,7 +544,8 @@ errout:
return err;
}
-static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
+static int u32_change(struct sk_buff *in_skb,
+ struct tcf_proto *tp, unsigned long base, u32 handle,
struct nlattr **tca,
unsigned long *arg)
{
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index d9cb2ab149f..c3bea269faf 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -220,7 +220,8 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
sctp_sk(sk)->type, sk->sk_state, hash,
epb->bind_addr.port,
- sock_i_uid(sk), sock_i_ino(sk));
+ from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
+ sock_i_ino(sk));
sctp_seq_dump_local_addrs(seq, epb);
seq_printf(seq, "\n");
@@ -332,7 +333,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
assoc->assoc_id,
assoc->sndbuf_used,
atomic_read(&assoc->rmem_alloc),
- sock_i_uid(sk), sock_i_ino(sk),
+ from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
+ sock_i_ino(sk),
epb->bind_addr.port,
assoc->peer.port);
seq_printf(seq, " ");
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index c5ee4ff6136..8a84ab64caf 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2060,10 +2060,14 @@ static int unix_shutdown(struct socket *sock, int mode)
struct sock *sk = sock->sk;
struct sock *other;
- mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
-
- if (!mode)
- return 0;
+ if (mode < SHUT_RD || mode > SHUT_RDWR)
+ return -EINVAL;
+ /* This maps:
+ * SHUT_RD (0) -> RCV_SHUTDOWN (1)
+ * SHUT_WR (1) -> SEND_SHUTDOWN (2)
+ * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
+ */
+ ++mode;
unix_state_lock(sk);
sk->sk_shutdown |= mode;
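
The unix_shutdown() change above validates mode and then maps the userspace SHUT_* constants onto the kernel's shutdown bitmask with a single increment, exactly as the new comment lays out. A self-contained userspace sketch of that mapping (the RCV_SHUTDOWN et al. values are redefined locally with the kernel's constants):

#include <assert.h>
#include <sys/socket.h>		/* SHUT_RD, SHUT_WR, SHUT_RDWR */

#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2
#define SHUTDOWN_MASK	3

/* Mirror of the mapping in unix_shutdown(): ++mode turns the
 * userspace SHUT_* value into the kernel shutdown bitmask. */
static int shut_to_mask(int mode)
{
	if (mode < SHUT_RD || mode > SHUT_RDWR)
		return -1;		/* -EINVAL in the kernel */
	return mode + 1;
}

int main(void)
{
	assert(shut_to_mask(SHUT_RD) == RCV_SHUTDOWN);
	assert(shut_to_mask(SHUT_WR) == SEND_SHUTDOWN);
	assert(shut_to_mask(SHUT_RDWR) == SHUTDOWN_MASK);
	return 0;
}
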
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index d355f67d0cd..2f876b9ee34 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -105,7 +105,7 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
ASSERT_WDEV_LOCK(wdev);
- if (!netif_running(wdev->netdev))
+ if (wdev->netdev && !netif_running(wdev->netdev))
return;
switch (wdev->iftype) {
@@ -143,6 +143,11 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
case NL80211_IFTYPE_WDS:
/* these interface types don't really have a channel */
return;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ if (wdev->wiphy->features &
+ NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL)
+ *chanmode = CHAN_MODE_EXCLUSIVE;
+ return;
case NL80211_IFTYPE_UNSPECIFIED:
case NUM_NL80211_IFTYPES:
WARN_ON(1);
diff --git a/net/wireless/core.c b/net/wireless/core.c
index dcd64d5b07a..443d4d7deea 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -230,9 +230,24 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)
rtnl_lock();
mutex_lock(&rdev->devlist_mtx);
- list_for_each_entry(wdev, &rdev->wdev_list, list)
- if (wdev->netdev)
+ list_for_each_entry(wdev, &rdev->wdev_list, list) {
+ if (wdev->netdev) {
dev_close(wdev->netdev);
+ continue;
+ }
+ /* otherwise, check iftype */
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_P2P_DEVICE:
+ if (!wdev->p2p_started)
+ break;
+ rdev->ops->stop_p2p_device(&rdev->wiphy, wdev);
+ wdev->p2p_started = false;
+ rdev->opencount--;
+ break;
+ default:
+ break;
+ }
+ }
mutex_unlock(&rdev->devlist_mtx);
rtnl_unlock();
@@ -407,6 +422,11 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
if (WARN_ON(wiphy->software_iftypes & types))
return -EINVAL;
+ /* Only a single P2P_DEVICE can be allowed */
+ if (WARN_ON(types & BIT(NL80211_IFTYPE_P2P_DEVICE) &&
+ c->limits[j].max > 1))
+ return -EINVAL;
+
cnt += c->limits[j].max;
/*
* Don't advertise an unsupported type
@@ -734,6 +754,35 @@ static void wdev_cleanup_work(struct work_struct *work)
dev_put(wdev->netdev);
}
+void cfg80211_unregister_wdev(struct wireless_dev *wdev)
+{
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+
+ ASSERT_RTNL();
+
+ if (WARN_ON(wdev->netdev))
+ return;
+
+ mutex_lock(&rdev->devlist_mtx);
+ list_del_rcu(&wdev->list);
+ rdev->devlist_generation++;
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_P2P_DEVICE:
+ if (!wdev->p2p_started)
+ break;
+ rdev->ops->stop_p2p_device(&rdev->wiphy, wdev);
+ wdev->p2p_started = false;
+ rdev->opencount--;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+ mutex_unlock(&rdev->devlist_mtx);
+}
+EXPORT_SYMBOL(cfg80211_unregister_wdev);
+
static struct device_type wiphy_type = {
.name = "wlan",
};
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 1cdb1d5e6b0..8fd0242ee16 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -736,7 +736,6 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
const u8 *buf, size_t len, bool no_cck,
bool dont_wait_for_ack, u64 *cookie)
{
- struct net_device *dev = wdev->netdev;
const struct ieee80211_mgmt *mgmt;
u16 stype;
@@ -796,7 +795,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
case NL80211_IFTYPE_AP_VLAN:
- if (!ether_addr_equal(mgmt->bssid, dev->dev_addr))
+ if (!ether_addr_equal(mgmt->bssid, wdev_address(wdev)))
err = -EINVAL;
break;
case NL80211_IFTYPE_MESH_POINT:
@@ -809,6 +808,11 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
* cfg80211 doesn't track the stations
*/
break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ /*
+ * fall through, P2P device only supports
+ * public action frames
+ */
default:
err = -EOPNOTSUPP;
break;
@@ -819,7 +823,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
return err;
}
- if (!ether_addr_equal(mgmt->sa, dev->dev_addr))
+ if (!ether_addr_equal(mgmt->sa, wdev_address(wdev)))
return -EINVAL;
/* Transmit the Action frame as requested by user space */
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 97026f3b215..787aeaa902f 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1100,6 +1100,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
if (nla_put_u32(msg, i, NL80211_CMD_REGISTER_BEACONS))
goto nla_put_failure;
}
+ CMD(start_p2p_device, START_P2P_DEVICE);
#ifdef CONFIG_NL80211_TESTMODE
CMD(testmode_cmd, TESTMODE);
@@ -1748,13 +1749,13 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags,
if (dev &&
(nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
- nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name) ||
- nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, dev->dev_addr)))
+ nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name)))
goto nla_put_failure;
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
nla_put_u32(msg, NL80211_ATTR_IFTYPE, wdev->iftype) ||
nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, wdev_address(wdev)) ||
nla_put_u32(msg, NL80211_ATTR_GENERATION,
rdev->devlist_generation ^
(cfg80211_rdev_list_generation << 2)))
@@ -2021,8 +2022,10 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
return PTR_ERR(wdev);
}
- if (type == NL80211_IFTYPE_MESH_POINT &&
- info->attrs[NL80211_ATTR_MESH_ID]) {
+ switch (type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ if (!info->attrs[NL80211_ATTR_MESH_ID])
+ break;
wdev_lock(wdev);
BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN !=
IEEE80211_MAX_MESH_ID_LEN);
@@ -2031,6 +2034,26 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
memcpy(wdev->ssid, nla_data(info->attrs[NL80211_ATTR_MESH_ID]),
wdev->mesh_id_up_len);
wdev_unlock(wdev);
+ break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ /*
+ * P2P Device doesn't have a netdev, so doesn't go
+ * through the netdev notifier and must be added here
+ */
+ mutex_init(&wdev->mtx);
+ INIT_LIST_HEAD(&wdev->event_list);
+ spin_lock_init(&wdev->event_lock);
+ INIT_LIST_HEAD(&wdev->mgmt_registrations);
+ spin_lock_init(&wdev->mgmt_registrations_lock);
+
+ mutex_lock(&rdev->devlist_mtx);
+ wdev->identifier = ++rdev->wdev_id;
+ list_add_rcu(&wdev->list, &rdev->wdev_list);
+ rdev->devlist_generation++;
+ mutex_unlock(&rdev->devlist_mtx);
+ break;
+ default:
+ break;
}
if (nl80211_send_iface(msg, info->snd_pid, info->snd_seq, 0,
@@ -6053,6 +6076,7 @@ static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_P2P_DEVICE:
break;
default:
return -EOPNOTSUPP;
@@ -6099,6 +6123,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_P2P_DEVICE:
break;
default:
return -EOPNOTSUPP;
@@ -6195,6 +6220,7 @@ static int nl80211_tx_mgmt_cancel_wait(struct sk_buff *skb, struct genl_info *in
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_P2P_DEVICE:
break;
default:
return -EOPNOTSUPP;
@@ -6810,6 +6836,68 @@ static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info)
return 0;
}
+static int nl80211_start_p2p_device(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct wireless_dev *wdev = info->user_ptr[1];
+ int err;
+
+ if (!rdev->ops->start_p2p_device)
+ return -EOPNOTSUPP;
+
+ if (wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)
+ return -EOPNOTSUPP;
+
+ if (wdev->p2p_started)
+ return 0;
+
+ mutex_lock(&rdev->devlist_mtx);
+ err = cfg80211_can_add_interface(rdev, wdev->iftype);
+ mutex_unlock(&rdev->devlist_mtx);
+ if (err)
+ return err;
+
+ err = rdev->ops->start_p2p_device(&rdev->wiphy, wdev);
+ if (err)
+ return err;
+
+ wdev->p2p_started = true;
+ mutex_lock(&rdev->devlist_mtx);
+ rdev->opencount++;
+ mutex_unlock(&rdev->devlist_mtx);
+
+ return 0;
+}
+
+static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct wireless_dev *wdev = info->user_ptr[1];
+
+ if (wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)
+ return -EOPNOTSUPP;
+
+ if (!rdev->ops->stop_p2p_device)
+ return -EOPNOTSUPP;
+
+ if (!wdev->p2p_started)
+ return 0;
+
+ rdev->ops->stop_p2p_device(&rdev->wiphy, wdev);
+ wdev->p2p_started = false;
+
+ mutex_lock(&rdev->devlist_mtx);
+ rdev->opencount--;
+ mutex_unlock(&rdev->devlist_mtx);
+
+ if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) {
+ rdev->scan_req->aborted = true;
+ ___cfg80211_scan_done(rdev, true);
+ }
+
+ return 0;
+}
+
#define NL80211_FLAG_NEED_WIPHY 0x01
#define NL80211_FLAG_NEED_NETDEV 0x02
#define NL80211_FLAG_NEED_RTNL 0x04
@@ -6817,7 +6905,7 @@ static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info)
#define NL80211_FLAG_NEED_NETDEV_UP (NL80211_FLAG_NEED_NETDEV |\
NL80211_FLAG_CHECK_NETDEV_UP)
#define NL80211_FLAG_NEED_WDEV 0x10
-/* If a netdev is associated, it must be UP */
+/* If a netdev is associated, it must be UP, P2P must be started */
#define NL80211_FLAG_NEED_WDEV_UP (NL80211_FLAG_NEED_WDEV |\
NL80211_FLAG_CHECK_NETDEV_UP)
@@ -6878,6 +6966,13 @@ static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb,
}
dev_hold(dev);
+ } else if (ops->internal_flags & NL80211_FLAG_CHECK_NETDEV_UP) {
+ if (!wdev->p2p_started) {
+ mutex_unlock(&cfg80211_mutex);
+ if (rtnl)
+ rtnl_unlock();
+ return -ENETDOWN;
+ }
}
cfg80211_lock_rdev(rdev);
@@ -7439,7 +7534,22 @@ static struct genl_ops nl80211_ops[] = {
.internal_flags = NL80211_FLAG_NEED_NETDEV |
NL80211_FLAG_NEED_RTNL,
},
-
+ {
+ .cmd = NL80211_CMD_START_P2P_DEVICE,
+ .doit = nl80211_start_p2p_device,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_WDEV |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL80211_CMD_STOP_P2P_DEVICE,
+ .doit = nl80211_stop_p2p_device,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
};
static struct genl_multicast_group nl80211_mlme_mcgrp = {
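
The nl80211 additions above route NL80211_CMD_START_P2P_DEVICE / NL80211_CMD_STOP_P2P_DEVICE to the new start_p2p_device/stop_p2p_device driver ops. A sketch of the driver side; the signatures follow the calls visible in the diff (start returns int, stop returns void), and the bodies are placeholders:

#include <net/cfg80211.h>

static int drv_start_p2p_device(struct wiphy *wiphy,
				struct wireless_dev *wdev)
{
	/* bring the dedicated P2P Device interface to life */
	return 0;
}

static void drv_stop_p2p_device(struct wiphy *wiphy,
				struct wireless_dev *wdev)
{
	/* quiesce it; no frames may be exchanged after this returns */
}

static const struct cfg80211_ops drv_cfg_ops = {
	.start_p2p_device = drv_start_p2p_device,
	.stop_p2p_device  = drv_stop_p2p_device,
	/* ... the driver's remaining ops ... */
};
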
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
index c4ad7958af5..7d604c06c3d 100644
--- a/net/wireless/radiotap.c
+++ b/net/wireless/radiotap.c
@@ -41,6 +41,8 @@ static const struct radiotap_align_size rtap_namespace_sizes[] = {
[IEEE80211_RADIOTAP_TX_FLAGS] = { .align = 2, .size = 2, },
[IEEE80211_RADIOTAP_RTS_RETRIES] = { .align = 1, .size = 1, },
[IEEE80211_RADIOTAP_DATA_RETRIES] = { .align = 1, .size = 1, },
+ [IEEE80211_RADIOTAP_MCS] = { .align = 1, .size = 3, },
+ [IEEE80211_RADIOTAP_AMPDU_STATUS] = { .align = 4, .size = 8, },
/*
* add more here as they are defined in radiotap.h
*/
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 994e2f0cc7a..ef35f4ef2aa 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -684,22 +684,10 @@ EXPORT_SYMBOL(cfg80211_classify8021d);
const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie)
{
- u8 *end, *pos;
-
- pos = bss->information_elements;
- if (pos == NULL)
+ if (bss->information_elements == NULL)
return NULL;
- end = pos + bss->len_information_elements;
-
- while (pos + 1 < end) {
- if (pos + 2 + pos[1] > end)
- break;
- if (pos[0] == ie)
- return pos;
- pos += 2 + pos[1];
- }
-
- return NULL;
+ return cfg80211_find_ie(ie, bss->information_elements,
+ bss->len_information_elements);
}
EXPORT_SYMBOL(ieee80211_bss_get_ie);
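
The hunk above replaces an open-coded information-element walk with cfg80211_find_ie(), which performs the standard [id][len][len bytes] TLV scan. A self-contained userspace sketch of that walk:

#include <assert.h>
#include <stddef.h>

/* Sketch of the element (TLV) walk behind cfg80211_find_ie(). */
static const unsigned char *find_ie(unsigned char eid,
				    const unsigned char *ies, size_t len)
{
	while (len >= 2 && len >= 2u + ies[1]) {
		if (ies[0] == eid)
			return ies;
		len -= 2u + ies[1];
		ies += 2u + ies[1];
	}
	return NULL;
}

int main(void)
{
	/* SSID element (id 0, len 3, "abc") then DS params (id 3) */
	const unsigned char ies[] = { 0, 3, 'a', 'b', 'c', 3, 1, 6 };

	assert(find_ie(3, ies, sizeof(ies)) == &ies[5]);
	assert(find_ie(5, ies, sizeof(ies)) == NULL);
	return 0;
}
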
@@ -812,6 +800,10 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
if (otype == NL80211_IFTYPE_AP_VLAN)
return -EOPNOTSUPP;
+ /* cannot change into P2P device type */
+ if (ntype == NL80211_IFTYPE_P2P_DEVICE)
+ return -EOPNOTSUPP;
+
if (!rdev->ops->change_virtual_intf ||
!(rdev->wiphy.interface_modes & (1 << ntype)))
return -EOPNOTSUPP;
@@ -889,6 +881,9 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
case NUM_NL80211_IFTYPES:
/* not happening */
break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ WARN_ON(1);
+ break;
}
}
@@ -1053,8 +1048,15 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
list_for_each_entry(wdev_iter, &rdev->wdev_list, list) {
if (wdev_iter == wdev)
continue;
- if (!netif_running(wdev_iter->netdev))
- continue;
+ if (wdev_iter->netdev) {
+ if (!netif_running(wdev_iter->netdev))
+ continue;
+ } else if (wdev_iter->iftype == NL80211_IFTYPE_P2P_DEVICE) {
+ if (!wdev_iter->p2p_started)
+ continue;
+ } else {
+ WARN_ON(1);
+ }
if (rdev->wiphy.software_iftypes & BIT(wdev_iter->iftype))
continue;