Diffstat (limited to 'net')
-rw-r--r--  net/Makefile                       |   2
-rw-r--r--  net/bluetooth/hci_conn.c           |   2
-rw-r--r--  net/bluetooth/hci_core.c           |  60
-rw-r--r--  net/bluetooth/hci_event.c          |  20
-rw-r--r--  net/bluetooth/l2cap_core.c         |   5
-rw-r--r--  net/bluetooth/mgmt.c               |  99
-rw-r--r--  net/bluetooth/smp.c                |   5
-rw-r--r--  net/ceph/auth_x.c                  |  76
-rw-r--r--  net/ceph/auth_x.h                  |   1
-rw-r--r--  net/ceph/buffer.c                  |   4
-rw-r--r--  net/ceph/ceph_common.c             |  21
-rw-r--r--  net/ceph/messenger.c               |  34
-rw-r--r--  net/ceph/osd_client.c              | 118
-rw-r--r--  net/core/net_namespace.c           |  39
-rw-r--r--  net/core/rtnetlink.c               |   5
-rw-r--r--  net/ipv4/geneve.c                  |  30
-rw-r--r--  net/ipv4/ip_gre.c                  |   9
-rw-r--r--  net/ipv4/ip_tunnel.c               |   9
-rw-r--r--  net/mac80211/chan.c                |   4
-rw-r--r--  net/mac80211/key.c                 |   2
-rw-r--r--  net/mac80211/mlme.c                |   1
-rw-r--r--  net/mac80211/rx.c                  |  11
-rw-r--r--  net/netlink/af_netlink.c           |  54
-rw-r--r--  net/nonet.c                        |  26
-rw-r--r--  net/rds/message.c                  |   3
-rw-r--r--  net/socket.c                       |  20
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c  |   2
-rw-r--r--  net/sunrpc/cache.c                 |  26
-rw-r--r--  net/sunrpc/svc.c                   |  42
-rw-r--r--  net/sunrpc/svc_xprt.c              | 292
-rw-r--r--  net/sunrpc/svcsock.c               |   5
-rw-r--r--  net/sunrpc/xdr.c                   |   9
-rw-r--r--  net/wireless/chan.c                |   9
-rw-r--r--  net/wireless/nl80211.c             |   2
-rw-r--r--  net/wireless/reg.c                 |  20
35 files changed, 649 insertions, 418 deletions
diff --git a/net/Makefile b/net/Makefile
index 95fc694e4ddc..38704bdf941a 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -5,8 +5,6 @@
# Rewritten to use lists instead of if-statements.
#
-obj-y := nonet.o
-
obj-$(CONFIG_NET) := socket.o core/
tmp-$(CONFIG_COMPAT) := compat.o
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 79d84b88b8f0..fe18825cc8a4 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -661,7 +661,7 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
memset(&cp, 0, sizeof(cp));
/* Update random address, but set require_privacy to false so
- * that we never connect with an unresolvable address.
+ * that we never connect with a non-resolvable address.
*/
if (hci_update_random_address(req, false, &own_addr_type))
return;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 93f92a085506..5dcacf9607e4 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1373,8 +1373,6 @@ static void hci_init1_req(struct hci_request *req, unsigned long opt)
static void bredr_setup(struct hci_request *req)
{
- struct hci_dev *hdev = req->hdev;
-
__le16 param;
__u8 flt_type;
@@ -1403,14 +1401,6 @@ static void bredr_setup(struct hci_request *req)
/* Connection accept timeout ~20 secs */
param = cpu_to_le16(0x7d00);
hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
-
- /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
- * but it does not support page scan related HCI commands.
- */
- if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
- hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
- hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
- }
}
static void le_setup(struct hci_request *req)
@@ -1718,6 +1708,16 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
if (hdev->commands[5] & 0x10)
hci_setup_link_policy(req);
+ if (hdev->commands[8] & 0x01)
+ hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
+
+ /* Some older Broadcom based Bluetooth 1.2 controllers do not
+ * support the Read Page Scan Type command. Check support for
+ * this command in the bit mask of supported commands.
+ */
+ if (hdev->commands[13] & 0x01)
+ hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
+
if (lmp_le_capable(hdev)) {
u8 events[8];
@@ -2634,6 +2634,12 @@ static int hci_dev_do_close(struct hci_dev *hdev)
drain_workqueue(hdev->workqueue);
hci_dev_lock(hdev);
+
+ if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
+ if (hdev->dev_type == HCI_BREDR)
+ mgmt_powered(hdev, 0);
+ }
+
hci_inquiry_cache_flush(hdev);
hci_pend_le_actions_clear(hdev);
hci_conn_hash_flush(hdev);
@@ -2681,14 +2687,6 @@ static int hci_dev_do_close(struct hci_dev *hdev)
hdev->flags &= BIT(HCI_RAW);
hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
- if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
- if (hdev->dev_type == HCI_BREDR) {
- hci_dev_lock(hdev);
- mgmt_powered(hdev, 0);
- hci_dev_unlock(hdev);
- }
- }
-
/* Controller radio is available but is currently powered down */
hdev->amp_status = AMP_STATUS_POWERED_DOWN;
@@ -3083,7 +3081,9 @@ static void hci_power_on(struct work_struct *work)
err = hci_dev_do_open(hdev);
if (err < 0) {
+ hci_dev_lock(hdev);
mgmt_set_powered_failed(hdev, err);
+ hci_dev_unlock(hdev);
return;
}
@@ -3959,17 +3959,29 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
}
/* In case of required privacy without resolvable private address,
- * use an unresolvable private address. This is useful for active
+ * use a non-resolvable private address. This is useful for active
* scanning and non-connectable advertising.
*/
if (require_privacy) {
- bdaddr_t urpa;
+ bdaddr_t nrpa;
+
+ while (true) {
+ /* The non-resolvable private address is generated
+ * from six random bytes with the two most significant
+ * bits cleared.
+ */
+ get_random_bytes(&nrpa, 6);
+ nrpa.b[5] &= 0x3f;
- get_random_bytes(&urpa, 6);
- urpa.b[5] &= 0x3f; /* Clear two most significant bits */
+ /* The non-resolvable private address shall not be
+ * equal to the public address.
+ */
+ if (bacmp(&hdev->bdaddr, &nrpa))
+ break;
+ }
*own_addr_type = ADDR_LE_DEV_RANDOM;
- set_random_addr(req, &urpa);
+ set_random_addr(req, &nrpa);
return 0;
}
@@ -5625,7 +5637,7 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
u8 filter_policy;
/* Set require_privacy to false since no SCAN_REQ are send
- * during passive scanning. Not using an unresolvable address
+ * during passive scanning. Not using a non-resolvable address
* here is important so that peer devices using direct
* advertising with our address will be correctly reported
* by the controller.
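
The NRPA loop added above is compact enough to misread; the same logic as a standalone C sketch, with rand() standing in for the kernel's get_random_bytes() and a plain struct standing in for bdaddr_t:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct bdaddr { uint8_t b[6]; };    /* stand-in for bdaddr_t */

    static void make_nrpa(const struct bdaddr *pub, struct bdaddr *nrpa)
    {
            do {
                    /* six random bytes, then clear the two most
                     * significant bits of the top byte */
                    for (int i = 0; i < 6; i++)
                            nrpa->b[i] = rand() & 0xff;
                    nrpa->b[5] &= 0x3f;
                    /* retry while it collides with the public address */
            } while (memcmp(nrpa->b, pub->b, 6) == 0);
    }
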
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 322abbbbcef9..39a5c8a01726 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -257,6 +257,8 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
if (!sent)
return;
+ hci_dev_lock(hdev);
+
if (!status) {
__u8 param = *((__u8 *) sent);
@@ -268,6 +270,8 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
if (test_bit(HCI_MGMT, &hdev->dev_flags))
mgmt_auth_enable_complete(hdev, status);
+
+ hci_dev_unlock(hdev);
}
static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
@@ -443,6 +447,8 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
if (!sent)
return;
+ hci_dev_lock(hdev);
+
if (!status) {
if (sent->mode)
hdev->features[1][0] |= LMP_HOST_SSP;
@@ -458,6 +464,8 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
else
clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
}
+
+ hci_dev_unlock(hdev);
}
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
@@ -471,6 +479,8 @@ static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
if (!sent)
return;
+ hci_dev_lock(hdev);
+
if (!status) {
if (sent->support)
hdev->features[1][0] |= LMP_HOST_SC;
@@ -486,6 +496,8 @@ static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
else
clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
}
+
+ hci_dev_unlock(hdev);
}
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1135,6 +1147,8 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
if (!cp)
return;
+ hci_dev_lock(hdev);
+
switch (cp->enable) {
case LE_SCAN_ENABLE:
set_bit(HCI_LE_SCAN, &hdev->dev_flags);
@@ -1184,6 +1198,8 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
break;
}
+
+ hci_dev_unlock(hdev);
}
static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
@@ -1278,6 +1294,8 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
if (!sent)
return;
+ hci_dev_lock(hdev);
+
if (sent->le) {
hdev->features[1][0] |= LMP_HOST_LE;
set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
@@ -1291,6 +1309,8 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
hdev->features[1][0] |= LMP_HOST_LE_BREDR;
else
hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
+
+ hci_dev_unlock(hdev);
}
static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index a2b6dfa38a0c..d04dc0095736 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -6966,8 +6966,9 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
test_bit(HCI_HS_ENABLED, &hcon->hdev->dev_flags))
conn->local_fixed_chan |= L2CAP_FC_A2MP;
- if (bredr_sc_enabled(hcon->hdev) &&
- test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags))
+ if (test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags) &&
+ (bredr_sc_enabled(hcon->hdev) ||
+ test_bit(HCI_FORCE_LESC, &hcon->hdev->dbg_flags)))
conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
mutex_init(&conn->ident_lock);
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 7384f1161336..693ce8bcd06e 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -2199,12 +2199,14 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
struct cmd_lookup match = { NULL, hdev };
+ hci_dev_lock(hdev);
+
if (status) {
u8 mgmt_err = mgmt_status(status);
mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
&mgmt_err);
- return;
+ goto unlock;
}
mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
@@ -2222,17 +2224,16 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status)
if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
struct hci_request req;
- hci_dev_lock(hdev);
-
hci_req_init(&req, hdev);
update_adv_data(&req);
update_scan_rsp_data(&req);
hci_req_run(&req, NULL);
hci_update_background_scan(hdev);
-
- hci_dev_unlock(hdev);
}
+
+unlock:
+ hci_dev_unlock(hdev);
}
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
@@ -3114,14 +3115,13 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status)
conn->disconn_cfm_cb = NULL;
hci_conn_drop(conn);
- hci_conn_put(conn);
-
- mgmt_pending_remove(cmd);
/* The device is paired so there is no need to remove
* its connection parameters anymore.
*/
clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
+
+ hci_conn_put(conn);
}
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
@@ -3130,8 +3130,10 @@ void mgmt_smp_complete(struct hci_conn *conn, bool complete)
struct pending_cmd *cmd;
cmd = find_pairing(conn);
- if (cmd)
+ if (cmd) {
cmd->cmd_complete(cmd, status);
+ mgmt_pending_remove(cmd);
+ }
}
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
@@ -3141,10 +3143,13 @@ static void pairing_complete_cb(struct hci_conn *conn, u8 status)
BT_DBG("status %u", status);
cmd = find_pairing(conn);
- if (!cmd)
+ if (!cmd) {
BT_DBG("Unable to find a pending command");
- else
- cmd->cmd_complete(cmd, mgmt_status(status));
+ return;
+ }
+
+ cmd->cmd_complete(cmd, mgmt_status(status));
+ mgmt_pending_remove(cmd);
}
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
@@ -3157,10 +3162,13 @@ static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
return;
cmd = find_pairing(conn);
- if (!cmd)
+ if (!cmd) {
BT_DBG("Unable to find a pending command");
- else
- cmd->cmd_complete(cmd, mgmt_status(status));
+ return;
+ }
+
+ cmd->cmd_complete(cmd, mgmt_status(status));
+ mgmt_pending_remove(cmd);
}
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -3274,8 +3282,10 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
cmd->user_data = hci_conn_get(conn);
if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
- hci_conn_security(conn, sec_level, auth_type, true))
- pairing_complete(cmd, 0);
+ hci_conn_security(conn, sec_level, auth_type, true)) {
+ cmd->cmd_complete(cmd, 0);
+ mgmt_pending_remove(cmd);
+ }
err = 0;
@@ -3317,7 +3327,8 @@ static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
- pairing_complete(cmd, MGMT_STATUS_CANCELLED);
+ cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
+ mgmt_pending_remove(cmd);
err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
addr, sizeof(*addr));
@@ -3791,7 +3802,7 @@ static bool trigger_discovery(struct hci_request *req, u8 *status)
/* All active scans will be done with either a resolvable
* private address (when privacy feature has been enabled)
- * or unresolvable private address.
+ * or a non-resolvable private address.
*/
err = hci_update_random_address(req, true, &own_addr_type);
if (err < 0) {
@@ -4279,12 +4290,14 @@ static void set_advertising_complete(struct hci_dev *hdev, u8 status)
{
struct cmd_lookup match = { NULL, hdev };
+ hci_dev_lock(hdev);
+
if (status) {
u8 mgmt_err = mgmt_status(status);
mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
cmd_status_rsp, &mgmt_err);
- return;
+ goto unlock;
}
if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
@@ -4299,6 +4312,9 @@ static void set_advertising_complete(struct hci_dev *hdev, u8 status)
if (match.sk)
sock_put(match.sk);
+
+unlock:
+ hci_dev_unlock(hdev);
}
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -6081,6 +6097,11 @@ static int powered_update_hci(struct hci_dev *hdev)
hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
}
+ if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
+ u8 sc = 0x01;
+ hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, sizeof(sc), &sc);
+ }
+
if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
lmp_bredr_capable(hdev)) {
struct hci_cp_write_le_host_supported cp;
@@ -6130,8 +6151,7 @@ static int powered_update_hci(struct hci_dev *hdev)
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
struct cmd_lookup match = { NULL, hdev };
- u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
- u8 zero_cod[] = { 0, 0, 0 };
+ u8 status, zero_cod[] = { 0, 0, 0 };
int err;
if (!test_bit(HCI_MGMT, &hdev->dev_flags))
@@ -6147,7 +6167,20 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
}
mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
- mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status_not_powered);
+
+ /* If the power off is because of hdev unregistration let us
+ * use the appropriate INVALID_INDEX status. Otherwise use
+ * NOT_POWERED. We cover both scenarios here since later in
+ * mgmt_index_removed() any hci_conn callbacks will have already
+ * been triggered, potentially causing misleading DISCONNECTED
+ * status responses.
+ */
+ if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
+ status = MGMT_STATUS_INVALID_INDEX;
+ else
+ status = MGMT_STATUS_NOT_POWERED;
+
+ mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
@@ -6681,8 +6714,10 @@ void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
cmd ? cmd->sk : NULL);
- if (cmd)
- pairing_complete(cmd, status);
+ if (cmd) {
+ cmd->cmd_complete(cmd, status);
+ mgmt_pending_remove(cmd);
+ }
}
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
@@ -7046,13 +7081,15 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
* kept and checking possible scan response data
* will be skipped.
*/
- if (hdev->discovery.uuid_count > 0) {
+ if (hdev->discovery.uuid_count > 0)
match = eir_has_uuids(eir, eir_len,
hdev->discovery.uuid_count,
hdev->discovery.uuids);
- if (!match)
- return;
- }
+ else
+ match = true;
+
+ if (!match && !scan_rsp_len)
+ return;
/* Copy EIR or advertising data into event */
memcpy(ev->eir, eir, eir_len);
@@ -7061,8 +7098,10 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
* provided, results with empty EIR or advertising data
* should be dropped since they do not match any UUID.
*/
- if (hdev->discovery.uuid_count > 0)
+ if (hdev->discovery.uuid_count > 0 && !scan_rsp_len)
return;
+
+ match = false;
}
if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 6a46252fe66f..b67749bb55bf 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -1673,7 +1673,8 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
/* SMP over BR/EDR requires special treatment */
if (conn->hcon->type == ACL_LINK) {
/* We must have a BR/EDR SC link */
- if (!test_bit(HCI_CONN_AES_CCM, &conn->hcon->flags))
+ if (!test_bit(HCI_CONN_AES_CCM, &conn->hcon->flags) &&
+ !test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
return SMP_CROSS_TRANSP_NOT_ALLOWED;
set_bit(SMP_FLAG_SC, &smp->flags);
@@ -2927,7 +2928,7 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid)
tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, 0);
if (IS_ERR(tfm_aes)) {
BT_ERR("Unable to create crypto context");
- return ERR_PTR(PTR_ERR(tfm_aes));
+ return ERR_CAST(tfm_aes);
}
create_chan:
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index 7e38b729696a..15845814a0f2 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -8,6 +8,7 @@
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
+#include <linux/ceph/messenger.h>
#include "crypto.h"
#include "auth_x.h"
@@ -293,6 +294,11 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
dout("build_authorizer for %s %p\n",
ceph_entity_type_name(th->service), au);
+ ceph_crypto_key_destroy(&au->session_key);
+ ret = ceph_crypto_key_clone(&au->session_key, &th->session_key);
+ if (ret)
+ return ret;
+
maxlen = sizeof(*msg_a) + sizeof(msg_b) +
ceph_x_encrypt_buflen(ticket_blob_len);
dout(" need len %d\n", maxlen);
@@ -302,8 +308,10 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
}
if (!au->buf) {
au->buf = ceph_buffer_new(maxlen, GFP_NOFS);
- if (!au->buf)
+ if (!au->buf) {
+ ceph_crypto_key_destroy(&au->session_key);
return -ENOMEM;
+ }
}
au->service = th->service;
au->secret_id = th->secret_id;
@@ -329,7 +337,7 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
get_random_bytes(&au->nonce, sizeof(au->nonce));
msg_b.struct_v = 1;
msg_b.nonce = cpu_to_le64(au->nonce);
- ret = ceph_x_encrypt(&th->session_key, &msg_b, sizeof(msg_b),
+ ret = ceph_x_encrypt(&au->session_key, &msg_b, sizeof(msg_b),
p, end - p);
if (ret < 0)
goto out_buf;
@@ -560,6 +568,8 @@ static int ceph_x_create_authorizer(
auth->authorizer_buf_len = au->buf->vec.iov_len;
auth->authorizer_reply_buf = au->reply_buf;
auth->authorizer_reply_buf_len = sizeof (au->reply_buf);
+ auth->sign_message = ac->ops->sign_message;
+ auth->check_message_signature = ac->ops->check_message_signature;
return 0;
}
@@ -588,17 +598,13 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
struct ceph_authorizer *a, size_t len)
{
struct ceph_x_authorizer *au = (void *)a;
- struct ceph_x_ticket_handler *th;
int ret = 0;
struct ceph_x_authorize_reply reply;
void *preply = &reply;
void *p = au->reply_buf;
void *end = p + sizeof(au->reply_buf);
- th = get_ticket_handler(ac, au->service);
- if (IS_ERR(th))
- return PTR_ERR(th);
- ret = ceph_x_decrypt(&th->session_key, &p, end, &preply, sizeof(reply));
+ ret = ceph_x_decrypt(&au->session_key, &p, end, &preply, sizeof(reply));
if (ret < 0)
return ret;
if (ret != sizeof(reply))
@@ -618,6 +624,7 @@ static void ceph_x_destroy_authorizer(struct ceph_auth_client *ac,
{
struct ceph_x_authorizer *au = (void *)a;
+ ceph_crypto_key_destroy(&au->session_key);
ceph_buffer_put(au->buf);
kfree(au);
}
@@ -663,6 +670,59 @@ static void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac,
memset(&th->validity, 0, sizeof(th->validity));
}
+static int calcu_signature(struct ceph_x_authorizer *au,
+ struct ceph_msg *msg, __le64 *sig)
+{
+ int ret;
+ char tmp_enc[40];
+ __le32 tmp[5] = {
+ 16u, msg->hdr.crc, msg->footer.front_crc,
+ msg->footer.middle_crc, msg->footer.data_crc,
+ };
+ ret = ceph_x_encrypt(&au->session_key, &tmp, sizeof(tmp),
+ tmp_enc, sizeof(tmp_enc));
+ if (ret < 0)
+ return ret;
+ *sig = *(__le64*)(tmp_enc + 4);
+ return 0;
+}
+
+static int ceph_x_sign_message(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg)
+{
+ int ret;
+ if (!auth->authorizer)
+ return 0;
+ ret = calcu_signature((struct ceph_x_authorizer *)auth->authorizer,
+ msg, &msg->footer.sig);
+ if (ret < 0)
+ return ret;
+ msg->footer.flags |= CEPH_MSG_FOOTER_SIGNED;
+ return 0;
+}
+
+static int ceph_x_check_message_signature(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg)
+{
+ __le64 sig_check;
+ int ret;
+
+ if (!auth->authorizer)
+ return 0;
+ ret = calcu_signature((struct ceph_x_authorizer *)auth->authorizer,
+ msg, &sig_check);
+ if (ret < 0)
+ return ret;
+ if (sig_check == msg->footer.sig)
+ return 0;
+ if (msg->footer.flags & CEPH_MSG_FOOTER_SIGNED)
+ dout("ceph_x_check_message_signature %p has signature %llx "
+ "expect %llx\n", msg, msg->footer.sig, sig_check);
+ else
+ dout("ceph_x_check_message_signature %p sender did not set "
+ "CEPH_MSG_FOOTER_SIGNED\n", msg);
+ return -EBADMSG;
+}
static const struct ceph_auth_client_ops ceph_x_ops = {
.name = "x",
@@ -677,6 +737,8 @@ static const struct ceph_auth_client_ops ceph_x_ops = {
.invalidate_authorizer = ceph_x_invalidate_authorizer,
.reset = ceph_x_reset,
.destroy = ceph_x_destroy,
+ .sign_message = ceph_x_sign_message,
+ .check_message_signature = ceph_x_check_message_signature,
};
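
A note on calcu_signature() above: ceph_x_encrypt() appears to frame its output as a 4-byte length header followed by ciphertext, so tmp_enc + 4 takes the first eight ciphertext bytes as the 64-bit signature. A hedged restatement of that slice (the framing is an assumption read from auth_x.c, not a documented contract):

    char tmp_enc[40];
    int len = ceph_x_encrypt(&au->session_key, &tmp, sizeof(tmp),
                             tmp_enc, sizeof(tmp_enc));
    /* skip the assumed 4-byte length prefix; the signature is the
     * first 8 bytes of ciphertext */
    __le64 sig = *(__le64 *)(tmp_enc + 4);
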
diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h
index 65ee72082d99..e8b7c6917d47 100644
--- a/net/ceph/auth_x.h
+++ b/net/ceph/auth_x.h
@@ -26,6 +26,7 @@ struct ceph_x_ticket_handler {
struct ceph_x_authorizer {
+ struct ceph_crypto_key session_key;
struct ceph_buffer *buf;
unsigned int service;
u64 nonce;
diff --git a/net/ceph/buffer.c b/net/ceph/buffer.c
index 621b5f65407f..add5f921a0ff 100644
--- a/net/ceph/buffer.c
+++ b/net/ceph/buffer.c
@@ -6,7 +6,7 @@
#include <linux/ceph/buffer.h>
#include <linux/ceph/decode.h>
-#include <linux/ceph/libceph.h> /* for ceph_kv{malloc,free} */
+#include <linux/ceph/libceph.h> /* for ceph_kvmalloc */
struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp)
{
@@ -35,7 +35,7 @@ void ceph_buffer_release(struct kref *kref)
struct ceph_buffer *b = container_of(kref, struct ceph_buffer, kref);
dout("buffer_release %p\n", b);
- ceph_kvfree(b->vec.iov_base);
+ kvfree(b->vec.iov_base);
kfree(b);
}
EXPORT_SYMBOL(ceph_buffer_release);
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 58fbfe134f93..5d5ab67f516d 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -184,14 +184,6 @@ void *ceph_kvmalloc(size_t size, gfp_t flags)
return __vmalloc(size, flags | __GFP_HIGHMEM, PAGE_KERNEL);
}
-void ceph_kvfree(const void *ptr)
-{
- if (is_vmalloc_addr(ptr))
- vfree(ptr);
- else
- kfree(ptr);
-}
-
static int parse_fsid(const char *str, struct ceph_fsid *fsid)
{
@@ -245,6 +237,8 @@ enum {
Opt_noshare,
Opt_crc,
Opt_nocrc,
+ Opt_cephx_require_signatures,
+ Opt_nocephx_require_signatures,
};
static match_table_t opt_tokens = {
@@ -263,6 +257,8 @@ static match_table_t opt_tokens = {
{Opt_noshare, "noshare"},
{Opt_crc, "crc"},
{Opt_nocrc, "nocrc"},
+ {Opt_cephx_require_signatures, "cephx_require_signatures"},
+ {Opt_nocephx_require_signatures, "nocephx_require_signatures"},
{-1, NULL}
};
@@ -461,6 +457,12 @@ ceph_parse_options(char *options, const char *dev_name,
case Opt_nocrc:
opt->flags |= CEPH_OPT_NOCRC;
break;
+ case Opt_cephx_require_signatures:
+ opt->flags &= ~CEPH_OPT_NOMSGAUTH;
+ break;
+ case Opt_nocephx_require_signatures:
+ opt->flags |= CEPH_OPT_NOMSGAUTH;
+ break;
default:
BUG_ON(token);
@@ -504,6 +506,9 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private,
init_waitqueue_head(&client->auth_wq);
client->auth_err = 0;
+ if (!ceph_test_opt(client, NOMSGAUTH))
+ required_features |= CEPH_FEATURE_MSG_AUTH;
+
client->extra_mon_dispatch = NULL;
client->supported_features = CEPH_FEATURES_SUPPORTED_DEFAULT |
supported_features;
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 8d1653caffdb..33a2f201e460 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -1196,8 +1196,18 @@ static void prepare_write_message_footer(struct ceph_connection *con)
dout("prepare_write_message_footer %p\n", con);
con->out_kvec_is_msg = true;
con->out_kvec[v].iov_base = &m->footer;
- con->out_kvec[v].iov_len = sizeof(m->footer);
- con->out_kvec_bytes += sizeof(m->footer);
+ if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
+ if (con->ops->sign_message)
+ con->ops->sign_message(con, m);
+ else
+ m->footer.sig = 0;
+ con->out_kvec[v].iov_len = sizeof(m->footer);
+ con->out_kvec_bytes += sizeof(m->footer);
+ } else {
+ m->old_footer.flags = m->footer.flags;
+ con->out_kvec[v].iov_len = sizeof(m->old_footer);
+ con->out_kvec_bytes += sizeof(m->old_footer);
+ }
con->out_kvec_left++;
con->out_more = m->more_to_follow;
con->out_msg_done = true;
@@ -2249,6 +2259,7 @@ static int read_partial_message(struct ceph_connection *con)
int ret;
unsigned int front_len, middle_len, data_len;
bool do_datacrc = !con->msgr->nocrc;
+ bool need_sign = (con->peer_features & CEPH_FEATURE_MSG_AUTH);
u64 seq;
u32 crc;
@@ -2361,12 +2372,21 @@ static int read_partial_message(struct ceph_connection *con)
}
/* footer */
- size = sizeof (m->footer);
+ if (need_sign)
+ size = sizeof(m->footer);
+ else
+ size = sizeof(m->old_footer);
+
end += size;
ret = read_partial(con, end, size, &m->footer);
if (ret <= 0)
return ret;
+ if (!need_sign) {
+ m->footer.flags = m->old_footer.flags;
+ m->footer.sig = 0;
+ }
+
dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
m, front_len, m->footer.front_crc, middle_len,
m->footer.middle_crc, data_len, m->footer.data_crc);
@@ -2390,6 +2410,12 @@ static int read_partial_message(struct ceph_connection *con)
return -EBADMSG;
}
+ if (need_sign && con->ops->check_message_signature &&
+ con->ops->check_message_signature(con, m)) {
+ pr_err("read_partial_message %p signature check failed\n", m);
+ return -EBADMSG;
+ }
+
return 1; /* done! */
}
@@ -3288,7 +3314,7 @@ static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip)
static void ceph_msg_free(struct ceph_msg *m)
{
dout("%s %p\n", __func__, m);
- ceph_kvfree(m->front.iov_base);
+ kvfree(m->front.iov_base);
kmem_cache_free(ceph_msg_cache, m);
}
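
The footer juggling above is easier to follow next to the two on-wire layouts involved; roughly (a sketch of include/linux/ceph/msgr.h at this point, not authoritative):

    struct ceph_msg_footer_old {
            __le32 front_crc, middle_crc, data_crc;
            __u8 flags;
    } __attribute__ ((packed));

    struct ceph_msg_footer {
            __le32 front_crc, middle_crc, data_crc;
            __le64 sig;             /* message signature */
            __u8 flags;
    } __attribute__ ((packed));

Peers without CEPH_FEATURE_MSG_AUTH only understand the old, signature-less footer, hence the sizeof() switch on both the write and read paths.
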
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 6f164289bde8..53299c7b0ca4 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -292,6 +292,10 @@ static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
ceph_osd_data_release(&op->cls.request_data);
ceph_osd_data_release(&op->cls.response_data);
break;
+ case CEPH_OSD_OP_SETXATTR:
+ case CEPH_OSD_OP_CMPXATTR:
+ ceph_osd_data_release(&op->xattr.osd_data);
+ break;
default:
break;
}
@@ -476,8 +480,7 @@ void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
size_t payload_len = 0;
BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
- opcode != CEPH_OSD_OP_DELETE && opcode != CEPH_OSD_OP_ZERO &&
- opcode != CEPH_OSD_OP_TRUNCATE);
+ opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE);
op->extent.offset = offset;
op->extent.length = length;
@@ -545,6 +548,39 @@ void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
}
EXPORT_SYMBOL(osd_req_op_cls_init);
+int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
+ u16 opcode, const char *name, const void *value,
+ size_t size, u8 cmp_op, u8 cmp_mode)
+{
+ struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode);
+ struct ceph_pagelist *pagelist;
+ size_t payload_len;
+
+ BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);
+
+ pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
+ if (!pagelist)
+ return -ENOMEM;
+
+ ceph_pagelist_init(pagelist);
+
+ payload_len = strlen(name);
+ op->xattr.name_len = payload_len;
+ ceph_pagelist_append(pagelist, name, payload_len);
+
+ op->xattr.value_len = size;
+ ceph_pagelist_append(pagelist, value, size);
+ payload_len += size;
+
+ op->xattr.cmp_op = cmp_op;
+ op->xattr.cmp_mode = cmp_mode;
+
+ ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
+ op->payload_len = payload_len;
+ return 0;
+}
+EXPORT_SYMBOL(osd_req_op_xattr_init);
+
void osd_req_op_watch_init(struct ceph_osd_request *osd_req,
unsigned int which, u16 opcode,
u64 cookie, u64 version, int flag)
@@ -626,7 +662,6 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req,
case CEPH_OSD_OP_READ:
case CEPH_OSD_OP_WRITE:
case CEPH_OSD_OP_ZERO:
- case CEPH_OSD_OP_DELETE:
case CEPH_OSD_OP_TRUNCATE:
if (src->op == CEPH_OSD_OP_WRITE)
request_data_len = src->extent.length;
@@ -676,6 +711,19 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req,
dst->alloc_hint.expected_write_size =
cpu_to_le64(src->alloc_hint.expected_write_size);
break;
+ case CEPH_OSD_OP_SETXATTR:
+ case CEPH_OSD_OP_CMPXATTR:
+ dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
+ dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
+ dst->xattr.cmp_op = src->xattr.cmp_op;
+ dst->xattr.cmp_mode = src->xattr.cmp_mode;
+ osd_data = &src->xattr.osd_data;
+ ceph_osdc_msg_data_add(req->r_request, osd_data);
+ request_data_len = osd_data->pagelist->length;
+ break;
+ case CEPH_OSD_OP_CREATE:
+ case CEPH_OSD_OP_DELETE:
+ break;
default:
pr_err("unsupported osd opcode %s\n",
ceph_osd_op_name(src->op));
@@ -705,7 +753,8 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req,
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
struct ceph_file_layout *layout,
struct ceph_vino vino,
- u64 off, u64 *plen, int num_ops,
+ u64 off, u64 *plen,
+ unsigned int which, int num_ops,
int opcode, int flags,
struct ceph_snap_context *snapc,
u32 truncate_seq,
@@ -716,13 +765,11 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
u64 objnum = 0;
u64 objoff = 0;
u64 objlen = 0;
- u32 object_size;
- u64 object_base;
int r;
BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
- opcode != CEPH_OSD_OP_DELETE && opcode != CEPH_OSD_OP_ZERO &&
- opcode != CEPH_OSD_OP_TRUNCATE);
+ opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
+ opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);
req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
GFP_NOFS);
@@ -738,29 +785,24 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
return ERR_PTR(r);
}
- object_size = le32_to_cpu(layout->fl_object_size);
- object_base = off - objoff;
- if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
- if (truncate_size <= object_base) {
- truncate_size = 0;
- } else {
- truncate_size -= object_base;
- if (truncate_size > object_size)
- truncate_size = object_size;
+ if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
+ osd_req_op_init(req, which, opcode);
+ } else {
+ u32 object_size = le32_to_cpu(layout->fl_object_size);
+ u32 object_base = off - objoff;
+ if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
+ if (truncate_size <= object_base) {
+ truncate_size = 0;
+ } else {
+ truncate_size -= object_base;
+ if (truncate_size > object_size)
+ truncate_size = object_size;
+ }
}
+ osd_req_op_extent_init(req, which, opcode, objoff, objlen,
+ truncate_size, truncate_seq);
}
- osd_req_op_extent_init(req, 0, opcode, objoff, objlen,
- truncate_size, truncate_seq);
-
- /*
- * A second op in the ops array means the caller wants to
- * also issue a include a 'startsync' command so that the
- * osd will flush data quickly.
- */
- if (num_ops > 1)
- osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC);
-
req->r_base_oloc.pool = ceph_file_layout_pg_pool(*layout);
snprintf(req->r_base_oid.name, sizeof(req->r_base_oid.name),
@@ -2626,7 +2668,7 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc,
dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
vino.snap, off, *plen);
- req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 1,
+ req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
NULL, truncate_seq, truncate_size,
false);
@@ -2669,7 +2711,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
int page_align = off & ~PAGE_MASK;
BUG_ON(vino.snap != CEPH_NOSNAP); /* snapshots aren't writeable */
- req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 1,
+ req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
CEPH_OSD_OP_WRITE,
CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
snapc, truncate_seq, truncate_size,
@@ -2920,6 +2962,20 @@ static int invalidate_authorizer(struct ceph_connection *con)
return ceph_monc_validate_auth(&osdc->client->monc);
}
+static int sign_message(struct ceph_connection *con, struct ceph_msg *msg)
+{
+ struct ceph_osd *o = con->private;
+ struct ceph_auth_handshake *auth = &o->o_auth;
+ return ceph_auth_sign_message(auth, msg);
+}
+
+static int check_message_signature(struct ceph_connection *con, struct ceph_msg *msg)
+{
+ struct ceph_osd *o = con->private;
+ struct ceph_auth_handshake *auth = &o->o_auth;
+ return ceph_auth_check_message_signature(auth, msg);
+}
+
static const struct ceph_connection_operations osd_con_ops = {
.get = get_osd_con,
.put = put_osd_con,
@@ -2928,5 +2984,7 @@ static const struct ceph_connection_operations osd_con_ops = {
.verify_authorizer_reply = verify_authorizer_reply,
.invalidate_authorizer = invalidate_authorizer,
.alloc_msg = alloc_msg,
+ .sign_message = sign_message,
+ .check_message_signature = check_message_signature,
.fault = osd_reset,
};
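
A hedged usage sketch for the new osd_req_op_xattr_init() helper (the caller, names and buffers here are hypothetical; fs/ceph is the intended in-tree user):

    /* attach a SETXATTR op at op index 0; cmp_op/cmp_mode are only
     * meaningful for CEPH_OSD_OP_CMPXATTR, so pass 0 here */
    int ret = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_SETXATTR,
                                    "user.example", value, value_len,
                                    0, 0);
    if (ret)
            return ret;     /* -ENOMEM: the pagelist allocation failed */
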
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 7f155175bba8..ce780c722e48 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -337,17 +337,17 @@ EXPORT_SYMBOL_GPL(__put_net);
struct net *get_net_ns_by_fd(int fd)
{
- struct proc_ns *ei;
struct file *file;
+ struct ns_common *ns;
struct net *net;
file = proc_ns_fget(fd);
if (IS_ERR(file))
return ERR_CAST(file);
- ei = get_proc_ns(file_inode(file));
- if (ei->ns_ops == &netns_operations)
- net = get_net(ei->ns);
+ ns = get_proc_ns(file_inode(file));
+ if (ns->ops == &netns_operations)
+ net = get_net(container_of(ns, struct net, ns));
else
net = ERR_PTR(-EINVAL);
@@ -386,12 +386,15 @@ EXPORT_SYMBOL_GPL(get_net_ns_by_pid);
static __net_init int net_ns_net_init(struct net *net)
{
- return proc_alloc_inum(&net->proc_inum);
+#ifdef CONFIG_NET_NS
+ net->ns.ops = &netns_operations;
+#endif
+ return ns_alloc_inum(&net->ns);
}
static __net_exit void net_ns_net_exit(struct net *net)
{
- proc_free_inum(net->proc_inum);
+ ns_free_inum(&net->ns);
}
static struct pernet_operations __net_initdata net_ns_ops = {
@@ -629,7 +632,7 @@ void unregister_pernet_device(struct pernet_operations *ops)
EXPORT_SYMBOL_GPL(unregister_pernet_device);
#ifdef CONFIG_NET_NS
-static void *netns_get(struct task_struct *task)
+static struct ns_common *netns_get(struct task_struct *task)
{
struct net *net = NULL;
struct nsproxy *nsproxy;
@@ -640,17 +643,22 @@ static void *netns_get(struct task_struct *task)
net = get_net(nsproxy->net_ns);
task_unlock(task);
- return net;
+ return net ? &net->ns : NULL;
}
-static void netns_put(void *ns)
+static inline struct net *to_net_ns(struct ns_common *ns)
{
- put_net(ns);
+ return container_of(ns, struct net, ns);
}
-static int netns_install(struct nsproxy *nsproxy, void *ns)
+static void netns_put(struct ns_common *ns)
{
- struct net *net = ns;
+ put_net(to_net_ns(ns));
+}
+
+static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
+{
+ struct net *net = to_net_ns(ns);
if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
@@ -661,18 +669,11 @@ static int netns_install(struct nsproxy *nsproxy, void *ns)
return 0;
}
-static unsigned int netns_inum(void *ns)
-{
- struct net *net = ns;
- return net->proc_inum;
-}
-
const struct proc_ns_operations netns_operations = {
.name = "net",
.type = CLONE_NEWNET,
.get = netns_get,
.put = netns_put,
.install = netns_install,
- .inum = netns_inum,
};
#endif
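
The netns conversion above follows the generic ns_common embedding pattern: common code passes struct ns_common pointers around, and each namespace type recovers its enclosing object with container_of(). A self-contained sketch of the idiom (simplified types, not the kernel's own):

    #include <stddef.h>

    struct ns_common { unsigned int inum; };                  /* generic part */
    struct net_sketch { int payload; struct ns_common ns; };  /* outer type   */

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct net_sketch *to_net_sketch(struct ns_common *ns)
    {
            return container_of(ns, struct net_sketch, ns);
    }
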
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d06107d36ec8..9cf6fe9ddc0c 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2368,6 +2368,11 @@ int ndo_dflt_fdb_add(struct ndmsg *ndm,
return err;
}
+ if (vid) {
+ pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
+ return err;
+ }
+
if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
err = dev_uc_add_excl(dev, addr);
else if (is_multicast_ether_addr(addr))
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
index a457232f0131..95e47c97585e 100644
--- a/net/ipv4/geneve.c
+++ b/net/ipv4/geneve.c
@@ -159,6 +159,15 @@ static void geneve_notify_add_rx_port(struct geneve_sock *gs)
}
}
+static void geneve_notify_del_rx_port(struct geneve_sock *gs)
+{
+ struct sock *sk = gs->sock->sk;
+ sa_family_t sa_family = sk->sk_family;
+
+ if (sa_family == AF_INET)
+ udp_del_offload(&gs->udp_offloads);
+}
+
/* Callback from net/ipv4/udp.c to receive packets */
static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
@@ -287,6 +296,7 @@ struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
geneve_rcv_t *rcv, void *data,
bool no_share, bool ipv6)
{
+ struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_sock *gs;
gs = geneve_socket_create(net, port, rcv, data, ipv6);
@@ -296,15 +306,15 @@ struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
if (no_share) /* Return error if sharing is not allowed. */
return ERR_PTR(-EINVAL);
+ spin_lock(&gn->sock_lock);
gs = geneve_find_sock(net, port);
- if (gs) {
- if (gs->rcv == rcv)
- atomic_inc(&gs->refcnt);
- else
+ if (gs && ((gs->rcv != rcv) ||
+ !atomic_add_unless(&gs->refcnt, 1, 0)))
gs = ERR_PTR(-EBUSY);
- } else {
+ spin_unlock(&gn->sock_lock);
+
+ if (!gs)
gs = ERR_PTR(-EINVAL);
- }
return gs;
}
@@ -312,9 +322,17 @@ EXPORT_SYMBOL_GPL(geneve_sock_add);
void geneve_sock_release(struct geneve_sock *gs)
{
+ struct net *net = sock_net(gs->sock->sk);
+ struct geneve_net *gn = net_generic(net, geneve_net_id);
+
if (!atomic_dec_and_test(&gs->refcnt))
return;
+ spin_lock(&gn->sock_lock);
+ hlist_del_rcu(&gs->hlist);
+ geneve_notify_del_rx_port(gs);
+ spin_unlock(&gn->sock_lock);
+
queue_work(geneve_wq, &gs->del_work);
}
EXPORT_SYMBOL_GPL(geneve_sock_release);
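
The atomic_add_unless(&gs->refcnt, 1, 0) in geneve_sock_add() is the "get unless zero" idiom: take a reference only while the object is still live, so a socket racing with teardown is never resurrected. A generic sketch of the idiom (the obj and refcnt names are illustrative):

    /* returns NULL iff the counter already hit zero, i.e. the object
     * is being torn down and must not gain new users */
    static struct obj *obj_tryget(struct obj *o)
    {
            if (!atomic_add_unless(&o->refcnt, 1, 0))
                    return NULL;
            return o;
    }
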
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index ac8491245e5b..4f4bf5b99686 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -252,10 +252,6 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
struct ip_tunnel *tunnel = netdev_priv(dev);
const struct iphdr *tnl_params;
- skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
- if (IS_ERR(skb))
- goto out;
-
if (dev->header_ops) {
/* Need space for new headers */
if (skb_cow_head(skb, dev->needed_headroom -
@@ -268,6 +264,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
* to gre header.
*/
skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
+ skb_reset_mac_header(skb);
} else {
if (skb_cow_head(skb, dev->needed_headroom))
goto free_skb;
@@ -275,6 +272,10 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
tnl_params = &tunnel->parms.iph;
}
+ skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
+ if (IS_ERR(skb))
+ goto out;
+
__gre_xmit(skb, dev, tnl_params, skb->protocol);
return NETDEV_TX_OK;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 63e745aadab6..d3e447936720 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -514,6 +514,9 @@ const struct ip_tunnel_encap_ops __rcu *
int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *ops,
unsigned int num)
{
+ if (num >= MAX_IPTUN_ENCAP_OPS)
+ return -ERANGE;
+
return !cmpxchg((const struct ip_tunnel_encap_ops **)
&iptun_encaps[num],
NULL, ops) ? 0 : -1;
@@ -525,6 +528,9 @@ int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *ops,
{
int ret;
+ if (num >= MAX_IPTUN_ENCAP_OPS)
+ return -ERANGE;
+
ret = (cmpxchg((const struct ip_tunnel_encap_ops **)
&iptun_encaps[num],
ops, NULL) == ops) ? 0 : -1;
@@ -567,6 +573,9 @@ int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
if (t->encap.type == TUNNEL_ENCAP_NONE)
return 0;
+ if (t->encap.type >= MAX_IPTUN_ENCAP_OPS)
+ return -EINVAL;
+
rcu_read_lock();
ops = rcu_dereference(iptun_encaps[t->encap.type]);
if (likely(ops && ops->build_header))
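
For context, a sketch of how an encap provider uses the now bounds-checked registration helper (the example_* names are hypothetical; the in-tree FOU module is a real caller of this API):

    static const struct ip_tunnel_encap_ops example_encap_ops = {
            .encap_hlen   = example_encap_hlen,
            .build_header = example_build_header,
    };

    static int __init example_encap_init(void)
    {
            /* now fails with -ERANGE instead of indexing past the
             * iptun_encaps[] array for an out-of-range slot */
            return ip_tunnel_encap_add_ops(&example_encap_ops,
                                           TUNNEL_ENCAP_FOU);
    }
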
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 5d6dae9e4aac..da1c12c34487 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -1011,6 +1011,10 @@ ieee80211_vif_use_reserved_reassign(struct ieee80211_sub_if_data *sdata)
ieee80211_vif_update_chandef(sdata, &sdata->reserved_chandef);
+ ieee80211_recalc_smps_chanctx(local, new_ctx);
+ ieee80211_recalc_radar_chanctx(local, new_ctx);
+ ieee80211_recalc_chanctx_min_def(local, new_ctx);
+
if (changed)
ieee80211_bss_info_change_notify(sdata, changed);
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 434a91ad12c8..0bb7038121ac 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -656,7 +656,7 @@ void ieee80211_free_sta_keys(struct ieee80211_local *local,
int i;
mutex_lock(&local->key_mtx);
- for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
+ for (i = 0; i < ARRAY_SIZE(sta->gtk); i++) {
key = key_mtx_dereference(local, sta->gtk[i]);
if (!key)
continue;
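
This one-liner fixes a key leak: sta->gtk[] has more slots than NUM_DEFAULT_KEYS (management group keys live past that index), so the old bound skipped entries. ARRAY_SIZE() tracks the array's real size; a minimal illustration (the slot counts are made up):

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct key *gtk[4 + 2];         /* e.g. default keys + mgmt keys */

    for (size_t i = 0; i < ARRAY_SIZE(gtk); i++)
            release_key(gtk[i]);    /* visits all 6 slots, not just 4 */
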
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 75a9bf50207e..2c36c4765f47 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -174,6 +174,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
if (!(ht_cap->cap_info &
cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40))) {
ret = IEEE80211_STA_DISABLE_40MHZ;
+ vht_chandef = *chandef;
goto out;
}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 49c23bdf08bb..683b10f46505 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1761,14 +1761,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
sc = le16_to_cpu(hdr->seq_ctrl);
frag = sc & IEEE80211_SCTL_FRAG;
- if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
- goto out;
-
if (is_multicast_ether_addr(hdr->addr1)) {
rx->local->dot11MulticastReceivedFrameCount++;
- goto out;
+ goto out_no_led;
}
+ if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
+ goto out;
+
I802_DEBUG_INC(rx->local->rx_handlers_fragments);
if (skb_linearize(rx->skb))
@@ -1859,9 +1859,10 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
status->rx_flags |= IEEE80211_RX_FRAGMENTED;
out:
+ ieee80211_led_rx(rx->local);
+ out_no_led:
if (rx->sta)
rx->sta->rx_packets++;
- ieee80211_led_rx(rx->local);
return RX_CONTINUE;
}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index ef5f77b44ec7..074cf3e91c6f 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -525,14 +525,14 @@ out:
return err;
}
-static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
+static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
{
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
struct page *p_start, *p_end;
/* First page is flushed through netlink_{get,set}_status */
p_start = pgvec_to_page(hdr + PAGE_SIZE);
- p_end = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + hdr->nm_len - 1);
+ p_end = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
while (p_start <= p_end) {
flush_dcache_page(p_start);
p_start++;
@@ -550,9 +550,9 @@ static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
static void netlink_set_status(struct nl_mmap_hdr *hdr,
enum nl_mmap_status status)
{
+ smp_mb();
hdr->nm_status = status;
flush_dcache_page(pgvec_to_page(hdr));
- smp_wmb();
}
static struct nl_mmap_hdr *
@@ -714,24 +714,16 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
struct nl_mmap_hdr *hdr;
struct sk_buff *skb;
unsigned int maxlen;
- bool excl = true;
int err = 0, len = 0;
- /* Netlink messages are validated by the receiver before processing.
- * In order to avoid userspace changing the contents of the message
- * after validation, the socket and the ring may only be used by a
- * single process, otherwise we fall back to copying.
- */
- if (atomic_long_read(&sk->sk_socket->file->f_count) > 1 ||
- atomic_read(&nlk->mapped) > 1)
- excl = false;
-
mutex_lock(&nlk->pg_vec_lock);
ring = &nlk->tx_ring;
maxlen = ring->frame_size - NL_MMAP_HDRLEN;
do {
+ unsigned int nm_len;
+
hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
if (hdr == NULL) {
if (!(msg->msg_flags & MSG_DONTWAIT) &&
@@ -739,35 +731,23 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
schedule();
continue;
}
- if (hdr->nm_len > maxlen) {
+
+ nm_len = ACCESS_ONCE(hdr->nm_len);
+ if (nm_len > maxlen) {
err = -EINVAL;
goto out;
}
- netlink_frame_flush_dcache(hdr);
+ netlink_frame_flush_dcache(hdr, nm_len);
- if (likely(dst_portid == 0 && dst_group == 0 && excl)) {
- skb = alloc_skb_head(GFP_KERNEL);
- if (skb == NULL) {
- err = -ENOBUFS;
- goto out;
- }
- sock_hold(sk);
- netlink_ring_setup_skb(skb, sk, ring, hdr);
- NETLINK_CB(skb).flags |= NETLINK_SKB_TX;
- __skb_put(skb, hdr->nm_len);
- netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
- atomic_inc(&ring->pending);
- } else {
- skb = alloc_skb(hdr->nm_len, GFP_KERNEL);
- if (skb == NULL) {
- err = -ENOBUFS;
- goto out;
- }
- __skb_put(skb, hdr->nm_len);
- memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
- netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
+ skb = alloc_skb(nm_len, GFP_KERNEL);
+ if (skb == NULL) {
+ err = -ENOBUFS;
+ goto out;
}
+ __skb_put(skb, nm_len);
+ memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
+ netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
netlink_increment_head(ring);
@@ -813,7 +793,7 @@ static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
hdr->nm_pid = NETLINK_CB(skb).creds.pid;
hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
- netlink_frame_flush_dcache(hdr);
+ netlink_frame_flush_dcache(hdr, hdr->nm_len);
netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
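
The af_netlink changes above close a check-vs-use race: the ring frame header lives in memory mapped by userspace, so nm_len must be read exactly once and every later check and copy must use that snapshot. Condensed (ACCESS_ONCE semantics as in the kernel of this era):

    unsigned int nm_len = ACCESS_ONCE(hdr->nm_len); /* single read */

    if (nm_len > maxlen)
            return -EINVAL;         /* validate the snapshot ...    */
    memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN,
           nm_len);                 /* ... and copy only that much  */

Relatedly, netlink_set_status() now issues smp_mb() before publishing the status word, so userspace cannot see a frame marked available before the kernel's writes to it are visible.
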
diff --git a/net/nonet.c b/net/nonet.c
deleted file mode 100644
index b1a73fda9c12..000000000000
--- a/net/nonet.c
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * net/nonet.c
- *
- * Dummy functions to allow us to configure network support entirely
- * out of the kernel.
- *
- * Distributed under the terms of the GNU GPL version 2.
- * Copyright (c) Matthew Wilcox 2003
- */
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-
-static int sock_no_open(struct inode *irrelevant, struct file *dontcare)
-{
- return -ENXIO;
-}
-
-const struct file_operations bad_sock_fops = {
- .owner = THIS_MODULE,
- .open = sock_no_open,
- .llseek = noop_llseek,
-};
diff --git a/net/rds/message.c b/net/rds/message.c
index ff2202218187..5a21e6f5986f 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -325,7 +325,8 @@ int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
copied = 0;
while (iov_iter_count(to) && copied < len) {
- to_copy = min(iov_iter_count(to), sg->length - vec_off);
+ to_copy = min_t(unsigned long, iov_iter_count(to),
+ sg->length - vec_off);
to_copy = min_t(unsigned long, to_copy, len - copied);
rds_stats_add(s_copy_to_user, to_copy);
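
Why min() had to become min_t() here: the kernel's min() rejects operands of differing types at build time, and iov_iter_count() returns size_t while sg->length - vec_off is an unsigned int. min_t() casts both sides first; a minimal illustration:

    size_t a = iov_iter_count(to);          /* size_t       */
    unsigned int b = sg->length - vec_off;  /* unsigned int */

    /* min(a, b) trips min()'s type check; min_t() casts both: */
    unsigned long n = min_t(unsigned long, a, b);
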
diff --git a/net/socket.c b/net/socket.c
index 8809afccf7fa..a2c33a4dc7ba 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -113,7 +113,6 @@ unsigned int sysctl_net_busy_read __read_mostly;
unsigned int sysctl_net_busy_poll __read_mostly;
#endif
-static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos);
static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
@@ -151,7 +150,6 @@ static const struct file_operations socket_file_ops = {
.compat_ioctl = compat_sock_ioctl,
#endif
.mmap = sock_mmap,
- .open = sock_no_open, /* special open code to disallow open via /proc */
.release = sock_close,
.fasync = sock_fasync,
.sendpage = sock_sendpage,
@@ -374,7 +372,6 @@ struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
path.mnt = mntget(sock_mnt);
d_instantiate(path.dentry, SOCK_INODE(sock));
- SOCK_INODE(sock)->i_fop = &socket_file_ops;
file = alloc_file(&path, FMODE_READ | FMODE_WRITE,
&socket_file_ops);
@@ -559,23 +556,6 @@ static struct socket *sock_alloc(void)
return sock;
}
-/*
- * In theory you can't get an open on this inode, but /proc provides
- * a back door. Remember to keep it shut otherwise you'll let the
- * creepy crawlies in.
- */
-
-static int sock_no_open(struct inode *irrelevant, struct file *dontcare)
-{
- return -ENXIO;
-}
-
-const struct file_operations bad_sock_fops = {
- .owner = THIS_MODULE,
- .open = sock_no_open,
- .llseek = noop_llseek,
-};
-
/**
* sock_release - close a socket
* @sock: socket to close
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index de856ddf5fed..224a82f24d3c 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -886,7 +886,7 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
u32 priv_len, maj_stat;
int pad, saved_len, remaining_len, offset;
- rqstp->rq_splice_ok = false;
+ clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
priv_len = svc_getnl(&buf->head[0]);
if (rqstp->rq_deferred) {
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 066362141133..33fb105d4352 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -20,6 +20,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
+#include <linux/string_helpers.h>
#include <asm/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
@@ -1067,30 +1068,15 @@ void qword_add(char **bpp, int *lp, char *str)
{
char *bp = *bpp;
int len = *lp;
- char c;
+ int ret;
if (len < 0) return;
- while ((c=*str++) && len)
- switch(c) {
- case ' ':
- case '\t':
- case '\n':
- case '\\':
- if (len >= 4) {
- *bp++ = '\\';
- *bp++ = '0' + ((c & 0300)>>6);
- *bp++ = '0' + ((c & 0070)>>3);
- *bp++ = '0' + ((c & 0007)>>0);
- }
- len -= 4;
- break;
- default:
- *bp++ = c;
- len--;
- }
- if (c || len <1) len = -1;
+ ret = string_escape_str(str, &bp, len, ESCAPE_OCTAL, "\\ \n\t");
+ if (ret < 0 || ret == len)
+ len = -1;
else {
+ len -= ret;
*bp++ = ' ';
len--;
}
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 2783fd80c229..91eaef1844c8 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -191,7 +191,7 @@ svc_pool_map_init_percpu(struct svc_pool_map *m)
return err;
for_each_online_cpu(cpu) {
- BUG_ON(pidx > maxpools);
+ BUG_ON(pidx >= maxpools);
m->to_pool[cpu] = pidx;
m->pool_to[pidx] = cpu;
pidx++;
@@ -476,15 +476,11 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
i, serv->sv_name);
pool->sp_id = i;
- INIT_LIST_HEAD(&pool->sp_threads);
INIT_LIST_HEAD(&pool->sp_sockets);
INIT_LIST_HEAD(&pool->sp_all_threads);
spin_lock_init(&pool->sp_lock);
}
- if (svc_uses_rpcbind(serv) && (!serv->sv_shutdown))
- serv->sv_shutdown = svc_rpcb_cleanup;
-
return serv;
}
@@ -505,13 +501,15 @@ svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
unsigned int npools = svc_pool_map_get();
serv = __svc_create(prog, bufsize, npools, shutdown);
+ if (!serv)
+ goto out_err;
- if (serv != NULL) {
- serv->sv_function = func;
- serv->sv_module = mod;
- }
-
+ serv->sv_function = func;
+ serv->sv_module = mod;
return serv;
+out_err:
+ svc_pool_map_put();
+ return NULL;
}
EXPORT_SYMBOL_GPL(svc_create_pooled);
@@ -615,12 +613,14 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
goto out_enomem;
serv->sv_nrthreads++;
+ __set_bit(RQ_BUSY, &rqstp->rq_flags);
+ spin_lock_init(&rqstp->rq_lock);
+ rqstp->rq_server = serv;
+ rqstp->rq_pool = pool;
spin_lock_bh(&pool->sp_lock);
pool->sp_nrthreads++;
- list_add(&rqstp->rq_all, &pool->sp_all_threads);
+ list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
spin_unlock_bh(&pool->sp_lock);
- rqstp->rq_server = serv;
- rqstp->rq_pool = pool;
rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
if (!rqstp->rq_argp)
@@ -685,7 +685,8 @@ found_pool:
* so we don't try to kill it again.
*/
rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
- list_del_init(&rqstp->rq_all);
+ set_bit(RQ_VICTIM, &rqstp->rq_flags);
+ list_del_rcu(&rqstp->rq_all);
task = rqstp->rq_task;
}
spin_unlock_bh(&pool->sp_lock);
@@ -783,10 +784,11 @@ svc_exit_thread(struct svc_rqst *rqstp)
spin_lock_bh(&pool->sp_lock);
pool->sp_nrthreads--;
- list_del(&rqstp->rq_all);
+ if (!test_and_set_bit(RQ_VICTIM, &rqstp->rq_flags))
+ list_del_rcu(&rqstp->rq_all);
spin_unlock_bh(&pool->sp_lock);
- kfree(rqstp);
+ kfree_rcu(rqstp, rq_rcu_head);
/* Release the server */
if (serv)
@@ -1086,10 +1088,10 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
goto err_short_len;
/* Will be turned off only in gss privacy case: */
- rqstp->rq_splice_ok = true;
+ set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
/* Will be turned off only when NFSv4 Sessions are used */
- rqstp->rq_usedeferral = true;
- rqstp->rq_dropme = false;
+ set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
+ clear_bit(RQ_DROPME, &rqstp->rq_flags);
/* Setup reply header */
rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
@@ -1189,7 +1191,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
*statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
/* Encode reply */
- if (rqstp->rq_dropme) {
+ if (test_bit(RQ_DROPME, &rqstp->rq_flags)) {
if (procp->pc_release)
procp->pc_release(rqstp, NULL, rqstp->rq_resp);
goto dropit;
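
The svc.c half of this series converts the per-pool thread list to RCU so that svc_xprt_do_enqueue() (in the svc_xprt.c diff below) can walk it locklessly. Removal is the standard pairing of list_del_rcu() with kfree_rcu(), condensed from the hunks above:

    spin_lock_bh(&pool->sp_lock);
    list_del_rcu(&rqstp->rq_all);   /* RCU readers may still see it */
    spin_unlock_bh(&pool->sp_lock);

    kfree_rcu(rqstp, rq_rcu_head);  /* freed only after a grace period,
                                     * once those readers have finished */
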
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index bbb3b044b877..c69358b3cf7f 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -220,9 +220,11 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
*/
static void svc_xprt_received(struct svc_xprt *xprt)
{
- WARN_ON_ONCE(!test_bit(XPT_BUSY, &xprt->xpt_flags));
- if (!test_bit(XPT_BUSY, &xprt->xpt_flags))
+ if (!test_bit(XPT_BUSY, &xprt->xpt_flags)) {
+ WARN_ONCE(1, "xprt=0x%p already busy!", xprt);
return;
+ }
+
/* As soon as we clear busy, the xprt could be closed and
* 'put', so we need a reference to call svc_xprt_do_enqueue with:
*/
@@ -310,25 +312,6 @@ char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
}
EXPORT_SYMBOL_GPL(svc_print_addr);
-/*
- * Queue up an idle server thread. Must have pool->sp_lock held.
- * Note: this is really a stack rather than a queue, so that we only
- * use as many different threads as we need, and the rest don't pollute
- * the cache.
- */
-static void svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
-{
- list_add(&rqstp->rq_list, &pool->sp_threads);
-}
-
-/*
- * Dequeue an nfsd thread. Must have pool->sp_lock held.
- */
-static void svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
-{
- list_del(&rqstp->rq_list);
-}
-
static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
{
if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
@@ -341,11 +324,12 @@ static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
{
struct svc_pool *pool;
- struct svc_rqst *rqstp;
+ struct svc_rqst *rqstp = NULL;
int cpu;
+ bool queued = false;
if (!svc_xprt_has_something_to_do(xprt))
- return;
+ goto out;
/* Mark transport as busy. It will remain in this state until
* the provider calls svc_xprt_received. We update XPT_BUSY
@@ -355,43 +339,69 @@ static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
/* Don't enqueue transport while already enqueued */
dprintk("svc: transport %p busy, not enqueued\n", xprt);
- return;
+ goto out;
}
cpu = get_cpu();
pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
- spin_lock_bh(&pool->sp_lock);
- pool->sp_stats.packets++;
-
- if (!list_empty(&pool->sp_threads)) {
- rqstp = list_entry(pool->sp_threads.next,
- struct svc_rqst,
- rq_list);
- dprintk("svc: transport %p served by daemon %p\n",
- xprt, rqstp);
- svc_thread_dequeue(pool, rqstp);
- if (rqstp->rq_xprt)
- printk(KERN_ERR
- "svc_xprt_enqueue: server %p, rq_xprt=%p!\n",
- rqstp, rqstp->rq_xprt);
- /* Note the order of the following 3 lines:
- * We want to assign xprt to rqstp->rq_xprt only _after_
- * we've woken up the process, so that we don't race with
- * the lockless check in svc_get_next_xprt().
+ atomic_long_inc(&pool->sp_stats.packets);
+
+redo_search:
+ /* find a thread for this xprt */
+ rcu_read_lock();
+ list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
+ /* Do a lockless check first */
+ if (test_bit(RQ_BUSY, &rqstp->rq_flags))
+ continue;
+
+ /*
+ * Once the xprt has been queued, it can only be dequeued by
+ * the task that intends to service it. All we can do at that
+ * point is to try to wake this thread back up so that it can
+ * do so.
*/
- svc_xprt_get(xprt);
+ if (!queued) {
+ spin_lock_bh(&rqstp->rq_lock);
+ if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags)) {
+ /* already busy, move on... */
+ spin_unlock_bh(&rqstp->rq_lock);
+ continue;
+ }
+
+ /* this one will do */
+ rqstp->rq_xprt = xprt;
+ svc_xprt_get(xprt);
+ spin_unlock_bh(&rqstp->rq_lock);
+ }
+ rcu_read_unlock();
+
+ atomic_long_inc(&pool->sp_stats.threads_woken);
wake_up_process(rqstp->rq_task);
- rqstp->rq_xprt = xprt;
- pool->sp_stats.threads_woken++;
- } else {
+ put_cpu();
+ goto out;
+ }
+ rcu_read_unlock();
+
+ /*
+ * We didn't find an idle thread to use, so we need to queue the xprt.
+ * Do so and then search again. If we find one, we can't hook this one
+ * up to it directly but we can wake the thread up in the hopes that it
+ * will pick it up once it searches for an xprt to service.
+ */
+ if (!queued) {
+ queued = true;
dprintk("svc: transport %p put into queue\n", xprt);
+ spin_lock_bh(&pool->sp_lock);
list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
pool->sp_stats.sockets_queued++;
+ spin_unlock_bh(&pool->sp_lock);
+ goto redo_search;
}
-
- spin_unlock_bh(&pool->sp_lock);
+ rqstp = NULL;
put_cpu();
+out:
+ trace_svc_xprt_do_enqueue(xprt, rqstp);
}
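The rewritten enqueue path above does a lockless RCU scan for an idle thread, confirms the claim under the per-thread lock with test_and_set_bit(RQ_BUSY), and falls back to queueing the transport and re-scanning when every thread is busy. A compressed userspace model of the claim step, assuming a pthread mutex for rq_lock and a C11 atomic for RQ_BUSY (all names invented for the sketch):

#include <stdatomic.h>
#include <stdbool.h>
#include <pthread.h>

struct worker {
	atomic_bool busy;		/* models RQ_BUSY */
	pthread_mutex_t lock;		/* models rqstp->rq_lock */
	void *work;			/* models rqstp->rq_xprt */
};

/* Try to hand 'work' to an idle worker; return the worker to wake,
 * or NULL if the caller must queue the work and search again.
 */
static struct worker *claim_idle(struct worker *w, int n, void *work)
{
	for (int i = 0; i < n; i++) {
		if (atomic_load(&w[i].busy))	/* lockless first pass */
			continue;
		pthread_mutex_lock(&w[i].lock);
		if (atomic_exchange(&w[i].busy, true)) {
			/* lost the race; move on */
			pthread_mutex_unlock(&w[i].lock);
			continue;
		}
		w[i].work = work;		/* assign before waking */
		pthread_mutex_unlock(&w[i].lock);
		return &w[i];			/* caller wakes this one */
	}
	return NULL;
}

Assigning the work before the wakeup preserves the ordering the deleted comment insisted on: the woken thread must find its transport already set.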
/*
@@ -408,22 +418,28 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
EXPORT_SYMBOL_GPL(svc_xprt_enqueue);
/*
- * Dequeue the first transport. Must be called with the pool->sp_lock held.
+ * Dequeue the first transport, if there is one.
*/
static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
{
- struct svc_xprt *xprt;
+ struct svc_xprt *xprt = NULL;
if (list_empty(&pool->sp_sockets))
- return NULL;
-
- xprt = list_entry(pool->sp_sockets.next,
- struct svc_xprt, xpt_ready);
- list_del_init(&xprt->xpt_ready);
+ goto out;
- dprintk("svc: transport %p dequeued, inuse=%d\n",
- xprt, atomic_read(&xprt->xpt_ref.refcount));
+ spin_lock_bh(&pool->sp_lock);
+ if (likely(!list_empty(&pool->sp_sockets))) {
+ xprt = list_first_entry(&pool->sp_sockets,
+ struct svc_xprt, xpt_ready);
+ list_del_init(&xprt->xpt_ready);
+ svc_xprt_get(xprt);
+ dprintk("svc: transport %p dequeued, inuse=%d\n",
+ xprt, atomic_read(&xprt->xpt_ref.refcount));
+ }
+ spin_unlock_bh(&pool->sp_lock);
+out:
+ trace_svc_xprt_dequeue(xprt);
return xprt;
}
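svc_xprt_dequeue() now peeks at the list without the pool lock and only takes sp_lock when the list looked non-empty, rechecking under the lock before dequeueing. The same double-checked idiom in plain C (simplified singly linked list; the unlocked head read is as optimistic as the kernel's list_empty() fast path):

#include <pthread.h>
#include <stddef.h>

struct node { struct node *next; };

struct queue {
	struct node *head;
	pthread_mutex_t lock;
};

static struct node *dequeue(struct queue *q)
{
	struct node *n = NULL;

	if (q->head == NULL)		/* unlocked fast path: likely empty */
		return NULL;

	pthread_mutex_lock(&q->lock);
	if (q->head != NULL) {		/* recheck: another CPU may have won */
		n = q->head;
		q->head = n->next;
	}
	pthread_mutex_unlock(&q->lock);
	return n;
}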
@@ -484,34 +500,36 @@ static void svc_xprt_release(struct svc_rqst *rqstp)
}
/*
- * External function to wake up a server waiting for data
- * This really only makes sense for services like lockd
- * which have exactly one thread anyway.
+ * Some svc_serv's will have occasional work to do, even when an xprt is not
+ * waiting to be serviced. This function is there to "kick" a task in one of
+ * those services so that it can wake up and do that work. Note that we only
+ * bother with pool 0 as we don't need to wake up more than one thread for
+ * this purpose.
*/
void svc_wake_up(struct svc_serv *serv)
{
struct svc_rqst *rqstp;
- unsigned int i;
struct svc_pool *pool;
- for (i = 0; i < serv->sv_nrpools; i++) {
- pool = &serv->sv_pools[i];
+ pool = &serv->sv_pools[0];
- spin_lock_bh(&pool->sp_lock);
- if (!list_empty(&pool->sp_threads)) {
- rqstp = list_entry(pool->sp_threads.next,
- struct svc_rqst,
- rq_list);
- dprintk("svc: daemon %p woken up.\n", rqstp);
- /*
- svc_thread_dequeue(pool, rqstp);
- rqstp->rq_xprt = NULL;
- */
- wake_up_process(rqstp->rq_task);
- } else
- pool->sp_task_pending = 1;
- spin_unlock_bh(&pool->sp_lock);
+ rcu_read_lock();
+ list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
+ /* skip any that aren't queued */
+ if (test_bit(RQ_BUSY, &rqstp->rq_flags))
+ continue;
+ rcu_read_unlock();
+ dprintk("svc: daemon %p woken up.\n", rqstp);
+ wake_up_process(rqstp->rq_task);
+ trace_svc_wake_up(rqstp->rq_task->pid);
+ return;
}
+ rcu_read_unlock();
+
+ /* No free entries available */
+ set_bit(SP_TASK_PENDING, &pool->sp_flags);
+ smp_wmb();
+ trace_svc_wake_up(0);
}
EXPORT_SYMBOL_GPL(svc_wake_up);
@@ -622,75 +640,86 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
return 0;
}
+static bool
+rqst_should_sleep(struct svc_rqst *rqstp)
+{
+ struct svc_pool *pool = rqstp->rq_pool;
+
+ /* did someone call svc_wake_up? */
+ if (test_and_clear_bit(SP_TASK_PENDING, &pool->sp_flags))
+ return false;
+
+ /* was a socket queued? */
+ if (!list_empty(&pool->sp_sockets))
+ return false;
+
+ /* are we shutting down? */
+ if (signalled() || kthread_should_stop())
+ return false;
+
+ /* are we freezing? */
+ if (freezing(current))
+ return false;
+
+ return true;
+}
+
static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
{
struct svc_xprt *xprt;
struct svc_pool *pool = rqstp->rq_pool;
long time_left = 0;
+ /* rq_xprt should be clear on entry */
+ WARN_ON_ONCE(rqstp->rq_xprt);
+
/* Normally we will wait up to 5 seconds for any required
* cache information to be provided.
*/
rqstp->rq_chandle.thread_wait = 5*HZ;
- spin_lock_bh(&pool->sp_lock);
xprt = svc_xprt_dequeue(pool);
if (xprt) {
rqstp->rq_xprt = xprt;
- svc_xprt_get(xprt);
/* As there is a shortage of threads and this request
* had to be queued, don't allow the thread to wait so
* long for cache updates.
*/
rqstp->rq_chandle.thread_wait = 1*HZ;
- pool->sp_task_pending = 0;
- } else {
- if (pool->sp_task_pending) {
- pool->sp_task_pending = 0;
- xprt = ERR_PTR(-EAGAIN);
- goto out;
- }
- /*
- * We have to be able to interrupt this wait
- * to bring down the daemons ...
- */
- set_current_state(TASK_INTERRUPTIBLE);
+ clear_bit(SP_TASK_PENDING, &pool->sp_flags);
+ return xprt;
+ }
- /* No data pending. Go to sleep */
- svc_thread_enqueue(pool, rqstp);
- spin_unlock_bh(&pool->sp_lock);
+ /*
+ * We have to be able to interrupt this wait
+ * to bring down the daemons ...
+ */
+ set_current_state(TASK_INTERRUPTIBLE);
+ clear_bit(RQ_BUSY, &rqstp->rq_flags);
+ smp_mb();
- if (!(signalled() || kthread_should_stop())) {
- time_left = schedule_timeout(timeout);
- __set_current_state(TASK_RUNNING);
+ if (likely(rqst_should_sleep(rqstp)))
+ time_left = schedule_timeout(timeout);
+ else
+ __set_current_state(TASK_RUNNING);
- try_to_freeze();
+ try_to_freeze();
- xprt = rqstp->rq_xprt;
- if (xprt != NULL)
- return xprt;
- } else
- __set_current_state(TASK_RUNNING);
+ spin_lock_bh(&rqstp->rq_lock);
+ set_bit(RQ_BUSY, &rqstp->rq_flags);
+ spin_unlock_bh(&rqstp->rq_lock);
- spin_lock_bh(&pool->sp_lock);
- if (!time_left)
- pool->sp_stats.threads_timedout++;
+ xprt = rqstp->rq_xprt;
+ if (xprt != NULL)
+ return xprt;
- xprt = rqstp->rq_xprt;
- if (!xprt) {
- svc_thread_dequeue(pool, rqstp);
- spin_unlock_bh(&pool->sp_lock);
- dprintk("svc: server %p, no data yet\n", rqstp);
- if (signalled() || kthread_should_stop())
- return ERR_PTR(-EINTR);
- else
- return ERR_PTR(-EAGAIN);
- }
- }
-out:
- spin_unlock_bh(&pool->sp_lock);
- return xprt;
+ if (!time_left)
+ atomic_long_inc(&pool->sp_stats.threads_timedout);
+
+ if (signalled() || kthread_should_stop())
+ return ERR_PTR(-EINTR);
+ return ERR_PTR(-EAGAIN);
}
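The sleep path above is ordered to avoid lost wakeups: the thread clears RQ_BUSY, issues a full barrier (the smp_mb()), and only then runs rqst_should_sleep(), so an enqueuer that publishes work after the check is guaranteed to have observed the thread as idle. A userspace sketch of that ordering with C11 atomics standing in for the kernel primitives (variable names invented; the kernel also retakes rq_lock around the final set_bit):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool busy = true;		/* models RQ_BUSY */
static void *_Atomic pending;		/* models queued work / rq_xprt */

static void *wait_for_work(void)
{
	atomic_store(&busy, false);			/* clear RQ_BUSY */
	atomic_thread_fence(memory_order_seq_cst);	/* models smp_mb() */

	if (atomic_load(&pending) == NULL) {
		/* really sleep here; the kernel uses schedule_timeout() */
	}

	atomic_store(&busy, true);			/* RQ_BUSY again */
	return atomic_exchange(&pending, NULL);
}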
static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
@@ -719,7 +748,7 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
dprintk("svc_recv: found XPT_CLOSE\n");
svc_delete_xprt(xprt);
/* Leave XPT_BUSY set on the dead xprt: */
- return 0;
+ goto out;
}
if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
struct svc_xprt *newxpt;
@@ -750,6 +779,8 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
}
/* clear XPT_BUSY: */
svc_xprt_received(xprt);
+out:
+ trace_svc_handle_xprt(xprt, len);
return len;
}
@@ -797,7 +828,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
clear_bit(XPT_OLD, &xprt->xpt_flags);
- rqstp->rq_secure = xprt->xpt_ops->xpo_secure_port(rqstp);
+ if (xprt->xpt_ops->xpo_secure_port(rqstp))
+ set_bit(RQ_SECURE, &rqstp->rq_flags);
+ else
+ clear_bit(RQ_SECURE, &rqstp->rq_flags);
rqstp->rq_chandle.defer = svc_defer;
rqstp->rq_xid = svc_getu32(&rqstp->rq_arg.head[0]);
@@ -895,7 +929,6 @@ static void svc_age_temp_xprts(unsigned long closure)
continue;
list_del_init(le);
set_bit(XPT_CLOSE, &xprt->xpt_flags);
- set_bit(XPT_DETACHED, &xprt->xpt_flags);
dprintk("queuing xprt %p for closing\n", xprt);
/* a thread will dequeue and close it soon */
@@ -935,8 +968,7 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
xprt->xpt_ops->xpo_detach(xprt);
spin_lock_bh(&serv->sv_lock);
- if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
- list_del_init(&xprt->xpt_list);
+ list_del_init(&xprt->xpt_list);
WARN_ON_ONCE(!list_empty(&xprt->xpt_ready));
if (test_bit(XPT_TEMP, &xprt->xpt_flags))
serv->sv_tmpcnt--;
@@ -1080,7 +1112,7 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
struct svc_deferred_req *dr;
- if (rqstp->rq_arg.page_len || !rqstp->rq_usedeferral)
+ if (rqstp->rq_arg.page_len || !test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags))
return NULL; /* if more than a page, give up FIXME */
if (rqstp->rq_deferred) {
dr = rqstp->rq_deferred;
@@ -1109,7 +1141,7 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
}
svc_xprt_get(rqstp->rq_xprt);
dr->xprt = rqstp->rq_xprt;
- rqstp->rq_dropme = true;
+ set_bit(RQ_DROPME, &rqstp->rq_flags);
dr->handle.revisit = svc_revisit;
return &dr->handle;
@@ -1311,10 +1343,10 @@ static int svc_pool_stats_show(struct seq_file *m, void *p)
seq_printf(m, "%u %lu %lu %lu %lu\n",
pool->sp_id,
- pool->sp_stats.packets,
+ (unsigned long)atomic_long_read(&pool->sp_stats.packets),
pool->sp_stats.sockets_queued,
- pool->sp_stats.threads_woken,
- pool->sp_stats.threads_timedout);
+ (unsigned long)atomic_long_read(&pool->sp_stats.threads_woken),
+ (unsigned long)atomic_long_read(&pool->sp_stats.threads_timedout));
return 0;
}
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index f9c052d508f0..cc331b6cf573 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1145,7 +1145,10 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
rqstp->rq_xprt_ctxt = NULL;
rqstp->rq_prot = IPPROTO_TCP;
- rqstp->rq_local = !!test_bit(XPT_LOCAL, &svsk->sk_xprt.xpt_flags);
+ if (test_bit(XPT_LOCAL, &svsk->sk_xprt.xpt_flags))
+ set_bit(RQ_LOCAL, &rqstp->rq_flags);
+ else
+ clear_bit(RQ_LOCAL, &rqstp->rq_flags);
p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
calldir = p[1];
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 290af97bf6f9..1cb61242e55e 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -617,9 +617,10 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
fraglen = min_t(int, buf->len - len, tail->iov_len);
tail->iov_len -= fraglen;
buf->len -= fraglen;
- if (tail->iov_len && buf->len == len) {
+ if (tail->iov_len) {
xdr->p = tail->iov_base + tail->iov_len;
- /* xdr->end, xdr->iov should be set already */
+ WARN_ON_ONCE(!xdr->end);
+ WARN_ON_ONCE(!xdr->iov);
return;
}
WARN_ON_ONCE(fraglen);
@@ -631,11 +632,11 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
old = new + fraglen;
xdr->page_ptr -= (old >> PAGE_SHIFT) - (new >> PAGE_SHIFT);
- if (buf->page_len && buf->len == len) {
+ if (buf->page_len) {
xdr->p = page_address(*xdr->page_ptr);
xdr->end = (void *)xdr->p + PAGE_SIZE;
xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
- /* xdr->iov should already be NULL */
+ WARN_ON_ONCE(xdr->iov);
return;
}
if (fraglen) {
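The xdr_truncate_encode() change promotes "should already be set" comments into runtime checks; WARN_ON_ONCE() evaluates its condition and complains at most once per call site. A rough userspace equivalent, as a hypothetical macro (not the kernel's) built on a GNU C statement expression:

#include <stdio.h>

#define WARN_ON_ONCE_USER(cond) ({				\
	static int __warned;					\
	int __ret = !!(cond);					\
	if (__ret && !__warned) {				\
		__warned = 1;					\
		fprintf(stderr, "warning: %s\n", #cond);	\
	}							\
	__ret;							\
})

With that shape, the truncate path now complains loudly if xdr->end or xdr->iov is unexpectedly unset instead of silently assuming it was.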
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 85506f1d0789..7aaf7415dc4c 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -603,7 +603,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
{
struct ieee80211_sta_ht_cap *ht_cap;
struct ieee80211_sta_vht_cap *vht_cap;
- u32 width, control_freq;
+ u32 width, control_freq, cap;
if (WARN_ON(!cfg80211_chandef_valid(chandef)))
return false;
@@ -643,7 +643,8 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
return false;
break;
case NL80211_CHAN_WIDTH_80P80:
- if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ))
+ cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
+ if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
return false;
case NL80211_CHAN_WIDTH_80:
if (!vht_cap->vht_supported)
@@ -654,7 +655,9 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
case NL80211_CHAN_WIDTH_160:
if (!vht_cap->vht_supported)
return false;
- if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ))
+ cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
+ if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ &&
+ cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
return false;
prohibited_flags |= IEEE80211_CHAN_NO_160MHZ;
width = 160;
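The chan.c fix matters because the VHT supported-channel-width setting is a two-bit field, not a flag: field value 0 means 80 MHz only, 0x4 means 160 MHz, and 0x8 means both 160 and 80+80 MHz. A bare bitwise AND against the 0x4 constant would therefore reject the 0x8 encoding even though it also advertises 160 MHz support. A standalone check mirroring the fixed logic (constant values copied from the kernel's ieee80211.h, names shortened for the sketch):

#include <assert.h>

#define SUPP_CHAN_WIDTH_MASK		0x0000000C
#define SUPP_CHAN_WIDTH_160MHZ		0x00000004
#define SUPP_CHAN_WIDTH_160_80P80MHZ	0x00000008

static int supports_160mhz(unsigned int vht_cap)
{
	unsigned int w = vht_cap & SUPP_CHAN_WIDTH_MASK;

	/* Field value 1 (0x4) or 2 (0x8) both advertise 160 MHz;
	 * a plain (vht_cap & 0x4) test would miss the 0x8 encoding.
	 */
	return w == SUPP_CHAN_WIDTH_160MHZ ||
	       w == SUPP_CHAN_WIDTH_160_80P80MHZ;
}

int main(void)
{
	assert(supports_160mhz(SUPP_CHAN_WIDTH_160_80P80MHZ));
	assert(!supports_160mhz(0));	/* 80 MHz only */
	return 0;
}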
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index a17d6bc6b22c..7ca4b5133123 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -6002,7 +6002,7 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
}
/* there was no other matchset, so the RSSI one is alone */
- if (i == 0)
+ if (i == 0 && n_match_sets)
request->match_sets[0].rssi_thold = default_match_rssi;
request->min_rssi_thold = INT_MAX;
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 47be6163381c..7b8309840d4e 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1549,9 +1549,15 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
ret = cfg80211_reg_can_beacon(wiphy,
&wdev->chandef, wdev->iftype);
break;
+ case NL80211_IFTYPE_ADHOC:
+ if (!wdev->ssid_len)
+ goto out;
+
+ ret = cfg80211_reg_can_beacon(wiphy,
+ &wdev->chandef, wdev->iftype);
+ break;
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
- case NL80211_IFTYPE_ADHOC:
if (!wdev->current_bss ||
!wdev->current_bss->pub.channel)
goto out;
@@ -1907,7 +1913,7 @@ static enum reg_request_treatment
reg_process_hint_driver(struct wiphy *wiphy,
struct regulatory_request *driver_request)
{
- const struct ieee80211_regdomain *regd;
+ const struct ieee80211_regdomain *regd, *tmp;
enum reg_request_treatment treatment;
treatment = __reg_process_hint_driver(driver_request);
@@ -1927,7 +1933,10 @@ reg_process_hint_driver(struct wiphy *wiphy,
reg_free_request(driver_request);
return REG_REQ_IGNORE;
}
+
+ tmp = get_wiphy_regdom(wiphy);
rcu_assign_pointer(wiphy->regd, regd);
+ rcu_free_regdom(tmp);
}
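The reg.c hunk fixes a leak: rcu_assign_pointer() alone would drop the previous wiphy->regd without freeing it, so the old pointer is saved first and released with rcu_free_regdom() once readers are done. The shape of that publish-then-reclaim swap, as a loose userspace analogue using an atomic exchange (the direct free() only stands in for the RCU grace period):

#include <stdatomic.h>
#include <stdlib.h>

struct regdom { char alpha2[2]; };

static _Atomic(struct regdom *) live_regd;

/* Publish the new domain, then reclaim the old one. The kernel must
 * defer the free until RCU readers finish; free() here is a stand-in
 * for that deferred reclamation.
 */
static void swap_regdom(struct regdom *new_regd)
{
	struct regdom *old = atomic_exchange(&live_regd, new_regd);

	free(old);
}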
@@ -1986,11 +1995,8 @@ __reg_process_hint_country_ie(struct wiphy *wiphy,
return REG_REQ_IGNORE;
return REG_REQ_ALREADY_SET;
}
- /*
- * Two consecutive Country IE hints on the same wiphy.
- * This should be picked up early by the driver/stack
- */
- if (WARN_ON(regdom_changes(country_ie_request->alpha2)))
+
+ if (regdom_changes(country_ie_request->alpha2))
return REG_REQ_OK;
return REG_REQ_ALREADY_SET;
}