author    David S. Miller <davem@davemloft.net>    2014-08-05 13:18:20 -0700
committer David S. Miller <davem@davemloft.net>    2014-08-05 13:18:20 -0700
commit    aef4f5b6db654e512ebcccab2a6e50424c05d2f9 (patch)
tree      4daaee5ac85d1128233a45908dac5212f38ec7aa
parent    61ab9efddf51cbc0d57356a4d650785cf5721fbe (diff)
parent    dc6be9f54a4ecb0a09765d1f515ed947d86b7528 (diff)
Merge tag 'master-2014-07-31' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next
Conflicts:
	net/6lowpan/iphc.c

Minor conflicts in iphc.c were changes overlapping with some style cleanups.

John W. Linville says:

====================
Please pull this last(?) batch of wireless changes intended for the 3.17 stream...

For the NFC bits, Samuel says:

"This is a rather quiet one, we have:

- A new driver from ST Microelectronics for their NCI ST21NFCB,
  including device tree support.

- p2p support for the ST21NFCA driver

- A few fixes and enhancements for the NFC digital layer"

For the Atheros bits, Kalle says:

"Michal and Janusz did some important RX aggregation fixes, basically we
were missing RX reordering altogether. The 10.1 firmware doesn't support
Ad-Hoc mode and Michal fixed ath10k so that it doesn't advertise Ad-Hoc
support with that firmware. Also he implemented a workaround for a KVM
issue."

For the Bluetooth bits, Gustavo and Johan say:

"To quote Gustavo from his previous request:

'Some last minute fixes for -next. We have a fix for a use after free in
RFCOMM, another fix to an issue with ADV_DIRECT_IND and one for ADV_IND
with auto-connection handling. Last, we added support for reading the
codec and MWS setting for controllers that support these features.'

Additionally there are fixes to LE scanning, an update to conform to the
4.1 core specification as well as fixes for tracking the page scan state.
All of these fixes are important for 3.17."

And,

"We've got:

- 6lowpan fixes/cleanups

- A couple crash fixes, one for the Marvell HCI driver and another in
  LE SMP.

- Fix for an incorrect connected state check

- Fix for the bondable requirement during pairing (an issue which had
  crept in because of using "pairable" when in fact the actual meaning
  was "bondable"; these have different meanings in Bluetooth)"

Along with those are some late-breaking hardware support patches in
brcmfmac and b43 as well as a stray ath9k patch.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  Documentation/devicetree/bindings/net/nfc/st21nfcb.txt | 33
-rw-r--r--  Documentation/devicetree/bindings/net/wireless/brcm,bcm43xx-fmac.txt | 41
-rw-r--r--  MAINTAINERS | 4
-rw-r--r--  drivers/bcma/driver_chipcommon_pmu.c | 1
-rw-r--r--  drivers/bcma/host_pci.c | 1
-rw-r--r--  drivers/bcma/scan.c | 22
-rw-r--r--  drivers/bcma/sprom.c | 1
-rw-r--r--  drivers/bluetooth/btmrvl_main.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath10k/htc.c | 20
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_rx.c | 122
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_tx.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.c | 96
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.c | 17
-rw-r--r--  drivers/net/wireless/ath/ath10k/txrx.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath10k/txrx.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ahb.c | 1
-rw-r--r--  drivers/net/wireless/b43/Kconfig | 22
-rw-r--r--  drivers/net/wireless/b43/main.c | 3
-rw-r--r--  drivers/net/wireless/b43/phy_n.c | 6
-rw-r--r--  drivers/net/wireless/brcm80211/Kconfig | 10
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/Makefile | 7
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/bcdc.c | 20
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c | 20
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/chip.c | 8
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/commonring.c | 273
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/commonring.h | 69
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd.h | 7
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h | 33
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h | 36
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c | 9
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c | 25
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/flowring.c | 501
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/flowring.h | 84
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/fweh.c | 6
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/fweh.h | 5
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c | 1397
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h | 40
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/of.c | 56
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/of.h | 22
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/pcie.c | 1846
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/pcie.h | 29
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/proto.c | 29
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/proto.h | 31
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h | 12
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | 57
-rw-r--r--  drivers/net/wireless/brcm80211/include/brcm_hw_ids.h | 11
-rw-r--r--  drivers/net/wireless/iwlegacy/common.c | 3
-rw-r--r--  drivers/nfc/Kconfig | 2
-rw-r--r--  drivers/nfc/Makefile | 3
-rw-r--r--  drivers/nfc/st21nfca/Makefile | 2
-rw-r--r--  drivers/nfc/st21nfca/i2c.c | 9
-rw-r--r--  drivers/nfc/st21nfca/st21nfca.c | 272
-rw-r--r--  drivers/nfc/st21nfca/st21nfca.h | 26
-rw-r--r--  drivers/nfc/st21nfca/st21nfca_dep.c | 661
-rw-r--r--  drivers/nfc/st21nfca/st21nfca_dep.h | 43
-rw-r--r--  drivers/nfc/st21nfcb/Kconfig | 22
-rw-r--r--  drivers/nfc/st21nfcb/Makefile | 8
-rw-r--r--  drivers/nfc/st21nfcb/i2c.c | 462
-rw-r--r--  drivers/nfc/st21nfcb/ndlc.c | 298
-rw-r--r--  drivers/nfc/st21nfcb/ndlc.h | 55
-rw-r--r--  drivers/nfc/st21nfcb/st21nfcb.c | 129
-rw-r--r--  drivers/nfc/st21nfcb/st21nfcb.h | 38
-rw-r--r--  include/linux/bcma/bcma.h | 23
-rw-r--r--  include/linux/platform_data/st21nfcb.h | 32
-rw-r--r--  include/net/6lowpan.h | 50
-rw-r--r--  include/net/bluetooth/hci.h | 6
-rw-r--r--  include/net/bluetooth/hci_core.h | 3
-rw-r--r--  include/net/bluetooth/mgmt.h | 4
-rw-r--r--  include/net/nfc/digital.h | 13
-rw-r--r--  include/net/nfc/hci.h | 1
-rw-r--r--  net/6lowpan/iphc.c | 212
-rw-r--r--  net/bluetooth/hci_core.c | 185
-rw-r--r--  net/bluetooth/hci_event.c | 50
-rw-r--r--  net/bluetooth/hidp/core.c | 2
-rw-r--r--  net/bluetooth/mgmt.c | 57
-rw-r--r--  net/bluetooth/rfcomm/core.c | 7
-rw-r--r--  net/bluetooth/smp.c | 33
-rw-r--r--  net/nfc/digital.h | 3
-rw-r--r--  net/nfc/digital_core.c | 27
-rw-r--r--  net/nfc/digital_dep.c | 11
-rw-r--r--  net/nfc/digital_technology.c | 96
-rw-r--r--  net/nfc/hci/core.c | 7
-rw-r--r--  net/nfc/nci/ntf.c | 4
85 files changed, 7542 insertions, 371 deletions
diff --git a/Documentation/devicetree/bindings/net/nfc/st21nfcb.txt b/Documentation/devicetree/bindings/net/nfc/st21nfcb.txt
new file mode 100644
index 000000000000..3b58ae480344
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/nfc/st21nfcb.txt
@@ -0,0 +1,33 @@
+* STMicroelectronics SAS. ST21NFCB NFC Controller
+
+Required properties:
+- compatible: Should be "st,st21nfcb_i2c".
+- clock-frequency: I²C work frequency.
+- reg: address on the bus
+- interrupt-parent: phandle for the interrupt gpio controller
+- interrupts: GPIO interrupt to which the chip is connected
+- reset-gpios: Output GPIO pin used to reset the ST21NFCB
+
+Optional SoC Specific Properties:
+- pinctrl-names: Contains only one value - "default".
+- pinctrl-0: Specifies the pin control groups used for this controller.
+
+Example (for ARM-based BeagleBoard xM with ST21NFCB on I2C2):
+
+&i2c2 {
+
+ status = "okay";
+
+ st21nfcb: st21nfcb@8 {
+
+ compatible = "st,st21nfcb_i2c";
+
+ reg = <0x08>;
+ clock-frequency = <400000>;
+
+ interrupt-parent = <&gpio5>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+
+ reset-gpios = <&gpio5 29 GPIO_ACTIVE_HIGH>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/net/wireless/brcm,bcm43xx-fmac.txt b/Documentation/devicetree/bindings/net/wireless/brcm,bcm43xx-fmac.txt
new file mode 100644
index 000000000000..5dbf169cd81c
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/wireless/brcm,bcm43xx-fmac.txt
@@ -0,0 +1,41 @@
+Broadcom BCM43xx Fullmac wireless SDIO devices
+
+This node provides properties for controlling the Broadcom wireless device. The
+node is expected to be specified as a child node to the SDIO controller that
+connects the device to the system.
+
+Required properties:
+
+ - compatible : Should be "brcm,bcm4329-fmac".
+
+Optional properties:
+ - brcm,drive-strength : drive strength used for SDIO pins on device in mA
+ (default = 6).
+ - interrupt-parent : the phandle for the interrupt controller to which the
+ device interrupts are connected.
+ - interrupts : specifies attributes for the out-of-band interrupt (host-wake).
+ When not specified the device will use in-band SDIO interrupts.
+ - interrupt-names : name of the out-of-band interrupt, which must be set
+ to "host-wake".
+
+Example:
+
+mmc3: mmc@01c12000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc3_pins_a>;
+ vmmc-supply = <&reg_vmmc3>;
+ bus-width = <4>;
+ non-removable;
+ status = "okay";
+
+ brcmf: brcmf@1 {
+ reg = <1>;
+ compatible = "brcm,bcm4329-fmac";
+ interrupt-parent = <&pio>;
+ interrupts = <10 8>; /* PH10 / EINT10 */
+ interrupt-names = "host-wake";
+ };
+};
diff --git a/MAINTAINERS b/MAINTAINERS
index c77a0effe5dd..f00d2c52faeb 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -154,6 +154,7 @@ L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers)
L: linux-bluetooth@vger.kernel.org
S: Maintained
F: net/6lowpan/
+F: include/net/6lowpan.h
6PACK NETWORK DRIVER FOR AX.25
M: Andreas Koensgen <ajk@comnets.uni-bremen.de>
@@ -5714,7 +5715,8 @@ S: Maintained
F: drivers/net/ethernet/marvell/mvneta.*
MARVELL MWIFIEX WIRELESS DRIVER
-M: Bing Zhao <bzhao@marvell.com>
+M: Amitkumar Karwar <akarwar@marvell.com>
+M: Avinash Patil <patila@marvell.com>
L: linux-wireless@vger.kernel.org
S: Maintained
F: drivers/net/wireless/mwifiex/
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index bb694e2e9f32..fe0d48cb1778 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -603,6 +603,7 @@ void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
tmp = BCMA_CC_PMU_CTL_PLL_UPD | BCMA_CC_PMU_CTL_NOILPONW;
break;
+ case BCMA_CHIP_ID_BCM43131:
case BCMA_CHIP_ID_BCM43217:
case BCMA_CHIP_ID_BCM43227:
case BCMA_CHIP_ID_BCM43228:
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 3cf725a49dc1..294a7dd25190 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -280,6 +280,7 @@ static const struct pci_device_id bcma_pci_bridge_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4365) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
{ 0, },
};
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
index 37768401d113..b4764c6bcf17 100644
--- a/drivers/bcma/scan.c
+++ b/drivers/bcma/scan.c
@@ -32,17 +32,17 @@ static const struct bcma_device_id_name bcma_bcm_device_names[] = {
{ BCMA_CORE_4706_CHIPCOMMON, "BCM4706 ChipCommon" },
{ BCMA_CORE_4706_SOC_RAM, "BCM4706 SOC RAM" },
{ BCMA_CORE_4706_MAC_GBIT, "BCM4706 GBit MAC" },
- { BCMA_CORE_PCIEG2, "PCIe Gen 2" },
- { BCMA_CORE_DMA, "DMA" },
- { BCMA_CORE_SDIO3, "SDIO3" },
- { BCMA_CORE_USB20, "USB 2.0" },
- { BCMA_CORE_USB30, "USB 3.0" },
- { BCMA_CORE_A9JTAG, "ARM Cortex A9 JTAG" },
- { BCMA_CORE_DDR23, "Denali DDR2/DDR3 memory controller" },
- { BCMA_CORE_ROM, "ROM" },
- { BCMA_CORE_NAND, "NAND flash controller" },
- { BCMA_CORE_QSPI, "SPI flash controller" },
- { BCMA_CORE_CHIPCOMMON_B, "Chipcommon B" },
+ { BCMA_CORE_NS_PCIEG2, "PCIe Gen 2" },
+ { BCMA_CORE_NS_DMA, "DMA" },
+ { BCMA_CORE_NS_SDIO3, "SDIO3" },
+ { BCMA_CORE_NS_USB20, "USB 2.0" },
+ { BCMA_CORE_NS_USB30, "USB 3.0" },
+ { BCMA_CORE_NS_A9JTAG, "ARM Cortex A9 JTAG" },
+ { BCMA_CORE_NS_DDR23, "Denali DDR2/DDR3 memory controller" },
+ { BCMA_CORE_NS_ROM, "ROM" },
+ { BCMA_CORE_NS_NAND, "NAND flash controller" },
+ { BCMA_CORE_NS_QSPI, "SPI flash controller" },
+ { BCMA_CORE_NS_CHIPCOMMON_B, "Chipcommon B" },
{ BCMA_CORE_ARMCA9, "ARM Cortex A9 core (ihost)" },
{ BCMA_CORE_AMEMC, "AMEMC (DDR)" },
{ BCMA_CORE_ALTA, "ALTA (I2S)" },
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index 97bb38e9ed65..efb037f9c98a 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -534,6 +534,7 @@ static bool bcma_sprom_onchip_available(struct bcma_bus *bus)
/* for these chips OTP is always available */
present = true;
break;
+ case BCMA_CHIP_ID_BCM43131:
case BCMA_CHIP_ID_BCM43217:
case BCMA_CHIP_ID_BCM43227:
case BCMA_CHIP_ID_BCM43228:
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index bae8e6a0ecf6..1d7db2064889 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -710,12 +710,17 @@ struct btmrvl_private *btmrvl_add_card(void *card)
init_waitqueue_head(&priv->main_thread.wait_q);
priv->main_thread.task = kthread_run(btmrvl_service_main_thread,
&priv->main_thread, "btmrvl_main_service");
+ if (IS_ERR(priv->main_thread.task))
+ goto err_thread;
priv->btmrvl_dev.card = card;
priv->btmrvl_dev.tx_dnld_rdy = true;
return priv;
+err_thread:
+ btmrvl_free_adapter(priv);
+
err_adapter:
kfree(priv);
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index e493db4b4a41..5fdc40d3b378 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -546,7 +546,7 @@ static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
int ath10k_htc_wait_target(struct ath10k_htc *htc)
{
- int status = 0;
+ int i, status = 0;
struct ath10k_htc_svc_conn_req conn_req;
struct ath10k_htc_svc_conn_resp conn_resp;
struct ath10k_htc_msg *msg;
@@ -556,10 +556,26 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
status = wait_for_completion_timeout(&htc->ctl_resp,
ATH10K_HTC_WAIT_TIMEOUT_HZ);
- if (status <= 0) {
+ if (status == 0) {
+ /* Workaround: In some cases the PCI HIF doesn't
+ * receive interrupt for the control response message
+ * even if the buffer was completed. It is suspected
+ * iomap writes unmasking PCI CE irqs aren't propagated
+ * properly in KVM PCI-passthrough sometimes.
+ */
+ ath10k_warn("failed to receive control response completion, polling..\n");
+
+ for (i = 0; i < CE_COUNT; i++)
+ ath10k_hif_send_complete_check(htc->ar, i, 1);
+
+ status = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH10K_HTC_WAIT_TIMEOUT_HZ);
+
if (status == 0)
status = -ETIMEDOUT;
+ }
+ if (status < 0) {
ath10k_err("ctl_resp never came in (%d)\n", status);
return status;
}
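The change above implements a wait, poll, re-wait fallback: if the first bounded wait for the control response times out, the host manually drains every copy-engine completion (working around interrupts lost under KVM PCI passthrough) and waits exactly once more before giving up with -ETIMEDOUT. Below is a minimal standalone sketch of that control flow; a plain flag and stand-in helpers replace the driver's ctl_resp completion, wait_for_completion_timeout() and ath10k_hif_send_complete_check().

/* Illustration only: not driver code. */
#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

static bool response_arrived;

static int wait_for_response(void)
{
	/* returns >0 on success, 0 on timeout */
	return response_arrived ? 1 : 0;
}

static void poll_copy_engines(void)
{
	/* pretend that polling the hardware delivers the missed completion */
	response_arrived = true;
}

static int wait_target(void)
{
	int status = wait_for_response();

	if (status == 0) {
		/* interrupt may have been lost: drain completions by hand,
		 * then wait one more time before declaring a timeout */
		poll_copy_engines();
		status = wait_for_response();
		if (status == 0)
			return -ETIMEDOUT;
	}
	return 0;
}

int main(void)
{
	printf("wait_target() -> %d\n", wait_target());
	return 0;
}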
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index eebc860c3655..80cdac15588a 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -21,6 +21,7 @@
#include "txrx.h"
#include "debug.h"
#include "trace.h"
+#include "mac.h"
#include <linux/log2.h>
@@ -307,7 +308,8 @@ static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
u8 **fw_desc, int *fw_desc_len,
struct sk_buff **head_msdu,
- struct sk_buff **tail_msdu)
+ struct sk_buff **tail_msdu,
+ u32 *attention)
{
int msdu_len, msdu_chaining = 0;
struct sk_buff *msdu;
@@ -357,6 +359,11 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
break;
}
+ *attention |= __le32_to_cpu(rx_desc->attention.flags) &
+ (RX_ATTENTION_FLAGS_TKIP_MIC_ERR |
+ RX_ATTENTION_FLAGS_DECRYPT_ERR |
+ RX_ATTENTION_FLAGS_FCS_ERR |
+ RX_ATTENTION_FLAGS_MGMT_TYPE);
/*
* Copy the FW rx descriptor for this MSDU from the rx
* indication message into the MSDU's netbuf. HL uses the
@@ -1215,13 +1222,15 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
struct sk_buff *msdu_head, *msdu_tail;
+ attention = 0;
msdu_head = NULL;
msdu_tail = NULL;
ret = ath10k_htt_rx_amsdu_pop(htt,
&fw_desc,
&fw_desc_len,
&msdu_head,
- &msdu_tail);
+ &msdu_tail,
+ &attention);
if (ret < 0) {
ath10k_warn("failed to pop amsdu from htt rx ring %d\n",
@@ -1233,7 +1242,6 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
rxd = container_of((void *)msdu_head->data,
struct htt_rx_desc,
msdu_payload);
- attention = __le32_to_cpu(rxd->attention.flags);
if (!ath10k_htt_rx_amsdu_allowed(htt, msdu_head,
status,
@@ -1286,6 +1294,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
u8 *fw_desc;
int fw_desc_len, hdrlen, paramlen;
int trim;
+ u32 attention = 0;
fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
fw_desc = (u8 *)frag->fw_msdu_rx_desc;
@@ -1295,7 +1304,8 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
spin_lock_bh(&htt->rx_ring.lock);
ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
- &msdu_head, &msdu_tail);
+ &msdu_head, &msdu_tail,
+ &attention);
spin_unlock_bh(&htt->rx_ring.lock);
ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
@@ -1312,10 +1322,8 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
hdr = (struct ieee80211_hdr *)msdu_head->data;
rxd = (void *)msdu_head->data - sizeof(*rxd);
- tkip_mic_err = !!(__le32_to_cpu(rxd->attention.flags) &
- RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
- decrypt_err = !!(__le32_to_cpu(rxd->attention.flags) &
- RX_ATTENTION_FLAGS_DECRYPT_ERR);
+ tkip_mic_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
+ decrypt_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
RX_MSDU_START_INFO1_DECAP_FORMAT);
@@ -1422,6 +1430,86 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
}
}
+static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
+{
+ struct htt_rx_addba *ev = &resp->rx_addba;
+ struct ath10k_peer *peer;
+ struct ath10k_vif *arvif;
+ u16 info0, tid, peer_id;
+
+ info0 = __le16_to_cpu(ev->info0);
+ tid = MS(info0, HTT_RX_BA_INFO0_TID);
+ peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
+
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "htt rx addba tid %hu peer_id %hu size %hhu\n",
+ tid, peer_id, ev->window_size);
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer) {
+ ath10k_warn("received addba event for invalid peer_id: %hu\n",
+ peer_id);
+ spin_unlock_bh(&ar->data_lock);
+ return;
+ }
+
+ arvif = ath10k_get_arvif(ar, peer->vdev_id);
+ if (!arvif) {
+ ath10k_warn("received addba event for invalid vdev_id: %u\n",
+ peer->vdev_id);
+ spin_unlock_bh(&ar->data_lock);
+ return;
+ }
+
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
+ peer->addr, tid, ev->window_size);
+
+ ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
+{
+ struct htt_rx_delba *ev = &resp->rx_delba;
+ struct ath10k_peer *peer;
+ struct ath10k_vif *arvif;
+ u16 info0, tid, peer_id;
+
+ info0 = __le16_to_cpu(ev->info0);
+ tid = MS(info0, HTT_RX_BA_INFO0_TID);
+ peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
+
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "htt rx delba tid %hu peer_id %hu\n",
+ tid, peer_id);
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer) {
+ ath10k_warn("received delba event for invalid peer_id: %hu\n",
+ peer_id);
+ spin_unlock_bh(&ar->data_lock);
+ return;
+ }
+
+ arvif = ath10k_get_arvif(ar, peer->vdev_id);
+ if (!arvif) {
+ ath10k_warn("received delba event for invalid vdev_id: %u\n",
+ peer->vdev_id);
+ spin_unlock_bh(&ar->data_lock);
+ return;
+ }
+
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "htt rx stop rx ba session sta %pM tid %hu\n",
+ peer->addr, tid);
+
+ ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
+ spin_unlock_bh(&ar->data_lock);
+}
+
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
@@ -1516,9 +1604,25 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
trace_ath10k_htt_stats(skb->data, skb->len);
break;
case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
+ /* Firmware can return tx frames if it's unable to fully
+ * process them and suspects host may be able to fix it. ath10k
+ * sends all tx frames as already inspected so this shouldn't
+ * happen unless fw has a bug.
+ */
+ ath10k_warn("received an unexpected htt tx inspect event\n");
+ break;
case HTT_T2H_MSG_TYPE_RX_ADDBA:
+ ath10k_htt_rx_addba(ar, resp);
+ break;
case HTT_T2H_MSG_TYPE_RX_DELBA:
- case HTT_T2H_MSG_TYPE_RX_FLUSH:
+ ath10k_htt_rx_delba(ar, resp);
+ break;
+ case HTT_T2H_MSG_TYPE_RX_FLUSH: {
+ /* Ignore this event because mac80211 takes care of Rx
+ * aggregation reordering.
+ */
+ break;
+ }
default:
ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
resp->hdr.msg_type);
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index accb6b4f6faf..8b27bfcc1de3 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -531,6 +531,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+ /* Prevent firmware from sending up tx inspection requests. There's
+ * nothing ath10k can do with frames requested for inspection so force
+ * it to simply rely on a regular tx completion with discard status.
+ */
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
+
skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index b8314a534972..9d61bb157189 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -1865,15 +1865,13 @@ static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar,
return 0;
}
-/*
- * Frames sent to the FW have to be in "Native Wifi" format.
- * Strip the QoS field from the 802.11 header.
+/* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
+ * Control in the header.
*/
-static void ath10k_tx_h_qos_workaround(struct ieee80211_hw *hw,
- struct ieee80211_tx_control *control,
- struct sk_buff *skb)
+static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
u8 *qos_ctl;
if (!ieee80211_is_data_qos(hdr->frame_control))
@@ -1883,6 +1881,16 @@ static void ath10k_tx_h_qos_workaround(struct ieee80211_hw *hw,
memmove(skb->data + IEEE80211_QOS_CTL_LEN,
skb->data, (void *)qos_ctl - (void *)skb->data);
skb_pull(skb, IEEE80211_QOS_CTL_LEN);
+
+ /* Fw/Hw generates a corrupted QoS Control Field for QoS NullFunc
+ * frames. Powersave is handled by the fw/hw so QoS NullFunc frames are
+ * used only for CQM purposes (e.g. hostapd station keepalive ping) so
+ * it is safe to downgrade to NullFunc.
+ */
+ if (ieee80211_is_qos_nullfunc(hdr->frame_control)) {
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+ cb->htt.tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+ }
}
static void ath10k_tx_wep_key_work(struct work_struct *work)
@@ -1919,14 +1927,13 @@ unlock:
mutex_unlock(&arvif->ar->conf_mutex);
}
-static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
+static void ath10k_tx_h_update_wep_key(struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *key,
+ struct sk_buff *skb)
{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_vif *vif = info->control.vif;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct ath10k *ar = arvif->ar;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ieee80211_key_conf *key = info->control.hw_key;
if (!ieee80211_has_protected(hdr->frame_control))
return;
@@ -1948,11 +1955,11 @@ static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
ieee80211_queue_work(ar->hw, &arvif->wep_key_work);
}
-static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb)
+static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_vif *vif = info->control.vif;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
/* This is case only for P2P_GO */
@@ -2254,33 +2261,28 @@ static void ath10k_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
+ struct ath10k *ar = hw->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct ieee80211_key_conf *key = info->control.hw_key;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ath10k *ar = hw->priv;
- u8 tid, vdev_id;
/* We should disable CCK RATE due to P2P */
if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
ath10k_dbg(ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
- /* we must calculate tid before we apply qos workaround
- * as we'd lose the qos control field */
- tid = ath10k_tx_h_get_tid(hdr);
- vdev_id = ath10k_tx_h_get_vdev_id(ar, info);
+ ATH10K_SKB_CB(skb)->htt.is_offchan = false;
+ ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr);
+ ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, info);
/* it makes no sense to process injected frames like that */
- if (info->control.vif &&
- info->control.vif->type != NL80211_IFTYPE_MONITOR) {
- ath10k_tx_h_qos_workaround(hw, control, skb);
- ath10k_tx_h_update_wep_key(skb);
- ath10k_tx_h_add_p2p_noa_ie(ar, skb);
- ath10k_tx_h_seq_no(skb);
+ if (vif && vif->type != NL80211_IFTYPE_MONITOR) {
+ ath10k_tx_h_nwifi(hw, skb);
+ ath10k_tx_h_update_wep_key(vif, key, skb);
+ ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
+ ath10k_tx_h_seq_no(vif, skb);
}
- ATH10K_SKB_CB(skb)->vdev_id = vdev_id;
- ATH10K_SKB_CB(skb)->htt.is_offchan = false;
- ATH10K_SKB_CB(skb)->htt.tid = tid;
-
if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
spin_lock_bh(&ar->data_lock);
ATH10K_SKB_CB(skb)->htt.is_offchan = true;
@@ -4331,6 +4333,38 @@ static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
return 0;
}
+static int ath10k_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
+{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+ ath10k_dbg(ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n",
+ arvif->vdev_id, sta->addr, tid, action);
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ case IEEE80211_AMPDU_RX_STOP:
+ /* HTT AddBa/DelBa events trigger mac80211 Rx BA session
+ * creation/removal. Do we need to verify this?
+ */
+ return 0;
+ case IEEE80211_AMPDU_TX_START:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ /* Firmware offloads Tx aggregation entirely so deny mac80211
+ * Tx aggregation requests.
+ */
+ return -EOPNOTSUPP;
+ }
+
+ return -EINVAL;
+}
+
static const struct ieee80211_ops ath10k_ops = {
.tx = ath10k_tx,
.start = ath10k_start,
@@ -4358,6 +4392,7 @@ static const struct ieee80211_ops ath10k_ops = {
.set_bitrate_mask = ath10k_set_bitrate_mask,
.sta_rc_update = ath10k_sta_rc_update,
.get_tsf = ath10k_get_tsf,
+ .ampdu_action = ath10k_ampdu_action,
#ifdef CONFIG_PM
.suspend = ath10k_suspend,
.resume = ath10k_resume,
@@ -4698,7 +4733,6 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_AP);
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
@@ -4768,6 +4802,8 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->iface_combinations = ath10k_if_comb;
ar->hw->wiphy->n_iface_combinations =
ARRAY_SIZE(ath10k_if_comb);
+
+ ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
}
ar->hw->netdev_features = NETIF_F_HW_CSUM;
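ath10k_tx_h_nwifi() above strips the 2-byte QoS Control field by shifting the bytes in front of it forward and then advancing the frame start (the memmove followed by skb_pull). The standalone sketch below reproduces just that memmove-then-pull step on dummy bytes; it is not driver code and uses no real 802.11 header layout.

/* Illustration only: strip a 2-byte field that sits at qos_offset. */
#include <stdio.h>
#include <string.h>

#define QOS_CTL_LEN 2

int main(void)
{
	/* pretend: 6 header bytes, 2 QoS Control bytes, 2 payload bytes */
	unsigned char frame[] = { 'H','E','A','D','E','R', 'Q','C', 'P','L' };
	unsigned char *data = frame;
	size_t qos_offset = 6;			/* where the QoS field sits */

	/* shift everything before the QoS field forward over it ... */
	memmove(data + QOS_CTL_LEN, data, qos_offset);
	/* ... then "pull" the now-duplicated leading bytes */
	data += QOS_CTL_LEN;

	fwrite(data, 1, sizeof(frame) - QOS_CTL_LEN, stdout);
	putchar('\n');				/* prints: HEADERPL */
	return 0;
}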
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index ba1021997b8f..ef4f84376d7c 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -43,11 +43,11 @@ static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
return (struct ath10k_vif *)vif->drv_priv;
}
-static inline void ath10k_tx_h_seq_no(struct sk_buff *skb)
+static inline void ath10k_tx_h_seq_no(struct ieee80211_vif *vif,
+ struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ieee80211_vif *vif = info->control.vif;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 06840d101c45..0ffff205478d 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -726,18 +726,12 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
unsigned int nbytes, max_nbytes;
unsigned int transfer_id;
unsigned int flags;
- int err;
+ int err, num_replenish = 0;
while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
&ce_data, &nbytes, &transfer_id,
&flags) == 0) {
- err = ath10k_pci_post_rx_pipe(pipe_info, 1);
- if (unlikely(err)) {
- /* FIXME: retry */
- ath10k_warn("failed to replenish CE rx ring %d: %d\n",
- pipe_info->pipe_num, err);
- }
-
+ num_replenish++;
skb = transfer_context;
max_nbytes = skb->len + skb_tailroom(skb);
dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
@@ -753,6 +747,13 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
skb_put(skb, nbytes);
cb->rx_completion(ar, skb, pipe_info->pipe_num);
}
+
+ err = ath10k_pci_post_rx_pipe(pipe_info, num_replenish);
+ if (unlikely(err)) {
+ /* FIXME: retry */
+ ath10k_warn("failed to replenish CE rx ring %d (%d bufs): %d\n",
+ pipe_info->pipe_num, num_replenish, err);
+ }
}
static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 82669a77e553..f4fa22d1d591 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -119,8 +119,7 @@ struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
return NULL;
}
-static struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar,
- int peer_id)
+struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id)
{
struct ath10k_peer *peer;
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h
index aee3e20058f8..a90e09f5c7f2 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.h
+++ b/drivers/net/wireless/ath/ath10k/txrx.h
@@ -24,6 +24,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
const u8 *addr);
+struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id);
int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id,
const u8 *addr);
int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id,
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 6f83cae57655..c2c87c916b5a 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1432,7 +1432,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
continue;
}
- ath10k_tx_h_seq_no(bcn);
+ ath10k_tx_h_seq_no(arvif->vif, bcn);
ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info);
ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index be3eb2a8d602..4173838f4684 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -113,6 +113,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
irq = res->start;
+ ath9k_fill_chanctx_ops();
hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
if (hw == NULL) {
dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index d4c6ae3a9210..64a5b672e30a 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -132,35 +132,31 @@ config B43_PHY_G
SoC: BCM4712, BCM5352E
config B43_PHY_N
- bool "Support for 802.11n (N-PHY) devices"
+ bool "Support for N-PHY (the main 802.11n series) devices"
depends on B43
default y
---help---
- Support for the N-PHY.
-
- This enables support for devices with N-PHY.
-
- Say N if you expect high stability and performance. Saying Y will not
- affect other devices support and may provide support for basic needs.
+ This PHY type can be found in the following chipsets:
+ PCI: BCM4321, BCM4322,
+ BCM43222, BCM43224, BCM43225,
+ BCM43131, BCM43217, BCM43227, BCM43228
+ SoC: BCM4716, BCM4717, BCM4718, BCM5356, BCM5357, BCM5358
config B43_PHY_LP
- bool "Support for low-power (LP-PHY) devices"
+ bool "Support for LP-PHY (low-power 802.11g) devices"
depends on B43 && B43_SSB
default y
---help---
- Support for the LP-PHY.
The LP-PHY is a low-power PHY built into some notebooks
and embedded devices. It supports 802.11a/b/g
(802.11a support is optional, and currently disabled).
config B43_PHY_HT
- bool "Support for HT-PHY (high throughput) devices"
+ bool "Support for HT-PHY (high throughput 802.11n) devices"
depends on B43 && B43_BCMA
default y
---help---
- Support for the HT-PHY.
-
- Enables support for BCM4331 and possibly other chipsets with that PHY.
+ This PHY type with 3x3:3 MIMO can be found in the BCM4331 PCI chipset.
config B43_PHY_LCN
bool "Support for LCN-PHY devices (BROKEN)"
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index d7055febe119..2af1ac396eb4 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2985,7 +2985,8 @@ void b43_mac_switch_freq(struct b43_wldev *dev, u8 spurmode)
{
u16 chip_id = dev->dev->chip_id;
- if (chip_id == BCMA_CHIP_ID_BCM43217 ||
+ if (chip_id == BCMA_CHIP_ID_BCM43131 ||
+ chip_id == BCMA_CHIP_ID_BCM43217 ||
chip_id == BCMA_CHIP_ID_BCM43222 ||
chip_id == BCMA_CHIP_ID_BCM43224 ||
chip_id == BCMA_CHIP_ID_BCM43225 ||
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index d269fbb27b9e..e2a3f0d5bcc2 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -4982,7 +4982,8 @@ static void b43_nphy_int_pa_set_tx_dig_filters(struct b43_wldev *dev)
if (dev->phy.rev == 16)
b43_nphy_pa_set_tx_dig_filter(dev, 0x186, dig_filter_phy_rev16);
- if (dev->dev->chip_id == BCMA_CHIP_ID_BCM43217) {
+ /* Verified with BCM43131 and BCM43217 */
+ if (dev->phy.rev == 17) {
b43_nphy_pa_set_tx_dig_filter(dev, 0x186, dig_filter_phy_rev16);
b43_nphy_pa_set_tx_dig_filter(dev, 0x195,
tbl_tx_filter_coef_rev4[1]);
@@ -6216,6 +6217,9 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
u16 tmp16;
if (new_channel->band == IEEE80211_BAND_5GHZ) {
+ /* Switch to 2 GHz for a moment to access B43_PHY_B_BBCFG */
+ b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
+
tmp16 = b43_read16(dev, B43_MMIO_PSM_PHY_HDR);
b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4);
/* Put BPHY in the reset */
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index fcfed6b99a62..b8e2561ea645 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -48,6 +48,16 @@ config BRCMFMAC_USB
IEEE802.11n embedded FullMAC WLAN driver. Say Y if you want to
use the driver for an USB wireless card.
+config BRCMFMAC_PCIE
+ bool "PCIE bus interface support for FullMAC driver"
+ depends on BRCMFMAC
+ depends on PCI
+ select FW_LOADER
+ ---help---
+ This option enables the PCIE bus interface support for Broadcom
+ IEEE802.11ac embedded FullMAC WLAN driver. Say Y if you want to
+ use the driver for a PCIE wireless card.
+
config BRCM_TRACING
bool "Broadcom device tracing"
depends on BRCMSMAC || BRCMFMAC
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index de0cff3df389..c35adf4bc70b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -31,6 +31,9 @@ brcmfmac-objs += \
p2p.o \
proto.o \
bcdc.o \
+ commonring.o \
+ flowring.o \
+ msgbuf.o \
dhd_common.o \
dhd_linux.o \
firmware.o \
@@ -42,7 +45,11 @@ brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
bcmsdh.o
brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
usb.o
+brcmfmac-$(CONFIG_BRCMFMAC_PCIE) += \
+ pcie.o
brcmfmac-$(CONFIG_BRCMDBG) += \
dhd_dbg.o
brcmfmac-$(CONFIG_BRCM_TRACING) += \
tracepoint.o
+brcmfmac-$(CONFIG_OF) += \
+ of.o
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c
index c229210d50ba..a159ff3427de 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c
@@ -337,6 +337,23 @@ brcmf_proto_bcdc_txdata(struct brcmf_pub *drvr, int ifidx, u8 offset,
return brcmf_bus_txdata(drvr->bus_if, pktbuf);
}
+static void
+brcmf_proto_bcdc_configure_addr_mode(struct brcmf_pub *drvr, int ifidx,
+ enum proto_addr_mode addr_mode)
+{
+}
+
+static void
+brcmf_proto_bcdc_delete_peer(struct brcmf_pub *drvr, int ifidx,
+ u8 peer[ETH_ALEN])
+{
+}
+
+static void
+brcmf_proto_bcdc_add_tdls_peer(struct brcmf_pub *drvr, int ifidx,
+ u8 peer[ETH_ALEN])
+{
+}
int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr)
{
@@ -356,6 +373,9 @@ int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr)
drvr->proto->query_dcmd = brcmf_proto_bcdc_query_dcmd;
drvr->proto->set_dcmd = brcmf_proto_bcdc_set_dcmd;
drvr->proto->txdata = brcmf_proto_bcdc_txdata;
+ drvr->proto->configure_addr_mode = brcmf_proto_bcdc_configure_addr_mode;
+ drvr->proto->delete_peer = brcmf_proto_bcdc_delete_peer;
+ drvr->proto->add_tdls_peer = brcmf_proto_bcdc_add_tdls_peer;
drvr->proto->pd = bcdc;
drvr->hdrlen += BCDC_HEADER_LEN + BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index f467cafe3e8f..8dbd5dbb78fd 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -38,10 +38,13 @@
#include <brcm_hw_ids.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
+#include <chipcommon.h>
#include <soc.h>
+#include "chip.h"
#include "dhd_bus.h"
#include "dhd_dbg.h"
#include "sdio_host.h"
+#include "of.h"
#define SDIOH_API_ACCESS_RETRY_LIMIT 2
@@ -117,6 +120,7 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
{
int ret = 0;
u8 data;
+ u32 addr, gpiocontrol;
unsigned long flags;
if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) {
@@ -146,6 +150,19 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
sdio_claim_host(sdiodev->func[1]);
+ if (sdiodev->bus_if->chip == BRCM_CC_43362_CHIP_ID) {
+ /* assign GPIO to SDIO core */
+ addr = CORE_CC_REG(SI_ENUM_BASE, gpiocontrol);
+ gpiocontrol = brcmf_sdiod_regrl(sdiodev, addr, &ret);
+ gpiocontrol |= 0x2;
+ brcmf_sdiod_regwl(sdiodev, addr, gpiocontrol, &ret);
+
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_SELECT, 0xf,
+ &ret);
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_OUT, 0, &ret);
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_EN, 0x2, &ret);
+ }
+
/* must configure SDIO_CCCR_IENx to enable irq */
data = brcmf_sdiod_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
@@ -1044,6 +1061,9 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
sdiodev->dev = &sdiodev->func[1]->dev;
sdiodev->pdata = brcmfmac_sdio_pdata;
+ if (!sdiodev->pdata)
+ brcmf_of_probe(sdiodev);
+
atomic_set(&sdiodev->suspend, false);
init_waitqueue_head(&sdiodev->request_word_wait);
init_waitqueue_head(&sdiodev->request_buffer_wait);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
index 96800db0536b..95efde868db8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
@@ -506,9 +506,17 @@ static void brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci)
break;
case BRCM_CC_4339_CHIP_ID:
case BRCM_CC_4354_CHIP_ID:
+ case BRCM_CC_4356_CHIP_ID:
+ case BRCM_CC_43567_CHIP_ID:
+ case BRCM_CC_43569_CHIP_ID:
+ case BRCM_CC_43570_CHIP_ID:
ci->pub.ramsize = 0xc0000;
ci->pub.rambase = 0x180000;
break;
+ case BRCM_CC_43602_CHIP_ID:
+ ci->pub.ramsize = 0xf0000;
+ ci->pub.rambase = 0x180000;
+ break;
default:
brcmf_err("unknown chip: %s\n", ci->pub.name);
break;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/commonring.c b/drivers/net/wireless/brcm80211/brcmfmac/commonring.c
new file mode 100644
index 000000000000..c6d65b8e1e15
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/commonring.c
@@ -0,0 +1,273 @@
+/* Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+
+#include "dhd.h"
+#include "commonring.h"
+
+
+/* dma flushing needs implementation for mips and arm platforms. Should
+ * be put in util. Note, this is not real flushing. It is virtual non
+ * cached memory. Only write buffers should have to be drained. Though
+ * this may be different depending on platform......
+ * SEE ALSO msgbuf.c
+ */
+#define brcmf_dma_flush(addr, len)
+#define brcmf_dma_invalidate_cache(addr, len)
+
+
+void brcmf_commonring_register_cb(struct brcmf_commonring *commonring,
+ int (*cr_ring_bell)(void *ctx),
+ int (*cr_update_rptr)(void *ctx),
+ int (*cr_update_wptr)(void *ctx),
+ int (*cr_write_rptr)(void *ctx),
+ int (*cr_write_wptr)(void *ctx), void *ctx)
+{
+ commonring->cr_ring_bell = cr_ring_bell;
+ commonring->cr_update_rptr = cr_update_rptr;
+ commonring->cr_update_wptr = cr_update_wptr;
+ commonring->cr_write_rptr = cr_write_rptr;
+ commonring->cr_write_wptr = cr_write_wptr;
+ commonring->cr_ctx = ctx;
+}
+
+
+void brcmf_commonring_config(struct brcmf_commonring *commonring, u16 depth,
+ u16 item_len, void *buf_addr)
+{
+ commonring->depth = depth;
+ commonring->item_len = item_len;
+ commonring->buf_addr = buf_addr;
+ if (!commonring->inited) {
+ spin_lock_init(&commonring->lock);
+ commonring->inited = true;
+ }
+ commonring->r_ptr = 0;
+ if (commonring->cr_write_rptr)
+ commonring->cr_write_rptr(commonring->cr_ctx);
+ commonring->w_ptr = 0;
+ if (commonring->cr_write_wptr)
+ commonring->cr_write_wptr(commonring->cr_ctx);
+ commonring->f_ptr = 0;
+}
+
+
+void brcmf_commonring_lock(struct brcmf_commonring *commonring)
+ __acquires(&commonring->lock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&commonring->lock, flags);
+ commonring->flags = flags;
+}
+
+
+void brcmf_commonring_unlock(struct brcmf_commonring *commonring)
+ __releases(&commonring->lock)
+{
+ spin_unlock_irqrestore(&commonring->lock, commonring->flags);
+}
+
+
+bool brcmf_commonring_write_available(struct brcmf_commonring *commonring)
+{
+ u16 available;
+ bool retry = true;
+
+again:
+ if (commonring->r_ptr <= commonring->w_ptr)
+ available = commonring->depth - commonring->w_ptr +
+ commonring->r_ptr;
+ else
+ available = commonring->r_ptr - commonring->w_ptr;
+
+ if (available > 1) {
+ if (!commonring->was_full)
+ return true;
+ if (available > commonring->depth / 8) {
+ commonring->was_full = false;
+ return true;
+ }
+ if (retry) {
+ if (commonring->cr_update_rptr)
+ commonring->cr_update_rptr(commonring->cr_ctx);
+ retry = false;
+ goto again;
+ }
+ return false;
+ }
+
+ if (retry) {
+ if (commonring->cr_update_rptr)
+ commonring->cr_update_rptr(commonring->cr_ctx);
+ retry = false;
+ goto again;
+ }
+
+ commonring->was_full = true;
+ return false;
+}
+
+
+void *brcmf_commonring_reserve_for_write(struct brcmf_commonring *commonring)
+{
+ void *ret_ptr;
+ u16 available;
+ bool retry = true;
+
+again:
+ if (commonring->r_ptr <= commonring->w_ptr)
+ available = commonring->depth - commonring->w_ptr +
+ commonring->r_ptr;
+ else
+ available = commonring->r_ptr - commonring->w_ptr;
+
+ if (available > 1) {
+ ret_ptr = commonring->buf_addr +
+ (commonring->w_ptr * commonring->item_len);
+ commonring->w_ptr++;
+ if (commonring->w_ptr == commonring->depth)
+ commonring->w_ptr = 0;
+ return ret_ptr;
+ }
+
+ if (retry) {
+ if (commonring->cr_update_rptr)
+ commonring->cr_update_rptr(commonring->cr_ctx);
+ retry = false;
+ goto again;
+ }
+
+ commonring->was_full = true;
+ return NULL;
+}
+
+
+void *
+brcmf_commonring_reserve_for_write_multiple(struct brcmf_commonring *commonring,
+ u16 n_items, u16 *alloced)
+{
+ void *ret_ptr;
+ u16 available;
+ bool retry = true;
+
+again:
+ if (commonring->r_ptr <= commonring->w_ptr)
+ available = commonring->depth - commonring->w_ptr +
+ commonring->r_ptr;
+ else
+ available = commonring->r_ptr - commonring->w_ptr;
+
+ if (available > 1) {
+ ret_ptr = commonring->buf_addr +
+ (commonring->w_ptr * commonring->item_len);
+ *alloced = min_t(u16, n_items, available - 1);
+ if (*alloced + commonring->w_ptr > commonring->depth)
+ *alloced = commonring->depth - commonring->w_ptr;
+ commonring->w_ptr += *alloced;
+ if (commonring->w_ptr == commonring->depth)
+ commonring->w_ptr = 0;
+ return ret_ptr;
+ }
+
+ if (retry) {
+ if (commonring->cr_update_rptr)
+ commonring->cr_update_rptr(commonring->cr_ctx);
+ retry = false;
+ goto again;
+ }
+
+ commonring->was_full = true;
+ return NULL;
+}
+
+
+int brcmf_commonring_write_complete(struct brcmf_commonring *commonring)
+{
+ void *address;
+
+ address = commonring->buf_addr;
+ address += (commonring->f_ptr * commonring->item_len);
+ if (commonring->f_ptr > commonring->w_ptr) {
+ brcmf_dma_flush(address,
+ (commonring->depth - commonring->f_ptr) *
+ commonring->item_len);
+ address = commonring->buf_addr;
+ commonring->f_ptr = 0;
+ }
+ brcmf_dma_flush(address, (commonring->w_ptr - commonring->f_ptr) *
+ commonring->item_len);
+
+ commonring->f_ptr = commonring->w_ptr;
+
+ if (commonring->cr_write_wptr)
+ commonring->cr_write_wptr(commonring->cr_ctx);
+ if (commonring->cr_ring_bell)
+ return commonring->cr_ring_bell(commonring->cr_ctx);
+
+ return -EIO;
+}
+
+
+void brcmf_commonring_write_cancel(struct brcmf_commonring *commonring,
+ u16 n_items)
+{
+ if (commonring->w_ptr == 0)
+ commonring->w_ptr = commonring->depth - n_items;
+ else
+ commonring->w_ptr -= n_items;
+}
+
+
+void *brcmf_commonring_get_read_ptr(struct brcmf_commonring *commonring,
+ u16 *n_items)
+{
+ void *ret_addr;
+
+ if (commonring->cr_update_wptr)
+ commonring->cr_update_wptr(commonring->cr_ctx);
+
+ *n_items = (commonring->w_ptr >= commonring->r_ptr) ?
+ (commonring->w_ptr - commonring->r_ptr) :
+ (commonring->depth - commonring->r_ptr);
+
+ if (*n_items == 0)
+ return NULL;
+
+ ret_addr = commonring->buf_addr +
+ (commonring->r_ptr * commonring->item_len);
+
+ commonring->r_ptr += *n_items;
+ if (commonring->r_ptr == commonring->depth)
+ commonring->r_ptr = 0;
+
+ brcmf_dma_invalidate_cache(ret_addr, *n_items * commonring->item_len);
+
+ return ret_addr;
+}
+
+
+int brcmf_commonring_read_complete(struct brcmf_commonring *commonring)
+{
+ if (commonring->cr_write_rptr)
+ return commonring->cr_write_rptr(commonring->cr_ctx);
+
+ return -EIO;
+}
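brcmf_commonring_write_available() and the reserve helpers above use ordinary ring-buffer arithmetic: when the read pointer is at or behind the write pointer, the free space wraps around the end of the ring, and one slot is effectively kept unused (the "available > 1" test), so r_ptr == w_ptr always means "empty". The standalone sketch below shows just that computation; only the arithmetic mirrors the driver, everything else is illustrative.

/* Illustration only: free-slot computation for a wrap-around ring. */
#include <stdio.h>

struct ring {
	unsigned short r_ptr;
	unsigned short w_ptr;
	unsigned short depth;
};

static unsigned short ring_available(const struct ring *r)
{
	if (r->r_ptr <= r->w_ptr)
		return r->depth - r->w_ptr + r->r_ptr;
	return r->r_ptr - r->w_ptr;
}

int main(void)
{
	struct ring r = { .r_ptr = 3, .w_ptr = 7, .depth = 16 };

	/* 16 - 7 + 3 = 12 slots reported; since writers require
	 * available > 1, at most 11 of them are actually usable. */
	printf("available = %u\n", ring_available(&r));
	return 0;
}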
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/commonring.h b/drivers/net/wireless/brcm80211/brcmfmac/commonring.h
new file mode 100644
index 000000000000..002336e35764
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/commonring.h
@@ -0,0 +1,69 @@
+/* Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef BRCMFMAC_COMMONRING_H
+#define BRCMFMAC_COMMONRING_H
+
+
+struct brcmf_commonring {
+ u16 r_ptr;
+ u16 w_ptr;
+ u16 f_ptr;
+ u16 depth;
+ u16 item_len;
+
+ void *buf_addr;
+
+ int (*cr_ring_bell)(void *ctx);
+ int (*cr_update_rptr)(void *ctx);
+ int (*cr_update_wptr)(void *ctx);
+ int (*cr_write_rptr)(void *ctx);
+ int (*cr_write_wptr)(void *ctx);
+
+ void *cr_ctx;
+
+ spinlock_t lock;
+ unsigned long flags;
+ bool inited;
+ bool was_full;
+};
+
+
+void brcmf_commonring_register_cb(struct brcmf_commonring *commonring,
+ int (*cr_ring_bell)(void *ctx),
+ int (*cr_update_rptr)(void *ctx),
+ int (*cr_update_wptr)(void *ctx),
+ int (*cr_write_rptr)(void *ctx),
+ int (*cr_write_wptr)(void *ctx), void *ctx);
+void brcmf_commonring_config(struct brcmf_commonring *commonring, u16 depth,
+ u16 item_len, void *buf_addr);
+void brcmf_commonring_lock(struct brcmf_commonring *commonring);
+void brcmf_commonring_unlock(struct brcmf_commonring *commonring);
+bool brcmf_commonring_write_available(struct brcmf_commonring *commonring);
+void *brcmf_commonring_reserve_for_write(struct brcmf_commonring *commonring);
+void *
+brcmf_commonring_reserve_for_write_multiple(struct brcmf_commonring *commonring,
+ u16 n_items, u16 *alloced);
+int brcmf_commonring_write_complete(struct brcmf_commonring *commonring);
+void brcmf_commonring_write_cancel(struct brcmf_commonring *commonring,
+ u16 n_items);
+void *brcmf_commonring_get_read_ptr(struct brcmf_commonring *commonring,
+ u16 *n_items);
+int brcmf_commonring_read_complete(struct brcmf_commonring *commonring);
+
+#define brcmf_commonring_n_items(commonring) (commonring->depth)
+#define brcmf_commonring_len_item(commonring) (commonring->item_len)
+
+
+#endif /* BRCMFMAC_COMMONRING_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 7da6441bcfa8..5e4317dbc2b0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -121,12 +121,12 @@ struct brcmf_fws_mac_descriptor;
*
* @BRCMF_NETIF_STOP_REASON_FWS_FC:
* netif stopped due to firmware signalling flow control.
- * @BRCMF_NETIF_STOP_REASON_BLOCK_BUS:
- * netif stopped due to bus blocking.
+ * @BRCMF_NETIF_STOP_REASON_FLOW:
+ * netif stopped due to flowring full.
*/
enum brcmf_netif_stop_reason {
BRCMF_NETIF_STOP_REASON_FWS_FC = 1,
- BRCMF_NETIF_STOP_REASON_BLOCK_BUS = 2
+ BRCMF_NETIF_STOP_REASON_FLOW = 2
};
/**
@@ -181,6 +181,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
enum brcmf_netif_stop_reason reason, bool state);
void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx,
bool success);
+void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
/* Sets dongle media info (drv_version, mac address). */
int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index 7735328fff21..3122b86050a1 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -19,6 +19,18 @@
#include "dhd_dbg.h"
+/* IDs of the 6 default common rings of msgbuf protocol */
+#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT 0
+#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT 1
+#define BRCMF_D2H_MSGRING_CONTROL_COMPLETE 2
+#define BRCMF_D2H_MSGRING_TX_COMPLETE 3
+#define BRCMF_D2H_MSGRING_RX_COMPLETE 4
+
+#define BRCMF_NROF_H2D_COMMON_MSGRINGS 2
+#define BRCMF_NROF_D2H_COMMON_MSGRINGS 3
+#define BRCMF_NROF_COMMON_MSGRINGS (BRCMF_NROF_H2D_COMMON_MSGRINGS + \
+ BRCMF_NROF_D2H_COMMON_MSGRINGS)
+
/* The level of bus communication with the dongle */
enum brcmf_bus_state {
BRCMF_BUS_UNKNOWN, /* Not determined yet */
@@ -70,6 +82,25 @@ struct brcmf_bus_ops {
struct pktq * (*gettxq)(struct device *dev);
};
+
+/**
+ * struct brcmf_bus_msgbuf - bus ringbuf if in case of msgbuf.
+ *
+ * @commonrings: commonrings which are always there.
+ * @flowrings: commonrings which are dynamically created and destroyed for data.
+ * @rx_dataoffset: if set then all rx data has this offset.
+ * @max_rxbufpost: maximum number of buffers to post for rx.
+ * @nrof_flowrings: number of flowrings.
+ */
+struct brcmf_bus_msgbuf {
+ struct brcmf_commonring *commonrings[BRCMF_NROF_COMMON_MSGRINGS];
+ struct brcmf_commonring **flowrings;
+ u32 rx_dataoffset;
+ u32 max_rxbufpost;
+ u32 nrof_flowrings;
+};
+
+
/**
* struct brcmf_bus - interface structure between common and bus layer
*
@@ -89,6 +120,7 @@ struct brcmf_bus {
union {
struct brcmf_sdio_dev *sdio;
struct brcmf_usbdev *usb;
+ struct brcmf_pciedev *pcie;
} bus_priv;
enum brcmf_bus_protocol_type proto_type;
struct device *dev;
@@ -101,6 +133,7 @@ struct brcmf_bus {
bool always_use_fws_queue;
struct brcmf_bus_ops *ops;
+ struct brcmf_bus_msgbuf *msgbuf;
};
/*
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
index 6eade7c60c63..dec40d316c82 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -18,23 +18,25 @@
#define _BRCMF_DBG_H_
/* message levels */
-#define BRCMF_TRACE_VAL 0x00000002
-#define BRCMF_INFO_VAL 0x00000004
-#define BRCMF_DATA_VAL 0x00000008
-#define BRCMF_CTL_VAL 0x00000010
-#define BRCMF_TIMER_VAL 0x00000020
-#define BRCMF_HDRS_VAL 0x00000040
-#define BRCMF_BYTES_VAL 0x00000080
-#define BRCMF_INTR_VAL 0x00000100
-#define BRCMF_GLOM_VAL 0x00000200
-#define BRCMF_EVENT_VAL 0x00000400
-#define BRCMF_BTA_VAL 0x00000800
-#define BRCMF_FIL_VAL 0x00001000
-#define BRCMF_USB_VAL 0x00002000
-#define BRCMF_SCAN_VAL 0x00004000
-#define BRCMF_CONN_VAL 0x00008000
-#define BRCMF_BCDC_VAL 0x00010000
-#define BRCMF_SDIO_VAL 0x00020000
+#define BRCMF_TRACE_VAL 0x00000002
+#define BRCMF_INFO_VAL 0x00000004
+#define BRCMF_DATA_VAL 0x00000008
+#define BRCMF_CTL_VAL 0x00000010
+#define BRCMF_TIMER_VAL 0x00000020
+#define BRCMF_HDRS_VAL 0x00000040
+#define BRCMF_BYTES_VAL 0x00000080
+#define BRCMF_INTR_VAL 0x00000100
+#define BRCMF_GLOM_VAL 0x00000200
+#define BRCMF_EVENT_VAL 0x00000400
+#define BRCMF_BTA_VAL 0x00000800
+#define BRCMF_FIL_VAL 0x00001000
+#define BRCMF_USB_VAL 0x00002000
+#define BRCMF_SCAN_VAL 0x00004000
+#define BRCMF_CONN_VAL 0x00008000
+#define BRCMF_BCDC_VAL 0x00010000
+#define BRCMF_SDIO_VAL 0x00020000
+#define BRCMF_MSGBUF_VAL 0x00040000
+#define BRCMF_PCIE_VAL 0x00080000
/* set default print format */
#undef pr_fmt
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 347b4260f45b..fb1043908a23 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -32,6 +32,7 @@
#include "fwsignal.h"
#include "feature.h"
#include "proto.h"
+#include "pcie.h"
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
@@ -288,7 +289,7 @@ void brcmf_txflowblock(struct device *dev, bool state)
brcmf_fws_bus_blocked(drvr, state);
}
-static void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
+void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
{
skb->dev = ifp->ndev;
skb->protocol = eth_type_trans(skb, skb->dev);
@@ -1085,6 +1086,9 @@ static void brcmf_driver_register(struct work_struct *work)
#ifdef CONFIG_BRCMFMAC_USB
brcmf_usb_register();
#endif
+#ifdef CONFIG_BRCMFMAC_PCIE
+ brcmf_pcie_register();
+#endif
}
static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);
@@ -1110,6 +1114,9 @@ static void __exit brcmfmac_module_exit(void)
#ifdef CONFIG_BRCMFMAC_USB
brcmf_usb_exit();
#endif
+#ifdef CONFIG_BRCMFMAC_PCIE
+ brcmf_pcie_exit();
+#endif
brcmf_debugfs_exit();
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 67d91d5cc13d..f55f625fd06b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -670,6 +670,8 @@ static int brcmf_sdio_get_fwnames(struct brcmf_chip *ci,
struct brcmf_sdio_dev *sdiodev)
{
int i;
+ uint fw_len, nv_len;
+ char end;
for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) {
if (brcmf_fwname_data[i].chipid == ci->chip &&
@@ -682,16 +684,25 @@ static int brcmf_sdio_get_fwnames(struct brcmf_chip *ci,
return -ENODEV;
}
+ fw_len = sizeof(sdiodev->fw_name) - 1;
+ nv_len = sizeof(sdiodev->nvram_name) - 1;
/* check if firmware path is provided by module parameter */
if (brcmf_firmware_path[0] != '\0') {
- if (brcmf_firmware_path[strlen(brcmf_firmware_path) - 1] != '/')
- strcat(brcmf_firmware_path, "/");
-
- strcpy(sdiodev->fw_name, brcmf_firmware_path);
- strcpy(sdiodev->nvram_name, brcmf_firmware_path);
+ strncpy(sdiodev->fw_name, brcmf_firmware_path, fw_len);
+ strncpy(sdiodev->nvram_name, brcmf_firmware_path, nv_len);
+ fw_len -= strlen(sdiodev->fw_name);
+ nv_len -= strlen(sdiodev->nvram_name);
+
+ end = brcmf_firmware_path[strlen(brcmf_firmware_path) - 1];
+ if (end != '/') {
+ strncat(sdiodev->fw_name, "/", fw_len);
+ strncat(sdiodev->nvram_name, "/", nv_len);
+ fw_len--;
+ nv_len--;
+ }
}
- strcat(sdiodev->fw_name, brcmf_fwname_data[i].bin);
- strcat(sdiodev->nvram_name, brcmf_fwname_data[i].nv);
+ strncat(sdiodev->fw_name, brcmf_fwname_data[i].bin, fw_len);
+ strncat(sdiodev->nvram_name, brcmf_fwname_data[i].nv, nv_len);
return 0;
}
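
The hunk above replaces unbounded strcat()/strcpy() with length-tracked strncpy()/strncat() so the fixed-size fw_name and nvram_name buffers cannot overflow when a firmware path is supplied as a module parameter. Below is a minimal standalone sketch of the same remaining-room pattern; build_fw_path() and PATH_MAX_LEN are hypothetical names, not driver code.

/* Bounded path building: track the room left in a fixed buffer and
 * never append past it. Output is silently truncated if too long.
 */
#include <stdio.h>
#include <string.h>

#define PATH_MAX_LEN 64

static void build_fw_path(char dst[PATH_MAX_LEN], const char *dir,
                          const char *file)
{
    size_t room = PATH_MAX_LEN - 1;   /* leave space for '\0' */

    dst[0] = '\0';
    strncat(dst, dir, room);
    room -= strlen(dst);
    if (dst[0] != '\0' && dst[strlen(dst) - 1] != '/') {
        strncat(dst, "/", room);
        if (room)
            room--;
    }
    strncat(dst, file, room);
}

int main(void)
{
    char path[PATH_MAX_LEN];

    build_fw_path(path, "/lib/firmware/brcm", "brcmfmac4329-sdio.bin");
    printf("%s\n", path);
    return 0;
}
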
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
new file mode 100644
index 000000000000..a1016b811284
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
@@ -0,0 +1,501 @@
+/* Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <brcmu_utils.h>
+
+#include "dhd.h"
+#include "dhd_dbg.h"
+#include "dhd_bus.h"
+#include "proto.h"
+#include "flowring.h"
+#include "msgbuf.h"
+
+
+#define BRCMF_FLOWRING_HIGH 1024
+#define BRCMF_FLOWRING_LOW (BRCMF_FLOWRING_HIGH - 256)
+#define BRCMF_FLOWRING_INVALID_IFIDX 0xff
+
+#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] + fifo + ifidx * 16)
+#define BRCMF_FLOWRING_HASH_STA(fifo, ifidx) (fifo + ifidx * 16)
+
+static const u8 ALLZEROMAC[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
+static const u8 ALLFFMAC[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
+static const u8 brcmf_flowring_prio2fifo[] = {
+ 1,
+ 0,
+ 0,
+ 1,
+ 2,
+ 2,
+ 3,
+ 3
+};
+
+
+static bool
+brcmf_flowring_is_tdls_mac(struct brcmf_flowring *flow, u8 mac[ETH_ALEN])
+{
+ struct brcmf_flowring_tdls_entry *search;
+
+ search = flow->tdls_entry;
+
+ while (search) {
+ if (memcmp(search->mac, mac, ETH_ALEN) == 0)
+ return true;
+ search = search->next;
+ }
+
+ return false;
+}
+
+
+u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
+ u8 prio, u8 ifidx)
+{
+ struct brcmf_flowring_hash *hash;
+ u8 hash_idx;
+ u32 i;
+ bool found;
+ bool sta;
+ u8 fifo;
+ u8 *mac;
+
+ fifo = brcmf_flowring_prio2fifo[prio];
+ sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);
+ mac = da;
+ if ((!sta) && (is_multicast_ether_addr(da))) {
+ mac = (u8 *)ALLFFMAC;
+ fifo = 0;
+ }
+ if ((sta) && (flow->tdls_active) &&
+ (brcmf_flowring_is_tdls_mac(flow, da))) {
+ sta = false;
+ }
+ hash_idx = sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
+ BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
+ found = false;
+ hash = flow->hash;
+ for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
+ if ((sta || (memcmp(hash[hash_idx].mac, mac, ETH_ALEN) == 0)) &&
+ (hash[hash_idx].fifo == fifo) &&
+ (hash[hash_idx].ifidx == ifidx)) {
+ found = true;
+ break;
+ }
+ hash_idx++;
+ }
+ if (found)
+ return hash[hash_idx].flowid;
+
+ return BRCMF_FLOWRING_INVALID_ID;
+}
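
brcmf_flowring_lookup() above resolves (destination MAC, fifo, ifidx) to a flow ID via a fixed 256-entry open-addressed hash with linear probing (the u8 index wraps around for free); in STA mode the MAC compare is skipped. A self-contained sketch of that probe, with a hypothetical slot layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TBL_SIZE   256          /* power of two: u8 index wraps mod 256 */
#define INVALID_ID 0xFFFFFFFFu

struct slot {
    uint8_t  mac[6];
    uint8_t  fifo;
    uint8_t  ifidx;
    uint8_t  in_use;
    uint32_t flowid;
};

static uint32_t lookup(const struct slot tbl[TBL_SIZE], const uint8_t mac[6],
                       uint8_t fifo, uint8_t ifidx)
{
    /* start bucket derived from address, fifo and interface index */
    uint8_t idx = (uint8_t)(mac[5] + fifo + ifidx * 16);
    int i;

    for (i = 0; i < TBL_SIZE; i++, idx++) {   /* linear probe, wraps */
        const struct slot *s = &tbl[idx];

        if (s->in_use && s->fifo == fifo && s->ifidx == ifidx &&
            memcmp(s->mac, mac, 6) == 0)
            return s->flowid;
    }
    return INVALID_ID;
}

int main(void)
{
    static struct slot tbl[TBL_SIZE];
    const uint8_t mac[6] = { 0, 1, 2, 3, 4, 5 };
    uint8_t idx = (uint8_t)(mac[5] + 1 + 0 * 16);

    tbl[idx].in_use = 1;
    tbl[idx].fifo = 1;
    tbl[idx].ifidx = 0;
    memcpy(tbl[idx].mac, mac, 6);
    tbl[idx].flowid = 7;

    printf("flowid=%u\n", lookup(tbl, mac, 1, 0));
    return 0;
}
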
+
+
+u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
+ u8 prio, u8 ifidx)
+{
+ struct brcmf_flowring_ring *ring;
+ struct brcmf_flowring_hash *hash;
+ u8 hash_idx;
+ u32 i;
+ bool found;
+ u8 fifo;
+ bool sta;
+ u8 *mac;
+
+ fifo = brcmf_flowring_prio2fifo[prio];
+ sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);
+ mac = da;
+ if ((!sta) && (is_multicast_ether_addr(da))) {
+ mac = (u8 *)ALLFFMAC;
+ fifo = 0;
+ }
+ if ((sta) && (flow->tdls_active) &&
+ (brcmf_flowring_is_tdls_mac(flow, da))) {
+ sta = false;
+ }
+ hash_idx = sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
+ BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
+ found = false;
+ hash = flow->hash;
+ for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
+ if ((hash[hash_idx].ifidx == BRCMF_FLOWRING_INVALID_IFIDX) &&
+ (memcmp(hash[hash_idx].mac, ALLZEROMAC, ETH_ALEN) == 0)) {
+ found = true;
+ break;
+ }
+ hash_idx++;
+ }
+ if (found) {
+ for (i = 0; i < flow->nrofrings; i++) {
+ if (flow->rings[i] == NULL)
+ break;
+ }
+ if (i == flow->nrofrings)
+ return -ENOMEM;
+
+ ring = kzalloc(sizeof(*ring), GFP_ATOMIC);
+ if (!ring)
+ return -ENOMEM;
+
+ memcpy(hash[hash_idx].mac, mac, ETH_ALEN);
+ hash[hash_idx].fifo = fifo;
+ hash[hash_idx].ifidx = ifidx;
+ hash[hash_idx].flowid = i;
+
+ ring->hash_id = hash_idx;
+ ring->status = RING_CLOSED;
+ skb_queue_head_init(&ring->skblist);
+ flow->rings[i] = ring;
+
+ return i;
+ }
+ return BRCMF_FLOWRING_INVALID_ID;
+}
+
+
+u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid)
+{
+ struct brcmf_flowring_ring *ring;
+
+ ring = flow->rings[flowid];
+
+ return flow->hash[ring->hash_id].fifo;
+}
+
+
+static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid,
+ bool blocked)
+{
+ struct brcmf_flowring_ring *ring;
+ struct brcmf_bus *bus_if;
+ struct brcmf_pub *drvr;
+ struct brcmf_if *ifp;
+ bool currently_blocked;
+ int i;
+ u8 ifidx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&flow->block_lock, flags);
+
+ ring = flow->rings[flowid];
+ ifidx = brcmf_flowring_ifidx_get(flow, flowid);
+
+ currently_blocked = false;
+ for (i = 0; i < flow->nrofrings; i++) {
+ if (flow->rings[i]) {
+ ring = flow->rings[i];
+ if ((ring->status == RING_OPEN) &&
+ (brcmf_flowring_ifidx_get(flow, i) == ifidx)) {
+ if (ring->blocked) {
+ currently_blocked = true;
+ break;
+ }
+ }
+ }
+ }
+ ring->blocked = blocked;
+ if (currently_blocked == blocked) {
+ spin_unlock_irqrestore(&flow->block_lock, flags);
+ return;
+ }
+
+ bus_if = dev_get_drvdata(flow->dev);
+ drvr = bus_if->drvr;
+ ifp = drvr->iflist[ifidx];
+ brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_FLOW, blocked);
+
+ spin_unlock_irqrestore(&flow->block_lock, flags);
+}
+
+
+void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid)
+{
+ struct brcmf_flowring_ring *ring;
+ u8 hash_idx;
+ struct sk_buff *skb;
+
+ ring = flow->rings[flowid];
+ if (!ring)
+ return;
+ brcmf_flowring_block(flow, flowid, false);
+ hash_idx = ring->hash_id;
+ flow->hash[hash_idx].ifidx = BRCMF_FLOWRING_INVALID_IFIDX;
+ memset(flow->hash[hash_idx].mac, 0, ETH_ALEN);
+ flow->rings[flowid] = NULL;
+
+ skb = skb_dequeue(&ring->skblist);
+ while (skb) {
+ brcmu_pkt_buf_free_skb(skb);
+ skb = skb_dequeue(&ring->skblist);
+ }
+
+ kfree(ring);
+}
+
+
+void brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
+ struct sk_buff *skb)
+{
+ struct brcmf_flowring_ring *ring;
+
+ ring = flow->rings[flowid];
+
+ skb_queue_tail(&ring->skblist, skb);
+
+ if (!ring->blocked &&
+ (skb_queue_len(&ring->skblist) > BRCMF_FLOWRING_HIGH)) {
+ brcmf_flowring_block(flow, flowid, true);
+ brcmf_dbg(MSGBUF, "Flowcontrol: BLOCK for ring %d\n", flowid);
+ /* To prevent (work around) a possible race condition, check the
+ * queue length again. Locking could be used instead, but that is
+ * undesirable on every enqueue and dequeue. This simple re-check
+ * resolves the race should it occur.
+ */
+ if (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW)
+ brcmf_flowring_block(flow, flowid, false);
+ }
+}
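
brcmf_flowring_enqueue()/dequeue() above implement flow control with high/low watermark hysteresis, plus a re-check after blocking to cover a racing dequeue without taking a lock per packet. A simplified, single-threaded sketch of the pattern (hypothetical names):

#include <stdbool.h>
#include <stdio.h>

#define Q_HIGH 1024
#define Q_LOW  (Q_HIGH - 256)

struct queue {
    int len;
    bool blocked;
};

static void queue_enqueue(struct queue *q)
{
    q->len++;
    if (!q->blocked && q->len > Q_HIGH) {
        q->blocked = true;
        /* a concurrent drain may have raced in between; re-check */
        if (q->len < Q_LOW)
            q->blocked = false;
    }
}

static void queue_dequeue(struct queue *q)
{
    if (q->len)
        q->len--;
    if (q->blocked && q->len < Q_LOW)
        q->blocked = false;
}

int main(void)
{
    struct queue q = { 0, false };
    int i;

    for (i = 0; i < Q_HIGH + 2; i++)
        queue_enqueue(&q);
    printf("after burst: len=%d blocked=%d\n", q.len, q.blocked);
    while (q.len >= Q_LOW)
        queue_dequeue(&q);
    printf("after drain: len=%d blocked=%d\n", q.len, q.blocked);
    return 0;
}
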
+
+
+struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid)
+{
+ struct brcmf_flowring_ring *ring;
+ struct sk_buff *skb;
+
+ ring = flow->rings[flowid];
+ if (ring->status != RING_OPEN)
+ return NULL;
+
+ skb = skb_dequeue(&ring->skblist);
+
+ if (ring->blocked &&
+ (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW)) {
+ brcmf_flowring_block(flow, flowid, false);
+ brcmf_dbg(MSGBUF, "Flowcontrol: OPEN for ring %d\n", flowid);
+ }
+
+ return skb;
+}
+
+
+void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
+ struct sk_buff *skb)
+{
+ struct brcmf_flowring_ring *ring;
+
+ ring = flow->rings[flowid];
+
+ skb_queue_head(&ring->skblist, skb);
+}
+
+
+u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid)
+{
+ struct brcmf_flowring_ring *ring;
+
+ ring = flow->rings[flowid];
+ if (!ring)
+ return 0;
+
+ if (ring->status != RING_OPEN)
+ return 0;
+
+ return skb_queue_len(&ring->skblist);
+}
+
+
+void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid)
+{
+ struct brcmf_flowring_ring *ring;
+
+ ring = flow->rings[flowid];
+ if (!ring) {
+ brcmf_err("Ring NULL, for flowid %d\n", flowid);
+ return;
+ }
+
+ ring->status = RING_OPEN;
+}
+
+
+u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u8 flowid)
+{
+ struct brcmf_flowring_ring *ring;
+ u8 hash_idx;
+
+ ring = flow->rings[flowid];
+ hash_idx = ring->hash_id;
+
+ return flow->hash[hash_idx].ifidx;
+}
+
+
+struct brcmf_flowring *brcmf_flowring_attach(struct device *dev, u16 nrofrings)
+{
+ struct brcmf_flowring *flow;
+ u32 i;
+
+ flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
+ if (flow) {
+ flow->dev = dev;
+ flow->nrofrings = nrofrings;
+ spin_lock_init(&flow->block_lock);
+ for (i = 0; i < ARRAY_SIZE(flow->addr_mode); i++)
+ flow->addr_mode[i] = ADDR_INDIRECT;
+ for (i = 0; i < ARRAY_SIZE(flow->hash); i++)
+ flow->hash[i].ifidx = BRCMF_FLOWRING_INVALID_IFIDX;
+ flow->rings = kcalloc(nrofrings, sizeof(*flow->rings),
+ GFP_ATOMIC);
+ if (!flow->rings) {
+ kfree(flow);
+ flow = NULL;
+ }
+ }
+
+ return flow;
+}
+
+
+void brcmf_flowring_detach(struct brcmf_flowring *flow)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
+ struct brcmf_flowring_tdls_entry *search;
+ struct brcmf_flowring_tdls_entry *remove;
+ u8 flowid;
+
+ for (flowid = 0; flowid < flow->nrofrings; flowid++) {
+ if (flow->rings[flowid])
+ brcmf_msgbuf_delete_flowring(drvr, flowid);
+ }
+
+ search = flow->tdls_entry;
+ while (search) {
+ remove = search;
+ search = search->next;
+ kfree(remove);
+ }
+ kfree(flow->rings);
+ kfree(flow);
+}
+
+
+void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
+ enum proto_addr_mode addr_mode)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
+ u32 i;
+ u8 flowid;
+
+ if (flow->addr_mode[ifidx] != addr_mode) {
+ for (i = 0; i < ARRAY_SIZE(flow->hash); i++) {
+ if (flow->hash[i].ifidx == ifidx) {
+ flowid = flow->hash[i].flowid;
+ if (flow->rings[flowid]->status != RING_OPEN)
+ continue;
+ flow->rings[flowid]->status = RING_CLOSING;
+ brcmf_msgbuf_delete_flowring(drvr, flowid);
+ }
+ }
+ flow->addr_mode[ifidx] = addr_mode;
+ }
+}
+
+
+void brcmf_flowring_delete_peer(struct brcmf_flowring *flow, int ifidx,
+ u8 peer[ETH_ALEN])
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
+ struct brcmf_flowring_hash *hash;
+ struct brcmf_flowring_tdls_entry *prev;
+ struct brcmf_flowring_tdls_entry *search;
+ u32 i;
+ u8 flowid;
+ bool sta;
+
+ sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);
+
+ search = flow->tdls_entry;
+ prev = NULL;
+ while (search) {
+ if (memcmp(search->mac, peer, ETH_ALEN) == 0) {
+ sta = false;
+ break;
+ }
+ prev = search;
+ search = search->next;
+ }
+
+ hash = flow->hash;
+ for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
+ if ((sta || (memcmp(hash[i].mac, peer, ETH_ALEN) == 0)) &&
+ (hash[i].ifidx == ifidx)) {
+ flowid = flow->hash[i].flowid;
+ if (flow->rings[flowid]->status == RING_OPEN) {
+ flow->rings[flowid]->status = RING_CLOSING;
+ brcmf_msgbuf_delete_flowring(drvr, flowid);
+ }
+ }
+ }
+
+ if (search) {
+ if (prev)
+ prev->next = search->next;
+ else
+ flow->tdls_entry = search->next;
+ kfree(search);
+ if (flow->tdls_entry == NULL)
+ flow->tdls_active = false;
+ }
+}
+
+
+void brcmf_flowring_add_tdls_peer(struct brcmf_flowring *flow, int ifidx,
+ u8 peer[ETH_ALEN])
+{
+ struct brcmf_flowring_tdls_entry *tdls_entry;
+ struct brcmf_flowring_tdls_entry *search;
+
+ tdls_entry = kzalloc(sizeof(*tdls_entry), GFP_ATOMIC);
+ if (tdls_entry == NULL)
+ return;
+
+ memcpy(tdls_entry->mac, peer, ETH_ALEN);
+ tdls_entry->next = NULL;
+ if (flow->tdls_entry == NULL) {
+ flow->tdls_entry = tdls_entry;
+ } else {
+ search = flow->tdls_entry;
+ if (memcmp(search->mac, peer, ETH_ALEN) == 0)
+ return;
+ while (search->next) {
+ search = search->next;
+ if (memcmp(search->mac, peer, ETH_ALEN) == 0)
+ return;
+ }
+ search->next = tdls_entry;
+ }
+
+ flow->tdls_active = true;
+}
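
The TDLS helpers above keep peers in a simple singly linked list: add rejects duplicates, delete re-links the predecessor and drops tdls_active once the list empties. An illustrative userspace sketch with hypothetical names:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct peer {
    unsigned char mac[6];
    struct peer *next;
};

struct peer_list {
    struct peer *head;
    bool active;
};

static bool peer_add(struct peer_list *l, const unsigned char mac[6])
{
    struct peer *p, **pp;

    for (pp = &l->head; *pp; pp = &(*pp)->next)
        if (!memcmp((*pp)->mac, mac, 6))
            return false;           /* already present */

    p = calloc(1, sizeof(*p));
    if (!p)
        return false;
    memcpy(p->mac, mac, 6);
    *pp = p;                        /* append at the tail */
    l->active = true;
    return true;
}

static void peer_del(struct peer_list *l, const unsigned char mac[6])
{
    struct peer *p, **pp;

    for (pp = &l->head; (p = *pp); pp = &p->next) {
        if (!memcmp(p->mac, mac, 6)) {
            *pp = p->next;          /* unlink */
            free(p);
            break;
        }
    }
    if (!l->head)
        l->active = false;
}

int main(void)
{
    struct peer_list l = { NULL, false };
    const unsigned char a[6] = { 1, 2, 3, 4, 5, 6 };

    peer_add(&l, a);
    printf("active=%d\n", l.active);
    peer_del(&l, a);
    printf("active=%d\n", l.active);
    return 0;
}
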
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.h b/drivers/net/wireless/brcm80211/brcmfmac/flowring.h
new file mode 100644
index 000000000000..a34cd394c616
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.h
@@ -0,0 +1,84 @@
+/* Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef BRCMFMAC_FLOWRING_H
+#define BRCMFMAC_FLOWRING_H
+
+
+#define BRCMF_FLOWRING_HASHSIZE 256
+#define BRCMF_FLOWRING_INVALID_ID 0xFFFFFFFF
+
+
+struct brcmf_flowring_hash {
+ u8 mac[ETH_ALEN];
+ u8 fifo;
+ u8 ifidx;
+ u8 flowid;
+};
+
+enum ring_status {
+ RING_CLOSED,
+ RING_CLOSING,
+ RING_OPEN
+};
+
+struct brcmf_flowring_ring {
+ u8 hash_id;
+ bool blocked;
+ enum ring_status status;
+ struct sk_buff_head skblist;
+};
+
+struct brcmf_flowring_tdls_entry {
+ u8 mac[ETH_ALEN];
+ struct brcmf_flowring_tdls_entry *next;
+};
+
+struct brcmf_flowring {
+ struct device *dev;
+ struct brcmf_flowring_hash hash[BRCMF_FLOWRING_HASHSIZE];
+ struct brcmf_flowring_ring **rings;
+ spinlock_t block_lock;
+ enum proto_addr_mode addr_mode[BRCMF_MAX_IFS];
+ u16 nrofrings;
+ bool tdls_active;
+ struct brcmf_flowring_tdls_entry *tdls_entry;
+};
+
+
+u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
+ u8 prio, u8 ifidx);
+u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
+ u8 prio, u8 ifidx);
+void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid);
+void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid);
+u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid);
+void brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
+ struct sk_buff *skb);
+struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid);
+void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
+ struct sk_buff *skb);
+u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid);
+u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u8 flowid);
+struct brcmf_flowring *brcmf_flowring_attach(struct device *dev, u16 nrofrings);
+void brcmf_flowring_detach(struct brcmf_flowring *flow);
+void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
+ enum proto_addr_mode addr_mode);
+void brcmf_flowring_delete_peer(struct brcmf_flowring *flow, int ifidx,
+ u8 peer[ETH_ALEN]);
+void brcmf_flowring_add_tdls_peer(struct brcmf_flowring *flow, int ifidx,
+ u8 peer[ETH_ALEN]);
+
+
+#endif /* BRCMFMAC_FLOWRING_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
index fad77dd2a3a5..4f1daabc551b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
@@ -293,7 +293,11 @@ static void brcmf_fweh_event_worker(struct work_struct *work)
goto event_free;
}
- ifp = drvr->iflist[emsg.bsscfgidx];
+ if ((event->code == BRCMF_E_TDLS_PEER_EVENT) &&
+ (emsg.bsscfgidx == 1))
+ ifp = drvr->iflist[0];
+ else
+ ifp = drvr->iflist[emsg.bsscfgidx];
err = brcmf_fweh_call_event_handler(ifp, event->code, &emsg,
event->data);
if (err) {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
index 51b53a73d074..dd20b1862d44 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
@@ -102,6 +102,7 @@ struct brcmf_event;
BRCMF_ENUM_DEF(DCS_REQUEST, 73) \
BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \
BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75) \
+ BRCMF_ENUM_DEF(TDLS_PEER_EVENT, 92) \
BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127) \
BRCMF_ENUM_DEF(PSTA_PRIMARY_INTF_IND, 128)
@@ -155,6 +156,10 @@ enum brcmf_fweh_event_code {
#define BRCMF_E_REASON_TSPEC_REJECTED 7
#define BRCMF_E_REASON_BETTER_AP 8
+#define BRCMF_E_REASON_TDLS_PEER_DISCOVERED 0
+#define BRCMF_E_REASON_TDLS_PEER_CONNECTED 1
+#define BRCMF_E_REASON_TDLS_PEER_DISCONNECTED 2
+
/* action field values for brcmf_ifevent */
#define BRCMF_E_IF_ADD 1
#define BRCMF_E_IF_DEL 2
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
new file mode 100644
index 000000000000..535c7eb01b3a
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
@@ -0,0 +1,1397 @@
+/* Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*******************************************************************************
+ * Communicates with the dongle by using dcmd codes.
+ * For certain dcmd codes, the dongle interprets string data from the host.
+ ******************************************************************************/
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+
+#include "dhd.h"
+#include "dhd_dbg.h"
+#include "proto.h"
+#include "msgbuf.h"
+#include "commonring.h"
+#include "flowring.h"
+#include "dhd_bus.h"
+#include "tracepoint.h"
+
+
+#define MSGBUF_IOCTL_RESP_TIMEOUT 2000
+
+#define MSGBUF_TYPE_GEN_STATUS 0x1
+#define MSGBUF_TYPE_RING_STATUS 0x2
+#define MSGBUF_TYPE_FLOW_RING_CREATE 0x3
+#define MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT 0x4
+#define MSGBUF_TYPE_FLOW_RING_DELETE 0x5
+#define MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT 0x6
+#define MSGBUF_TYPE_FLOW_RING_FLUSH 0x7
+#define MSGBUF_TYPE_FLOW_RING_FLUSH_CMPLT 0x8
+#define MSGBUF_TYPE_IOCTLPTR_REQ 0x9
+#define MSGBUF_TYPE_IOCTLPTR_REQ_ACK 0xA
+#define MSGBUF_TYPE_IOCTLRESP_BUF_POST 0xB
+#define MSGBUF_TYPE_IOCTL_CMPLT 0xC
+#define MSGBUF_TYPE_EVENT_BUF_POST 0xD
+#define MSGBUF_TYPE_WL_EVENT 0xE
+#define MSGBUF_TYPE_TX_POST 0xF
+#define MSGBUF_TYPE_TX_STATUS 0x10
+#define MSGBUF_TYPE_RXBUF_POST 0x11
+#define MSGBUF_TYPE_RX_CMPLT 0x12
+#define MSGBUF_TYPE_LPBK_DMAXFER 0x13
+#define MSGBUF_TYPE_LPBK_DMAXFER_CMPLT 0x14
+
+#define NR_TX_PKTIDS 2048
+#define NR_RX_PKTIDS 1024
+
+#define BRCMF_IOCTL_REQ_PKTID 0xFFFE
+
+#define BRCMF_MSGBUF_MAX_PKT_SIZE 2048
+#define BRCMF_MSGBUF_RXBUFPOST_THRESHOLD 32
+#define BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST 8
+#define BRCMF_MSGBUF_MAX_EVENTBUF_POST 8
+
+#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3 0x01
+#define BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT 5
+
+#define BRCMF_MSGBUF_TX_FLUSH_CNT1 32
+#define BRCMF_MSGBUF_TX_FLUSH_CNT2 96
+
+
+struct msgbuf_common_hdr {
+ u8 msgtype;
+ u8 ifidx;
+ u8 flags;
+ u8 rsvd0;
+ __le32 request_id;
+};
+
+struct msgbuf_buf_addr {
+ __le32 low_addr;
+ __le32 high_addr;
+};
+
+struct msgbuf_ioctl_req_hdr {
+ struct msgbuf_common_hdr msg;
+ __le32 cmd;
+ __le16 trans_id;
+ __le16 input_buf_len;
+ __le16 output_buf_len;
+ __le16 rsvd0[3];
+ struct msgbuf_buf_addr req_buf_addr;
+ __le32 rsvd1[2];
+};
+
+struct msgbuf_tx_msghdr {
+ struct msgbuf_common_hdr msg;
+ u8 txhdr[ETH_HLEN];
+ u8 flags;
+ u8 seg_cnt;
+ struct msgbuf_buf_addr metadata_buf_addr;
+ struct msgbuf_buf_addr data_buf_addr;
+ __le16 metadata_buf_len;
+ __le16 data_len;
+ __le32 rsvd0;
+};
+
+struct msgbuf_rx_bufpost {
+ struct msgbuf_common_hdr msg;
+ __le16 metadata_buf_len;
+ __le16 data_buf_len;
+ __le32 rsvd0;
+ struct msgbuf_buf_addr metadata_buf_addr;
+ struct msgbuf_buf_addr data_buf_addr;
+};
+
+struct msgbuf_rx_ioctl_resp_or_event {
+ struct msgbuf_common_hdr msg;
+ __le16 host_buf_len;
+ __le16 rsvd0[3];
+ struct msgbuf_buf_addr host_buf_addr;
+ __le32 rsvd1[4];
+};
+
+struct msgbuf_completion_hdr {
+ __le16 status;
+ __le16 flow_ring_id;
+};
+
+struct msgbuf_rx_event {
+ struct msgbuf_common_hdr msg;
+ struct msgbuf_completion_hdr compl_hdr;
+ __le16 event_data_len;
+ __le16 seqnum;
+ __le16 rsvd0[4];
+};
+
+struct msgbuf_ioctl_resp_hdr {
+ struct msgbuf_common_hdr msg;
+ struct msgbuf_completion_hdr compl_hdr;
+ __le16 resp_len;
+ __le16 trans_id;
+ __le32 cmd;
+ __le32 rsvd0;
+};
+
+struct msgbuf_tx_status {
+ struct msgbuf_common_hdr msg;
+ struct msgbuf_completion_hdr compl_hdr;
+ __le16 metadata_len;
+ __le16 tx_status;
+};
+
+struct msgbuf_rx_complete {
+ struct msgbuf_common_hdr msg;
+ struct msgbuf_completion_hdr compl_hdr;
+ __le16 metadata_len;
+ __le16 data_len;
+ __le16 data_offset;
+ __le16 flags;
+ __le32 rx_status_0;
+ __le32 rx_status_1;
+ __le32 rsvd0;
+};
+
+struct msgbuf_tx_flowring_create_req {
+ struct msgbuf_common_hdr msg;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+ u8 tid;
+ u8 if_flags;
+ __le16 flow_ring_id;
+ u8 tc;
+ u8 priority;
+ __le16 int_vector;
+ __le16 max_items;
+ __le16 len_item;
+ struct msgbuf_buf_addr flow_ring_addr;
+};
+
+struct msgbuf_tx_flowring_delete_req {
+ struct msgbuf_common_hdr msg;
+ __le16 flow_ring_id;
+ __le16 reason;
+ __le32 rsvd0[7];
+};
+
+struct msgbuf_flowring_create_resp {
+ struct msgbuf_common_hdr msg;
+ struct msgbuf_completion_hdr compl_hdr;
+ __le32 rsvd0[3];
+};
+
+struct msgbuf_flowring_delete_resp {
+ struct msgbuf_common_hdr msg;
+ struct msgbuf_completion_hdr compl_hdr;
+ __le32 rsvd0[3];
+};
+
+struct msgbuf_flowring_flush_resp {
+ struct msgbuf_common_hdr msg;
+ struct msgbuf_completion_hdr compl_hdr;
+ __le32 rsvd0[3];
+};
+
+struct brcmf_msgbuf {
+ struct brcmf_pub *drvr;
+
+ struct brcmf_commonring **commonrings;
+ struct brcmf_commonring **flowrings;
+ dma_addr_t *flowring_dma_handle;
+ u16 nrof_flowrings;
+
+ u16 rx_dataoffset;
+ u32 max_rxbufpost;
+ u16 rx_metadata_offset;
+ u32 rxbufpost;
+
+ u32 max_ioctlrespbuf;
+ u32 cur_ioctlrespbuf;
+ u32 max_eventbuf;
+ u32 cur_eventbuf;
+
+ void *ioctbuf;
+ dma_addr_t ioctbuf_handle;
+ u32 ioctbuf_phys_hi;
+ u32 ioctbuf_phys_lo;
+ u32 ioctl_resp_status;
+ u32 ioctl_resp_ret_len;
+ u32 ioctl_resp_pktid;
+
+ u16 data_seq_no;
+ u16 ioctl_seq_no;
+ u32 reqid;
+ wait_queue_head_t ioctl_resp_wait;
+ bool ctl_completed;
+
+ struct brcmf_msgbuf_pktids *tx_pktids;
+ struct brcmf_msgbuf_pktids *rx_pktids;
+ struct brcmf_flowring *flow;
+
+ struct workqueue_struct *txflow_wq;
+ struct work_struct txflow_work;
+ unsigned long *flow_map;
+ unsigned long *txstatus_done_map;
+};
+
+struct brcmf_msgbuf_pktid {
+ atomic_t allocated;
+ u16 data_offset;
+ struct sk_buff *skb;
+ dma_addr_t physaddr;
+};
+
+struct brcmf_msgbuf_pktids {
+ u32 array_size;
+ u32 last_allocated_idx;
+ enum dma_data_direction direction;
+ struct brcmf_msgbuf_pktid *array;
+};
+
+
+/* DMA flushing needs an implementation for MIPS and ARM platforms; it
+ * should go in util. Note that this is not real flushing: the memory is
+ * virtual non-cached, so only write buffers should have to be drained,
+ * though this may differ per platform.
+ */
+#define brcmf_dma_flush(addr, len)
+#define brcmf_dma_invalidate_cache(addr, len)
+
+
+static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf);
+
+
+static struct brcmf_msgbuf_pktids *
+brcmf_msgbuf_init_pktids(u32 nr_array_entries,
+ enum dma_data_direction direction)
+{
+ struct brcmf_msgbuf_pktid *array;
+ struct brcmf_msgbuf_pktids *pktids;
+
+ array = kcalloc(nr_array_entries, sizeof(*array), GFP_ATOMIC);
+ if (!array)
+ return NULL;
+
+ pktids = kzalloc(sizeof(*pktids), GFP_ATOMIC);
+ if (!pktids) {
+ kfree(array);
+ return NULL;
+ }
+ pktids->array = array;
+ pktids->array_size = nr_array_entries;
+
+ return pktids;
+}
+
+
+static int
+brcmf_msgbuf_alloc_pktid(struct device *dev,
+ struct brcmf_msgbuf_pktids *pktids,
+ struct sk_buff *skb, u16 data_offset,
+ dma_addr_t *physaddr, u32 *idx)
+{
+ struct brcmf_msgbuf_pktid *array;
+ u32 count;
+
+ array = pktids->array;
+
+ *physaddr = dma_map_single(dev, skb->data + data_offset,
+ skb->len - data_offset, pktids->direction);
+
+ if (dma_mapping_error(dev, *physaddr)) {
+ brcmf_err("dma_map_single failed !!\n");
+ return -ENOMEM;
+ }
+
+ *idx = pktids->last_allocated_idx;
+
+ count = 0;
+ do {
+ (*idx)++;
+ if (*idx == pktids->array_size)
+ *idx = 0;
+ if (array[*idx].allocated.counter == 0)
+ if (atomic_cmpxchg(&array[*idx].allocated, 0, 1) == 0)
+ break;
+ count++;
+ } while (count < pktids->array_size);
+
+ if (count == pktids->array_size)
+ return -ENOMEM;
+
+ array[*idx].data_offset = data_offset;
+ array[*idx].physaddr = *physaddr;
+ array[*idx].skb = skb;
+
+ pktids->last_allocated_idx = *idx;
+
+ return 0;
+}
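
brcmf_msgbuf_alloc_pktid() above hands out request IDs from a circular array, starting just past the last allocation and claiming the first free slot with atomic_cmpxchg(); the ID later travels in msg.request_id so the completion path can recover the skb and DMA mapping. A standalone sketch of the allocator (C11 atomics, DMA mapping omitted, names hypothetical):

#include <stdatomic.h>
#include <stdio.h>

#define NR_IDS 8

static atomic_int allocated[NR_IDS];
static unsigned int last_idx;

static int alloc_id(unsigned int *idx)
{
    unsigned int i = last_idx, count;

    for (count = 0; count < NR_IDS; count++) {
        i = (i + 1) % NR_IDS;
        int expected = 0;

        /* cheap read first, then claim the slot atomically */
        if (atomic_load(&allocated[i]) == 0 &&
            atomic_compare_exchange_strong(&allocated[i], &expected, 1)) {
            last_idx = i;
            *idx = i;
            return 0;
        }
    }
    return -1;      /* every id is in use */
}

static void free_id(unsigned int idx)
{
    atomic_store(&allocated[idx], 0);
}

int main(void)
{
    unsigned int id;

    while (alloc_id(&id) == 0)
        printf("claimed id %u\n", id);
    free_id(3);
    if (alloc_id(&id) == 0)
        printf("reclaimed id %u\n", id);
    return 0;
}
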
+
+
+static struct sk_buff *
+brcmf_msgbuf_get_pktid(struct device *dev, struct brcmf_msgbuf_pktids *pktids,
+ u32 idx)
+{
+ struct brcmf_msgbuf_pktid *pktid;
+ struct sk_buff *skb;
+
+ if (idx >= pktids->array_size) {
+ brcmf_err("Invalid packet id %d (max %d)\n", idx,
+ pktids->array_size);
+ return NULL;
+ }
+ if (pktids->array[idx].allocated.counter) {
+ pktid = &pktids->array[idx];
+ dma_unmap_single(dev, pktid->physaddr,
+ pktid->skb->len - pktid->data_offset,
+ pktids->direction);
+ skb = pktid->skb;
+ pktid->allocated.counter = 0;
+ return skb;
+ } else {
+ brcmf_err("Invalid packet id %d (not in use)\n", idx);
+ }
+
+ return NULL;
+}
+
+
+static void
+brcmf_msgbuf_release_array(struct device *dev,
+ struct brcmf_msgbuf_pktids *pktids)
+{
+ struct brcmf_msgbuf_pktid *array;
+ struct brcmf_msgbuf_pktid *pktid;
+ u32 count;
+
+ array = pktids->array;
+ count = 0;
+ do {
+ if (array[count].allocated.counter) {
+ pktid = &array[count];
+ dma_unmap_single(dev, pktid->physaddr,
+ pktid->skb->len - pktid->data_offset,
+ pktids->direction);
+ brcmu_pkt_buf_free_skb(pktid->skb);
+ }
+ count++;
+ } while (count < pktids->array_size);
+
+ kfree(array);
+ kfree(pktids);
+}
+
+
+static void brcmf_msgbuf_release_pktids(struct brcmf_msgbuf *msgbuf)
+{
+ if (msgbuf->rx_pktids)
+ brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev,
+ msgbuf->rx_pktids);
+ if (msgbuf->tx_pktids)
+ brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev,
+ msgbuf->tx_pktids);
+}
+
+
+static int brcmf_msgbuf_tx_ioctl(struct brcmf_pub *drvr, int ifidx,
+ uint cmd, void *buf, uint len)
+{
+ struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
+ struct brcmf_commonring *commonring;
+ struct msgbuf_ioctl_req_hdr *request;
+ u16 buf_len;
+ void *ret_ptr;
+ int err;
+
+ commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
+ brcmf_commonring_lock(commonring);
+ ret_ptr = brcmf_commonring_reserve_for_write(commonring);
+ if (!ret_ptr) {
+ brcmf_err("Failed to reserve space in commonring\n");
+ brcmf_commonring_unlock(commonring);
+ return -ENOMEM;
+ }
+
+ msgbuf->reqid++;
+
+ request = (struct msgbuf_ioctl_req_hdr *)ret_ptr;
+ request->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
+ request->msg.ifidx = (u8)ifidx;
+ request->msg.flags = 0;
+ request->msg.request_id = cpu_to_le32(BRCMF_IOCTL_REQ_PKTID);
+ request->cmd = cpu_to_le32(cmd);
+ request->output_buf_len = cpu_to_le16(len);
+ request->trans_id = cpu_to_le16(msgbuf->reqid);
+
+ buf_len = min_t(u16, len, BRCMF_TX_IOCTL_MAX_MSG_SIZE);
+ request->input_buf_len = cpu_to_le16(buf_len);
+ request->req_buf_addr.high_addr = cpu_to_le32(msgbuf->ioctbuf_phys_hi);
+ request->req_buf_addr.low_addr = cpu_to_le32(msgbuf->ioctbuf_phys_lo);
+ if (buf)
+ memcpy(msgbuf->ioctbuf, buf, buf_len);
+ else
+ memset(msgbuf->ioctbuf, 0, buf_len);
+ brcmf_dma_flush(msgbuf->ioctbuf, buf_len);
+
+ err = brcmf_commonring_write_complete(commonring);
+ brcmf_commonring_unlock(commonring);
+
+ return err;
+}
+
+
+static int brcmf_msgbuf_ioctl_resp_wait(struct brcmf_msgbuf *msgbuf)
+{
+ return wait_event_timeout(msgbuf->ioctl_resp_wait,
+ msgbuf->ctl_completed,
+ msecs_to_jiffies(MSGBUF_IOCTL_RESP_TIMEOUT));
+}
+
+
+static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf *msgbuf)
+{
+ if (waitqueue_active(&msgbuf->ioctl_resp_wait)) {
+ msgbuf->ctl_completed = true;
+ wake_up(&msgbuf->ioctl_resp_wait);
+ }
+}
+
+
+static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
+ uint cmd, void *buf, uint len)
+{
+ struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
+ struct sk_buff *skb = NULL;
+ int timeout;
+ int err;
+
+ brcmf_dbg(MSGBUF, "ifidx=%d, cmd=%d, len=%d\n", ifidx, cmd, len);
+ msgbuf->ctl_completed = false;
+ err = brcmf_msgbuf_tx_ioctl(drvr, ifidx, cmd, buf, len);
+ if (err)
+ return err;
+
+ timeout = brcmf_msgbuf_ioctl_resp_wait(msgbuf);
+ if (!timeout) {
+ brcmf_err("Timeout on response for query command\n");
+ return -EIO;
+ }
+
+ skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
+ msgbuf->rx_pktids,
+ msgbuf->ioctl_resp_pktid);
+ if (msgbuf->ioctl_resp_ret_len != 0) {
+ if (!skb) {
+ brcmf_err("Invalid packet id idx recv'd %d\n",
+ msgbuf->ioctl_resp_pktid);
+ return -EBADF;
+ }
+ memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ?
+ len : msgbuf->ioctl_resp_ret_len);
+ }
+ if (skb)
+ brcmu_pkt_buf_free_skb(skb);
+
+ return msgbuf->ioctl_resp_status;
+}
+
+
+static int brcmf_msgbuf_set_dcmd(struct brcmf_pub *drvr, int ifidx,
+ uint cmd, void *buf, uint len)
+{
+ return brcmf_msgbuf_query_dcmd(drvr, ifidx, cmd, buf, len);
+}
+
+
+static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
+ u8 *ifidx, struct sk_buff *skb)
+{
+ return -ENODEV;
+}
+
+
+static void
+brcmf_msgbuf_remove_flowring(struct brcmf_msgbuf *msgbuf, u16 flowid)
+{
+ u32 dma_sz;
+ void *dma_buf;
+
+ brcmf_dbg(MSGBUF, "Removing flowring %d\n", flowid);
+
+ dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
+ dma_buf = msgbuf->flowrings[flowid]->buf_addr;
+ dma_free_coherent(msgbuf->drvr->bus_if->dev, dma_sz, dma_buf,
+ msgbuf->flowring_dma_handle[flowid]);
+
+ brcmf_flowring_delete(msgbuf->flow, flowid);
+}
+
+
+static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx,
+ struct sk_buff *skb)
+{
+ struct msgbuf_tx_flowring_create_req *create;
+ struct ethhdr *eh = (struct ethhdr *)(skb->data);
+ struct brcmf_commonring *commonring;
+ void *ret_ptr;
+ u32 flowid;
+ void *dma_buf;
+ u32 dma_sz;
+ long long address;
+ int err;
+
+ flowid = brcmf_flowring_create(msgbuf->flow, eh->h_dest,
+ skb->priority, ifidx);
+ if (flowid == BRCMF_FLOWRING_INVALID_ID)
+ return flowid;
+
+ dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
+
+ dma_buf = dma_alloc_coherent(msgbuf->drvr->bus_if->dev, dma_sz,
+ &msgbuf->flowring_dma_handle[flowid],
+ GFP_ATOMIC);
+ if (!dma_buf) {
+ brcmf_err("dma_alloc_coherent failed\n");
+ brcmf_flowring_delete(msgbuf->flow, flowid);
+ return BRCMF_FLOWRING_INVALID_ID;
+ }
+
+ brcmf_commonring_config(msgbuf->flowrings[flowid],
+ BRCMF_H2D_TXFLOWRING_MAX_ITEM,
+ BRCMF_H2D_TXFLOWRING_ITEMSIZE, dma_buf);
+
+ commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
+ brcmf_commonring_lock(commonring);
+ ret_ptr = brcmf_commonring_reserve_for_write(commonring);
+ if (!ret_ptr) {
+ brcmf_err("Failed to reserve space in commonring\n");
+ brcmf_commonring_unlock(commonring);
+ brcmf_msgbuf_remove_flowring(msgbuf, flowid);
+ return BRCMF_FLOWRING_INVALID_ID;
+ }
+
+ create = (struct msgbuf_tx_flowring_create_req *)ret_ptr;
+ create->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
+ create->msg.ifidx = ifidx;
+ create->msg.request_id = 0;
+ create->tid = brcmf_flowring_tid(msgbuf->flow, flowid);
+ create->flow_ring_id = cpu_to_le16(flowid +
+ BRCMF_NROF_H2D_COMMON_MSGRINGS);
+ memcpy(create->sa, eh->h_source, ETH_ALEN);
+ memcpy(create->da, eh->h_dest, ETH_ALEN);
+ address = (long long)(long)msgbuf->flowring_dma_handle[flowid];
+ create->flow_ring_addr.high_addr = cpu_to_le32(address >> 32);
+ create->flow_ring_addr.low_addr = cpu_to_le32(address & 0xffffffff);
+ create->max_items = cpu_to_le16(BRCMF_H2D_TXFLOWRING_MAX_ITEM);
+ create->len_item = cpu_to_le16(BRCMF_H2D_TXFLOWRING_ITEMSIZE);
+
+ brcmf_dbg(MSGBUF, "Send Flow Create Req flow ID %d for peer %pM prio %d ifindex %d\n",
+ flowid, eh->h_dest, create->tid, ifidx);
+
+ err = brcmf_commonring_write_complete(commonring);
+ brcmf_commonring_unlock(commonring);
+ if (err) {
+ brcmf_err("Failed to write commonring\n");
+ brcmf_msgbuf_remove_flowring(msgbuf, flowid);
+ return BRCMF_FLOWRING_INVALID_ID;
+ }
+
+ return flowid;
+}
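
Ring and buffer addresses are handed to the firmware as two 32-bit halves (high_addr/low_addr), as in create->flow_ring_addr above. A quick sketch of the split and reassembly, with the dma_addr_t simplified to uint64_t:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t addr = 0x0000001234567890ULL;   /* example bus address */
    uint32_t hi = (uint32_t)(addr >> 32);
    uint32_t lo = (uint32_t)(addr & 0xffffffff);

    printf("hi=0x%08x lo=0x%08x\n", hi, lo);
    /* reassembled on the device side: */
    printf("back=0x%016llx\n",
           (unsigned long long)(((uint64_t)hi << 32) | lo));
    return 0;
}
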
+
+
+static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u8 flowid)
+{
+ struct brcmf_flowring *flow = msgbuf->flow;
+ struct brcmf_commonring *commonring;
+ void *ret_ptr;
+ u32 count;
+ struct sk_buff *skb;
+ dma_addr_t physaddr;
+ u32 pktid;
+ struct msgbuf_tx_msghdr *tx_msghdr;
+ long long address;
+
+ commonring = msgbuf->flowrings[flowid];
+ if (!brcmf_commonring_write_available(commonring))
+ return;
+
+ brcmf_commonring_lock(commonring);
+
+ count = BRCMF_MSGBUF_TX_FLUSH_CNT2 - BRCMF_MSGBUF_TX_FLUSH_CNT1;
+ while (brcmf_flowring_qlen(flow, flowid)) {
+ skb = brcmf_flowring_dequeue(flow, flowid);
+ if (skb == NULL) {
+ brcmf_err("No SKB, but qlen %d\n",
+ brcmf_flowring_qlen(flow, flowid));
+ break;
+ }
+ skb_orphan(skb);
+ if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
+ msgbuf->tx_pktids, skb, ETH_HLEN,
+ &physaddr, &pktid)) {
+ brcmf_flowring_reinsert(flow, flowid, skb);
+ brcmf_err("No PKTID available !!\n");
+ break;
+ }
+ ret_ptr = brcmf_commonring_reserve_for_write(commonring);
+ if (!ret_ptr) {
+ brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
+ msgbuf->tx_pktids, pktid);
+ brcmf_flowring_reinsert(flow, flowid, skb);
+ break;
+ }
+ count++;
+
+ tx_msghdr = (struct msgbuf_tx_msghdr *)ret_ptr;
+
+ tx_msghdr->msg.msgtype = MSGBUF_TYPE_TX_POST;
+ tx_msghdr->msg.request_id = cpu_to_le32(pktid);
+ tx_msghdr->msg.ifidx = brcmf_flowring_ifidx_get(flow, flowid);
+ tx_msghdr->flags = BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3;
+ tx_msghdr->flags |= (skb->priority & 0x07) <<
+ BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
+ tx_msghdr->seg_cnt = 1;
+ memcpy(tx_msghdr->txhdr, skb->data, ETH_HLEN);
+ tx_msghdr->data_len = cpu_to_le16(skb->len - ETH_HLEN);
+ address = (long long)(long)physaddr;
+ tx_msghdr->data_buf_addr.high_addr = cpu_to_le32(address >> 32);
+ tx_msghdr->data_buf_addr.low_addr =
+ cpu_to_le32(address & 0xffffffff);
+ tx_msghdr->metadata_buf_len = 0;
+ tx_msghdr->metadata_buf_addr.high_addr = 0;
+ tx_msghdr->metadata_buf_addr.low_addr = 0;
+ if (count >= BRCMF_MSGBUF_TX_FLUSH_CNT2) {
+ brcmf_commonring_write_complete(commonring);
+ count = 0;
+ }
+ }
+ if (count)
+ brcmf_commonring_write_complete(commonring);
+ brcmf_commonring_unlock(commonring);
+}
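
brcmf_msgbuf_txflow() above batches submissions: it keeps filling tx descriptors into the flowring but only calls brcmf_commonring_write_complete() every so many items (the counter is seeded so the first flush comes after BRCMF_MSGBUF_TX_FLUSH_CNT1 packets), plus once more for any remainder. A simplified sketch of that batching; submit_item() and doorbell() are hypothetical stand-ins:

#include <stdio.h>

#define FLUSH_CNT 64

static void submit_item(int n)
{
    /* place item n in the shared ring (stub) */
    (void)n;
}

static void doorbell(void)
{
    /* tell the consumer that new items are available (stub) */
    printf("doorbell\n");
}

static void submit_burst(int nitems)
{
    int n, count = 0;

    for (n = 0; n < nitems; n++) {
        submit_item(n);
        if (++count >= FLUSH_CNT) {
            doorbell();
            count = 0;
        }
    }
    if (count)          /* flush the partial batch at the end */
        doorbell();
}

int main(void)
{
    submit_burst(150);  /* expect three doorbells: 64 + 64 + 22 */
    return 0;
}
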
+
+
+static void brcmf_msgbuf_txflow_worker(struct work_struct *worker)
+{
+ struct brcmf_msgbuf *msgbuf;
+ u32 flowid;
+
+ msgbuf = container_of(worker, struct brcmf_msgbuf, txflow_work);
+ for_each_set_bit(flowid, msgbuf->flow_map, msgbuf->nrof_flowrings) {
+ clear_bit(flowid, msgbuf->flow_map);
+ brcmf_msgbuf_txflow(msgbuf, flowid);
+ }
+}
+
+
+static int brcmf_msgbuf_schedule_txdata(struct brcmf_msgbuf *msgbuf, u32 flowid)
+{
+ set_bit(flowid, msgbuf->flow_map);
+ queue_work(msgbuf->txflow_wq, &msgbuf->txflow_work);
+
+ return 0;
+}
+
+
+static int brcmf_msgbuf_txdata(struct brcmf_pub *drvr, int ifidx,
+ u8 offset, struct sk_buff *skb)
+{
+ struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
+ struct brcmf_flowring *flow = msgbuf->flow;
+ struct ethhdr *eh = (struct ethhdr *)(skb->data);
+ u32 flowid;
+
+ flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx);
+ if (flowid == BRCMF_FLOWRING_INVALID_ID) {
+ flowid = brcmf_msgbuf_flowring_create(msgbuf, ifidx, skb);
+ if (flowid == BRCMF_FLOWRING_INVALID_ID)
+ return -ENOMEM;
+ }
+ brcmf_flowring_enqueue(flow, flowid, skb);
+ brcmf_msgbuf_schedule_txdata(msgbuf, flowid);
+
+ return 0;
+}
+
+
+static void
+brcmf_msgbuf_configure_addr_mode(struct brcmf_pub *drvr, int ifidx,
+ enum proto_addr_mode addr_mode)
+{
+ struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
+
+ brcmf_flowring_configure_addr_mode(msgbuf->flow, ifidx, addr_mode);
+}
+
+
+static void
+brcmf_msgbuf_delete_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
+{
+ struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
+
+ brcmf_flowring_delete_peer(msgbuf->flow, ifidx, peer);
+}
+
+
+static void
+brcmf_msgbuf_add_tdls_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
+{
+ struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
+
+ brcmf_flowring_add_tdls_peer(msgbuf->flow, ifidx, peer);
+}
+
+
+static void
+brcmf_msgbuf_process_ioctl_complete(struct brcmf_msgbuf *msgbuf, void *buf)
+{
+ struct msgbuf_ioctl_resp_hdr *ioctl_resp;
+
+ ioctl_resp = (struct msgbuf_ioctl_resp_hdr *)buf;
+
+ msgbuf->ioctl_resp_status = le16_to_cpu(ioctl_resp->compl_hdr.status);
+ msgbuf->ioctl_resp_ret_len = le16_to_cpu(ioctl_resp->resp_len);
+ msgbuf->ioctl_resp_pktid = le32_to_cpu(ioctl_resp->msg.request_id);
+
+ brcmf_msgbuf_ioctl_resp_wake(msgbuf);
+
+ if (msgbuf->cur_ioctlrespbuf)
+ msgbuf->cur_ioctlrespbuf--;
+ brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);
+}
+
+
+static void
+brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
+{
+ struct msgbuf_tx_status *tx_status;
+ u32 idx;
+ struct sk_buff *skb;
+ u16 flowid;
+
+ tx_status = (struct msgbuf_tx_status *)buf;
+ idx = le32_to_cpu(tx_status->msg.request_id);
+ flowid = le16_to_cpu(tx_status->compl_hdr.flow_ring_id);
+ flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
+ skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
+ msgbuf->tx_pktids, idx);
+ if (!skb) {
+ brcmf_err("Invalid packet id idx recv'd %d\n", idx);
+ return;
+ }
+
+ set_bit(flowid, msgbuf->txstatus_done_map);
+
+ brcmf_txfinalize(msgbuf->drvr, skb, tx_status->msg.ifidx, true);
+}
+
+
+static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count)
+{
+ struct brcmf_commonring *commonring;
+ void *ret_ptr;
+ struct sk_buff *skb;
+ u16 alloced;
+ u32 pktlen;
+ dma_addr_t physaddr;
+ struct msgbuf_rx_bufpost *rx_bufpost;
+ long long address;
+ u32 pktid;
+ u32 i;
+
+ commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
+ ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
+ count,
+ &alloced);
+ if (!ret_ptr) {
+ brcmf_err("Failed to reserve space in commonring\n");
+ return 0;
+ }
+
+ for (i = 0; i < alloced; i++) {
+ rx_bufpost = (struct msgbuf_rx_bufpost *)ret_ptr;
+ memset(rx_bufpost, 0, sizeof(*rx_bufpost));
+
+ skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);
+
+ if (skb == NULL) {
+ brcmf_err("Failed to alloc SKB\n");
+ brcmf_commonring_write_cancel(commonring, alloced - i);
+ break;
+ }
+
+ pktlen = skb->len;
+ if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
+ msgbuf->rx_pktids, skb, 0,
+ &physaddr, &pktid)) {
+ dev_kfree_skb_any(skb);
+ brcmf_err("No PKTID available !!\n");
+ brcmf_commonring_write_cancel(commonring, alloced - i);
+ break;
+ }
+
+ if (msgbuf->rx_metadata_offset) {
+ address = (long long)(long)physaddr;
+ rx_bufpost->metadata_buf_len =
+ cpu_to_le16(msgbuf->rx_metadata_offset);
+ rx_bufpost->metadata_buf_addr.high_addr =
+ cpu_to_le32(address >> 32);
+ rx_bufpost->metadata_buf_addr.low_addr =
+ cpu_to_le32(address & 0xffffffff);
+
+ skb_pull(skb, msgbuf->rx_metadata_offset);
+ pktlen = skb->len;
+ physaddr += msgbuf->rx_metadata_offset;
+ }
+ rx_bufpost->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
+ rx_bufpost->msg.request_id = cpu_to_le32(pktid);
+
+ address = (long long)(long)physaddr;
+ rx_bufpost->data_buf_len = cpu_to_le16((u16)pktlen);
+ rx_bufpost->data_buf_addr.high_addr =
+ cpu_to_le32(address >> 32);
+ rx_bufpost->data_buf_addr.low_addr =
+ cpu_to_le32(address & 0xffffffff);
+
+ ret_ptr += brcmf_commonring_len_item(commonring);
+ }
+
+ if (i)
+ brcmf_commonring_write_complete(commonring);
+
+ return i;
+}
+
+
+static void
+brcmf_msgbuf_rxbuf_data_fill(struct brcmf_msgbuf *msgbuf)
+{
+ u32 fillbufs;
+ u32 retcount;
+
+ fillbufs = msgbuf->max_rxbufpost - msgbuf->rxbufpost;
+
+ while (fillbufs) {
+ retcount = brcmf_msgbuf_rxbuf_data_post(msgbuf, fillbufs);
+ if (!retcount)
+ break;
+ msgbuf->rxbufpost += retcount;
+ fillbufs -= retcount;
+ }
+}
+
+
+static void
+brcmf_msgbuf_update_rxbufpost_count(struct brcmf_msgbuf *msgbuf, u16 rxcnt)
+{
+ msgbuf->rxbufpost -= rxcnt;
+ if (msgbuf->rxbufpost <= (msgbuf->max_rxbufpost -
+ BRCMF_MSGBUF_RXBUFPOST_THRESHOLD))
+ brcmf_msgbuf_rxbuf_data_fill(msgbuf);
+}
+
+
+static u32
+brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf,
+ u32 count)
+{
+ struct brcmf_commonring *commonring;
+ void *ret_ptr;
+ struct sk_buff *skb;
+ u16 alloced;
+ u32 pktlen;
+ dma_addr_t physaddr;
+ struct msgbuf_rx_ioctl_resp_or_event *rx_bufpost;
+ long long address;
+ u32 pktid;
+ u32 i;
+
+ commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
+ brcmf_commonring_lock(commonring);
+ ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
+ count,
+ &alloced);
+ if (!ret_ptr) {
+ brcmf_err("Failed to reserve space in commonring\n");
+ brcmf_commonring_unlock(commonring);
+ return 0;
+ }
+
+ for (i = 0; i < alloced; i++) {
+ rx_bufpost = (struct msgbuf_rx_ioctl_resp_or_event *)ret_ptr;
+ memset(rx_bufpost, 0, sizeof(*rx_bufpost));
+
+ skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);
+
+ if (skb == NULL) {
+ brcmf_err("Failed to alloc SKB\n");
+ brcmf_commonring_write_cancel(commonring, alloced - i);
+ break;
+ }
+
+ pktlen = skb->len;
+ if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
+ msgbuf->rx_pktids, skb, 0,
+ &physaddr, &pktid)) {
+ dev_kfree_skb_any(skb);
+ brcmf_err("No PKTID available !!\n");
+ brcmf_commonring_write_cancel(commonring, alloced - i);
+ break;
+ }
+ if (event_buf)
+ rx_bufpost->msg.msgtype = MSGBUF_TYPE_EVENT_BUF_POST;
+ else
+ rx_bufpost->msg.msgtype =
+ MSGBUF_TYPE_IOCTLRESP_BUF_POST;
+ rx_bufpost->msg.request_id = cpu_to_le32(pktid);
+
+ address = (long long)(long)physaddr;
+ rx_bufpost->host_buf_len = cpu_to_le16((u16)pktlen);
+ rx_bufpost->host_buf_addr.high_addr =
+ cpu_to_le32(address >> 32);
+ rx_bufpost->host_buf_addr.low_addr =
+ cpu_to_le32(address & 0xffffffff);
+
+ ret_ptr += brcmf_commonring_len_item(commonring);
+ }
+
+ if (i)
+ brcmf_commonring_write_complete(commonring);
+
+ brcmf_commonring_unlock(commonring);
+
+ return i;
+}
+
+
+static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf)
+{
+ u32 count;
+
+ count = msgbuf->max_ioctlrespbuf - msgbuf->cur_ioctlrespbuf;
+ count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, false, count);
+ msgbuf->cur_ioctlrespbuf += count;
+}
+
+
+static void brcmf_msgbuf_rxbuf_event_post(struct brcmf_msgbuf *msgbuf)
+{
+ u32 count;
+
+ count = msgbuf->max_eventbuf - msgbuf->cur_eventbuf;
+ count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, true, count);
+ msgbuf->cur_eventbuf += count;
+}
+
+
+static void
+brcmf_msgbuf_rx_skb(struct brcmf_msgbuf *msgbuf, struct sk_buff *skb,
+ u8 ifidx)
+{
+ struct brcmf_if *ifp;
+
+ ifp = msgbuf->drvr->iflist[ifidx];
+ if (!ifp || !ifp->ndev) {
+ brcmu_pkt_buf_free_skb(skb);
+ return;
+ }
+ brcmf_netif_rx(ifp, skb);
+}
+
+
+static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf)
+{
+ struct msgbuf_rx_event *event;
+ u32 idx;
+ u16 buflen;
+ struct sk_buff *skb;
+
+ event = (struct msgbuf_rx_event *)buf;
+ idx = le32_to_cpu(event->msg.request_id);
+ buflen = le16_to_cpu(event->event_data_len);
+
+ if (msgbuf->cur_eventbuf)
+ msgbuf->cur_eventbuf--;
+ brcmf_msgbuf_rxbuf_event_post(msgbuf);
+
+ skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
+ msgbuf->rx_pktids, idx);
+ if (!skb)
+ return;
+
+ if (msgbuf->rx_dataoffset)
+ skb_pull(skb, msgbuf->rx_dataoffset);
+
+ skb_trim(skb, buflen);
+
+ brcmf_msgbuf_rx_skb(msgbuf, skb, event->msg.ifidx);
+}
+
+
+static void
+brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
+{
+ struct msgbuf_rx_complete *rx_complete;
+ struct sk_buff *skb;
+ u16 data_offset;
+ u16 buflen;
+ u32 idx;
+
+ brcmf_msgbuf_update_rxbufpost_count(msgbuf, 1);
+
+ rx_complete = (struct msgbuf_rx_complete *)buf;
+ data_offset = le16_to_cpu(rx_complete->data_offset);
+ buflen = le16_to_cpu(rx_complete->data_len);
+ idx = le32_to_cpu(rx_complete->msg.request_id);
+
+ skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
+ msgbuf->rx_pktids, idx);
+
+ if (data_offset)
+ skb_pull(skb, data_offset);
+ else if (msgbuf->rx_dataoffset)
+ skb_pull(skb, msgbuf->rx_dataoffset);
+
+ skb_trim(skb, buflen);
+
+ brcmf_msgbuf_rx_skb(msgbuf, skb, rx_complete->msg.ifidx);
+}
+
+
+static void
+brcmf_msgbuf_process_flow_ring_create_response(struct brcmf_msgbuf *msgbuf,
+ void *buf)
+{
+ struct msgbuf_flowring_create_resp *flowring_create_resp;
+ u16 status;
+ u16 flowid;
+
+ flowring_create_resp = (struct msgbuf_flowring_create_resp *)buf;
+
+ flowid = le16_to_cpu(flowring_create_resp->compl_hdr.flow_ring_id);
+ flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
+ status = le16_to_cpu(flowring_create_resp->compl_hdr.status);
+
+ if (status) {
+ brcmf_err("Flowring creation failed, code %d\n", status);
+ brcmf_msgbuf_remove_flowring(msgbuf, flowid);
+ return;
+ }
+ brcmf_dbg(MSGBUF, "Flowring %d Create response status %d\n", flowid,
+ status);
+
+ brcmf_flowring_open(msgbuf->flow, flowid);
+
+ brcmf_msgbuf_schedule_txdata(msgbuf, flowid);
+}
+
+
+static void
+brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf *msgbuf,
+ void *buf)
+{
+ struct msgbuf_flowring_delete_resp *flowring_delete_resp;
+ u16 status;
+ u16 flowid;
+
+ flowring_delete_resp = (struct msgbuf_flowring_delete_resp *)buf;
+
+ flowid = le16_to_cpu(flowring_delete_resp->compl_hdr.flow_ring_id);
+ flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
+ status = le16_to_cpu(flowring_delete_resp->compl_hdr.status);
+
+ if (status) {
+ brcmf_err("Flowring deletion failed, code %d\n", status);
+ brcmf_flowring_delete(msgbuf->flow, flowid);
+ return;
+ }
+ brcmf_dbg(MSGBUF, "Flowring %d Delete response status %d\n", flowid,
+ status);
+
+ brcmf_msgbuf_remove_flowring(msgbuf, flowid);
+}
+
+
+static void brcmf_msgbuf_process_msgtype(struct brcmf_msgbuf *msgbuf, void *buf)
+{
+ struct msgbuf_common_hdr *msg;
+
+ msg = (struct msgbuf_common_hdr *)buf;
+ switch (msg->msgtype) {
+ case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
+ brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT\n");
+ brcmf_msgbuf_process_flow_ring_create_response(msgbuf, buf);
+ break;
+ case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
+ brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT\n");
+ brcmf_msgbuf_process_flow_ring_delete_response(msgbuf, buf);
+ break;
+ case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
+ brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTLPTR_REQ_ACK\n");
+ break;
+ case MSGBUF_TYPE_IOCTL_CMPLT:
+ brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTL_CMPLT\n");
+ brcmf_msgbuf_process_ioctl_complete(msgbuf, buf);
+ break;
+ case MSGBUF_TYPE_WL_EVENT:
+ brcmf_dbg(MSGBUF, "MSGBUF_TYPE_WL_EVENT\n");
+ brcmf_msgbuf_process_event(msgbuf, buf);
+ break;
+ case MSGBUF_TYPE_TX_STATUS:
+ brcmf_dbg(MSGBUF, "MSGBUF_TYPE_TX_STATUS\n");
+ brcmf_msgbuf_process_txstatus(msgbuf, buf);
+ break;
+ case MSGBUF_TYPE_RX_CMPLT:
+ brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RX_CMPLT\n");
+ brcmf_msgbuf_process_rx_complete(msgbuf, buf);
+ break;
+ default:
+ brcmf_err("Unsupported msgtype %d\n", msg->msgtype);
+ break;
+ }
+}
+
+
+static void brcmf_msgbuf_process_rx(struct brcmf_msgbuf *msgbuf,
+ struct brcmf_commonring *commonring)
+{
+ void *buf;
+ u16 count;
+
+again:
+ buf = brcmf_commonring_get_read_ptr(commonring, &count);
+ if (buf == NULL)
+ return;
+
+ while (count) {
+ brcmf_msgbuf_process_msgtype(msgbuf,
+ buf + msgbuf->rx_dataoffset);
+ buf += brcmf_commonring_len_item(commonring);
+ count--;
+ }
+ brcmf_commonring_read_complete(commonring);
+
+ if (commonring->r_ptr == 0)
+ goto again;
+}
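
brcmf_msgbuf_process_rx() above consumes a circular ring in contiguous chunks: each pass handles the items up to the physical end of the buffer, and when the read pointer wraps to 0 it loops once more for the remainder. A small standalone model of that wrap handling (hypothetical ring layout):

#include <stdio.h>

#define RING_DEPTH 8

struct ring {
    int buf[RING_DEPTH];
    unsigned int r_ptr;    /* next slot to read */
    unsigned int w_ptr;    /* next slot to write */
};

static int *ring_read_chunk(struct ring *r, unsigned int *count)
{
    if (r->r_ptr == r->w_ptr)
        return NULL;                            /* empty */
    if (r->w_ptr > r->r_ptr)
        *count = r->w_ptr - r->r_ptr;           /* single chunk */
    else
        *count = RING_DEPTH - r->r_ptr;         /* up to the end */
    return &r->buf[r->r_ptr];
}

static void ring_read_done(struct ring *r, unsigned int count)
{
    r->r_ptr = (r->r_ptr + count) % RING_DEPTH;
}

int main(void)
{
    struct ring r = { { 0 }, 6, 2 };    /* 4 items, wrapping at 8 */
    unsigned int count;
    int *chunk;

again:
    chunk = ring_read_chunk(&r, &count);
    if (chunk) {
        printf("chunk of %u items at offset %ld\n",
               count, (long)(chunk - r.buf));
        ring_read_done(&r, count);
        if (r.r_ptr == 0)       /* wrapped: pick up the rest */
            goto again;
    }
    return 0;
}
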
+
+
+int brcmf_proto_msgbuf_rx_trigger(struct device *dev)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
+ struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
+ void *buf;
+ u32 flowid;
+
+ buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
+ brcmf_msgbuf_process_rx(msgbuf, buf);
+ buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
+ brcmf_msgbuf_process_rx(msgbuf, buf);
+ buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
+ brcmf_msgbuf_process_rx(msgbuf, buf);
+
+ for_each_set_bit(flowid, msgbuf->txstatus_done_map,
+ msgbuf->nrof_flowrings) {
+ clear_bit(flowid, msgbuf->txstatus_done_map);
+ if (brcmf_flowring_qlen(msgbuf->flow, flowid))
+ brcmf_msgbuf_schedule_txdata(msgbuf, flowid);
+ }
+
+ return 0;
+}
+
+
+void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid)
+{
+ struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
+ struct msgbuf_tx_flowring_delete_req *delete;
+ struct brcmf_commonring *commonring;
+ void *ret_ptr;
+ u8 ifidx;
+ int err;
+
+ commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
+ brcmf_commonring_lock(commonring);
+ ret_ptr = brcmf_commonring_reserve_for_write(commonring);
+ if (!ret_ptr) {
+ brcmf_err("FW unaware, flowring will be removed !!\n");
+ brcmf_commonring_unlock(commonring);
+ brcmf_msgbuf_remove_flowring(msgbuf, flowid);
+ return;
+ }
+
+ delete = (struct msgbuf_tx_flowring_delete_req *)ret_ptr;
+
+ ifidx = brcmf_flowring_ifidx_get(msgbuf->flow, flowid);
+
+ delete->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
+ delete->msg.ifidx = ifidx;
+ delete->msg.request_id = 0;
+
+ delete->flow_ring_id = cpu_to_le16(flowid +
+ BRCMF_NROF_H2D_COMMON_MSGRINGS);
+ delete->reason = 0;
+
+ brcmf_dbg(MSGBUF, "Send Flow Delete Req flow ID %d, ifindex %d\n",
+ flowid, ifidx);
+
+ err = brcmf_commonring_write_complete(commonring);
+ brcmf_commonring_unlock(commonring);
+ if (err) {
+ brcmf_err("Failed to submit RING_DELETE, flowring will be removed\n");
+ brcmf_msgbuf_remove_flowring(msgbuf, flowid);
+ }
+}
+
+
+int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
+{
+ struct brcmf_bus_msgbuf *if_msgbuf;
+ struct brcmf_msgbuf *msgbuf;
+ long long address;
+ u32 count;
+
+ if_msgbuf = drvr->bus_if->msgbuf;
+ msgbuf = kzalloc(sizeof(*msgbuf), GFP_ATOMIC);
+ if (!msgbuf)
+ goto fail;
+
+ msgbuf->txflow_wq = create_singlethread_workqueue("msgbuf_txflow");
+ if (msgbuf->txflow_wq == NULL) {
+ brcmf_err("workqueue creation failed\n");
+ goto fail;
+ }
+ INIT_WORK(&msgbuf->txflow_work, brcmf_msgbuf_txflow_worker);
+ count = BITS_TO_LONGS(if_msgbuf->nrof_flowrings) * sizeof(unsigned long);
+ msgbuf->flow_map = kzalloc(count, GFP_ATOMIC);
+ if (!msgbuf->flow_map)
+ goto fail;
+
+ msgbuf->txstatus_done_map = kzalloc(count, GFP_ATOMIC);
+ if (!msgbuf->txstatus_done_map)
+ goto fail;
+
+ msgbuf->drvr = drvr;
+ msgbuf->ioctbuf = dma_alloc_coherent(drvr->bus_if->dev,
+ BRCMF_TX_IOCTL_MAX_MSG_SIZE,
+ &msgbuf->ioctbuf_handle,
+ GFP_ATOMIC);
+ if (!msgbuf->ioctbuf)
+ goto fail;
+ address = (long long)(long)msgbuf->ioctbuf_handle;
+ msgbuf->ioctbuf_phys_hi = address >> 32;
+ msgbuf->ioctbuf_phys_lo = address & 0xffffffff;
+
+ drvr->proto->hdrpull = brcmf_msgbuf_hdrpull;
+ drvr->proto->query_dcmd = brcmf_msgbuf_query_dcmd;
+ drvr->proto->set_dcmd = brcmf_msgbuf_set_dcmd;
+ drvr->proto->txdata = brcmf_msgbuf_txdata;
+ drvr->proto->configure_addr_mode = brcmf_msgbuf_configure_addr_mode;
+ drvr->proto->delete_peer = brcmf_msgbuf_delete_peer;
+ drvr->proto->add_tdls_peer = brcmf_msgbuf_add_tdls_peer;
+ drvr->proto->pd = msgbuf;
+
+ init_waitqueue_head(&msgbuf->ioctl_resp_wait);
+
+ msgbuf->commonrings =
+ (struct brcmf_commonring **)if_msgbuf->commonrings;
+ msgbuf->flowrings = (struct brcmf_commonring **)if_msgbuf->flowrings;
+ msgbuf->nrof_flowrings = if_msgbuf->nrof_flowrings;
+ msgbuf->flowring_dma_handle = kzalloc(msgbuf->nrof_flowrings *
+ sizeof(*msgbuf->flowring_dma_handle), GFP_ATOMIC);
+
+ msgbuf->rx_dataoffset = if_msgbuf->rx_dataoffset;
+ msgbuf->max_rxbufpost = if_msgbuf->max_rxbufpost;
+
+ msgbuf->max_ioctlrespbuf = BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST;
+ msgbuf->max_eventbuf = BRCMF_MSGBUF_MAX_EVENTBUF_POST;
+
+ msgbuf->tx_pktids = brcmf_msgbuf_init_pktids(NR_TX_PKTIDS,
+ DMA_TO_DEVICE);
+ if (!msgbuf->tx_pktids)
+ goto fail;
+ msgbuf->rx_pktids = brcmf_msgbuf_init_pktids(NR_RX_PKTIDS,
+ DMA_FROM_DEVICE);
+ if (!msgbuf->rx_pktids)
+ goto fail;
+
+ msgbuf->flow = brcmf_flowring_attach(drvr->bus_if->dev,
+ if_msgbuf->nrof_flowrings);
+ if (!msgbuf->flow)
+ goto fail;
+
+
+ brcmf_dbg(MSGBUF, "Feeding buffers, rx data %d, rx event %d, rx ioctl resp %d\n",
+ msgbuf->max_rxbufpost, msgbuf->max_eventbuf,
+ msgbuf->max_ioctlrespbuf);
+ count = 0;
+ do {
+ brcmf_msgbuf_rxbuf_data_fill(msgbuf);
+ if (msgbuf->max_rxbufpost != msgbuf->rxbufpost)
+ msleep(10);
+ else
+ break;
+ count++;
+ } while (count < 10);
+ brcmf_msgbuf_rxbuf_event_post(msgbuf);
+ brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);
+
+ return 0;
+
+fail:
+ if (msgbuf) {
+ kfree(msgbuf->flow_map);
+ kfree(msgbuf->txstatus_done_map);
+ brcmf_msgbuf_release_pktids(msgbuf);
+ if (msgbuf->ioctbuf)
+ dma_free_coherent(drvr->bus_if->dev,
+ BRCMF_TX_IOCTL_MAX_MSG_SIZE,
+ msgbuf->ioctbuf,
+ msgbuf->ioctbuf_handle);
+ kfree(msgbuf);
+ }
+ return -ENOMEM;
+}
+
+
+void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr)
+{
+ struct brcmf_msgbuf *msgbuf;
+
+ brcmf_dbg(TRACE, "Enter\n");
+ if (drvr->proto->pd) {
+ msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
+
+ kfree(msgbuf->flow_map);
+ kfree(msgbuf->txstatus_done_map);
+ if (msgbuf->txflow_wq)
+ destroy_workqueue(msgbuf->txflow_wq);
+
+ brcmf_flowring_detach(msgbuf->flow);
+ dma_free_coherent(drvr->bus_if->dev,
+ BRCMF_TX_IOCTL_MAX_MSG_SIZE,
+ msgbuf->ioctbuf, msgbuf->ioctbuf_handle);
+ brcmf_msgbuf_release_pktids(msgbuf);
+ kfree(msgbuf);
+ drvr->proto->pd = NULL;
+ }
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h
new file mode 100644
index 000000000000..f901ae52bf2b
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h
@@ -0,0 +1,40 @@
+/* Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef BRCMFMAC_MSGBUF_H
+#define BRCMFMAC_MSGBUF_H
+
+
+#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM 20
+#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM 256
+#define BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM 20
+#define BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM 1024
+#define BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM 256
+#define BRCMF_H2D_TXFLOWRING_MAX_ITEM 512
+
+#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE 40
+#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE 32
+#define BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE 24
+#define BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE 16
+#define BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE 32
+#define BRCMF_H2D_TXFLOWRING_ITEMSIZE 48
+
+
+int brcmf_proto_msgbuf_rx_trigger(struct device *dev);
+int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr);
+void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr);
+void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid);
+
+
+#endif /* BRCMFMAC_MSGBUF_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/of.c b/drivers/net/wireless/brcm80211/brcmfmac/of.c
new file mode 100644
index 000000000000..f05f5270fec1
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/of.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/mmc/card.h>
+#include <linux/platform_data/brcmfmac-sdio.h>
+#include <linux/mmc/sdio_func.h>
+
+#include <defs.h>
+#include "dhd_dbg.h"
+#include "sdio_host.h"
+
+void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev)
+{
+ struct device *dev = sdiodev->dev;
+ struct device_node *np = dev->of_node;
+ int irq;
+ u32 irqf;
+ u32 val;
+
+ if (!np || !of_device_is_compatible(np, "brcm,bcm4329-fmac"))
+ return;
+
+ sdiodev->pdata = devm_kzalloc(dev, sizeof(*sdiodev->pdata), GFP_KERNEL);
+ if (!sdiodev->pdata)
+ return;
+
+	irq = irq_of_parse_and_map(np, 0);
+	if (!irq) {
+		brcmf_err("interrupt could not be mapped\n");
+		devm_kfree(dev, sdiodev->pdata);
+		return;
+	}
+ irqf = irqd_get_trigger_type(irq_get_irq_data(irq));
+
+ sdiodev->pdata->oob_irq_supported = true;
+ sdiodev->pdata->oob_irq_nr = irq;
+ sdiodev->pdata->oob_irq_flags = irqf;
+
+ if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0)
+ sdiodev->pdata->drive_strength = val;
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/of.h b/drivers/net/wireless/brcm80211/brcmfmac/of.h
new file mode 100644
index 000000000000..5f7c3550deda
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/of.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifdef CONFIG_OF
+void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev);
+#else
+static inline void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev)
+{
+}
+#endif /* CONFIG_OF */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
new file mode 100644
index 000000000000..bc972c0ba5f8
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
@@ -0,0 +1,1846 @@
+/* Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/unaligned/access_ok.h>
+#include <linux/interrupt.h>
+#include <linux/bcma/bcma.h>
+#include <linux/sched.h>
+
+#include <soc.h>
+#include <chipcommon.h>
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+#include <brcm_hw_ids.h>
+
+#include "dhd_dbg.h"
+#include "dhd_bus.h"
+#include "commonring.h"
+#include "msgbuf.h"
+#include "pcie.h"
+#include "firmware.h"
+#include "chip.h"
+
+
+enum brcmf_pcie_state {
+ BRCMFMAC_PCIE_STATE_DOWN,
+ BRCMFMAC_PCIE_STATE_UP
+};
+
+
+#define BRCMF_PCIE_43602_FW_NAME "brcm/brcmfmac43602-pcie.bin"
+#define BRCMF_PCIE_43602_NVRAM_NAME "brcm/brcmfmac43602-pcie.txt"
+#define BRCMF_PCIE_4354_FW_NAME "brcm/brcmfmac4354-pcie.bin"
+#define BRCMF_PCIE_4354_NVRAM_NAME "brcm/brcmfmac4354-pcie.txt"
+#define BRCMF_PCIE_4356_FW_NAME "brcm/brcmfmac4356-pcie.bin"
+#define BRCMF_PCIE_4356_NVRAM_NAME "brcm/brcmfmac4356-pcie.txt"
+#define BRCMF_PCIE_43570_FW_NAME "brcm/brcmfmac43570-pcie.bin"
+#define BRCMF_PCIE_43570_NVRAM_NAME "brcm/brcmfmac43570-pcie.txt"
+
+#define BRCMF_PCIE_FW_UP_TIMEOUT 2000 /* msec */
+
+#define BRCMF_PCIE_TCM_MAP_SIZE (4096 * 1024)
+#define BRCMF_PCIE_REG_MAP_SIZE (32 * 1024)
+
+/* backplane address space accessed by BAR0 */
+#define BRCMF_PCIE_BAR0_WINDOW 0x80
+#define BRCMF_PCIE_BAR0_REG_SIZE 0x1000
+#define BRCMF_PCIE_BAR0_WRAPPERBASE 0x70
+
+#define BRCMF_PCIE_BAR0_WRAPBASE_DMP_OFFSET 0x1000
+#define BRCMF_PCIE_BARO_PCIE_ENUM_OFFSET 0x2000
+
+#define BRCMF_PCIE_ARMCR4REG_BANKIDX 0x40
+#define BRCMF_PCIE_ARMCR4REG_BANKPDA 0x4C
+
+#define BRCMF_PCIE_REG_INTSTATUS 0x90
+#define BRCMF_PCIE_REG_INTMASK 0x94
+#define BRCMF_PCIE_REG_SBMBX 0x98
+
+#define BRCMF_PCIE_PCIE2REG_INTMASK 0x24
+#define BRCMF_PCIE_PCIE2REG_MAILBOXINT 0x48
+#define BRCMF_PCIE_PCIE2REG_MAILBOXMASK 0x4C
+#define BRCMF_PCIE_PCIE2REG_CONFIGADDR 0x120
+#define BRCMF_PCIE_PCIE2REG_CONFIGDATA 0x124
+#define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX 0x140
+
+#define BRCMF_PCIE_GENREV1 1
+#define BRCMF_PCIE_GENREV2 2
+
+#define BRCMF_PCIE2_INTA 0x01
+#define BRCMF_PCIE2_INTB 0x02
+
+#define BRCMF_PCIE_INT_0 0x01
+#define BRCMF_PCIE_INT_1 0x02
+#define BRCMF_PCIE_INT_DEF (BRCMF_PCIE_INT_0 | \
+ BRCMF_PCIE_INT_1)
+
+#define BRCMF_PCIE_MB_INT_FN0_0 0x0100
+#define BRCMF_PCIE_MB_INT_FN0_1 0x0200
+#define BRCMF_PCIE_MB_INT_D2H0_DB0 0x10000
+#define BRCMF_PCIE_MB_INT_D2H0_DB1 0x20000
+#define BRCMF_PCIE_MB_INT_D2H1_DB0 0x40000
+#define BRCMF_PCIE_MB_INT_D2H1_DB1 0x80000
+#define BRCMF_PCIE_MB_INT_D2H2_DB0 0x100000
+#define BRCMF_PCIE_MB_INT_D2H2_DB1 0x200000
+#define BRCMF_PCIE_MB_INT_D2H3_DB0 0x400000
+#define BRCMF_PCIE_MB_INT_D2H3_DB1 0x800000
+
+#define BRCMF_PCIE_MB_INT_D2H_DB (BRCMF_PCIE_MB_INT_D2H0_DB0 | \
+ BRCMF_PCIE_MB_INT_D2H0_DB1 | \
+ BRCMF_PCIE_MB_INT_D2H1_DB0 | \
+ BRCMF_PCIE_MB_INT_D2H1_DB1 | \
+ BRCMF_PCIE_MB_INT_D2H2_DB0 | \
+ BRCMF_PCIE_MB_INT_D2H2_DB1 | \
+ BRCMF_PCIE_MB_INT_D2H3_DB0 | \
+ BRCMF_PCIE_MB_INT_D2H3_DB1)
+
+#define BRCMF_PCIE_MIN_SHARED_VERSION 4
+#define BRCMF_PCIE_MAX_SHARED_VERSION 5
+#define BRCMF_PCIE_SHARED_VERSION_MASK 0x00FF
+#define BRCMF_PCIE_SHARED_TXPUSH_SUPPORT 0x4000
+
+#define BRCMF_PCIE_FLAGS_HTOD_SPLIT 0x4000
+#define BRCMF_PCIE_FLAGS_DTOH_SPLIT 0x8000
+
+#define BRCMF_SHARED_MAX_RXBUFPOST_OFFSET 34
+#define BRCMF_SHARED_RING_BASE_OFFSET 52
+#define BRCMF_SHARED_RX_DATAOFFSET_OFFSET 36
+#define BRCMF_SHARED_CONSOLE_ADDR_OFFSET 20
+#define BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET 40
+#define BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET 44
+#define BRCMF_SHARED_RING_INFO_ADDR_OFFSET 48
+#define BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET 52
+#define BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET 56
+#define BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET 64
+#define BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET 68
+
+#define BRCMF_RING_H2D_RING_COUNT_OFFSET 0
+#define BRCMF_RING_D2H_RING_COUNT_OFFSET 1
+#define BRCMF_RING_H2D_RING_MEM_OFFSET 4
+#define BRCMF_RING_H2D_RING_STATE_OFFSET 8
+
+#define BRCMF_RING_MEM_BASE_ADDR_OFFSET 8
+#define BRCMF_RING_MAX_ITEM_OFFSET 4
+#define BRCMF_RING_LEN_ITEMS_OFFSET 6
+#define BRCMF_RING_MEM_SZ 16
+#define BRCMF_RING_STATE_SZ 8
+
+#define BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET 4
+#define BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET 8
+#define BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET 12
+#define BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET 16
+#define BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET 0
+#define BRCMF_SHARED_RING_MAX_SUB_QUEUES 52
+
+#define BRCMF_DEF_MAX_RXBUFPOST 255
+
+#define BRCMF_CONSOLE_BUFADDR_OFFSET 8
+#define BRCMF_CONSOLE_BUFSIZE_OFFSET 12
+#define BRCMF_CONSOLE_WRITEIDX_OFFSET 16
+
+#define BRCMF_DMA_D2H_SCRATCH_BUF_LEN 8
+#define BRCMF_DMA_D2H_RINGUPD_BUF_LEN 1024
+
+#define BRCMF_D2H_DEV_D3_ACK 0x00000001
+#define BRCMF_D2H_DEV_DS_ENTER_REQ 0x00000002
+#define BRCMF_D2H_DEV_DS_EXIT_NOTE 0x00000004
+
+#define BRCMF_H2D_HOST_D3_INFORM 0x00000001
+#define BRCMF_H2D_HOST_DS_ACK 0x00000002
+
+#define BRCMF_PCIE_MBDATA_TIMEOUT 2000
+
+#define BRCMF_PCIE_CFGREG_STATUS_CMD 0x4
+#define BRCMF_PCIE_CFGREG_PM_CSR 0x4C
+#define BRCMF_PCIE_CFGREG_MSI_CAP 0x58
+#define BRCMF_PCIE_CFGREG_MSI_ADDR_L 0x5C
+#define BRCMF_PCIE_CFGREG_MSI_ADDR_H 0x60
+#define BRCMF_PCIE_CFGREG_MSI_DATA 0x64
+#define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL 0xBC
+#define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2 0xDC
+#define BRCMF_PCIE_CFGREG_RBAR_CTRL 0x228
+#define BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1 0x248
+#define BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG 0x4E0
+#define BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG 0x4F4
+#define BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB 3
+
+
+MODULE_FIRMWARE(BRCMF_PCIE_43602_FW_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_43602_NVRAM_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4354_FW_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4354_NVRAM_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4356_FW_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4356_NVRAM_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_43570_FW_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_43570_NVRAM_NAME);
+
+
+struct brcmf_pcie_console {
+ u32 base_addr;
+ u32 buf_addr;
+ u32 bufsize;
+ u32 read_idx;
+ u8 log_str[256];
+ u8 log_idx;
+};
+
+struct brcmf_pcie_shared_info {
+ u32 tcm_base_address;
+ u32 flags;
+ struct brcmf_pcie_ringbuf *commonrings[BRCMF_NROF_COMMON_MSGRINGS];
+ struct brcmf_pcie_ringbuf *flowrings;
+ u16 max_rxbufpost;
+ u32 nrof_flowrings;
+ u32 rx_dataoffset;
+ u32 htod_mb_data_addr;
+ u32 dtoh_mb_data_addr;
+ u32 ring_info_addr;
+ struct brcmf_pcie_console console;
+ void *scratch;
+ dma_addr_t scratch_dmahandle;
+ void *ringupd;
+ dma_addr_t ringupd_dmahandle;
+};
+
+struct brcmf_pcie_core_info {
+ u32 base;
+ u32 wrapbase;
+};
+
+struct brcmf_pciedev_info {
+ enum brcmf_pcie_state state;
+ bool in_irq;
+ bool irq_requested;
+ struct pci_dev *pdev;
+ char fw_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
+ char nvram_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
+ void __iomem *regs;
+ void __iomem *tcm;
+ u32 tcm_size;
+ u32 ram_base;
+ u32 ram_size;
+ struct brcmf_chip *ci;
+ u32 coreid;
+ u32 generic_corerev;
+ struct brcmf_pcie_shared_info shared;
+ void (*ringbell)(struct brcmf_pciedev_info *devinfo);
+ wait_queue_head_t mbdata_resp_wait;
+ bool mbdata_completed;
+ bool irq_allocated;
+};
+
+struct brcmf_pcie_ringbuf {
+ struct brcmf_commonring commonring;
+ dma_addr_t dma_handle;
+ u32 w_idx_addr;
+ u32 r_idx_addr;
+ struct brcmf_pciedev_info *devinfo;
+ u8 id;
+};
+
+
+static const u32 brcmf_ring_max_item[BRCMF_NROF_COMMON_MSGRINGS] = {
+ BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM,
+ BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM,
+ BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM,
+ BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM,
+ BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM
+};
+
+static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
+ BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
+ BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
+ BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
+ BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE,
+ BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE
+};
+
+
+/* DMA flushing needs an implementation for MIPS and ARM platforms and
+ * should be moved into a utility module. Note that this is not real cache
+ * flushing: the buffers live in non-cached memory, so only write buffers
+ * should have to be drained, though this may differ per platform.
+ */
+#define brcmf_dma_flush(addr, len)
+#define brcmf_dma_invalidate_cache(addr, len)
+
+
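+/* Low-level accessors: the _reg helpers access the register space mapped
+ * through BAR0 (whichever core the BAR0 window currently points at), the
+ * _tcm helpers access device memory mapped through BAR1, and the _ram
+ * helpers address device RAM relative to the chip's rambase.
+ */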
+static u32
+brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
+{
+ void __iomem *address = devinfo->regs + reg_offset;
+
+ return (ioread32(address));
+}
+
+
+static void
+brcmf_pcie_write_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset,
+ u32 value)
+{
+ void __iomem *address = devinfo->regs + reg_offset;
+
+ iowrite32(value, address);
+}
+
+
+static u8
+brcmf_pcie_read_tcm8(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
+{
+ void __iomem *address = devinfo->tcm + mem_offset;
+
+ return (ioread8(address));
+}
+
+
+static u16
+brcmf_pcie_read_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
+{
+ void __iomem *address = devinfo->tcm + mem_offset;
+
+ return (ioread16(address));
+}
+
+
+static void
+brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
+ u16 value)
+{
+ void __iomem *address = devinfo->tcm + mem_offset;
+
+ iowrite16(value, address);
+}
+
+
+static u32
+brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
+{
+ void __iomem *address = devinfo->tcm + mem_offset;
+
+ return (ioread32(address));
+}
+
+
+static void
+brcmf_pcie_write_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
+ u32 value)
+{
+ void __iomem *address = devinfo->tcm + mem_offset;
+
+ iowrite32(value, address);
+}
+
+
+static u32
+brcmf_pcie_read_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
+{
+ void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
+
+ return (ioread32(addr));
+}
+
+
+static void
+brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
+ u32 value)
+{
+ void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
+
+ iowrite32(value, addr);
+}
+
+
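+/* Copy a host buffer into device memory through the BAR1 window, using
+ * 32-bit, 16-bit or byte-wide writes depending on the alignment of the
+ * source, the destination and the length.
+ */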
+static void
+brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
+ void *srcaddr, u32 len)
+{
+ void __iomem *address = devinfo->tcm + mem_offset;
+ __le32 *src32;
+ __le16 *src16;
+ u8 *src8;
+
+ if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) {
+ if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) {
+ src8 = (u8 *)srcaddr;
+ while (len) {
+ iowrite8(*src8, address);
+ address++;
+ src8++;
+ len--;
+ }
+ } else {
+ len = len / 2;
+ src16 = (__le16 *)srcaddr;
+ while (len) {
+ iowrite16(le16_to_cpu(*src16), address);
+ address += 2;
+ src16++;
+ len--;
+ }
+ }
+ } else {
+ len = len / 4;
+ src32 = (__le32 *)srcaddr;
+ while (len) {
+ iowrite32(le32_to_cpu(*src32), address);
+ address += 4;
+ src32++;
+ len--;
+ }
+ }
+}
+
+
+#define WRITECC32(devinfo, reg, value) brcmf_pcie_write_reg32(devinfo, \
+ CHIPCREGOFFS(reg), value)
+
+
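+/* Select a backplane core by programming its base address into the BAR0
+ * window register; the value is read back and rewritten once if it did
+ * not take effect.
+ */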
+static void
+brcmf_pcie_select_core(struct brcmf_pciedev_info *devinfo, u16 coreid)
+{
+ const struct pci_dev *pdev = devinfo->pdev;
+ struct brcmf_core *core;
+ u32 bar0_win;
+
+ core = brcmf_chip_get_core(devinfo->ci, coreid);
+ if (core) {
+ bar0_win = core->base;
+ pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, bar0_win);
+ if (pci_read_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW,
+ &bar0_win) == 0) {
+ if (bar0_win != core->base) {
+ bar0_win = core->base;
+ pci_write_config_dword(pdev,
+ BRCMF_PCIE_BAR0_WINDOW,
+ bar0_win);
+ }
+ }
+ } else {
+ brcmf_err("Unsupported core selected %x\n", coreid);
+ }
+}
+
+
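+/* Reset the device: disable ASPM, issue a watchdog reset through the
+ * chipcommon core, restore the saved link status/control value and then
+ * read back and rewrite a fixed list of PCIe config registers.
+ */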
+static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo)
+{
+ u16 cfg_offset[] = { BRCMF_PCIE_CFGREG_STATUS_CMD,
+ BRCMF_PCIE_CFGREG_PM_CSR,
+ BRCMF_PCIE_CFGREG_MSI_CAP,
+ BRCMF_PCIE_CFGREG_MSI_ADDR_L,
+ BRCMF_PCIE_CFGREG_MSI_ADDR_H,
+ BRCMF_PCIE_CFGREG_MSI_DATA,
+ BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2,
+ BRCMF_PCIE_CFGREG_RBAR_CTRL,
+ BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1,
+ BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG,
+ BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG };
+ u32 i;
+ u32 val;
+ u32 lsc;
+
+ if (!devinfo->ci)
+ return;
+
+ brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
+ brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR,
+ BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL);
+ lsc = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
+ val = lsc & (~BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB);
+ brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, val);
+
+ brcmf_pcie_select_core(devinfo, BCMA_CORE_CHIPCOMMON);
+ WRITECC32(devinfo, watchdog, 4);
+ msleep(100);
+
+ brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
+ brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR,
+ BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL);
+ brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, lsc);
+
+ brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
+ for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) {
+ brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR,
+ cfg_offset[i]);
+ val = brcmf_pcie_read_reg32(devinfo,
+ BRCMF_PCIE_PCIE2REG_CONFIGDATA);
+ brcmf_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n",
+ cfg_offset[i], val);
+ brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA,
+ val);
+ }
+}
+
+
+static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo)
+{
+ u32 config;
+
+ brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
+ if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_INTMASK) != 0)
+ brcmf_pcie_reset_device(devinfo);
+ /* BAR1 window may not be sized properly */
+ brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
+ brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0);
+ config = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
+ brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, config);
+
+ device_wakeup_enable(&devinfo->pdev->dev);
+}
+
+
+static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo)
+{
+ brcmf_chip_enter_download(devinfo->ci);
+
+ if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
+ brcmf_pcie_select_core(devinfo, BCMA_CORE_ARM_CR4);
+ brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
+ 5);
+ brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
+ 0);
+ brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
+ 7);
+ brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
+ 0);
+ }
+ return 0;
+}
+
+
+static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo,
+ u32 resetintr)
+{
+ struct brcmf_core *core;
+
+ if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
+ core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_INTERNAL_MEM);
+ brcmf_chip_resetcore(core, 0, 0, 0);
+ }
+
+ return !brcmf_chip_exit_download(devinfo->ci, resetintr);
+}
+
+
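+/* Host-to-device mailbox: write the request into the shared htod mailbox
+ * location in TCM, waiting up to a second for any pending transaction to
+ * clear, then ring the system backplane mailbox through PCI config space.
+ */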
+static void
+brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data)
+{
+ struct brcmf_pcie_shared_info *shared;
+ u32 addr;
+ u32 cur_htod_mb_data;
+ u32 i;
+
+ shared = &devinfo->shared;
+ addr = shared->htod_mb_data_addr;
+ cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
+
+ if (cur_htod_mb_data != 0)
+ brcmf_dbg(PCIE, "MB transaction is already pending 0x%04x\n",
+ cur_htod_mb_data);
+
+ i = 0;
+ while (cur_htod_mb_data != 0) {
+ msleep(10);
+ i++;
+ if (i > 100)
+ break;
+ cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
+ }
+
+ brcmf_pcie_write_tcm32(devinfo, addr, htod_mb_data);
+ pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
+ pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
+}
+
+
+static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo)
+{
+ struct brcmf_pcie_shared_info *shared;
+ u32 addr;
+ u32 dtoh_mb_data;
+
+ shared = &devinfo->shared;
+ addr = shared->dtoh_mb_data_addr;
+ dtoh_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
+
+ if (!dtoh_mb_data)
+ return;
+
+ brcmf_pcie_write_tcm32(devinfo, addr, 0);
+
+ brcmf_dbg(PCIE, "D2H_MB_DATA: 0x%04x\n", dtoh_mb_data);
+ if (dtoh_mb_data & BRCMF_D2H_DEV_DS_ENTER_REQ) {
+ brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP REQ\n");
+ brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_DS_ACK);
+ brcmf_dbg(PCIE, "D2H_MB_DATA: sent DEEP SLEEP ACK\n");
+ }
+ if (dtoh_mb_data & BRCMF_D2H_DEV_DS_EXIT_NOTE)
+ brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n");
+ if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK)
+ brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n");
+ if (waitqueue_active(&devinfo->mbdata_resp_wait)) {
+ devinfo->mbdata_completed = true;
+ wake_up(&devinfo->mbdata_resp_wait);
+ }
+}
+
+
+static void brcmf_pcie_bus_console_init(struct brcmf_pciedev_info *devinfo)
+{
+ struct brcmf_pcie_shared_info *shared;
+ struct brcmf_pcie_console *console;
+ u32 addr;
+
+ shared = &devinfo->shared;
+ console = &shared->console;
+ addr = shared->tcm_base_address + BRCMF_SHARED_CONSOLE_ADDR_OFFSET;
+ console->base_addr = brcmf_pcie_read_tcm32(devinfo, addr);
+
+ addr = console->base_addr + BRCMF_CONSOLE_BUFADDR_OFFSET;
+ console->buf_addr = brcmf_pcie_read_tcm32(devinfo, addr);
+ addr = console->base_addr + BRCMF_CONSOLE_BUFSIZE_OFFSET;
+ console->bufsize = brcmf_pcie_read_tcm32(devinfo, addr);
+
+ brcmf_dbg(PCIE, "Console: base %x, buf %x, size %d\n",
+ console->base_addr, console->buf_addr, console->bufsize);
+}
+
+
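+/* Drain the firmware console buffer from device memory and emit complete
+ * lines at the PCIE debug level, wrapping the read index at the buffer
+ * size and force-breaking lines that exceed the local log buffer.
+ */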
+static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo)
+{
+ struct brcmf_pcie_console *console;
+ u32 addr;
+ u8 ch;
+ u32 newidx;
+
+ console = &devinfo->shared.console;
+ addr = console->base_addr + BRCMF_CONSOLE_WRITEIDX_OFFSET;
+ newidx = brcmf_pcie_read_tcm32(devinfo, addr);
+ while (newidx != console->read_idx) {
+ addr = console->buf_addr + console->read_idx;
+ ch = brcmf_pcie_read_tcm8(devinfo, addr);
+ console->read_idx++;
+ if (console->read_idx == console->bufsize)
+ console->read_idx = 0;
+ if (ch == '\r')
+ continue;
+ console->log_str[console->log_idx] = ch;
+ console->log_idx++;
+ if ((ch != '\n') &&
+ (console->log_idx == (sizeof(console->log_str) - 2))) {
+ ch = '\n';
+ console->log_str[console->log_idx] = ch;
+ console->log_idx++;
+ }
+
+ if (ch == '\n') {
+ console->log_str[console->log_idx] = 0;
+ brcmf_dbg(PCIE, "CONSOLE: %s\n", console->log_str);
+ console->log_idx = 0;
+ }
+ }
+}
+
+
+static __used void brcmf_pcie_ringbell_v1(struct brcmf_pciedev_info *devinfo)
+{
+ u32 reg_value;
+
+ brcmf_dbg(PCIE, "RING !\n");
+ reg_value = brcmf_pcie_read_reg32(devinfo,
+ BRCMF_PCIE_PCIE2REG_MAILBOXINT);
+ reg_value |= BRCMF_PCIE2_INTB;
+ brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
+ reg_value);
+}
+
+
+static void brcmf_pcie_ringbell_v2(struct brcmf_pciedev_info *devinfo)
+{
+ brcmf_dbg(PCIE, "RING !\n");
+	/* Any arbitrary value will do, let's use 1 */
+ brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX, 1);
+}
+
+
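+/* Interrupt handling differs between PCIe core generations: generation 1
+ * uses the legacy INTSTATUS/INTMASK registers in PCI config space, while
+ * generation 2 uses the mailbox interrupt registers of the PCIe core.
+ */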
+static void brcmf_pcie_intr_disable(struct brcmf_pciedev_info *devinfo)
+{
+ if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1)
+ pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTMASK,
+ 0);
+ else
+ brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
+ 0);
+}
+
+
+static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo)
+{
+ if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1)
+ pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTMASK,
+ BRCMF_PCIE_INT_DEF);
+ else
+ brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
+ BRCMF_PCIE_MB_INT_D2H_DB |
+ BRCMF_PCIE_MB_INT_FN0_0 |
+ BRCMF_PCIE_MB_INT_FN0_1);
+}
+
+
+static irqreturn_t brcmf_pcie_quick_check_isr_v1(int irq, void *arg)
+{
+ struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
+ u32 status;
+
+ status = 0;
+ pci_read_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTSTATUS, &status);
+ if (status) {
+ brcmf_pcie_intr_disable(devinfo);
+ brcmf_dbg(PCIE, "Enter\n");
+ return IRQ_WAKE_THREAD;
+ }
+ return IRQ_NONE;
+}
+
+
+static irqreturn_t brcmf_pcie_quick_check_isr_v2(int irq, void *arg)
+{
+ struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
+
+ if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT)) {
+ brcmf_pcie_intr_disable(devinfo);
+ brcmf_dbg(PCIE, "Enter\n");
+ return IRQ_WAKE_THREAD;
+ }
+ return IRQ_NONE;
+}
+
+
+static irqreturn_t brcmf_pcie_isr_thread_v1(int irq, void *arg)
+{
+ struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
+ const struct pci_dev *pdev = devinfo->pdev;
+ u32 status;
+
+ devinfo->in_irq = true;
+ status = 0;
+ pci_read_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, &status);
+ brcmf_dbg(PCIE, "Enter %x\n", status);
+ if (status) {
+ pci_write_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, status);
+ if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
+ brcmf_proto_msgbuf_rx_trigger(&devinfo->pdev->dev);
+ }
+ if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
+ brcmf_pcie_intr_enable(devinfo);
+ devinfo->in_irq = false;
+ return IRQ_HANDLED;
+}
+
+
+static irqreturn_t brcmf_pcie_isr_thread_v2(int irq, void *arg)
+{
+ struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
+ u32 status;
+
+ devinfo->in_irq = true;
+ status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
+ brcmf_dbg(PCIE, "Enter %x\n", status);
+ if (status) {
+ brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
+ status);
+ if (status & (BRCMF_PCIE_MB_INT_FN0_0 |
+ BRCMF_PCIE_MB_INT_FN0_1))
+ brcmf_pcie_handle_mb_data(devinfo);
+ if (status & BRCMF_PCIE_MB_INT_D2H_DB) {
+ if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
+ brcmf_proto_msgbuf_rx_trigger(
+ &devinfo->pdev->dev);
+ }
+ }
+ brcmf_pcie_bus_console_read(devinfo);
+ if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
+ brcmf_pcie_intr_enable(devinfo);
+ devinfo->in_irq = false;
+ return IRQ_HANDLED;
+}
+
+
+static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo)
+{
+ struct pci_dev *pdev;
+
+ pdev = devinfo->pdev;
+
+ brcmf_pcie_intr_disable(devinfo);
+
+ brcmf_dbg(PCIE, "Enter\n");
+	/* Is this a v1 or v2 PCIe core implementation? */
+ devinfo->irq_requested = false;
+ if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) {
+ if (request_threaded_irq(pdev->irq,
+ brcmf_pcie_quick_check_isr_v1,
+ brcmf_pcie_isr_thread_v1,
+ IRQF_SHARED, "brcmf_pcie_intr",
+ devinfo)) {
+ brcmf_err("Failed to request IRQ %d\n", pdev->irq);
+ return -EIO;
+ }
+ } else {
+ if (request_threaded_irq(pdev->irq,
+ brcmf_pcie_quick_check_isr_v2,
+ brcmf_pcie_isr_thread_v2,
+ IRQF_SHARED, "brcmf_pcie_intr",
+ devinfo)) {
+ brcmf_err("Failed to request IRQ %d\n", pdev->irq);
+ return -EIO;
+ }
+ }
+ devinfo->irq_requested = true;
+ devinfo->irq_allocated = true;
+ return 0;
+}
+
+
+static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
+{
+ struct pci_dev *pdev;
+ u32 status;
+ u32 count;
+
+ if (!devinfo->irq_allocated)
+ return;
+
+ pdev = devinfo->pdev;
+
+ brcmf_pcie_intr_disable(devinfo);
+ if (!devinfo->irq_requested)
+ return;
+ devinfo->irq_requested = false;
+ free_irq(pdev->irq, devinfo);
+
+ msleep(50);
+ count = 0;
+ while ((devinfo->in_irq) && (count < 20)) {
+ msleep(50);
+ count++;
+ }
+ if (devinfo->in_irq)
+ brcmf_err("Still in IRQ (processing) !!!\n");
+
+ if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) {
+ status = 0;
+ pci_read_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, &status);
+ pci_write_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, status);
+ } else {
+ status = brcmf_pcie_read_reg32(devinfo,
+ BRCMF_PCIE_PCIE2REG_MAILBOXINT);
+ brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
+ status);
+ }
+ devinfo->irq_allocated = false;
+}
+
+
+static int brcmf_pcie_ring_mb_write_rptr(void *ctx)
+{
+ struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
+ struct brcmf_pciedev_info *devinfo = ring->devinfo;
+ struct brcmf_commonring *commonring = &ring->commonring;
+
+ if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
+ return -EIO;
+
+ brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr,
+ commonring->w_ptr, ring->id);
+
+ brcmf_pcie_write_tcm16(devinfo, ring->r_idx_addr, commonring->r_ptr);
+
+ return 0;
+}
+
+
+static int brcmf_pcie_ring_mb_write_wptr(void *ctx)
+{
+ struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
+ struct brcmf_pciedev_info *devinfo = ring->devinfo;
+ struct brcmf_commonring *commonring = &ring->commonring;
+
+ if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
+ return -EIO;
+
+ brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr,
+ commonring->r_ptr, ring->id);
+
+ brcmf_pcie_write_tcm16(devinfo, ring->w_idx_addr, commonring->w_ptr);
+
+ return 0;
+}
+
+
+static int brcmf_pcie_ring_mb_ring_bell(void *ctx)
+{
+ struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
+ struct brcmf_pciedev_info *devinfo = ring->devinfo;
+
+ if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
+ return -EIO;
+
+ devinfo->ringbell(devinfo);
+
+ return 0;
+}
+
+
+static int brcmf_pcie_ring_mb_update_rptr(void *ctx)
+{
+ struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
+ struct brcmf_pciedev_info *devinfo = ring->devinfo;
+ struct brcmf_commonring *commonring = &ring->commonring;
+
+ if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
+ return -EIO;
+
+ commonring->r_ptr = brcmf_pcie_read_tcm16(devinfo, ring->r_idx_addr);
+
+ brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr,
+ commonring->w_ptr, ring->id);
+
+ return 0;
+}
+
+
+static int brcmf_pcie_ring_mb_update_wptr(void *ctx)
+{
+ struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
+ struct brcmf_pciedev_info *devinfo = ring->devinfo;
+ struct brcmf_commonring *commonring = &ring->commonring;
+
+ if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
+ return -EIO;
+
+ commonring->w_ptr = brcmf_pcie_read_tcm16(devinfo, ring->w_idx_addr);
+
+ brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr,
+ commonring->r_ptr, ring->id);
+
+ return 0;
+}
+
+
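+/* Allocate a zeroed coherent DMA buffer for the device and publish its
+ * 64-bit physical address (low word first) at the given TCM location so
+ * the firmware can find it.
+ */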
+static void *
+brcmf_pcie_init_dmabuffer_for_device(struct brcmf_pciedev_info *devinfo,
+ u32 size, u32 tcm_dma_phys_addr,
+ dma_addr_t *dma_handle)
+{
+ void *ring;
+ long long address;
+
+ ring = dma_alloc_coherent(&devinfo->pdev->dev, size, dma_handle,
+ GFP_KERNEL);
+ if (!ring)
+ return NULL;
+
+ address = (long long)(long)*dma_handle;
+ brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr,
+ address & 0xffffffff);
+ brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32);
+
+ memset(ring, 0, size);
+
+ return (ring);
+}
+
+
+static struct brcmf_pcie_ringbuf *
+brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id,
+ u32 tcm_ring_phys_addr)
+{
+ void *dma_buf;
+ dma_addr_t dma_handle;
+ struct brcmf_pcie_ringbuf *ring;
+ u32 size;
+ u32 addr;
+
+ size = brcmf_ring_max_item[ring_id] * brcmf_ring_itemsize[ring_id];
+ dma_buf = brcmf_pcie_init_dmabuffer_for_device(devinfo, size,
+ tcm_ring_phys_addr + BRCMF_RING_MEM_BASE_ADDR_OFFSET,
+ &dma_handle);
+ if (!dma_buf)
+ return NULL;
+
+ addr = tcm_ring_phys_addr + BRCMF_RING_MAX_ITEM_OFFSET;
+ brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_max_item[ring_id]);
+ addr = tcm_ring_phys_addr + BRCMF_RING_LEN_ITEMS_OFFSET;
+ brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_itemsize[ring_id]);
+
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring) {
+ dma_free_coherent(&devinfo->pdev->dev, size, dma_buf,
+ dma_handle);
+ return NULL;
+ }
+ brcmf_commonring_config(&ring->commonring, brcmf_ring_max_item[ring_id],
+ brcmf_ring_itemsize[ring_id], dma_buf);
+ ring->dma_handle = dma_handle;
+ ring->devinfo = devinfo;
+ brcmf_commonring_register_cb(&ring->commonring,
+ brcmf_pcie_ring_mb_ring_bell,
+ brcmf_pcie_ring_mb_update_rptr,
+ brcmf_pcie_ring_mb_update_wptr,
+ brcmf_pcie_ring_mb_write_rptr,
+ brcmf_pcie_ring_mb_write_wptr, ring);
+
+ return (ring);
+}
+
+
+static void brcmf_pcie_release_ringbuffer(struct device *dev,
+ struct brcmf_pcie_ringbuf *ring)
+{
+ void *dma_buf;
+ u32 size;
+
+ if (!ring)
+ return;
+
+ dma_buf = ring->commonring.buf_addr;
+ if (dma_buf) {
+ size = ring->commonring.depth * ring->commonring.item_len;
+ dma_free_coherent(dev, size, dma_buf, ring->dma_handle);
+ }
+ kfree(ring);
+}
+
+
+static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo)
+{
+ u32 i;
+
+ for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
+ brcmf_pcie_release_ringbuffer(&devinfo->pdev->dev,
+ devinfo->shared.commonrings[i]);
+ devinfo->shared.commonrings[i] = NULL;
+ }
+ kfree(devinfo->shared.flowrings);
+ devinfo->shared.flowrings = NULL;
+}
+
+
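+/* Set up the message rings. The ring info structure in TCM supplies the
+ * read/write index locations and the per-ring memory descriptors; DMA
+ * ring buffers are allocated for the common H2D and D2H rings, while the
+ * flow rings are only prepared (callbacks and index locations) here.
+ */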
+static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
+{
+ struct brcmf_pcie_ringbuf *ring;
+ struct brcmf_pcie_ringbuf *rings;
+ u32 ring_addr;
+ u32 d2h_w_idx_ptr;
+ u32 d2h_r_idx_ptr;
+ u32 h2d_w_idx_ptr;
+ u32 h2d_r_idx_ptr;
+ u32 addr;
+ u32 ring_mem_ptr;
+ u32 i;
+ u16 max_sub_queues;
+
+ ring_addr = devinfo->shared.ring_info_addr;
+ brcmf_dbg(PCIE, "Base ring addr = 0x%08x\n", ring_addr);
+
+ addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET;
+ d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+ addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET;
+ d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+ addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET;
+ h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+ addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET;
+ h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+
+ addr = ring_addr + BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET;
+ ring_mem_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+
+ for (i = 0; i < BRCMF_NROF_H2D_COMMON_MSGRINGS; i++) {
+ ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
+ if (!ring)
+ goto fail;
+ ring->w_idx_addr = h2d_w_idx_ptr;
+ ring->r_idx_addr = h2d_r_idx_ptr;
+ ring->id = i;
+ devinfo->shared.commonrings[i] = ring;
+
+ h2d_w_idx_ptr += sizeof(u32);
+ h2d_r_idx_ptr += sizeof(u32);
+ ring_mem_ptr += BRCMF_RING_MEM_SZ;
+ }
+
+ for (i = BRCMF_NROF_H2D_COMMON_MSGRINGS;
+ i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
+ ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
+ if (!ring)
+ goto fail;
+ ring->w_idx_addr = d2h_w_idx_ptr;
+ ring->r_idx_addr = d2h_r_idx_ptr;
+ ring->id = i;
+ devinfo->shared.commonrings[i] = ring;
+
+ d2h_w_idx_ptr += sizeof(u32);
+ d2h_r_idx_ptr += sizeof(u32);
+ ring_mem_ptr += BRCMF_RING_MEM_SZ;
+ }
+
+ addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES;
+ max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr);
+ devinfo->shared.nrof_flowrings =
+ max_sub_queues - BRCMF_NROF_H2D_COMMON_MSGRINGS;
+ rings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*ring),
+ GFP_KERNEL);
+ if (!rings)
+ goto fail;
+
+ brcmf_dbg(PCIE, "Nr of flowrings is %d\n",
+ devinfo->shared.nrof_flowrings);
+
+ for (i = 0; i < devinfo->shared.nrof_flowrings; i++) {
+ ring = &rings[i];
+ ring->devinfo = devinfo;
+ ring->id = i + BRCMF_NROF_COMMON_MSGRINGS;
+ brcmf_commonring_register_cb(&ring->commonring,
+ brcmf_pcie_ring_mb_ring_bell,
+ brcmf_pcie_ring_mb_update_rptr,
+ brcmf_pcie_ring_mb_update_wptr,
+ brcmf_pcie_ring_mb_write_rptr,
+ brcmf_pcie_ring_mb_write_wptr,
+ ring);
+ ring->w_idx_addr = h2d_w_idx_ptr;
+ ring->r_idx_addr = h2d_r_idx_ptr;
+ h2d_w_idx_ptr += sizeof(u32);
+ h2d_r_idx_ptr += sizeof(u32);
+ }
+ devinfo->shared.flowrings = rings;
+
+ return 0;
+
+fail:
+ brcmf_err("Allocating commonring buffers failed\n");
+ brcmf_pcie_release_ringbuffers(devinfo);
+ return -ENOMEM;
+}
+
+
+static void
+brcmf_pcie_release_scratchbuffers(struct brcmf_pciedev_info *devinfo)
+{
+ if (devinfo->shared.scratch)
+ dma_free_coherent(&devinfo->pdev->dev,
+ BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
+ devinfo->shared.scratch,
+ devinfo->shared.scratch_dmahandle);
+ if (devinfo->shared.ringupd)
+ dma_free_coherent(&devinfo->pdev->dev,
+ BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
+ devinfo->shared.ringupd,
+ devinfo->shared.ringupd_dmahandle);
+}
+
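+/* Allocate the D2H scratch and ring-update buffers and write their DMA
+ * addresses and lengths into the shared area so the firmware can use
+ * them.
+ */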
+static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
+{
+ long long address;
+ u32 addr;
+
+ devinfo->shared.scratch = dma_alloc_coherent(&devinfo->pdev->dev,
+ BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
+ &devinfo->shared.scratch_dmahandle, GFP_KERNEL);
+ if (!devinfo->shared.scratch)
+ goto fail;
+
+ memset(devinfo->shared.scratch, 0, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
+ brcmf_dma_flush(devinfo->shared.scratch, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
+
+ addr = devinfo->shared.tcm_base_address +
+ BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
+ address = (long long)(long)devinfo->shared.scratch_dmahandle;
+ brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
+ brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
+ addr = devinfo->shared.tcm_base_address +
+ BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET;
+ brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
+
+ devinfo->shared.ringupd = dma_alloc_coherent(&devinfo->pdev->dev,
+ BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
+ &devinfo->shared.ringupd_dmahandle, GFP_KERNEL);
+ if (!devinfo->shared.ringupd)
+ goto fail;
+
+ memset(devinfo->shared.ringupd, 0, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
+ brcmf_dma_flush(devinfo->shared.ringupd, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
+
+ addr = devinfo->shared.tcm_base_address +
+ BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
+ address = (long long)(long)devinfo->shared.ringupd_dmahandle;
+ brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
+ brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
+ addr = devinfo->shared.tcm_base_address +
+ BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET;
+ brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
+ return 0;
+
+fail:
+ brcmf_err("Allocating scratch buffers failed\n");
+ brcmf_pcie_release_scratchbuffers(devinfo);
+ return -ENOMEM;
+}
+
+
+static void brcmf_pcie_down(struct device *dev)
+{
+}
+
+
+static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb)
+{
+ return 0;
+}
+
+
+static int brcmf_pcie_tx_ctlpkt(struct device *dev, unsigned char *msg,
+ uint len)
+{
+ return 0;
+}
+
+
+static int brcmf_pcie_rx_ctlpkt(struct device *dev, unsigned char *msg,
+ uint len)
+{
+ return 0;
+}
+
+
+static struct brcmf_bus_ops brcmf_pcie_bus_ops = {
+ .txdata = brcmf_pcie_tx,
+ .stop = brcmf_pcie_down,
+ .txctl = brcmf_pcie_tx_ctlpkt,
+ .rxctl = brcmf_pcie_rx_ctlpkt,
+};
+
+
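+/* Parse the shared area placed in device RAM by the firmware: check the
+ * protocol version and flags, then pick up the rx buffer limits, mailbox
+ * locations, ring info address and console layout.
+ */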
+static int
+brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
+ u32 sharedram_addr)
+{
+ struct brcmf_pcie_shared_info *shared;
+ u32 addr;
+ u32 version;
+
+ shared = &devinfo->shared;
+ shared->tcm_base_address = sharedram_addr;
+
+ shared->flags = brcmf_pcie_read_tcm32(devinfo, sharedram_addr);
+ version = shared->flags & BRCMF_PCIE_SHARED_VERSION_MASK;
+ brcmf_dbg(PCIE, "PCIe protocol version %d\n", version);
+ if ((version > BRCMF_PCIE_MAX_SHARED_VERSION) ||
+ (version < BRCMF_PCIE_MIN_SHARED_VERSION)) {
+ brcmf_err("Unsupported PCIE version %d\n", version);
+ return -EINVAL;
+ }
+ if (shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT) {
+ brcmf_err("Unsupported legacy TX mode 0x%x\n",
+ shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT);
+ return -EINVAL;
+ }
+
+ addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET;
+ shared->max_rxbufpost = brcmf_pcie_read_tcm16(devinfo, addr);
+ if (shared->max_rxbufpost == 0)
+ shared->max_rxbufpost = BRCMF_DEF_MAX_RXBUFPOST;
+
+ addr = sharedram_addr + BRCMF_SHARED_RX_DATAOFFSET_OFFSET;
+ shared->rx_dataoffset = brcmf_pcie_read_tcm32(devinfo, addr);
+
+ addr = sharedram_addr + BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET;
+ shared->htod_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);
+
+ addr = sharedram_addr + BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET;
+ shared->dtoh_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);
+
+ addr = sharedram_addr + BRCMF_SHARED_RING_INFO_ADDR_OFFSET;
+ shared->ring_info_addr = brcmf_pcie_read_tcm32(devinfo, addr);
+
+ brcmf_dbg(PCIE, "max rx buf post %d, rx dataoffset %d\n",
+ shared->max_rxbufpost, shared->rx_dataoffset);
+
+ brcmf_pcie_bus_console_init(devinfo);
+
+ return 0;
+}
+
+
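+/* Select firmware and NVRAM file names based on the chip id, optionally
+ * prefixed with the path given in the brcmf_firmware_path module
+ * parameter.
+ */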
+static int brcmf_pcie_get_fwnames(struct brcmf_pciedev_info *devinfo)
+{
+ char *fw_name;
+ char *nvram_name;
+ uint fw_len, nv_len;
+ char end;
+
+ brcmf_dbg(PCIE, "Enter, chip 0x%04x chiprev %d\n", devinfo->ci->chip,
+ devinfo->ci->chiprev);
+
+ switch (devinfo->ci->chip) {
+ case BRCM_CC_43602_CHIP_ID:
+ fw_name = BRCMF_PCIE_43602_FW_NAME;
+ nvram_name = BRCMF_PCIE_43602_NVRAM_NAME;
+ break;
+ case BRCM_CC_4354_CHIP_ID:
+ fw_name = BRCMF_PCIE_4354_FW_NAME;
+ nvram_name = BRCMF_PCIE_4354_NVRAM_NAME;
+ break;
+ case BRCM_CC_4356_CHIP_ID:
+ fw_name = BRCMF_PCIE_4356_FW_NAME;
+ nvram_name = BRCMF_PCIE_4356_NVRAM_NAME;
+ break;
+ case BRCM_CC_43567_CHIP_ID:
+ case BRCM_CC_43569_CHIP_ID:
+ case BRCM_CC_43570_CHIP_ID:
+ fw_name = BRCMF_PCIE_43570_FW_NAME;
+ nvram_name = BRCMF_PCIE_43570_NVRAM_NAME;
+ break;
+ default:
+ brcmf_err("Unsupported chip 0x%04x\n", devinfo->ci->chip);
+ return -ENODEV;
+ }
+
+ fw_len = sizeof(devinfo->fw_name) - 1;
+ nv_len = sizeof(devinfo->nvram_name) - 1;
+ /* check if firmware path is provided by module parameter */
+ if (brcmf_firmware_path[0] != '\0') {
+ strncpy(devinfo->fw_name, brcmf_firmware_path, fw_len);
+ strncpy(devinfo->nvram_name, brcmf_firmware_path, nv_len);
+ fw_len -= strlen(devinfo->fw_name);
+ nv_len -= strlen(devinfo->nvram_name);
+
+ end = brcmf_firmware_path[strlen(brcmf_firmware_path) - 1];
+ if (end != '/') {
+ strncat(devinfo->fw_name, "/", fw_len);
+ strncat(devinfo->nvram_name, "/", nv_len);
+ fw_len--;
+ nv_len--;
+ }
+ }
+ strncat(devinfo->fw_name, fw_name, fw_len);
+ strncat(devinfo->nvram_name, nvram_name, nv_len);
+
+ return 0;
+}
+
+
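+/* Download the firmware image (and optional NVRAM) into device RAM and
+ * bring the ARM core out of reset. The last word of RAM serves as a
+ * handshake: it is sampled before the core is started and then polled
+ * until the running firmware replaces it with the shared area address.
+ */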
+static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
+ const struct firmware *fw, void *nvram,
+ u32 nvram_len)
+{
+ u32 sharedram_addr;
+ u32 sharedram_addr_written;
+ u32 loop_counter;
+ int err;
+ u32 address;
+ u32 resetintr;
+
+ devinfo->ringbell = brcmf_pcie_ringbell_v2;
+ devinfo->generic_corerev = BRCMF_PCIE_GENREV2;
+
+ brcmf_dbg(PCIE, "Halt ARM.\n");
+ err = brcmf_pcie_enter_download_state(devinfo);
+ if (err)
+ return err;
+
+ brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name);
+ brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase,
+ (void *)fw->data, fw->size);
+
+ resetintr = get_unaligned_le32(fw->data);
+ release_firmware(fw);
+
+	/* Clear the last 4 bytes of RAM; the firmware will write the shared
+	 * area address there, which tells us when it is up and running.
+	 */
+ brcmf_pcie_write_ram32(devinfo, devinfo->ci->ramsize - 4, 0);
+
+ if (nvram) {
+ brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name);
+ address = devinfo->ci->rambase + devinfo->ci->ramsize -
+ nvram_len;
+ brcmf_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len);
+ brcmf_fw_nvram_free(nvram);
+ } else {
+ brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
+ devinfo->nvram_name);
+ }
+
+ sharedram_addr_written = brcmf_pcie_read_ram32(devinfo,
+ devinfo->ci->ramsize -
+ 4);
+ brcmf_dbg(PCIE, "Bring ARM in running state\n");
+ err = brcmf_pcie_exit_download_state(devinfo, resetintr);
+ if (err)
+ return err;
+
+ brcmf_dbg(PCIE, "Wait for FW init\n");
+ sharedram_addr = sharedram_addr_written;
+ loop_counter = BRCMF_PCIE_FW_UP_TIMEOUT / 50;
+ while ((sharedram_addr == sharedram_addr_written) && (loop_counter)) {
+ msleep(50);
+ sharedram_addr = brcmf_pcie_read_ram32(devinfo,
+ devinfo->ci->ramsize -
+ 4);
+ loop_counter--;
+ }
+ if (sharedram_addr == sharedram_addr_written) {
+ brcmf_err("FW failed to initialize\n");
+ return -ENODEV;
+ }
+ brcmf_dbg(PCIE, "Shared RAM addr: 0x%08x\n", sharedram_addr);
+
+ return (brcmf_pcie_init_share_ram_info(devinfo, sharedram_addr));
+}
+
+
+static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
+{
+ struct pci_dev *pdev;
+ int err;
+ phys_addr_t bar0_addr, bar1_addr;
+ ulong bar1_size;
+
+ pdev = devinfo->pdev;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ brcmf_err("pci_enable_device failed err=%d\n", err);
+ return err;
+ }
+
+ pci_set_master(pdev);
+
+	/* BAR0 mapped address */
+	bar0_addr = pci_resource_start(pdev, 0);
+	/* BAR1 mapped address */
+	bar1_addr = pci_resource_start(pdev, 2);
+	/* read BAR1 mapped memory range */
+	bar1_size = pci_resource_len(pdev, 2);
+ if ((bar1_size == 0) || (bar1_addr == 0)) {
+ brcmf_err("BAR1 Not enabled, device size=%ld, addr=%#016llx\n",
+ bar1_size, (unsigned long long)bar1_addr);
+ return -EINVAL;
+ }
+
+ devinfo->regs = ioremap_nocache(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
+ devinfo->tcm = ioremap_nocache(bar1_addr, BRCMF_PCIE_TCM_MAP_SIZE);
+ devinfo->tcm_size = BRCMF_PCIE_TCM_MAP_SIZE;
+
+ if (!devinfo->regs || !devinfo->tcm) {
+ brcmf_err("ioremap() failed (%p,%p)\n", devinfo->regs,
+ devinfo->tcm);
+ return -EINVAL;
+ }
+ brcmf_dbg(PCIE, "Phys addr : reg space = %p base addr %#016llx\n",
+ devinfo->regs, (unsigned long long)bar0_addr);
+ brcmf_dbg(PCIE, "Phys addr : mem space = %p base addr %#016llx\n",
+ devinfo->tcm, (unsigned long long)bar1_addr);
+
+ return 0;
+}
+
+
+static void brcmf_pcie_release_resource(struct brcmf_pciedev_info *devinfo)
+{
+ if (devinfo->tcm)
+ iounmap(devinfo->tcm);
+ if (devinfo->regs)
+ iounmap(devinfo->regs);
+
+ pci_disable_device(devinfo->pdev);
+}
+
+
+static int brcmf_pcie_attach_bus(struct device *dev)
+{
+ int ret;
+
+ /* Attach to the common driver interface */
+ ret = brcmf_attach(dev);
+ if (ret) {
+ brcmf_err("brcmf_attach failed\n");
+ } else {
+ ret = brcmf_bus_start(dev);
+ if (ret)
+ brcmf_err("dongle is not responding\n");
+ }
+
+ return ret;
+}
+
+
+static u32 brcmf_pcie_buscore_prep_addr(const struct pci_dev *pdev, u32 addr)
+{
+ u32 ret_addr;
+
+ ret_addr = addr & (BRCMF_PCIE_BAR0_REG_SIZE - 1);
+ addr &= ~(BRCMF_PCIE_BAR0_REG_SIZE - 1);
+ pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, addr);
+
+ return ret_addr;
+}
+
+
+static u32 brcmf_pcie_buscore_read32(void *ctx, u32 addr)
+{
+ struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
+
+ addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
+ return brcmf_pcie_read_reg32(devinfo, addr);
+}
+
+
+static void brcmf_pcie_buscore_write32(void *ctx, u32 addr, u32 value)
+{
+ struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
+
+ addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
+ brcmf_pcie_write_reg32(devinfo, addr, value);
+}
+
+
+static int brcmf_pcie_buscoreprep(void *ctx)
+{
+ struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
+ int err;
+
+ err = brcmf_pcie_get_resource(devinfo);
+ if (err == 0) {
+		/* Set the chipcommon watchdog to reset all cores on the
+		 * chip, bringing the dongle back to a sane state.
+		 */
+ brcmf_pcie_buscore_write32(ctx, CORE_CC_REG(SI_ENUM_BASE,
+ watchdog), 4);
+ msleep(100);
+ }
+
+ return err;
+}
+
+
+static void brcmf_pcie_buscore_exitdl(void *ctx, struct brcmf_chip *chip,
+ u32 rstvec)
+{
+ struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
+
+ brcmf_pcie_write_tcm32(devinfo, 0, rstvec);
+}
+
+
+static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
+ .prepare = brcmf_pcie_buscoreprep,
+ .exit_dl = brcmf_pcie_buscore_exitdl,
+ .read32 = brcmf_pcie_buscore_read32,
+ .write32 = brcmf_pcie_buscore_write32,
+};
+
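+/* Firmware request callback: download firmware and NVRAM, set up the
+ * rings, scratch buffers and interrupts, hook the rings into the msgbuf
+ * layer and attach the common driver; on failure the driver is released
+ * from the device.
+ */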
+static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw,
+ void *nvram, u32 nvram_len)
+{
+ struct brcmf_bus *bus = dev_get_drvdata(dev);
+ struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie;
+ struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo;
+ struct brcmf_commonring **flowrings;
+ int ret;
+ u32 i;
+
+ brcmf_pcie_attach(devinfo);
+
+ ret = brcmf_pcie_download_fw_nvram(devinfo, fw, nvram, nvram_len);
+ if (ret)
+ goto fail;
+
+ devinfo->state = BRCMFMAC_PCIE_STATE_UP;
+
+ ret = brcmf_pcie_init_ringbuffers(devinfo);
+ if (ret)
+ goto fail;
+
+ ret = brcmf_pcie_init_scratchbuffers(devinfo);
+ if (ret)
+ goto fail;
+
+ brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
+ ret = brcmf_pcie_request_irq(devinfo);
+ if (ret)
+ goto fail;
+
+	/* hook the commonrings into the bus structure. */
+ for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++)
+ bus->msgbuf->commonrings[i] =
+ &devinfo->shared.commonrings[i]->commonring;
+
+ flowrings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(flowrings),
+ GFP_KERNEL);
+ if (!flowrings)
+ goto fail;
+
+ for (i = 0; i < devinfo->shared.nrof_flowrings; i++)
+ flowrings[i] = &devinfo->shared.flowrings[i].commonring;
+ bus->msgbuf->flowrings = flowrings;
+
+ bus->msgbuf->rx_dataoffset = devinfo->shared.rx_dataoffset;
+ bus->msgbuf->max_rxbufpost = devinfo->shared.max_rxbufpost;
+ bus->msgbuf->nrof_flowrings = devinfo->shared.nrof_flowrings;
+
+ init_waitqueue_head(&devinfo->mbdata_resp_wait);
+
+ brcmf_pcie_intr_enable(devinfo);
+ if (brcmf_pcie_attach_bus(bus->dev) == 0)
+ return;
+
+ brcmf_pcie_bus_console_read(devinfo);
+
+fail:
+ device_release_driver(dev);
+}
+
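+/* PCI probe: attach the chip, allocate the bus structures and start an
+ * asynchronous firmware request; brcmf_pcie_setup() completes the
+ * bring-up once firmware and NVRAM have been loaded.
+ */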
+static int
+brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int ret;
+ struct brcmf_pciedev_info *devinfo;
+ struct brcmf_pciedev *pcie_bus_dev;
+ struct brcmf_bus *bus;
+
+ brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device);
+
+ ret = -ENOMEM;
+ devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
+ if (devinfo == NULL)
+ return ret;
+
+ devinfo->pdev = pdev;
+ pcie_bus_dev = NULL;
+ devinfo->ci = brcmf_chip_attach(devinfo, &brcmf_pcie_buscore_ops);
+ if (IS_ERR(devinfo->ci)) {
+ ret = PTR_ERR(devinfo->ci);
+ devinfo->ci = NULL;
+ goto fail;
+ }
+
+ pcie_bus_dev = kzalloc(sizeof(*pcie_bus_dev), GFP_KERNEL);
+ if (pcie_bus_dev == NULL) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ bus = kzalloc(sizeof(*bus), GFP_KERNEL);
+ if (!bus) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ bus->msgbuf = kzalloc(sizeof(*bus->msgbuf), GFP_KERNEL);
+ if (!bus->msgbuf) {
+ ret = -ENOMEM;
+ kfree(bus);
+ goto fail;
+ }
+
+ /* hook it all together. */
+ pcie_bus_dev->devinfo = devinfo;
+ pcie_bus_dev->bus = bus;
+ bus->dev = &pdev->dev;
+ bus->bus_priv.pcie = pcie_bus_dev;
+ bus->ops = &brcmf_pcie_bus_ops;
+ bus->proto_type = BRCMF_PROTO_MSGBUF;
+ bus->chip = devinfo->coreid;
+ dev_set_drvdata(&pdev->dev, bus);
+
+ ret = brcmf_pcie_get_fwnames(devinfo);
+ if (ret)
+ goto fail_bus;
+
+ ret = brcmf_fw_get_firmwares(bus->dev, BRCMF_FW_REQUEST_NVRAM |
+ BRCMF_FW_REQ_NV_OPTIONAL,
+ devinfo->fw_name, devinfo->nvram_name,
+ brcmf_pcie_setup);
+ if (ret == 0)
+ return 0;
+fail_bus:
+ kfree(bus->msgbuf);
+ kfree(bus);
+fail:
+ brcmf_err("failed %x:%x\n", pdev->vendor, pdev->device);
+ brcmf_pcie_release_resource(devinfo);
+ if (devinfo->ci)
+ brcmf_chip_detach(devinfo->ci);
+ kfree(pcie_bus_dev);
+ kfree(devinfo);
+ return ret;
+}
+
+
+static void
+brcmf_pcie_remove(struct pci_dev *pdev)
+{
+ struct brcmf_pciedev_info *devinfo;
+ struct brcmf_bus *bus;
+
+ brcmf_dbg(PCIE, "Enter\n");
+
+ bus = dev_get_drvdata(&pdev->dev);
+ if (bus == NULL)
+ return;
+
+ devinfo = bus->bus_priv.pcie->devinfo;
+
+ devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
+ if (devinfo->ci)
+ brcmf_pcie_intr_disable(devinfo);
+
+ brcmf_detach(&pdev->dev);
+
+ kfree(bus->bus_priv.pcie);
+ kfree(bus->msgbuf->flowrings);
+ kfree(bus->msgbuf);
+ kfree(bus);
+
+ brcmf_pcie_release_irq(devinfo);
+ brcmf_pcie_release_scratchbuffers(devinfo);
+ brcmf_pcie_release_ringbuffers(devinfo);
+ brcmf_pcie_reset_device(devinfo);
+ brcmf_pcie_release_resource(devinfo);
+
+ if (devinfo->ci)
+ brcmf_chip_detach(devinfo->ci);
+
+ kfree(devinfo);
+ dev_set_drvdata(&pdev->dev, NULL);
+}
+
+
+#ifdef CONFIG_PM
+
+
+static int brcmf_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct brcmf_pciedev_info *devinfo;
+ struct brcmf_bus *bus;
+ int err;
+
+ brcmf_dbg(PCIE, "Enter, state=%d, pdev=%p\n", state.event, pdev);
+
+ bus = dev_get_drvdata(&pdev->dev);
+ devinfo = bus->bus_priv.pcie->devinfo;
+
+ brcmf_bus_change_state(bus, BRCMF_BUS_DOWN);
+
+ devinfo->mbdata_completed = false;
+ brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D3_INFORM);
+
+ wait_event_timeout(devinfo->mbdata_resp_wait,
+ devinfo->mbdata_completed,
+ msecs_to_jiffies(BRCMF_PCIE_MBDATA_TIMEOUT));
+ if (!devinfo->mbdata_completed) {
+ brcmf_err("Timeout on response for entering D3 substate\n");
+ return -EIO;
+ }
+ brcmf_pcie_release_irq(devinfo);
+
+ err = pci_save_state(pdev);
+ if (err) {
+ brcmf_err("pci_save_state failed, err=%d\n", err);
+ return err;
+ }
+
+ brcmf_chip_detach(devinfo->ci);
+ devinfo->ci = NULL;
+
+ brcmf_pcie_remove(pdev);
+
+ return pci_prepare_to_sleep(pdev);
+}
+
+
+static int brcmf_pcie_resume(struct pci_dev *pdev)
+{
+ int err;
+
+ brcmf_dbg(PCIE, "Enter, pdev=%p\n", pdev);
+
+ err = pci_set_power_state(pdev, PCI_D0);
+ if (err) {
+ brcmf_err("pci_set_power_state failed, err=%d\n", err);
+ return err;
+ }
+ pci_restore_state(pdev);
+
+ err = brcmf_pcie_probe(pdev, NULL);
+ if (err)
+ brcmf_err("probe after resume failed, err=%d\n", err);
+
+ return err;
+}
+
+
+#endif /* CONFIG_PM */
+
+
+#define BRCMF_PCIE_DEVICE(dev_id) { BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
+ PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
+
+static struct pci_device_id brcmf_pcie_devid_table[] = {
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
+ { /* end: all zeroes */ }
+};
+
+
+MODULE_DEVICE_TABLE(pci, brcmf_pcie_devid_table);
+
+
+static struct pci_driver brcmf_pciedrvr = {
+ .node = {},
+ .name = KBUILD_MODNAME,
+ .id_table = brcmf_pcie_devid_table,
+ .probe = brcmf_pcie_probe,
+ .remove = brcmf_pcie_remove,
+#ifdef CONFIG_PM
+ .suspend = brcmf_pcie_suspend,
+ .resume = brcmf_pcie_resume
+#endif /* CONFIG_PM */
+};
+
+
+void brcmf_pcie_register(void)
+{
+ int err;
+
+ brcmf_dbg(PCIE, "Enter\n");
+ err = pci_register_driver(&brcmf_pciedrvr);
+ if (err)
+ brcmf_err("PCIE driver registration failed, err=%d\n", err);
+}
+
+
+void brcmf_pcie_exit(void)
+{
+ brcmf_dbg(PCIE, "Enter\n");
+ pci_unregister_driver(&brcmf_pciedrvr);
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.h b/drivers/net/wireless/brcm80211/brcmfmac/pcie.h
new file mode 100644
index 000000000000..6edaaf8ef5ce
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.h
@@ -0,0 +1,29 @@
+/* Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef BRCMFMAC_PCIE_H
+#define BRCMFMAC_PCIE_H
+
+
+struct brcmf_pciedev {
+ struct brcmf_bus *bus;
+ struct brcmf_pciedev_info *devinfo;
+};
+
+
+void brcmf_pcie_exit(void);
+void brcmf_pcie_register(void);
+
+
+#endif /* BRCMFMAC_PCIE_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/proto.c b/drivers/net/wireless/brcm80211/brcmfmac/proto.c
index b6b464184946..62b940723339 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/proto.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/proto.c
@@ -21,26 +21,40 @@
#include <brcmu_wifi.h>
#include "dhd.h"
+#include "dhd_bus.h"
#include "dhd_dbg.h"
#include "proto.h"
#include "bcdc.h"
+#include "msgbuf.h"
int brcmf_proto_attach(struct brcmf_pub *drvr)
{
struct brcmf_proto *proto;
+ brcmf_dbg(TRACE, "Enter\n");
+
proto = kzalloc(sizeof(*proto), GFP_ATOMIC);
if (!proto)
goto fail;
drvr->proto = proto;
- /* BCDC protocol is only protocol supported for the moment */
- if (brcmf_proto_bcdc_attach(drvr))
- goto fail;
+ if (drvr->bus_if->proto_type == BRCMF_PROTO_BCDC) {
+ if (brcmf_proto_bcdc_attach(drvr))
+ goto fail;
+ } else if (drvr->bus_if->proto_type == BRCMF_PROTO_MSGBUF) {
+ if (brcmf_proto_msgbuf_attach(drvr))
+ goto fail;
+ } else {
+ brcmf_err("Unsupported proto type %d\n",
+ drvr->bus_if->proto_type);
+ goto fail;
+ }
if ((proto->txdata == NULL) || (proto->hdrpull == NULL) ||
- (proto->query_dcmd == NULL) || (proto->set_dcmd == NULL)) {
+ (proto->query_dcmd == NULL) || (proto->set_dcmd == NULL) ||
+ (proto->configure_addr_mode == NULL) ||
+ (proto->delete_peer == NULL) || (proto->add_tdls_peer == NULL)) {
brcmf_err("Not all proto handlers have been installed\n");
goto fail;
}
@@ -54,8 +68,13 @@ fail:
void brcmf_proto_detach(struct brcmf_pub *drvr)
{
+ brcmf_dbg(TRACE, "Enter\n");
+
if (drvr->proto) {
- brcmf_proto_bcdc_detach(drvr);
+ if (drvr->bus_if->proto_type == BRCMF_PROTO_BCDC)
+ brcmf_proto_bcdc_detach(drvr);
+ else if (drvr->bus_if->proto_type == BRCMF_PROTO_MSGBUF)
+ brcmf_proto_msgbuf_detach(drvr);
kfree(drvr->proto);
drvr->proto = NULL;
}
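brcmf_proto_attach() above only succeeds once the selected backend has populated every handler in struct brcmf_proto. The sketch below shows the shape such a backend attach routine takes; the example_* callbacks are hypothetical placeholders standing in for the real BCDC/msgbuf implementations.

static int brcmf_proto_example_attach(struct brcmf_pub *drvr)
{
	struct brcmf_proto *proto = drvr->proto;

	/* every handler checked in brcmf_proto_attach() must be installed */
	proto->hdrpull = example_hdrpull;
	proto->query_dcmd = example_query_dcmd;
	proto->set_dcmd = example_set_dcmd;
	proto->txdata = example_txdata;
	proto->configure_addr_mode = example_configure_addr_mode;
	proto->delete_peer = example_delete_peer;
	proto->add_tdls_peer = example_add_tdls_peer;
	proto->pd = NULL;	/* backend-private state, if any */

	return 0;
}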
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/brcm80211/brcmfmac/proto.h
index 482fb0ba4a30..971172ff686c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/proto.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/proto.h
@@ -16,6 +16,13 @@
#ifndef BRCMFMAC_PROTO_H
#define BRCMFMAC_PROTO_H
+
+enum proto_addr_mode {
+ ADDR_INDIRECT = 0,
+ ADDR_DIRECT
+};
+
+
struct brcmf_proto {
int (*hdrpull)(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
struct sk_buff *skb);
@@ -25,6 +32,12 @@ struct brcmf_proto {
uint len);
int (*txdata)(struct brcmf_pub *drvr, int ifidx, u8 offset,
struct sk_buff *skb);
+ void (*configure_addr_mode)(struct brcmf_pub *drvr, int ifidx,
+ enum proto_addr_mode addr_mode);
+ void (*delete_peer)(struct brcmf_pub *drvr, int ifidx,
+ u8 peer[ETH_ALEN]);
+ void (*add_tdls_peer)(struct brcmf_pub *drvr, int ifidx,
+ u8 peer[ETH_ALEN]);
void *pd;
};
@@ -48,10 +61,26 @@ static inline int brcmf_proto_set_dcmd(struct brcmf_pub *drvr, int ifidx,
return drvr->proto->set_dcmd(drvr, ifidx, cmd, buf, len);
}
static inline int brcmf_proto_txdata(struct brcmf_pub *drvr, int ifidx,
- u8 offset, struct sk_buff *skb)
+ u8 offset, struct sk_buff *skb)
{
return drvr->proto->txdata(drvr, ifidx, offset, skb);
}
+static inline void
+brcmf_proto_configure_addr_mode(struct brcmf_pub *drvr, int ifidx,
+ enum proto_addr_mode addr_mode)
+{
+ drvr->proto->configure_addr_mode(drvr, ifidx, addr_mode);
+}
+static inline void
+brcmf_proto_delete_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
+{
+ drvr->proto->delete_peer(drvr, ifidx, peer);
+}
+static inline void
+brcmf_proto_add_tdls_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
+{
+ drvr->proto->add_tdls_peer(drvr, ifidx, peer);
+}
#endif /* BRCMFMAC_PROTO_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 6c5e585ccda9..f2d06cae366a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -74,12 +74,12 @@
#define SBSDIO_SPROM_DATA_HIGH 0x10003
/* sprom indirect access addr byte 0 */
#define SBSDIO_SPROM_ADDR_LOW 0x10004
-/* sprom indirect access addr byte 0 */
-#define SBSDIO_SPROM_ADDR_HIGH 0x10005
-/* xtal_pu (gpio) output */
-#define SBSDIO_CHIP_CTRL_DATA 0x10006
-/* xtal_pu (gpio) enable */
-#define SBSDIO_CHIP_CTRL_EN 0x10007
+/* gpio select */
+#define SBSDIO_GPIO_SELECT 0x10005
+/* gpio output */
+#define SBSDIO_GPIO_OUT 0x10006
+/* gpio enable */
+#define SBSDIO_GPIO_EN 0x10007
/* rev < 7, watermark for sdio device */
#define SBSDIO_WATERMARK 0x10008
/* control busy signal generation */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 48078a321716..02fe706fc9ec 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -35,6 +35,7 @@
#include "wl_cfg80211.h"
#include "feature.h"
#include "fwil.h"
+#include "proto.h"
#include "vendor.h"
#define BRCMF_SCAN_IE_LEN_MAX 2048
@@ -493,6 +494,22 @@ brcmf_configure_arp_offload(struct brcmf_if *ifp, bool enable)
return err;
}
+static void
+brcmf_cfg80211_update_proto_addr_mode(struct wireless_dev *wdev)
+{
+ struct net_device *ndev = wdev->netdev;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+
+ if ((wdev->iftype == NL80211_IFTYPE_ADHOC) ||
+ (wdev->iftype == NL80211_IFTYPE_AP) ||
+ (wdev->iftype == NL80211_IFTYPE_P2P_GO))
+ brcmf_proto_configure_addr_mode(ifp->drvr, ifp->ifidx,
+ ADDR_DIRECT);
+ else
+ brcmf_proto_configure_addr_mode(ifp->drvr, ifp->ifidx,
+ ADDR_INDIRECT);
+}
+
static bool brcmf_is_apmode(struct brcmf_cfg80211_vif *vif)
{
enum nl80211_iftype iftype;
@@ -512,6 +529,8 @@ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
u32 *flags,
struct vif_params *params)
{
+ struct wireless_dev *wdev;
+
brcmf_dbg(TRACE, "enter: %s type %d\n", name, type);
switch (type) {
case NL80211_IFTYPE_ADHOC:
@@ -525,7 +544,10 @@ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
case NL80211_IFTYPE_P2P_DEVICE:
- return brcmf_p2p_add_vif(wiphy, name, type, flags, params);
+ wdev = brcmf_p2p_add_vif(wiphy, name, type, flags, params);
+ if (!IS_ERR(wdev))
+ brcmf_cfg80211_update_proto_addr_mode(wdev);
+ return wdev;
case NL80211_IFTYPE_UNSPECIFIED:
default:
return ERR_PTR(-EINVAL);
@@ -720,6 +742,8 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
}
ndev->ieee80211_ptr->iftype = type;
+ brcmf_cfg80211_update_proto_addr_mode(&vif->wdev);
+
done:
brcmf_dbg(TRACE, "Exit\n");
@@ -4131,6 +4155,27 @@ static void brcmf_cfg80211_crit_proto_stop(struct wiphy *wiphy,
clear_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status);
}
+static s32
+brcmf_notify_tdls_peer_event(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e, void *data)
+{
+ switch (e->reason) {
+ case BRCMF_E_REASON_TDLS_PEER_DISCOVERED:
+ brcmf_dbg(TRACE, "TDLS Peer Discovered\n");
+ break;
+ case BRCMF_E_REASON_TDLS_PEER_CONNECTED:
+ brcmf_dbg(TRACE, "TDLS Peer Connected\n");
+ brcmf_proto_add_tdls_peer(ifp->drvr, ifp->ifidx, (u8 *)e->addr);
+ break;
+ case BRCMF_E_REASON_TDLS_PEER_DISCONNECTED:
+ brcmf_dbg(TRACE, "TDLS Peer Disconnected\n");
+ brcmf_proto_delete_peer(ifp->drvr, ifp->ifidx, (u8 *)e->addr);
+ break;
+ }
+
+ return 0;
+}
+
static int brcmf_convert_nl80211_tdls_oper(enum nl80211_tdls_operation oper)
{
int ret;
@@ -4525,6 +4570,13 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
struct ieee80211_channel *chan;
s32 err = 0;
+ if ((e->event_code == BRCMF_E_DEAUTH) ||
+ (e->event_code == BRCMF_E_DEAUTH_IND) ||
+ (e->event_code == BRCMF_E_DISASSOC_IND) ||
+ ((e->event_code == BRCMF_E_LINK) && (!e->flags))) {
+ brcmf_proto_delete_peer(ifp->drvr, ifp->ifidx, (u8 *)e->addr);
+ }
+
if (brcmf_is_apmode(ifp->vif)) {
err = brcmf_notify_connect_status_ap(cfg, ndev, e, data);
} else if (brcmf_is_linkup(e)) {
@@ -5660,6 +5712,9 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
if (err) {
brcmf_dbg(INFO, "TDLS not enabled (%d)\n", err);
wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_TDLS;
+ } else {
+ brcmf_fweh_register(cfg->pub, BRCMF_E_TDLS_PEER_EVENT,
+ brcmf_notify_tdls_peer_event);
}
return cfg;

diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index 64d1a7ba040c..af26e0de1e5c 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -38,8 +38,12 @@
#define BRCM_CC_4335_CHIP_ID 0x4335
#define BRCM_CC_4339_CHIP_ID 0x4339
#define BRCM_CC_4354_CHIP_ID 0x4354
+#define BRCM_CC_4356_CHIP_ID 0x4356
#define BRCM_CC_43566_CHIP_ID 43566
+#define BRCM_CC_43567_CHIP_ID 43567
#define BRCM_CC_43569_CHIP_ID 43569
+#define BRCM_CC_43570_CHIP_ID 43570
+#define BRCM_CC_43602_CHIP_ID 43602
/* SDIO Device IDs */
#define BRCM_SDIO_43143_DEVICE_ID BRCM_CC_43143_CHIP_ID
@@ -58,6 +62,13 @@
#define BRCM_USB_43569_DEVICE_ID 0xbd27
#define BRCM_USB_BCMFW_DEVICE_ID 0x0bdc
+/* PCIE Device IDs */
+#define BRCM_PCIE_4354_DEVICE_ID 0x43df
+#define BRCM_PCIE_4356_DEVICE_ID 0x43ec
+#define BRCM_PCIE_43567_DEVICE_ID 0x43d3
+#define BRCM_PCIE_43570_DEVICE_ID 0x43d9
+#define BRCM_PCIE_43602_DEVICE_ID 0x43ba
+
/* brcmsmac IDs */
#define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */
#define BCM43224_D11N_ID 0x4353 /* 43224 802.11n dualband device */
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 03de7467aecf..2c4fa49686ef 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -2980,7 +2980,8 @@ il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id)
 /* Driver private data, only for Tx (not command) queues,
* not shared with device. */
if (id != il->cmd_queue) {
- txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(struct skb *),
+ txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX,
+ sizeof(struct sk_buff *),
GFP_KERNEL);
if (!txq->skbs) {
IL_ERR("Fail to alloc skbs\n");
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index 26c66a126551..7929fac13e1c 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -72,5 +72,5 @@ source "drivers/nfc/pn544/Kconfig"
source "drivers/nfc/microread/Kconfig"
source "drivers/nfc/nfcmrvl/Kconfig"
source "drivers/nfc/st21nfca/Kconfig"
-
+source "drivers/nfc/st21nfcb/Kconfig"
endmenu
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index 23225b0287fd..6b23a2c6e34a 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_NFC_SIM) += nfcsim.o
obj-$(CONFIG_NFC_PORT100) += port100.o
obj-$(CONFIG_NFC_MRVL) += nfcmrvl/
obj-$(CONFIG_NFC_TRF7970A) += trf7970a.o
-obj-$(CONFIG_NFC_ST21NFCA) += st21nfca/
+obj-$(CONFIG_NFC_ST21NFCA) += st21nfca/
+obj-$(CONFIG_NFC_ST21NFCB) += st21nfcb/
ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
diff --git a/drivers/nfc/st21nfca/Makefile b/drivers/nfc/st21nfca/Makefile
index 038ed093a119..db7a38ae05f7 100644
--- a/drivers/nfc/st21nfca/Makefile
+++ b/drivers/nfc/st21nfca/Makefile
@@ -4,5 +4,5 @@
st21nfca_i2c-objs = i2c.o
-obj-$(CONFIG_NFC_ST21NFCA) += st21nfca.o
+obj-$(CONFIG_NFC_ST21NFCA) += st21nfca.o st21nfca_dep.o
obj-$(CONFIG_NFC_ST21NFCA_I2C) += st21nfca_i2c.o
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
index 3f954ed86d98..ff31939978ae 100644
--- a/drivers/nfc/st21nfca/i2c.c
+++ b/drivers/nfc/st21nfca/i2c.c
@@ -93,7 +93,7 @@ struct st21nfca_i2c_phy {
int hard_fault;
struct mutex phy_lock;
};
-static u8 len_seq[] = { 13, 24, 15, 29 };
+static u8 len_seq[] = { 16, 24, 12, 29 };
static u16 wait_tab[] = { 2, 3, 5, 15, 20, 40};
#define I2C_DUMP_SKB(info, skb) \
@@ -397,12 +397,11 @@ static int st21nfca_hci_i2c_read(struct st21nfca_i2c_phy *phy,
* The first read sequence does not start with SOF.
 * Data is corrupted so we drop it.
*/
- if (!phy->current_read_len && buf[0] != ST21NFCA_SOF_EOF) {
+ if (!phy->current_read_len && !IS_START_OF_FRAME(buf)) {
skb_trim(skb, 0);
phy->current_read_len = 0;
return -EIO;
- } else if (phy->current_read_len &&
- IS_START_OF_FRAME(buf)) {
+ } else if (phy->current_read_len && IS_START_OF_FRAME(buf)) {
/*
* Previous frame transmission was interrupted and
* the frame got repeated.
@@ -487,6 +486,8 @@ static irqreturn_t st21nfca_hci_irq_thread_fn(int irq, void *phy_id)
*/
nfc_hci_recv_frame(phy->hdev, phy->pending_skb);
phy->crc_trials = 0;
+ } else {
+ kfree_skb(phy->pending_skb);
}
phy->pending_skb = alloc_skb(ST21NFCA_HCI_LLC_MAX_SIZE * 2, GFP_KERNEL);
diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
index 51e0f00b3a4f..a902b0551c86 100644
--- a/drivers/nfc/st21nfca/st21nfca.c
+++ b/drivers/nfc/st21nfca/st21nfca.c
@@ -22,6 +22,7 @@
#include <net/nfc/llc.h>
#include "st21nfca.h"
+#include "st21nfca_dep.h"
#define DRIVER_DESC "HCI NFC driver for ST21NFCA"
@@ -53,6 +54,7 @@
#define ST21NFCA_DM_PIPE_CREATED 0x02
#define ST21NFCA_DM_PIPE_OPEN 0x04
#define ST21NFCA_DM_RF_ACTIVE 0x80
+#define ST21NFCA_DM_DISCONNECT 0x30
#define ST21NFCA_DM_IS_PIPE_OPEN(p) \
((p & 0x0f) == (ST21NFCA_DM_PIPE_CREATED | ST21NFCA_DM_PIPE_OPEN))
@@ -72,6 +74,7 @@ static struct nfc_hci_gate st21nfca_gates[] = {
{ST21NFCA_RF_READER_F_GATE, NFC_HCI_INVALID_PIPE},
{ST21NFCA_RF_READER_14443_3_A_GATE, NFC_HCI_INVALID_PIPE},
{ST21NFCA_RF_READER_ISO15693_GATE, NFC_HCI_INVALID_PIPE},
+ {ST21NFCA_RF_CARD_F_GATE, NFC_HCI_INVALID_PIPE},
};
struct st21nfca_pipe_info {
@@ -299,6 +302,9 @@ static int st21nfca_hci_start_poll(struct nfc_hci_dev *hdev,
u32 im_protocols, u32 tm_protocols)
{
int r;
+ u32 pol_req;
+ u8 param[19];
+ struct sk_buff *datarate_skb;
pr_info(DRIVER_DESC ": %s protocols 0x%x 0x%x\n",
__func__, im_protocols, tm_protocols);
@@ -331,6 +337,31 @@ static int st21nfca_hci_start_poll(struct nfc_hci_dev *hdev,
ST21NFCA_RF_READER_F_GATE);
if (r < 0)
return r;
+ } else {
+ hdev->gb = nfc_get_local_general_bytes(hdev->ndev,
+ &hdev->gb_len);
+
+ if (hdev->gb == NULL || hdev->gb_len == 0) {
+ im_protocols &= ~NFC_PROTO_NFC_DEP_MASK;
+ tm_protocols &= ~NFC_PROTO_NFC_DEP_MASK;
+ }
+
+ param[0] = ST21NFCA_RF_READER_F_DATARATE_106 |
+ ST21NFCA_RF_READER_F_DATARATE_212 |
+ ST21NFCA_RF_READER_F_DATARATE_424;
+ r = nfc_hci_set_param(hdev, ST21NFCA_RF_READER_F_GATE,
+ ST21NFCA_RF_READER_F_DATARATE,
+ param, 1);
+ if (r < 0)
+ return r;
+
+ pol_req =
+ be32_to_cpu(ST21NFCA_RF_READER_F_POL_REQ_DEFAULT);
+ r = nfc_hci_set_param(hdev, ST21NFCA_RF_READER_F_GATE,
+ ST21NFCA_RF_READER_F_POL_REQ,
+ (u8 *) &pol_req, 4);
+ if (r < 0)
+ return r;
}
if ((ST21NFCA_RF_READER_14443_3_A_GATE & im_protocols) == 0) {
@@ -353,9 +384,104 @@ static int st21nfca_hci_start_poll(struct nfc_hci_dev *hdev,
nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
NFC_HCI_EVT_END_OPERATION, NULL, 0);
}
+
+ if (tm_protocols & NFC_PROTO_NFC_DEP_MASK) {
+ r = nfc_hci_get_param(hdev, ST21NFCA_RF_CARD_F_GATE,
+ ST21NFCA_RF_CARD_F_DATARATE,
+ &datarate_skb);
+ if (r < 0)
+ return r;
+
+ /* Configure the maximum supported datarate to 424Kbps */
+ if (datarate_skb->len > 0 &&
+ datarate_skb->data[0] !=
+ ST21NFCA_RF_CARD_F_DATARATE_212_424) {
+ param[0] = ST21NFCA_RF_CARD_F_DATARATE_212_424;
+ r = nfc_hci_set_param(hdev, ST21NFCA_RF_CARD_F_GATE,
+ ST21NFCA_RF_CARD_F_DATARATE,
+ param, 1);
+ if (r < 0)
+ return r;
+ }
+
+ /*
+ * Configure sens_res
+ *
+ * NFC Forum Digital Spec Table 7:
+ * NFCID1 size: triple (10 bytes)
+ */
+ param[0] = 0x00;
+ param[1] = 0x08;
+ r = nfc_hci_set_param(hdev, ST21NFCA_RF_CARD_F_GATE,
+ ST21NFCA_RF_CARD_F_SENS_RES, param, 2);
+ if (r < 0)
+ return r;
+
+ /*
+ * Configure sel_res
+ *
+	 * NFC Forum Digital Spec Table 17:
+ * b3 set to 0b (value b7-b6):
+ * - 10b: Configured for NFC-DEP Protocol
+ */
+ param[0] = 0x40;
+ r = nfc_hci_set_param(hdev, ST21NFCA_RF_CARD_F_GATE,
+ ST21NFCA_RF_CARD_F_SEL_RES, param, 1);
+ if (r < 0)
+ return r;
+
+ /* Configure NFCID1 Random uid */
+ r = nfc_hci_set_param(hdev, ST21NFCA_RF_CARD_F_GATE,
+ ST21NFCA_RF_CARD_F_NFCID1, NULL, 0);
+ if (r < 0)
+ return r;
+
+ /* Configure NFCID2_LIST */
+ /* System Code */
+ param[0] = 0x00;
+ param[1] = 0x00;
+ /* NFCID2 */
+ param[2] = 0x01;
+ param[3] = 0xfe;
+ param[4] = 'S';
+ param[5] = 'T';
+ param[6] = 'M';
+ param[7] = 'i';
+ param[8] = 'c';
+ param[9] = 'r';
+		/* 8 pad bytes used for the polling response frame */
+
+ /*
+ * Configuration byte:
+ * - bit 0: define the default NFCID2 entry used when the
+ * system code is equal to 'FFFF'
+ * - bit 1: use a random value for lowest 6 bytes of
+ * NFCID2 value
+ * - bit 2: ignore polling request frame if request code
+ * is equal to '01'
+ * - Other bits are RFU
+ */
+ param[18] = 0x01;
+ r = nfc_hci_set_param(hdev, ST21NFCA_RF_CARD_F_GATE,
+ ST21NFCA_RF_CARD_F_NFCID2_LIST, param,
+ 19);
+ if (r < 0)
+ return r;
+
+ param[0] = 0x02;
+ r = nfc_hci_set_param(hdev, ST21NFCA_RF_CARD_F_GATE,
+ ST21NFCA_RF_CARD_F_MODE, param, 1);
+ }
+
return r;
}
+static void st21nfca_hci_stop_poll(struct nfc_hci_dev *hdev)
+{
+ nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE,
+ ST21NFCA_DM_DISCONNECT, NULL, 0, NULL);
+}
+
static int st21nfca_get_iso14443_3_atqa(struct nfc_hci_dev *hdev, u16 *atqa)
{
int r;
@@ -451,6 +577,26 @@ exit:
return r;
}
+static int st21nfca_hci_dep_link_up(struct nfc_hci_dev *hdev,
+ struct nfc_target *target, u8 comm_mode,
+ u8 *gb, size_t gb_len)
+{
+ struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+ info->dep_info.idx = target->idx;
+ return st21nfca_im_send_atr_req(hdev, gb, gb_len);
+}
+
+static int st21nfca_hci_dep_link_down(struct nfc_hci_dev *hdev)
+{
+ struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+ info->state = ST21NFCA_ST_READY;
+
+ return nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE,
+ ST21NFCA_DM_DISCONNECT, NULL, 0, NULL);
+}
+
static int st21nfca_hci_target_from_gate(struct nfc_hci_dev *hdev, u8 gate,
struct nfc_target *target)
{
@@ -505,6 +651,69 @@ static int st21nfca_hci_target_from_gate(struct nfc_hci_dev *hdev, u8 gate,
return 0;
}
+static int st21nfca_hci_complete_target_discovered(struct nfc_hci_dev *hdev,
+ u8 gate,
+ struct nfc_target *target)
+{
+ int r;
+ struct sk_buff *nfcid2_skb = NULL, *nfcid1_skb;
+
+ if (gate == ST21NFCA_RF_READER_F_GATE) {
+ r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_F_GATE,
+ ST21NFCA_RF_READER_F_NFCID2, &nfcid2_skb);
+ if (r < 0)
+ goto exit;
+
+ if (nfcid2_skb->len > NFC_SENSF_RES_MAXSIZE) {
+ r = -EPROTO;
+ goto exit;
+ }
+
+ /*
+		 * - After the reception of a polling response for a type F
+		 *   frame at 212 or 424 Kbit/s, the NFCID2 registry
+		 *   parameters will be updated.
+		 * - After the reception of a SEL_RES with the NFCIP-1
+		 *   compliant bit set for a type A frame, NFCID1 will be
+		 *   updated
+ */
+ if (nfcid2_skb->len > 0) {
+ /* P2P in type F */
+ memcpy(target->sensf_res, nfcid2_skb->data,
+ nfcid2_skb->len);
+ target->sensf_res_len = nfcid2_skb->len;
+ /* NFC Forum Digital Protocol Table 44 */
+ if (target->sensf_res[0] == 0x01 &&
+ target->sensf_res[1] == 0xfe)
+ target->supported_protocols =
+ NFC_PROTO_NFC_DEP_MASK;
+ else
+ target->supported_protocols =
+ NFC_PROTO_FELICA_MASK;
+ } else {
+ /* P2P in type A */
+ r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_F_GATE,
+ ST21NFCA_RF_READER_F_NFCID1,
+ &nfcid1_skb);
+ if (r < 0)
+ goto exit;
+
+ if (nfcid1_skb->len > NFC_NFCID1_MAXSIZE) {
+ r = -EPROTO;
+ goto exit;
+ }
+ memcpy(target->sensf_res, nfcid1_skb->data,
+ nfcid1_skb->len);
+ target->sensf_res_len = nfcid1_skb->len;
+ target->supported_protocols = NFC_PROTO_NFC_DEP_MASK;
+ }
+ target->hci_reader_gate = ST21NFCA_RF_READER_F_GATE;
+ }
+ r = 1;
+exit:
+ kfree_skb(nfcid2_skb);
+ return r;
+}
+
#define ST21NFCA_CB_TYPE_READER_ISO15693 1
static void st21nfca_hci_data_exchange_cb(void *context, struct sk_buff *skb,
int err)
@@ -541,6 +750,9 @@ static int st21nfca_hci_im_transceive(struct nfc_hci_dev *hdev,
switch (target->hci_reader_gate) {
case ST21NFCA_RF_READER_F_GATE:
+ if (target->supported_protocols == NFC_PROTO_NFC_DEP_MASK)
+ return st21nfca_im_send_dep_req(hdev, skb);
+
*skb_push(skb, 1) = 0x1a;
return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
ST21NFCA_WR_XCHG_DATA, skb->data,
@@ -569,6 +781,11 @@ static int st21nfca_hci_im_transceive(struct nfc_hci_dev *hdev,
}
}
+static int st21nfca_hci_tm_send(struct nfc_hci_dev *hdev, struct sk_buff *skb)
+{
+ return st21nfca_tm_send_dep_res(hdev, skb);
+}
+
static int st21nfca_hci_check_presence(struct nfc_hci_dev *hdev,
struct nfc_target *target)
{
@@ -594,6 +811,50 @@ static int st21nfca_hci_check_presence(struct nfc_hci_dev *hdev,
}
}
+/*
+ * Returns:
+ * <= 0: driver handled the event, skb consumed
+ * 1: driver does not handle the event, please do standard processing
+ */
+static int st21nfca_hci_event_received(struct nfc_hci_dev *hdev, u8 gate,
+ u8 event, struct sk_buff *skb)
+{
+ int r;
+ struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+ pr_debug("hci event: %d\n", event);
+
+ switch (event) {
+ case ST21NFCA_EVT_CARD_ACTIVATED:
+ if (gate == ST21NFCA_RF_CARD_F_GATE)
+ info->dep_info.curr_nfc_dep_pni = 0;
+ break;
+ case ST21NFCA_EVT_CARD_DEACTIVATED:
+ break;
+ case ST21NFCA_EVT_FIELD_ON:
+ break;
+ case ST21NFCA_EVT_FIELD_OFF:
+ break;
+ case ST21NFCA_EVT_SEND_DATA:
+ if (gate == ST21NFCA_RF_CARD_F_GATE) {
+ r = st21nfca_tm_event_send_data(hdev, skb, gate);
+ if (r < 0)
+ goto exit;
+ return 0;
+ } else {
+ info->dep_info.curr_nfc_dep_pni = 0;
+ return 1;
+ }
+ break;
+ default:
+ return 1;
+ }
+ kfree_skb(skb);
+ return 0;
+exit:
+ return r;
+}
+
static struct nfc_hci_ops st21nfca_hci_ops = {
.open = st21nfca_hci_open,
.close = st21nfca_hci_close,
@@ -601,9 +862,15 @@ static struct nfc_hci_ops st21nfca_hci_ops = {
.hci_ready = st21nfca_hci_ready,
.xmit = st21nfca_hci_xmit,
.start_poll = st21nfca_hci_start_poll,
+ .stop_poll = st21nfca_hci_stop_poll,
+ .dep_link_up = st21nfca_hci_dep_link_up,
+ .dep_link_down = st21nfca_hci_dep_link_down,
.target_from_gate = st21nfca_hci_target_from_gate,
+ .complete_target_discovered = st21nfca_hci_complete_target_discovered,
.im_transceive = st21nfca_hci_im_transceive,
+ .tm_send = st21nfca_hci_tm_send,
.check_presence = st21nfca_hci_check_presence,
+ .event_received = st21nfca_hci_event_received,
};
int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops,
@@ -648,7 +915,8 @@ int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops,
NFC_PROTO_FELICA_MASK |
NFC_PROTO_ISO14443_MASK |
NFC_PROTO_ISO14443_B_MASK |
- NFC_PROTO_ISO15693_MASK;
+ NFC_PROTO_ISO15693_MASK |
+ NFC_PROTO_NFC_DEP_MASK;
set_bit(NFC_HCI_QUIRK_SHORT_CLEAR, &quirks);
@@ -671,6 +939,7 @@ int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops,
goto err_regdev;
*hdev = info->hdev;
+ st21nfca_dep_init(info->hdev);
return 0;
@@ -688,6 +957,7 @@ void st21nfca_hci_remove(struct nfc_hci_dev *hdev)
{
struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+ st21nfca_dep_deinit(hdev);
nfc_hci_unregister_device(hdev);
nfc_hci_free_device(hdev);
kfree(info);
diff --git a/drivers/nfc/st21nfca/st21nfca.h b/drivers/nfc/st21nfca/st21nfca.h
index 334cd90bcc8c..96fe5a62dc0d 100644
--- a/drivers/nfc/st21nfca/st21nfca.h
+++ b/drivers/nfc/st21nfca/st21nfca.h
@@ -19,6 +19,8 @@
#include <net/nfc/hci.h>
+#include "st21nfca_dep.h"
+
#define HCI_MODE 0
/* framing in HCI mode */
@@ -73,7 +75,8 @@ struct st21nfca_hci_info {
data_exchange_cb_t async_cb;
void *async_cb_context;
-} __packed;
+ struct st21nfca_dep_info dep_info;
+};
/* Reader RF commands */
#define ST21NFCA_WR_XCHG_DATA 0x10
@@ -83,5 +86,26 @@ struct st21nfca_hci_info {
#define ST21NFCA_RF_READER_F_DATARATE_106 0x01
#define ST21NFCA_RF_READER_F_DATARATE_212 0x02
#define ST21NFCA_RF_READER_F_DATARATE_424 0x04
+#define ST21NFCA_RF_READER_F_POL_REQ 0x02
+#define ST21NFCA_RF_READER_F_POL_REQ_DEFAULT 0xffff0000
+#define ST21NFCA_RF_READER_F_NFCID2 0x03
+#define ST21NFCA_RF_READER_F_NFCID1 0x04
+#define ST21NFCA_RF_READER_F_SENS_RES 0x05
+
+#define ST21NFCA_RF_CARD_F_GATE 0x24
+#define ST21NFCA_RF_CARD_F_MODE 0x01
+#define ST21NFCA_RF_CARD_F_NFCID2_LIST 0x04
+#define ST21NFCA_RF_CARD_F_NFCID1 0x05
+#define ST21NFCA_RF_CARD_F_SENS_RES 0x06
+#define ST21NFCA_RF_CARD_F_SEL_RES 0x07
+#define ST21NFCA_RF_CARD_F_DATARATE 0x08
+#define ST21NFCA_RF_CARD_F_DATARATE_106 0x00
+#define ST21NFCA_RF_CARD_F_DATARATE_212_424 0x01
+
+#define ST21NFCA_EVT_SEND_DATA 0x10
+#define ST21NFCA_EVT_FIELD_ON 0x11
+#define ST21NFCA_EVT_CARD_DEACTIVATED 0x12
+#define ST21NFCA_EVT_CARD_ACTIVATED 0x13
+#define ST21NFCA_EVT_FIELD_OFF 0x14
#endif /* __LOCAL_ST21NFCA_H_ */
diff --git a/drivers/nfc/st21nfca/st21nfca_dep.c b/drivers/nfc/st21nfca/st21nfca_dep.c
new file mode 100644
index 000000000000..b2d9957b57f8
--- /dev/null
+++ b/drivers/nfc/st21nfca/st21nfca_dep.c
@@ -0,0 +1,661 @@
+/*
+ * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <net/nfc/hci.h>
+
+#include "st21nfca.h"
+#include "st21nfca_dep.h"
+
+#define ST21NFCA_NFCIP1_INITIATOR 0x00
+#define ST21NFCA_NFCIP1_REQ 0xd4
+#define ST21NFCA_NFCIP1_RES 0xd5
+#define ST21NFCA_NFCIP1_ATR_REQ 0x00
+#define ST21NFCA_NFCIP1_ATR_RES 0x01
+#define ST21NFCA_NFCIP1_PSL_REQ 0x04
+#define ST21NFCA_NFCIP1_PSL_RES 0x05
+#define ST21NFCA_NFCIP1_DEP_REQ 0x06
+#define ST21NFCA_NFCIP1_DEP_RES 0x07
+
+#define ST21NFCA_NFC_DEP_PFB_PNI(pfb) ((pfb) & 0x03)
+#define ST21NFCA_NFC_DEP_PFB_TYPE(pfb) ((pfb) & 0xE0)
+#define ST21NFCA_NFC_DEP_PFB_IS_TIMEOUT(pfb) \
+ ((pfb) & ST21NFCA_NFC_DEP_PFB_TIMEOUT_BIT)
+#define ST21NFCA_NFC_DEP_DID_BIT_SET(pfb) ((pfb) & 0x04)
+#define ST21NFCA_NFC_DEP_NAD_BIT_SET(pfb) ((pfb) & 0x08)
+#define ST21NFCA_NFC_DEP_PFB_TIMEOUT_BIT 0x10
+
+#define ST21NFCA_NFC_DEP_PFB_I_PDU 0x00
+#define ST21NFCA_NFC_DEP_PFB_ACK_NACK_PDU 0x40
+#define ST21NFCA_NFC_DEP_PFB_SUPERVISOR_PDU 0x80
+
+#define ST21NFCA_ATR_REQ_MIN_SIZE 17
+#define ST21NFCA_ATR_REQ_MAX_SIZE 65
+#define ST21NFCA_LR_BITS_PAYLOAD_SIZE_254B 0x30
+#define ST21NFCA_GB_BIT 0x02
+
+#define ST21NFCA_EVT_CARD_F_BITRATE 0x16
+#define ST21NFCA_EVT_READER_F_BITRATE 0x13
+#define ST21NFCA_PSL_REQ_SEND_SPEED(brs) (brs & 0x38)
+#define ST21NFCA_PSL_REQ_RECV_SPEED(brs) (brs & 0x07)
+#define ST21NFCA_PP2LRI(pp) ((pp & 0x30) >> 4)
+#define ST21NFCA_CARD_BITRATE_212 0x01
+#define ST21NFCA_CARD_BITRATE_424 0x02
+
+#define ST21NFCA_DEFAULT_TIMEOUT 0x0a
+
+
+#define PROTOCOL_ERR(req) pr_err("%d: ST21NFCA Protocol error: %s\n", \
+ __LINE__, req)
+
+struct st21nfca_atr_req {
+ u8 length;
+ u8 cmd0;
+ u8 cmd1;
+ u8 nfcid3[NFC_NFCID3_MAXSIZE];
+ u8 did;
+ u8 bsi;
+ u8 bri;
+ u8 ppi;
+ u8 gbi[0];
+} __packed;
+
+struct st21nfca_atr_res {
+ u8 length;
+ u8 cmd0;
+ u8 cmd1;
+ u8 nfcid3[NFC_NFCID3_MAXSIZE];
+ u8 did;
+ u8 bsi;
+ u8 bri;
+ u8 to;
+ u8 ppi;
+ u8 gbi[0];
+} __packed;
+
+struct st21nfca_psl_req {
+ u8 length;
+ u8 cmd0;
+ u8 cmd1;
+ u8 did;
+ u8 brs;
+ u8 fsl;
+} __packed;
+
+struct st21nfca_psl_res {
+ u8 length;
+ u8 cmd0;
+ u8 cmd1;
+ u8 did;
+} __packed;
+
+struct st21nfca_dep_req_res {
+ u8 length;
+ u8 cmd0;
+ u8 cmd1;
+ u8 pfb;
+ u8 did;
+ u8 nad;
+} __packed;
+
+static void st21nfca_tx_work(struct work_struct *work)
+{
+ struct st21nfca_hci_info *info = container_of(work,
+ struct st21nfca_hci_info,
+ dep_info.tx_work);
+
+ struct nfc_dev *dev;
+ struct sk_buff *skb;
+ if (info) {
+ dev = info->hdev->ndev;
+ skb = info->dep_info.tx_pending;
+
+ device_lock(&dev->dev);
+
+ nfc_hci_send_cmd_async(info->hdev, ST21NFCA_RF_READER_F_GATE,
+ ST21NFCA_WR_XCHG_DATA,
+ skb->data, skb->len,
+ info->async_cb, info);
+ device_unlock(&dev->dev);
+ kfree_skb(skb);
+ }
+}
+
+static void st21nfca_im_send_pdu(struct st21nfca_hci_info *info,
+ struct sk_buff *skb)
+{
+ info->dep_info.tx_pending = skb;
+ schedule_work(&info->dep_info.tx_work);
+}
+
+static int st21nfca_tm_send_atr_res(struct nfc_hci_dev *hdev,
+ struct st21nfca_atr_req *atr_req)
+{
+ struct st21nfca_atr_res *atr_res;
+ struct sk_buff *skb;
+ size_t gb_len;
+ int r;
+ struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+ gb_len = atr_req->length - sizeof(struct st21nfca_atr_req);
+ skb = alloc_skb(atr_req->length + 1, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, sizeof(struct st21nfca_atr_res));
+
+ atr_res = (struct st21nfca_atr_res *)skb->data;
+ memset(atr_res, 0, sizeof(struct st21nfca_atr_res));
+
+ atr_res->length = atr_req->length + 1;
+ atr_res->cmd0 = ST21NFCA_NFCIP1_RES;
+ atr_res->cmd1 = ST21NFCA_NFCIP1_ATR_RES;
+
+ memcpy(atr_res->nfcid3, atr_req->nfcid3, 6);
+ atr_res->bsi = 0x00;
+ atr_res->bri = 0x00;
+ atr_res->to = ST21NFCA_DEFAULT_TIMEOUT;
+ atr_res->ppi = ST21NFCA_LR_BITS_PAYLOAD_SIZE_254B;
+
+ if (gb_len) {
+ skb_put(skb, gb_len);
+
+ atr_res->ppi |= ST21NFCA_GB_BIT;
+ memcpy(atr_res->gbi, atr_req->gbi, gb_len);
+ r = nfc_set_remote_general_bytes(hdev->ndev, atr_res->gbi,
+ gb_len);
+ if (r < 0)
+ return r;
+ }
+
+ info->dep_info.curr_nfc_dep_pni = 0;
+
+ return nfc_hci_send_event(hdev, ST21NFCA_RF_CARD_F_GATE,
+ ST21NFCA_EVT_SEND_DATA, skb->data, skb->len);
+}
+
+static int st21nfca_tm_recv_atr_req(struct nfc_hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct st21nfca_atr_req *atr_req;
+ size_t gb_len;
+ int r;
+
+ skb_trim(skb, skb->len - 1);
+ if (IS_ERR(skb)) {
+ r = PTR_ERR(skb);
+ goto exit;
+ }
+
+ if (!skb->len) {
+ r = -EIO;
+ goto exit;
+ }
+
+ if (skb->len < ST21NFCA_ATR_REQ_MIN_SIZE) {
+ r = -EPROTO;
+ goto exit;
+ }
+
+ atr_req = (struct st21nfca_atr_req *)skb->data;
+
+ r = st21nfca_tm_send_atr_res(hdev, atr_req);
+ if (r)
+ goto exit;
+
+ gb_len = skb->len - sizeof(struct st21nfca_atr_req);
+
+ r = nfc_tm_activated(hdev->ndev, NFC_PROTO_NFC_DEP_MASK,
+ NFC_COMM_PASSIVE, atr_req->gbi, gb_len);
+ if (r)
+ goto exit;
+
+ r = 0;
+
+exit:
+ return r;
+}
+
+static int st21nfca_tm_send_psl_res(struct nfc_hci_dev *hdev,
+ struct st21nfca_psl_req *psl_req)
+{
+ struct st21nfca_psl_res *psl_res;
+ struct sk_buff *skb;
+ u8 bitrate[2] = {0, 0};
+
+ int r;
+
+ skb = alloc_skb(sizeof(struct st21nfca_psl_res), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_put(skb, sizeof(struct st21nfca_psl_res));
+
+ psl_res = (struct st21nfca_psl_res *)skb->data;
+
+ psl_res->length = sizeof(struct st21nfca_psl_res);
+ psl_res->cmd0 = ST21NFCA_NFCIP1_RES;
+ psl_res->cmd1 = ST21NFCA_NFCIP1_PSL_RES;
+ psl_res->did = psl_req->did;
+
+ r = nfc_hci_send_event(hdev, ST21NFCA_RF_CARD_F_GATE,
+ ST21NFCA_EVT_SEND_DATA, skb->data, skb->len);
+
+ /*
+	 * ST21NFCA only supports P2P in passive mode.
+	 * A PSL_REQ BRS value != 0 only means a change of
+	 * technology to type F; in that case we switch to a
+	 * 424 Kbit/s bitrate, otherwise we stay at 106 Kbit/s.
+ */
+ if (ST21NFCA_PSL_REQ_SEND_SPEED(psl_req->brs) &&
+ ST21NFCA_PSL_REQ_RECV_SPEED(psl_req->brs)) {
+ bitrate[0] = ST21NFCA_CARD_BITRATE_424;
+ bitrate[1] = ST21NFCA_CARD_BITRATE_424;
+ }
+
+	/* Send a bitrate change event to the card F gate */
+ return nfc_hci_send_event(hdev, ST21NFCA_RF_CARD_F_GATE,
+ ST21NFCA_EVT_CARD_F_BITRATE, bitrate, 2);
+}
+
+static int st21nfca_tm_recv_psl_req(struct nfc_hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct st21nfca_psl_req *psl_req;
+ int r;
+
+ skb_trim(skb, skb->len - 1);
+ if (IS_ERR(skb)) {
+ r = PTR_ERR(skb);
+ skb = NULL;
+ goto exit;
+ }
+
+ if (!skb->len) {
+ r = -EIO;
+ goto exit;
+ }
+
+ psl_req = (struct st21nfca_psl_req *)skb->data;
+
+ if (skb->len < sizeof(struct st21nfca_psl_req)) {
+ r = -EIO;
+ goto exit;
+ }
+
+ r = st21nfca_tm_send_psl_res(hdev, psl_req);
+exit:
+ return r;
+}
+
+int st21nfca_tm_send_dep_res(struct nfc_hci_dev *hdev, struct sk_buff *skb)
+{
+ int r;
+ struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+ *skb_push(skb, 1) = info->dep_info.curr_nfc_dep_pni;
+ *skb_push(skb, 1) = ST21NFCA_NFCIP1_DEP_RES;
+ *skb_push(skb, 1) = ST21NFCA_NFCIP1_RES;
+ *skb_push(skb, 1) = skb->len;
+
+ r = nfc_hci_send_event(hdev, ST21NFCA_RF_CARD_F_GATE,
+ ST21NFCA_EVT_SEND_DATA, skb->data, skb->len);
+ kfree_skb(skb);
+
+ return r;
+}
+EXPORT_SYMBOL(st21nfca_tm_send_dep_res);
+
+static int st21nfca_tm_recv_dep_req(struct nfc_hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct st21nfca_dep_req_res *dep_req;
+ u8 size;
+ int r;
+ struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+ skb_trim(skb, skb->len - 1);
+ if (IS_ERR(skb)) {
+ r = PTR_ERR(skb);
+ skb = NULL;
+ goto exit;
+ }
+
+ size = 4;
+
+ dep_req = (struct st21nfca_dep_req_res *)skb->data;
+ if (skb->len < size) {
+ r = -EIO;
+ goto exit;
+ }
+
+ if (ST21NFCA_NFC_DEP_DID_BIT_SET(dep_req->pfb))
+ size++;
+ if (ST21NFCA_NFC_DEP_NAD_BIT_SET(dep_req->pfb))
+ size++;
+
+ if (skb->len < size) {
+ r = -EIO;
+ goto exit;
+ }
+
+ /* Receiving DEP_REQ - Decoding */
+ switch (ST21NFCA_NFC_DEP_PFB_TYPE(dep_req->pfb)) {
+ case ST21NFCA_NFC_DEP_PFB_I_PDU:
+ info->dep_info.curr_nfc_dep_pni =
+ ST21NFCA_NFC_DEP_PFB_PNI(dep_req->pfb);
+ break;
+ case ST21NFCA_NFC_DEP_PFB_ACK_NACK_PDU:
+ pr_err("Received a ACK/NACK PDU\n");
+ break;
+ case ST21NFCA_NFC_DEP_PFB_SUPERVISOR_PDU:
+ pr_err("Received a SUPERVISOR PDU\n");
+ break;
+ }
+
+ if (IS_ERR(skb)) {
+ r = PTR_ERR(skb);
+ skb = NULL;
+ goto exit;
+ }
+
+ skb_pull(skb, size);
+
+ return nfc_tm_data_received(hdev->ndev, skb);
+exit:
+ return r;
+}
+
+int st21nfca_tm_event_send_data(struct nfc_hci_dev *hdev, struct sk_buff *skb,
+ u8 gate)
+{
+ u8 cmd0, cmd1;
+ int r;
+
+ cmd0 = skb->data[1];
+ switch (cmd0) {
+ case ST21NFCA_NFCIP1_REQ:
+ cmd1 = skb->data[2];
+ switch (cmd1) {
+ case ST21NFCA_NFCIP1_ATR_REQ:
+ r = st21nfca_tm_recv_atr_req(hdev, skb);
+ break;
+ case ST21NFCA_NFCIP1_PSL_REQ:
+ r = st21nfca_tm_recv_psl_req(hdev, skb);
+ break;
+ case ST21NFCA_NFCIP1_DEP_REQ:
+ r = st21nfca_tm_recv_dep_req(hdev, skb);
+ break;
+ default:
+ return 1;
+		}
+		break;
+ default:
+ return 1;
+ }
+ return r;
+}
+EXPORT_SYMBOL(st21nfca_tm_event_send_data);
+
+static void st21nfca_im_send_psl_req(struct nfc_hci_dev *hdev, u8 did, u8 bsi,
+ u8 bri, u8 lri)
+{
+ struct sk_buff *skb;
+ struct st21nfca_psl_req *psl_req;
+ struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+	skb = alloc_skb(sizeof(struct st21nfca_psl_req) + 1, GFP_KERNEL);
+ if (!skb)
+ return;
+ skb_reserve(skb, 1);
+
+ skb_put(skb, sizeof(struct st21nfca_psl_req));
+ psl_req = (struct st21nfca_psl_req *) skb->data;
+
+ psl_req->length = sizeof(struct st21nfca_psl_req);
+ psl_req->cmd0 = ST21NFCA_NFCIP1_REQ;
+ psl_req->cmd1 = ST21NFCA_NFCIP1_PSL_REQ;
+ psl_req->did = did;
+ psl_req->brs = (0x30 & bsi << 4) | (bri & 0x03);
+ psl_req->fsl = lri;
+
+ *skb_push(skb, 1) = info->dep_info.to | 0x10;
+
+ st21nfca_im_send_pdu(info, skb);
+
+ kfree_skb(skb);
+}
+
+#define ST21NFCA_CB_TYPE_READER_F 1
+static void st21nfca_im_recv_atr_res_cb(void *context, struct sk_buff *skb,
+ int err)
+{
+ struct st21nfca_hci_info *info = context;
+ struct st21nfca_atr_res *atr_res;
+ int r;
+
+ if (err != 0)
+ return;
+
+ if (IS_ERR(skb))
+ return;
+
+ switch (info->async_cb_type) {
+ case ST21NFCA_CB_TYPE_READER_F:
+ skb_trim(skb, skb->len - 1);
+ atr_res = (struct st21nfca_atr_res *)skb->data;
+ r = nfc_set_remote_general_bytes(info->hdev->ndev,
+ atr_res->gbi,
+ skb->len - sizeof(struct st21nfca_atr_res));
+ if (r < 0)
+ return;
+
+ if (atr_res->to >= 0x0e)
+ info->dep_info.to = 0x0e;
+ else
+ info->dep_info.to = atr_res->to + 1;
+
+ info->dep_info.to |= 0x10;
+
+ r = nfc_dep_link_is_up(info->hdev->ndev, info->dep_info.idx,
+ NFC_COMM_PASSIVE, NFC_RF_INITIATOR);
+ if (r < 0)
+ return;
+
+ info->dep_info.curr_nfc_dep_pni = 0;
+ if (ST21NFCA_PP2LRI(atr_res->ppi) != info->dep_info.lri)
+ st21nfca_im_send_psl_req(info->hdev, atr_res->did,
+ atr_res->bsi, atr_res->bri,
+ ST21NFCA_PP2LRI(atr_res->ppi));
+ break;
+ default:
+ if (err == 0)
+ kfree_skb(skb);
+ break;
+ }
+}
+
+int st21nfca_im_send_atr_req(struct nfc_hci_dev *hdev, u8 *gb, size_t gb_len)
+{
+ struct sk_buff *skb;
+ struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+ struct st21nfca_atr_req *atr_req;
+ struct nfc_target *target;
+ uint size;
+
+ info->dep_info.to = ST21NFCA_DEFAULT_TIMEOUT;
+ size = ST21NFCA_ATR_REQ_MIN_SIZE + gb_len;
+ if (size > ST21NFCA_ATR_REQ_MAX_SIZE) {
+ PROTOCOL_ERR("14.6.1.1");
+ return -EINVAL;
+ }
+
+	skb = alloc_skb(sizeof(struct st21nfca_atr_req) + gb_len + 1, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_reserve(skb, 1);
+
+ skb_put(skb, sizeof(struct st21nfca_atr_req));
+
+ atr_req = (struct st21nfca_atr_req *)skb->data;
+ memset(atr_req, 0, sizeof(struct st21nfca_atr_req));
+
+ atr_req->cmd0 = ST21NFCA_NFCIP1_REQ;
+ atr_req->cmd1 = ST21NFCA_NFCIP1_ATR_REQ;
+ memset(atr_req->nfcid3, 0, NFC_NFCID3_MAXSIZE);
+ target = hdev->ndev->targets;
+
+ if (target->sensf_res)
+ memcpy(atr_req->nfcid3, target->sensf_res,
+ target->sensf_res_len);
+ else
+ get_random_bytes(atr_req->nfcid3, NFC_NFCID3_MAXSIZE);
+
+ atr_req->did = 0x0;
+
+ atr_req->bsi = 0x00;
+ atr_req->bri = 0x00;
+ atr_req->ppi = ST21NFCA_LR_BITS_PAYLOAD_SIZE_254B;
+ if (gb_len) {
+ atr_req->ppi |= ST21NFCA_GB_BIT;
+ memcpy(skb_put(skb, gb_len), gb, gb_len);
+ }
+ atr_req->length = sizeof(struct st21nfca_atr_req) + hdev->gb_len;
+
+ *skb_push(skb, 1) = info->dep_info.to | 0x10; /* timeout */
+
+ info->async_cb_type = ST21NFCA_CB_TYPE_READER_F;
+ info->async_cb_context = info;
+ info->async_cb = st21nfca_im_recv_atr_res_cb;
+ info->dep_info.bri = atr_req->bri;
+ info->dep_info.bsi = atr_req->bsi;
+ info->dep_info.lri = ST21NFCA_PP2LRI(atr_req->ppi);
+
+ return nfc_hci_send_cmd_async(hdev, ST21NFCA_RF_READER_F_GATE,
+ ST21NFCA_WR_XCHG_DATA, skb->data,
+ skb->len, info->async_cb, info);
+}
+EXPORT_SYMBOL(st21nfca_im_send_atr_req);
+
+static void st21nfca_im_recv_dep_res_cb(void *context, struct sk_buff *skb,
+ int err)
+{
+ struct st21nfca_hci_info *info = context;
+ struct st21nfca_dep_req_res *dep_res;
+
+ int size;
+
+ if (err != 0)
+ return;
+
+ if (IS_ERR(skb))
+ return;
+
+ switch (info->async_cb_type) {
+ case ST21NFCA_CB_TYPE_READER_F:
+ dep_res = (struct st21nfca_dep_req_res *)skb->data;
+
+ size = 3;
+ if (skb->len < size)
+ goto exit;
+
+ if (ST21NFCA_NFC_DEP_DID_BIT_SET(dep_res->pfb))
+ size++;
+ if (ST21NFCA_NFC_DEP_NAD_BIT_SET(dep_res->pfb))
+ size++;
+
+ if (skb->len < size)
+ goto exit;
+
+ skb_trim(skb, skb->len - 1);
+
+ /* Receiving DEP_REQ - Decoding */
+ switch (ST21NFCA_NFC_DEP_PFB_TYPE(dep_res->pfb)) {
+ case ST21NFCA_NFC_DEP_PFB_ACK_NACK_PDU:
+ pr_err("Received a ACK/NACK PDU\n");
+ case ST21NFCA_NFC_DEP_PFB_I_PDU:
+ info->dep_info.curr_nfc_dep_pni =
+ ST21NFCA_NFC_DEP_PFB_PNI(dep_res->pfb + 1);
+ size++;
+ skb_pull(skb, size);
+ nfc_tm_data_received(info->hdev->ndev, skb);
+ break;
+ case ST21NFCA_NFC_DEP_PFB_SUPERVISOR_PDU:
+ pr_err("Received a SUPERVISOR PDU\n");
+ skb_pull(skb, size);
+ *skb_push(skb, 1) = ST21NFCA_NFCIP1_DEP_REQ;
+ *skb_push(skb, 1) = ST21NFCA_NFCIP1_REQ;
+ *skb_push(skb, 1) = skb->len;
+ *skb_push(skb, 1) = info->dep_info.to | 0x10;
+
+ st21nfca_im_send_pdu(info, skb);
+ break;
+ }
+
+ return;
+ default:
+ break;
+ }
+
+exit:
+ if (err == 0)
+ kfree_skb(skb);
+}
+
+int st21nfca_im_send_dep_req(struct nfc_hci_dev *hdev, struct sk_buff *skb)
+{
+ struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+ info->async_cb_type = ST21NFCA_CB_TYPE_READER_F;
+ info->async_cb_context = info;
+ info->async_cb = st21nfca_im_recv_dep_res_cb;
+
+ *skb_push(skb, 1) = info->dep_info.curr_nfc_dep_pni;
+ *skb_push(skb, 1) = ST21NFCA_NFCIP1_DEP_REQ;
+ *skb_push(skb, 1) = ST21NFCA_NFCIP1_REQ;
+ *skb_push(skb, 1) = skb->len;
+
+ *skb_push(skb, 1) = info->dep_info.to | 0x10;
+
+ return nfc_hci_send_cmd_async(hdev, ST21NFCA_RF_READER_F_GATE,
+ ST21NFCA_WR_XCHG_DATA,
+ skb->data, skb->len,
+ info->async_cb, info);
+}
+EXPORT_SYMBOL(st21nfca_im_send_dep_req);
+
+void st21nfca_dep_init(struct nfc_hci_dev *hdev)
+{
+ struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+ INIT_WORK(&info->dep_info.tx_work, st21nfca_tx_work);
+ info->dep_info.curr_nfc_dep_pni = 0;
+ info->dep_info.idx = 0;
+ info->dep_info.to = ST21NFCA_DEFAULT_TIMEOUT;
+}
+EXPORT_SYMBOL(st21nfca_dep_init);
+
+void st21nfca_dep_deinit(struct nfc_hci_dev *hdev)
+{
+ struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+ cancel_work_sync(&info->dep_info.tx_work);
+}
+EXPORT_SYMBOL(st21nfca_dep_deinit);
diff --git a/drivers/nfc/st21nfca/st21nfca_dep.h b/drivers/nfc/st21nfca/st21nfca_dep.h
new file mode 100644
index 000000000000..ca213dee9c6e
--- /dev/null
+++ b/drivers/nfc/st21nfca/st21nfca_dep.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ST21NFCA_DEP_H
+#define __ST21NFCA_DEP_H
+
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+
+struct st21nfca_dep_info {
+ struct sk_buff *tx_pending;
+ struct work_struct tx_work;
+ u8 curr_nfc_dep_pni;
+ u32 idx;
+ u8 to;
+ u8 did;
+ u8 bsi;
+ u8 bri;
+ u8 lri;
+} __packed;
+
+int st21nfca_tm_event_send_data(struct nfc_hci_dev *hdev, struct sk_buff *skb,
+ u8 gate);
+int st21nfca_tm_send_dep_res(struct nfc_hci_dev *hdev, struct sk_buff *skb);
+
+int st21nfca_im_send_atr_req(struct nfc_hci_dev *hdev, u8 *gb, size_t gb_len);
+int st21nfca_im_send_dep_req(struct nfc_hci_dev *hdev, struct sk_buff *skb);
+void st21nfca_dep_init(struct nfc_hci_dev *hdev);
+void st21nfca_dep_deinit(struct nfc_hci_dev *hdev);
+#endif /* __ST21NFCA_DEP_H */
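A small worked illustration of the DEP header parsing declared above: in st21nfca_dep.c the receive path of st21nfca_tm_recv_dep_req() starts from a four byte header and grows it by one byte for each optional field flagged in the PFB. The helper name and the open-coded masks below (mirroring ST21NFCA_NFC_DEP_DID_BIT_SET()/NAD_BIT_SET()) are illustrative only.

#include <linux/types.h>

static inline size_t example_dep_header_size(u8 pfb)
{
	size_t size = 4;	/* length + cmd0 + cmd1 + pfb */

	if (pfb & 0x04)		/* DID byte present */
		size++;
	if (pfb & 0x08)		/* NAD byte present */
		size++;
	return size;
}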
diff --git a/drivers/nfc/st21nfcb/Kconfig b/drivers/nfc/st21nfcb/Kconfig
new file mode 100644
index 000000000000..e0322dd03a70
--- /dev/null
+++ b/drivers/nfc/st21nfcb/Kconfig
@@ -0,0 +1,22 @@
+config NFC_ST21NFCB
+ tristate "STMicroelectronics ST21NFCB NFC driver"
+ depends on NFC_NCI
+ default n
+ ---help---
+ STMicroelectronics ST21NFCB core driver. It implements the chipset
+ NCI logic and hooks into the NFC kernel APIs. Physical layers will
+ register against it.
+
+ To compile this driver as a module, choose m here. The module will
+ be called st21nfcb.
+ Say N if unsure.
+
+config NFC_ST21NFCB_I2C
+ tristate "NFC ST21NFCB i2c support"
+ depends on NFC_ST21NFCB && I2C
+ ---help---
+ This module adds support for the STMicroelectronics st21nfcb i2c interface.
+ Select this if your platform is using the i2c bus.
+
+ If you choose to build a module, it'll be called st21nfcb_i2c.
+ Say N if unsure.
diff --git a/drivers/nfc/st21nfcb/Makefile b/drivers/nfc/st21nfcb/Makefile
new file mode 100644
index 000000000000..13d9f03b2fea
--- /dev/null
+++ b/drivers/nfc/st21nfcb/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for ST21NFCB NCI based NFC driver
+#
+
+st21nfcb_i2c-objs = i2c.o
+
+obj-$(CONFIG_NFC_ST21NFCB) += st21nfcb.o ndlc.o
+obj-$(CONFIG_NFC_ST21NFCB_I2C) += st21nfcb_i2c.o
diff --git a/drivers/nfc/st21nfcb/i2c.c b/drivers/nfc/st21nfcb/i2c.c
new file mode 100644
index 000000000000..8af880ead5db
--- /dev/null
+++ b/drivers/nfc/st21nfcb/i2c.c
@@ -0,0 +1,462 @@
+/*
+ * I2C Link Layer for ST21NFCB NCI based Driver
+ * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/crc-ccitt.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/miscdevice.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/nfc.h>
+#include <linux/firmware.h>
+#include <linux/unaligned/access_ok.h>
+#include <linux/platform_data/st21nfcb.h>
+
+#include <net/nfc/nci.h>
+#include <net/nfc/llc.h>
+#include <net/nfc/nfc.h>
+
+#include "ndlc.h"
+
+#define DRIVER_DESC "NCI NFC driver for ST21NFCB"
+
+/* ndlc header */
+#define ST21NFCB_FRAME_HEADROOM 1
+#define ST21NFCB_FRAME_TAILROOM 0
+
+#define ST21NFCB_NCI_I2C_MIN_SIZE 4 /* PCB(1) + NCI Packet header(3) */
+#define ST21NFCB_NCI_I2C_MAX_SIZE 250 /* req 4.2.1 */
+
+#define ST21NFCB_NCI_I2C_DRIVER_NAME "st21nfcb_nci_i2c"
+
+static struct i2c_device_id st21nfcb_nci_i2c_id_table[] = {
+ {ST21NFCB_NCI_DRIVER_NAME, 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, st21nfcb_nci_i2c_id_table);
+
+struct st21nfcb_i2c_phy {
+ struct i2c_client *i2c_dev;
+ struct llt_ndlc *ndlc;
+
+ unsigned int gpio_irq;
+ unsigned int gpio_reset;
+ unsigned int irq_polarity;
+
+ int powered;
+
+ /*
+	 * < 0 if hardware error occurred (e.g. i2c err)
+ * and prevents normal operation.
+ */
+ int hard_fault;
+};
+
+#define I2C_DUMP_SKB(info, skb) \
+do { \
+ pr_debug("%s:\n", info); \
+ print_hex_dump(KERN_DEBUG, "i2c: ", DUMP_PREFIX_OFFSET, \
+ 16, 1, (skb)->data, (skb)->len, 0); \
+} while (0)
+
+static int st21nfcb_nci_i2c_enable(void *phy_id)
+{
+ struct st21nfcb_i2c_phy *phy = phy_id;
+
+ gpio_set_value(phy->gpio_reset, 0);
+ usleep_range(10000, 15000);
+ gpio_set_value(phy->gpio_reset, 1);
+ phy->powered = 1;
+ usleep_range(80000, 85000);
+
+ return 0;
+}
+
+static void st21nfcb_nci_i2c_disable(void *phy_id)
+{
+ struct st21nfcb_i2c_phy *phy = phy_id;
+
+ pr_info("\n");
+
+ phy->powered = 0;
+ /* reset chip in order to flush clf */
+ gpio_set_value(phy->gpio_reset, 0);
+ usleep_range(10000, 15000);
+ gpio_set_value(phy->gpio_reset, 1);
+}
+
+static void st21nfcb_nci_remove_header(struct sk_buff *skb)
+{
+ skb_pull(skb, ST21NFCB_FRAME_HEADROOM);
+}
+
+/*
+ * Writing a frame must not return the number of written bytes.
+ * It must return either zero for success, or <0 for error.
+ * In addition, it must not alter the skb
+ */
+static int st21nfcb_nci_i2c_write(void *phy_id, struct sk_buff *skb)
+{
+ int r = -1;
+ struct st21nfcb_i2c_phy *phy = phy_id;
+ struct i2c_client *client = phy->i2c_dev;
+
+ I2C_DUMP_SKB("st21nfcb_nci_i2c_write", skb);
+
+ if (phy->hard_fault != 0)
+ return phy->hard_fault;
+
+ r = i2c_master_send(client, skb->data, skb->len);
+ if (r == -EREMOTEIO) { /* Retry, chip was in standby */
+ usleep_range(1000, 4000);
+ r = i2c_master_send(client, skb->data, skb->len);
+ }
+
+ if (r >= 0) {
+ if (r != skb->len)
+ r = -EREMOTEIO;
+ else
+ r = 0;
+ }
+
+ st21nfcb_nci_remove_header(skb);
+
+ return r;
+}
+
+/*
+ * Reads an ndlc frame and returns it in a newly allocated sk_buff.
+ * returns:
+ * 0 : the frame (header and payload) was read successfully
+ * -EREMOTEIO : i2c read error (fatal)
+ * -EBADMSG : the announced frame length is invalid and the
+ * frame is discarded
+ * -ENOMEM : sk_buff allocation failed
+ */
+static int st21nfcb_nci_i2c_read(struct st21nfcb_i2c_phy *phy,
+ struct sk_buff **skb)
+{
+ int r;
+ u8 len;
+ u8 buf[ST21NFCB_NCI_I2C_MAX_SIZE];
+ struct i2c_client *client = phy->i2c_dev;
+
+ r = i2c_master_recv(client, buf, ST21NFCB_NCI_I2C_MIN_SIZE);
+ if (r == -EREMOTEIO) { /* Retry, chip was in standby */
+ usleep_range(1000, 4000);
+ r = i2c_master_recv(client, buf, ST21NFCB_NCI_I2C_MIN_SIZE);
+ } else if (r != ST21NFCB_NCI_I2C_MIN_SIZE) {
+ nfc_err(&client->dev, "cannot read ndlc & nci header\n");
+ return -EREMOTEIO;
+ }
+
+ len = be16_to_cpu(*(__be16 *) (buf + 2));
+ if (len > ST21NFCB_NCI_I2C_MAX_SIZE) {
+ nfc_err(&client->dev, "invalid frame len\n");
+ return -EBADMSG;
+ }
+
+ *skb = alloc_skb(ST21NFCB_NCI_I2C_MIN_SIZE + len, GFP_KERNEL);
+ if (*skb == NULL)
+ return -ENOMEM;
+
+ skb_reserve(*skb, ST21NFCB_NCI_I2C_MIN_SIZE);
+ skb_put(*skb, ST21NFCB_NCI_I2C_MIN_SIZE);
+ memcpy((*skb)->data, buf, ST21NFCB_NCI_I2C_MIN_SIZE);
+
+ if (!len)
+ return 0;
+
+ r = i2c_master_recv(client, buf, len);
+ if (r != len) {
+ kfree_skb(*skb);
+ return -EREMOTEIO;
+ }
+
+ skb_put(*skb, len);
+ memcpy((*skb)->data + ST21NFCB_NCI_I2C_MIN_SIZE, buf, len);
+
+ I2C_DUMP_SKB("i2c frame read", *skb);
+
+ return 0;
+}
+
+/*
+ * Reads an ndlc frame from the chip.
+ *
+ * On ST21NFCB, IRQ goes in idle state when read starts.
+ */
+static irqreturn_t st21nfcb_nci_irq_thread_fn(int irq, void *phy_id)
+{
+ struct st21nfcb_i2c_phy *phy = phy_id;
+ struct i2c_client *client;
+ struct sk_buff *skb = NULL;
+ int r;
+
+ if (!phy || irq != phy->i2c_dev->irq) {
+ WARN_ON_ONCE(1);
+ return IRQ_NONE;
+ }
+
+ client = phy->i2c_dev;
+ dev_dbg(&client->dev, "IRQ\n");
+
+ if (phy->hard_fault)
+ return IRQ_HANDLED;
+
+ if (!phy->powered) {
+ st21nfcb_nci_i2c_disable(phy);
+ return IRQ_HANDLED;
+ }
+
+ r = st21nfcb_nci_i2c_read(phy, &skb);
+ if (r == -EREMOTEIO) {
+ phy->hard_fault = r;
+ ndlc_recv(phy->ndlc, NULL);
+ return IRQ_HANDLED;
+ } else if (r == -ENOMEM || r == -EBADMSG) {
+ return IRQ_HANDLED;
+ }
+
+ ndlc_recv(phy->ndlc, skb);
+
+ return IRQ_HANDLED;
+}
+
+static struct nfc_phy_ops i2c_phy_ops = {
+ .write = st21nfcb_nci_i2c_write,
+ .enable = st21nfcb_nci_i2c_enable,
+ .disable = st21nfcb_nci_i2c_disable,
+};
+
+#ifdef CONFIG_OF
+static int st21nfcb_nci_i2c_of_request_resources(struct i2c_client *client)
+{
+ struct st21nfcb_i2c_phy *phy = i2c_get_clientdata(client);
+ struct device_node *pp;
+ int gpio;
+ int r;
+
+ pp = client->dev.of_node;
+ if (!pp)
+ return -ENODEV;
+
+ /* Get GPIO from device tree */
+ gpio = of_get_named_gpio(pp, "reset-gpios", 0);
+ if (gpio < 0) {
+ nfc_err(&client->dev,
+ "Failed to retrieve reset-gpios from device tree\n");
+ return gpio;
+ }
+
+ /* GPIO request and configuration */
+ r = devm_gpio_request(&client->dev, gpio, "clf_reset");
+ if (r) {
+ nfc_err(&client->dev, "Failed to request reset pin\n");
+ return -ENODEV;
+ }
+
+ r = gpio_direction_output(gpio, 1);
+ if (r) {
+ nfc_err(&client->dev,
+ "Failed to set reset pin direction as output\n");
+ return -ENODEV;
+ }
+ phy->gpio_reset = gpio;
+
+ /* IRQ */
+ r = irq_of_parse_and_map(pp, 0);
+ if (r < 0) {
+ nfc_err(&client->dev,
+ "Unable to get irq, error: %d\n", r);
+ return r;
+ }
+
+ phy->irq_polarity = irq_get_trigger_type(r);
+ client->irq = r;
+
+ return 0;
+}
+#else
+static int st21nfcb_nci_i2c_of_request_resources(struct i2c_client *client)
+{
+ return -ENODEV;
+}
+#endif
+
+static int st21nfcb_nci_i2c_request_resources(struct i2c_client *client)
+{
+ struct st21nfcb_nfc_platform_data *pdata;
+ struct st21nfcb_i2c_phy *phy = i2c_get_clientdata(client);
+ int r;
+ int irq;
+
+ pdata = client->dev.platform_data;
+ if (pdata == NULL) {
+ nfc_err(&client->dev, "No platform data\n");
+ return -EINVAL;
+ }
+
+ /* store for later use */
+ phy->gpio_irq = pdata->gpio_irq;
+ phy->gpio_reset = pdata->gpio_reset;
+ phy->irq_polarity = pdata->irq_polarity;
+
+ r = devm_gpio_request(&client->dev, phy->gpio_irq, "wake_up");
+ if (r) {
+ pr_err("%s : gpio_request failed\n", __FILE__);
+ return -ENODEV;
+ }
+
+ r = gpio_direction_input(phy->gpio_irq);
+ if (r) {
+ pr_err("%s : gpio_direction_input failed\n", __FILE__);
+ return -ENODEV;
+ }
+
+ r = devm_gpio_request(&client->dev,
+ phy->gpio_reset, "clf_reset");
+ if (r) {
+ pr_err("%s : reset gpio_request failed\n", __FILE__);
+ return -ENODEV;
+ }
+
+ r = gpio_direction_output(phy->gpio_reset, 1);
+ if (r) {
+ pr_err("%s : reset gpio_direction_output failed\n",
+ __FILE__);
+ return -ENODEV;
+ }
+
+ /* IRQ */
+ irq = gpio_to_irq(phy->gpio_irq);
+ if (irq < 0) {
+ nfc_err(&client->dev,
+ "Unable to get irq number for GPIO %d error %d\n",
+			phy->gpio_irq, irq);
+ return -ENODEV;
+ }
+ client->irq = irq;
+
+ return 0;
+}
+
+static int st21nfcb_nci_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct st21nfcb_i2c_phy *phy;
+ struct st21nfcb_nfc_platform_data *pdata;
+ int r;
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+ dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ nfc_err(&client->dev, "Need I2C_FUNC_I2C\n");
+ return -ENODEV;
+ }
+
+ phy = devm_kzalloc(&client->dev, sizeof(struct st21nfcb_i2c_phy),
+ GFP_KERNEL);
+ if (!phy) {
+ nfc_err(&client->dev,
+ "Cannot allocate memory for st21nfcb i2c phy.\n");
+ return -ENOMEM;
+ }
+
+ phy->i2c_dev = client;
+
+ i2c_set_clientdata(client, phy);
+
+ pdata = client->dev.platform_data;
+ if (!pdata && client->dev.of_node) {
+ r = st21nfcb_nci_i2c_of_request_resources(client);
+ if (r) {
+ nfc_err(&client->dev, "No platform data\n");
+ return r;
+ }
+ } else if (pdata) {
+ r = st21nfcb_nci_i2c_request_resources(client);
+ if (r) {
+ nfc_err(&client->dev,
+ "Cannot get platform resources\n");
+ return r;
+ }
+ } else {
+ nfc_err(&client->dev,
+ "st21nfcb platform resources not available\n");
+ return -ENODEV;
+ }
+
+ r = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+ st21nfcb_nci_irq_thread_fn,
+ phy->irq_polarity | IRQF_ONESHOT,
+ ST21NFCB_NCI_DRIVER_NAME, phy);
+ if (r < 0) {
+ nfc_err(&client->dev, "Unable to register IRQ handler\n");
+ return r;
+ }
+
+ return ndlc_probe(phy, &i2c_phy_ops, &client->dev,
+ ST21NFCB_FRAME_HEADROOM, ST21NFCB_FRAME_TAILROOM,
+ &phy->ndlc);
+}
+
+static int st21nfcb_nci_i2c_remove(struct i2c_client *client)
+{
+ struct st21nfcb_i2c_phy *phy = i2c_get_clientdata(client);
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+
+ ndlc_remove(phy->ndlc);
+
+ if (phy->powered)
+ st21nfcb_nci_i2c_disable(phy);
+
+ return 0;
+}
+
+static const struct of_device_id of_st21nfcb_i2c_match[] = {
+ { .compatible = "st,st21nfcb_i2c", },
+ {}
+};
+
+static struct i2c_driver st21nfcb_nci_i2c_driver = {
+ .driver = {
+		.owner = THIS_MODULE,
+		.name = ST21NFCB_NCI_I2C_DRIVER_NAME,
+ .of_match_table = of_match_ptr(of_st21nfcb_i2c_match),
+ },
+ .probe = st21nfcb_nci_i2c_probe,
+ .id_table = st21nfcb_nci_i2c_id_table,
+ .remove = st21nfcb_nci_i2c_remove,
+};
+
+module_i2c_driver(st21nfcb_nci_i2c_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/nfc/st21nfcb/ndlc.c b/drivers/nfc/st21nfcb/ndlc.c
new file mode 100644
index 000000000000..83c97c36112b
--- /dev/null
+++ b/drivers/nfc/st21nfcb/ndlc.c
@@ -0,0 +1,298 @@
+/*
+ * Low Level Transport (NDLC) Driver for STMicroelectronics NFC Chip
+ *
+ * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/sched.h>
+#include <net/nfc/nci_core.h>
+
+#include "ndlc.h"
+#include "st21nfcb.h"
+
+#define NDLC_TIMER_T1 100
+#define NDLC_TIMER_T1_WAIT 400
+#define NDLC_TIMER_T2 1200
+
+#define PCB_TYPE_DATAFRAME 0x80
+#define PCB_TYPE_SUPERVISOR 0xc0
+#define PCB_TYPE_MASK PCB_TYPE_SUPERVISOR
+
+#define PCB_SYNC_ACK 0x20
+#define PCB_SYNC_NACK 0x10
+#define PCB_SYNC_WAIT 0x30
+#define PCB_SYNC_NOINFO 0x00
+#define PCB_SYNC_MASK PCB_SYNC_WAIT
+
+#define PCB_DATAFRAME_RETRANSMIT_YES 0x00
+#define PCB_DATAFRAME_RETRANSMIT_NO 0x04
+#define PCB_DATAFRAME_RETRANSMIT_MASK PCB_DATAFRAME_RETRANSMIT_NO
+
+#define PCB_SUPERVISOR_RETRANSMIT_YES 0x00
+#define PCB_SUPERVISOR_RETRANSMIT_NO 0x02
+#define PCB_SUPERVISOR_RETRANSMIT_MASK PCB_SUPERVISOR_RETRANSMIT_NO
+
+#define PCB_FRAME_CRC_INFO_PRESENT 0x08
+#define PCB_FRAME_CRC_INFO_NOTPRESENT 0x00
+#define PCB_FRAME_CRC_INFO_MASK PCB_FRAME_CRC_INFO_PRESENT
+
+#define NDLC_DUMP_SKB(info, skb) \
+do { \
+ pr_debug("%s:\n", info); \
+ print_hex_dump(KERN_DEBUG, "ndlc: ", DUMP_PREFIX_OFFSET, \
+ 16, 1, skb->data, skb->len, 0); \
+} while (0)
+
+int ndlc_open(struct llt_ndlc *ndlc)
+{
+ /* toggle reset pin */
+ ndlc->ops->enable(ndlc->phy_id);
+ return 0;
+}
+EXPORT_SYMBOL(ndlc_open);
+
+void ndlc_close(struct llt_ndlc *ndlc)
+{
+ /* toggle reset pin */
+ ndlc->ops->disable(ndlc->phy_id);
+}
+EXPORT_SYMBOL(ndlc_close);
+
+int ndlc_send(struct llt_ndlc *ndlc, struct sk_buff *skb)
+{
+ /* add ndlc header */
+ u8 pcb = PCB_TYPE_DATAFRAME | PCB_DATAFRAME_RETRANSMIT_NO |
+ PCB_FRAME_CRC_INFO_NOTPRESENT;
+
+ *skb_push(skb, 1) = pcb;
+ skb_queue_tail(&ndlc->send_q, skb);
+
+ schedule_work(&ndlc->sm_work);
+
+ return 0;
+}
+EXPORT_SYMBOL(ndlc_send);
+
+static void llt_ndlc_send_queue(struct llt_ndlc *ndlc)
+{
+ struct sk_buff *skb;
+ int r;
+ unsigned long time_sent;
+
+ if (ndlc->send_q.qlen)
+ pr_debug("sendQlen=%d unackQlen=%d\n",
+ ndlc->send_q.qlen, ndlc->ack_pending_q.qlen);
+
+ while (ndlc->send_q.qlen) {
+ skb = skb_dequeue(&ndlc->send_q);
+ NDLC_DUMP_SKB("ndlc frame written", skb);
+ r = ndlc->ops->write(ndlc->phy_id, skb);
+ if (r < 0) {
+ ndlc->hard_fault = r;
+ break;
+ }
+ time_sent = jiffies;
+ *(unsigned long *)skb->cb = time_sent;
+
+ skb_queue_tail(&ndlc->ack_pending_q, skb);
+
+ /* start timer t1 for ndlc acknowledgment */
+ ndlc->t1_active = true;
+ mod_timer(&ndlc->t1_timer, time_sent +
+ msecs_to_jiffies(NDLC_TIMER_T1));
+ }
+}
+
+static void llt_ndlc_requeue_data_pending(struct llt_ndlc *ndlc)
+{
+ struct sk_buff *skb;
+ u8 pcb;
+
+ while ((skb = skb_dequeue_tail(&ndlc->ack_pending_q))) {
+ pcb = skb->data[0];
+ switch (pcb & PCB_TYPE_MASK) {
+ case PCB_TYPE_SUPERVISOR:
+ skb->data[0] = (pcb & ~PCB_SUPERVISOR_RETRANSMIT_MASK) |
+ PCB_SUPERVISOR_RETRANSMIT_YES;
+ break;
+ case PCB_TYPE_DATAFRAME:
+ skb->data[0] = (pcb & ~PCB_DATAFRAME_RETRANSMIT_MASK) |
+ PCB_DATAFRAME_RETRANSMIT_YES;
+ break;
+ default:
+ pr_err("UNKNOWN Packet Control Byte=%d\n", pcb);
+ kfree_skb(skb);
+ break;
+ }
+ skb_queue_head(&ndlc->send_q, skb);
+ }
+}
+
+static void llt_ndlc_rcv_queue(struct llt_ndlc *ndlc)
+{
+ struct sk_buff *skb;
+ u8 pcb;
+ unsigned long time_sent;
+
+ if (ndlc->rcv_q.qlen)
+ pr_debug("rcvQlen=%d\n", ndlc->rcv_q.qlen);
+
+ while ((skb = skb_dequeue(&ndlc->rcv_q)) != NULL) {
+ pcb = skb->data[0];
+ skb_pull(skb, 1);
+ if ((pcb & PCB_TYPE_MASK) == PCB_TYPE_SUPERVISOR) {
+ switch (pcb & PCB_SYNC_MASK) {
+ case PCB_SYNC_ACK:
+ del_timer_sync(&ndlc->t1_timer);
+ del_timer_sync(&ndlc->t2_timer);
+ ndlc->t2_active = false;
+ ndlc->t1_active = false;
+ break;
+ case PCB_SYNC_NACK:
+ llt_ndlc_requeue_data_pending(ndlc);
+ llt_ndlc_send_queue(ndlc);
+ /* start timer t1 for ndlc acknowledgment */
+ time_sent = jiffies;
+ ndlc->t1_active = true;
+ mod_timer(&ndlc->t1_timer, time_sent +
+ msecs_to_jiffies(NDLC_TIMER_T1));
+ break;
+ case PCB_SYNC_WAIT:
+ time_sent = jiffies;
+ ndlc->t1_active = true;
+ mod_timer(&ndlc->t1_timer, time_sent +
+ msecs_to_jiffies(NDLC_TIMER_T1_WAIT));
+ break;
+ default:
+ pr_err("UNKNOWN Packet Control Byte=%d\n", pcb);
+ kfree_skb(skb);
+ break;
+ }
+ } else {
+ nci_recv_frame(ndlc->ndev, skb);
+ }
+ }
+}
+
+static void llt_ndlc_sm_work(struct work_struct *work)
+{
+ struct llt_ndlc *ndlc = container_of(work, struct llt_ndlc, sm_work);
+
+ llt_ndlc_send_queue(ndlc);
+ llt_ndlc_rcv_queue(ndlc);
+
+ if (ndlc->t1_active && timer_pending(&ndlc->t1_timer) == 0) {
+ pr_debug("Handle T1(recv SUPERVISOR) elapsed (T1 now inactive)\n");
+ ndlc->t1_active = false;
+
+ llt_ndlc_requeue_data_pending(ndlc);
+ llt_ndlc_send_queue(ndlc);
+ }
+
+ if (ndlc->t2_active && timer_pending(&ndlc->t2_timer) == 0) {
+ pr_debug("Handle T2(recv DATA) elapsed (T2 now inactive)\n");
+ ndlc->t2_active = false;
+ ndlc->t1_active = false;
+ del_timer_sync(&ndlc->t1_timer);
+
+ ndlc_close(ndlc);
+ ndlc->hard_fault = -EREMOTEIO;
+ }
+}
+
+void ndlc_recv(struct llt_ndlc *ndlc, struct sk_buff *skb)
+{
+ if (skb == NULL) {
+ pr_err("NULL Frame -> link is dead\n");
+ ndlc->hard_fault = -EREMOTEIO;
+ ndlc_close(ndlc);
+ } else {
+ NDLC_DUMP_SKB("incoming frame", skb);
+ skb_queue_tail(&ndlc->rcv_q, skb);
+ }
+
+ schedule_work(&ndlc->sm_work);
+}
+EXPORT_SYMBOL(ndlc_recv);
+
+static void ndlc_t1_timeout(unsigned long data)
+{
+ struct llt_ndlc *ndlc = (struct llt_ndlc *)data;
+
+ pr_debug("\n");
+
+ schedule_work(&ndlc->sm_work);
+}
+
+static void ndlc_t2_timeout(unsigned long data)
+{
+ struct llt_ndlc *ndlc = (struct llt_ndlc *)data;
+
+ pr_debug("\n");
+
+ schedule_work(&ndlc->sm_work);
+}
+
+int ndlc_probe(void *phy_id, struct nfc_phy_ops *phy_ops, struct device *dev,
+ int phy_headroom, int phy_tailroom, struct llt_ndlc **ndlc_id)
+{
+ struct llt_ndlc *ndlc;
+
+ ndlc = devm_kzalloc(dev, sizeof(struct llt_ndlc), GFP_KERNEL);
+ if (!ndlc) {
+ nfc_err(dev, "Cannot allocate memory for ndlc.\n");
+ return -ENOMEM;
+ }
+ ndlc->ops = phy_ops;
+ ndlc->phy_id = phy_id;
+ ndlc->dev = dev;
+
+ *ndlc_id = ndlc;
+
+ /* start timers */
+ init_timer(&ndlc->t1_timer);
+ ndlc->t1_timer.data = (unsigned long)ndlc;
+ ndlc->t1_timer.function = ndlc_t1_timeout;
+
+ init_timer(&ndlc->t2_timer);
+ ndlc->t2_timer.data = (unsigned long)ndlc;
+ ndlc->t2_timer.function = ndlc_t2_timeout;
+
+ skb_queue_head_init(&ndlc->rcv_q);
+ skb_queue_head_init(&ndlc->send_q);
+ skb_queue_head_init(&ndlc->ack_pending_q);
+
+ INIT_WORK(&ndlc->sm_work, llt_ndlc_sm_work);
+
+ return st21nfcb_nci_probe(ndlc, phy_headroom, phy_tailroom);
+}
+EXPORT_SYMBOL(ndlc_probe);
+
+void ndlc_remove(struct llt_ndlc *ndlc)
+{
+ /* cancel timers */
+ del_timer_sync(&ndlc->t1_timer);
+ del_timer_sync(&ndlc->t2_timer);
+ ndlc->t2_active = false;
+ ndlc->t1_active = false;
+
+ skb_queue_purge(&ndlc->rcv_q);
+ skb_queue_purge(&ndlc->send_q);
+
+ st21nfcb_nci_remove(ndlc->ndev);
+}
+EXPORT_SYMBOL(ndlc_remove);
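
The I2C front end above is only one possible transport for this ndlc layer. Any physical layer follows the same contract: expose enable/disable/write through an nfc_phy_ops, hand it to ndlc_probe(), feed every frame read from the bus into ndlc_recv(), and let the NCI core transmit through ndlc_send(). A minimal sketch of such a backend follows; the foo_* names, the headroom/tailroom values and the bus details are illustrative assumptions, not part of this patch.

/* Hypothetical phy backend sketch: illustrative only, not part of this patch. */
#include <linux/device.h>
#include <linux/skbuff.h>
#include "ndlc.h"

struct foo_phy {
	struct llt_ndlc *ndlc;	/* handle filled in by ndlc_probe() */
	/* bus-specific state (registers, gpios, ...) would live here */
};

static int foo_phy_enable(void *phy_id)
{
	/* power the chip up / toggle its reset line */
	return 0;
}

static void foo_phy_disable(void *phy_id)
{
	/* power the chip down */
}

static int foo_phy_write(void *phy_id, struct sk_buff *skb)
{
	/* push skb->data / skb->len out on the bus, return <0 on error */
	return 0;
}

static struct nfc_phy_ops foo_phy_ops = {
	.write = foo_phy_write,
	.enable = foo_phy_enable,
	.disable = foo_phy_disable,
};

/* bus probe path: register with the ndlc layer */
static int foo_phy_probe(struct device *dev, struct foo_phy *phy)
{
	/* headroom/tailroom depend on the bus framing; 1/0 is just a guess */
	return ndlc_probe(phy, &foo_phy_ops, dev, 1, 0, &phy->ndlc);
}

/* bus RX path (e.g. a threaded IRQ handler): hand frames to the ndlc layer */
static void foo_phy_rx(struct foo_phy *phy, struct sk_buff *skb)
{
	ndlc_recv(phy->ndlc, skb);
}
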
diff --git a/drivers/nfc/st21nfcb/ndlc.h b/drivers/nfc/st21nfcb/ndlc.h
new file mode 100644
index 000000000000..c30a2f0faa5f
--- /dev/null
+++ b/drivers/nfc/st21nfcb/ndlc.h
@@ -0,0 +1,55 @@
+/*
+ * NCI based Driver for STMicroelectronics NFC Chip
+ *
+ * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LOCAL_NDLC_H_
+#define __LOCAL_NDLC_H_
+
+#include <linux/skbuff.h>
+#include <net/nfc/nfc.h>
+
+/* Low Level Transport description */
+struct llt_ndlc {
+ struct nci_dev *ndev;
+ struct nfc_phy_ops *ops;
+ void *phy_id;
+
+ struct timer_list t1_timer;
+ bool t1_active;
+
+ struct timer_list t2_timer;
+ bool t2_active;
+
+ struct sk_buff_head rcv_q;
+ struct sk_buff_head send_q;
+ struct sk_buff_head ack_pending_q;
+
+ struct work_struct sm_work;
+
+ struct device *dev;
+
+ int hard_fault;
+};
+
+int ndlc_open(struct llt_ndlc *ndlc);
+void ndlc_close(struct llt_ndlc *ndlc);
+int ndlc_send(struct llt_ndlc *ndlc, struct sk_buff *skb);
+void ndlc_recv(struct llt_ndlc *ndlc, struct sk_buff *skb);
+int ndlc_probe(void *phy_id, struct nfc_phy_ops *phy_ops, struct device *dev,
+ int phy_headroom, int phy_tailroom, struct llt_ndlc **ndlc_id);
+void ndlc_remove(struct llt_ndlc *ndlc);
+#endif /* __LOCAL_NDLC_H_ */
diff --git a/drivers/nfc/st21nfcb/st21nfcb.c b/drivers/nfc/st21nfcb/st21nfcb.c
new file mode 100644
index 000000000000..4d95863e3063
--- /dev/null
+++ b/drivers/nfc/st21nfcb/st21nfcb.c
@@ -0,0 +1,125 @@
+/*
+ * NCI based Driver for STMicroelectronics NFC Chip
+ *
+ * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/nfc.h>
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+
+#include "st21nfcb.h"
+#include "ndlc.h"
+
+#define DRIVER_DESC "NCI NFC driver for ST21NFCB"
+
+static int st21nfcb_nci_open(struct nci_dev *ndev)
+{
+ struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+ int r;
+
+ if (test_and_set_bit(ST21NFCB_NCI_RUNNING, &info->flags))
+ return 0;
+
+ r = ndlc_open(info->ndlc);
+ if (r)
+ clear_bit(ST21NFCB_NCI_RUNNING, &info->flags);
+
+ return r;
+}
+
+static int st21nfcb_nci_close(struct nci_dev *ndev)
+{
+ struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+
+ if (!test_and_clear_bit(ST21NFCB_NCI_RUNNING, &info->flags))
+ return 0;
+
+ ndlc_close(info->ndlc);
+
+ return 0;
+}
+
+static int st21nfcb_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
+{
+ struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+
+ skb->dev = (void *)ndev;
+
+ if (!test_bit(ST21NFCB_NCI_RUNNING, &info->flags))
+ return -EBUSY;
+
+ return ndlc_send(info->ndlc, skb);
+}
+
+static struct nci_ops st21nfcb_nci_ops = {
+ .open = st21nfcb_nci_open,
+ .close = st21nfcb_nci_close,
+ .send = st21nfcb_nci_send,
+};
+
+int st21nfcb_nci_probe(struct llt_ndlc *ndlc, int phy_headroom,
+ int phy_tailroom)
+{
+ struct st21nfcb_nci_info *info;
+ int r;
+ u32 protocols;
+
+ info = devm_kzalloc(ndlc->dev,
+ sizeof(struct st21nfcb_nci_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ protocols = NFC_PROTO_JEWEL_MASK
+ | NFC_PROTO_MIFARE_MASK
+ | NFC_PROTO_FELICA_MASK
+ | NFC_PROTO_ISO14443_MASK
+ | NFC_PROTO_ISO14443_B_MASK
+ | NFC_PROTO_NFC_DEP_MASK;
+
+ ndlc->ndev = nci_allocate_device(&st21nfcb_nci_ops, protocols,
+ phy_headroom, phy_tailroom);
+ if (!ndlc->ndev) {
+ pr_err("Cannot allocate nfc ndev\n");
+ r = -ENOMEM;
+ goto err_alloc_ndev;
+ }
+ info->ndlc = ndlc;
+
+ nci_set_drvdata(ndlc->ndev, info);
+
+ r = nci_register_device(ndlc->ndev);
+ if (r)
+ goto err_regdev;
+
+ return r;
+err_regdev:
+ nci_free_device(ndlc->ndev);
+
+err_alloc_ndev:
+ return r;
+}
+EXPORT_SYMBOL_GPL(st21nfcb_nci_probe);
+
+void st21nfcb_nci_remove(struct nci_dev *ndev)
+{
+ nci_unregister_device(ndev);
+ nci_free_device(ndev);
+}
+EXPORT_SYMBOL_GPL(st21nfcb_nci_remove);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/nfc/st21nfcb/st21nfcb.h b/drivers/nfc/st21nfcb/st21nfcb.h
new file mode 100644
index 000000000000..4bbbebb9f34d
--- /dev/null
+++ b/drivers/nfc/st21nfcb/st21nfcb.h
@@ -0,0 +1,38 @@
+/*
+ * NCI based Driver for STMicroelectronics NFC Chip
+ *
+ * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LOCAL_ST21NFCB_H_
+#define __LOCAL_ST21NFCB_H_
+
+#include <net/nfc/nci_core.h>
+
+#include "ndlc.h"
+
+/* Define private flags: */
+#define ST21NFCB_NCI_RUNNING 1
+
+struct st21nfcb_nci_info {
+ struct llt_ndlc *ndlc;
+ unsigned long flags;
+};
+
+void st21nfcb_nci_remove(struct nci_dev *ndev);
+int st21nfcb_nci_probe(struct llt_ndlc *ndlc, int phy_headroom,
+ int phy_tailroom);
+
+#endif /* __LOCAL_ST21NFCB_H_ */
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 969af0f2bdf9..0272e49135d0 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -73,17 +73,17 @@ struct bcma_host_ops {
/* Core-ID values. */
#define BCMA_CORE_OOB_ROUTER 0x367 /* Out of band */
#define BCMA_CORE_4706_CHIPCOMMON 0x500
-#define BCMA_CORE_PCIEG2 0x501
-#define BCMA_CORE_DMA 0x502
-#define BCMA_CORE_SDIO3 0x503
-#define BCMA_CORE_USB20 0x504
-#define BCMA_CORE_USB30 0x505
-#define BCMA_CORE_A9JTAG 0x506
-#define BCMA_CORE_DDR23 0x507
-#define BCMA_CORE_ROM 0x508
-#define BCMA_CORE_NAND 0x509
-#define BCMA_CORE_QSPI 0x50A
-#define BCMA_CORE_CHIPCOMMON_B 0x50B
+#define BCMA_CORE_NS_PCIEG2 0x501
+#define BCMA_CORE_NS_DMA 0x502
+#define BCMA_CORE_NS_SDIO3 0x503
+#define BCMA_CORE_NS_USB20 0x504
+#define BCMA_CORE_NS_USB30 0x505
+#define BCMA_CORE_NS_A9JTAG 0x506
+#define BCMA_CORE_NS_DDR23 0x507
+#define BCMA_CORE_NS_ROM 0x508
+#define BCMA_CORE_NS_NAND 0x509
+#define BCMA_CORE_NS_QSPI 0x50A
+#define BCMA_CORE_NS_CHIPCOMMON_B 0x50B
#define BCMA_CORE_4706_SOC_RAM 0x50E
#define BCMA_CORE_ARMCA9 0x510
#define BCMA_CORE_4706_MAC_GBIT 0x52D
@@ -158,6 +158,7 @@ struct bcma_host_ops {
/* Chip IDs of PCIe devices */
#define BCMA_CHIP_ID_BCM4313 0x4313
#define BCMA_CHIP_ID_BCM43142 43142
+#define BCMA_CHIP_ID_BCM43131 43131
#define BCMA_CHIP_ID_BCM43217 43217
#define BCMA_CHIP_ID_BCM43222 43222
#define BCMA_CHIP_ID_BCM43224 43224
diff --git a/include/linux/platform_data/st21nfcb.h b/include/linux/platform_data/st21nfcb.h
new file mode 100644
index 000000000000..2d11f1f5efab
--- /dev/null
+++ b/include/linux/platform_data/st21nfcb.h
@@ -0,0 +1,32 @@
+/*
+ * Driver include for the ST21NFCB NFC chip.
+ *
+ * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ST21NFCB_NCI_H_
+#define _ST21NFCB_NCI_H_
+
+#include <linux/i2c.h>
+
+#define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci"
+
+struct st21nfcb_nfc_platform_data {
+ unsigned int gpio_irq;
+ unsigned int gpio_reset;
+ unsigned int irq_polarity;
+};
+
+#endif /* _ST21NFCB_NCI_H_ */
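
On boards that describe their devices statically rather than through device tree, this platform data would typically reach the driver from board code via an i2c_board_info entry. A rough sketch follows; the bus number, slave address, GPIO numbers and trigger type are placeholders, and it assumes the driver's i2c_device_id table matches ST21NFCB_NCI_DRIVER_NAME.

/* Hypothetical board-file snippet: placeholder values, not part of this patch. */
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/platform_data/st21nfcb.h>

static struct st21nfcb_nfc_platform_data foo_st21nfcb_pdata = {
	.gpio_irq	= 42,	/* placeholder GPIO numbers */
	.gpio_reset	= 43,
	.irq_polarity	= IRQF_TRIGGER_RISING,
};

static struct i2c_board_info foo_i2c_board_info[] __initdata = {
	{
		/* 0x08 is a placeholder I2C address */
		I2C_BOARD_INFO(ST21NFCB_NCI_DRIVER_NAME, 0x08),
		.platform_data = &foo_st21nfcb_pdata,
	},
};

static int __init foo_board_nfc_init(void)
{
	/* bus number 1 is a placeholder as well */
	return i2c_register_board_info(1, foo_i2c_board_info,
				       ARRAY_SIZE(foo_i2c_board_info));
}
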
diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h
index 79b530fb2c4d..d184df1d0d41 100644
--- a/include/net/6lowpan.h
+++ b/include/net/6lowpan.h
@@ -75,20 +75,6 @@
(((a)->s6_addr[14]) == (m)[6]) && \
(((a)->s6_addr[15]) == (m)[7]))
-/* ipv6 address is unspecified */
-#define is_addr_unspecified(a) \
- ((((a)->s6_addr32[0]) == 0) && \
- (((a)->s6_addr32[1]) == 0) && \
- (((a)->s6_addr32[2]) == 0) && \
- (((a)->s6_addr32[3]) == 0))
-
-/* compare ipv6 addresses prefixes */
-#define ipaddr_prefixcmp(addr1, addr2, length) \
- (memcmp(addr1, addr2, length >> 3) == 0)
-
-/* local link, i.e. FE80::/10 */
-#define is_addr_link_local(a) (((a)->s6_addr16[0]) == htons(0xFE80))
-
/*
* check whether we can compress the IID to 16 bits,
* it's possible for unicast adresses with first 49 bits are zero only.
@@ -100,22 +86,8 @@
(((a)->s6_addr[12]) == 0xfe) && \
(((a)->s6_addr[13]) == 0))
-/* multicast address */
-#define is_addr_mcast(a) (((a)->s6_addr[0]) == 0xFF)
-
/* check whether the 112-bit gid of the multicast address is mappable to: */
-/* 9 bits, for FF02::1 (all nodes) and FF02::2 (all routers) addresses only. */
-#define lowpan_is_mcast_addr_compressable(a) \
- ((((a)->s6_addr16[1]) == 0) && \
- (((a)->s6_addr16[2]) == 0) && \
- (((a)->s6_addr16[3]) == 0) && \
- (((a)->s6_addr16[4]) == 0) && \
- (((a)->s6_addr16[5]) == 0) && \
- (((a)->s6_addr16[6]) == 0) && \
- (((a)->s6_addr[14]) == 0) && \
- ((((a)->s6_addr[15]) == 1) || (((a)->s6_addr[15]) == 2)))
-
/* 48 bits, FFXX::00XX:XXXX:XXXX */
#define lowpan_is_mcast_addr_compressable48(a) \
((((a)->s6_addr16[1]) == 0) && \
@@ -168,17 +140,6 @@
#define LOWPAN_FRAGN_HEAD_SIZE 0x5
/*
- * According IEEE802.15.4 standard:
- * - MTU is 127 octets
- * - maximum MHR size is 37 octets
- * - MFR size is 2 octets
- *
- * so minimal payload size that we may guarantee is:
- * MTU - MHR - MFR = 88 octets
- */
-#define LOWPAN_FRAG_SIZE 88
-
-/*
* Values of fields within the IPHC encoding first byte
* (C stands for compressed and I for inline)
*/
@@ -279,17 +240,6 @@ static inline int lowpan_fetch_skb_u8(struct sk_buff *skb, u8 *val)
return 0;
}
-static inline int lowpan_fetch_skb_u16(struct sk_buff *skb, u16 *val)
-{
- if (unlikely(!pskb_may_pull(skb, 2)))
- return -EINVAL;
-
- *val = (skb->data[0] << 8) | skb->data[1];
- skb_pull(skb, 2);
-
- return 0;
-}
-
static inline bool lowpan_fetch_skb(struct sk_buff *skb,
void *data, const unsigned int len)
{
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index f0a3d8890760..3f8547f1c6f8 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -167,7 +167,7 @@ enum {
HCI_AUTO_OFF,
HCI_RFKILLED,
HCI_MGMT,
- HCI_PAIRABLE,
+ HCI_BONDABLE,
HCI_SERVICE_CACHE,
HCI_KEEP_DEBUG_KEYS,
HCI_USE_DEBUG_KEYS,
@@ -1074,6 +1074,8 @@ struct hci_rp_read_data_block_size {
__le16 num_blocks;
} __packed;
+#define HCI_OP_READ_LOCAL_CODECS 0x100b
+
#define HCI_OP_READ_PAGE_SCAN_ACTIVITY 0x0c1b
struct hci_rp_read_page_scan_activity {
__u8 status;
@@ -1170,6 +1172,8 @@ struct hci_rp_write_remote_amp_assoc {
__u8 phy_handle;
} __packed;
+#define HCI_OP_GET_MWS_TRANSPORT_CONFIG 0x140c
+
#define HCI_OP_ENABLE_DUT_MODE 0x1803
#define HCI_OP_WRITE_SSP_DEBUG_MODE 0x1804
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 996ed065b6c2..b5d5af3aa469 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -203,6 +203,8 @@ struct hci_dev {
__u16 page_scan_window;
__u8 page_scan_type;
__u8 le_adv_channel_map;
+ __u16 le_adv_min_interval;
+ __u16 le_adv_max_interval;
__u8 le_scan_type;
__u16 le_scan_interval;
__u16 le_scan_window;
@@ -458,6 +460,7 @@ struct hci_conn_params {
enum {
HCI_AUTO_CONN_DISABLED,
HCI_AUTO_CONN_REPORT,
+ HCI_AUTO_CONN_DIRECT,
HCI_AUTO_CONN_ALWAYS,
HCI_AUTO_CONN_LINK_LOSS,
} auto_connect;
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 623d5203c592..414cd2f9a437 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -87,7 +87,7 @@ struct mgmt_rp_read_index_list {
#define MGMT_SETTING_CONNECTABLE 0x00000002
#define MGMT_SETTING_FAST_CONNECTABLE 0x00000004
#define MGMT_SETTING_DISCOVERABLE 0x00000008
-#define MGMT_SETTING_PAIRABLE 0x00000010
+#define MGMT_SETTING_BONDABLE 0x00000010
#define MGMT_SETTING_LINK_SECURITY 0x00000020
#define MGMT_SETTING_SSP 0x00000040
#define MGMT_SETTING_BREDR 0x00000080
@@ -131,7 +131,7 @@ struct mgmt_cp_set_discoverable {
#define MGMT_OP_SET_FAST_CONNECTABLE 0x0008
-#define MGMT_OP_SET_PAIRABLE 0x0009
+#define MGMT_OP_SET_BONDABLE 0x0009
#define MGMT_OP_SET_LINK_SECURITY 0x000A
diff --git a/include/net/nfc/digital.h b/include/net/nfc/digital.h
index bdf55c3b7a19..d9a5cf7ac1c4 100644
--- a/include/net/nfc/digital.h
+++ b/include/net/nfc/digital.h
@@ -49,6 +49,7 @@ enum {
NFC_DIGITAL_FRAMING_NFCA_SHORT = 0,
NFC_DIGITAL_FRAMING_NFCA_STANDARD,
NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A,
+ NFC_DIGITAL_FRAMING_NFCA_ANTICOL_COMPLETE,
NFC_DIGITAL_FRAMING_NFCA_T1T,
NFC_DIGITAL_FRAMING_NFCA_T2T,
@@ -126,6 +127,15 @@ typedef void (*nfc_digital_cmd_complete_t)(struct nfc_digital_dev *ddev,
* the NFC-DEP ATR_REQ command through cb. The digital stack deducts the RF
* tech by analyzing the SoD of the frame containing the ATR_REQ command.
* This is an asynchronous function.
+ * @tg_listen_md: If supported, put the device in automatic listen mode with
+ * mode detection but without automatic anti-collision. In this mode, the
+ * device automatically detects the RF technology. The detected RF
+ * technology can be retrieved by calling @tg_get_rf_tech.
+ * The digital stack will then perform the appropriate anti-collision
+ * sequence. This is an asynchronous function.
+ * @tg_get_rf_tech: Required when @tg_listen_md is supported, unused otherwise.
+ * Return the RF Technology that was detected by the @tg_listen_md call.
+ * This is a synchronous function.
*
* @switch_rf: Turns device radio on or off. The stack does not call explicitly
* switch_rf to turn the radio on. A call to in|tg_configure_hw must turn
@@ -160,6 +170,9 @@ struct nfc_digital_ops {
struct digital_tg_mdaa_params *mdaa_params,
u16 timeout, nfc_digital_cmd_complete_t cb,
void *arg);
+ int (*tg_listen_md)(struct nfc_digital_dev *ddev, u16 timeout,
+ nfc_digital_cmd_complete_t cb, void *arg);
+ int (*tg_get_rf_tech)(struct nfc_digital_dev *ddev, u8 *rf_tech);
int (*switch_rf)(struct nfc_digital_dev *ddev, bool on);
void (*abort_cmd)(struct nfc_digital_dev *ddev);
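
For drivers, the split between the two new callbacks is: @tg_listen_md arms the hardware's automatic mode detection and completes asynchronously through the usual nfc_digital_cmd_complete_t callback, while @tg_get_rf_tech simply reports which technology that detection saw. A rough driver-side sketch is below; the foo_* names and the private-data layout are assumptions, and the actual hardware programming is elided.

/* Hypothetical digital driver sketch: not part of this patch. */
#include <net/nfc/digital.h>

struct foo_digital {
	struct nfc_digital_dev *ddev;
	u8 detected_rf_tech;	/* cached by the tg_listen_md completion path */
};

static int foo_tg_listen_md(struct nfc_digital_dev *ddev, u16 timeout,
			    nfc_digital_cmd_complete_t cb, void *arg)
{
	/* Program the hardware for automatic mode detection, remember
	 * cb/arg, and invoke cb from the RX/timeout path once a frame
	 * arrives or the timeout expires. Elided here.
	 */
	return 0;
}

static int foo_tg_get_rf_tech(struct nfc_digital_dev *ddev, u8 *rf_tech)
{
	struct foo_digital *dev = nfc_digital_get_drvdata(ddev);

	/* Synchronous: report what the last mode detection saw, e.g.
	 * NFC_DIGITAL_RF_TECH_106A, so the stack can run anti-collision.
	 */
	*rf_tech = dev->detected_rf_tech;

	return 0;
}

static struct nfc_digital_ops foo_digital_ops = {
	/* other mandatory in_* and tg_* callbacks omitted for brevity */
	.tg_listen_md	= foo_tg_listen_md,
	.tg_get_rf_tech	= foo_tg_get_rf_tech,
};
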
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index 61286db54388..7ee8f4cc610b 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -37,6 +37,7 @@ struct nfc_hci_ops {
int (*xmit) (struct nfc_hci_dev *hdev, struct sk_buff *skb);
int (*start_poll) (struct nfc_hci_dev *hdev,
u32 im_protocols, u32 tm_protocols);
+ void (*stop_poll) (struct nfc_hci_dev *hdev);
int (*dep_link_up)(struct nfc_hci_dev *hdev, struct nfc_target *target,
u8 comm_mode, u8 *gb, size_t gb_len);
int (*dep_link_down)(struct nfc_hci_dev *hdev);
diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c
index a1b7117a9600..142eef55c9e2 100644
--- a/net/6lowpan/iphc.c
+++ b/net/6lowpan/iphc.c
@@ -177,8 +177,8 @@ static int skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr,
struct sk_buff *new;
int stat;
- new = skb_copy_expand(skb, sizeof(struct ipv6hdr),
- skb_tailroom(skb), GFP_ATOMIC);
+ new = skb_copy_expand(skb, sizeof(struct ipv6hdr), skb_tailroom(skb),
+ GFP_ATOMIC);
kfree_skb(skb);
if (!new)
@@ -205,10 +205,9 @@ static int skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr,
/* Uncompress function for multicast destination address,
* when M bit is set.
*/
-static int
-lowpan_uncompress_multicast_daddr(struct sk_buff *skb,
- struct in6_addr *ipaddr,
- const u8 dam)
+static int lowpan_uncompress_multicast_daddr(struct sk_buff *skb,
+ struct in6_addr *ipaddr,
+ const u8 dam)
{
bool fail;
@@ -254,41 +253,41 @@ lowpan_uncompress_multicast_daddr(struct sk_buff *skb,
}
raw_dump_inline(NULL, "Reconstructed ipv6 multicast addr is",
- ipaddr->s6_addr, 16);
+ ipaddr->s6_addr, 16);
return 0;
}
-static int
-uncompress_udp_header(struct sk_buff *skb, struct udphdr *uh)
+static int uncompress_udp_header(struct sk_buff *skb, struct udphdr *uh)
{
bool fail;
u8 tmp = 0, val = 0;
- if (!uh)
- goto err;
-
- fail = lowpan_fetch_skb(skb, &tmp, 1);
+ fail = lowpan_fetch_skb(skb, &tmp, sizeof(tmp));
if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) {
pr_debug("UDP header uncompression\n");
switch (tmp & LOWPAN_NHC_UDP_CS_P_11) {
case LOWPAN_NHC_UDP_CS_P_00:
- fail |= lowpan_fetch_skb(skb, &uh->source, 2);
- fail |= lowpan_fetch_skb(skb, &uh->dest, 2);
+ fail |= lowpan_fetch_skb(skb, &uh->source,
+ sizeof(uh->source));
+ fail |= lowpan_fetch_skb(skb, &uh->dest,
+ sizeof(uh->dest));
break;
case LOWPAN_NHC_UDP_CS_P_01:
- fail |= lowpan_fetch_skb(skb, &uh->source, 2);
- fail |= lowpan_fetch_skb(skb, &val, 1);
+ fail |= lowpan_fetch_skb(skb, &uh->source,
+ sizeof(uh->source));
+ fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
uh->dest = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);
break;
case LOWPAN_NHC_UDP_CS_P_10:
- fail |= lowpan_fetch_skb(skb, &val, 1);
+ fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
uh->source = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);
- fail |= lowpan_fetch_skb(skb, &uh->dest, 2);
+ fail |= lowpan_fetch_skb(skb, &uh->dest,
+ sizeof(uh->dest));
break;
case LOWPAN_NHC_UDP_CS_P_11:
- fail |= lowpan_fetch_skb(skb, &val, 1);
+ fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
uh->source = htons(LOWPAN_NHC_UDP_4BIT_PORT +
(val >> 4));
uh->dest = htons(LOWPAN_NHC_UDP_4BIT_PORT +
@@ -307,10 +306,11 @@ uncompress_udp_header(struct sk_buff *skb, struct udphdr *uh)
pr_debug_ratelimited("checksum elided currently not supported\n");
goto err;
} else {
- fail |= lowpan_fetch_skb(skb, &uh->check, 2);
+ fail |= lowpan_fetch_skb(skb, &uh->check,
+ sizeof(uh->check));
}
- /* UDP lenght needs to be infered from the lower layers
+ /* UDP length needs to be inferred from the lower layers
* here, we obtain the hint from the remaining size of the
* frame
*/
@@ -333,9 +333,8 @@ err:
static const u8 lowpan_ttl_values[] = { 0, 1, 64, 255 };
int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
- const u8 *saddr, const u8 saddr_type,
- const u8 saddr_len, const u8 *daddr,
- const u8 daddr_type, const u8 daddr_len,
+ const u8 *saddr, const u8 saddr_type, const u8 saddr_len,
+ const u8 *daddr, const u8 daddr_type, const u8 daddr_len,
u8 iphc0, u8 iphc1, skb_delivery_cb deliver_skb)
{
struct ipv6hdr hdr = {};
@@ -348,7 +347,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
/* another if the CID flag is set */
if (iphc1 & LOWPAN_IPHC_CID) {
pr_debug("CID flag is set, increase header with one\n");
- if (lowpan_fetch_skb_u8(skb, &num_context))
+ if (lowpan_fetch_skb(skb, &num_context, sizeof(num_context)))
goto drop;
}
@@ -360,7 +359,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
* ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)
*/
case 0: /* 00b */
- if (lowpan_fetch_skb_u8(skb, &tmp))
+ if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp)))
goto drop;
memcpy(&hdr.flow_lbl, &skb->data[0], 3);
@@ -373,7 +372,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
* ECN + DSCP (1 byte), Flow Label is elided
*/
case 2: /* 10b */
- if (lowpan_fetch_skb_u8(skb, &tmp))
+ if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp)))
goto drop;
hdr.priority = ((tmp >> 2) & 0x0f);
@@ -383,7 +382,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
* ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
*/
case 1: /* 01b */
- if (lowpan_fetch_skb_u8(skb, &tmp))
+ if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp)))
goto drop;
hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30);
@@ -400,7 +399,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
/* Next Header */
if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
/* Next header is carried inline */
- if (lowpan_fetch_skb_u8(skb, &(hdr.nexthdr)))
+ if (lowpan_fetch_skb(skb, &hdr.nexthdr, sizeof(hdr.nexthdr)))
goto drop;
pr_debug("NH flag is set, next header carried inline: %02x\n",
@@ -411,7 +410,8 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I) {
hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03];
} else {
- if (lowpan_fetch_skb_u8(skb, &(hdr.hop_limit)))
+ if (lowpan_fetch_skb(skb, &hdr.hop_limit,
+ sizeof(hdr.hop_limit)))
goto drop;
}
@@ -421,8 +421,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
if (iphc1 & LOWPAN_IPHC_SAC) {
/* Source address context based uncompression */
pr_debug("SAC bit is set. Handle context based source address.\n");
- err = uncompress_context_based_src_addr(
- skb, &hdr.saddr, tmp);
+ err = uncompress_context_based_src_addr(skb, &hdr.saddr, tmp);
} else {
/* Source address uncompression */
pr_debug("source address stateless compression\n");
@@ -443,8 +442,9 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
pr_debug("dest: context-based mcast compression\n");
/* TODO: implement this */
} else {
- err = lowpan_uncompress_multicast_daddr(
- skb, &hdr.daddr, tmp);
+ err = lowpan_uncompress_multicast_daddr(skb, &hdr.daddr,
+ tmp);
+
if (err)
goto drop;
}
@@ -497,8 +497,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
hdr.version, ntohs(hdr.payload_len), hdr.nexthdr,
hdr.hop_limit, &hdr.daddr);
- raw_dump_table(__func__, "raw header dump",
- (u8 *)&hdr, sizeof(hdr));
+ raw_dump_table(__func__, "raw header dump", (u8 *)&hdr, sizeof(hdr));
return skb_deliver(skb, &hdr, dev, deliver_skb);
@@ -508,7 +507,7 @@ drop:
}
EXPORT_SYMBOL_GPL(lowpan_process_data);
-static u8 lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift,
+static u8 lowpan_compress_addr_64(u8 **hc_ptr, u8 shift,
const struct in6_addr *ipaddr,
const unsigned char *lladdr)
{
@@ -519,24 +518,22 @@ static u8 lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift,
pr_debug("address compression 0 bits\n");
} else if (lowpan_is_iid_16_bit_compressable(ipaddr)) {
/* compress IID to 16 bits xxxx::XXXX */
- memcpy(*hc06_ptr, &ipaddr->s6_addr16[7], 2);
- *hc06_ptr += 2;
+ lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr16[7], 2);
val = 2; /* 16-bits */
raw_dump_inline(NULL, "Compressed ipv6 addr is (16 bits)",
- *hc06_ptr - 2, 2);
+ *hc_ptr - 2, 2);
} else {
/* do not compress IID => xxxx::IID */
- memcpy(*hc06_ptr, &ipaddr->s6_addr16[4], 8);
- *hc06_ptr += 8;
+ lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr16[4], 8);
val = 1; /* 64-bits */
raw_dump_inline(NULL, "Compressed ipv6 addr is (64 bits)",
- *hc06_ptr - 8, 8);
+ *hc_ptr - 8, 8);
}
return rol8(val, shift);
}
-static void compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb)
+static void compress_udp_header(u8 **hc_ptr, struct sk_buff *skb)
{
struct udphdr *uh = udp_hdr(skb);
u8 tmp;
@@ -548,46 +545,46 @@ static void compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb)
pr_debug("UDP header: both ports compression to 4 bits\n");
/* compression value */
tmp = LOWPAN_NHC_UDP_CS_P_11;
- lowpan_push_hc_data(hc06_ptr, &tmp, sizeof(tmp));
+ lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
/* source and destination port */
tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_4BIT_PORT +
((ntohs(uh->source) - LOWPAN_NHC_UDP_4BIT_PORT) << 4);
- lowpan_push_hc_data(hc06_ptr, &tmp, sizeof(tmp));
+ lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
} else if ((ntohs(uh->dest) & LOWPAN_NHC_UDP_8BIT_MASK) ==
LOWPAN_NHC_UDP_8BIT_PORT) {
pr_debug("UDP header: remove 8 bits of dest\n");
/* compression value */
tmp = LOWPAN_NHC_UDP_CS_P_01;
- lowpan_push_hc_data(hc06_ptr, &tmp, sizeof(tmp));
+ lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
/* source port */
- lowpan_push_hc_data(hc06_ptr, &uh->source, sizeof(uh->source));
+ lowpan_push_hc_data(hc_ptr, &uh->source, sizeof(uh->source));
/* destination port */
tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_8BIT_PORT;
- lowpan_push_hc_data(hc06_ptr, &tmp, sizeof(tmp));
+ lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
} else if ((ntohs(uh->source) & LOWPAN_NHC_UDP_8BIT_MASK) ==
LOWPAN_NHC_UDP_8BIT_PORT) {
pr_debug("UDP header: remove 8 bits of source\n");
/* compression value */
tmp = LOWPAN_NHC_UDP_CS_P_10;
- lowpan_push_hc_data(hc06_ptr, &tmp, sizeof(tmp));
+ lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
/* source port */
tmp = ntohs(uh->source) - LOWPAN_NHC_UDP_8BIT_PORT;
- lowpan_push_hc_data(hc06_ptr, &tmp, sizeof(tmp));
+ lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
/* destination port */
- lowpan_push_hc_data(hc06_ptr, &uh->dest, sizeof(uh->dest));
+ lowpan_push_hc_data(hc_ptr, &uh->dest, sizeof(uh->dest));
} else {
pr_debug("UDP header: can't compress\n");
/* compression value */
tmp = LOWPAN_NHC_UDP_CS_P_00;
- lowpan_push_hc_data(hc06_ptr, &tmp, sizeof(tmp));
+ lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
/* source port */
- lowpan_push_hc_data(hc06_ptr, &uh->source, sizeof(uh->source));
+ lowpan_push_hc_data(hc_ptr, &uh->source, sizeof(uh->source));
/* destination port */
- lowpan_push_hc_data(hc06_ptr, &uh->dest, sizeof(uh->dest));
+ lowpan_push_hc_data(hc_ptr, &uh->dest, sizeof(uh->dest));
}
/* checksum is always inline */
- lowpan_push_hc_data(hc06_ptr, &uh->check, sizeof(uh->check));
+ lowpan_push_hc_data(hc_ptr, &uh->check, sizeof(uh->check));
/* skip the UDP header */
skb_pull(skb, sizeof(struct udphdr));
@@ -597,15 +594,16 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *_daddr,
const void *_saddr, unsigned int len)
{
- u8 tmp, iphc0, iphc1, *hc06_ptr;
+ u8 tmp, iphc0, iphc1, *hc_ptr;
struct ipv6hdr *hdr;
u8 head[100] = {};
+ int addr_type;
if (type != ETH_P_IPV6)
return -EINVAL;
hdr = ipv6_hdr(skb);
- hc06_ptr = head + 2;
+ hc_ptr = head + 2;
pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength = %d\n"
"\tnexthdr = 0x%02x\n\thop_lim = %d\n\tdest = %pI6c\n",
@@ -630,8 +628,7 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
raw_dump_inline(__func__, "daddr",
(unsigned char *)_daddr, IEEE802154_ADDR_LEN);
- raw_dump_table(__func__,
- "sending raw skb network uncompressed packet",
+ raw_dump_table(__func__, "sending raw skb network uncompressed packet",
skb->data, skb->len);
/* Traffic class, flow label
@@ -640,7 +637,7 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
* class depends on the presence of version and flow label
*/
- /* hc06 format of TC is ECN | DSCP , original one is DSCP | ECN */
+ /* hc format of TC is ECN | DSCP , original one is DSCP | ECN */
tmp = (hdr->priority << 4) | (hdr->flow_lbl[0] >> 4);
tmp = ((tmp & 0x03) << 6) | (tmp >> 2);
@@ -654,8 +651,8 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
iphc0 |= LOWPAN_IPHC_TC_C;
} else {
/* compress only the flow label */
- *hc06_ptr = tmp;
- hc06_ptr += 1;
+ *hc_ptr = tmp;
+ hc_ptr += 1;
}
} else {
/* Flow label cannot be compressed */
@@ -663,15 +660,15 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
((hdr->flow_lbl[0] & 0xF0) == 0)) {
/* compress only traffic class */
iphc0 |= LOWPAN_IPHC_TC_C;
- *hc06_ptr = (tmp & 0xc0) | (hdr->flow_lbl[0] & 0x0F);
- memcpy(hc06_ptr + 1, &hdr->flow_lbl[1], 2);
- hc06_ptr += 3;
+ *hc_ptr = (tmp & 0xc0) | (hdr->flow_lbl[0] & 0x0F);
+ memcpy(hc_ptr + 1, &hdr->flow_lbl[1], 2);
+ hc_ptr += 3;
} else {
/* compress nothing */
- memcpy(hc06_ptr, hdr, 4);
+ memcpy(hc_ptr, hdr, 4);
/* replace the top byte with new ECN | DSCP format */
- *hc06_ptr = tmp;
- hc06_ptr += 4;
+ *hc_ptr = tmp;
+ hc_ptr += 4;
}
}
@@ -681,10 +678,9 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
if (hdr->nexthdr == UIP_PROTO_UDP)
iphc0 |= LOWPAN_IPHC_NH_C;
- if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
- *hc06_ptr = hdr->nexthdr;
- hc06_ptr += 1;
- }
+ if ((iphc0 & LOWPAN_IPHC_NH_C) == 0)
+ lowpan_push_hc_data(&hc_ptr, &hdr->nexthdr,
+ sizeof(hdr->nexthdr));
/* Hop limit
* if 1: compress, encoding is 01
@@ -703,84 +699,86 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
iphc0 |= LOWPAN_IPHC_TTL_255;
break;
default:
- *hc06_ptr = hdr->hop_limit;
- hc06_ptr += 1;
- break;
+ lowpan_push_hc_data(&hc_ptr, &hdr->hop_limit,
+ sizeof(hdr->hop_limit));
}
+ addr_type = ipv6_addr_type(&hdr->saddr);
/* source address compression */
- if (is_addr_unspecified(&hdr->saddr)) {
+ if (addr_type == IPV6_ADDR_ANY) {
pr_debug("source address is unspecified, setting SAC\n");
iphc1 |= LOWPAN_IPHC_SAC;
- /* TODO: context lookup */
- } else if (is_addr_link_local(&hdr->saddr)) {
- iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
- LOWPAN_IPHC_SAM_BIT, &hdr->saddr, _saddr);
- pr_debug("source address unicast link-local %pI6c "
- "iphc1 0x%02x\n", &hdr->saddr, iphc1);
} else {
- pr_debug("send the full source address\n");
- memcpy(hc06_ptr, &hdr->saddr.s6_addr16[0], 16);
- hc06_ptr += 16;
+ if (addr_type & IPV6_ADDR_LINKLOCAL) {
+ iphc1 |= lowpan_compress_addr_64(&hc_ptr,
+ LOWPAN_IPHC_SAM_BIT,
+ &hdr->saddr, _saddr);
+ pr_debug("source address unicast link-local %pI6c iphc1 0x%02x\n",
+ &hdr->saddr, iphc1);
+ } else {
+ pr_debug("send the full source address\n");
+ lowpan_push_hc_data(&hc_ptr, hdr->saddr.s6_addr, 16);
+ }
}
+ addr_type = ipv6_addr_type(&hdr->daddr);
/* destination address compression */
- if (is_addr_mcast(&hdr->daddr)) {
+ if (addr_type & IPV6_ADDR_MULTICAST) {
pr_debug("destination address is multicast: ");
iphc1 |= LOWPAN_IPHC_M;
if (lowpan_is_mcast_addr_compressable8(&hdr->daddr)) {
pr_debug("compressed to 1 octet\n");
iphc1 |= LOWPAN_IPHC_DAM_11;
/* use last byte */
- *hc06_ptr = hdr->daddr.s6_addr[15];
- hc06_ptr += 1;
+ lowpan_push_hc_data(&hc_ptr,
+ &hdr->daddr.s6_addr[15], 1);
} else if (lowpan_is_mcast_addr_compressable32(&hdr->daddr)) {
pr_debug("compressed to 4 octets\n");
iphc1 |= LOWPAN_IPHC_DAM_10;
/* second byte + the last three */
- *hc06_ptr = hdr->daddr.s6_addr[1];
- memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[13], 3);
- hc06_ptr += 4;
+ lowpan_push_hc_data(&hc_ptr,
+ &hdr->daddr.s6_addr[1], 1);
+ lowpan_push_hc_data(&hc_ptr,
+ &hdr->daddr.s6_addr[13], 3);
} else if (lowpan_is_mcast_addr_compressable48(&hdr->daddr)) {
pr_debug("compressed to 6 octets\n");
iphc1 |= LOWPAN_IPHC_DAM_01;
/* second byte + the last five */
- *hc06_ptr = hdr->daddr.s6_addr[1];
- memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[11], 5);
- hc06_ptr += 6;
+ lowpan_push_hc_data(&hc_ptr,
+ &hdr->daddr.s6_addr[1], 1);
+ lowpan_push_hc_data(&hc_ptr,
+ &hdr->daddr.s6_addr[11], 5);
} else {
pr_debug("using full address\n");
iphc1 |= LOWPAN_IPHC_DAM_00;
- memcpy(hc06_ptr, &hdr->daddr.s6_addr[0], 16);
- hc06_ptr += 16;
+ lowpan_push_hc_data(&hc_ptr, hdr->daddr.s6_addr, 16);
}
} else {
- /* TODO: context lookup */
- if (is_addr_link_local(&hdr->daddr)) {
- iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
+ if (addr_type & IPV6_ADDR_LINKLOCAL) {
+ /* TODO: context lookup */
+ iphc1 |= lowpan_compress_addr_64(&hc_ptr,
LOWPAN_IPHC_DAM_BIT, &hdr->daddr, _daddr);
pr_debug("dest address unicast link-local %pI6c "
- "iphc1 0x%02x\n", &hdr->daddr, iphc1);
+ "iphc1 0x%02x\n", &hdr->daddr, iphc1);
} else {
pr_debug("dest address unicast %pI6c\n", &hdr->daddr);
- memcpy(hc06_ptr, &hdr->daddr.s6_addr16[0], 16);
- hc06_ptr += 16;
+ lowpan_push_hc_data(&hc_ptr, hdr->daddr.s6_addr, 16);
}
}
/* UDP header compression */
if (hdr->nexthdr == UIP_PROTO_UDP)
- compress_udp_header(&hc06_ptr, skb);
+ compress_udp_header(&hc_ptr, skb);
head[0] = iphc0;
head[1] = iphc1;
skb_pull(skb, sizeof(struct ipv6hdr));
skb_reset_transport_header(skb);
- memcpy(skb_push(skb, hc06_ptr - head), head, hc06_ptr - head);
+ memcpy(skb_push(skb, hc_ptr - head), head, hc_ptr - head);
skb_reset_network_header(skb);
- pr_debug("header len %d skb %u\n", (int)(hc06_ptr - head), skb->len);
+ pr_debug("header len %d skb %u\n", (int)(hc_ptr - head), skb->len);
raw_dump_table(__func__, "raw skb data dump compressed",
skb->data, skb->len);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index cfcb6055ced8..32b96f1aaf42 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -970,6 +970,62 @@ static int adv_channel_map_get(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
adv_channel_map_set, "%llu\n");
+static int adv_min_interval_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->le_adv_min_interval = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int adv_min_interval_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->le_adv_min_interval;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
+ adv_min_interval_set, "%llu\n");
+
+static int adv_max_interval_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->le_adv_max_interval = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int adv_max_interval_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->le_adv_max_interval;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
+ adv_max_interval_set, "%llu\n");
+
static int device_list_show(struct seq_file *f, void *ptr)
{
struct hci_dev *hdev = f->private;
@@ -1567,7 +1623,7 @@ static void hci_set_le_support(struct hci_request *req)
if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
cp.le = 0x01;
- cp.simul = lmp_le_br_capable(hdev);
+ cp.simul = 0x00;
}
if (cp.le != lmp_host_le_capable(hdev))
@@ -1686,6 +1742,14 @@ static void hci_init4_req(struct hci_request *req, unsigned long opt)
if (hdev->commands[22] & 0x04)
hci_set_event_mask_page_2(req);
+ /* Read local codec list if the HCI command is supported */
+ if (hdev->commands[29] & 0x20)
+ hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
+
+ /* Get MWS transport configuration if the HCI command is supported */
+ if (hdev->commands[30] & 0x08)
+ hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
+
/* Check for Synchronization Train support */
if (lmp_sync_train_capable(hdev))
hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
@@ -1825,6 +1889,10 @@ static int __hci_init(struct hci_dev *hdev)
hdev, &supervision_timeout_fops);
debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
hdev, &adv_channel_map_fops);
+ debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
+ hdev, &adv_min_interval_fops);
+ debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
+ hdev, &adv_max_interval_fops);
debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
&device_list_fops);
debugfs_create_u16("discov_interleaved_timeout", 0644,
@@ -2453,14 +2521,14 @@ int hci_dev_open(__u16 dev)
flush_workqueue(hdev->req_workqueue);
/* For controllers not using the management interface and that
- * are brought up using legacy ioctl, set the HCI_PAIRABLE bit
+ * are brought up using legacy ioctl, set the HCI_BONDABLE bit
* so that pairing works for them. Once the management interface
* is in use this bit will be cleared again and userspace has
* to explicitly enable it.
*/
if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
!test_bit(HCI_MGMT, &hdev->dev_flags))
- set_bit(HCI_PAIRABLE, &hdev->dev_flags);
+ set_bit(HCI_BONDABLE, &hdev->dev_flags);
err = hci_dev_do_open(hdev);
@@ -3639,6 +3707,7 @@ int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
list_add(&params->action, &hdev->pend_le_reports);
hci_update_background_scan(hdev);
break;
+ case HCI_AUTO_CONN_DIRECT:
case HCI_AUTO_CONN_ALWAYS:
if (!is_connected(hdev, addr, addr_type)) {
list_add(&params->action, &hdev->pend_le_conns);
@@ -3914,6 +3983,8 @@ struct hci_dev *hci_alloc_dev(void)
hdev->sniff_min_interval = 80;
hdev->le_adv_channel_map = 0x07;
+ hdev->le_adv_min_interval = 0x0800;
+ hdev->le_adv_max_interval = 0x0800;
hdev->le_scan_interval = 0x0060;
hdev->le_scan_window = 0x0030;
hdev->le_conn_min_interval = 0x0028;
@@ -5397,12 +5468,113 @@ void hci_req_add_le_scan_disable(struct hci_request *req)
hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
+static void add_to_white_list(struct hci_request *req,
+ struct hci_conn_params *params)
+{
+ struct hci_cp_le_add_to_white_list cp;
+
+ cp.bdaddr_type = params->addr_type;
+ bacpy(&cp.bdaddr, &params->addr);
+
+ hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
+}
+
+static u8 update_white_list(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct hci_conn_params *params;
+ struct bdaddr_list *b;
+ uint8_t white_list_entries = 0;
+
+ /* Go through the current white list programmed into the
+ * controller one by one and check if that address is still
+ * in the list of pending connections or list of devices to
+ * report. If not present in either list, then queue the
+ * command to remove it from the controller.
+ */
+ list_for_each_entry(b, &hdev->le_white_list, list) {
+ struct hci_cp_le_del_from_white_list cp;
+
+ if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
+ &b->bdaddr, b->bdaddr_type) ||
+ hci_pend_le_action_lookup(&hdev->pend_le_reports,
+ &b->bdaddr, b->bdaddr_type)) {
+ white_list_entries++;
+ continue;
+ }
+
+ cp.bdaddr_type = b->bdaddr_type;
+ bacpy(&cp.bdaddr, &b->bdaddr);
+
+ hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
+ sizeof(cp), &cp);
+ }
+
+ /* Now that the stale white list entries have been
+ * removed, walk through the list of pending connections
+ * and ensure that any new device gets programmed into
+ * the controller.
+ *
+ * If the list of devices is larger than the number of
+ * available white list entries in the controller, then
+ * just abort and return the filter policy value to not
+ * use the white list.
+ */
+ list_for_each_entry(params, &hdev->pend_le_conns, action) {
+ if (hci_bdaddr_list_lookup(&hdev->le_white_list,
+ &params->addr, params->addr_type))
+ continue;
+
+ if (white_list_entries >= hdev->le_white_list_size) {
+ /* Select filter policy to accept all advertising */
+ return 0x00;
+ }
+
+ if (hci_find_irk_by_addr(hdev, &params->addr,
+ params->addr_type)) {
+ /* White list can not be used with RPAs */
+ return 0x00;
+ }
+
+ white_list_entries++;
+ add_to_white_list(req, params);
+ }
+
+ /* After adding all new pending connections, walk through
+ * the list of pending reports and also add these to the
+ * white list if there is still space.
+ */
+ list_for_each_entry(params, &hdev->pend_le_reports, action) {
+ if (hci_bdaddr_list_lookup(&hdev->le_white_list,
+ &params->addr, params->addr_type))
+ continue;
+
+ if (white_list_entries >= hdev->le_white_list_size) {
+ /* Select filter policy to accept all advertising */
+ return 0x00;
+ }
+
+ if (hci_find_irk_by_addr(hdev, &params->addr,
+ params->addr_type)) {
+ /* White list can not be used with RPAs */
+ return 0x00;
+ }
+
+ white_list_entries++;
+ add_to_white_list(req, params);
+ }
+
+ /* Select filter policy to use white list */
+ return 0x01;
+}
+
void hci_req_add_le_passive_scan(struct hci_request *req)
{
struct hci_cp_le_set_scan_param param_cp;
struct hci_cp_le_set_scan_enable enable_cp;
struct hci_dev *hdev = req->hdev;
u8 own_addr_type;
+ u8 filter_policy;
/* Set require_privacy to false since no SCAN_REQ are send
* during passive scanning. Not using an unresolvable address
@@ -5413,11 +5585,18 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
if (hci_update_random_address(req, false, &own_addr_type))
return;
+ /* Adding or removing entries from the white list must
+ * happen before enabling scanning. The controller does
+ * not allow white list modification while scanning.
+ */
+ filter_policy = update_white_list(req);
+
memset(&param_cp, 0, sizeof(param_cp));
param_cp.type = LE_SCAN_PASSIVE;
param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
param_cp.window = cpu_to_le16(hdev->le_scan_window);
param_cp.own_address_type = own_addr_type;
+ param_cp.filter_policy = filter_policy;
hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
&param_cp);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 4c41774aa556..be35598984d9 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -317,7 +317,7 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
if (param & SCAN_PAGE)
set_bit(HCI_PSCAN, &hdev->flags);
else
- clear_bit(HCI_ISCAN, &hdev->flags);
+ clear_bit(HCI_PSCAN, &hdev->flags);
done:
hci_dev_unlock(hdev);
@@ -2259,6 +2259,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
break;
/* Fall through */
+ case HCI_AUTO_CONN_DIRECT:
case HCI_AUTO_CONN_ALWAYS:
list_del_init(&params->action);
list_add(&params->action, &hdev->pend_le_conns);
@@ -3118,7 +3119,7 @@ static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_conn_drop(conn);
}
- if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags) &&
+ if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
!test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
sizeof(ev->bdaddr), &ev->bdaddr);
@@ -3651,7 +3652,7 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
/* Allow pairing if we're pairable, the initiators of the
* pairing or if the remote is not requesting bonding.
*/
- if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
+ if (test_bit(HCI_BONDABLE, &hdev->dev_flags) ||
test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
(conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
struct hci_cp_io_capability_reply cp;
@@ -3670,13 +3671,18 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
conn->auth_type != HCI_AT_NO_BONDING)
conn->auth_type |= 0x01;
-
- cp.authentication = conn->auth_type;
} else {
conn->auth_type = hci_get_auth_req(conn);
- cp.authentication = conn->auth_type;
}
+ /* If we're not bondable, force one of the non-bondable
+ * authentication requirement values.
+ */
+ if (!test_bit(HCI_BONDABLE, &hdev->dev_flags))
+ conn->auth_type &= HCI_AT_NO_BONDING_MITM;
+
+ cp.authentication = conn->auth_type;
+
if (hci_find_remote_oob_data(hdev, &conn->dst) &&
(conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
cp.oob_data = 0x01;
@@ -4251,6 +4257,7 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
u8 addr_type, u8 adv_type)
{
struct hci_conn *conn;
+ struct hci_conn_params *params;
/* If the event is not connectable don't proceed further */
if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
@@ -4266,18 +4273,35 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
if (hdev->conn_hash.le_num_slave > 0)
return;
- /* If we're connectable, always connect any ADV_DIRECT_IND event */
- if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
- adv_type == LE_ADV_DIRECT_IND)
- goto connect;
-
/* If we're not connectable only connect devices that we have in
* our pend_le_conns list.
*/
- if (!hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, addr_type))
+ params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
+ addr, addr_type);
+ if (!params)
+ return;
+
+ switch (params->auto_connect) {
+ case HCI_AUTO_CONN_DIRECT:
+ /* Only devices advertising with ADV_DIRECT_IND trigger
+ * a connection attempt. This allows incoming
+ * connections from slave devices.
+ */
+ if (adv_type != LE_ADV_DIRECT_IND)
+ return;
+ break;
+ case HCI_AUTO_CONN_ALWAYS:
+ /* Devices advertising with ADV_IND or ADV_DIRECT_IND
+ * trigger a connection attempt. This means that
+ * incoming connections from slave devices are
+ * accepted and also that outgoing connections to
+ * slave devices are established when found.
+ */
+ break;
+ default:
return;
+ }
-connect:
conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
if (!IS_ERR(conn))
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 8181ea4bc2f2..6c7ecf116e74 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -154,7 +154,7 @@ static int hidp_input_event(struct input_dev *dev, unsigned int type,
(!!test_bit(LED_COMPOSE, dev->led) << 3) |
(!!test_bit(LED_SCROLLL, dev->led) << 2) |
(!!test_bit(LED_CAPSL, dev->led) << 1) |
- (!!test_bit(LED_NUML, dev->led));
+ (!!test_bit(LED_NUML, dev->led) << 0);
if (session->leds == newleds)
return 0;
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 190668367e42..b8554d429d88 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -44,7 +44,7 @@ static const u16 mgmt_commands[] = {
MGMT_OP_SET_DISCOVERABLE,
MGMT_OP_SET_CONNECTABLE,
MGMT_OP_SET_FAST_CONNECTABLE,
- MGMT_OP_SET_PAIRABLE,
+ MGMT_OP_SET_BONDABLE,
MGMT_OP_SET_LINK_SECURITY,
MGMT_OP_SET_SSP,
MGMT_OP_SET_HS,
@@ -553,7 +553,7 @@ static u32 get_supported_settings(struct hci_dev *hdev)
u32 settings = 0;
settings |= MGMT_SETTING_POWERED;
- settings |= MGMT_SETTING_PAIRABLE;
+ settings |= MGMT_SETTING_BONDABLE;
settings |= MGMT_SETTING_DEBUG_KEYS;
settings |= MGMT_SETTING_CONNECTABLE;
settings |= MGMT_SETTING_DISCOVERABLE;
@@ -603,8 +603,8 @@ static u32 get_current_settings(struct hci_dev *hdev)
if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
settings |= MGMT_SETTING_DISCOVERABLE;
- if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
- settings |= MGMT_SETTING_PAIRABLE;
+ if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
+ settings |= MGMT_SETTING_BONDABLE;
if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
settings |= MGMT_SETTING_BREDR;
@@ -1086,8 +1086,8 @@ static void enable_advertising(struct hci_request *req)
return;
memset(&cp, 0, sizeof(cp));
- cp.min_interval = cpu_to_le16(0x0800);
- cp.max_interval = cpu_to_le16(0x0800);
+ cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
+ cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
cp.own_address_type = own_addr_type;
cp.channel_map = hdev->le_adv_channel_map;
@@ -1152,7 +1152,7 @@ static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
* for mgmt we require user-space to explicitly enable
* it
*/
- clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
+ clear_bit(HCI_BONDABLE, &hdev->dev_flags);
}
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
@@ -1881,7 +1881,18 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
if (cp->val) {
scan = SCAN_PAGE;
} else {
- scan = 0;
+ /* If we don't have any whitelist entries just
+ * disable all scanning. If there are entries
+ * and we had both page and inquiry scanning
+ * enabled then fall back to only page scanning.
+ * Otherwise no changes are needed.
+ */
+ if (list_empty(&hdev->whitelist))
+ scan = SCAN_DISABLED;
+ else if (test_bit(HCI_ISCAN, &hdev->flags))
+ scan = SCAN_PAGE;
+ else
+ goto no_scan_update;
if (test_bit(HCI_ISCAN, &hdev->flags) &&
hdev->discov_timeout > 0)
@@ -1891,6 +1902,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
+no_scan_update:
/* If we're going from non-connectable to connectable or
* vice-versa when fast connectable is enabled ensure that fast
* connectable gets disabled. write_fast_connectable won't do
@@ -1918,7 +1930,7 @@ failed:
return err;
}
-static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
+static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
struct mgmt_mode *cp = data;
@@ -1928,17 +1940,17 @@ static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
BT_DBG("request for %s", hdev->name);
if (cp->val != 0x00 && cp->val != 0x01)
- return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
MGMT_STATUS_INVALID_PARAMS);
hci_dev_lock(hdev);
if (cp->val)
- changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
+ changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
else
- changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
+ changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
- err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
+ err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
if (err < 0)
goto unlock;
@@ -2264,7 +2276,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
if (val) {
hci_cp.le = val;
- hci_cp.simul = lmp_le_br_capable(hdev);
+ hci_cp.simul = 0x00;
} else {
if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
disable_advertising(&req);
@@ -3201,7 +3213,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
conn->io_capability = cp->io_cap;
cmd->user_data = conn;
- if (conn->state == BT_CONNECTED &&
+ if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
hci_conn_security(conn, sec_level, auth_type, true))
pairing_complete(cmd, 0);
@@ -5271,7 +5283,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
MGMT_STATUS_INVALID_PARAMS,
&cp->addr, sizeof(cp->addr));
- if (cp->action != 0x00 && cp->action != 0x01)
+ if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
MGMT_STATUS_INVALID_PARAMS,
&cp->addr, sizeof(cp->addr));
@@ -5281,7 +5293,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
if (cp->addr.type == BDADDR_BREDR) {
bool update_scan;
- /* Only "connect" action supported for now */
+ /* Only the incoming connections action is supported for now */
if (cp->action != 0x01) {
err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
MGMT_STATUS_INVALID_PARAMS,
@@ -5307,8 +5319,10 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
else
addr_type = ADDR_LE_DEV_RANDOM;
- if (cp->action)
+ if (cp->action == 0x02)
auto_conn = HCI_AUTO_CONN_ALWAYS;
+ else if (cp->action == 0x01)
+ auto_conn = HCI_AUTO_CONN_DIRECT;
else
auto_conn = HCI_AUTO_CONN_REPORT;
@@ -5665,7 +5679,7 @@ static const struct mgmt_handler {
{ set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
{ set_connectable, false, MGMT_SETTING_SIZE },
{ set_fast_connectable, false, MGMT_SETTING_SIZE },
- { set_pairable, false, MGMT_SETTING_SIZE },
+ { set_bondable, false, MGMT_SETTING_SIZE },
{ set_link_security, false, MGMT_SETTING_SIZE },
{ set_ssp, false, MGMT_SETTING_SIZE },
{ set_hs, false, MGMT_SETTING_SIZE },
@@ -5870,6 +5884,7 @@ static void restart_le_actions(struct hci_dev *hdev)
list_del_init(&p->action);
switch (p->auto_connect) {
+ case HCI_AUTO_CONN_DIRECT:
case HCI_AUTO_CONN_ALWAYS:
list_add(&p->action, &hdev->pend_le_conns);
break;
@@ -5922,8 +5937,8 @@ static int powered_update_hci(struct hci_dev *hdev)
lmp_bredr_capable(hdev)) {
struct hci_cp_write_le_host_supported cp;
- cp.le = 1;
- cp.simul = lmp_le_br_capable(hdev);
+ cp.le = 0x01;
+ cp.simul = 0x00;
/* Check first if we already have the right
* host state (host features set)
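For context on the add_device() hunks above: the accepted LE action values grow from {0x00, 0x01} to {0x00, 0x01, 0x02}, mapped to report-only, direct-connect and always-connect policies respectively. A minimal standalone sketch of that mapping, using illustrative stand-in names rather than the kernel's HCI_AUTO_CONN_* enum:

#include <stdio.h>

/* Simplified stand-ins for the kernel's auto-connect policies. */
enum auto_conn_policy {
        AUTO_CONN_REPORT,       /* 0x00: only report the device to userspace */
        AUTO_CONN_DIRECT,       /* 0x01: connect on directed advertising */
        AUTO_CONN_ALWAYS,       /* 0x02: connect whenever the device advertises */
        AUTO_CONN_INVALID,
};

/* Mirrors the action handling added in add_device() above; any other
 * value is rejected as an invalid parameter.
 */
static enum auto_conn_policy action_to_policy(unsigned char action)
{
        switch (action) {
        case 0x02:
                return AUTO_CONN_ALWAYS;
        case 0x01:
                return AUTO_CONN_DIRECT;
        case 0x00:
                return AUTO_CONN_REPORT;
        default:
                return AUTO_CONN_INVALID;
        }
}

int main(void)
{
        for (unsigned char a = 0; a <= 3; a++)
                printf("action 0x%02x -> policy %d\n", a, action_to_policy(a));
        return 0;
}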
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index a0690a84f3e9..af73bc3acb40 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -1910,10 +1910,13 @@ static struct rfcomm_session *rfcomm_process_rx(struct rfcomm_session *s)
/* Get data directly from socket receive queue without copying it. */
while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
skb_orphan(skb);
- if (!skb_linearize(skb))
+ if (!skb_linearize(skb)) {
s = rfcomm_recv_frame(s, skb);
- else
+ if (!s)
+ break;
+ } else {
kfree_skb(skb);
+ }
}
if (s && (sk->sk_state == BT_CLOSED))
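The rfcomm_process_rx() hunk above fixes a use-after-free: rfcomm_recv_frame() can tear the session down and return NULL, so the receive loop has to stop rather than keep feeding frames to a freed session. A minimal sketch of that loop shape, with hypothetical stand-in types (not the kernel's):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for an RFCOMM session. */
struct session {
        int frames_left;
};

/* Stand-in for rfcomm_recv_frame(): handles one frame and may tear the
 * session down, in which case it frees it and returns NULL so the caller
 * knows to stop.
 */
static struct session *recv_frame(struct session *s)
{
        if (--s->frames_left <= 0) {
                free(s);
                return NULL;
        }
        return s;
}

int main(void)
{
        struct session *s = malloc(sizeof(*s));
        int frame;

        if (!s)
                return 1;
        s->frames_left = 2;

        /* As in the fixed loop above: re-check the returned pointer after
         * every frame and break as soon as the session is gone, instead of
         * dereferencing freed memory on the next iteration.
         */
        for (frame = 0; frame < 5; frame++) {
                s = recv_frame(s);
                if (!s)
                        break;
                printf("frame %d handled, %d left\n", frame, s->frames_left);
        }
        printf("stopped after frame %d\n", frame);
        return 0;
}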
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index e49c83d8b957..fd3294300803 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -307,7 +307,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
struct hci_dev *hdev = hcon->hdev;
u8 local_dist = 0, remote_dist = 0;
- if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->dev_flags)) {
+ if (test_bit(HCI_BONDABLE, &conn->hcon->hdev->dev_flags)) {
local_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;
remote_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;
authreq |= SMP_AUTH_BONDING;
@@ -579,13 +579,16 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
struct smp_chan *smp;
smp = kzalloc(sizeof(*smp), GFP_ATOMIC);
- if (!smp)
+ if (!smp) {
+ clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags);
return NULL;
+ }
smp->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(smp->tfm_aes)) {
BT_ERR("Unable to create ECB crypto context");
kfree(smp);
+ clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags);
return NULL;
}
@@ -701,7 +704,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
if (!smp)
return SMP_UNSPECIFIED;
- if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags) &&
+ if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
(req->auth_req & SMP_AUTH_BONDING))
return SMP_PAIRING_NOTSUPP;
@@ -923,14 +926,14 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
return 0;
- if (!test_bit(HCI_PAIRABLE, &hcon->hdev->dev_flags) &&
- (rp->auth_req & SMP_AUTH_BONDING))
- return SMP_PAIRING_NOTSUPP;
-
smp = smp_chan_create(conn);
if (!smp)
return SMP_UNSPECIFIED;
+ if (!test_bit(HCI_BONDABLE, &hcon->hdev->dev_flags) &&
+ (rp->auth_req & SMP_AUTH_BONDING))
+ return SMP_PAIRING_NOTSUPP;
+
skb_pull(skb, sizeof(*rp));
memset(&cp, 0, sizeof(cp));
@@ -1291,6 +1294,22 @@ static void smp_notify_keys(struct l2cap_conn *conn)
bacpy(&hcon->dst, &smp->remote_irk->bdaddr);
hcon->dst_type = smp->remote_irk->addr_type;
l2cap_conn_update_id_addr(hcon);
+
+ /* When receiving an identity resolving key for
+ * a remote device that does not use a resolvable
+ * private address, just remove the key so that
+ * it is possible to use the controller white
+ * list for scanning.
+ *
+ * Userspace will have been told to not store
+ * this key at this point. So it is safe to
+ * just remove it.
+ */
+ if (!bacmp(&smp->remote_irk->rpa, BDADDR_ANY)) {
+ list_del(&smp->remote_irk->list);
+ kfree(smp->remote_irk);
+ smp->remote_irk = NULL;
+ }
}
/* The LTKs and CSRKs should be persistent only if both sides
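The smp_chan_create() hunks above make the constructor clear HCI_CONN_LE_SMP_PEND on both of its failure paths, so callers that set the flag before calling it are never left with a stale pending state. A simplified sketch of that ownership rule, with illustrative names and a plain boolean standing in for the flag:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct conn { bool smp_pending; };
struct smp_ctx { int placeholder; };

/* Mirrors the fixed constructor: if it cannot complete, it undoes the
 * "pairing pending" flag that the caller set, so no error path leaves the
 * connection stuck in a pending state.
 */
static struct smp_ctx *smp_ctx_create(struct conn *c, bool simulate_failure)
{
        struct smp_ctx *smp = malloc(sizeof(*smp));

        if (!smp || simulate_failure) {
                free(smp);
                c->smp_pending = false; /* clear the caller's flag on failure */
                return NULL;
        }
        return smp;
}

int main(void)
{
        struct conn c = { .smp_pending = false };
        struct smp_ctx *smp;

        c.smp_pending = true;           /* caller marks pairing as pending */
        smp = smp_ctx_create(&c, true);

        printf("ctx=%p pending=%d\n", (void *)smp, c.smp_pending);
        free(smp);
        return 0;
}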
diff --git a/net/nfc/digital.h b/net/nfc/digital.h
index 71ad7eefddd4..3c39c72eb038 100644
--- a/net/nfc/digital.h
+++ b/net/nfc/digital.h
@@ -29,6 +29,7 @@
#define DIGITAL_CMD_TG_SEND 1
#define DIGITAL_CMD_TG_LISTEN 2
#define DIGITAL_CMD_TG_LISTEN_MDAA 3
+#define DIGITAL_CMD_TG_LISTEN_MD 4
#define DIGITAL_MAX_HEADER_LEN 7
#define DIGITAL_CRC_LEN 2
@@ -121,6 +122,8 @@ int digital_tg_send_dep_res(struct nfc_digital_dev *ddev, struct sk_buff *skb);
int digital_tg_listen_nfca(struct nfc_digital_dev *ddev, u8 rf_tech);
int digital_tg_listen_nfcf(struct nfc_digital_dev *ddev, u8 rf_tech);
+void digital_tg_recv_md_req(struct nfc_digital_dev *ddev, void *arg,
+ struct sk_buff *resp);
typedef u16 (*crc_func_t)(u16, const u8 *, size_t);
diff --git a/net/nfc/digital_core.c b/net/nfc/digital_core.c
index a6ce3c627e4e..009bcf317101 100644
--- a/net/nfc/digital_core.c
+++ b/net/nfc/digital_core.c
@@ -201,6 +201,11 @@ static void digital_wq_cmd(struct work_struct *work)
digital_send_cmd_complete, cmd);
break;
+ case DIGITAL_CMD_TG_LISTEN_MD:
+ rc = ddev->ops->tg_listen_md(ddev, cmd->timeout,
+ digital_send_cmd_complete, cmd);
+ break;
+
default:
pr_err("Unknown cmd type %d\n", cmd->type);
return;
@@ -293,12 +298,19 @@ static int digital_tg_listen_mdaa(struct nfc_digital_dev *ddev, u8 rf_tech)
500, digital_tg_recv_atr_req, NULL);
}
+static int digital_tg_listen_md(struct nfc_digital_dev *ddev, u8 rf_tech)
+{
+ return digital_send_cmd(ddev, DIGITAL_CMD_TG_LISTEN_MD, NULL, NULL, 500,
+ digital_tg_recv_md_req, NULL);
+}
+
int digital_target_found(struct nfc_digital_dev *ddev,
struct nfc_target *target, u8 protocol)
{
int rc;
u8 framing;
u8 rf_tech;
+ u8 poll_tech_count;
int (*check_crc)(struct sk_buff *skb);
void (*add_crc)(struct sk_buff *skb);
@@ -375,12 +387,16 @@ int digital_target_found(struct nfc_digital_dev *ddev,
return rc;
target->supported_protocols = (1 << protocol);
- rc = nfc_targets_found(ddev->nfc_dev, target, 1);
- if (rc)
- return rc;
+ poll_tech_count = ddev->poll_tech_count;
ddev->poll_tech_count = 0;
+ rc = nfc_targets_found(ddev->nfc_dev, target, 1);
+ if (rc) {
+ ddev->poll_tech_count = poll_tech_count;
+ return rc;
+ }
+
return 0;
}
@@ -505,6 +521,9 @@ static int digital_start_poll(struct nfc_dev *nfc_dev, __u32 im_protocols,
if (ddev->ops->tg_listen_mdaa) {
digital_add_poll_tech(ddev, 0,
digital_tg_listen_mdaa);
+ } else if (ddev->ops->tg_listen_md) {
+ digital_add_poll_tech(ddev, 0,
+ digital_tg_listen_md);
} else {
digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_106A,
digital_tg_listen_nfca);
@@ -732,7 +751,7 @@ struct nfc_digital_dev *nfc_digital_allocate_device(struct nfc_digital_ops *ops,
if (!ops->in_configure_hw || !ops->in_send_cmd || !ops->tg_listen ||
!ops->tg_configure_hw || !ops->tg_send_cmd || !ops->abort_cmd ||
- !ops->switch_rf)
+ !ops->switch_rf || (ops->tg_listen_md && !ops->tg_get_rf_tech))
return NULL;
ddev = kzalloc(sizeof(struct nfc_digital_dev), GFP_KERNEL);
diff --git a/net/nfc/digital_dep.c b/net/nfc/digital_dep.c
index 37deb173c956..e1638dab076d 100644
--- a/net/nfc/digital_dep.c
+++ b/net/nfc/digital_dep.c
@@ -671,6 +671,7 @@ void digital_tg_recv_atr_req(struct nfc_digital_dev *ddev, void *arg,
int rc;
struct digital_atr_req *atr_req;
size_t gb_len, min_size;
+ u8 poll_tech_count;
if (IS_ERR(resp)) {
rc = PTR_ERR(resp);
@@ -728,12 +729,16 @@ void digital_tg_recv_atr_req(struct nfc_digital_dev *ddev, void *arg,
goto exit;
gb_len = resp->len - sizeof(struct digital_atr_req);
+
+ poll_tech_count = ddev->poll_tech_count;
+ ddev->poll_tech_count = 0;
+
rc = nfc_tm_activated(ddev->nfc_dev, NFC_PROTO_NFC_DEP_MASK,
NFC_COMM_PASSIVE, atr_req->gb, gb_len);
- if (rc)
+ if (rc) {
+ ddev->poll_tech_count = poll_tech_count;
goto exit;
-
- ddev->poll_tech_count = 0;
+ }
rc = 0;
exit:
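Both digital_core.c and digital_dep.c above now save poll_tech_count, clear it before notifying the NFC core, and restore it when nfc_targets_found() or nfc_tm_activated() fails, so a failed notification no longer stops polling permanently. A minimal sketch of that save/clear/restore pattern, with a placeholder notify function and a stand-in device structure:

#include <stdio.h>

struct digital_dev_stub { int poll_tech_count; };

/* Placeholder for nfc_targets_found()/nfc_tm_activated(): 0 on success,
 * negative on error.
 */
static int notify_stack(int fail) { return fail ? -1 : 0; }

/* Save the poll technology count, clear it before notifying the stack,
 * and restore it on failure so polling can continue with the remaining
 * technologies instead of stopping for good.
 */
static int target_found(struct digital_dev_stub *ddev, int fail)
{
        int saved = ddev->poll_tech_count;
        int rc;

        ddev->poll_tech_count = 0;
        rc = notify_stack(fail);
        if (rc) {
                ddev->poll_tech_count = saved;
                return rc;
        }
        return 0;
}

int main(void)
{
        struct digital_dev_stub ddev = { .poll_tech_count = 3 };

        printf("rc=%d count=%d\n", target_found(&ddev, 1), ddev.poll_tech_count);
        printf("rc=%d count=%d\n", target_found(&ddev, 0), ddev.poll_tech_count);
        return 0;
}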
diff --git a/net/nfc/digital_technology.c b/net/nfc/digital_technology.c
index c2c1c0189b7c..fb58ed2dd41d 100644
--- a/net/nfc/digital_technology.c
+++ b/net/nfc/digital_technology.c
@@ -318,6 +318,8 @@ static void digital_in_recv_sel_res(struct nfc_digital_dev *ddev, void *arg,
if (DIGITAL_SEL_RES_IS_T2T(sel_res)) {
nfc_proto = NFC_PROTO_MIFARE;
+ } else if (DIGITAL_SEL_RES_IS_NFC_DEP(sel_res)) {
+ nfc_proto = NFC_PROTO_NFC_DEP;
} else if (DIGITAL_SEL_RES_IS_T4T(sel_res)) {
rc = digital_in_send_rats(ddev, target);
if (rc)
@@ -327,8 +329,6 @@ static void digital_in_recv_sel_res(struct nfc_digital_dev *ddev, void *arg,
* done when receiving the ATS
*/
goto exit_free_skb;
- } else if (DIGITAL_SEL_RES_IS_NFC_DEP(sel_res)) {
- nfc_proto = NFC_PROTO_NFC_DEP;
} else {
rc = -EOPNOTSUPP;
goto exit;
@@ -944,6 +944,13 @@ static int digital_tg_send_sel_res(struct nfc_digital_dev *ddev)
if (!DIGITAL_DRV_CAPS_TG_CRC(ddev))
digital_skb_add_crc_a(skb);
+ rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+ NFC_DIGITAL_FRAMING_NFCA_ANTICOL_COMPLETE);
+ if (rc) {
+ kfree_skb(skb);
+ return rc;
+ }
+
rc = digital_tg_send_cmd(ddev, skb, 300, digital_tg_recv_atr_req,
NULL);
if (rc)
@@ -1002,6 +1009,13 @@ static int digital_tg_send_sdd_res(struct nfc_digital_dev *ddev)
for (i = 0; i < 4; i++)
sdd_res->bcc ^= sdd_res->nfcid1[i];
+ rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+ NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A);
+ if (rc) {
+ kfree_skb(skb);
+ return rc;
+ }
+
rc = digital_tg_send_cmd(ddev, skb, 300, digital_tg_recv_sel_req,
NULL);
if (rc)
@@ -1054,6 +1068,13 @@ static int digital_tg_send_sens_res(struct nfc_digital_dev *ddev)
sens_res[0] = (DIGITAL_SENS_RES_NFC_DEP >> 8) & 0xFF;
sens_res[1] = DIGITAL_SENS_RES_NFC_DEP & 0xFF;
+ rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+ NFC_DIGITAL_FRAMING_NFCA_STANDARD);
+ if (rc) {
+ kfree_skb(skb);
+ return rc;
+ }
+
rc = digital_tg_send_cmd(ddev, skb, 300, digital_tg_recv_sdd_req,
NULL);
if (rc)
@@ -1197,33 +1218,48 @@ exit:
dev_kfree_skb(resp);
}
-int digital_tg_listen_nfca(struct nfc_digital_dev *ddev, u8 rf_tech)
+static int digital_tg_config_nfca(struct nfc_digital_dev *ddev)
{
int rc;
- rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, rf_tech);
+ rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH,
+ NFC_DIGITAL_RF_TECH_106A);
if (rc)
return rc;
- rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
- NFC_DIGITAL_FRAMING_NFCA_NFC_DEP);
+ return digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+ NFC_DIGITAL_FRAMING_NFCA_NFC_DEP);
+}
+
+int digital_tg_listen_nfca(struct nfc_digital_dev *ddev, u8 rf_tech)
+{
+ int rc;
+
+ rc = digital_tg_config_nfca(ddev);
if (rc)
return rc;
return digital_tg_listen(ddev, 300, digital_tg_recv_sens_req, NULL);
}
-int digital_tg_listen_nfcf(struct nfc_digital_dev *ddev, u8 rf_tech)
+static int digital_tg_config_nfcf(struct nfc_digital_dev *ddev, u8 rf_tech)
{
int rc;
- u8 *nfcid2;
rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, rf_tech);
if (rc)
return rc;
- rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
- NFC_DIGITAL_FRAMING_NFCF_NFC_DEP);
+ return digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+ NFC_DIGITAL_FRAMING_NFCF_NFC_DEP);
+}
+
+int digital_tg_listen_nfcf(struct nfc_digital_dev *ddev, u8 rf_tech)
+{
+ int rc;
+ u8 *nfcid2;
+
+ rc = digital_tg_config_nfcf(ddev, rf_tech);
if (rc)
return rc;
@@ -1237,3 +1273,43 @@ int digital_tg_listen_nfcf(struct nfc_digital_dev *ddev, u8 rf_tech)
return digital_tg_listen(ddev, 300, digital_tg_recv_sensf_req, nfcid2);
}
+
+void digital_tg_recv_md_req(struct nfc_digital_dev *ddev, void *arg,
+ struct sk_buff *resp)
+{
+ u8 rf_tech;
+ int rc;
+
+ if (IS_ERR(resp)) {
+ resp = NULL;
+ goto exit_free_skb;
+ }
+
+ rc = ddev->ops->tg_get_rf_tech(ddev, &rf_tech);
+ if (rc)
+ goto exit_free_skb;
+
+ switch (rf_tech) {
+ case NFC_DIGITAL_RF_TECH_106A:
+ rc = digital_tg_config_nfca(ddev);
+ if (rc)
+ goto exit_free_skb;
+ digital_tg_recv_sens_req(ddev, arg, resp);
+ break;
+ case NFC_DIGITAL_RF_TECH_212F:
+ case NFC_DIGITAL_RF_TECH_424F:
+ rc = digital_tg_config_nfcf(ddev, rf_tech);
+ if (rc)
+ goto exit_free_skb;
+ digital_tg_recv_sensf_req(ddev, arg, resp);
+ break;
+ default:
+ goto exit_free_skb;
+ }
+
+ return;
+
+exit_free_skb:
+ digital_poll_next_tech(ddev);
+ dev_kfree_skb(resp);
+}
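The nfc_digital_allocate_device() hunk above ties the new tg_listen_md callback to tg_get_rf_tech: digital_tg_recv_md_req() has to ask the driver which RF technology was detected before it can pick NFC-A or NFC-F handling. A standalone sketch of that registration-time check, using a simplified stand-in for the ops structure:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the subset of nfc_digital_ops checked above;
 * each flag represents "callback is provided".
 */
struct digital_ops_stub {
        bool in_configure_hw, in_send_cmd, tg_listen, tg_configure_hw;
        bool tg_send_cmd, abort_cmd, switch_rf;
        bool tg_listen_md, tg_get_rf_tech;
};

/* Mirrors the allocation-time check: the mandatory callbacks must all be
 * present, and a driver that offers tg_listen_md must also offer
 * tg_get_rf_tech, because the MD listen path needs to know which RF
 * technology woke the target up.
 */
static bool ops_valid(const struct digital_ops_stub *ops)
{
        if (!ops->in_configure_hw || !ops->in_send_cmd || !ops->tg_listen ||
            !ops->tg_configure_hw || !ops->tg_send_cmd || !ops->abort_cmd ||
            !ops->switch_rf)
                return false;
        if (ops->tg_listen_md && !ops->tg_get_rf_tech)
                return false;
        return true;
}

int main(void)
{
        struct digital_ops_stub ops = {
                .in_configure_hw = true, .in_send_cmd = true, .tg_listen = true,
                .tg_configure_hw = true, .tg_send_cmd = true, .abort_cmd = true,
                .switch_rf = true, .tg_listen_md = true, .tg_get_rf_tech = false,
        };

        printf("valid: %d\n", ops_valid(&ops)); /* 0: missing tg_get_rf_tech */
        ops.tg_get_rf_tech = true;
        printf("valid: %d\n", ops_valid(&ops)); /* 1 */
        return 0;
}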
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index 47403705197e..117708263ced 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -553,8 +553,11 @@ static void hci_stop_poll(struct nfc_dev *nfc_dev)
{
struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
- nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
- NFC_HCI_EVT_END_OPERATION, NULL, 0);
+ if (hdev->ops->stop_poll)
+ hdev->ops->stop_poll(hdev);
+ else
+ nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+ NFC_HCI_EVT_END_OPERATION, NULL, 0);
}
static int hci_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index f8f6af231381..df91bb95b12a 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -166,7 +166,9 @@ static int nci_add_new_protocol(struct nci_dev *ndev,
struct rf_tech_specific_params_nfcf_poll *nfcf_poll;
__u32 protocol;
- if (rf_protocol == NCI_RF_PROTOCOL_T2T)
+ if (rf_protocol == NCI_RF_PROTOCOL_T1T)
+ protocol = NFC_PROTO_JEWEL_MASK;
+ else if (rf_protocol == NCI_RF_PROTOCOL_T2T)
protocol = NFC_PROTO_MIFARE_MASK;
else if (rf_protocol == NCI_RF_PROTOCOL_ISO_DEP)
if (rf_tech_and_mode == NCI_NFC_A_PASSIVE_POLL_MODE)