Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2005-2011 Atheros Communications Inc. |
| 3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. |
| 4 | * |
| 5 | * Permission to use, copy, modify, and/or distribute this software for any |
| 6 | * purpose with or without fee is hereby granted, provided that the above |
| 7 | * copyright notice and this permission notice appear in all copies. |
| 8 | * |
| 9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES |
| 10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF |
| 11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR |
| 12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |
| 13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |
| 14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF |
| 15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
| 16 | */ |
| 17 | |
| 18 | #ifndef _PCI_H_ |
| 19 | #define _PCI_H_ |
| 20 | |
| 21 | #include <linux/interrupt.h> |
| 22 | |
| 23 | #include "hw.h" |
| 24 | #include "ce.h" |
| 25 | |
/* FW dump area */
#define REG_DUMP_COUNT_QCA988X 60

/*
 * maximum number of bytes that can be handled atomically by
 * DiagRead/DiagWrite
 */
#define DIAG_TRANSFER_LIMIT 2048
| 39 | |
/* State for one BMI (Bootloader Messaging Interface) transfer. */
struct bmi_xfer {
	/* signalled when the transfer has finished */
	struct completion done;
	/* true if the caller expects a response from the target */
	bool wait_for_resp;
	/* length in bytes of the received response
	 * (NOTE(review): presumably filled by the completion path in
	 * pci.c — confirm there) */
	u32 resp_len;
};
| 45 | |
/*
 * Bookkeeping for one Copy Engine send/recv completion event.
 * Entries live either on a pipe's compl_free list or on the
 * device-wide compl_process list (see struct ath10k_pci below).
 */
struct ath10k_pci_compl {
	struct list_head list;		/* linkage on compl_free/compl_process */
	int send_or_recv;		/* one of HIF_CE_COMPLETE_* below */
	struct ce_state *ce_state;	/* CE that produced this completion */
	struct hif_ce_pipe_info *pipe_info; /* pipe the CE belongs to */
	void *transfer_context;		/* opaque per-transfer caller context */
	unsigned int nbytes;		/* bytes transferred */
	unsigned int transfer_id;
	unsigned int flags;
};

/* compl_state.send_or_recv */
#define HIF_CE_COMPLETE_FREE 0
#define HIF_CE_COMPLETE_SEND 1
#define HIF_CE_COMPLETE_RECV 2
| 61 | |
/*
 * PCI-specific Target state
 *
 * NOTE: Structure is shared between Host software and Target firmware!
 * The layout and 32-bit member sizes must not be changed.
 *
 * Much of this may be of interest to the Host so
 * HOST_INTEREST->hi_interconnect_state points here
 * (and all members are 32-bit quantities in order to
 * facilitate Host access). In particular, Host software is
 * required to initialize pipe_cfg_addr and svc_to_pipe_map.
 */
struct pcie_state {
	/* Pipe configuration Target address */
	/* NB: ce_pipe_config[CE_COUNT] */
	u32 pipe_cfg_addr;

	/* Service to pipe map Target address */
	/* NB: service_to_pipe[PIPE_TO_CE_MAP_CN] */
	u32 svc_to_pipe_map;

	/* number of MSI interrupts requested */
	u32 msi_requested;

	/* number of MSI interrupts granted */
	u32 msi_granted;

	/* Message Signalled Interrupt address */
	u32 msi_addr;

	/* Base data */
	u32 msi_data;

	/*
	 * Data for firmware interrupt;
	 * MSI data for other interrupts are
	 * in various SoC registers
	 */
	u32 msi_fw_intr_data;

	/* PCIE_PWR_METHOD_* */
	u32 power_mgmt_method;

	/* PCIE_CONFIG_FLAG_* (see below) */
	u32 config_flags;
};
| 107 | |
/* PCIE_CONFIG_FLAG definitions (values for struct pcie_state.config_flags) */
#define PCIE_CONFIG_FLAG_ENABLE_L1  0x0000001

/* Host software's Copy Engine configuration. */
#define CE_ATTR_FLAGS 0
| 113 | |
/*
 * Configuration information for a Copy Engine pipe.
 * Passed from Host to Target during startup (one per CE).
 *
 * NOTE: Structure is shared between Host software and Target firmware!
 * The layout and 32-bit member sizes must not be changed.
 */
struct ce_pipe_config {
	u32 pipenum;	/* CE id */
	u32 pipedir;	/* PIPEDIR_* (see below) */
	u32 nentries;
	u32 nbytes_max;
	u32 flags;	/* CE_ATTR_FLAGS */
	u32 reserved;
};
| 128 | |
/*
 * Directions for interconnect pipe configuration.
 * These definitions may be used during configuration and are shared
 * between Host and Target; do not renumber.
 *
 * Pipe Directions are relative to the Host, so PIPEDIR_IN means
 * "coming IN over air through Target to Host" as with a WiFi Rx operation.
 * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
 * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
 * Target since things that are "PIPEDIR_OUT" are coming IN to the Target
 * over the interconnect.
 */
#define PIPEDIR_NONE    0
#define PIPEDIR_IN      1  /* Target-->Host, WiFi Rx direction */
#define PIPEDIR_OUT     2  /* Host->Target, WiFi Tx direction */
#define PIPEDIR_INOUT   3  /* bidirectional */
| 145 | |
/* Establish a mapping between a service/direction and a pipe.
 * NOTE(review): appears to be shared with the target via
 * pcie_state.svc_to_pipe_map — confirm before changing layout. */
struct service_to_pipe {
	u32 service_id;
	u32 pipedir;	/* PIPEDIR_* */
	u32 pipenum;	/* CE pipe number */
};
| 152 | |
/* Feature bits for struct ath10k_pci.features (see DECLARE_BITMAP below). */
enum ath10k_pci_features {
	ATH10K_PCI_FEATURE_MSI_X		= 0,
	/* device needs the dummy-read write workaround, see
	 * ath10k_pci_write32() */
	ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND	= 1,
	/* target sleep/wake must be managed explicitly, see
	 * ath10k_pci_wake()/ath10k_pci_sleep() */
	ATH10K_PCI_FEATURE_SOC_POWER_SAVE	= 2,

	/* keep last */
	ATH10K_PCI_FEATURE_COUNT
};
| 161 | |
/* Per-pipe state. */
struct hif_ce_pipe_info {
	/* Handle of underlying Copy Engine */
	struct ce_state *ce_hdl;

	/* Our pipe number; facilitates use of pipe_info ptrs. */
	u8 pipe_num;

	/* Convenience back pointer to hif_ce_state. */
	struct ath10k *hif_ce_state;

	/* buffer size for this pipe */
	size_t buf_sz;

	/* protects compl_free and num_send_allowed */
	spinlock_t pipe_lock;

	/* List of free CE completion slots (struct ath10k_pci_compl) */
	struct list_head compl_free;

	/* Limit the number of outstanding send requests. */
	int num_sends_allowed;

	/* back pointer to owning device state */
	struct ath10k_pci *ar_pci;
	struct tasklet_struct intr;
};
| 187 | |
/* Per-device state for the ath10k PCI transport. */
struct ath10k_pci {
	struct pci_dev *pdev;
	struct device *dev;
	struct ath10k *ar;
	void __iomem *mem;		/* mapped device register space */
	int cacheline_sz;

	/* enum ath10k_pci_features bits */
	DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT);

	/*
	 * Number of MSI interrupts granted, 0 --> using legacy PCI line
	 * interrupts.
	 */
	int num_msi_intrs;

	struct tasklet_struct intr_tq;
	struct tasklet_struct msi_fw_err;

	/* Number of Copy Engines supported */
	unsigned int ce_count;

	int started;

	atomic_t keep_awake_count;
	bool verified_awake;

	/* List of CE completions to be processed */
	struct list_head compl_process;

	/* protects compl_processing and compl_process */
	spinlock_t compl_lock;

	bool compl_processing;

	struct hif_ce_pipe_info pipe_info[CE_COUNT_MAX];

	struct ath10k_hif_cb msg_callbacks_current;

	/* Target address used to signal a pending firmware event */
	u32 fw_indicator_address;

	/* Copy Engine used for Diagnostic Accesses */
	struct ce_state *ce_diag;

	/* FIXME: document what this really protects */
	spinlock_t ce_lock;

	/* Map CE id to ce_state */
	struct ce_state *ce_id_to_state[CE_COUNT_MAX];

	/* makes sure that dummy reads are atomic (see ath10k_pci_write32) */
	spinlock_t hw_v1_workaround_lock;
};
| 241 | |
| 242 | static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar) |
| 243 | { |
| 244 | return ar->hif.priv; |
| 245 | } |
| 246 | |
| 247 | static inline u32 ath10k_pci_reg_read32(void __iomem *mem, u32 addr) |
| 248 | { |
| 249 | return ioread32(mem + PCIE_LOCAL_BASE_ADDRESS + addr); |
| 250 | } |
| 251 | |
| 252 | static inline void ath10k_pci_reg_write32(void __iomem *mem, u32 addr, u32 val) |
| 253 | { |
| 254 | iowrite32(val, mem + PCIE_LOCAL_BASE_ADDRESS + addr); |
| 255 | } |
| 256 | |
#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
#define PCIE_WAKE_TIMEOUT 5000	/* 5ms */

/* PCI BAR index used to map the device registers
 * (NOTE(review): confirm against the probe code in pci.c) */
#define BAR_NUM 0

#define CDC_WAR_MAGIC_STR   0xceef0000
#define CDC_WAR_DATA_CE     4

/*
 * TODO: Should be a function call specific to each Target-type.
 * This convoluted macro converts from Target CPU Virtual Address Space to CE
 * Address Space. As part of this process, we conservatively fetch the current
 * PCIE_BAR. MOST of the time, this should match the upper bits of PCI space
 * for this device; but that's not guaranteed.
 */
#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr)			\
	(((ioread32((pci_addr)+(SOC_CORE_BASE_ADDRESS|			\
	  CORE_CTRL_ADDRESS)) & 0x7ff) << 21) |				\
	 0x100000 | ((addr) & 0xfffff))

/* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
#define DIAG_ACCESS_CE_TIMEOUT_MS 10
| 279 | |
/*
 * This API allows the Host to access Target registers directly
 * and relatively efficiently over PCIe.
 * This allows the Host to avoid extra overhead associated with
 * sending a message to firmware and waiting for a response message
 * from firmware, as is done on other interconnects.
 *
 * Yet there is some complexity with direct accesses because the
 * Target's power state is not known a priori. The Host must issue
 * special PCIe reads/writes in order to explicitly wake the Target
 * and to verify that it is awake and will remain awake.
 *
 * Usage:
 *
 *   Use ath10k_pci_read32 and ath10k_pci_write32 to access Target space.
 *   These calls must be bracketed by ath10k_pci_wake and
 *   ath10k_pci_sleep.  A single BEGIN/END pair is adequate for
 *   multiple READ/WRITE operations.
 *
 *   Use ath10k_pci_wake to put the Target in a state in
 *   which it is legal for the Host to directly access it. This
 *   may involve waking the Target from a low power state, which
 *   may take up to 2Ms!
 *
 *   Use ath10k_pci_sleep to tell the Target that as far as
 *   this code path is concerned, it no longer needs to remain
 *   directly accessible.  BEGIN/END is under a reference counter;
 *   multiple code paths may issue BEGIN/END on a single targid.
 */
static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
				      u32 value)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *addr = ar_pci->mem;

	if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
		unsigned long irq_flags;

		/*
		 * HW 1.0 workaround: three dummy reads from offset+4
		 * must immediately precede the write.  The spinlock
		 * keeps the read/read/read/write sequence atomic with
		 * respect to other register accesses (see
		 * hw_v1_workaround_lock above).
		 */
		spin_lock_irqsave(&ar_pci->hw_v1_workaround_lock, irq_flags);

		ioread32(addr+offset+4); /* 3rd read prior to write */
		ioread32(addr+offset+4); /* 2nd read prior to write */
		ioread32(addr+offset+4); /* 1st read prior to write */
		iowrite32(value, addr+offset);

		spin_unlock_irqrestore(&ar_pci->hw_v1_workaround_lock,
				       irq_flags);
	} else {
		iowrite32(value, addr+offset);
	}
}
| 331 | |
| 332 | static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) |
| 333 | { |
| 334 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
| 335 | |
| 336 | return ioread32(ar_pci->mem + offset); |
| 337 | } |
| 338 | |
/* Low-level target wake/sleep, implemented in pci.c; normally used
 * through the ath10k_pci_wake()/ath10k_pci_sleep() wrappers below. */
void ath10k_do_pci_wake(struct ath10k *ar);
void ath10k_do_pci_sleep(struct ath10k *ar);
| 341 | |
| 342 | static inline void ath10k_pci_wake(struct ath10k *ar) |
| 343 | { |
Bartosz Markowski | 8cc8df9 | 2013-08-02 09:58:49 +0200 | [diff] [blame] | 344 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
| 345 | |
| 346 | if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 347 | ath10k_do_pci_wake(ar); |
| 348 | } |
| 349 | |
| 350 | static inline void ath10k_pci_sleep(struct ath10k *ar) |
| 351 | { |
Bartosz Markowski | 8cc8df9 | 2013-08-02 09:58:49 +0200 | [diff] [blame] | 352 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
| 353 | |
| 354 | if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 355 | ath10k_do_pci_sleep(ar); |
| 356 | } |
| 357 | |
| 358 | #endif /* _PCI_H_ */ |