Diffstat (limited to 'arch/mips/cavium-octeon')
 arch/mips/cavium-octeon/dma-octeon.c                   | 311
 arch/mips/cavium-octeon/executive/Makefile             |   1
 arch/mips/cavium-octeon/executive/cvmx-bootmem.c       | 104
 arch/mips/cavium-octeon/executive/cvmx-helper-errata.c |  73
 arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c   | 144
 arch/mips/cavium-octeon/executive/cvmx-sysinfo.c       |   2
 arch/mips/cavium-octeon/octeon-irq.c                   |  63
 arch/mips/cavium-octeon/octeon_boot.h                  |  70
 arch/mips/cavium-octeon/setup.c                        |   1
 arch/mips/cavium-octeon/smp.c                          | 238
 10 files changed, 1001 insertions(+), 6 deletions(-)
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index 01b1ef94b36..4b92bfc662d 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -13,20 +13,327 @@
*/
#include <linux/types.h>
#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+
+#include <linux/cache.h>
+#include <linux/io.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-npi-defs.h>
+#include <asm/octeon/cvmx-pci-defs.h>
#include <dma-coherence.h>
+#ifdef CONFIG_PCI
+#include <asm/octeon/pci-octeon.h>
+#endif
+
+#define BAR2_PCI_ADDRESS 0x8000000000ul
+
+struct bar1_index_state {
+ int16_t ref_count; /* Number of PCI mappings using this index */
+ uint16_t address_bits; /* Upper bits of physical address. This is
+ shifted 22 bits */
+};
+
+#ifdef CONFIG_PCI
+static DEFINE_SPINLOCK(bar1_lock);
+static struct bar1_index_state bar1_state[32];
+#endif
+
dma_addr_t octeon_map_dma_mem(struct device *dev, void *ptr, size_t size)
{
+#ifndef CONFIG_PCI
/* Without PCI/PCIe this function can be called for Octeon internal
devices such as USB. These devices all support 64bit addressing */
mb();
return virt_to_phys(ptr);
+#else
+ unsigned long flags;
+ uint64_t dma_mask;
+ int64_t start_index;
+ dma_addr_t result = -1;
+ uint64_t physical = virt_to_phys(ptr);
+ int64_t index;
+
+ mb();
+ /*
+ * Use the DMA masks to determine the allowed memory
+ * region. For us it doesn't limit the actual memory, just the
+ * address visible over PCI. Devices with limits need to use
+ * lower indexed Bar1 entries.
+ */
+ if (dev) {
+ dma_mask = dev->coherent_dma_mask;
+ if (dev->dma_mask)
+ dma_mask = *dev->dma_mask;
+ } else {
+ dma_mask = 0xfffffffful;
+ }
+
+ /*
+ * Platform devices, such as the internal USB, skip all
+ * translation and use Octeon physical addresses directly.
+ */
+ if (!dev || dev->bus == &platform_bus_type)
+ return physical;
+
+ switch (octeon_dma_bar_type) {
+ case OCTEON_DMA_BAR_TYPE_PCIE:
+ if (unlikely(physical < (16ul << 10)))
+ panic("dma_map_single: Not allowed to map first 16KB."
+ " It interferes with BAR0 special area\n");
+ else if ((physical + size >= (256ul << 20)) &&
+ (physical < (512ul << 20)))
+ panic("dma_map_single: Not allowed to map bootbus\n");
+ else if ((physical + size >= 0x400000000ull) &&
+ physical < 0x410000000ull)
+ panic("dma_map_single: "
+ "Attempt to map illegal memory address 0x%llx\n",
+ physical);
+ else if (physical >= 0x420000000ull)
+ panic("dma_map_single: "
+ "Attempt to map illegal memory address 0x%llx\n",
+ physical);
+ else if ((physical + size >=
+ (4ull<<30) - (OCTEON_PCI_BAR1_HOLE_SIZE<<20))
+ && physical < (4ull<<30))
+ pr_warning("dma_map_single: Warning: "
+ "Mapping memory address that might "
+ "conflict with devices 0x%llx-0x%llx\n",
+ physical, physical+size-1);
+ /* The 2nd 256MB is mapped at 256<<20 instead of 0x410000000 */
+ if ((physical >= 0x410000000ull) && physical < 0x420000000ull)
+ result = physical - 0x400000000ull;
+ else
+ result = physical;
+ if (((result+size-1) & dma_mask) != result+size-1)
+ panic("dma_map_single: Attempt to map address "
+ "0x%llx-0x%llx, which can't be accessed "
+ "according to the dma mask 0x%llx\n",
+ physical, physical+size-1, dma_mask);
+ goto done;
+
+ case OCTEON_DMA_BAR_TYPE_BIG:
+#ifdef CONFIG_64BIT
+ /* If the device supports 64bit addressing, then use BAR2 */
+ if (dma_mask > BAR2_PCI_ADDRESS) {
+ result = physical + BAR2_PCI_ADDRESS;
+ goto done;
+ }
+#endif
+ if (unlikely(physical < (4ul << 10))) {
+ panic("dma_map_single: Not allowed to map first 4KB. "
+ "It interferes with BAR0 special area\n");
+ } else if (physical < (256ul << 20)) {
+ if (unlikely(physical + size > (256ul << 20)))
+ panic("dma_map_single: Requested memory spans "
+ "Bar0 0:256MB and bootbus\n");
+ result = physical;
+ goto done;
+ } else if (unlikely(physical < (512ul << 20))) {
+ panic("dma_map_single: Not allowed to map bootbus\n");
+ } else if (physical < (2ul << 30)) {
+ if (unlikely(physical + size > (2ul << 30)))
+ panic("dma_map_single: Requested memory spans "
+ "Bar0 512MB:2GB and BAR1\n");
+ result = physical;
+ goto done;
+ } else if (physical < (2ul << 30) + (128 << 20)) {
+ /* Fall through */
+ } else if (physical <
+ (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20)) {
+ if (unlikely
+ (physical + size >
+ (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20)))
+ panic("dma_map_single: Requested memory "
+ "extends past Bar1 (4GB-%luMB)\n",
+ OCTEON_PCI_BAR1_HOLE_SIZE);
+ result = physical;
+ goto done;
+ } else if ((physical >= 0x410000000ull) &&
+ (physical < 0x420000000ull)) {
+ if (unlikely(physical + size > 0x420000000ull))
+ panic("dma_map_single: Requested memory spans "
+ "non-existent memory\n");
+ /* BAR0 fixed mapping 256MB:512MB ->
+ * 16GB+256MB:16GB+512MB */
+ result = physical - 0x400000000ull;
+ goto done;
+ } else {
+ /* Continued below switch statement */
+ }
+ break;
+
+ case OCTEON_DMA_BAR_TYPE_SMALL:
+#ifdef CONFIG_64BIT
+ /* If the device supports 64bit addressing, then use BAR2 */
+ if (dma_mask > BAR2_PCI_ADDRESS) {
+ result = physical + BAR2_PCI_ADDRESS;
+ goto done;
+ }
+#endif
+ /* Continued below switch statement */
+ break;
+
+ default:
+ panic("dma_map_single: Invalid octeon_dma_bar_type\n");
+ }
+
+ /* Don't allow a mapping to span multiple Bar entries. The hardware
+ guys won't guarantee that DMA across Bar boundaries works */
+ if (unlikely((physical >> 22) != ((physical + size - 1) >> 22)))
+ panic("dma_map_single: "
+ "Requested memory spans more than one Bar1 entry\n");
+
+ if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG)
+ start_index = 31;
+ else if (unlikely(dma_mask < (1ul << 27)))
+ start_index = (dma_mask >> 22);
+ else
+ start_index = 31;
+
+ /* Only one processor can access the Bar register at once */
+ spin_lock_irqsave(&bar1_lock, flags);
+
+ /* Look through Bar1 for existing mapping that will work */
+ for (index = start_index; index >= 0; index--) {
+ if ((bar1_state[index].address_bits == physical >> 22) &&
+ (bar1_state[index].ref_count)) {
+ /* An existing mapping will work, use it */
+ bar1_state[index].ref_count++;
+ if (unlikely(bar1_state[index].ref_count < 0))
+ panic("dma_map_single: "
+ "Bar1[%d] reference count overflowed\n",
+ (int) index);
+ result = (index << 22) | (physical & ((1 << 22) - 1));
+ /* Large BAR1 is offset at 2GB */
+ if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG)
+ result += 2ul << 30;
+ goto done_unlock;
+ }
+ }
+
+ /* No existing mappings, look for a free entry */
+ for (index = start_index; index >= 0; index--) {
+ if (unlikely(bar1_state[index].ref_count == 0)) {
+ union cvmx_pci_bar1_indexx bar1_index;
+ /* We have a free entry, use it */
+ bar1_state[index].ref_count = 1;
+ bar1_state[index].address_bits = physical >> 22;
+ bar1_index.u32 = 0;
+ /* Address bits[35:22] sent to L2C */
+ bar1_index.s.addr_idx = physical >> 22;
+ /* Don't put PCI accesses in L2. */
+ bar1_index.s.ca = 1;
+ /* Endian Swap Mode */
+ bar1_index.s.end_swp = 1;
+ /* Set '1' when the selected address range is valid. */
+ bar1_index.s.addr_v = 1;
+ octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index),
+ bar1_index.u32);
+ /* The new mapping is ready, use it */
+ result = (index << 22) | (physical & ((1 << 22) - 1));
+ /* Large BAR1 is offset at 2GB */
+ if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG)
+ result += 2ul << 30;
+ goto done_unlock;
+ }
+ }
+
+ pr_err("dma_map_single: "
+ "Can't find empty BAR1 index for physical mapping 0x%llx\n",
+ (unsigned long long) physical);
+
+done_unlock:
+ spin_unlock_irqrestore(&bar1_lock, flags);
+done:
+ pr_debug("dma_map_single 0x%llx->0x%llx\n", physical, result);
+ return result;
+#endif
}
void octeon_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
{
- /* Without PCI/PCIe this function can be called for Octeon internal
- * devices such as USB. These devices all support 64bit addressing */
+#ifndef CONFIG_PCI
+ /*
+ * Without PCI/PCIe this function can be called for Octeon internal
+ * devices such as USB. These devices all support 64bit addressing.
+ */
+ return;
+#else
+ unsigned long flags;
+ uint64_t index;
+
+ /*
+ * Platform devices, such as the internal USB, skip all
+ * translation and use Octeon physical addresses directly.
+ */
+ if (dev->bus == &platform_bus_type)
+ return;
+
+ switch (octeon_dma_bar_type) {
+ case OCTEON_DMA_BAR_TYPE_PCIE:
+ /* Nothing to do, all mappings are static */
+ goto done;
+
+ case OCTEON_DMA_BAR_TYPE_BIG:
+#ifdef CONFIG_64BIT
+ /* Nothing to do for addresses using BAR2 */
+ if (dma_addr >= BAR2_PCI_ADDRESS)
+ goto done;
+#endif
+ if (unlikely(dma_addr < (4ul << 10)))
+ panic("dma_unmap_single: Unexpected DMA address 0x%llx\n",
+ dma_addr);
+ else if (dma_addr < (2ul << 30))
+ /* Nothing to do for addresses using BAR0 */
+ goto done;
+ else if (dma_addr < (2ul << 30) + (128ul << 20))
+ /* Need to unmap, fall through */
+ index = (dma_addr - (2ul << 30)) >> 22;
+ else if (dma_addr <
+ (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20))
+ goto done; /* Nothing to do for the rest of BAR1 */
+ else
+ panic("dma_unmap_single: Unexpected DMA address 0x%llx\n",
+ dma_addr);
+ /* Continued below switch statement */
+ break;
+
+ case OCTEON_DMA_BAR_TYPE_SMALL:
+#ifdef CONFIG_64BIT
+ /* Nothing to do for addresses using BAR2 */
+ if (dma_addr >= BAR2_PCI_ADDRESS)
+ goto done;
+#endif
+ index = dma_addr >> 22;
+ /* Continued below switch statement */
+ break;
+
+ default:
+ panic("dma_unmap_single: Invalid octeon_dma_bar_type\n");
+ }
+
+ if (unlikely(index > 31))
+ panic("dma_unmap_single: "
+ "Attempt to unmap an invalid address (0x%llx)\n",
+ dma_addr);
+
+ spin_lock_irqsave(&bar1_lock, flags);
+ bar1_state[index].ref_count--;
+ if (bar1_state[index].ref_count == 0)
+ octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index), 0);
+ else if (unlikely(bar1_state[index].ref_count < 0))
+ panic("dma_unmap_single: Bar1[%u] reference count < 0\n",
+ (int) index);
+ spin_unlock_irqrestore(&bar1_lock, flags);
+done:
+ pr_debug("dma_unmap_single 0x%llx\n", dma_addr);
return;
+#endif
}
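As an illustration of the arithmetic above (not taken from the patch itself): each BAR1 window covers 4MB (1 << 22 bytes), the upper physical address bits (physical >> 22) are programmed into the window's addr_idx field, and the low 22 bits become the offset on the PCI bus. A minimal standalone sketch of that bit-splitting, using a made-up window index (the real code picks the index from the ref-counted bar1_state table):

#include <stdint.h>
#include <stdio.h>

#define BAR1_WINDOW_SHIFT 22				/* each BAR1 window covers 4MB */
#define BAR1_WINDOW_MASK  ((1ull << BAR1_WINDOW_SHIFT) - 1)

int main(void)
{
	uint64_t physical = 0x12345678ull;		/* example physical address */
	uint64_t addr_bits = physical >> BAR1_WINDOW_SHIFT;	/* value written to addr_idx */
	uint64_t offset = physical & BAR1_WINDOW_MASK;	/* offset within the 4MB window */
	int index = 7;					/* hypothetical free BAR1 slot */
	uint64_t bus = ((uint64_t)index << BAR1_WINDOW_SHIFT) | offset;

	/* For the "big" BAR layout the result is additionally offset by 2GB. */
	printf("phys 0x%llx -> addr_idx 0x%llx, bus address 0x%llx\n",
	       (unsigned long long)physical, (unsigned long long)addr_bits,
	       (unsigned long long)bus);
	return 0;
}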
diff --git a/arch/mips/cavium-octeon/executive/Makefile b/arch/mips/cavium-octeon/executive/Makefile
index 80d6cb26766..2fd66db6939 100644
--- a/arch/mips/cavium-octeon/executive/Makefile
+++ b/arch/mips/cavium-octeon/executive/Makefile
@@ -11,3 +11,4 @@
obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o
+obj-$(CONFIG_PCI) += cvmx-helper-errata.o cvmx-helper-jtag.o
diff --git a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
index 4f5a08b37cc..25666da17b2 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
@@ -31,6 +31,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-spinlock.h>
@@ -97,6 +98,33 @@ void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment)
return cvmx_bootmem_alloc_range(size, alignment, 0, 0);
}
+void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr,
+ uint64_t max_addr, uint64_t align,
+ char *name)
+{
+ int64_t addr;
+
+ addr = cvmx_bootmem_phy_named_block_alloc(size, min_addr, max_addr,
+ align, name, 0);
+ if (addr >= 0)
+ return cvmx_phys_to_ptr(addr);
+ else
+ return NULL;
+}
+
+void *cvmx_bootmem_alloc_named_address(uint64_t size, uint64_t address,
+ char *name)
+{
+ return cvmx_bootmem_alloc_named_range(size, address, address + size,
+ 0, name);
+}
+
+void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment, char *name)
+{
+ return cvmx_bootmem_alloc_named_range(size, 0, 0, alignment, name);
+}
+EXPORT_SYMBOL(cvmx_bootmem_alloc_named);
+
int cvmx_bootmem_free_named(char *name)
{
return cvmx_bootmem_phy_named_block_free(name, 0);
@@ -106,6 +134,7 @@ struct cvmx_bootmem_named_block_desc *cvmx_bootmem_find_named_block(char *name)
{
return cvmx_bootmem_phy_named_block_find(name, 0);
}
+EXPORT_SYMBOL(cvmx_bootmem_find_named_block);
void cvmx_bootmem_lock(void)
{
@@ -584,3 +613,78 @@ int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags)
cvmx_bootmem_unlock();
return named_block_ptr != NULL; /* 0 on failure, 1 on success */
}
+
+int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr,
+ uint64_t max_addr,
+ uint64_t alignment,
+ char *name,
+ uint32_t flags)
+{
+ int64_t addr_allocated;
+ struct cvmx_bootmem_named_block_desc *named_block_desc_ptr;
+
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_named_block_alloc: size: 0x%llx, min: "
+ "0x%llx, max: 0x%llx, align: 0x%llx, name: %s\n",
+ (unsigned long long)size,
+ (unsigned long long)min_addr,
+ (unsigned long long)max_addr,
+ (unsigned long long)alignment,
+ name);
+#endif
+ if (cvmx_bootmem_desc->major_version != 3) {
+ cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: "
+ "%d.%d at addr: %p\n",
+ (int)cvmx_bootmem_desc->major_version,
+ (int)cvmx_bootmem_desc->minor_version,
+ cvmx_bootmem_desc);
+ return -1;
+ }
+
+ /*
+ * Take lock here, as name lookup/block alloc/name add need to
+ * be atomic.
+ */
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_spinlock_lock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+
+ /* Get pointer to first available named block descriptor */
+ named_block_desc_ptr =
+ cvmx_bootmem_phy_named_block_find(NULL,
+ flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);
+
+ /*
+ * Check to see if name already in use, return error if name
+ * not available or no more room for blocks.
+ */
+ if (cvmx_bootmem_phy_named_block_find(name,
+ flags | CVMX_BOOTMEM_FLAG_NO_LOCKING) || !named_block_desc_ptr) {
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+ return -1;
+ }
+
+
+ /*
+ * Round size up to a multiple of the minimum alignment bytes. We
+ * need the actual size allocated to allow blocks to be coalesced
+ * when they are freed. The alloc routine does the same rounding up
+ * on all allocations.
+ */
+ size = __ALIGN_MASK(size, (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1));
+
+ addr_allocated = cvmx_bootmem_phy_alloc(size, min_addr, max_addr,
+ alignment,
+ flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);
+ if (addr_allocated >= 0) {
+ named_block_desc_ptr->base_addr = addr_allocated;
+ named_block_desc_ptr->size = size;
+ strncpy(named_block_desc_ptr->name, name,
+ cvmx_bootmem_desc->named_block_name_len);
+ named_block_desc_ptr->name[cvmx_bootmem_desc->named_block_name_len - 1] = 0;
+ }
+
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+ return addr_allocated;
+}
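A hypothetical caller of the named-block interfaces exported above might look like the sketch below; the block name, size and alignment are invented for illustration, and the headers follow the asm/octeon include layout used elsewhere in this patch:

#include <linux/kernel.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-bootmem.h>

static void *example_get_shared_buffer(void)
{
	static char name[] = "example-shared";	/* hypothetical block name */
	struct cvmx_bootmem_named_block_desc *desc;
	void *buf;

	/* Reuse the block if an earlier boot stage already created it. */
	desc = cvmx_bootmem_find_named_block(name);
	if (desc)
		return cvmx_phys_to_ptr(desc->base_addr);

	/* Otherwise allocate a new 1MB block aligned to 128 bytes. */
	buf = cvmx_bootmem_alloc_named(1 << 20, 128, name);
	if (!buf)
		pr_err("example: named bootmem allocation failed\n");
	return buf;
}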
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-errata.c b/arch/mips/cavium-octeon/executive/cvmx-helper-errata.c
new file mode 100644
index 00000000000..868659e64d4
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-errata.c
@@ -0,0 +1,73 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ *
+ * Fixes and workarounds for Octeon chip errata. This file
+ * contains functions called by cvmx-helper to work around known
+ * chip errata. For the most part, code doesn't need to call
+ * these functions directly.
+ *
+ */
+#include <linux/module.h>
+
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-helper-jtag.h>
+
+/**
+ * Due to errata G-720, the 2nd order CDR circuit on CN52XX pass
+ * 1 doesn't work properly. The following code disables 2nd order
+ * CDR for the specified QLM.
+ *
+ * @qlm: QLM to disable 2nd order CDR for.
+ */
+void __cvmx_helper_errata_qlm_disable_2nd_order_cdr(int qlm)
+{
+ int lane;
+ cvmx_helper_qlm_jtag_init();
+ /* We need to load all four lanes of the QLM, a total of 1072 bits */
+ for (lane = 0; lane < 4; lane++) {
+ /*
+ * Each lane has 268 bits. We need to set
+ * cfg_cdr_incx<67:64> = 3 and cfg_cdr_secord<77> =
+ * 1. All other bits are zero. Bits go in LSB first,
+ * so start off with the zeros for bits <63:0>.
+ */
+ cvmx_helper_qlm_jtag_shift_zeros(qlm, 63 - 0 + 1);
+ /* cfg_cdr_incx<67:64>=3 */
+ cvmx_helper_qlm_jtag_shift(qlm, 67 - 64 + 1, 3);
+ /* Zeros for bits <76:68> */
+ cvmx_helper_qlm_jtag_shift_zeros(qlm, 76 - 68 + 1);
+ /* cfg_cdr_secord<77>=1 */
+ cvmx_helper_qlm_jtag_shift(qlm, 77 - 77 + 1, 1);
+ /* Zeros for bits <267:78> */
+ cvmx_helper_qlm_jtag_shift_zeros(qlm, 267 - 78 + 1);
+ }
+ cvmx_helper_qlm_jtag_update(qlm);
+}
+EXPORT_SYMBOL(__cvmx_helper_errata_qlm_disable_2nd_order_cdr);
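The per-lane shift counts in the loop above can be checked against the 268 bits per lane (4 * 268 = 1072 bits total) mentioned in the comments; a throwaway, purely illustrative check:

#include <stdio.h>

int main(void)
{
	/* zeros <63:0> + cfg_cdr_incx <67:64> + zeros <76:68> +
	   cfg_cdr_secord <77> + zeros <267:78> */
	int bits = (63 - 0 + 1) + (67 - 64 + 1) + (76 - 68 + 1) +
		   (77 - 77 + 1) + (267 - 78 + 1);

	printf("bits per lane = %d, full chain = %d\n", bits, 4 * bits);
	return 0;	/* prints 268 and 1072 */
}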
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c b/arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c
new file mode 100644
index 00000000000..c1c54890bae
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c
@@ -0,0 +1,144 @@
+
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ *
+ * Helper utilities for qlm_jtag.
+ *
+ */
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-helper-jtag.h>
+
+
+/**
+ * Initialize the internal QLM JTAG logic to allow programming
+ * of the JTAG chain by the cvmx_helper_qlm_jtag_*() functions.
+ * These functions should only be used at the direction of Cavium
+ * Networks. Programming incorrect values into the JTAG chain
+ * can cause chip damage.
+ */
+void cvmx_helper_qlm_jtag_init(void)
+{
+ union cvmx_ciu_qlm_jtgc jtgc;
+ uint32_t clock_div = 0;
+ uint32_t divisor = cvmx_sysinfo_get()->cpu_clock_hz / (25 * 1000000);
+ divisor = (divisor - 1) >> 2;
+ /* Convert the divisor into a power of 2 shift */
+ while (divisor) {
+ clock_div++;
+ divisor = divisor >> 1;
+ }
+
+ /*
+ * Clock divider for QLM JTAG operations. eclk is divided by
+ * 2^(CLK_DIV + 2)
+ */
+ jtgc.u64 = 0;
+ jtgc.s.clk_div = clock_div;
+ jtgc.s.mux_sel = 0;
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX))
+ jtgc.s.bypass = 0x3;
+ else
+ jtgc.s.bypass = 0xf;
+ cvmx_write_csr(CVMX_CIU_QLM_JTGC, jtgc.u64);
+ cvmx_read_csr(CVMX_CIU_QLM_JTGC);
+}
+
+/**
+ * Write up to 32bits into the QLM jtag chain. Bits are shifted
+ * into the MSB and out the LSB, so you should shift in the low
+ * order bits followed by the high order bits. The JTAG chain is
+ * 4 * 268 bits long, or 1072.
+ *
+ * @qlm: QLM to shift value into
+ * @bits: Number of bits to shift in (1-32).
+ * @data: Data to shift in. Bit 0 enters the chain first, followed by
+ * bit 1, etc.
+ *
+ * Returns the low order bits that were shifted out of the JTAG
+ * chain.
+ */
+uint32_t cvmx_helper_qlm_jtag_shift(int qlm, int bits, uint32_t data)
+{
+ union cvmx_ciu_qlm_jtgd jtgd;
+ jtgd.u64 = 0;
+ jtgd.s.shift = 1;
+ jtgd.s.shft_cnt = bits - 1;
+ jtgd.s.shft_reg = data;
+ if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
+ jtgd.s.select = 1 << qlm;
+ cvmx_write_csr(CVMX_CIU_QLM_JTGD, jtgd.u64);
+ do {
+ jtgd.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGD);
+ } while (jtgd.s.shift);
+ return jtgd.s.shft_reg >> (32 - bits);
+}
+
+/**
+ * Shift long sequences of zeros into the QLM JTAG chain. It is
+ * common to need to shift more than 32 bits of zeros into the
+ * chain. This function is a convenience wrapper around
+ * cvmx_helper_qlm_jtag_shift() to shift more than 32 bits of
+ * zeros at a time.
+ *
+ * @qlm: QLM to shift zeros into
+ * @bits: Number of zero bits to shift in
+ */
+void cvmx_helper_qlm_jtag_shift_zeros(int qlm, int bits)
+{
+ while (bits > 0) {
+ int n = bits;
+ if (n > 32)
+ n = 32;
+ cvmx_helper_qlm_jtag_shift(qlm, n, 0);
+ bits -= n;
+ }
+}
+
+/**
+ * Program the QLM JTAG chain into all lanes of the QLM. You must
+ * have already shifted in 268*4, or 1072 bits into the JTAG
+ * chain. Updating invalid values can possibly cause chip damage.
+ *
+ * @qlm: QLM to program
+ */
+void cvmx_helper_qlm_jtag_update(int qlm)
+{
+ union cvmx_ciu_qlm_jtgd jtgd;
+
+ /* Update the new data */
+ jtgd.u64 = 0;
+ jtgd.s.update = 1;
+ if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
+ jtgd.s.select = 1 << qlm;
+ cvmx_write_csr(CVMX_CIU_QLM_JTGD, jtgd.u64);
+ do {
+ jtgd.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGD);
+ } while (jtgd.s.update);
+}
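To make the divider selection in cvmx_helper_qlm_jtag_init() concrete, here is a standalone worked example assuming a 600 MHz eclk (the clock rate is only an assumption for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cpu_clock_hz = 600000000ull;		/* assumed core clock */
	uint32_t clock_div = 0;
	uint32_t divisor = cpu_clock_hz / (25 * 1000000);	/* 600 MHz / 25 MHz = 24 */

	divisor = (divisor - 1) >> 2;			/* 23 >> 2 = 5 */
	while (divisor) {				/* convert to a power-of-2 shift */
		clock_div++;
		divisor >>= 1;
	}
	/* JTAG clock = eclk / 2^(clock_div + 2) = 600 MHz / 32 = 18.75 MHz */
	printf("clk_div = %u -> %.2f MHz\n", clock_div,
	       (double)cpu_clock_hz / (1u << (clock_div + 2)) / 1e6);
	return 0;
}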
diff --git a/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c b/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
index 4812370706a..e5838890cba 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
@@ -29,6 +29,7 @@
* This module provides system/board/application information obtained
* by the bootloader.
*/
+#include <linux/module.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-spinlock.h>
@@ -69,6 +70,7 @@ struct cvmx_sysinfo *cvmx_sysinfo_get(void)
{
return &(state.sysinfo);
}
+EXPORT_SYMBOL(cvmx_sysinfo_get);
/**
* This function is used in non-simple executive environments (such as
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index d3a0c8154be..384f1842bfb 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -7,9 +7,11 @@
*/
#include <linux/irq.h>
#include <linux/interrupt.h>
-#include <linux/hardirq.h>
+#include <linux/smp.h>
#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-pexp-defs.h>
+#include <asm/octeon/cvmx-npi-defs.h>
DEFINE_RWLOCK(octeon_irq_ciu0_rwlock);
DEFINE_RWLOCK(octeon_irq_ciu1_rwlock);
@@ -499,3 +501,62 @@ asmlinkage void plat_irq_dispatch(void)
}
}
}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
+{
+ unsigned int isset;
+#ifdef CONFIG_SMP
+ int coreid = cpu_logical_map(cpu);
+#else
+ int coreid = cvmx_get_core_num();
+#endif
+ int bit = (irq < OCTEON_IRQ_WDOG0) ?
+ irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;
+ if (irq < 64) {
+ isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
+ (1ull << bit)) >> bit;
+ } else {
+ isset = (cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)) &
+ (1ull << bit)) >> bit;
+ }
+ return isset;
+}
+
+void fixup_irqs(void)
+{
+ int irq;
+
+ for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
+ octeon_irq_core_disable_local(irq);
+
+ for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_GPIO15; irq++) {
+ if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
+ /* ciu irq migrates to next cpu */
+ octeon_irq_chip_ciu0.disable(irq);
+ octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
+ }
+ }
+
+#if 0
+ for (irq = OCTEON_IRQ_MBOX0; irq <= OCTEON_IRQ_MBOX1; irq++)
+ octeon_irq_mailbox_mask(irq);
+#endif
+ for (irq = OCTEON_IRQ_UART0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
+ if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
+ /* ciu irq migrates to next cpu */
+ octeon_irq_chip_ciu0.disable(irq);
+ octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
+ }
+ }
+
+ for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED135; irq++) {
+ if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
+ /* ciu irq migrates to next cpu */
+ octeon_irq_chip_ciu1.disable(irq);
+ octeon_irq_ciu1_set_affinity(irq, &cpu_online_map);
+ }
+ }
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/mips/cavium-octeon/octeon_boot.h b/arch/mips/cavium-octeon/octeon_boot.h
new file mode 100644
index 00000000000..0f7f84accf9
--- /dev/null
+++ b/arch/mips/cavium-octeon/octeon_boot.h
@@ -0,0 +1,70 @@
+/*
+ * (C) Copyright 2004, 2005 Cavium Networks
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __OCTEON_BOOT_H__
+#define __OCTEON_BOOT_H__
+
+#include <linux/types.h>
+
+struct boot_init_vector {
+ uint32_t stack_addr;
+ uint32_t code_addr;
+ uint32_t app_start_func_addr;
+ uint32_t k0_val;
+ uint32_t flags;
+ uint32_t boot_info_addr;
+ uint32_t pad;
+ uint32_t pad2;
+};
+
+/* similar to bootloader's linux_app_boot_info but without global data */
+struct linux_app_boot_info {
+ uint32_t labi_signature;
+ uint32_t start_core0_addr;
+ uint32_t avail_coremask;
+ uint32_t pci_console_active;
+ uint32_t icache_prefetch_disable;
+ uint32_t InitTLBStart_addr;
+ uint32_t start_app_addr;
+ uint32_t cur_exception_base;
+ uint32_t no_mark_private_data;
+ uint32_t compact_flash_common_base_addr;
+ uint32_t compact_flash_attribute_base_addr;
+ uint32_t led_display_base_addr;
+};
+
+/* Rather than copying a lot of the bootloader's structures,
+ only the offset of the member we need is defined here */
+#define AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK 0x765c
+
+/* hardcoded in bootloader */
+#define LABI_ADDR_IN_BOOTLOADER 0x700
+
+#define LINUX_APP_BOOT_BLOCK_NAME "linux-app-boot"
+
+#define LABI_SIGNATURE 0xAABBCCDD
+
+/* from uboot-headers/octeon_mem_map.h */
+#define EXCEPTION_BASE_INCR (4 * 1024)
+ /* Increment size for exception base addresses (4k minimum) */
+#define EXCEPTION_BASE_BASE 0
+#define BOOTLOADER_PRIV_DATA_BASE (EXCEPTION_BASE_BASE + 0x800)
+#define BOOTLOADER_BOOT_VECTOR (BOOTLOADER_PRIV_DATA_BASE)
+
+#endif /* __OCTEON_BOOT_H__ */
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 5f4e49ba471..da559249cc2 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -13,6 +13,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/serial.h>
+#include <linux/smp.h>
#include <linux/types.h>
#include <linux/string.h> /* for memset */
#include <linux/tty.h>
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 24e0ad63980..32d51a31dc4 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -5,6 +5,7 @@
*
* Copyright (C) 2004-2008 Cavium Networks
*/
+#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/smp.h>
@@ -19,10 +20,16 @@
#include <asm/octeon/octeon.h>
+#include "octeon_boot.h"
+
volatile unsigned long octeon_processor_boot = 0xff;
volatile unsigned long octeon_processor_sp;
volatile unsigned long octeon_processor_gp;
+#ifdef CONFIG_HOTPLUG_CPU
+static unsigned int InitTLBStart_addr;
+#endif
+
static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
{
const int coreid = cvmx_get_core_num();
@@ -67,8 +74,28 @@ static inline void octeon_send_ipi_mask(cpumask_t mask, unsigned int action)
}
/**
- * Detect available CPUs, populate phys_cpu_present_map
+ * Detect available CPUs, populate cpu_possible_map
*/
+static void octeon_smp_hotplug_setup(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ uint32_t labi_signature;
+
+ labi_signature =
+ cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ LABI_ADDR_IN_BOOTLOADER +
+ offsetof(struct linux_app_boot_info,
+ labi_signature)));
+ if (labi_signature != LABI_SIGNATURE)
+ pr_err("The bootloader version on this board is incorrect\n");
+ InitTLBStart_addr =
+ cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ LABI_ADDR_IN_BOOTLOADER +
+ offsetof(struct linux_app_boot_info,
+ InitTLBStart_addr)));
+#endif
+}
+
static void octeon_smp_setup(void)
{
const int coreid = cvmx_get_core_num();
@@ -91,6 +118,9 @@ static void octeon_smp_setup(void)
cpus++;
}
}
+ cpu_present_map = cpu_possible_map;
+
+ octeon_smp_hotplug_setup();
}
/**
@@ -128,6 +158,17 @@ static void octeon_init_secondary(void)
const int coreid = cvmx_get_core_num();
union cvmx_ciu_intx_sum0 interrupt_enable;
+#ifdef CONFIG_HOTPLUG_CPU
+ unsigned int cur_exception_base;
+
+ cur_exception_base = cvmx_read64_uint32(
+ CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ LABI_ADDR_IN_BOOTLOADER +
+ offsetof(struct linux_app_boot_info,
+ cur_exception_base)));
+ /* cur_exception_base is incremented in bootloader after setting */
+ write_c0_ebase((unsigned int)(cur_exception_base - EXCEPTION_BASE_INCR));
+#endif
octeon_check_cpu_bist();
octeon_init_cvmcount();
/*
@@ -153,11 +194,11 @@ static void octeon_init_secondary(void)
void octeon_prepare_cpus(unsigned int max_cpus)
{
cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffffffff);
- if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_SHARED,
+ if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_DISABLED,
"mailbox0", mailbox_interrupt)) {
panic("Cannot request_irq(OCTEON_IRQ_MBOX0)\n");
}
- if (request_irq(OCTEON_IRQ_MBOX1, mailbox_interrupt, IRQF_SHARED,
+ if (request_irq(OCTEON_IRQ_MBOX1, mailbox_interrupt, IRQF_DISABLED,
"mailbox1", mailbox_interrupt)) {
panic("Cannot request_irq(OCTEON_IRQ_MBOX1)\n");
}
@@ -199,6 +240,193 @@ static void octeon_cpus_done(void)
#endif
}
+#ifdef CONFIG_HOTPLUG_CPU
+
+/* State of each CPU. */
+DEFINE_PER_CPU(int, cpu_state);
+
+extern void fixup_irqs(void);
+
+static DEFINE_SPINLOCK(smp_reserve_lock);
+
+static int octeon_cpu_disable(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ if (cpu == 0)
+ return -EBUSY;
+
+ spin_lock(&smp_reserve_lock);
+
+ cpu_clear(cpu, cpu_online_map);
+ cpu_clear(cpu, cpu_callin_map);
+ local_irq_disable();
+ fixup_irqs();
+ local_irq_enable();
+
+ flush_cache_all();
+ local_flush_tlb_all();
+
+ spin_unlock(&smp_reserve_lock);
+
+ return 0;
+}
+
+static void octeon_cpu_die(unsigned int cpu)
+{
+ int coreid = cpu_logical_map(cpu);
+ uint32_t avail_coremask;
+ struct cvmx_bootmem_named_block_desc *block_desc;
+
+#ifdef CONFIG_CAVIUM_OCTEON_WATCHDOG
+ /* Disable the watchdog */
+ cvmx_ciu_wdogx_t ciu_wdog;
+ ciu_wdog.u64 = cvmx_read_csr(CVMX_CIU_WDOGX(cpu));
+ ciu_wdog.s.mode = 0;
+ cvmx_write_csr(CVMX_CIU_WDOGX(cpu), ciu_wdog.u64);
+#endif
+
+ while (per_cpu(cpu_state, cpu) != CPU_DEAD)
+ cpu_relax();
+
+ /*
+ * The strategy for getting/setting the available core mask is a bit
+ * involved; it is copied from the bootloader.
+ */
+ /* LINUX_APP_BOOT_BLOCK is initialized in bootoct binary */
+ block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);
+
+ if (!block_desc) {
+ avail_coremask =
+ cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ LABI_ADDR_IN_BOOTLOADER +
+ offsetof
+ (struct linux_app_boot_info,
+ avail_coremask)));
+ } else { /* alternative, already initialized */
+ avail_coremask =
+ cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ block_desc->base_addr +
+ AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK));
+ }
+
+ avail_coremask |= 1 << coreid;
+
+ /* Setting avail_coremask for bootoct binary */
+ if (!block_desc) {
+ cvmx_write64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ LABI_ADDR_IN_BOOTLOADER +
+ offsetof(struct linux_app_boot_info,
+ avail_coremask)),
+ avail_coremask);
+ } else {
+ cvmx_write64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ block_desc->base_addr +
+ AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK),
+ avail_coremask);
+ }
+
+ pr_info("Reset core %d. Available Coremask = %x\n", coreid,
+ avail_coremask);
+ cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
+ cvmx_write_csr(CVMX_CIU_PP_RST, 0);
+}
+
+void play_dead(void)
+{
+ int coreid = cvmx_get_core_num();
+
+ idle_task_exit();
+ octeon_processor_boot = 0xff;
+ per_cpu(cpu_state, coreid) = CPU_DEAD;
+
+ while (1) /* core will be reset here */
+ ;
+}
+
+extern void kernel_entry(unsigned long arg1, ...);
+
+static void start_after_reset(void)
+{
+ kernel_entry(0, 0, 0); /* set a2 = 0 for secondary core */
+}
+
+int octeon_update_boot_vector(unsigned int cpu)
+{
+
+ int coreid = cpu_logical_map(cpu);
+ unsigned int avail_coremask;
+ struct cvmx_bootmem_named_block_desc *block_desc;
+ struct boot_init_vector *boot_vect =
+ (struct boot_init_vector *) cvmx_phys_to_ptr(0x0 +
+ BOOTLOADER_BOOT_VECTOR);
+
+ block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);
+
+ if (!block_desc) {
+ avail_coremask =
+ cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ LABI_ADDR_IN_BOOTLOADER +
+ offsetof(struct linux_app_boot_info,
+ avail_coremask)));
+ } else { /* alternative, already initialized */
+ avail_coremask =
+ cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ block_desc->base_addr +
+ AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK));
+ }
+
+ if (!(avail_coremask & (1 << coreid))) {
+ /* core not available; assume it was caught by the simple executive */
+ cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
+ cvmx_write_csr(CVMX_CIU_PP_RST, 0);
+ }
+
+ boot_vect[coreid].app_start_func_addr =
+ (uint32_t) (unsigned long) start_after_reset;
+ boot_vect[coreid].code_addr = InitTLBStart_addr;
+
+ CVMX_SYNC;
+
+ cvmx_write_csr(CVMX_CIU_NMI, (1 << coreid) & avail_coremask);
+
+ return 0;
+}
+
+static int __cpuinit octeon_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ octeon_update_boot_vector(cpu);
+ break;
+ case CPU_ONLINE:
+ pr_info("Cpu %d online\n", cpu);
+ break;
+ case CPU_DEAD:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata octeon_cpu_notifier = {
+ .notifier_call = octeon_cpu_callback,
+};
+
+static int __cpuinit register_cavium_notifier(void)
+{
+ register_hotcpu_notifier(&octeon_cpu_notifier);
+
+ return 0;
+}
+
+late_initcall(register_cavium_notifier);
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
struct plat_smp_ops octeon_smp_ops = {
.send_ipi_single = octeon_send_ipi_single,
.send_ipi_mask = octeon_send_ipi_mask,
@@ -208,4 +436,8 @@ struct plat_smp_ops octeon_smp_ops = {
.boot_secondary = octeon_boot_secondary,
.smp_setup = octeon_smp_setup,
.prepare_cpus = octeon_prepare_cpus,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = octeon_cpu_disable,
+ .cpu_die = octeon_cpu_die,
+#endif
};