/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 * Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

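/*
 * Editorial note (assumption, not in the original source): at probe time the
 * driver selects the ipr_chip[] entry whose PCI vendor/device IDs match the
 * adapter, and the ipr_chip_cfg_t it points to supplies the mailbox and
 * interrupt register offsets used throughout the rest of this file.
 */
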
static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 5). (default:2)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

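/*
 * Usage example (added editorially, not in the original source): the
 * parameters above are set when the module is loaded, e.g.
 *
 *	modprobe ipr max_speed=1 log_level=2 number_of_msix=2
 *
 * or, for a built-in driver, on the kernel command line as ipr.max_speed=1.
 */
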
/* A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

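/*
 * Editorial note (assumption based on struct ipr_error_table_t in ipr.h):
 * each entry above pairs an IOASC with a flag selecting whether the failing
 * IOASA is logged, the log level used when the error is reported, and the
 * message text shown to the user.
 */
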
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 * Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 *	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[atomic_add_return
			(1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 * @fast_done:	fast done function call-back
 *
 * Return value:
 *	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->fast_done = fast_done;
	init_timer(&ipr_cmd->timer);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	hrr queue
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 *	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 *	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

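/**
 * ipr_get_hrrq_index - Select an HRR queue index (kernel-doc added editorially)
 * @ioa_cfg:	ioa config struct
 *
 * With a single HRR queue this always returns 0; otherwise it round-robins
 * across queues 1 .. hrrq_num - 1, which suggests queue 0 is reserved for
 * the driver's internal commands.
 *
 * Return value:
 *	hrrq index
 **/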
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	if (ioa_cfg->hrrq_num == 1)
		return 0;
	else
		return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

1059/**
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001060 * ipr_update_ata_class - Update the ata class in the resource entry
Linus Torvalds1da177e2005-04-16 15:20:36 -07001061 * @res: resource entry struct
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001062 * @proto: cfgte device bus protocol value
Linus Torvalds1da177e2005-04-16 15:20:36 -07001063 *
1064 * Return value:
1065 * none
1066 **/
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001067static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001068{
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03001069 switch (proto) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001070 case IPR_PROTO_SATA:
1071 case IPR_PROTO_SAS_STP:
1072 res->ata_class = ATA_DEV_ATA;
1073 break;
1074 case IPR_PROTO_SATA_ATAPI:
1075 case IPR_PROTO_SAS_STP_ATAPI:
1076 res->ata_class = ATA_DEV_ATAPI;
1077 break;
1078 default:
1079 res->ata_class = ATA_DEV_UNKNOWN;
1080 break;
1081	}
1082}
1083
1084/**
1085 * ipr_init_res_entry - Initialize a resource entry struct.
1086 * @res: resource entry struct
1087 * @cfgtew: config table entry wrapper struct
1088 *
1089 * Return value:
1090 * none
1091 **/
1092static void ipr_init_res_entry(struct ipr_resource_entry *res,
1093 struct ipr_config_table_entry_wrapper *cfgtew)
1094{
1095 int found = 0;
1096 unsigned int proto;
1097 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1098 struct ipr_resource_entry *gscsi_res = NULL;
1099
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06001100 res->needs_sync_complete = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001101 res->in_erp = 0;
1102 res->add_to_ml = 0;
1103 res->del_from_ml = 0;
1104 res->resetting_device = 0;
1105 res->sdev = NULL;
Brian King35a39692006-09-25 12:39:20 -05001106 res->sata_port = NULL;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001107
1108 if (ioa_cfg->sis64) {
1109 proto = cfgtew->u.cfgte64->proto;
1110 res->res_flags = cfgtew->u.cfgte64->res_flags;
1111 res->qmodel = IPR_QUEUEING_MODEL64(res);
Wayne Boyer438b0332010-05-10 09:13:00 -07001112 res->type = cfgtew->u.cfgte64->res_type;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001113
1114 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1115 sizeof(res->res_path));
1116
1117 res->bus = 0;
Wayne Boyer0cb992e2010-11-04 09:35:58 -07001118 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1119 sizeof(res->dev_lun.scsi_lun));
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001120 res->lun = scsilun_to_int(&res->dev_lun);
1121
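		/* SIS-64 target numbering, as the branches below read: generic SCSI
		 * devices sharing a dev_id reuse an existing target id, while arrays,
		 * volume sets and everything else take the next free bit from their
		 * respective id bitmaps. */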
1122 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1123 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1124 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1125 found = 1;
1126 res->target = gscsi_res->target;
1127 break;
1128 }
1129 }
1130 if (!found) {
1131 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1132 ioa_cfg->max_devs_supported);
1133 set_bit(res->target, ioa_cfg->target_ids);
1134 }
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001135 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1136 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1137 res->target = 0;
1138 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1139 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1140 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1141 ioa_cfg->max_devs_supported);
1142 set_bit(res->target, ioa_cfg->array_ids);
1143 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1144 res->bus = IPR_VSET_VIRTUAL_BUS;
1145 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1146 ioa_cfg->max_devs_supported);
1147 set_bit(res->target, ioa_cfg->vset_ids);
1148 } else {
1149 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1150 ioa_cfg->max_devs_supported);
1151 set_bit(res->target, ioa_cfg->target_ids);
1152 }
1153 } else {
1154 proto = cfgtew->u.cfgte->proto;
1155 res->qmodel = IPR_QUEUEING_MODEL(res);
1156 res->flags = cfgtew->u.cfgte->flags;
1157 if (res->flags & IPR_IS_IOA_RESOURCE)
1158 res->type = IPR_RES_TYPE_IOAFP;
1159 else
1160 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1161
1162 res->bus = cfgtew->u.cfgte->res_addr.bus;
1163 res->target = cfgtew->u.cfgte->res_addr.target;
1164 res->lun = cfgtew->u.cfgte->res_addr.lun;
Wayne Boyer46d74562010-08-11 07:15:17 -07001165 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001166 }
1167
1168 ipr_update_ata_class(res, proto);
1169}
1170
1171/**
1172 * ipr_is_same_device - Determine if two devices are the same.
1173 * @res: resource entry struct
1174 * @cfgtew: config table entry wrapper struct
1175 *
1176 * Return value:
1177 * 1 if the devices are the same / 0 otherwise
1178 **/
1179static int ipr_is_same_device(struct ipr_resource_entry *res,
1180 struct ipr_config_table_entry_wrapper *cfgtew)
1181{
1182 if (res->ioa_cfg->sis64) {
1183 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1184 sizeof(cfgtew->u.cfgte64->dev_id)) &&
Wayne Boyer0cb992e2010-11-04 09:35:58 -07001185 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001186 sizeof(cfgtew->u.cfgte64->lun))) {
1187 return 1;
1188 }
1189 } else {
1190 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1191 res->target == cfgtew->u.cfgte->res_addr.target &&
1192 res->lun == cfgtew->u.cfgte->res_addr.lun)
1193 return 1;
1194 }
1195
1196 return 0;
1197}
1198
1199/**
Brian Kingb3b3b402013-01-11 17:43:49 -06001200 * __ipr_format_res_path - Format the resource path for printing.
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001201 * @res_path: resource path
1202 * @buffer: buffer to format the resource path into
Brian Kingb3b3b402013-01-11 17:43:49 -06001203 * @len: length of buffer provided
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001204 *
1205 * Return value:
1206 * pointer to buffer
1207 **/
Brian Kingb3b3b402013-01-11 17:43:49 -06001208static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001209{
1210 int i;
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07001211 char *p = buffer;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001212
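	/* Renders the path as dash-separated hex bytes, e.g. "00-02-01",
	 * stopping at the 0xff terminator or when the buffer space runs out
	 * (the example value is illustrative, not from the original comments). */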
Wayne Boyer46d74562010-08-11 07:15:17 -07001213 *p = '\0';
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07001214 p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1215 for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1216 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001217
1218 return buffer;
1219}
1220
1221/**
Brian Kingb3b3b402013-01-11 17:43:49 -06001222 * ipr_format_res_path - Format the resource path for printing.
1223 * @ioa_cfg: ioa config struct
1224 * @res_path: resource path
1225 * @buffer: buffer to format the resource path into
1226 * @len: length of buffer provided
1227 *
1228 * Return value:
1229 * pointer to buffer
1230 **/
1231static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1232 u8 *res_path, char *buffer, int len)
1233{
1234 char *p = buffer;
1235
1236 *p = '\0';
1237 p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1238	__ipr_format_res_path(res_path, p, len - (p - buffer));
1239 return buffer;
1240}
1241
1242/**
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001243 * ipr_update_res_entry - Update the resource entry.
1244 * @res: resource entry struct
1245 * @cfgtew: config table entry wrapper struct
1246 *
1247 * Return value:
1248 * none
1249 **/
1250static void ipr_update_res_entry(struct ipr_resource_entry *res,
1251 struct ipr_config_table_entry_wrapper *cfgtew)
1252{
1253 char buffer[IPR_MAX_RES_PATH_LENGTH];
1254 unsigned int proto;
1255 int new_path = 0;
1256
1257 if (res->ioa_cfg->sis64) {
1258 res->flags = cfgtew->u.cfgte64->flags;
1259 res->res_flags = cfgtew->u.cfgte64->res_flags;
Wayne Boyer75576bb2010-07-14 10:50:14 -07001260 res->type = cfgtew->u.cfgte64->res_type;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001261
1262 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1263 sizeof(struct ipr_std_inq_data));
1264
1265 res->qmodel = IPR_QUEUEING_MODEL64(res);
1266 proto = cfgtew->u.cfgte64->proto;
1267 res->res_handle = cfgtew->u.cfgte64->res_handle;
1268 res->dev_id = cfgtew->u.cfgte64->dev_id;
1269
1270 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1271 sizeof(res->dev_lun.scsi_lun));
1272
1273 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1274 sizeof(res->res_path))) {
1275 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1276 sizeof(res->res_path));
1277 new_path = 1;
1278 }
1279
1280 if (res->sdev && new_path)
1281 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06001282 ipr_format_res_path(res->ioa_cfg,
1283 res->res_path, buffer, sizeof(buffer)));
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001284 } else {
1285 res->flags = cfgtew->u.cfgte->flags;
1286 if (res->flags & IPR_IS_IOA_RESOURCE)
1287 res->type = IPR_RES_TYPE_IOAFP;
1288 else
1289 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1290
1291 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1292 sizeof(struct ipr_std_inq_data));
1293
1294 res->qmodel = IPR_QUEUEING_MODEL(res);
1295 proto = cfgtew->u.cfgte->proto;
1296 res->res_handle = cfgtew->u.cfgte->res_handle;
1297 }
1298
1299 ipr_update_ata_class(res, proto);
1300}
1301
1302/**
1303 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1304 * for the resource.
1305 * @res: resource entry struct
1307 *
1308 * Return value:
1309 * none
1310 **/
1311static void ipr_clear_res_target(struct ipr_resource_entry *res)
1312{
1313 struct ipr_resource_entry *gscsi_res = NULL;
1314 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1315
1316 if (!ioa_cfg->sis64)
1317 return;
1318
1319 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1320 clear_bit(res->target, ioa_cfg->array_ids);
1321 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1322 clear_bit(res->target, ioa_cfg->vset_ids);
1323 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1324 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1325 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1326 return;
1327 clear_bit(res->target, ioa_cfg->target_ids);
1328
1329 } else if (res->bus == 0)
1330 clear_bit(res->target, ioa_cfg->target_ids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001331}
1332
1333/**
1334 * ipr_handle_config_change - Handle a config change from the adapter
1335 * @ioa_cfg: ioa config struct
1336 * @hostrcb: hostrcb
1337 *
1338 * Return value:
1339 * none
1340 **/
1341static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001342 struct ipr_hostrcb *hostrcb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343{
1344 struct ipr_resource_entry *res = NULL;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001345 struct ipr_config_table_entry_wrapper cfgtew;
1346 __be32 cc_res_handle;
1347
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348 u32 is_ndn = 1;
1349
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001350 if (ioa_cfg->sis64) {
1351 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1352 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1353 } else {
1354 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1355 cc_res_handle = cfgtew.u.cfgte->res_handle;
1356 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357
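	/* See whether the notified resource handle matches something we already
	 * track; is_ndn stays set (a new device notification, presumably) when
	 * no match is found. */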
1358 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001359 if (res->res_handle == cc_res_handle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360 is_ndn = 0;
1361 break;
1362 }
1363 }
1364
1365 if (is_ndn) {
1366 if (list_empty(&ioa_cfg->free_res_q)) {
1367 ipr_send_hcam(ioa_cfg,
1368 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1369 hostrcb);
1370 return;
1371 }
1372
1373 res = list_entry(ioa_cfg->free_res_q.next,
1374 struct ipr_resource_entry, queue);
1375
1376 list_del(&res->queue);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001377 ipr_init_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1379 }
1380
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001381 ipr_update_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382
1383 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1384 if (res->sdev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385 res->del_from_ml = 1;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001386 res->res_handle = IPR_INVALID_RES_HANDLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001387 if (ioa_cfg->allow_ml_add_del)
1388 schedule_work(&ioa_cfg->work_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001389 } else {
1390 ipr_clear_res_target(res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001391 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001392 }
Kleber Sacilotto de Souza5767a1c2011-02-14 20:19:31 -02001393 } else if (!res->sdev || res->del_from_ml) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394 res->add_to_ml = 1;
1395 if (ioa_cfg->allow_ml_add_del)
1396 schedule_work(&ioa_cfg->work_q);
1397 }
1398
1399 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1400}
1401
1402/**
1403 * ipr_process_ccn - Op done function for a CCN.
1404 * @ipr_cmd: ipr command struct
1405 *
1406 * This function is the op done function for a configuration
1407 * change notification host controlled async from the adapter.
1408 *
1409 * Return value:
1410 * none
1411 **/
1412static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1413{
1414 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1415 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07001416 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001417
1418 list_del(&hostrcb->queue);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001419 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001420
1421 if (ioasc) {
1422 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1423 dev_err(&ioa_cfg->pdev->dev,
1424 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1425
1426 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1427 } else {
1428 ipr_handle_config_change(ioa_cfg, hostrcb);
1429 }
1430}
1431
1432/**
Brian King8cf093e2007-04-26 16:00:14 -05001433 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1434 * @i: index of the last character of the field in buf
1435 * @buf: string to modify
1436 *
1437 * This function will strip all trailing whitespace, pad the end
1438 * of the string with a single space, and NULL terminate the string.
1439 *
1440 * Return value:
1441 * new length of string
1442 **/
1443static int strip_and_pad_whitespace(int i, char *buf)
1444{
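	/* Worked example (not from the original comments): with buf = "IBM     "
	 * and i indexing the last byte of the field, the loop backs up to the 'M',
	 * the buffer becomes "IBM \0", and the returned index is that of the
	 * terminator, which is where the caller copies the next field. */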
1445 while (i && buf[i] == ' ')
1446 i--;
1447 buf[i+1] = ' ';
1448 buf[i+2] = '\0';
1449 return i + 2;
1450}
1451
1452/**
1453 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1454 * @prefix: string to print at start of printk
1455 * @hostrcb: hostrcb pointer
1456 * @vpd: vendor/product id/sn struct
1457 *
1458 * Return value:
1459 * none
1460 **/
1461static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1462 struct ipr_vpd *vpd)
1463{
1464 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1465 int i = 0;
1466
1467 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1468 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1469
1470 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1471 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1472
1473 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1474 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1475
1476 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1477}
1478
1479/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480 * ipr_log_vpd - Log the passed VPD to the error log.
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001481 * @vpd: vendor/product id/sn struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482 *
1483 * Return value:
1484 * none
1485 **/
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001486static void ipr_log_vpd(struct ipr_vpd *vpd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487{
1488 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1489 + IPR_SERIAL_NUM_LEN];
1490
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001491 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1492 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493 IPR_PROD_ID_LEN);
1494 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1495 ipr_err("Vendor/Product ID: %s\n", buffer);
1496
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001497 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1499 ipr_err(" Serial Number: %s\n", buffer);
1500}
1501
1502/**
Brian King8cf093e2007-04-26 16:00:14 -05001503 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1504 * @prefix: string to print at start of printk
1505 * @hostrcb: hostrcb pointer
1506 * @vpd: vendor/product id/sn/wwn struct
1507 *
1508 * Return value:
1509 * none
1510 **/
1511static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1512 struct ipr_ext_vpd *vpd)
1513{
1514 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1515 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1516 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1517}
1518
1519/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001520 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1521 * @vpd: vendor/product id/sn/wwn struct
1522 *
1523 * Return value:
1524 * none
1525 **/
1526static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1527{
1528 ipr_log_vpd(&vpd->vpd);
1529 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1530 be32_to_cpu(vpd->wwid[1]));
1531}
1532
1533/**
1534 * ipr_log_enhanced_cache_error - Log a cache error.
1535 * @ioa_cfg: ioa config struct
1536 * @hostrcb: hostrcb struct
1537 *
1538 * Return value:
1539 * none
1540 **/
1541static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1542 struct ipr_hostrcb *hostrcb)
1543{
Wayne Boyer4565e372010-02-19 13:24:07 -08001544 struct ipr_hostrcb_type_12_error *error;
1545
1546 if (ioa_cfg->sis64)
1547 error = &hostrcb->hcam.u.error64.u.type_12_error;
1548 else
1549 error = &hostrcb->hcam.u.error.u.type_12_error;
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001550
1551 ipr_err("-----Current Configuration-----\n");
1552 ipr_err("Cache Directory Card Information:\n");
1553 ipr_log_ext_vpd(&error->ioa_vpd);
1554 ipr_err("Adapter Card Information:\n");
1555 ipr_log_ext_vpd(&error->cfc_vpd);
1556
1557 ipr_err("-----Expected Configuration-----\n");
1558 ipr_err("Cache Directory Card Information:\n");
1559 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1560 ipr_err("Adapter Card Information:\n");
1561 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1562
1563 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1564 be32_to_cpu(error->ioa_data[0]),
1565 be32_to_cpu(error->ioa_data[1]),
1566 be32_to_cpu(error->ioa_data[2]));
1567}
1568
1569/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570 * ipr_log_cache_error - Log a cache error.
1571 * @ioa_cfg: ioa config struct
1572 * @hostrcb: hostrcb struct
1573 *
1574 * Return value:
1575 * none
1576 **/
1577static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1578 struct ipr_hostrcb *hostrcb)
1579{
1580 struct ipr_hostrcb_type_02_error *error =
1581 &hostrcb->hcam.u.error.u.type_02_error;
1582
1583 ipr_err("-----Current Configuration-----\n");
1584 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001585 ipr_log_vpd(&error->ioa_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001587 ipr_log_vpd(&error->cfc_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588
1589 ipr_err("-----Expected Configuration-----\n");
1590 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001591 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001593 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594
1595 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1596 be32_to_cpu(error->ioa_data[0]),
1597 be32_to_cpu(error->ioa_data[1]),
1598 be32_to_cpu(error->ioa_data[2]));
1599}
1600
1601/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001602 * ipr_log_enhanced_config_error - Log a configuration error.
1603 * @ioa_cfg: ioa config struct
1604 * @hostrcb: hostrcb struct
1605 *
1606 * Return value:
1607 * none
1608 **/
1609static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1610 struct ipr_hostrcb *hostrcb)
1611{
1612 int errors_logged, i;
1613 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1614 struct ipr_hostrcb_type_13_error *error;
1615
1616 error = &hostrcb->hcam.u.error.u.type_13_error;
1617 errors_logged = be32_to_cpu(error->errors_logged);
1618
1619 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1620 be32_to_cpu(error->errors_detected), errors_logged);
1621
1622 dev_entry = error->dev;
1623
1624 for (i = 0; i < errors_logged; i++, dev_entry++) {
1625 ipr_err_separator;
1626
1627 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1628 ipr_log_ext_vpd(&dev_entry->vpd);
1629
1630 ipr_err("-----New Device Information-----\n");
1631 ipr_log_ext_vpd(&dev_entry->new_vpd);
1632
1633 ipr_err("Cache Directory Card Information:\n");
1634 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1635
1636 ipr_err("Adapter Card Information:\n");
1637 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1638 }
1639}
1640
1641/**
Wayne Boyer4565e372010-02-19 13:24:07 -08001642 * ipr_log_sis64_config_error - Log a sis64 configuration error.
1643 * @ioa_cfg: ioa config struct
1644 * @hostrcb: hostrcb struct
1645 *
1646 * Return value:
1647 * none
1648 **/
1649static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1650 struct ipr_hostrcb *hostrcb)
1651{
1652 int errors_logged, i;
1653 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1654 struct ipr_hostrcb_type_23_error *error;
1655 char buffer[IPR_MAX_RES_PATH_LENGTH];
1656
1657 error = &hostrcb->hcam.u.error64.u.type_23_error;
1658 errors_logged = be32_to_cpu(error->errors_logged);
1659
1660 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1661 be32_to_cpu(error->errors_detected), errors_logged);
1662
1663 dev_entry = error->dev;
1664
1665 for (i = 0; i < errors_logged; i++, dev_entry++) {
1666 ipr_err_separator;
1667
1668 ipr_err("Device %d : %s", i + 1,
Brian Kingb3b3b402013-01-11 17:43:49 -06001669 __ipr_format_res_path(dev_entry->res_path,
1670 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08001671 ipr_log_ext_vpd(&dev_entry->vpd);
1672
1673 ipr_err("-----New Device Information-----\n");
1674 ipr_log_ext_vpd(&dev_entry->new_vpd);
1675
1676 ipr_err("Cache Directory Card Information:\n");
1677 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1678
1679 ipr_err("Adapter Card Information:\n");
1680 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1681 }
1682}
1683
1684/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685 * ipr_log_config_error - Log a configuration error.
1686 * @ioa_cfg: ioa config struct
1687 * @hostrcb: hostrcb struct
1688 *
1689 * Return value:
1690 * none
1691 **/
1692static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1693 struct ipr_hostrcb *hostrcb)
1694{
1695 int errors_logged, i;
1696 struct ipr_hostrcb_device_data_entry *dev_entry;
1697 struct ipr_hostrcb_type_03_error *error;
1698
1699 error = &hostrcb->hcam.u.error.u.type_03_error;
1700 errors_logged = be32_to_cpu(error->errors_logged);
1701
1702 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1703 be32_to_cpu(error->errors_detected), errors_logged);
1704
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001705 dev_entry = error->dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706
1707 for (i = 0; i < errors_logged; i++, dev_entry++) {
1708 ipr_err_separator;
1709
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001710 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001711 ipr_log_vpd(&dev_entry->vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712
1713 ipr_err("-----New Device Information-----\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001714 ipr_log_vpd(&dev_entry->new_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715
1716 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001717 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718
1719 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001720 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721
1722 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1723 be32_to_cpu(dev_entry->ioa_data[0]),
1724 be32_to_cpu(dev_entry->ioa_data[1]),
1725 be32_to_cpu(dev_entry->ioa_data[2]),
1726 be32_to_cpu(dev_entry->ioa_data[3]),
1727 be32_to_cpu(dev_entry->ioa_data[4]));
1728 }
1729}
1730
1731/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001732 * ipr_log_enhanced_array_error - Log an array configuration error.
1733 * @ioa_cfg: ioa config struct
1734 * @hostrcb: hostrcb struct
1735 *
1736 * Return value:
1737 * none
1738 **/
1739static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1740 struct ipr_hostrcb *hostrcb)
1741{
1742 int i, num_entries;
1743 struct ipr_hostrcb_type_14_error *error;
1744 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1745 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1746
1747 error = &hostrcb->hcam.u.error.u.type_14_error;
1748
1749 ipr_err_separator;
1750
1751 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1752 error->protection_level,
1753 ioa_cfg->host->host_no,
1754 error->last_func_vset_res_addr.bus,
1755 error->last_func_vset_res_addr.target,
1756 error->last_func_vset_res_addr.lun);
1757
1758 ipr_err_separator;
1759
1760 array_entry = error->array_member;
1761 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
Wayne Boyer72620262010-09-27 10:45:28 -07001762 ARRAY_SIZE(error->array_member));
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001763
1764 for (i = 0; i < num_entries; i++, array_entry++) {
1765 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1766 continue;
1767
1768 if (be32_to_cpu(error->exposed_mode_adn) == i)
1769 ipr_err("Exposed Array Member %d:\n", i);
1770 else
1771 ipr_err("Array Member %d:\n", i);
1772
1773 ipr_log_ext_vpd(&array_entry->vpd);
1774 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1775 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1776 "Expected Location");
1777
1778 ipr_err_separator;
1779 }
1780}
1781
1782/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783 * ipr_log_array_error - Log an array configuration error.
1784 * @ioa_cfg: ioa config struct
1785 * @hostrcb: hostrcb struct
1786 *
1787 * Return value:
1788 * none
1789 **/
1790static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1791 struct ipr_hostrcb *hostrcb)
1792{
1793 int i;
1794 struct ipr_hostrcb_type_04_error *error;
1795 struct ipr_hostrcb_array_data_entry *array_entry;
1796 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1797
1798 error = &hostrcb->hcam.u.error.u.type_04_error;
1799
1800 ipr_err_separator;
1801
1802 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1803 error->protection_level,
1804 ioa_cfg->host->host_no,
1805 error->last_func_vset_res_addr.bus,
1806 error->last_func_vset_res_addr.target,
1807 error->last_func_vset_res_addr.lun);
1808
1809 ipr_err_separator;
1810
1811 array_entry = error->array_member;
1812
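	/* The legacy layout splits the 18 possible members across two fixed
	 * arrays: entries 0-9 live in array_member and 10-17 in array_member2,
	 * hence the switch at i == 9 below (a reading of the struct usage). */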
1813 for (i = 0; i < 18; i++) {
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001814 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815 continue;
1816
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001817 if (be32_to_cpu(error->exposed_mode_adn) == i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818 ipr_err("Exposed Array Member %d:\n", i);
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001819 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820 ipr_err("Array Member %d:\n", i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001822 ipr_log_vpd(&array_entry->vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001824 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1825 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1826 "Expected Location");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827
1828 ipr_err_separator;
1829
1830 if (i == 9)
1831 array_entry = error->array_member2;
1832 else
1833 array_entry++;
1834 }
1835}
1836
1837/**
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001838 * ipr_log_hex_data - Log additional hex IOA error data.
Brian Kingac719ab2006-11-21 10:28:42 -06001839 * @ioa_cfg: ioa config struct
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001840 * @data: IOA error data
1841 * @len: data length
1842 *
1843 * Return value:
1844 * none
1845 **/
Brian Kingac719ab2006-11-21 10:28:42 -06001846static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001847{
1848 int i;
1849
1850 if (len == 0)
1851 return;
1852
Brian Kingac719ab2006-11-21 10:28:42 -06001853 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1854 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1855
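	/* Four 32-bit words per line; i counts words, so the leading i*4 is the
	 * byte offset of the first word on each line. */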
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001856 for (i = 0; i < len / 4; i += 4) {
1857 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1858 be32_to_cpu(data[i]),
1859 be32_to_cpu(data[i+1]),
1860 be32_to_cpu(data[i+2]),
1861 be32_to_cpu(data[i+3]));
1862 }
1863}
1864
1865/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001866 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1867 * @ioa_cfg: ioa config struct
1868 * @hostrcb: hostrcb struct
1869 *
1870 * Return value:
1871 * none
1872 **/
1873static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1874 struct ipr_hostrcb *hostrcb)
1875{
1876 struct ipr_hostrcb_type_17_error *error;
1877
Wayne Boyer4565e372010-02-19 13:24:07 -08001878 if (ioa_cfg->sis64)
1879 error = &hostrcb->hcam.u.error64.u.type_17_error;
1880 else
1881 error = &hostrcb->hcam.u.error.u.type_17_error;
1882
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001883 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
KOSAKI Motohiroca54cb82009-12-14 18:01:15 -08001884 strim(error->failure_reason);
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001885
Brian King8cf093e2007-04-26 16:00:14 -05001886 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1887 be32_to_cpu(hostrcb->hcam.u.error.prc));
1888 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
Brian Kingac719ab2006-11-21 10:28:42 -06001889 ipr_log_hex_data(ioa_cfg, error->data,
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001890 be32_to_cpu(hostrcb->hcam.length) -
1891 (offsetof(struct ipr_hostrcb_error, u) +
1892 offsetof(struct ipr_hostrcb_type_17_error, data)));
1893}
1894
1895/**
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001896 * ipr_log_dual_ioa_error - Log a dual adapter error.
1897 * @ioa_cfg: ioa config struct
1898 * @hostrcb: hostrcb struct
1899 *
1900 * Return value:
1901 * none
1902 **/
1903static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1904 struct ipr_hostrcb *hostrcb)
1905{
1906 struct ipr_hostrcb_type_07_error *error;
1907
1908 error = &hostrcb->hcam.u.error.u.type_07_error;
1909 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
KOSAKI Motohiroca54cb82009-12-14 18:01:15 -08001910 strim(error->failure_reason);
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001911
Brian King8cf093e2007-04-26 16:00:14 -05001912 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1913 be32_to_cpu(hostrcb->hcam.u.error.prc));
1914 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
Brian Kingac719ab2006-11-21 10:28:42 -06001915 ipr_log_hex_data(ioa_cfg, error->data,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001916 be32_to_cpu(hostrcb->hcam.length) -
1917 (offsetof(struct ipr_hostrcb_error, u) +
1918 offsetof(struct ipr_hostrcb_type_07_error, data)));
1919}
1920
Brian King49dc6a12006-11-21 10:28:35 -06001921static const struct {
1922 u8 active;
1923 char *desc;
1924} path_active_desc[] = {
1925 { IPR_PATH_NO_INFO, "Path" },
1926 { IPR_PATH_ACTIVE, "Active path" },
1927 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1928};
1929
1930static const struct {
1931 u8 state;
1932 char *desc;
1933} path_state_desc[] = {
1934 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1935 { IPR_PATH_HEALTHY, "is healthy" },
1936 { IPR_PATH_DEGRADED, "is degraded" },
1937 { IPR_PATH_FAILED, "is failed" }
1938};
1939
1940/**
1941 * ipr_log_fabric_path - Log a fabric path error
1942 * @hostrcb: hostrcb struct
1943 * @fabric: fabric descriptor
1944 *
1945 * Return value:
1946 * none
1947 **/
1948static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1949 struct ipr_hostrcb_fabric_desc *fabric)
1950{
1951 int i, j;
1952 u8 path_state = fabric->path_state;
1953 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1954 u8 state = path_state & IPR_PATH_STATE_MASK;
1955
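	/* A value of 0xff in the cascade or phy field seems to mean "not
	 * reported", so the messages below only include the addressing pieces
	 * that are actually valid. */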
1956 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1957 if (path_active_desc[i].active != active)
1958 continue;
1959
1960 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1961 if (path_state_desc[j].state != state)
1962 continue;
1963
1964 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1965 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1966 path_active_desc[i].desc, path_state_desc[j].desc,
1967 fabric->ioa_port);
1968 } else if (fabric->cascaded_expander == 0xff) {
1969 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1970 path_active_desc[i].desc, path_state_desc[j].desc,
1971 fabric->ioa_port, fabric->phy);
1972 } else if (fabric->phy == 0xff) {
1973 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1974 path_active_desc[i].desc, path_state_desc[j].desc,
1975 fabric->ioa_port, fabric->cascaded_expander);
1976 } else {
1977 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1978 path_active_desc[i].desc, path_state_desc[j].desc,
1979 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1980 }
1981 return;
1982 }
1983 }
1984
1985 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1986 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1987}
1988
Wayne Boyer4565e372010-02-19 13:24:07 -08001989/**
1990 * ipr_log64_fabric_path - Log a fabric path error
1991 * @hostrcb: hostrcb struct
1992 * @fabric: fabric descriptor
1993 *
1994 * Return value:
1995 * none
1996 **/
1997static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1998 struct ipr_hostrcb64_fabric_desc *fabric)
1999{
2000 int i, j;
2001 u8 path_state = fabric->path_state;
2002 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2003 u8 state = path_state & IPR_PATH_STATE_MASK;
2004 char buffer[IPR_MAX_RES_PATH_LENGTH];
2005
2006 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2007 if (path_active_desc[i].active != active)
2008 continue;
2009
2010 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2011 if (path_state_desc[j].state != state)
2012 continue;
2013
2014 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2015 path_active_desc[i].desc, path_state_desc[j].desc,
Brian Kingb3b3b402013-01-11 17:43:49 -06002016 ipr_format_res_path(hostrcb->ioa_cfg,
2017 fabric->res_path,
2018 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002019 return;
2020 }
2021 }
2022
2023 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
Brian Kingb3b3b402013-01-11 17:43:49 -06002024 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2025 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002026}
2027
Brian King49dc6a12006-11-21 10:28:35 -06002028static const struct {
2029 u8 type;
2030 char *desc;
2031} path_type_desc[] = {
2032 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2033 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2034 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2035 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2036};
2037
2038static const struct {
2039 u8 status;
2040 char *desc;
2041} path_status_desc[] = {
2042 { IPR_PATH_CFG_NO_PROB, "Functional" },
2043 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2044 { IPR_PATH_CFG_FAILED, "Failed" },
2045 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2046 { IPR_PATH_NOT_DETECTED, "Missing" },
2047 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2048};
2049
2050static const char *link_rate[] = {
2051 "unknown",
2052 "disabled",
2053 "phy reset problem",
2054 "spinup hold",
2055 "port selector",
2056 "unknown",
2057 "unknown",
2058 "unknown",
2059 "1.5Gbps",
2060 "3.0Gbps",
2061 "unknown",
2062 "unknown",
2063 "unknown",
2064 "unknown",
2065 "unknown",
2066 "unknown"
2067};
2068
2069/**
2070 * ipr_log_path_elem - Log a fabric path element.
2071 * @hostrcb: hostrcb struct
2072 * @cfg: fabric path element struct
2073 *
2074 * Return value:
2075 * none
2076 **/
2077static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2078 struct ipr_hostrcb_config_element *cfg)
2079{
2080 int i, j;
2081 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2082 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2083
2084 if (type == IPR_PATH_CFG_NOT_EXIST)
2085 return;
2086
2087 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2088 if (path_type_desc[i].type != type)
2089 continue;
2090
2091 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2092 if (path_status_desc[j].status != status)
2093 continue;
2094
2095 if (type == IPR_PATH_CFG_IOA_PORT) {
2096 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2097 path_status_desc[j].desc, path_type_desc[i].desc,
2098 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2099 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2100 } else {
2101 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2102 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2103 path_status_desc[j].desc, path_type_desc[i].desc,
2104 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2105 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2106 } else if (cfg->cascaded_expander == 0xff) {
2107 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2108 "WWN=%08X%08X\n", path_status_desc[j].desc,
2109 path_type_desc[i].desc, cfg->phy,
2110 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2111 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2112 } else if (cfg->phy == 0xff) {
2113 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2114 "WWN=%08X%08X\n", path_status_desc[j].desc,
2115 path_type_desc[i].desc, cfg->cascaded_expander,
2116 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2117 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2118 } else {
2119 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2120 "WWN=%08X%08X\n", path_status_desc[j].desc,
2121 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2122 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2123 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2124 }
2125 }
2126 return;
2127 }
2128 }
2129
2130 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2131 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2132 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2133 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2134}
2135
2136/**
Wayne Boyer4565e372010-02-19 13:24:07 -08002137 * ipr_log64_path_elem - Log a fabric path element.
2138 * @hostrcb: hostrcb struct
2139 * @cfg: fabric path element struct
2140 *
2141 * Return value:
2142 * none
2143 **/
2144static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2145 struct ipr_hostrcb64_config_element *cfg)
2146{
2147 int i, j;
2148 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2149 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2150 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2151 char buffer[IPR_MAX_RES_PATH_LENGTH];
2152
2153 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2154 return;
2155
2156 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2157 if (path_type_desc[i].type != type)
2158 continue;
2159
2160 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2161 if (path_status_desc[j].status != status)
2162 continue;
2163
2164 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2165 path_status_desc[j].desc, path_type_desc[i].desc,
Brian Kingb3b3b402013-01-11 17:43:49 -06002166 ipr_format_res_path(hostrcb->ioa_cfg,
2167 cfg->res_path, buffer, sizeof(buffer)),
2168 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2169 be32_to_cpu(cfg->wwid[0]),
2170 be32_to_cpu(cfg->wwid[1]));
Wayne Boyer4565e372010-02-19 13:24:07 -08002171 return;
2172 }
2173 }
2174 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2175 "WWN=%08X%08X\n", cfg->type_status,
Brian Kingb3b3b402013-01-11 17:43:49 -06002176 ipr_format_res_path(hostrcb->ioa_cfg,
2177 cfg->res_path, buffer, sizeof(buffer)),
2178 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2179 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
Wayne Boyer4565e372010-02-19 13:24:07 -08002180}
2181
2182/**
Brian King49dc6a12006-11-21 10:28:35 -06002183 * ipr_log_fabric_error - Log a fabric error.
2184 * @ioa_cfg: ioa config struct
2185 * @hostrcb: hostrcb struct
2186 *
2187 * Return value:
2188 * none
2189 **/
2190static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2191 struct ipr_hostrcb *hostrcb)
2192{
2193 struct ipr_hostrcb_type_20_error *error;
2194 struct ipr_hostrcb_fabric_desc *fabric;
2195 struct ipr_hostrcb_config_element *cfg;
2196 int i, add_len;
2197
2198 error = &hostrcb->hcam.u.error.u.type_20_error;
2199 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2200 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2201
2202 add_len = be32_to_cpu(hostrcb->hcam.length) -
2203 (offsetof(struct ipr_hostrcb_error, u) +
2204 offsetof(struct ipr_hostrcb_type_20_error, desc));
2205
2206 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2207 ipr_log_fabric_path(hostrcb, fabric);
2208 for_each_fabric_cfg(fabric, cfg)
2209 ipr_log_path_elem(hostrcb, cfg);
2210
2211 add_len -= be16_to_cpu(fabric->length);
2212 fabric = (struct ipr_hostrcb_fabric_desc *)
2213 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2214 }
2215
Brian Kingac719ab2006-11-21 10:28:42 -06002216 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
Brian King49dc6a12006-11-21 10:28:35 -06002217}
2218
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002219/**
Wayne Boyer4565e372010-02-19 13:24:07 -08002220 * ipr_log_sis64_array_error - Log a sis64 array error.
2221 * @ioa_cfg: ioa config struct
2222 * @hostrcb: hostrcb struct
2223 *
2224 * Return value:
2225 * none
2226 **/
2227static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2228 struct ipr_hostrcb *hostrcb)
2229{
2230 int i, num_entries;
2231 struct ipr_hostrcb_type_24_error *error;
2232 struct ipr_hostrcb64_array_data_entry *array_entry;
2233 char buffer[IPR_MAX_RES_PATH_LENGTH];
2234 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2235
2236 error = &hostrcb->hcam.u.error64.u.type_24_error;
2237
2238 ipr_err_separator;
2239
2240 ipr_err("RAID %s Array Configuration: %s\n",
2241 error->protection_level,
Brian Kingb3b3b402013-01-11 17:43:49 -06002242 ipr_format_res_path(ioa_cfg, error->last_res_path,
2243 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002244
2245 ipr_err_separator;
2246
2247 array_entry = error->array_member;
Wayne Boyer72620262010-09-27 10:45:28 -07002248 num_entries = min_t(u32, error->num_entries,
2249 ARRAY_SIZE(error->array_member));
Wayne Boyer4565e372010-02-19 13:24:07 -08002250
2251 for (i = 0; i < num_entries; i++, array_entry++) {
2252
2253 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2254 continue;
2255
2256 if (error->exposed_mode_adn == i)
2257 ipr_err("Exposed Array Member %d:\n", i);
2258 else
2259 ipr_err("Array Member %d:\n", i);
2260
2262 ipr_log_ext_vpd(&array_entry->vpd);
Wayne Boyer72620262010-09-27 10:45:28 -07002263 ipr_err("Current Location: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06002264 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2265 buffer, sizeof(buffer)));
Wayne Boyer72620262010-09-27 10:45:28 -07002266 ipr_err("Expected Location: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06002267 ipr_format_res_path(ioa_cfg,
2268 array_entry->expected_res_path,
2269 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002270
2271 ipr_err_separator;
2272 }
2273}
2274
2275/**
2276 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2277 * @ioa_cfg: ioa config struct
2278 * @hostrcb: hostrcb struct
2279 *
2280 * Return value:
2281 * none
2282 **/
2283static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2284 struct ipr_hostrcb *hostrcb)
2285{
2286 struct ipr_hostrcb_type_30_error *error;
2287 struct ipr_hostrcb64_fabric_desc *fabric;
2288 struct ipr_hostrcb64_config_element *cfg;
2289 int i, add_len;
2290
2291 error = &hostrcb->hcam.u.error64.u.type_30_error;
2292
2293 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2294 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2295
2296 add_len = be32_to_cpu(hostrcb->hcam.length) -
2297 (offsetof(struct ipr_hostrcb64_error, u) +
2298 offsetof(struct ipr_hostrcb_type_30_error, desc));
2299
2300 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2301 ipr_log64_fabric_path(hostrcb, fabric);
2302 for_each_fabric_cfg(fabric, cfg)
2303 ipr_log64_path_elem(hostrcb, cfg);
2304
2305 add_len -= be16_to_cpu(fabric->length);
2306 fabric = (struct ipr_hostrcb64_fabric_desc *)
2307 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2308 }
2309
2310 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2311}
2312
2313/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314 * ipr_log_generic_error - Log an adapter error.
2315 * @ioa_cfg: ioa config struct
2316 * @hostrcb: hostrcb struct
2317 *
2318 * Return value:
2319 * none
2320 **/
2321static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2322 struct ipr_hostrcb *hostrcb)
2323{
Brian Kingac719ab2006-11-21 10:28:42 -06002324 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002325 be32_to_cpu(hostrcb->hcam.length));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326}
2327
2328/**
2329 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2330 * @ioasc: IOASC
2331 *
2332 * This function will return the index into the ipr_error_table
2333 * for the specified IOASC. If the IOASC is not in the table,
2334 * 0 will be returned, which points to the entry used for unknown errors.
2335 *
2336 * Return value:
2337 * index into the ipr_error_table
2338 **/
2339static u32 ipr_get_error(u32 ioasc)
2340{
2341 int i;
2342
2343 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
Brian King35a39692006-09-25 12:39:20 -05002344 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002345 return i;
2346
2347 return 0;
2348}
2349
2350/**
2351 * ipr_handle_log_data - Log an adapter error.
2352 * @ioa_cfg: ioa config struct
2353 * @hostrcb: hostrcb struct
2354 *
2355 * This function logs an adapter error to the system.
2356 *
2357 * Return value:
2358 * none
2359 **/
2360static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2361 struct ipr_hostrcb *hostrcb)
2362{
2363 u32 ioasc;
2364 int error_index;
2365
2366 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2367 return;
2368
2369 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2370 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2371
Wayne Boyer4565e372010-02-19 13:24:07 -08002372 if (ioa_cfg->sis64)
2373 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2374 else
2375 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002376
Wayne Boyer4565e372010-02-19 13:24:07 -08002377 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2378 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002379 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2380 scsi_report_bus_reset(ioa_cfg->host,
Wayne Boyer4565e372010-02-19 13:24:07 -08002381 hostrcb->hcam.u.error.fd_res_addr.bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002382 }
2383
2384 error_index = ipr_get_error(ioasc);
2385
2386 if (!ipr_error_table[error_index].log_hcam)
2387 return;
2388
Brian King49dc6a12006-11-21 10:28:35 -06002389 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002390
2391 /* Set indication we have logged an error */
2392 ioa_cfg->errors_logged++;
2393
Brian King933916f2007-03-29 12:43:30 -05002394 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002395 return;
brking@us.ibm.comcf852032005-11-01 17:00:47 -06002396 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2397 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002398
2399 switch (hostrcb->hcam.overlay_id) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002400 case IPR_HOST_RCB_OVERLAY_ID_2:
2401 ipr_log_cache_error(ioa_cfg, hostrcb);
2402 break;
2403 case IPR_HOST_RCB_OVERLAY_ID_3:
2404 ipr_log_config_error(ioa_cfg, hostrcb);
2405 break;
2406 case IPR_HOST_RCB_OVERLAY_ID_4:
2407 case IPR_HOST_RCB_OVERLAY_ID_6:
2408 ipr_log_array_error(ioa_cfg, hostrcb);
2409 break;
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002410 case IPR_HOST_RCB_OVERLAY_ID_7:
2411 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2412 break;
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06002413 case IPR_HOST_RCB_OVERLAY_ID_12:
2414 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2415 break;
2416 case IPR_HOST_RCB_OVERLAY_ID_13:
2417 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2418 break;
2419 case IPR_HOST_RCB_OVERLAY_ID_14:
2420 case IPR_HOST_RCB_OVERLAY_ID_16:
2421 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2422 break;
2423 case IPR_HOST_RCB_OVERLAY_ID_17:
2424 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2425 break;
Brian King49dc6a12006-11-21 10:28:35 -06002426 case IPR_HOST_RCB_OVERLAY_ID_20:
2427 ipr_log_fabric_error(ioa_cfg, hostrcb);
2428 break;
Wayne Boyer4565e372010-02-19 13:24:07 -08002429 case IPR_HOST_RCB_OVERLAY_ID_23:
2430 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2431 break;
2432 case IPR_HOST_RCB_OVERLAY_ID_24:
2433 case IPR_HOST_RCB_OVERLAY_ID_26:
2434 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2435 break;
2436 case IPR_HOST_RCB_OVERLAY_ID_30:
2437 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2438 break;
brking@us.ibm.comcf852032005-11-01 17:00:47 -06002439 case IPR_HOST_RCB_OVERLAY_ID_1:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002440 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002441 default:
brking@us.ibm.coma9cfca92005-11-01 17:00:41 -06002442 ipr_log_generic_error(ioa_cfg, hostrcb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443 break;
2444 }
2445}
2446
2447/**
2448 * ipr_process_error - Op done function for an adapter error log.
2449 * @ipr_cmd: ipr command struct
2450 *
2451 * This function is the op done function for an error log host
2452 * controlled async from the adapter. It will log the error and
2453 * send the HCAM back to the adapter.
2454 *
2455 * Return value:
2456 * none
2457 **/
2458static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2459{
2460 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2461 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07002462 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Wayne Boyer4565e372010-02-19 13:24:07 -08002463 u32 fd_ioasc;
2464
2465 if (ioa_cfg->sis64)
2466 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2467 else
2468 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469
2470 list_del(&hostrcb->queue);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06002471 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472
2473 if (!ioasc) {
2474 ipr_handle_log_data(ioa_cfg, hostrcb);
Brian King65f56472007-04-26 16:00:12 -05002475 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2476 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002477 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2478 dev_err(&ioa_cfg->pdev->dev,
2479 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2480 }
2481
2482 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2483}
2484
2485/**
2486 * ipr_timeout - An internally generated op has timed out.
2487 * @ipr_cmd: ipr command struct
2488 *
2489 * This function blocks host requests and initiates an
2490 * adapter reset.
2491 *
2492 * Return value:
2493 * none
2494 **/
2495static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2496{
2497 unsigned long lock_flags = 0;
2498 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2499
2500 ENTER;
2501 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2502
2503 ioa_cfg->errors_logged++;
2504 dev_err(&ioa_cfg->pdev->dev,
2505 "Adapter being reset due to command timeout.\n");
2506
2507 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2508 ioa_cfg->sdt_state = GET_DUMP;
2509
2510 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2511 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2512
2513 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2514 LEAVE;
2515}
2516
2517/**
2518 * ipr_oper_timeout - Adapter timed out transitioning to operational
2519 * @ipr_cmd: ipr command struct
2520 *
2521 * This function blocks host requests and initiates an
2522 * adapter reset.
2523 *
2524 * Return value:
2525 * none
2526 **/
2527static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2528{
2529 unsigned long lock_flags = 0;
2530 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2531
2532 ENTER;
2533 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2534
2535 ioa_cfg->errors_logged++;
2536 dev_err(&ioa_cfg->pdev->dev,
2537 "Adapter timed out transitioning to operational.\n");
2538
2539 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2540 ioa_cfg->sdt_state = GET_DUMP;
2541
2542 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2543 if (ipr_fastfail)
2544 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2545 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2546 }
2547
2548 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2549 LEAVE;
2550}
2551
2552/**
2553 * ipr_reset_reload - Reset/Reload the IOA
2554 * @ioa_cfg: ioa config struct
2555 * @shutdown_type: shutdown type
2556 *
2557 * This function resets the adapter and re-initializes it.
2558 * This function assumes that all new host commands have been stopped.
2559 * Return value:
2560 * SUCCESS / FAILED
2561 **/
2562static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
2563 enum ipr_shutdown_type shutdown_type)
2564{
2565 if (!ioa_cfg->in_reset_reload)
2566 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
2567
2568 spin_unlock_irq(ioa_cfg->host->host_lock);
2569 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2570 spin_lock_irq(ioa_cfg->host->host_lock);
2571
2572	/* If we got hit with a host reset while we were already resetting
2573	 the adapter for some reason and that reset failed, report failure. */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06002574 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002575 ipr_trace;
2576 return FAILED;
2577 }
2578
2579 return SUCCESS;
2580}
2581
2582/**
2583 * ipr_find_ses_entry - Find matching SES in SES table
2584 * @res: resource entry struct of SES
2585 *
2586 * Return value:
2587 * pointer to SES table entry / NULL on failure
2588 **/
2589static const struct ipr_ses_table_entry *
2590ipr_find_ses_entry(struct ipr_resource_entry *res)
2591{
2592 int i, j, matches;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002593 struct ipr_std_inq_vpids *vpids;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002594 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2595
2596 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2597 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2598 if (ste->compare_product_id_byte[j] == 'X') {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002599 vpids = &res->std_inq_data.vpids;
2600 if (vpids->product_id[j] == ste->product_id[j])
Linus Torvalds1da177e2005-04-16 15:20:36 -07002601 matches++;
2602 else
2603 break;
2604 } else
2605 matches++;
2606 }
2607
2608 if (matches == IPR_PROD_ID_LEN)
2609 return ste;
2610 }
2611
2612 return NULL;
2613}
2614
2615/**
2616 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2617 * @ioa_cfg: ioa config struct
2618 * @bus: SCSI bus
2619 * @bus_width: bus width
2620 *
2621 * Return value:
2622 *	SCSI bus speed in units of 100KHz (e.g. 1600 means 160 MHz).
2623 *	A 2-byte (wide) SCSI bus moves two bytes per transfer, so its
2624 *	maximum data rate in MB/sec is twice the transfer rate
2625 *	(e.g. a wide bus running at 160 MHz can move up to 320 MB/sec).
2626 **/
2627static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2628{
2629 struct ipr_resource_entry *res;
2630 const struct ipr_ses_table_entry *ste;
2631 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2632
2633 /* Loop through each config table entry in the config table buffer */
2634 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002635 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002636 continue;
2637
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002638 if (bus != res->bus)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002639 continue;
2640
2641 if (!(ste = ipr_find_ses_entry(res)))
2642 continue;
2643
2644 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2645 }
2646
2647 return max_xfer_rate;
2648}
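
/*
 * Worked example for the limit applied above (values are illustrative and
 * assume ste->max_bus_speed_limit is expressed in MB/sec, which is what the
 * arithmetic implies): an SES entry limiting a 16-bit wide bus to 160 MB/sec
 * yields (160 * 10) / (16 / 8) = 800, i.e. an 80 MHz transfer rate in the
 * 100KHz units returned by this function.
 */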
2649
2650/**
2651 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2652 * @ioa_cfg: ioa config struct
2653 * @max_delay: max delay in micro-seconds to wait
2654 *
2655 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2656 *
2657 * Return value:
2658 * 0 on success / other on failure
2659 **/
2660static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2661{
2662 volatile u32 pcii_reg;
2663 int delay = 1;
2664
2665 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2666 while (delay < max_delay) {
2667 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2668
2669 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2670 return 0;
2671
2672 /* udelay cannot be used if delay is more than a few milliseconds */
2673 if ((delay / 1000) > MAX_UDELAY_MS)
2674 mdelay(delay / 1000);
2675 else
2676 udelay(delay);
2677
2678 delay += delay;
2679 }
2680 return -EIO;
2681}
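
/*
 * Note: the polling delay above doubles on every pass (1, 2, 4, ... usec),
 * so the total busy-wait before giving up with -EIO is bounded by roughly
 * twice max_delay microseconds.
 */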
2682
2683/**
Wayne Boyerdcbad002010-02-19 13:24:14 -08002684 * ipr_get_sis64_dump_data_section - Dump IOA memory
2685 * @ioa_cfg: ioa config struct
2686 * @start_addr: adapter address to dump
2687 * @dest: destination kernel buffer
2688 * @length_in_words: length to dump in 4 byte words
2689 *
2690 * Return value:
2691 * 0 on success
2692 **/
2693static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2694 u32 start_addr,
2695 __be32 *dest, u32 length_in_words)
2696{
2697 int i;
2698
2699 for (i = 0; i < length_in_words; i++) {
2700 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2701 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2702 dest++;
2703 }
2704
2705 return 0;
2706}
2707
2708/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002709 * ipr_get_ldump_data_section - Dump IOA memory
2710 * @ioa_cfg: ioa config struct
2711 * @start_addr: adapter address to dump
2712 * @dest: destination kernel buffer
2713 * @length_in_words: length to dump in 4 byte words
2714 *
2715 * Return value:
2716 * 0 on success / -EIO on failure
2717 **/
2718static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2719 u32 start_addr,
2720 __be32 *dest, u32 length_in_words)
2721{
2722 volatile u32 temp_pcii_reg;
2723 int i, delay = 0;
2724
Wayne Boyerdcbad002010-02-19 13:24:14 -08002725 if (ioa_cfg->sis64)
2726 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2727 dest, length_in_words);
2728
Linus Torvalds1da177e2005-04-16 15:20:36 -07002729 /* Write IOA interrupt reg starting LDUMP state */
2730 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
Wayne Boyer214777b2010-02-19 13:24:26 -08002731 ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002732
2733 /* Wait for IO debug acknowledge */
2734 if (ipr_wait_iodbg_ack(ioa_cfg,
2735 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2736 dev_err(&ioa_cfg->pdev->dev,
2737 "IOA dump long data transfer timeout\n");
2738 return -EIO;
2739 }
2740
2741 /* Signal LDUMP interlocked - clear IO debug ack */
2742 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2743 ioa_cfg->regs.clr_interrupt_reg);
2744
2745 /* Write Mailbox with starting address */
2746 writel(start_addr, ioa_cfg->ioa_mailbox);
2747
2748 /* Signal address valid - clear IOA Reset alert */
2749 writel(IPR_UPROCI_RESET_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002750 ioa_cfg->regs.clr_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002751
2752 for (i = 0; i < length_in_words; i++) {
2753 /* Wait for IO debug acknowledge */
2754 if (ipr_wait_iodbg_ack(ioa_cfg,
2755 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2756 dev_err(&ioa_cfg->pdev->dev,
2757 "IOA dump short data transfer timeout\n");
2758 return -EIO;
2759 }
2760
2761 /* Read data from mailbox and increment destination pointer */
2762 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2763 dest++;
2764
2765 /* For all but the last word of data, signal data received */
2766 if (i < (length_in_words - 1)) {
2767 /* Signal dump data received - Clear IO debug Ack */
2768 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2769 ioa_cfg->regs.clr_interrupt_reg);
2770 }
2771 }
2772
2773 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2774 writel(IPR_UPROCI_RESET_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002775 ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002776
2777 writel(IPR_UPROCI_IO_DEBUG_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002778 ioa_cfg->regs.clr_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779
2780 /* Signal dump data received - Clear IO debug Ack */
2781 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2782 ioa_cfg->regs.clr_interrupt_reg);
2783
2784 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2785 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2786 temp_pcii_reg =
Wayne Boyer214777b2010-02-19 13:24:26 -08002787 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002788
2789 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2790 return 0;
2791
2792 udelay(10);
2793 delay += 10;
2794 }
2795
2796 return 0;
2797}
2798
2799#ifdef CONFIG_SCSI_IPR_DUMP
2800/**
2801 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2802 * @ioa_cfg: ioa config struct
2803 * @pci_address: adapter address
2804 * @length: length of data to copy
2805 *
2806 * Copy data from PCI adapter to kernel buffer.
2807 * Note: length MUST be a 4 byte multiple
2808 * Return value:
2809 * 0 on success / other on failure
2810 **/
2811static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2812 unsigned long pci_address, u32 length)
2813{
2814 int bytes_copied = 0;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002815 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002816 __be32 *page;
2817 unsigned long lock_flags = 0;
2818 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2819
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002820 if (ioa_cfg->sis64)
2821 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2822 else
2823 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2824
Linus Torvalds1da177e2005-04-16 15:20:36 -07002825 while (bytes_copied < length &&
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002826 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002827 if (ioa_dump->page_offset >= PAGE_SIZE ||
2828 ioa_dump->page_offset == 0) {
2829 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2830
2831 if (!page) {
2832 ipr_trace;
2833 return bytes_copied;
2834 }
2835
2836 ioa_dump->page_offset = 0;
2837 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2838 ioa_dump->next_page_index++;
2839 } else
2840 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2841
2842 rem_len = length - bytes_copied;
2843 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2844 cur_len = min(rem_len, rem_page_len);
2845
2846 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2847 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2848 rc = -EIO;
2849 } else {
2850 rc = ipr_get_ldump_data_section(ioa_cfg,
2851 pci_address + bytes_copied,
2852 &page[ioa_dump->page_offset / 4],
2853 (cur_len / sizeof(u32)));
2854 }
2855 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2856
2857 if (!rc) {
2858 ioa_dump->page_offset += cur_len;
2859 bytes_copied += cur_len;
2860 } else {
2861 ipr_trace;
2862 break;
2863 }
2864 schedule();
2865 }
2866
2867 return bytes_copied;
2868}
2869
2870/**
2871 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2872 * @hdr: dump entry header struct
2873 *
2874 * Return value:
2875 * nothing
2876 **/
2877static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2878{
2879 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2880 hdr->num_elems = 1;
2881 hdr->offset = sizeof(*hdr);
2882 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2883}
2884
2885/**
2886 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2887 * @ioa_cfg: ioa config struct
2888 * @driver_dump: driver dump struct
2889 *
2890 * Return value:
2891 * nothing
2892 **/
2893static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2894 struct ipr_driver_dump *driver_dump)
2895{
2896 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2897
2898 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2899 driver_dump->ioa_type_entry.hdr.len =
2900 sizeof(struct ipr_dump_ioa_type_entry) -
2901 sizeof(struct ipr_dump_entry_header);
2902 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2903 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2904 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2905 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2906 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2907 ucode_vpd->minor_release[1];
2908 driver_dump->hdr.num_entries++;
2909}
2910
2911/**
2912 * ipr_dump_version_data - Fill in the driver version in the dump.
2913 * @ioa_cfg: ioa config struct
2914 * @driver_dump: driver dump struct
2915 *
2916 * Return value:
2917 * nothing
2918 **/
2919static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2920 struct ipr_driver_dump *driver_dump)
2921{
2922 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2923 driver_dump->version_entry.hdr.len =
2924 sizeof(struct ipr_dump_version_entry) -
2925 sizeof(struct ipr_dump_entry_header);
2926 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2927 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2928 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2929 driver_dump->hdr.num_entries++;
2930}
2931
2932/**
2933 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2934 * @ioa_cfg: ioa config struct
2935 * @driver_dump: driver dump struct
2936 *
2937 * Return value:
2938 * nothing
2939 **/
2940static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2941 struct ipr_driver_dump *driver_dump)
2942{
2943 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2944 driver_dump->trace_entry.hdr.len =
2945 sizeof(struct ipr_dump_trace_entry) -
2946 sizeof(struct ipr_dump_entry_header);
2947 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2948 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2949 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2950 driver_dump->hdr.num_entries++;
2951}
2952
2953/**
2954 * ipr_dump_location_data - Fill in the IOA location in the dump.
2955 * @ioa_cfg: ioa config struct
2956 * @driver_dump: driver dump struct
2957 *
2958 * Return value:
2959 * nothing
2960 **/
2961static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2962 struct ipr_driver_dump *driver_dump)
2963{
2964 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2965 driver_dump->location_entry.hdr.len =
2966 sizeof(struct ipr_dump_location_entry) -
2967 sizeof(struct ipr_dump_entry_header);
2968 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2969 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
Kay Sievers71610f52008-12-03 22:41:36 +01002970 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002971 driver_dump->hdr.num_entries++;
2972}
2973
2974/**
2975 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2976 * @ioa_cfg: ioa config struct
2977 * @dump: dump struct
2978 *
2979 * Return value:
2980 * nothing
2981 **/
2982static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2983{
2984 unsigned long start_addr, sdt_word;
2985 unsigned long lock_flags = 0;
2986 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2987 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002988 u32 num_entries, max_num_entries, start_off, end_off;
2989 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002990 struct ipr_sdt *sdt;
Wayne Boyerdcbad002010-02-19 13:24:14 -08002991 int valid = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002992 int i;
2993
2994 ENTER;
2995
2996 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2997
Brian King41e9a692011-09-21 08:51:11 -05002998 if (ioa_cfg->sdt_state != READ_DUMP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002999 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3000 return;
3001 }
3002
Wayne Boyer110def82010-11-04 09:36:16 -07003003 if (ioa_cfg->sis64) {
3004 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3005 ssleep(IPR_DUMP_DELAY_SECONDS);
3006 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3007 }
3008
Linus Torvalds1da177e2005-04-16 15:20:36 -07003009 start_addr = readl(ioa_cfg->ioa_mailbox);
3010
Wayne Boyerdcbad002010-02-19 13:24:14 -08003011 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003012 dev_err(&ioa_cfg->pdev->dev,
3013 "Invalid dump table format: %lx\n", start_addr);
3014 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3015 return;
3016 }
3017
3018 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3019
3020 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3021
3022 /* Initialize the overall dump header */
3023 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3024 driver_dump->hdr.num_entries = 1;
3025 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3026 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3027 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3028 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3029
3030 ipr_dump_version_data(ioa_cfg, driver_dump);
3031 ipr_dump_location_data(ioa_cfg, driver_dump);
3032 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3033 ipr_dump_trace_data(ioa_cfg, driver_dump);
3034
3035 /* Update dump_header */
3036 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3037
3038 /* IOA Dump entry */
3039 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003040 ioa_dump->hdr.len = 0;
3041 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3042 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3043
3044	/* The first entries in the SDT are a list of dump addresses and
3045	   lengths used to gather the real dump data. sdt points to the
3046	   IOA-generated dump table; dump data will be extracted based on
3047	   the entries in this table. */
3048 sdt = &ioa_dump->sdt;
3049
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003050 if (ioa_cfg->sis64) {
3051 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3052 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3053 } else {
3054 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3055 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3056 }
3057
3058 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3059 (max_num_entries * sizeof(struct ipr_sdt_entry));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003060 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003061 bytes_to_copy / sizeof(__be32));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003062
3063 /* Smart Dump table is ready to use and the first entry is valid */
Wayne Boyerdcbad002010-02-19 13:24:14 -08003064 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3065 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003066 dev_err(&ioa_cfg->pdev->dev,
3067 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3068 rc, be32_to_cpu(sdt->hdr.state));
3069 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3070 ioa_cfg->sdt_state = DUMP_OBTAINED;
3071 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3072 return;
3073 }
3074
3075 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3076
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003077 if (num_entries > max_num_entries)
3078 num_entries = max_num_entries;
3079
3080 /* Update dump length to the actual data to be copied */
3081 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3082 if (ioa_cfg->sis64)
3083 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3084 else
3085 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003086
3087 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3088
3089 for (i = 0; i < num_entries; i++) {
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003090 if (ioa_dump->hdr.len > max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003091 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3092 break;
3093 }
3094
3095 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
Wayne Boyerdcbad002010-02-19 13:24:14 -08003096 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3097 if (ioa_cfg->sis64)
3098 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3099 else {
3100 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3101 end_off = be32_to_cpu(sdt->entry[i].end_token);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003102
Wayne Boyerdcbad002010-02-19 13:24:14 -08003103 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3104 bytes_to_copy = end_off - start_off;
3105 else
3106 valid = 0;
3107 }
3108 if (valid) {
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003109 if (bytes_to_copy > max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003110 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3111 continue;
3112 }
3113
3114 /* Copy data from adapter to driver buffers */
3115 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3116 bytes_to_copy);
3117
3118 ioa_dump->hdr.len += bytes_copied;
3119
3120 if (bytes_copied != bytes_to_copy) {
3121 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3122 break;
3123 }
3124 }
3125 }
3126 }
3127
3128 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3129
3130 /* Update dump_header */
3131 driver_dump->hdr.len += ioa_dump->hdr.len;
3132 wmb();
3133 ioa_cfg->sdt_state = DUMP_OBTAINED;
3134 LEAVE;
3135}
3136
3137#else
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003138#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003139#endif
3140
3141/**
3142 * ipr_release_dump - Free adapter dump memory
3143 * @kref: kref struct
3144 *
3145 * Return value:
3146 * nothing
3147 **/
3148static void ipr_release_dump(struct kref *kref)
3149{
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003150 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003151 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3152 unsigned long lock_flags = 0;
3153 int i;
3154
3155 ENTER;
3156 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3157 ioa_cfg->dump = NULL;
3158 ioa_cfg->sdt_state = INACTIVE;
3159 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3160
3161 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3162 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3163
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003164 vfree(dump->ioa_dump.ioa_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003165 kfree(dump);
3166 LEAVE;
3167}
3168
3169/**
3170 * ipr_worker_thread - Worker thread
David Howellsc4028952006-11-22 14:57:56 +00003171 * @work: work struct (embedded in the ioa config struct)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003172 *
3173 * Called at task level from a work thread. This function takes care
3174 * of adding and removing devices from the mid-layer as configuration
3175 * changes are detected by the adapter.
3176 *
3177 * Return value:
3178 * nothing
3179 **/
David Howellsc4028952006-11-22 14:57:56 +00003180static void ipr_worker_thread(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003181{
3182 unsigned long lock_flags;
3183 struct ipr_resource_entry *res;
3184 struct scsi_device *sdev;
3185 struct ipr_dump *dump;
David Howellsc4028952006-11-22 14:57:56 +00003186 struct ipr_ioa_cfg *ioa_cfg =
3187 container_of(work, struct ipr_ioa_cfg, work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003188 u8 bus, target, lun;
3189 int did_work;
3190
3191 ENTER;
3192 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3193
Brian King41e9a692011-09-21 08:51:11 -05003194 if (ioa_cfg->sdt_state == READ_DUMP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003195 dump = ioa_cfg->dump;
3196 if (!dump) {
3197 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3198 return;
3199 }
3200 kref_get(&dump->kref);
3201 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3202 ipr_get_ioa_dump(ioa_cfg, dump);
3203 kref_put(&dump->kref, ipr_release_dump);
3204
3205 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Brian King4c647e92011-10-15 09:08:56 -05003206 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003207 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3208 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3209 return;
3210 }
3211
3212restart:
3213 do {
3214 did_work = 0;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003215 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
3216 !ioa_cfg->allow_ml_add_del) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003217 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3218 return;
3219 }
3220
3221 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3222 if (res->del_from_ml && res->sdev) {
3223 did_work = 1;
3224 sdev = res->sdev;
3225 if (!scsi_device_get(sdev)) {
Kleber Sacilotto de Souza5767a1c2011-02-14 20:19:31 -02003226 if (!res->add_to_ml)
3227 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3228 else
3229 res->del_from_ml = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003230 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3231 scsi_remove_device(sdev);
3232 scsi_device_put(sdev);
3233 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3234 }
3235 break;
3236 }
3237 }
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003238 } while (did_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003239
3240 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3241 if (res->add_to_ml) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08003242 bus = res->bus;
3243 target = res->target;
3244 lun = res->lun;
Brian King1121b792006-03-29 09:37:16 -06003245 res->add_to_ml = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003246 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3247 scsi_add_device(ioa_cfg->host, bus, target, lun);
3248 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3249 goto restart;
3250 }
3251 }
3252
3253 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Tony Jonesee959b02008-02-22 00:13:36 +01003254 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003255 LEAVE;
3256}
3257
3258#ifdef CONFIG_SCSI_IPR_TRACE
3259/**
3260 * ipr_read_trace - Dump the adapter trace
Chris Wright2c3c8be2010-05-12 18:28:57 -07003261 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07003262 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08003263 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07003264 * @buf: buffer
3265 * @off: offset
3266 * @count: buffer size
3267 *
3268 * Return value:
3269 * number of bytes printed to buffer
3270 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07003271static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08003272 struct bin_attribute *bin_attr,
3273 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003274{
Tony Jonesee959b02008-02-22 00:13:36 +01003275 struct device *dev = container_of(kobj, struct device, kobj);
3276 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003277 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3278 unsigned long lock_flags = 0;
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003279 ssize_t ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003280
3281 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003282 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3283 IPR_TRACE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003284 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003285
3286 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003287}
3288
3289static struct bin_attribute ipr_trace_attr = {
3290 .attr = {
3291 .name = "trace",
3292 .mode = S_IRUGO,
3293 },
3294 .size = 0,
3295 .read = ipr_read_trace,
3296};
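
/*
 * Illustrative usage from user space (a sketch; assumes the "trace"
 * attribute is created on the Scsi_Host class device, so it shows up under
 * /sys/class/scsi_host/host<N>/ on most systems):
 *
 *	cat /sys/class/scsi_host/host0/trace > ipr_trace.bin
 *
 * The read hands back up to IPR_TRACE_SIZE bytes of the adapter trace
 * buffer via memory_read_from_buffer().
 */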
3297#endif
3298
3299/**
3300 * ipr_show_fw_version - Show the firmware version
Tony Jonesee959b02008-02-22 00:13:36 +01003301 * @dev: class device struct
3302 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003303 *
3304 * Return value:
3305 * number of bytes printed to buffer
3306 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003307static ssize_t ipr_show_fw_version(struct device *dev,
3308 struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003309{
Tony Jonesee959b02008-02-22 00:13:36 +01003310 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003311 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3312 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3313 unsigned long lock_flags = 0;
3314 int len;
3315
3316 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3317 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3318 ucode_vpd->major_release, ucode_vpd->card_type,
3319 ucode_vpd->minor_release[0],
3320 ucode_vpd->minor_release[1]);
3321 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3322 return len;
3323}
3324
Tony Jonesee959b02008-02-22 00:13:36 +01003325static struct device_attribute ipr_fw_version_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003326 .attr = {
3327 .name = "fw_version",
3328 .mode = S_IRUGO,
3329 },
3330 .show = ipr_show_fw_version,
3331};
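
/*
 * Illustrative usage (a sketch; assumes the attribute is exposed under
 * /sys/class/scsi_host/host<N>/):
 *
 *	cat /sys/class/scsi_host/host0/fw_version
 *
 * which prints the microcode level as four hex bytes -- major release,
 * card type, and the two minor release bytes -- matching the snprintf()
 * format in ipr_show_fw_version() above.
 */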
3332
3333/**
3334 * ipr_show_log_level - Show the adapter's error logging level
Tony Jonesee959b02008-02-22 00:13:36 +01003335 * @dev: class device struct
3336 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003337 *
3338 * Return value:
3339 * number of bytes printed to buffer
3340 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003341static ssize_t ipr_show_log_level(struct device *dev,
3342 struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003343{
Tony Jonesee959b02008-02-22 00:13:36 +01003344 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003345 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3346 unsigned long lock_flags = 0;
3347 int len;
3348
3349 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3350 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3351 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3352 return len;
3353}
3354
3355/**
3356 * ipr_store_log_level - Change the adapter's error logging level
Tony Jonesee959b02008-02-22 00:13:36 +01003357 * @dev: class device struct
3358 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003359 *
3360 * Return value:
3361 * number of bytes consumed from the buffer
3362 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003363static ssize_t ipr_store_log_level(struct device *dev,
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003364 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003365 const char *buf, size_t count)
3366{
Tony Jonesee959b02008-02-22 00:13:36 +01003367 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003368 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3369 unsigned long lock_flags = 0;
3370
3371 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3372 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3373 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3374 return strlen(buf);
3375}
3376
Tony Jonesee959b02008-02-22 00:13:36 +01003377static struct device_attribute ipr_log_level_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003378 .attr = {
3379 .name = "log_level",
3380 .mode = S_IRUGO | S_IWUSR,
3381 },
3382 .show = ipr_show_log_level,
3383 .store = ipr_store_log_level
3384};
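
/*
 * Illustrative usage (a sketch; assumes the attribute is exposed under
 * /sys/class/scsi_host/host<N>/):
 *
 *	cat /sys/class/scsi_host/host0/log_level
 *	echo 4 > /sys/class/scsi_host/host0/log_level
 *
 * The value is parsed with simple_strtoul(), so any decimal level is
 * accepted; higher levels generally make error logging more verbose.
 */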
3385
3386/**
3387 * ipr_store_diagnostics - IOA Diagnostics interface
Tony Jonesee959b02008-02-22 00:13:36 +01003388 * @dev: device struct
3389 * @buf: buffer
3390 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003391 *
3392 * This function will reset the adapter and wait a reasonable
3393 * amount of time for any errors that the adapter might log.
3394 *
3395 * Return value:
3396 * count on success / other on failure
3397 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003398static ssize_t ipr_store_diagnostics(struct device *dev,
3399 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003400 const char *buf, size_t count)
3401{
Tony Jonesee959b02008-02-22 00:13:36 +01003402 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003403 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3404 unsigned long lock_flags = 0;
3405 int rc = count;
3406
3407 if (!capable(CAP_SYS_ADMIN))
3408 return -EACCES;
3409
Linus Torvalds1da177e2005-04-16 15:20:36 -07003410 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003411 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -05003412 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3413 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3414 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3415 }
3416
Linus Torvalds1da177e2005-04-16 15:20:36 -07003417 ioa_cfg->errors_logged = 0;
3418 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3419
3420 if (ioa_cfg->in_reset_reload) {
3421 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3422 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3423
3424 /* Wait for a second for any errors to be logged */
3425 msleep(1000);
3426 } else {
3427 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3428 return -EIO;
3429 }
3430
3431 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3432 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3433 rc = -EIO;
3434 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3435
3436 return rc;
3437}
3438
Tony Jonesee959b02008-02-22 00:13:36 +01003439static struct device_attribute ipr_diagnostics_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003440 .attr = {
3441 .name = "run_diagnostics",
3442 .mode = S_IWUSR,
3443 },
3444 .store = ipr_store_diagnostics
3445};
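
/*
 * Illustrative usage (a sketch; assumes the attribute is exposed under
 * /sys/class/scsi_host/host<N>/ and the caller has CAP_SYS_ADMIN):
 *
 *	echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 *
 * Any write triggers a normal adapter reset; the write blocks until the
 * reset/reload finishes and returns -EIO if the reset did not complete or
 * if the adapter logged any errors afterwards.
 */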
3446
3447/**
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003448 * ipr_show_adapter_state - Show the adapter's state
Tony Jonesee959b02008-02-22 00:13:36 +01003449 * @dev: device struct
3450 * @buf: buffer
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003451 *
3452 * Return value:
3453 * number of bytes printed to buffer
3454 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003455static ssize_t ipr_show_adapter_state(struct device *dev,
3456 struct device_attribute *attr, char *buf)
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003457{
Tony Jonesee959b02008-02-22 00:13:36 +01003458 struct Scsi_Host *shost = class_to_shost(dev);
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003459 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3460 unsigned long lock_flags = 0;
3461 int len;
3462
3463 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003464 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003465 len = snprintf(buf, PAGE_SIZE, "offline\n");
3466 else
3467 len = snprintf(buf, PAGE_SIZE, "online\n");
3468 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3469 return len;
3470}
3471
3472/**
3473 * ipr_store_adapter_state - Change adapter state
Tony Jonesee959b02008-02-22 00:13:36 +01003474 * @dev: device struct
3475 * @buf: buffer
3476 * @count: buffer size
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003477 *
3478 * This function will change the adapter's state.
3479 *
3480 * Return value:
3481 * count on success / other on failure
3482 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003483static ssize_t ipr_store_adapter_state(struct device *dev,
3484 struct device_attribute *attr,
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003485 const char *buf, size_t count)
3486{
Tony Jonesee959b02008-02-22 00:13:36 +01003487 struct Scsi_Host *shost = class_to_shost(dev);
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003488 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3489 unsigned long lock_flags;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003490 int result = count, i;
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003491
3492 if (!capable(CAP_SYS_ADMIN))
3493 return -EACCES;
3494
3495 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003496 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3497 !strncmp(buf, "online", 6)) {
3498 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3499 spin_lock(&ioa_cfg->hrrq[i]._lock);
3500 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3501 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3502 }
3503 wmb();
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003504 ioa_cfg->reset_retries = 0;
3505 ioa_cfg->in_ioa_bringdown = 0;
3506 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3507 }
3508 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3509 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3510
3511 return result;
3512}
3513
Tony Jonesee959b02008-02-22 00:13:36 +01003514static struct device_attribute ipr_ioa_state_attr = {
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003515 .attr = {
Brian King49dd0962008-04-28 17:36:20 -05003516 .name = "online_state",
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003517 .mode = S_IRUGO | S_IWUSR,
3518 },
3519 .show = ipr_show_adapter_state,
3520 .store = ipr_store_adapter_state
3521};
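
/*
 * Illustrative usage (a sketch; assumes the attribute is exposed under
 * /sys/class/scsi_host/host<N>/ and the caller has CAP_SYS_ADMIN):
 *
 *	cat /sys/class/scsi_host/host0/online_state
 *	echo online > /sys/class/scsi_host/host0/online_state
 *
 * Writing "online" to a dead adapter clears the per-hrrq ioa_is_dead flags
 * and kicks off a fresh adapter reset; any other write is effectively a
 * no-op.
 */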
3522
3523/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003524 * ipr_store_reset_adapter - Reset the adapter
Tony Jonesee959b02008-02-22 00:13:36 +01003525 * @dev: device struct
3526 * @buf: buffer
3527 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003528 *
3529 * This function will reset the adapter.
3530 *
3531 * Return value:
3532 * count on success / other on failure
3533 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003534static ssize_t ipr_store_reset_adapter(struct device *dev,
3535 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003536 const char *buf, size_t count)
3537{
Tony Jonesee959b02008-02-22 00:13:36 +01003538 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003539 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3540 unsigned long lock_flags;
3541 int result = count;
3542
3543 if (!capable(CAP_SYS_ADMIN))
3544 return -EACCES;
3545
3546 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3547 if (!ioa_cfg->in_reset_reload)
3548 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3549 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3550 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3551
3552 return result;
3553}
3554
Tony Jonesee959b02008-02-22 00:13:36 +01003555static struct device_attribute ipr_ioa_reset_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003556 .attr = {
3557 .name = "reset_host",
3558 .mode = S_IWUSR,
3559 },
3560 .store = ipr_store_reset_adapter
3561};
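
/*
 * Illustrative usage (a sketch; assumes the attribute is exposed under
 * /sys/class/scsi_host/host<N>/ and the caller has CAP_SYS_ADMIN):
 *
 *	echo 1 > /sys/class/scsi_host/host0/reset_host
 *
 * Any write requests a normal adapter shutdown/reset and blocks until the
 * reset/reload has completed.
 */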
3562
3563/**
3564 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3565 * @buf_len: buffer length
3566 *
3567 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3568 * list to use for microcode download
3569 *
3570 * Return value:
3571 * pointer to sglist / NULL on failure
3572 **/
3573static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3574{
3575 int sg_size, order, bsize_elem, num_elem, i, j;
3576 struct ipr_sglist *sglist;
3577 struct scatterlist *scatterlist;
3578 struct page *page;
3579
3580 /* Get the minimum size per scatter/gather element */
3581 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3582
3583 /* Get the actual size per element */
3584 order = get_order(sg_size);
3585
3586 /* Determine the actual number of bytes per element */
3587 bsize_elem = PAGE_SIZE * (1 << order);
3588
3589 /* Determine the actual number of sg entries needed */
3590 if (buf_len % bsize_elem)
3591 num_elem = (buf_len / bsize_elem) + 1;
3592 else
3593 num_elem = buf_len / bsize_elem;
3594
3595 /* Allocate a scatter/gather list for the DMA */
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06003596 sglist = kzalloc(sizeof(struct ipr_sglist) +
Linus Torvalds1da177e2005-04-16 15:20:36 -07003597 (sizeof(struct scatterlist) * (num_elem - 1)),
3598 GFP_KERNEL);
3599
3600 if (sglist == NULL) {
3601 ipr_trace;
3602 return NULL;
3603 }
3604
Linus Torvalds1da177e2005-04-16 15:20:36 -07003605 scatterlist = sglist->scatterlist;
Jens Axboe45711f12007-10-22 21:19:53 +02003606 sg_init_table(scatterlist, num_elem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003607
3608 sglist->order = order;
3609 sglist->num_sg = num_elem;
3610
3611 /* Allocate a bunch of sg elements */
3612 for (i = 0; i < num_elem; i++) {
3613 page = alloc_pages(GFP_KERNEL, order);
3614 if (!page) {
3615 ipr_trace;
3616
3617 /* Free up what we already allocated */
3618 for (j = i - 1; j >= 0; j--)
Jens Axboe45711f12007-10-22 21:19:53 +02003619 __free_pages(sg_page(&scatterlist[j]), order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003620 kfree(sglist);
3621 return NULL;
3622 }
3623
Jens Axboe642f1492007-10-24 11:20:47 +02003624 sg_set_page(&scatterlist[i], page, 0, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003625 }
3626
3627 return sglist;
3628}
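
/*
 * Sizing sketch for the allocator above (illustrative numbers, assuming
 * PAGE_SIZE is 4 KB and IPR_MAX_SGLIST is 64): a 1 MB microcode image gives
 * sg_size = 1048576 / 63 = 16644 bytes, get_order() rounds that up to
 * order 3 (32 KB per element), so bsize_elem = 32768 and the list ends up
 * with num_elem = 1048576 / 32768 = 32 scatter/gather entries.
 */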
3629
3630/**
3631 * ipr_free_ucode_buffer - Frees a microcode download buffer
3632 * @sglist: scatter/gather list pointer
3633 *
3634 * Free a DMA'able ucode download buffer previously allocated with
3635 * ipr_alloc_ucode_buffer
3636 *
3637 * Return value:
3638 * nothing
3639 **/
3640static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3641{
3642 int i;
3643
3644 for (i = 0; i < sglist->num_sg; i++)
Jens Axboe45711f12007-10-22 21:19:53 +02003645 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003646
3647 kfree(sglist);
3648}
3649
3650/**
3651 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3652 * @sglist: scatter/gather list pointer
3653 * @buffer: buffer pointer
3654 * @len: buffer length
3655 *
3656 * Copy a microcode image from a user buffer into a buffer allocated by
3657 * ipr_alloc_ucode_buffer
3658 *
3659 * Return value:
3660 * 0 on success / other on failure
3661 **/
3662static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3663 u8 *buffer, u32 len)
3664{
3665 int bsize_elem, i, result = 0;
3666 struct scatterlist *scatterlist;
3667 void *kaddr;
3668
3669 /* Determine the actual number of bytes per element */
3670 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3671
3672 scatterlist = sglist->scatterlist;
3673
3674 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
Jens Axboe45711f12007-10-22 21:19:53 +02003675 struct page *page = sg_page(&scatterlist[i]);
3676
3677 kaddr = kmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003678 memcpy(kaddr, buffer, bsize_elem);
Jens Axboe45711f12007-10-22 21:19:53 +02003679 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003680
3681 scatterlist[i].length = bsize_elem;
3682
3683 if (result != 0) {
3684 ipr_trace;
3685 return result;
3686 }
3687 }
3688
3689 if (len % bsize_elem) {
Jens Axboe45711f12007-10-22 21:19:53 +02003690 struct page *page = sg_page(&scatterlist[i]);
3691
3692 kaddr = kmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003693 memcpy(kaddr, buffer, len % bsize_elem);
Jens Axboe45711f12007-10-22 21:19:53 +02003694 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003695
3696 scatterlist[i].length = len % bsize_elem;
3697 }
3698
3699 sglist->buffer_len = len;
3700 return result;
3701}
3702
3703/**
Wayne Boyera32c0552010-02-19 13:23:36 -08003704 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3705 * @ipr_cmd: ipr command struct
3706 * @sglist: scatter/gather list
3707 *
3708 * Builds a microcode download IOA data list (IOADL).
3709 *
3710 **/
3711static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3712 struct ipr_sglist *sglist)
3713{
3714 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3715 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3716 struct scatterlist *scatterlist = sglist->scatterlist;
3717 int i;
3718
3719 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3720 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3721 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3722
3723 ioarcb->ioadl_len =
3724 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3725 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3726 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3727 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3728 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3729 }
3730
3731 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3732}
3733
3734/**
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003735 * ipr_build_ucode_ioadl - Build a microcode download IOADL
Linus Torvalds1da177e2005-04-16 15:20:36 -07003736 * @ipr_cmd: ipr command struct
3737 * @sglist: scatter/gather list
Linus Torvalds1da177e2005-04-16 15:20:36 -07003738 *
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003739 * Builds a microcode download IOA data list (IOADL).
Linus Torvalds1da177e2005-04-16 15:20:36 -07003740 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003741 **/
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003742static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3743 struct ipr_sglist *sglist)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003744{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003745 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08003746 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003747 struct scatterlist *scatterlist = sglist->scatterlist;
3748 int i;
3749
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003750 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003751 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08003752 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3753
3754 ioarcb->ioadl_len =
Linus Torvalds1da177e2005-04-16 15:20:36 -07003755 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3756
3757 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3758 ioadl[i].flags_and_data_len =
3759 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3760 ioadl[i].address =
3761 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3762 }
3763
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003764 ioadl[i-1].flags_and_data_len |=
3765 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3766}
3767
3768/**
3769 * ipr_update_ioa_ucode - Update IOA's microcode
3770 * @ioa_cfg: ioa config struct
3771 * @sglist: scatter/gather list
3772 *
3773 * Initiate an adapter reset to update the IOA's microcode
3774 *
3775 * Return value:
3776 * 0 on success / -EIO on failure
3777 **/
3778static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3779 struct ipr_sglist *sglist)
3780{
3781 unsigned long lock_flags;
3782
3783 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003784 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -05003785 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3786 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3787 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3788 }
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003789
3790 if (ioa_cfg->ucode_sglist) {
3791 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3792 dev_err(&ioa_cfg->pdev->dev,
3793 "Microcode download already in progress\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003794 return -EIO;
3795 }
3796
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003797 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3798 sglist->num_sg, DMA_TO_DEVICE);
3799
3800 if (!sglist->num_dma_sg) {
3801 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3802 dev_err(&ioa_cfg->pdev->dev,
3803 "Failed to map microcode download buffer!\n");
3804 return -EIO;
3805 }
3806
3807 ioa_cfg->ucode_sglist = sglist;
3808 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3809 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3810 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3811
3812 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3813 ioa_cfg->ucode_sglist = NULL;
3814 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003815 return 0;
3816}
3817
3818/**
3819 * ipr_store_update_fw - Update the firmware on the adapter
Tony Jonesee959b02008-02-22 00:13:36 +01003820 * @dev: device struct
3821 * @buf: buffer
3822 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003823 *
3824 * This function will update the firmware on the adapter.
3825 *
3826 * Return value:
3827 * count on success / other on failure
3828 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003829static ssize_t ipr_store_update_fw(struct device *dev,
3830 struct device_attribute *attr,
3831 const char *buf, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003832{
Tony Jonesee959b02008-02-22 00:13:36 +01003833 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003834 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3835 struct ipr_ucode_image_header *image_hdr;
3836 const struct firmware *fw_entry;
3837 struct ipr_sglist *sglist;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003838 char fname[100];
3839 char *src;
3840 int len, result, dnld_size;
3841
3842 if (!capable(CAP_SYS_ADMIN))
3843 return -EACCES;
3844
	len = snprintf(fname, sizeof(fname), "%s", buf);
	/* Only strip a trailing newline, and never write past the buffer */
	if (len > 0 && len < (int)sizeof(fname) && fname[len - 1] == '\n')
		fname[len - 1] = '\0';
3847
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003848 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003849 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3850 return -EIO;
3851 }
3852
3853 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3854
Linus Torvalds1da177e2005-04-16 15:20:36 -07003855 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3856 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3857 sglist = ipr_alloc_ucode_buffer(dnld_size);
3858
3859 if (!sglist) {
3860 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3861 release_firmware(fw_entry);
3862 return -ENOMEM;
3863 }
3864
3865 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3866
3867 if (result) {
3868 dev_err(&ioa_cfg->pdev->dev,
3869 "Microcode buffer copy to DMA buffer failed\n");
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003870 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003871 }
3872
Wayne Boyer14ed9cc2011-10-03 20:54:37 -07003873 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
3874
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003875 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003876
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003877 if (!result)
3878 result = count;
3879out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003880 ipr_free_ucode_buffer(sglist);
3881 release_firmware(fw_entry);
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003882 return result;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003883}
3884
Tony Jonesee959b02008-02-22 00:13:36 +01003885static struct device_attribute ipr_update_fw_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003886 .attr = {
3887 .name = "update_fw",
3888 .mode = S_IWUSR,
3889 },
3890 .store = ipr_store_update_fw
3891};
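
/*
 * Illustrative usage (a sketch; assumes the attribute is exposed under
 * /sys/class/scsi_host/host<N>/ and the caller has CAP_SYS_ADMIN):
 *
 *	echo ipr_fw_image.bin > /sys/class/scsi_host/host0/update_fw
 *
 * The written string (the file name above is hypothetical) is passed to
 * request_firmware(), so the image must live in the firmware search path,
 * commonly /lib/firmware. The write returns only after the download and
 * the resulting adapter reset have finished, which the driver warns can
 * take up to 30 minutes.
 */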
3892
Wayne Boyer75576bb2010-07-14 10:50:14 -07003893/**
3894 * ipr_show_fw_type - Show the adapter's firmware type.
3895 * @dev: class device struct
3896 * @buf: buffer
3897 *
3898 * Return value:
3899 * number of bytes printed to buffer
3900 **/
3901static ssize_t ipr_show_fw_type(struct device *dev,
3902 struct device_attribute *attr, char *buf)
3903{
3904 struct Scsi_Host *shost = class_to_shost(dev);
3905 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3906 unsigned long lock_flags = 0;
3907 int len;
3908
3909 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3910 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
3911 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3912 return len;
3913}
3914
3915static struct device_attribute ipr_ioa_fw_type_attr = {
3916 .attr = {
3917 .name = "fw_type",
3918 .mode = S_IRUGO,
3919 },
3920 .show = ipr_show_fw_type
3921};
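
/*
 * Illustrative usage (a sketch; assumes the attribute is exposed under
 * /sys/class/scsi_host/host<N>/):
 *
 *	cat /sys/class/scsi_host/host0/fw_type
 *
 * prints 1 for the newer 64-bit SIS (sis64) adapters and 0 otherwise,
 * mirroring ioa_cfg->sis64.
 */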
3922
Tony Jonesee959b02008-02-22 00:13:36 +01003923static struct device_attribute *ipr_ioa_attrs[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003924 &ipr_fw_version_attr,
3925 &ipr_log_level_attr,
3926 &ipr_diagnostics_attr,
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003927 &ipr_ioa_state_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003928 &ipr_ioa_reset_attr,
3929 &ipr_update_fw_attr,
Wayne Boyer75576bb2010-07-14 10:50:14 -07003930 &ipr_ioa_fw_type_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003931 NULL,
3932};
3933
3934#ifdef CONFIG_SCSI_IPR_DUMP
3935/**
3936 * ipr_read_dump - Dump the adapter
Chris Wright2c3c8be2010-05-12 18:28:57 -07003937 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07003938 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08003939 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07003940 * @buf: buffer
3941 * @off: offset
3942 * @count: buffer size
3943 *
3944 * Return value:
3945 * number of bytes printed to buffer
3946 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07003947static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08003948 struct bin_attribute *bin_attr,
3949 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003950{
Tony Jonesee959b02008-02-22 00:13:36 +01003951 struct device *cdev = container_of(kobj, struct device, kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003952 struct Scsi_Host *shost = class_to_shost(cdev);
3953 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3954 struct ipr_dump *dump;
3955 unsigned long lock_flags = 0;
3956 char *src;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003957 int len, sdt_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003958 size_t rc = count;
3959
3960 if (!capable(CAP_SYS_ADMIN))
3961 return -EACCES;
3962
3963 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3964 dump = ioa_cfg->dump;
3965
3966 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3967 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3968 return 0;
3969 }
3970 kref_get(&dump->kref);
3971 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3972
3973 if (off > dump->driver_dump.hdr.len) {
3974 kref_put(&dump->kref, ipr_release_dump);
3975 return 0;
3976 }
3977
3978 if (off + count > dump->driver_dump.hdr.len) {
3979 count = dump->driver_dump.hdr.len - off;
3980 rc = count;
3981 }
3982
3983 if (count && off < sizeof(dump->driver_dump)) {
3984 if (off + count > sizeof(dump->driver_dump))
3985 len = sizeof(dump->driver_dump) - off;
3986 else
3987 len = count;
3988 src = (u8 *)&dump->driver_dump + off;
3989 memcpy(buf, src, len);
3990 buf += len;
3991 off += len;
3992 count -= len;
3993 }
3994
3995 off -= sizeof(dump->driver_dump);
3996
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003997 if (ioa_cfg->sis64)
3998 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
3999 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4000 sizeof(struct ipr_sdt_entry));
4001 else
4002 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4003 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4004
4005 if (count && off < sdt_end) {
4006 if (off + count > sdt_end)
4007 len = sdt_end - off;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004008 else
4009 len = count;
4010 src = (u8 *)&dump->ioa_dump + off;
4011 memcpy(buf, src, len);
4012 buf += len;
4013 off += len;
4014 count -= len;
4015 }
4016
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004017 off -= sdt_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004018
4019 while (count) {
4020 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4021 len = PAGE_ALIGN(off) - off;
4022 else
4023 len = count;
4024 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4025 src += off & ~PAGE_MASK;
4026 memcpy(buf, src, len);
4027 buf += len;
4028 off += len;
4029 count -= len;
4030 }
4031
4032 kref_put(&dump->kref, ipr_release_dump);
4033 return rc;
4034}
4035
4036/**
4037 * ipr_alloc_dump - Prepare for adapter dump
4038 * @ioa_cfg: ioa config struct
4039 *
4040 * Return value:
4041 * 0 on success / other on failure
4042 **/
4043static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4044{
4045 struct ipr_dump *dump;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004046 __be32 **ioa_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004047 unsigned long lock_flags = 0;
4048
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06004049 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004050
4051 if (!dump) {
4052 ipr_err("Dump memory allocation failed\n");
4053 return -ENOMEM;
4054 }
4055
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004056 if (ioa_cfg->sis64)
4057 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4058 else
4059 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4060
4061 if (!ioa_data) {
4062 ipr_err("Dump memory allocation failed\n");
4063 kfree(dump);
4064 return -ENOMEM;
4065 }
4066
4067 dump->ioa_dump.ioa_data = ioa_data;
4068
Linus Torvalds1da177e2005-04-16 15:20:36 -07004069 kref_init(&dump->kref);
4070 dump->ioa_cfg = ioa_cfg;
4071
4072 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4073
4074 if (INACTIVE != ioa_cfg->sdt_state) {
4075 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004076 vfree(dump->ioa_dump.ioa_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004077 kfree(dump);
4078 return 0;
4079 }
4080
4081 ioa_cfg->dump = dump;
4082 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06004083 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004084 ioa_cfg->dump_taken = 1;
4085 schedule_work(&ioa_cfg->work_q);
4086 }
4087 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4088
Linus Torvalds1da177e2005-04-16 15:20:36 -07004089 return 0;
4090}
4091
4092/**
4093 * ipr_free_dump - Free adapter dump memory
4094 * @ioa_cfg: ioa config struct
4095 *
4096 * Return value:
4097 * 0 on success / other on failure
4098 **/
4099static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4100{
4101 struct ipr_dump *dump;
4102 unsigned long lock_flags = 0;
4103
4104 ENTER;
4105
4106 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4107 dump = ioa_cfg->dump;
4108 if (!dump) {
4109 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4110 return 0;
4111 }
4112
4113 ioa_cfg->dump = NULL;
4114 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4115
4116 kref_put(&dump->kref, ipr_release_dump);
4117
4118 LEAVE;
4119 return 0;
4120}
4121
4122/**
4123 * ipr_write_dump - Setup dump state of adapter
Chris Wright2c3c8be2010-05-12 18:28:57 -07004124 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07004125 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08004126 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07004127 * @buf: buffer
4128 * @off: offset
4129 * @count: buffer size
4130 *
4131 * Return value:
 4132 *	number of bytes written on success / other on failure
4133 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07004134static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08004135 struct bin_attribute *bin_attr,
4136 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004137{
Tony Jonesee959b02008-02-22 00:13:36 +01004138 struct device *cdev = container_of(kobj, struct device, kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004139 struct Scsi_Host *shost = class_to_shost(cdev);
4140 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4141 int rc;
4142
4143 if (!capable(CAP_SYS_ADMIN))
4144 return -EACCES;
4145
4146 if (buf[0] == '1')
4147 rc = ipr_alloc_dump(ioa_cfg);
4148 else if (buf[0] == '0')
4149 rc = ipr_free_dump(ioa_cfg);
4150 else
4151 return -EINVAL;
4152
4153 if (rc)
4154 return rc;
4155 else
4156 return count;
4157}
4158
4159static struct bin_attribute ipr_dump_attr = {
4160 .attr = {
4161 .name = "dump",
4162 .mode = S_IRUSR | S_IWUSR,
4163 },
4164 .size = 0,
4165 .read = ipr_read_dump,
4166 .write = ipr_write_dump
4167};
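
/*
 * Usage sketch (editor's illustration; the sysfs path is an assumption,
 * not taken from this file): "dump" is a binary attribute on the SCSI
 * host class device, so an adapter dump can be driven from user space
 * by a privileged process roughly as follows:
 *
 *   echo 1 > /sys/class/scsi_host/host0/dump        # ipr_alloc_dump()
 *   dd if=/sys/class/scsi_host/host0/dump of=ipr.dump bs=4k
 *   echo 0 > /sys/class/scsi_host/host0/dump        # ipr_free_dump()
 *
 * Reads are serviced by ipr_read_dump() and return data only once the
 * adapter dump has actually been obtained (sdt_state == DUMP_OBTAINED).
 */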
4168#else
4169static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4170#endif
4171
4172/**
4173 * ipr_change_queue_depth - Change the device's queue depth
4174 * @sdev: scsi device struct
4175 * @qdepth: depth to set
Mike Christiee881a172009-10-15 17:46:39 -07004176 * @reason: calling context
Linus Torvalds1da177e2005-04-16 15:20:36 -07004177 *
4178 * Return value:
4179 * actual depth set
4180 **/
Mike Christiee881a172009-10-15 17:46:39 -07004181static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4182 int reason)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004183{
Brian King35a39692006-09-25 12:39:20 -05004184 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4185 struct ipr_resource_entry *res;
4186 unsigned long lock_flags = 0;
4187
Mike Christiee881a172009-10-15 17:46:39 -07004188 if (reason != SCSI_QDEPTH_DEFAULT)
4189 return -EOPNOTSUPP;
4190
Brian King35a39692006-09-25 12:39:20 -05004191 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4192 res = (struct ipr_resource_entry *)sdev->hostdata;
4193
4194 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4195 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4196 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4197
Linus Torvalds1da177e2005-04-16 15:20:36 -07004198 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4199 return sdev->queue_depth;
4200}
4201
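/*
 * Editor's note: this handler is wired up as the host template's
 * change_queue_depth hook (outside this excerpt), which the SCSI midlayer
 * invokes from the standard per-device queue_depth sysfs attribute, e.g.
 * (device path assumed):
 *
 *   echo 16 > /sys/block/sdb/device/queue_depth
 *
 * For SATA devices behind the adapter the requested depth is capped at
 * IPR_MAX_CMD_PER_ATA_LUN before being applied.
 */
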
4202/**
4203 * ipr_change_queue_type - Change the device's queue type
 4204 * @sdev: scsi device struct
4205 * @tag_type: type of tags to use
4206 *
4207 * Return value:
4208 * actual queue type set
4209 **/
4210static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4211{
4212 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4213 struct ipr_resource_entry *res;
4214 unsigned long lock_flags = 0;
4215
4216 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4217 res = (struct ipr_resource_entry *)sdev->hostdata;
4218
4219 if (res) {
4220 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4221 /*
4222 * We don't bother quiescing the device here since the
4223 * adapter firmware does it for us.
4224 */
4225 scsi_set_tag_type(sdev, tag_type);
4226
4227 if (tag_type)
4228 scsi_activate_tcq(sdev, sdev->queue_depth);
4229 else
4230 scsi_deactivate_tcq(sdev, sdev->queue_depth);
4231 } else
4232 tag_type = 0;
4233 } else
4234 tag_type = 0;
4235
4236 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4237 return tag_type;
4238}
4239
4240/**
4241 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4242 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004243 * @attr: device attribute structure
Linus Torvalds1da177e2005-04-16 15:20:36 -07004244 * @buf: buffer
4245 *
4246 * Return value:
4247 * number of bytes printed to buffer
4248 **/
Yani Ioannou10523b32005-05-17 06:43:37 -04004249static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004250{
4251 struct scsi_device *sdev = to_scsi_device(dev);
4252 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4253 struct ipr_resource_entry *res;
4254 unsigned long lock_flags = 0;
4255 ssize_t len = -ENXIO;
4256
4257 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4258 res = (struct ipr_resource_entry *)sdev->hostdata;
4259 if (res)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004260 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004261 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4262 return len;
4263}
4264
4265static struct device_attribute ipr_adapter_handle_attr = {
4266 .attr = {
4267 .name = "adapter_handle",
4268 .mode = S_IRUSR,
4269 },
4270 .show = ipr_show_adapter_handle
4271};
4272
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004273/**
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004274 * ipr_show_resource_path - Show the resource path or the resource address for
4275 * this device.
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004276 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004277 * @attr: device attribute structure
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004278 * @buf: buffer
4279 *
4280 * Return value:
4281 * number of bytes printed to buffer
4282 **/
4283static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4284{
4285 struct scsi_device *sdev = to_scsi_device(dev);
4286 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4287 struct ipr_resource_entry *res;
4288 unsigned long lock_flags = 0;
4289 ssize_t len = -ENXIO;
4290 char buffer[IPR_MAX_RES_PATH_LENGTH];
4291
4292 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4293 res = (struct ipr_resource_entry *)sdev->hostdata;
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004294 if (res && ioa_cfg->sis64)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004295 len = snprintf(buf, PAGE_SIZE, "%s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06004296 __ipr_format_res_path(res->res_path, buffer,
4297 sizeof(buffer)));
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004298 else if (res)
4299 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4300 res->bus, res->target, res->lun);
4301
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004302 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4303 return len;
4304}
4305
4306static struct device_attribute ipr_resource_path_attr = {
4307 .attr = {
4308 .name = "resource_path",
Wayne Boyer75576bb2010-07-14 10:50:14 -07004309 .mode = S_IRUGO,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004310 },
4311 .show = ipr_show_resource_path
4312};
4313
Wayne Boyer75576bb2010-07-14 10:50:14 -07004314/**
Wayne Boyer46d74562010-08-11 07:15:17 -07004315 * ipr_show_device_id - Show the device_id for this device.
4316 * @dev: device struct
4317 * @attr: device attribute structure
4318 * @buf: buffer
4319 *
4320 * Return value:
4321 * number of bytes printed to buffer
4322 **/
4323static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4324{
4325 struct scsi_device *sdev = to_scsi_device(dev);
4326 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4327 struct ipr_resource_entry *res;
4328 unsigned long lock_flags = 0;
4329 ssize_t len = -ENXIO;
4330
4331 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4332 res = (struct ipr_resource_entry *)sdev->hostdata;
4333 if (res && ioa_cfg->sis64)
4334 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4335 else if (res)
4336 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4337
4338 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4339 return len;
4340}
4341
4342static struct device_attribute ipr_device_id_attr = {
4343 .attr = {
4344 .name = "device_id",
4345 .mode = S_IRUGO,
4346 },
4347 .show = ipr_show_device_id
4348};
4349
4350/**
Wayne Boyer75576bb2010-07-14 10:50:14 -07004351 * ipr_show_resource_type - Show the resource type for this device.
4352 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004353 * @attr: device attribute structure
Wayne Boyer75576bb2010-07-14 10:50:14 -07004354 * @buf: buffer
4355 *
4356 * Return value:
4357 * number of bytes printed to buffer
4358 **/
4359static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4360{
4361 struct scsi_device *sdev = to_scsi_device(dev);
4362 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4363 struct ipr_resource_entry *res;
4364 unsigned long lock_flags = 0;
4365 ssize_t len = -ENXIO;
4366
4367 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4368 res = (struct ipr_resource_entry *)sdev->hostdata;
4369
4370 if (res)
4371 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4372
4373 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4374 return len;
4375}
4376
4377static struct device_attribute ipr_resource_type_attr = {
4378 .attr = {
4379 .name = "resource_type",
4380 .mode = S_IRUGO,
4381 },
4382 .show = ipr_show_resource_type
4383};
4384
Linus Torvalds1da177e2005-04-16 15:20:36 -07004385static struct device_attribute *ipr_dev_attrs[] = {
4386 &ipr_adapter_handle_attr,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004387 &ipr_resource_path_attr,
Wayne Boyer46d74562010-08-11 07:15:17 -07004388 &ipr_device_id_attr,
Wayne Boyer75576bb2010-07-14 10:50:14 -07004389 &ipr_resource_type_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004390 NULL,
4391};
4392
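/*
 * Editor's note: the attributes collected in ipr_dev_attrs[] show up in
 * each SCSI device's sysfs directory, so they can be inspected with, for
 * example (device path assumed):
 *
 *   cat /sys/bus/scsi/devices/0:0:1:0/adapter_handle
 *   cat /sys/bus/scsi/devices/0:0:1:0/resource_path
 *   cat /sys/bus/scsi/devices/0:0:1:0/device_id
 *   cat /sys/bus/scsi/devices/0:0:1:0/resource_type
 *
 * On SIS-64 adapters resource_path prints the adapter's resource path
 * string; on older adapters it falls back to the host:bus:target:lun
 * address, as implemented in ipr_show_resource_path() above.
 */
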
4393/**
4394 * ipr_biosparam - Return the HSC mapping
4395 * @sdev: scsi device struct
4396 * @block_device: block device pointer
4397 * @capacity: capacity of the device
4398 * @parm: Array containing returned HSC values.
4399 *
4400 * This function generates the HSC parms that fdisk uses.
4401 * We want to make sure we return something that places partitions
4402 * on 4k boundaries for best performance with the IOA.
4403 *
4404 * Return value:
4405 * 0 on success
4406 **/
4407static int ipr_biosparam(struct scsi_device *sdev,
4408 struct block_device *block_device,
4409 sector_t capacity, int *parm)
4410{
4411 int heads, sectors;
4412 sector_t cylinders;
4413
4414 heads = 128;
4415 sectors = 32;
4416
4417 cylinders = capacity;
4418 sector_div(cylinders, (128 * 32));
4419
4420 /* return result */
4421 parm[0] = heads;
4422 parm[1] = sectors;
4423 parm[2] = cylinders;
4424
4425 return 0;
4426}
4427
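/*
 * Worked example (editor's note): with the fixed 128-head, 32-sector
 * geometry, one "cylinder" is 128 * 32 = 4096 blocks, i.e. 2 MiB with
 * 512-byte blocks, so partitions that start on a cylinder boundary are
 * also 4 KiB aligned. A device reporting 8,388,608 blocks (4 GiB) is
 * therefore presented as 8388608 / 4096 = 2048 cylinders.
 */
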
4428/**
Brian King35a39692006-09-25 12:39:20 -05004429 * ipr_find_starget - Find target based on bus/target.
4430 * @starget: scsi target struct
4431 *
4432 * Return value:
4433 * resource entry pointer if found / NULL if not found
4434 **/
4435static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4436{
4437 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4438 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4439 struct ipr_resource_entry *res;
4440
4441 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004442 if ((res->bus == starget->channel) &&
Brian King0ee1d712012-03-14 21:20:06 -05004443 (res->target == starget->id)) {
Brian King35a39692006-09-25 12:39:20 -05004444 return res;
4445 }
4446 }
4447
4448 return NULL;
4449}
4450
4451static struct ata_port_info sata_port_info;
4452
4453/**
4454 * ipr_target_alloc - Prepare for commands to a SCSI target
4455 * @starget: scsi target struct
4456 *
4457 * If the device is a SATA device, this function allocates an
4458 * ATA port with libata, else it does nothing.
4459 *
4460 * Return value:
4461 * 0 on success / non-0 on failure
4462 **/
4463static int ipr_target_alloc(struct scsi_target *starget)
4464{
4465 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4466 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4467 struct ipr_sata_port *sata_port;
4468 struct ata_port *ap;
4469 struct ipr_resource_entry *res;
4470 unsigned long lock_flags;
4471
4472 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4473 res = ipr_find_starget(starget);
4474 starget->hostdata = NULL;
4475
4476 if (res && ipr_is_gata(res)) {
4477 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4478 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4479 if (!sata_port)
4480 return -ENOMEM;
4481
4482 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4483 if (ap) {
4484 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4485 sata_port->ioa_cfg = ioa_cfg;
4486 sata_port->ap = ap;
4487 sata_port->res = res;
4488
4489 res->sata_port = sata_port;
4490 ap->private_data = sata_port;
4491 starget->hostdata = sata_port;
4492 } else {
4493 kfree(sata_port);
4494 return -ENOMEM;
4495 }
4496 }
4497 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4498
4499 return 0;
4500}
4501
4502/**
4503 * ipr_target_destroy - Destroy a SCSI target
4504 * @starget: scsi target struct
4505 *
4506 * If the device was a SATA device, this function frees the libata
4507 * ATA port, else it does nothing.
4508 *
4509 **/
4510static void ipr_target_destroy(struct scsi_target *starget)
4511{
4512 struct ipr_sata_port *sata_port = starget->hostdata;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004513 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4514 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4515
4516 if (ioa_cfg->sis64) {
Brian King0ee1d712012-03-14 21:20:06 -05004517 if (!ipr_find_starget(starget)) {
4518 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4519 clear_bit(starget->id, ioa_cfg->array_ids);
4520 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4521 clear_bit(starget->id, ioa_cfg->vset_ids);
4522 else if (starget->channel == 0)
4523 clear_bit(starget->id, ioa_cfg->target_ids);
4524 }
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004525 }
Brian King35a39692006-09-25 12:39:20 -05004526
4527 if (sata_port) {
4528 starget->hostdata = NULL;
4529 ata_sas_port_destroy(sata_port->ap);
4530 kfree(sata_port);
4531 }
4532}
4533
4534/**
4535 * ipr_find_sdev - Find device based on bus/target/lun.
4536 * @sdev: scsi device struct
4537 *
4538 * Return value:
4539 * resource entry pointer if found / NULL if not found
4540 **/
4541static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4542{
4543 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4544 struct ipr_resource_entry *res;
4545
4546 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004547 if ((res->bus == sdev->channel) &&
4548 (res->target == sdev->id) &&
4549 (res->lun == sdev->lun))
Brian King35a39692006-09-25 12:39:20 -05004550 return res;
4551 }
4552
4553 return NULL;
4554}
4555
4556/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004557 * ipr_slave_destroy - Unconfigure a SCSI device
4558 * @sdev: scsi device struct
4559 *
4560 * Return value:
4561 * nothing
4562 **/
4563static void ipr_slave_destroy(struct scsi_device *sdev)
4564{
4565 struct ipr_resource_entry *res;
4566 struct ipr_ioa_cfg *ioa_cfg;
4567 unsigned long lock_flags = 0;
4568
4569 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4570
4571 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4572 res = (struct ipr_resource_entry *) sdev->hostdata;
4573 if (res) {
Brian King35a39692006-09-25 12:39:20 -05004574 if (res->sata_port)
Tejun Heo3e4ec342010-05-10 21:41:30 +02004575 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004576 sdev->hostdata = NULL;
4577 res->sdev = NULL;
Brian King35a39692006-09-25 12:39:20 -05004578 res->sata_port = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004579 }
4580 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4581}
4582
4583/**
4584 * ipr_slave_configure - Configure a SCSI device
4585 * @sdev: scsi device struct
4586 *
4587 * This function configures the specified scsi device.
4588 *
4589 * Return value:
4590 * 0 on success
4591 **/
4592static int ipr_slave_configure(struct scsi_device *sdev)
4593{
4594 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4595 struct ipr_resource_entry *res;
Brian Kingdd406ef2009-04-22 08:58:02 -05004596 struct ata_port *ap = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004597 unsigned long lock_flags = 0;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004598 char buffer[IPR_MAX_RES_PATH_LENGTH];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004599
4600 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4601 res = sdev->hostdata;
4602 if (res) {
4603 if (ipr_is_af_dasd_device(res))
4604 sdev->type = TYPE_RAID;
brking@us.ibm.com0726ce22005-11-01 17:01:01 -06004605 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004606 sdev->scsi_level = 4;
brking@us.ibm.com0726ce22005-11-01 17:01:01 -06004607 sdev->no_uld_attach = 1;
4608 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004609 if (ipr_is_vset_device(res)) {
Jens Axboe242f9dc2008-09-14 05:55:09 -07004610 blk_queue_rq_timeout(sdev->request_queue,
4611 IPR_VSET_RW_TIMEOUT);
Martin K. Petersen086fa5f2010-02-26 00:20:38 -05004612 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004613 }
Brian Kingdd406ef2009-04-22 08:58:02 -05004614 if (ipr_is_gata(res) && res->sata_port)
4615 ap = res->sata_port->ap;
4616 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4617
4618 if (ap) {
Brian King35a39692006-09-25 12:39:20 -05004619 scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
Brian Kingdd406ef2009-04-22 08:58:02 -05004620 ata_sas_slave_configure(sdev, ap);
4621 } else
Brian King35a39692006-09-25 12:39:20 -05004622 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004623 if (ioa_cfg->sis64)
4624 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06004625 ipr_format_res_path(ioa_cfg,
4626 res->res_path, buffer, sizeof(buffer)));
Brian Kingdd406ef2009-04-22 08:58:02 -05004627 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004628 }
4629 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4630 return 0;
4631}
4632
4633/**
Brian King35a39692006-09-25 12:39:20 -05004634 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4635 * @sdev: scsi device struct
4636 *
4637 * This function initializes an ATA port so that future commands
4638 * sent through queuecommand will work.
4639 *
4640 * Return value:
4641 * 0 on success
4642 **/
4643static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4644{
4645 struct ipr_sata_port *sata_port = NULL;
4646 int rc = -ENXIO;
4647
4648 ENTER;
4649 if (sdev->sdev_target)
4650 sata_port = sdev->sdev_target->hostdata;
Dan Williamsb2024452012-03-21 21:09:07 -07004651 if (sata_port) {
Brian King35a39692006-09-25 12:39:20 -05004652 rc = ata_sas_port_init(sata_port->ap);
Dan Williamsb2024452012-03-21 21:09:07 -07004653 if (rc == 0)
4654 rc = ata_sas_sync_probe(sata_port->ap);
4655 }
4656
Brian King35a39692006-09-25 12:39:20 -05004657 if (rc)
4658 ipr_slave_destroy(sdev);
4659
4660 LEAVE;
4661 return rc;
4662}
4663
4664/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004665 * ipr_slave_alloc - Prepare for commands to a device.
4666 * @sdev: scsi device struct
4667 *
4668 * This function saves a pointer to the resource entry
4669 * in the scsi device struct if the device exists. We
4670 * can then use this pointer in ipr_queuecommand when
4671 * handling new commands.
4672 *
4673 * Return value:
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06004674 * 0 on success / -ENXIO if device does not exist
Linus Torvalds1da177e2005-04-16 15:20:36 -07004675 **/
4676static int ipr_slave_alloc(struct scsi_device *sdev)
4677{
4678 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4679 struct ipr_resource_entry *res;
4680 unsigned long lock_flags;
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06004681 int rc = -ENXIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004682
4683 sdev->hostdata = NULL;
4684
4685 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4686
Brian King35a39692006-09-25 12:39:20 -05004687 res = ipr_find_sdev(sdev);
4688 if (res) {
4689 res->sdev = sdev;
4690 res->add_to_ml = 0;
4691 res->in_erp = 0;
4692 sdev->hostdata = res;
4693 if (!ipr_is_naca_model(res))
4694 res->needs_sync_complete = 1;
4695 rc = 0;
4696 if (ipr_is_gata(res)) {
4697 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4698 return ipr_ata_slave_alloc(sdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004699 }
4700 }
4701
4702 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4703
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06004704 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004705}
4706
4707/**
 4708 * __ipr_eh_host_reset - Reset the host adapter
4709 * @scsi_cmd: scsi command struct
4710 *
4711 * Return value:
4712 * SUCCESS / FAILED
4713 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03004714static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004715{
4716 struct ipr_ioa_cfg *ioa_cfg;
4717 int rc;
4718
4719 ENTER;
4720 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4721
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02004722 if (!ioa_cfg->in_reset_reload) {
4723 dev_err(&ioa_cfg->pdev->dev,
4724 "Adapter being reset as a result of error recovery.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004725
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02004726 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4727 ioa_cfg->sdt_state = GET_DUMP;
4728 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004729
4730 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4731
4732 LEAVE;
4733 return rc;
4734}
4735
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03004736static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
Jeff Garzik df0ae242005-05-28 07:57:14 -04004737{
4738 int rc;
4739
4740 spin_lock_irq(cmd->device->host->host_lock);
4741 rc = __ipr_eh_host_reset(cmd);
4742 spin_unlock_irq(cmd->device->host->host_lock);
4743
4744 return rc;
4745}
4746
Linus Torvalds1da177e2005-04-16 15:20:36 -07004747/**
Brian Kingc6513092006-03-29 09:37:43 -06004748 * ipr_device_reset - Reset the device
4749 * @ioa_cfg: ioa config struct
4750 * @res: resource entry struct
4751 *
4752 * This function issues a device reset to the affected device.
4753 * If the device is a SCSI device, a LUN reset will be sent
4754 * to the device first. If that does not work, a target reset
Brian King35a39692006-09-25 12:39:20 -05004755 * will be sent. If the device is a SATA device, a PHY reset will
4756 * be sent.
Brian Kingc6513092006-03-29 09:37:43 -06004757 *
4758 * Return value:
4759 * 0 on success / non-zero on failure
4760 **/
4761static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4762 struct ipr_resource_entry *res)
4763{
4764 struct ipr_cmnd *ipr_cmd;
4765 struct ipr_ioarcb *ioarcb;
4766 struct ipr_cmd_pkt *cmd_pkt;
Brian King35a39692006-09-25 12:39:20 -05004767 struct ipr_ioarcb_ata_regs *regs;
Brian Kingc6513092006-03-29 09:37:43 -06004768 u32 ioasc;
4769
4770 ENTER;
4771 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4772 ioarcb = &ipr_cmd->ioarcb;
4773 cmd_pkt = &ioarcb->cmd_pkt;
Wayne Boyera32c0552010-02-19 13:23:36 -08004774
4775 if (ipr_cmd->ioa_cfg->sis64) {
4776 regs = &ipr_cmd->i.ata_ioadl.regs;
4777 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4778 } else
4779 regs = &ioarcb->u.add_data.u.regs;
Brian Kingc6513092006-03-29 09:37:43 -06004780
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004781 ioarcb->res_handle = res->res_handle;
Brian Kingc6513092006-03-29 09:37:43 -06004782 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4783 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
Brian King35a39692006-09-25 12:39:20 -05004784 if (ipr_is_gata(res)) {
4785 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
Wayne Boyera32c0552010-02-19 13:23:36 -08004786 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
Brian King35a39692006-09-25 12:39:20 -05004787 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4788 }
Brian Kingc6513092006-03-29 09:37:43 -06004789
4790 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
Wayne Boyer96d21f02010-05-10 09:13:27 -07004791 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06004792 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Wayne Boyer96d21f02010-05-10 09:13:27 -07004793 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4794 if (ipr_cmd->ioa_cfg->sis64)
4795 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4796 sizeof(struct ipr_ioasa_gata));
4797 else
4798 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4799 sizeof(struct ipr_ioasa_gata));
4800 }
Brian Kingc6513092006-03-29 09:37:43 -06004801
4802 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03004803 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
Brian Kingc6513092006-03-29 09:37:43 -06004804}
4805
4806/**
Brian King35a39692006-09-25 12:39:20 -05004807 * ipr_sata_reset - Reset the SATA port
Tejun Heocc0680a2007-08-06 18:36:23 +09004808 * @link: SATA link to reset
Brian King35a39692006-09-25 12:39:20 -05004809 * @classes: class of the attached device
4810 *
Tejun Heocc0680a2007-08-06 18:36:23 +09004811 * This function issues a SATA phy reset to the affected ATA link.
Brian King35a39692006-09-25 12:39:20 -05004812 *
4813 * Return value:
4814 * 0 on success / non-zero on failure
4815 **/
Tejun Heocc0680a2007-08-06 18:36:23 +09004816static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
Andrew Morton120bda32007-03-26 02:17:43 -07004817 unsigned long deadline)
Brian King35a39692006-09-25 12:39:20 -05004818{
Tejun Heocc0680a2007-08-06 18:36:23 +09004819 struct ipr_sata_port *sata_port = link->ap->private_data;
Brian King35a39692006-09-25 12:39:20 -05004820 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4821 struct ipr_resource_entry *res;
4822 unsigned long lock_flags = 0;
4823 int rc = -ENXIO;
4824
4825 ENTER;
4826 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03004827 while (ioa_cfg->in_reset_reload) {
Brian King73d98ff2006-11-21 10:27:58 -06004828 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4829 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4830 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4831 }
4832
Brian King35a39692006-09-25 12:39:20 -05004833 res = sata_port->res;
4834 if (res) {
4835 rc = ipr_device_reset(ioa_cfg, res);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004836 *classes = res->ata_class;
Brian King35a39692006-09-25 12:39:20 -05004837 }
4838
4839 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4840 LEAVE;
4841 return rc;
4842}
4843
4844/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004845 * __ipr_eh_dev_reset - Reset the device
4846 * @scsi_cmd: scsi command struct
4847 *
4848 * This function issues a device reset to the affected device.
4849 * A LUN reset will be sent to the device first. If that does
4850 * not work, a target reset will be sent.
4851 *
4852 * Return value:
4853 * SUCCESS / FAILED
4854 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03004855static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004856{
4857 struct ipr_cmnd *ipr_cmd;
4858 struct ipr_ioa_cfg *ioa_cfg;
4859 struct ipr_resource_entry *res;
Brian King35a39692006-09-25 12:39:20 -05004860 struct ata_port *ap;
4861 int rc = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06004862 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004863
4864 ENTER;
4865 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4866 res = scsi_cmd->device->hostdata;
4867
brking@us.ibm.comeeb883072005-11-01 17:02:29 -06004868 if (!res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004869 return FAILED;
4870
4871 /*
4872 * If we are currently going through reset/reload, return failed. This will force the
4873 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
4874 * reset to complete
4875 */
4876 if (ioa_cfg->in_reset_reload)
4877 return FAILED;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06004878 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004879 return FAILED;
4880
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06004881 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06004882 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06004883 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4884 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4885 if (ipr_cmd->scsi_cmd)
4886 ipr_cmd->done = ipr_scsi_eh_done;
4887 if (ipr_cmd->qc)
4888 ipr_cmd->done = ipr_sata_eh_done;
4889 if (ipr_cmd->qc &&
4890 !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4891 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4892 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4893 }
Brian King7402ece2006-11-21 10:28:23 -06004894 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004895 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06004896 spin_unlock(&hrrq->_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004897 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004898 res->resetting_device = 1;
Brian Kingfb3ed3c2006-03-29 09:37:37 -06004899 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
Brian King35a39692006-09-25 12:39:20 -05004900
4901 if (ipr_is_gata(res) && res->sata_port) {
4902 ap = res->sata_port->ap;
4903 spin_unlock_irq(scsi_cmd->device->host->host_lock);
Tejun Heoa1efdab2008-03-25 12:22:50 +09004904 ata_std_error_handler(ap);
Brian King35a39692006-09-25 12:39:20 -05004905 spin_lock_irq(scsi_cmd->device->host->host_lock);
Brian King5af23d22007-05-09 15:36:35 -05004906
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06004907 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06004908 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06004909 list_for_each_entry(ipr_cmd,
4910 &hrrq->hrrq_pending_q, queue) {
4911 if (ipr_cmd->ioarcb.res_handle ==
4912 res->res_handle) {
4913 rc = -EIO;
4914 break;
4915 }
Brian King5af23d22007-05-09 15:36:35 -05004916 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06004917 spin_unlock(&hrrq->_lock);
Brian King5af23d22007-05-09 15:36:35 -05004918 }
Brian King35a39692006-09-25 12:39:20 -05004919 } else
4920 rc = ipr_device_reset(ioa_cfg, res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004921 res->resetting_device = 0;
4922
Linus Torvalds1da177e2005-04-16 15:20:36 -07004923 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03004924 return rc ? FAILED : SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004925}
4926
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03004927static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04004928{
4929 int rc;
4930
4931 spin_lock_irq(cmd->device->host->host_lock);
4932 rc = __ipr_eh_dev_reset(cmd);
4933 spin_unlock_irq(cmd->device->host->host_lock);
4934
4935 return rc;
4936}
4937
Linus Torvalds1da177e2005-04-16 15:20:36 -07004938/**
4939 * ipr_bus_reset_done - Op done function for bus reset.
4940 * @ipr_cmd: ipr command struct
4941 *
4942 * This function is the op done function for a bus reset
4943 *
4944 * Return value:
4945 * none
4946 **/
4947static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
4948{
4949 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4950 struct ipr_resource_entry *res;
4951
4952 ENTER;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004953 if (!ioa_cfg->sis64)
4954 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4955 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
4956 scsi_report_bus_reset(ioa_cfg->host, res->bus);
4957 break;
4958 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004959 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004960
4961 /*
4962 * If abort has not completed, indicate the reset has, else call the
4963 * abort's done function to wake the sleeping eh thread
4964 */
4965 if (ipr_cmd->sibling->sibling)
4966 ipr_cmd->sibling->sibling = NULL;
4967 else
4968 ipr_cmd->sibling->done(ipr_cmd->sibling);
4969
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06004970 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004971 LEAVE;
4972}
4973
4974/**
4975 * ipr_abort_timeout - An abort task has timed out
4976 * @ipr_cmd: ipr command struct
4977 *
4978 * This function handles when an abort task times out. If this
4979 * happens we issue a bus reset since we have resources tied
4980 * up that must be freed before returning to the midlayer.
4981 *
4982 * Return value:
4983 * none
4984 **/
4985static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
4986{
4987 struct ipr_cmnd *reset_cmd;
4988 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4989 struct ipr_cmd_pkt *cmd_pkt;
4990 unsigned long lock_flags = 0;
4991
4992 ENTER;
4993 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4994 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
4995 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4996 return;
4997 }
4998
Brian Kingfb3ed3c2006-03-29 09:37:37 -06004999 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005000 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5001 ipr_cmd->sibling = reset_cmd;
5002 reset_cmd->sibling = ipr_cmd;
5003 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5004 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5005 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5006 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5007 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5008
5009 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5010 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5011 LEAVE;
5012}
5013
5014/**
5015 * ipr_cancel_op - Cancel specified op
5016 * @scsi_cmd: scsi command struct
5017 *
 5018 * This function cancels the specified op.
5019 *
5020 * Return value:
5021 * SUCCESS / FAILED
5022 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005023static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005024{
5025 struct ipr_cmnd *ipr_cmd;
5026 struct ipr_ioa_cfg *ioa_cfg;
5027 struct ipr_resource_entry *res;
5028 struct ipr_cmd_pkt *cmd_pkt;
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005029 u32 ioasc, int_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005030 int op_found = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005031 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005032
5033 ENTER;
5034 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5035 res = scsi_cmd->device->hostdata;
5036
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005037 /* If we are currently going through reset/reload, return failed.
5038 * This will force the mid-layer to call ipr_eh_host_reset,
5039 * which will then go to sleep and wait for the reset to complete
5040 */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005041 if (ioa_cfg->in_reset_reload ||
5042 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005043 return FAILED;
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005044 if (!res)
5045 return FAILED;
5046
5047 /*
5048 * If we are aborting a timed out op, chances are that the timeout was caused
5049 * by a still not detected EEH error. In such cases, reading a register will
5050 * trigger the EEH recovery infrastructure.
5051 */
5052 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5053
5054 if (!ipr_is_gscsi(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005055 return FAILED;
5056
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005057 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005058 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005059 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5060 if (ipr_cmd->scsi_cmd == scsi_cmd) {
5061 ipr_cmd->done = ipr_scsi_eh_done;
5062 op_found = 1;
5063 break;
5064 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005065 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005066 spin_unlock(&hrrq->_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005067 }
5068
5069 if (!op_found)
5070 return SUCCESS;
5071
5072 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005073 ipr_cmd->ioarcb.res_handle = res->res_handle;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005074 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5075 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5076 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5077 ipr_cmd->u.sdev = scsi_cmd->device;
5078
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005079 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5080 scsi_cmd->cmnd[0]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005081 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
Wayne Boyer96d21f02010-05-10 09:13:27 -07005082 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005083
5084 /*
5085 * If the abort task timed out and we sent a bus reset, we will get
5086 * one the following responses to the abort
5087 */
5088 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5089 ioasc = 0;
5090 ipr_trace;
5091 }
5092
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005093	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005094 if (!ipr_is_naca_model(res))
5095 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005096
5097 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005098 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005099}
5100
5101/**
5102 * ipr_eh_abort - Abort a single op
5103 * @scsi_cmd: scsi command struct
5104 *
5105 * Return value:
5106 * SUCCESS / FAILED
5107 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005108static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005109{
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005110 unsigned long flags;
5111 int rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005112
5113 ENTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005114
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005115 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5116 rc = ipr_cancel_op(scsi_cmd);
5117 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005118
5119 LEAVE;
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005120 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005121}
5122
5123/**
5124 * ipr_handle_other_interrupt - Handle "other" interrupts
5125 * @ioa_cfg: ioa config struct
Wayne Boyer634651f2010-08-27 14:45:07 -07005126 * @int_reg: interrupt register
Linus Torvalds1da177e2005-04-16 15:20:36 -07005127 *
5128 * Return value:
5129 * IRQ_NONE / IRQ_HANDLED
5130 **/
Wayne Boyer634651f2010-08-27 14:45:07 -07005131static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
Wayne Boyer630ad8312011-04-07 12:12:30 -07005132 u32 int_reg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005133{
5134 irqreturn_t rc = IRQ_HANDLED;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005135 u32 int_mask_reg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005136
Wayne Boyer7dacb642011-04-12 10:29:02 -07005137 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5138 int_reg &= ~int_mask_reg;
5139
5140 /* If an interrupt on the adapter did not occur, ignore it.
5141 * Or in the case of SIS 64, check for a stage change interrupt.
5142 */
5143 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5144 if (ioa_cfg->sis64) {
5145 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5146 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5147 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5148
5149 /* clear stage change */
5150 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5151 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5152 list_del(&ioa_cfg->reset_cmd->queue);
5153 del_timer(&ioa_cfg->reset_cmd->timer);
5154 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5155 return IRQ_HANDLED;
5156 }
5157 }
5158
5159 return IRQ_NONE;
5160 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005161
5162 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5163 /* Mask the interrupt */
5164 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5165
5166 /* Clear the interrupt */
5167 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5168 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5169
5170 list_del(&ioa_cfg->reset_cmd->queue);
5171 del_timer(&ioa_cfg->reset_cmd->timer);
5172 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
Wayne Boyer7dacb642011-04-12 10:29:02 -07005173 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
Brian King7dd21302012-03-14 21:20:08 -05005174 if (ioa_cfg->clear_isr) {
5175 if (ipr_debug && printk_ratelimit())
5176 dev_err(&ioa_cfg->pdev->dev,
5177 "Spurious interrupt detected. 0x%08X\n", int_reg);
5178 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5179 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5180 return IRQ_NONE;
5181 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005182 } else {
5183 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5184 ioa_cfg->ioa_unit_checked = 1;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005185 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5186 dev_err(&ioa_cfg->pdev->dev,
5187 "No Host RRQ. 0x%08X\n", int_reg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005188 else
5189 dev_err(&ioa_cfg->pdev->dev,
5190 "Permanent IOA failure. 0x%08X\n", int_reg);
5191
5192 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5193 ioa_cfg->sdt_state = GET_DUMP;
5194
5195 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5196 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5197 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005198
Linus Torvalds1da177e2005-04-16 15:20:36 -07005199 return rc;
5200}
5201
5202/**
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005203 * ipr_isr_eh - Interrupt service routine error handler
5204 * @ioa_cfg: ioa config struct
 5205 * @msg:	message to log
 * @number:	number to log with the message
5206 *
5207 * Return value:
5208 * none
5209 **/
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005210static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005211{
5212 ioa_cfg->errors_logged++;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005213 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005214
5215 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5216 ioa_cfg->sdt_state = GET_DUMP;
5217
5218 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5219}
5220
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005221static int __ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue,
5222 struct list_head *doneq)
5223{
5224 u32 ioasc;
5225 u16 cmd_index;
5226 struct ipr_cmnd *ipr_cmd;
5227 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5228 int num_hrrq = 0;
5229
5230 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005231 if (!hrr_queue->allow_interrupts)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005232 return 0;
5233
5234 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5235 hrr_queue->toggle_bit) {
5236
5237 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5238 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5239 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5240
5241 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5242 cmd_index < hrr_queue->min_cmd_id)) {
5243 ipr_isr_eh(ioa_cfg,
5244 "Invalid response handle from IOA: ",
5245 cmd_index);
5246 break;
5247 }
5248
5249 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5250 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5251
5252 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5253
5254 list_move_tail(&ipr_cmd->queue, doneq);
5255
5256 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5257 hrr_queue->hrrq_curr++;
5258 } else {
5259 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5260 hrr_queue->toggle_bit ^= 1u;
5261 }
5262 num_hrrq++;
5263 }
5264 return num_hrrq;
5265}
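
/*
 * Editor's note on the walk above: the adapter flips IPR_HRRQ_TOGGLE_BIT in
 * the entries it writes each time it wraps around the host RRQ, and the
 * host mirrors that in hrr_queue->toggle_bit, flipping its copy when
 * hrrq_curr wraps from hrrq_end back to hrrq_start. Only entries whose
 * toggle bit matches are consumed, so stale entries from the previous pass
 * through the ring are never mistaken for new completions.
 */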
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005266/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005267 * ipr_isr - Interrupt service routine
5268 * @irq: irq number
5269 * @devp: pointer to ioa config struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07005270 *
5271 * Return value:
5272 * IRQ_NONE / IRQ_HANDLED
5273 **/
David Howells7d12e782006-10-05 14:55:46 +01005274static irqreturn_t ipr_isr(int irq, void *devp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005275{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005276 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5277 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005278 unsigned long hrrq_flags = 0;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005279 u32 int_reg = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005280 u32 ioasc;
5281 u16 cmd_index;
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005282 int num_hrrq = 0;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005283 int irq_none = 0;
Brian King172cd6e2012-07-17 08:14:40 -05005284 struct ipr_cmnd *ipr_cmd, *temp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005285 irqreturn_t rc = IRQ_NONE;
Brian King172cd6e2012-07-17 08:14:40 -05005286 LIST_HEAD(doneq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005287
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005288 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005289 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005290 if (!hrrq->allow_interrupts) {
5291 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005292 return IRQ_NONE;
5293 }
5294
Linus Torvalds1da177e2005-04-16 15:20:36 -07005295 while (1) {
5296 ipr_cmd = NULL;
5297
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005298 while ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5299 hrrq->toggle_bit) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005300
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005301 cmd_index = (be32_to_cpu(*hrrq->hrrq_curr) &
Linus Torvalds1da177e2005-04-16 15:20:36 -07005302 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5303
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005304 if (unlikely(cmd_index > hrrq->max_cmd_id ||
5305 cmd_index < hrrq->min_cmd_id)) {
5306 ipr_isr_eh(ioa_cfg,
5307 "Invalid response handle from IOA: ",
5308 cmd_index);
Brian King172cd6e2012-07-17 08:14:40 -05005309 rc = IRQ_HANDLED;
5310 goto unlock_out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005311 }
5312
5313 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
Wayne Boyer96d21f02010-05-10 09:13:27 -07005314 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005315
5316 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5317
Brian King172cd6e2012-07-17 08:14:40 -05005318 list_move_tail(&ipr_cmd->queue, &doneq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005319
5320 rc = IRQ_HANDLED;
5321
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005322 if (hrrq->hrrq_curr < hrrq->hrrq_end) {
5323 hrrq->hrrq_curr++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005324 } else {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005325 hrrq->hrrq_curr = hrrq->hrrq_start;
5326 hrrq->toggle_bit ^= 1u;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005327 }
5328 }
5329
Brian King7dd21302012-03-14 21:20:08 -05005330 if (ipr_cmd && !ioa_cfg->clear_isr)
5331 break;
5332
Linus Torvalds1da177e2005-04-16 15:20:36 -07005333 if (ipr_cmd != NULL) {
5334 /* Clear the PCI interrupt */
Wayne Boyera5442ba2011-05-17 09:18:53 -07005335 num_hrrq = 0;
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005336 do {
Wayne Boyer214777b2010-02-19 13:24:26 -08005337 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
Wayne Boyer7dacb642011-04-12 10:29:02 -07005338 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005339 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5340 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5341
Wayne Boyer7dacb642011-04-12 10:29:02 -07005342 } else if (rc == IRQ_NONE && irq_none == 0) {
5343 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5344 irq_none++;
Wayne Boyera5442ba2011-05-17 09:18:53 -07005345 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5346 int_reg & IPR_PCII_HRRQ_UPDATED) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005347 ipr_isr_eh(ioa_cfg, "Error clearing HRRQ: ", num_hrrq);
Brian King172cd6e2012-07-17 08:14:40 -05005348 rc = IRQ_HANDLED;
5349 goto unlock_out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005350 } else
5351 break;
5352 }
5353
5354 if (unlikely(rc == IRQ_NONE))
Wayne Boyer634651f2010-08-27 14:45:07 -07005355 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005356
Brian King172cd6e2012-07-17 08:14:40 -05005357unlock_out:
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005358 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King172cd6e2012-07-17 08:14:40 -05005359 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5360 list_del(&ipr_cmd->queue);
5361 del_timer(&ipr_cmd->timer);
5362 ipr_cmd->fast_done(ipr_cmd);
5363 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005364 return rc;
5365}
Brian King172cd6e2012-07-17 08:14:40 -05005366
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005367/**
5368 * ipr_isr_mhrrq - Interrupt service routine
5369 * @irq: irq number
5370 * @devp: pointer to ioa config struct
5371 *
5372 * Return value:
5373 * IRQ_NONE / IRQ_HANDLED
5374 **/
5375static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5376{
5377 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005378 unsigned long hrrq_flags = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005379 struct ipr_cmnd *ipr_cmd, *temp;
5380 irqreturn_t rc = IRQ_NONE;
5381 LIST_HEAD(doneq);
5382
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005383 spin_lock_irqsave(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005384
5385 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005386 if (!hrrq->allow_interrupts) {
5387 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005388 return IRQ_NONE;
5389 }
5390
5391 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5392 hrrq->toggle_bit)
5393
5394 if (__ipr_process_hrrq(hrrq, &doneq))
5395 rc = IRQ_HANDLED;
5396
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005397 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005398
5399 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5400 list_del(&ipr_cmd->queue);
5401 del_timer(&ipr_cmd->timer);
5402 ipr_cmd->fast_done(ipr_cmd);
5403 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005404 return rc;
5405}
5406
5407/**
Wayne Boyera32c0552010-02-19 13:23:36 -08005408 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07005409 * @ioa_cfg: ioa config struct
5410 * @ipr_cmd: ipr command struct
5411 *
5412 * Return value:
5413 * 0 on success / -1 on failure
5414 **/
Wayne Boyera32c0552010-02-19 13:23:36 -08005415static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5416 struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005417{
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005418 int i, nseg;
5419 struct scatterlist *sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005420 u32 length;
5421 u32 ioadl_flags = 0;
5422 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5423 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08005424 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005425
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005426 length = scsi_bufflen(scsi_cmd);
5427 if (!length)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005428 return 0;
5429
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005430 nseg = scsi_dma_map(scsi_cmd);
5431 if (nseg < 0) {
Anton Blanchard51f52a42011-05-09 10:07:40 +10005432 if (printk_ratelimit())
5433 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005434 return -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005435 }
5436
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005437 ipr_cmd->dma_use_sg = nseg;
5438
Wayne Boyer438b0332010-05-10 09:13:00 -07005439 ioarcb->data_transfer_length = cpu_to_be32(length);
Wayne Boyerb8803b12010-05-14 08:55:13 -07005440 ioarcb->ioadl_len =
5441 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
Wayne Boyer438b0332010-05-10 09:13:00 -07005442
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005443 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5444 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5445 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08005446 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5447 ioadl_flags = IPR_IOADL_FLAGS_READ;
5448
5449 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5450 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5451 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5452 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5453 }
5454
5455 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5456 return 0;
5457}
5458
5459/**
5460 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5461 * @ioa_cfg: ioa config struct
5462 * @ipr_cmd: ipr command struct
5463 *
5464 * Return value:
5465 * 0 on success / -1 on failure
5466 **/
5467static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5468 struct ipr_cmnd *ipr_cmd)
5469{
5470 int i, nseg;
5471 struct scatterlist *sg;
5472 u32 length;
5473 u32 ioadl_flags = 0;
5474 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5475 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5476 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5477
5478 length = scsi_bufflen(scsi_cmd);
5479 if (!length)
5480 return 0;
5481
5482 nseg = scsi_dma_map(scsi_cmd);
5483 if (nseg < 0) {
5484 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5485 return -1;
5486 }
5487
5488 ipr_cmd->dma_use_sg = nseg;
5489
5490 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5491 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5492 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5493 ioarcb->data_transfer_length = cpu_to_be32(length);
5494 ioarcb->ioadl_len =
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005495 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5496 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5497 ioadl_flags = IPR_IOADL_FLAGS_READ;
5498 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5499 ioarcb->read_ioadl_len =
5500 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5501 }
5502
Wayne Boyera32c0552010-02-19 13:23:36 -08005503 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5504 ioadl = ioarcb->u.add_data.u.ioadl;
5505 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5506 offsetof(struct ipr_ioarcb, u.add_data));
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005507 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5508 }
5509
5510 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5511 ioadl[i].flags_and_data_len =
5512 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5513 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5514 }
5515
5516 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5517 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005518}
5519
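/*
 * Illustrative aside (not part of the driver): a minimal sketch of the
 * descriptor-list pattern the two builders above share -- each
 * scatter/gather element is translated into a descriptor carrying flags,
 * length and address, and only the final descriptor is tagged with
 * IPR_IOADL_FLAGS_LAST so the adapter knows where the list ends.  The
 * example_* names and the flat segment array are hypothetical stand-ins
 * for struct scatterlist and the real IOADL types.
 */
struct example_seg {
	u32 addr;
	u32 len;
};

struct example_desc {
	__be32 flags_and_data_len;
	__be32 address;
};

static int example_build_desc_list(const struct example_seg *segs, int nseg,
				   u32 ioadl_flags, struct example_desc *descs)
{
	int i;

	if (nseg <= 0)
		return -1;

	for (i = 0; i < nseg; i++) {
		/* Flags live in the high bits, the length in the low bits. */
		descs[i].flags_and_data_len =
			cpu_to_be32(ioadl_flags | segs[i].len);
		descs[i].address = cpu_to_be32(segs[i].addr);
	}

	/* Mirror the ioadl[i-1] |= IPR_IOADL_FLAGS_LAST step above. */
	descs[nseg - 1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}
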
5520/**
5521 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5522 * @scsi_cmd: scsi command struct
5523 *
5524 * Return value:
5525 * task attributes
5526 **/
5527static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5528{
5529 u8 tag[2];
5530 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5531
5532 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5533 switch (tag[0]) {
5534 case MSG_SIMPLE_TAG:
5535 rc = IPR_FLAGS_LO_SIMPLE_TASK;
5536 break;
5537 case MSG_HEAD_TAG:
5538 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5539 break;
5540 case MSG_ORDERED_TAG:
5541 rc = IPR_FLAGS_LO_ORDERED_TASK;
5542 break;
 5543 		}
5544 }
5545
5546 return rc;
5547}
5548
5549/**
5550 * ipr_erp_done - Process completion of ERP for a device
5551 * @ipr_cmd: ipr command struct
5552 *
5553 * This function copies the sense buffer into the scsi_cmd
 5554 * struct and invokes the scsi_done completion callback.
5555 *
5556 * Return value:
5557 * nothing
5558 **/
5559static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5560{
5561 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5562 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005563 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005564
5565 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5566 scsi_cmd->result |= (DID_ERROR << 16);
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005567 scmd_printk(KERN_ERR, scsi_cmd,
5568 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005569 } else {
5570 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5571 SCSI_SENSE_BUFFERSIZE);
5572 }
5573
5574 if (res) {
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005575 if (!ipr_is_naca_model(res))
5576 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005577 res->in_erp = 0;
5578 }
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005579 scsi_dma_unmap(ipr_cmd->scsi_cmd);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005580 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005581 scsi_cmd->scsi_done(scsi_cmd);
5582}
5583
5584/**
5585 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5586 * @ipr_cmd: ipr command struct
5587 *
5588 * Return value:
5589 * none
5590 **/
5591static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5592{
Brian King51b1c7e2007-03-29 12:43:50 -05005593 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005594 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
Wayne Boyera32c0552010-02-19 13:23:36 -08005595 dma_addr_t dma_addr = ipr_cmd->dma_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005596
5597 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
Wayne Boyera32c0552010-02-19 13:23:36 -08005598 ioarcb->data_transfer_length = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005599 ioarcb->read_data_transfer_length = 0;
Wayne Boyera32c0552010-02-19 13:23:36 -08005600 ioarcb->ioadl_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005601 ioarcb->read_ioadl_len = 0;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005602 ioasa->hdr.ioasc = 0;
5603 ioasa->hdr.residual_data_len = 0;
Wayne Boyera32c0552010-02-19 13:23:36 -08005604
5605 if (ipr_cmd->ioa_cfg->sis64)
5606 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5607 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5608 else {
5609 ioarcb->write_ioadl_addr =
5610 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5611 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5612 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005613}
5614
5615/**
5616 * ipr_erp_request_sense - Send request sense to a device
5617 * @ipr_cmd: ipr command struct
5618 *
5619 * This function sends a request sense to a device as a result
5620 * of a check condition.
5621 *
5622 * Return value:
5623 * nothing
5624 **/
5625static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5626{
5627 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005628 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005629
5630 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5631 ipr_erp_done(ipr_cmd);
5632 return;
5633 }
5634
5635 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5636
5637 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5638 cmd_pkt->cdb[0] = REQUEST_SENSE;
5639 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5640 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5641 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5642 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5643
Wayne Boyera32c0552010-02-19 13:23:36 -08005644 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5645 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005646
5647 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5648 IPR_REQUEST_SENSE_TIMEOUT * 2);
5649}
5650
5651/**
5652 * ipr_erp_cancel_all - Send cancel all to a device
5653 * @ipr_cmd: ipr command struct
5654 *
5655 * This function sends a cancel all to a device to clear the
5656 * queue. If we are running TCQ on the device, QERR is set to 1,
5657 * which means all outstanding ops have been dropped on the floor.
5658 * Cancel all will return them to us.
5659 *
5660 * Return value:
5661 * nothing
5662 **/
5663static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5664{
5665 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5666 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5667 struct ipr_cmd_pkt *cmd_pkt;
5668
5669 res->in_erp = 1;
5670
5671 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5672
5673 if (!scsi_get_tag_type(scsi_cmd->device)) {
5674 ipr_erp_request_sense(ipr_cmd);
5675 return;
5676 }
5677
5678 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5679 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5680 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5681
5682 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5683 IPR_CANCEL_ALL_TIMEOUT);
5684}
5685
5686/**
5687 * ipr_dump_ioasa - Dump contents of IOASA
5688 * @ioa_cfg: ioa config struct
5689 * @ipr_cmd: ipr command struct
Brian Kingfe964d02006-03-29 09:37:29 -06005690 * @res: resource entry struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07005691 *
5692 * This function is invoked by the interrupt handler when ops
5693 * fail. It will log the IOASA if appropriate. Only called
5694 * for GPDD ops.
5695 *
5696 * Return value:
5697 * none
5698 **/
5699static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
Brian Kingfe964d02006-03-29 09:37:29 -06005700 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005701{
5702 int i;
5703 u16 data_len;
Brian Kingb0692dd2007-03-29 12:43:09 -05005704 u32 ioasc, fd_ioasc;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005705 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005706 __be32 *ioasa_data = (__be32 *)ioasa;
5707 int error_index;
5708
Wayne Boyer96d21f02010-05-10 09:13:27 -07005709 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5710 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005711
5712 if (0 == ioasc)
5713 return;
5714
5715 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5716 return;
5717
Brian Kingb0692dd2007-03-29 12:43:09 -05005718 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5719 error_index = ipr_get_error(fd_ioasc);
5720 else
5721 error_index = ipr_get_error(ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005722
5723 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5724 /* Don't log an error if the IOA already logged one */
Wayne Boyer96d21f02010-05-10 09:13:27 -07005725 if (ioasa->hdr.ilid != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005726 return;
5727
Brian Kingcc9bd5d2007-03-29 12:43:01 -05005728 if (!ipr_is_gscsi(res))
5729 return;
5730
Linus Torvalds1da177e2005-04-16 15:20:36 -07005731 if (ipr_error_table[error_index].log_ioasa == 0)
5732 return;
5733 }
5734
Brian Kingfe964d02006-03-29 09:37:29 -06005735 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005736
Wayne Boyer96d21f02010-05-10 09:13:27 -07005737 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5738 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5739 data_len = sizeof(struct ipr_ioasa64);
5740 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005741 data_len = sizeof(struct ipr_ioasa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005742
5743 ipr_err("IOASA Dump:\n");
5744
5745 for (i = 0; i < data_len / 4; i += 4) {
5746 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5747 be32_to_cpu(ioasa_data[i]),
5748 be32_to_cpu(ioasa_data[i+1]),
5749 be32_to_cpu(ioasa_data[i+2]),
5750 be32_to_cpu(ioasa_data[i+3]));
5751 }
5752}
5753
5754/**
5755 * ipr_gen_sense - Generate SCSI sense data from an IOASA
5756 * @ioasa: IOASA
 5757 * @ipr_cmd:	ipr command struct
5759 * Return value:
5760 * none
5761 **/
5762static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5763{
5764 u32 failing_lba;
5765 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5766 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005767 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5768 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005769
5770 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5771
5772 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5773 return;
5774
5775 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5776
5777 if (ipr_is_vset_device(res) &&
5778 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5779 ioasa->u.vset.failing_lba_hi != 0) {
5780 sense_buf[0] = 0x72;
5781 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5782 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5783 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5784
5785 sense_buf[7] = 12;
5786 sense_buf[8] = 0;
5787 sense_buf[9] = 0x0A;
5788 sense_buf[10] = 0x80;
5789
5790 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5791
5792 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5793 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5794 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5795 sense_buf[15] = failing_lba & 0x000000ff;
5796
5797 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5798
5799 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5800 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5801 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5802 sense_buf[19] = failing_lba & 0x000000ff;
5803 } else {
5804 sense_buf[0] = 0x70;
5805 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5806 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5807 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5808
5809 /* Illegal request */
5810 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
Wayne Boyer96d21f02010-05-10 09:13:27 -07005811 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005812 sense_buf[7] = 10; /* additional length */
5813
5814 /* IOARCB was in error */
5815 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5816 sense_buf[15] = 0xC0;
5817 else /* Parameter data was invalid */
5818 sense_buf[15] = 0x80;
5819
5820 sense_buf[16] =
5821 ((IPR_FIELD_POINTER_MASK &
Wayne Boyer96d21f02010-05-10 09:13:27 -07005822 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005823 sense_buf[17] =
5824 (IPR_FIELD_POINTER_MASK &
Wayne Boyer96d21f02010-05-10 09:13:27 -07005825 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005826 } else {
5827 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5828 if (ipr_is_vset_device(res))
5829 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5830 else
5831 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5832
5833 sense_buf[0] |= 0x80; /* Or in the Valid bit */
5834 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5835 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5836 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5837 sense_buf[6] = failing_lba & 0x000000ff;
5838 }
5839
5840 sense_buf[7] = 6; /* additional length */
5841 }
5842 }
5843}
5844
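/*
 * Illustrative aside (not part of the driver): a hedged sketch of how the
 * sense data built by ipr_gen_sense() above would be decoded again.  In
 * fixed format (0x70) the failing LBA sits in the 32-bit Information
 * field (bytes 3-6) guarded by the Valid bit; in descriptor format (0x72)
 * it sits in the 8-byte Information descriptor that starts at byte 8.
 * example_sense_to_lba() is a hypothetical helper, not a driver or kernel
 * API, and it only looks at the first descriptor.
 */
static int example_sense_to_lba(const u8 *sense, int len, u64 *lba)
{
	int i;

	if (len < 20)
		return -1;

	if ((sense[0] & 0x7f) == 0x70 && (sense[0] & 0x80)) {
		/* Fixed format: Information field in bytes 3-6. */
		*lba = ((u64)sense[3] << 24) | (sense[4] << 16) |
		       (sense[5] << 8) | sense[6];
		return 0;
	}

	if ((sense[0] & 0x7f) == 0x72 && sense[8] == 0x00 &&
	    (sense[10] & 0x80)) {
		/* Descriptor format: 8-byte Information field at bytes 12-19. */
		*lba = 0;
		for (i = 12; i < 20; i++)
			*lba = (*lba << 8) | sense[i];
		return 0;
	}

	return -1;
}
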
5845/**
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005846 * ipr_get_autosense - Copy autosense data to sense buffer
5847 * @ipr_cmd: ipr command struct
5848 *
5849 * This function copies the autosense buffer to the buffer
5850 * in the scsi_cmd, if there is autosense available.
5851 *
5852 * Return value:
5853 * 1 if autosense was available / 0 if not
5854 **/
5855static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5856{
Wayne Boyer96d21f02010-05-10 09:13:27 -07005857 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5858 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005859
Wayne Boyer96d21f02010-05-10 09:13:27 -07005860 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005861 return 0;
5862
Wayne Boyer96d21f02010-05-10 09:13:27 -07005863 if (ipr_cmd->ioa_cfg->sis64)
5864 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5865 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5866 SCSI_SENSE_BUFFERSIZE));
5867 else
5868 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5869 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5870 SCSI_SENSE_BUFFERSIZE));
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005871 return 1;
5872}
5873
5874/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005875 * ipr_erp_start - Process an error response for a SCSI op
5876 * @ioa_cfg: ioa config struct
5877 * @ipr_cmd: ipr command struct
5878 *
5879 * This function determines whether or not to initiate ERP
5880 * on the affected device.
5881 *
5882 * Return value:
5883 * nothing
5884 **/
5885static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5886 struct ipr_cmnd *ipr_cmd)
5887{
5888 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5889 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005890 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian King8a048992007-04-26 16:00:10 -05005891 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005892
5893 if (!res) {
5894 ipr_scsi_eh_done(ipr_cmd);
5895 return;
5896 }
5897
Brian King8a048992007-04-26 16:00:10 -05005898 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005899 ipr_gen_sense(ipr_cmd);
5900
Brian Kingcc9bd5d2007-03-29 12:43:01 -05005901 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5902
Brian King8a048992007-04-26 16:00:10 -05005903 switch (masked_ioasc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005904 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005905 if (ipr_is_naca_model(res))
5906 scsi_cmd->result |= (DID_ABORT << 16);
5907 else
5908 scsi_cmd->result |= (DID_IMM_RETRY << 16);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005909 break;
5910 case IPR_IOASC_IR_RESOURCE_HANDLE:
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06005911 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005912 scsi_cmd->result |= (DID_NO_CONNECT << 16);
5913 break;
5914 case IPR_IOASC_HW_SEL_TIMEOUT:
5915 scsi_cmd->result |= (DID_NO_CONNECT << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005916 if (!ipr_is_naca_model(res))
5917 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005918 break;
5919 case IPR_IOASC_SYNC_REQUIRED:
5920 if (!res->in_erp)
5921 res->needs_sync_complete = 1;
5922 scsi_cmd->result |= (DID_IMM_RETRY << 16);
5923 break;
5924 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06005925 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005926 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
5927 break;
5928 case IPR_IOASC_BUS_WAS_RESET:
5929 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
5930 /*
5931 * Report the bus reset and ask for a retry. The device
 5932	 * will give CC/UA on the next command.
5933 */
5934 if (!res->resetting_device)
5935 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
5936 scsi_cmd->result |= (DID_ERROR << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005937 if (!ipr_is_naca_model(res))
5938 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005939 break;
5940 case IPR_IOASC_HW_DEV_BUS_STATUS:
5941 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
5942 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005943 if (!ipr_get_autosense(ipr_cmd)) {
5944 if (!ipr_is_naca_model(res)) {
5945 ipr_erp_cancel_all(ipr_cmd);
5946 return;
5947 }
5948 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005949 }
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005950 if (!ipr_is_naca_model(res))
5951 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005952 break;
5953 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
5954 break;
5955 default:
Brian King5b7304f2006-08-02 14:57:51 -05005956 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5957 scsi_cmd->result |= (DID_ERROR << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005958 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005959 res->needs_sync_complete = 1;
5960 break;
5961 }
5962
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005963 scsi_dma_unmap(ipr_cmd->scsi_cmd);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005964 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005965 scsi_cmd->scsi_done(scsi_cmd);
5966}
5967
5968/**
5969 * ipr_scsi_done - mid-layer done function
5970 * @ipr_cmd: ipr command struct
5971 *
5972 * This function is invoked by the interrupt handler for
5973 * ops generated by the SCSI mid-layer
5974 *
5975 * Return value:
5976 * none
5977 **/
5978static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5979{
5980 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5981 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005982 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005983 unsigned long hrrq_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005984
Wayne Boyer96d21f02010-05-10 09:13:27 -07005985 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005986
5987 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
Brian King172cd6e2012-07-17 08:14:40 -05005988 scsi_dma_unmap(scsi_cmd);
5989
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005990 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005991 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005992 scsi_cmd->scsi_done(scsi_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005993 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
Brian King172cd6e2012-07-17 08:14:40 -05005994 } else {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005995 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005996 ipr_erp_start(ioa_cfg, ipr_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005997 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
Brian King172cd6e2012-07-17 08:14:40 -05005998 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005999}
6000
6001/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006002 * ipr_queuecommand - Queue a mid-layer request
Brian King00bfef22012-07-17 08:13:52 -05006003 * @shost: scsi host struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07006004 * @scsi_cmd: scsi command struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07006005 *
6006 * This function queues a request generated by the mid-layer.
6007 *
6008 * Return value:
6009 * 0 on success
6010 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6011 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6012 **/
Brian King00bfef22012-07-17 08:13:52 -05006013static int ipr_queuecommand(struct Scsi_Host *shost,
6014 struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006015{
6016 struct ipr_ioa_cfg *ioa_cfg;
6017 struct ipr_resource_entry *res;
6018 struct ipr_ioarcb *ioarcb;
6019 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006020 unsigned long hrrq_flags, lock_flags;
Dan Carpenterd12f1572012-07-30 11:18:22 +03006021 int rc;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006022 struct ipr_hrr_queue *hrrq;
6023 int hrrq_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006024
Brian King00bfef22012-07-17 08:13:52 -05006025 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6026
Linus Torvalds1da177e2005-04-16 15:20:36 -07006027 scsi_cmd->result = (DID_OK << 16);
Brian King00bfef22012-07-17 08:13:52 -05006028 res = scsi_cmd->device->hostdata;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006029
6030 if (ipr_is_gata(res) && res->sata_port) {
6031 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6032 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6033 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6034 return rc;
6035 }
6036
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006037 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6038 hrrq = &ioa_cfg->hrrq[hrrq_id];
Linus Torvalds1da177e2005-04-16 15:20:36 -07006039
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006040 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006041 /*
6042 * We are currently blocking all devices due to a host reset
6043 * We have told the host to stop giving us new requests, but
6044 * ERP ops don't count. FIXME
6045 */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006046 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead)) {
6047 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006048 return SCSI_MLQUEUE_HOST_BUSY;
Brian King00bfef22012-07-17 08:13:52 -05006049 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006050
6051 /*
6052 * FIXME - Create scsi_set_host_offline interface
6053 * and the ioa_is_dead check can be removed
6054 */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006055 if (unlikely(hrrq->ioa_is_dead || !res)) {
6056 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006057 goto err_nodev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006058 }
6059
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006060 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6061 if (ipr_cmd == NULL) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006062 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006063 return SCSI_MLQUEUE_HOST_BUSY;
6064 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006065 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006066
Brian King172cd6e2012-07-17 08:14:40 -05006067 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006068 ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006069
6070 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6071 ipr_cmd->scsi_cmd = scsi_cmd;
Brian King172cd6e2012-07-17 08:14:40 -05006072 ipr_cmd->done = ipr_scsi_eh_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006073
6074 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6075 if (scsi_cmd->underflow == 0)
6076 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6077
Linus Torvalds1da177e2005-04-16 15:20:36 -07006078 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
Wayne Boyerab6c10b2011-03-31 09:56:10 -07006079 if (ipr_is_gscsi(res))
6080 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006081 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6082 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
6083 }
6084
6085 if (scsi_cmd->cmnd[0] >= 0xC0 &&
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006086 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006087 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006088 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006089
Dan Carpenterd12f1572012-07-30 11:18:22 +03006090 if (ioa_cfg->sis64)
6091 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6092 else
6093 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006094
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006095 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6096 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006097 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006098 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006099 if (!rc)
6100 scsi_dma_unmap(scsi_cmd);
Brian Kinga5fb4072012-03-14 21:20:09 -05006101 return SCSI_MLQUEUE_HOST_BUSY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006102 }
6103
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006104 if (unlikely(hrrq->ioa_is_dead)) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006105 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006106 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006107 scsi_dma_unmap(scsi_cmd);
6108 goto err_nodev;
6109 }
6110
6111 ioarcb->res_handle = res->res_handle;
6112 if (res->needs_sync_complete) {
6113 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6114 res->needs_sync_complete = 0;
6115 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006116 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
Brian King00bfef22012-07-17 08:13:52 -05006117 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
Brian Kinga5fb4072012-03-14 21:20:09 -05006118 ipr_send_command(ipr_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006119 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006120 return 0;
6121
6122err_nodev:
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006123 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006124 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6125 scsi_cmd->result = (DID_NO_CONNECT << 16);
6126 scsi_cmd->scsi_done(scsi_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006127 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006128 return 0;
6129}
6130
6131/**
Brian King35a39692006-09-25 12:39:20 -05006132 * ipr_ioctl - IOCTL handler
6133 * @sdev: scsi device struct
6134 * @cmd: IOCTL cmd
6135 * @arg: IOCTL arg
6136 *
6137 * Return value:
6138 * 0 on success / other on failure
6139 **/
Adrian Bunkbd705f22006-11-21 10:28:48 -06006140static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
Brian King35a39692006-09-25 12:39:20 -05006141{
6142 struct ipr_resource_entry *res;
6143
6144 res = (struct ipr_resource_entry *)sdev->hostdata;
Brian King0ce3a7e2008-07-11 13:37:50 -05006145 if (res && ipr_is_gata(res)) {
6146 if (cmd == HDIO_GET_IDENTITY)
6147 return -ENOTTY;
Jeff Garzik94be9a52009-01-16 10:17:09 -05006148 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
Brian King0ce3a7e2008-07-11 13:37:50 -05006149 }
Brian King35a39692006-09-25 12:39:20 -05006150
6151 return -EINVAL;
6152}
6153
6154/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006155 * ipr_ioa_info - Get information about the card/driver
 6156 * @host:	scsi host struct
6157 *
6158 * Return value:
6159 * pointer to buffer with description string
6160 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006161static const char *ipr_ioa_info(struct Scsi_Host *host)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006162{
6163 static char buffer[512];
6164 struct ipr_ioa_cfg *ioa_cfg;
6165 unsigned long lock_flags = 0;
6166
6167 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6168
6169 spin_lock_irqsave(host->host_lock, lock_flags);
6170 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6171 spin_unlock_irqrestore(host->host_lock, lock_flags);
6172
6173 return buffer;
6174}
6175
6176static struct scsi_host_template driver_template = {
6177 .module = THIS_MODULE,
6178 .name = "IPR",
6179 .info = ipr_ioa_info,
Brian King35a39692006-09-25 12:39:20 -05006180 .ioctl = ipr_ioctl,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006181 .queuecommand = ipr_queuecommand,
6182 .eh_abort_handler = ipr_eh_abort,
6183 .eh_device_reset_handler = ipr_eh_dev_reset,
6184 .eh_host_reset_handler = ipr_eh_host_reset,
6185 .slave_alloc = ipr_slave_alloc,
6186 .slave_configure = ipr_slave_configure,
6187 .slave_destroy = ipr_slave_destroy,
Brian King35a39692006-09-25 12:39:20 -05006188 .target_alloc = ipr_target_alloc,
6189 .target_destroy = ipr_target_destroy,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006190 .change_queue_depth = ipr_change_queue_depth,
6191 .change_queue_type = ipr_change_queue_type,
6192 .bios_param = ipr_biosparam,
6193 .can_queue = IPR_MAX_COMMANDS,
6194 .this_id = -1,
6195 .sg_tablesize = IPR_MAX_SGLIST,
6196 .max_sectors = IPR_IOA_MAX_SECTORS,
6197 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6198 .use_clustering = ENABLE_CLUSTERING,
6199 .shost_attrs = ipr_ioa_attrs,
6200 .sdev_attrs = ipr_dev_attrs,
6201 .proc_name = IPR_NAME
6202};
6203
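/*
 * Illustrative aside (not part of the driver): a hedged sketch of how a
 * scsi_host_template like driver_template above is typically bound to a
 * Scsi_Host during probe.  The ipr driver's real adapter probe and
 * registration code lives elsewhere in this file; example_probe_sketch()
 * and the private-data size used here are hypothetical and only show the
 * generic scsi_host_alloc() / scsi_add_host() / scsi_scan_host() sequence.
 */
static int example_probe_sketch(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	int rc;

	/* Allocate a Scsi_Host carrying per-adapter private data. */
	shost = scsi_host_alloc(&driver_template, sizeof(struct ipr_ioa_cfg));
	if (!shost)
		return -ENOMEM;

	/* Hand the host to the SCSI midlayer, then scan for devices. */
	rc = scsi_add_host(shost, &pdev->dev);
	if (rc) {
		scsi_host_put(shost);
		return rc;
	}

	scsi_scan_host(shost);
	return 0;
}
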
Brian King35a39692006-09-25 12:39:20 -05006204/**
6205 * ipr_ata_phy_reset - libata phy_reset handler
6206 * @ap: ata port to reset
6207 *
6208 **/
6209static void ipr_ata_phy_reset(struct ata_port *ap)
6210{
6211 unsigned long flags;
6212 struct ipr_sata_port *sata_port = ap->private_data;
6213 struct ipr_resource_entry *res = sata_port->res;
6214 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6215 int rc;
6216
6217 ENTER;
6218 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006219 while (ioa_cfg->in_reset_reload) {
Brian King35a39692006-09-25 12:39:20 -05006220 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6221 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6222 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6223 }
6224
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006225 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
Brian King35a39692006-09-25 12:39:20 -05006226 goto out_unlock;
6227
6228 rc = ipr_device_reset(ioa_cfg, res);
6229
6230 if (rc) {
Tejun Heo3e4ec342010-05-10 21:41:30 +02006231 ap->link.device[0].class = ATA_DEV_NONE;
Brian King35a39692006-09-25 12:39:20 -05006232 goto out_unlock;
6233 }
6234
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006235 ap->link.device[0].class = res->ata_class;
6236 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
Tejun Heo3e4ec342010-05-10 21:41:30 +02006237 ap->link.device[0].class = ATA_DEV_NONE;
Brian King35a39692006-09-25 12:39:20 -05006238
6239out_unlock:
6240 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6241 LEAVE;
6242}
6243
6244/**
6245 * ipr_ata_post_internal - Cleanup after an internal command
6246 * @qc: ATA queued command
6247 *
6248 * Return value:
6249 * none
6250 **/
6251static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6252{
6253 struct ipr_sata_port *sata_port = qc->ap->private_data;
6254 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6255 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006256 struct ipr_hrr_queue *hrrq;
Brian King35a39692006-09-25 12:39:20 -05006257 unsigned long flags;
6258
6259 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006260 while (ioa_cfg->in_reset_reload) {
Brian King73d98ff2006-11-21 10:27:58 -06006261 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6262 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6263 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6264 }
6265
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006266 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006267 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006268 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6269 if (ipr_cmd->qc == qc) {
6270 ipr_device_reset(ioa_cfg, sata_port->res);
6271 break;
6272 }
Brian King35a39692006-09-25 12:39:20 -05006273 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006274 spin_unlock(&hrrq->_lock);
Brian King35a39692006-09-25 12:39:20 -05006275 }
6276 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6277}
6278
6279/**
Brian King35a39692006-09-25 12:39:20 -05006280 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6281 * @regs: destination
6282 * @tf: source ATA taskfile
6283 *
6284 * Return value:
6285 * none
6286 **/
6287static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6288 struct ata_taskfile *tf)
6289{
6290 regs->feature = tf->feature;
6291 regs->nsect = tf->nsect;
6292 regs->lbal = tf->lbal;
6293 regs->lbam = tf->lbam;
6294 regs->lbah = tf->lbah;
6295 regs->device = tf->device;
6296 regs->command = tf->command;
6297 regs->hob_feature = tf->hob_feature;
6298 regs->hob_nsect = tf->hob_nsect;
6299 regs->hob_lbal = tf->hob_lbal;
6300 regs->hob_lbam = tf->hob_lbam;
6301 regs->hob_lbah = tf->hob_lbah;
6302 regs->ctl = tf->ctl;
6303}
6304
6305/**
6306 * ipr_sata_done - done function for SATA commands
6307 * @ipr_cmd: ipr command struct
6308 *
6309 * This function is invoked by the interrupt handler for
6310 * ops generated by the SCSI mid-layer to SATA devices
6311 *
6312 * Return value:
6313 * none
6314 **/
6315static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6316{
6317 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6318 struct ata_queued_cmd *qc = ipr_cmd->qc;
6319 struct ipr_sata_port *sata_port = qc->ap->private_data;
6320 struct ipr_resource_entry *res = sata_port->res;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006321 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian King35a39692006-09-25 12:39:20 -05006322
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006323 spin_lock(&ipr_cmd->hrrq->_lock);
Wayne Boyer96d21f02010-05-10 09:13:27 -07006324 if (ipr_cmd->ioa_cfg->sis64)
6325 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6326 sizeof(struct ipr_ioasa_gata));
6327 else
6328 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6329 sizeof(struct ipr_ioasa_gata));
Brian King35a39692006-09-25 12:39:20 -05006330 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6331
Wayne Boyer96d21f02010-05-10 09:13:27 -07006332 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006333 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
Brian King35a39692006-09-25 12:39:20 -05006334
6335 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
Wayne Boyer96d21f02010-05-10 09:13:27 -07006336 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
Brian King35a39692006-09-25 12:39:20 -05006337 else
Wayne Boyer96d21f02010-05-10 09:13:27 -07006338 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006339 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006340 spin_unlock(&ipr_cmd->hrrq->_lock);
Brian King35a39692006-09-25 12:39:20 -05006341 ata_qc_complete(qc);
6342}
6343
6344/**
Wayne Boyera32c0552010-02-19 13:23:36 -08006345 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6346 * @ipr_cmd: ipr command struct
6347 * @qc: ATA queued command
6348 *
6349 **/
6350static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6351 struct ata_queued_cmd *qc)
6352{
6353 u32 ioadl_flags = 0;
6354 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6355 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
6356 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6357 int len = qc->nbytes;
6358 struct scatterlist *sg;
6359 unsigned int si;
6360 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6361
6362 if (len == 0)
6363 return;
6364
6365 if (qc->dma_dir == DMA_TO_DEVICE) {
6366 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6367 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6368 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6369 ioadl_flags = IPR_IOADL_FLAGS_READ;
6370
6371 ioarcb->data_transfer_length = cpu_to_be32(len);
6372 ioarcb->ioadl_len =
6373 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6374 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6375 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
6376
6377 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6378 ioadl64->flags = cpu_to_be32(ioadl_flags);
6379 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6380 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6381
6382 last_ioadl64 = ioadl64;
6383 ioadl64++;
6384 }
6385
6386 if (likely(last_ioadl64))
6387 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6388}
6389
6390/**
Brian King35a39692006-09-25 12:39:20 -05006391 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6392 * @ipr_cmd: ipr command struct
6393 * @qc: ATA queued command
6394 *
6395 **/
6396static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6397 struct ata_queued_cmd *qc)
6398{
6399 u32 ioadl_flags = 0;
6400 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08006401 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006402 struct ipr_ioadl_desc *last_ioadl = NULL;
James Bottomleydde20202008-02-19 11:36:56 +01006403 int len = qc->nbytes;
Brian King35a39692006-09-25 12:39:20 -05006404 struct scatterlist *sg;
Tejun Heoff2aeb12007-12-05 16:43:11 +09006405 unsigned int si;
Brian King35a39692006-09-25 12:39:20 -05006406
6407 if (len == 0)
6408 return;
6409
6410 if (qc->dma_dir == DMA_TO_DEVICE) {
6411 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6412 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08006413 ioarcb->data_transfer_length = cpu_to_be32(len);
6414 ioarcb->ioadl_len =
Brian King35a39692006-09-25 12:39:20 -05006415 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6416 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6417 ioadl_flags = IPR_IOADL_FLAGS_READ;
6418 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6419 ioarcb->read_ioadl_len =
6420 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6421 }
6422
Tejun Heoff2aeb12007-12-05 16:43:11 +09006423 for_each_sg(qc->sg, sg, qc->n_elem, si) {
Brian King35a39692006-09-25 12:39:20 -05006424 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6425 ioadl->address = cpu_to_be32(sg_dma_address(sg));
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006426
6427 last_ioadl = ioadl;
6428 ioadl++;
Brian King35a39692006-09-25 12:39:20 -05006429 }
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006430
6431 if (likely(last_ioadl))
6432 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
Brian King35a39692006-09-25 12:39:20 -05006433}
6434
6435/**
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006436 * ipr_qc_defer - Reserve a free ipr_cmd for a queued command
 6437 * @qc:	queued command
 6438 *
 6439 * Return value:
 6440 *	0 if the qc can be issued now / ATA_DEFER_LINK if it must be deferred
6441 **/
6442static int ipr_qc_defer(struct ata_queued_cmd *qc)
6443{
6444 struct ata_port *ap = qc->ap;
6445 struct ipr_sata_port *sata_port = ap->private_data;
6446 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6447 struct ipr_cmnd *ipr_cmd;
6448 struct ipr_hrr_queue *hrrq;
6449 int hrrq_id;
6450
6451 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6452 hrrq = &ioa_cfg->hrrq[hrrq_id];
6453
6454 qc->lldd_task = NULL;
6455 spin_lock(&hrrq->_lock);
6456 if (unlikely(hrrq->ioa_is_dead)) {
6457 spin_unlock(&hrrq->_lock);
6458 return 0;
6459 }
6460
6461 if (unlikely(!hrrq->allow_cmds)) {
6462 spin_unlock(&hrrq->_lock);
6463 return ATA_DEFER_LINK;
6464 }
6465
6466 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6467 if (ipr_cmd == NULL) {
6468 spin_unlock(&hrrq->_lock);
6469 return ATA_DEFER_LINK;
6470 }
6471
6472 qc->lldd_task = ipr_cmd;
6473 spin_unlock(&hrrq->_lock);
6474 return 0;
6475}
6476
6477/**
Brian King35a39692006-09-25 12:39:20 -05006478 * ipr_qc_issue - Issue a SATA qc to a device
6479 * @qc: queued command
6480 *
6481 * Return value:
6482 * 0 if success
6483 **/
6484static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6485{
6486 struct ata_port *ap = qc->ap;
6487 struct ipr_sata_port *sata_port = ap->private_data;
6488 struct ipr_resource_entry *res = sata_port->res;
6489 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6490 struct ipr_cmnd *ipr_cmd;
6491 struct ipr_ioarcb *ioarcb;
6492 struct ipr_ioarcb_ata_regs *regs;
6493
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006494 if (qc->lldd_task == NULL)
6495 ipr_qc_defer(qc);
6496
6497 ipr_cmd = qc->lldd_task;
6498 if (ipr_cmd == NULL)
Brian King0feeed82007-03-29 12:43:43 -05006499 return AC_ERR_SYSTEM;
Brian King35a39692006-09-25 12:39:20 -05006500
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006501 qc->lldd_task = NULL;
6502 spin_lock(&ipr_cmd->hrrq->_lock);
6503 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6504 ipr_cmd->hrrq->ioa_is_dead)) {
6505 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6506 spin_unlock(&ipr_cmd->hrrq->_lock);
6507 return AC_ERR_SYSTEM;
6508 }
6509
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006510 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
Brian King35a39692006-09-25 12:39:20 -05006511 ioarcb = &ipr_cmd->ioarcb;
Brian King35a39692006-09-25 12:39:20 -05006512
Wayne Boyera32c0552010-02-19 13:23:36 -08006513 if (ioa_cfg->sis64) {
6514 regs = &ipr_cmd->i.ata_ioadl.regs;
6515 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6516 } else
6517 regs = &ioarcb->u.add_data.u.regs;
6518
6519 memset(regs, 0, sizeof(*regs));
6520 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
Brian King35a39692006-09-25 12:39:20 -05006521
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006522 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Brian King35a39692006-09-25 12:39:20 -05006523 ipr_cmd->qc = qc;
6524 ipr_cmd->done = ipr_sata_done;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006525 ipr_cmd->ioarcb.res_handle = res->res_handle;
Brian King35a39692006-09-25 12:39:20 -05006526 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6527 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6528 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
James Bottomleydde20202008-02-19 11:36:56 +01006529 ipr_cmd->dma_use_sg = qc->n_elem;
Brian King35a39692006-09-25 12:39:20 -05006530
Wayne Boyera32c0552010-02-19 13:23:36 -08006531 if (ioa_cfg->sis64)
6532 ipr_build_ata_ioadl64(ipr_cmd, qc);
6533 else
6534 ipr_build_ata_ioadl(ipr_cmd, qc);
6535
Brian King35a39692006-09-25 12:39:20 -05006536 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6537 ipr_copy_sata_tf(regs, &qc->tf);
6538 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006539 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
Brian King35a39692006-09-25 12:39:20 -05006540
6541 switch (qc->tf.protocol) {
6542 case ATA_PROT_NODATA:
6543 case ATA_PROT_PIO:
6544 break;
6545
6546 case ATA_PROT_DMA:
6547 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6548 break;
6549
Tejun Heo0dc36882007-12-18 16:34:43 -05006550 case ATAPI_PROT_PIO:
6551 case ATAPI_PROT_NODATA:
Brian King35a39692006-09-25 12:39:20 -05006552 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6553 break;
6554
Tejun Heo0dc36882007-12-18 16:34:43 -05006555 case ATAPI_PROT_DMA:
Brian King35a39692006-09-25 12:39:20 -05006556 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6557 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6558 break;
6559
6560 default:
6561 WARN_ON(1);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006562 spin_unlock(&ipr_cmd->hrrq->_lock);
Brian King0feeed82007-03-29 12:43:43 -05006563 return AC_ERR_INVALID;
Brian King35a39692006-09-25 12:39:20 -05006564 }
6565
Wayne Boyera32c0552010-02-19 13:23:36 -08006566 ipr_send_command(ipr_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006567 spin_unlock(&ipr_cmd->hrrq->_lock);
Wayne Boyera32c0552010-02-19 13:23:36 -08006568
Brian King35a39692006-09-25 12:39:20 -05006569 return 0;
6570}
6571
6572/**
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09006573 * ipr_qc_fill_rtf - Read result TF
6574 * @qc: ATA queued command
6575 *
6576 * Return value:
6577 * true
6578 **/
6579static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6580{
6581 struct ipr_sata_port *sata_port = qc->ap->private_data;
6582 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6583 struct ata_taskfile *tf = &qc->result_tf;
6584
6585 tf->feature = g->error;
6586 tf->nsect = g->nsect;
6587 tf->lbal = g->lbal;
6588 tf->lbam = g->lbam;
6589 tf->lbah = g->lbah;
6590 tf->device = g->device;
6591 tf->command = g->status;
6592 tf->hob_nsect = g->hob_nsect;
6593 tf->hob_lbal = g->hob_lbal;
6594 tf->hob_lbam = g->hob_lbam;
6595 tf->hob_lbah = g->hob_lbah;
6596 tf->ctl = g->alt_status;
6597
6598 return true;
6599}
6600
Brian King35a39692006-09-25 12:39:20 -05006601static struct ata_port_operations ipr_sata_ops = {
Brian King35a39692006-09-25 12:39:20 -05006602 .phy_reset = ipr_ata_phy_reset,
Tejun Heoa1efdab2008-03-25 12:22:50 +09006603 .hardreset = ipr_sata_reset,
Brian King35a39692006-09-25 12:39:20 -05006604 .post_internal_cmd = ipr_ata_post_internal,
Brian King35a39692006-09-25 12:39:20 -05006605 .qc_prep = ata_noop_qc_prep,
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006606 .qc_defer = ipr_qc_defer,
Brian King35a39692006-09-25 12:39:20 -05006607 .qc_issue = ipr_qc_issue,
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09006608 .qc_fill_rtf = ipr_qc_fill_rtf,
Brian King35a39692006-09-25 12:39:20 -05006609 .port_start = ata_sas_port_start,
6610 .port_stop = ata_sas_port_stop
6611};
6612
6613static struct ata_port_info sata_port_info = {
Sergei Shtylyov9cbe0562011-02-04 22:05:48 +03006614 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
Sergei Shtylyov0f2e0332011-01-21 20:32:01 +03006615 .pio_mask = ATA_PIO4_ONLY,
6616 .mwdma_mask = ATA_MWDMA2,
6617 .udma_mask = ATA_UDMA6,
Brian King35a39692006-09-25 12:39:20 -05006618 .port_ops = &ipr_sata_ops
6619};
6620
Linus Torvalds1da177e2005-04-16 15:20:36 -07006621#ifdef CONFIG_PPC_PSERIES
6622static const u16 ipr_blocked_processors[] = {
Michael Ellermand3dbeef2012-08-19 21:44:01 +00006623 PVR_NORTHSTAR,
6624 PVR_PULSAR,
6625 PVR_POWER4,
6626 PVR_ICESTAR,
6627 PVR_SSTAR,
6628 PVR_POWER4p,
6629 PVR_630,
6630 PVR_630p
Linus Torvalds1da177e2005-04-16 15:20:36 -07006631};
6632
6633/**
6634 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6635 * @ioa_cfg: ioa cfg struct
6636 *
6637 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6638 * certain pSeries hardware. This function determines if the given
 6639 * adapter is in one of these configurations or not.
6640 *
6641 * Return value:
6642 * 1 if adapter is not supported / 0 if adapter is supported
6643 **/
6644static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6645{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006646 int i;
6647
Auke Kok44c10132007-06-08 15:46:36 -07006648 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006649 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
Michael Ellermand3dbeef2012-08-19 21:44:01 +00006650 if (pvr_version_is(ipr_blocked_processors[i]))
Auke Kok44c10132007-06-08 15:46:36 -07006651 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006652 }
6653 }
6654 return 0;
6655}
6656#else
6657#define ipr_invalid_adapter(ioa_cfg) 0
6658#endif
6659
6660/**
6661 * ipr_ioa_bringdown_done - IOA bring down completion.
6662 * @ipr_cmd: ipr command struct
6663 *
6664 * This function processes the completion of an adapter bring down.
6665 * It wakes any reset sleepers.
6666 *
6667 * Return value:
6668 * IPR_RC_JOB_RETURN
6669 **/
6670static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6671{
6672 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6673
6674 ENTER;
6675 ioa_cfg->in_reset_reload = 0;
6676 ioa_cfg->reset_retries = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006677 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006678 wake_up_all(&ioa_cfg->reset_wait_q);
6679
6680 spin_unlock_irq(ioa_cfg->host->host_lock);
6681 scsi_unblock_requests(ioa_cfg->host);
6682 spin_lock_irq(ioa_cfg->host->host_lock);
6683 LEAVE;
6684
6685 return IPR_RC_JOB_RETURN;
6686}
6687
6688/**
6689 * ipr_ioa_reset_done - IOA reset completion.
6690 * @ipr_cmd: ipr command struct
6691 *
6692 * This function processes the completion of an adapter reset.
6693 * It schedules any necessary mid-layer add/removes and
6694 * wakes any reset sleepers.
6695 *
6696 * Return value:
6697 * IPR_RC_JOB_RETURN
6698 **/
6699static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6700{
6701 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6702 struct ipr_resource_entry *res;
6703 struct ipr_hostrcb *hostrcb, *temp;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006704 int i = 0, j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006705
6706 ENTER;
6707 ioa_cfg->in_reset_reload = 0;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006708 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6709 spin_lock(&ioa_cfg->hrrq[j]._lock);
6710 ioa_cfg->hrrq[j].allow_cmds = 1;
6711 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6712 }
6713 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006714 ioa_cfg->reset_cmd = NULL;
brking@us.ibm.com3d1d0da2005-11-01 17:01:54 -06006715 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006716
6717 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6718 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6719 ipr_trace;
6720 break;
6721 }
6722 }
6723 schedule_work(&ioa_cfg->work_q);
6724
6725 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6726 list_del(&hostrcb->queue);
6727 if (i++ < IPR_NUM_LOG_HCAMS)
6728 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6729 else
6730 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6731 }
6732
Brian King6bb04172007-04-26 16:00:08 -05006733 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006734 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6735
6736 ioa_cfg->reset_retries = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006737 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006738 wake_up_all(&ioa_cfg->reset_wait_q);
6739
Mark Nelson30237852008-12-10 12:23:20 +11006740 spin_unlock(ioa_cfg->host->host_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006741 scsi_unblock_requests(ioa_cfg->host);
Mark Nelson30237852008-12-10 12:23:20 +11006742 spin_lock(ioa_cfg->host->host_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006743
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006744 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006745 scsi_block_requests(ioa_cfg->host);
6746
6747 LEAVE;
6748 return IPR_RC_JOB_RETURN;
6749}
6750
6751/**
6752 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6753 * @supported_dev: supported device struct
6754 * @vpids: vendor product id struct
6755 *
6756 * Return value:
6757 * none
6758 **/
6759static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6760 struct ipr_std_inq_vpids *vpids)
6761{
6762 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6763 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6764 supported_dev->num_records = 1;
6765 supported_dev->data_length =
6766 cpu_to_be16(sizeof(struct ipr_supported_device));
6767 supported_dev->reserved = 0;
6768}
6769
6770/**
6771 * ipr_set_supported_devs - Send Set Supported Devices for a device
6772 * @ipr_cmd: ipr command struct
6773 *
Wayne Boyera32c0552010-02-19 13:23:36 -08006774 * This function sends a Set Supported Devices command to the adapter
Linus Torvalds1da177e2005-04-16 15:20:36 -07006775 *
6776 * Return value:
6777 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6778 **/
6779static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6780{
6781 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6782 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006783 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6784 struct ipr_resource_entry *res = ipr_cmd->u.res;
6785
6786 ipr_cmd->job_step = ipr_ioa_reset_done;
6787
6788 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
Brian Kinge4fbf442006-03-29 09:37:22 -06006789 if (!ipr_is_scsi_disk(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006790 continue;
6791
6792 ipr_cmd->u.res = res;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006793 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006794
6795 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6796 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6797 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6798
6799 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006800 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006801 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6802 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6803
Wayne Boyera32c0552010-02-19 13:23:36 -08006804 ipr_init_ioadl(ipr_cmd,
6805 ioa_cfg->vpd_cbs_dma +
6806 offsetof(struct ipr_misc_cbs, supp_dev),
6807 sizeof(struct ipr_supported_device),
6808 IPR_IOADL_FLAGS_WRITE_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006809
6810 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6811 IPR_SET_SUP_DEVICE_TIMEOUT);
6812
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006813 if (!ioa_cfg->sis64)
6814 ipr_cmd->job_step = ipr_set_supported_devs;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006815 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006816 return IPR_RC_JOB_RETURN;
6817 }
6818
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006819 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006820 return IPR_RC_JOB_CONTINUE;
6821}
6822
6823/**
6824 * ipr_get_mode_page - Locate specified mode page
6825 * @mode_pages: mode page buffer
6826 * @page_code: page code to find
6827 * @len: minimum required length for mode page
6828 *
6829 * Return value:
6830 * pointer to mode page / NULL on failure
6831 **/
6832static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6833 u32 page_code, u32 len)
6834{
6835 struct ipr_mode_page_hdr *mode_hdr;
6836 u32 page_length;
6837 u32 length;
6838
6839 if (!mode_pages || (mode_pages->hdr.length == 0))
6840 return NULL;
6841
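	/*
	 * hdr.length excludes the length byte itself, so hdr.length + 1 is
	 * the total mode data length.  Subtracting the 4-byte mode parameter
	 * header and the block descriptors leaves the bytes occupied by the
	 * mode pages that the loop below walks.
	 */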
6842 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6843 mode_hdr = (struct ipr_mode_page_hdr *)
6844 (mode_pages->data + mode_pages->hdr.block_desc_len);
6845
6846 while (length) {
6847 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6848 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6849 return mode_hdr;
6850 break;
6851 } else {
6852 page_length = (sizeof(struct ipr_mode_page_hdr) +
6853 mode_hdr->page_length);
6854 length -= page_length;
6855 mode_hdr = (struct ipr_mode_page_hdr *)
6856 ((unsigned long)mode_hdr + page_length);
6857 }
6858 }
6859 return NULL;
6860}
6861
6862/**
6863 * ipr_check_term_power - Check for term power errors
6864 * @ioa_cfg: ioa config struct
6865 * @mode_pages: IOAFP mode pages buffer
6866 *
6867 * Check the IOAFP's mode page 28 for term power errors
6868 *
6869 * Return value:
6870 * nothing
6871 **/
6872static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6873 struct ipr_mode_pages *mode_pages)
6874{
6875 int i;
6876 int entry_length;
6877 struct ipr_dev_bus_entry *bus;
6878 struct ipr_mode_page28 *mode_page;
6879
6880 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6881 sizeof(struct ipr_mode_page28));
6882
6883 entry_length = mode_page->entry_length;
6884
6885 bus = mode_page->bus;
6886
6887 for (i = 0; i < mode_page->num_entries; i++) {
6888 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6889 dev_err(&ioa_cfg->pdev->dev,
6890 "Term power is absent on scsi bus %d\n",
6891 bus->res_addr.bus);
6892 }
6893
6894 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6895 }
6896}
6897
6898/**
6899 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6900 * @ioa_cfg: ioa config struct
6901 *
6902 * Looks through the config table checking for SES devices. If
6903 * an SES device appears in the SES table with a maximum SCSI bus
6904 * speed, that bus's speed is limited accordingly.
6905 *
6906 * Return value:
6907 * none
6908 **/
6909static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6910{
6911 u32 max_xfer_rate;
6912 int i;
6913
6914 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6915 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6916 ioa_cfg->bus_attr[i].bus_width);
6917
6918 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6919 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6920 }
6921}
6922
6923/**
6924 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6925 * @ioa_cfg: ioa config struct
6926 * @mode_pages: mode page 28 buffer
6927 *
6928 * Updates mode page 28 based on driver configuration
6929 *
6930 * Return value:
6931 * none
6932 **/
6933static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006934 struct ipr_mode_pages *mode_pages)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006935{
6936 int i, entry_length;
6937 struct ipr_dev_bus_entry *bus;
6938 struct ipr_bus_attributes *bus_attr;
6939 struct ipr_mode_page28 *mode_page;
6940
6941 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6942 sizeof(struct ipr_mode_page28));
6943
6944 entry_length = mode_page->entry_length;
6945
6946 /* Loop for each device bus entry */
6947 for (i = 0, bus = mode_page->bus;
6948 i < mode_page->num_entries;
6949 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
6950 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
6951 dev_err(&ioa_cfg->pdev->dev,
6952 "Invalid resource address reported: 0x%08X\n",
6953 IPR_GET_PHYS_LOC(bus->res_addr));
6954 continue;
6955 }
6956
6957 bus_attr = &ioa_cfg->bus_attr[i];
6958 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
6959 bus->bus_width = bus_attr->bus_width;
6960 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
6961 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
6962 if (bus_attr->qas_enabled)
6963 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
6964 else
6965 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
6966 }
6967}
6968
6969/**
6970 * ipr_build_mode_select - Build a mode select command
6971 * @ipr_cmd: ipr command struct
6972 * @res_handle: resource handle to send command to
6973 * @parm: Byte 1 of Mode Select command
6974 * @dma_addr: DMA buffer address
6975 * @xfer_len: data transfer length
6976 *
6977 * Return value:
6978 * none
6979 **/
6980static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
Wayne Boyera32c0552010-02-19 13:23:36 -08006981 __be32 res_handle, u8 parm,
6982 dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006983{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006984 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6985
6986 ioarcb->res_handle = res_handle;
6987 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6988 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6989 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
6990 ioarcb->cmd_pkt.cdb[1] = parm;
6991 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6992
Wayne Boyera32c0552010-02-19 13:23:36 -08006993 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006994}
6995
6996/**
6997 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6998 * @ipr_cmd: ipr command struct
6999 *
7000 * This function sets up the SCSI bus attributes and sends
7001 * a Mode Select for Page 28 to activate them.
7002 *
7003 * Return value:
7004 * IPR_RC_JOB_RETURN
7005 **/
7006static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7007{
7008 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7009 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7010 int length;
7011
7012 ENTER;
Brian King47338042006-02-08 20:57:42 -06007013 ipr_scsi_bus_speed_limit(ioa_cfg);
7014 ipr_check_term_power(ioa_cfg, mode_pages);
7015 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
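	/*
	 * Compute the transfer length before clearing hdr.length; the mode
	 * data length field is reserved when sent with MODE SELECT, so it is
	 * zeroed before the buffer goes back to the IOA.
	 */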
7016 length = mode_pages->hdr.length + 1;
7017 mode_pages->hdr.length = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007018
7019 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7020 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7021 length);
7022
Wayne Boyerf72919e2010-02-19 13:24:21 -08007023 ipr_cmd->job_step = ipr_set_supported_devs;
7024 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7025 struct ipr_resource_entry, queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007026 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7027
7028 LEAVE;
7029 return IPR_RC_JOB_RETURN;
7030}
7031
7032/**
7033 * ipr_build_mode_sense - Builds a mode sense command
7034 * @ipr_cmd: ipr command struct
7035 * @res_handle: resource handle to send command to
7036 * @parm: Byte 2 of mode sense command
7037 * @dma_addr: DMA address of mode sense buffer
7038 * @xfer_len: Size of DMA buffer
7039 *
7040 * Return value:
7041 * none
7042 **/
7043static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7044 __be32 res_handle,
Wayne Boyera32c0552010-02-19 13:23:36 -08007045 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007046{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007047 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7048
7049 ioarcb->res_handle = res_handle;
7050 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7051 ioarcb->cmd_pkt.cdb[2] = parm;
7052 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7053 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7054
Wayne Boyera32c0552010-02-19 13:23:36 -08007055 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007056}
7057
7058/**
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007059 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7060 * @ipr_cmd: ipr command struct
7061 *
7062 * This function handles the failure of an IOA bringup command.
7063 *
7064 * Return value:
7065 * IPR_RC_JOB_RETURN
7066 **/
7067static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7068{
7069 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer96d21f02010-05-10 09:13:27 -07007070 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007071
7072 dev_err(&ioa_cfg->pdev->dev,
7073 "0x%02X failed with IOASC: 0x%08X\n",
7074 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7075
7076 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007077 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007078 return IPR_RC_JOB_RETURN;
7079}
7080
7081/**
7082 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7083 * @ipr_cmd: ipr command struct
7084 *
7085 * This function handles the failure of a Mode Sense to the IOAFP.
7086 * Some adapters do not handle all mode pages.
7087 *
7088 * Return value:
7089 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7090 **/
7091static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7092{
Wayne Boyerf72919e2010-02-19 13:24:21 -08007093 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer96d21f02010-05-10 09:13:27 -07007094 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007095
7096 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
Wayne Boyerf72919e2010-02-19 13:24:21 -08007097 ipr_cmd->job_step = ipr_set_supported_devs;
7098 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7099 struct ipr_resource_entry, queue);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007100 return IPR_RC_JOB_CONTINUE;
7101 }
7102
7103 return ipr_reset_cmd_failed(ipr_cmd);
7104}
7105
7106/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007107 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7108 * @ipr_cmd: ipr command struct
7109 *
7110 * This function sends a Page 28 mode sense to the IOA to
7111 * retrieve SCSI bus attributes.
7112 *
7113 * Return value:
7114 * IPR_RC_JOB_RETURN
7115 **/
7116static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7117{
7118 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7119
7120 ENTER;
7121 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7122 0x28, ioa_cfg->vpd_cbs_dma +
7123 offsetof(struct ipr_misc_cbs, mode_pages),
7124 sizeof(struct ipr_mode_pages));
7125
7126 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007127 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007128
7129 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7130
7131 LEAVE;
7132 return IPR_RC_JOB_RETURN;
7133}
7134
7135/**
Brian Kingac09c342007-04-26 16:00:16 -05007136 * ipr_ioafp_mode_select_page24 - Issue Mode Select Page 24 to IOA
7137 * @ipr_cmd: ipr command struct
7138 *
7139 * This function enables dual IOA RAID support if possible.
7140 *
7141 * Return value:
7142 * IPR_RC_JOB_RETURN
7143 **/
7144static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7145{
7146 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7147 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7148 struct ipr_mode_page24 *mode_page;
7149 int length;
7150
7151 ENTER;
7152 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7153 sizeof(struct ipr_mode_page24));
7154
7155 if (mode_page)
7156 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7157
7158 length = mode_pages->hdr.length + 1;
7159 mode_pages->hdr.length = 0;
7160
7161 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7162 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7163 length);
7164
7165 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7166 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7167
7168 LEAVE;
7169 return IPR_RC_JOB_RETURN;
7170}
7171
7172/**
7173 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7174 * @ipr_cmd: ipr command struct
7175 *
7176 * This function handles the failure of a Mode Sense to the IOAFP.
7177 * Some adapters do not handle all mode pages.
7178 *
7179 * Return value:
7180 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7181 **/
7182static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7183{
Wayne Boyer96d21f02010-05-10 09:13:27 -07007184 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian Kingac09c342007-04-26 16:00:16 -05007185
7186 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7187 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7188 return IPR_RC_JOB_CONTINUE;
7189 }
7190
7191 return ipr_reset_cmd_failed(ipr_cmd);
7192}
7193
7194/**
7195 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7196 * @ipr_cmd: ipr command struct
7197 *
7198 * This function sends a mode sense to the IOA to retrieve
7199 * the IOA Advanced Function Control mode page.
7200 *
7201 * Return value:
7202 * IPR_RC_JOB_RETURN
7203 **/
7204static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7205{
7206 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7207
7208 ENTER;
7209 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7210 0x24, ioa_cfg->vpd_cbs_dma +
7211 offsetof(struct ipr_misc_cbs, mode_pages),
7212 sizeof(struct ipr_mode_pages));
7213
7214 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7215 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7216
7217 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7218
7219 LEAVE;
7220 return IPR_RC_JOB_RETURN;
7221}
7222
7223/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007224 * ipr_init_res_table - Initialize the resource table
7225 * @ipr_cmd: ipr command struct
7226 *
7227 * This function looks through the existing resource table, comparing
7228 * it with the config table. This function will take care of old/new
7229 * devices and schedule adding/removing them from the mid-layer
7230 * as appropriate.
7231 *
7232 * Return value:
7233 * IPR_RC_JOB_CONTINUE
7234 **/
7235static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7236{
7237 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7238 struct ipr_resource_entry *res, *temp;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007239 struct ipr_config_table_entry_wrapper cfgtew;
7240 int entries, found, flag, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007241 LIST_HEAD(old_res);
7242
7243 ENTER;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007244 if (ioa_cfg->sis64)
7245 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7246 else
7247 flag = ioa_cfg->u.cfg_table->hdr.flags;
7248
7249 if (flag & IPR_UCODE_DOWNLOAD_REQ)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007250 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7251
7252 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
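	/*
	 * Park every known resource on old_res, then walk the freshly
	 * fetched config table: entries that still exist are moved back to
	 * used_res_q (or allocated from free_res_q if new), and whatever is
	 * left on old_res afterwards has disappeared and is flagged for
	 * removal from the SCSI mid-layer.
	 */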
7253 list_move_tail(&res->queue, &old_res);
7254
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007255 if (ioa_cfg->sis64)
Wayne Boyer438b0332010-05-10 09:13:00 -07007256 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007257 else
7258 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7259
7260 for (i = 0; i < entries; i++) {
7261 if (ioa_cfg->sis64)
7262 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7263 else
7264 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07007265 found = 0;
7266
7267 list_for_each_entry_safe(res, temp, &old_res, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007268 if (ipr_is_same_device(res, &cfgtew)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007269 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7270 found = 1;
7271 break;
7272 }
7273 }
7274
7275 if (!found) {
7276 if (list_empty(&ioa_cfg->free_res_q)) {
7277 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7278 break;
7279 }
7280
7281 found = 1;
7282 res = list_entry(ioa_cfg->free_res_q.next,
7283 struct ipr_resource_entry, queue);
7284 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007285 ipr_init_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007286 res->add_to_ml = 1;
Wayne Boyer56115592010-06-10 14:46:34 -07007287 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7288 res->sdev->allow_restart = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007289
7290 if (found)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007291 ipr_update_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007292 }
7293
7294 list_for_each_entry_safe(res, temp, &old_res, queue) {
7295 if (res->sdev) {
7296 res->del_from_ml = 1;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007297 res->res_handle = IPR_INVALID_RES_HANDLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007298 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007299 }
7300 }
7301
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007302 list_for_each_entry_safe(res, temp, &old_res, queue) {
7303 ipr_clear_res_target(res);
7304 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7305 }
7306
Brian Kingac09c342007-04-26 16:00:16 -05007307 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7308 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7309 else
7310 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007311
7312 LEAVE;
7313 return IPR_RC_JOB_CONTINUE;
7314}
7315
7316/**
7317 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7318 * @ipr_cmd: ipr command struct
7319 *
7320 * This function sends a Query IOA Configuration command
7321 * to the adapter to retrieve the IOA configuration table.
7322 *
7323 * Return value:
7324 * IPR_RC_JOB_RETURN
7325 **/
7326static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7327{
7328 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7329 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007330 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
Brian Kingac09c342007-04-26 16:00:16 -05007331 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007332
7333 ENTER;
Brian Kingac09c342007-04-26 16:00:16 -05007334 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7335 ioa_cfg->dual_raid = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007336 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7337 ucode_vpd->major_release, ucode_vpd->card_type,
7338 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7339 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7340 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7341
7342 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
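	/* The config table buffer size is passed as a 24-bit big-endian value in CDB bytes 6-8 */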
Wayne Boyer438b0332010-05-10 09:13:00 -07007343 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007344 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7345 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007346
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007347 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
Wayne Boyera32c0552010-02-19 13:23:36 -08007348 IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007349
7350 ipr_cmd->job_step = ipr_init_res_table;
7351
7352 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7353
7354 LEAVE;
7355 return IPR_RC_JOB_RETURN;
7356}
7357
7358/**
7359 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7360 * @ipr_cmd: ipr command struct
7361 *
7362 * This utility function sends an inquiry to the adapter.
7363 *
7364 * Return value:
7365 * none
7366 **/
7367static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
Wayne Boyera32c0552010-02-19 13:23:36 -08007368 dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007369{
7370 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007371
7372 ENTER;
7373 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7374 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7375
7376 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7377 ioarcb->cmd_pkt.cdb[1] = flags;
7378 ioarcb->cmd_pkt.cdb[2] = page;
7379 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7380
Wayne Boyera32c0552010-02-19 13:23:36 -08007381 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007382
7383 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7384 LEAVE;
7385}
7386
7387/**
brking@us.ibm.com62275042005-11-01 17:01:14 -06007388 * ipr_inquiry_page_supported - Is the given inquiry page supported
7389 * @page0: inquiry page 0 buffer
7390 * @page: page code.
7391 *
7392 * This function determines if the specified inquiry page is supported.
7393 *
7394 * Return value:
7395 * 1 if page is supported / 0 if not
7396 **/
7397static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7398{
7399 int i;
7400
7401 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7402 if (page0->page[i] == page)
7403 return 1;
7404
7405 return 0;
7406}
7407
7408/**
Brian Kingac09c342007-04-26 16:00:16 -05007409 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7410 * @ipr_cmd: ipr command struct
7411 *
7412 * This function sends a Page 0xD0 inquiry to the adapter
7413 * to retrieve adapter capabilities.
7414 *
7415 * Return value:
7416 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7417 **/
7418static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7419{
7420 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7421 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7422 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7423
7424 ENTER;
7425 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7426 memset(cap, 0, sizeof(*cap));
7427
7428 if (ipr_inquiry_page_supported(page0, 0xD0)) {
7429 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7430 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7431 sizeof(struct ipr_inquiry_cap));
7432 return IPR_RC_JOB_RETURN;
7433 }
7434
7435 LEAVE;
7436 return IPR_RC_JOB_CONTINUE;
7437}
7438
7439/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007440 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7441 * @ipr_cmd: ipr command struct
7442 *
7443 * This function sends a Page 3 inquiry to the adapter
7444 * to retrieve software VPD information.
7445 *
7446 * Return value:
7447 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7448 **/
7449static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7450{
7451 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
brking@us.ibm.com62275042005-11-01 17:01:14 -06007452
7453 ENTER;
7454
Brian Kingac09c342007-04-26 16:00:16 -05007455 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
brking@us.ibm.com62275042005-11-01 17:01:14 -06007456
7457 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7458 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7459 sizeof(struct ipr_inquiry_page3));
7460
7461 LEAVE;
7462 return IPR_RC_JOB_RETURN;
7463}
7464
7465/**
7466 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7467 * @ipr_cmd: ipr command struct
7468 *
7469 * This function sends a Page 0 inquiry to the adapter
7470 * to retrieve supported inquiry pages.
7471 *
7472 * Return value:
7473 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7474 **/
7475static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7476{
7477 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007478 char type[5];
7479
7480 ENTER;
7481
7482 /* Grab the type out of the VPD and store it away */
7483 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7484 type[4] = '\0';
7485 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7486
brking@us.ibm.com62275042005-11-01 17:01:14 -06007487 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007488
brking@us.ibm.com62275042005-11-01 17:01:14 -06007489 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7490 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7491 sizeof(struct ipr_inquiry_page0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007492
7493 LEAVE;
7494 return IPR_RC_JOB_RETURN;
7495}
7496
7497/**
7498 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7499 * @ipr_cmd: ipr command struct
7500 *
7501 * This function sends a standard inquiry to the adapter.
7502 *
7503 * Return value:
7504 * IPR_RC_JOB_RETURN
7505 **/
7506static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7507{
7508 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7509
7510 ENTER;
brking@us.ibm.com62275042005-11-01 17:01:14 -06007511 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007512
7513 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7514 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7515 sizeof(struct ipr_ioa_vpd));
7516
7517 LEAVE;
7518 return IPR_RC_JOB_RETURN;
7519}
7520
7521/**
Wayne Boyer214777b2010-02-19 13:24:26 -08007522 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007523 * @ipr_cmd: ipr command struct
7524 *
7525 * This function sends an Identify Host Request Response Queue
7526 * command to establish the HRRQ with the adapter.
7527 *
7528 * Return value:
7529 * IPR_RC_JOB_RETURN
7530 **/
Wayne Boyer214777b2010-02-19 13:24:26 -08007531static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007532{
7533 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7534 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007535 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007536
7537 ENTER;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007538 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007539 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7540
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007541 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7542 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
Linus Torvalds1da177e2005-04-16 15:20:36 -07007543
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007544 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7545 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007546
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007547 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7548 if (ioa_cfg->sis64)
7549 ioarcb->cmd_pkt.cdb[1] = 0x1;
7550
7551 if (ioa_cfg->nvectors == 1)
7552 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7553 else
7554 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7555
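		/*
		 * CDB layout for Identify Host RRQ: bytes 2-5 carry the low
		 * 32 bits of the HRRQ DMA address, bytes 7-8 the queue length
		 * in bytes, and on SIS64 adapters bytes 10-13 carry the upper
		 * 32 bits of the address.  Bytes 9 and 14 select the queue
		 * index when multiple HRRQs are enabled.
		 */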
7556 ioarcb->cmd_pkt.cdb[2] =
7557 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7558 ioarcb->cmd_pkt.cdb[3] =
7559 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7560 ioarcb->cmd_pkt.cdb[4] =
7561 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7562 ioarcb->cmd_pkt.cdb[5] =
7563 ((u64) hrrq->host_rrq_dma) & 0xff;
7564 ioarcb->cmd_pkt.cdb[7] =
7565 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7566 ioarcb->cmd_pkt.cdb[8] =
7567 (sizeof(u32) * hrrq->size) & 0xff;
7568
7569 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007570 ioarcb->cmd_pkt.cdb[9] =
7571 ioa_cfg->identify_hrrq_index;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007572
7573 if (ioa_cfg->sis64) {
7574 ioarcb->cmd_pkt.cdb[10] =
7575 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7576 ioarcb->cmd_pkt.cdb[11] =
7577 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7578 ioarcb->cmd_pkt.cdb[12] =
7579 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7580 ioarcb->cmd_pkt.cdb[13] =
7581 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7582 }
7583
7584 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007585 ioarcb->cmd_pkt.cdb[14] =
7586 ioa_cfg->identify_hrrq_index;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007587
7588 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7589 IPR_INTERNAL_TIMEOUT);
7590
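		/*
		 * One Identify Host RRQ is sent per queue.  Re-enter this step
		 * until every HRRQ has been identified; job_step then remains
		 * ipr_ioafp_std_inquiry (set above) for the next phase.
		 */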
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007591 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7592 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007593
7594 LEAVE;
7595 return IPR_RC_JOB_RETURN;
Wayne Boyer214777b2010-02-19 13:24:26 -08007596 }
7597
Linus Torvalds1da177e2005-04-16 15:20:36 -07007598 LEAVE;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007599 return IPR_RC_JOB_CONTINUE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007600}
7601
7602/**
7603 * ipr_reset_timer_done - Adapter reset timer function
7604 * @ipr_cmd: ipr command struct
7605 *
7606 * Description: This function is used in adapter reset processing
7607 * for timing events. If the reset_cmd pointer in the IOA
7608 * config struct does not point to this command, we are doing nested
7609 * resets and fail_all_ops will take care of freeing the
7610 * command block.
7611 *
7612 * Return value:
7613 * none
7614 **/
7615static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7616{
7617 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7618 unsigned long lock_flags = 0;
7619
7620 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7621
7622 if (ioa_cfg->reset_cmd == ipr_cmd) {
7623 list_del(&ipr_cmd->queue);
7624 ipr_cmd->done(ipr_cmd);
7625 }
7626
7627 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7628}
7629
7630/**
7631 * ipr_reset_start_timer - Start a timer for adapter reset job
7632 * @ipr_cmd: ipr command struct
7633 * @timeout: timeout value
7634 *
7635 * Description: This function is used in adapter reset processing
7636 * for timing events. If the reset_cmd pointer in the IOA
7637 * config struct does not point to this command, we are doing nested
7638 * resets and fail_all_ops will take care of freeing the
7639 * command block.
7640 *
7641 * Return value:
7642 * none
7643 **/
7644static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7645 unsigned long timeout)
7646{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007647
7648 ENTER;
7649 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007650 ipr_cmd->done = ipr_reset_ioa_job;
7651
7652 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7653 ipr_cmd->timer.expires = jiffies + timeout;
7654 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7655 add_timer(&ipr_cmd->timer);
7656}
7657
7658/**
7659 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7660 * @ioa_cfg: ioa cfg struct
7661 *
7662 * Return value:
7663 * nothing
7664 **/
7665static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7666{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007667 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007668
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007669 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007670 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007671 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7672
7673 /* Initialize Host RRQ pointers */
7674 hrrq->hrrq_start = hrrq->host_rrq;
7675 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7676 hrrq->hrrq_curr = hrrq->hrrq_start;
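		/*
		 * The toggle bit lets the host tell fresh responses from stale
		 * queue entries; the expected value flips each time the queue
		 * wraps, and is reset to 1 here along with the queue pointers.
		 */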
7677 hrrq->toggle_bit = 1;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007678 spin_unlock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007679 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007680 wmb();
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007681
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007682 ioa_cfg->identify_hrrq_index = 0;
7683 if (ioa_cfg->hrrq_num == 1)
7684 atomic_set(&ioa_cfg->hrrq_index, 0);
7685 else
7686 atomic_set(&ioa_cfg->hrrq_index, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007687
7688 /* Zero out config table */
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007689 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007690}
7691
7692/**
Wayne Boyer214777b2010-02-19 13:24:26 -08007693 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7694 * @ipr_cmd: ipr command struct
7695 *
7696 * Return value:
7697 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7698 **/
7699static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7700{
7701 unsigned long stage, stage_time;
7702 u32 feedback;
7703 volatile u32 int_reg;
7704 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7705 u64 maskval = 0;
7706
7707 feedback = readl(ioa_cfg->regs.init_feedback_reg);
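	/*
	 * The init feedback register packs the current IPL stage and the
	 * maximum time that stage is expected to take into one 32-bit value;
	 * the stage time, after the sanity clamp below, is used as the
	 * timeout for the stage-change timer.
	 */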
7708 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7709 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7710
7711 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7712
7713 /* sanity check the stage_time value */
Wayne Boyer438b0332010-05-10 09:13:00 -07007714 if (stage_time == 0)
7715 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7716 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
Wayne Boyer214777b2010-02-19 13:24:26 -08007717 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7718 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7719 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7720
7721 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7722 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7723 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7724 stage_time = ioa_cfg->transop_timeout;
7725 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7726 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
Wayne Boyer1df79ca2010-07-14 10:49:43 -07007727 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7728 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7729 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7730 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7731 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7732 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7733 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7734 return IPR_RC_JOB_CONTINUE;
7735 }
Wayne Boyer214777b2010-02-19 13:24:26 -08007736 }
7737
7738 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7739 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7740 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7741 ipr_cmd->done = ipr_reset_ioa_job;
7742 add_timer(&ipr_cmd->timer);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007743
7744 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Wayne Boyer214777b2010-02-19 13:24:26 -08007745
7746 return IPR_RC_JOB_RETURN;
7747}
7748
7749/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007750 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7751 * @ipr_cmd: ipr command struct
7752 *
7753 * This function reinitializes some control blocks and
7754 * enables destructive diagnostics on the adapter.
7755 *
7756 * Return value:
7757 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7758 **/
7759static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7760{
7761 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7762 volatile u32 int_reg;
Wayne Boyer7be96902010-05-10 09:14:07 -07007763 volatile u64 maskval;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007764 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007765
7766 ENTER;
Wayne Boyer214777b2010-02-19 13:24:26 -08007767 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007768 ipr_init_ioa_mem(ioa_cfg);
7769
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007770 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7771 spin_lock(&ioa_cfg->hrrq[i]._lock);
7772 ioa_cfg->hrrq[i].allow_interrupts = 1;
7773 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7774 }
7775 wmb();
Wayne Boyer8701f182010-06-04 10:26:50 -07007776 if (ioa_cfg->sis64) {
7777 /* Set the adapter to the correct endian mode. */
7778 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7779 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7780 }
7781
Wayne Boyer7be96902010-05-10 09:14:07 -07007782 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007783
7784 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7785 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
Wayne Boyer214777b2010-02-19 13:24:26 -08007786 ioa_cfg->regs.clr_interrupt_mask_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007787 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7788 return IPR_RC_JOB_CONTINUE;
7789 }
7790
7791 /* Enable destructive diagnostics on IOA */
Wayne Boyer214777b2010-02-19 13:24:26 -08007792 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007793
Wayne Boyer7be96902010-05-10 09:14:07 -07007794 if (ioa_cfg->sis64) {
7795 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7796 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7797 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7798 } else
7799 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
Wayne Boyer214777b2010-02-19 13:24:26 -08007800
Linus Torvalds1da177e2005-04-16 15:20:36 -07007801 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7802
7803 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7804
Wayne Boyer214777b2010-02-19 13:24:26 -08007805 if (ioa_cfg->sis64) {
7806 ipr_cmd->job_step = ipr_reset_next_stage;
7807 return IPR_RC_JOB_CONTINUE;
7808 }
7809
Linus Torvalds1da177e2005-04-16 15:20:36 -07007810 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
Brian King5469cb52007-03-29 12:42:40 -05007811 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007812 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7813 ipr_cmd->done = ipr_reset_ioa_job;
7814 add_timer(&ipr_cmd->timer);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007815 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007816
7817 LEAVE;
7818 return IPR_RC_JOB_RETURN;
7819}
7820
7821/**
7822 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7823 * @ipr_cmd: ipr command struct
7824 *
7825 * This function is invoked when an adapter dump has run out
7826 * of processing time.
7827 *
7828 * Return value:
7829 * IPR_RC_JOB_CONTINUE
7830 **/
7831static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7832{
7833 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7834
7835 if (ioa_cfg->sdt_state == GET_DUMP)
Brian King41e9a692011-09-21 08:51:11 -05007836 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7837 else if (ioa_cfg->sdt_state == READ_DUMP)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007838 ioa_cfg->sdt_state = ABORT_DUMP;
7839
Brian King4c647e92011-10-15 09:08:56 -05007840 ioa_cfg->dump_timeout = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007841 ipr_cmd->job_step = ipr_reset_alert;
7842
7843 return IPR_RC_JOB_CONTINUE;
7844}
7845
7846/**
7847 * ipr_unit_check_no_data - Log a unit check/no data error log
7848 * @ioa_cfg: ioa config struct
7849 *
7850 * Logs an error indicating the adapter unit checked, but for some
7851 * reason, we were unable to fetch the unit check buffer.
7852 *
7853 * Return value:
7854 * nothing
7855 **/
7856static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7857{
7858 ioa_cfg->errors_logged++;
7859 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7860}
7861
7862/**
7863 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7864 * @ioa_cfg: ioa config struct
7865 *
7866 * Fetches the unit check buffer from the adapter by clocking the data
7867 * through the mailbox register.
7868 *
7869 * Return value:
7870 * nothing
7871 **/
7872static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7873{
7874 unsigned long mailbox;
7875 struct ipr_hostrcb *hostrcb;
7876 struct ipr_uc_sdt sdt;
7877 int rc, length;
Brian King65f56472007-04-26 16:00:12 -05007878 u32 ioasc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007879
7880 mailbox = readl(ioa_cfg->ioa_mailbox);
7881
Wayne Boyerdcbad002010-02-19 13:24:14 -08007882 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007883 ipr_unit_check_no_data(ioa_cfg);
7884 return;
7885 }
7886
7887 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7888 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7889 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7890
Wayne Boyerdcbad002010-02-19 13:24:14 -08007891 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7892 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7893 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007894 ipr_unit_check_no_data(ioa_cfg);
7895 return;
7896 }
7897
7898 /* Find length of the first sdt entry (UC buffer) */
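	/*
	 * Format 3 SDTs report the length directly in the end token; format 2
	 * SDTs report start/end addresses, so the length is their difference
	 * masked with IPR_FMT2_MBX_ADDR_MASK.
	 */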
Wayne Boyerdcbad002010-02-19 13:24:14 -08007899 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
7900 length = be32_to_cpu(sdt.entry[0].end_token);
7901 else
7902 length = (be32_to_cpu(sdt.entry[0].end_token) -
7903 be32_to_cpu(sdt.entry[0].start_token)) &
7904 IPR_FMT2_MBX_ADDR_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007905
7906 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7907 struct ipr_hostrcb, queue);
7908 list_del(&hostrcb->queue);
7909 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
7910
7911 rc = ipr_get_ldump_data_section(ioa_cfg,
Wayne Boyerdcbad002010-02-19 13:24:14 -08007912 be32_to_cpu(sdt.entry[0].start_token),
Linus Torvalds1da177e2005-04-16 15:20:36 -07007913 (__be32 *)&hostrcb->hcam,
7914 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
7915
Brian King65f56472007-04-26 16:00:12 -05007916 if (!rc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007917 ipr_handle_log_data(ioa_cfg, hostrcb);
Wayne Boyer4565e372010-02-19 13:24:07 -08007918 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Brian King65f56472007-04-26 16:00:12 -05007919 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
7920 ioa_cfg->sdt_state == GET_DUMP)
7921 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7922 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07007923 ipr_unit_check_no_data(ioa_cfg);
7924
7925 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7926}
7927
7928/**
Wayne Boyer110def82010-11-04 09:36:16 -07007929 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
7930 * @ipr_cmd: ipr command struct
7931 *
7932 * Description: This function retrieves the unit check buffer from the adapter.
7933 *
7934 * Return value:
7935 * IPR_RC_JOB_RETURN
7936 **/
7937static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
7938{
7939 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7940
7941 ENTER;
7942 ioa_cfg->ioa_unit_checked = 0;
7943 ipr_get_unit_check_buffer(ioa_cfg);
7944 ipr_cmd->job_step = ipr_reset_alert;
7945 ipr_reset_start_timer(ipr_cmd, 0);
7946
7947 LEAVE;
7948 return IPR_RC_JOB_RETURN;
7949}
7950
7951/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007952 * ipr_reset_restore_cfg_space - Restore PCI config space.
7953 * @ipr_cmd: ipr command struct
7954 *
7955 * Description: This function restores the saved PCI config space of
7956 * the adapter, fails all outstanding ops back to the callers, and
7957 * fetches the dump/unit check if applicable to this reset.
7958 *
7959 * Return value:
7960 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7961 **/
7962static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7963{
7964 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer630ad8312011-04-07 12:12:30 -07007965 u32 int_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007966
7967 ENTER;
Kleber Sacilotto de Souza99c965d2009-11-25 20:13:43 -02007968 ioa_cfg->pdev->state_saved = true;
Jon Mason1d3c16a2010-11-30 17:43:26 -06007969 pci_restore_state(ioa_cfg->pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007970
7971 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
Wayne Boyer96d21f02010-05-10 09:13:27 -07007972 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007973 return IPR_RC_JOB_CONTINUE;
7974 }
7975
7976 ipr_fail_all_ops(ioa_cfg);
7977
Wayne Boyer8701f182010-06-04 10:26:50 -07007978 if (ioa_cfg->sis64) {
7979 /* Set the adapter to the correct endian mode. */
7980 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7981 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7982 }
7983
Linus Torvalds1da177e2005-04-16 15:20:36 -07007984 if (ioa_cfg->ioa_unit_checked) {
Wayne Boyer110def82010-11-04 09:36:16 -07007985 if (ioa_cfg->sis64) {
7986 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
7987 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
7988 return IPR_RC_JOB_RETURN;
7989 } else {
7990 ioa_cfg->ioa_unit_checked = 0;
7991 ipr_get_unit_check_buffer(ioa_cfg);
7992 ipr_cmd->job_step = ipr_reset_alert;
7993 ipr_reset_start_timer(ipr_cmd, 0);
7994 return IPR_RC_JOB_RETURN;
7995 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007996 }
7997
7998 if (ioa_cfg->in_ioa_bringdown) {
7999 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8000 } else {
8001 ipr_cmd->job_step = ipr_reset_enable_ioa;
8002
8003 if (GET_DUMP == ioa_cfg->sdt_state) {
Brian King41e9a692011-09-21 08:51:11 -05008004 ioa_cfg->sdt_state = READ_DUMP;
Brian King4c647e92011-10-15 09:08:56 -05008005 ioa_cfg->dump_timeout = 0;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03008006 if (ioa_cfg->sis64)
8007 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8008 else
8009 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008010 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8011 schedule_work(&ioa_cfg->work_q);
8012 return IPR_RC_JOB_RETURN;
8013 }
8014 }
8015
Wayne Boyer438b0332010-05-10 09:13:00 -07008016 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008017 return IPR_RC_JOB_CONTINUE;
8018}
8019
8020/**
Brian Kinge619e1a2007-01-23 11:25:37 -06008021 * ipr_reset_bist_done - BIST has completed on the adapter.
8022 * @ipr_cmd: ipr command struct
8023 *
8024 * Description: Unblock config space and resume the reset process.
8025 *
8026 * Return value:
8027 * IPR_RC_JOB_CONTINUE
8028 **/
8029static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8030{
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008031 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8032
Brian Kinge619e1a2007-01-23 11:25:37 -06008033 ENTER;
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008034 if (ioa_cfg->cfg_locked)
8035 pci_cfg_access_unlock(ioa_cfg->pdev);
8036 ioa_cfg->cfg_locked = 0;
Brian Kinge619e1a2007-01-23 11:25:37 -06008037 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8038 LEAVE;
8039 return IPR_RC_JOB_CONTINUE;
8040}
8041
8042/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008043 * ipr_reset_start_bist - Run BIST on the adapter.
8044 * @ipr_cmd: ipr command struct
8045 *
8046 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8047 *
8048 * Return value:
8049 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8050 **/
8051static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8052{
8053 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008054 int rc = PCIBIOS_SUCCESSFUL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008055
8056 ENTER;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008057 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8058 writel(IPR_UPROCI_SIS64_START_BIST,
8059 ioa_cfg->regs.set_uproc_interrupt_reg32);
8060 else
8061 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8062
8063 if (rc == PCIBIOS_SUCCESSFUL) {
Brian Kinge619e1a2007-01-23 11:25:37 -06008064 ipr_cmd->job_step = ipr_reset_bist_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008065 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8066 rc = IPR_RC_JOB_RETURN;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008067 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008068 if (ioa_cfg->cfg_locked)
8069 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8070 ioa_cfg->cfg_locked = 0;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008071 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8072 rc = IPR_RC_JOB_CONTINUE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008073 }
8074
8075 LEAVE;
8076 return rc;
8077}
8078
8079/**
Brian King463fc692007-05-07 17:09:05 -05008080 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8081 * @ipr_cmd: ipr command struct
8082 *
8083 * Description: This deasserts PCI reset to the adapter and delays two seconds.
8084 *
8085 * Return value:
8086 * IPR_RC_JOB_RETURN
8087 **/
8088static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8089{
8090 ENTER;
8091 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
8092 ipr_cmd->job_step = ipr_reset_bist_done;
8093 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8094 LEAVE;
8095 return IPR_RC_JOB_RETURN;
8096}
8097
8098/**
8099 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8100 * @ipr_cmd: ipr command struct
8101 *
8102 * Description: This asserts PCI reset to the adapter.
8103 *
8104 * Return value:
8105 * IPR_RC_JOB_RETURN
8106 **/
8107static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8108{
8109 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8110 struct pci_dev *pdev = ioa_cfg->pdev;
8111
8112 ENTER;
Brian King463fc692007-05-07 17:09:05 -05008113 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8114 ipr_cmd->job_step = ipr_reset_slot_reset_done;
8115 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
8116 LEAVE;
8117 return IPR_RC_JOB_RETURN;
8118}
8119
8120/**
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008121 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8122 * @ipr_cmd: ipr command struct
8123 *
8124 * Description: This attempts to block config access to the IOA.
8125 *
8126 * Return value:
8127 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8128 **/
8129static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8130{
8131 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8132 int rc = IPR_RC_JOB_CONTINUE;
8133
8134 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8135 ioa_cfg->cfg_locked = 1;
8136 ipr_cmd->job_step = ioa_cfg->reset;
8137 } else {
8138 if (ipr_cmd->u.time_left) {
8139 rc = IPR_RC_JOB_RETURN;
8140 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8141 ipr_reset_start_timer(ipr_cmd,
8142 IPR_CHECK_FOR_RESET_TIMEOUT);
8143 } else {
8144 ipr_cmd->job_step = ioa_cfg->reset;
8145 dev_err(&ioa_cfg->pdev->dev,
8146 "Timed out waiting to lock config access. Resetting anyway.\n");
8147 }
8148 }
8149
8150 return rc;
8151}
8152
8153/**
8154 * ipr_reset_block_config_access - Block config access to the IOA
8155 * @ipr_cmd: ipr command struct
8156 *
8157 * Description: This attempts to block config access to the IOA.
8158 *
8159 * Return value:
8160 * IPR_RC_JOB_CONTINUE
8161 **/
8162static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8163{
8164 ipr_cmd->ioa_cfg->cfg_locked = 0;
8165 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8166 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8167 return IPR_RC_JOB_CONTINUE;
8168}
8169
8170/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008171 * ipr_reset_allowed - Query whether or not IOA can be reset
8172 * @ioa_cfg: ioa config struct
8173 *
8174 * Return value:
8175 * 0 if reset not allowed / non-zero if reset is allowed
8176 **/
8177static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8178{
8179 volatile u32 temp_reg;
8180
8181 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8182 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8183}
8184
8185/**
8186 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8187 * @ipr_cmd: ipr command struct
8188 *
8189 * Description: This function waits for adapter permission to run BIST,
8190 * then runs BIST. If the adapter does not give permission after a
8191 * reasonable time, we will reset the adapter anyway. The impact of
8192 * resetting the adapter without warning the adapter is the risk of
8193 * losing the persistent error log on the adapter. If the adapter is
8194 * reset while it is writing to the flash on the adapter, the flash
8195 * segment will have bad ECC and be zeroed.
8196 *
8197 * Return value:
8198 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8199 **/
8200static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8201{
8202 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8203 int rc = IPR_RC_JOB_RETURN;
8204
8205 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8206 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8207 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8208 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008209 ipr_cmd->job_step = ipr_reset_block_config_access;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008210 rc = IPR_RC_JOB_CONTINUE;
8211 }
8212
8213 return rc;
8214}
8215
8216/**
Wayne Boyer8701f182010-06-04 10:26:50 -07008217 * ipr_reset_alert - Alert the adapter of a pending reset
Linus Torvalds1da177e2005-04-16 15:20:36 -07008218 * @ipr_cmd: ipr command struct
8219 *
8220 * Description: This function alerts the adapter that it will be reset.
8221 * If memory space is not currently enabled, proceed directly
8222 * to running BIST on the adapter. The timer must always be started
8223 * so we guarantee we do not run BIST from ipr_isr.
8224 *
8225 * Return value:
8226 * IPR_RC_JOB_RETURN
8227 **/
8228static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8229{
8230 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8231 u16 cmd_reg;
8232 int rc;
8233
8234 ENTER;
8235 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8236
8237 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8238 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
Wayne Boyer214777b2010-02-19 13:24:26 -08008239 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008240 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8241 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008242 ipr_cmd->job_step = ipr_reset_block_config_access;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008243 }
8244
8245 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8246 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8247
8248 LEAVE;
8249 return IPR_RC_JOB_RETURN;
8250}
8251
8252/**
8253 * ipr_reset_ucode_download_done - Microcode download completion
8254 * @ipr_cmd: ipr command struct
8255 *
8256 * Description: This function unmaps the microcode download buffer.
8257 *
8258 * Return value:
8259 * IPR_RC_JOB_CONTINUE
8260 **/
8261static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8262{
8263 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8264 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8265
8266 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
8267 sglist->num_sg, DMA_TO_DEVICE);
8268
8269 ipr_cmd->job_step = ipr_reset_alert;
8270 return IPR_RC_JOB_CONTINUE;
8271}
8272
8273/**
8274 * ipr_reset_ucode_download - Download microcode to the adapter
8275 * @ipr_cmd: ipr command struct
8276 *
8277 * Description: This function checks to see if there is microcode
8278 * to download to the adapter. If there is, a download is performed.
8279 *
8280 * Return value:
8281 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8282 **/
8283static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8284{
8285 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8286 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8287
8288 ENTER;
8289 ipr_cmd->job_step = ipr_reset_alert;
8290
8291 if (!sglist)
8292 return IPR_RC_JOB_CONTINUE;
8293
8294 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8295 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8296 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8297 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
8298 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8299 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8300 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8301
Wayne Boyera32c0552010-02-19 13:23:36 -08008302 if (ioa_cfg->sis64)
8303 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8304 else
8305 ipr_build_ucode_ioadl(ipr_cmd, sglist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008306 ipr_cmd->job_step = ipr_reset_ucode_download_done;
8307
8308 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8309 IPR_WRITE_BUFFER_TIMEOUT);
8310
8311 LEAVE;
8312 return IPR_RC_JOB_RETURN;
8313}
8314
8315/**
8316 * ipr_reset_shutdown_ioa - Shutdown the adapter
8317 * @ipr_cmd: ipr command struct
8318 *
8319 * Description: This function issues an adapter shutdown of the
8320 * specified type to the specified adapter as part of the
8321 * adapter reset job.
8322 *
8323 * Return value:
8324 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8325 **/
8326static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8327{
8328 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8329 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8330 unsigned long timeout;
8331 int rc = IPR_RC_JOB_CONTINUE;
8332
8333 ENTER;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008334 if (shutdown_type != IPR_SHUTDOWN_NONE &&
8335 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008336 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8337 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8338 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8339 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8340
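		/*
		 * The completion timeout depends on the shutdown type:
		 * a normal shutdown gets the full shutdown timeout,
		 * prepare-for-normal uses the internal timeout, and
		 * dual-IOA RAID or other cases use an abbreviated timeout.
		 */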
Brian Kingac09c342007-04-26 16:00:16 -05008341 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8342 timeout = IPR_SHUTDOWN_TIMEOUT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008343 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8344 timeout = IPR_INTERNAL_TIMEOUT;
Brian Kingac09c342007-04-26 16:00:16 -05008345 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8346 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008347 else
Brian Kingac09c342007-04-26 16:00:16 -05008348 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008349
8350 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8351
8352 rc = IPR_RC_JOB_RETURN;
8353 ipr_cmd->job_step = ipr_reset_ucode_download;
8354 } else
8355 ipr_cmd->job_step = ipr_reset_alert;
8356
8357 LEAVE;
8358 return rc;
8359}
8360
8361/**
8362 * ipr_reset_ioa_job - Adapter reset job
8363 * @ipr_cmd: ipr command struct
8364 *
8365 * Description: This function is the job router for the adapter reset job.
8366 *
8367 * Return value:
8368 * none
8369 **/
8370static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8371{
8372 u32 rc, ioasc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008373 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8374
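	/*
	 * Walk the reset state machine: each job_step either completes
	 * synchronously (IPR_RC_JOB_CONTINUE, so loop on to the next step)
	 * or arranges to be re-entered later via an interrupt or timer and
	 * returns IPR_RC_JOB_RETURN to break out of the loop.
	 */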
8375 do {
Wayne Boyer96d21f02010-05-10 09:13:27 -07008376 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008377
8378 if (ioa_cfg->reset_cmd != ipr_cmd) {
8379 /*
8380 * We are doing nested adapter resets and this is
8381 * not the current reset job.
8382 */
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008383 list_add_tail(&ipr_cmd->queue,
8384 &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008385 return;
8386 }
8387
8388 if (IPR_IOASC_SENSE_KEY(ioasc)) {
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06008389 rc = ipr_cmd->job_step_failed(ipr_cmd);
8390 if (rc == IPR_RC_JOB_RETURN)
8391 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008392 }
8393
8394 ipr_reinit_ipr_cmnd(ipr_cmd);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06008395 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008396 rc = ipr_cmd->job_step(ipr_cmd);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03008397 } while (rc == IPR_RC_JOB_CONTINUE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008398}
8399
8400/**
8401 * _ipr_initiate_ioa_reset - Initiate an adapter reset
8402 * @ioa_cfg: ioa config struct
8403 * @job_step: first job step of reset job
8404 * @shutdown_type: shutdown type
8405 *
8406 * Description: This function will initiate the reset of the given adapter
8407 * starting at the selected job step.
8408 * If the caller needs to wait on the completion of the reset,
8409 * the caller must sleep on the reset_wait_q.
8410 *
8411 * Return value:
8412 * none
8413 **/
8414static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8415 int (*job_step) (struct ipr_cmnd *),
8416 enum ipr_shutdown_type shutdown_type)
8417{
8418 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008419 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008420
8421 ioa_cfg->in_reset_reload = 1;
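	/*
	 * Stop every HRRQ from accepting new commands before blocking the
	 * midlayer, so nothing new is queued while the reset job runs.
	 */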
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008422 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8423 spin_lock(&ioa_cfg->hrrq[i]._lock);
8424 ioa_cfg->hrrq[i].allow_cmds = 0;
8425 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8426 }
8427 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008428 scsi_block_requests(ioa_cfg->host);
8429
8430 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8431 ioa_cfg->reset_cmd = ipr_cmd;
8432 ipr_cmd->job_step = job_step;
8433 ipr_cmd->u.shutdown_type = shutdown_type;
8434
8435 ipr_reset_ioa_job(ipr_cmd);
8436}
8437
8438/**
8439 * ipr_initiate_ioa_reset - Initiate an adapter reset
8440 * @ioa_cfg: ioa config struct
8441 * @shutdown_type: shutdown type
8442 *
8443 * Description: This function will initiate the reset of the given adapter.
8444 * If the caller needs to wait on the completion of the reset,
8445 * the caller must sleep on the reset_wait_q.
8446 *
8447 * Return value:
8448 * none
8449 **/
8450static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8451 enum ipr_shutdown_type shutdown_type)
8452{
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008453 int i;
8454
8455 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008456 return;
8457
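	/*
	 * If a reset is already in progress, any pending dump is either
	 * deferred (GET_DUMP -> WAIT_FOR_DUMP) or aborted
	 * (READ_DUMP -> ABORT_DUMP) before another reset is layered on top.
	 */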
Brian King41e9a692011-09-21 08:51:11 -05008458 if (ioa_cfg->in_reset_reload) {
8459 if (ioa_cfg->sdt_state == GET_DUMP)
8460 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8461 else if (ioa_cfg->sdt_state == READ_DUMP)
8462 ioa_cfg->sdt_state = ABORT_DUMP;
8463 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008464
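	/*
	 * Give up after IPR_NUM_RESET_RELOAD_RETRIES consecutive reset
	 * attempts and take the adapter offline rather than retrying forever.
	 */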
8465 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8466 dev_err(&ioa_cfg->pdev->dev,
8467 "IOA taken offline - error recovery failed\n");
8468
8469 ioa_cfg->reset_retries = 0;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008470 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8471 spin_lock(&ioa_cfg->hrrq[i]._lock);
8472 ioa_cfg->hrrq[i].ioa_is_dead = 1;
8473 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8474 }
8475 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008476
8477 if (ioa_cfg->in_ioa_bringdown) {
8478 ioa_cfg->reset_cmd = NULL;
8479 ioa_cfg->in_reset_reload = 0;
8480 ipr_fail_all_ops(ioa_cfg);
8481 wake_up_all(&ioa_cfg->reset_wait_q);
8482
8483 spin_unlock_irq(ioa_cfg->host->host_lock);
8484 scsi_unblock_requests(ioa_cfg->host);
8485 spin_lock_irq(ioa_cfg->host->host_lock);
8486 return;
8487 } else {
8488 ioa_cfg->in_ioa_bringdown = 1;
8489 shutdown_type = IPR_SHUTDOWN_NONE;
8490 }
8491 }
8492
8493 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8494 shutdown_type);
8495}
8496
8497/**
Linas Vepstasf8a88b192006-02-03 16:52:42 -06008498 * ipr_reset_freeze - Hold off all I/O activity
8499 * @ipr_cmd: ipr command struct
8500 *
8501 * Description: If the PCI slot is frozen, hold off all I/O
8502 * activity; then, as soon as the slot is available again,
8503 * initiate an adapter reset.
8504 */
8505static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8506{
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008507 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8508 int i;
8509
Linas Vepstasf8a88b192006-02-03 16:52:42 -06008510 /* Disallow new interrupts, avoid loop */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008511 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8512 spin_lock(&ioa_cfg->hrrq[i]._lock);
8513 ioa_cfg->hrrq[i].allow_interrupts = 0;
8514 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8515 }
8516 wmb();
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008517 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06008518 ipr_cmd->done = ipr_reset_ioa_job;
8519 return IPR_RC_JOB_RETURN;
8520}
8521
8522/**
8523 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8524 * @pdev: PCI device struct
8525 *
8526 * Description: This routine is called to tell us that the PCI bus
8527 * is down. Can't do anything here, except put the device driver
8528 * into a holding pattern, waiting for the PCI bus to come back.
8529 */
8530static void ipr_pci_frozen(struct pci_dev *pdev)
8531{
8532 unsigned long flags = 0;
8533 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8534
8535 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8536 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8537 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8538}
8539
8540/**
8541 * ipr_pci_slot_reset - Called when PCI slot has been reset.
8542 * @pdev: PCI device struct
8543 *
8544 * Description: This routine is called by the pci error recovery
8545 * code after the PCI slot has been reset, just before we
8546 * should resume normal operations.
8547 */
8548static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8549{
8550 unsigned long flags = 0;
8551 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8552
8553 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King463fc692007-05-07 17:09:05 -05008554 if (ioa_cfg->needs_warm_reset)
8555 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8556 else
8557 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8558 IPR_SHUTDOWN_NONE);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06008559 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8560 return PCI_ERS_RESULT_RECOVERED;
8561}
8562
8563/**
8564 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8565 * @pdev: PCI device struct
8566 *
8567 * Description: This routine is called when the PCI bus has
8568 * permanently failed.
8569 */
8570static void ipr_pci_perm_failure(struct pci_dev *pdev)
8571{
8572 unsigned long flags = 0;
8573 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008574 int i;
Linas Vepstasf8a88b192006-02-03 16:52:42 -06008575
8576 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8577 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8578 ioa_cfg->sdt_state = ABORT_DUMP;
8579 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
8580 ioa_cfg->in_ioa_bringdown = 1;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008581 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8582 spin_lock(&ioa_cfg->hrrq[i]._lock);
8583 ioa_cfg->hrrq[i].allow_cmds = 0;
8584 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8585 }
8586 wmb();
Linas Vepstasf8a88b192006-02-03 16:52:42 -06008587 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8588 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8589}
8590
8591/**
8592 * ipr_pci_error_detected - Called when a PCI error is detected.
8593 * @pdev: PCI device struct
8594 * @state: PCI channel state
8595 *
8596 * Description: Called when a PCI error is detected.
8597 *
8598 * Return value:
8599 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8600 */
8601static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8602 pci_channel_state_t state)
8603{
8604 switch (state) {
8605 case pci_channel_io_frozen:
8606 ipr_pci_frozen(pdev);
8607 return PCI_ERS_RESULT_NEED_RESET;
8608 case pci_channel_io_perm_failure:
8609 ipr_pci_perm_failure(pdev);
8610 return PCI_ERS_RESULT_DISCONNECT;
8612 default:
8613 break;
8614 }
8615 return PCI_ERS_RESULT_NEED_RESET;
8616}
8617
8618/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008619 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8620 * @ioa_cfg: ioa cfg struct
8621 *
8622 * Description: This is the second phase of adapter initialization.
8623 * This function takes care of initializing the adapter to the point
8624 * where it can accept new commands.
8625 *
8626 * Return value:
Joe Perchesb1c11812008-02-03 17:28:22 +02008627 * 0 on success / -EIO on failure
Linus Torvalds1da177e2005-04-16 15:20:36 -07008628 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08008629static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008630{
8631 int rc = 0;
8632 unsigned long host_lock_flags = 0;
8633
8634 ENTER;
8635 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8636 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
brking@us.ibm.comce155cc2005-11-17 09:35:12 -06008637 if (ioa_cfg->needs_hard_reset) {
8638 ioa_cfg->needs_hard_reset = 0;
8639 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8640 } else
8641 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8642 IPR_SHUTDOWN_NONE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008643 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8644 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8645 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8646
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008647 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008648 rc = -EIO;
8649 } else if (ipr_invalid_adapter(ioa_cfg)) {
8650 if (!ipr_testmode)
8651 rc = -EIO;
8652
8653 dev_err(&ioa_cfg->pdev->dev,
8654 "Adapter not supported in this hardware configuration.\n");
8655 }
8656
8657 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8658
8659 LEAVE;
8660 return rc;
8661}
8662
8663/**
8664 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8665 * @ioa_cfg: ioa config struct
8666 *
8667 * Return value:
8668 * none
8669 **/
8670static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8671{
8672 int i;
8673
8674 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8675 if (ioa_cfg->ipr_cmnd_list[i])
8676 pci_pool_free(ioa_cfg->ipr_cmd_pool,
8677 ioa_cfg->ipr_cmnd_list[i],
8678 ioa_cfg->ipr_cmnd_list_dma[i]);
8679
8680 ioa_cfg->ipr_cmnd_list[i] = NULL;
8681 }
8682
8683 if (ioa_cfg->ipr_cmd_pool)
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03008684 pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008685
Brian King89aad422012-03-14 21:20:10 -05008686 kfree(ioa_cfg->ipr_cmnd_list);
8687 kfree(ioa_cfg->ipr_cmnd_list_dma);
8688 ioa_cfg->ipr_cmnd_list = NULL;
8689 ioa_cfg->ipr_cmnd_list_dma = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008690 ioa_cfg->ipr_cmd_pool = NULL;
8691}
8692
8693/**
8694 * ipr_free_mem - Frees memory allocated for an adapter
8695 * @ioa_cfg: ioa cfg struct
8696 *
8697 * Return value:
8698 * nothing
8699 **/
8700static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8701{
8702 int i;
8703
8704 kfree(ioa_cfg->res_entries);
8705 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
8706 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8707 ipr_free_cmd_blks(ioa_cfg);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008708
8709 for (i = 0; i < ioa_cfg->hrrq_num; i++)
8710 pci_free_consistent(ioa_cfg->pdev,
8711 sizeof(u32) * ioa_cfg->hrrq[i].size,
8712 ioa_cfg->hrrq[i].host_rrq,
8713 ioa_cfg->hrrq[i].host_rrq_dma);
8714
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008715 pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
8716 ioa_cfg->u.cfg_table,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008717 ioa_cfg->cfg_table_dma);
8718
8719 for (i = 0; i < IPR_NUM_HCAMS; i++) {
8720 pci_free_consistent(ioa_cfg->pdev,
8721 sizeof(struct ipr_hostrcb),
8722 ioa_cfg->hostrcb[i],
8723 ioa_cfg->hostrcb_dma[i]);
8724 }
8725
8726 ipr_free_dump(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008727 kfree(ioa_cfg->trace);
8728}
8729
8730/**
8731 * ipr_free_all_resources - Free all allocated resources for an adapter.
8732 * @ioa_cfg: ioa config struct
8733 *
8734 * This function frees all allocated resources for the
8735 * specified adapter.
8736 *
8737 * Return value:
8738 * none
8739 **/
8740static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8741{
8742 struct pci_dev *pdev = ioa_cfg->pdev;
8743
8744 ENTER;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008745 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
8746 ioa_cfg->intr_flag == IPR_USE_MSIX) {
8747 int i;
8748 for (i = 0; i < ioa_cfg->nvectors; i++)
8749 free_irq(ioa_cfg->vectors_info[i].vec,
8750 &ioa_cfg->hrrq[i]);
8751 } else
8752 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
8753
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008754 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008755 pci_disable_msi(pdev);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008756 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
8757 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008758 pci_disable_msix(pdev);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008759 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
8760 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008761
Linus Torvalds1da177e2005-04-16 15:20:36 -07008762 iounmap(ioa_cfg->hdw_dma_regs);
8763 pci_release_regions(pdev);
8764 ipr_free_mem(ioa_cfg);
8765 scsi_host_put(ioa_cfg->host);
8766 pci_disable_device(pdev);
8767 LEAVE;
8768}
8769
8770/**
8771 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8772 * @ioa_cfg: ioa config struct
8773 *
8774 * Return value:
8775 * 0 on success / -ENOMEM on allocation failure
8776 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08008777static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008778{
8779 struct ipr_cmnd *ipr_cmd;
8780 struct ipr_ioarcb *ioarcb;
8781 dma_addr_t dma_addr;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008782 int i, entries_each_hrrq, hrrq_id = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008783
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03008784 ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
8785 sizeof(struct ipr_cmnd), 512, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008786
8787 if (!ioa_cfg->ipr_cmd_pool)
8788 return -ENOMEM;
8789
Brian King89aad422012-03-14 21:20:10 -05008790 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8791 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8792
8793 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
8794 ipr_free_cmd_blks(ioa_cfg);
8795 return -ENOMEM;
8796 }
8797
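	/*
	 * Partition the command blocks across the HRRQs. With multiple
	 * queues, queue 0 is reserved for the internal command blocks and
	 * the remaining base command blocks are split evenly among the other
	 * queues; any remainder from the division is folded into the last
	 * queue just below. With a single queue it simply gets all of the
	 * command blocks.
	 */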
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008798 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8799 if (ioa_cfg->hrrq_num > 1) {
8800 if (i == 0) {
8801 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
8802 ioa_cfg->hrrq[i].min_cmd_id = 0;
8803 ioa_cfg->hrrq[i].max_cmd_id =
8804 (entries_each_hrrq - 1);
8805 } else {
8806 entries_each_hrrq =
8807 IPR_NUM_BASE_CMD_BLKS/
8808 (ioa_cfg->hrrq_num - 1);
8809 ioa_cfg->hrrq[i].min_cmd_id =
8810 IPR_NUM_INTERNAL_CMD_BLKS +
8811 (i - 1) * entries_each_hrrq;
8812 ioa_cfg->hrrq[i].max_cmd_id =
8813 (IPR_NUM_INTERNAL_CMD_BLKS +
8814 i * entries_each_hrrq - 1);
8815 }
8816 } else {
8817 entries_each_hrrq = IPR_NUM_CMD_BLKS;
8818 ioa_cfg->hrrq[i].min_cmd_id = 0;
8819 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
8820 }
8821 ioa_cfg->hrrq[i].size = entries_each_hrrq;
8822 }
8823
8824 BUG_ON(ioa_cfg->hrrq_num == 0);
8825
8826 i = IPR_NUM_CMD_BLKS -
8827 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
8828 if (i > 0) {
8829 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
8830 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
8831 }
8832
Linus Torvalds1da177e2005-04-16 15:20:36 -07008833 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03008834 ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008835
8836 if (!ipr_cmd) {
8837 ipr_free_cmd_blks(ioa_cfg);
8838 return -ENOMEM;
8839 }
8840
8841 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
8842 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8843 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8844
8845 ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08008846 ipr_cmd->dma_addr = dma_addr;
8847 if (ioa_cfg->sis64)
8848 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
8849 else
8850 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
8851
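		/*
		 * The command index is stored shifted left by two in the
		 * response handle; the adapter uses the low-order bits of
		 * each host RRQ response word for toggle/status flags.
		 */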
Linus Torvalds1da177e2005-04-16 15:20:36 -07008852 ioarcb->host_response_handle = cpu_to_be32(i << 2);
Wayne Boyera32c0552010-02-19 13:23:36 -08008853 if (ioa_cfg->sis64) {
8854 ioarcb->u.sis64_addr_data.data_ioadl_addr =
8855 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
8856 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
Wayne Boyer96d21f02010-05-10 09:13:27 -07008857 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
Wayne Boyera32c0552010-02-19 13:23:36 -08008858 } else {
8859 ioarcb->write_ioadl_addr =
8860 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8861 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8862 ioarcb->ioasa_host_pci_addr =
Wayne Boyer96d21f02010-05-10 09:13:27 -07008863 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
Wayne Boyera32c0552010-02-19 13:23:36 -08008864 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008865 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
8866 ipr_cmd->cmd_index = i;
8867 ipr_cmd->ioa_cfg = ioa_cfg;
8868 ipr_cmd->sense_buffer_dma = dma_addr +
8869 offsetof(struct ipr_cmnd, sense_buffer);
8870
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008871 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
8872 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
8873 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8874 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
8875 hrrq_id++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008876 }
8877
8878 return 0;
8879}
8880
8881/**
8882 * ipr_alloc_mem - Allocate memory for an adapter
8883 * @ioa_cfg: ioa config struct
8884 *
8885 * Return value:
8886 * 0 on success / non-zero for error
8887 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08008888static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008889{
8890 struct pci_dev *pdev = ioa_cfg->pdev;
8891 int i, rc = -ENOMEM;
8892
8893 ENTER;
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06008894 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008895 ioa_cfg->max_devs_supported, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008896
8897 if (!ioa_cfg->res_entries)
8898 goto out;
8899
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008900 if (ioa_cfg->sis64) {
8901 ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
8902 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8903 ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
8904 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8905 ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
8906 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
Brian Kinga2e49cb2013-01-11 17:43:48 -06008907
8908 if (!ioa_cfg->target_ids || !ioa_cfg->array_ids
8909 || !ioa_cfg->vset_ids)
8910 goto out_free_res_entries;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008911 }
8912
8913 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008914 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008915 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
8916 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008917
8918 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
8919 sizeof(struct ipr_misc_cbs),
8920 &ioa_cfg->vpd_cbs_dma);
8921
8922 if (!ioa_cfg->vpd_cbs)
8923 goto out_free_res_entries;
8924
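	/*
	 * Queue 0 shares the SCSI host lock; each additional HRRQ gets its
	 * own lock so the queues can be serviced independently.
	 */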
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008925 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8926 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
8927 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008928 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
8929 if (i == 0)
8930 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
8931 else
8932 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008933 }
8934
Linus Torvalds1da177e2005-04-16 15:20:36 -07008935 if (ipr_alloc_cmd_blks(ioa_cfg))
8936 goto out_free_vpd_cbs;
8937
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008938 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8939 ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
8940 sizeof(u32) * ioa_cfg->hrrq[i].size,
8941 &ioa_cfg->hrrq[i].host_rrq_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008942
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008943 if (!ioa_cfg->hrrq[i].host_rrq) {
8944 while (--i >= 0)
8945 pci_free_consistent(pdev,
8946 sizeof(u32) * ioa_cfg->hrrq[i].size,
8947 ioa_cfg->hrrq[i].host_rrq,
8948 ioa_cfg->hrrq[i].host_rrq_dma);
8949 goto out_ipr_free_cmd_blocks;
8950 }
8951 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
8952 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008953
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008954 ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
8955 ioa_cfg->cfg_table_size,
8956 &ioa_cfg->cfg_table_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008957
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008958 if (!ioa_cfg->u.cfg_table)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008959 goto out_free_host_rrq;
8960
8961 for (i = 0; i < IPR_NUM_HCAMS; i++) {
8962 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
8963 sizeof(struct ipr_hostrcb),
8964 &ioa_cfg->hostrcb_dma[i]);
8965
8966 if (!ioa_cfg->hostrcb[i])
8967 goto out_free_hostrcb_dma;
8968
8969 ioa_cfg->hostrcb[i]->hostrcb_dma =
8970 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
Brian King49dc6a12006-11-21 10:28:35 -06008971 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008972 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
8973 }
8974
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06008975 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
Linus Torvalds1da177e2005-04-16 15:20:36 -07008976 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
8977
8978 if (!ioa_cfg->trace)
8979 goto out_free_hostrcb_dma;
8980
Linus Torvalds1da177e2005-04-16 15:20:36 -07008981 rc = 0;
8982out:
8983 LEAVE;
8984 return rc;
8985
8986out_free_hostrcb_dma:
8987 while (i-- > 0) {
8988 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
8989 ioa_cfg->hostrcb[i],
8990 ioa_cfg->hostrcb_dma[i]);
8991 }
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008992 pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
8993 ioa_cfg->u.cfg_table,
8994 ioa_cfg->cfg_table_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008995out_free_host_rrq:
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008996 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8997 pci_free_consistent(pdev,
8998 sizeof(u32) * ioa_cfg->hrrq[i].size,
8999 ioa_cfg->hrrq[i].host_rrq,
9000 ioa_cfg->hrrq[i].host_rrq_dma);
9001 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009002out_ipr_free_cmd_blocks:
9003 ipr_free_cmd_blks(ioa_cfg);
9004out_free_vpd_cbs:
9005 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
9006 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9007out_free_res_entries:
9008 kfree(ioa_cfg->res_entries);
Brian Kinga2e49cb2013-01-11 17:43:48 -06009009 kfree(ioa_cfg->target_ids);
9010 kfree(ioa_cfg->array_ids);
9011 kfree(ioa_cfg->vset_ids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009012 goto out;
9013}
9014
9015/**
9016 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9017 * @ioa_cfg: ioa config struct
9018 *
9019 * Return value:
9020 * none
9021 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009022static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009023{
9024 int i;
9025
9026 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9027 ioa_cfg->bus_attr[i].bus = i;
9028 ioa_cfg->bus_attr[i].qas_enabled = 0;
9029 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9030 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9031 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9032 else
9033 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9034 }
9035}
9036
9037/**
9038 * ipr_init_ioa_cfg - Initialize IOA config struct
9039 * @ioa_cfg: ioa config struct
9040 * @host: scsi host struct
9041 * @pdev: PCI dev struct
9042 *
9043 * Return value:
9044 * none
9045 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009046static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9047 struct Scsi_Host *host, struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009048{
9049 const struct ipr_interrupt_offsets *p;
9050 struct ipr_interrupts *t;
9051 void __iomem *base;
9052
9053 ioa_cfg->host = host;
9054 ioa_cfg->pdev = pdev;
9055 ioa_cfg->log_level = ipr_log_level;
brking@us.ibm.com3d1d0da2005-11-01 17:01:54 -06009056 ioa_cfg->doorbell = IPR_DOORBELL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009057 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9058 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009059 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9060 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9061 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9062 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9063
Linus Torvalds1da177e2005-04-16 15:20:36 -07009064 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9065 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9066 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9067 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
David Howellsc4028952006-11-22 14:57:56 +00009068 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009069 init_waitqueue_head(&ioa_cfg->reset_wait_q);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009070 init_waitqueue_head(&ioa_cfg->msi_wait_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009071 ioa_cfg->sdt_state = INACTIVE;
9072
9073 ipr_initialize_bus_attr(ioa_cfg);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009074 ioa_cfg->max_devs_supported = ipr_max_devs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009075
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009076 if (ioa_cfg->sis64) {
9077 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9078 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9079 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9080 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9081 } else {
9082 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9083 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9084 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9085 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9086 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009087 host->max_channel = IPR_MAX_BUS_TO_SCAN;
9088 host->unique_id = host->host_no;
9089 host->max_cmd_len = IPR_MAX_CDB_LEN;
Brian King89aad422012-03-14 21:20:10 -05009090 host->can_queue = ioa_cfg->max_cmds;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009091 pci_set_drvdata(pdev, ioa_cfg);
9092
9093 p = &ioa_cfg->chip_cfg->regs;
9094 t = &ioa_cfg->regs;
9095 base = ioa_cfg->hdw_dma_regs;
9096
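	/* Turn the chip-specific register offsets into mapped addresses. */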
9097 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9098 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009099 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009100 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009101 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009102 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009103 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009104 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009105 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009106 t->ioarrin_reg = base + p->ioarrin_reg;
9107 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009108 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009109 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009110 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009111 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009112 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009113
9114 if (ioa_cfg->sis64) {
Wayne Boyer214777b2010-02-19 13:24:26 -08009115 t->init_feedback_reg = base + p->init_feedback_reg;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009116 t->dump_addr_reg = base + p->dump_addr_reg;
9117 t->dump_data_reg = base + p->dump_data_reg;
Wayne Boyer8701f182010-06-04 10:26:50 -07009118 t->endian_swap_reg = base + p->endian_swap_reg;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009119 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009120}
9121
9122/**
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009123 * ipr_get_chip_info - Find adapter chip information
Linus Torvalds1da177e2005-04-16 15:20:36 -07009124 * @dev_id: PCI device id struct
9125 *
9126 * Return value:
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009127 * ptr to chip information on success / NULL on failure
Linus Torvalds1da177e2005-04-16 15:20:36 -07009128 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009129static const struct ipr_chip_t *
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009130ipr_get_chip_info(const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009131{
9132 int i;
9133
Linus Torvalds1da177e2005-04-16 15:20:36 -07009134 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9135 if (ipr_chip[i].vendor == dev_id->vendor &&
9136 ipr_chip[i].device == dev_id->device)
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009137 return &ipr_chip[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07009138 return NULL;
9139}
9140
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009141static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9142{
9143 struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
9144 int i, err, vectors;
9145
9146 for (i = 0; i < ARRAY_SIZE(entries); ++i)
9147 entries[i].entry = i;
9148
9149 vectors = ipr_number_of_msix;
9150
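	/*
	 * pci_enable_msix() returns a positive count when fewer vectors are
	 * available than requested; retry with that count until it either
	 * succeeds (0) or fails outright (< 0).
	 */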
9151 while ((err = pci_enable_msix(ioa_cfg->pdev, entries, vectors)) > 0)
9152 vectors = err;
9153
9154 if (err < 0) {
9155 pci_disable_msix(ioa_cfg->pdev);
9156 return err;
9157 }
9158
9159 if (!err) {
9160 for (i = 0; i < vectors; i++)
9161 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9162 ioa_cfg->nvectors = vectors;
9163 }
9164
9165 return err;
9166}
9167
9168static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9169{
9170 int i, err, vectors;
9171
9172 vectors = ipr_number_of_msix;
9173
9174 while ((err = pci_enable_msi_block(ioa_cfg->pdev, vectors)) > 0)
9175 vectors = err;
9176
9177 if (err < 0) {
9178 pci_disable_msi(ioa_cfg->pdev);
9179 return err;
9180 }
9181
9182 if (!err) {
9183 for (i = 0; i < vectors; i++)
9184 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9185 ioa_cfg->nvectors = vectors;
9186 }
9187
9188 return err;
9189}
9190
9191static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9192{
9193 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9194
9195 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9196 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9197 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9198 ioa_cfg->vectors_info[vec_idx].
9199 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9200 }
9201}
9202
9203static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9204{
9205 int i, rc;
9206
9207 for (i = 1; i < ioa_cfg->nvectors; i++) {
9208 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9209 ipr_isr_mhrrq,
9210 0,
9211 ioa_cfg->vectors_info[i].desc,
9212 &ioa_cfg->hrrq[i]);
9213 if (rc) {
9214 while (--i >= 0)
9215 free_irq(ioa_cfg->vectors_info[i].vec,
9216 &ioa_cfg->hrrq[i]);
9217 return rc;
9218 }
9219 }
9220 return 0;
9221}
9222
Linus Torvalds1da177e2005-04-16 15:20:36 -07009223/**
Wayne Boyer95fecd92009-06-16 15:13:28 -07009224 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9225 * @irq: interrupt number
 * @devp: pointer to the ioa config struct
9226 *
9227 * Description: Simply set the msi_received flag to 1 indicating that
9228 * Message Signaled Interrupts are supported.
9229 *
9230 * Return value:
9231 * IRQ_HANDLED
9232 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009233static irqreturn_t ipr_test_intr(int irq, void *devp)
Wayne Boyer95fecd92009-06-16 15:13:28 -07009234{
9235 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9236 unsigned long lock_flags = 0;
9237 irqreturn_t rc = IRQ_HANDLED;
9238
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009239 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009240 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9241
9242 ioa_cfg->msi_received = 1;
9243 wake_up(&ioa_cfg->msi_wait_q);
9244
9245 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9246 return rc;
9247}
9248
9249/**
9250 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9251 * @pdev: PCI device struct
9252 *
9253 * Description: The return value from pci_enable_msi() can not always be
9254 * trusted. This routine sets up and initiates a test interrupt to determine
9255 * if the interrupt is received via the ipr_test_intr() service routine.
9256 * If the tests fails, the driver will fall back to LSI.
9257 *
9258 * Return value:
9259 * 0 on success / non-zero on failure
9260 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009261static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
Wayne Boyer95fecd92009-06-16 15:13:28 -07009262{
9263 int rc;
9264 volatile u32 int_reg;
9265 unsigned long lock_flags = 0;
9266
9267 ENTER;
9268
9269 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9270 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9271 ioa_cfg->msi_received = 0;
9272 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
Wayne Boyer214777b2010-02-19 13:24:26 -08009273 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009274 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9275 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9276
9277 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9278 if (rc) {
9279 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
9280 return rc;
9281 } else if (ipr_debug)
9282 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
9283
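	/*
	 * Fire a debug-acknowledge interrupt and give ipr_test_intr() up to
	 * one second to observe it; if msi_received is still clear afterwards
	 * the MSI path is not working and the driver falls back to LSI.
	 */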
Wayne Boyer214777b2010-02-19 13:24:26 -08009284 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009285 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9286 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009287 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009288 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9289
Wayne Boyer95fecd92009-06-16 15:13:28 -07009290 if (!ioa_cfg->msi_received) {
9291 /* MSI test failed */
9292 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
9293 rc = -EOPNOTSUPP;
9294 } else if (ipr_debug)
9295 dev_info(&pdev->dev, "MSI test succeeded.\n");
9296
9297 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9298
9299 free_irq(pdev->irq, ioa_cfg);
9300
9301 LEAVE;
9302
9303 return rc;
9304}
9305
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009306/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
Linus Torvalds1da177e2005-04-16 15:20:36 -07009307 * @pdev: PCI device struct
9308 * @dev_id: PCI device id struct
9309 *
9310 * Return value:
9311 * 0 on success / non-zero on failure
9312 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009313static int ipr_probe_ioa(struct pci_dev *pdev,
9314 const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009315{
9316 struct ipr_ioa_cfg *ioa_cfg;
9317 struct Scsi_Host *host;
9318 unsigned long ipr_regs_pci;
9319 void __iomem *ipr_regs;
Eric Sesterhenna2a65a32006-09-25 16:59:07 -07009320 int rc = PCIBIOS_SUCCESSFUL;
Brian King473b1e82007-05-02 10:44:11 -05009321 volatile u32 mask, uproc, interrupts;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009322 unsigned long lock_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009323
9324 ENTER;
9325
9326 if ((rc = pci_enable_device(pdev))) {
9327 dev_err(&pdev->dev, "Cannot enable adapter\n");
9328 goto out;
9329 }
9330
9331 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
9332
9333 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9334
9335 if (!host) {
9336 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9337 rc = -ENOMEM;
9338 goto out_disable;
9339 }
9340
9341 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9342 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
Dan Williams8d8e7d12012-07-09 21:06:08 -07009343 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009344
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009345 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009346
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009347 if (!ioa_cfg->ipr_chip) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009348 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9349 dev_id->vendor, dev_id->device);
9350 goto out_scsi_host_put;
9351 }
9352
Wayne Boyera32c0552010-02-19 13:23:36 -08009353 /* set SIS 32 or SIS 64 */
9354 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009355 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
Brian King7dd21302012-03-14 21:20:08 -05009356 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
Brian King89aad422012-03-14 21:20:10 -05009357 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009358
Brian King5469cb52007-03-29 12:42:40 -05009359 if (ipr_transop_timeout)
9360 ioa_cfg->transop_timeout = ipr_transop_timeout;
9361 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9362 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9363 else
9364 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9365
Auke Kok44c10132007-06-08 15:46:36 -07009366 ioa_cfg->revid = pdev->revision;
Brian King463fc692007-05-07 17:09:05 -05009367
Linus Torvalds1da177e2005-04-16 15:20:36 -07009368 ipr_regs_pci = pci_resource_start(pdev, 0);
9369
9370 rc = pci_request_regions(pdev, IPR_NAME);
9371 if (rc < 0) {
9372 dev_err(&pdev->dev,
9373 "Couldn't register memory range of registers\n");
9374 goto out_scsi_host_put;
9375 }
9376
Arjan van de Ven25729a72008-09-28 16:18:02 -07009377 ipr_regs = pci_ioremap_bar(pdev, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009378
9379 if (!ipr_regs) {
9380 dev_err(&pdev->dev,
9381 "Couldn't map memory range of registers\n");
9382 rc = -ENOMEM;
9383 goto out_release_regions;
9384 }
9385
9386 ioa_cfg->hdw_dma_regs = ipr_regs;
9387 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9388 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9389
9390 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9391
9392 pci_set_master(pdev);
9393
Wayne Boyera32c0552010-02-19 13:23:36 -08009394 if (ioa_cfg->sis64) {
9395 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
9396 if (rc < 0) {
9397 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
9398 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9399 }
9400
9401 } else
9402 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9403
Linus Torvalds1da177e2005-04-16 15:20:36 -07009404 if (rc < 0) {
9405 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
9406 goto cleanup_nomem;
9407 }
9408
9409 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9410 ioa_cfg->chip_cfg->cache_line_size);
9411
9412 if (rc != PCIBIOS_SUCCESSFUL) {
9413 dev_err(&pdev->dev, "Write of cache line size failed\n");
9414 rc = -EIO;
9415 goto cleanup_nomem;
9416 }
9417
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009418 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9419 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9420 IPR_MAX_MSIX_VECTORS);
9421 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9422 }
9423
9424 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009425 ipr_enable_msix(ioa_cfg) == 0)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009426 ioa_cfg->intr_flag = IPR_USE_MSIX;
9427 else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009428 ipr_enable_msi(ioa_cfg) == 0)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009429 ioa_cfg->intr_flag = IPR_USE_MSI;
9430 else {
9431 ioa_cfg->intr_flag = IPR_USE_LSI;
9432 ioa_cfg->nvectors = 1;
9433 dev_info(&pdev->dev, "Cannot enable MSI.\n");
9434 }
9435
9436 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9437 ioa_cfg->intr_flag == IPR_USE_MSIX) {
Wayne Boyer95fecd92009-06-16 15:13:28 -07009438 rc = ipr_test_msi(ioa_cfg, pdev);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009439 if (rc == -EOPNOTSUPP) {
9440 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9441 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9442 pci_disable_msi(pdev);
9443 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9444 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9445 pci_disable_msix(pdev);
9446 }
9447
9448 ioa_cfg->intr_flag = IPR_USE_LSI;
9449 ioa_cfg->nvectors = 1;
9450 }
Wayne Boyer95fecd92009-06-16 15:13:28 -07009451 else if (rc)
9452 goto out_msi_disable;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009453 else {
9454 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9455 dev_info(&pdev->dev,
9456 "Request for %d MSIs succeeded with starting IRQ: %d\n",
9457 ioa_cfg->nvectors, pdev->irq);
9458 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9459 dev_info(&pdev->dev,
9460 "Request for %d MSIXs succeeded.",
9461 ioa_cfg->nvectors);
9462 }
9463 }
9464
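	/*
	 * Use no more HRRQs than we have interrupt vectors, online CPUs,
	 * or the driver maximum allows.
	 */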
9465 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9466 (unsigned int)num_online_cpus(),
9467 (unsigned int)IPR_MAX_HRRQ_NUM);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009468
Linus Torvalds1da177e2005-04-16 15:20:36 -07009469 /* Save away PCI config space for use following IOA reset */
9470 rc = pci_save_state(pdev);
9471
9472 if (rc != PCIBIOS_SUCCESSFUL) {
9473 dev_err(&pdev->dev, "Failed to save PCI config space\n");
9474 rc = -EIO;
Julia Lawallf170c682011-07-11 14:08:25 -07009475 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009476 }
9477
9478 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
Julia Lawallf170c682011-07-11 14:08:25 -07009479 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009480
9481 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
Julia Lawallf170c682011-07-11 14:08:25 -07009482 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009483
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009484 if (ioa_cfg->sis64)
9485 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9486 + ((sizeof(struct ipr_config_table_entry64)
9487 * ioa_cfg->max_devs_supported)));
9488 else
9489 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9490 + ((sizeof(struct ipr_config_table_entry)
9491 * ioa_cfg->max_devs_supported)));
9492
Linus Torvalds1da177e2005-04-16 15:20:36 -07009493 rc = ipr_alloc_mem(ioa_cfg);
9494 if (rc < 0) {
9495 dev_err(&pdev->dev,
9496 "Couldn't allocate enough memory for device driver!\n");
Julia Lawallf170c682011-07-11 14:08:25 -07009497 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009498 }
9499
brking@us.ibm.comce155cc2005-11-17 09:35:12 -06009500 /*
9501 * If HRRQ updated interrupt is not masked, or reset alert is set,
9502 * the card is in an unknown state and needs a hard reset
9503 */
Wayne Boyer214777b2010-02-19 13:24:26 -08009504 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9505 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9506 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
brking@us.ibm.comce155cc2005-11-17 09:35:12 -06009507 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
9508 ioa_cfg->needs_hard_reset = 1;
Anton Blanchard5d7c20b2011-08-01 19:43:45 +10009509 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
Brian King473b1e82007-05-02 10:44:11 -05009510 ioa_cfg->needs_hard_reset = 1;
9511 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
9512 ioa_cfg->ioa_unit_checked = 1;
brking@us.ibm.comce155cc2005-11-17 09:35:12 -06009513
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009514 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009515 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009516 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009517
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009518 if (ioa_cfg->intr_flag == IPR_USE_MSI
9519 || ioa_cfg->intr_flag == IPR_USE_MSIX) {
9520 name_msi_vectors(ioa_cfg);
9521 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
9522 0,
9523 ioa_cfg->vectors_info[0].desc,
9524 &ioa_cfg->hrrq[0]);
9525 if (!rc)
9526 rc = ipr_request_other_msi_irqs(ioa_cfg);
9527 } else {
9528 rc = request_irq(pdev->irq, ipr_isr,
9529 IRQF_SHARED,
9530 IPR_NAME, &ioa_cfg->hrrq[0]);
9531 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009532 if (rc) {
9533 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
9534 pdev->irq, rc);
9535 goto cleanup_nolog;
9536 }
9537
Brian King463fc692007-05-07 17:09:05 -05009538 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
9539 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9540 ioa_cfg->needs_warm_reset = 1;
9541 ioa_cfg->reset = ipr_reset_slot_reset;
9542 } else
9543 ioa_cfg->reset = ipr_reset_start_bist;
9544
Linus Torvalds1da177e2005-04-16 15:20:36 -07009545 spin_lock(&ipr_driver_lock);
9546 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
9547 spin_unlock(&ipr_driver_lock);
9548
9549 LEAVE;
9550out:
9551 return rc;
9552
9553cleanup_nolog:
9554 ipr_free_mem(ioa_cfg);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009555out_msi_disable:
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009556 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9557 pci_disable_msi(pdev);
9558 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9559 pci_disable_msix(pdev);
Julia Lawallf170c682011-07-11 14:08:25 -07009560cleanup_nomem:
9561 iounmap(ipr_regs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009562out_release_regions:
9563 pci_release_regions(pdev);
9564out_scsi_host_put:
9565 scsi_host_put(host);
9566out_disable:
9567 pci_disable_device(pdev);
9568 goto out;
9569}
9570
9571/**
9572 * ipr_scan_vsets - Scans for VSET devices
9573 * @ioa_cfg: ioa config struct
9574 *
9575 * Description: Since the VSET resources do not follow SAM in that we can have
9576 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
9577 *
9578 * Return value:
9579 * none
9580 **/
9581static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
9582{
9583 int target, lun;
9584
9585 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03009586 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009587 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
9588}
9589
9590/**
9591 * ipr_initiate_ioa_bringdown - Bring down an adapter
9592 * @ioa_cfg: ioa config struct
9593 * @shutdown_type: shutdown type
9594 *
9595 * Description: This function will initiate bringing down the adapter.
9596 * This consists of issuing an IOA shutdown to the adapter
9597 * to flush the cache, and running BIST.
9598 * If the caller needs to wait on the completion of the reset,
9599 * the caller must sleep on the reset_wait_q.
9600 *
9601 * Return value:
9602 * none
9603 **/
9604static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
9605 enum ipr_shutdown_type shutdown_type)
9606{
9607 ENTER;
9608 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9609 ioa_cfg->sdt_state = ABORT_DUMP;
9610 ioa_cfg->reset_retries = 0;
9611 ioa_cfg->in_ioa_bringdown = 1;
9612 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
9613 LEAVE;
9614}
9615
9616/**
9617 * __ipr_remove - Remove a single adapter
9618 * @pdev: pci device struct
9619 *
9620 * Adapter hot plug remove entry point.
9621 *
9622 * Return value:
9623 * none
9624 **/
9625static void __ipr_remove(struct pci_dev *pdev)
9626{
9627 unsigned long host_lock_flags = 0;
9628 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9629 ENTER;
9630
9631 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03009632 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -05009633 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9634 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9635 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9636 }
9637
Linus Torvalds1da177e2005-04-16 15:20:36 -07009638 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9639
9640 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9641 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
Tejun Heo43829732012-08-20 14:51:24 -07009642 flush_work(&ioa_cfg->work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009643 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9644
9645 spin_lock(&ipr_driver_lock);
9646 list_del(&ioa_cfg->queue);
9647 spin_unlock(&ipr_driver_lock);
9648
9649 if (ioa_cfg->sdt_state == ABORT_DUMP)
9650 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9651 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9652
9653 ipr_free_all_resources(ioa_cfg);
9654
9655 LEAVE;
9656}
9657
9658/**
9659 * ipr_remove - IOA hot plug remove entry point
9660 * @pdev: pci device struct
9661 *
9662 * Adapter hot plug remove entry point.
9663 *
9664 * Return value:
9665 * none
9666 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009667static void ipr_remove(struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009668{
9669 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9670
9671 ENTER;
9672
Tony Jonesee959b02008-02-22 00:13:36 +01009673 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -07009674 &ipr_trace_attr);
Tony Jonesee959b02008-02-22 00:13:36 +01009675 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -07009676 &ipr_dump_attr);
9677 scsi_remove_host(ioa_cfg->host);
9678
9679 __ipr_remove(pdev);
9680
9681 LEAVE;
9682}
9683
9684/**
9685 * ipr_probe - Adapter hot plug add entry point
9686 * @pdev: PCI device struct
 * @dev_id: PCI device id struct
 *
9687 * Return value:
9688 * 0 on success / non-zero on failure
9689 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009690static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009691{
9692 struct ipr_ioa_cfg *ioa_cfg;
9693 int rc;
9694
9695 rc = ipr_probe_ioa(pdev, dev_id);
9696
9697 if (rc)
9698 return rc;
9699
9700 ioa_cfg = pci_get_drvdata(pdev);
9701 rc = ipr_probe_ioa_part2(ioa_cfg);
9702
9703 if (rc) {
9704 __ipr_remove(pdev);
9705 return rc;
9706 }
9707
9708 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
9709
9710 if (rc) {
9711 __ipr_remove(pdev);
9712 return rc;
9713 }
9714
Tony Jonesee959b02008-02-22 00:13:36 +01009715 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -07009716 &ipr_trace_attr);
9717
9718 if (rc) {
9719 scsi_remove_host(ioa_cfg->host);
9720 __ipr_remove(pdev);
9721 return rc;
9722 }
9723
Tony Jonesee959b02008-02-22 00:13:36 +01009724 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -07009725 &ipr_dump_attr);
9726
9727 if (rc) {
Tony Jonesee959b02008-02-22 00:13:36 +01009728 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -07009729 &ipr_trace_attr);
9730 scsi_remove_host(ioa_cfg->host);
9731 __ipr_remove(pdev);
9732 return rc;
9733 }
9734
9735 scsi_scan_host(ioa_cfg->host);
9736 ipr_scan_vsets(ioa_cfg);
9737 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
9738 ioa_cfg->allow_ml_add_del = 1;
brking@us.ibm.com11cd8f12005-11-01 17:00:11 -06009739 ioa_cfg->host->max_channel = IPR_VSET_BUS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009740 schedule_work(&ioa_cfg->work_q);
9741 return 0;
9742}
9743
9744/**
9745 * ipr_shutdown - Shutdown handler.
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -07009746 * @pdev: pci device struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07009747 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * a shutdown command to the adapter to flush the write cache.
9750 *
9751 * Return value:
9752 * none
9753 **/
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -07009754static void ipr_shutdown(struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009755{
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -07009756 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009757 unsigned long lock_flags = 0;
9758
9759 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03009760 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -05009761 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9762 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9763 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9764 }
9765
Linus Torvalds1da177e2005-04-16 15:20:36 -07009766 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9767 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9768 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9769}
9770
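/*
 * PCI IDs claimed by this driver.  Each entry is
 * { vendor, device, subvendor, subdevice, class, class_mask, driver_data };
 * driver_data carries per-adapter quirk flags such as
 * IPR_USE_LONG_TRANSOP_TIMEOUT and IPR_USE_PCI_WARM_RESET.
 * MODULE_DEVICE_TABLE() below exports these IDs so module loaders can
 * autoload the driver when a matching adapter is found.
 */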
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009771static struct pci_device_id ipr_pci_table[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009772 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -06009773 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07009774 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -06009775 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07009776 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -06009777 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07009778 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -06009779 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07009780 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King6d84c942007-01-23 11:25:23 -06009781 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07009782 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King6d84c942007-01-23 11:25:23 -06009783 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07009784 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King6d84c942007-01-23 11:25:23 -06009785 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -06009786 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King5469cb52007-03-29 12:42:40 -05009787 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
9788 IPR_USE_LONG_TRANSOP_TIMEOUT },
brking@us.ibm.com86f51432005-11-01 17:02:42 -06009789 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
Brian King6d84c942007-01-23 11:25:23 -06009790 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -06009791 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
Brian King22d2e402007-04-26 16:00:13 -05009792 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9793 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King60e74862006-11-21 10:28:10 -06009794 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
Brian King5469cb52007-03-29 12:42:40 -05009795 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9796 IPR_USE_LONG_TRANSOP_TIMEOUT },
brking@us.ibm.com86f51432005-11-01 17:02:42 -06009797 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
Brian King6d84c942007-01-23 11:25:23 -06009798 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -06009799 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
Brian King22d2e402007-04-26 16:00:13 -05009800 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9801 IPR_USE_LONG_TRANSOP_TIMEOUT},
Brian King60e74862006-11-21 10:28:10 -06009802 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
Brian King5469cb52007-03-29 12:42:40 -05009803 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9804 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King60e74862006-11-21 10:28:10 -06009805 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Brian King22d2e402007-04-26 16:00:13 -05009806 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
9807 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King185eb312007-03-29 12:42:53 -05009808 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Brian King185eb312007-03-29 12:42:53 -05009809 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
9810 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Wayne Boyerb0f56d32010-06-24 13:34:14 -07009811 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
9812 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Brian King5469cb52007-03-29 12:42:40 -05009813 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
Brian King463fc692007-05-07 17:09:05 -05009814 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
Linus Torvalds1da177e2005-04-16 15:20:36 -07009815 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
Brian King6d84c942007-01-23 11:25:23 -06009816 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07009817 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
Brian King6d84c942007-01-23 11:25:23 -06009818 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -06009819 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
Brian King5469cb52007-03-29 12:42:40 -05009820 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
9821 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King60e74862006-11-21 10:28:10 -06009822 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
Brian King5469cb52007-03-29 12:42:40 -05009823 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
9824 IPR_USE_LONG_TRANSOP_TIMEOUT },
Wayne Boyerd7b46272010-02-19 13:24:38 -08009825 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9826 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
9827 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9828 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
9829 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9830 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
Wayne Boyer32622bd2010-10-18 20:24:34 -07009831 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
wenxiong@linux.vnet.ibm.comb8d5d562013-01-11 17:43:47 -06009832 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
9833 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
Wayne Boyer5a918352011-10-27 11:58:21 -07009834 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
9835 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
Wayne Boyer32622bd2010-10-18 20:24:34 -07009836 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
Wayne Boyercd9b3d02012-02-23 11:54:55 -08009837 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
Wayne Boyerd7b46272010-02-19 13:24:38 -08009838 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
Wayne Boyercd9b3d02012-02-23 11:54:55 -08009839 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
Wayne Boyerd7b46272010-02-19 13:24:38 -08009840 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
Wayne Boyercd9b3d02012-02-23 11:54:55 -08009841 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
Wayne Boyerd7b46272010-02-19 13:24:38 -08009842 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
Wayne Boyercd9b3d02012-02-23 11:54:55 -08009843 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9844 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
9845 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
Wayne Boyerd7b46272010-02-19 13:24:38 -08009846 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
wenxiong@linux.vnet.ibm.comb8d5d562013-01-11 17:43:47 -06009847 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9848 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
9849 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9850 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
9851 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9852 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
9853 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9854 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07009855 { }
9856};
9857MODULE_DEVICE_TABLE(pci, ipr_pci_table);
9858
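/*
 * PCI error recovery callbacks: ->error_detected is invoked when a PCI
 * error is reported against the adapter, and ->slot_reset restarts the
 * adapter once the slot has been reset.
 */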
Stephen Hemmingera55b2d22012-09-07 09:33:16 -07009859static const struct pci_error_handlers ipr_err_handler = {
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009860 .error_detected = ipr_pci_error_detected,
9861 .slot_reset = ipr_pci_slot_reset,
9862};
9863
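/*
 * Glue the probe/remove/shutdown entry points and the error handlers
 * into the PCI core.
 */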
Linus Torvalds1da177e2005-04-16 15:20:36 -07009864static struct pci_driver ipr_driver = {
9865 .name = IPR_NAME,
9866 .id_table = ipr_pci_table,
9867 .probe = ipr_probe,
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009868 .remove = ipr_remove,
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -07009869 .shutdown = ipr_shutdown,
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009870 .err_handler = &ipr_err_handler,
Linus Torvalds1da177e2005-04-16 15:20:36 -07009871};
9872
/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
9879static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
9880{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009881 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Wayne Boyerf72919e2010-02-19 13:24:21 -08009882}
9883
/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	reboot notifier event (SYS_RESTART, SYS_HALT or SYS_POWER_OFF)
 * @buf:	notifier data, unused
 *
 * Return value:
 * 	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
9890static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
9891{
9892 struct ipr_cmnd *ipr_cmd;
9893 struct ipr_ioa_cfg *ioa_cfg;
9894 unsigned long flags = 0;
9895
9896 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
9897 return NOTIFY_DONE;
9898
9899 spin_lock(&ipr_driver_lock);
9900
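	/*
	 * Send a "prepare for normal shutdown" to every registered adapter
	 * that is still accepting commands before the system restarts,
	 * halts or powers off.
	 */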
9901 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
9902 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009903 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
Wayne Boyerf72919e2010-02-19 13:24:21 -08009904 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9905 continue;
9906 }
9907
9908 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9909 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9910 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9911 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9912 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
9913
9914 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
9915 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9916 }
9917 spin_unlock(&ipr_driver_lock);
9918
9919 return NOTIFY_OK;
9920}
9921
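/*
 * Reboot notifier registered in ipr_init(); calls ipr_halt() on
 * restart, halt and power-off.
 */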
static struct notifier_block ipr_notifier = {
	.notifier_call = ipr_halt,
};
9925
9926/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07009927 * ipr_init - Module entry point
9928 *
9929 * Return value:
9930 * 0 on success / negative value on failure
9931 **/
9932static int __init ipr_init(void)
9933{
9934 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
9935 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
9936
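	/*
	 * Quiesce adapters on reboot/halt in addition to the per-device
	 * ->shutdown hook.
	 */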
Wayne Boyerf72919e2010-02-19 13:24:21 -08009937 register_reboot_notifier(&ipr_notifier);
Henrik Kretzschmardcbccbde2006-09-25 16:58:58 -07009938 return pci_register_driver(&ipr_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009939}
9940
9941/**
9942 * ipr_exit - Module unload
9943 *
9944 * Module unload entry point.
9945 *
9946 * Return value:
9947 * none
9948 **/
9949static void __exit ipr_exit(void)
9950{
Wayne Boyerf72919e2010-02-19 13:24:21 -08009951 unregister_reboot_notifier(&ipr_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009952 pci_unregister_driver(&ipr_driver);
9953}
9954
9955module_init(ipr_init);
9956module_exit(ipr_exit);