/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *	  by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		.iopoll_weight = 64,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 5). (default:2)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

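/*
 * Usage sketch (illustrative only; the parameter values below are
 * arbitrary examples, not recommendations): the module parameters
 * declared above can be set at load time, e.g.
 *
 *	modprobe ipr log_level=2 max_speed=1 number_of_msix=2
 *
 * or, when the driver is built in, on the kernel command line as
 * ipr.<parameter>=<value>.
 */
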
/* A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"No ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:		trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[atomic_add_return
			(1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 * @fast_done:	fast done function call-back
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	init_timer(&ipr_cmd->timer);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	hrr queue
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}

/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:			done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
		       	cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
			       	cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

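/**
 * ipr_get_hrrq_index - Pick an HRR queue for a new command
 * @ioa_cfg:	ioa config struct
 *
 * When only a single HRR queue is configured it is used for everything;
 * otherwise commands are distributed round-robin over queues
 * 1 .. hrrq_num - 1, leaving queue 0 out of the rotation.
 *
 * Return value:
 * 	index of the HRR queue to use
 **/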
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001007static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1008{
1009 if (ioa_cfg->hrrq_num == 1)
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06001010 return 0;
1011 else
1012 return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001013}
1014
Linus Torvalds1da177e2005-04-16 15:20:36 -07001015/**
1016 * ipr_send_hcam - Send an HCAM to the adapter.
1017 * @ioa_cfg: ioa config struct
1018 * @type: HCAM type
1019 * @hostrcb: hostrcb struct
1020 *
1021 * This function will send a Host Controlled Async command to the adapter.
1022 * If HCAMs are currently not allowed to be issued to the adapter, it will
1023 * place the hostrcb on the free queue.
1024 *
1025 * Return value:
1026 * none
1027 **/
1028static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1029 struct ipr_hostrcb *hostrcb)
1030{
1031 struct ipr_cmnd *ipr_cmd;
1032 struct ipr_ioarcb *ioarcb;
1033
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06001034 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001035 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001036 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001037 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1038
1039 ipr_cmd->u.hostrcb = hostrcb;
1040 ioarcb = &ipr_cmd->ioarcb;
1041
1042 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1043 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1044 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1045 ioarcb->cmd_pkt.cdb[1] = type;
1046 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1047 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1048
Wayne Boyera32c0552010-02-19 13:23:36 -08001049 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1050 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001051
1052 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1053 ipr_cmd->done = ipr_process_ccn;
1054 else
1055 ipr_cmd->done = ipr_process_error;
1056
1057 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1058
Wayne Boyera32c0552010-02-19 13:23:36 -08001059 ipr_send_command(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001060 } else {
1061 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1062 }
1063}
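
/*
 * Illustrative sketch only -- not driver code.  It shows the calling
 * pattern for ipr_send_hcam(): with the host lock held, take hostrcbs
 * from a free list and post them to the adapter.  The helper name and
 * the use of hostrcb_free_q as the source list are assumptions made for
 * this example.
 */
static inline void ipr_example_post_hcams(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_hostrcb *hostrcb;

	/* Stop early if the adapter is not accepting commands */
	while (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds &&
	       !list_empty(&ioa_cfg->hostrcb_free_q)) {
		hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
				     struct ipr_hostrcb, queue);
		list_del(&hostrcb->queue);
		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
	}
}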
1064
1065/**
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001066 * ipr_update_ata_class - Update the ata class in the resource entry
Linus Torvalds1da177e2005-04-16 15:20:36 -07001067 * @res: resource entry struct
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001068 * @proto: cfgte device bus protocol value
Linus Torvalds1da177e2005-04-16 15:20:36 -07001069 *
1070 * Return value:
1071 * none
1072 **/
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001073static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001074{
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03001075 switch (proto) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001076 case IPR_PROTO_SATA:
1077 case IPR_PROTO_SAS_STP:
1078 res->ata_class = ATA_DEV_ATA;
1079 break;
1080 case IPR_PROTO_SATA_ATAPI:
1081 case IPR_PROTO_SAS_STP_ATAPI:
1082 res->ata_class = ATA_DEV_ATAPI;
1083 break;
1084 default:
1085 res->ata_class = ATA_DEV_UNKNOWN;
1086 break;
1087	}
1088}
1089
1090/**
1091 * ipr_init_res_entry - Initialize a resource entry struct.
1092 * @res: resource entry struct
1093 * @cfgtew: config table entry wrapper struct
1094 *
1095 * Return value:
1096 * none
1097 **/
1098static void ipr_init_res_entry(struct ipr_resource_entry *res,
1099 struct ipr_config_table_entry_wrapper *cfgtew)
1100{
1101 int found = 0;
1102 unsigned int proto;
1103 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1104 struct ipr_resource_entry *gscsi_res = NULL;
1105
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06001106 res->needs_sync_complete = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001107 res->in_erp = 0;
1108 res->add_to_ml = 0;
1109 res->del_from_ml = 0;
1110 res->resetting_device = 0;
1111 res->sdev = NULL;
Brian King35a39692006-09-25 12:39:20 -05001112 res->sata_port = NULL;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001113
1114 if (ioa_cfg->sis64) {
1115 proto = cfgtew->u.cfgte64->proto;
1116 res->res_flags = cfgtew->u.cfgte64->res_flags;
1117 res->qmodel = IPR_QUEUEING_MODEL64(res);
Wayne Boyer438b0332010-05-10 09:13:00 -07001118 res->type = cfgtew->u.cfgte64->res_type;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001119
1120 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1121 sizeof(res->res_path));
1122
1123 res->bus = 0;
Wayne Boyer0cb992e2010-11-04 09:35:58 -07001124 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1125 sizeof(res->dev_lun.scsi_lun));
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001126 res->lun = scsilun_to_int(&res->dev_lun);
1127
1128 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1129 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1130 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1131 found = 1;
1132 res->target = gscsi_res->target;
1133 break;
1134 }
1135 }
1136 if (!found) {
1137 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1138 ioa_cfg->max_devs_supported);
1139 set_bit(res->target, ioa_cfg->target_ids);
1140 }
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001141 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1142 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1143 res->target = 0;
1144 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1145 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1146 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1147 ioa_cfg->max_devs_supported);
1148 set_bit(res->target, ioa_cfg->array_ids);
1149 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1150 res->bus = IPR_VSET_VIRTUAL_BUS;
1151 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1152 ioa_cfg->max_devs_supported);
1153 set_bit(res->target, ioa_cfg->vset_ids);
1154 } else {
1155 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1156 ioa_cfg->max_devs_supported);
1157 set_bit(res->target, ioa_cfg->target_ids);
1158 }
1159 } else {
1160 proto = cfgtew->u.cfgte->proto;
1161 res->qmodel = IPR_QUEUEING_MODEL(res);
1162 res->flags = cfgtew->u.cfgte->flags;
1163 if (res->flags & IPR_IS_IOA_RESOURCE)
1164 res->type = IPR_RES_TYPE_IOAFP;
1165 else
1166 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1167
1168 res->bus = cfgtew->u.cfgte->res_addr.bus;
1169 res->target = cfgtew->u.cfgte->res_addr.target;
1170 res->lun = cfgtew->u.cfgte->res_addr.lun;
Wayne Boyer46d74562010-08-11 07:15:17 -07001171 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001172 }
1173
1174 ipr_update_ata_class(res, proto);
1175}
1176
1177/**
1178 * ipr_is_same_device - Determine if two devices are the same.
1179 * @res: resource entry struct
1180 * @cfgtew: config table entry wrapper struct
1181 *
1182 * Return value:
1183 * 1 if the devices are the same / 0 otherwise
1184 **/
1185static int ipr_is_same_device(struct ipr_resource_entry *res,
1186 struct ipr_config_table_entry_wrapper *cfgtew)
1187{
1188 if (res->ioa_cfg->sis64) {
1189 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1190 sizeof(cfgtew->u.cfgte64->dev_id)) &&
Wayne Boyer0cb992e2010-11-04 09:35:58 -07001191 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001192 sizeof(cfgtew->u.cfgte64->lun))) {
1193 return 1;
1194 }
1195 } else {
1196 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1197 res->target == cfgtew->u.cfgte->res_addr.target &&
1198 res->lun == cfgtew->u.cfgte->res_addr.lun)
1199 return 1;
1200 }
1201
1202 return 0;
1203}
1204
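/*
 * Illustrative sketch only -- not driver code.  It shows how an incoming
 * config table entry can be matched against the resources the driver
 * already knows about; the helper name is hypothetical.
 * ipr_handle_config_change() below performs a similar scan, keyed on the
 * resource handle instead.
 */
static inline struct ipr_resource_entry *
ipr_example_find_res(struct ipr_ioa_cfg *ioa_cfg,
		     struct ipr_config_table_entry_wrapper *cfgtew)
{
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue)
		if (ipr_is_same_device(res, cfgtew))
			return res;

	return NULL;
}
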
1205/**
Brian Kingb3b3b402013-01-11 17:43:49 -06001206 * __ipr_format_res_path - Format the resource path for printing.
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001207 * @res_path: resource path
1208 * @buffer:	buffer to hold the formatted resource path
Brian Kingb3b3b402013-01-11 17:43:49 -06001209 * @len: length of buffer provided
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001210 *
1211 * Return value:
1212 * pointer to buffer
1213 **/
Brian Kingb3b3b402013-01-11 17:43:49 -06001214static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001215{
1216 int i;
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07001217 char *p = buffer;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001218
Wayne Boyer46d74562010-08-11 07:15:17 -07001219 *p = '\0';
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07001220 p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1221 for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1222 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001223
1224 return buffer;
1225}
1226
1227/**
Brian Kingb3b3b402013-01-11 17:43:49 -06001228 * ipr_format_res_path - Format the resource path for printing.
1229 * @ioa_cfg: ioa config struct
1230 * @res_path: resource path
1231 * @buffer:	buffer to hold the formatted resource path
1232 * @len: length of buffer provided
1233 *
1234 * Return value:
1235 * pointer to buffer
1236 **/
1237static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1238 u8 *res_path, char *buffer, int len)
1239{
1240 char *p = buffer;
1241
1242 *p = '\0';
1243 p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1244	__ipr_format_res_path(res_path, p, len - (p - buffer));
1245 return buffer;
1246}
1247
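/*
 * Illustrative sketch only -- not driver code.  It shows how the sis64
 * error loggers below use ipr_format_res_path(): format into a stack
 * buffer of IPR_MAX_RES_PATH_LENGTH bytes, then print.  The helper name
 * and the sample output are illustrative.
 */
static inline void ipr_example_log_res_path(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_resource_entry *res)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	/* Produces e.g. "2/00-0A-01": host number, then the path bytes */
	ipr_err("Resource path: %s\n",
		ipr_format_res_path(ioa_cfg, res->res_path, buffer,
				    sizeof(buffer)));
}
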
1248/**
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001249 * ipr_update_res_entry - Update the resource entry.
1250 * @res: resource entry struct
1251 * @cfgtew: config table entry wrapper struct
1252 *
1253 * Return value:
1254 * none
1255 **/
1256static void ipr_update_res_entry(struct ipr_resource_entry *res,
1257 struct ipr_config_table_entry_wrapper *cfgtew)
1258{
1259 char buffer[IPR_MAX_RES_PATH_LENGTH];
1260 unsigned int proto;
1261 int new_path = 0;
1262
1263 if (res->ioa_cfg->sis64) {
1264 res->flags = cfgtew->u.cfgte64->flags;
1265 res->res_flags = cfgtew->u.cfgte64->res_flags;
Wayne Boyer75576bb2010-07-14 10:50:14 -07001266 res->type = cfgtew->u.cfgte64->res_type;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001267
1268 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1269 sizeof(struct ipr_std_inq_data));
1270
1271 res->qmodel = IPR_QUEUEING_MODEL64(res);
1272 proto = cfgtew->u.cfgte64->proto;
1273 res->res_handle = cfgtew->u.cfgte64->res_handle;
1274 res->dev_id = cfgtew->u.cfgte64->dev_id;
1275
1276 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1277 sizeof(res->dev_lun.scsi_lun));
1278
1279 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1280 sizeof(res->res_path))) {
1281 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1282 sizeof(res->res_path));
1283 new_path = 1;
1284 }
1285
1286 if (res->sdev && new_path)
1287 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06001288 ipr_format_res_path(res->ioa_cfg,
1289 res->res_path, buffer, sizeof(buffer)));
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001290 } else {
1291 res->flags = cfgtew->u.cfgte->flags;
1292 if (res->flags & IPR_IS_IOA_RESOURCE)
1293 res->type = IPR_RES_TYPE_IOAFP;
1294 else
1295 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1296
1297 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1298 sizeof(struct ipr_std_inq_data));
1299
1300 res->qmodel = IPR_QUEUEING_MODEL(res);
1301 proto = cfgtew->u.cfgte->proto;
1302 res->res_handle = cfgtew->u.cfgte->res_handle;
1303 }
1304
1305 ipr_update_ata_class(res, proto);
1306}
1307
1308/**
1309 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1310 * for the resource.
1311 * @res: resource entry struct
1313 *
1314 * Return value:
1315 * none
1316 **/
1317static void ipr_clear_res_target(struct ipr_resource_entry *res)
1318{
1319 struct ipr_resource_entry *gscsi_res = NULL;
1320 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1321
1322 if (!ioa_cfg->sis64)
1323 return;
1324
1325 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1326 clear_bit(res->target, ioa_cfg->array_ids);
1327 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1328 clear_bit(res->target, ioa_cfg->vset_ids);
1329 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1330 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1331 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1332 return;
1333 clear_bit(res->target, ioa_cfg->target_ids);
1334
1335 } else if (res->bus == 0)
1336 clear_bit(res->target, ioa_cfg->target_ids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337}
1338
1339/**
1340 * ipr_handle_config_change - Handle a config change from the adapter
1341 * @ioa_cfg: ioa config struct
1342 * @hostrcb: hostrcb
1343 *
1344 * Return value:
1345 * none
1346 **/
1347static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001348 struct ipr_hostrcb *hostrcb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001349{
1350 struct ipr_resource_entry *res = NULL;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001351 struct ipr_config_table_entry_wrapper cfgtew;
1352 __be32 cc_res_handle;
1353
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354 u32 is_ndn = 1;
1355
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001356 if (ioa_cfg->sis64) {
1357 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1358 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1359 } else {
1360 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1361 cc_res_handle = cfgtew.u.cfgte->res_handle;
1362 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001363
1364 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001365 if (res->res_handle == cc_res_handle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366 is_ndn = 0;
1367 break;
1368 }
1369 }
1370
1371 if (is_ndn) {
1372 if (list_empty(&ioa_cfg->free_res_q)) {
1373 ipr_send_hcam(ioa_cfg,
1374 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1375 hostrcb);
1376 return;
1377 }
1378
1379 res = list_entry(ioa_cfg->free_res_q.next,
1380 struct ipr_resource_entry, queue);
1381
1382 list_del(&res->queue);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001383 ipr_init_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1385 }
1386
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001387 ipr_update_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388
1389 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1390 if (res->sdev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001391 res->del_from_ml = 1;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001392 res->res_handle = IPR_INVALID_RES_HANDLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001393 if (ioa_cfg->allow_ml_add_del)
1394 schedule_work(&ioa_cfg->work_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001395 } else {
1396 ipr_clear_res_target(res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001397 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001398 }
Kleber Sacilotto de Souza5767a1c2011-02-14 20:19:31 -02001399 } else if (!res->sdev || res->del_from_ml) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001400 res->add_to_ml = 1;
1401 if (ioa_cfg->allow_ml_add_del)
1402 schedule_work(&ioa_cfg->work_q);
1403 }
1404
1405 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1406}
1407
1408/**
1409 * ipr_process_ccn - Op done function for a CCN.
1410 * @ipr_cmd: ipr command struct
1411 *
1412 * This function is the op done function for a configuration
1413 * change notification host controlled async from the adapter.
1414 *
1415 * Return value:
1416 * none
1417 **/
1418static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1419{
1420 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1421 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07001422 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423
1424 list_del(&hostrcb->queue);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001425 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426
1427 if (ioasc) {
1428 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1429 dev_err(&ioa_cfg->pdev->dev,
1430 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1431
1432 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1433 } else {
1434 ipr_handle_config_change(ioa_cfg, hostrcb);
1435 }
1436}
1437
1438/**
Brian King8cf093e2007-04-26 16:00:14 -05001439 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1440 * @i: index into buffer
1441 * @buf: string to modify
1442 *
1443 * This function will strip all trailing whitespace, pad the end
1444 * of the string with a single space, and NULL terminate the string.
1445 *
1446 * Return value:
1447 * new length of string
1448 **/
1449static int strip_and_pad_whitespace(int i, char *buf)
1450{
1451 while (i && buf[i] == ' ')
1452 i--;
1453 buf[i+1] = ' ';
1454 buf[i+2] = '\0';
1455 return i + 2;
1456}
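
/*
 * Worked example (illustrative values): called with buf = "IBM     " and
 * i = 7, the loop above backs up over the trailing blanks, leaving
 * "IBM " plus a terminating '\0' in buf and returning 4.
 */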
1457
1458/**
1459 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1460 * @prefix: string to print at start of printk
1461 * @hostrcb: hostrcb pointer
1462 * @vpd: vendor/product id/sn struct
1463 *
1464 * Return value:
1465 * none
1466 **/
1467static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1468 struct ipr_vpd *vpd)
1469{
1470 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1471 int i = 0;
1472
1473 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1474 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1475
1476 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1477 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1478
1479 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1480 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1481
1482 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1483}
1484
1485/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486 * ipr_log_vpd - Log the passed VPD to the error log.
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001487 * @vpd: vendor/product id/sn struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488 *
1489 * Return value:
1490 * none
1491 **/
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001492static void ipr_log_vpd(struct ipr_vpd *vpd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493{
1494 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1495 + IPR_SERIAL_NUM_LEN];
1496
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001497 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1498 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499 IPR_PROD_ID_LEN);
1500 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1501 ipr_err("Vendor/Product ID: %s\n", buffer);
1502
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001503 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1505 ipr_err(" Serial Number: %s\n", buffer);
1506}
1507
1508/**
Brian King8cf093e2007-04-26 16:00:14 -05001509 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1510 * @prefix: string to print at start of printk
1511 * @hostrcb: hostrcb pointer
1512 * @vpd: vendor/product id/sn/wwn struct
1513 *
1514 * Return value:
1515 * none
1516 **/
1517static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1518 struct ipr_ext_vpd *vpd)
1519{
1520 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1521 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1522 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1523}
1524
1525/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001526 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1527 * @vpd: vendor/product id/sn/wwn struct
1528 *
1529 * Return value:
1530 * none
1531 **/
1532static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1533{
1534 ipr_log_vpd(&vpd->vpd);
1535 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1536 be32_to_cpu(vpd->wwid[1]));
1537}
1538
1539/**
1540 * ipr_log_enhanced_cache_error - Log a cache error.
1541 * @ioa_cfg: ioa config struct
1542 * @hostrcb: hostrcb struct
1543 *
1544 * Return value:
1545 * none
1546 **/
1547static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1548 struct ipr_hostrcb *hostrcb)
1549{
Wayne Boyer4565e372010-02-19 13:24:07 -08001550 struct ipr_hostrcb_type_12_error *error;
1551
1552 if (ioa_cfg->sis64)
1553 error = &hostrcb->hcam.u.error64.u.type_12_error;
1554 else
1555 error = &hostrcb->hcam.u.error.u.type_12_error;
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001556
1557 ipr_err("-----Current Configuration-----\n");
1558 ipr_err("Cache Directory Card Information:\n");
1559 ipr_log_ext_vpd(&error->ioa_vpd);
1560 ipr_err("Adapter Card Information:\n");
1561 ipr_log_ext_vpd(&error->cfc_vpd);
1562
1563 ipr_err("-----Expected Configuration-----\n");
1564 ipr_err("Cache Directory Card Information:\n");
1565 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1566 ipr_err("Adapter Card Information:\n");
1567 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1568
1569 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1570 be32_to_cpu(error->ioa_data[0]),
1571 be32_to_cpu(error->ioa_data[1]),
1572 be32_to_cpu(error->ioa_data[2]));
1573}
1574
1575/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576 * ipr_log_cache_error - Log a cache error.
1577 * @ioa_cfg: ioa config struct
1578 * @hostrcb: hostrcb struct
1579 *
1580 * Return value:
1581 * none
1582 **/
1583static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1584 struct ipr_hostrcb *hostrcb)
1585{
1586 struct ipr_hostrcb_type_02_error *error =
1587 &hostrcb->hcam.u.error.u.type_02_error;
1588
1589 ipr_err("-----Current Configuration-----\n");
1590 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001591 ipr_log_vpd(&error->ioa_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001593 ipr_log_vpd(&error->cfc_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594
1595 ipr_err("-----Expected Configuration-----\n");
1596 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001597 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001599 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001600
1601 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1602 be32_to_cpu(error->ioa_data[0]),
1603 be32_to_cpu(error->ioa_data[1]),
1604 be32_to_cpu(error->ioa_data[2]));
1605}
1606
1607/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001608 * ipr_log_enhanced_config_error - Log a configuration error.
1609 * @ioa_cfg: ioa config struct
1610 * @hostrcb: hostrcb struct
1611 *
1612 * Return value:
1613 * none
1614 **/
1615static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1616 struct ipr_hostrcb *hostrcb)
1617{
1618 int errors_logged, i;
1619 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1620 struct ipr_hostrcb_type_13_error *error;
1621
1622 error = &hostrcb->hcam.u.error.u.type_13_error;
1623 errors_logged = be32_to_cpu(error->errors_logged);
1624
1625 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1626 be32_to_cpu(error->errors_detected), errors_logged);
1627
1628 dev_entry = error->dev;
1629
1630 for (i = 0; i < errors_logged; i++, dev_entry++) {
1631 ipr_err_separator;
1632
1633 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1634 ipr_log_ext_vpd(&dev_entry->vpd);
1635
1636 ipr_err("-----New Device Information-----\n");
1637 ipr_log_ext_vpd(&dev_entry->new_vpd);
1638
1639 ipr_err("Cache Directory Card Information:\n");
1640 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1641
1642 ipr_err("Adapter Card Information:\n");
1643 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1644 }
1645}
1646
1647/**
Wayne Boyer4565e372010-02-19 13:24:07 -08001648 * ipr_log_sis64_config_error - Log a device error.
1649 * @ioa_cfg: ioa config struct
1650 * @hostrcb: hostrcb struct
1651 *
1652 * Return value:
1653 * none
1654 **/
1655static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1656 struct ipr_hostrcb *hostrcb)
1657{
1658 int errors_logged, i;
1659 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1660 struct ipr_hostrcb_type_23_error *error;
1661 char buffer[IPR_MAX_RES_PATH_LENGTH];
1662
1663 error = &hostrcb->hcam.u.error64.u.type_23_error;
1664 errors_logged = be32_to_cpu(error->errors_logged);
1665
1666 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1667 be32_to_cpu(error->errors_detected), errors_logged);
1668
1669 dev_entry = error->dev;
1670
1671 for (i = 0; i < errors_logged; i++, dev_entry++) {
1672 ipr_err_separator;
1673
1674 ipr_err("Device %d : %s", i + 1,
Brian Kingb3b3b402013-01-11 17:43:49 -06001675 __ipr_format_res_path(dev_entry->res_path,
1676 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08001677 ipr_log_ext_vpd(&dev_entry->vpd);
1678
1679 ipr_err("-----New Device Information-----\n");
1680 ipr_log_ext_vpd(&dev_entry->new_vpd);
1681
1682 ipr_err("Cache Directory Card Information:\n");
1683 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1684
1685 ipr_err("Adapter Card Information:\n");
1686 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1687 }
1688}
1689
1690/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691 * ipr_log_config_error - Log a configuration error.
1692 * @ioa_cfg: ioa config struct
1693 * @hostrcb: hostrcb struct
1694 *
1695 * Return value:
1696 * none
1697 **/
1698static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1699 struct ipr_hostrcb *hostrcb)
1700{
1701 int errors_logged, i;
1702 struct ipr_hostrcb_device_data_entry *dev_entry;
1703 struct ipr_hostrcb_type_03_error *error;
1704
1705 error = &hostrcb->hcam.u.error.u.type_03_error;
1706 errors_logged = be32_to_cpu(error->errors_logged);
1707
1708 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1709 be32_to_cpu(error->errors_detected), errors_logged);
1710
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001711 dev_entry = error->dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712
1713 for (i = 0; i < errors_logged; i++, dev_entry++) {
1714 ipr_err_separator;
1715
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001716 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001717 ipr_log_vpd(&dev_entry->vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718
1719 ipr_err("-----New Device Information-----\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001720 ipr_log_vpd(&dev_entry->new_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721
1722 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001723 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724
1725 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001726 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727
1728 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1729 be32_to_cpu(dev_entry->ioa_data[0]),
1730 be32_to_cpu(dev_entry->ioa_data[1]),
1731 be32_to_cpu(dev_entry->ioa_data[2]),
1732 be32_to_cpu(dev_entry->ioa_data[3]),
1733 be32_to_cpu(dev_entry->ioa_data[4]));
1734 }
1735}
1736
1737/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001738 * ipr_log_enhanced_array_error - Log an array configuration error.
1739 * @ioa_cfg: ioa config struct
1740 * @hostrcb: hostrcb struct
1741 *
1742 * Return value:
1743 * none
1744 **/
1745static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1746 struct ipr_hostrcb *hostrcb)
1747{
1748 int i, num_entries;
1749 struct ipr_hostrcb_type_14_error *error;
1750 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1751 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1752
1753 error = &hostrcb->hcam.u.error.u.type_14_error;
1754
1755 ipr_err_separator;
1756
1757 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1758 error->protection_level,
1759 ioa_cfg->host->host_no,
1760 error->last_func_vset_res_addr.bus,
1761 error->last_func_vset_res_addr.target,
1762 error->last_func_vset_res_addr.lun);
1763
1764 ipr_err_separator;
1765
1766 array_entry = error->array_member;
1767 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
Wayne Boyer72620262010-09-27 10:45:28 -07001768 ARRAY_SIZE(error->array_member));
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001769
1770 for (i = 0; i < num_entries; i++, array_entry++) {
1771 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1772 continue;
1773
1774 if (be32_to_cpu(error->exposed_mode_adn) == i)
1775 ipr_err("Exposed Array Member %d:\n", i);
1776 else
1777 ipr_err("Array Member %d:\n", i);
1778
1779 ipr_log_ext_vpd(&array_entry->vpd);
1780 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1781 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1782 "Expected Location");
1783
1784 ipr_err_separator;
1785 }
1786}
1787
1788/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789 * ipr_log_array_error - Log an array configuration error.
1790 * @ioa_cfg: ioa config struct
1791 * @hostrcb: hostrcb struct
1792 *
1793 * Return value:
1794 * none
1795 **/
1796static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1797 struct ipr_hostrcb *hostrcb)
1798{
1799 int i;
1800 struct ipr_hostrcb_type_04_error *error;
1801 struct ipr_hostrcb_array_data_entry *array_entry;
1802 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1803
1804 error = &hostrcb->hcam.u.error.u.type_04_error;
1805
1806 ipr_err_separator;
1807
1808 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1809 error->protection_level,
1810 ioa_cfg->host->host_no,
1811 error->last_func_vset_res_addr.bus,
1812 error->last_func_vset_res_addr.target,
1813 error->last_func_vset_res_addr.lun);
1814
1815 ipr_err_separator;
1816
1817 array_entry = error->array_member;
1818
1819 for (i = 0; i < 18; i++) {
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001820 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821 continue;
1822
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001823 if (be32_to_cpu(error->exposed_mode_adn) == i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824 ipr_err("Exposed Array Member %d:\n", i);
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001825 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826 ipr_err("Array Member %d:\n", i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001828 ipr_log_vpd(&array_entry->vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001830 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1831 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1832 "Expected Location");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833
1834 ipr_err_separator;
1835
1836 if (i == 9)
1837 array_entry = error->array_member2;
1838 else
1839 array_entry++;
1840 }
1841}
1842
1843/**
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001844 * ipr_log_hex_data - Log additional hex IOA error data.
Brian Kingac719ab2006-11-21 10:28:42 -06001845 * @ioa_cfg: ioa config struct
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001846 * @data: IOA error data
1847 * @len: data length
1848 *
1849 * Return value:
1850 * none
1851 **/
Brian Kingac719ab2006-11-21 10:28:42 -06001852static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001853{
1854 int i;
1855
1856 if (len == 0)
1857 return;
1858
Brian Kingac719ab2006-11-21 10:28:42 -06001859 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1860 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1861
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001862 for (i = 0; i < len / 4; i += 4) {
1863 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1864 be32_to_cpu(data[i]),
1865 be32_to_cpu(data[i+1]),
1866 be32_to_cpu(data[i+2]),
1867 be32_to_cpu(data[i+3]));
1868 }
1869}
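
/*
 * Illustrative output (made-up data): a 32-byte buffer is printed as two
 * lines, byte offsets on the left and four big-endian words per line:
 *
 *	00000000: DEADBEEF 00000001 00000002 00000003
 *	00000010: 00000004 00000005 00000006 00000007
 */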
1870
1871/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001872 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1873 * @ioa_cfg: ioa config struct
1874 * @hostrcb: hostrcb struct
1875 *
1876 * Return value:
1877 * none
1878 **/
1879static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1880 struct ipr_hostrcb *hostrcb)
1881{
1882 struct ipr_hostrcb_type_17_error *error;
1883
Wayne Boyer4565e372010-02-19 13:24:07 -08001884 if (ioa_cfg->sis64)
1885 error = &hostrcb->hcam.u.error64.u.type_17_error;
1886 else
1887 error = &hostrcb->hcam.u.error.u.type_17_error;
1888
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001889 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
KOSAKI Motohiroca54cb82009-12-14 18:01:15 -08001890 strim(error->failure_reason);
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001891
Brian King8cf093e2007-04-26 16:00:14 -05001892 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1893 be32_to_cpu(hostrcb->hcam.u.error.prc));
1894 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
Brian Kingac719ab2006-11-21 10:28:42 -06001895 ipr_log_hex_data(ioa_cfg, error->data,
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001896 be32_to_cpu(hostrcb->hcam.length) -
1897 (offsetof(struct ipr_hostrcb_error, u) +
1898 offsetof(struct ipr_hostrcb_type_17_error, data)));
1899}
1900
1901/**
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001902 * ipr_log_dual_ioa_error - Log a dual adapter error.
1903 * @ioa_cfg: ioa config struct
1904 * @hostrcb: hostrcb struct
1905 *
1906 * Return value:
1907 * none
1908 **/
1909static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1910 struct ipr_hostrcb *hostrcb)
1911{
1912 struct ipr_hostrcb_type_07_error *error;
1913
1914 error = &hostrcb->hcam.u.error.u.type_07_error;
1915 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
KOSAKI Motohiroca54cb82009-12-14 18:01:15 -08001916 strim(error->failure_reason);
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001917
Brian King8cf093e2007-04-26 16:00:14 -05001918 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1919 be32_to_cpu(hostrcb->hcam.u.error.prc));
1920 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
Brian Kingac719ab2006-11-21 10:28:42 -06001921 ipr_log_hex_data(ioa_cfg, error->data,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001922 be32_to_cpu(hostrcb->hcam.length) -
1923 (offsetof(struct ipr_hostrcb_error, u) +
1924 offsetof(struct ipr_hostrcb_type_07_error, data)));
1925}
1926
Brian King49dc6a12006-11-21 10:28:35 -06001927static const struct {
1928 u8 active;
1929 char *desc;
1930} path_active_desc[] = {
1931 { IPR_PATH_NO_INFO, "Path" },
1932 { IPR_PATH_ACTIVE, "Active path" },
1933 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1934};
1935
1936static const struct {
1937 u8 state;
1938 char *desc;
1939} path_state_desc[] = {
1940 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1941 { IPR_PATH_HEALTHY, "is healthy" },
1942 { IPR_PATH_DEGRADED, "is degraded" },
1943 { IPR_PATH_FAILED, "is failed" }
1944};
1945
1946/**
1947 * ipr_log_fabric_path - Log a fabric path error
1948 * @hostrcb: hostrcb struct
1949 * @fabric: fabric descriptor
1950 *
1951 * Return value:
1952 * none
1953 **/
1954static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1955 struct ipr_hostrcb_fabric_desc *fabric)
1956{
1957 int i, j;
1958 u8 path_state = fabric->path_state;
1959 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1960 u8 state = path_state & IPR_PATH_STATE_MASK;
1961
1962 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1963 if (path_active_desc[i].active != active)
1964 continue;
1965
1966 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1967 if (path_state_desc[j].state != state)
1968 continue;
1969
1970 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1971 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1972 path_active_desc[i].desc, path_state_desc[j].desc,
1973 fabric->ioa_port);
1974 } else if (fabric->cascaded_expander == 0xff) {
1975 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1976 path_active_desc[i].desc, path_state_desc[j].desc,
1977 fabric->ioa_port, fabric->phy);
1978 } else if (fabric->phy == 0xff) {
1979 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1980 path_active_desc[i].desc, path_state_desc[j].desc,
1981 fabric->ioa_port, fabric->cascaded_expander);
1982 } else {
1983 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1984 path_active_desc[i].desc, path_state_desc[j].desc,
1985 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1986 }
1987 return;
1988 }
1989 }
1990
1991 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1992 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1993}
1994
Wayne Boyer4565e372010-02-19 13:24:07 -08001995/**
1996 * ipr_log64_fabric_path - Log a fabric path error
1997 * @hostrcb: hostrcb struct
1998 * @fabric: fabric descriptor
1999 *
2000 * Return value:
2001 * none
2002 **/
2003static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2004 struct ipr_hostrcb64_fabric_desc *fabric)
2005{
2006 int i, j;
2007 u8 path_state = fabric->path_state;
2008 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2009 u8 state = path_state & IPR_PATH_STATE_MASK;
2010 char buffer[IPR_MAX_RES_PATH_LENGTH];
2011
2012 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2013 if (path_active_desc[i].active != active)
2014 continue;
2015
2016 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2017 if (path_state_desc[j].state != state)
2018 continue;
2019
2020 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2021 path_active_desc[i].desc, path_state_desc[j].desc,
Brian Kingb3b3b402013-01-11 17:43:49 -06002022 ipr_format_res_path(hostrcb->ioa_cfg,
2023 fabric->res_path,
2024 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002025 return;
2026 }
2027 }
2028
2029 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
Brian Kingb3b3b402013-01-11 17:43:49 -06002030 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2031 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002032}
2033
Brian King49dc6a12006-11-21 10:28:35 -06002034static const struct {
2035 u8 type;
2036 char *desc;
2037} path_type_desc[] = {
2038 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2039 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2040 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2041 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2042};
2043
2044static const struct {
2045 u8 status;
2046 char *desc;
2047} path_status_desc[] = {
2048 { IPR_PATH_CFG_NO_PROB, "Functional" },
2049 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2050 { IPR_PATH_CFG_FAILED, "Failed" },
2051 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2052 { IPR_PATH_NOT_DETECTED, "Missing" },
2053 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2054};
2055
2056static const char *link_rate[] = {
2057 "unknown",
2058 "disabled",
2059 "phy reset problem",
2060 "spinup hold",
2061 "port selector",
2062 "unknown",
2063 "unknown",
2064 "unknown",
2065 "1.5Gbps",
2066 "3.0Gbps",
2067 "unknown",
2068 "unknown",
2069 "unknown",
2070 "unknown",
2071 "unknown",
2072 "unknown"
2073};
2074
2075/**
2076 * ipr_log_path_elem - Log a fabric path element.
2077 * @hostrcb: hostrcb struct
2078 * @cfg: fabric path element struct
2079 *
2080 * Return value:
2081 * none
2082 **/
2083static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2084 struct ipr_hostrcb_config_element *cfg)
2085{
2086 int i, j;
2087 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2088 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2089
2090 if (type == IPR_PATH_CFG_NOT_EXIST)
2091 return;
2092
2093 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2094 if (path_type_desc[i].type != type)
2095 continue;
2096
2097 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2098 if (path_status_desc[j].status != status)
2099 continue;
2100
2101 if (type == IPR_PATH_CFG_IOA_PORT) {
2102 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2103 path_status_desc[j].desc, path_type_desc[i].desc,
2104 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2105 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2106 } else {
2107 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2108 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2109 path_status_desc[j].desc, path_type_desc[i].desc,
2110 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2111 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2112 } else if (cfg->cascaded_expander == 0xff) {
2113 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2114 "WWN=%08X%08X\n", path_status_desc[j].desc,
2115 path_type_desc[i].desc, cfg->phy,
2116 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2117 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2118 } else if (cfg->phy == 0xff) {
2119 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2120 "WWN=%08X%08X\n", path_status_desc[j].desc,
2121 path_type_desc[i].desc, cfg->cascaded_expander,
2122 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2123 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2124 } else {
2125 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2126 "WWN=%08X%08X\n", path_status_desc[j].desc,
2127 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2128 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2129 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2130 }
2131 }
2132 return;
2133 }
2134 }
2135
2136 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2137 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2138 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2139 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2140}
2141
2142/**
Wayne Boyer4565e372010-02-19 13:24:07 -08002143 * ipr_log64_path_elem - Log a fabric path element.
2144 * @hostrcb: hostrcb struct
2145 * @cfg: fabric path element struct
2146 *
2147 * Return value:
2148 * none
2149 **/
2150static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2151 struct ipr_hostrcb64_config_element *cfg)
2152{
2153 int i, j;
2154 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2155 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2156 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2157 char buffer[IPR_MAX_RES_PATH_LENGTH];
2158
2159 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2160 return;
2161
2162 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2163 if (path_type_desc[i].type != type)
2164 continue;
2165
2166 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2167 if (path_status_desc[j].status != status)
2168 continue;
2169
2170 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2171 path_status_desc[j].desc, path_type_desc[i].desc,
Brian Kingb3b3b402013-01-11 17:43:49 -06002172 ipr_format_res_path(hostrcb->ioa_cfg,
2173 cfg->res_path, buffer, sizeof(buffer)),
2174 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2175 be32_to_cpu(cfg->wwid[0]),
2176 be32_to_cpu(cfg->wwid[1]));
Wayne Boyer4565e372010-02-19 13:24:07 -08002177 return;
2178 }
2179 }
2180 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2181 "WWN=%08X%08X\n", cfg->type_status,
Brian Kingb3b3b402013-01-11 17:43:49 -06002182 ipr_format_res_path(hostrcb->ioa_cfg,
2183 cfg->res_path, buffer, sizeof(buffer)),
2184 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2185 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
Wayne Boyer4565e372010-02-19 13:24:07 -08002186}
2187
2188/**
Brian King49dc6a12006-11-21 10:28:35 -06002189 * ipr_log_fabric_error - Log a fabric error.
2190 * @ioa_cfg: ioa config struct
2191 * @hostrcb: hostrcb struct
2192 *
2193 * Return value:
2194 * none
2195 **/
2196static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2197 struct ipr_hostrcb *hostrcb)
2198{
2199 struct ipr_hostrcb_type_20_error *error;
2200 struct ipr_hostrcb_fabric_desc *fabric;
2201 struct ipr_hostrcb_config_element *cfg;
2202 int i, add_len;
2203
2204 error = &hostrcb->hcam.u.error.u.type_20_error;
2205 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2206 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2207
2208 add_len = be32_to_cpu(hostrcb->hcam.length) -
2209 (offsetof(struct ipr_hostrcb_error, u) +
2210 offsetof(struct ipr_hostrcb_type_20_error, desc));
2211
2212 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2213 ipr_log_fabric_path(hostrcb, fabric);
2214 for_each_fabric_cfg(fabric, cfg)
2215 ipr_log_path_elem(hostrcb, cfg);
2216
2217 add_len -= be16_to_cpu(fabric->length);
2218 fabric = (struct ipr_hostrcb_fabric_desc *)
2219 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2220 }
2221
Brian Kingac719ab2006-11-21 10:28:42 -06002222 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
Brian King49dc6a12006-11-21 10:28:35 -06002223}
2224
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002225/**
Wayne Boyer4565e372010-02-19 13:24:07 -08002226 * ipr_log_sis64_array_error - Log a sis64 array error.
2227 * @ioa_cfg: ioa config struct
2228 * @hostrcb: hostrcb struct
2229 *
2230 * Return value:
2231 * none
2232 **/
2233static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2234 struct ipr_hostrcb *hostrcb)
2235{
2236 int i, num_entries;
2237 struct ipr_hostrcb_type_24_error *error;
2238 struct ipr_hostrcb64_array_data_entry *array_entry;
2239 char buffer[IPR_MAX_RES_PATH_LENGTH];
2240 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2241
2242 error = &hostrcb->hcam.u.error64.u.type_24_error;
2243
2244 ipr_err_separator;
2245
2246 ipr_err("RAID %s Array Configuration: %s\n",
2247 error->protection_level,
Brian Kingb3b3b402013-01-11 17:43:49 -06002248 ipr_format_res_path(ioa_cfg, error->last_res_path,
2249 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002250
2251 ipr_err_separator;
2252
2253 array_entry = error->array_member;
Wayne Boyer72620262010-09-27 10:45:28 -07002254 num_entries = min_t(u32, error->num_entries,
2255 ARRAY_SIZE(error->array_member));
Wayne Boyer4565e372010-02-19 13:24:07 -08002256
2257 for (i = 0; i < num_entries; i++, array_entry++) {
2258
2259 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2260 continue;
2261
2262 if (error->exposed_mode_adn == i)
2263 ipr_err("Exposed Array Member %d:\n", i);
2264 else
2265 ipr_err("Array Member %d:\n", i);
2266
2268 ipr_log_ext_vpd(&array_entry->vpd);
Wayne Boyer72620262010-09-27 10:45:28 -07002269 ipr_err("Current Location: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06002270 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2271 buffer, sizeof(buffer)));
Wayne Boyer72620262010-09-27 10:45:28 -07002272 ipr_err("Expected Location: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06002273 ipr_format_res_path(ioa_cfg,
2274 array_entry->expected_res_path,
2275 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002276
2277 ipr_err_separator;
2278 }
2279}
2280
2281/**
2282 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2283 * @ioa_cfg: ioa config struct
2284 * @hostrcb: hostrcb struct
2285 *
2286 * Return value:
2287 * none
2288 **/
2289static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2290 struct ipr_hostrcb *hostrcb)
2291{
2292 struct ipr_hostrcb_type_30_error *error;
2293 struct ipr_hostrcb64_fabric_desc *fabric;
2294 struct ipr_hostrcb64_config_element *cfg;
2295 int i, add_len;
2296
2297 error = &hostrcb->hcam.u.error64.u.type_30_error;
2298
2299 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2300 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2301
2302 add_len = be32_to_cpu(hostrcb->hcam.length) -
2303 (offsetof(struct ipr_hostrcb64_error, u) +
2304 offsetof(struct ipr_hostrcb_type_30_error, desc));
2305
2306 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2307 ipr_log64_fabric_path(hostrcb, fabric);
2308 for_each_fabric_cfg(fabric, cfg)
2309 ipr_log64_path_elem(hostrcb, cfg);
2310
2311 add_len -= be16_to_cpu(fabric->length);
2312 fabric = (struct ipr_hostrcb64_fabric_desc *)
2313 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2314 }
2315
2316 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2317}
2318
2319/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002320 * ipr_log_generic_error - Log an adapter error.
2321 * @ioa_cfg: ioa config struct
2322 * @hostrcb: hostrcb struct
2323 *
2324 * Return value:
2325 * none
2326 **/
2327static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2328 struct ipr_hostrcb *hostrcb)
2329{
Brian Kingac719ab2006-11-21 10:28:42 -06002330 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002331 be32_to_cpu(hostrcb->hcam.length));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002332}
2333
2334/**
2335 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2336 * @ioasc: IOASC
2337 *
2338 * This function will return the index into the ipr_error_table
2339 * for the specified IOASC. If the IOASC is not in the table,
2340 * 0 will be returned, which points to the entry used for unknown errors.
2341 *
2342 * Return value:
2343 * index into the ipr_error_table
2344 **/
2345static u32 ipr_get_error(u32 ioasc)
2346{
2347 int i;
2348
2349 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
Brian King35a39692006-09-25 12:39:20 -05002350 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002351 return i;
2352
2353 return 0;
2354}
2355
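/*
 * Illustrative sketch only -- not driver code.  It maps an IOASC to its
 * human-readable table entry; the helper name is hypothetical.
 * ipr_handle_log_data() below performs the same lookup before deciding
 * whether and how to log.
 */
static inline const char *ipr_example_ioasc_to_text(u32 ioasc)
{
	u32 error_index = ipr_get_error(ioasc);

	/* Index 0 is the catch-all entry used for unknown IOASCs */
	return ipr_error_table[error_index].error;
}
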
2356/**
2357 * ipr_handle_log_data - Log an adapter error.
2358 * @ioa_cfg: ioa config struct
2359 * @hostrcb: hostrcb struct
2360 *
2361 * This function logs an adapter error to the system.
2362 *
2363 * Return value:
2364 * none
2365 **/
2366static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2367 struct ipr_hostrcb *hostrcb)
2368{
2369 u32 ioasc;
2370 int error_index;
2371
2372 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2373 return;
2374
2375 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2376 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2377
Wayne Boyer4565e372010-02-19 13:24:07 -08002378 if (ioa_cfg->sis64)
2379 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2380 else
2381 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002382
Wayne Boyer4565e372010-02-19 13:24:07 -08002383 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2384 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2386 scsi_report_bus_reset(ioa_cfg->host,
Wayne Boyer4565e372010-02-19 13:24:07 -08002387 hostrcb->hcam.u.error.fd_res_addr.bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002388 }
2389
2390 error_index = ipr_get_error(ioasc);
2391
2392 if (!ipr_error_table[error_index].log_hcam)
2393 return;
2394
Brian King49dc6a12006-11-21 10:28:35 -06002395 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002396
2397 /* Set indication we have logged an error */
2398 ioa_cfg->errors_logged++;
2399
Brian King933916f2007-03-29 12:43:30 -05002400 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002401 return;
brking@us.ibm.comcf852032005-11-01 17:00:47 -06002402 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2403 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002404
2405 switch (hostrcb->hcam.overlay_id) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002406 case IPR_HOST_RCB_OVERLAY_ID_2:
2407 ipr_log_cache_error(ioa_cfg, hostrcb);
2408 break;
2409 case IPR_HOST_RCB_OVERLAY_ID_3:
2410 ipr_log_config_error(ioa_cfg, hostrcb);
2411 break;
2412 case IPR_HOST_RCB_OVERLAY_ID_4:
2413 case IPR_HOST_RCB_OVERLAY_ID_6:
2414 ipr_log_array_error(ioa_cfg, hostrcb);
2415 break;
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002416 case IPR_HOST_RCB_OVERLAY_ID_7:
2417 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2418 break;
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06002419 case IPR_HOST_RCB_OVERLAY_ID_12:
2420 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2421 break;
2422 case IPR_HOST_RCB_OVERLAY_ID_13:
2423 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2424 break;
2425 case IPR_HOST_RCB_OVERLAY_ID_14:
2426 case IPR_HOST_RCB_OVERLAY_ID_16:
2427 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2428 break;
2429 case IPR_HOST_RCB_OVERLAY_ID_17:
2430 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2431 break;
Brian King49dc6a12006-11-21 10:28:35 -06002432 case IPR_HOST_RCB_OVERLAY_ID_20:
2433 ipr_log_fabric_error(ioa_cfg, hostrcb);
2434 break;
Wayne Boyer4565e372010-02-19 13:24:07 -08002435 case IPR_HOST_RCB_OVERLAY_ID_23:
2436 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2437 break;
2438 case IPR_HOST_RCB_OVERLAY_ID_24:
2439 case IPR_HOST_RCB_OVERLAY_ID_26:
2440 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2441 break;
2442 case IPR_HOST_RCB_OVERLAY_ID_30:
2443 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2444 break;
brking@us.ibm.comcf852032005-11-01 17:00:47 -06002445 case IPR_HOST_RCB_OVERLAY_ID_1:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447 default:
brking@us.ibm.coma9cfca92005-11-01 17:00:41 -06002448 ipr_log_generic_error(ioa_cfg, hostrcb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449 break;
2450 }
2451}
2452
2453/**
2454 * ipr_process_error - Op done function for an adapter error log.
2455 * @ipr_cmd: ipr command struct
2456 *
2457 * This function is the op done function for an error log host
2458 * controlled async from the adapter. It will log the error and
2459 * send the HCAM back to the adapter.
2460 *
2461 * Return value:
2462 * none
2463 **/
2464static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2465{
2466 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2467 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07002468 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Wayne Boyer4565e372010-02-19 13:24:07 -08002469 u32 fd_ioasc;
2470
2471 if (ioa_cfg->sis64)
2472 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2473 else
2474 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002475
2476 list_del(&hostrcb->queue);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06002477 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002478
2479 if (!ioasc) {
2480 ipr_handle_log_data(ioa_cfg, hostrcb);
Brian King65f56472007-04-26 16:00:12 -05002481 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2482 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2484 dev_err(&ioa_cfg->pdev->dev,
2485 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2486 }
2487
2488 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2489}
2490
2491/**
2492 * ipr_timeout - An internally generated op has timed out.
2493 * @ipr_cmd: ipr command struct
2494 *
2495 * This function blocks host requests and initiates an
2496 * adapter reset.
2497 *
2498 * Return value:
2499 * none
2500 **/
2501static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2502{
2503 unsigned long lock_flags = 0;
2504 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2505
2506 ENTER;
2507 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2508
2509 ioa_cfg->errors_logged++;
2510 dev_err(&ioa_cfg->pdev->dev,
2511 "Adapter being reset due to command timeout.\n");
2512
2513 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2514 ioa_cfg->sdt_state = GET_DUMP;
2515
2516 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2517 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2518
2519 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2520 LEAVE;
2521}
2522
2523/**
2524 * ipr_oper_timeout - Adapter timed out transitioning to operational
2525 * @ipr_cmd: ipr command struct
2526 *
2527 * This function blocks host requests and initiates an
2528 * adapter reset.
2529 *
2530 * Return value:
2531 * none
2532 **/
2533static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2534{
2535 unsigned long lock_flags = 0;
2536 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2537
2538 ENTER;
2539 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2540
2541 ioa_cfg->errors_logged++;
2542 dev_err(&ioa_cfg->pdev->dev,
2543 "Adapter timed out transitioning to operational.\n");
2544
2545 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2546 ioa_cfg->sdt_state = GET_DUMP;
2547
2548 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2549 if (ipr_fastfail)
2550 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2551 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2552 }
2553
2554 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2555 LEAVE;
2556}
2557
2558/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002559 * ipr_find_ses_entry - Find matching SES in SES table
2560 * @res: resource entry struct of SES
2561 *
2562 * Return value:
2563 * pointer to SES table entry / NULL on failure
2564 **/
2565static const struct ipr_ses_table_entry *
2566ipr_find_ses_entry(struct ipr_resource_entry *res)
2567{
2568 int i, j, matches;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002569 struct ipr_std_inq_vpids *vpids;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002570 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2571
2572 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2573 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2574 if (ste->compare_product_id_byte[j] == 'X') {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002575 vpids = &res->std_inq_data.vpids;
2576 if (vpids->product_id[j] == ste->product_id[j])
Linus Torvalds1da177e2005-04-16 15:20:36 -07002577 matches++;
2578 else
2579 break;
2580 } else
2581 matches++;
2582 }
2583
2584 if (matches == IPR_PROD_ID_LEN)
2585 return ste;
2586 }
2587
2588 return NULL;
2589}
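
/*
 * Note on the matching rule above: a byte of compare_product_id_byte set
 * to 'X' means "this position of the product ID must match the table
 * entry"; any other value makes that position a don't-care.  An entry of
 * all 'X's therefore requires an exact product ID match.
 */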
2590
2591/**
2592 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2593 * @ioa_cfg: ioa config struct
2594 * @bus: SCSI bus
2595 * @bus_width: bus width
2596 *
2597 * Return value:
2598 * 	SCSI bus speed in units of 100KHz (e.g. 1600 = 160 MHz)
2599 * 	For a 2-byte (wide) SCSI bus, the maximum transfer rate in
2600 * 	MB/sec is twice the clock rate in MHz (e.g. a wide bus clocked
2601 * 	at 160 MHz transfers at most 320 MB/sec).
2602 **/
2603static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2604{
2605 struct ipr_resource_entry *res;
2606 const struct ipr_ses_table_entry *ste;
2607 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2608
2609 /* Loop through each config table entry in the config table buffer */
2610 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002611 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002612 continue;
2613
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002614 if (bus != res->bus)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002615 continue;
2616
2617 if (!(ste = ipr_find_ses_entry(res)))
2618 continue;
2619
2620 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2621 }
2622
2623 return max_xfer_rate;
2624}
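
/*
 * Worked example of the conversion above, assuming max_bus_speed_limit is
 * expressed in MB/sec (consistent with the 100KHz return units documented
 * for this function): a matching SES entry that caps a 16-bit (2-byte)
 * wide bus at 160 MB/sec yields
 *
 *	max_xfer_rate = (160 * 10) / (16 / 8) = 800
 *
 * i.e. 80 MHz in 100KHz units, half of the 1600 (160 MHz) figure the
 * comment above quotes as the Ultra 320 wide-bus maximum.
 */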
2625
2626/**
2627 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2628 * @ioa_cfg: ioa config struct
2629 * @max_delay: max delay in micro-seconds to wait
2630 *
2631 * Busy-waits for an IODEBUG ACK from the IOA, doubling the poll interval on each iteration.
2632 *
2633 * Return value:
2634 * 0 on success / other on failure
2635 **/
2636static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2637{
2638 volatile u32 pcii_reg;
2639 int delay = 1;
2640
2641 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2642 while (delay < max_delay) {
2643 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2644
2645 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2646 return 0;
2647
2648 /* udelay cannot be used if delay is more than a few milliseconds */
2649 if ((delay / 1000) > MAX_UDELAY_MS)
2650 mdelay(delay / 1000);
2651 else
2652 udelay(delay);
2653
2654 delay += delay;
2655 }
2656 return -EIO;
2657}
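
/*
 * Note on the timing above: the poll interval doubles every pass
 * (1, 2, 4, ... microseconds), so the total time spent spinning before
 * giving up is bounded by roughly twice max_delay:
 *
 *	1 + 2 + 4 + ... + 2^n = 2^(n+1) - 1 < 2 * max_delay
 *
 * while a fast IOA is still acknowledged within a microsecond or two.
 */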
2658
2659/**
Wayne Boyerdcbad002010-02-19 13:24:14 -08002660 * ipr_get_sis64_dump_data_section - Dump IOA memory
2661 * @ioa_cfg: ioa config struct
2662 * @start_addr: adapter address to dump
2663 * @dest: destination kernel buffer
2664 * @length_in_words: length to dump in 4 byte words
2665 *
2666 * Return value:
2667 * 0 on success
2668 **/
2669static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2670 u32 start_addr,
2671 __be32 *dest, u32 length_in_words)
2672{
2673 int i;
2674
2675 for (i = 0; i < length_in_words; i++) {
2676 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2677 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2678 dest++;
2679 }
2680
2681 return 0;
2682}
2683
2684/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685 * ipr_get_ldump_data_section - Dump IOA memory
2686 * @ioa_cfg: ioa config struct
2687 * @start_addr: adapter address to dump
2688 * @dest: destination kernel buffer
2689 * @length_in_words: length to dump in 4 byte words
2690 *
2691 * Return value:
2692 * 0 on success / -EIO on failure
2693 **/
2694static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2695 u32 start_addr,
2696 __be32 *dest, u32 length_in_words)
2697{
2698 volatile u32 temp_pcii_reg;
2699 int i, delay = 0;
2700
Wayne Boyerdcbad002010-02-19 13:24:14 -08002701 if (ioa_cfg->sis64)
2702 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2703 dest, length_in_words);
2704
Linus Torvalds1da177e2005-04-16 15:20:36 -07002705 /* Write IOA interrupt reg starting LDUMP state */
2706 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
Wayne Boyer214777b2010-02-19 13:24:26 -08002707 ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002708
2709 /* Wait for IO debug acknowledge */
2710 if (ipr_wait_iodbg_ack(ioa_cfg,
2711 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2712 dev_err(&ioa_cfg->pdev->dev,
2713 "IOA dump long data transfer timeout\n");
2714 return -EIO;
2715 }
2716
2717 /* Signal LDUMP interlocked - clear IO debug ack */
2718 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2719 ioa_cfg->regs.clr_interrupt_reg);
2720
2721 /* Write Mailbox with starting address */
2722 writel(start_addr, ioa_cfg->ioa_mailbox);
2723
2724 /* Signal address valid - clear IOA Reset alert */
2725 writel(IPR_UPROCI_RESET_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002726 ioa_cfg->regs.clr_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002727
2728 for (i = 0; i < length_in_words; i++) {
2729 /* Wait for IO debug acknowledge */
2730 if (ipr_wait_iodbg_ack(ioa_cfg,
2731 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2732 dev_err(&ioa_cfg->pdev->dev,
2733 "IOA dump short data transfer timeout\n");
2734 return -EIO;
2735 }
2736
2737 /* Read data from mailbox and increment destination pointer */
2738 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2739 dest++;
2740
2741 /* For all but the last word of data, signal data received */
2742 if (i < (length_in_words - 1)) {
2743 /* Signal dump data received - Clear IO debug Ack */
2744 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2745 ioa_cfg->regs.clr_interrupt_reg);
2746 }
2747 }
2748
2749 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2750 writel(IPR_UPROCI_RESET_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002751 ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752
2753 writel(IPR_UPROCI_IO_DEBUG_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002754 ioa_cfg->regs.clr_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002755
2756 /* Signal dump data received - Clear IO debug Ack */
2757 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2758 ioa_cfg->regs.clr_interrupt_reg);
2759
2760 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2761 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2762 temp_pcii_reg =
Wayne Boyer214777b2010-02-19 13:24:26 -08002763 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764
2765 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2766 return 0;
2767
2768 udelay(10);
2769 delay += 10;
2770 }
2771
2772 return 0;
2773}
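
/*
 * Recap of the fmt2 LDUMP handshake implemented above (sis64 adapters
 * skip it and use the dump address/data registers instead):
 *
 *	1. set RESET_ALERT + IO_DEBUG_ALERT to put the IOA in LDUMP state
 *	2. wait for IO_DEBUG_ACKNOWLEDGE, clear it, write the start address
 *	   to the mailbox, then clear RESET_ALERT to mark the address valid
 *	3. per word: wait for the ack, read the mailbox, and clear the ack
 *	   for every word except the last
 *	4. set RESET_ALERT, clear IO_DEBUG_ALERT and the final ack, then
 *	   poll until the IOA drops RESET_ALERT to signal LDUMP exit
 */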
2774
2775#ifdef CONFIG_SCSI_IPR_DUMP
2776/**
2777 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2778 * @ioa_cfg: ioa config struct
2779 * @pci_address: adapter address
2780 * @length: length of data to copy
2781 *
2782 * Copy data from PCI adapter to kernel buffer.
2783 * Note: length MUST be a 4 byte multiple
2784 * Return value:
2785 * 0 on success / other on failure
2786 **/
2787static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2788 unsigned long pci_address, u32 length)
2789{
2790 int bytes_copied = 0;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002791 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002792 __be32 *page;
2793 unsigned long lock_flags = 0;
2794 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2795
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002796 if (ioa_cfg->sis64)
2797 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2798 else
2799 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2800
Linus Torvalds1da177e2005-04-16 15:20:36 -07002801 while (bytes_copied < length &&
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002802 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002803 if (ioa_dump->page_offset >= PAGE_SIZE ||
2804 ioa_dump->page_offset == 0) {
2805 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2806
2807 if (!page) {
2808 ipr_trace;
2809 return bytes_copied;
2810 }
2811
2812 ioa_dump->page_offset = 0;
2813 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2814 ioa_dump->next_page_index++;
2815 } else
2816 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2817
2818 rem_len = length - bytes_copied;
2819 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2820 cur_len = min(rem_len, rem_page_len);
2821
2822 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2823 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2824 rc = -EIO;
2825 } else {
2826 rc = ipr_get_ldump_data_section(ioa_cfg,
2827 pci_address + bytes_copied,
2828 &page[ioa_dump->page_offset / 4],
2829 (cur_len / sizeof(u32)));
2830 }
2831 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2832
2833 if (!rc) {
2834 ioa_dump->page_offset += cur_len;
2835 bytes_copied += cur_len;
2836 } else {
2837 ipr_trace;
2838 break;
2839 }
2840 schedule();
2841 }
2842
2843 return bytes_copied;
2844}
2845
2846/**
2847 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2848 * @hdr: dump entry header struct
2849 *
2850 * Return value:
2851 * nothing
2852 **/
2853static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2854{
2855 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2856 hdr->num_elems = 1;
2857 hdr->offset = sizeof(*hdr);
2858 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2859}
2860
2861/**
2862 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2863 * @ioa_cfg: ioa config struct
2864 * @driver_dump: driver dump struct
2865 *
2866 * Return value:
2867 * nothing
2868 **/
2869static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2870 struct ipr_driver_dump *driver_dump)
2871{
2872 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2873
2874 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2875 driver_dump->ioa_type_entry.hdr.len =
2876 sizeof(struct ipr_dump_ioa_type_entry) -
2877 sizeof(struct ipr_dump_entry_header);
2878 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2879 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2880 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2881 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2882 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2883 ucode_vpd->minor_release[1];
2884 driver_dump->hdr.num_entries++;
2885}
2886
2887/**
2888 * ipr_dump_version_data - Fill in the driver version in the dump.
2889 * @ioa_cfg: ioa config struct
2890 * @driver_dump: driver dump struct
2891 *
2892 * Return value:
2893 * nothing
2894 **/
2895static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2896 struct ipr_driver_dump *driver_dump)
2897{
2898 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2899 driver_dump->version_entry.hdr.len =
2900 sizeof(struct ipr_dump_version_entry) -
2901 sizeof(struct ipr_dump_entry_header);
2902 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2903 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2904 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2905 driver_dump->hdr.num_entries++;
2906}
2907
2908/**
2909 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2910 * @ioa_cfg: ioa config struct
2911 * @driver_dump: driver dump struct
2912 *
2913 * Return value:
2914 * nothing
2915 **/
2916static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2917 struct ipr_driver_dump *driver_dump)
2918{
2919 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2920 driver_dump->trace_entry.hdr.len =
2921 sizeof(struct ipr_dump_trace_entry) -
2922 sizeof(struct ipr_dump_entry_header);
2923 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2924 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2925 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2926 driver_dump->hdr.num_entries++;
2927}
2928
2929/**
2930 * ipr_dump_location_data - Fill in the IOA location in the dump.
2931 * @ioa_cfg: ioa config struct
2932 * @driver_dump: driver dump struct
2933 *
2934 * Return value:
2935 * nothing
2936 **/
2937static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2938 struct ipr_driver_dump *driver_dump)
2939{
2940 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2941 driver_dump->location_entry.hdr.len =
2942 sizeof(struct ipr_dump_location_entry) -
2943 sizeof(struct ipr_dump_entry_header);
2944 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2945 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
Kay Sievers71610f52008-12-03 22:41:36 +01002946 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002947 driver_dump->hdr.num_entries++;
2948}
2949
2950/**
2951 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2952 * @ioa_cfg: ioa config struct
2953 * @dump: dump struct
2954 *
2955 * Return value:
2956 * nothing
2957 **/
2958static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2959{
2960 unsigned long start_addr, sdt_word;
2961 unsigned long lock_flags = 0;
2962 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2963 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002964 u32 num_entries, max_num_entries, start_off, end_off;
2965 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002966 struct ipr_sdt *sdt;
Wayne Boyerdcbad002010-02-19 13:24:14 -08002967 int valid = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002968 int i;
2969
2970 ENTER;
2971
2972 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2973
Brian King41e9a692011-09-21 08:51:11 -05002974 if (ioa_cfg->sdt_state != READ_DUMP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002975 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2976 return;
2977 }
2978
Wayne Boyer110def82010-11-04 09:36:16 -07002979 if (ioa_cfg->sis64) {
2980 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2981 ssleep(IPR_DUMP_DELAY_SECONDS);
2982 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2983 }
2984
Linus Torvalds1da177e2005-04-16 15:20:36 -07002985 start_addr = readl(ioa_cfg->ioa_mailbox);
2986
Wayne Boyerdcbad002010-02-19 13:24:14 -08002987 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002988 dev_err(&ioa_cfg->pdev->dev,
2989 "Invalid dump table format: %lx\n", start_addr);
2990 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2991 return;
2992 }
2993
2994 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2995
2996 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2997
2998 /* Initialize the overall dump header */
2999 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3000 driver_dump->hdr.num_entries = 1;
3001 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3002 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3003 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3004 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3005
3006 ipr_dump_version_data(ioa_cfg, driver_dump);
3007 ipr_dump_location_data(ioa_cfg, driver_dump);
3008 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3009 ipr_dump_trace_data(ioa_cfg, driver_dump);
3010
3011 /* Update dump_header */
3012 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3013
3014 /* IOA Dump entry */
3015 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003016 ioa_dump->hdr.len = 0;
3017 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3018 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3019
3020 	/* The first entries in the SDT are actually a list of dump addresses
3021 	   and lengths used to gather the real dump data. sdt points to the
3022 	   IOA-generated dump table. Dump data will be extracted based on
3023 	   entries in this table */
3024 sdt = &ioa_dump->sdt;
3025
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003026 if (ioa_cfg->sis64) {
3027 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3028 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3029 } else {
3030 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3031 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3032 }
3033
3034 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3035 (max_num_entries * sizeof(struct ipr_sdt_entry));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003036 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003037 bytes_to_copy / sizeof(__be32));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003038
3039 /* Smart Dump table is ready to use and the first entry is valid */
Wayne Boyerdcbad002010-02-19 13:24:14 -08003040 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3041 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003042 dev_err(&ioa_cfg->pdev->dev,
3043 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3044 rc, be32_to_cpu(sdt->hdr.state));
3045 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3046 ioa_cfg->sdt_state = DUMP_OBTAINED;
3047 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3048 return;
3049 }
3050
3051 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3052
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003053 if (num_entries > max_num_entries)
3054 num_entries = max_num_entries;
3055
3056 /* Update dump length to the actual data to be copied */
3057 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3058 if (ioa_cfg->sis64)
3059 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3060 else
3061 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003062
3063 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3064
3065 for (i = 0; i < num_entries; i++) {
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003066 if (ioa_dump->hdr.len > max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003067 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3068 break;
3069 }
3070
3071 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
Wayne Boyerdcbad002010-02-19 13:24:14 -08003072 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3073 if (ioa_cfg->sis64)
3074 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3075 else {
3076 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3077 end_off = be32_to_cpu(sdt->entry[i].end_token);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003078
Wayne Boyerdcbad002010-02-19 13:24:14 -08003079 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3080 bytes_to_copy = end_off - start_off;
3081 else
3082 valid = 0;
3083 }
3084 if (valid) {
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003085 if (bytes_to_copy > max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003086 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3087 continue;
3088 }
3089
3090 /* Copy data from adapter to driver buffers */
3091 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3092 bytes_to_copy);
3093
3094 ioa_dump->hdr.len += bytes_copied;
3095
3096 if (bytes_copied != bytes_to_copy) {
3097 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3098 break;
3099 }
3100 }
3101 }
3102 }
3103
3104 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3105
3106 /* Update dump_header */
3107 driver_dump->hdr.len += ioa_dump->hdr.len;
3108 wmb();
3109 ioa_cfg->sdt_state = DUMP_OBTAINED;
3110 LEAVE;
3111}
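
/*
 * Rough layout of the dump image assembled above (illustrative; the exact
 * struct ordering lives in ipr.h, and sizes differ between sis64 and
 * older adapters). The driver-dump portion holds, in the order it is
 * filled in here:
 *
 *	dump header, driver version entry, location (PCI dev name) entry,
 *	IOA type/microcode entry, driver trace entry
 *
 * followed by the IOA dump entry: the SDT header/entries and then the raw
 * IOA memory fetched page by page via ipr_sdt_copy(). ipr_read_dump()
 * walks the same layout when user space reads the dump back via sysfs.
 */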
3112
3113#else
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003114#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003115#endif
3116
3117/**
3118 * ipr_release_dump - Free adapter dump memory
3119 * @kref: kref struct
3120 *
3121 * Return value:
3122 * nothing
3123 **/
3124static void ipr_release_dump(struct kref *kref)
3125{
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003126 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003127 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3128 unsigned long lock_flags = 0;
3129 int i;
3130
3131 ENTER;
3132 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3133 ioa_cfg->dump = NULL;
3134 ioa_cfg->sdt_state = INACTIVE;
3135 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3136
3137 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3138 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3139
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003140 vfree(dump->ioa_dump.ioa_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003141 kfree(dump);
3142 LEAVE;
3143}
3144
3145/**
3146 * ipr_worker_thread - Worker thread
David Howellsc4028952006-11-22 14:57:56 +00003147 * @work: ioa config struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07003148 *
3149 * Called at task level from a work thread. This function takes care
3150 * of adding and removing devices from the mid-layer as configuration
3151 * changes are detected by the adapter.
3152 *
3153 * Return value:
3154 * nothing
3155 **/
David Howellsc4028952006-11-22 14:57:56 +00003156static void ipr_worker_thread(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003157{
3158 unsigned long lock_flags;
3159 struct ipr_resource_entry *res;
3160 struct scsi_device *sdev;
3161 struct ipr_dump *dump;
David Howellsc4028952006-11-22 14:57:56 +00003162 struct ipr_ioa_cfg *ioa_cfg =
3163 container_of(work, struct ipr_ioa_cfg, work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003164 u8 bus, target, lun;
3165 int did_work;
3166
3167 ENTER;
3168 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3169
Brian King41e9a692011-09-21 08:51:11 -05003170 if (ioa_cfg->sdt_state == READ_DUMP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003171 dump = ioa_cfg->dump;
3172 if (!dump) {
3173 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3174 return;
3175 }
3176 kref_get(&dump->kref);
3177 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3178 ipr_get_ioa_dump(ioa_cfg, dump);
3179 kref_put(&dump->kref, ipr_release_dump);
3180
3181 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Brian King4c647e92011-10-15 09:08:56 -05003182 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003183 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3184 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3185 return;
3186 }
3187
3188restart:
3189 do {
3190 did_work = 0;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003191 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
3192 !ioa_cfg->allow_ml_add_del) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003193 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3194 return;
3195 }
3196
3197 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3198 if (res->del_from_ml && res->sdev) {
3199 did_work = 1;
3200 sdev = res->sdev;
3201 if (!scsi_device_get(sdev)) {
Kleber Sacilotto de Souza5767a1c2011-02-14 20:19:31 -02003202 if (!res->add_to_ml)
3203 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3204 else
3205 res->del_from_ml = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003206 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3207 scsi_remove_device(sdev);
3208 scsi_device_put(sdev);
3209 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3210 }
3211 break;
3212 }
3213 }
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003214 } while (did_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003215
3216 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3217 if (res->add_to_ml) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08003218 bus = res->bus;
3219 target = res->target;
3220 lun = res->lun;
Brian King1121b792006-03-29 09:37:16 -06003221 res->add_to_ml = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003222 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3223 scsi_add_device(ioa_cfg->host, bus, target, lun);
3224 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3225 goto restart;
3226 }
3227 }
3228
3229 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Tony Jonesee959b02008-02-22 00:13:36 +01003230 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003231 LEAVE;
3232}
3233
3234#ifdef CONFIG_SCSI_IPR_TRACE
3235/**
3236 * ipr_read_trace - Dump the adapter trace
Chris Wright2c3c8be2010-05-12 18:28:57 -07003237 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07003238 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08003239 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07003240 * @buf: buffer
3241 * @off: offset
3242 * @count: buffer size
3243 *
3244 * Return value:
3245 * number of bytes printed to buffer
3246 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07003247static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08003248 struct bin_attribute *bin_attr,
3249 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003250{
Tony Jonesee959b02008-02-22 00:13:36 +01003251 struct device *dev = container_of(kobj, struct device, kobj);
3252 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003253 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3254 unsigned long lock_flags = 0;
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003255 ssize_t ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003256
3257 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003258 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3259 IPR_TRACE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003260 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003261
3262 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003263}
3264
3265static struct bin_attribute ipr_trace_attr = {
3266 .attr = {
3267 .name = "trace",
3268 .mode = S_IRUGO,
3269 },
3270 .size = 0,
3271 .read = ipr_read_trace,
3272};
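
/*
 * Example of pulling the trace from user space (illustrative: host1 stands
 * in for the adapter's actual host number, and the path assumes this
 * bin_attribute is registered on the Scsi_Host class device):
 *
 *	dd if=/sys/class/scsi_host/host1/trace of=/tmp/ipr_trace bs=4096
 *
 * Each read() is served from ioa_cfg->trace under the host lock and is
 * capped at IPR_TRACE_SIZE bytes total.
 */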
3273#endif
3274
3275/**
3276 * ipr_show_fw_version - Show the firmware version
Tony Jonesee959b02008-02-22 00:13:36 +01003277 * @dev: class device struct
3278 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003279 *
3280 * Return value:
3281 * number of bytes printed to buffer
3282 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003283static ssize_t ipr_show_fw_version(struct device *dev,
3284 struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003285{
Tony Jonesee959b02008-02-22 00:13:36 +01003286 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003287 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3288 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3289 unsigned long lock_flags = 0;
3290 int len;
3291
3292 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3293 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3294 ucode_vpd->major_release, ucode_vpd->card_type,
3295 ucode_vpd->minor_release[0],
3296 ucode_vpd->minor_release[1]);
3297 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3298 return len;
3299}
3300
Tony Jonesee959b02008-02-22 00:13:36 +01003301static struct device_attribute ipr_fw_version_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003302 .attr = {
3303 .name = "fw_version",
3304 .mode = S_IRUGO,
3305 },
3306 .show = ipr_show_fw_version,
3307};
3308
3309/**
3310 * ipr_show_log_level - Show the adapter's error logging level
Tony Jonesee959b02008-02-22 00:13:36 +01003311 * @dev: class device struct
3312 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003313 *
3314 * Return value:
3315 * number of bytes printed to buffer
3316 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003317static ssize_t ipr_show_log_level(struct device *dev,
3318 struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003319{
Tony Jonesee959b02008-02-22 00:13:36 +01003320 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003321 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3322 unsigned long lock_flags = 0;
3323 int len;
3324
3325 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3326 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3327 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3328 return len;
3329}
3330
3331/**
3332 * ipr_store_log_level - Change the adapter's error logging level
Tony Jonesee959b02008-02-22 00:13:36 +01003333 * @dev: class device struct
3334 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003335 *
3336 * Return value:
3337 * 	number of bytes consumed from the buffer
3338 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003339static ssize_t ipr_store_log_level(struct device *dev,
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003340 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003341 const char *buf, size_t count)
3342{
Tony Jonesee959b02008-02-22 00:13:36 +01003343 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003344 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3345 unsigned long lock_flags = 0;
3346
3347 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3348 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3349 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3350 return strlen(buf);
3351}
3352
Tony Jonesee959b02008-02-22 00:13:36 +01003353static struct device_attribute ipr_log_level_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003354 .attr = {
3355 .name = "log_level",
3356 .mode = S_IRUGO | S_IWUSR,
3357 },
3358 .show = ipr_show_log_level,
3359 .store = ipr_store_log_level
3360};
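
/*
 * Example sysfs usage (illustrative; host1 stands in for the adapter's
 * actual host number):
 *
 *	cat /sys/class/scsi_host/host1/log_level
 *	echo 4 > /sys/class/scsi_host/host1/log_level
 *
 * The new level is parsed with simple_strtoul() and takes effect for
 * subsequent error logging immediately; no reset is required.
 */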
3361
3362/**
3363 * ipr_store_diagnostics - IOA Diagnostics interface
Tony Jonesee959b02008-02-22 00:13:36 +01003364 * @dev: device struct
3365 * @buf: buffer
3366 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003367 *
3368 * This function will reset the adapter and wait a reasonable
3369 * amount of time for any errors that the adapter might log.
3370 *
3371 * Return value:
3372 * count on success / other on failure
3373 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003374static ssize_t ipr_store_diagnostics(struct device *dev,
3375 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003376 const char *buf, size_t count)
3377{
Tony Jonesee959b02008-02-22 00:13:36 +01003378 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003379 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3380 unsigned long lock_flags = 0;
3381 int rc = count;
3382
3383 if (!capable(CAP_SYS_ADMIN))
3384 return -EACCES;
3385
Linus Torvalds1da177e2005-04-16 15:20:36 -07003386 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003387 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -05003388 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3389 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3390 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3391 }
3392
Linus Torvalds1da177e2005-04-16 15:20:36 -07003393 ioa_cfg->errors_logged = 0;
3394 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3395
3396 if (ioa_cfg->in_reset_reload) {
3397 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3398 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3399
3400 /* Wait for a second for any errors to be logged */
3401 msleep(1000);
3402 } else {
3403 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3404 return -EIO;
3405 }
3406
3407 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3408 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3409 rc = -EIO;
3410 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3411
3412 return rc;
3413}
3414
Tony Jonesee959b02008-02-22 00:13:36 +01003415static struct device_attribute ipr_diagnostics_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003416 .attr = {
3417 .name = "run_diagnostics",
3418 .mode = S_IWUSR,
3419 },
3420 .store = ipr_store_diagnostics
3421};
3422
3423/**
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003424 * ipr_show_adapter_state - Show the adapter's state
Tony Jonesee959b02008-02-22 00:13:36 +01003425 * @dev: device struct
3426 * @buf: buffer
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003427 *
3428 * Return value:
3429 * number of bytes printed to buffer
3430 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003431static ssize_t ipr_show_adapter_state(struct device *dev,
3432 struct device_attribute *attr, char *buf)
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003433{
Tony Jonesee959b02008-02-22 00:13:36 +01003434 struct Scsi_Host *shost = class_to_shost(dev);
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003435 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3436 unsigned long lock_flags = 0;
3437 int len;
3438
3439 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003440 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003441 len = snprintf(buf, PAGE_SIZE, "offline\n");
3442 else
3443 len = snprintf(buf, PAGE_SIZE, "online\n");
3444 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3445 return len;
3446}
3447
3448/**
3449 * ipr_store_adapter_state - Change adapter state
Tony Jonesee959b02008-02-22 00:13:36 +01003450 * @dev: device struct
3451 * @buf: buffer
3452 * @count: buffer size
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003453 *
3454 * This function will change the adapter's state.
3455 *
3456 * Return value:
3457 * count on success / other on failure
3458 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003459static ssize_t ipr_store_adapter_state(struct device *dev,
3460 struct device_attribute *attr,
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003461 const char *buf, size_t count)
3462{
Tony Jonesee959b02008-02-22 00:13:36 +01003463 struct Scsi_Host *shost = class_to_shost(dev);
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003464 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3465 unsigned long lock_flags;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003466 int result = count, i;
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003467
3468 if (!capable(CAP_SYS_ADMIN))
3469 return -EACCES;
3470
3471 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003472 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3473 !strncmp(buf, "online", 6)) {
3474 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3475 spin_lock(&ioa_cfg->hrrq[i]._lock);
3476 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3477 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3478 }
3479 wmb();
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003480 ioa_cfg->reset_retries = 0;
3481 ioa_cfg->in_ioa_bringdown = 0;
3482 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3483 }
3484 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3485 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3486
3487 return result;
3488}
3489
Tony Jonesee959b02008-02-22 00:13:36 +01003490static struct device_attribute ipr_ioa_state_attr = {
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003491 .attr = {
Brian King49dd0962008-04-28 17:36:20 -05003492 .name = "online_state",
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003493 .mode = S_IRUGO | S_IWUSR,
3494 },
3495 .show = ipr_show_adapter_state,
3496 .store = ipr_store_adapter_state
3497};
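
/*
 * Example sysfs usage (illustrative; host1 stands in for the adapter's
 * actual host number):
 *
 *	cat /sys/class/scsi_host/host1/online_state
 *	echo online > /sys/class/scsi_host/host1/online_state
 *
 * Writing "online" only has an effect while the adapter is marked dead;
 * it kicks off a full reset and the write does not return until the
 * reset/reload has finished. Any other value is accepted but ignored.
 */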
3498
3499/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003500 * ipr_store_reset_adapter - Reset the adapter
Tony Jonesee959b02008-02-22 00:13:36 +01003501 * @dev: device struct
3502 * @buf: buffer
3503 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003504 *
3505 * This function will reset the adapter.
3506 *
3507 * Return value:
3508 * count on success / other on failure
3509 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003510static ssize_t ipr_store_reset_adapter(struct device *dev,
3511 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003512 const char *buf, size_t count)
3513{
Tony Jonesee959b02008-02-22 00:13:36 +01003514 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003515 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3516 unsigned long lock_flags;
3517 int result = count;
3518
3519 if (!capable(CAP_SYS_ADMIN))
3520 return -EACCES;
3521
3522 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3523 if (!ioa_cfg->in_reset_reload)
3524 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3525 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3526 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3527
3528 return result;
3529}
3530
Tony Jonesee959b02008-02-22 00:13:36 +01003531static struct device_attribute ipr_ioa_reset_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003532 .attr = {
3533 .name = "reset_host",
3534 .mode = S_IWUSR,
3535 },
3536 .store = ipr_store_reset_adapter
3537};
3538
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003539static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3540 /**
3541 * ipr_show_iopoll_weight - Show ipr polling mode
3542 * @dev: class device struct
3543 * @buf: buffer
3544 *
3545 * Return value:
3546 * number of bytes printed to buffer
3547 **/
3548static ssize_t ipr_show_iopoll_weight(struct device *dev,
3549 struct device_attribute *attr, char *buf)
3550{
3551 struct Scsi_Host *shost = class_to_shost(dev);
3552 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3553 unsigned long lock_flags = 0;
3554 int len;
3555
3556 spin_lock_irqsave(shost->host_lock, lock_flags);
3557 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3558 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3559
3560 return len;
3561}
3562
3563/**
3564 * ipr_store_iopoll_weight - Change the adapter's polling mode
3565 * @dev: class device struct
3566 * @buf: buffer
3567 *
3568 * Return value:
3569 * number of bytes printed to buffer
3570 * 	number of bytes consumed from the buffer / -EINVAL on invalid input
3571static ssize_t ipr_store_iopoll_weight(struct device *dev,
3572 struct device_attribute *attr,
3573 const char *buf, size_t count)
3574{
3575 struct Scsi_Host *shost = class_to_shost(dev);
3576 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3577 unsigned long user_iopoll_weight;
3578 unsigned long lock_flags = 0;
3579 int i;
3580
3581 if (!ioa_cfg->sis64) {
3582 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3583 return -EINVAL;
3584 }
3585 if (kstrtoul(buf, 10, &user_iopoll_weight))
3586 return -EINVAL;
3587
3588 if (user_iopoll_weight > 256) {
3589 		dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must not exceed 256\n");
3590 return -EINVAL;
3591 }
3592
3593 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3594 		dev_info(&ioa_cfg->pdev->dev, "Requested blk-iopoll weight is already the current setting\n");
3595 return strlen(buf);
3596 }
3597
3598 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
3599 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3600 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3601 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3602 }
3603
3604 spin_lock_irqsave(shost->host_lock, lock_flags);
3605 ioa_cfg->iopoll_weight = user_iopoll_weight;
3606 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
3607 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3608 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3609 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3610 ioa_cfg->iopoll_weight, ipr_iopoll);
3611 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3612 }
3613 }
3614 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3615
3616 return strlen(buf);
3617}
3618
3619static struct device_attribute ipr_iopoll_weight_attr = {
3620 .attr = {
3621 .name = "iopoll_weight",
3622 .mode = S_IRUGO | S_IWUSR,
3623 },
3624 .show = ipr_show_iopoll_weight,
3625 .store = ipr_store_iopoll_weight
3626};
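
/*
 * Example sysfs usage (illustrative; host1 stands in for the adapter's
 * actual host number):
 *
 *	echo 64 > /sys/class/scsi_host/host1/iopoll_weight
 *
 * Only sis64 adapters accept the write, values above 256 return -EINVAL,
 * and rewriting the current weight is a no-op. A non-zero weight re-arms
 * blk-iopoll on the secondary HRRQs with that budget, provided blk-iopoll
 * is enabled and more than one interrupt vector is in use.
 */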
3627
Linus Torvalds1da177e2005-04-16 15:20:36 -07003628/**
3629 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3630 * @buf_len: buffer length
3631 *
3632 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3633 * list to use for microcode download
3634 *
3635 * Return value:
3636 * pointer to sglist / NULL on failure
3637 **/
3638static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3639{
3640 int sg_size, order, bsize_elem, num_elem, i, j;
3641 struct ipr_sglist *sglist;
3642 struct scatterlist *scatterlist;
3643 struct page *page;
3644
3645 /* Get the minimum size per scatter/gather element */
3646 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3647
3648 /* Get the actual size per element */
3649 order = get_order(sg_size);
3650
3651 /* Determine the actual number of bytes per element */
3652 bsize_elem = PAGE_SIZE * (1 << order);
3653
3654 /* Determine the actual number of sg entries needed */
3655 if (buf_len % bsize_elem)
3656 num_elem = (buf_len / bsize_elem) + 1;
3657 else
3658 num_elem = buf_len / bsize_elem;
3659
3660 /* Allocate a scatter/gather list for the DMA */
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06003661 sglist = kzalloc(sizeof(struct ipr_sglist) +
Linus Torvalds1da177e2005-04-16 15:20:36 -07003662 (sizeof(struct scatterlist) * (num_elem - 1)),
3663 GFP_KERNEL);
3664
3665 if (sglist == NULL) {
3666 ipr_trace;
3667 return NULL;
3668 }
3669
Linus Torvalds1da177e2005-04-16 15:20:36 -07003670 scatterlist = sglist->scatterlist;
Jens Axboe45711f12007-10-22 21:19:53 +02003671 sg_init_table(scatterlist, num_elem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003672
3673 sglist->order = order;
3674 sglist->num_sg = num_elem;
3675
3676 /* Allocate a bunch of sg elements */
3677 for (i = 0; i < num_elem; i++) {
3678 page = alloc_pages(GFP_KERNEL, order);
3679 if (!page) {
3680 ipr_trace;
3681
3682 /* Free up what we already allocated */
3683 for (j = i - 1; j >= 0; j--)
Jens Axboe45711f12007-10-22 21:19:53 +02003684 __free_pages(sg_page(&scatterlist[j]), order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003685 kfree(sglist);
3686 return NULL;
3687 }
3688
Jens Axboe642f1492007-10-24 11:20:47 +02003689 sg_set_page(&scatterlist[i], page, 0, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003690 }
3691
3692 return sglist;
3693}
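
/*
 * Worked sizing example for the allocator above, assuming for illustration
 * a 4KB PAGE_SIZE, IPR_MAX_SGLIST == 64 and a 1MB microcode image (the
 * real constants live in ipr.h and may differ):
 *
 *	sg_size    = 1048576 / (64 - 1) = 16644 bytes
 *	order      = get_order(16644)   = 3	(eight 4KB pages per element)
 *	bsize_elem = 4096 * (1 << 3)    = 32768 bytes
 *	num_elem   = 1048576 / 32768    = 32	(no remainder, so no +1)
 *
 * so the image ends up spread across 32 order-3 allocations, comfortably
 * below the IPR_MAX_SGLIST bound the first division was sized against.
 */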
3694
3695/**
3696 * ipr_free_ucode_buffer - Frees a microcode download buffer
3697 * @sglist: scatter/gather list pointer
3698 *
3699 * Free a DMA'able ucode download buffer previously allocated with
3700 * ipr_alloc_ucode_buffer
3701 *
3702 * Return value:
3703 * nothing
3704 **/
3705static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3706{
3707 int i;
3708
3709 for (i = 0; i < sglist->num_sg; i++)
Jens Axboe45711f12007-10-22 21:19:53 +02003710 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003711
3712 kfree(sglist);
3713}
3714
3715/**
3716 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3717 * @sglist: scatter/gather list pointer
3718 * @buffer: buffer pointer
3719 * @len: buffer length
3720 *
3721 * Copy a microcode image from a user buffer into a buffer allocated by
3722 * ipr_alloc_ucode_buffer
3723 *
3724 * Return value:
3725 * 0 on success / other on failure
3726 **/
3727static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3728 u8 *buffer, u32 len)
3729{
3730 int bsize_elem, i, result = 0;
3731 struct scatterlist *scatterlist;
3732 void *kaddr;
3733
3734 /* Determine the actual number of bytes per element */
3735 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3736
3737 scatterlist = sglist->scatterlist;
3738
3739 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
Jens Axboe45711f12007-10-22 21:19:53 +02003740 struct page *page = sg_page(&scatterlist[i]);
3741
3742 kaddr = kmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003743 memcpy(kaddr, buffer, bsize_elem);
Jens Axboe45711f12007-10-22 21:19:53 +02003744 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003745
3746 scatterlist[i].length = bsize_elem;
3747
3748 if (result != 0) {
3749 ipr_trace;
3750 return result;
3751 }
3752 }
3753
3754 if (len % bsize_elem) {
Jens Axboe45711f12007-10-22 21:19:53 +02003755 struct page *page = sg_page(&scatterlist[i]);
3756
3757 kaddr = kmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003758 memcpy(kaddr, buffer, len % bsize_elem);
Jens Axboe45711f12007-10-22 21:19:53 +02003759 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003760
3761 scatterlist[i].length = len % bsize_elem;
3762 }
3763
3764 sglist->buffer_len = len;
3765 return result;
3766}
3767
3768/**
Wayne Boyera32c0552010-02-19 13:23:36 -08003769 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3770 * @ipr_cmd: ipr command struct
3771 * @sglist: scatter/gather list
3772 *
3773 * Builds a microcode download IOA data list (IOADL).
3774 *
3775 **/
3776static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3777 struct ipr_sglist *sglist)
3778{
3779 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3780 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3781 struct scatterlist *scatterlist = sglist->scatterlist;
3782 int i;
3783
3784 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3785 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3786 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3787
3788 ioarcb->ioadl_len =
3789 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3790 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3791 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3792 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3793 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3794 }
3795
3796 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3797}
3798
3799/**
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003800 * ipr_build_ucode_ioadl - Build a microcode download IOADL
Linus Torvalds1da177e2005-04-16 15:20:36 -07003801 * @ipr_cmd: ipr command struct
3802 * @sglist: scatter/gather list
Linus Torvalds1da177e2005-04-16 15:20:36 -07003803 *
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003804 * Builds a microcode download IOA data list (IOADL).
Linus Torvalds1da177e2005-04-16 15:20:36 -07003805 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003806 **/
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003807static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3808 struct ipr_sglist *sglist)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003809{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003810 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08003811 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003812 struct scatterlist *scatterlist = sglist->scatterlist;
3813 int i;
3814
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003815 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003816 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08003817 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3818
3819 ioarcb->ioadl_len =
Linus Torvalds1da177e2005-04-16 15:20:36 -07003820 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3821
3822 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3823 ioadl[i].flags_and_data_len =
3824 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3825 ioadl[i].address =
3826 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3827 }
3828
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003829 ioadl[i-1].flags_and_data_len |=
3830 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3831}
3832
3833/**
3834 * ipr_update_ioa_ucode - Update IOA's microcode
3835 * @ioa_cfg: ioa config struct
3836 * @sglist: scatter/gather list
3837 *
3838 * Initiate an adapter reset to update the IOA's microcode
3839 *
3840 * Return value:
3841 * 0 on success / -EIO on failure
3842 **/
3843static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3844 struct ipr_sglist *sglist)
3845{
3846 unsigned long lock_flags;
3847
3848 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003849 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -05003850 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3851 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3852 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3853 }
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003854
3855 if (ioa_cfg->ucode_sglist) {
3856 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3857 dev_err(&ioa_cfg->pdev->dev,
3858 "Microcode download already in progress\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003859 return -EIO;
3860 }
3861
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003862 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3863 sglist->num_sg, DMA_TO_DEVICE);
3864
3865 if (!sglist->num_dma_sg) {
3866 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3867 dev_err(&ioa_cfg->pdev->dev,
3868 "Failed to map microcode download buffer!\n");
3869 return -EIO;
3870 }
3871
3872 ioa_cfg->ucode_sglist = sglist;
3873 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3874 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3875 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3876
3877 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3878 ioa_cfg->ucode_sglist = NULL;
3879 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003880 return 0;
3881}
3882
3883/**
3884 * ipr_store_update_fw - Update the firmware on the adapter
Tony Jonesee959b02008-02-22 00:13:36 +01003885 * @dev: device struct
3886 * @buf: buffer
3887 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003888 *
3889 * This function will update the firmware on the adapter.
3890 *
3891 * Return value:
3892 * count on success / other on failure
3893 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003894static ssize_t ipr_store_update_fw(struct device *dev,
3895 struct device_attribute *attr,
3896 const char *buf, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003897{
Tony Jonesee959b02008-02-22 00:13:36 +01003898 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003899 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3900 struct ipr_ucode_image_header *image_hdr;
3901 const struct firmware *fw_entry;
3902 struct ipr_sglist *sglist;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003903 char fname[100];
3904 char *src;
3905 int len, result, dnld_size;
3906
3907 if (!capable(CAP_SYS_ADMIN))
3908 return -EACCES;
3909
3910 	snprintf(fname, sizeof(fname), "%s", buf);
3911 	len = strlen(fname);
 	if (len && fname[len - 1] == '\n')
 		fname[len - 1] = '\0';	/* drop the trailing newline echoed in with the filename */
3912
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003913 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003914 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3915 return -EIO;
3916 }
3917
3918 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3919
Linus Torvalds1da177e2005-04-16 15:20:36 -07003920 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3921 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3922 sglist = ipr_alloc_ucode_buffer(dnld_size);
3923
3924 if (!sglist) {
3925 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3926 release_firmware(fw_entry);
3927 return -ENOMEM;
3928 }
3929
3930 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3931
3932 if (result) {
3933 dev_err(&ioa_cfg->pdev->dev,
3934 "Microcode buffer copy to DMA buffer failed\n");
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003935 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003936 }
3937
Wayne Boyer14ed9cc2011-10-03 20:54:37 -07003938 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
3939
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003940 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003941
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003942 if (!result)
3943 result = count;
3944out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003945 ipr_free_ucode_buffer(sglist);
3946 release_firmware(fw_entry);
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003947 return result;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003948}
3949
Tony Jonesee959b02008-02-22 00:13:36 +01003950static struct device_attribute ipr_update_fw_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003951 .attr = {
3952 .name = "update_fw",
3953 .mode = S_IWUSR,
3954 },
3955 .store = ipr_store_update_fw
3956};
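
/*
 * Example firmware update from user space (illustrative; the image name
 * and host number are placeholders). The string written is handed to
 * request_firmware(), so the image must be visible to the firmware
 * loader, typically under /lib/firmware:
 *
 *	cp ipr-ucode.img /lib/firmware/
 *	echo ipr-ucode.img > /sys/class/scsi_host/host1/update_fw
 *
 * The write blocks while the image is copied into the sg buffer and the
 * adapter is reset to apply it, hence the "may take up to 30 minutes"
 * message logged above.
 */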
3957
Wayne Boyer75576bb2010-07-14 10:50:14 -07003958/**
3959 * ipr_show_fw_type - Show the adapter's firmware type.
3960 * @dev: class device struct
3961 * @buf: buffer
3962 *
3963 * Return value:
3964 * number of bytes printed to buffer
3965 **/
3966static ssize_t ipr_show_fw_type(struct device *dev,
3967 struct device_attribute *attr, char *buf)
3968{
3969 struct Scsi_Host *shost = class_to_shost(dev);
3970 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3971 unsigned long lock_flags = 0;
3972 int len;
3973
3974 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3975 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
3976 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3977 return len;
3978}
3979
3980static struct device_attribute ipr_ioa_fw_type_attr = {
3981 .attr = {
3982 .name = "fw_type",
3983 .mode = S_IRUGO,
3984 },
3985 .show = ipr_show_fw_type
3986};
3987
Tony Jonesee959b02008-02-22 00:13:36 +01003988static struct device_attribute *ipr_ioa_attrs[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003989 &ipr_fw_version_attr,
3990 &ipr_log_level_attr,
3991 &ipr_diagnostics_attr,
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003992 &ipr_ioa_state_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003993 &ipr_ioa_reset_attr,
3994 &ipr_update_fw_attr,
Wayne Boyer75576bb2010-07-14 10:50:14 -07003995 &ipr_ioa_fw_type_attr,
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003996 &ipr_iopoll_weight_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003997 NULL,
3998};
3999
4000#ifdef CONFIG_SCSI_IPR_DUMP
4001/**
4002 * ipr_read_dump - Dump the adapter
Chris Wright2c3c8be2010-05-12 18:28:57 -07004003 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07004004 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08004005 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07004006 * @buf: buffer
4007 * @off: offset
4008 * @count: buffer size
4009 *
4010 * Return value:
4011 * number of bytes printed to buffer
4012 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07004013static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08004014 struct bin_attribute *bin_attr,
4015 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004016{
Tony Jonesee959b02008-02-22 00:13:36 +01004017 struct device *cdev = container_of(kobj, struct device, kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004018 struct Scsi_Host *shost = class_to_shost(cdev);
4019 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4020 struct ipr_dump *dump;
4021 unsigned long lock_flags = 0;
4022 char *src;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004023 int len, sdt_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004024 size_t rc = count;
4025
4026 if (!capable(CAP_SYS_ADMIN))
4027 return -EACCES;
4028
4029 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4030 dump = ioa_cfg->dump;
4031
4032 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4033 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4034 return 0;
4035 }
4036 kref_get(&dump->kref);
4037 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4038
4039 if (off > dump->driver_dump.hdr.len) {
4040 kref_put(&dump->kref, ipr_release_dump);
4041 return 0;
4042 }
4043
4044 if (off + count > dump->driver_dump.hdr.len) {
4045 count = dump->driver_dump.hdr.len - off;
4046 rc = count;
4047 }
4048
4049 if (count && off < sizeof(dump->driver_dump)) {
4050 if (off + count > sizeof(dump->driver_dump))
4051 len = sizeof(dump->driver_dump) - off;
4052 else
4053 len = count;
4054 src = (u8 *)&dump->driver_dump + off;
4055 memcpy(buf, src, len);
4056 buf += len;
4057 off += len;
4058 count -= len;
4059 }
4060
4061 off -= sizeof(dump->driver_dump);
4062
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004063 if (ioa_cfg->sis64)
4064 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4065 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4066 sizeof(struct ipr_sdt_entry));
4067 else
4068 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4069 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4070
4071 if (count && off < sdt_end) {
4072 if (off + count > sdt_end)
4073 len = sdt_end - off;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004074 else
4075 len = count;
4076 src = (u8 *)&dump->ioa_dump + off;
4077 memcpy(buf, src, len);
4078 buf += len;
4079 off += len;
4080 count -= len;
4081 }
4082
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004083 off -= sdt_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004084
4085 while (count) {
4086 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4087 len = PAGE_ALIGN(off) - off;
4088 else
4089 len = count;
4090 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4091 src += off & ~PAGE_MASK;
4092 memcpy(buf, src, len);
4093 buf += len;
4094 off += len;
4095 count -= len;
4096 }
4097
4098 kref_put(&dump->kref, ipr_release_dump);
4099 return rc;
4100}
4101
4102/**
4103 * ipr_alloc_dump - Prepare for adapter dump
4104 * @ioa_cfg: ioa config struct
4105 *
4106 * Return value:
4107 * 0 on success / other on failure
4108 **/
4109static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4110{
4111 struct ipr_dump *dump;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004112 __be32 **ioa_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004113 unsigned long lock_flags = 0;
4114
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06004115 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004116
4117 if (!dump) {
4118 ipr_err("Dump memory allocation failed\n");
4119 return -ENOMEM;
4120 }
4121
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004122 if (ioa_cfg->sis64)
4123 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4124 else
4125 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4126
4127 if (!ioa_data) {
4128 ipr_err("Dump memory allocation failed\n");
4129 kfree(dump);
4130 return -ENOMEM;
4131 }
4132
4133 dump->ioa_dump.ioa_data = ioa_data;
4134
Linus Torvalds1da177e2005-04-16 15:20:36 -07004135 kref_init(&dump->kref);
4136 dump->ioa_cfg = ioa_cfg;
4137
4138 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4139
4140 if (INACTIVE != ioa_cfg->sdt_state) {
4141 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004142 vfree(dump->ioa_dump.ioa_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004143 kfree(dump);
4144 return 0;
4145 }
4146
4147 ioa_cfg->dump = dump;
4148 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06004149 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004150 ioa_cfg->dump_taken = 1;
4151 schedule_work(&ioa_cfg->work_q);
4152 }
4153 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4154
Linus Torvalds1da177e2005-04-16 15:20:36 -07004155 return 0;
4156}
4157
4158/**
4159 * ipr_free_dump - Free adapter dump memory
4160 * @ioa_cfg: ioa config struct
4161 *
4162 * Return value:
4163 * 0 on success / other on failure
4164 **/
4165static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4166{
4167 struct ipr_dump *dump;
4168 unsigned long lock_flags = 0;
4169
4170 ENTER;
4171
4172 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4173 dump = ioa_cfg->dump;
4174 if (!dump) {
4175 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4176 return 0;
4177 }
4178
4179 ioa_cfg->dump = NULL;
4180 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4181
4182 kref_put(&dump->kref, ipr_release_dump);
4183
4184 LEAVE;
4185 return 0;
4186}
4187
4188/**
4189 * ipr_write_dump - Setup dump state of adapter
Chris Wright2c3c8be2010-05-12 18:28:57 -07004190 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07004191 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08004192 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07004193 * @buf: buffer
4194 * @off: offset
4195 * @count: buffer size
4196 *
4197 * Return value:
4198 * number of bytes printed to buffer
4199 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07004200static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08004201 struct bin_attribute *bin_attr,
4202 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004203{
Tony Jonesee959b02008-02-22 00:13:36 +01004204 struct device *cdev = container_of(kobj, struct device, kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004205 struct Scsi_Host *shost = class_to_shost(cdev);
4206 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4207 int rc;
4208
4209 if (!capable(CAP_SYS_ADMIN))
4210 return -EACCES;
4211
4212 if (buf[0] == '1')
4213 rc = ipr_alloc_dump(ioa_cfg);
4214 else if (buf[0] == '0')
4215 rc = ipr_free_dump(ioa_cfg);
4216 else
4217 return -EINVAL;
4218
4219 if (rc)
4220 return rc;
4221 else
4222 return count;
4223}
4224
4225static struct bin_attribute ipr_dump_attr = {
4226 .attr = {
4227 .name = "dump",
4228 .mode = S_IRUSR | S_IWUSR,
4229 },
4230 .size = 0,
4231 .read = ipr_read_dump,
4232 .write = ipr_write_dump
4233};
4234#else
4235static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4236#endif
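
/*
 * Illustrative sketch only -- not part of the driver.  The "dump"
 * bin_attribute above gives user space a three step protocol: write '1'
 * to arm a dump (ipr_alloc_dump), read back the binary contents once the
 * adapter has produced one, then write '0' to release the memory
 * (ipr_free_dump).  The sysfs path below is an assumption about where the
 * Scsi_Host class device appears, and error handling is omitted.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static void fetch_ipr_dump(const char *path)
 *	{
 *		// e.g. path = "/sys/class/scsi_host/host0/dump" (assumed)
 *		char buf[4096];
 *		int fd = open(path, O_RDWR);
 *
 *		write(fd, "1", 1);		// arm the dump
 *		// ... wait for the adapter dump to be obtained ...
 *		while (read(fd, buf, sizeof(buf)) > 0)
 *			;			// drains ipr_read_dump()
 *		write(fd, "0", 1);		// free the dump buffer
 *		close(fd);
 *	}
 */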
4237
4238/**
4239 * ipr_change_queue_depth - Change the device's queue depth
4240 * @sdev: scsi device struct
4241 * @qdepth: depth to set
Mike Christiee881a172009-10-15 17:46:39 -07004242 * @reason: calling context
Linus Torvalds1da177e2005-04-16 15:20:36 -07004243 *
4244 * Return value:
4245 * actual depth set
4246 **/
Mike Christiee881a172009-10-15 17:46:39 -07004247static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4248 int reason)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004249{
Brian King35a39692006-09-25 12:39:20 -05004250 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4251 struct ipr_resource_entry *res;
4252 unsigned long lock_flags = 0;
4253
Mike Christiee881a172009-10-15 17:46:39 -07004254 if (reason != SCSI_QDEPTH_DEFAULT)
4255 return -EOPNOTSUPP;
4256
Brian King35a39692006-09-25 12:39:20 -05004257 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4258 res = (struct ipr_resource_entry *)sdev->hostdata;
4259
4260 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4261 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4262 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4263
Linus Torvalds1da177e2005-04-16 15:20:36 -07004264 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4265 return sdev->queue_depth;
4266}
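
/*
 * Usage note: this handler is reached through the host template's
 * .change_queue_depth hook, which the SCSI midlayer invokes when the
 * standard per-device "queue_depth" sysfs attribute is written, e.g.
 * "echo 16 > /sys/bus/scsi/devices/0:0:1:0/queue_depth" (device address
 * chosen arbitrarily).  For ATA LUNs the requested depth is clamped to
 * IPR_MAX_CMD_PER_ATA_LUN before scsi_adjust_queue_depth() applies it.
 */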
4267
4268/**
4269 * ipr_change_queue_type - Change the device's queue type
 4270 * @sdev: scsi device struct
4271 * @tag_type: type of tags to use
4272 *
4273 * Return value:
4274 * actual queue type set
4275 **/
4276static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4277{
4278 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4279 struct ipr_resource_entry *res;
4280 unsigned long lock_flags = 0;
4281
4282 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4283 res = (struct ipr_resource_entry *)sdev->hostdata;
4284
4285 if (res) {
4286 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4287 /*
4288 * We don't bother quiescing the device here since the
4289 * adapter firmware does it for us.
4290 */
4291 scsi_set_tag_type(sdev, tag_type);
4292
4293 if (tag_type)
4294 scsi_activate_tcq(sdev, sdev->queue_depth);
4295 else
4296 scsi_deactivate_tcq(sdev, sdev->queue_depth);
4297 } else
4298 tag_type = 0;
4299 } else
4300 tag_type = 0;
4301
4302 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4303 return tag_type;
4304}
4305
4306/**
4307 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4308 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004309 * @attr: device attribute structure
Linus Torvalds1da177e2005-04-16 15:20:36 -07004310 * @buf: buffer
4311 *
4312 * Return value:
4313 * number of bytes printed to buffer
4314 **/
Yani Ioannou10523b32005-05-17 06:43:37 -04004315static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004316{
4317 struct scsi_device *sdev = to_scsi_device(dev);
4318 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4319 struct ipr_resource_entry *res;
4320 unsigned long lock_flags = 0;
4321 ssize_t len = -ENXIO;
4322
4323 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4324 res = (struct ipr_resource_entry *)sdev->hostdata;
4325 if (res)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004326 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004327 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4328 return len;
4329}
4330
4331static struct device_attribute ipr_adapter_handle_attr = {
4332 .attr = {
4333 .name = "adapter_handle",
4334 .mode = S_IRUSR,
4335 },
4336 .show = ipr_show_adapter_handle
4337};
4338
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004339/**
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004340 * ipr_show_resource_path - Show the resource path or the resource address for
4341 * this device.
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004342 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004343 * @attr: device attribute structure
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004344 * @buf: buffer
4345 *
4346 * Return value:
4347 * number of bytes printed to buffer
4348 **/
4349static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4350{
4351 struct scsi_device *sdev = to_scsi_device(dev);
4352 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4353 struct ipr_resource_entry *res;
4354 unsigned long lock_flags = 0;
4355 ssize_t len = -ENXIO;
4356 char buffer[IPR_MAX_RES_PATH_LENGTH];
4357
4358 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4359 res = (struct ipr_resource_entry *)sdev->hostdata;
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004360 if (res && ioa_cfg->sis64)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004361 len = snprintf(buf, PAGE_SIZE, "%s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06004362 __ipr_format_res_path(res->res_path, buffer,
4363 sizeof(buffer)));
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004364 else if (res)
4365 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4366 res->bus, res->target, res->lun);
4367
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004368 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4369 return len;
4370}
4371
4372static struct device_attribute ipr_resource_path_attr = {
4373 .attr = {
4374 .name = "resource_path",
Wayne Boyer75576bb2010-07-14 10:50:14 -07004375 .mode = S_IRUGO,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004376 },
4377 .show = ipr_show_resource_path
4378};
4379
Wayne Boyer75576bb2010-07-14 10:50:14 -07004380/**
Wayne Boyer46d74562010-08-11 07:15:17 -07004381 * ipr_show_device_id - Show the device_id for this device.
4382 * @dev: device struct
4383 * @attr: device attribute structure
4384 * @buf: buffer
4385 *
4386 * Return value:
4387 * number of bytes printed to buffer
4388 **/
4389static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4390{
4391 struct scsi_device *sdev = to_scsi_device(dev);
4392 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4393 struct ipr_resource_entry *res;
4394 unsigned long lock_flags = 0;
4395 ssize_t len = -ENXIO;
4396
4397 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4398 res = (struct ipr_resource_entry *)sdev->hostdata;
4399 if (res && ioa_cfg->sis64)
4400 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4401 else if (res)
4402 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4403
4404 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4405 return len;
4406}
4407
4408static struct device_attribute ipr_device_id_attr = {
4409 .attr = {
4410 .name = "device_id",
4411 .mode = S_IRUGO,
4412 },
4413 .show = ipr_show_device_id
4414};
4415
4416/**
Wayne Boyer75576bb2010-07-14 10:50:14 -07004417 * ipr_show_resource_type - Show the resource type for this device.
4418 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004419 * @attr: device attribute structure
Wayne Boyer75576bb2010-07-14 10:50:14 -07004420 * @buf: buffer
4421 *
4422 * Return value:
4423 * number of bytes printed to buffer
4424 **/
4425static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4426{
4427 struct scsi_device *sdev = to_scsi_device(dev);
4428 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4429 struct ipr_resource_entry *res;
4430 unsigned long lock_flags = 0;
4431 ssize_t len = -ENXIO;
4432
4433 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4434 res = (struct ipr_resource_entry *)sdev->hostdata;
4435
4436 if (res)
4437 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4438
4439 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4440 return len;
4441}
4442
4443static struct device_attribute ipr_resource_type_attr = {
4444 .attr = {
4445 .name = "resource_type",
4446 .mode = S_IRUGO,
4447 },
4448 .show = ipr_show_resource_type
4449};
4450
Linus Torvalds1da177e2005-04-16 15:20:36 -07004451static struct device_attribute *ipr_dev_attrs[] = {
4452 &ipr_adapter_handle_attr,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004453 &ipr_resource_path_attr,
Wayne Boyer46d74562010-08-11 07:15:17 -07004454 &ipr_device_id_attr,
Wayne Boyer75576bb2010-07-14 10:50:14 -07004455 &ipr_resource_type_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004456 NULL,
4457};
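
/*
 * Usage note: the attributes above are attached to every SCSI device the
 * driver exposes, so they show up alongside the standard SCSI sysfs
 * attributes, e.g. (path layout assumed, device address arbitrary):
 *
 *	/sys/bus/scsi/devices/0:0:1:0/adapter_handle
 *	/sys/bus/scsi/devices/0:0:1:0/resource_path
 *	/sys/bus/scsi/devices/0:0:1:0/device_id
 *	/sys/bus/scsi/devices/0:0:1:0/resource_type
 *
 * resource_path and device_id report SIS-64 specific information; on
 * older adapters resource_path falls back to the host:bus:target:lun
 * address and device_id reports the LUN WWN.
 */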
4458
4459/**
4460 * ipr_biosparam - Return the HSC mapping
4461 * @sdev: scsi device struct
4462 * @block_device: block device pointer
4463 * @capacity: capacity of the device
4464 * @parm: Array containing returned HSC values.
4465 *
4466 * This function generates the HSC parms that fdisk uses.
4467 * We want to make sure we return something that places partitions
4468 * on 4k boundaries for best performance with the IOA.
4469 *
4470 * Return value:
4471 * 0 on success
4472 **/
4473static int ipr_biosparam(struct scsi_device *sdev,
4474 struct block_device *block_device,
4475 sector_t capacity, int *parm)
4476{
4477 int heads, sectors;
4478 sector_t cylinders;
4479
4480 heads = 128;
4481 sectors = 32;
4482
4483 cylinders = capacity;
4484 sector_div(cylinders, (128 * 32));
4485
4486 /* return result */
4487 parm[0] = heads;
4488 parm[1] = sectors;
4489 parm[2] = cylinders;
4490
4491 return 0;
4492}
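
/*
 * Worked example (numbers chosen for illustration only): with the fixed
 * 128 head / 32 sector geometry, one "cylinder" is 128 * 32 = 4096
 * sectors (2MB with 512-byte sectors), so any partition that starts on a
 * cylinder boundary is automatically 4k aligned.  A hypothetical
 * 143374744-sector disk would report:
 *
 *	heads     = 128
 *	sectors   = 32
 *	cylinders = 143374744 / 4096 = 35003	(the remainder is discarded)
 */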
4493
4494/**
Brian King35a39692006-09-25 12:39:20 -05004495 * ipr_find_starget - Find target based on bus/target.
4496 * @starget: scsi target struct
4497 *
4498 * Return value:
4499 * resource entry pointer if found / NULL if not found
4500 **/
4501static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4502{
4503 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4504 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4505 struct ipr_resource_entry *res;
4506
4507 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004508 if ((res->bus == starget->channel) &&
Brian King0ee1d712012-03-14 21:20:06 -05004509 (res->target == starget->id)) {
Brian King35a39692006-09-25 12:39:20 -05004510 return res;
4511 }
4512 }
4513
4514 return NULL;
4515}
4516
4517static struct ata_port_info sata_port_info;
4518
4519/**
4520 * ipr_target_alloc - Prepare for commands to a SCSI target
4521 * @starget: scsi target struct
4522 *
4523 * If the device is a SATA device, this function allocates an
4524 * ATA port with libata, else it does nothing.
4525 *
4526 * Return value:
4527 * 0 on success / non-0 on failure
4528 **/
4529static int ipr_target_alloc(struct scsi_target *starget)
4530{
4531 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4532 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4533 struct ipr_sata_port *sata_port;
4534 struct ata_port *ap;
4535 struct ipr_resource_entry *res;
4536 unsigned long lock_flags;
4537
4538 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4539 res = ipr_find_starget(starget);
4540 starget->hostdata = NULL;
4541
4542 if (res && ipr_is_gata(res)) {
4543 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4544 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4545 if (!sata_port)
4546 return -ENOMEM;
4547
4548 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4549 if (ap) {
4550 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4551 sata_port->ioa_cfg = ioa_cfg;
4552 sata_port->ap = ap;
4553 sata_port->res = res;
4554
4555 res->sata_port = sata_port;
4556 ap->private_data = sata_port;
4557 starget->hostdata = sata_port;
4558 } else {
4559 kfree(sata_port);
4560 return -ENOMEM;
4561 }
4562 }
4563 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4564
4565 return 0;
4566}
4567
4568/**
4569 * ipr_target_destroy - Destroy a SCSI target
4570 * @starget: scsi target struct
4571 *
4572 * If the device was a SATA device, this function frees the libata
4573 * ATA port, else it does nothing.
4574 *
4575 **/
4576static void ipr_target_destroy(struct scsi_target *starget)
4577{
4578 struct ipr_sata_port *sata_port = starget->hostdata;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004579 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4580 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4581
4582 if (ioa_cfg->sis64) {
Brian King0ee1d712012-03-14 21:20:06 -05004583 if (!ipr_find_starget(starget)) {
4584 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4585 clear_bit(starget->id, ioa_cfg->array_ids);
4586 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4587 clear_bit(starget->id, ioa_cfg->vset_ids);
4588 else if (starget->channel == 0)
4589 clear_bit(starget->id, ioa_cfg->target_ids);
4590 }
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004591 }
Brian King35a39692006-09-25 12:39:20 -05004592
4593 if (sata_port) {
4594 starget->hostdata = NULL;
4595 ata_sas_port_destroy(sata_port->ap);
4596 kfree(sata_port);
4597 }
4598}
4599
4600/**
4601 * ipr_find_sdev - Find device based on bus/target/lun.
4602 * @sdev: scsi device struct
4603 *
4604 * Return value:
4605 * resource entry pointer if found / NULL if not found
4606 **/
4607static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4608{
4609 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4610 struct ipr_resource_entry *res;
4611
4612 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004613 if ((res->bus == sdev->channel) &&
4614 (res->target == sdev->id) &&
4615 (res->lun == sdev->lun))
Brian King35a39692006-09-25 12:39:20 -05004616 return res;
4617 }
4618
4619 return NULL;
4620}
4621
4622/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004623 * ipr_slave_destroy - Unconfigure a SCSI device
4624 * @sdev: scsi device struct
4625 *
4626 * Return value:
4627 * nothing
4628 **/
4629static void ipr_slave_destroy(struct scsi_device *sdev)
4630{
4631 struct ipr_resource_entry *res;
4632 struct ipr_ioa_cfg *ioa_cfg;
4633 unsigned long lock_flags = 0;
4634
4635 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4636
4637 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4638 res = (struct ipr_resource_entry *) sdev->hostdata;
4639 if (res) {
Brian King35a39692006-09-25 12:39:20 -05004640 if (res->sata_port)
Tejun Heo3e4ec342010-05-10 21:41:30 +02004641 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004642 sdev->hostdata = NULL;
4643 res->sdev = NULL;
Brian King35a39692006-09-25 12:39:20 -05004644 res->sata_port = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004645 }
4646 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4647}
4648
4649/**
4650 * ipr_slave_configure - Configure a SCSI device
4651 * @sdev: scsi device struct
4652 *
4653 * This function configures the specified scsi device.
4654 *
4655 * Return value:
4656 * 0 on success
4657 **/
4658static int ipr_slave_configure(struct scsi_device *sdev)
4659{
4660 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4661 struct ipr_resource_entry *res;
Brian Kingdd406ef2009-04-22 08:58:02 -05004662 struct ata_port *ap = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004663 unsigned long lock_flags = 0;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004664 char buffer[IPR_MAX_RES_PATH_LENGTH];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004665
4666 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4667 res = sdev->hostdata;
4668 if (res) {
4669 if (ipr_is_af_dasd_device(res))
4670 sdev->type = TYPE_RAID;
brking@us.ibm.com0726ce22005-11-01 17:01:01 -06004671 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004672 sdev->scsi_level = 4;
brking@us.ibm.com0726ce22005-11-01 17:01:01 -06004673 sdev->no_uld_attach = 1;
4674 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004675 if (ipr_is_vset_device(res)) {
Jens Axboe242f9dc2008-09-14 05:55:09 -07004676 blk_queue_rq_timeout(sdev->request_queue,
4677 IPR_VSET_RW_TIMEOUT);
Martin K. Petersen086fa5f2010-02-26 00:20:38 -05004678 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004679 }
Brian Kingdd406ef2009-04-22 08:58:02 -05004680 if (ipr_is_gata(res) && res->sata_port)
4681 ap = res->sata_port->ap;
4682 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4683
4684 if (ap) {
Brian King35a39692006-09-25 12:39:20 -05004685 scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
Brian Kingdd406ef2009-04-22 08:58:02 -05004686 ata_sas_slave_configure(sdev, ap);
4687 } else
Brian King35a39692006-09-25 12:39:20 -05004688 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004689 if (ioa_cfg->sis64)
4690 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06004691 ipr_format_res_path(ioa_cfg,
4692 res->res_path, buffer, sizeof(buffer)));
Brian Kingdd406ef2009-04-22 08:58:02 -05004693 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004694 }
4695 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4696 return 0;
4697}
4698
4699/**
Brian King35a39692006-09-25 12:39:20 -05004700 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4701 * @sdev: scsi device struct
4702 *
4703 * This function initializes an ATA port so that future commands
4704 * sent through queuecommand will work.
4705 *
4706 * Return value:
4707 * 0 on success
4708 **/
4709static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4710{
4711 struct ipr_sata_port *sata_port = NULL;
4712 int rc = -ENXIO;
4713
4714 ENTER;
4715 if (sdev->sdev_target)
4716 sata_port = sdev->sdev_target->hostdata;
Dan Williamsb2024452012-03-21 21:09:07 -07004717 if (sata_port) {
Brian King35a39692006-09-25 12:39:20 -05004718 rc = ata_sas_port_init(sata_port->ap);
Dan Williamsb2024452012-03-21 21:09:07 -07004719 if (rc == 0)
4720 rc = ata_sas_sync_probe(sata_port->ap);
4721 }
4722
Brian King35a39692006-09-25 12:39:20 -05004723 if (rc)
4724 ipr_slave_destroy(sdev);
4725
4726 LEAVE;
4727 return rc;
4728}
4729
4730/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004731 * ipr_slave_alloc - Prepare for commands to a device.
4732 * @sdev: scsi device struct
4733 *
4734 * This function saves a pointer to the resource entry
4735 * in the scsi device struct if the device exists. We
4736 * can then use this pointer in ipr_queuecommand when
4737 * handling new commands.
4738 *
4739 * Return value:
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06004740 * 0 on success / -ENXIO if device does not exist
Linus Torvalds1da177e2005-04-16 15:20:36 -07004741 **/
4742static int ipr_slave_alloc(struct scsi_device *sdev)
4743{
4744 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4745 struct ipr_resource_entry *res;
4746 unsigned long lock_flags;
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06004747 int rc = -ENXIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004748
4749 sdev->hostdata = NULL;
4750
4751 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4752
Brian King35a39692006-09-25 12:39:20 -05004753 res = ipr_find_sdev(sdev);
4754 if (res) {
4755 res->sdev = sdev;
4756 res->add_to_ml = 0;
4757 res->in_erp = 0;
4758 sdev->hostdata = res;
4759 if (!ipr_is_naca_model(res))
4760 res->needs_sync_complete = 1;
4761 rc = 0;
4762 if (ipr_is_gata(res)) {
4763 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4764 return ipr_ata_slave_alloc(sdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004765 }
4766 }
4767
4768 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4769
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06004770 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004771}
4772
Brian King7f8e9be2014-10-30 17:27:10 -05004773/**
4774 * ipr_match_lun - Match function for specified LUN
4775 * @ipr_cmd: ipr command struct
4776 * @device: device to match (sdev)
4777 *
4778 * Returns:
4779 * 1 if command matches sdev / 0 if command does not match sdev
4780 **/
4781static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
4782{
4783 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
4784 return 1;
4785 return 0;
4786}
4787
4788/**
4789 * ipr_wait_for_ops - Wait for matching commands to complete
 4790 * @ioa_cfg: ioa config struct
4791 * @device: device to match (sdev)
4792 * @match: match function to use
4793 *
4794 * Returns:
4795 * SUCCESS / FAILED
4796 **/
4797static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
4798 int (*match)(struct ipr_cmnd *, void *))
4799{
4800 struct ipr_cmnd *ipr_cmd;
4801 int wait;
4802 unsigned long flags;
4803 struct ipr_hrr_queue *hrrq;
4804 signed long timeout = IPR_ABORT_TASK_TIMEOUT;
4805 DECLARE_COMPLETION_ONSTACK(comp);
4806
4807 ENTER;
4808 do {
4809 wait = 0;
4810
4811 for_each_hrrq(hrrq, ioa_cfg) {
4812 spin_lock_irqsave(hrrq->lock, flags);
4813 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4814 if (match(ipr_cmd, device)) {
4815 ipr_cmd->eh_comp = &comp;
4816 wait++;
4817 }
4818 }
4819 spin_unlock_irqrestore(hrrq->lock, flags);
4820 }
4821
4822 if (wait) {
4823 timeout = wait_for_completion_timeout(&comp, timeout);
4824
4825 if (!timeout) {
4826 wait = 0;
4827
4828 for_each_hrrq(hrrq, ioa_cfg) {
4829 spin_lock_irqsave(hrrq->lock, flags);
4830 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4831 if (match(ipr_cmd, device)) {
4832 ipr_cmd->eh_comp = NULL;
4833 wait++;
4834 }
4835 }
4836 spin_unlock_irqrestore(hrrq->lock, flags);
4837 }
4838
4839 if (wait)
4840 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
4841 LEAVE;
4842 return wait ? FAILED : SUCCESS;
4843 }
4844 }
4845 } while (wait);
4846
4847 LEAVE;
4848 return SUCCESS;
4849}
4850
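/**
 * ipr_eh_host_reset - Reset the host adapter
 * @cmd: scsi command struct
 *
 * This function initiates an adapter reset if one is not already in
 * progress, waits for the reset/reload to finish, and fails the request
 * if the adapter could not be brought back to an operational state.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/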
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06004851static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004852{
4853 struct ipr_ioa_cfg *ioa_cfg;
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06004854 unsigned long lock_flags = 0;
4855 int rc = SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004856
4857 ENTER;
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06004858 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4859 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004860
wenxiong@linux.vnet.ibm.com96b04db2013-04-17 09:34:06 -05004861 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06004862 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02004863 dev_err(&ioa_cfg->pdev->dev,
4864 "Adapter being reset as a result of error recovery.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004865
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02004866 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4867 ioa_cfg->sdt_state = GET_DUMP;
4868 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004869
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06004870 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4871 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4872 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004873
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06004874	/* If we got hit with a host reset while we were already resetting
 4875	   the adapter for some reason, and that reset failed, fail this reset too. */
4876 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4877 ipr_trace;
4878 rc = FAILED;
4879 }
4880
4881 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004882 LEAVE;
4883 return rc;
4884}
4885
4886/**
Brian Kingc6513092006-03-29 09:37:43 -06004887 * ipr_device_reset - Reset the device
4888 * @ioa_cfg: ioa config struct
4889 * @res: resource entry struct
4890 *
4891 * This function issues a device reset to the affected device.
4892 * If the device is a SCSI device, a LUN reset will be sent
4893 * to the device first. If that does not work, a target reset
Brian King35a39692006-09-25 12:39:20 -05004894 * will be sent. If the device is a SATA device, a PHY reset will
4895 * be sent.
Brian Kingc6513092006-03-29 09:37:43 -06004896 *
4897 * Return value:
4898 * 0 on success / non-zero on failure
4899 **/
4900static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4901 struct ipr_resource_entry *res)
4902{
4903 struct ipr_cmnd *ipr_cmd;
4904 struct ipr_ioarcb *ioarcb;
4905 struct ipr_cmd_pkt *cmd_pkt;
Brian King35a39692006-09-25 12:39:20 -05004906 struct ipr_ioarcb_ata_regs *regs;
Brian Kingc6513092006-03-29 09:37:43 -06004907 u32 ioasc;
4908
4909 ENTER;
4910 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4911 ioarcb = &ipr_cmd->ioarcb;
4912 cmd_pkt = &ioarcb->cmd_pkt;
Wayne Boyera32c0552010-02-19 13:23:36 -08004913
4914 if (ipr_cmd->ioa_cfg->sis64) {
4915 regs = &ipr_cmd->i.ata_ioadl.regs;
4916 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4917 } else
4918 regs = &ioarcb->u.add_data.u.regs;
Brian Kingc6513092006-03-29 09:37:43 -06004919
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004920 ioarcb->res_handle = res->res_handle;
Brian Kingc6513092006-03-29 09:37:43 -06004921 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4922 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
Brian King35a39692006-09-25 12:39:20 -05004923 if (ipr_is_gata(res)) {
4924 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
Wayne Boyera32c0552010-02-19 13:23:36 -08004925 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
Brian King35a39692006-09-25 12:39:20 -05004926 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4927 }
Brian Kingc6513092006-03-29 09:37:43 -06004928
4929 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
Wayne Boyer96d21f02010-05-10 09:13:27 -07004930 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06004931 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Wayne Boyer96d21f02010-05-10 09:13:27 -07004932 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4933 if (ipr_cmd->ioa_cfg->sis64)
4934 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4935 sizeof(struct ipr_ioasa_gata));
4936 else
4937 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4938 sizeof(struct ipr_ioasa_gata));
4939 }
Brian Kingc6513092006-03-29 09:37:43 -06004940
4941 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03004942 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
Brian Kingc6513092006-03-29 09:37:43 -06004943}
4944
4945/**
Brian King35a39692006-09-25 12:39:20 -05004946 * ipr_sata_reset - Reset the SATA port
Tejun Heocc0680a2007-08-06 18:36:23 +09004947 * @link: SATA link to reset
Brian King35a39692006-09-25 12:39:20 -05004948 * @classes: class of the attached device
4949 *
Tejun Heocc0680a2007-08-06 18:36:23 +09004950 * This function issues a SATA phy reset to the affected ATA link.
Brian King35a39692006-09-25 12:39:20 -05004951 *
4952 * Return value:
4953 * 0 on success / non-zero on failure
4954 **/
Tejun Heocc0680a2007-08-06 18:36:23 +09004955static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
Andrew Morton120bda32007-03-26 02:17:43 -07004956 unsigned long deadline)
Brian King35a39692006-09-25 12:39:20 -05004957{
Tejun Heocc0680a2007-08-06 18:36:23 +09004958 struct ipr_sata_port *sata_port = link->ap->private_data;
Brian King35a39692006-09-25 12:39:20 -05004959 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4960 struct ipr_resource_entry *res;
4961 unsigned long lock_flags = 0;
4962 int rc = -ENXIO;
4963
4964 ENTER;
4965 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03004966 while (ioa_cfg->in_reset_reload) {
Brian King73d98ff2006-11-21 10:27:58 -06004967 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4968 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4969 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4970 }
4971
Brian King35a39692006-09-25 12:39:20 -05004972 res = sata_port->res;
4973 if (res) {
4974 rc = ipr_device_reset(ioa_cfg, res);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004975 *classes = res->ata_class;
Brian King35a39692006-09-25 12:39:20 -05004976 }
4977
4978 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4979 LEAVE;
4980 return rc;
4981}
4982
4983/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004984 * ipr_eh_dev_reset - Reset the device
4985 * @scsi_cmd: scsi command struct
4986 *
4987 * This function issues a device reset to the affected device.
4988 * A LUN reset will be sent to the device first. If that does
4989 * not work, a target reset will be sent.
4990 *
4991 * Return value:
4992 * SUCCESS / FAILED
4993 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03004994static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004995{
4996 struct ipr_cmnd *ipr_cmd;
4997 struct ipr_ioa_cfg *ioa_cfg;
4998 struct ipr_resource_entry *res;
Brian King35a39692006-09-25 12:39:20 -05004999 struct ata_port *ap;
5000 int rc = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005001 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005002
5003 ENTER;
5004 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5005 res = scsi_cmd->device->hostdata;
5006
brking@us.ibm.comeeb883072005-11-01 17:02:29 -06005007 if (!res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005008 return FAILED;
5009
5010 /*
5011 * If we are currently going through reset/reload, return failed. This will force the
5012 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5013 * reset to complete
5014 */
5015 if (ioa_cfg->in_reset_reload)
5016 return FAILED;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005017 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005018 return FAILED;
5019
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005020 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005021 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005022 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5023 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5024 if (ipr_cmd->scsi_cmd)
5025 ipr_cmd->done = ipr_scsi_eh_done;
5026 if (ipr_cmd->qc)
5027 ipr_cmd->done = ipr_sata_eh_done;
5028 if (ipr_cmd->qc &&
5029 !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5030 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5031 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5032 }
Brian King7402ece2006-11-21 10:28:23 -06005033 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005034 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005035 spin_unlock(&hrrq->_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005036 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005037 res->resetting_device = 1;
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005038 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
Brian King35a39692006-09-25 12:39:20 -05005039
5040 if (ipr_is_gata(res) && res->sata_port) {
5041 ap = res->sata_port->ap;
5042 spin_unlock_irq(scsi_cmd->device->host->host_lock);
Tejun Heoa1efdab2008-03-25 12:22:50 +09005043 ata_std_error_handler(ap);
Brian King35a39692006-09-25 12:39:20 -05005044 spin_lock_irq(scsi_cmd->device->host->host_lock);
Brian King5af23d22007-05-09 15:36:35 -05005045
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005046 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005047 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005048 list_for_each_entry(ipr_cmd,
5049 &hrrq->hrrq_pending_q, queue) {
5050 if (ipr_cmd->ioarcb.res_handle ==
5051 res->res_handle) {
5052 rc = -EIO;
5053 break;
5054 }
Brian King5af23d22007-05-09 15:36:35 -05005055 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005056 spin_unlock(&hrrq->_lock);
Brian King5af23d22007-05-09 15:36:35 -05005057 }
Brian King35a39692006-09-25 12:39:20 -05005058 } else
5059 rc = ipr_device_reset(ioa_cfg, res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005060 res->resetting_device = 0;
5061
Linus Torvalds1da177e2005-04-16 15:20:36 -07005062 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005063 return rc ? FAILED : SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005064}
5065
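/**
 * ipr_eh_dev_reset - Reset the device
 * @cmd: scsi command struct
 *
 * This function grabs the host lock, issues the reset through
 * __ipr_eh_dev_reset, and then waits for any outstanding commands to the
 * device to complete.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/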
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005066static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04005067{
5068 int rc;
Brian King7f8e9be2014-10-30 17:27:10 -05005069 struct ipr_ioa_cfg *ioa_cfg;
5070
5071 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04005072
5073 spin_lock_irq(cmd->device->host->host_lock);
5074 rc = __ipr_eh_dev_reset(cmd);
5075 spin_unlock_irq(cmd->device->host->host_lock);
5076
Brian King7f8e9be2014-10-30 17:27:10 -05005077 if (rc == SUCCESS)
5078 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5079
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04005080 return rc;
5081}
5082
Linus Torvalds1da177e2005-04-16 15:20:36 -07005083/**
5084 * ipr_bus_reset_done - Op done function for bus reset.
5085 * @ipr_cmd: ipr command struct
5086 *
5087 * This function is the op done function for a bus reset
5088 *
5089 * Return value:
5090 * none
5091 **/
5092static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5093{
5094 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5095 struct ipr_resource_entry *res;
5096
5097 ENTER;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005098 if (!ioa_cfg->sis64)
5099 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5100 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5101 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5102 break;
5103 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005104 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005105
5106 /*
5107 * If abort has not completed, indicate the reset has, else call the
5108 * abort's done function to wake the sleeping eh thread
5109 */
5110 if (ipr_cmd->sibling->sibling)
5111 ipr_cmd->sibling->sibling = NULL;
5112 else
5113 ipr_cmd->sibling->done(ipr_cmd->sibling);
5114
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005115 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005116 LEAVE;
5117}
5118
5119/**
5120 * ipr_abort_timeout - An abort task has timed out
5121 * @ipr_cmd: ipr command struct
5122 *
5123 * This function handles when an abort task times out. If this
5124 * happens we issue a bus reset since we have resources tied
5125 * up that must be freed before returning to the midlayer.
5126 *
5127 * Return value:
5128 * none
5129 **/
5130static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5131{
5132 struct ipr_cmnd *reset_cmd;
5133 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5134 struct ipr_cmd_pkt *cmd_pkt;
5135 unsigned long lock_flags = 0;
5136
5137 ENTER;
5138 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5139 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5140 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5141 return;
5142 }
5143
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005144 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005145 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5146 ipr_cmd->sibling = reset_cmd;
5147 reset_cmd->sibling = ipr_cmd;
5148 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5149 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5150 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5151 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5152 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5153
5154 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5155 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5156 LEAVE;
5157}
5158
5159/**
5160 * ipr_cancel_op - Cancel specified op
5161 * @scsi_cmd: scsi command struct
5162 *
5163 * This function cancels specified op.
5164 *
5165 * Return value:
5166 * SUCCESS / FAILED
5167 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005168static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005169{
5170 struct ipr_cmnd *ipr_cmd;
5171 struct ipr_ioa_cfg *ioa_cfg;
5172 struct ipr_resource_entry *res;
5173 struct ipr_cmd_pkt *cmd_pkt;
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005174 u32 ioasc, int_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005175 int op_found = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005176 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005177
5178 ENTER;
5179 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5180 res = scsi_cmd->device->hostdata;
5181
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005182 /* If we are currently going through reset/reload, return failed.
5183 * This will force the mid-layer to call ipr_eh_host_reset,
5184 * which will then go to sleep and wait for the reset to complete
5185 */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005186 if (ioa_cfg->in_reset_reload ||
5187 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005188 return FAILED;
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005189 if (!res)
5190 return FAILED;
5191
5192 /*
 5193	 * If we are aborting a timed-out op, chances are that the timeout was caused
 5194	 * by an EEH error that has not been detected yet. In such cases, reading a register will
5195 * trigger the EEH recovery infrastructure.
5196 */
5197 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5198
5199 if (!ipr_is_gscsi(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005200 return FAILED;
5201
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005202 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005203 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005204 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5205 if (ipr_cmd->scsi_cmd == scsi_cmd) {
5206 ipr_cmd->done = ipr_scsi_eh_done;
5207 op_found = 1;
5208 break;
5209 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005210 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005211 spin_unlock(&hrrq->_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005212 }
5213
5214 if (!op_found)
5215 return SUCCESS;
5216
5217 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005218 ipr_cmd->ioarcb.res_handle = res->res_handle;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005219 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5220 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5221 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5222 ipr_cmd->u.sdev = scsi_cmd->device;
5223
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005224 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5225 scsi_cmd->cmnd[0]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005226 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
Wayne Boyer96d21f02010-05-10 09:13:27 -07005227 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005228
5229 /*
5230 * If the abort task timed out and we sent a bus reset, we will get
5231 * one the following responses to the abort
5232 */
5233 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5234 ioasc = 0;
5235 ipr_trace;
5236 }
5237
Kleber Sacilotto de Souzac4ee22a2013-03-14 13:52:23 -05005238 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005239 if (!ipr_is_naca_model(res))
5240 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005241
5242 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005243 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005244}
5245
5246/**
5247 * ipr_eh_abort - Abort a single op
5248 * @scsi_cmd: scsi command struct
5249 *
5250 * Return value:
5251 * SUCCESS / FAILED
5252 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005253static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005254{
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005255 unsigned long flags;
5256 int rc;
Brian King7f8e9be2014-10-30 17:27:10 -05005257 struct ipr_ioa_cfg *ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005258
5259 ENTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005260
Brian King7f8e9be2014-10-30 17:27:10 -05005261 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5262
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005263 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5264 rc = ipr_cancel_op(scsi_cmd);
5265 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005266
Brian King7f8e9be2014-10-30 17:27:10 -05005267 if (rc == SUCCESS)
5268 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005269 LEAVE;
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005270 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005271}
5272
5273/**
5274 * ipr_handle_other_interrupt - Handle "other" interrupts
5275 * @ioa_cfg: ioa config struct
Wayne Boyer634651f2010-08-27 14:45:07 -07005276 * @int_reg: interrupt register
Linus Torvalds1da177e2005-04-16 15:20:36 -07005277 *
5278 * Return value:
5279 * IRQ_NONE / IRQ_HANDLED
5280 **/
Wayne Boyer634651f2010-08-27 14:45:07 -07005281static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
Wayne Boyer630ad8312011-04-07 12:12:30 -07005282 u32 int_reg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005283{
5284 irqreturn_t rc = IRQ_HANDLED;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005285 u32 int_mask_reg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005286
Wayne Boyer7dacb642011-04-12 10:29:02 -07005287 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5288 int_reg &= ~int_mask_reg;
5289
5290 /* If an interrupt on the adapter did not occur, ignore it.
5291 * Or in the case of SIS 64, check for a stage change interrupt.
5292 */
5293 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5294 if (ioa_cfg->sis64) {
5295 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5296 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5297 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5298
5299 /* clear stage change */
5300 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5301 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5302 list_del(&ioa_cfg->reset_cmd->queue);
5303 del_timer(&ioa_cfg->reset_cmd->timer);
5304 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5305 return IRQ_HANDLED;
5306 }
5307 }
5308
5309 return IRQ_NONE;
5310 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005311
5312 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5313 /* Mask the interrupt */
5314 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5315
5316 /* Clear the interrupt */
5317 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5318 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5319
5320 list_del(&ioa_cfg->reset_cmd->queue);
5321 del_timer(&ioa_cfg->reset_cmd->timer);
5322 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
Wayne Boyer7dacb642011-04-12 10:29:02 -07005323 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
Brian King7dd21302012-03-14 21:20:08 -05005324 if (ioa_cfg->clear_isr) {
5325 if (ipr_debug && printk_ratelimit())
5326 dev_err(&ioa_cfg->pdev->dev,
5327 "Spurious interrupt detected. 0x%08X\n", int_reg);
5328 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5329 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5330 return IRQ_NONE;
5331 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005332 } else {
5333 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5334 ioa_cfg->ioa_unit_checked = 1;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005335 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5336 dev_err(&ioa_cfg->pdev->dev,
5337 "No Host RRQ. 0x%08X\n", int_reg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005338 else
5339 dev_err(&ioa_cfg->pdev->dev,
5340 "Permanent IOA failure. 0x%08X\n", int_reg);
5341
5342 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5343 ioa_cfg->sdt_state = GET_DUMP;
5344
5345 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5346 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5347 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005348
Linus Torvalds1da177e2005-04-16 15:20:36 -07005349 return rc;
5350}
5351
5352/**
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005353 * ipr_isr_eh - Interrupt service routine error handler
5354 * @ioa_cfg: ioa config struct
5355 * @msg: message to log
5356 *
5357 * Return value:
5358 * none
5359 **/
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005360static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005361{
5362 ioa_cfg->errors_logged++;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005363 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005364
5365 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5366 ioa_cfg->sdt_state = GET_DUMP;
5367
5368 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5369}
5370
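/**
 * ipr_process_hrrq - Process responses from a host request response queue
 * @hrr_queue: host request response queue to drain
 * @budget: maximum number of responses to process, or -1 for no limit
 * @doneq: list that completed commands are moved onto
 *
 * The queue is consumed with a toggle-bit protocol: each response handle
 * carries a toggle bit, and an entry is treated as valid only while its
 * IPR_HRRQ_TOGGLE_BIT matches hrr_queue->toggle_bit.  When the host wraps
 * from hrrq_end back to hrrq_start it flips the expected value, so stale
 * entries from the previous pass through the queue no longer match.
 *
 * Return value:
 *	number of responses processed
 **/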
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005371static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005372 struct list_head *doneq)
5373{
5374 u32 ioasc;
5375 u16 cmd_index;
5376 struct ipr_cmnd *ipr_cmd;
5377 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5378 int num_hrrq = 0;
5379
5380 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005381 if (!hrr_queue->allow_interrupts)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005382 return 0;
5383
5384 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5385 hrr_queue->toggle_bit) {
5386
5387 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5388 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5389 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5390
5391 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5392 cmd_index < hrr_queue->min_cmd_id)) {
5393 ipr_isr_eh(ioa_cfg,
5394 "Invalid response handle from IOA: ",
5395 cmd_index);
5396 break;
5397 }
5398
5399 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5400 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5401
5402 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5403
5404 list_move_tail(&ipr_cmd->queue, doneq);
5405
5406 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5407 hrr_queue->hrrq_curr++;
5408 } else {
5409 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5410 hrr_queue->toggle_bit ^= 1u;
5411 }
5412 num_hrrq++;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005413 if (budget > 0 && num_hrrq >= budget)
5414 break;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005415 }
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005416
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005417 return num_hrrq;
5418}
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005419
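/**
 * ipr_iopoll - blk_iopoll callback for polled HRR queue completion
 * @iop: blk_iopoll structure embedded in the HRR queue
 * @budget: maximum number of completions to process in this poll
 *
 * Return value:
 *	number of completions processed
 **/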
5420static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5421{
5422 struct ipr_ioa_cfg *ioa_cfg;
5423 struct ipr_hrr_queue *hrrq;
5424 struct ipr_cmnd *ipr_cmd, *temp;
5425 unsigned long hrrq_flags;
5426 int completed_ops;
5427 LIST_HEAD(doneq);
5428
5429 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5430 ioa_cfg = hrrq->ioa_cfg;
5431
5432 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5433 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5434
5435 if (completed_ops < budget)
5436 blk_iopoll_complete(iop);
5437 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5438
5439 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5440 list_del(&ipr_cmd->queue);
5441 del_timer(&ipr_cmd->timer);
5442 ipr_cmd->fast_done(ipr_cmd);
5443 }
5444
5445 return completed_ops;
5446}
5447
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005448/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005449 * ipr_isr - Interrupt service routine
5450 * @irq: irq number
5451 * @devp: pointer to ioa config struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07005452 *
5453 * Return value:
5454 * IRQ_NONE / IRQ_HANDLED
5455 **/
David Howells7d12e782006-10-05 14:55:46 +01005456static irqreturn_t ipr_isr(int irq, void *devp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005457{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005458 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5459 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005460 unsigned long hrrq_flags = 0;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005461 u32 int_reg = 0;
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005462 int num_hrrq = 0;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005463 int irq_none = 0;
Brian King172cd6e2012-07-17 08:14:40 -05005464 struct ipr_cmnd *ipr_cmd, *temp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005465 irqreturn_t rc = IRQ_NONE;
Brian King172cd6e2012-07-17 08:14:40 -05005466 LIST_HEAD(doneq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005467
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005468 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005469 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005470 if (!hrrq->allow_interrupts) {
5471 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005472 return IRQ_NONE;
5473 }
5474
Linus Torvalds1da177e2005-04-16 15:20:36 -07005475 while (1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005476 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5477 rc = IRQ_HANDLED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005478
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005479 if (!ioa_cfg->clear_isr)
5480 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005481
Linus Torvalds1da177e2005-04-16 15:20:36 -07005482 /* Clear the PCI interrupt */
Wayne Boyera5442ba2011-05-17 09:18:53 -07005483 num_hrrq = 0;
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005484 do {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005485 writel(IPR_PCII_HRRQ_UPDATED,
5486 ioa_cfg->regs.clr_interrupt_reg32);
Wayne Boyer7dacb642011-04-12 10:29:02 -07005487 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005488 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005489 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005490
Wayne Boyer7dacb642011-04-12 10:29:02 -07005491 } else if (rc == IRQ_NONE && irq_none == 0) {
5492 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5493 irq_none++;
Wayne Boyera5442ba2011-05-17 09:18:53 -07005494 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5495 int_reg & IPR_PCII_HRRQ_UPDATED) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005496 ipr_isr_eh(ioa_cfg,
5497 "Error clearing HRRQ: ", num_hrrq);
Brian King172cd6e2012-07-17 08:14:40 -05005498 rc = IRQ_HANDLED;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005499 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005500 } else
5501 break;
5502 }
5503
5504 if (unlikely(rc == IRQ_NONE))
Wayne Boyer634651f2010-08-27 14:45:07 -07005505 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005506
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005507 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King172cd6e2012-07-17 08:14:40 -05005508 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5509 list_del(&ipr_cmd->queue);
5510 del_timer(&ipr_cmd->timer);
5511 ipr_cmd->fast_done(ipr_cmd);
5512 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005513 return rc;
5514}
Brian King172cd6e2012-07-17 08:14:40 -05005515
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005516/**
5517 * ipr_isr_mhrrq - Interrupt service routine
5518 * @irq: irq number
5519 * @devp: pointer to ioa config struct
5520 *
5521 * Return value:
5522 * IRQ_NONE / IRQ_HANDLED
5523 **/
5524static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5525{
5526 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005527 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005528 unsigned long hrrq_flags = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005529 struct ipr_cmnd *ipr_cmd, *temp;
5530 irqreturn_t rc = IRQ_NONE;
5531 LIST_HEAD(doneq);
5532
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005533 spin_lock_irqsave(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005534
5535 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005536 if (!hrrq->allow_interrupts) {
5537 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005538 return IRQ_NONE;
5539 }
5540
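	/*
	 * With blk_iopoll enabled and multiple MSI-X vectors in use, only
	 * check the toggle bit here and defer completion processing to the
	 * iopoll handler; otherwise process the HRRQ inline.
	 */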
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005541 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
5542 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5543 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5544 hrrq->toggle_bit) {
5545 if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5546 blk_iopoll_sched(&hrrq->iopoll);
5547 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5548 return IRQ_HANDLED;
5549 }
5550 } else {
5551 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5552 hrrq->toggle_bit)
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005554 if (ipr_process_hrrq(hrrq, -1, &doneq))
5555 rc = IRQ_HANDLED;
5556 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005557
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005558 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005559
5560 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5561 list_del(&ipr_cmd->queue);
5562 del_timer(&ipr_cmd->timer);
5563 ipr_cmd->fast_done(ipr_cmd);
5564 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005565 return rc;
5566}
5567
5568/**
Wayne Boyera32c0552010-02-19 13:23:36 -08005569 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07005570 * @ioa_cfg: ioa config struct
5571 * @ipr_cmd: ipr command struct
5572 *
5573 * Return value:
5574 * 0 on success / -1 on failure
5575 **/
Wayne Boyera32c0552010-02-19 13:23:36 -08005576static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5577 struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005578{
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005579 int i, nseg;
5580 struct scatterlist *sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005581 u32 length;
5582 u32 ioadl_flags = 0;
5583 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5584 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08005585 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005586
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005587 length = scsi_bufflen(scsi_cmd);
5588 if (!length)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005589 return 0;
5590
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005591 nseg = scsi_dma_map(scsi_cmd);
5592 if (nseg < 0) {
Anton Blanchard51f52a42011-05-09 10:07:40 +10005593 if (printk_ratelimit())
5594 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005595 return -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005596 }
5597
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005598 ipr_cmd->dma_use_sg = nseg;
5599
Wayne Boyer438b0332010-05-10 09:13:00 -07005600 ioarcb->data_transfer_length = cpu_to_be32(length);
Wayne Boyerb8803b12010-05-14 08:55:13 -07005601 ioarcb->ioadl_len =
5602 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
Wayne Boyer438b0332010-05-10 09:13:00 -07005603
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005604 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5605 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5606 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08005607 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5608 ioadl_flags = IPR_IOADL_FLAGS_READ;
5609
5610 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5611 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5612 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5613 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5614 }
5615
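	/* Flag the final descriptor so the adapter knows where the list ends */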
5616 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5617 return 0;
5618}
5619
5620/**
5621 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5622 * @ioa_cfg: ioa config struct
5623 * @ipr_cmd: ipr command struct
5624 *
5625 * Return value:
5626 * 0 on success / -1 on failure
5627 **/
5628static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5629 struct ipr_cmnd *ipr_cmd)
5630{
5631 int i, nseg;
5632 struct scatterlist *sg;
5633 u32 length;
5634 u32 ioadl_flags = 0;
5635 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5636 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5637 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5638
5639 length = scsi_bufflen(scsi_cmd);
5640 if (!length)
5641 return 0;
5642
5643 nseg = scsi_dma_map(scsi_cmd);
5644 if (nseg < 0) {
5645 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5646 return -1;
5647 }
5648
5649 ipr_cmd->dma_use_sg = nseg;
5650
5651 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5652 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5653 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5654 ioarcb->data_transfer_length = cpu_to_be32(length);
5655 ioarcb->ioadl_len =
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005656 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5657 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5658 ioadl_flags = IPR_IOADL_FLAGS_READ;
5659 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5660 ioarcb->read_ioadl_len =
5661 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5662 }
5663
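	/*
	 * A small S/G list fits in the IOARCB's add_data area, saving the
	 * adapter a separate fetch of the IOADL.
	 */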
Wayne Boyera32c0552010-02-19 13:23:36 -08005664 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5665 ioadl = ioarcb->u.add_data.u.ioadl;
5666 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5667 offsetof(struct ipr_ioarcb, u.add_data));
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005668 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5669 }
5670
5671 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5672 ioadl[i].flags_and_data_len =
5673 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5674 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5675 }
5676
5677 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5678 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005679}
5680
5681/**
5682 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5683 * @scsi_cmd: scsi command struct
5684 *
5685 * Return value:
5686 * task attributes
5687 **/
5688static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5689{
5690 u8 tag[2];
5691 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5692
5693 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5694 switch (tag[0]) {
5695 case MSG_SIMPLE_TAG:
5696 rc = IPR_FLAGS_LO_SIMPLE_TASK;
5697 break;
5698 case MSG_HEAD_TAG:
5699 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5700 break;
5701 case MSG_ORDERED_TAG:
5702 rc = IPR_FLAGS_LO_ORDERED_TASK;
5703 break;
5704 		}
5705 }
5706
5707 return rc;
5708}
5709
5710/**
5711 * ipr_erp_done - Process completion of ERP for a device
5712 * @ipr_cmd: ipr command struct
5713 *
5714 * This function copies the sense buffer into the scsi_cmd
5715 * struct and calls scsi_done to complete the command.
5716 *
5717 * Return value:
5718 * nothing
5719 **/
5720static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5721{
5722 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5723 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005724 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005725
5726 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5727 scsi_cmd->result |= (DID_ERROR << 16);
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005728 scmd_printk(KERN_ERR, scsi_cmd,
5729 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005730 } else {
5731 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5732 SCSI_SENSE_BUFFERSIZE);
5733 }
5734
5735 if (res) {
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005736 if (!ipr_is_naca_model(res))
5737 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005738 res->in_erp = 0;
5739 }
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005740 scsi_dma_unmap(ipr_cmd->scsi_cmd);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005741 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005742 scsi_cmd->scsi_done(scsi_cmd);
5743}
5744
5745/**
5746 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5747 * @ipr_cmd: ipr command struct
5748 *
5749 * Return value:
5750 * none
5751 **/
5752static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5753{
Brian King51b1c7e2007-03-29 12:43:50 -05005754 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005755 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
Wayne Boyera32c0552010-02-19 13:23:36 -08005756 dma_addr_t dma_addr = ipr_cmd->dma_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005757
5758 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
Wayne Boyera32c0552010-02-19 13:23:36 -08005759 ioarcb->data_transfer_length = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005760 ioarcb->read_data_transfer_length = 0;
Wayne Boyera32c0552010-02-19 13:23:36 -08005761 ioarcb->ioadl_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005762 ioarcb->read_ioadl_len = 0;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005763 ioasa->hdr.ioasc = 0;
5764 ioasa->hdr.residual_data_len = 0;
Wayne Boyera32c0552010-02-19 13:23:36 -08005765
5766 if (ipr_cmd->ioa_cfg->sis64)
5767 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5768 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5769 else {
5770 ioarcb->write_ioadl_addr =
5771 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5772 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5773 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005774}
5775
5776/**
5777 * ipr_erp_request_sense - Send request sense to a device
5778 * @ipr_cmd: ipr command struct
5779 *
5780 * This function sends a request sense to a device as a result
5781 * of a check condition.
5782 *
5783 * Return value:
5784 * nothing
5785 **/
5786static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5787{
5788 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005789 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005790
5791 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5792 ipr_erp_done(ipr_cmd);
5793 return;
5794 }
5795
5796 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5797
5798 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5799 cmd_pkt->cdb[0] = REQUEST_SENSE;
5800 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5801 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5802 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5803 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5804
Wayne Boyera32c0552010-02-19 13:23:36 -08005805 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5806 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005807
5808 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5809 IPR_REQUEST_SENSE_TIMEOUT * 2);
5810}
5811
5812/**
5813 * ipr_erp_cancel_all - Send cancel all to a device
5814 * @ipr_cmd: ipr command struct
5815 *
5816 * This function sends a cancel all to a device to clear the
5817 * queue. If we are running TCQ on the device, QERR is set to 1,
5818 * which means all outstanding ops have been dropped on the floor.
5819 * Cancel all will return them to us.
5820 *
5821 * Return value:
5822 * nothing
5823 **/
5824static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5825{
5826 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5827 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5828 struct ipr_cmd_pkt *cmd_pkt;
5829
5830 res->in_erp = 1;
5831
5832 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5833
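	/* Untagged devices have no queue to cancel; go straight to request sense */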
5834 if (!scsi_get_tag_type(scsi_cmd->device)) {
5835 ipr_erp_request_sense(ipr_cmd);
5836 return;
5837 }
5838
5839 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5840 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5841 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5842
5843 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5844 IPR_CANCEL_ALL_TIMEOUT);
5845}
5846
5847/**
5848 * ipr_dump_ioasa - Dump contents of IOASA
5849 * @ioa_cfg: ioa config struct
5850 * @ipr_cmd: ipr command struct
Brian Kingfe964d02006-03-29 09:37:29 -06005851 * @res: resource entry struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07005852 *
5853 * This function is invoked by the interrupt handler when ops
5854 * fail. It will log the IOASA if appropriate. Only called
5855 * for GPDD ops.
5856 *
5857 * Return value:
5858 * none
5859 **/
5860static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
Brian Kingfe964d02006-03-29 09:37:29 -06005861 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005862{
5863 int i;
5864 u16 data_len;
Brian Kingb0692dd2007-03-29 12:43:09 -05005865 u32 ioasc, fd_ioasc;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005866 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005867 __be32 *ioasa_data = (__be32 *)ioasa;
5868 int error_index;
5869
Wayne Boyer96d21f02010-05-10 09:13:27 -07005870 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5871 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005872
5873 if (0 == ioasc)
5874 return;
5875
5876 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5877 return;
5878
Brian Kingb0692dd2007-03-29 12:43:09 -05005879 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5880 error_index = ipr_get_error(fd_ioasc);
5881 else
5882 error_index = ipr_get_error(ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005883
5884 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5885 /* Don't log an error if the IOA already logged one */
Wayne Boyer96d21f02010-05-10 09:13:27 -07005886 if (ioasa->hdr.ilid != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005887 return;
5888
Brian Kingcc9bd5d2007-03-29 12:43:01 -05005889 if (!ipr_is_gscsi(res))
5890 return;
5891
Linus Torvalds1da177e2005-04-16 15:20:36 -07005892 if (ipr_error_table[error_index].log_ioasa == 0)
5893 return;
5894 }
5895
Brian Kingfe964d02006-03-29 09:37:29 -06005896 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005897
Wayne Boyer96d21f02010-05-10 09:13:27 -07005898 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5899 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5900 data_len = sizeof(struct ipr_ioasa64);
5901 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005902 data_len = sizeof(struct ipr_ioasa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005903
5904 ipr_err("IOASA Dump:\n");
5905
5906 for (i = 0; i < data_len / 4; i += 4) {
5907 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5908 be32_to_cpu(ioasa_data[i]),
5909 be32_to_cpu(ioasa_data[i+1]),
5910 be32_to_cpu(ioasa_data[i+2]),
5911 be32_to_cpu(ioasa_data[i+3]));
5912 }
5913}
5914
5915/**
5916 * ipr_gen_sense - Generate SCSI sense data from an IOASA
5917 * @ipr_cmd:	ipr command struct
5919 *
5920 * Return value:
5921 * none
5922 **/
5923static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5924{
5925 u32 failing_lba;
5926 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5927 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005928 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5929 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005930
5931 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5932
5933 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5934 return;
5935
5936 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5937
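	/*
	 * Use descriptor-format sense (0x72) when the failing LBA needs more
	 * than 32 bits; otherwise build fixed-format sense data (0x70).
	 */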
5938 if (ipr_is_vset_device(res) &&
5939 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5940 ioasa->u.vset.failing_lba_hi != 0) {
5941 sense_buf[0] = 0x72;
5942 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5943 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5944 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5945
5946 sense_buf[7] = 12;
5947 sense_buf[8] = 0;
5948 sense_buf[9] = 0x0A;
5949 sense_buf[10] = 0x80;
5950
5951 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5952
5953 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5954 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5955 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5956 sense_buf[15] = failing_lba & 0x000000ff;
5957
5958 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5959
5960 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5961 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5962 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5963 sense_buf[19] = failing_lba & 0x000000ff;
5964 } else {
5965 sense_buf[0] = 0x70;
5966 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5967 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5968 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5969
5970 /* Illegal request */
5971 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
Wayne Boyer96d21f02010-05-10 09:13:27 -07005972 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005973 sense_buf[7] = 10; /* additional length */
5974
5975 /* IOARCB was in error */
5976 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5977 sense_buf[15] = 0xC0;
5978 else /* Parameter data was invalid */
5979 sense_buf[15] = 0x80;
5980
5981 sense_buf[16] =
5982 ((IPR_FIELD_POINTER_MASK &
Wayne Boyer96d21f02010-05-10 09:13:27 -07005983 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005984 sense_buf[17] =
5985 (IPR_FIELD_POINTER_MASK &
Wayne Boyer96d21f02010-05-10 09:13:27 -07005986 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005987 } else {
5988 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5989 if (ipr_is_vset_device(res))
5990 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5991 else
5992 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5993
5994 sense_buf[0] |= 0x80; /* Or in the Valid bit */
5995 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5996 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5997 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5998 sense_buf[6] = failing_lba & 0x000000ff;
5999 }
6000
6001 sense_buf[7] = 6; /* additional length */
6002 }
6003 }
6004}
6005
6006/**
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006007 * ipr_get_autosense - Copy autosense data to sense buffer
6008 * @ipr_cmd: ipr command struct
6009 *
6010 * This function copies the autosense buffer to the buffer
6011 * in the scsi_cmd, if there is autosense available.
6012 *
6013 * Return value:
6014 * 1 if autosense was available / 0 if not
6015 **/
6016static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6017{
Wayne Boyer96d21f02010-05-10 09:13:27 -07006018 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6019 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006020
Wayne Boyer96d21f02010-05-10 09:13:27 -07006021 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006022 return 0;
6023
Wayne Boyer96d21f02010-05-10 09:13:27 -07006024 if (ipr_cmd->ioa_cfg->sis64)
6025 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6026 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6027 SCSI_SENSE_BUFFERSIZE));
6028 else
6029 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6030 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6031 SCSI_SENSE_BUFFERSIZE));
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006032 return 1;
6033}
6034
6035/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006036 * ipr_erp_start - Process an error response for a SCSI op
6037 * @ioa_cfg: ioa config struct
6038 * @ipr_cmd: ipr command struct
6039 *
6040 * This function determines whether or not to initiate ERP
6041 * on the affected device.
6042 *
6043 * Return value:
6044 * nothing
6045 **/
6046static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6047 struct ipr_cmnd *ipr_cmd)
6048{
6049 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6050 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006051 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian King8a048992007-04-26 16:00:10 -05006052 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006053
6054 if (!res) {
6055 ipr_scsi_eh_done(ipr_cmd);
6056 return;
6057 }
6058
Brian King8a048992007-04-26 16:00:10 -05006059 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006060 ipr_gen_sense(ipr_cmd);
6061
Brian Kingcc9bd5d2007-03-29 12:43:01 -05006062 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6063
Brian King8a048992007-04-26 16:00:10 -05006064 switch (masked_ioasc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006065 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006066 if (ipr_is_naca_model(res))
6067 scsi_cmd->result |= (DID_ABORT << 16);
6068 else
6069 scsi_cmd->result |= (DID_IMM_RETRY << 16);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006070 break;
6071 case IPR_IOASC_IR_RESOURCE_HANDLE:
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06006072 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006073 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6074 break;
6075 case IPR_IOASC_HW_SEL_TIMEOUT:
6076 scsi_cmd->result |= (DID_NO_CONNECT << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006077 if (!ipr_is_naca_model(res))
6078 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006079 break;
6080 case IPR_IOASC_SYNC_REQUIRED:
6081 if (!res->in_erp)
6082 res->needs_sync_complete = 1;
6083 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6084 break;
6085 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06006086 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006087 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6088 break;
6089 case IPR_IOASC_BUS_WAS_RESET:
6090 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6091 /*
6092 * Report the bus reset and ask for a retry. The device
6093 * will give CC/UA on the next command.
6094 */
6095 if (!res->resetting_device)
6096 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6097 scsi_cmd->result |= (DID_ERROR << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006098 if (!ipr_is_naca_model(res))
6099 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006100 break;
6101 case IPR_IOASC_HW_DEV_BUS_STATUS:
6102 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6103 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006104 if (!ipr_get_autosense(ipr_cmd)) {
6105 if (!ipr_is_naca_model(res)) {
6106 ipr_erp_cancel_all(ipr_cmd);
6107 return;
6108 }
6109 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006110 }
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006111 if (!ipr_is_naca_model(res))
6112 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006113 break;
6114 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6115 break;
6116 default:
Brian King5b7304f2006-08-02 14:57:51 -05006117 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6118 scsi_cmd->result |= (DID_ERROR << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006119 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006120 res->needs_sync_complete = 1;
6121 break;
6122 }
6123
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09006124 scsi_dma_unmap(ipr_cmd->scsi_cmd);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006125 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006126 scsi_cmd->scsi_done(scsi_cmd);
6127}
6128
6129/**
6130 * ipr_scsi_done - mid-layer done function
6131 * @ipr_cmd: ipr command struct
6132 *
6133 * This function is invoked by the interrupt handler for
6134 * ops generated by the SCSI mid-layer
6135 *
6136 * Return value:
6137 * none
6138 **/
6139static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6140{
6141 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6142 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006143 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006144 unsigned long hrrq_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006145
Wayne Boyer96d21f02010-05-10 09:13:27 -07006146 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006147
6148 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
Brian King172cd6e2012-07-17 08:14:40 -05006149 scsi_dma_unmap(scsi_cmd);
6150
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006151 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006152 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006153 scsi_cmd->scsi_done(scsi_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006154 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
Brian King172cd6e2012-07-17 08:14:40 -05006155 } else {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006156 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006157 ipr_erp_start(ioa_cfg, ipr_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006158 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
Brian King172cd6e2012-07-17 08:14:40 -05006159 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006160}
6161
6162/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006163 * ipr_queuecommand - Queue a mid-layer request
Brian King00bfef22012-07-17 08:13:52 -05006164 * @shost: scsi host struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07006165 * @scsi_cmd: scsi command struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07006166 *
6167 * This function queues a request generated by the mid-layer.
6168 *
6169 * Return value:
6170 * 0 on success
6171 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6172 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6173 **/
Brian King00bfef22012-07-17 08:13:52 -05006174static int ipr_queuecommand(struct Scsi_Host *shost,
6175 struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006176{
6177 struct ipr_ioa_cfg *ioa_cfg;
6178 struct ipr_resource_entry *res;
6179 struct ipr_ioarcb *ioarcb;
6180 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006181 unsigned long hrrq_flags, lock_flags;
Dan Carpenterd12f1572012-07-30 11:18:22 +03006182 int rc;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006183 struct ipr_hrr_queue *hrrq;
6184 int hrrq_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006185
Brian King00bfef22012-07-17 08:13:52 -05006186 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6187
Linus Torvalds1da177e2005-04-16 15:20:36 -07006188 scsi_cmd->result = (DID_OK << 16);
Brian King00bfef22012-07-17 08:13:52 -05006189 res = scsi_cmd->device->hostdata;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006190
6191 if (ipr_is_gata(res) && res->sata_port) {
6192 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6193 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6194 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6195 return rc;
6196 }
6197
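	/*
	 * Pick an HRRQ for this command; ipr_get_hrrq_index() spreads
	 * commands across the configured queues, each with its own lock.
	 */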
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006198 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6199 hrrq = &ioa_cfg->hrrq[hrrq_id];
Linus Torvalds1da177e2005-04-16 15:20:36 -07006200
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006201 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006202 /*
6203 * We are currently blocking all devices due to a host reset
6204 * We have told the host to stop giving us new requests, but
6205 * ERP ops don't count. FIXME
6206 */
Brian Kingbfae7822013-01-30 23:45:08 -06006207 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006208 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006209 return SCSI_MLQUEUE_HOST_BUSY;
Brian King00bfef22012-07-17 08:13:52 -05006210 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006211
6212 /*
6213 * FIXME - Create scsi_set_host_offline interface
6214 * and the ioa_is_dead check can be removed
6215 */
Brian Kingbfae7822013-01-30 23:45:08 -06006216 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006217 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006218 goto err_nodev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006219 }
6220
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006221 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6222 if (ipr_cmd == NULL) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006223 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006224 return SCSI_MLQUEUE_HOST_BUSY;
6225 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006226 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006227
Brian King172cd6e2012-07-17 08:14:40 -05006228 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006229 ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006230
6231 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6232 ipr_cmd->scsi_cmd = scsi_cmd;
Brian King172cd6e2012-07-17 08:14:40 -05006233 ipr_cmd->done = ipr_scsi_eh_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006234
6235 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6236 if (scsi_cmd->underflow == 0)
6237 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6238
Linus Torvalds1da177e2005-04-16 15:20:36 -07006239 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
Wayne Boyerab6c10b2011-03-31 09:56:10 -07006240 if (ipr_is_gscsi(res))
6241 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006242 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6243 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
6244 }
6245
6246 if (scsi_cmd->cmnd[0] >= 0xC0 &&
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006247 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006248 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006249 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006250
Dan Carpenterd12f1572012-07-30 11:18:22 +03006251 if (ioa_cfg->sis64)
6252 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6253 else
6254 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006255
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006256 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6257 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006258 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006259 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006260 if (!rc)
6261 scsi_dma_unmap(scsi_cmd);
Brian Kinga5fb4072012-03-14 21:20:09 -05006262 return SCSI_MLQUEUE_HOST_BUSY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006263 }
6264
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006265 if (unlikely(hrrq->ioa_is_dead)) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006266 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006267 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006268 scsi_dma_unmap(scsi_cmd);
6269 goto err_nodev;
6270 }
6271
6272 ioarcb->res_handle = res->res_handle;
6273 if (res->needs_sync_complete) {
6274 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6275 res->needs_sync_complete = 0;
6276 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006277 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
Brian King00bfef22012-07-17 08:13:52 -05006278 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
Brian Kinga5fb4072012-03-14 21:20:09 -05006279 ipr_send_command(ipr_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006280 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006281 return 0;
6282
6283err_nodev:
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006284 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006285 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6286 scsi_cmd->result = (DID_NO_CONNECT << 16);
6287 scsi_cmd->scsi_done(scsi_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006288 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006289 return 0;
6290}
6291
6292/**
Brian King35a39692006-09-25 12:39:20 -05006293 * ipr_ioctl - IOCTL handler
6294 * @sdev: scsi device struct
6295 * @cmd: IOCTL cmd
6296 * @arg: IOCTL arg
6297 *
6298 * Return value:
6299 * 0 on success / other on failure
6300 **/
Adrian Bunkbd705f22006-11-21 10:28:48 -06006301static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
Brian King35a39692006-09-25 12:39:20 -05006302{
6303 struct ipr_resource_entry *res;
6304
6305 res = (struct ipr_resource_entry *)sdev->hostdata;
Brian King0ce3a7e2008-07-11 13:37:50 -05006306 if (res && ipr_is_gata(res)) {
6307 if (cmd == HDIO_GET_IDENTITY)
6308 return -ENOTTY;
Jeff Garzik94be9a52009-01-16 10:17:09 -05006309 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
Brian King0ce3a7e2008-07-11 13:37:50 -05006310 }
Brian King35a39692006-09-25 12:39:20 -05006311
6312 return -EINVAL;
6313}
6314
6315/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006316 * ipr_ioa_info - Get information about the card/driver
6317 * @host:	scsi host struct
6318 *
6319 * Return value:
6320 * pointer to buffer with description string
6321 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006322static const char *ipr_ioa_info(struct Scsi_Host *host)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006323{
6324 static char buffer[512];
6325 struct ipr_ioa_cfg *ioa_cfg;
6326 unsigned long lock_flags = 0;
6327
6328 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6329
6330 spin_lock_irqsave(host->host_lock, lock_flags);
6331 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6332 spin_unlock_irqrestore(host->host_lock, lock_flags);
6333
6334 return buffer;
6335}
6336
6337static struct scsi_host_template driver_template = {
6338 .module = THIS_MODULE,
6339 .name = "IPR",
6340 .info = ipr_ioa_info,
Brian King35a39692006-09-25 12:39:20 -05006341 .ioctl = ipr_ioctl,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006342 .queuecommand = ipr_queuecommand,
6343 .eh_abort_handler = ipr_eh_abort,
6344 .eh_device_reset_handler = ipr_eh_dev_reset,
6345 .eh_host_reset_handler = ipr_eh_host_reset,
6346 .slave_alloc = ipr_slave_alloc,
6347 .slave_configure = ipr_slave_configure,
6348 .slave_destroy = ipr_slave_destroy,
Brian King35a39692006-09-25 12:39:20 -05006349 .target_alloc = ipr_target_alloc,
6350 .target_destroy = ipr_target_destroy,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006351 .change_queue_depth = ipr_change_queue_depth,
6352 .change_queue_type = ipr_change_queue_type,
6353 .bios_param = ipr_biosparam,
6354 .can_queue = IPR_MAX_COMMANDS,
6355 .this_id = -1,
6356 .sg_tablesize = IPR_MAX_SGLIST,
6357 .max_sectors = IPR_IOA_MAX_SECTORS,
6358 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6359 .use_clustering = ENABLE_CLUSTERING,
6360 .shost_attrs = ipr_ioa_attrs,
6361 .sdev_attrs = ipr_dev_attrs,
Martin K. Petersen8562d022013-10-23 06:25:40 -04006362 .proc_name = IPR_NAME,
6363 .no_write_same = 1,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006364};
6365
Brian King35a39692006-09-25 12:39:20 -05006366/**
6367 * ipr_ata_phy_reset - libata phy_reset handler
6368 * @ap: ata port to reset
6369 *
6370 **/
6371static void ipr_ata_phy_reset(struct ata_port *ap)
6372{
6373 unsigned long flags;
6374 struct ipr_sata_port *sata_port = ap->private_data;
6375 struct ipr_resource_entry *res = sata_port->res;
6376 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6377 int rc;
6378
6379 ENTER;
6380 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006381 while (ioa_cfg->in_reset_reload) {
Brian King35a39692006-09-25 12:39:20 -05006382 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6383 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6384 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6385 }
6386
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006387 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
Brian King35a39692006-09-25 12:39:20 -05006388 goto out_unlock;
6389
6390 rc = ipr_device_reset(ioa_cfg, res);
6391
6392 if (rc) {
Tejun Heo3e4ec342010-05-10 21:41:30 +02006393 ap->link.device[0].class = ATA_DEV_NONE;
Brian King35a39692006-09-25 12:39:20 -05006394 goto out_unlock;
6395 }
6396
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006397 ap->link.device[0].class = res->ata_class;
6398 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
Tejun Heo3e4ec342010-05-10 21:41:30 +02006399 ap->link.device[0].class = ATA_DEV_NONE;
Brian King35a39692006-09-25 12:39:20 -05006400
6401out_unlock:
6402 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6403 LEAVE;
6404}
6405
6406/**
6407 * ipr_ata_post_internal - Cleanup after an internal command
6408 * @qc: ATA queued command
6409 *
6410 * Return value:
6411 * none
6412 **/
6413static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6414{
6415 struct ipr_sata_port *sata_port = qc->ap->private_data;
6416 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6417 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006418 struct ipr_hrr_queue *hrrq;
Brian King35a39692006-09-25 12:39:20 -05006419 unsigned long flags;
6420
6421 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006422 while (ioa_cfg->in_reset_reload) {
Brian King73d98ff2006-11-21 10:27:58 -06006423 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6424 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6425 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6426 }
6427
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006428 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006429 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006430 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6431 if (ipr_cmd->qc == qc) {
6432 ipr_device_reset(ioa_cfg, sata_port->res);
6433 break;
6434 }
Brian King35a39692006-09-25 12:39:20 -05006435 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006436 spin_unlock(&hrrq->_lock);
Brian King35a39692006-09-25 12:39:20 -05006437 }
6438 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6439}
6440
6441/**
Brian King35a39692006-09-25 12:39:20 -05006442 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6443 * @regs: destination
6444 * @tf: source ATA taskfile
6445 *
6446 * Return value:
6447 * none
6448 **/
6449static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6450 struct ata_taskfile *tf)
6451{
6452 regs->feature = tf->feature;
6453 regs->nsect = tf->nsect;
6454 regs->lbal = tf->lbal;
6455 regs->lbam = tf->lbam;
6456 regs->lbah = tf->lbah;
6457 regs->device = tf->device;
6458 regs->command = tf->command;
6459 regs->hob_feature = tf->hob_feature;
6460 regs->hob_nsect = tf->hob_nsect;
6461 regs->hob_lbal = tf->hob_lbal;
6462 regs->hob_lbam = tf->hob_lbam;
6463 regs->hob_lbah = tf->hob_lbah;
6464 regs->ctl = tf->ctl;
6465}
6466
6467/**
6468 * ipr_sata_done - done function for SATA commands
6469 * @ipr_cmd: ipr command struct
6470 *
6471 * This function is invoked by the interrupt handler for
6472 * ops generated by the SCSI mid-layer to SATA devices
6473 *
6474 * Return value:
6475 * none
6476 **/
6477static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6478{
6479 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6480 struct ata_queued_cmd *qc = ipr_cmd->qc;
6481 struct ipr_sata_port *sata_port = qc->ap->private_data;
6482 struct ipr_resource_entry *res = sata_port->res;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006483 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian King35a39692006-09-25 12:39:20 -05006484
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006485 spin_lock(&ipr_cmd->hrrq->_lock);
Wayne Boyer96d21f02010-05-10 09:13:27 -07006486 if (ipr_cmd->ioa_cfg->sis64)
6487 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6488 sizeof(struct ipr_ioasa_gata));
6489 else
6490 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6491 sizeof(struct ipr_ioasa_gata));
Brian King35a39692006-09-25 12:39:20 -05006492 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6493
Wayne Boyer96d21f02010-05-10 09:13:27 -07006494 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006495 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
Brian King35a39692006-09-25 12:39:20 -05006496
6497 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
Wayne Boyer96d21f02010-05-10 09:13:27 -07006498 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
Brian King35a39692006-09-25 12:39:20 -05006499 else
Wayne Boyer96d21f02010-05-10 09:13:27 -07006500 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006501 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006502 spin_unlock(&ipr_cmd->hrrq->_lock);
Brian King35a39692006-09-25 12:39:20 -05006503 ata_qc_complete(qc);
6504}
6505
6506/**
Wayne Boyera32c0552010-02-19 13:23:36 -08006507 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6508 * @ipr_cmd: ipr command struct
6509 * @qc: ATA queued command
6510 *
6511 **/
6512static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6513 struct ata_queued_cmd *qc)
6514{
6515 u32 ioadl_flags = 0;
6516 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
wenxiong@linux.vnet.ibm.com1ac7c262013-04-18 21:32:48 -05006517 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
Wayne Boyera32c0552010-02-19 13:23:36 -08006518 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6519 int len = qc->nbytes;
6520 struct scatterlist *sg;
6521 unsigned int si;
6522 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6523
6524 if (len == 0)
6525 return;
6526
6527 if (qc->dma_dir == DMA_TO_DEVICE) {
6528 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6529 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6530 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6531 ioadl_flags = IPR_IOADL_FLAGS_READ;
6532
6533 ioarcb->data_transfer_length = cpu_to_be32(len);
6534 ioarcb->ioadl_len =
6535 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6536 ioarcb->u.sis64_addr_data.data_ioadl_addr =
wenxiong@linux.vnet.ibm.com1ac7c262013-04-18 21:32:48 -05006537 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
Wayne Boyera32c0552010-02-19 13:23:36 -08006538
6539 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6540 ioadl64->flags = cpu_to_be32(ioadl_flags);
6541 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6542 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6543
6544 last_ioadl64 = ioadl64;
6545 ioadl64++;
6546 }
6547
6548 if (likely(last_ioadl64))
6549 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6550}
6551
6552/**
Brian King35a39692006-09-25 12:39:20 -05006553 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6554 * @ipr_cmd: ipr command struct
6555 * @qc: ATA queued command
6556 *
6557 **/
6558static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6559 struct ata_queued_cmd *qc)
6560{
6561 u32 ioadl_flags = 0;
6562 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08006563 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006564 struct ipr_ioadl_desc *last_ioadl = NULL;
James Bottomleydde20202008-02-19 11:36:56 +01006565 int len = qc->nbytes;
Brian King35a39692006-09-25 12:39:20 -05006566 struct scatterlist *sg;
Tejun Heoff2aeb12007-12-05 16:43:11 +09006567 unsigned int si;
Brian King35a39692006-09-25 12:39:20 -05006568
6569 if (len == 0)
6570 return;
6571
6572 if (qc->dma_dir == DMA_TO_DEVICE) {
6573 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6574 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08006575 ioarcb->data_transfer_length = cpu_to_be32(len);
6576 ioarcb->ioadl_len =
Brian King35a39692006-09-25 12:39:20 -05006577 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6578 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6579 ioadl_flags = IPR_IOADL_FLAGS_READ;
6580 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6581 ioarcb->read_ioadl_len =
6582 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6583 }
6584
Tejun Heoff2aeb12007-12-05 16:43:11 +09006585 for_each_sg(qc->sg, sg, qc->n_elem, si) {
Brian King35a39692006-09-25 12:39:20 -05006586 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6587 ioadl->address = cpu_to_be32(sg_dma_address(sg));
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006588
6589 last_ioadl = ioadl;
6590 ioadl++;
Brian King35a39692006-09-25 12:39:20 -05006591 }
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006592
6593 if (likely(last_ioadl))
6594 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
Brian King35a39692006-09-25 12:39:20 -05006595}
6596
6597/**
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006598 * ipr_qc_defer - Get a free ipr_cmd
6599 * @qc: queued command
6600 *
6601 * Return value:
6602 * 	0 on success / ATA_DEFER_LINK if no command block is available
6603 **/
6604static int ipr_qc_defer(struct ata_queued_cmd *qc)
6605{
6606 struct ata_port *ap = qc->ap;
6607 struct ipr_sata_port *sata_port = ap->private_data;
6608 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6609 struct ipr_cmnd *ipr_cmd;
6610 struct ipr_hrr_queue *hrrq;
6611 int hrrq_id;
6612
6613 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6614 hrrq = &ioa_cfg->hrrq[hrrq_id];
6615
6616 qc->lldd_task = NULL;
6617 spin_lock(&hrrq->_lock);
6618 if (unlikely(hrrq->ioa_is_dead)) {
6619 spin_unlock(&hrrq->_lock);
6620 return 0;
6621 }
6622
6623 if (unlikely(!hrrq->allow_cmds)) {
6624 spin_unlock(&hrrq->_lock);
6625 return ATA_DEFER_LINK;
6626 }
6627
6628 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6629 if (ipr_cmd == NULL) {
6630 spin_unlock(&hrrq->_lock);
6631 return ATA_DEFER_LINK;
6632 }
6633
6634 qc->lldd_task = ipr_cmd;
6635 spin_unlock(&hrrq->_lock);
6636 return 0;
6637}
6638
6639/**
Brian King35a39692006-09-25 12:39:20 -05006640 * ipr_qc_issue - Issue a SATA qc to a device
6641 * @qc: queued command
6642 *
6643 * Return value:
6644 * 	0 on success / AC_ERR_* on failure
6645 **/
6646static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6647{
6648 struct ata_port *ap = qc->ap;
6649 struct ipr_sata_port *sata_port = ap->private_data;
6650 struct ipr_resource_entry *res = sata_port->res;
6651 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6652 struct ipr_cmnd *ipr_cmd;
6653 struct ipr_ioarcb *ioarcb;
6654 struct ipr_ioarcb_ata_regs *regs;
6655
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006656 if (qc->lldd_task == NULL)
6657 ipr_qc_defer(qc);
6658
6659 ipr_cmd = qc->lldd_task;
6660 if (ipr_cmd == NULL)
Brian King0feeed82007-03-29 12:43:43 -05006661 return AC_ERR_SYSTEM;
Brian King35a39692006-09-25 12:39:20 -05006662
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006663 qc->lldd_task = NULL;
6664 spin_lock(&ipr_cmd->hrrq->_lock);
6665 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6666 ipr_cmd->hrrq->ioa_is_dead)) {
6667 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6668 spin_unlock(&ipr_cmd->hrrq->_lock);
6669 return AC_ERR_SYSTEM;
6670 }
6671
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006672 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
Brian King35a39692006-09-25 12:39:20 -05006673 ioarcb = &ipr_cmd->ioarcb;
Brian King35a39692006-09-25 12:39:20 -05006674
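	/*
	 * SIS-64 keeps the ATA taskfile registers in the command block's
	 * ata_ioadl area; SIS-32 embeds them in the IOARCB's add_data.
	 */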
Wayne Boyera32c0552010-02-19 13:23:36 -08006675 if (ioa_cfg->sis64) {
6676 regs = &ipr_cmd->i.ata_ioadl.regs;
6677 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6678 } else
6679 regs = &ioarcb->u.add_data.u.regs;
6680
6681 memset(regs, 0, sizeof(*regs));
6682 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
Brian King35a39692006-09-25 12:39:20 -05006683
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006684 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Brian King35a39692006-09-25 12:39:20 -05006685 ipr_cmd->qc = qc;
6686 ipr_cmd->done = ipr_sata_done;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006687 ipr_cmd->ioarcb.res_handle = res->res_handle;
Brian King35a39692006-09-25 12:39:20 -05006688 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6689 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6690 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
James Bottomleydde20202008-02-19 11:36:56 +01006691 ipr_cmd->dma_use_sg = qc->n_elem;
Brian King35a39692006-09-25 12:39:20 -05006692
Wayne Boyera32c0552010-02-19 13:23:36 -08006693 if (ioa_cfg->sis64)
6694 ipr_build_ata_ioadl64(ipr_cmd, qc);
6695 else
6696 ipr_build_ata_ioadl(ipr_cmd, qc);
6697
Brian King35a39692006-09-25 12:39:20 -05006698 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6699 ipr_copy_sata_tf(regs, &qc->tf);
6700 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006701 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
Brian King35a39692006-09-25 12:39:20 -05006702
6703 switch (qc->tf.protocol) {
6704 case ATA_PROT_NODATA:
6705 case ATA_PROT_PIO:
6706 break;
6707
6708 case ATA_PROT_DMA:
6709 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6710 break;
6711
Tejun Heo0dc36882007-12-18 16:34:43 -05006712 case ATAPI_PROT_PIO:
6713 case ATAPI_PROT_NODATA:
Brian King35a39692006-09-25 12:39:20 -05006714 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6715 break;
6716
Tejun Heo0dc36882007-12-18 16:34:43 -05006717 case ATAPI_PROT_DMA:
Brian King35a39692006-09-25 12:39:20 -05006718 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6719 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6720 break;
6721
6722 default:
6723 WARN_ON(1);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006724 spin_unlock(&ipr_cmd->hrrq->_lock);
Brian King0feeed82007-03-29 12:43:43 -05006725 return AC_ERR_INVALID;
Brian King35a39692006-09-25 12:39:20 -05006726 }
6727
Wayne Boyera32c0552010-02-19 13:23:36 -08006728 ipr_send_command(ipr_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006729 spin_unlock(&ipr_cmd->hrrq->_lock);
Wayne Boyera32c0552010-02-19 13:23:36 -08006730
Brian King35a39692006-09-25 12:39:20 -05006731 return 0;
6732}
6733
6734/**
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09006735 * ipr_qc_fill_rtf - Read result TF
6736 * @qc: ATA queued command
6737 *
6738 * Return value:
6739 * true
6740 **/
6741static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6742{
6743 struct ipr_sata_port *sata_port = qc->ap->private_data;
6744 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6745 struct ata_taskfile *tf = &qc->result_tf;
6746
6747 tf->feature = g->error;
6748 tf->nsect = g->nsect;
6749 tf->lbal = g->lbal;
6750 tf->lbam = g->lbam;
6751 tf->lbah = g->lbah;
6752 tf->device = g->device;
6753 tf->command = g->status;
6754 tf->hob_nsect = g->hob_nsect;
6755 tf->hob_lbal = g->hob_lbal;
6756 tf->hob_lbam = g->hob_lbam;
6757 tf->hob_lbah = g->hob_lbah;
6758 tf->ctl = g->alt_status;
6759
6760 return true;
6761}
6762
Brian King35a39692006-09-25 12:39:20 -05006763static struct ata_port_operations ipr_sata_ops = {
Brian King35a39692006-09-25 12:39:20 -05006764 .phy_reset = ipr_ata_phy_reset,
Tejun Heoa1efdab2008-03-25 12:22:50 +09006765 .hardreset = ipr_sata_reset,
Brian King35a39692006-09-25 12:39:20 -05006766 .post_internal_cmd = ipr_ata_post_internal,
Brian King35a39692006-09-25 12:39:20 -05006767 .qc_prep = ata_noop_qc_prep,
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006768 .qc_defer = ipr_qc_defer,
Brian King35a39692006-09-25 12:39:20 -05006769 .qc_issue = ipr_qc_issue,
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09006770 .qc_fill_rtf = ipr_qc_fill_rtf,
Brian King35a39692006-09-25 12:39:20 -05006771 .port_start = ata_sas_port_start,
6772 .port_stop = ata_sas_port_stop
6773};
6774
6775static struct ata_port_info sata_port_info = {
Sergei Shtylyov9cbe0562011-02-04 22:05:48 +03006776 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
Sergei Shtylyov0f2e0332011-01-21 20:32:01 +03006777 .pio_mask = ATA_PIO4_ONLY,
6778 .mwdma_mask = ATA_MWDMA2,
6779 .udma_mask = ATA_UDMA6,
Brian King35a39692006-09-25 12:39:20 -05006780 .port_ops = &ipr_sata_ops
6781};
6782
Linus Torvalds1da177e2005-04-16 15:20:36 -07006783#ifdef CONFIG_PPC_PSERIES
6784static const u16 ipr_blocked_processors[] = {
Michael Ellermand3dbeef2012-08-19 21:44:01 +00006785 PVR_NORTHSTAR,
6786 PVR_PULSAR,
6787 PVR_POWER4,
6788 PVR_ICESTAR,
6789 PVR_SSTAR,
6790 PVR_POWER4p,
6791 PVR_630,
6792 PVR_630p
Linus Torvalds1da177e2005-04-16 15:20:36 -07006793};
6794
6795/**
6796 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6797 * @ioa_cfg: ioa cfg struct
6798 *
6799 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6800 * certain pSeries hardware. This function determines if the given
6801 * adapter is in one of these configurations or not.
6802 *
6803 * Return value:
6804 * 1 if adapter is not supported / 0 if adapter is supported
6805 **/
6806static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6807{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006808 int i;
6809
Auke Kok44c10132007-06-08 15:46:36 -07006810 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006811 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
Michael Ellermand3dbeef2012-08-19 21:44:01 +00006812 if (pvr_version_is(ipr_blocked_processors[i]))
Auke Kok44c10132007-06-08 15:46:36 -07006813 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006814 }
6815 }
6816 return 0;
6817}
6818#else
6819#define ipr_invalid_adapter(ioa_cfg) 0
6820#endif
6821
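/*
 * Illustrative caller sketch (hypothetical -- not the driver's exact
 * probe-path code): adapter bring-up is expected to consult this check
 * and complain about blocked Gemstone/processor combinations, e.g.:
 *
 *	if (ipr_invalid_adapter(ioa_cfg))
 *		dev_err(&ioa_cfg->pdev->dev,
 *			"Adapter not supported in this hardware configuration.\n");
 */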
6822/**
6823 * ipr_ioa_bringdown_done - IOA bring down completion.
6824 * @ipr_cmd: ipr command struct
6825 *
6826 * This function processes the completion of an adapter bring down.
6827 * It wakes any reset sleepers.
6828 *
6829 * Return value:
6830 * IPR_RC_JOB_RETURN
6831 **/
6832static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6833{
6834 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
wenxiong@linux.vnet.ibm.com96b04db2013-04-17 09:34:06 -05006835 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006836
6837 ENTER;
Brian Kingbfae7822013-01-30 23:45:08 -06006838 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6839 ipr_trace;
6840 spin_unlock_irq(ioa_cfg->host->host_lock);
6841 scsi_unblock_requests(ioa_cfg->host);
6842 spin_lock_irq(ioa_cfg->host->host_lock);
6843 }
6844
Linus Torvalds1da177e2005-04-16 15:20:36 -07006845 ioa_cfg->in_reset_reload = 0;
6846 ioa_cfg->reset_retries = 0;
wenxiong@linux.vnet.ibm.com96b04db2013-04-17 09:34:06 -05006847 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6848 spin_lock(&ioa_cfg->hrrq[i]._lock);
6849 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6850 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6851 }
6852 wmb();
6853
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006854 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006855 wake_up_all(&ioa_cfg->reset_wait_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006856 LEAVE;
6857
6858 return IPR_RC_JOB_RETURN;
6859}
6860
6861/**
6862 * ipr_ioa_reset_done - IOA reset completion.
6863 * @ipr_cmd: ipr command struct
6864 *
6865 * This function processes the completion of an adapter reset.
6866 * It schedules any necessary mid-layer add/removes and
6867 * wakes any reset sleepers.
6868 *
6869 * Return value:
6870 * IPR_RC_JOB_RETURN
6871 **/
6872static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6873{
6874 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6875 struct ipr_resource_entry *res;
6876 struct ipr_hostrcb *hostrcb, *temp;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006877 int i = 0, j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006878
6879 ENTER;
6880 ioa_cfg->in_reset_reload = 0;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006881 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6882 spin_lock(&ioa_cfg->hrrq[j]._lock);
6883 ioa_cfg->hrrq[j].allow_cmds = 1;
6884 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6885 }
6886 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006887 ioa_cfg->reset_cmd = NULL;
brking@us.ibm.com3d1d0da2005-11-01 17:01:54 -06006888 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006889
6890 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6891 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6892 ipr_trace;
6893 break;
6894 }
6895 }
6896 schedule_work(&ioa_cfg->work_q);
6897
6898 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6899 list_del(&hostrcb->queue);
6900 if (i++ < IPR_NUM_LOG_HCAMS)
6901 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6902 else
6903 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6904 }
6905
Brian King6bb04172007-04-26 16:00:08 -05006906 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006907 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6908
6909 ioa_cfg->reset_retries = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006910 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006911 wake_up_all(&ioa_cfg->reset_wait_q);
6912
Mark Nelson30237852008-12-10 12:23:20 +11006913 spin_unlock(ioa_cfg->host->host_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006914 scsi_unblock_requests(ioa_cfg->host);
Mark Nelson30237852008-12-10 12:23:20 +11006915 spin_lock(ioa_cfg->host->host_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006916
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006917 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006918 scsi_block_requests(ioa_cfg->host);
6919
6920 LEAVE;
6921 return IPR_RC_JOB_RETURN;
6922}
6923
6924/**
6925 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6926 * @supported_dev: supported device struct
6927 * @vpids: vendor product id struct
6928 *
6929 * Return value:
6930 * none
6931 **/
6932static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6933 struct ipr_std_inq_vpids *vpids)
6934{
6935 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6936 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6937 supported_dev->num_records = 1;
6938 supported_dev->data_length =
6939 cpu_to_be16(sizeof(struct ipr_supported_device));
6940 supported_dev->reserved = 0;
6941}
6942
6943/**
6944 * ipr_set_supported_devs - Send Set Supported Devices for a device
6945 * @ipr_cmd: ipr command struct
6946 *
Wayne Boyera32c0552010-02-19 13:23:36 -08006947 * This function sends a Set Supported Devices command to the adapter
Linus Torvalds1da177e2005-04-16 15:20:36 -07006948 *
6949 * Return value:
6950 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6951 **/
6952static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6953{
6954 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6955 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006956 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6957 struct ipr_resource_entry *res = ipr_cmd->u.res;
6958
6959 ipr_cmd->job_step = ipr_ioa_reset_done;
6960
6961 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
Brian Kinge4fbf442006-03-29 09:37:22 -06006962 if (!ipr_is_scsi_disk(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006963 continue;
6964
6965 ipr_cmd->u.res = res;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006966 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006967
6968 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6969 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6970 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6971
6972 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006973 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006974 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6975 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6976
Wayne Boyera32c0552010-02-19 13:23:36 -08006977 ipr_init_ioadl(ipr_cmd,
6978 ioa_cfg->vpd_cbs_dma +
6979 offsetof(struct ipr_misc_cbs, supp_dev),
6980 sizeof(struct ipr_supported_device),
6981 IPR_IOADL_FLAGS_WRITE_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006982
6983 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6984 IPR_SET_SUP_DEVICE_TIMEOUT);
6985
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006986 if (!ioa_cfg->sis64)
6987 ipr_cmd->job_step = ipr_set_supported_devs;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006988 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006989 return IPR_RC_JOB_RETURN;
6990 }
6991
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006992 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006993 return IPR_RC_JOB_CONTINUE;
6994}
6995
6996/**
6997 * ipr_get_mode_page - Locate specified mode page
6998 * @mode_pages: mode page buffer
6999 * @page_code: page code to find
7000 * @len: minimum required length for mode page
7001 *
7002 * Return value:
7003 * pointer to mode page / NULL on failure
7004 **/
7005static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7006 u32 page_code, u32 len)
7007{
7008 struct ipr_mode_page_hdr *mode_hdr;
7009 u32 page_length;
7010 u32 length;
7011
7012 if (!mode_pages || (mode_pages->hdr.length == 0))
7013 return NULL;
7014
7015 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7016 mode_hdr = (struct ipr_mode_page_hdr *)
7017 (mode_pages->data + mode_pages->hdr.block_desc_len);
7018
7019 while (length) {
7020 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7021 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7022 return mode_hdr;
7023 break;
7024 } else {
7025 page_length = (sizeof(struct ipr_mode_page_hdr) +
7026 mode_hdr->page_length);
7027 length -= page_length;
7028 mode_hdr = (struct ipr_mode_page_hdr *)
7029 ((unsigned long)mode_hdr + page_length);
7030 }
7031 }
7032 return NULL;
7033}
7034
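/*
 * Layout note for the walk above: the buffer is standard MODE SENSE(6)
 * parameter data -- a 4 byte mode parameter header (hdr.length counts
 * every byte after the length byte itself, hence the "+ 1 ... - 4"),
 * then hdr.block_desc_len bytes of block descriptors, then the mode
 * pages, each beginning with an ipr_mode_page_hdr.
 */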
7035/**
7036 * ipr_check_term_power - Check for term power errors
7037 * @ioa_cfg: ioa config struct
7038 * @mode_pages: IOAFP mode pages buffer
7039 *
7040 * Check the IOAFP's mode page 28 for term power errors
7041 *
7042 * Return value:
7043 * nothing
7044 **/
7045static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7046 struct ipr_mode_pages *mode_pages)
7047{
7048 int i;
7049 int entry_length;
7050 struct ipr_dev_bus_entry *bus;
7051 struct ipr_mode_page28 *mode_page;
7052
7053 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7054 sizeof(struct ipr_mode_page28));
7055
7056 entry_length = mode_page->entry_length;
7057
7058 bus = mode_page->bus;
7059
7060 for (i = 0; i < mode_page->num_entries; i++) {
7061 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7062 dev_err(&ioa_cfg->pdev->dev,
7063 "Term power is absent on scsi bus %d\n",
7064 bus->res_addr.bus);
7065 }
7066
7067 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7068 }
7069}
7070
7071/**
7072 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7073 * @ioa_cfg: ioa config struct
7074 *
 7075 * Looks through the config table for SES devices. If an SES
 7076 * device is found in the SES table with a maximum SCSI bus
 7077 * speed, the bus speed is limited accordingly.
7078 *
7079 * Return value:
7080 * none
7081 **/
7082static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7083{
7084 u32 max_xfer_rate;
7085 int i;
7086
7087 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7088 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7089 ioa_cfg->bus_attr[i].bus_width);
7090
7091 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7092 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7093 }
7094}
7095
7096/**
7097 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7098 * @ioa_cfg: ioa config struct
7099 * @mode_pages: mode page 28 buffer
7100 *
7101 * Updates mode page 28 based on driver configuration
7102 *
7103 * Return value:
7104 * none
7105 **/
7106static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03007107 struct ipr_mode_pages *mode_pages)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007108{
7109 int i, entry_length;
7110 struct ipr_dev_bus_entry *bus;
7111 struct ipr_bus_attributes *bus_attr;
7112 struct ipr_mode_page28 *mode_page;
7113
7114 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7115 sizeof(struct ipr_mode_page28));
7116
7117 entry_length = mode_page->entry_length;
7118
7119 /* Loop for each device bus entry */
7120 for (i = 0, bus = mode_page->bus;
7121 i < mode_page->num_entries;
7122 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7123 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7124 dev_err(&ioa_cfg->pdev->dev,
7125 "Invalid resource address reported: 0x%08X\n",
7126 IPR_GET_PHYS_LOC(bus->res_addr));
7127 continue;
7128 }
7129
7130 bus_attr = &ioa_cfg->bus_attr[i];
7131 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7132 bus->bus_width = bus_attr->bus_width;
7133 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7134 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7135 if (bus_attr->qas_enabled)
7136 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7137 else
7138 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7139 }
7140}
7141
7142/**
7143 * ipr_build_mode_select - Build a mode select command
7144 * @ipr_cmd: ipr command struct
7145 * @res_handle: resource handle to send command to
 7146 * @parm: Byte 1 of the Mode Select CDB (callers pass PF | SP)
7147 * @dma_addr: DMA buffer address
7148 * @xfer_len: data transfer length
7149 *
7150 * Return value:
7151 * none
7152 **/
7153static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
Wayne Boyera32c0552010-02-19 13:23:36 -08007154 __be32 res_handle, u8 parm,
7155 dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007156{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007157 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7158
7159 ioarcb->res_handle = res_handle;
7160 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7161 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7162 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7163 ioarcb->cmd_pkt.cdb[1] = parm;
7164 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7165
Wayne Boyera32c0552010-02-19 13:23:36 -08007166 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007167}
7168
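/*
 * The CDB built above is a MODE SELECT(6): byte 0 is the opcode, byte 1
 * carries the caller's flags (callers in this file pass 0x11, i.e. the
 * PF and SP bits), and byte 4 is the parameter list length. The data
 * buffer is mapped with a single write IOADL entry.
 */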
7169/**
7170 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7171 * @ipr_cmd: ipr command struct
7172 *
7173 * This function sets up the SCSI bus attributes and sends
7174 * a Mode Select for Page 28 to activate them.
7175 *
7176 * Return value:
7177 * IPR_RC_JOB_RETURN
7178 **/
7179static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7180{
7181 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7182 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7183 int length;
7184
7185 ENTER;
Brian King47338042006-02-08 20:57:42 -06007186 ipr_scsi_bus_speed_limit(ioa_cfg);
7187 ipr_check_term_power(ioa_cfg, mode_pages);
7188 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7189 length = mode_pages->hdr.length + 1;
7190 mode_pages->hdr.length = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007191
7192 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7193 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7194 length);
7195
Wayne Boyerf72919e2010-02-19 13:24:21 -08007196 ipr_cmd->job_step = ipr_set_supported_devs;
7197 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7198 struct ipr_resource_entry, queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007199 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7200
7201 LEAVE;
7202 return IPR_RC_JOB_RETURN;
7203}
7204
7205/**
7206 * ipr_build_mode_sense - Builds a mode sense command
7207 * @ipr_cmd: ipr command struct
 7208 * @res_handle: resource handle to send command to
7209 * @parm: Byte 2 of mode sense command
7210 * @dma_addr: DMA address of mode sense buffer
7211 * @xfer_len: Size of DMA buffer
7212 *
7213 * Return value:
7214 * none
7215 **/
7216static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7217 __be32 res_handle,
Wayne Boyera32c0552010-02-19 13:23:36 -08007218 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007219{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007220 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7221
7222 ioarcb->res_handle = res_handle;
7223 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7224 ioarcb->cmd_pkt.cdb[2] = parm;
7225 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7226 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7227
Wayne Boyera32c0552010-02-19 13:23:36 -08007228 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007229}
7230
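/*
 * Similarly, the MODE SENSE(6) built above carries the page code in CDB
 * byte 2 (0x28 for the SCSI bus attributes page, 0x24 for the AF control
 * page in the callers below) and the allocation length in byte 4, with
 * the response mapped as a read IOADL entry.
 */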
7231/**
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007232 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7233 * @ipr_cmd: ipr command struct
7234 *
7235 * This function handles the failure of an IOA bringup command.
7236 *
7237 * Return value:
7238 * IPR_RC_JOB_RETURN
7239 **/
7240static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7241{
7242 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer96d21f02010-05-10 09:13:27 -07007243 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007244
7245 dev_err(&ioa_cfg->pdev->dev,
7246 "0x%02X failed with IOASC: 0x%08X\n",
7247 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7248
7249 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007250 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007251 return IPR_RC_JOB_RETURN;
7252}
7253
7254/**
7255 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7256 * @ipr_cmd: ipr command struct
7257 *
7258 * This function handles the failure of a Mode Sense to the IOAFP.
7259 * Some adapters do not handle all mode pages.
7260 *
7261 * Return value:
7262 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7263 **/
7264static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7265{
Wayne Boyerf72919e2010-02-19 13:24:21 -08007266 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer96d21f02010-05-10 09:13:27 -07007267 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007268
7269 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
Wayne Boyerf72919e2010-02-19 13:24:21 -08007270 ipr_cmd->job_step = ipr_set_supported_devs;
7271 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7272 struct ipr_resource_entry, queue);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007273 return IPR_RC_JOB_CONTINUE;
7274 }
7275
7276 return ipr_reset_cmd_failed(ipr_cmd);
7277}
7278
7279/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007280 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7281 * @ipr_cmd: ipr command struct
7282 *
 7283 * This function sends a Page 28 mode sense to the IOA to
7284 * retrieve SCSI bus attributes.
7285 *
7286 * Return value:
7287 * IPR_RC_JOB_RETURN
7288 **/
7289static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7290{
7291 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7292
7293 ENTER;
7294 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7295 0x28, ioa_cfg->vpd_cbs_dma +
7296 offsetof(struct ipr_misc_cbs, mode_pages),
7297 sizeof(struct ipr_mode_pages));
7298
7299 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007300 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007301
7302 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7303
7304 LEAVE;
7305 return IPR_RC_JOB_RETURN;
7306}
7307
7308/**
Brian Kingac09c342007-04-26 16:00:16 -05007309 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7310 * @ipr_cmd: ipr command struct
7311 *
7312 * This function enables dual IOA RAID support if possible.
7313 *
7314 * Return value:
7315 * IPR_RC_JOB_RETURN
7316 **/
7317static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7318{
7319 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7320 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7321 struct ipr_mode_page24 *mode_page;
7322 int length;
7323
7324 ENTER;
7325 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7326 sizeof(struct ipr_mode_page24));
7327
7328 if (mode_page)
7329 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7330
7331 length = mode_pages->hdr.length + 1;
7332 mode_pages->hdr.length = 0;
7333
7334 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7335 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7336 length);
7337
7338 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7339 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7340
7341 LEAVE;
7342 return IPR_RC_JOB_RETURN;
7343}
7344
7345/**
7346 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7347 * @ipr_cmd: ipr command struct
7348 *
7349 * This function handles the failure of a Mode Sense to the IOAFP.
7350 * Some adapters do not handle all mode pages.
7351 *
7352 * Return value:
7353 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7354 **/
7355static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7356{
Wayne Boyer96d21f02010-05-10 09:13:27 -07007357 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian Kingac09c342007-04-26 16:00:16 -05007358
7359 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7360 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7361 return IPR_RC_JOB_CONTINUE;
7362 }
7363
7364 return ipr_reset_cmd_failed(ipr_cmd);
7365}
7366
7367/**
7368 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7369 * @ipr_cmd: ipr command struct
7370 *
 7371 * This function sends a mode sense to the IOA to retrieve
7372 * the IOA Advanced Function Control mode page.
7373 *
7374 * Return value:
7375 * IPR_RC_JOB_RETURN
7376 **/
7377static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7378{
7379 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7380
7381 ENTER;
7382 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7383 0x24, ioa_cfg->vpd_cbs_dma +
7384 offsetof(struct ipr_misc_cbs, mode_pages),
7385 sizeof(struct ipr_mode_pages));
7386
7387 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7388 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7389
7390 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7391
7392 LEAVE;
7393 return IPR_RC_JOB_RETURN;
7394}
7395
7396/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007397 * ipr_init_res_table - Initialize the resource table
7398 * @ipr_cmd: ipr command struct
7399 *
7400 * This function looks through the existing resource table, comparing
7401 * it with the config table. This function will take care of old/new
7402 * devices and schedule adding/removing them from the mid-layer
7403 * as appropriate.
7404 *
7405 * Return value:
7406 * IPR_RC_JOB_CONTINUE
7407 **/
7408static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7409{
7410 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7411 struct ipr_resource_entry *res, *temp;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007412 struct ipr_config_table_entry_wrapper cfgtew;
7413 int entries, found, flag, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007414 LIST_HEAD(old_res);
7415
7416 ENTER;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007417 if (ioa_cfg->sis64)
7418 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7419 else
7420 flag = ioa_cfg->u.cfg_table->hdr.flags;
7421
7422 if (flag & IPR_UCODE_DOWNLOAD_REQ)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007423 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7424
7425 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7426 list_move_tail(&res->queue, &old_res);
7427
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007428 if (ioa_cfg->sis64)
Wayne Boyer438b0332010-05-10 09:13:00 -07007429 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007430 else
7431 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7432
7433 for (i = 0; i < entries; i++) {
7434 if (ioa_cfg->sis64)
7435 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7436 else
7437 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07007438 found = 0;
7439
7440 list_for_each_entry_safe(res, temp, &old_res, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007441 if (ipr_is_same_device(res, &cfgtew)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007442 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7443 found = 1;
7444 break;
7445 }
7446 }
7447
7448 if (!found) {
7449 if (list_empty(&ioa_cfg->free_res_q)) {
7450 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7451 break;
7452 }
7453
7454 found = 1;
7455 res = list_entry(ioa_cfg->free_res_q.next,
7456 struct ipr_resource_entry, queue);
7457 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007458 ipr_init_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007459 res->add_to_ml = 1;
Wayne Boyer56115592010-06-10 14:46:34 -07007460 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7461 res->sdev->allow_restart = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007462
7463 if (found)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007464 ipr_update_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007465 }
7466
7467 list_for_each_entry_safe(res, temp, &old_res, queue) {
7468 if (res->sdev) {
7469 res->del_from_ml = 1;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007470 res->res_handle = IPR_INVALID_RES_HANDLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007471 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007472 }
7473 }
7474
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007475 list_for_each_entry_safe(res, temp, &old_res, queue) {
7476 ipr_clear_res_target(res);
7477 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7478 }
7479
Brian Kingac09c342007-04-26 16:00:16 -05007480 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7481 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7482 else
7483 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007484
7485 LEAVE;
7486 return IPR_RC_JOB_CONTINUE;
7487}
7488
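/*
 * In short: ipr_init_res_table parks every known resource on old_res,
 * matches each entry of the freshly fetched config table back onto
 * used_res_q (or allocates a new entry from free_res_q with add_to_ml
 * set), and whatever remains on old_res is either flagged del_from_ml
 * and kept on used_res_q (if it still has an sdev) or cleared and
 * returned to the free list.
 */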
7489/**
7490 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7491 * @ipr_cmd: ipr command struct
7492 *
7493 * This function sends a Query IOA Configuration command
7494 * to the adapter to retrieve the IOA configuration table.
7495 *
7496 * Return value:
7497 * IPR_RC_JOB_RETURN
7498 **/
7499static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7500{
7501 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7502 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007503 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
Brian Kingac09c342007-04-26 16:00:16 -05007504 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007505
7506 ENTER;
Brian Kingac09c342007-04-26 16:00:16 -05007507 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7508 ioa_cfg->dual_raid = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007509 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7510 ucode_vpd->major_release, ucode_vpd->card_type,
7511 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7512 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7513 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7514
7515 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
Wayne Boyer438b0332010-05-10 09:13:00 -07007516 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007517 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7518 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007519
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007520 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
Wayne Boyera32c0552010-02-19 13:23:36 -08007521 IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007522
7523 ipr_cmd->job_step = ipr_init_res_table;
7524
7525 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7526
7527 LEAVE;
7528 return IPR_RC_JOB_RETURN;
7529}
7530
7531/**
7532 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
 7533 * @ipr_cmd: ipr command struct
 * @flags: inquiry flags (CDB byte 1, e.g. the EVPD bit)
 * @page: inquiry page code (CDB byte 2)
 * @dma_addr: DMA address of the inquiry response buffer
 * @xfer_len: size of the inquiry response buffer
7534 *
7535 * This utility function sends an inquiry to the adapter.
7536 *
7537 * Return value:
7538 * none
7539 **/
7540static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
Wayne Boyera32c0552010-02-19 13:23:36 -08007541 dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007542{
7543 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007544
7545 ENTER;
7546 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7547 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7548
7549 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7550 ioarcb->cmd_pkt.cdb[1] = flags;
7551 ioarcb->cmd_pkt.cdb[2] = page;
7552 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7553
Wayne Boyera32c0552010-02-19 13:23:36 -08007554 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007555
7556 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7557 LEAVE;
7558}
7559
7560/**
brking@us.ibm.com62275042005-11-01 17:01:14 -06007561 * ipr_inquiry_page_supported - Is the given inquiry page supported
7562 * @page0: inquiry page 0 buffer
7563 * @page: page code.
7564 *
7565 * This function determines if the specified inquiry page is supported.
7566 *
7567 * Return value:
7568 * 1 if page is supported / 0 if not
7569 **/
7570static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7571{
7572 int i;
7573
7574 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7575 if (page0->page[i] == page)
7576 return 1;
7577
7578 return 0;
7579}
7580
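/*
 * Page 0 here is the standard INQUIRY "Supported VPD Pages" page (EVPD
 * page 0x00): its payload is simply the list of supported page codes,
 * which the loop above scans for a match.
 */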
7581/**
Brian Kingac09c342007-04-26 16:00:16 -05007582 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7583 * @ipr_cmd: ipr command struct
7584 *
7585 * This function sends a Page 0xD0 inquiry to the adapter
7586 * to retrieve adapter capabilities.
7587 *
7588 * Return value:
7589 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7590 **/
7591static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7592{
7593 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7594 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7595 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7596
7597 ENTER;
7598 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7599 memset(cap, 0, sizeof(*cap));
7600
7601 if (ipr_inquiry_page_supported(page0, 0xD0)) {
7602 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7603 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7604 sizeof(struct ipr_inquiry_cap));
7605 return IPR_RC_JOB_RETURN;
7606 }
7607
7608 LEAVE;
7609 return IPR_RC_JOB_CONTINUE;
7610}
7611
7612/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007613 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7614 * @ipr_cmd: ipr command struct
7615 *
7616 * This function sends a Page 3 inquiry to the adapter
7617 * to retrieve software VPD information.
7618 *
7619 * Return value:
7620 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7621 **/
7622static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7623{
7624 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
brking@us.ibm.com62275042005-11-01 17:01:14 -06007625
7626 ENTER;
7627
Brian Kingac09c342007-04-26 16:00:16 -05007628 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
brking@us.ibm.com62275042005-11-01 17:01:14 -06007629
7630 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7631 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7632 sizeof(struct ipr_inquiry_page3));
7633
7634 LEAVE;
7635 return IPR_RC_JOB_RETURN;
7636}
7637
7638/**
7639 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7640 * @ipr_cmd: ipr command struct
7641 *
7642 * This function sends a Page 0 inquiry to the adapter
7643 * to retrieve supported inquiry pages.
7644 *
7645 * Return value:
7646 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7647 **/
7648static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7649{
7650 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007651 char type[5];
7652
7653 ENTER;
7654
7655 /* Grab the type out of the VPD and store it away */
7656 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7657 type[4] = '\0';
7658 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7659
brking@us.ibm.com62275042005-11-01 17:01:14 -06007660 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007661
brking@us.ibm.com62275042005-11-01 17:01:14 -06007662 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7663 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7664 sizeof(struct ipr_inquiry_page0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007665
7666 LEAVE;
7667 return IPR_RC_JOB_RETURN;
7668}
7669
7670/**
7671 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7672 * @ipr_cmd: ipr command struct
7673 *
7674 * This function sends a standard inquiry to the adapter.
7675 *
7676 * Return value:
7677 * IPR_RC_JOB_RETURN
7678 **/
7679static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7680{
7681 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7682
7683 ENTER;
brking@us.ibm.com62275042005-11-01 17:01:14 -06007684 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007685
7686 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7687 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7688 sizeof(struct ipr_ioa_vpd));
7689
7690 LEAVE;
7691 return IPR_RC_JOB_RETURN;
7692}
7693
7694/**
Wayne Boyer214777b2010-02-19 13:24:26 -08007695 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007696 * @ipr_cmd: ipr command struct
7697 *
 7698 * This function sends an Identify Host Request Response Queue
7699 * command to establish the HRRQ with the adapter.
7700 *
7701 * Return value:
7702 * IPR_RC_JOB_RETURN
7703 **/
Wayne Boyer214777b2010-02-19 13:24:26 -08007704static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007705{
7706 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7707 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007708 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007709
7710 ENTER;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007711 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007712 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7713
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007714 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7715 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
Linus Torvalds1da177e2005-04-16 15:20:36 -07007716
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007717 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7718 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007719
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007720 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7721 if (ioa_cfg->sis64)
7722 ioarcb->cmd_pkt.cdb[1] = 0x1;
7723
7724 if (ioa_cfg->nvectors == 1)
7725 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7726 else
7727 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7728
7729 ioarcb->cmd_pkt.cdb[2] =
7730 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7731 ioarcb->cmd_pkt.cdb[3] =
7732 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7733 ioarcb->cmd_pkt.cdb[4] =
7734 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7735 ioarcb->cmd_pkt.cdb[5] =
7736 ((u64) hrrq->host_rrq_dma) & 0xff;
7737 ioarcb->cmd_pkt.cdb[7] =
7738 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7739 ioarcb->cmd_pkt.cdb[8] =
7740 (sizeof(u32) * hrrq->size) & 0xff;
7741
7742 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007743 ioarcb->cmd_pkt.cdb[9] =
7744 ioa_cfg->identify_hrrq_index;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007745
7746 if (ioa_cfg->sis64) {
7747 ioarcb->cmd_pkt.cdb[10] =
7748 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7749 ioarcb->cmd_pkt.cdb[11] =
7750 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7751 ioarcb->cmd_pkt.cdb[12] =
7752 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7753 ioarcb->cmd_pkt.cdb[13] =
7754 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7755 }
7756
7757 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007758 ioarcb->cmd_pkt.cdb[14] =
7759 ioa_cfg->identify_hrrq_index;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007760
7761 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7762 IPR_INTERNAL_TIMEOUT);
7763
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007764 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7765 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007766
7767 LEAVE;
7768 return IPR_RC_JOB_RETURN;
Wayne Boyer214777b2010-02-19 13:24:26 -08007769 }
7770
Linus Torvalds1da177e2005-04-16 15:20:36 -07007771 LEAVE;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007772 return IPR_RC_JOB_CONTINUE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007773}
7774
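/*
 * Bring-up job_step chain, as wired by the routines in this file:
 * ipr_reset_enable_ioa -> ipr_ioafp_identify_hrrq (via
 * ipr_reset_next_stage on SIS-64, repeated once per HRRQ) ->
 * ipr_ioafp_std_inquiry -> ipr_ioafp_page0_inquiry ->
 * ipr_ioafp_page3_inquiry -> ipr_ioafp_cap_inquiry ->
 * ipr_ioafp_query_ioa_cfg -> ipr_init_res_table -> optional page 24
 * mode sense/select -> page 28 mode sense/select ->
 * ipr_set_supported_devs -> ipr_ioa_reset_done.
 */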
7775/**
7776 * ipr_reset_timer_done - Adapter reset timer function
7777 * @ipr_cmd: ipr command struct
7778 *
7779 * Description: This function is used in adapter reset processing
7780 * for timing events. If the reset_cmd pointer in the IOA
 7781 * config struct is not this adapter's, we are doing nested
7782 * resets and fail_all_ops will take care of freeing the
7783 * command block.
7784 *
7785 * Return value:
7786 * none
7787 **/
7788static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7789{
7790 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7791 unsigned long lock_flags = 0;
7792
7793 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7794
7795 if (ioa_cfg->reset_cmd == ipr_cmd) {
7796 list_del(&ipr_cmd->queue);
7797 ipr_cmd->done(ipr_cmd);
7798 }
7799
7800 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7801}
7802
7803/**
7804 * ipr_reset_start_timer - Start a timer for adapter reset job
7805 * @ipr_cmd: ipr command struct
7806 * @timeout: timeout value
7807 *
7808 * Description: This function is used in adapter reset processing
7809 * for timing events. If the reset_cmd pointer in the IOA
 7810 * config struct is not this adapter's, we are doing nested
7811 * resets and fail_all_ops will take care of freeing the
7812 * command block.
7813 *
7814 * Return value:
7815 * none
7816 **/
7817static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7818 unsigned long timeout)
7819{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007820
7821 ENTER;
7822 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007823 ipr_cmd->done = ipr_reset_ioa_job;
7824
7825 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7826 ipr_cmd->timer.expires = jiffies + timeout;
7827 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7828 add_timer(&ipr_cmd->timer);
7829}
7830
7831/**
7832 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7833 * @ioa_cfg: ioa cfg struct
7834 *
7835 * Return value:
7836 * nothing
7837 **/
7838static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7839{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007840 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007841
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007842 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007843 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007844 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7845
7846 /* Initialize Host RRQ pointers */
7847 hrrq->hrrq_start = hrrq->host_rrq;
7848 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7849 hrrq->hrrq_curr = hrrq->hrrq_start;
7850 hrrq->toggle_bit = 1;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007851 spin_unlock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007852 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007853 wmb();
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007854
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007855 ioa_cfg->identify_hrrq_index = 0;
7856 if (ioa_cfg->hrrq_num == 1)
7857 atomic_set(&ioa_cfg->hrrq_index, 0);
7858 else
7859 atomic_set(&ioa_cfg->hrrq_index, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007860
7861 /* Zero out config table */
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007862 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007863}
7864
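/*
 * Zeroing host_rrq and resetting toggle_bit to 1 lets the interrupt
 * handler tell freshly posted responses (whose toggle bit matches
 * hrrq->toggle_bit) apart from stale queue contents once the adapter
 * is re-enabled.
 */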
7865/**
Wayne Boyer214777b2010-02-19 13:24:26 -08007866 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7867 * @ipr_cmd: ipr command struct
7868 *
7869 * Return value:
7870 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7871 **/
7872static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7873{
7874 unsigned long stage, stage_time;
7875 u32 feedback;
7876 volatile u32 int_reg;
7877 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7878 u64 maskval = 0;
7879
7880 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7881 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7882 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7883
7884 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7885
7886 /* sanity check the stage_time value */
Wayne Boyer438b0332010-05-10 09:13:00 -07007887 if (stage_time == 0)
7888 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7889 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
Wayne Boyer214777b2010-02-19 13:24:26 -08007890 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7891 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7892 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7893
7894 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7895 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7896 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7897 stage_time = ioa_cfg->transop_timeout;
7898 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7899 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
Wayne Boyer1df79ca2010-07-14 10:49:43 -07007900 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7901 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7902 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7903 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7904 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7905 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7906 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7907 return IPR_RC_JOB_CONTINUE;
7908 }
Wayne Boyer214777b2010-02-19 13:24:26 -08007909 }
7910
7911 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7912 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7913 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7914 ipr_cmd->done = ipr_reset_ioa_job;
7915 add_timer(&ipr_cmd->timer);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007916
7917 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Wayne Boyer214777b2010-02-19 13:24:26 -08007918
7919 return IPR_RC_JOB_RETURN;
7920}
7921
7922/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007923 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7924 * @ipr_cmd: ipr command struct
7925 *
7926 * This function reinitializes some control blocks and
7927 * enables destructive diagnostics on the adapter.
7928 *
7929 * Return value:
7930 * IPR_RC_JOB_RETURN
7931 **/
7932static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7933{
7934 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7935 volatile u32 int_reg;
Wayne Boyer7be96902010-05-10 09:14:07 -07007936 volatile u64 maskval;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007937 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007938
7939 ENTER;
Wayne Boyer214777b2010-02-19 13:24:26 -08007940 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007941 ipr_init_ioa_mem(ioa_cfg);
7942
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007943 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7944 spin_lock(&ioa_cfg->hrrq[i]._lock);
7945 ioa_cfg->hrrq[i].allow_interrupts = 1;
7946 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7947 }
7948 wmb();
Wayne Boyer8701f182010-06-04 10:26:50 -07007949 if (ioa_cfg->sis64) {
7950 /* Set the adapter to the correct endian mode. */
7951 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7952 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7953 }
7954
Wayne Boyer7be96902010-05-10 09:14:07 -07007955 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007956
7957 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7958 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
Wayne Boyer214777b2010-02-19 13:24:26 -08007959 ioa_cfg->regs.clr_interrupt_mask_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007960 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7961 return IPR_RC_JOB_CONTINUE;
7962 }
7963
7964 /* Enable destructive diagnostics on IOA */
Wayne Boyer214777b2010-02-19 13:24:26 -08007965 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007966
Wayne Boyer7be96902010-05-10 09:14:07 -07007967 if (ioa_cfg->sis64) {
7968 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7969 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7970 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7971 } else
7972 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
Wayne Boyer214777b2010-02-19 13:24:26 -08007973
Linus Torvalds1da177e2005-04-16 15:20:36 -07007974 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7975
7976 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7977
Wayne Boyer214777b2010-02-19 13:24:26 -08007978 if (ioa_cfg->sis64) {
7979 ipr_cmd->job_step = ipr_reset_next_stage;
7980 return IPR_RC_JOB_CONTINUE;
7981 }
7982
Linus Torvalds1da177e2005-04-16 15:20:36 -07007983 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
Brian King5469cb52007-03-29 12:42:40 -05007984 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007985 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7986 ipr_cmd->done = ipr_reset_ioa_job;
7987 add_timer(&ipr_cmd->timer);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007988 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007989
7990 LEAVE;
7991 return IPR_RC_JOB_RETURN;
7992}
7993
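/*
 * Note the two continuation paths above: SIS-64 adapters step through
 * ipr_reset_next_stage, polling the IPL init feedback register, while
 * older adapters simply arm ipr_oper_timeout and wait for the
 * transition-to-operational interrupt to drive the reset job forward.
 */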
7994/**
7995 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7996 * @ipr_cmd: ipr command struct
7997 *
7998 * This function is invoked when an adapter dump has run out
7999 * of processing time.
8000 *
8001 * Return value:
8002 * IPR_RC_JOB_CONTINUE
8003 **/
8004static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8005{
8006 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8007
8008 if (ioa_cfg->sdt_state == GET_DUMP)
Brian King41e9a692011-09-21 08:51:11 -05008009 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8010 else if (ioa_cfg->sdt_state == READ_DUMP)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008011 ioa_cfg->sdt_state = ABORT_DUMP;
8012
Brian King4c647e92011-10-15 09:08:56 -05008013 ioa_cfg->dump_timeout = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008014 ipr_cmd->job_step = ipr_reset_alert;
8015
8016 return IPR_RC_JOB_CONTINUE;
8017}
8018
8019/**
8020 * ipr_unit_check_no_data - Log a unit check/no data error log
8021 * @ioa_cfg: ioa config struct
8022 *
8023 * Logs an error indicating the adapter unit checked, but for some
8024 * reason, we were unable to fetch the unit check buffer.
8025 *
8026 * Return value:
8027 * nothing
8028 **/
8029static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8030{
8031 ioa_cfg->errors_logged++;
8032 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8033}
8034
8035/**
8036 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8037 * @ioa_cfg: ioa config struct
8038 *
8039 * Fetches the unit check buffer from the adapter by clocking the data
8040 * through the mailbox register.
8041 *
8042 * Return value:
8043 * nothing
8044 **/
8045static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8046{
8047 unsigned long mailbox;
8048 struct ipr_hostrcb *hostrcb;
8049 struct ipr_uc_sdt sdt;
8050 int rc, length;
Brian King65f56472007-04-26 16:00:12 -05008051 u32 ioasc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008052
8053 mailbox = readl(ioa_cfg->ioa_mailbox);
8054
Wayne Boyerdcbad002010-02-19 13:24:14 -08008055 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008056 ipr_unit_check_no_data(ioa_cfg);
8057 return;
8058 }
8059
8060 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8061 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8062 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8063
Wayne Boyerdcbad002010-02-19 13:24:14 -08008064 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8065 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8066 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008067 ipr_unit_check_no_data(ioa_cfg);
8068 return;
8069 }
8070
8071 /* Find length of the first sdt entry (UC buffer) */
Wayne Boyerdcbad002010-02-19 13:24:14 -08008072 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8073 length = be32_to_cpu(sdt.entry[0].end_token);
8074 else
8075 length = (be32_to_cpu(sdt.entry[0].end_token) -
8076 be32_to_cpu(sdt.entry[0].start_token)) &
8077 IPR_FMT2_MBX_ADDR_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008078
8079 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8080 struct ipr_hostrcb, queue);
8081 list_del(&hostrcb->queue);
8082 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8083
8084 rc = ipr_get_ldump_data_section(ioa_cfg,
Wayne Boyerdcbad002010-02-19 13:24:14 -08008085 be32_to_cpu(sdt.entry[0].start_token),
Linus Torvalds1da177e2005-04-16 15:20:36 -07008086 (__be32 *)&hostrcb->hcam,
8087 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8088
Brian King65f56472007-04-26 16:00:12 -05008089 if (!rc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008090 ipr_handle_log_data(ioa_cfg, hostrcb);
Wayne Boyer4565e372010-02-19 13:24:07 -08008091 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Brian King65f56472007-04-26 16:00:12 -05008092 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8093 ioa_cfg->sdt_state == GET_DUMP)
8094 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8095 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07008096 ipr_unit_check_no_data(ioa_cfg);
8097
8098 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8099}
8100
8101/**
Wayne Boyer110def82010-11-04 09:36:16 -07008102 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8103 * @ipr_cmd: ipr command struct
8104 *
 8105 * Description: This function retrieves the unit check buffer.
8106 *
8107 * Return value:
8108 * IPR_RC_JOB_RETURN
8109 **/
8110static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8111{
8112 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8113
8114 ENTER;
8115 ioa_cfg->ioa_unit_checked = 0;
8116 ipr_get_unit_check_buffer(ioa_cfg);
8117 ipr_cmd->job_step = ipr_reset_alert;
8118 ipr_reset_start_timer(ipr_cmd, 0);
8119
8120 LEAVE;
8121 return IPR_RC_JOB_RETURN;
8122}
8123
8124/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008125 * ipr_reset_restore_cfg_space - Restore PCI config space.
8126 * @ipr_cmd: ipr command struct
8127 *
8128 * Description: This function restores the saved PCI config space of
8129 * the adapter, fails all outstanding ops back to the callers, and
8130 * fetches the dump/unit check if applicable to this reset.
8131 *
8132 * Return value:
8133 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8134 **/
8135static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8136{
8137 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer630ad8312011-04-07 12:12:30 -07008138 u32 int_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008139
8140 ENTER;
Kleber Sacilotto de Souza99c965d2009-11-25 20:13:43 -02008141 ioa_cfg->pdev->state_saved = true;
Jon Mason1d3c16a2010-11-30 17:43:26 -06008142 pci_restore_state(ioa_cfg->pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008143
8144 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
Wayne Boyer96d21f02010-05-10 09:13:27 -07008145 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008146 return IPR_RC_JOB_CONTINUE;
8147 }
8148
8149 ipr_fail_all_ops(ioa_cfg);
8150
Wayne Boyer8701f182010-06-04 10:26:50 -07008151 if (ioa_cfg->sis64) {
8152 /* Set the adapter to the correct endian mode. */
8153 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8154 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8155 }
8156
Linus Torvalds1da177e2005-04-16 15:20:36 -07008157 if (ioa_cfg->ioa_unit_checked) {
Wayne Boyer110def82010-11-04 09:36:16 -07008158 if (ioa_cfg->sis64) {
8159 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8160 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8161 return IPR_RC_JOB_RETURN;
8162 } else {
8163 ioa_cfg->ioa_unit_checked = 0;
8164 ipr_get_unit_check_buffer(ioa_cfg);
8165 ipr_cmd->job_step = ipr_reset_alert;
8166 ipr_reset_start_timer(ipr_cmd, 0);
8167 return IPR_RC_JOB_RETURN;
8168 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008169 }
8170
8171 if (ioa_cfg->in_ioa_bringdown) {
8172 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8173 } else {
8174 ipr_cmd->job_step = ipr_reset_enable_ioa;
8175
8176 if (GET_DUMP == ioa_cfg->sdt_state) {
Brian King41e9a692011-09-21 08:51:11 -05008177 ioa_cfg->sdt_state = READ_DUMP;
Brian King4c647e92011-10-15 09:08:56 -05008178 ioa_cfg->dump_timeout = 0;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03008179 if (ioa_cfg->sis64)
8180 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8181 else
8182 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008183 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8184 schedule_work(&ioa_cfg->work_q);
8185 return IPR_RC_JOB_RETURN;
8186 }
8187 }
8188
Wayne Boyer438b0332010-05-10 09:13:00 -07008189 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008190 return IPR_RC_JOB_CONTINUE;
8191}
8192
8193/**
Brian Kinge619e1a2007-01-23 11:25:37 -06008194 * ipr_reset_bist_done - BIST has completed on the adapter.
8195 * @ipr_cmd: ipr command struct
8196 *
8197 * Description: Unblock config space and resume the reset process.
8198 *
8199 * Return value:
8200 * IPR_RC_JOB_CONTINUE
8201 **/
8202static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8203{
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008204 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8205
Brian Kinge619e1a2007-01-23 11:25:37 -06008206 ENTER;
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008207 if (ioa_cfg->cfg_locked)
8208 pci_cfg_access_unlock(ioa_cfg->pdev);
8209 ioa_cfg->cfg_locked = 0;
Brian Kinge619e1a2007-01-23 11:25:37 -06008210 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8211 LEAVE;
8212 return IPR_RC_JOB_CONTINUE;
8213}
8214
8215/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008216 * ipr_reset_start_bist - Run BIST on the adapter.
8217 * @ipr_cmd: ipr command struct
8218 *
8219 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8220 *
8221 * Return value:
8222 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8223 **/
8224static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8225{
8226 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008227 int rc = PCIBIOS_SUCCESSFUL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008228
8229 ENTER;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008230 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8231 writel(IPR_UPROCI_SIS64_START_BIST,
8232 ioa_cfg->regs.set_uproc_interrupt_reg32);
8233 else
8234 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8235
8236 if (rc == PCIBIOS_SUCCESSFUL) {
Brian Kinge619e1a2007-01-23 11:25:37 -06008237 ipr_cmd->job_step = ipr_reset_bist_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008238 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8239 rc = IPR_RC_JOB_RETURN;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008240 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008241 if (ioa_cfg->cfg_locked)
8242 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8243 ioa_cfg->cfg_locked = 0;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008244 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8245 rc = IPR_RC_JOB_CONTINUE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008246 }
8247
8248 LEAVE;
8249 return rc;
8250}
8251
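/*
 * Two BIST trigger methods are used above: an MMIO write of
 * IPR_UPROCI_SIS64_START_BIST to the uproc interrupt register, or a
 * write of PCI_BIST_START to the PCI config BIST register; either way
 * the reset job waits IPR_WAIT_FOR_BIST_TIMEOUT before proceeding to
 * ipr_reset_bist_done.
 */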
8252/**
Brian King463fc692007-05-07 17:09:05 -05008253 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8254 * @ipr_cmd: ipr command struct
8255 *
8256 * Description: This clears PCI reset to the adapter and delays two seconds.
8257 *
8258 * Return value:
8259 * IPR_RC_JOB_RETURN
8260 **/
8261static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8262{
8263 ENTER;
8264 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
8265 ipr_cmd->job_step = ipr_reset_bist_done;
8266 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8267 LEAVE;
8268 return IPR_RC_JOB_RETURN;
8269}
8270
8271/**
8272 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8273 * @ipr_cmd: ipr command struct
8274 *
8275 * Description: This asserts PCI reset to the adapter.
8276 *
8277 * Return value:
8278 * IPR_RC_JOB_RETURN
8279 **/
8280static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8281{
8282 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8283 struct pci_dev *pdev = ioa_cfg->pdev;
8284
8285 ENTER;
Brian King463fc692007-05-07 17:09:05 -05008286 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8287 ipr_cmd->job_step = ipr_reset_slot_reset_done;
8288 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
8289 LEAVE;
8290 return IPR_RC_JOB_RETURN;
8291}
8292
8293/**
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008294 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8295 * @ipr_cmd: ipr command struct
8296 *
8297 * Description: This attempts to block config access to the IOA.
8298 *
8299 * Return value:
8300 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8301 **/
8302static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8303{
8304 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8305 int rc = IPR_RC_JOB_CONTINUE;
8306
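	/*
	 * Try to lock config space. If the lock is unavailable, re-arm the
	 * reset timer and retry until u.time_left is exhausted, at which
	 * point the reset proceeds without the lock.
	 */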
8307 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8308 ioa_cfg->cfg_locked = 1;
8309 ipr_cmd->job_step = ioa_cfg->reset;
8310 } else {
8311 if (ipr_cmd->u.time_left) {
8312 rc = IPR_RC_JOB_RETURN;
8313 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8314 ipr_reset_start_timer(ipr_cmd,
8315 IPR_CHECK_FOR_RESET_TIMEOUT);
8316 } else {
8317 ipr_cmd->job_step = ioa_cfg->reset;
8318 dev_err(&ioa_cfg->pdev->dev,
8319 "Timed out waiting to lock config access. Resetting anyway.\n");
8320 }
8321 }
8322
8323 return rc;
8324}
8325
8326/**
8327 * ipr_reset_block_config_access - Block config access to the IOA
8328 * @ipr_cmd: ipr command struct
8329 *
8330 * Description: This attempts to block config access to the IOA
8331 *
8332 * Return value:
8333 * IPR_RC_JOB_CONTINUE
8334 **/
8335static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8336{
8337 ipr_cmd->ioa_cfg->cfg_locked = 0;
8338 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8339 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8340 return IPR_RC_JOB_CONTINUE;
8341}
8342
8343/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008344 * ipr_reset_allowed - Query whether or not IOA can be reset
8345 * @ioa_cfg: ioa config struct
8346 *
8347 * Return value:
8348 * 0 if reset not allowed / non-zero if reset is allowed
8349 **/
8350static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8351{
8352 volatile u32 temp_reg;
8353
8354 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8355 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8356}
8357
8358/**
8359 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8360 * @ipr_cmd: ipr command struct
8361 *
8362 * Description: This function waits for adapter permission to run BIST,
8363 * then runs BIST. If the adapter does not give permission after a
8364 * reasonable time, we will reset the adapter anyway. The impact of
8365 * resetting the adapter without warning the adapter is the risk of
8366 * losing the persistent error log on the adapter. If the adapter is
8367 * reset while it is writing to the flash on the adapter, the flash
8368 * segment will have bad ECC and be zeroed.
8369 *
8370 * Return value:
8371 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8372 **/
8373static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8374{
8375 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8376 int rc = IPR_RC_JOB_RETURN;
8377
8378 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8379 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8380 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8381 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008382 ipr_cmd->job_step = ipr_reset_block_config_access;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008383 rc = IPR_RC_JOB_CONTINUE;
8384 }
8385
8386 return rc;
8387}
8388
8389/**
Wayne Boyer8701f182010-06-04 10:26:50 -07008390 * ipr_reset_alert - Alert the adapter of a pending reset
Linus Torvalds1da177e2005-04-16 15:20:36 -07008391 * @ipr_cmd: ipr command struct
8392 *
8393 * Description: This function alerts the adapter that it will be reset.
8394 * If memory space is not currently enabled, proceed directly
8395 * to running BIST on the adapter. The timer must always be started
8396 * so we guarantee we do not run BIST from ipr_isr.
8397 *
8398 * Return value:
8399 * IPR_RC_JOB_RETURN
8400 **/
8401static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8402{
8403 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8404 u16 cmd_reg;
8405 int rc;
8406
8407 ENTER;
8408 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8409
8410 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8411 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
Wayne Boyer214777b2010-02-19 13:24:26 -08008412 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008413 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8414 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008415 ipr_cmd->job_step = ipr_reset_block_config_access;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008416 }
8417
8418 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8419 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8420
8421 LEAVE;
8422 return IPR_RC_JOB_RETURN;
8423}
8424
8425/**
8426 * ipr_reset_ucode_download_done - Microcode download completion
8427 * @ipr_cmd: ipr command struct
8428 *
8429 * Description: This function unmaps the microcode download buffer.
8430 *
8431 * Return value:
8432 * IPR_RC_JOB_CONTINUE
8433 **/
8434static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8435{
8436 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8437 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8438
8439 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
8440 sglist->num_sg, DMA_TO_DEVICE);
8441
8442 ipr_cmd->job_step = ipr_reset_alert;
8443 return IPR_RC_JOB_CONTINUE;
8444}
8445
8446/**
8447 * ipr_reset_ucode_download - Download microcode to the adapter
8448 * @ipr_cmd: ipr command struct
8449 *
 8450 * Description: This function checks to see if there is microcode
8451 * to download to the adapter. If there is, a download is performed.
8452 *
8453 * Return value:
8454 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8455 **/
8456static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8457{
8458 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8459 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8460
8461 ENTER;
8462 ipr_cmd->job_step = ipr_reset_alert;
8463
8464 if (!sglist)
8465 return IPR_RC_JOB_CONTINUE;
8466
8467 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8468 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8469 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8470 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
8471 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8472 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8473 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8474
Wayne Boyera32c0552010-02-19 13:23:36 -08008475 if (ioa_cfg->sis64)
8476 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8477 else
8478 ipr_build_ucode_ioadl(ipr_cmd, sglist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008479 ipr_cmd->job_step = ipr_reset_ucode_download_done;
8480
8481 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8482 IPR_WRITE_BUFFER_TIMEOUT);
8483
8484 LEAVE;
8485 return IPR_RC_JOB_RETURN;
8486}
8487
8488/**
8489 * ipr_reset_shutdown_ioa - Shutdown the adapter
8490 * @ipr_cmd: ipr command struct
8491 *
8492 * Description: This function issues an adapter shutdown of the
8493 * specified type to the specified adapter as part of the
8494 * adapter reset job.
8495 *
8496 * Return value:
8497 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8498 **/
8499static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8500{
8501 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8502 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8503 unsigned long timeout;
8504 int rc = IPR_RC_JOB_CONTINUE;
8505
8506 ENTER;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008507 if (shutdown_type != IPR_SHUTDOWN_NONE &&
8508 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008509 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8510 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8511 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8512 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8513
Brian Kingac09c342007-04-26 16:00:16 -05008514 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8515 timeout = IPR_SHUTDOWN_TIMEOUT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008516 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8517 timeout = IPR_INTERNAL_TIMEOUT;
Brian Kingac09c342007-04-26 16:00:16 -05008518 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8519 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008520 else
Brian Kingac09c342007-04-26 16:00:16 -05008521 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008522
8523 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8524
8525 rc = IPR_RC_JOB_RETURN;
8526 ipr_cmd->job_step = ipr_reset_ucode_download;
8527 } else
8528 ipr_cmd->job_step = ipr_reset_alert;
8529
8530 LEAVE;
8531 return rc;
8532}
8533
8534/**
8535 * ipr_reset_ioa_job - Adapter reset job
8536 * @ipr_cmd: ipr command struct
8537 *
8538 * Description: This function is the job router for the adapter reset job.
8539 *
8540 * Return value:
8541 * none
8542 **/
8543static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8544{
8545 u32 rc, ioasc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008546 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8547
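	/*
	 * Walk the reset job state machine. Each job step returns
	 * IPR_RC_JOB_CONTINUE to run the next step immediately, or
	 * IPR_RC_JOB_RETURN to wait for an interrupt or timer before
	 * this routine is re-entered.
	 */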
8548 do {
Wayne Boyer96d21f02010-05-10 09:13:27 -07008549 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008550
8551 if (ioa_cfg->reset_cmd != ipr_cmd) {
8552 /*
8553 * We are doing nested adapter resets and this is
8554 * not the current reset job.
8555 */
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008556 list_add_tail(&ipr_cmd->queue,
8557 &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008558 return;
8559 }
8560
8561 if (IPR_IOASC_SENSE_KEY(ioasc)) {
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06008562 rc = ipr_cmd->job_step_failed(ipr_cmd);
8563 if (rc == IPR_RC_JOB_RETURN)
8564 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008565 }
8566
8567 ipr_reinit_ipr_cmnd(ipr_cmd);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06008568 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008569 rc = ipr_cmd->job_step(ipr_cmd);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03008570 } while (rc == IPR_RC_JOB_CONTINUE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008571}
8572
8573/**
8574 * _ipr_initiate_ioa_reset - Initiate an adapter reset
8575 * @ioa_cfg: ioa config struct
8576 * @job_step: first job step of reset job
8577 * @shutdown_type: shutdown type
8578 *
8579 * Description: This function will initiate the reset of the given adapter
8580 * starting at the selected job step.
8581 * If the caller needs to wait on the completion of the reset,
8582 * the caller must sleep on the reset_wait_q.
8583 *
8584 * Return value:
8585 * none
8586 **/
8587static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8588 int (*job_step) (struct ipr_cmnd *),
8589 enum ipr_shutdown_type shutdown_type)
8590{
8591 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008592 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008593
8594 ioa_cfg->in_reset_reload = 1;
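	/*
	 * Disallow new commands on every HRRQ, then block the SCSI midlayer
	 * (unless the adapter is being removed) before starting the reset job.
	 */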
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008595 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8596 spin_lock(&ioa_cfg->hrrq[i]._lock);
8597 ioa_cfg->hrrq[i].allow_cmds = 0;
8598 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8599 }
8600 wmb();
Brian Kingbfae7822013-01-30 23:45:08 -06008601 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
8602 scsi_block_requests(ioa_cfg->host);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008603
8604 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8605 ioa_cfg->reset_cmd = ipr_cmd;
8606 ipr_cmd->job_step = job_step;
8607 ipr_cmd->u.shutdown_type = shutdown_type;
8608
8609 ipr_reset_ioa_job(ipr_cmd);
8610}
8611
8612/**
8613 * ipr_initiate_ioa_reset - Initiate an adapter reset
8614 * @ioa_cfg: ioa config struct
8615 * @shutdown_type: shutdown type
8616 *
8617 * Description: This function will initiate the reset of the given adapter.
8618 * If the caller needs to wait on the completion of the reset,
8619 * the caller must sleep on the reset_wait_q.
8620 *
8621 * Return value:
8622 * none
8623 **/
8624static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8625 enum ipr_shutdown_type shutdown_type)
8626{
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008627 int i;
8628
8629 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008630 return;
8631
Brian King41e9a692011-09-21 08:51:11 -05008632 if (ioa_cfg->in_reset_reload) {
8633 if (ioa_cfg->sdt_state == GET_DUMP)
8634 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8635 else if (ioa_cfg->sdt_state == READ_DUMP)
8636 ioa_cfg->sdt_state = ABORT_DUMP;
8637 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008638
8639 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8640 dev_err(&ioa_cfg->pdev->dev,
8641 "IOA taken offline - error recovery failed\n");
8642
8643 ioa_cfg->reset_retries = 0;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008644 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8645 spin_lock(&ioa_cfg->hrrq[i]._lock);
8646 ioa_cfg->hrrq[i].ioa_is_dead = 1;
8647 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8648 }
8649 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008650
8651 if (ioa_cfg->in_ioa_bringdown) {
8652 ioa_cfg->reset_cmd = NULL;
8653 ioa_cfg->in_reset_reload = 0;
8654 ipr_fail_all_ops(ioa_cfg);
8655 wake_up_all(&ioa_cfg->reset_wait_q);
8656
Brian Kingbfae7822013-01-30 23:45:08 -06008657 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8658 spin_unlock_irq(ioa_cfg->host->host_lock);
8659 scsi_unblock_requests(ioa_cfg->host);
8660 spin_lock_irq(ioa_cfg->host->host_lock);
8661 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008662 return;
8663 } else {
8664 ioa_cfg->in_ioa_bringdown = 1;
8665 shutdown_type = IPR_SHUTDOWN_NONE;
8666 }
8667 }
8668
8669 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8670 shutdown_type);
8671}
8672
8673/**
Linas Vepstasf8a88b192006-02-03 16:52:42 -06008674 * ipr_reset_freeze - Hold off all I/O activity
8675 * @ipr_cmd: ipr command struct
8676 *
8677 * Description: If the PCI slot is frozen, hold off all I/O
8678 * activity; then, as soon as the slot is available again,
8679 * initiate an adapter reset.
8680 */
8681static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8682{
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008683 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8684 int i;
8685
Linas Vepstasf8a88b192006-02-03 16:52:42 -06008686 /* Disallow new interrupts, avoid loop */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008687 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8688 spin_lock(&ioa_cfg->hrrq[i]._lock);
8689 ioa_cfg->hrrq[i].allow_interrupts = 0;
8690 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8691 }
8692 wmb();
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008693 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06008694 ipr_cmd->done = ipr_reset_ioa_job;
8695 return IPR_RC_JOB_RETURN;
8696}
8697
8698/**
8699 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8700 * @pdev: PCI device struct
8701 *
8702 * Description: This routine is called to tell us that the PCI bus
8703 * is down. Can't do anything here, except put the device driver
8704 * into a holding pattern, waiting for the PCI bus to come back.
8705 */
8706static void ipr_pci_frozen(struct pci_dev *pdev)
8707{
8708 unsigned long flags = 0;
8709 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8710
8711 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8712 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8713 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8714}
8715
8716/**
8717 * ipr_pci_slot_reset - Called when PCI slot has been reset.
8718 * @pdev: PCI device struct
8719 *
8720 * Description: This routine is called by the pci error recovery
8721 * code after the PCI slot has been reset, just before we
8722 * should resume normal operations.
8723 */
8724static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8725{
8726 unsigned long flags = 0;
8727 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8728
8729 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King463fc692007-05-07 17:09:05 -05008730 if (ioa_cfg->needs_warm_reset)
8731 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8732 else
8733 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8734 IPR_SHUTDOWN_NONE);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06008735 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8736 return PCI_ERS_RESULT_RECOVERED;
8737}
8738
8739/**
8740 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8741 * @pdev: PCI device struct
8742 *
8743 * Description: This routine is called when the PCI bus has
8744 * permanently failed.
8745 */
8746static void ipr_pci_perm_failure(struct pci_dev *pdev)
8747{
8748 unsigned long flags = 0;
8749 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008750 int i;
Linas Vepstasf8a88b192006-02-03 16:52:42 -06008751
8752 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8753 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8754 ioa_cfg->sdt_state = ABORT_DUMP;
wenxiong@linux.vnet.ibm.com96b04db2013-04-17 09:34:06 -05008755 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
Linas Vepstasf8a88b192006-02-03 16:52:42 -06008756 ioa_cfg->in_ioa_bringdown = 1;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008757 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8758 spin_lock(&ioa_cfg->hrrq[i]._lock);
8759 ioa_cfg->hrrq[i].allow_cmds = 0;
8760 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8761 }
8762 wmb();
Linas Vepstasf8a88b192006-02-03 16:52:42 -06008763 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8764 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8765}
8766
8767/**
8768 * ipr_pci_error_detected - Called when a PCI error is detected.
8769 * @pdev: PCI device struct
8770 * @state: PCI channel state
8771 *
8772 * Description: Called when a PCI error is detected.
8773 *
8774 * Return value:
8775 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8776 */
8777static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8778 pci_channel_state_t state)
8779{
8780 switch (state) {
8781 case pci_channel_io_frozen:
8782 ipr_pci_frozen(pdev);
8783 return PCI_ERS_RESULT_NEED_RESET;
8784 case pci_channel_io_perm_failure:
8785 ipr_pci_perm_failure(pdev);
8786 return PCI_ERS_RESULT_DISCONNECT;
8787 break;
8788 default:
8789 break;
8790 }
8791 return PCI_ERS_RESULT_NEED_RESET;
8792}
8793
8794/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008795 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8796 * @ioa_cfg: ioa cfg struct
8797 *
8798 * Description: This is the second phase of adapter intialization
8799 * This function takes care of initilizing the adapter to the point
8800 * where it can accept new commands.
8801
8802 * Return value:
Joe Perchesb1c11812008-02-03 17:28:22 +02008803 * 0 on success / -EIO on failure
Linus Torvalds1da177e2005-04-16 15:20:36 -07008804 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08008805static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008806{
8807 int rc = 0;
8808 unsigned long host_lock_flags = 0;
8809
8810 ENTER;
8811 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8812 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
brking@us.ibm.comce155cc2005-11-17 09:35:12 -06008813 if (ioa_cfg->needs_hard_reset) {
8814 ioa_cfg->needs_hard_reset = 0;
8815 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8816 } else
8817 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8818 IPR_SHUTDOWN_NONE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008819 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8820 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8821 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8822
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008823 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008824 rc = -EIO;
8825 } else if (ipr_invalid_adapter(ioa_cfg)) {
8826 if (!ipr_testmode)
8827 rc = -EIO;
8828
8829 dev_err(&ioa_cfg->pdev->dev,
8830 "Adapter not supported in this hardware configuration.\n");
8831 }
8832
8833 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8834
8835 LEAVE;
8836 return rc;
8837}
8838
8839/**
8840 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8841 * @ioa_cfg: ioa config struct
8842 *
8843 * Return value:
8844 * none
8845 **/
8846static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8847{
8848 int i;
8849
8850 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8851 if (ioa_cfg->ipr_cmnd_list[i])
8852 pci_pool_free(ioa_cfg->ipr_cmd_pool,
8853 ioa_cfg->ipr_cmnd_list[i],
8854 ioa_cfg->ipr_cmnd_list_dma[i]);
8855
8856 ioa_cfg->ipr_cmnd_list[i] = NULL;
8857 }
8858
8859 if (ioa_cfg->ipr_cmd_pool)
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03008860 pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008861
Brian King89aad422012-03-14 21:20:10 -05008862 kfree(ioa_cfg->ipr_cmnd_list);
8863 kfree(ioa_cfg->ipr_cmnd_list_dma);
8864 ioa_cfg->ipr_cmnd_list = NULL;
8865 ioa_cfg->ipr_cmnd_list_dma = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008866 ioa_cfg->ipr_cmd_pool = NULL;
8867}
8868
8869/**
8870 * ipr_free_mem - Frees memory allocated for an adapter
8871 * @ioa_cfg: ioa cfg struct
8872 *
8873 * Return value:
8874 * nothing
8875 **/
8876static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8877{
8878 int i;
8879
8880 kfree(ioa_cfg->res_entries);
8881 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
8882 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8883 ipr_free_cmd_blks(ioa_cfg);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008884
8885 for (i = 0; i < ioa_cfg->hrrq_num; i++)
8886 pci_free_consistent(ioa_cfg->pdev,
8887 sizeof(u32) * ioa_cfg->hrrq[i].size,
8888 ioa_cfg->hrrq[i].host_rrq,
8889 ioa_cfg->hrrq[i].host_rrq_dma);
8890
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008891 pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
8892 ioa_cfg->u.cfg_table,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008893 ioa_cfg->cfg_table_dma);
8894
8895 for (i = 0; i < IPR_NUM_HCAMS; i++) {
8896 pci_free_consistent(ioa_cfg->pdev,
8897 sizeof(struct ipr_hostrcb),
8898 ioa_cfg->hostrcb[i],
8899 ioa_cfg->hostrcb_dma[i]);
8900 }
8901
8902 ipr_free_dump(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008903 kfree(ioa_cfg->trace);
8904}
8905
8906/**
8907 * ipr_free_all_resources - Free all allocated resources for an adapter.
 8908 * @ioa_cfg: ioa config struct
8909 *
8910 * This function frees all allocated resources for the
8911 * specified adapter.
8912 *
8913 * Return value:
8914 * none
8915 **/
8916static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8917{
8918 struct pci_dev *pdev = ioa_cfg->pdev;
8919
8920 ENTER;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008921 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
8922 ioa_cfg->intr_flag == IPR_USE_MSIX) {
8923 int i;
8924 for (i = 0; i < ioa_cfg->nvectors; i++)
8925 free_irq(ioa_cfg->vectors_info[i].vec,
8926 &ioa_cfg->hrrq[i]);
8927 } else
8928 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
8929
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008930 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008931 pci_disable_msi(pdev);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008932 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
8933 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008934 pci_disable_msix(pdev);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008935 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
8936 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008937
Linus Torvalds1da177e2005-04-16 15:20:36 -07008938 iounmap(ioa_cfg->hdw_dma_regs);
8939 pci_release_regions(pdev);
8940 ipr_free_mem(ioa_cfg);
8941 scsi_host_put(ioa_cfg->host);
8942 pci_disable_device(pdev);
8943 LEAVE;
8944}
8945
8946/**
8947 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8948 * @ioa_cfg: ioa config struct
8949 *
8950 * Return value:
8951 * 0 on success / -ENOMEM on allocation failure
8952 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08008953static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008954{
8955 struct ipr_cmnd *ipr_cmd;
8956 struct ipr_ioarcb *ioarcb;
8957 dma_addr_t dma_addr;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008958 int i, entries_each_hrrq, hrrq_id = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008959
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03008960 ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
8961 sizeof(struct ipr_cmnd), 512, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008962
8963 if (!ioa_cfg->ipr_cmd_pool)
8964 return -ENOMEM;
8965
Brian King89aad422012-03-14 21:20:10 -05008966 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8967 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8968
8969 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
8970 ipr_free_cmd_blks(ioa_cfg);
8971 return -ENOMEM;
8972 }
8973
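	/*
	 * Partition the command blocks across the HRRQs: with multiple HRRQs,
	 * hrrq[0] is reserved for internal commands and the remaining base
	 * command blocks are split evenly among the other HRRQs (any leftover
	 * blocks are added to the last HRRQ below).
	 */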
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008974 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8975 if (ioa_cfg->hrrq_num > 1) {
8976 if (i == 0) {
8977 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
8978 ioa_cfg->hrrq[i].min_cmd_id = 0;
8979 ioa_cfg->hrrq[i].max_cmd_id =
8980 (entries_each_hrrq - 1);
8981 } else {
8982 entries_each_hrrq =
8983 IPR_NUM_BASE_CMD_BLKS/
8984 (ioa_cfg->hrrq_num - 1);
8985 ioa_cfg->hrrq[i].min_cmd_id =
8986 IPR_NUM_INTERNAL_CMD_BLKS +
8987 (i - 1) * entries_each_hrrq;
8988 ioa_cfg->hrrq[i].max_cmd_id =
8989 (IPR_NUM_INTERNAL_CMD_BLKS +
8990 i * entries_each_hrrq - 1);
8991 }
8992 } else {
8993 entries_each_hrrq = IPR_NUM_CMD_BLKS;
8994 ioa_cfg->hrrq[i].min_cmd_id = 0;
8995 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
8996 }
8997 ioa_cfg->hrrq[i].size = entries_each_hrrq;
8998 }
8999
9000 BUG_ON(ioa_cfg->hrrq_num == 0);
9001
9002 i = IPR_NUM_CMD_BLKS -
9003 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9004 if (i > 0) {
9005 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9006 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9007 }
9008
Linus Torvalds1da177e2005-04-16 15:20:36 -07009009 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03009010 ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009011
9012 if (!ipr_cmd) {
9013 ipr_free_cmd_blks(ioa_cfg);
9014 return -ENOMEM;
9015 }
9016
9017 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9018 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9019 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9020
9021 ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08009022 ipr_cmd->dma_addr = dma_addr;
9023 if (ioa_cfg->sis64)
9024 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9025 else
9026 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9027
Linus Torvalds1da177e2005-04-16 15:20:36 -07009028 ioarcb->host_response_handle = cpu_to_be32(i << 2);
Wayne Boyera32c0552010-02-19 13:23:36 -08009029 if (ioa_cfg->sis64) {
9030 ioarcb->u.sis64_addr_data.data_ioadl_addr =
9031 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9032 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
Wayne Boyer96d21f02010-05-10 09:13:27 -07009033 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
Wayne Boyera32c0552010-02-19 13:23:36 -08009034 } else {
9035 ioarcb->write_ioadl_addr =
9036 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9037 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9038 ioarcb->ioasa_host_pci_addr =
Wayne Boyer96d21f02010-05-10 09:13:27 -07009039 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
Wayne Boyera32c0552010-02-19 13:23:36 -08009040 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009041 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9042 ipr_cmd->cmd_index = i;
9043 ipr_cmd->ioa_cfg = ioa_cfg;
9044 ipr_cmd->sense_buffer_dma = dma_addr +
9045 offsetof(struct ipr_cmnd, sense_buffer);
9046
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009047 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9048 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9049 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9050 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9051 hrrq_id++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009052 }
9053
9054 return 0;
9055}
9056
9057/**
9058 * ipr_alloc_mem - Allocate memory for an adapter
9059 * @ioa_cfg: ioa config struct
9060 *
9061 * Return value:
9062 * 0 on success / non-zero for error
9063 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009064static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009065{
9066 struct pci_dev *pdev = ioa_cfg->pdev;
9067 int i, rc = -ENOMEM;
9068
9069 ENTER;
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06009070 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009071 ioa_cfg->max_devs_supported, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009072
9073 if (!ioa_cfg->res_entries)
9074 goto out;
9075
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009076 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009077 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009078 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9079 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009080
9081 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
9082 sizeof(struct ipr_misc_cbs),
9083 &ioa_cfg->vpd_cbs_dma);
9084
9085 if (!ioa_cfg->vpd_cbs)
9086 goto out_free_res_entries;
9087
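	/* hrrq[0] shares the SCSI host lock; each additional HRRQ uses its own lock */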
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009088 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9089 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9090 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009091 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9092 if (i == 0)
9093 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9094 else
9095 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009096 }
9097
Linus Torvalds1da177e2005-04-16 15:20:36 -07009098 if (ipr_alloc_cmd_blks(ioa_cfg))
9099 goto out_free_vpd_cbs;
9100
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009101 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9102 ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
9103 sizeof(u32) * ioa_cfg->hrrq[i].size,
9104 &ioa_cfg->hrrq[i].host_rrq_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009105
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009106 if (!ioa_cfg->hrrq[i].host_rrq) {
9107 while (--i > 0)
9108 pci_free_consistent(pdev,
9109 sizeof(u32) * ioa_cfg->hrrq[i].size,
9110 ioa_cfg->hrrq[i].host_rrq,
9111 ioa_cfg->hrrq[i].host_rrq_dma);
9112 goto out_ipr_free_cmd_blocks;
9113 }
9114 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9115 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009116
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009117 ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
9118 ioa_cfg->cfg_table_size,
9119 &ioa_cfg->cfg_table_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009120
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009121 if (!ioa_cfg->u.cfg_table)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009122 goto out_free_host_rrq;
9123
9124 for (i = 0; i < IPR_NUM_HCAMS; i++) {
9125 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
9126 sizeof(struct ipr_hostrcb),
9127 &ioa_cfg->hostrcb_dma[i]);
9128
9129 if (!ioa_cfg->hostrcb[i])
9130 goto out_free_hostrcb_dma;
9131
9132 ioa_cfg->hostrcb[i]->hostrcb_dma =
9133 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
Brian King49dc6a12006-11-21 10:28:35 -06009134 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009135 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9136 }
9137
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06009138 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
Linus Torvalds1da177e2005-04-16 15:20:36 -07009139 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9140
9141 if (!ioa_cfg->trace)
9142 goto out_free_hostrcb_dma;
9143
Linus Torvalds1da177e2005-04-16 15:20:36 -07009144 rc = 0;
9145out:
9146 LEAVE;
9147 return rc;
9148
9149out_free_hostrcb_dma:
9150 while (i-- > 0) {
9151 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
9152 ioa_cfg->hostrcb[i],
9153 ioa_cfg->hostrcb_dma[i]);
9154 }
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009155 pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
9156 ioa_cfg->u.cfg_table,
9157 ioa_cfg->cfg_table_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009158out_free_host_rrq:
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009159 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9160 pci_free_consistent(pdev,
9161 sizeof(u32) * ioa_cfg->hrrq[i].size,
9162 ioa_cfg->hrrq[i].host_rrq,
9163 ioa_cfg->hrrq[i].host_rrq_dma);
9164 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009165out_ipr_free_cmd_blocks:
9166 ipr_free_cmd_blks(ioa_cfg);
9167out_free_vpd_cbs:
9168 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
9169 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9170out_free_res_entries:
9171 kfree(ioa_cfg->res_entries);
9172 goto out;
9173}
9174
9175/**
9176 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9177 * @ioa_cfg: ioa config struct
9178 *
9179 * Return value:
9180 * none
9181 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009182static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009183{
9184 int i;
9185
9186 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9187 ioa_cfg->bus_attr[i].bus = i;
9188 ioa_cfg->bus_attr[i].qas_enabled = 0;
9189 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9190 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9191 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9192 else
9193 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9194 }
9195}
9196
9197/**
9198 * ipr_init_ioa_cfg - Initialize IOA config struct
9199 * @ioa_cfg: ioa config struct
9200 * @host: scsi host struct
9201 * @pdev: PCI dev struct
9202 *
9203 * Return value:
9204 * none
9205 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009206static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9207 struct Scsi_Host *host, struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009208{
9209 const struct ipr_interrupt_offsets *p;
9210 struct ipr_interrupts *t;
9211 void __iomem *base;
9212
9213 ioa_cfg->host = host;
9214 ioa_cfg->pdev = pdev;
9215 ioa_cfg->log_level = ipr_log_level;
brking@us.ibm.com3d1d0da2005-11-01 17:01:54 -06009216 ioa_cfg->doorbell = IPR_DOORBELL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009217 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9218 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009219 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9220 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9221 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9222 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9223
Linus Torvalds1da177e2005-04-16 15:20:36 -07009224 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9225 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9226 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9227 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
David Howellsc4028952006-11-22 14:57:56 +00009228 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009229 init_waitqueue_head(&ioa_cfg->reset_wait_q);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009230 init_waitqueue_head(&ioa_cfg->msi_wait_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009231 ioa_cfg->sdt_state = INACTIVE;
9232
9233 ipr_initialize_bus_attr(ioa_cfg);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009234 ioa_cfg->max_devs_supported = ipr_max_devs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009235
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009236 if (ioa_cfg->sis64) {
9237 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9238 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9239 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9240 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9241 } else {
9242 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9243 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9244 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9245 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9246 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009247 host->max_channel = IPR_MAX_BUS_TO_SCAN;
9248 host->unique_id = host->host_no;
9249 host->max_cmd_len = IPR_MAX_CDB_LEN;
Brian King89aad422012-03-14 21:20:10 -05009250 host->can_queue = ioa_cfg->max_cmds;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009251 pci_set_drvdata(pdev, ioa_cfg);
9252
9253 p = &ioa_cfg->chip_cfg->regs;
9254 t = &ioa_cfg->regs;
9255 base = ioa_cfg->hdw_dma_regs;
9256
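	/* Convert the chip-specific register offsets into mapped MMIO addresses */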
9257 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9258 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009259 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009260 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009261 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009262 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009263 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009264 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009265 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009266 t->ioarrin_reg = base + p->ioarrin_reg;
9267 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009268 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009269 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009270 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009271 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009272 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009273
9274 if (ioa_cfg->sis64) {
Wayne Boyer214777b2010-02-19 13:24:26 -08009275 t->init_feedback_reg = base + p->init_feedback_reg;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009276 t->dump_addr_reg = base + p->dump_addr_reg;
9277 t->dump_data_reg = base + p->dump_data_reg;
Wayne Boyer8701f182010-06-04 10:26:50 -07009278 t->endian_swap_reg = base + p->endian_swap_reg;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009279 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009280}
9281
9282/**
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009283 * ipr_get_chip_info - Find adapter chip information
Linus Torvalds1da177e2005-04-16 15:20:36 -07009284 * @dev_id: PCI device id struct
9285 *
9286 * Return value:
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009287 * ptr to chip information on success / NULL on failure
Linus Torvalds1da177e2005-04-16 15:20:36 -07009288 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009289static const struct ipr_chip_t *
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009290ipr_get_chip_info(const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009291{
9292 int i;
9293
Linus Torvalds1da177e2005-04-16 15:20:36 -07009294 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9295 if (ipr_chip[i].vendor == dev_id->vendor &&
9296 ipr_chip[i].device == dev_id->device)
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009297 return &ipr_chip[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07009298 return NULL;
9299}
9300
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009301static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9302{
9303 struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
9304 int i, err, vectors;
9305
9306 for (i = 0; i < ARRAY_SIZE(entries); ++i)
9307 entries[i].entry = i;
9308
9309 vectors = ipr_number_of_msix;
9310
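	/*
	 * pci_enable_msix() returns a positive count when fewer vectors are
	 * available than requested; retry with the reduced count until it
	 * succeeds (0) or fails outright (< 0).
	 */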
9311 while ((err = pci_enable_msix(ioa_cfg->pdev, entries, vectors)) > 0)
9312 vectors = err;
9313
9314 if (err < 0) {
9315 pci_disable_msix(ioa_cfg->pdev);
9316 return err;
9317 }
9318
9319 if (!err) {
9320 for (i = 0; i < vectors; i++)
9321 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9322 ioa_cfg->nvectors = vectors;
9323 }
9324
9325 return err;
9326}
9327
9328static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9329{
9330 int i, err, vectors;
9331
9332 vectors = ipr_number_of_msix;
9333
9334 while ((err = pci_enable_msi_block(ioa_cfg->pdev, vectors)) > 0)
9335 vectors = err;
9336
9337 if (err < 0) {
9338 pci_disable_msi(ioa_cfg->pdev);
9339 return err;
9340 }
9341
9342 if (!err) {
9343 for (i = 0; i < vectors; i++)
9344 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9345 ioa_cfg->nvectors = vectors;
9346 }
9347
9348 return err;
9349}
9350
9351static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9352{
9353 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9354
9355 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9356 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9357 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9358 ioa_cfg->vectors_info[vec_idx].
9359 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9360 }
9361}
9362
9363static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9364{
9365 int i, rc;
9366
9367 for (i = 1; i < ioa_cfg->nvectors; i++) {
9368 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9369 ipr_isr_mhrrq,
9370 0,
9371 ioa_cfg->vectors_info[i].desc,
9372 &ioa_cfg->hrrq[i]);
9373 if (rc) {
9374 while (--i >= 0)
9375 free_irq(ioa_cfg->vectors_info[i].vec,
9376 &ioa_cfg->hrrq[i]);
9377 return rc;
9378 }
9379 }
9380 return 0;
9381}
9382
Linus Torvalds1da177e2005-04-16 15:20:36 -07009383/**
Wayne Boyer95fecd92009-06-16 15:13:28 -07009384 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 9385 * @irq: interrupt number
 * @devp: pointer to the ioa config struct
9386 *
9387 * Description: Simply set the msi_received flag to 1 indicating that
9388 * Message Signaled Interrupts are supported.
9389 *
9390 * Return value:
 9391 * IRQ_HANDLED
9392 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009393static irqreturn_t ipr_test_intr(int irq, void *devp)
Wayne Boyer95fecd92009-06-16 15:13:28 -07009394{
9395 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9396 unsigned long lock_flags = 0;
9397 irqreturn_t rc = IRQ_HANDLED;
9398
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009399 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009400 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9401
9402 ioa_cfg->msi_received = 1;
9403 wake_up(&ioa_cfg->msi_wait_q);
9404
9405 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9406 return rc;
9407}
9408
9409/**
9410 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 9411 * @ioa_cfg: ioa config struct
 * @pdev: PCI device struct
9412 *
 9413 * Description: The return value from pci_enable_msi() cannot always be
 9414 * trusted. This routine sets up and initiates a test interrupt to determine
 9415 * whether the interrupt is received via the ipr_test_intr() service routine.
 9416 * If the test fails, the driver will fall back to LSI.
9417 *
9418 * Return value:
9419 * 0 on success / non-zero on failure
9420 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009421static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
Wayne Boyer95fecd92009-06-16 15:13:28 -07009422{
9423 int rc;
9424 volatile u32 int_reg;
9425 unsigned long lock_flags = 0;
9426
9427 ENTER;
9428
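	/*
	 * Register a temporary test handler, fire a debug-acknowledge
	 * interrupt, then wait up to one second for ipr_test_intr() to
	 * set msi_received.
	 */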
9429 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9430 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9431 ioa_cfg->msi_received = 0;
9432 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
Wayne Boyer214777b2010-02-19 13:24:26 -08009433 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009434 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9435 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9436
wenxiong@linux.vnet.ibm.comf19799f2013-02-27 12:37:45 -06009437 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9438 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9439 else
9440 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009441 if (rc) {
9442 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
9443 return rc;
9444 } else if (ipr_debug)
9445 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
9446
Wayne Boyer214777b2010-02-19 13:24:26 -08009447 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009448 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9449 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009450 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009451 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9452
Wayne Boyer95fecd92009-06-16 15:13:28 -07009453 if (!ioa_cfg->msi_received) {
9454 /* MSI test failed */
9455 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
9456 rc = -EOPNOTSUPP;
9457 } else if (ipr_debug)
9458 dev_info(&pdev->dev, "MSI test succeeded.\n");
9459
9460 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9461
wenxiong@linux.vnet.ibm.comf19799f2013-02-27 12:37:45 -06009462 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9463 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9464 else
9465 free_irq(pdev->irq, ioa_cfg);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009466
9467 LEAVE;
9468
9469 return rc;
9470}
9471
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009472/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
Linus Torvalds1da177e2005-04-16 15:20:36 -07009473 * @pdev: PCI device struct
9474 * @dev_id: PCI device id struct
9475 *
9476 * Return value:
9477 * 0 on success / non-zero on failure
9478 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009479static int ipr_probe_ioa(struct pci_dev *pdev,
9480 const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009481{
9482 struct ipr_ioa_cfg *ioa_cfg;
9483 struct Scsi_Host *host;
9484 unsigned long ipr_regs_pci;
9485 void __iomem *ipr_regs;
Eric Sesterhenna2a65a32006-09-25 16:59:07 -07009486 int rc = PCIBIOS_SUCCESSFUL;
Brian King473b1e82007-05-02 10:44:11 -05009487 volatile u32 mask, uproc, interrupts;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009488 unsigned long lock_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009489
9490 ENTER;
9491
9492 if ((rc = pci_enable_device(pdev))) {
9493 dev_err(&pdev->dev, "Cannot enable adapter\n");
9494 goto out;
9495 }
9496
9497 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
9498
9499 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9500
9501 if (!host) {
9502 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9503 rc = -ENOMEM;
9504 goto out_disable;
9505 }
9506
9507 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9508 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
Dan Williams8d8e7d12012-07-09 21:06:08 -07009509 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009510
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009511 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009512
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009513 if (!ioa_cfg->ipr_chip) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009514 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9515 dev_id->vendor, dev_id->device);
9516 goto out_scsi_host_put;
9517 }
9518
Wayne Boyera32c0552010-02-19 13:23:36 -08009519 /* set SIS 32 or SIS 64 */
9520 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009521 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
Brian King7dd21302012-03-14 21:20:08 -05009522 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
Brian King89aad422012-03-14 21:20:10 -05009523 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009524
Brian King5469cb52007-03-29 12:42:40 -05009525 if (ipr_transop_timeout)
9526 ioa_cfg->transop_timeout = ipr_transop_timeout;
9527 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9528 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9529 else
9530 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9531
Auke Kok44c10132007-06-08 15:46:36 -07009532 ioa_cfg->revid = pdev->revision;
Brian King463fc692007-05-07 17:09:05 -05009533
Linus Torvalds1da177e2005-04-16 15:20:36 -07009534 ipr_regs_pci = pci_resource_start(pdev, 0);
9535
9536 rc = pci_request_regions(pdev, IPR_NAME);
9537 if (rc < 0) {
9538 dev_err(&pdev->dev,
9539 "Couldn't register memory range of registers\n");
9540 goto out_scsi_host_put;
9541 }
9542
Arjan van de Ven25729a72008-09-28 16:18:02 -07009543 ipr_regs = pci_ioremap_bar(pdev, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009544
9545 if (!ipr_regs) {
9546 dev_err(&pdev->dev,
9547 "Couldn't map memory range of registers\n");
9548 rc = -ENOMEM;
9549 goto out_release_regions;
9550 }
9551
9552 ioa_cfg->hdw_dma_regs = ipr_regs;
9553 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9554 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9555
9556 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9557
9558 pci_set_master(pdev);
9559
Wayne Boyera32c0552010-02-19 13:23:36 -08009560 if (ioa_cfg->sis64) {
9561 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
9562 if (rc < 0) {
9563 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
9564 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9565 }
9566
9567 } else
9568 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9569
Linus Torvalds1da177e2005-04-16 15:20:36 -07009570 if (rc < 0) {
9571 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
9572 goto cleanup_nomem;
9573 }
9574
9575 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9576 ioa_cfg->chip_cfg->cache_line_size);
9577
9578 if (rc != PCIBIOS_SUCCESSFUL) {
9579 dev_err(&pdev->dev, "Write of cache line size failed\n");
9580 rc = -EIO;
9581 goto cleanup_nomem;
9582 }
9583
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009584 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9585 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9586 IPR_MAX_MSIX_VECTORS);
9587 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9588 }
9589
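	/*
	 * Interrupt setup falls back in order: MSI-X, then MSI, then a
	 * single legacy (LSI) interrupt if neither can be enabled or the
	 * MSI test below fails.
	 */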
9590 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009591 ipr_enable_msix(ioa_cfg) == 0)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009592 ioa_cfg->intr_flag = IPR_USE_MSIX;
9593 else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009594 ipr_enable_msi(ioa_cfg) == 0)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009595 ioa_cfg->intr_flag = IPR_USE_MSI;
9596 else {
9597 ioa_cfg->intr_flag = IPR_USE_LSI;
9598 ioa_cfg->nvectors = 1;
9599 dev_info(&pdev->dev, "Cannot enable MSI.\n");
9600 }
9601
9602 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9603 ioa_cfg->intr_flag == IPR_USE_MSIX) {
Wayne Boyer95fecd92009-06-16 15:13:28 -07009604 rc = ipr_test_msi(ioa_cfg, pdev);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009605 if (rc == -EOPNOTSUPP) {
9606 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9607 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9608 pci_disable_msi(pdev);
9609 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9610 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9611 pci_disable_msix(pdev);
9612 }
9613
9614 ioa_cfg->intr_flag = IPR_USE_LSI;
9615 ioa_cfg->nvectors = 1;
9616 }
Wayne Boyer95fecd92009-06-16 15:13:28 -07009617 else if (rc)
9618 goto out_msi_disable;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009619 else {
9620 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9621 dev_info(&pdev->dev,
9622 "Request for %d MSIs succeeded with starting IRQ: %d\n",
9623 ioa_cfg->nvectors, pdev->irq);
9624 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9625 dev_info(&pdev->dev,
9626 "Request for %d MSIXs succeeded.",
9627 ioa_cfg->nvectors);
9628 }
9629 }
9630
9631 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9632 (unsigned int)num_online_cpus(),
9633 (unsigned int)IPR_MAX_HRRQ_NUM);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009634
Linus Torvalds1da177e2005-04-16 15:20:36 -07009635 /* Save away PCI config space for use following IOA reset */
9636 rc = pci_save_state(pdev);
9637
9638 if (rc != PCIBIOS_SUCCESSFUL) {
9639 dev_err(&pdev->dev, "Failed to save PCI config space\n");
9640 rc = -EIO;
Julia Lawallf170c682011-07-11 14:08:25 -07009641 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009642 }
9643
9644 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
Julia Lawallf170c682011-07-11 14:08:25 -07009645 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009646
9647 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
Julia Lawallf170c682011-07-11 14:08:25 -07009648 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009649
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009650 if (ioa_cfg->sis64)
9651 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9652 + ((sizeof(struct ipr_config_table_entry64)
9653 * ioa_cfg->max_devs_supported)));
9654 else
9655 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9656 + ((sizeof(struct ipr_config_table_entry)
9657 * ioa_cfg->max_devs_supported)));
9658
	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto out_msi_disable;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;
	if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
		ioa_cfg->ioa_unit_checked = 1;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

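	/*
	 * With MSI or MSI-X, each allocated vector gets its own named
	 * handler; in legacy LSI mode a single shared interrupt line
	 * services HRRQ 0.
	 */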
	if (ioa_cfg->intr_flag == IPR_USE_MSI
			|| ioa_cfg->intr_flag == IPR_USE_MSIX) {
		name_msi_vectors(ioa_cfg);
		rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
			0,
			ioa_cfg->vectors_info[0].desc,
			&ioa_cfg->hrrq[0]);
		if (!rc)
			rc = ipr_request_other_msi_irqs(ioa_cfg);
	} else {
		rc = request_irq(pdev->irq, ipr_isr,
			IRQF_SHARED,
			IPR_NAME, &ioa_cfg->hrrq[0]);
	}
	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
		ioa_cfg->needs_warm_reset = 1;
		ioa_cfg->reset = ipr_reset_slot_reset;
	} else
		ioa_cfg->reset = ipr_reset_start_bist;

	spin_lock(&ipr_driver_lock);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock(&ipr_driver_lock);

	LEAVE;
out:
	return rc;

cleanup_nolog:
	ipr_free_mem(ioa_cfg);
out_msi_disable:
	if (ioa_cfg->intr_flag == IPR_USE_MSI)
		pci_disable_msi(pdev);
	else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
		pci_disable_msix(pdev);
cleanup_nomem:
	iounmap(ipr_regs);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);
	goto out;
}

/**
 * ipr_scan_vsets - Scans for VSET devices
 * @ioa_cfg:	ioa config struct
 *
 * Description: Since the VSET resources do not follow SAM in that we can have
 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
 *
 * Return value:
 * 	none
 **/
static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
{
	int target, lun;

	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
}

/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}

/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

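	/*
	 * Flag every HRRQ as being torn down before initiating the
	 * bringdown; the wmb() makes the removing_ioa updates visible
	 * before the reset is started.
	 */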
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].removing_ioa = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_work(&ioa_cfg->work_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock(&ipr_driver_lock);
	list_del(&ioa_cfg->queue);
	spin_unlock(&ipr_driver_lock);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}

/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc, i;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;

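	/*
	 * On SIS-64 adapters with more than one vector, use blk-iopoll
	 * (polled completion) on the secondary HRRQs; HRRQ 0 stays
	 * interrupt driven.
	 */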
	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
					ioa_cfg->iopoll_weight, ipr_iopoll);
			blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
		}
	}

	schedule_work(&ioa_cfg->work_q);
	return 0;
}

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		ioa_cfg->iopoll_weight = 0;
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}

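/*
 * PCI device table: each entry pairs a vendor/device ID with the IBM
 * subsystem ID of a specific adapter model; driver_data carries
 * per-model quirk flags such as IPR_USE_LONG_TRANSOP_TIMEOUT and
 * IPR_USE_PCI_WARM_RESET.
 */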
static struct pci_device_id ipr_pci_table[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);

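/*
 * PCI error recovery hooks: the PCI core calls error_detected when a
 * bus error is reported for the device and slot_reset after the slot
 * has been reset.
 */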
static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	system event (SYS_RESTART, SYS_HALT, or SYS_POWER_OFF)
 * @buf:	notifier data (unused)
 *
 * Return value:
 * 	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock(&ipr_driver_lock);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

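		/*
		 * Build and issue an IOA shutdown command with the
		 * "prepare for normal" modifier so the adapter can flush
		 * its write cache before the system goes down.
		 */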
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock(&ipr_driver_lock);

	return NOTIFY_OK;
}

static struct notifier_block ipr_notifier = {
	.notifier_call = ipr_halt,
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);