[PATCH] IB: sparse endianness cleanup

Fix sparse endianness warnings.  Declare on-the-wire fields and values
derived from them as __be16/__be32/__be64 where appropriate, convert with
be*_to_cpu()/cpu_to_be*() only at the points where a host-order value is
actually needed, and use __force casts where a big-endian value is
deliberately reused as an opaque key (e.g. the idr local_id lookups).
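
A minimal sketch of the idiom being applied (the structure and helper
names below are made up for illustration and do not appear in the tree):

	#include <linux/types.h>	/* __be32 */
	#include <asm/byteorder.h>	/* be32_to_cpu() */

	struct wire_hdr {
		__be32 remote_qpn;	/* kept in network byte order */
	};

	static u32 qpn_to_host(const struct wire_hdr *hdr)
	{
		/* swap only at the point a CPU-order value is needed */
		return be32_to_cpu(hdr->remote_qpn);
	}

	static int qpn_as_idr_key(const struct wire_hdr *hdr)
	{
		/*
		 * Raw big-endian value reused as an opaque key: the
		 * __force cast tells sparse the type mismatch is
		 * intentional.
		 */
		return (__force int) hdr->remote_qpn;
	}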

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 403ed12..781be77 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -83,7 +83,7 @@
 struct cm_device {
 	struct list_head list;
 	struct ib_device *device;
-	u64 ca_guid;
+	__be64 ca_guid;
 	struct cm_port port[0];
 };
 
@@ -100,8 +100,8 @@
 	struct list_head list;
 	struct cm_port *port;
 	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
-	u32 local_id;				/* Established / timewait */
-	u32 remote_id;
+	__be32 local_id;			/* Established / timewait */
+	__be32 remote_id;
 	struct ib_cm_event cm_event;
 	struct ib_sa_path_rec path[0];
 };
@@ -110,8 +110,8 @@
 	struct cm_work work;			/* Must be first. */
 	struct rb_node remote_qp_node;
 	struct rb_node remote_id_node;
-	u64 remote_ca_guid;
-	u32 remote_qpn;
+	__be64 remote_ca_guid;
+	__be32 remote_qpn;
 	u8 inserted_remote_qp;
 	u8 inserted_remote_id;
 };
@@ -132,11 +132,11 @@
 	struct cm_av alt_av;
 
 	void *private_data;
-	u64 tid;
-	u32 local_qpn;
-	u32 remote_qpn;
-	u32 sq_psn;
-	u32 rq_psn;
+	__be64 tid;
+	__be32 local_qpn;
+	__be32 remote_qpn;
+	__be32 sq_psn;
+	__be32 rq_psn;
 	int timeout_ms;
 	enum ib_mtu path_mtu;
 	u8 private_data_len;
@@ -253,7 +253,7 @@
 			   u16 dlid, u8 sl, u16 src_path_bits)
 {
 	memset(ah_attr, 0, sizeof ah_attr);
-	ah_attr->dlid = be16_to_cpu(dlid);
+	ah_attr->dlid = dlid;
 	ah_attr->sl = sl;
 	ah_attr->src_path_bits = src_path_bits;
 	ah_attr->port_num = port_num;
@@ -264,7 +264,7 @@
 {
 	av->port = port;
 	av->pkey_index = wc->pkey_index;
-	cm_set_ah_attr(&av->ah_attr, port->port_num, cpu_to_be16(wc->slid),
+	cm_set_ah_attr(&av->ah_attr, port->port_num, wc->slid,
 		       wc->sl, wc->dlid_path_bits);
 }
 
@@ -295,8 +295,9 @@
 		return ret;
 
 	av->port = port;
-	cm_set_ah_attr(&av->ah_attr, av->port->port_num, path->dlid,
-		       path->sl, path->slid & 0x7F);
+	cm_set_ah_attr(&av->ah_attr, av->port->port_num,
+		       be16_to_cpu(path->dlid), path->sl,
+		       be16_to_cpu(path->slid) & 0x7F);
 	av->packet_life_time = path->packet_life_time;
 	return 0;
 }
@@ -309,26 +310,26 @@
 	do {
 		spin_lock_irqsave(&cm.lock, flags);
 		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, 1,
-					(int *) &cm_id_priv->id.local_id);
+					(__force int *) &cm_id_priv->id.local_id);
 		spin_unlock_irqrestore(&cm.lock, flags);
 	} while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );
 	return ret;
 }
 
-static void cm_free_id(u32 local_id)
+static void cm_free_id(__be32 local_id)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&cm.lock, flags);
-	idr_remove(&cm.local_id_table, (int) local_id);
+	idr_remove(&cm.local_id_table, (__force int) local_id);
 	spin_unlock_irqrestore(&cm.lock, flags);
 }
 
-static struct cm_id_private * cm_get_id(u32 local_id, u32 remote_id)
+static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
 {
 	struct cm_id_private *cm_id_priv;
 
-	cm_id_priv = idr_find(&cm.local_id_table, (int) local_id);
+	cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id);
 	if (cm_id_priv) {
 		if (cm_id_priv->id.remote_id == remote_id)
 			atomic_inc(&cm_id_priv->refcount);
@@ -339,7 +340,7 @@
 	return cm_id_priv;
 }
 
-static struct cm_id_private * cm_acquire_id(u32 local_id, u32 remote_id)
+static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
 {
 	struct cm_id_private *cm_id_priv;
 	unsigned long flags;
@@ -356,8 +357,8 @@
 	struct rb_node **link = &cm.listen_service_table.rb_node;
 	struct rb_node *parent = NULL;
 	struct cm_id_private *cur_cm_id_priv;
-	u64 service_id = cm_id_priv->id.service_id;
-	u64 service_mask = cm_id_priv->id.service_mask;
+	__be64 service_id = cm_id_priv->id.service_id;
+	__be64 service_mask = cm_id_priv->id.service_mask;
 
 	while (*link) {
 		parent = *link;
@@ -376,7 +377,7 @@
 	return NULL;
 }
 
-static struct cm_id_private * cm_find_listen(u64 service_id)
+static struct cm_id_private * cm_find_listen(__be64 service_id)
 {
 	struct rb_node *node = cm.listen_service_table.rb_node;
 	struct cm_id_private *cm_id_priv;
@@ -400,8 +401,8 @@
 	struct rb_node **link = &cm.remote_id_table.rb_node;
 	struct rb_node *parent = NULL;
 	struct cm_timewait_info *cur_timewait_info;
-	u64 remote_ca_guid = timewait_info->remote_ca_guid;
-	u32 remote_id = timewait_info->work.remote_id;
+	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
+	__be32 remote_id = timewait_info->work.remote_id;
 
 	while (*link) {
 		parent = *link;
@@ -424,8 +425,8 @@
 	return NULL;
 }
 
-static struct cm_timewait_info * cm_find_remote_id(u64 remote_ca_guid,
-						   u32 remote_id)
+static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
+						   __be32 remote_id)
 {
 	struct rb_node *node = cm.remote_id_table.rb_node;
 	struct cm_timewait_info *timewait_info;
@@ -453,8 +454,8 @@
 	struct rb_node **link = &cm.remote_qp_table.rb_node;
 	struct rb_node *parent = NULL;
 	struct cm_timewait_info *cur_timewait_info;
-	u64 remote_ca_guid = timewait_info->remote_ca_guid;
-	u32 remote_qpn = timewait_info->remote_qpn;
+	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
+	__be32 remote_qpn = timewait_info->remote_qpn;
 
 	while (*link) {
 		parent = *link;
@@ -484,7 +485,7 @@
 	struct rb_node *parent = NULL;
 	struct cm_id_private *cur_cm_id_priv;
 	union ib_gid *port_gid = &cm_id_priv->av.dgid;
-	u32 remote_id = cm_id_priv->id.remote_id;
+	__be32 remote_id = cm_id_priv->id.remote_id;
 
 	while (*link) {
 		parent = *link;
@@ -598,7 +599,7 @@
 	spin_unlock_irqrestore(&cm.lock, flags);
 }
 
-static struct cm_timewait_info * cm_create_timewait_info(u32 local_id)
+static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
 {
 	struct cm_timewait_info *timewait_info;
 
@@ -715,14 +716,15 @@
 EXPORT_SYMBOL(ib_destroy_cm_id);
 
 int ib_cm_listen(struct ib_cm_id *cm_id,
-		 u64 service_id,
-		 u64 service_mask)
+		 __be64 service_id,
+		 __be64 service_mask)
 {
 	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
 	unsigned long flags;
 	int ret = 0;
 
-	service_mask = service_mask ? service_mask : ~0ULL;
+	service_mask = service_mask ? service_mask :
+		       __constant_cpu_to_be64(~0ULL);
 	service_id &= service_mask;
 	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
 	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
@@ -735,8 +737,8 @@
 
 	spin_lock_irqsave(&cm.lock, flags);
 	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
-		cm_id->service_id = __cpu_to_be64(cm.listen_service_id++);
-		cm_id->service_mask = ~0ULL;
+		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
+		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
 	} else {
 		cm_id->service_id = service_id;
 		cm_id->service_mask = service_mask;
@@ -752,18 +754,19 @@
 }
 EXPORT_SYMBOL(ib_cm_listen);
 
-static u64 cm_form_tid(struct cm_id_private *cm_id_priv,
-		       enum cm_msg_sequence msg_seq)
+static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
+			  enum cm_msg_sequence msg_seq)
 {
 	u64 hi_tid, low_tid;
 
 	hi_tid   = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
-	low_tid  = (u64) (cm_id_priv->id.local_id | (msg_seq << 30));
+	low_tid  = (u64) ((__force u32)cm_id_priv->id.local_id |
+			  (msg_seq << 30));
 	return cpu_to_be64(hi_tid | low_tid);
 }
 
 static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
-			      enum cm_msg_attr_id attr_id, u64 tid)
+			      __be16 attr_id, __be64 tid)
 {
 	hdr->base_version  = IB_MGMT_BASE_VERSION;
 	hdr->mgmt_class	   = IB_MGMT_CLASS_CM;
@@ -896,7 +899,7 @@
 			goto error1;
 	}
 	cm_id->service_id = param->service_id;
-	cm_id->service_mask = ~0ULL;
+	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
 	cm_id_priv->timeout_ms = cm_convert_to_ms(
 				    param->primary_path->packet_life_time) * 2 +
 				 cm_convert_to_ms(
@@ -963,7 +966,7 @@
 	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
 	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
 	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
-	rej_msg->reason = reason;
+	rej_msg->reason = cpu_to_be16(reason);
 
 	if (ari && ari_length) {
 		cm_rej_set_reject_info_len(rej_msg, ari_length);
@@ -977,8 +980,8 @@
 	return ret;
 }
 
-static inline int cm_is_active_peer(u64 local_ca_guid, u64 remote_ca_guid,
-				    u32 local_qpn, u32 remote_qpn)
+static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
+				    __be32 local_qpn, __be32 remote_qpn)
 {
 	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
 		((local_ca_guid == remote_ca_guid) &&
@@ -1137,7 +1140,7 @@
 		break;
 	}
 
-	rej_msg->reason = reason;
+	rej_msg->reason = cpu_to_be16(reason);
 	if (ari && ari_length) {
 		cm_rej_set_reject_info_len(rej_msg, ari_length);
 		memcpy(rej_msg->ari, ari, ari_length);
@@ -1276,7 +1279,7 @@
 	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
 	cm_id_priv->id.context = listen_cm_id_priv->id.context;
 	cm_id_priv->id.service_id = req_msg->service_id;
-	cm_id_priv->id.service_mask = ~0ULL;
+	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
 
 	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
 	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
@@ -1969,7 +1972,7 @@
 	param = &work->cm_event.param.rej_rcvd;
 	param->ari = rej_msg->ari;
 	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
-	param->reason = rej_msg->reason;
+	param->reason = __be16_to_cpu(rej_msg->reason);
 	work->cm_event.private_data = &rej_msg->private_data;
 }
 
@@ -1978,20 +1981,20 @@
 	struct cm_timewait_info *timewait_info;
 	struct cm_id_private *cm_id_priv;
 	unsigned long flags;
-	u32 remote_id;
+	__be32 remote_id;
 
 	remote_id = rej_msg->local_comm_id;
 
-	if (rej_msg->reason == IB_CM_REJ_TIMEOUT) {
+	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
 		spin_lock_irqsave(&cm.lock, flags);
-		timewait_info = cm_find_remote_id( *((u64 *) rej_msg->ari),
+		timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
 						  remote_id);
 		if (!timewait_info) {
 			spin_unlock_irqrestore(&cm.lock, flags);
 			return NULL;
 		}
 		cm_id_priv = idr_find(&cm.local_id_table,
-				      (int) timewait_info->work.local_id);
+				      (__force int) timewait_info->work.local_id);
 		if (cm_id_priv) {
 			if (cm_id_priv->id.remote_id == remote_id)
 				atomic_inc(&cm_id_priv->refcount);
@@ -2032,7 +2035,7 @@
 		/* fall through */
 	case IB_CM_REQ_RCVD:
 	case IB_CM_MRA_REQ_SENT:
-		if (rej_msg->reason == IB_CM_REJ_STALE_CONN)
+		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
 			cm_enter_timewait(cm_id_priv);
 		else
 			cm_reset_to_idle(cm_id_priv);
@@ -2553,7 +2556,7 @@
 	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
 			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
 	sidr_req_msg->request_id = cm_id_priv->id.local_id;
-	sidr_req_msg->pkey = param->pkey;
+	sidr_req_msg->pkey = cpu_to_be16(param->pkey);
 	sidr_req_msg->service_id = param->service_id;
 
 	if (param->private_data && param->private_data_len)
@@ -2580,7 +2583,7 @@
 		goto out;
 
 	cm_id->service_id = param->service_id;
-	cm_id->service_mask = ~0ULL;
+	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
 	cm_id_priv->timeout_ms = param->timeout_ms;
 	cm_id_priv->max_cm_retries = param->max_cm_retries;
 	ret = cm_alloc_msg(cm_id_priv, &msg);
@@ -2621,7 +2624,7 @@
 	sidr_req_msg = (struct cm_sidr_req_msg *)
 				work->mad_recv_wc->recv_buf.mad;
 	param = &work->cm_event.param.sidr_req_rcvd;
-	param->pkey = sidr_req_msg->pkey;
+	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
 	param->listen_id = listen_id;
 	param->device = work->port->mad_agent->device;
 	param->port = work->port->port_num;
@@ -2645,7 +2648,7 @@
 	sidr_req_msg = (struct cm_sidr_req_msg *)
 				work->mad_recv_wc->recv_buf.mad;
 	wc = work->mad_recv_wc->wc;
-	cm_id_priv->av.dgid.global.subnet_prefix = wc->slid;
+	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
 	cm_id_priv->av.dgid.global.interface_id = 0;
 	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
 				&cm_id_priv->av);
@@ -2673,7 +2676,7 @@
 	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
 	cm_id_priv->id.context = cur_cm_id_priv->id.context;
 	cm_id_priv->id.service_id = sidr_req_msg->service_id;
-	cm_id_priv->id.service_mask = ~0ULL;
+	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
 
 	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
 	cm_process_work(cm_id_priv, work);
@@ -3175,10 +3178,10 @@
 }
 EXPORT_SYMBOL(ib_cm_init_qp_attr);
 
-static u64 cm_get_ca_guid(struct ib_device *device)
+static __be64 cm_get_ca_guid(struct ib_device *device)
 {
 	struct ib_device_attr *device_attr;
-	u64 guid;
+	__be64 guid;
 	int ret;
 
 	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index 15a309a..807a9fb 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -43,19 +43,17 @@
 
 #define IB_CM_CLASS_VERSION	2 /* IB specification 1.2 */
 
-enum cm_msg_attr_id {
-	CM_REQ_ATTR_ID	    = __constant_htons(0x0010),
-	CM_MRA_ATTR_ID	    = __constant_htons(0x0011),
-	CM_REJ_ATTR_ID	    = __constant_htons(0x0012),
-	CM_REP_ATTR_ID	    = __constant_htons(0x0013),
-	CM_RTU_ATTR_ID	    = __constant_htons(0x0014),
-	CM_DREQ_ATTR_ID	    = __constant_htons(0x0015),
-	CM_DREP_ATTR_ID	    = __constant_htons(0x0016),
-	CM_SIDR_REQ_ATTR_ID = __constant_htons(0x0017),
-	CM_SIDR_REP_ATTR_ID = __constant_htons(0x0018),
-	CM_LAP_ATTR_ID      = __constant_htons(0x0019),
-	CM_APR_ATTR_ID      = __constant_htons(0x001A)
-};
+#define CM_REQ_ATTR_ID	    __constant_htons(0x0010)
+#define CM_MRA_ATTR_ID	    __constant_htons(0x0011)
+#define CM_REJ_ATTR_ID	    __constant_htons(0x0012)
+#define CM_REP_ATTR_ID	    __constant_htons(0x0013)
+#define CM_RTU_ATTR_ID	    __constant_htons(0x0014)
+#define CM_DREQ_ATTR_ID	    __constant_htons(0x0015)
+#define CM_DREP_ATTR_ID	    __constant_htons(0x0016)
+#define CM_SIDR_REQ_ATTR_ID __constant_htons(0x0017)
+#define CM_SIDR_REP_ATTR_ID __constant_htons(0x0018)
+#define CM_LAP_ATTR_ID      __constant_htons(0x0019)
+#define CM_APR_ATTR_ID      __constant_htons(0x001A)
 
 enum cm_msg_sequence {
 	CM_MSG_SEQUENCE_REQ,
@@ -67,35 +65,35 @@
 struct cm_req_msg {
 	struct ib_mad_hdr hdr;
 
-	u32 local_comm_id;
-	u32 rsvd4;
-	u64 service_id;
-	u64 local_ca_guid;
-	u32 rsvd24;
-	u32 local_qkey;
+	__be32 local_comm_id;
+	__be32 rsvd4;
+	__be64 service_id;
+	__be64 local_ca_guid;
+	__be32 rsvd24;
+	__be32 local_qkey;
 	/* local QPN:24, responder resources:8 */
-	u32 offset32;
+	__be32 offset32;
 	/* local EECN:24, initiator depth:8 */
-	u32 offset36;
+	__be32 offset36;
 	/*
 	 * remote EECN:24, remote CM response timeout:5,
 	 * transport service type:2, end-to-end flow control:1
 	 */
-	u32 offset40;
+	__be32 offset40;
 	/* starting PSN:24, local CM response timeout:5, retry count:3 */
-	u32 offset44;
-	u16 pkey;
+	__be32 offset44;
+	__be16 pkey;
 	/* path MTU:4, RDC exists:1, RNR retry count:3. */
 	u8 offset50;
 	/* max CM Retries:4, SRQ:1, rsvd:3 */
 	u8 offset51;
 
-	u16 primary_local_lid;
-	u16 primary_remote_lid;
+	__be16 primary_local_lid;
+	__be16 primary_remote_lid;
 	union ib_gid primary_local_gid;
 	union ib_gid primary_remote_gid;
 	/* flow label:20, rsvd:6, packet rate:6 */
-	u32 primary_offset88;
+	__be32 primary_offset88;
 	u8 primary_traffic_class;
 	u8 primary_hop_limit;
 	/* SL:4, subnet local:1, rsvd:3 */
@@ -103,12 +101,12 @@
 	/* local ACK timeout:5, rsvd:3 */
 	u8 primary_offset95;
 
-	u16 alt_local_lid;
-	u16 alt_remote_lid;
+	__be16 alt_local_lid;
+	__be16 alt_remote_lid;
 	union ib_gid alt_local_gid;
 	union ib_gid alt_remote_gid;
 	/* flow label:20, rsvd:6, packet rate:6 */
-	u32 alt_offset132;
+	__be32 alt_offset132;
 	u8 alt_traffic_class;
 	u8 alt_hop_limit;
 	/* SL:4, subnet local:1, rsvd:3 */
@@ -120,12 +118,12 @@
 
 } __attribute__ ((packed));
 
-static inline u32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
+static inline __be32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
 {
 	return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8);
 }
 
-static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, u32 qpn)
+static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn)
 {
 	req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
 					 (be32_to_cpu(req_msg->offset32) &
@@ -208,13 +206,13 @@
 					  0xFFFFFFFE));
 }
 
-static inline u32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
+static inline __be32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
 {
 	return cpu_to_be32(be32_to_cpu(req_msg->offset44) >> 8);
 }
 
 static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg,
-					   u32 starting_psn)
+					   __be32 starting_psn)
 {
 	req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
 			    (be32_to_cpu(req_msg->offset44) & 0x000000FF));
@@ -288,13 +286,13 @@
 				  ((srq & 0x1) << 3));
 }
 
-static inline u32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
+static inline __be32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
 {
-	return cpu_to_be32((be32_to_cpu(req_msg->primary_offset88) >> 12));
+	return cpu_to_be32(be32_to_cpu(req_msg->primary_offset88) >> 12);
 }
 
 static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg,
-						 u32 flow_label)
+						 __be32 flow_label)
 {
 	req_msg->primary_offset88 = cpu_to_be32(
 				    (be32_to_cpu(req_msg->primary_offset88) &
@@ -350,13 +348,13 @@
 					  (local_ack_timeout << 3));
 }
 
-static inline u32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
+static inline __be32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
 {
-	return cpu_to_be32((be32_to_cpu(req_msg->alt_offset132) >> 12));
+	return cpu_to_be32(be32_to_cpu(req_msg->alt_offset132) >> 12);
 }
 
 static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg,
-					     u32 flow_label)
+					     __be32 flow_label)
 {
 	req_msg->alt_offset132 = cpu_to_be32(
 				 (be32_to_cpu(req_msg->alt_offset132) &
@@ -422,8 +420,8 @@
  struct cm_mra_msg {
 	struct ib_mad_hdr hdr;
 
-	u32 local_comm_id;
-	u32 remote_comm_id;
+	__be32 local_comm_id;
+	__be32 remote_comm_id;
 	/* message MRAed:2, rsvd:6 */
 	u8 offset8;
 	/* service timeout:5, rsvd:3 */
@@ -458,13 +456,13 @@
 struct cm_rej_msg {
 	struct ib_mad_hdr hdr;
 
-	u32 local_comm_id;
-	u32 remote_comm_id;
+	__be32 local_comm_id;
+	__be32 remote_comm_id;
 	/* message REJected:2, rsvd:6 */
 	u8 offset8;
 	/* reject info length:7, rsvd:1. */
 	u8 offset9;
-	u16 reason;
+	__be16 reason;
 	u8 ari[IB_CM_REJ_ARI_LENGTH];
 
 	u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE];
@@ -495,45 +493,45 @@
 struct cm_rep_msg {
 	struct ib_mad_hdr hdr;
 
-	u32 local_comm_id;
-	u32 remote_comm_id;
-	u32 local_qkey;
+	__be32 local_comm_id;
+	__be32 remote_comm_id;
+	__be32 local_qkey;
 	/* local QPN:24, rsvd:8 */
-	u32 offset12;
+	__be32 offset12;
 	/* local EECN:24, rsvd:8 */
-	u32 offset16;
+	__be32 offset16;
 	/* starting PSN:24 rsvd:8 */
-	u32 offset20;
+	__be32 offset20;
 	u8 resp_resources;
 	u8 initiator_depth;
 	/* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */
 	u8 offset26;
 	/* RNR retry count:3, SRQ:1, rsvd:5 */
 	u8 offset27;
-	u64 local_ca_guid;
+	__be64 local_ca_guid;
 
 	u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE];
 
 } __attribute__ ((packed));
 
-static inline u32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
+static inline __be32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
 {
 	return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8);
 }
 
-static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, u32 qpn)
+static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)
 {
 	rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
 			    (be32_to_cpu(rep_msg->offset12) & 0x000000FF));
 }
 
-static inline u32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
+static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
 {
 	return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
 }
 
 static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg,
-					   u32 starting_psn)
+					   __be32 starting_psn)
 {
 	rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
 			    (be32_to_cpu(rep_msg->offset20) & 0x000000FF));
@@ -600,8 +598,8 @@
 struct cm_rtu_msg {
 	struct ib_mad_hdr hdr;
 
-	u32 local_comm_id;
-	u32 remote_comm_id;
+	__be32 local_comm_id;
+	__be32 remote_comm_id;
 
 	u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE];
 
@@ -610,21 +608,21 @@
 struct cm_dreq_msg {
 	struct ib_mad_hdr hdr;
 
-	u32 local_comm_id;
-	u32 remote_comm_id;
+	__be32 local_comm_id;
+	__be32 remote_comm_id;
 	/* remote QPN/EECN:24, rsvd:8 */
-	u32 offset8;
+	__be32 offset8;
 
 	u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE];
 
 } __attribute__ ((packed));
 
-static inline u32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
+static inline __be32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
 {
 	return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8);
 }
 
-static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, u32 qpn)
+static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn)
 {
 	dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
 			    (be32_to_cpu(dreq_msg->offset8) & 0x000000FF));
@@ -633,8 +631,8 @@
 struct cm_drep_msg {
 	struct ib_mad_hdr hdr;
 
-	u32 local_comm_id;
-	u32 remote_comm_id;
+	__be32 local_comm_id;
+	__be32 remote_comm_id;
 
 	u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE];
 
@@ -643,37 +641,37 @@
 struct cm_lap_msg {
 	struct ib_mad_hdr hdr;
 
-	u32 local_comm_id;
-	u32 remote_comm_id;
+	__be32 local_comm_id;
+	__be32 remote_comm_id;
 
-	u32 rsvd8;
+	__be32 rsvd8;
 	/* remote QPN/EECN:24, remote CM response timeout:5, rsvd:3 */
-	u32 offset12;
-	u32 rsvd16;
+	__be32 offset12;
+	__be32 rsvd16;
 
-	u16 alt_local_lid;
-	u16 alt_remote_lid;
+	__be16 alt_local_lid;
+	__be16 alt_remote_lid;
 	union ib_gid alt_local_gid;
 	union ib_gid alt_remote_gid;
 	/* flow label:20, rsvd:4, traffic class:8 */
-	u32 offset56;
+	__be32 offset56;
 	u8 alt_hop_limit;
 	/* rsvd:2, packet rate:6 */
-	uint8_t offset61;
+	u8 offset61;
 	/* SL:4, subnet local:1, rsvd:3 */
-	uint8_t offset62;
+	u8 offset62;
 	/* local ACK timeout:5, rsvd:3 */
-	uint8_t offset63;
+	u8 offset63;
 
 	u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE];
 } __attribute__  ((packed));
 
-static inline u32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
+static inline __be32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
 {
 	return cpu_to_be32(be32_to_cpu(lap_msg->offset12) >> 8);
 }
 
-static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, u32 qpn)
+static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn)
 {
 	lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
 					 (be32_to_cpu(lap_msg->offset12) &
@@ -693,17 +691,17 @@
 					  0xFFFFFF07));
 }
 
-static inline u32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
+static inline __be32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
 {
-	return be32_to_cpu(lap_msg->offset56) >> 12;
+	return cpu_to_be32(be32_to_cpu(lap_msg->offset56) >> 12);
 }
 
 static inline void cm_lap_set_flow_label(struct cm_lap_msg *lap_msg,
-					 u32 flow_label)
+					 __be32 flow_label)
 {
-	lap_msg->offset56 = cpu_to_be32((flow_label << 12) |
-					 (be32_to_cpu(lap_msg->offset56) &
-					  0x00000FFF));
+	lap_msg->offset56 = cpu_to_be32(
+				 (be32_to_cpu(lap_msg->offset56) & 0x00000FFF) |
+				 (be32_to_cpu(flow_label) << 12));
 }
 
 static inline u8 cm_lap_get_traffic_class(struct cm_lap_msg *lap_msg)
@@ -766,8 +764,8 @@
 struct cm_apr_msg {
 	struct ib_mad_hdr hdr;
 
-	u32 local_comm_id;
-	u32 remote_comm_id;
+	__be32 local_comm_id;
+	__be32 remote_comm_id;
 
 	u8 info_length;
 	u8 ap_status;
@@ -779,10 +777,10 @@
 struct cm_sidr_req_msg {
 	struct ib_mad_hdr hdr;
 
-	u32 request_id;
-	u16 pkey;
-	u16 rsvd;
-	u64 service_id;
+	__be32 request_id;
+	__be16 pkey;
+	__be16 rsvd;
+	__be64 service_id;
 
 	u8 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE];
 } __attribute__ ((packed));
@@ -790,26 +788,26 @@
 struct cm_sidr_rep_msg {
 	struct ib_mad_hdr hdr;
 
-	u32 request_id;
+	__be32 request_id;
 	u8 status;
 	u8 info_length;
-	u16 rsvd;
+	__be16 rsvd;
 	/* QPN:24, rsvd:8 */
-	u32 offset8;
-	u64 service_id;
-	u32 qkey;
+	__be32 offset8;
+	__be64 service_id;
+	__be32 qkey;
 	u8 info[IB_CM_SIDR_REP_INFO_LENGTH];
 
 	u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE];
 } __attribute__ ((packed));
 
-static inline u32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
+static inline __be32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
 {
 	return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8);
 }
 
 static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg,
-				       u32 qpn)
+				       __be32 qpn)
 {
 	sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
 					(be32_to_cpu(sidr_rep_msg->offset8) &
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index b97e210c..214493c 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -693,7 +693,8 @@
 		goto out;
 	}
 
-	build_smp_wc(send_wr->wr_id, smp->dr_slid, send_wr->wr.ud.pkey_index,
+	build_smp_wc(send_wr->wr_id, be16_to_cpu(smp->dr_slid),
+		     send_wr->wr.ud.pkey_index,
 		     send_wr->wr.ud.port_num, &mad_wc);
 
 	/* No GRH for DR SMP */
@@ -1554,7 +1555,7 @@
 }
 
 struct ib_mad_send_wr_private*
-ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, u64 tid)
+ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid)
 {
 	struct ib_mad_send_wr_private *mad_send_wr;
 
@@ -1597,7 +1598,7 @@
 	struct ib_mad_send_wr_private *mad_send_wr;
 	struct ib_mad_send_wc mad_send_wc;
 	unsigned long flags;
-	u64 tid;
+	__be64 tid;
 
 	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
 	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
@@ -2165,7 +2166,8 @@
 			 * Defined behavior is to complete response
 			 * before request
 			 */
-			build_smp_wc(local->wr_id, IB_LID_PERMISSIVE,
+			build_smp_wc(local->wr_id,
+				     be16_to_cpu(IB_LID_PERMISSIVE),
 				     0 /* pkey index */,
 				     recv_mad_agent->agent.port_num, &wc);
 
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 568da10..807b0f3 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -121,7 +121,7 @@
 	struct ib_send_wr send_wr;
 	struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
 	u64 wr_id;			/* client WR ID */
-	u64 tid;
+	__be64 tid;
 	unsigned long timeout;
 	int retries;
 	int retry;
@@ -144,7 +144,7 @@
 	struct ib_send_wr send_wr;
 	struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
 	u64 wr_id;			/* client WR ID */
-	u64 tid;
+	__be64 tid;
 };
 
 struct ib_mad_mgmt_method_table {
@@ -210,7 +210,7 @@
 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr);
 
 struct ib_mad_send_wr_private *
-ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, u64 tid);
+ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid);
 
 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
 			     struct ib_mad_send_wc *mad_send_wc);
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 8f1eb80..d68bf7e 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -61,7 +61,7 @@
 	int seg_num;
 	int newwin;
 
-	u64 tid;
+	__be64 tid;
 	u32 src_qp;
 	u16 slid;
 	u8 mgmt_class;
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index b2e7799..bf7334e 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -255,14 +255,14 @@
 		return ret;
 
 	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
-		       be16_to_cpu(((u16 *) gid.raw)[0]),
-		       be16_to_cpu(((u16 *) gid.raw)[1]),
-		       be16_to_cpu(((u16 *) gid.raw)[2]),
-		       be16_to_cpu(((u16 *) gid.raw)[3]),
-		       be16_to_cpu(((u16 *) gid.raw)[4]),
-		       be16_to_cpu(((u16 *) gid.raw)[5]),
-		       be16_to_cpu(((u16 *) gid.raw)[6]),
-		       be16_to_cpu(((u16 *) gid.raw)[7]));
+		       be16_to_cpu(((__be16 *) gid.raw)[0]),
+		       be16_to_cpu(((__be16 *) gid.raw)[1]),
+		       be16_to_cpu(((__be16 *) gid.raw)[2]),
+		       be16_to_cpu(((__be16 *) gid.raw)[3]),
+		       be16_to_cpu(((__be16 *) gid.raw)[4]),
+		       be16_to_cpu(((__be16 *) gid.raw)[5]),
+		       be16_to_cpu(((__be16 *) gid.raw)[6]),
+		       be16_to_cpu(((__be16 *) gid.raw)[7]));
 }
 
 static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
@@ -334,11 +334,11 @@
 		break;
 	case 16:
 		ret = sprintf(buf, "%u\n",
-			      be16_to_cpup((u16 *)(out_mad->data + 40 + offset / 8)));
+			      be16_to_cpup((__be16 *)(out_mad->data + 40 + offset / 8)));
 		break;
 	case 32:
 		ret = sprintf(buf, "%u\n",
-			      be32_to_cpup((u32 *)(out_mad->data + 40 + offset / 8)));
+			      be32_to_cpup((__be32 *)(out_mad->data + 40 + offset / 8)));
 		break;
 	default:
 		ret = 0;
@@ -600,10 +600,10 @@
 		return ret;
 
 	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
-		       be16_to_cpu(((u16 *) &attr.sys_image_guid)[0]),
-		       be16_to_cpu(((u16 *) &attr.sys_image_guid)[1]),
-		       be16_to_cpu(((u16 *) &attr.sys_image_guid)[2]),
-		       be16_to_cpu(((u16 *) &attr.sys_image_guid)[3]));
+		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
+		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
+		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
+		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
 }
 
 static ssize_t show_node_guid(struct class_device *cdev, char *buf)
@@ -617,10 +617,10 @@
 		return ret;
 
 	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
-		       be16_to_cpu(((u16 *) &attr.node_guid)[0]),
-		       be16_to_cpu(((u16 *) &attr.node_guid)[1]),
-		       be16_to_cpu(((u16 *) &attr.node_guid)[2]),
-		       be16_to_cpu(((u16 *) &attr.node_guid)[3]));
+		       be16_to_cpu(((__be16 *) &attr.node_guid)[0]),
+		       be16_to_cpu(((__be16 *) &attr.node_guid)[1]),
+		       be16_to_cpu(((__be16 *) &attr.node_guid)[2]),
+		       be16_to_cpu(((__be16 *) &attr.node_guid)[3]));
 }
 
 static CLASS_DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c
index b32d43e..89cd76d 100644
--- a/drivers/infiniband/core/ud_header.c
+++ b/drivers/infiniband/core/ud_header.c
@@ -195,6 +195,7 @@
 		       struct ib_ud_header *header)
 {
 	int header_len;
+	u16 packet_length;
 
 	memset(header, 0, sizeof *header);
 
@@ -209,7 +210,7 @@
 	header->lrh.link_version     = 0;
 	header->lrh.link_next_header =
 		grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL;
-	header->lrh.packet_length    = (IB_LRH_BYTES     +
+	packet_length		     = (IB_LRH_BYTES     +
 					IB_BTH_BYTES     +
 					IB_DETH_BYTES    +
 					payload_bytes    +
@@ -218,8 +219,7 @@
 
 	header->grh_present          = grh_present;
 	if (grh_present) {
-		header->lrh.packet_length  += IB_GRH_BYTES / 4;
-
+		packet_length		   += IB_GRH_BYTES / 4;
 		header->grh.ip_version      = 6;
 		header->grh.payload_length  =
 			cpu_to_be16((IB_BTH_BYTES     +
@@ -230,7 +230,7 @@
 		header->grh.next_header     = 0x1b;
 	}
 
-	cpu_to_be16s(&header->lrh.packet_length);
+	header->lrh.packet_length = cpu_to_be16(packet_length);
 
 	if (header->immediate_present)
 		header->bth.opcode           = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 8a19dd4..16d91f1 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -271,7 +271,7 @@
 	struct ib_send_wr *bad_wr;
 	struct ib_rmpp_mad *rmpp_mad;
 	u8 method;
-	u64 *tid;
+	__be64 *tid;
 	int ret, length, hdr_len, data_len, rmpp_hdr_size;
 	int rmpp_active = 0;
 
@@ -316,7 +316,7 @@
 	if (packet->mad.hdr.grh_present) {
 		ah_attr.ah_flags = IB_AH_GRH;
 		memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
-		ah_attr.grh.flow_label 	   = packet->mad.hdr.flow_label;
+		ah_attr.grh.flow_label 	   = be32_to_cpu(packet->mad.hdr.flow_label);
 		ah_attr.grh.hop_limit  	   = packet->mad.hdr.hop_limit;
 		ah_attr.grh.traffic_class  = packet->mad.hdr.traffic_class;
 	}