aboutsummaryrefslogtreecommitdiff
path: root/net
diff options
context:
space:
mode:
authorKim Lilliestierna XX <kim.xx.lilliestierna@ericsson.com>2010-02-05 09:59:57 +0100
committerJohn Rigby <john.rigby@linaro.org>2010-09-02 22:44:06 -0600
commit426bc48736015fa09ba4cb9cb98a7b5a79548b42 (patch)
treee51b7eb0e794888ad08b8c54bfe37a079f9d0c91 /net
parentc658b823ffc3a1fcee93d85ba33e2b7222a7775d (diff)
Added caif as part of kernel
Diffstat (limited to 'net')
-rw-r--r--net/Kconfig2
-rw-r--r--net/Makefile1
-rw-r--r--net/caif/Kconfig72
-rw-r--r--net/caif/Makefile34
-rw-r--r--net/caif/caif_chnlif.c177
-rw-r--r--net/caif/caif_chr.c324
-rw-r--r--net/caif/caif_config_util.c121
-rw-r--r--net/caif/caif_dev.c479
-rw-r--r--net/caif/caif_socket.c1432
-rw-r--r--net/caif/chnl_chr.c1363
-rw-r--r--net/caif/chnl_net.c594
-rw-r--r--net/caif/generic/cfcnfg.c538
-rw-r--r--net/caif/generic/cfctrl.c699
-rw-r--r--net/caif/generic/cfdgml.c106
-rw-r--r--net/caif/generic/cffrml.c148
-rw-r--r--net/caif/generic/cflist.c88
-rw-r--r--net/caif/generic/cfmuxl.c225
-rw-r--r--net/caif/generic/cfpkt_plain.c553
-rw-r--r--net/caif/generic/cfpkt_skbuff.c596
-rw-r--r--net/caif/generic/cfrfml.c104
-rw-r--r--net/caif/generic/cfserl.c198
-rw-r--r--net/caif/generic/cfsrvl.c182
-rw-r--r--net/caif/generic/cfutill.c112
-rw-r--r--net/caif/generic/cfveil.c106
-rw-r--r--net/caif/generic/cfvidl.c62
25 files changed, 8315 insertions, 1 deletions
diff --git a/net/Kconfig b/net/Kconfig
index 041c35edb76..9d79883a0c9 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -275,5 +275,5 @@ source "net/wimax/Kconfig"
source "net/rfkill/Kconfig"
source "net/9p/Kconfig"
-
+source "net/caif/Kconfig"
endif # if NET
diff --git a/net/Makefile b/net/Makefile
index 1542e7268a7..a5eae27aa42 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_NETLABEL) += netlabel/
obj-$(CONFIG_IUCV) += iucv/
obj-$(CONFIG_RFKILL) += rfkill/
obj-$(CONFIG_NET_9P) += 9p/
+obj-$(CONFIG_CAIF) += caif/
ifneq ($(CONFIG_DCB),)
obj-y += dcb/
endif
diff --git a/net/caif/Kconfig b/net/caif/Kconfig
new file mode 100644
index 00000000000..6707ef59d3b
--- /dev/null
+++ b/net/caif/Kconfig
@@ -0,0 +1,72 @@
+#
+# CAIF net configurations
+#
+
+#menu "CAIF Support"
+comment "CAIF Support"
+menuconfig CAIF
+ tristate "Enable CAIF support"
+ select CRC_CCITT
+ default n
+ ---help---
+ The "Communication CPU to Application CPU Interface" (CAIF) is a packet
+ based connection-oriented MUX protocol developed by ST-Ericsson for use
+ with its modems.
+
+ Say Y (or M) here if you build for a phone product (e.g. Android) that
+ uses CAIF as transport, if unsure say N.
+
+ If you select to build it as module then CAIF_SOCK and CAIF_NETDEV also
+ needs to be built as modules. You will also need to say yes to any CAIF
+ physical devices that your platform requires.
+
+ See Documentation/networking/caif for a further explanation on how to
+ use and configure CAIF.
+
+if CAIF
+
+config CAIF_DEBUG
+ bool "Enable Debug"
+ default n
+	---help---
+ Enable the inclusion of debug code in the CAIF stack.
+ Be aware that doing this will impact performance.
+ If unsure say N.
+
+config CAIF_SOCK
+ tristate "CAIF Sockets"
+ default CAIF
+ ---help---
+ Say Y if you will be using CAIF Sockets.
+ This can be either built-in or a loadable module,
+ If you select to build it as a built-in then the main CAIF device must
+ also be a built-in,
+ If unsure say Y.
+
+config CAIF_NETDEV
+ tristate "CAIF GPRS Network device"
+ default CAIF
+ ---help---
+ Say Y if you will be using a CAIF based GPRS network device.
+ This can be either built-in or a loadable module,
+ If you select to build it as a built-in then the main CAIF device must
+ also be a built-in.
+ If unsure say Y.
+
+#official-kernel-patch-cut-here
+
+config CAIF_CHARDEV
+ tristate "CAIF character device"
+ default CAIF
+ ---help---
+ Say Y if you will be using the CAIF AT type character devices.
+ This can be either built-in or a loadable module,
+ If you select to build it as a built-in then the main CAIF device must
+ also be a built-in.
+ If unsure say Y.
+
+# Include physical drivers
+source "drivers/net/caif/Kconfig"
+#official-kernel-patch-resume-here
+endif
+#endmenu
diff --git a/net/caif/Makefile b/net/caif/Makefile
new file mode 100644
index 00000000000..13e66956c55
--- /dev/null
+++ b/net/caif/Makefile
@@ -0,0 +1,34 @@
+ifeq ($(CONFIG_CAIF_DEBUG),y)
+CAIF_DBG_FLAGS := -DDEBUG
+endif
+
+ccflags-y := $(CAIF_FLAGS) $(CAIF_DBG_FLAGS)
+
+caif-objs := caif_dev.o caif_chnlif.o \
+ generic/cfcnfg.o generic/cfmuxl.o generic/cfctrl.o \
+ generic/cffrml.o generic/cfveil.o generic/cflist.o \
+ generic/cfserl.o generic/cfdgml.o \
+ generic/cfrfml.o generic/cfvidl.o generic/cfutill.o \
+ generic/cfsrvl.o generic/cfpkt_skbuff.o caif_config_util.o
+
+clean-dirs:= .tmp_versions
+
+clean-files:= \
+ Module.symvers \
+ modules.order \
+ *.cmd \
+ *~ \
+ generic/*.o \
+ generic/*~
+
+obj-$(CONFIG_CAIF) += caif.o
+obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o
+obj-$(CONFIG_CAIF_SOCK) += caif_socket.o
+
+export-objs := caif.o
+
+#official-kernel-patch-cut-here
+
+# CAIF character device
+obj-$(CONFIG_CAIF_CHARDEV) += chnl_chr.o caif_chr.o
+
diff --git a/net/caif/caif_chnlif.c b/net/caif/caif_chnlif.c
new file mode 100644
index 00000000000..cd337c8ce1d
--- /dev/null
+++ b/net/caif/caif_chnlif.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2009
+ * Author: Daniel Martensson / Daniel.Martensson@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/skbuff.h>
+#include <net/caif/caif_kernel.h>
+#include <net/caif/generic/caif_layer.h>
+#include <net/caif/generic/cfpkt.h>
+#include <net/caif/generic/cfcnfg.h>
+#include <net/caif/generic/cfglue.h>
+#include <net/caif/caif_dev.h>
+struct caif_kernelif {
+ struct layer layer;
+ struct caif_device *dev;
+ struct cfctrl_link_param param;
+};
+
+
+/*
+ * func caif_create_skb - Creates a CAIF SKB buffer
+ * @data: data to add to buffer
+ * @data_length: length of data
+ */
+struct sk_buff *caif_create_skb(unsigned char *data, unsigned int data_length)
+{
+ /* NOTE: Make room for CAIF headers when using SKB inside CAIF. */
+ struct sk_buff *skb =
+ alloc_skb(data_length + CAIF_SKB_HEAD_RESERVE +
+ CAIF_SKB_TAIL_RESERVE, GFP_ATOMIC);
+ if (skb == NULL)
+ return NULL;
+ skb_reserve(skb, CAIF_SKB_HEAD_RESERVE);
+
+ memcpy(skb_put(skb, data_length), data, data_length);
+ return skb;
+}
+EXPORT_SYMBOL(caif_create_skb);
+
+int caif_extract_and_destroy_skb(struct sk_buff *skb, unsigned char *data,
+ unsigned int max_length)
+{
+ unsigned int len;
+ len = skb->len;
+ /*
+ * Note: skb_linearize only fails on an out of memory condition
+ * if we fail here we are NOT freeing the skb.
+ */
+	if (skb_linearize(skb) || skb->len > max_length)
+ return CFGLU_EOVERFLOW;
+ memcpy(data, skb->data, skb->len);
+ kfree_skb(skb);
+ return len;
+}
+EXPORT_SYMBOL(caif_extract_and_destroy_skb);
+
+/*
+ * NOTE: transmit takes ownership of the SKB.
+ * I.e. transmit only fails on severe errors.
+ * flow_off is not checked on transmit; this is client's responsibility.
+ */
+int caif_transmit(struct caif_device *dev, struct sk_buff *skb)
+{
+ struct caif_kernelif *chnlif =
+ (struct caif_kernelif *) dev->_caif_handle;
+ struct cfpkt *pkt;
+ pkt = cfpkt_fromnative(CAIF_DIR_OUT, (void *) skb);
+ return chnlif->layer.dn->transmit(chnlif->layer.dn, pkt);
+}
+EXPORT_SYMBOL(caif_transmit);
+
+int caif_flow_control(struct caif_device *dev, enum caif_flowctrl flow)
+{
+ enum caif_modemcmd modemcmd;
+ struct caif_kernelif *chnlif =
+ (struct caif_kernelif *) dev->_caif_handle;
+ switch (flow) {
+ case CAIF_FLOWCTRL_ON:
+ modemcmd = CAIF_MODEMCMD_FLOW_ON_REQ;
+ break;
+ case CAIF_FLOWCTRL_OFF:
+ modemcmd = CAIF_MODEMCMD_FLOW_OFF_REQ;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return chnlif->layer.dn->modemcmd(chnlif->layer.dn, modemcmd);
+}
+EXPORT_SYMBOL(caif_flow_control);
+
+static int chnlif_receive(struct layer *layr, struct cfpkt *cfpkt)
+{
+ struct caif_kernelif *chnl =
+ container_of(layr, struct caif_kernelif, layer);
+ struct sk_buff *skb;
+ skb = (struct sk_buff *) cfpkt_tonative(cfpkt);
+ chnl->dev->receive_cb(chnl->dev, skb);
+ return CFGLU_EOK;
+}
+
+static void chnlif_flowctrl(struct layer *layr, enum caif_ctrlcmd ctrl,
+ int phyid)
+{
+ struct caif_kernelif *chnl = (struct caif_kernelif *) layr;
+ enum caif_control ctl;
+
+ switch (ctrl) {
+ case CAIF_CTRLCMD_FLOW_OFF_IND:
+ ctl = CAIF_CONTROL_FLOW_OFF;
+ break;
+ case CAIF_CTRLCMD_FLOW_ON_IND:
+ ctl = CAIF_CONTROL_FLOW_ON;
+ break;
+ case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
+ ctl = CAIF_CONTROL_REMOTE_SHUTDOWN;
+ break;
+ case CAIF_CTRLCMD_DEINIT_RSP:
+ ctl = CAIF_CONTROL_DEV_DEINIT;
+ chnl->dev->_caif_handle = NULL;
+ chnl->dev->control_cb(chnl->dev, ctl);
+		memset(chnl, 0, sizeof(*chnl));
+ cfglu_free(chnl);
+ return;
+
+ case CAIF_CTRLCMD_INIT_RSP:
+ ctl = CAIF_CONTROL_DEV_INIT;
+ break;
+ case CAIF_CTRLCMD_INIT_FAIL_RSP:
+ ctl = CAIF_CONTROL_DEV_INIT_FAILED;
+ break;
+ default:
+ return;
+ }
+ chnl->dev->control_cb(chnl->dev, ctl);
+}
+
+int caif_add_device(struct caif_device *dev)
+{
+ int ret;
+ struct caif_kernelif *chnl = cfglu_alloc(sizeof(struct caif_kernelif));
+ if (!chnl)
+ return -ENOMEM;
+ chnl->dev = dev;
+ chnl->layer.ctrlcmd = chnlif_flowctrl;
+ chnl->layer.receive = chnlif_receive;
+ ret =
+ channel_config_2_link_param(get_caif_conf(), &dev->caif_config,
+ &chnl->param);
+ if (ret < 0) {
+ ret = CFGLU_EBADPARAM;
+ goto error;
+ }
+ if (cfcnfg_add_adaptation_layer(get_caif_conf(), &chnl->param,
+ &chnl->layer)) {
+ ret = CFGLU_ENOTCONN;
+ goto error;
+ }
+ dev->_caif_handle = chnl;
+
+ return CFGLU_EOK;
+error:
+ chnl->dev->_caif_handle = NULL;
+	memset(chnl, 0, sizeof(*chnl));
+ cfglu_free(chnl);
+ return ret;
+}
+EXPORT_SYMBOL(caif_add_device);
+
+int caif_remove_device(struct caif_device *caif_dev)
+{
+
+ struct caif_kernelif *chnl =
+ container_of(caif_dev->_caif_handle, struct caif_kernelif, layer);
+ return cfcnfg_del_adapt_layer(get_caif_conf(), &chnl->layer);
+}
+EXPORT_SYMBOL(caif_remove_device);
diff --git a/net/caif/caif_chr.c b/net/caif/caif_chr.c
new file mode 100644
index 00000000000..fc9bc833c4b
--- /dev/null
+++ b/net/caif/caif_chr.c
@@ -0,0 +1,324 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2009
+ * Author: Daniel Martensson / Daniel.Martensson@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/ioctl.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/uaccess.h>
+#include <net/rtnetlink.h>
+#include <net/caif/caif_actions.h>
+#include <net/caif/caif_dev.h>
+#include <net/caif/caif_chr.h>
+#include <net/caif/generic/cfloopcfg.h>
+#include <net/caif/generic/cfpkt.h>
+#include <net/caif/generic/cfcnfg.h>
+#include <linux/caif/caif_ioctl.h>
+#include <linux/caif/if_caif.h>
+
+MODULE_LICENSE("GPL");
+
+int caif_dbg_level = CAIFLOG_LEVEL_WARNING;
+EXPORT_SYMBOL(caif_dbg_level);
+
+
+struct class caif_class = {
+ .name = "caif",
+};
+
+static ssize_t dbg_lvl_show(struct class *class, char *buf)
+{
+ return sprintf(buf, "%d\n", caif_dbg_level);
+}
+
+static ssize_t dbg_lvl_store(struct class *class, const char *buf,
+ size_t count)
+{
+ int val;
+
+
+ if ((sscanf(buf, "%d", &val) != 1) || (val < CAIFLOG_MIN_LEVEL)
+ || (val > CAIFLOG_MAX_LEVEL)) {
+ pr_warning("CAIF: %s(): Invalid value\n", __func__);
+ return -EINVAL;
+ }
+ caif_dbg_level = val;
+ return count;
+}
+
+CLASS_ATTR(dbg_lvl, 0644, dbg_lvl_show, dbg_lvl_store);
+
+int serial_use_stx;
+static int (*chrdev_mgmt_func) (int action, union caif_action *param);
+struct caif_chr {
+ struct cfloopcfg *loop;
+ struct miscdevice misc;
+};
+
+static struct caif_chr caifdev;
+
+
+int caifdev_open(struct inode *inode, struct file *filp)
+{
+ pr_warning("CAIF: %s(): Entered\n", __func__);
+ return 0;
+}
+
+int caifdev_release(struct inode *inode, struct file *filp)
+{
+ pr_warning("CAIF: %s(): Entered\n", __func__);
+ return 0;
+}
+
+static int netdev_create(struct caif_channel_create_action *action)
+{
+ struct net_device *dev;
+ rtnl_lock();
+ dev = chnl_net_create(action->name.name, &action->config);
+ rtnl_unlock();
+	return dev ? register_netdev(dev) : -ENOMEM;
+}
+
+static int netdev_remove(char *name)
+{
+ struct ifreq ifreq;
+ strncpy(ifreq.ifr_name, name, sizeof(ifreq.ifr_name));
+ ifreq.ifr_name[sizeof(ifreq.ifr_name)-1] = '\0';
+ return caif_ioctl(SIOCCAIFNETREMOVE, (unsigned long) &ifreq, false);
+}
+
+static int caifdev_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long argp)
+{
+ union caif_action param;
+ int ret;
+ int type;
+ int size;
+ int operation;
+ enum caif_dev_type devtype;
+
+ if (argp == 0) {
+ pr_info("CAIF: %s(): argument is null\n", __func__);
+ return -EINVAL;
+ }
+
+ type = _IOC_TYPE(cmd);
+ pr_info("CAIF: %s(): type = %d\n", __func__, type);
+
+ if (type != CAIF_IOC_MAGIC) {
+ pr_info("CAIF: %s(): unknown ioctl type\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Check whether command is valid before copying anything. */
+ switch (cmd) {
+ case CAIF_IOC_CONFIG_DEVICE:
+ case CAIF_IOC_REMOVE_DEVICE:
+ break;
+ default:
+ pr_info("CAIF: %s(): unknown ioctl command\n", __func__);
+ return -EINVAL;
+ }
+
+ size = _IOC_SIZE(cmd);
+ pr_info("CAIF: %s(): size = %d\n", __func__, size);
+ if (copy_from_user(&param, (void *) argp, size)) {
+ printk(KERN_WARNING
+ "caifdev_ioctl: copy_from_user returned non zero.\n");
+		return -EFAULT;
+ }
+
+
+ switch (cmd) {
+ case CAIF_IOC_CONFIG_DEVICE:
+ operation = CAIF_ACT_CREATE_DEVICE;
+ devtype = param.create_channel.name.devtype;
+ break;
+ case CAIF_IOC_REMOVE_DEVICE:
+ operation = CAIF_ACT_DELETE_DEVICE;
+ devtype = param.delete_channel.devtype;
+ break;
+ default:
+ printk(KERN_INFO
+ "caifdev_ioctl: OTHER ACTIONS NOT YET IMPLEMENTED\n");
+ return -EINVAL;
+ }
+
+ if (devtype == CAIF_DEV_CHR) {
+ pr_info("CAIF: %s(): device type CAIF_DEV_CHR\n", __func__);
+ if (chrdev_mgmt_func == NULL) {
+ printk(KERN_WARNING
+ "caifdev_ioctl:DevType CHR is not registered\n");
+ return -EINVAL;
+ }
+ ret = (*chrdev_mgmt_func)(operation, &param);
+ if (ret < 0) {
+ printk(KERN_INFO
+ "caifdev_ioctl: error performing device operation\n");
+ return ret;
+ }
+
+ } else if (devtype == CAIF_DEV_NET) {
+ pr_info("CAIF: %s(): device type CAIF_DEV_NET\n", __func__);
+ switch (cmd) {
+ case CAIF_IOC_CONFIG_DEVICE:
+ return netdev_create(&param.create_channel);
+ case CAIF_IOC_REMOVE_DEVICE:
+ return netdev_remove(param.delete_channel.name);
+ default:
+ return -EINVAL;
+ }
+ }
+
+
+ if (copy_to_user((void *) argp, &param, size)) {
+ printk(KERN_WARNING
+ "caifdev_ioctl: copy_to_user returned non zero.\n");
+		return -EFAULT;
+ }
+ return 0;
+}
+
+const struct file_operations caifdev_fops = {
+ .owner = THIS_MODULE,
+ .open = caifdev_open,
+ .ioctl = caifdev_ioctl,
+ .release = caifdev_release,
+};
+
+void __exit caifdev_exit_module(void)
+{
+ class_remove_file(&caif_class, &class_attr_dbg_lvl);
+ class_unregister(&caif_class);
+ misc_deregister(&caifdev.misc);
+}
+
+int __init caifdev_init_module(void)
+{
+ int result;
+
+ caifdev.misc.minor = MISC_DYNAMIC_MINOR;
+ caifdev.misc.name = "caifconfig";
+ caifdev.misc.fops = &caifdev_fops;
+
+ result = misc_register(&caifdev.misc);
+
+ if (result < 0) {
+ printk(KERN_WARNING
+ "caifdev: err: %d, can't register misc.\n", result);
+ goto err_misc_register_failed;
+ }
+ /* Register class for SYSFS. */
+ result = class_register(&caif_class);
+ if (unlikely(result)) {
+ printk(KERN_WARNING
+ "caifdev: err: %d, can't create sysfs node.\n", result);
+ goto err_class_register_failed;
+ }
+
+ /* Create SYSFS nodes. */
+ result = class_create_file(&caif_class, &class_attr_dbg_lvl);
+ if (unlikely(result)) {
+ printk(KERN_WARNING
+ "caifdev: err: %d, can't create sysfs node.\n", result);
+ goto err_sysfs_create_failed;
+ }
+
+ return result;
+
+err_sysfs_create_failed:
+ class_unregister(&caif_class);
+err_class_register_failed:
+
+err_misc_register_failed:
+ return -ENODEV;
+}
+
+int caifdev_phy_register(struct layer *phyif, enum cfcnfg_phy_type phy_type,
+ enum cfcnfg_phy_preference phy_pref,
+			bool fcs, bool stx)
+{
+ uint16 phyid;
+
+ /* Hook up the physical interface.
+ * Right now we are not using the returned ID.
+ */
+ cfcnfg_add_phy_layer(get_caif_conf(), phy_type, NULL, phyif, &phyid,
+			phy_pref, fcs, stx);
+
+ pr_warning("CAIF: caifdev_phy_register: phyif:%p phyid:%d == phyif->id:%d\n",
+ (void *)phyif, phyid, phyif->id);
+ return 0;
+}
+EXPORT_SYMBOL(caifdev_phy_register);
+
+#if 0
+int caifdev_phy_loop_register(struct layer *phyif,
+ enum cfcnfg_phy_type phy_type,
+ bool fcs, bool stx)
+{
+ /* Create the loop stack. */
+ caifdev.loop = cfloopcfg_create();
+
+ /* Hook up the loop layer. */
+ cfloopcfg_add_phy_layer(caifdev.loop, phy_type, phyif);
+
+ return 0;
+}
+EXPORT_SYMBOL(caifdev_phy_loop_register);
+#endif
+
+int caifdev_phy_unregister(struct layer *phyif)
+{
+ pr_warning("CAIF: caifdev_phy_unregister: phy:%p id:%d\n",
+ phyif, phyif->id);
+ cfcnfg_del_phy_layer(get_caif_conf(), phyif);
+ return 0;
+}
+EXPORT_SYMBOL(caifdev_phy_unregister);
+
+int caif_register_chrdev(int (*chrdev_mgmt)
+ (int action, union caif_action *param))
+{
+ chrdev_mgmt_func = chrdev_mgmt;
+ return 0;
+}
+EXPORT_SYMBOL(caif_register_chrdev);
+
+void caif_unregister_chrdev(void)
+{
+ chrdev_mgmt_func = NULL;
+}
+EXPORT_SYMBOL(caif_unregister_chrdev);
+
+struct caif_packet_funcs cfcnfg_get_packet_funcs(void)
+{
+ struct caif_packet_funcs f;
+ f.cfpkt_fromnative = cfpkt_fromnative;
+ f.cfpkt_tonative = cfpkt_tonative;
+ f.cfpkt_destroy = cfpkt_destroy;
+ f.cfpkt_create_xmit_pkt = cfpkt_create_uplink;
+ f.cfpkt_create_recv_pkt = cfpkt_create_uplink;
+ f.cfpkt_dequeue = cfpkt_dequeue;
+ f.cfpkt_qpeek = cfpkt_qpeek;
+ f.cfpkt_queue = cfpkt_queue;
+ f.cfpktq_create = cfpktq_create;
+ f.cfpkt_raw_extract = cfpkt_raw_extract;
+ f.cfpkt_raw_append = cfpkt_raw_append;
+ f.cfpkt_getlen = cfpkt_getlen;
+ return f;
+}
+EXPORT_SYMBOL(cfcnfg_get_packet_funcs);
+
+module_exit(caifdev_exit_module);
+subsys_initcall(caifdev_init_module);
diff --git a/net/caif/caif_config_util.c b/net/caif/caif_config_util.c
new file mode 100644
index 00000000000..9403711d932
--- /dev/null
+++ b/net/caif/caif_config_util.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2009
+ * Author: Daniel Martensson / Daniel.Martensson@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <net/caif/generic/cfglue.h>
+#include <net/caif/generic/cfctrl.h>
+#include <net/caif/generic/cfcnfg.h>
+#include <linux/caif/caif_config.h>
+
+int
+channel_config_2_link_param(struct cfcnfg *cnfg,
+ struct caif_channel_config *s, struct cfctrl_link_param *l)
+{
+ struct dev_info *dev_info;
+ enum cfcnfg_phy_preference pref;
+ memset(l, 0, sizeof(*l));
+ l->priority = s->priority;
+ if (s->phy_name[0] != '\0') {
+ l->phyid = cfcnfg_get_named(cnfg, s->phy_name);
+ } else {
+ switch (s->phy_pref) {
+ case CAIF_PHYPREF_UNSPECIFIED:
+ pref = CFPHYPREF_UNSPECIFIED;
+ break;
+ case CAIF_PHYPREF_LOW_LAT:
+ pref = CFPHYPREF_LOW_LAT;
+ break;
+ case CAIF_PHYPREF_HIGH_BW:
+ pref = CFPHYPREF_HIGH_BW;
+ break;
+ case CAIF_PHYPREF_LOOP:
+ pref = CFPHYPREF_LOOP;
+ break;
+ default:
+ return -CFGLU_ENODEV;
+ }
+ dev_info = cfcnfg_get_phyid(cnfg, pref);
+ if (dev_info == NULL)
+ return -CFGLU_ENODEV;
+ l->phyid = dev_info->id;
+ }
+ switch (s->type) {
+ case CAIF_CHTY_AT:
+ l->linktype = CFCTRL_SRV_VEI;
+ l->chtype = 0x02;
+ l->endpoint = 0x00;
+ break;
+ case CAIF_CHTY_AT_CTRL:
+ l->linktype = CFCTRL_SRV_VEI;
+ l->chtype = 0x00;
+ l->endpoint = 0x00;
+ break;
+ case CAIF_CHTY_AT_PAIRED:
+ l->linktype = CFCTRL_SRV_VEI;
+ l->chtype = 0x03;
+ l->endpoint = 0x00;
+ break;
+ case CAIF_CHTY_VIDEO:
+ l->linktype = CFCTRL_SRV_VIDEO;
+ l->chtype = 0x00;
+ l->endpoint = 0x00;
+ l->u.video.connid = s->u.dgm.connection_id;
+ break;
+ case CAIF_CHTY_DATAGRAM:
+ l->linktype = CFCTRL_SRV_DATAGRAM;
+ l->chtype = 0x00;
+ l->u.datagram.connid = s->u.dgm.connection_id;
+ break;
+ case CAIF_CHTY_DATAGRAM_LOOP:
+ l->linktype = CFCTRL_SRV_DATAGRAM;
+ l->chtype = 0x03;
+ l->endpoint = 0x00;
+ l->u.datagram.connid = s->u.dgm.connection_id;
+ break;
+ case CAIF_CHTY_DEBUG:
+ l->linktype = CFCTRL_SRV_DBG;
+ l->endpoint = 0x01; /* ACC SIDE */
+ l->chtype = 0x00; /* Single channel with interactive
+ * debug and print-out mixed.
+ */
+ break;
+ case CAIF_CHTY_DEBUG_INTERACT:
+ l->linktype = CFCTRL_SRV_DBG;
+ l->endpoint = 0x01; /* ACC SIDE */
+ l->chtype = 0x02; /* Interactive debug only */
+ break;
+ case CAIF_CHTY_DEBUG_TRACE:
+ l->linktype = CFCTRL_SRV_DBG;
+ l->endpoint = 0x01; /* ACC SIDE */
+ l->chtype = 0x01; /* Debug print-out only */
+ break;
+ case CAIF_CHTY_RFM:
+ l->linktype = CFCTRL_SRV_RFM;
+ l->u.datagram.connid = s->u.rfm.connection_id;
+ strncpy(l->u.rfm.volume, s->u.rfm.volume,
+ sizeof(l->u.rfm.volume)-1);
+ l->u.rfm.volume[sizeof(l->u.rfm.volume)-1] = 0;
+ break;
+ case CAIF_CHTY_UTILITY:
+ l->linktype = CFCTRL_SRV_UTIL;
+ l->endpoint = 0x00;
+ l->chtype = 0x00;
+ l->u.utility.fifosize_bufs = s->u.utility.fifosize_bufs;
+ l->u.utility.fifosize_kb = s->u.utility.fifosize_kb;
+ strncpy(l->u.utility.name, s->u.utility.name,
+ sizeof(l->u.utility.name)-1);
+ l->u.utility.name[sizeof(l->u.utility.name)-1] = 0;
+ l->u.utility.paramlen = s->u.utility.paramlen;
+ if (l->u.utility.paramlen > sizeof(l->u.utility.params))
+ l->u.utility.paramlen = sizeof(l->u.utility.params);
+ memcpy(l->u.utility.params, s->u.utility.params,
+ l->u.utility.paramlen);
+ break;
+ default:
+ return -CFGLU_EINVAL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(channel_config_2_link_param);
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
new file mode 100644
index 00000000000..3d55daca266
--- /dev/null
+++ b/net/caif/caif_dev.c
@@ -0,0 +1,479 @@
+/*
+ * CAIF Interface registration.
+ * Copyright (C) ST-Ericsson AB 2009
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Borrowed heavily from file: pn_dev.c. Thanks to
+ * Remi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * and Sakari Ailus <sakari.ailus@nokia.com>
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/if_arp.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <net/netns/generic.h>
+#include <net/net_namespace.h>
+#include <net/pkt_sched.h>
+#include <net/caif/caif_device.h>
+#include <net/caif/caif_dev.h>
+#include <net/caif/generic/caif_layer.h>
+#include <net/caif/generic/cfpkt.h>
+#include <net/caif/generic/cfcnfg.h>
+
+MODULE_LICENSE("GPL");
+#define TIMEOUT (HZ*1000)
+
+/* Used for local tracking of the CAIF net devices */
+struct caif_device_entry {
+ struct layer layer;
+ struct list_head list;
+ atomic_t in_use;
+ atomic_t state;
+ uint16 phyid;
+ struct net_device *netdev;
+ wait_queue_head_t event;
+};
+
+struct caif_device_entry_list {
+ struct list_head list;
+ spinlock_t lock;
+};
+
+struct caif_net {
+ struct caif_device_entry_list caifdevs;
+};
+
+int caif_net_id;
+struct cfcnfg *cfg;
+
+struct caif_device_entry_list *caif_device_list(struct net *net)
+{
+ struct caif_net *caifn;
+ BUG_ON(!net);
+ caifn = net_generic(net, caif_net_id);
+ BUG_ON(!caifn);
+ return &caifn->caifdevs;
+}
+
+/* Allocate new CAIF device. */
+static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
+{
+ struct caif_device_entry_list *caifdevs;
+ struct caif_device_entry *caifd;
+ caifdevs = caif_device_list(dev_net(dev));
+ BUG_ON(!caifdevs);
+ caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC);
+ if (!caifd)
+ return NULL;
+ caifd->netdev = dev;
+ list_add(&caifd->list, &caifdevs->list);
+ init_waitqueue_head(&caifd->event);
+ return caifd;
+}
+
+static struct caif_device_entry *caif_get(struct net_device *dev)
+{
+ struct caif_device_entry_list *caifdevs =
+ caif_device_list(dev_net(dev));
+ struct caif_device_entry *caifd;
+ BUG_ON(!caifdevs);
+ list_for_each_entry(caifd, &caifdevs->list, list) {
+ if (caifd->netdev == dev)
+ return caifd;
+ }
+ return NULL;
+}
+
+static void caif_device_destroy(struct net_device *dev)
+{
+ struct caif_device_entry_list *caifdevs =
+ caif_device_list(dev_net(dev));
+ struct caif_device_entry *caifd;
+ ASSERT_RTNL();
+ if (dev->type != ARPHRD_CAIF)
+ return;
+
+ spin_lock_bh(&caifdevs->lock);
+ caifd = caif_get(dev);
+ if (caifd == NULL) {
+ spin_unlock_bh(&caifdevs->lock);
+ return;
+ }
+
+ list_del(&caifd->list);
+ spin_unlock_bh(&caifdevs->lock);
+
+ kfree(caifd);
+ return;
+}
+
+static int transmit(struct layer *layer, struct cfpkt *pkt)
+{
+ struct caif_device_entry *caifd =
+ container_of(layer, struct caif_device_entry, layer);
+ struct sk_buff *skb, *skb2;
+ int ret = -EINVAL;
+ skb = cfpkt_tonative(pkt);
+ skb->dev = caifd->netdev;
+ /*
+ * Don't allow SKB to be destroyed upon error, but signal resend
+ * notification to clients. We can't rely on the return value as
+ * congestion (NET_XMIT_CN) sometimes drops the packet, sometimes don't.
+ */
+ if (netif_queue_stopped(caifd->netdev))
+ return -EAGAIN;
+ skb2 = skb_get(skb);
+
+ ret = dev_queue_xmit(skb2);
+
+ if (!ret)
+ kfree_skb(skb);
+ else
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int modemcmd(struct layer *layr, enum caif_modemcmd ctrl)
+{
+ struct caif_device_entry *caifd;
+ struct caif_dev_common *caifdev;
+ caifd = container_of(layr, struct caif_device_entry, layer);
+ caifdev = netdev_priv(caifd->netdev);
+ if (ctrl == _CAIF_MODEMCMD_PHYIF_USEFULL) {
+ atomic_set(&caifd->in_use, 1);
+ wake_up_interruptible(&caifd->event);
+
+ } else if (ctrl == _CAIF_MODEMCMD_PHYIF_USELESS) {
+ atomic_set(&caifd->in_use, 0);
+ wake_up_interruptible(&caifd->event);
+ }
+ return 0;
+}
+
+/*
+ * Stuff received packets to associated sockets.
+ * On error, returns non-zero and releases the skb.
+ */
+static int receive(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pkttype, struct net_device *orig_dev)
+{
+ struct net *net;
+ struct cfpkt *pkt;
+ struct caif_device_entry *caifd;
+ net = dev_net(dev);
+ pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
+ caifd = caif_get(dev);
+	if (!caifd || !caifd->layer.up || !caifd->layer.up->receive)
+ return NET_RX_DROP;
+
+ if (caifd->layer.up->receive(caifd->layer.up, pkt))
+ return NET_RX_DROP;
+
+ return 0;
+}
+
+static struct packet_type caif_packet_type __read_mostly = {
+ .type = cpu_to_be16(ETH_P_CAIF),
+ .func = receive,
+};
+
+static void dev_flowctrl(struct net_device *dev, int on)
+{
+ struct caif_device_entry *caifd = caif_get(dev);
+ if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
+ return;
+
+ caifd->layer.up->ctrlcmd(caifd->layer.up,
+ on ?
+ _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
+ _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
+ caifd->layer.id);
+}
+
+/* notify Caif of device events */
+static int caif_device_notify(struct notifier_block *me, unsigned long what,
+ void *arg)
+{
+ struct net_device *dev = arg;
+ struct caif_device_entry *caifd = NULL;
+ struct caif_dev_common *caifdev;
+ int res = -EINVAL;
+ enum caif_phy_preference phy_pref;
+ enum cfcnfg_phy_type phy_type;
+
+ if (dev->type != ARPHRD_CAIF)
+ return 0;
+
+ switch (what) {
+ case NETDEV_REGISTER:
+ pr_info("CAIF: %s():register %s\n", __func__, dev->name);
+ caifd = caif_device_alloc(dev);
+ if (caifd == NULL)
+ break;
+ caifdev = netdev_priv(dev);
+ caifdev->flowctrl = dev_flowctrl;
+ atomic_set(&caifd->state, what);
+ res = 0;
+ break;
+
+ case NETDEV_UP:
+ pr_info("CAIF: %s(): up %s\n", __func__, dev->name);
+ caifd = caif_get(dev);
+ if (caifd == NULL)
+ break;
+ caifdev = netdev_priv(dev);
+ if (atomic_read(&caifd->state) == NETDEV_UP) {
+ pr_info("CAIF: %s():%s already up\n",
+ __func__, dev->name);
+ break;
+ }
+ atomic_set(&caifd->state, what);
+ caifd->layer.transmit = transmit;
+ caifd->layer.modemcmd = modemcmd;
+ if (caifdev->link_select == CAIF_PHYPREF_LOW_LAT)
+ phy_pref = CAIF_LINK_LOW_LATENCY;
+ else
+ phy_pref = CAIF_LINK_HIGH_BANDW;
+
+ if (caifdev->use_frag)
+ phy_type = CFPHYTYPE_FRAG;
+ else
+ phy_type = CFPHYTYPE_CAIF;
+
+ cfcnfg_add_phy_layer(get_caif_conf(),
+ phy_type,
+ dev,
+ &caifd->layer,
+ &caifd->phyid,
+ phy_pref,
+ caifdev->use_fcs,
+ caifdev->use_stx);
+ strncpy(caifd->layer.name, dev->name,
+ sizeof(caifd->layer.name) - 1);
+ caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
+ break;
+
+ case NETDEV_GOING_DOWN:
+ caifd = caif_get(dev);
+ if (caifd == NULL)
+ break;
+ pr_info("CAIF: %s():going down %s\n", __func__, dev->name);
+ atomic_set(&caifd->state, what);
+ if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
+ return -EINVAL;
+ caifd->layer.up->ctrlcmd(caifd->layer.up,
+ _CAIF_CTRLCMD_PHYIF_DOWN_IND,
+ caifd->layer.id);
+ res = wait_event_interruptible_timeout(caifd->event,
+ atomic_read(&caifd->in_use) == 0,
+ TIMEOUT);
+ break;
+
+ case NETDEV_DOWN:
+ caifd = caif_get(dev);
+ if (caifd == NULL)
+ break;
+ pr_info("CAIF: %s(): down %s\n", __func__, dev->name);
+ if (atomic_read(&caifd->in_use))
+ pr_warning("CAIF: %s(): "
+ "Unregistering an active CAIF device: %s\n",
+ __func__, dev->name);
+ cfcnfg_del_phy_layer(get_caif_conf(), &caifd->layer);
+ atomic_set(&caifd->state, what);
+ break;
+
+ case NETDEV_UNREGISTER:
+ caifd = caif_get(dev);
+ pr_info("CAIF: %s(): unregister %s\n", __func__, dev->name);
+ atomic_set(&caifd->state, what);
+ caif_device_destroy(dev);
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block caif_device_notifier = {
+ .notifier_call = caif_device_notify,
+ .priority = 0,
+};
+
+
+struct cfcnfg *get_caif_conf(void)
+{
+ return cfg;
+}
+EXPORT_SYMBOL(get_caif_conf);
+
+static int (*caif_ioctl_func)(unsigned int cmd, unsigned long arg, bool);
+
+void caif_register_ioctl(
+ int (*ioctl)(unsigned int cmd,
+ unsigned long arg,
+ bool))
+{
+ caif_ioctl_func = ioctl;
+}
+EXPORT_SYMBOL(caif_register_ioctl);
+
+int caif_ioctl(unsigned int cmd, unsigned long arg, bool from_user_land)
+{
+ if (caif_ioctl_func == NULL)
+ return -EINVAL;
+ return caif_ioctl_func(cmd, arg, from_user_land);
+}
+EXPORT_SYMBOL(caif_ioctl);
+
+int caifdev_adapt_register(struct caif_channel_config *config,
+ struct layer *adap_layer)
+{
+ struct cfctrl_link_param param;
+
+ if (channel_config_2_link_param(get_caif_conf(), config, &param) == 0)
+ /* Hook up the adaptation layer. */
+ return cfcnfg_add_adaptation_layer(get_caif_conf(),
+ &param, adap_layer);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(caifdev_adapt_register);
+
+int caifdev_adapt_unregister(struct layer *adap_layer)
+{
+ return cfcnfg_del_adapt_layer(get_caif_conf(), adap_layer);
+}
+EXPORT_SYMBOL(caifdev_adapt_unregister);
+
+//official-kernel-patch-cut-here
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32))
+//official-kernel-patch-resume-here
+/* Per-namespace Caif devices handling */
+static int caif_init_net(struct net *net)
+{
+ struct caif_net *caifn = net_generic(net, caif_net_id);
+ INIT_LIST_HEAD(&caifn->caifdevs.list);
+ spin_lock_init(&caifn->caifdevs.lock);
+ return 0;
+}
+
+static void caif_exit_net(struct net *net)
+{
+ struct net_device *dev;
+ int res;
+ rtnl_lock();
+ for_each_netdev(net, dev) {
+ if (dev->type != ARPHRD_CAIF)
+ continue;
+ res = dev_close(dev);
+ caif_device_destroy(dev);
+ }
+ rtnl_unlock();
+}
+
+static struct pernet_operations caif_net_ops = {
+ .init = caif_init_net,
+ .exit = caif_exit_net,
+ .id = &caif_net_id,
+ .size = sizeof(struct caif_net),
+};
+//official-kernel-patch-cut-here
+#else
+
+/* Per-namespace Caif devices handling */
+static int caif_init_net(struct net *net)
+{
+ struct caif_net *caifn;
+ int ret;
+ caifn = kmalloc(sizeof(*caifn), GFP_KERNEL);
+ if (!caifn)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&caifn->caifdevs.list);
+ spin_lock_init(&caifn->caifdevs.lock);
+ ret = net_assign_generic(net, caif_net_id, caifn);
+ if (ret)
+ kfree(caifn);
+
+ return 0;
+}
+
+static void caif_exit_net(struct net *net)
+{
+ struct caif_net *caifn = net_generic(net, caif_net_id);
+ struct net_device *dev;
+ int res;
+ rtnl_lock();
+ for_each_netdev(net, dev) {
+ if (dev->type != ARPHRD_CAIF)
+ continue;
+ res = dev_close(dev);
+ caif_device_destroy(dev);
+ }
+ rtnl_unlock();
+ kfree(caifn);
+}
+
+static struct pernet_operations caif_net_ops = {
+ .init = caif_init_net,
+ .exit = caif_exit_net,
+};
+
+#endif
+//official-kernel-patch-resume-here
+
+/* Initialize Caif devices list */
+int __init caif_device_init(void)
+{
+ int result;
+ cfg = cfcnfg_create();
+ if (!cfg) {
+ pr_warning("CAIF: %s(): can't create cfcnfg.\n", __func__);
+ goto err_cfcnfg_create_failed;
+ }
+//official-kernel-patch-cut-here
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32))
+//official-kernel-patch-resume-here
+ result = register_pernet_device(&caif_net_ops);
+//official-kernel-patch-cut-here
+#else
+	result = register_pernet_gen_device(&caif_net_id, &caif_net_ops);
+#endif
+//official-kernel-patch-resume-here
+
+ if (result) {
+		cfcnfg_remove(cfg);
+ cfg = NULL;
+ return result;
+ }
+ dev_add_pack(&caif_packet_type);
+ register_netdevice_notifier(&caif_device_notifier);
+
+ return result;
+err_cfcnfg_create_failed:
+ return -ENODEV;
+}
+
+void __exit caif_device_exit(void)
+{
+ cfcnfg_remove(cfg);
+ dev_remove_pack(&caif_packet_type);
+//official-kernel-patch-cut-here
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32))
+//official-kernel-patch-resume-here
+ unregister_pernet_device(&caif_net_ops);
+//official-kernel-patch-cut-here
+#else
+	unregister_pernet_gen_device(caif_net_id, &caif_net_ops);
+#endif
+//official-kernel-patch-resume-here
+ unregister_netdevice_notifier(&caif_device_notifier);
+}
+
+module_init(caif_device_init);
+module_exit(caif_device_exit);
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
new file mode 100644
index 00000000000..85d38b65b28
--- /dev/null
+++ b/net/caif/caif_socket.c
@@ -0,0 +1,1432 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2009
+ * Author: Sjur Brendeland sjur.brandlenad@stericsson.com
+ * Per Sigmond per.sigmond@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/tcp.h>
+#include <linux/uaccess.h>
+#include <asm/atomic.h>
+
+#include <linux/caif/caif_socket.h>
+#include <linux/caif/caif_config.h>
+#include <net/caif/generic/caif_layer.h>
+#include <net/caif/caif_dev.h>
+#include <net/caif/generic/cfpkt.h>
+
+//official-kernel-patch-cut-here
+#if !defined(trace_printk)
+#define trace_printk(fmt, ...)
+#endif
+//official-kernel-patch-resume-here
+MODULE_LICENSE("GPL");
+
+/* Receive-queue watermarks: above HIGH we send modem flow-off, below LOW
+ * we send flow-on again (see caif_sktrecv_cb / caif_recvmsg). */
+#define CHNL_SKT_READ_QUEUE_HIGH 2000
+#define CHNL_SKT_READ_QUEUE_LOW 100
+
+/* Default socket send/receive buffer size applied in caif_create(). */
+int caif_sockbuf_size = 40000;
+static struct kmem_cache *caif_sk_cachep;
+/* Number of live CAIF sockets; used to generate unique socket names. */
+static atomic_t caif_nr_socks = ATOMIC_INIT(0);
+
+/* Bit numbers inside caifsock.conn_state. */
+#define CONN_STATE_OPEN_BIT 1
+#define CONN_STATE_PENDING_BIT 2
+#define CONN_STATE_PEND_DESTROY_BIT 3
+#define CONN_REMOTE_SHUTDOWN_BIT 4
+
+/* Bit numbers inside caifsock.flow_state. */
+#define TX_FLOW_ON_BIT 1
+#define RX_FLOW_ON_BIT 2
+
+/* Atomic test/set/clear helpers over the connection state bits. */
+#define STATE_IS_OPEN(cf_sk) test_bit(CONN_STATE_OPEN_BIT,\
+ (void *) &(cf_sk)->conn_state)
+#define STATE_IS_REMOTE_SHUTDOWN(cf_sk) test_bit(CONN_REMOTE_SHUTDOWN_BIT,\
+ (void *) &(cf_sk)->conn_state)
+#define STATE_IS_PENDING(cf_sk) test_bit(CONN_STATE_PENDING_BIT,\
+ (void *) &(cf_sk)->conn_state)
+#define STATE_IS_PENDING_DESTROY(cf_sk) test_bit(CONN_STATE_PEND_DESTROY_BIT,\
+ (void *) &(cf_sk)->conn_state)
+
+#define SET_STATE_PENDING_DESTROY(cf_sk) set_bit(CONN_STATE_PEND_DESTROY_BIT,\
+ (void *) &(cf_sk)->conn_state)
+#define SET_STATE_OPEN(cf_sk) set_bit(CONN_STATE_OPEN_BIT,\
+ (void *) &(cf_sk)->conn_state)
+#define SET_STATE_CLOSED(cf_sk) clear_bit(CONN_STATE_OPEN_BIT,\
+ (void *) &(cf_sk)->conn_state)
+#define SET_PENDING_ON(cf_sk) set_bit(CONN_STATE_PENDING_BIT,\
+ (void *) &(cf_sk)->conn_state)
+#define SET_PENDING_OFF(cf_sk) clear_bit(CONN_STATE_PENDING_BIT,\
+ (void *) &(cf_sk)->conn_state)
+#define SET_REMOTE_SHUTDOWN(cf_sk) set_bit(CONN_REMOTE_SHUTDOWN_BIT,\
+ (void *) &(cf_sk)->conn_state)
+
+#define SET_REMOTE_SHUTDOWN_OFF(dev) clear_bit(CONN_REMOTE_SHUTDOWN_BIT,\
+ (void *) &(dev)->conn_state)
+/* Atomic test/set/clear helpers over the flow state bits. */
+#define RX_FLOW_IS_ON(cf_sk) test_bit(RX_FLOW_ON_BIT,\
+ (void *) &(cf_sk)->flow_state)
+#define TX_FLOW_IS_ON(cf_sk) test_bit(TX_FLOW_ON_BIT,\
+ (void *) &(cf_sk)->flow_state)
+
+#define SET_RX_FLOW_OFF(cf_sk) clear_bit(RX_FLOW_ON_BIT,\
+ (void *) &(cf_sk)->flow_state)
+#define SET_RX_FLOW_ON(cf_sk) set_bit(RX_FLOW_ON_BIT,\
+ (void *) &(cf_sk)->flow_state)
+#define SET_TX_FLOW_OFF(cf_sk) clear_bit(TX_FLOW_ON_BIT,\
+ (void *) &(cf_sk)->flow_state)
+#define SET_TX_FLOW_ON(cf_sk) set_bit(TX_FLOW_ON_BIT,\
+ (void *) &(cf_sk)->flow_state)
+
+/* Access-mode bits accumulated into caifsock.file_mode on connect. */
+#define SKT_READ_FLAG 0x01
+#define SKT_WRITE_FLAG 0x02
+static struct dentry *debugfsdir;
+/* NOTE(review): #include placed after a declaration — works, but would
+ * conventionally live with the other includes at the top of the file. */
+#include <linux/debugfs.h>
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * Event counters exported through debugfs; incremented via
+ * dbfs_atomic_inc(), which compiles away when CONFIG_DEBUG_FS is off.
+ */
+struct debug_fs_counter {
+ atomic_t num_open;
+ atomic_t num_close;
+ atomic_t num_init;
+ atomic_t num_init_resp;
+ atomic_t num_init_fail_resp;
+ atomic_t num_deinit;
+ atomic_t num_deinit_resp;
+ atomic_t num_remote_shutdown_ind;
+ atomic_t num_tx_flow_off_ind;
+ atomic_t num_tx_flow_on_ind;
+ atomic_t num_rx_flow_off;
+ atomic_t num_rx_flow_on;
+};
+struct debug_fs_counter cnt;
+#define dbfs_atomic_inc(v) atomic_inc(v)
+#else
+#define dbfs_atomic_inc(v)
+#endif
+
+/* The AF_CAIF socket */
+struct caifsock {
+ /* NOTE: sk has to be the first member */
+ struct sock sk;
+ /* CAIF stack layer; receive/ctrlcmd callbacks point back at us. */
+ struct layer layer;
+ /* Debug name, "cf_sk<N>"; set in caif_create(). */
+ char name[CAIF_LAYER_NAME_SZ];
+ /* Bit field of CONN_STATE_* bits, see macros above. */
+ u32 conn_state;
+ /* Bit field of TX/RX_FLOW_ON_BIT, see macros above. */
+ u32 flow_state;
+ /* Receive packet queue (has its own internal lock). */
+ struct cfpktq *pktq;
+ /* SKT_READ_FLAG | SKT_WRITE_FLAG accumulated on connect. */
+ int file_mode;
+ /* Channel configuration passed to caifdev_adapt_register(). */
+ struct caif_channel_config config;
+ /* Depth of pktq, guarded by read_queue_len_lock. */
+ int read_queue_len;
+ spinlock_t read_queue_len_lock;
+ struct dentry *debugfs_socket_dir;
+};
+
+static void drain_queue(struct caifsock *cf_sk);
+
+/* Packet Receive Callback function called from CAIF Stack */
+/*
+ * Queue an incoming packet on the socket's receive queue, track the
+ * queue depth and send a modem flow-off request once the HIGH watermark
+ * is crossed.  May be invoked from tasklet (atomic) context, so it must
+ * not sleep.  Returns 0; packets arriving after a close request are
+ * silently dropped.
+ */
+static int caif_sktrecv_cb(struct layer *layr, struct cfpkt *pkt)
+{
+	struct caifsock *cf_sk;
+	int read_queue_high;
+	cf_sk = container_of(layr, struct caifsock, layer);
+
+	if (!STATE_IS_OPEN(cf_sk)) {
+		/*FIXME: This should be allowed finally!*/
+		pr_debug("CAIF: %s(): called after close request\n", __func__);
+		cfpkt_destroy(pkt);
+		return 0;
+	}
+	/* NOTE: This function may be called in Tasklet context! */
+
+	/* The queue has its own lock */
+	cfpkt_queue(cf_sk->pktq, pkt, 0);
+
+	spin_lock(&cf_sk->read_queue_len_lock);
+	cf_sk->read_queue_len++;
+
+	read_queue_high = (cf_sk->read_queue_len > CHNL_SKT_READ_QUEUE_HIGH);
+	spin_unlock(&cf_sk->read_queue_len_lock);
+
+	if (RX_FLOW_IS_ON(cf_sk) && read_queue_high) {
+		dbfs_atomic_inc(&cnt.num_rx_flow_off);
+		SET_RX_FLOW_OFF(cf_sk);
+
+		/* Send flow off (NOTE: must not sleep) */
+		pr_debug("CAIF: %s():"
+			" sending flow OFF (queue len = %d)\n",
+			__func__,
+			cf_sk->read_queue_len);
+		caif_assert(cf_sk->layer.dn);
+		/* Fix: assert the operation that is actually invoked below;
+		 * the original asserted ->ctrlcmd but calls ->modemcmd. */
+		caif_assert(cf_sk->layer.dn->modemcmd);
+
+		(void) cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
+						CAIF_MODEMCMD_FLOW_OFF_REQ);
+	}
+
+	/* Signal reader that data is available. */
+
+	wake_up_interruptible(cf_sk->sk.sk_sleep);
+
+	return 0;
+}
+
+/* Packet Flow Control Callback function called from CAIF */
+/*
+ * Control/flow indication from the CAIF stack.  Translates stack events
+ * into conn_state/flow_state bit changes and wakes any sleeper on
+ * sk_sleep.  May run in tasklet context.
+ */
+static void caif_sktflowctrl_cb(struct layer *layr,
+ enum caif_ctrlcmd flow,
+ int phyid)
+{
+ struct caifsock *cf_sk;
+
+ /* NOTE: This function may be called in Tasklet context! */
+ trace_printk("CAIF: %s(): flowctrl func called: %s.\n",
+ __func__,
+ flow == CAIF_CTRLCMD_FLOW_ON_IND ? "ON" :
+ flow == CAIF_CTRLCMD_FLOW_OFF_IND ? "OFF" :
+ flow == CAIF_CTRLCMD_INIT_RSP ? "INIT_RSP" :
+ flow == CAIF_CTRLCMD_DEINIT_RSP ? "DEINIT_RSP" :
+ flow == CAIF_CTRLCMD_INIT_FAIL_RSP ? "INIT_FAIL_RSP" :
+ flow ==
+ CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ? "REMOTE_SHUTDOWN" :
+ "UKNOWN CTRL COMMAND");
+
+ cf_sk = container_of(layr, struct caifsock, layer);
+ switch (flow) {
+ case CAIF_CTRLCMD_FLOW_ON_IND:
+ /* Modem lifted flow control: writers may proceed. */
+ dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
+ /* Signal reader that data is available. */
+ SET_TX_FLOW_ON(cf_sk);
+ wake_up_interruptible(cf_sk->sk.sk_sleep);
+ break;
+
+ case CAIF_CTRLCMD_FLOW_OFF_IND:
+ /* Modem asserted flow control: writers will block. */
+ dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
+ SET_TX_FLOW_OFF(cf_sk);
+ break;
+
+ case CAIF_CTRLCMD_INIT_RSP:
+ /* Channel setup completed; connect() waiters can resume. */
+ dbfs_atomic_inc(&cnt.num_init_resp);
+ /* Signal reader that data is available. */
+ caif_assert(STATE_IS_OPEN(cf_sk));
+ SET_PENDING_OFF(cf_sk);
+ SET_TX_FLOW_ON(cf_sk);
+ wake_up_interruptible(cf_sk->sk.sk_sleep);
+ break;
+
+ case CAIF_CTRLCMD_DEINIT_RSP:
+ /* Channel teardown acknowledged.  Drops the reference taken
+  * by sock_hold() in caif_shutdown(). */
+ dbfs_atomic_inc(&cnt.num_deinit_resp);
+ caif_assert(!STATE_IS_OPEN(cf_sk));
+ SET_PENDING_OFF(cf_sk);
+ if (!STATE_IS_PENDING_DESTROY(cf_sk)) {
+ if (cf_sk->sk.sk_sleep != NULL)
+ wake_up_interruptible(cf_sk->sk.sk_sleep);
+ }
+ sock_put(&cf_sk->sk);
+ break;
+
+ case CAIF_CTRLCMD_INIT_FAIL_RSP:
+ /* Channel setup rejected: mark closed so connect() fails. */
+ dbfs_atomic_inc(&cnt.num_init_fail_resp);
+ caif_assert(STATE_IS_OPEN(cf_sk));
+ SET_STATE_CLOSED(cf_sk);
+ SET_PENDING_OFF(cf_sk);
+ SET_TX_FLOW_OFF(cf_sk);
+ wake_up_interruptible(cf_sk->sk.sk_sleep);
+ break;
+
+ case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
+ /* Spontaneous remote shutdown: stop TX, flush RX queue and
+  * reset the file mode; readers/writers see -ESHUTDOWN. */
+ dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
+ caif_assert(STATE_IS_OPEN(cf_sk));
+ caif_assert(STATE_IS_PENDING(cf_sk));
+ SET_REMOTE_SHUTDOWN(cf_sk);
+ SET_TX_FLOW_OFF(cf_sk);
+ drain_queue(cf_sk);
+ SET_RX_FLOW_ON(cf_sk);
+ cf_sk->file_mode = 0;
+ wake_up_interruptible(cf_sk->sk.sk_sleep);
+ break;
+
+ default:
+ pr_debug("CAIF: %s(): Unexpected flow command %d\n",
+ __func__, flow);
+ }
+}
+
+
+
+/*
+ * Allocate a send skb on behalf of @sk.  GFP_KERNEL is temporarily ORed
+ * into sk->sk_allocation for the duration of the call; the previous
+ * allocation flags are restored afterwards.  Failure reason is reported
+ * through @err, as with sock_alloc_send_skb().
+ */
+static struct sk_buff *caif_alloc_send_skb(struct sock *sk,
+					unsigned long data_len,
+					int *err)
+{
+	unsigned int saved_allocation = sk->sk_allocation;
+	struct sk_buff *skb;
+
+	sk->sk_allocation = saved_allocation | GFP_KERNEL;
+	skb = sock_alloc_send_skb(sk, data_len, 1, err);
+	sk->sk_allocation = saved_allocation;
+
+	return skb;
+}
+
+/*
+ * Receive one packet from the socket's receive queue into the caller's
+ * single iovec.  Blocks (unless MSG_DONTWAIT) while the connection is
+ * pending or the queue is empty; sends a modem flow-on request once the
+ * queue drains below the LOW watermark.  Returns the number of bytes
+ * copied, or a negative errno (-EBADFD, -EOPNOTSUPP, -EINVAL, -EPIPE,
+ * -EBADF, -EAGAIN, -ERESTARTSYS, -ESHUTDOWN, -EFAULT).
+ */
+static int caif_recvmsg(struct kiocb *iocb, struct socket *sock,
+			struct msghdr *m, size_t buf_len, int flags)
+
+{
+	struct sock *sk = sock->sk;
+	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+	struct cfpkt *pkt = NULL;
+	size_t len;
+	int result;
+	struct sk_buff *skb;
+	ssize_t ret = -EIO;
+	int read_queue_low;
+
+	if (cf_sk == NULL) {
+		pr_debug("CAIF: %s(): private_data not set!\n",
+			__func__);
+		ret = -EBADFD;
+		goto read_error;
+	}
+
+	/* Don't do multiple iovec entries yet */
+	if (m->msg_iovlen != 1)
+		return -EOPNOTSUPP;
+
+	if (unlikely(!buf_len))
+		return -EINVAL;
+
+	lock_sock(&(cf_sk->sk));
+
+	caif_assert(cf_sk->pktq);
+
+	if (!STATE_IS_OPEN(cf_sk)) {
+		/* Socket is closed or closing. */
+		if (!STATE_IS_PENDING(cf_sk)) {
+			pr_debug("CAIF: %s(): socket is closed (by remote)\n",
+				__func__);
+			ret = -EPIPE;
+		} else {
+			pr_debug("CAIF: %s(): socket is closing..\n", __func__);
+			ret = -EBADF;
+		}
+		goto read_error;
+	}
+
+	/* Socket is open or opening. */
+	if (STATE_IS_PENDING(cf_sk)) {
+		pr_debug("CAIF: %s(): socket is opening...\n", __func__);
+
+		if (flags & MSG_DONTWAIT) {
+			/* We can't block. */
+			pr_debug("CAIF: %s():state pending and MSG_DONTWAIT\n",
+				__func__);
+			ret = -EAGAIN;
+			goto read_error;
+		}
+
+		/*
+		 * Blocking mode; state is pending and we need to wait
+		 * for its conclusion.
+		 */
+		release_sock(&cf_sk->sk);
+
+		result =
+			wait_event_interruptible(*cf_sk->sk.sk_sleep,
+						!STATE_IS_PENDING(cf_sk));
+
+		lock_sock(&(cf_sk->sk));
+
+		if (result == -ERESTARTSYS) {
+			pr_debug("CAIF: %s(): wait_event_interruptible"
+				" woken by a signal (1)", __func__);
+			ret = -ERESTARTSYS;
+			goto read_error;
+		}
+	}
+
+	if (STATE_IS_REMOTE_SHUTDOWN(cf_sk)) {
+		pr_debug("CAIF: %s(): received remote_shutdown indication\n",
+			__func__);
+		ret = -ESHUTDOWN;
+		goto read_error;
+	}
+
+	/*
+	 * Block if we don't have any received buffers.
+	 * The queue has its own lock.
+	 */
+	while ((pkt = cfpkt_qpeek(cf_sk->pktq)) == NULL) {
+
+		if (flags & MSG_DONTWAIT) {
+			pr_debug("CAIF: %s(): MSG_DONTWAIT\n", __func__);
+			ret = -EAGAIN;
+			goto read_error;
+		}
+		pr_debug("CAIF: %s() wait_event\n", __func__);
+
+		/* Let writers in. */
+		release_sock(&cf_sk->sk);
+
+		/* Block reader until data arrives or socket is closed. */
+		if (wait_event_interruptible(*cf_sk->sk.sk_sleep,
+					cfpkt_qpeek(cf_sk->pktq)
+					|| STATE_IS_REMOTE_SHUTDOWN(cf_sk)
+					|| !STATE_IS_OPEN(cf_sk)) ==
+		    -ERESTARTSYS) {
+			pr_debug("CAIF: %s():"
+				" wait_event_interruptible woken by "
+				"a signal, signal_pending(current) = %d\n",
+				__func__,
+				signal_pending(current));
+			/* Lock already dropped above; return directly. */
+			return -ERESTARTSYS;
+		}
+
+		pr_debug("CAIF: %s() awake\n", __func__);
+		if (STATE_IS_REMOTE_SHUTDOWN(cf_sk)) {
+			pr_debug("CAIF: %s(): "
+				"received remote_shutdown indication\n",
+				__func__);
+			ret = -ESHUTDOWN;
+			goto read_error_no_unlock;
+		}
+
+		/* I want to be alone on cf_sk (except status and queue). */
+		lock_sock(&(cf_sk->sk));
+
+		if (!STATE_IS_OPEN(cf_sk)) {
+			/* Someone closed the link, report error. */
+			pr_debug("CAIF: %s(): remote end shutdown!\n",
+				__func__);
+			ret = -EPIPE;
+			goto read_error;
+		}
+	}
+
+	/* The queue has its own lock. */
+	len = cfpkt_getlen(pkt);
+
+	/* Check max length that can be copied. */
+	if (len > buf_len) {
+		pr_debug("CAIF: %s(): user buffer too small\n", __func__);
+		ret = -EINVAL;
+		goto read_error;
+	}
+
+	/*
+	 * Get packet from queue.
+	 * The queue has its own lock.
+	 */
+	pkt = cfpkt_dequeue(cf_sk->pktq);
+
+	spin_lock(&cf_sk->read_queue_len_lock);
+	cf_sk->read_queue_len--;
+	read_queue_low = (cf_sk->read_queue_len < CHNL_SKT_READ_QUEUE_LOW);
+	spin_unlock(&cf_sk->read_queue_len_lock);
+
+	if (!RX_FLOW_IS_ON(cf_sk) && read_queue_low) {
+		dbfs_atomic_inc(&cnt.num_rx_flow_on);
+		SET_RX_FLOW_ON(cf_sk);
+
+		/* Send flow on. */
+		pr_debug("CAIF: %s(): sending flow ON (queue len = %d)\n",
+			__func__, cf_sk->read_queue_len);
+		caif_assert(cf_sk->layer.dn);
+		/* Fix: assert the operation actually invoked below; the
+		 * original asserted ->ctrlcmd but calls ->modemcmd. */
+		caif_assert(cf_sk->layer.dn->modemcmd);
+		(void) cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
+						CAIF_MODEMCMD_FLOW_ON_REQ);
+
+		caif_assert(cf_sk->read_queue_len >= 0);
+	}
+	skb = cfpkt_tonative(pkt);
+	result = skb_copy_datagram_iovec(skb, 0, m->msg_iov, len);
+	if (result) {
+		/* Fix: message named cfpkt_raw_extract, which is not the
+		 * function called here. */
+		pr_debug("CAIF: %s(): skb_copy_datagram_iovec failed\n",
+			__func__);
+		cfpkt_destroy(pkt);
+		ret = -EFAULT;
+		goto read_error;
+	}
+	/* NOTE(review): unreachable — len > buf_len was rejected above, so
+	 * MSG_TRUNC is never set.  Kept for safety. */
+	if (unlikely(buf_len < len)) {
+		len = buf_len;
+		m->msg_flags |= MSG_TRUNC;
+	}
+
+	/* Free packet. */
+	skb_free_datagram(sk, skb);
+
+	/* Let the others in. */
+	release_sock(&cf_sk->sk);
+	return len;
+
+read_error:
+	release_sock(&cf_sk->sk);
+read_error_no_unlock:
+	return ret;
+}
+
+/* Send a signal as a consequence of sendmsg, sendto or caif_sendmsg. */
+/*
+ * Transmit one message (single iovec) down the CAIF stack.  Blocks
+ * (unless MSG_DONTWAIT) while the connection is pending, TX flow is off,
+ * or the lower layer returns -EAGAIN.  Returns the payload size on
+ * success or a negative errno.
+ */
+static int caif_sendmsg(struct kiocb *kiocb, struct socket *sock,
+			struct msghdr *msg, size_t len)
+{
+
+	struct sock *sk = sock->sk;
+	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+	void *payload;
+	size_t payload_size = msg->msg_iov->iov_len;
+	struct cfpkt *pkt = NULL;
+	struct payload_info info;
+	unsigned char *txbuf;
+	int err = 0;
+	ssize_t ret = -EIO;
+	int result;
+	struct sk_buff *skb;
+	caif_assert(msg->msg_iovlen == 1);
+
+	if (cf_sk == NULL) {
+		pr_debug("CAIF: %s(): private_data not set!\n",
+			__func__);
+		ret = -EBADFD;
+		goto write_error_no_unlock;
+	}
+
+	if (unlikely(msg->msg_iov->iov_base == NULL)) {
+		pr_warning("CAIF: %s(): Buffer is NULL.\n", __func__);
+		ret = -EINVAL;
+		goto write_error_no_unlock;
+	}
+
+	payload = msg->msg_iov->iov_base;
+	if (payload_size > CAIF_MAX_PAYLOAD_SIZE) {
+		pr_debug("CAIF: %s(): buffer too long\n", __func__);
+		ret = -EINVAL;
+		goto write_error_no_unlock;
+	}
+	/* I want to be alone on cf_sk (except status and queue) */
+	lock_sock(&(cf_sk->sk));
+
+	caif_assert(cf_sk->pktq);
+
+	if (!STATE_IS_OPEN(cf_sk)) {
+		/* Socket is closed or closing */
+		if (!STATE_IS_PENDING(cf_sk)) {
+			pr_debug("CAIF: %s(): socket is closed (by remote)\n",
+				__func__);
+			ret = -EPIPE;
+		} else {
+			pr_debug("CAIF: %s(): socket is closing...\n",
+				__func__);
+			ret = -EBADF;
+		}
+		goto write_error;
+	}
+
+	/* Socket is open or opening */
+	if (STATE_IS_PENDING(cf_sk)) {
+		pr_debug("CAIF: %s(): socket is opening...\n", __func__);
+
+		if (msg->msg_flags & MSG_DONTWAIT) {
+			/* We can't block */
+			trace_printk("CAIF: %s():state pending:"
+				"state=MSG_DONTWAIT\n", __func__);
+			ret = -EAGAIN;
+			goto write_error;
+		}
+
+		/* Let readers in */
+		release_sock(&cf_sk->sk);
+
+		/*
+		 * Blocking mode; state is pending and we need to wait
+		 * for its conclusion.
+		 */
+		result =
+			wait_event_interruptible(*cf_sk->sk.sk_sleep,
+						!STATE_IS_PENDING(cf_sk));
+
+		/* I want to be alone on cf_sk (except status and queue) */
+		lock_sock(&(cf_sk->sk));
+
+		if (result == -ERESTARTSYS) {
+			pr_debug("CAIF: %s(): wait_event_interruptible"
+				" woken by a signal (1)", __func__);
+			ret = -ERESTARTSYS;
+			goto write_error;
+		}
+	}
+
+	if (STATE_IS_REMOTE_SHUTDOWN(cf_sk)) {
+		pr_debug("CAIF: %s(): received remote_shutdown indication\n",
+			__func__);
+		ret = -ESHUTDOWN;
+		goto write_error;
+	}
+
+	if (!TX_FLOW_IS_ON(cf_sk)) {
+
+		/* Flow is off. Check non-block flag */
+		if (msg->msg_flags & MSG_DONTWAIT) {
+			trace_printk("CAIF: %s(): MSG_DONTWAIT and tx flow off",
+				__func__);
+			ret = -EAGAIN;
+			goto write_error;
+		}
+
+		/* release lock before waiting */
+		release_sock(&cf_sk->sk);
+
+		/* Wait until flow is on or socket is closed */
+		if (wait_event_interruptible(*cf_sk->sk.sk_sleep,
+					TX_FLOW_IS_ON(cf_sk)
+					|| !STATE_IS_OPEN(cf_sk)
+					|| STATE_IS_REMOTE_SHUTDOWN(cf_sk)
+					) == -ERESTARTSYS) {
+			pr_debug("CAIF: %s():"
+				" wait_event_interruptible woken by a signal",
+				__func__);
+			ret = -ERESTARTSYS;
+			goto write_error_no_unlock;
+		}
+
+		/* I want to be alone on cf_sk (except status and queue) */
+		lock_sock(&(cf_sk->sk));
+
+		if (!STATE_IS_OPEN(cf_sk)) {
+			/* someone closed the link, report error */
+			pr_debug("CAIF: %s(): remote end shutdown!\n",
+				__func__);
+			ret = -EPIPE;
+			goto write_error;
+		}
+
+		if (STATE_IS_REMOTE_SHUTDOWN(cf_sk)) {
+			pr_debug("CAIF: %s(): "
+				"received remote_shutdown indication\n",
+				__func__);
+			ret = -ESHUTDOWN;
+			goto write_error;
+		}
+	}
+
+	/* Create packet, buf=NULL means no copying */
+	skb = caif_alloc_send_skb(sk,
+				payload_size + CAIF_NEEDED_HEADROOM +
+				CAIF_NEEDED_TAILROOM,
+				&err);
+
+	if (skb == NULL) {
+		pr_debug("CAIF: %s(): caif_alloc_send_skb returned NULL\n",
+			__func__);
+		ret = -ENOMEM;
+		goto write_error;
+	}
+
+	skb_reserve(skb, CAIF_NEEDED_HEADROOM);
+	pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
+	caif_assert((void *)pkt == (void *)skb);
+
+	if (cfpkt_raw_append(pkt, (void **) &txbuf, payload_size) < 0) {
+		pr_debug("CAIF: %s(): cfpkt_raw_append failed\n", __func__);
+		cfpkt_destroy(pkt);
+		ret = -EINVAL;
+		goto write_error;
+	}
+
+	/* Copy data into buffer. */
+	if (copy_from_user(txbuf, payload, payload_size)) {
+		pr_debug("CAIF: %s(): copy_from_user returned non zero.\n",
+			__func__);
+		cfpkt_destroy(pkt);
+		/* Fix: copy_from_user failure is a bad user pointer, which
+		 * is -EFAULT by kernel convention, not -EINVAL. */
+		ret = -EFAULT;
+		goto write_error;
+	}
+	memset(&info, 0, sizeof(info));
+
+	/* Send the packet down the stack. */
+	caif_assert(cf_sk->layer.dn);
+	caif_assert(cf_sk->layer.dn->transmit);
+
+	do {
+		ret = cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
+
+		/* Fix: the 'ret >= 0' clause was redundant — any value other
+		 * than -EAGAIN already breaks out of the retry loop. */
+		if (likely(ret != -EAGAIN))
+			break;
+
+		/* EAGAIN - retry */
+		if (msg->msg_flags & MSG_DONTWAIT) {
+			pr_debug("CAIF: %s(): NONBLOCK and transmit failed,"
+				" error = %d\n", __func__, ret);
+			ret = -EAGAIN;
+			goto write_error;
+		}
+
+		/* Let readers in */
+		release_sock(&cf_sk->sk);
+
+		/* Wait until flow is on or socket is closed */
+		if (wait_event_interruptible(*cf_sk->sk.sk_sleep,
+					TX_FLOW_IS_ON(cf_sk)
+					|| !STATE_IS_OPEN(cf_sk)
+					|| STATE_IS_REMOTE_SHUTDOWN(cf_sk)
+					) == -ERESTARTSYS) {
+			pr_debug("CAIF: %s(): wait_event_interruptible"
+				" woken by a signal", __func__);
+			ret = -ERESTARTSYS;
+			goto write_error_no_unlock;
+		}
+
+		/* I want to be alone on cf_sk (except status and queue) */
+		lock_sock(&(cf_sk->sk));
+
+	} while (ret == -EAGAIN);
+
+	if (ret < 0) {
+		cfpkt_destroy(pkt);
+		pr_debug("CAIF: %s(): transmit failed, error = %d\n",
+			__func__, ret);
+
+		goto write_error;
+	}
+
+	release_sock(&cf_sk->sk);
+	return payload_size;
+
+write_error:
+	release_sock(&cf_sk->sk);
+write_error_no_unlock:
+	return ret;
+}
+
+/*
+ * Poll callback: readable when the receive queue is non-empty, POLLHUP
+ * once the channel is no longer open, writable while TX flow is on.
+ */
+static unsigned int caif_poll(struct file *file, struct socket *sock,
+				poll_table *wait)
+{
+	struct sock *sk = sock->sk;
+	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+	u32 revents = 0;
+
+	poll_wait(file, sk->sk_sleep, wait);
+
+	lock_sock(&cf_sk->sk);
+	if (cfpkt_qpeek(cf_sk->pktq))
+		revents |= POLLIN | POLLRDNORM;
+	if (!STATE_IS_OPEN(cf_sk))
+		revents |= POLLHUP;
+	else if (TX_FLOW_IS_ON(cf_sk))
+		revents |= POLLOUT | POLLWRNORM;
+	release_sock(&cf_sk->sk);
+
+	trace_printk("CAIF: %s(): poll mask=0x%04x...\n",
+		__func__, revents);
+	return revents;
+}
+
+/*
+ * Discard every packet waiting on the socket's receive queue and reset
+ * the accounted queue length to zero.
+ */
+static void drain_queue(struct caifsock *cf_sk)
+{
+	struct cfpkt *queued;
+
+	for (;;) {
+		/* The queue has its own lock */
+		queued = cfpkt_dequeue(cf_sk->pktq);
+		if (queued == NULL)
+			break;
+		pr_debug("CAIF: %s(): freeing packet from read queue\n",
+			__func__);
+		cfpkt_destroy(queued);
+	}
+
+	spin_lock(&cf_sk->read_queue_len_lock);
+	cf_sk->read_queue_len = 0;
+	spin_unlock(&cf_sk->read_queue_len_lock);
+}
+
+
+/*
+ * SOL_CAIF setsockopt handler.  Only CAIFSO_CHANNEL_CONFIG is
+ * implemented: it copies priority, link selector and link name into the
+ * socket's channel configuration.  Returns 0 on success, -ENOPROTOOPT
+ * for a foreign level, -EINVAL for bad size/option, -EFAULT on a bad
+ * user pointer.
+ */
+static int setsockopt(struct socket *sock,
+			int lvl, int opt, char __user *ov, unsigned int ol)
+{
+	struct sock *sk = sock->sk;
+	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+	struct caif_channel_opt confopt;
+
+	if (lvl != SOL_CAIF) {
+		pr_debug("CAIF: %s(): setsockopt bad level\n", __func__);
+		return -ENOPROTOOPT;
+	}
+
+	switch (opt) {
+	case CAIFSO_CHANNEL_CONFIG:
+		if (ol < sizeof(struct caif_channel_opt)) {
+			pr_debug("CAIF: %s(): setsockopt"
+				" CAIFSO_CHANNEL_CONFIG bad size\n", __func__);
+			return -EINVAL;
+		}
+		/* Fix: copy_from_user() returns the number of bytes NOT
+		 * copied; the original returned that raw count to the
+		 * caller instead of -EFAULT. */
+		if (copy_from_user(&confopt, ov, sizeof(confopt)))
+			return -EFAULT;
+		lock_sock(&(cf_sk->sk));
+		cf_sk->config.priority = confopt.priority;
+		cf_sk->config.phy_pref = confopt.link_selector;
+		strncpy(cf_sk->config.phy_name, confopt.link_name,
+			sizeof(cf_sk->config.phy_name));
+		/* Fix: strncpy() does not NUL-terminate when the source
+		 * fills the buffer; force termination. */
+		cf_sk->config.phy_name[sizeof(cf_sk->config.phy_name) - 1] =
+			'\0';
+		pr_debug("CAIF: %s(): Setting sockopt pri=%d pref=%d name=%s\n",
+			__func__,
+			cf_sk->config.priority,
+			cf_sk->config.phy_pref,
+			cf_sk->config.phy_name);
+		release_sock(&cf_sk->sk);
+		return 0;
+/* TODO: Implement the remaining options:
+ *	case CAIF_REQ_PARAM_OPT:
+ *	case CAIF_RSP_PARAM_OPT:
+ *	case CAIF_UTIL_FLOW_OPT:
+ *	case CAIF_CONN_INFO_OPT:
+ *	case CAIF_CONN_ID_OPT:
+ */
+	default:
+		pr_debug("CAIF: %s(): unhandled option %d\n", __func__, opt);
+		return -EINVAL;
+	}
+}
+
+/* No CAIF socket options are readable yet; every request is rejected. */
+static int getsockopt(struct socket *sock,
+			int lvl, int opt, char __user *ov, int __user *ol)
+{
+	return -EINVAL;
+}
+
+/*
+ * Translate a sockaddr_caif plus the socket's protocol into a CAIF
+ * channel configuration.  Validates address length and family, then
+ * fills @config per protocol (AT, DATAGRAM, DATAGRAM_LOOP, UTIL, RFM).
+ * Returns 0 on success, -EINVAL for bad length/protocol, -EAFNOSUPPORT
+ * for a foreign address family.
+ */
+static int caif_channel_config(struct caifsock *cf_sk,
+			struct sockaddr *sock_addr, int len,
+			struct caif_channel_config *config)
+{
+	struct sockaddr_caif *addr = (struct sockaddr_caif *)sock_addr;
+
+	if (len != sizeof(struct sockaddr_caif)) {
+		/* Fix: sizeof yields size_t; %d is a varargs type mismatch
+		 * on 64-bit — use %zu. */
+		pr_debug("CAIF: %s(): Bad address len (%d,%zu)\n",
+			__func__, len, sizeof(struct sockaddr_caif));
+		return -EINVAL;
+	}
+	if (sock_addr->sa_family != AF_CAIF) {
+		pr_debug("CAIF: %s(): Bad address family (%d)\n",
+			__func__, sock_addr->sa_family);
+		return -EAFNOSUPPORT;
+	}
+
+	switch (cf_sk->sk.sk_protocol) {
+	case CAIFPROTO_AT:
+		config->type = CAIF_CHTY_AT;
+		break;
+	case CAIFPROTO_DATAGRAM:
+		config->type = CAIF_CHTY_DATAGRAM;
+		break;
+	case CAIFPROTO_DATAGRAM_LOOP:
+		config->type = CAIF_CHTY_DATAGRAM_LOOP;
+		config->u.dgm.connection_id = addr->u.dgm.connection_id;
+		break;
+	case CAIFPROTO_UTIL:
+		config->type = CAIF_CHTY_UTILITY;
+		strncpy(config->u.utility.name, addr->u.util.service,
+			sizeof(config->u.utility.name));
+		/* forcing the end of string to be null-terminated */
+		config->u.utility.name[sizeof(config->u.utility.name)-1] = '\0';
+		break;
+	case CAIFPROTO_RFM:
+		config->type = CAIF_CHTY_RFM;
+		config->u.rfm.connection_id = addr->u.rfm.connection_id;
+		strncpy(config->u.rfm.volume, addr->u.rfm.volume,
+			sizeof(config->u.rfm.volume));
+		/* forcing the end of string to be null-terminated */
+		config->u.rfm.volume[sizeof(config->u.rfm.volume)-1] = '\0';
+		break;
+	default:
+		pr_debug("CAIF: %s(): Bad caif protocol type (%d)\n",
+			__func__, cf_sk->sk.sk_protocol);
+		return -EINVAL;
+	}
+	trace_printk("CAIF: %s(): Setting connect param PROTO=%s\n",
+		__func__,
+		(cf_sk->sk.sk_protocol == CAIFPROTO_AT) ?
+		"CAIFPROTO_AT" :
+		(cf_sk->sk.sk_protocol == CAIFPROTO_DATAGRAM) ?
+		"CAIFPROTO_DATAGRAM" :
+		(cf_sk->sk.sk_protocol == CAIFPROTO_DATAGRAM_LOOP) ?
+		"CAIFPROTO_DATAGRAM_LOOP" :
+		(cf_sk->sk.sk_protocol == CAIFPROTO_UTIL) ?
+		"CAIFPROTO_UTIL" :
+		(cf_sk->sk.sk_protocol == CAIFPROTO_RFM) ?
+		"CAIFPROTO_RFM" : "ERROR");
+	return 0;
+}
+
+
+/*
+ * Connect an AF_CAIF SOCK_SEQPACKET socket: translate the address into a
+ * channel config, wait out any pending close, register the channel with
+ * the CAIF stack and (unless MSG_DONTWAIT) wait for the connect
+ * response.  Returns 0 on success or a negative errno.
+ * NOTE(review): sock->state/sk_state are set to CONNECTED/ESTABLISHED
+ * before the channel setup actually succeeds — confirm intent.
+ */
+int caif_connect(struct socket *sock, struct sockaddr *uservaddr,
+ int sockaddr_len, int flags)
+{
+ struct caifsock *cf_sk = NULL;
+ int result = -1;
+ int mode = 0;
+ int ret = -EIO;
+ struct sock *sk = sock->sk;
+
+ BUG_ON(sk == NULL);
+
+ cf_sk = container_of(sk, struct caifsock, sk);
+
+ trace_printk("CAIF: %s(): cf_sk=%p OPEN=%d, TX_FLOW=%d, RX_FLOW=%d\n",
+ __func__, cf_sk,
+ STATE_IS_OPEN(cf_sk),
+ TX_FLOW_IS_ON(cf_sk), RX_FLOW_IS_ON(cf_sk));
+
+ sk->sk_state = TCP_CLOSE;
+ sock->state = SS_UNCONNECTED;
+
+ /* Only SOCK_SEQPACKET is supported; anything else returns -EIO. */
+ if (sock->type == SOCK_SEQPACKET) {
+ sock->state = SS_CONNECTED;
+ sk->sk_state = TCP_ESTABLISHED;
+ } else
+ goto out;
+
+ /* I want to be alone on cf_sk (except status and queue) */
+ lock_sock(&(cf_sk->sk));
+
+ ret = caif_channel_config(cf_sk, uservaddr, sockaddr_len,
+ &cf_sk->config);
+ if (ret) {
+ pr_debug("CAIF: %s(): Cannot set socket address\n",
+ __func__);
+ goto open_error;
+ }
+
+ dbfs_atomic_inc(&cnt.num_open);
+ mode = SKT_READ_FLAG | SKT_WRITE_FLAG;
+
+ /* If socket is not open, make sure socket is in fully closed state */
+ if (!STATE_IS_OPEN(cf_sk)) {
+ /* Has link close response been received (if we ever sent it)?*/
+ if (STATE_IS_PENDING(cf_sk)) {
+ /*
+ * Still waiting for close response from remote.
+ * If opened non-blocking, report "would block"
+ */
+ if (flags & MSG_DONTWAIT) {
+ pr_debug("CAIF: %s(): MSG_DONTWAIT"
+ " && close pending\n", __func__);
+ ret = -EAGAIN;
+ goto open_error;
+ }
+
+ pr_debug("CAIF: %s(): Wait for close response"
+ " from remote...\n", __func__);
+
+ release_sock(&cf_sk->sk);
+
+ /*
+ * Blocking mode; close is pending and we need to wait
+ * for its conclusion.
+ */
+ result =
+ wait_event_interruptible(*cf_sk->sk.sk_sleep,
+ !STATE_IS_PENDING(cf_sk));
+
+ lock_sock(&(cf_sk->sk));
+ if (result == -ERESTARTSYS) {
+ pr_debug("CAIF: %s(): wait_event_interruptible"
+ "woken by a signal (1)", __func__);
+ ret = -ERESTARTSYS;
+ goto open_error;
+ }
+ }
+ }
+
+ /* socket is now either closed, pending open or open */
+ if (STATE_IS_OPEN(cf_sk) && !STATE_IS_PENDING(cf_sk)) {
+ /* Open */
+ pr_debug("CAIF: %s(): Socket is already opened (cf_sk=%p)"
+ " check access f_flags = 0x%x file_mode = 0x%x\n",
+ __func__, cf_sk, mode, cf_sk->file_mode);
+
+ /* NOTE(review): 'mode' is always READ|WRITE here, so a second
+  * connect on an open socket always hits -EBUSY. */
+ if (mode & cf_sk->file_mode) {
+ pr_debug("CAIF: %s(): Access mode already in use"
+ "0x%x\n",
+ __func__, mode);
+ ret = -EBUSY;
+ goto open_error;
+ }
+ } else {
+ /* We are closed or pending open.
+ * If closed: send link setup
+ * If pending open: link setup already sent (we could have been
+ * interrupted by a signal last time)
+ */
+ if (!STATE_IS_OPEN(cf_sk)) {
+ /* First opening of file; connect lower layers: */
+ /* Drain queue (very unlikely) */
+ drain_queue(cf_sk);
+
+ cf_sk->layer.receive = caif_sktrecv_cb;
+
+ SET_STATE_OPEN(cf_sk);
+ SET_PENDING_ON(cf_sk);
+
+ /* Register this channel. */
+ result =
+ caifdev_adapt_register(&cf_sk->config,
+ &cf_sk->layer);
+ if (result < 0) {
+ pr_debug("CAIF: %s(): can't register channel\n",
+ __func__);
+ ret = -EIO;
+ SET_STATE_CLOSED(cf_sk);
+ SET_PENDING_OFF(cf_sk);
+ goto open_error;
+ }
+ dbfs_atomic_inc(&cnt.num_init);
+ }
+
+ /* If opened non-blocking, report "success".
+ */
+ if (flags & MSG_DONTWAIT) {
+ pr_debug("CAIF: %s(): MSG_DONTWAIT success\n",
+ __func__);
+ ret = 0;
+ goto open_success;
+ }
+
+ trace_printk("CAIF: %s(): Wait for connect response\n",
+ __func__);
+
+ /* release lock before waiting */
+ release_sock(&cf_sk->sk);
+
+ /* Wait for INIT_RSP / INIT_FAIL_RSP from the flowctrl cb. */
+ result =
+ wait_event_interruptible(*cf_sk->sk.sk_sleep,
+ !STATE_IS_PENDING(cf_sk));
+
+ lock_sock(&(cf_sk->sk));
+
+ if (result == -ERESTARTSYS) {
+ pr_debug("CAIF: %s(): wait_event_interruptible"
+ "woken by a signal (2)", __func__);
+ ret = -ERESTARTSYS;
+ goto open_error;
+ }
+
+ if (!STATE_IS_OPEN(cf_sk)) {
+ /* Lower layers said "no" */
+ pr_debug("CAIF: %s(): Closed received\n", __func__);
+ ret = -EPIPE;
+ goto open_error;
+ }
+
+ trace_printk("CAIF: %s(): Connect received\n", __func__);
+ }
+open_success:
+ /* Open is ok */
+ cf_sk->file_mode |= mode;
+
+ trace_printk("CAIF: %s(): Connected - file mode = %x\n",
+ __func__, cf_sk->file_mode);
+
+ release_sock(&cf_sk->sk);
+ return 0;
+open_error:
+ release_sock(&cf_sk->sk);
+out:
+ return ret;
+}
+
+/*
+ * Shut down both directions of a CAIF socket (only SHUT_RDWR is
+ * supported).  Marks the channel closed, takes a socket reference for
+ * the in-flight teardown (released by the DEINIT_RSP callback) and asks
+ * the CAIF stack to unregister the channel.  Returns 0 on success,
+ * -EOPNOTSUPP/-EBADF/-EIO on error.
+ */
+static int caif_shutdown(struct socket *sock, int how)
+{
+	struct caifsock *cf_sk = NULL;
+	int result;
+	int tx_flow_state_was_on;
+	struct sock *sk = sock->sk;
+	int res = 0;
+
+	if (how != SHUT_RDWR)
+		return -EOPNOTSUPP;	/* FIXME: ENOTSUP in userland for POSIX */
+
+	cf_sk = container_of(sk, struct caifsock, sk);
+	if (cf_sk == NULL) {
+		pr_debug("CAIF: %s(): COULD NOT FIND SOCKET\n", __func__);
+		return -EBADF;
+	}
+
+	/* I want to be alone on cf_sk (except status queue) */
+	lock_sock(&(cf_sk->sk));
+	dbfs_atomic_inc(&cnt.num_close);
+
+	/* Is the socket open? */
+	if (!STATE_IS_OPEN(cf_sk)) {
+		pr_debug("CAIF: %s(): socket not open (cf_sk=%p) \n",
+			__func__, cf_sk);
+		release_sock(&cf_sk->sk);
+		return 0;
+	}
+
+	/* Is the socket waiting for link setup response? */
+	if (STATE_IS_PENDING(cf_sk)) {
+		pr_debug("CAIF: %s(): Socket is open pending (cf_sk=%p) \n",
+			__func__, cf_sk);
+		release_sock(&cf_sk->sk);
+		/* What to return here? Seems that EBADF is the closest :-| */
+		return -EBADF;
+	}
+	/* IS_CLOSED have double meaning:
+	 * 1) Spontanous Remote Shutdown Request.
+	 * 2) Ack on a channel teardown(disconnect)
+	 * Must clear bit in case we previously received
+	 * remote shudown request.
+	 */
+
+	SET_STATE_CLOSED(cf_sk);
+	SET_PENDING_ON(cf_sk);
+	sock->state = SS_DISCONNECTING; /* FIXME: Update the sock->states */
+	tx_flow_state_was_on = TX_FLOW_IS_ON(cf_sk);
+	SET_TX_FLOW_OFF(cf_sk);
+	/* Hold a reference for the teardown; the balancing sock_put() is in
+	 * the CAIF_CTRLCMD_DEINIT_RSP branch of caif_sktflowctrl_cb(). */
+	sock_hold(&cf_sk->sk);
+	result = caifdev_adapt_unregister(&cf_sk->layer);
+
+	if (result < 0) {
+		pr_debug("CAIF: %s(): caifdev_adapt_unregister() failed\n",
+			__func__);
+		SET_STATE_CLOSED(cf_sk);
+		SET_PENDING_OFF(cf_sk);
+		SET_TX_FLOW_OFF(cf_sk);
+		/* Fix: no DEINIT_RSP will arrive when unregister fails, so
+		 * the reference taken above would leak — drop it here. */
+		sock_put(&cf_sk->sk);
+		release_sock(&cf_sk->sk);
+		return -EIO;
+	}
+	if (STATE_IS_REMOTE_SHUTDOWN(cf_sk)) {
+		SET_PENDING_OFF(cf_sk);
+		SET_REMOTE_SHUTDOWN_OFF(cf_sk);
+	}
+
+	dbfs_atomic_inc(&cnt.num_deinit);
+
+	/*
+	 * We don't wait for close response here. Close pending state will be
+	 * cleared by flow control callback when response arrives.
+	 */
+	drain_queue(cf_sk);
+	SET_RX_FLOW_ON(cf_sk);
+	cf_sk->file_mode = 0;
+
+	release_sock(&cf_sk->sk);
+	return res;
+}
+/* sendpage is not supported on CAIF sockets; always reject. */
+static ssize_t caif_sock_no_sendpage(struct socket *sock, struct page *page,
+				int offset, size_t size, int flags)
+{
+	return -EOPNOTSUPP;
+}
+
+/* This function is called as part of close. */
+/*
+ * Release the socket: run a full shutdown, orphan the sock, mark it
+ * pending-destroy, flush the receive queue and drop the final reference
+ * (the remaining cleanup happens in caif_sock_destructor()).
+ */
+static int caif_release(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+	struct caifsock *cf_sk = NULL;
+
+	caif_assert(sk != NULL);
+	cf_sk = container_of(sk, struct caifsock, sk);
+
+	caif_shutdown(sock, SHUT_RDWR);
+	lock_sock(&(cf_sk->sk));
+
+	sock->sk = NULL;
+
+	/* Detach the socket from its process context by making it orphan. */
+	sock_orphan(sk);
+
+	/*
+	 * Setting SHUTDOWN_MASK means that both send and receive are shutdown
+	 * for the socket.
+	 */
+	sk->sk_shutdown = SHUTDOWN_MASK;
+
+	/*
+	 * Set the socket state to closed, the TCP_CLOSE macro is used when
+	 * closing any socket.
+	 */
+	sk->sk_state = TCP_CLOSE;
+
+	/* Flush out this sockets receive queue. */
+	drain_queue(cf_sk);
+
+	/* Finally release the socket. */
+	/* Fix: the original invoked STATE_IS_PENDING_DESTROY() — a test_bit
+	 * whose result was discarded.  The intent (per the comment above and
+	 * the check in the DEINIT_RSP callback) is to SET the bit. */
+	SET_STATE_PENDING_DESTROY(cf_sk);
+	release_sock(&cf_sk->sk);
+
+	/*
+	 * The rest of the cleanup will be handled from the
+	 * caif_sock_destructor
+	 */
+	sock_put(sk);
+	return 0;
+}
+
+/* Socket-level ioctl: delegate straight to the common CAIF handler. */
+static int caif_sock_ioctl(struct socket *sock, unsigned int cmd,
+			unsigned long arg)
+{
+	return caif_ioctl(cmd, arg, true);
+}
+
+/*
+ * proto_ops for AF_CAIF SOCK_SEQPACKET sockets.  bind/accept/listen and
+ * friends are stubbed out with the sock_no_* helpers; only connect,
+ * poll, ioctl, shutdown, sockopts and send/recv are implemented.
+ */
+static struct proto_ops caif_ops = {
+ .family = PF_CAIF,
+ .owner = THIS_MODULE,
+ .release = caif_release,
+ .bind = sock_no_bind,
+ .connect = caif_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = sock_no_getname,
+ .poll = caif_poll,
+ .ioctl = caif_sock_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = caif_shutdown,
+ .setsockopt = setsockopt,
+ .getsockopt = getsockopt,
+ .sendmsg = caif_sendmsg,
+ .recvmsg = caif_recvmsg,
+ .mmap = sock_no_mmap,
+ .sendpage = caif_sock_no_sendpage,
+};
+
+/* This function is called when a socket is finally destroyed. */
+/*
+ * sk_destruct callback: sanity-check that the sock is dead and fully
+ * detached, then free the packet queue, remove the debugfs directory
+ * and decrement the live-socket counter.  Bails out (without freeing)
+ * if the socket is not dead or still open.
+ */
+static void caif_sock_destructor(struct sock *sk)
+{
+ struct caifsock *cf_sk = NULL;
+ cf_sk = container_of(sk, struct caifsock, sk);
+
+ /* Error checks. */
+ caif_assert(!atomic_read(&sk->sk_wmem_alloc));
+ caif_assert(sk_unhashed(sk));
+ caif_assert(!sk->sk_socket);
+
+ if (!sock_flag(sk, SOCK_DEAD)) {
+ pr_debug("CAIF: %s(): 0x%p", __func__, sk);
+ return;
+ }
+
+ lock_sock(&(cf_sk->sk));
+
+ if (STATE_IS_OPEN(cf_sk)) {
+ /* Still open: leave resources alone; teardown incomplete. */
+ pr_debug("CAIF: %s(): socket is opened (cf_sk=%p)"
+ " file_mode = 0x%x\n", __func__,
+ cf_sk, cf_sk->file_mode);
+ release_sock(&cf_sk->sk);
+ return;
+ }
+ drain_queue(cf_sk);
+ cfglu_free(cf_sk->pktq);
+
+ if (cf_sk->debugfs_socket_dir != NULL)
+ debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
+
+ release_sock(&cf_sk->sk);
+ trace_printk("CAIF: %s(): caif_sock_destructor: Removing socket %s\n",
+ __func__, cf_sk->name);
+ atomic_dec(&caif_nr_socks);
+}
+
+/*
+ * Create a new PF_CAIF socket (net_proto_family::create).
+ *
+ * Only SOCK_SEQPACKET sockets in the initial network namespace are
+ * supported.  Allocates the sock, initialises per-socket state and
+ * (optionally) a per-socket debugfs directory.
+ *
+ * Returns 0 on success, or a negative errno (-EAFNOSUPPORT,
+ * -EPROTONOSUPPORT, -ENOMEM, -ESOCKTNOSUPPORT).
+ */
+static int caif_create(struct net *net, struct socket *sock, int protocol,
+			int kern)
+{
+	struct sock *sk = NULL;
+	struct caifsock *cf_sk = NULL;
+	int result = 0;
+
+	/* NOTE(review): 'prot' is function-static, so it is shared by all
+	 * concurrent callers; the slab assignment below re-stores the same
+	 * value each time, which is benign but looks racy — consider moving
+	 * this to module init.
+	 */
+	static struct proto prot = {.name = "PF_CAIF",
+		.owner = THIS_MODULE,
+		.obj_size = sizeof(struct caifsock),
+	};
+	prot.slab = caif_sk_cachep;
+
+	if (net != &init_net)
+		return -EAFNOSUPPORT;
+
+	if (protocol < 0 || protocol >= CAIFPROTO_MAX)
+		return -EPROTONOSUPPORT;
+
+	/*
+	 * Set the socket state to unconnected. The socket state is really
+	 * not used at all in the net/core or socket.c but the
+	 * initialization makes sure that sock->state is not uninitialized.
+	 */
+	sock->state = SS_UNCONNECTED;
+
+	sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot);
+	if (!sk)
+		return -ENOMEM;
+
+	cf_sk = container_of(sk, struct caifsock, sk);
+
+	/* Store the protocol */
+	sk->sk_protocol = (unsigned char) protocol;
+
+	spin_lock_init(&cf_sk->read_queue_len_lock);
+
+	/* Fill in some information concerning the misc socket. */
+	snprintf(cf_sk->name, sizeof(cf_sk->name), "cf_sk%d",
+		 atomic_read(&caif_nr_socks));
+	snprintf(cf_sk->config.name, sizeof(cf_sk->config.name), "caifconf%d",
+		 atomic_read(&caif_nr_socks));
+
+	/*
+	 * The sock->type specifies the socket type to use. The CAIF socket is
+	 * a packet stream in the sence that it is packet based.
+	 * CAIF trusts the reliability of the link, no resending is implemented.
+	 */
+	switch (sock->type) {
+	case SOCK_SEQPACKET:
+		sock->ops = &caif_ops;
+		break;
+	default:
+		sk_free(sk);
+		return -ESOCKTNOSUPPORT;
+	}
+
+	/*
+	 * Lock in order to try to stop someone from opening the socket
+	 * too early.
+	 */
+	lock_sock(&(cf_sk->sk));
+
+	/* Initialize the nozero default sock structure data. */
+	sock_init_data(sock, sk);
+
+	sk->sk_destruct = caif_sock_destructor;
+	sk->sk_sndbuf = caif_sockbuf_size;
+	sk->sk_rcvbuf = caif_sockbuf_size;
+
+	cf_sk->pktq = cfpktq_create();
+
+	if (!cf_sk->pktq) {
+		pr_err("CAIF: %s(): queue create failed.\n", __func__);
+		result = -ENOMEM;
+		release_sock(&cf_sk->sk);
+		goto err_failed;
+	}
+
+	cf_sk->layer.ctrlcmd = caif_sktflowctrl_cb;
+	SET_STATE_CLOSED(cf_sk);
+	SET_PENDING_OFF(cf_sk);
+	SET_TX_FLOW_OFF(cf_sk);
+	SET_RX_FLOW_ON(cf_sk);
+
+	/* Increase the number of sockets created. */
+	atomic_inc(&caif_nr_socks);
+	if (!IS_ERR(debugfsdir)) {
+		cf_sk->debugfs_socket_dir =
+			debugfs_create_dir(cf_sk->name, debugfsdir);
+		debugfs_create_u32("conn_state", S_IRUSR | S_IWUSR,
+				cf_sk->debugfs_socket_dir, &cf_sk->conn_state);
+		debugfs_create_u32("flow_state", S_IRUSR | S_IWUSR,
+				cf_sk->debugfs_socket_dir, &cf_sk->flow_state);
+		debugfs_create_u32("read_queue_len", S_IRUSR | S_IWUSR,
+				cf_sk->debugfs_socket_dir,
+				(u32 *) &cf_sk->read_queue_len);
+		debugfs_create_u32("identity", S_IRUSR | S_IWUSR,
+				cf_sk->debugfs_socket_dir,
+				(u32 *) &cf_sk->layer.id);
+	}
+	release_sock(&cf_sk->sk);
+	return 0;
+err_failed:
+	/* NOTE(review): sk_free() after sock_init_data() — confirm that
+	 * sock_put() is not required here (sock_init_data attaches sk to
+	 * 'sock' and sets the refcount).
+	 */
+	sk_free(sk);
+	return result;
+}
+
+
+/* PF_CAIF address-family descriptor registered with sock_register(). */
+static struct net_proto_family caif_family_ops = {
+	.family = PF_CAIF,
+	.create = caif_create,
+	.owner = THIS_MODULE,
+};
+
+/*
+ * Register the PF_CAIF socket family with the kernel.
+ *
+ * Returns 0 on success or the negative error code from sock_register().
+ *
+ * Bug fix: the original read "if (!err) return err; return 0;", which
+ * returns 0 in *both* branches and therefore silently swallowed any
+ * registration failure.  Propagate the error instead.
+ */
+int af_caif_init(void)
+{
+	return sock_register(&caif_family_ops);
+}
+
+/*
+ * Module init: create the debugfs counter tree (when CONFIG_DEBUG_FS is
+ * set) and register the PF_CAIF socket family.
+ *
+ * Bug fix: if af_caif_init() fails, the debugfs directory created above
+ * was leaked; it is now removed on the error path.
+ */
+static int __init caif_sktinit_module(void)
+{
+	int stat;
+#ifdef CONFIG_DEBUG_FS
+	debugfsdir = debugfs_create_dir("chnl_skt", NULL);
+	if (!IS_ERR(debugfsdir)) {
+		/* FIXME: this inflates the socket counter by one for the
+		 * lifetime of the module — confirm it is intentional.
+		 */
+		atomic_inc(&caif_nr_socks);
+		debugfs_create_u32("num_sockets", S_IRUSR | S_IWUSR,
+				debugfsdir,
+				(u32 *) &caif_nr_socks);
+		debugfs_create_u32("num_open", S_IRUSR | S_IWUSR,
+				debugfsdir,
+				(u32 *) &cnt.num_open);
+		debugfs_create_u32("num_close", S_IRUSR | S_IWUSR,
+				debugfsdir,
+				(u32 *) &cnt.num_close);
+		debugfs_create_u32("num_init", S_IRUSR | S_IWUSR,
+				debugfsdir,
+				(u32 *) &cnt.num_init);
+		debugfs_create_u32("num_init_resp", S_IRUSR | S_IWUSR,
+				debugfsdir,
+				(u32 *) &cnt.num_init_resp);
+		debugfs_create_u32("num_init_fail_resp", S_IRUSR | S_IWUSR,
+				debugfsdir,
+				(u32 *) &cnt.num_init_fail_resp);
+		debugfs_create_u32("num_deinit", S_IRUSR | S_IWUSR,
+				debugfsdir,
+				(u32 *) &cnt.num_deinit);
+		debugfs_create_u32("num_deinit_resp", S_IRUSR | S_IWUSR,
+				debugfsdir,
+				(u32 *) &cnt.num_deinit_resp);
+		debugfs_create_u32("num_remote_shutdown_ind",
+				S_IRUSR | S_IWUSR, debugfsdir,
+				(u32 *) &cnt.num_remote_shutdown_ind);
+		debugfs_create_u32("num_tx_flow_off_ind", S_IRUSR | S_IWUSR,
+				debugfsdir,
+				(u32 *) &cnt.num_tx_flow_off_ind);
+		debugfs_create_u32("num_tx_flow_on_ind", S_IRUSR | S_IWUSR,
+				debugfsdir,
+				(u32 *) &cnt.num_tx_flow_on_ind);
+		debugfs_create_u32("num_rx_flow_off", S_IRUSR | S_IWUSR,
+				debugfsdir,
+				(u32 *) &cnt.num_rx_flow_off);
+		debugfs_create_u32("num_rx_flow_on", S_IRUSR | S_IWUSR,
+				debugfsdir,
+				(u32 *) &cnt.num_rx_flow_on);
+	}
+#endif
+	stat = af_caif_init();
+	if (stat) {
+		pr_err("CAIF: %s(): Failed to initialize CAIF socket layer.",
+		       __func__);
+#ifdef CONFIG_DEBUG_FS
+		/* Don't leak the debugfs tree when registration fails. */
+		if (!IS_ERR(debugfsdir))
+			debugfs_remove_recursive(debugfsdir);
+#endif
+		return stat;
+	}
+	return 0;
+}
+
+/* Module unload: unregister the address family and tear down debugfs. */
+static void __exit caif_sktexit_module(void)
+{
+	sock_unregister(PF_CAIF);
+	/* NOTE(review): creation guards with !IS_ERR(debugfsdir) while
+	 * removal checks != NULL; debugfs_remove_recursive() tolerates
+	 * both NULL and error pointers, so this is safe but inconsistent.
+	 */
+	if (debugfsdir != NULL)
+		debugfs_remove_recursive(debugfsdir);
+}
+
+module_init(caif_sktinit_module);
+module_exit(caif_sktexit_module);
+
diff --git a/net/caif/chnl_chr.c b/net/caif/chnl_chr.c
new file mode 100644
index 00000000000..77e6d9a4529
--- /dev/null
+++ b/net/caif/chnl_chr.c
@@ -0,0 +1,1363 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2009
+ * Author: Per Sigmond / Per.Sigmond@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <asm/uaccess.h>
+#include <asm/atomic.h>
+
+/* CAIF header files. */
+#include <net/caif/generic/caif_layer.h>
+#include <net/caif/generic/cfcnfg.h>
+#include <net/caif/generic/cfpkt.h>
+#include <net/caif/generic/cffrml.h>
+#include <net/caif/caif_chr.h>
+#include <linux/caif/caif_config.h>
+#include <net/caif/caif_actions.h>
+MODULE_LICENSE("GPL");
+
+/* Set to 1 to enable sequence-number debugging of received packets. */
+#define COUNTER_DEBUG 0
+
+/* Read-queue watermarks: request flow-off above HIGH, flow-on below LOW. */
+#define CHNL_CHR_READ_QUEUE_HIGH 2000
+#define CHNL_CHR_READ_QUEUE_LOW 100
+
+/* All CAIF character devices created by this driver; walked/modified by
+ * find_device() under list_lock.
+ */
+static LIST_HEAD(caif_chrdev_list);
+/*
+ * Bug fix: the lock was declared as a bare "static spinlock_t" and no
+ * spin_lock_init() call exists in this file, so it was used
+ * uninitialised.  Use the static initialiser instead.
+ */
+static DEFINE_SPINLOCK(list_lock);
+
+/*
+ * Connection-state bits (dev->conn_state) and flow-state bits
+ * (dev->flow_state), manipulated with the atomic bitop helpers below.
+ *
+ * NOTE(review): set_bit()/test_bit() operate on unsigned long, but both
+ * state words are u32 — the (void *) casts are fragile on 64-bit and
+ * big-endian targets; TODO confirm.
+ */
+#define CONN_STATE_OPEN_BIT 1
+#define CONN_STATE_PENDING_BIT 2
+#define CONN_REMOTE_SHUTDOWN_BIT 4
+#define TX_FLOW_ON_BIT 1
+#define RX_FLOW_ON_BIT 2
+
+#define STATE_IS_OPEN(dev) test_bit(CONN_STATE_OPEN_BIT,\
+		(void *) &(dev)->conn_state)
+#define STATE_IS_REMOTE_SHUTDOWN(dev) test_bit(CONN_REMOTE_SHUTDOWN_BIT,\
+		(void *) &(dev)->conn_state)
+#define STATE_IS_PENDING(dev) test_bit(CONN_STATE_PENDING_BIT,\
+		(void *) &(dev)->conn_state)
+
+#define SET_STATE_OPEN(dev) set_bit(CONN_STATE_OPEN_BIT,\
+		(void *) &(dev)->conn_state)
+#define SET_STATE_CLOSED(dev) clear_bit(CONN_STATE_OPEN_BIT,\
+		(void *) &(dev)->conn_state)
+#define SET_PENDING_ON(dev) set_bit(CONN_STATE_PENDING_BIT,\
+		(void *) &(dev)->conn_state)
+#define SET_PENDING_OFF(dev) clear_bit(CONN_STATE_PENDING_BIT,\
+		(void *) &(dev)->conn_state)
+#define SET_REMOTE_SHUTDOWN(dev) set_bit(CONN_REMOTE_SHUTDOWN_BIT,\
+		(void *) &(dev)->conn_state)
+
+#define SET_REMOTE_SHUTDOWN_OFF(dev) clear_bit(CONN_REMOTE_SHUTDOWN_BIT,\
+		(void *) &(dev)->conn_state)
+
+#define RX_FLOW_IS_ON(dev) test_bit(RX_FLOW_ON_BIT,\
+		(void *) &(dev)->flow_state)
+#define TX_FLOW_IS_ON(dev) test_bit(TX_FLOW_ON_BIT,\
+		(void *) &(dev)->flow_state)
+
+#define SET_RX_FLOW_OFF(dev) clear_bit(RX_FLOW_ON_BIT,\
+		(void *) &(dev)->flow_state)
+#define SET_RX_FLOW_ON(dev) set_bit(RX_FLOW_ON_BIT,\
+		(void *) &(dev)->flow_state)
+#define SET_TX_FLOW_OFF(dev) clear_bit(TX_FLOW_ON_BIT,\
+		(void *) &(dev)->flow_state)
+#define SET_TX_FLOW_ON(dev) set_bit(TX_FLOW_ON_BIT,\
+		(void *) &(dev)->flow_state)
+
+/* File access-mode flags tracked in caif_char_dev::file_mode. */
+#define CHR_READ_FLAG 0x01
+#define CHR_WRITE_FLAG 0x02
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *debugfsdir;
+#include <linux/debugfs.h>
+
+#endif
+
+/* Per-device state for one CAIF character (misc) device. */
+struct caif_char_dev {
+	struct layer layer;	/* CAIF stack layer; recovered via container_of */
+	u32 conn_state;		/* CONN_STATE_*/CONN_REMOTE_* bits */
+	u32 flow_state;		/* TX/RX_FLOW_ON bits */
+	struct cfpktq *pktq;	/* receive queue (has its own lock) */
+	char name[256]; /* Redundant! Already in struct miscdevice */
+	struct miscdevice misc;
+	int file_mode;		/* CHR_READ_FLAG | CHR_WRITE_FLAG currently held */
+	struct caif_packet_funcs pktf;
+	struct caif_channel_config config;
+	/* Access to this struct and below layers */
+	struct mutex mutex;
+	int read_queue_len;	/* protected by read_queue_len_lock */
+	spinlock_t read_queue_len_lock;
+	wait_queue_head_t read_wq;	/* readers waiting for data */
+	wait_queue_head_t mgmt_wq;	/* waiters for connect/flow events */
+	/* List of misc test devices */
+	struct list_head list_field;
+#ifdef CONFIG_DEBUG_FS
+	/* Event counters exported through debugfs. */
+	struct dentry *debugfs_device_dir;
+	atomic_t num_open;
+	atomic_t num_close;
+	atomic_t num_init;
+	atomic_t num_init_resp;
+	atomic_t num_init_fail_resp;
+	atomic_t num_deinit;
+	atomic_t num_deinit_resp;
+	atomic_t num_remote_shutdown_ind;
+	atomic_t num_tx_flow_off_ind;
+	atomic_t num_tx_flow_on_ind;
+	atomic_t num_rx_flow_off;
+	atomic_t num_rx_flow_on;
+#endif
+#if COUNTER_DEBUG
+	unsigned long counter;		/* expected next sequence number */
+	int mismatch_reported;		/* rate-limit mismatch warnings */
+#endif
+};
+
+static void drain_queue(struct caif_char_dev *dev);
+
+/* Packet Receive Callback function called from CAIF Stack.
+ * May run in tasklet context, so it must not sleep.  Queues the packet,
+ * requests modem flow-off when the read queue grows past the high
+ * watermark, and wakes any sleeping reader.
+ *
+ * Bug fix: the assertion before the modemcmd() call checked
+ * dev->layer.dn->ctrlcmd instead of the member actually invoked,
+ * dev->layer.dn->modemcmd.
+ */
+static int caif_chrrecv_cb(struct layer *layr, struct cfpkt *pkt)
+{
+	struct caif_char_dev *dev;
+	int read_queue_high;
+#if COUNTER_DEBUG
+	unsigned long *data_p;
+#endif
+	dev = container_of(layr, struct caif_char_dev, layer);
+
+	pr_debug("CAIF: %s(): data received: %d bytes.\n",
+		 __func__, dev->pktf.cfpkt_getlen(pkt));
+
+	/* NOTE: This function may be called in Tasklet context! */
+#if COUNTER_DEBUG
+	dev->pktf.cfpkt_raw_extract(pkt, (void **) &data_p, 0);
+
+	if (data_p[0] == 1) {
+		dev->counter = data_p[0];
+		dev->mismatch_reported = 0;
+	}
+
+	if ((dev->counter != data_p[0]) && !dev->mismatch_reported) {
+		pr_warning("CAIF: %s(): WARNING - caif_chrrecv_cb(): "
+			   "sequence: expected %ld, got %ld\n",
+			   __func__, dev->counter, data_p[0]);
+		dev->mismatch_reported = 1;
+	}
+
+	if (!(dev->counter % 100000))
+		pr_debug("CAIF: %s(): %ld\n", __func__, dev->counter);
+
+	dev->counter++;
+#endif
+
+	/* The queue has its own lock */
+	dev->pktf.cfpkt_queue(dev->pktq, pkt, 0);
+	spin_lock(&dev->read_queue_len_lock);
+	dev->read_queue_len++;
+	read_queue_high = (dev->read_queue_len > CHNL_CHR_READ_QUEUE_HIGH);
+	spin_unlock(&dev->read_queue_len_lock);
+
+	if (RX_FLOW_IS_ON(dev) && read_queue_high) {
+
+#ifdef CONFIG_DEBUG_FS
+		atomic_inc(&dev->num_rx_flow_off);
+#endif
+		SET_RX_FLOW_OFF(dev);
+
+		/* Send flow off (NOTE: must not sleep) */
+		pr_debug("CAIF: %s(): sending flow OFF"
+			 " (queue len = %d)\n",
+			 __func__,
+			 dev->read_queue_len);
+		caif_assert(dev->layer.dn);
+		caif_assert(dev->layer.dn->modemcmd);
+		(void) dev->layer.dn->modemcmd(dev->layer.dn,
+						CAIF_MODEMCMD_FLOW_OFF_REQ);
+	}
+
+	/* Signal reader that data is available. */
+	wake_up_interruptible(&dev->read_wq);
+
+	return 0;
+}
+
+/* Packet Flow Control Callback function called from CAIF.
+ * Runs potentially in tasklet context: updates the connection/flow
+ * state bits and wakes waiters on the management/read wait queues.
+ *
+ * Fixes: removed the duplicated SET_TX_FLOW_OFF() in the
+ * REMOTE_SHUTDOWN case and corrected the "UKNOWN" typo in the debug
+ * message.
+ */
+static void caif_chrflowctrl_cb(struct layer *layr, enum caif_ctrlcmd flow, int phyid)
+{
+	struct caif_char_dev *dev;
+
+
+	/* NOTE: This function may be called in Tasklet context! */
+	pr_debug("CAIF: %s(): AT flowctrl func called flow: %s.\n",
+		 __func__,
+		 flow == CAIF_CTRLCMD_FLOW_ON_IND ? "ON" :
+		 flow == CAIF_CTRLCMD_FLOW_OFF_IND ? "OFF" :
+		 flow == CAIF_CTRLCMD_INIT_RSP ? "INIT_RSP" :
+		 flow == CAIF_CTRLCMD_DEINIT_RSP ? "DEINIT_RSP" :
+		 flow == CAIF_CTRLCMD_INIT_FAIL_RSP ? "INIT_FAIL_RSP" :
+		 flow ==
+		 CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ? "REMOTE_SHUTDOWN" :
+		 "UNKNOWN CTRL COMMAND");
+
+	dev = container_of(layr, struct caif_char_dev, layer);
+
+	switch (flow) {
+	case CAIF_CTRLCMD_FLOW_ON_IND:
+		pr_debug("CAIF: %s(): CAIF_CTRLCMD_FLOW_ON_IND\n",
+			 __func__);
+#ifdef CONFIG_DEBUG_FS
+		atomic_inc(&dev->num_tx_flow_on_ind);
+#endif
+		/* Signal reader that data is available. */
+		SET_TX_FLOW_ON(dev);
+		wake_up_interruptible(&dev->mgmt_wq);
+		break;
+
+	case CAIF_CTRLCMD_FLOW_OFF_IND:
+#ifdef CONFIG_DEBUG_FS
+		atomic_inc(&dev->num_tx_flow_off_ind);
+#endif
+		pr_debug("CAIF: %s(): CAIF_CTRLCMD_FLOW_OFF_IND\n",
+			 __func__);
+		SET_TX_FLOW_OFF(dev);
+		break;
+
+	case CAIF_CTRLCMD_INIT_RSP:
+		pr_debug("CAIF: %s(): CAIF_CTRLCMD_INIT_RSP\n",
+			 __func__);
+#ifdef CONFIG_DEBUG_FS
+		atomic_inc(&dev->num_init_resp);
+#endif
+		/* Signal reader that data is available. */
+		caif_assert(STATE_IS_OPEN(dev));
+		SET_PENDING_OFF(dev);
+		SET_TX_FLOW_ON(dev);
+		wake_up_interruptible(&dev->mgmt_wq);
+		break;
+
+	case CAIF_CTRLCMD_DEINIT_RSP:
+		pr_debug("CAIF: %s(): CAIF_CTRLCMD_DEINIT_RSP\n",
+			 __func__);
+#ifdef CONFIG_DEBUG_FS
+		atomic_inc(&dev->num_deinit_resp);
+#endif
+		caif_assert(!STATE_IS_OPEN(dev));
+		SET_PENDING_OFF(dev);
+		wake_up_interruptible(&dev->mgmt_wq);
+		break;
+
+	case CAIF_CTRLCMD_INIT_FAIL_RSP:
+		pr_debug("CAIF: %s(): CAIF_CTRLCMD_INIT_FAIL_RSP\n",
+			 __func__);
+#ifdef CONFIG_DEBUG_FS
+		atomic_inc(&dev->num_init_fail_resp);
+#endif
+		caif_assert(STATE_IS_OPEN(dev));
+		SET_STATE_CLOSED(dev);
+		SET_PENDING_OFF(dev);
+		SET_TX_FLOW_OFF(dev);
+		wake_up_interruptible(&dev->mgmt_wq);
+		break;
+
+	case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
+		pr_debug("CAIF: %s(): CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND\n",
+			 __func__);
+#ifdef CONFIG_DEBUG_FS
+		atomic_inc(&dev->num_remote_shutdown_ind);
+#endif
+		SET_TX_FLOW_OFF(dev);
+		SET_REMOTE_SHUTDOWN(dev);
+
+		drain_queue(dev);
+		SET_RX_FLOW_ON(dev);
+		dev->file_mode = 0;
+
+		wake_up_interruptible(&dev->mgmt_wq);
+		wake_up_interruptible(&dev->read_wq);
+		break;
+
+	default:
+		pr_debug("CAIF: %s(): Unexpected flow command %d\n",
+			 __func__, flow);
+	}
+}
+
+/* Device Read function called from Linux kernel.
+ * Waits (unless O_NONBLOCK) for the channel to finish connecting and
+ * for a packet to arrive, copies one whole packet to userspace and
+ * re-enables modem RX flow when the queue drains below the low
+ * watermark.  Returns the packet length or a negative errno.
+ *
+ * Fixes:
+ *  - the remote-shutdown check after the blocking wait jumped to
+ *    read_error, which does mutex_unlock() — but the mutex is NOT held
+ *    at that point (it was released before the wait).  Return directly
+ *    instead of unlocking a mutex we do not own.
+ *  - the assertion before modemcmd() checked dn->ctrlcmd instead of
+ *    dn->modemcmd.
+ */
+ssize_t caif_chrread(struct file *filp, char __user *buf, size_t count,
+		loff_t *f_pos)
+{
+	struct cfpkt *pkt = NULL;
+	unsigned char *rxbuf = NULL;
+	size_t len;
+	int result;
+	struct caif_char_dev *dev = filp->private_data;
+	ssize_t ret = -EIO;
+	int read_queue_low;
+
+
+	if (dev == NULL) {
+		pr_debug("CAIF: %s(): private_data not set!\n",
+			 __func__);
+		return -EBADFD;
+	}
+
+	/* I want to be alone on dev (except status and queue) */
+	if (mutex_lock_interruptible(&dev->mutex)) {
+		pr_debug("CAIF: %s(): mutex_lock_interruptible got signalled\n",
+			 __func__);
+		return -ERESTARTSYS;
+	}
+
+	caif_assert(dev->pktq);
+
+	if (!STATE_IS_OPEN(dev)) {
+		/* Device is closed or closing. */
+		if (!STATE_IS_PENDING(dev)) {
+			pr_debug("CAIF: %s(): device is closed (by remote)\n",
+				 __func__);
+			ret = -EPIPE;
+		} else {
+			pr_debug("CAIF: %s(): device is closing...\n",
+				 __func__);
+			ret = -EBADF;
+		}
+		goto read_error;
+	}
+
+	/* Device is open or opening. */
+	if (STATE_IS_PENDING(dev)) {
+		pr_debug("CAIF: %s(): device is opening...\n",
+			 __func__);
+
+		if (filp->f_flags & O_NONBLOCK) {
+			/* We can't block. */
+			pr_debug("CAIF: %s(): state pending and O_NONBLOCK\n",
+				 __func__);
+			ret = -EAGAIN;
+			goto read_error;
+		}
+
+		/* Blocking mode; state is pending and we need to wait
+		 * for its conclusion. (Shutdown_ind set pending off.)
+		 */
+		result =
+		    wait_event_interruptible(dev->mgmt_wq,
+					     !STATE_IS_PENDING(dev));
+		if (result == -ERESTARTSYS) {
+			pr_debug("CAIF: %s(): wait_event_interruptible"
+				 " woken by a signal (1)", __func__);
+			ret = -ERESTARTSYS;
+			goto read_error;
+		}
+	}
+	if (STATE_IS_REMOTE_SHUTDOWN(dev)) {
+		pr_debug("CAIF: %s(): received remote_shutdown indication\n",
+			 __func__);
+		ret = -ESHUTDOWN;
+		goto read_error;
+	}
+
+	/* Block if we don't have any received buffers.
+	 * The queue has its own lock.
+	 */
+	while ((pkt = dev->pktf.cfpkt_qpeek(dev->pktq)) == NULL) {
+
+		if (filp->f_flags & O_NONBLOCK) {
+			pr_debug("CAIF: %s(): O_NONBLOCK\n", __func__);
+			ret = -EAGAIN;
+			goto read_error;
+		}
+		pr_debug("CAIF: %s(): wait_event\n", __func__);
+
+		/* Let writers in. */
+		mutex_unlock(&dev->mutex);
+
+		/* Block reader until data arrives or device is closed. */
+		if (wait_event_interruptible(dev->read_wq,
+					     dev->pktf.cfpkt_qpeek(dev->pktq)
+					     || STATE_IS_REMOTE_SHUTDOWN(dev)
+					     || !STATE_IS_OPEN(dev)) ==
+		    -ERESTARTSYS) {
+			pr_debug("CAIF: %s():_event_interruptible woken by "
+				 "a signal, signal_pending(current) = %d\n",
+				 __func__,
+				 signal_pending(current));
+			return -ERESTARTSYS;
+		}
+
+		pr_debug("CAIF: %s(): awake\n", __func__);
+		if (STATE_IS_REMOTE_SHUTDOWN(dev)) {
+			pr_debug("CAIF: %s received remote_shutdown\n",
+				 __func__);
+			/* Mutex is not held here; return directly. */
+			return -ESHUTDOWN;
+		}
+
+		/* I want to be alone on dev (except status and queue). */
+		if (mutex_lock_interruptible(&dev->mutex)) {
+			pr_debug("CAIF: %s():"
+				 "mutex_lock_interruptible got signalled\n",
+				 __func__);
+			return -ERESTARTSYS;
+		}
+
+		if (!STATE_IS_OPEN(dev)) {
+			/* Someone closed the link, report error. */
+			pr_debug("CAIF: %s(): remote end shutdown!\n",
+				 __func__);
+			ret = -EPIPE;
+			goto read_error;
+		}
+	}
+
+	/* The queue has its own lock. */
+	len = dev->pktf.cfpkt_getlen(pkt);
+
+	/* Check max length that can be copied. */
+	if (len > count) {
+		pr_debug("CAIF: %s(): user buffer too small\n", __func__);
+		ret = -EINVAL;
+		goto read_error;
+	}
+
+	/* Get packet from queue.
+	 * The queue has its own lock.
+	 */
+	pkt = dev->pktf.cfpkt_dequeue(dev->pktq);
+
+	spin_lock(&dev->read_queue_len_lock);
+	dev->read_queue_len--;
+	read_queue_low = (dev->read_queue_len < CHNL_CHR_READ_QUEUE_LOW);
+	spin_unlock(&dev->read_queue_len_lock);
+
+	if (!RX_FLOW_IS_ON(dev) && read_queue_low) {
+#ifdef CONFIG_DEBUG_FS
+		atomic_inc(&dev->num_rx_flow_on);
+#endif
+		SET_RX_FLOW_ON(dev);
+
+		/* Send flow on. */
+		pr_debug("CAIF: %s(): sending flow ON (queue len = %d)\n",
+			 __func__,
+			 dev->read_queue_len);
+		caif_assert(dev->layer.dn);
+		caif_assert(dev->layer.dn->modemcmd);
+		(void) dev->layer.dn->modemcmd(dev->layer.dn,
+					       CAIF_MODEMCMD_FLOW_ON_REQ);
+
+		caif_assert(dev->read_queue_len >= 0);
+	}
+
+	result = dev->pktf.cfpkt_raw_extract(pkt, (void **) &rxbuf, len);
+
+	caif_assert(result >= 0);
+
+	if (result < 0) {
+		pr_debug("CAIF: %s(): cfpkt_raw_extract failed\n", __func__);
+		dev->pktf.cfpkt_destroy(pkt);
+		ret = -EINVAL;
+		goto read_error;
+	}
+
+	/* Copy data from the RX buffer to the user buffer. */
+	if (copy_to_user(buf, rxbuf, len)) {
+		pr_debug("CAIF: %s(): copy_to_user returned non zero", __func__);
+		dev->pktf.cfpkt_destroy(pkt);
+		ret = -EINVAL;
+		goto read_error;
+	}
+
+	/* Free packet. */
+	dev->pktf.cfpkt_destroy(pkt);
+
+	/* Let the others in. */
+	mutex_unlock(&dev->mutex);
+	return len;
+
+read_error:
+	mutex_unlock(&dev->mutex);
+	return ret;
+}
+
+/* Device write function called from Linux kernel (misc device).
+ * Waits (unless O_NONBLOCK) for the channel to connect and for TX flow
+ * to be on, then builds a CAIF packet from the user buffer and submits
+ * it down the stack, retrying on -EAGAIN.  Returns the byte count
+ * written or a negative errno.
+ */
+ssize_t caif_chrwrite(struct file *filp, const char __user *buf,
+		      size_t count, loff_t *f_pos)
+{
+	struct cfpkt *pkt = NULL;
+	struct caif_char_dev *dev = filp->private_data;
+	unsigned char *txbuf;
+	ssize_t ret = -EIO;
+	int result;
+
+	if (dev == NULL) {
+		pr_debug("CAIF: %s(): private_data not set!\n",
+			 __func__);
+		ret = -EBADFD;
+		goto write_error_no_unlock;
+	}
+
+	if (count > CAIF_MAX_PAYLOAD_SIZE) {
+		pr_debug("CAIF: %s() buffer too long\n", __func__);
+		ret = -EINVAL;
+		goto write_error_no_unlock;
+	}
+
+	/* I want to be alone on dev (except status and queue). */
+	if (mutex_lock_interruptible(&dev->mutex)) {
+		pr_debug("CAIF: %s():"
+			 "mutex_lock_interruptible got signalled\n",
+			 __func__);
+		ret = -ERESTARTSYS;
+		goto write_error_no_unlock;
+	}
+
+	caif_assert(dev->pktq);
+
+	if (!STATE_IS_OPEN(dev)) {
+		/* Device is closed or closing. */
+		if (!STATE_IS_PENDING(dev)) {
+			pr_debug("CAIF: %s(): device is closed (by remote)\n",
+				 __func__);
+			ret = -EPIPE;
+		} else {
+			pr_debug("CAIF: %s(): device is closing...\n", __func__);
+			ret = -EBADF;
+		}
+		goto write_error;
+	}
+
+	/* Device is open or opening. */
+	if (STATE_IS_PENDING(dev)) {
+		pr_debug("CAIF: device is opening...\n");
+
+		if (filp->f_flags & O_NONBLOCK) {
+			/* We can't block */
+			pr_debug("CAIF: %s(): state pending and O_NONBLOCK\n",
+				 __func__);
+			ret = -EAGAIN;
+			goto write_error;
+		}
+
+		/* Blocking mode; state is pending and we need to wait
+		 * for its conclusion. (Shutdown_ind set pending off.)
+		 */
+		result =
+		    wait_event_interruptible(dev->mgmt_wq,
+					     !STATE_IS_PENDING(dev));
+		if (result == -ERESTARTSYS) {
+			pr_debug("CAIF: %s(): wait_event_interruptible"
+				 " woken by a signal (1)", __func__);
+			ret = -ERESTARTSYS;
+			goto write_error;
+		}
+	}
+	if (STATE_IS_REMOTE_SHUTDOWN(dev)) {
+		pr_debug("CAIF: received remote_shutdown indication\n");
+		ret = -ESHUTDOWN;
+		goto write_error;
+	}
+
+
+
+	if (!TX_FLOW_IS_ON(dev)) {
+
+		/* Flow is off. Check non-block flag. */
+		if (filp->f_flags & O_NONBLOCK) {
+			pr_debug("CAIF: %s(): O_NONBLOCK and tx flow off",
+				 __func__);
+			ret = -EAGAIN;
+			goto write_error;
+		}
+
+		/* Let readers in. */
+		mutex_unlock(&dev->mutex);
+
+		/* Wait until flow is on or device is closed. */
+		if (wait_event_interruptible(dev->mgmt_wq, TX_FLOW_IS_ON(dev)
+					     || !STATE_IS_OPEN(dev)
+					     || STATE_IS_REMOTE_SHUTDOWN(dev)
+					     ) == -ERESTARTSYS) {
+			pr_debug("CAIF: %s(): wait_event_interruptible"
+				 " woken by a signal (1)", __func__);
+			ret = -ERESTARTSYS;
+			goto write_error_no_unlock;
+		}
+
+		/* I want to be alone on dev (except status and queue). */
+		if (mutex_lock_interruptible(&dev->mutex)) {
+			pr_debug("CAIF: %s():"
+				 "mutex_lock_interruptible got signalled\n",
+				 __func__);
+			ret = -ERESTARTSYS;
+			goto write_error_no_unlock;
+		}
+
+		if (!STATE_IS_OPEN(dev)) {
+			/* Someone closed the link, report error. */
+			pr_debug("CAIF: %s(): remote end shutdown!\n",
+				 __func__);
+			ret = -EPIPE;
+			goto write_error;
+		}
+		if (STATE_IS_REMOTE_SHUTDOWN(dev)) {
+			pr_debug("CAIF: received remote_shutdown indication\n");
+			ret = -ESHUTDOWN;
+			goto write_error;
+		}
+	}
+
+	/* Create packet, buf=NULL means no copying. */
+	pkt = dev->pktf.cfpkt_create_xmit_pkt((const unsigned char *) NULL,
+					      count);
+	if (pkt == NULL) {
+		pr_debug("CAIF: %s():cfpkt_create_pkt returned NULL\n",
+			 __func__);
+		ret = -EIO;
+		goto write_error;
+	}
+
+	if (dev->pktf.cfpkt_raw_append(pkt, (void **) &txbuf, count) < 0) {
+		pr_debug("CAIF: %s(): cfpkt_raw_append failed\n", __func__);
+		dev->pktf.cfpkt_destroy(pkt);
+		ret = -EINVAL;
+		goto write_error;
+	}
+
+	/* Copy data into buffer. */
+	if (copy_from_user(txbuf, buf, count)) {
+		pr_debug("CAIF: %s(): copy_from_user returned non zero.\n",
+			 __func__);
+		dev->pktf.cfpkt_destroy(pkt);
+		ret = -EINVAL;
+		goto write_error;
+	}
+
+	/* Send the packet down the stack. */
+	caif_assert(dev->layer.dn);
+	caif_assert(dev->layer.dn->transmit);
+
+	do {
+		ret = dev->layer.dn->transmit(dev->layer.dn, pkt);
+
+		/*
+		 * FIXME: If ret == -EAGAIN we may spin in a tight loop
+		 * until the CAIF phy device can take more packets.
+		 * we should back off before re-trying (sleep).
+		 */
+		/* NOTE(review): ret >= 0 already implies ret != -EAGAIN,
+		 * so this condition reduces to (ret != -EAGAIN).
+		 */
+		if (likely(ret >= 0) || (ret != -EAGAIN))
+			break;
+
+		/* EAGAIN - retry. */
+		if (filp->f_flags & O_NONBLOCK) {
+			pr_debug("CAIF: %s(): NONBLOCK and transmit failed,"
+				 " error = %d\n", __func__, ret);
+			ret = -EAGAIN;
+			goto write_error;
+		}
+
+		/* Let readers in. */
+		mutex_unlock(&dev->mutex);
+
+		/* Wait until flow is on or device is closed. */
+		if (wait_event_interruptible(dev->mgmt_wq, TX_FLOW_IS_ON(dev)
+					     || !STATE_IS_OPEN(dev)
+					     || STATE_IS_REMOTE_SHUTDOWN(dev)) ==
+		    -ERESTARTSYS) {
+			pr_debug("CAIF: %s(): wait_event_interruptible"
+				 " woken by a signal (1)", __func__);
+			ret = -ERESTARTSYS;
+			goto write_error_no_unlock;
+		}
+
+		/* I want to be alone on dev (except status and queue). */
+		/* NOTE(review): unlike the flow-off path above, the retry
+		 * loop does not re-check STATE_IS_OPEN /
+		 * STATE_IS_REMOTE_SHUTDOWN after re-acquiring the mutex, so
+		 * a retransmit may be attempted on a closed channel — TODO
+		 * confirm intended.
+		 */
+		if (mutex_lock_interruptible(&dev->mutex)) {
+			pr_debug("CAIF: %s():"
+				 "mutex_lock_interruptible got signalled\n",
+				 __func__);
+			ret = -ERESTARTSYS;
+			goto write_error_no_unlock;
+		}
+
+	} while (ret == -EAGAIN);
+
+	if (ret < 0) {
+		dev->pktf.cfpkt_destroy(pkt);
+		pr_debug("CAIF: transmit failed, error = %d\n",
+			 ret);
+
+		goto write_error;
+	}
+
+	mutex_unlock(&dev->mutex);
+	return count;
+
+write_error:
+	mutex_unlock(&dev->mutex);
+write_error_no_unlock:
+	return ret;
+}
+
+/* poll() handler for the CAIF character device.
+ * Reports POLLIN/POLLRDNORM when a packet is queued and
+ * POLLOUT/POLLWRNORM when TX flow is on; POLLERR on any error state.
+ *
+ * Fixes:
+ *  - a poll handler must return an event mask, not a negative errno;
+ *    the -EBADFD return is replaced with POLLERR.
+ *  - when mutex_lock_interruptible() failed, the code jumped to
+ *    poll_error, which unlocks a mutex that was never acquired; return
+ *    POLLERR directly instead.
+ */
+static unsigned int caif_chrpoll(struct file *filp, poll_table *waittab)
+{
+	struct caif_char_dev *dev = filp->private_data;
+	unsigned int mask = 0;
+
+
+	if (dev == NULL) {
+		pr_debug("CAIF: %s(): private_data not set!\n",
+			 __func__);
+		return POLLERR;
+	}
+
+	/* I want to be alone on dev (except status and queue). */
+	if (mutex_lock_interruptible(&dev->mutex)) {
+		pr_debug("CAIF: %s():"
+			 "mutex_lock_interruptible got signalled\n",
+			 __func__);
+		/* Mutex not held; must not go through poll_error. */
+		return POLLERR;
+	}
+
+	caif_assert(dev->pktq);
+
+	if (STATE_IS_REMOTE_SHUTDOWN(dev)) {
+		pr_debug("CAIF: %s(): not open\n", __func__);
+		goto poll_error;
+	}
+
+	if (!STATE_IS_OPEN(dev)) {
+		pr_debug("CAIF: %s(): not open\n", __func__);
+		goto poll_error;
+	}
+
+	poll_wait(filp, &dev->read_wq, waittab);
+
+	if (dev->pktf.cfpkt_qpeek(dev->pktq) != NULL)
+		mask |= (POLLIN | POLLRDNORM);
+
+
+	if (TX_FLOW_IS_ON(dev))
+		mask |= (POLLOUT | POLLWRNORM);
+
+	mutex_unlock(&dev->mutex);
+	pr_debug("CAIF: %s(): caif_chrpoll mask=0x%04x...\n",
+		 __func__, mask);
+
+	return mask;
+
+poll_error:
+	mask |= POLLERR;
+	mutex_unlock(&dev->mutex);
+	return mask;
+}
+
+/* Look up a CAIF character device on the global list, under list_lock.
+ *
+ * Usage:
+ *  minor >= 0                 : match by minor number
+ *  minor < 0 and name != NULL : match by device name
+ *  minor < 0 and name == NULL : return the first device
+ *
+ * If remove_from_list is non-zero the matched entry is unlinked from
+ * the list before it is returned.  Returns NULL when nothing matches.
+ */
+static struct caif_char_dev *find_device(int minor, char *name,
+					 int remove_from_list)
+{
+	struct caif_char_dev *cur;
+	struct caif_char_dev *next;
+	struct caif_char_dev *found = NULL;
+
+	spin_lock(&list_lock);
+	pr_debug("CAIF: %s(): start looping \n", __func__);
+	list_for_each_entry_safe(cur, next, &caif_chrdev_list, list_field) {
+		if (minor >= 0) {
+			/* Match on minor number. */
+			if (cur->misc.minor == minor)
+				found = cur;
+		} else if (name) {
+			/* Match on device name. */
+			if (!strncmp(cur->name, name, sizeof(cur->name)))
+				found = cur;
+		} else {
+			/* No criteria: take the first entry. */
+			found = cur;
+		}
+
+		if (found) {
+			pr_debug("CAIF: %s(): match %d, %s \n",
+				 __func__, minor, name);
+			if (remove_from_list)
+				list_del(&cur->list_field);
+			break;
+		}
+	}
+	spin_unlock(&list_lock);
+	return found;
+}
+
+
+/* Discard every packet still queued on the device's read queue and
+ * reset the queue-length counter to zero.
+ */
+static void drain_queue(struct caif_char_dev *dev)
+{
+	struct cfpkt *pkt;
+
+	/* The packet queue is protected by its own lock. */
+	while ((pkt = dev->pktf.cfpkt_dequeue(dev->pktq)) != NULL) {
+		pr_debug("CAIF: %s(): freeing packet from read queue\n",
+			 __func__);
+		dev->pktf.cfpkt_destroy(pkt);
+	}
+
+	spin_lock(&dev->read_queue_len_lock);
+	dev->read_queue_len = 0;
+	spin_unlock(&dev->read_queue_len_lock);
+}
+
+/* open() handler for the CAIF character device.
+ * On first open it registers the CAIF channel and (unless O_NONBLOCK)
+ * waits for the connect response; subsequent opens only claim the
+ * requested access mode.  Returns 0 or a negative errno.
+ *
+ * Bug fix: the assertion before the modemcmd() call checked
+ * dev->layer.dn->ctrlcmd instead of the member actually invoked,
+ * dev->layer.dn->modemcmd.
+ */
+int caif_chropen(struct inode *inode, struct file *filp)
+{
+	struct caif_char_dev *dev = NULL;
+	int result = -1;
+	int minor = iminor(inode);
+	int mode = 0;
+	int ret = -EIO;
+
+
+	dev = find_device(minor, NULL, 0);
+
+	if (dev == NULL) {
+		pr_debug("CAIF: [%s] COULD NOT FIND DEVICE\n", __func__);
+		return -EBADF;
+	}
+
+	pr_debug("CAIF: dev=%p OPEN=%d, TX_FLOW=%d, RX_FLOW=%d\n", dev,
+		 STATE_IS_OPEN(dev),
+		 TX_FLOW_IS_ON(dev), RX_FLOW_IS_ON(dev));
+
+	pr_debug("CAIF: get mutex\n");
+
+	/* I want to be alone on dev (except status and queue). */
+	if (mutex_lock_interruptible(&dev->mutex)) {
+		pr_debug("CAIF: %s():"
+			 "mutex_lock_interruptible got signalled\n",
+			 __func__);
+		return -ERESTARTSYS;
+	}
+#ifdef CONFIG_DEBUG_FS
+	atomic_inc(&dev->num_open);
+#endif
+	filp->private_data = dev;
+
+	switch (filp->f_flags & O_ACCMODE) {
+	case O_RDONLY:
+		mode = CHR_READ_FLAG;
+		break;
+	case O_WRONLY:
+		mode = CHR_WRITE_FLAG;
+		break;
+	case O_RDWR:
+		mode = CHR_READ_FLAG | CHR_WRITE_FLAG;
+		break;
+	}
+
+	/* If device is not open, make sure device is in fully closed state. */
+	if (!STATE_IS_OPEN(dev)) {
+		/* Has link close response been received
+		 * (if we ever sent it)?
+		 */
+		if (STATE_IS_PENDING(dev)) {
+			/* Still waiting for close response from remote.
+			 * If opened non-blocking, report "would block".
+			 */
+			if (filp->f_flags & O_NONBLOCK) {
+				pr_debug("CAIF: O_NONBLOCK && close pending\n");
+				ret = -EAGAIN;
+				goto open_error;
+			}
+
+			pr_debug("CAIF: %s(): "
+				 "wait for close response from remote...\n",
+				 __func__);
+
+			/* Blocking mode; close is pending and we need to wait
+			 * for its conclusion. (Shutdown_ind set pending off.)
+			 */
+			result =
+			    wait_event_interruptible(dev->mgmt_wq,
+						     !STATE_IS_PENDING(dev));
+			if (result == -ERESTARTSYS) {
+				pr_debug("CAIF: %s(): wait_event_interruptible"
+					 " woken by a signal (1)", __func__);
+				ret = -ERESTARTSYS;
+				goto open_error;
+			}
+		}
+	}
+
+	/* Device is now either closed, pending open or open */
+	if (STATE_IS_OPEN(dev) && !STATE_IS_PENDING(dev)) {
+		/* Open */
+		pr_debug("CAIF: %s():"
+			 " Device is already opened (dev=%p) check access "
+			 "f_flags = 0x%x file_mode = 0x%x\n",
+			 __func__, dev, mode, dev->file_mode);
+
+		if (mode & dev->file_mode) {
+			pr_debug("CAIF: %s():Access mode already in use 0x%x\n",
+				 __func__, mode);
+			ret = -EBUSY;
+			goto open_error;
+		}
+	} else {
+		/* We are closed or pending open.
+		 * If closed: send link setup
+		 * If pending open: link setup already sent (we could have been
+		 * interrupted by a signal last time)
+		 */
+		if (!STATE_IS_OPEN(dev)) {
+			/* First opening of file; connect lower layers: */
+
+			dev->layer.receive = caif_chrrecv_cb;
+
+			SET_STATE_OPEN(dev);
+			SET_PENDING_ON(dev);
+
+			/* Register this channel. */
+			result =
+			    caifdev_adapt_register(&dev->config, &dev->layer);
+			if (result < 0) {
+				pr_debug("CAIF: %s():can't register channel\n",
+					 __func__);
+				ret = -EIO;
+				SET_STATE_CLOSED(dev);
+				SET_PENDING_OFF(dev);
+				goto open_error;
+			}
+#ifdef CONFIG_DEBUG_FS
+			atomic_inc(&dev->num_init);
+#endif
+		}
+
+		/* If opened non-blocking, report "success".
+		 */
+		if (filp->f_flags & O_NONBLOCK) {
+			pr_debug("CAIF: %s(): O_NONBLOCK success\n", __func__);
+			ret = 0;
+			goto open_success;
+		}
+
+		pr_debug("CAIF: WAIT FOR CONNECT RESPONSE \n");
+		result =
+		    wait_event_interruptible(dev->mgmt_wq,
+					     !STATE_IS_PENDING(dev));
+		if (result == -ERESTARTSYS) {
+			pr_debug("CAIF: %s(): wait_event_interruptible"
+				 " woken by a signal (2)", __func__);
+			ret = -ERESTARTSYS;
+			goto open_error;
+		}
+
+		if (!STATE_IS_OPEN(dev)) {
+			/* Lower layers said "no". */
+			pr_debug("CAIF: %s(): caif_chropen: CLOSED RECEIVED\n",
+				 __func__);
+			ret = -EPIPE;
+			goto open_error;
+		}
+
+		pr_debug("CAIF: %s(): caif_chropen: CONNECT RECEIVED\n",
+			 __func__);
+
+
+		SET_RX_FLOW_ON(dev);
+
+		/* Send flow on. */
+		pr_debug("CAIF: %s(): sending flow ON at open\n", __func__);
+
+		caif_assert(dev->layer.dn);
+		caif_assert(dev->layer.dn->modemcmd);
+		(void) dev->layer.dn->modemcmd(dev->layer.dn,
+					       CAIF_MODEMCMD_FLOW_ON_REQ);
+
+	}
+open_success:
+	/* Open is OK. */
+	dev->file_mode |= mode;
+
+	pr_debug("CAIF: %s(): Open - file mode = %x\n",
+		 __func__, dev->file_mode);
+
+	pr_debug("CAIF: %s(): CONNECTED \n", __func__);
+
+	mutex_unlock(&dev->mutex);
+	return 0;
+
+open_error:
+	mutex_unlock(&dev->mutex);
+	return ret;
+}
+
+/* release() handler for the CAIF character device.
+ * Drops this file's access mode; when the last mode is released the
+ * channel is unregistered (teardown completion is signalled later via
+ * the flow-control callback) and the read queue is drained.
+ */
+int caif_chrrelease(struct inode *inode, struct file *filp)
+{
+	struct caif_char_dev *dev = NULL;
+	int minor = iminor(inode);
+	int result;
+	int mode = 0;
+	/* NOTE(review): tx_flow_state_was_on is computed below but never
+	 * read — dead store.
+	 */
+	int tx_flow_state_was_on;
+
+
+	dev = find_device(minor, NULL, 0);
+	if (dev == NULL) {
+		pr_debug("CAIF: %s(): Could not find device\n", __func__);
+		return -EBADF;
+	}
+
+	/* I want to be alone on dev (except status queue). */
+	if (mutex_lock_interruptible(&dev->mutex)) {
+		pr_debug("CAIF: %s():"
+			 "mutex_lock_interruptible got signalled\n",
+			 __func__);
+		return -ERESTARTSYS;
+	}
+#ifdef CONFIG_DEBUG_FS
+	atomic_inc(&dev->num_close);
+#endif
+
+	/* Is the device open? */
+	if (!STATE_IS_OPEN(dev)) {
+		pr_debug("CAIF: %s(): Device not open (dev=%p) \n",
+			 __func__, dev);
+		mutex_unlock(&dev->mutex);
+		return 0;
+	}
+
+	/* Is the device waiting for link setup response? */
+	if (STATE_IS_PENDING(dev)) {
+		pr_debug("CAIF: %s(): Device is open pending (dev=%p) \n",
+			 __func__, dev);
+		mutex_unlock(&dev->mutex);
+		/* What to return here? Seems that EBADF is the closest :-| */
+		return -EBADF;
+	}
+
+	switch (filp->f_flags & O_ACCMODE) {
+	case O_RDONLY:
+		mode = CHR_READ_FLAG;
+		break;
+	case O_WRONLY:
+		mode = CHR_WRITE_FLAG;
+		break;
+	case O_RDWR:
+		mode = CHR_READ_FLAG | CHR_WRITE_FLAG;
+		break;
+	}
+
+	dev->file_mode &= ~mode;
+	if (dev->file_mode) {
+		pr_debug("CAIF: %s(): Device is kept open by someone else,"
+			 " don't close. CAIF connection - file_mode = %x\n",
+			 __func__, dev->file_mode);
+		mutex_unlock(&dev->mutex);
+		return 0;
+	}
+
+	/* IS_CLOSED have double meaning:
+	 * 1) Spontanous Remote Shutdown Request.
+	 * 2) Ack on a channel teardown(disconnect)
+	 * Must clear bit, in case we previously received
+	 * a remote shudown request.
+	 */
+
+	SET_STATE_CLOSED(dev);
+	SET_PENDING_ON(dev);
+	tx_flow_state_was_on = TX_FLOW_IS_ON(dev);
+	SET_TX_FLOW_OFF(dev);
+	result = caifdev_adapt_unregister(&dev->layer);
+
+	if (result < 0) {
+		pr_debug("CAIF: %s(): caifdev_adapt_unregister() failed\n",
+			 __func__);
+		/* NOTE(review): STATE_CLOSED and TX_FLOW_OFF were already
+		 * set above; only the PENDING_OFF here is new.
+		 */
+		SET_STATE_CLOSED(dev);
+		SET_PENDING_OFF(dev);
+		SET_TX_FLOW_OFF(dev);
+		mutex_unlock(&dev->mutex);
+		return -EIO;
+	}
+
+	if (STATE_IS_REMOTE_SHUTDOWN(dev)) {
+		SET_PENDING_OFF(dev);
+		SET_REMOTE_SHUTDOWN_OFF(dev);
+	}
+#ifdef CONFIG_DEBUG_FS
+	atomic_inc(&dev->num_deinit);
+#endif
+
+	/* We don't wait for close response here. Close pending state will be
+	 * cleared by flow control callback when response arrives.
+	 */
+
+	/* Empty the queue */
+	drain_queue(dev);
+	SET_RX_FLOW_ON(dev);
+	dev->file_mode = 0;
+
+	mutex_unlock(&dev->mutex);
+	return 0;
+}
+
+/* File operations for the CAIF character device nodes. */
+const struct file_operations caif_chrfops = {
+	.owner = THIS_MODULE,
+	.read = caif_chrread,
+	.write = caif_chrwrite,
+	.open = caif_chropen,
+	.release = caif_chrrelease,
+	.poll = caif_chrpoll,
+};
+
+/*
+ * Create and register a new CAIF character device described by @action.
+ * On success the misc major/minor actually used are written back into
+ * @action. Returns 0 on success or a negative errno.
+ *
+ * Fixes vs. original: dev->name is now always NUL-terminated, and the
+ * mutex_lock_interruptible() failure path no longer leaks the device
+ * structure (and a possibly already-registered misc device).
+ */
+int chrdev_create(struct caif_channel_create_action *action)
+{
+	struct caif_char_dev *dev = NULL;
+	int result;
+
+	/* Allocate and zero the device structure. */
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev) {
+		pr_err("CAIF: %s kmalloc failed.\n", __func__);
+		return -ENOMEM;
+	}
+	pr_debug("CAIF: %s(): dev=%p \n", __func__, dev);
+
+	mutex_init(&dev->mutex);
+	init_waitqueue_head(&dev->read_wq);
+	init_waitqueue_head(&dev->mgmt_wq);
+	spin_lock_init(&dev->read_queue_len_lock);
+
+	/* Fill in some information concerning the misc device. */
+	dev->misc.minor = MISC_DYNAMIC_MINOR;
+	/* strncpy() does not guarantee NUL-termination; force it. */
+	strncpy(dev->name, action->name.name, sizeof(dev->name) - 1);
+	dev->name[sizeof(dev->name) - 1] = '\0';
+	dev->misc.name = dev->name;
+	dev->misc.fops = &caif_chrfops;
+
+	/* Register the device. */
+	result = misc_register(&dev->misc);
+
+	/* Lock in order to try to stop someone from opening the device
+	 * too early. The misc device has its own lock. We cannot take our
+	 * lock until misc_register() is finished, because in open() the
+	 * locks are taken in this order (misc first and then dev).
+	 * So anyone managing to open the device between the misc_register
+	 * and the mutex_lock will get a "device not found" error. Don't
+	 * think it can be avoided.
+	 */
+	if (mutex_lock_interruptible(&dev->mutex)) {
+		pr_debug("CAIF: %s():"
+			"mutex_lock_interruptible got signalled\n",
+			__func__);
+		/* Undo the registration (if it succeeded) and free the
+		 * device; the original code leaked both here.
+		 */
+		if (result >= 0)
+			misc_deregister(&dev->misc);
+		kfree(dev);
+		return -ERESTARTSYS;
+	}
+
+	if (result < 0) {
+		pr_err("CAIF: chnl_chr: error - %d, can't register misc.\n",
+			result);
+		mutex_unlock(&dev->mutex);
+		goto err_failed;
+	}
+
+	dev->pktf = cfcnfg_get_packet_funcs();
+
+	dev->pktq = dev->pktf.cfpktq_create();
+	if (!dev->pktq) {
+		pr_err("CAIF: %s(): queue create failed.\n", __func__);
+		result = -ENOMEM;
+		mutex_unlock(&dev->mutex);
+		misc_deregister(&dev->misc);
+		goto err_failed;
+	}
+
+	/* Report the name actually used back to the caller. */
+	strncpy(action->name.name, dev->misc.name, sizeof(action->name.name)-1);
+	action->name.name[sizeof(action->name.name)-1] = '\0';
+	action->major = MISC_MAJOR;
+	action->minor = dev->misc.minor;
+
+	dev->config = action->config;
+	pr_debug("CAIF: dev: Registered dev with name=%s minor=%d, dev=%p\n",
+		dev->misc.name, dev->misc.minor, dev->misc.this_device);
+
+	dev->layer.ctrlcmd = caif_chrflowctrl_cb;
+	SET_STATE_CLOSED(dev);
+	SET_PENDING_OFF(dev);
+	SET_TX_FLOW_OFF(dev);
+	SET_RX_FLOW_ON(dev);
+
+	/* Add the device to the global list. */
+	spin_lock(&list_lock);
+	list_add(&dev->list_field, &caif_chrdev_list);
+	spin_unlock(&list_lock);
+
+	pr_debug("CAIF: %s(): Creating device %s\n",
+		__func__, action->name.name);
+
+#ifdef CONFIG_DEBUG_FS
+	if (debugfsdir != NULL) {
+		dev->debugfs_device_dir =
+			debugfs_create_dir(dev->misc.name, debugfsdir);
+		debugfs_create_u32("conn_state", S_IRUSR | S_IWUSR,
+				dev->debugfs_device_dir, &dev->conn_state);
+		debugfs_create_u32("flow_state", S_IRUSR | S_IWUSR,
+				dev->debugfs_device_dir, &dev->flow_state);
+		debugfs_create_u32("num_open", S_IRUSR | S_IWUSR,
+				dev->debugfs_device_dir,
+				(u32 *) &dev->num_open);
+		debugfs_create_u32("num_close", S_IRUSR | S_IWUSR,
+				dev->debugfs_device_dir,
+				(u32 *) &dev->num_close);
+		debugfs_create_u32("num_init", S_IRUSR | S_IWUSR,
+				dev->debugfs_device_dir,
+				(u32 *) &dev->num_init);
+		debugfs_create_u32("num_init_resp", S_IRUSR | S_IWUSR,
+				dev->debugfs_device_dir,
+				(u32 *) &dev->num_init_resp);
+		debugfs_create_u32("num_init_fail_resp", S_IRUSR | S_IWUSR,
+				dev->debugfs_device_dir,
+				(u32 *) &dev->num_init_fail_resp);
+		debugfs_create_u32("num_deinit", S_IRUSR | S_IWUSR,
+				dev->debugfs_device_dir,
+				(u32 *) &dev->num_deinit);
+		debugfs_create_u32("num_deinit_resp", S_IRUSR | S_IWUSR,
+				dev->debugfs_device_dir,
+				(u32 *) &dev->num_deinit_resp);
+		debugfs_create_u32("num_remote_shutdown_ind",
+				S_IRUSR | S_IWUSR, dev->debugfs_device_dir,
+				(u32 *) &dev->num_remote_shutdown_ind);
+		debugfs_create_u32("num_tx_flow_off_ind", S_IRUSR | S_IWUSR,
+				dev->debugfs_device_dir,
+				(u32 *) &dev->num_tx_flow_off_ind);
+		debugfs_create_u32("num_tx_flow_on_ind", S_IRUSR | S_IWUSR,
+				dev->debugfs_device_dir,
+				(u32 *) &dev->num_tx_flow_on_ind);
+		debugfs_create_u32("num_rx_flow_off", S_IRUSR | S_IWUSR,
+				dev->debugfs_device_dir,
+				(u32 *) &dev->num_rx_flow_off);
+		debugfs_create_u32("num_rx_flow_on", S_IRUSR | S_IWUSR,
+				dev->debugfs_device_dir,
+				(u32 *) &dev->num_rx_flow_on);
+		debugfs_create_u32("read_queue_len", S_IRUSR | S_IWUSR,
+				dev->debugfs_device_dir,
+				(u32 *) &dev->read_queue_len);
+	}
+#endif
+	mutex_unlock(&dev->mutex);
+	return 0;
+err_failed:
+	action->name.name[0] = '\0';
+	action->major = -1;
+	action->minor = -1;
+	kfree(dev);
+	return result;
+}
+
+/*
+ * Remove the named CAIF character device (first device if @name is
+ * NULL). Fails with -EBUSY while the device is still open.
+ * Returns 0 or a negative errno.
+ */
+int chrdev_remove(char *name)
+{
+	int ret = 0;
+
+	struct caif_char_dev *dev = NULL;
+
+	/* Find device from name. */
+	dev = find_device(-1, name, 0);
+	if (!dev)
+		return -EBADF;
+
+	if (mutex_lock_interruptible(&dev->mutex)) {
+		pr_debug("CAIF: %s():"
+			"mutex_lock_interruptible got signalled\n",
+			__func__);
+		return -ERESTARTSYS;
+	}
+
+	if (STATE_IS_OPEN(dev)) {
+		pr_debug("CAIF: %s(): Device is opened "
+			"(dev=%p) file_mode = 0x%x\n",
+			__func__, dev, dev->file_mode);
+		mutex_unlock(&dev->mutex);
+		return -EBUSY;
+	}
+
+	/* Remove from list (second lookup, remove flag set).
+	 * NOTE(review): a concurrent opener between the two find_device()
+	 * calls could still reach the device - verify against open().
+	 */
+	(void) find_device(-1, name, 1);
+
+	drain_queue(dev);
+	ret = misc_deregister(&dev->misc);
+
+	cfglu_free(dev->pktq);
+
+#ifdef CONFIG_DEBUG_FS
+	if (dev->debugfs_device_dir != NULL)
+		debugfs_remove_recursive(dev->debugfs_device_dir);
+#endif
+
+	mutex_unlock(&dev->mutex);
+	pr_debug("CAIF: %s(): Removing device %s\n", __func__, dev->name);
+	kfree(dev);
+
+	return ret;
+}
+
+/* Dispatch a CAIF management action to create or delete a char device. */
+int chrdev_mgmt(int action, union caif_action *param)
+{
+
+	switch (action) {
+	case CAIF_ACT_CREATE_DEVICE:
+		return chrdev_create(&param->create_channel);
+	case CAIF_ACT_DELETE_DEVICE:
+		return chrdev_remove(param->delete_channel.name);
+	default:
+		return -EINVAL;
+	}
+}
+
+/* Module init: create the debugfs root and register the mgmt callback. */
+int caif_chrinit_module(void)
+{
+#ifdef CONFIG_DEBUG_FS
+	/* A NULL result is tolerated: per-device dirs are only created
+	 * when debugfsdir is non-NULL (see chrdev_create()).
+	 */
+	debugfsdir = debugfs_create_dir("chnl_chr", NULL);
+#endif
+
+	spin_lock_init(&list_lock);
+	caif_register_chrdev(chrdev_mgmt);
+	return 0;
+}
+
+/*
+ * Module exit: remove devices one at a time (chrdev_remove(NULL) takes
+ * the first list entry) until removal fails, then unregister the
+ * chardev hook and tear down debugfs.
+ */
+void caif_chrexit_module(void)
+{
+	int result;
+
+	do {
+		/* Remove any device (the first in the list). */
+		result = chrdev_remove(NULL);
+	} while (result == 0);
+
+	caif_unregister_chrdev();
+
+#ifdef CONFIG_DEBUG_FS
+	if (debugfsdir != NULL)
+		debugfs_remove_recursive(debugfsdir);
+#endif
+
+}
+
+module_init(caif_chrinit_module);
+module_exit(caif_chrexit_module);
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
new file mode 100644
index 00000000000..2e53a048f0a
--- /dev/null
+++ b/net/caif/chnl_net.c
@@ -0,0 +1,594 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2009
+ * Author: Daniel Martensson / Daniel.Martensson@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/version.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/if_ether.h>
+#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/sched.h>
+#include <linux/sockios.h>
+#include <linux/caif/if_caif.h>
+#include <net/rtnetlink.h>
+#include <net/caif/generic/caif_layer.h>
+#include <net/caif/generic/cfcnfg.h>
+#include <net/caif/generic/cfpkt.h>
+#include <net/caif/caif_dev.h>
+
+#define CAIF_CONNECT_TIMEOUT 30
+#define SIZE_MTU 1500
+#define SIZE_MTU_MAX 4080
+#define SIZE_MTU_MIN 68
+#define CAIF_NET_DEFAULT_QUEUE_LEN 500
+
+/* This list is protected by the rtnl lock. */
+static LIST_HEAD(chnl_net_list);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("caif");
+
+/* Per-interface state for a CAIF network device. */
+struct chnl_net {
+	/* CAIF stack layer; callbacks recover this struct via container_of. */
+	struct layer chnl;
+	/* NOTE(review): apparently unused; code updates netdev->stats instead. */
+	struct net_device_stats stats;
+	/* Channel setup parameters passed to caifdev_adapt_register(). */
+	struct caif_channel_config config;
+	/* Node in chnl_net_list (protected by the rtnl lock). */
+	struct list_head list_field;
+	/* Back-pointer to the owning net_device. */
+	struct net_device *netdev;
+	/* Copy of the interface name, set in chnl_net_init(). */
+	char name[256];
+	/* open/stop sleep here until the flow-control callback fires. */
+	wait_queue_head_t netmgmt_wq;
+	/* Flow status to remember and control the transmission. */
+	bool flowenabled;
+};
+
+
+/*
+ * Find a registered chnl_net device by name (matching either the
+ * internal name or the net_device name); a NULL @name returns the
+ * first list entry. Caller must hold the rtnl lock.
+ * Returns NULL when nothing matches.
+ */
+static struct chnl_net *find_device(char *name)
+{
+	struct list_head *list_node;
+	struct list_head *n;
+	struct chnl_net *dev = NULL;
+	struct chnl_net *tmp;
+	ASSERT_RTNL();
+	list_for_each_safe(list_node, n, &chnl_net_list) {
+		tmp = list_entry(list_node, struct chnl_net, list_field);
+		/* Find from name. */
+		if (name) {
+			if (!strncmp(tmp->name, name, sizeof(tmp->name)))
+				dev = tmp;
+			else if (!strncmp(tmp->netdev->name,
+					name,
+					sizeof(tmp->netdev->name)))
+				dev = tmp;
+		} else
+			/* Get the first element if name is not specified. */
+			dev = tmp;
+		if (dev)
+			break;
+	}
+	return dev;
+}
+
+/*
+ * Delete @delete_node from chnl_net_list, but only if it is actually
+ * linked there - tolerates double removal. Caller holds the rtnl lock.
+ */
+static void robust_list_del(struct list_head *delete_node)
+{
+	struct list_head *list_node;
+	struct list_head *n;
+	ASSERT_RTNL();
+	list_for_each_safe(list_node, n, &chnl_net_list) {
+		if (list_node == delete_node) {
+			list_del(list_node);
+			break;
+		}
+	}
+}
+
+/*
+ * Receive callback from the CAIF stack: convert the CAIF packet back
+ * to an sk_buff and inject it into the network stack. Always claims
+ * success once the skb has been handed to netif_rx*().
+ *
+ * Fix vs. original: use CHECKSUM_NONE instead of CHECKSUM_COMPLETE for
+ * non-loopback packets. CHECKSUM_COMPLETE asserts that skb->csum holds
+ * a full packet checksum, which nothing here computes; CHECKSUM_NONE
+ * lets the stack verify checksums itself.
+ */
+static int chnl_recv_cb(struct layer *layr, struct cfpkt *pkt)
+{
+	struct sk_buff *skb;
+	struct chnl_net *priv;
+	int pktlen;
+
+	priv = container_of(layr, struct chnl_net, chnl);
+	if (!priv)
+		return -EINVAL;
+
+	/* Get length of CAIF packet. */
+	pktlen = cfpkt_getlen(pkt);
+
+	skb = (struct sk_buff *) cfpkt_tonative(pkt);
+	/* Pass some minimum information and
+	 * send the packet to the net stack.
+	 */
+	skb->dev = priv->netdev;
+	skb->protocol = htons(ETH_P_IP);
+
+	if (priv->config.type == CAIF_CHTY_DATAGRAM_LOOP) {
+		/* We change the header, so the checksum is corrupted. */
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	} else {
+		/* No checksum computed by the modem path. */
+		skb->ip_summed = CHECKSUM_NONE;
+	}
+
+	/* FIXME: Drivers should call this in tasklet context. */
+	if (in_interrupt())
+		netif_rx(skb);
+	else
+		netif_rx_ni(skb);
+
+	/* Update statistics. */
+	priv->netdev->stats.rx_packets++;
+	priv->netdev->stats.rx_bytes += pktlen;
+
+	return 0;
+}
+
+/*
+ * Flow-control / link-state callback from the CAIF stack. Enables or
+ * disables the transmit queue accordingly and wakes anyone sleeping in
+ * chnl_net_open()/chnl_net_stop() on netmgmt_wq.
+ */
+static void chnl_flowctrl_cb(struct layer *layr, enum caif_ctrlcmd flow,
+				int phyid)
+{
+	struct chnl_net *priv = NULL;
+	pr_debug("CAIF: %s(): NET flowctrl func called flow: %s.\n",
+		__func__,
+		flow == CAIF_CTRLCMD_FLOW_ON_IND ? "ON" :
+		flow == CAIF_CTRLCMD_INIT_RSP ? "INIT" :
+		flow == CAIF_CTRLCMD_FLOW_OFF_IND ? "OFF" :
+		flow == CAIF_CTRLCMD_DEINIT_RSP ? "CLOSE/DEINIT" :
+		flow == CAIF_CTRLCMD_INIT_FAIL_RSP ? "OPEN_FAIL" :
+		flow == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ?
+		"REMOTE_SHUTDOWN" : "UKNOWN CTRL COMMAND");
+
+	priv = container_of(layr, struct chnl_net, chnl);
+
+	switch (flow) {
+	case CAIF_CTRLCMD_FLOW_OFF_IND:
+	case CAIF_CTRLCMD_DEINIT_RSP:
+	case CAIF_CTRLCMD_INIT_FAIL_RSP:
+	case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
+		/* Stop transmission and wake waiters (open failure / stop). */
+		priv->flowenabled = false;
+		netif_tx_disable(priv->netdev);
+		wake_up_interruptible(&priv->netmgmt_wq);
+		break;
+	case CAIF_CTRLCMD_FLOW_ON_IND:
+	case CAIF_CTRLCMD_INIT_RSP:
+		/* Allow transmission again and wake a pending open(). */
+		priv->flowenabled = true;
+		netif_wake_queue(priv->netdev);
+		wake_up_interruptible(&priv->netmgmt_wq);
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * ndo_start_xmit: hand an outgoing sk_buff to the CAIF stack.
+ * Must return a NETDEV_TX_* code; on a drop the skb is freed here.
+ *
+ * Fixes vs. original: removed ASSERT_RTNL() (the xmit path does not
+ * run under the rtnl lock); oversized packets are freed and counted as
+ * dropped instead of leaking the skb via an invalid -ENOSPC return;
+ * hard transmit errors no longer return a negative errno either.
+ */
+static int chnl_net_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct chnl_net *priv;
+	struct cfpkt *pkt;
+	int len;
+	int result;
+
+	/* netdev_priv() is an offset into dev and cannot be NULL. */
+	priv = (struct chnl_net *)netdev_priv(dev);
+
+	if (skb->len > priv->netdev->mtu) {
+		pr_warning("CAIF: %s(): Size of skb exceeded MTU\n", __func__);
+		kfree_skb(skb);
+		dev->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+
+	if (!priv->flowenabled) {
+		pr_debug("CAIF: %s(): dropping packets flow off\n", __func__);
+		/* Flow is off; ask the stack to requeue and retry. */
+		return NETDEV_TX_BUSY;
+	}
+
+	if (priv->config.type == CAIF_CHTY_DATAGRAM_LOOP) {
+		struct iphdr *hdr;
+		__be32 swap;
+		/* Retrieve IP header. */
+		hdr = ip_hdr(skb);
+		/* Change source and destination address. */
+		swap = hdr->saddr;
+		hdr->saddr = hdr->daddr;
+		hdr->daddr = swap;
+	}
+	/* Store original SKB length. */
+	len = skb->len;
+
+	/* Ownership of the skb passes to the CAIF packet here. */
+	pkt = cfpkt_fromnative(CAIF_DIR_OUT, (void *) skb);
+
+	/* Send the packet down the stack. */
+	result = priv->chnl.dn->transmit(priv->chnl.dn, pkt);
+	if (result) {
+		if (result == CFGLU_ERETRY)
+			return NETDEV_TX_BUSY;
+		/* NOTE(review): assumes the lower layer consumed/freed the
+		 * packet on hard errors - confirm cfpkt ownership rules.
+		 */
+		dev->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+
+	/* Update statistics. */
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += len;
+
+	return NETDEV_TX_OK;
+}
+
+
+/*
+ * ndo_open: register the adaptation layer with the CAIF stack and wait
+ * (interruptibly) for the flow-on response before reporting success.
+ *
+ * Fix vs. original: priv was dereferenced for the debug print before
+ * its NULL check; the order is corrected (netdev_priv() cannot really
+ * return NULL, but the defensive check is kept).
+ */
+static int chnl_net_open(struct net_device *dev)
+{
+	struct chnl_net *priv;
+	int result;
+	ASSERT_RTNL();
+
+	priv = (struct chnl_net *)netdev_priv(dev);
+	if (!priv) {
+		pr_debug("CAIF: %s(): chnl_net_open: no priv\n", __func__);
+		return -ENODEV;
+	}
+	pr_debug("CAIF: %s(): dev name: %s\n", __func__, priv->name);
+
+	result = caifdev_adapt_register(&priv->config, &priv->chnl);
+	if (result != 0) {
+		pr_debug("CAIF: %s(): err: "
+			"Unable to register and open device, Err:%d\n",
+			__func__,
+			result);
+		return -ENODEV;
+	}
+
+	/* Sleep until chnl_flowctrl_cb() reports flow on. */
+	result = wait_event_interruptible(priv->netmgmt_wq, priv->flowenabled);
+	if (result == -ERESTARTSYS) {
+		pr_debug("CAIF: %s(): wait_event_interruptible"
+			" woken by a signal\n", __func__);
+		return -ERESTARTSYS;
+	} else
+		pr_debug("CAIF: %s(): Flow on recieved\n", __func__);
+
+	return 0;
+}
+
+/*
+ * ndo_stop: unregister the adaptation layer and wait (interruptibly)
+ * for the corresponding flow-off/deinit indication. Returns -EBUSY if
+ * the unregister call itself fails, otherwise 0 - even when the wait
+ * was interrupted by a signal.
+ */
+static int chnl_net_stop(struct net_device *dev)
+{
+	struct chnl_net *priv;
+	int result = -1;
+	ASSERT_RTNL();
+	priv = (struct chnl_net *)netdev_priv(dev);
+
+	result = caifdev_adapt_unregister(&priv->chnl);
+	if (result != 0) {
+		pr_debug("CAIF: %s(): chnl_net_stop: err: "
+			"Unable to STOP device, Err:%d\n",
+			__func__, result);
+		return -EBUSY;
+	}
+	/* Sleep until chnl_flowctrl_cb() clears flowenabled. */
+	result = wait_event_interruptible(priv->netmgmt_wq,
+					!priv->flowenabled);
+
+	if (result == -ERESTARTSYS) {
+		pr_debug("CAIF: %s(): wait_event_interruptible woken by"
+			" signal, signal_pending(current) = %d\n",
+			__func__,
+			signal_pending(current));
+	} else {
+		pr_debug("CAIF: %s(): disconnect received\n", __func__);
+
+	}
+
+	return 0;
+}
+
+/*
+ * ndo_init: record the interface name in the private config and name
+ * fields. NOTE(review): strncpy() with the full destination size may
+ * leave the copy unterminated if dev->name ever matches the size of
+ * config.name - confirm the buffer sizes.
+ */
+int chnl_net_init(struct net_device *dev)
+{
+	struct chnl_net *priv;
+	ASSERT_RTNL();
+	priv = (struct chnl_net *)netdev_priv(dev);
+	strncpy(priv->config.name, dev->name, sizeof(priv->config.name));
+	strncpy(priv->name, dev->name, sizeof(priv->name));
+	return 0;
+}
+
+/*
+ * ndo_uninit: unlink from the module list and drop the reference taken
+ * with dev_hold() at creation time.
+ */
+void chnl_net_uninit(struct net_device *dev)
+{
+	struct chnl_net *priv;
+	ASSERT_RTNL();
+	priv = (struct chnl_net *)netdev_priv(dev);
+	/* If someone already have the lock it's already protected */
+	robust_list_del(&priv->list_field);
+	dev_put(dev);
+}
+
+//official-kernel-patch-cut-here
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 28))
+//official-kernel-patch-resume-here
+/* net_device callbacks (kernels that use struct net_device_ops). */
+static const struct net_device_ops netdev_ops = {
+	.ndo_open = chnl_net_open,
+	.ndo_stop = chnl_net_stop,
+	.ndo_init = chnl_net_init,
+	.ndo_uninit = chnl_net_uninit,
+	.ndo_start_xmit = chnl_net_hard_start_xmit,
+};
+//official-kernel-patch-cut-here
+#endif
+//official-kernel-patch-resume-here
+
+/*
+ * Setup callback for alloc_netdev(): wires the device callbacks, fills
+ * in default CAIF datagram channel configuration and links the new
+ * device into chnl_net_list. Runs under the rtnl lock.
+ */
+static void ipcaif_net_init(struct net_device *dev)
+{
+	struct chnl_net *priv;
+//official-kernel-patch-cut-here
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 28))
+	dev->open = chnl_net_open;
+	dev->stop = chnl_net_stop;
+	dev->init = chnl_net_init;
+	dev->uninit = chnl_net_uninit;
+	dev->hard_start_xmit = chnl_net_hard_start_xmit;
+#else
+//official-kernel-patch-resume-here
+	dev->netdev_ops = &netdev_ops;
+//official-kernel-patch-cut-here
+#endif
+//official-kernel-patch-resume-here
+	dev->destructor = free_netdev;
+	dev->flags |= IFF_NOARP;
+	dev->flags |= IFF_POINTOPOINT;
+	dev->needed_headroom = CAIF_NEEDED_HEADROOM;
+	dev->needed_tailroom = CAIF_NEEDED_TAILROOM;
+	dev->mtu = SIZE_MTU;
+	dev->tx_queue_len = CAIF_NET_DEFAULT_QUEUE_LEN;
+
+	priv = (struct chnl_net *)netdev_priv(dev);
+	priv->chnl.receive = chnl_recv_cb;
+	priv->chnl.ctrlcmd = chnl_flowctrl_cb;
+	priv->netdev = dev;
+	priv->config.type = CAIF_CHTY_DATAGRAM;
+	priv->config.phy_pref = CFPHYPREF_HIGH_BW;
+	priv->config.priority = CAIF_PRIO_LOW;
+	priv->config.u.dgm.connection_id = -1;	/* Insert illegal value */
+	priv->flowenabled = false;
+
+	ASSERT_RTNL();
+	init_waitqueue_head(&priv->netmgmt_wq);
+	list_add(&priv->list_field, &chnl_net_list);
+}
+
+/* Unregister the net_device backing @dev (caller holds the rtnl lock). */
+static int delete_device(struct chnl_net *dev)
+{
+	ASSERT_RTNL();
+	if (dev->netdev)
+		unregister_netdevice(dev->netdev);
+	return 0;
+}
+
+/*
+ * rtnl_link_ops fill_info: dump the connection id (the same value is
+ * reported for both IPv4 and IPv6 attributes) and the loopback flag.
+ * Returns -EMSGSIZE if the skb runs out of attribute room.
+ */
+static int ipcaif_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+	struct chnl_net *priv;
+	u8 loop;
+	priv = (struct chnl_net *)netdev_priv(dev);
+	NLA_PUT_U32(skb, IFLA_CAIF_IPV4_CONNID,
+		priv->config.u.dgm.connection_id);
+	NLA_PUT_U32(skb, IFLA_CAIF_IPV6_CONNID,
+		priv->config.u.dgm.connection_id);
+	loop = priv->config.type == CAIF_CHTY_DATAGRAM_LOOP;
+	NLA_PUT_U8(skb, IFLA_CAIF_LOOPBACK, loop);
+
+
+	return 0;
+nla_put_failure:
+	return -EMSGSIZE;
+
+}
+
+/*
+ * Parse the IFLA_CAIF_* netlink attributes into @parms. Attributes
+ * that are absent leave the corresponding fields untouched.
+ */
+static void caif_netlink_parms(struct nlattr *data[],
+				struct caif_channel_config *parms)
+{
+	if (!data) {
+		pr_warning("CAIF: %s: no params data found\n", __func__);
+		return;
+	}
+	if (data[IFLA_CAIF_IPV4_CONNID])
+		parms->u.dgm.connection_id =
+			nla_get_u32(data[IFLA_CAIF_IPV4_CONNID]);
+	/* An IPv6 attribute overwrites the IPv4 one if both are given. */
+	if (data[IFLA_CAIF_IPV6_CONNID])
+		parms->u.dgm.connection_id =
+			nla_get_u32(data[IFLA_CAIF_IPV6_CONNID]);
+	if (data[IFLA_CAIF_LOOPBACK]) {
+		if (nla_get_u8(data[IFLA_CAIF_LOOPBACK]))
+			parms->type = CAIF_CHTY_DATAGRAM_LOOP;
+		else
+			parms->type = CAIF_CHTY_DATAGRAM;
+	}
+}
+
+/*
+ * rtnl_link_ops newlink: apply netlink parameters, register the device
+ * and take a reference that is dropped again in chnl_net_uninit().
+ */
+//official-kernel-patch-cut-here
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32))
+//official-kernel-patch-resume-here
+static int ipcaif_newlink(struct net *src_net, struct net_device *dev,
+			struct nlattr *tb[], struct nlattr *data[])
+//official-kernel-patch-cut-here
+#else
+static int ipcaif_newlink(struct net_device *dev, struct nlattr *tb[],
+			struct nlattr *data[])
+#endif
+//official-kernel-patch-resume-here
+{
+	int err;
+	struct chnl_net *caifdev;
+	ASSERT_RTNL();
+	caifdev = netdev_priv(dev);
+	caif_netlink_parms(data, &caifdev->config);
+	err = register_netdevice(dev);
+	if (err) {
+		pr_warning("CAIF: %s(): device rtml registration failed\n",
+			__func__);
+		goto out;
+	}
+	dev_hold(dev);
+out:
+	return err;
+}
+
+/*
+ * rtnl_link_ops changelink: re-parse the parameters into the existing
+ * device's config and broadcast a netdev state change notification.
+ */
+static int ipcaif_changelink(struct net_device *dev, struct nlattr *tb[],
+			struct nlattr *data[])
+{
+	struct chnl_net *caifdev;
+	ASSERT_RTNL();
+	caifdev = netdev_priv(dev);
+	caif_netlink_parms(data, &caifdev->config);
+	netdev_state_change(dev);
+	return 0;
+}
+
+/*
+ * rtnl_link_ops get_size: worst-case attribute payload for fill_info.
+ * Fix vs. original: IFLA_CAIF_LOOPBACK is an NLA_U8, so reserve 1 byte
+ * rather than 2 (identical after netlink alignment, but consistent
+ * with the policy and fill_info).
+ */
+static size_t ipcaif_get_size(const struct net_device *dev)
+{
+	return
+		/* IFLA_CAIF_IPV4_CONNID */
+		nla_total_size(4) +
+		/* IFLA_CAIF_IPV6_CONNID */
+		nla_total_size(4) +
+		/* IFLA_CAIF_LOOPBACK */
+		nla_total_size(1) +
+		0;
+}
+
+/* Netlink attribute validation policy for the IFLA_CAIF_* attributes. */
+static const struct nla_policy ipcaif_policy[IFLA_CAIF_MAX + 1] = {
+	[IFLA_CAIF_IPV4_CONNID] = { .type = NLA_U32 },
+	[IFLA_CAIF_IPV6_CONNID] = { .type = NLA_U32 },
+	[IFLA_CAIF_LOOPBACK] = { .type = NLA_U8 }
+};
+
+
+/* Link ops so "ip link add ... type caif" can create these devices.
+ * Fix vs. original: dropped the redundant (size_t) cast on sizeof,
+ * which already has type size_t.
+ */
+static struct rtnl_link_ops ipcaif_link_ops __read_mostly = {
+	.kind = "caif",
+	.priv_size = sizeof(struct chnl_net),
+	.setup = ipcaif_net_init,
+	.maxtype = IFLA_CAIF_MAX,
+	.policy = ipcaif_policy,
+	.newlink = ipcaif_newlink,
+	.changelink = ipcaif_changelink,
+	.get_size = ipcaif_get_size,
+	.fill_info = ipcaif_fill_info,
+
+};
+
+/*
+ * Handle the CAIF net-device ioctls (SIOCCAIFNETNEW/SIOCCAIFNETREMOVE).
+ * @from_user_land selects copy_from_user() vs. plain memcpy() for @arg.
+ * Returns 0 on success or a negative errno.
+ *
+ * Fixes vs. original: rtnl_lock() is no longer leaked when the first
+ * copy_from_user() fails; @param is zero-initialised so SIOCCAIFNETNEW
+ * without ifru_data no longer reads uninitialised stack; a failed
+ * register_netdevice() no longer leaks the allocated net_device or
+ * leaves it linked on chnl_net_list.
+ */
+int chnl_net_ioctl(unsigned int cmd, unsigned long arg, bool from_user_land)
+{
+	struct chnl_net *priv;
+	int result = -1;
+	struct chnl_net *dev;
+	struct net_device *netdevptr;
+	int ret;
+	struct ifreq ifreq;
+	struct ifcaif_param param = { 0 };
+	rtnl_lock();
+	if (from_user_land) {
+		if (copy_from_user(&ifreq, (const void *)arg,
+				sizeof(ifreq))) {
+			rtnl_unlock();
+			return -EFAULT;
+		}
+	} else
+		memcpy(&ifreq, (void *)arg, sizeof(ifreq));
+
+	if (cmd == SIOCCAIFNETREMOVE) {
+		pr_debug("CAIF: %s(): %s\n", __func__, ifreq.ifr_name);
+		dev = find_device(ifreq.ifr_name);
+		if (!dev)
+			ret = -ENODEV;
+		else
+			ret = delete_device(dev);
+		rtnl_unlock();
+		return ret;
+	}
+
+	if (cmd != SIOCCAIFNETNEW) {
+		rtnl_unlock();
+		return -ENOIOCTLCMD;
+	}
+	if (ifreq.ifr_ifru.ifru_data != NULL) {
+		if (from_user_land) {
+			ret = copy_from_user(&param,
+					ifreq.ifr_ifru.ifru_data,
+					sizeof(param));
+			if (ret) {
+				rtnl_unlock();
+				return -EFAULT;
+			}
+		} else
+			memcpy(&param,
+				ifreq.ifr_ifru.ifru_data,
+				sizeof(param));
+		ifreq.ifr_ifru.ifru_data = &param;
+	}
+
+	netdevptr = alloc_netdev(sizeof(struct chnl_net),
+				ifreq.ifr_name, ipcaif_net_init);
+	if (!netdevptr) {
+		rtnl_unlock();
+		return -ENODEV;
+	}
+	dev_hold(netdevptr);
+	priv = (struct chnl_net *)netdev_priv(netdevptr);
+	priv->config.u.dgm.connection_id = param.ipv4_connid;
+
+	if (param.loop)
+		priv->config.type = CAIF_CHTY_DATAGRAM_LOOP;
+	else
+		priv->config.type = CAIF_CHTY_DATAGRAM;
+
+	result = register_netdevice(priv->netdev);
+	if (result < 0) {
+		pr_warning("CAIF: %s(): can't register netdev %s %d\n",
+			__func__, ifreq.ifr_name, result);
+		dev_put(netdevptr);
+		/* Never registered: uninit will not run, so unlink from
+		 * the list and free the device ourselves.
+		 */
+		robust_list_del(&priv->list_field);
+		free_netdev(netdevptr);
+		rtnl_unlock();
+		return -ENODEV;
+	}
+	pr_debug("CAIF: %s(): netdev channel open:%s\n", __func__, priv->name);
+	rtnl_unlock();
+	return 0;
+};
+
+/*
+ * Create (but do not register) a CAIF net_device pre-loaded with
+ * @config. Caller must hold the rtnl lock and owns the extra device
+ * reference taken here. Returns NULL on allocation failure.
+ */
+struct net_device *chnl_net_create(char *name,
+				struct caif_channel_config *config)
+{
+	struct net_device *dev;
+	ASSERT_RTNL();
+	dev = alloc_netdev(sizeof(struct chnl_net), name, ipcaif_net_init);
+	if (!dev)
+		return NULL;
+	((struct chnl_net *)netdev_priv(dev))->config = *config;
+	dev_hold(dev);
+	return dev;
+}
+EXPORT_SYMBOL(chnl_net_create);
+
+/*
+ * Module init: register the ioctl hook and the "caif" rtnl link type.
+ *
+ * Fix vs. original: on rtnl_link_register() failure we must NOT call
+ * rtnl_link_unregister() - the ops were never linked in, so that would
+ * corrupt the link-ops list. Instead undo the ioctl registration and
+ * propagate the error.
+ */
+static int __init chnl_init_module(void)
+{
+	int err;
+
+	caif_register_ioctl(chnl_net_ioctl);
+	err = rtnl_link_register(&ipcaif_link_ops);
+	if (err < 0) {
+		/* Undo the ioctl hook; do not unregister unlinked ops. */
+		caif_register_ioctl(NULL);
+		return err;
+	}
+	return 0;
+}
+
+/*
+ * Module exit: delete every remaining chnl_net device under the rtnl
+ * lock, then unregister the link ops and clear the ioctl hook
+ * (registering NULL acts as the unregister here).
+ */
+static void __exit chnl_exit_module(void)
+{
+	struct chnl_net *dev = NULL;
+	struct list_head *list_node;
+	struct list_head *_tmp;
+	rtnl_lock();
+	list_for_each_safe(list_node, _tmp, &chnl_net_list) {
+		dev = list_entry(list_node, struct chnl_net, list_field);
+		delete_device(dev);
+	}
+	rtnl_unlock();
+	rtnl_link_unregister(&ipcaif_link_ops);
+	caif_register_ioctl(NULL);
+}
+
+module_init(chnl_init_module);
+module_exit(chnl_exit_module);
diff --git a/net/caif/generic/cfcnfg.c b/net/caif/generic/cfcnfg.c
new file mode 100644
index 00000000000..14358c402ce
--- /dev/null
+++ b/net/caif/generic/cfcnfg.c
@@ -0,0 +1,538 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2009
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <net/caif/generic/cfglue.h>
+#include <net/caif/generic/caif_layer.h>
+#include <net/caif/generic/cfpkt.h>
+#include <net/caif/generic/cflst.h>
+#include <net/caif/generic/cfcnfg.h>
+#include <net/caif/generic/cfctrl.h>
+#include <net/caif/generic/cfmuxl.h>
+#include <net/caif/generic/cffrml.h>
+#include <net/caif/generic/cfserl.h>
+#include <net/caif/generic/cfsrvl.h>
+
+#include <linux/module.h>
+
+#define MAX_PHY_LAYERS 7
+#define PHY_NAME_LEN 20
+
+#define container_obj(layr) cfglu_container_of(layr, struct cfcnfg, layer)
+
+/* Information about CAIF physical interfaces held by Config Module in order
+ * to manage physical interfaces
+ */
+struct cfcnfg_phyinfo {
+	/* Pointer to the layer below the MUX (framing layer) */
+	struct layer *frm_layer;
+	/* Pointer to the lowest actual physical layer */
+	struct layer *phy_layer;
+	/* Unique identifier of the physical interface */
+	unsigned int id;
+	/* Preference of the physical in interface */
+	enum cfcnfg_phy_preference pref;
+
+	/* Reference count, number of channels using the device */
+	int phy_ref_count;
+
+	/* Information about the physical device */
+	struct dev_info dev_info;
+};
+
+/* Top-level CAIF configuration object. */
+struct cfcnfg {
+	/* This module's own stack layer. */
+	struct layer layer;
+	/* CAIF control channel layer. */
+	struct layer *ctrl;
+	/* Multiplexer sitting below the service layers. */
+	struct layer *mux;
+	/* Last physical interface id handed out. */
+	uint8 last_phyid;
+	/* Physical interface table, indexed by interface id. */
+	struct cfcnfg_phyinfo phy_layers[MAX_PHY_LAYERS];
+};
+
+static void cncfg_linkup_rsp(struct layer *layer, uint8 linkid,
+ enum cfctrl_srv serv, uint8 phyid,
+ struct layer *adapt_layer);
+static void cncfg_linkdestroy_rsp(struct layer *layer, uint8 linkid,
+ struct layer *client_layer);
+static void cncfg_reject_rsp(struct layer *layer, uint8 linkid,
+ struct layer *adapt_layer);
+static void cfctrl_resp_func(void);
+static void cfctrl_enum_resp(void);
+
+/*
+ * Allocate and wire up a CAIF configuration object: creates the MUX
+ * and control layers, installs the control-channel response callbacks
+ * and links control <-> mux. Returns NULL on allocation failure.
+ *
+ * Fix vs. original: declared with (void) - an empty parameter list in
+ * a C definition is an unprototyped declaration, not "no arguments".
+ */
+struct cfcnfg *cfcnfg_create(void)
+{
+	struct cfcnfg *this;
+	struct cfctrl_rsp *resp;
+	/* Initiate this layer */
+	this = cfglu_alloc(sizeof(struct cfcnfg));
+	if (!this) {
+		pr_warning("CAIF: %s(): Out of memory\n", __func__);
+		return NULL;
+	}
+	memset(this, 0, sizeof(struct cfcnfg));
+	this->mux = cfmuxl_create();
+	if (!this->mux)
+		goto out_of_mem;
+	this->ctrl = cfctrl_create();
+	if (!this->ctrl)
+		goto out_of_mem;
+	/* Initiate response functions */
+	resp = cfctrl_get_respfuncs(this->ctrl);
+	resp->enum_rsp = cfctrl_enum_resp;
+	resp->linkerror_ind = cfctrl_resp_func;
+	resp->linkdestroy_rsp = cncfg_linkdestroy_rsp;
+	resp->sleep_rsp = cfctrl_resp_func;
+	resp->wake_rsp = cfctrl_resp_func;
+	resp->restart_rsp = cfctrl_resp_func;
+	resp->radioset_rsp = cfctrl_resp_func;
+	resp->linksetup_rsp = cncfg_linkup_rsp;
+	resp->reject_rsp = cncfg_reject_rsp;
+
+	this->last_phyid = 1;
+
+	cfmuxl_set_uplayer(this->mux, this->ctrl, 0);
+	layer_set_dn(this->ctrl, this->mux);
+	layer_set_up(this->ctrl, this);
+	return this;
+out_of_mem:
+	pr_warning("CAIF: %s(): Out of memory\n", __func__);
+	/* NOTE(review): assumes cfglu_free(NULL) is a no-op - confirm. */
+	cfglu_free(this->mux);
+	cfglu_free(this->ctrl);
+	cfglu_free(this);
+	return NULL;
+}
+EXPORT_SYMBOL(cfcnfg_create);
+
+/* Free a configuration object from cfcnfg_create(); NULL is tolerated. */
+void cfcnfg_remove(struct cfcnfg *cfg)
+{
+	if (cfg) {
+		cfglu_free(cfg->mux);
+		cfglu_free(cfg->ctrl);
+		cfglu_free(cfg);
+	}
+}
+
+/* Intentionally-empty default handlers for control responses we ignore. */
+static void cfctrl_resp_func(void)
+{
+}
+
+static void cfctrl_enum_resp(void)
+{
+}
+
+/*
+ * Pick a physical interface: first try to match @phy_pref, otherwise
+ * fall back to any registered interface. Returns the interface's
+ * dev_info, or NULL when no interface is registered at all.
+ */
+struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg,
+				enum cfcnfg_phy_preference phy_pref)
+{
+	int i;
+
+	/* Try to match with specified preference */
+	for (i = 1; i < MAX_PHY_LAYERS; i++) {
+		if (cnfg->phy_layers[i].id == i &&
+			cnfg->phy_layers[i].pref == phy_pref &&
+			cnfg->phy_layers[i].frm_layer != NULL) {
+			caif_assert(cnfg->phy_layers != NULL);
+			caif_assert(cnfg->phy_layers[i].id == i);
+			return &cnfg->phy_layers[i].dev_info;
+		}
+	}
+	/* Otherwise just return something */
+	for (i = 1; i < MAX_PHY_LAYERS; i++) {
+		if (cnfg->phy_layers[i].id == i) {
+			caif_assert(cnfg->phy_layers != NULL);
+			caif_assert(cnfg->phy_layers[i].id == i);
+			return &cnfg->phy_layers[i].dev_info;
+		}
+	}
+
+	return NULL;
+}
+
+/*
+ * Look up the phyinfo entry registered under @phyid.
+ * Fix vs. original: return NULL (not the integer literal 0) from a
+ * pointer-returning function for the not-found case.
+ */
+static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo(struct cfcnfg *cnfg,
+				uint8 phyid)
+{
+	int i;
+
+	/* Try to match with specified preference */
+	for (i = 0; i < MAX_PHY_LAYERS; i++)
+		if (cnfg->phy_layers[i].frm_layer != NULL &&
+			cnfg->phy_layers[i].id == phyid)
+			return &cnfg->phy_layers[i];
+	return NULL;
+}
+
+/*
+ * Resolve a framing-layer name to its interface id.
+ * Returns the id, or 0 when no interface matches.
+ */
+int cfcnfg_get_named(struct cfcnfg *cnfg, char *name)
+{
+	int i;
+
+	/* Try to match with specified preference */
+	for (i = 0; i < MAX_PHY_LAYERS; i++) {
+		if (cnfg->phy_layers[i].frm_layer != NULL
+			&& strcmp(cnfg->phy_layers[i].frm_layer->name,
+				name) == 0) {
+			return cnfg->phy_layers[i].frm_layer->id;
+		}
+	}
+	return 0;
+}
+
+/*
+ * NOTE: What happens on destroy failure:
+ * 1a) No response - Too early
+ * This will not happen because enumerate has already
+ * completed.
+ * 1b) No response - FATAL
+ * Not handled, but this should be a CAIF PROTOCOL ERROR
+ * Modem error, response is really expected - this
+ * case is not really handled.
+ * 2) O/E-bit indicate error
+ * Ignored - this link is destroyed anyway.
+ * 3) Not able to match on request
+ * Not handled, but this should be a CAIF PROTOCOL ERROR
+ * 4) Link-Error - (no response)
+ * Not handled, but this should be a CAIF PROTOCOL ERROR
+ */
+
+/*
+ * Tear down the link used by @adap_layer: validates the layer, looks
+ * up its physical interface and issues a link-down request. The PHY
+ * reference count is dropped even on most error paths, and the PHY is
+ * told it is unused when the count hits zero.
+ * Returns 0 or a CFGLU_* error code.
+ */
+int cfcnfg_del_adapt_layer(struct cfcnfg *cnfg, struct layer *adap_layer)
+{
+	uint8 channel_id = 0;
+	int ret = 0;
+	struct cfcnfg_phyinfo *phyinfo = NULL;
+	uint8 phyid = 0;
+
+	caif_assert(adap_layer != NULL);
+	channel_id = adap_layer->id;
+	if (channel_id == 0) {
+		pr_err("CAIF: %s():adap_layer->id is 0\n", __func__);
+		ret = CFGLU_ENOTCONN;
+		goto end;
+	}
+
+	if (adap_layer->dn == NULL) {
+		pr_err("CAIF: %s():adap_layer->dn is NULL\n", __func__);
+		ret = CFGLU_ENODEV;
+		goto end;
+	}
+
+	/* NOTE(review): always true here - dn == NULL bailed out above. */
+	if (adap_layer->dn != NULL)
+		phyid = cfsrvl_getphyid(adap_layer->dn);
+
+	phyinfo = cfcnfg_get_phyinfo(cnfg, phyid);
+	if (phyinfo == NULL) {
+		pr_warning("CAIF: %s(): No interface to send disconnect to\n",
+			__func__);
+		ret = CFGLU_ENODEV;
+		goto end;
+	}
+
+	if (phyinfo->id != phyid
+		|| phyinfo->phy_layer->id != phyid
+		|| phyinfo->frm_layer->id != phyid) {
+
+		pr_err("CAIF: %s(): Inconsistency in phy registration\n",
+			__func__);
+		ret = CFGLU_EINVAL;
+		goto end;
+	}
+
+	ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer);
+
+end:
+	/* Drop the PHY reference; tell the PHY it is idle at zero. */
+	if (phyinfo != NULL && --phyinfo->phy_ref_count == 0 &&
+		phyinfo->phy_layer != NULL &&
+		phyinfo->phy_layer->modemcmd != NULL) {
+		phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
+					_CAIF_MODEMCMD_PHYIF_USELESS);
+	}
+	return ret;
+
+}
+EXPORT_SYMBOL(cfcnfg_del_adapt_layer);
+
+/*
+ * Control-channel response: a link has been destroyed. Removes the
+ * service from the MUX, notifies the client with DEINIT_RSP and, when
+ * the client is not stacked directly on the service layer, destroys
+ * the service layer.
+ */
+static void cncfg_linkdestroy_rsp(struct layer *layer, uint8 linkid,
+				struct layer *client_layer)
+{
+	struct cfcnfg *cnfg = container_obj(layer);
+	struct layer *servl;
+
+	/*
+	 * 1) Remove service from the MUX layer. The MUX must
+	 *    guarantee that no more payload sent "upwards" (receive)
+	 */
+	servl = cfmuxl_remove_uplayer(cnfg->mux, linkid);
+
+	if (servl == NULL) {
+		pr_err("CAIF: %s(): PROTOCOL ERROR "
+			"- Error removing service_layer Linkid(%d)",
+			__func__, linkid);
+		return;
+	}
+	caif_assert(linkid == servl->id);
+
+	if (servl != client_layer && servl->up != client_layer) {
+		pr_err("CAIF: %s(): Error removing service_layer "
+			"Linkid(%d) %p %p",
+			__func__, linkid, (void *) servl,
+			(void *) client_layer);
+		return;
+	}
+
+	/*
+	 * 2) DEINIT_RSP must guarantee that no more packets are transmitted
+	 *    from client (adap_layer) when it returns.
+	 */
+
+	if (servl->ctrlcmd == NULL) {
+		pr_err("CAIF: %s(): Error servl->ctrlcmd == NULL", __func__);
+		return;
+	}
+
+	servl->ctrlcmd(servl, CAIF_CTRLCMD_DEINIT_RSP, 0);
+
+	/* 3) It is now safe to destroy the service layer. */
+
+	if (client_layer != servl->up)
+		cfservl_destroy(servl);
+}
+
+/*
+ * NOTE: What happens on linksetup failure:
+ * 1a) No response - Too early
+ * This will not happen because enumerate is secured
+ * before using interface.
+ * 1b) No response - FATAL
+ * Not handled, but this should be a CAIF PROTOCOL ERROR
+ * Modem error, response is really expected - this case is
+ * not really handled.
+ * 2) O/E-bit indicate error
+ * Handled in cnfg_reject_rsp
+ * 3) Not able to match on request
+ * Not handled, but this should be a CAIF PROTOCOL ERROR
+ * 4) Link-Error - (no response)
+ * Not handled, but this should be a CAIF PROTOCOL ERROR
+ */
+
+/*
+ * Request setup of a new CAIF channel for @adap_layer on the physical
+ * interface selected by @param->phyid. Validates the adaptation layer,
+ * enumerates the interface and sends the link-setup request; the
+ * result arrives asynchronously via cncfg_linkup_rsp/cncfg_reject_rsp.
+ *
+ * NOTE(review): declared bool but returns CFGLU_* error codes and 0 -
+ * callers testing truthiness will read errors as "true". This should
+ * probably return int; confirm against the header declaration.
+ */
+bool
+cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
+			struct cfctrl_link_param *param,
+			struct layer *adap_layer)
+{
+	struct layer *frml;
+	if (adap_layer == NULL) {
+		pr_err("CAIF: %s(): adap_layer is zero", __func__);
+		return CFGLU_EINVAL;
+	}
+	if (adap_layer->receive == NULL) {
+		pr_err("CAIF: %s(): adap_layer->receive is NULL", __func__);
+		return CFGLU_EINVAL;
+	}
+	if (adap_layer->ctrlcmd == NULL) {
+		pr_err("CAIF: %s(): adap_layer->ctrlcmd == NULL", __func__);
+		return CFGLU_EINVAL;
+	}
+	frml = cnfg->phy_layers[param->phyid].frm_layer;
+	if (frml == NULL) {
+		pr_err("CAIF: %s(): Specified PHY type does not exist!",
+			__func__);
+		return CFGLU_ENODEV;
+	}
+	caif_assert(param->phyid == cnfg->phy_layers[param->phyid].id);
+	caif_assert(cnfg->phy_layers[param->phyid].frm_layer->id ==
+		param->phyid);
+	caif_assert(cnfg->phy_layers[param->phyid].phy_layer->id ==
+		param->phyid);
+	/* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */
+	cfctrl_enum_req(cnfg->ctrl, param->phyid);
+	cfctrl_linkup_request(cnfg->ctrl, param, adap_layer);
+	return 0;
+}
+EXPORT_SYMBOL(cfcnfg_add_adaptation_layer);
+
+/* Tell the client layer that its link-setup request was rejected. */
+static void cncfg_reject_rsp(struct layer *layer, uint8 linkid,
+			     struct layer *adapt_layer)
+{
+	if (adapt_layer == NULL)
+		return;
+	if (adapt_layer->ctrlcmd == NULL)
+		return;
+	adapt_layer->ctrlcmd(adapt_layer, CAIF_CTRLCMD_INIT_FAIL_RSP, 0);
+}
+
+/*
+ * Handle a successful link-setup response: bump the physical
+ * interface's use count, instantiate the service layer matching the
+ * negotiated channel type, wire it in between MUX and the client
+ * (adapt_layer), and finally signal CAIF_CTRLCMD_INIT_RSP upward.
+ */
+static void
+cncfg_linkup_rsp(struct layer *layer, uint8 linkid, enum cfctrl_srv serv,
+		 uint8 phyid, struct layer *adapt_layer)
+{
+	struct cfcnfg *cnfg = container_obj(layer);
+	struct layer *servicel = NULL;
+	struct cfcnfg_phyinfo *phyinfo;
+	if (adapt_layer == NULL) {
+		/* Response could not be paired with a queued request. */
+		pr_err("CAIF: %s(): PROTOCOL ERROR "
+		       "- LinkUp Request/Response did not match\n", __func__);
+		return;
+	}
+
+	caif_assert(cnfg != NULL);
+	caif_assert(phyid != 0);
+	phyinfo = &cnfg->phy_layers[phyid];
+	/* NOTE(review): phyinfo points into an array and can never be
+	 * NULL; this assert and the NULL tests below are redundant. */
+	caif_assert(phyinfo != NULL);
+	caif_assert(phyinfo->id == phyid);
+	caif_assert(phyinfo->phy_layer != NULL);
+	caif_assert(phyinfo->phy_layer->id == phyid);
+
+	/* First user of this PHY: tell the driver it is now in use. */
+	if (phyinfo != NULL &&
+	    phyinfo->phy_ref_count++ == 0 &&
+	    phyinfo->phy_layer != NULL &&
+	    phyinfo->phy_layer->modemcmd != NULL) {
+		caif_assert(phyinfo->phy_layer->id == phyid);
+		phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
+					     _CAIF_MODEMCMD_PHYIF_USEFULL);
+
+	}
+	adapt_layer->id = linkid;
+
+	/* Instantiate the service layer for the channel type. */
+	switch (serv) {
+	case CFCTRL_SRV_VEI:
+		servicel = cfvei_create(linkid, &phyinfo->dev_info);
+		break;
+	case CFCTRL_SRV_DATAGRAM:
+		servicel = cfdgml_create(linkid, &phyinfo->dev_info);
+		break;
+	case CFCTRL_SRV_RFM:
+		servicel = cfrfml_create(linkid, &phyinfo->dev_info);
+		break;
+	case CFCTRL_SRV_UTIL:
+		servicel = cfutill_create(linkid, &phyinfo->dev_info);
+		break;
+	case CFCTRL_SRV_VIDEO:
+		servicel = cfvidl_create(linkid, &phyinfo->dev_info);
+		break;
+	case CFCTRL_SRV_DBG:
+		/* Debug channels talk to the MUX directly. */
+		servicel = adapt_layer;
+		break;
+	default:
+		pr_err("CAIF: %s(): Protocol error. "
+		       "Link setup response - unknown channel type\n",
+		       __func__);
+		return;
+	}
+	if (!servicel) {
+		/* NOTE(review): phy_ref_count was already incremented and
+		 * is not rolled back on this failure path - TODO confirm. */
+		pr_warning("CAIF: %s(): Out of memory\n", __func__);
+		return;
+	}
+	layer_set_dn(servicel, cnfg->mux);
+
+
+	cfmuxl_set_uplayer(cnfg->mux, servicel, linkid);
+	if (servicel != adapt_layer) {
+		layer_set_up(servicel, adapt_layer);
+		layer_set_dn(adapt_layer, servicel);
+	}
+	servicel->ctrlcmd(servicel, CAIF_CTRLCMD_INIT_RSP, 0);
+}
+
+/*
+ * Register a physical interface with the CAIF stack.
+ * Allocates a phy id (returned through *phyid; 0 means failure),
+ * creates the framing layer and - for CFPHYTYPE_FRAG - an intermediate
+ * serial de-fragmentation layer, then plugs the stack into the MUX:
+ *   mux <-> frml [<-> phy_driver] <-> phy_layer
+ * @fcs: enable frame checksum (forced on for FRAG)
+ * @stx: use start-of-frame extension (FRAG only)
+ */
+void
+cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
+		     void *dev, struct layer *phy_layer, uint16 *phyid,
+		     enum cfcnfg_phy_preference pref,
+		     bool fcs, bool stx)
+{
+	struct layer *frml;
+	struct layer *phy_driver = NULL;
+	int i;
+
+
+	/* Fast path: reuse last_phyid if free, else scan for any free
+	 * slot.  Id 0 is reserved to mean "none available". */
+	if (cnfg->phy_layers[cnfg->last_phyid].frm_layer == NULL) {
+		*phyid = cnfg->last_phyid;
+
+		/* range: * 1..(MAX_PHY_LAYERS-1) */
+		cnfg->last_phyid =
+		    (cnfg->last_phyid % (MAX_PHY_LAYERS - 1)) + 1;
+	} else {
+		*phyid = 0;
+		for (i = 1; i < MAX_PHY_LAYERS; i++) {
+			if (cnfg->phy_layers[i].frm_layer == NULL) {
+				*phyid = i;
+				break;
+			}
+		}
+	}
+	if (*phyid == 0) {
+		pr_err("CAIF: %s(): No Available PHY ID\n", __func__);
+		return;
+	}
+
+	switch (phy_type) {
+	case CFPHYTYPE_FRAG:
+		/* Fragmented links always use the frame checksum. */
+		fcs = true;
+		phy_driver =
+		    cfserl_create(CFPHYTYPE_FRAG, *phyid, stx);
+		if (!phy_driver) {
+			pr_warning("CAIF: %s(): Out of memory\n", __func__);
+			return;
+		}
+
+		break;
+	case CFPHYTYPE_CAIF:
+		phy_driver = NULL;
+		break;
+	default:
+		pr_err("CAIF: %s(): %d", __func__, phy_type);
+		return;
+		break;
+	}
+
+	phy_layer->id = *phyid;
+	cnfg->phy_layers[*phyid].pref = pref;
+	cnfg->phy_layers[*phyid].id = *phyid;
+	cnfg->phy_layers[*phyid].dev_info.id = *phyid;
+	cnfg->phy_layers[*phyid].dev_info.dev = dev;
+	cnfg->phy_layers[*phyid].phy_layer = phy_layer;
+	cnfg->phy_layers[*phyid].phy_ref_count = 0;
+	phy_layer->type = phy_type;
+	frml = cffrml_create(*phyid, fcs);
+	if (!frml) {
+		/* NOTE(review): the phy_layers[*phyid] entry filled in
+		 * above is not rolled back here, and a FRAG phy_driver
+		 * would leak - TODO confirm. */
+		pr_warning("CAIF: %s(): Out of memory\n", __func__);
+		return;
+	}
+	cnfg->phy_layers[*phyid].frm_layer = frml;
+	cfmuxl_set_dnlayer(cnfg->mux, frml, *phyid);
+	layer_set_up(frml, cnfg->mux);
+
+	if (phy_driver != NULL) {
+		phy_driver->id = *phyid;
+		layer_set_dn(frml, phy_driver);
+		layer_set_up(phy_driver, frml);
+		layer_set_dn(phy_driver, phy_layer);
+		layer_set_up(phy_layer, phy_driver);
+	} else {
+		layer_set_dn(frml, phy_layer);
+		layer_set_up(phy_layer, frml);
+	}
+}
+EXPORT_SYMBOL(cfcnfg_add_phy_layer);
+
+/*
+ * Unregister a physical interface: clear its bookkeeping slot, unplug
+ * the framing layer from the MUX and destroy it, and free any
+ * intermediate (fragmentation) layer that cfcnfg_add_phy_layer()
+ * inserted between framing and the driver.  Always returns CFGLU_EOK.
+ */
+int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct layer *phy_layer)
+{
+	struct layer *frml, *frml_dn;
+	uint16 phyid;
+	phyid = phy_layer->id;
+	caif_assert(phyid == cnfg->phy_layers[phyid].id);
+	caif_assert(phy_layer == cnfg->phy_layers[phyid].phy_layer);
+	caif_assert(phy_layer->id == phyid);
+	caif_assert(cnfg->phy_layers[phyid].frm_layer->id == phyid);
+
+	memset(&cnfg->phy_layers[phy_layer->id], 0,
+	       sizeof(struct cfcnfg_phyinfo));
+	frml = cfmuxl_remove_dnlayer(cnfg->mux, phy_layer->id);
+	frml_dn = frml->dn;
+	cffrml_set_uplayer(frml, NULL);
+	cffrml_set_dnlayer(frml, NULL);
+	cffrml_destroy(frml);
+
+	/* If a driver layer sat between framing and the device, it was
+	 * created by this module - free it here. */
+	if (phy_layer != frml_dn) {
+		layer_set_up(frml_dn, NULL);
+		layer_set_dn(frml_dn, NULL);
+		cfglu_free(frml_dn);
+	}
+	layer_set_up(phy_layer, NULL);
+	return CFGLU_EOK;
+}
+EXPORT_SYMBOL(cfcnfg_del_phy_layer);
diff --git a/net/caif/generic/cfctrl.c b/net/caif/generic/cfctrl.c
new file mode 100644
index 00000000000..e79eb1b512c
--- /dev/null
+++ b/net/caif/generic/cfctrl.c
@@ -0,0 +1,699 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2009
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <net/caif/generic/cfglue.h>
+#include <net/caif/generic/caif_layer.h>
+#include <net/caif/generic/cfpkt.h>
+#include <net/caif/generic/cfctrl.h>
+
+#define container_obj(layr) cfglu_container_of(layr, struct cfctrl, serv.layer)
+#define UTILITY_NAME_LENGTH 16
+#define CFPKT_CTRL_PKT_LEN 20
+
+
+#ifdef CAIF_NO_LOOP
+static inline int handle_loop(struct cfctrl *ctrl,
+ int cmd, struct cfpkt *pkt){
+ return CAIF_FAILURE;
+}
+#else
+static int handle_loop(struct cfctrl *ctrl,
+ int cmd, struct cfpkt *pkt);
+#endif
+static int cfctrl_recv(struct layer *layr, struct cfpkt *pkt);
+static void cfctrl_ctrlcmd(struct layer *layr, enum caif_ctrlcmd ctrl,
+ int phyid);
+
+
+/*
+ * Allocate and initialise the CAIF control-channel service layer.
+ * Returns the embedded struct layer (the first member of struct
+ * cfctrl, as container_obj() requires), or NULL on allocation failure.
+ */
+struct layer *cfctrl_create(void)
+{
+	/* No cast needed on the allocator's return value in C. */
+	struct cfctrl *this = cfglu_alloc(sizeof(struct cfctrl));
+	if (!this) {
+		pr_warning("CAIF: %s(): Out of memory\n", __func__);
+		return NULL;
+	}
+	caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
+	memset(this, 0, sizeof(*this));
+	cfglu_init_lock(this->info_list_lock);
+	cfglu_atomic_set(this->req_seq_no, 1);
+	cfglu_atomic_set(this->rsp_seq_no, 1);
+	this->serv.dev_info.id = 0xff;
+	this->serv.layer.id = 0;
+	this->serv.layer.receive = cfctrl_recv;
+	/* Bounded copy instead of sprintf. */
+	snprintf(this->serv.layer.name, CAIF_LAYER_NAME_SZ, "ctrl");
+	this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
+	cfglu_init_lock(this->loop_linkid_lock);
+	this->loop_linkid = 1;
+	return &this->serv.layer;
+}
+
+/*
+ * Compare two link-setup parameter blocks for equality.
+ * The common fields are compared first; the union member is then
+ * compared according to the link type.
+ */
+bool param_eq(struct cfctrl_link_param *p1, struct cfctrl_link_param *p2)
+{
+	bool eq =
+	    p1->linktype == p2->linktype &&
+	    p1->priority == p2->priority &&
+	    p1->phyid == p2->phyid &&
+	    p1->endpoint == p2->endpoint && p1->chtype == p2->chtype;
+
+	if (!eq)
+		return false;
+
+	/* Every case returns, so no statement after the switch is
+	 * reachable (the original had a dead "return false"). */
+	switch (p1->linktype) {
+	case CFCTRL_SRV_VEI:
+		return true;
+	case CFCTRL_SRV_DATAGRAM:
+		return p1->u.datagram.connid == p2->u.datagram.connid;
+	case CFCTRL_SRV_RFM:
+		return
+		    p1->u.rfm.connid == p2->u.rfm.connid &&
+		    strcmp(p1->u.rfm.volume, p2->u.rfm.volume) == 0;
+	case CFCTRL_SRV_UTIL:
+		return
+		    p1->u.utility.fifosize_kb == p2->u.utility.fifosize_kb
+		    && p1->u.utility.fifosize_bufs ==
+		    p2->u.utility.fifosize_bufs
+		    && strcmp(p1->u.utility.name, p2->u.utility.name) == 0
+		    && p1->u.utility.paramlen == p2->u.utility.paramlen
+		    && memcmp(p1->u.utility.params, p2->u.utility.params,
+			      p1->u.utility.paramlen) == 0;
+
+	case CFCTRL_SRV_VIDEO:
+		return p1->u.video.connid == p2->u.video.connid;
+	case CFCTRL_SRV_DBG:
+		return true;
+	case CFCTRL_SRV_DECM:
+		return false;
+	default:
+		return false;
+	}
+}
+
+/* Two pending requests match when the commands agree and, for link
+ * setup, the parameters match; otherwise the channel ids must agree. */
+bool cfctrl_req_eq(struct cfctrl_request_info *r1,
+		   struct cfctrl_request_info *r2)
+{
+	if (r1->cmd != r2->cmd)
+		return false;
+	return r1->cmd == CFCTRL_CMD_LINK_SETUP ?
+	    param_eq(&r1->param, &r2->param) :
+	    r1->channel_id == r2->channel_id;
+}
+
+/* Append a request to the tail of the pending-request list and stamp
+ * it with the next request sequence number (list lock held). */
+void cfctrl_insert_req(struct cfctrl *ctrl,
+		       struct cfctrl_request_info *req)
+{
+	struct cfctrl_request_info **pp;
+
+	cfglu_lock(ctrl->info_list_lock);
+	req->next = NULL;
+	cfglu_atomic_inc(ctrl->req_seq_no);
+	req->sequence_no = cfglu_atomic_read(ctrl->req_seq_no);
+	pp = &ctrl->first_req;
+	while (*pp != NULL)
+		pp = &(*pp)->next;
+	*pp = req;
+	cfglu_unlock(ctrl->info_list_lock);
+}
+
+/* Allocate, zero and enqueue a request record for @cmd on @linkid. */
+static void cfctrl_insert_req2(struct cfctrl *ctrl, enum cfctrl_cmd cmd,
+			       uint8 linkid, struct layer *user_layer)
+{
+	struct cfctrl_request_info *req = cfglu_alloc(sizeof(*req));
+	if (!req) {
+		pr_warning("CAIF: %s(): Out of memory\n", __func__);
+		return;
+	}
+	/* Zero the record so fields not set below (e.g. param, which
+	 * cfctrl_req_eq() may read) are deterministic; the original
+	 * left them uninitialised. */
+	memset(req, 0, sizeof(*req));
+	req->client_layer = user_layer;
+	req->cmd = cmd;
+	req->channel_id = linkid;
+	cfctrl_insert_req(ctrl, req);
+}
+
+/*
+ * Find the pending request matching *req, unlink it from the list and
+ * return it; the caller owns (and must free) the returned node.
+ * The sequence number of the matched request is recorded in
+ * rsp_seq_no so stale entries can be pruned later.
+ * Returns NULL if nothing matches.
+ */
+struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
+					      struct cfctrl_request_info *req)
+{
+	struct cfctrl_request_info *p;
+	struct cfctrl_request_info *ret;
+
+	cfglu_lock(ctrl->info_list_lock);
+	if (ctrl->first_req == NULL) {
+		cfglu_unlock(ctrl->info_list_lock);
+		return NULL;
+	}
+
+	if (cfctrl_req_eq(req, ctrl->first_req)) {
+		ret = ctrl->first_req;
+		cfglu_atomic_set(ctrl->rsp_seq_no,
+				 ctrl->first_req->sequence_no);
+		ctrl->first_req = ctrl->first_req->next;
+		cfglu_unlock(ctrl->info_list_lock);
+		return ret;
+	}
+
+	pr_warning("CAIF: %s(): Requests are not received in order/matching\n",
+		   __func__);
+
+	for (p = ctrl->first_req; p->next != NULL; p = p->next) {
+		if (cfctrl_req_eq(req, p->next)) {
+			ret = p->next;
+			cfglu_atomic_set(ctrl->rsp_seq_no,
+					 ret->sequence_no);
+			/* Unlink the node.  The original returned it
+			 * while leaving it linked, so the caller's free
+			 * left a dangling pointer in the list. */
+			p->next = ret->next;
+			ret->next = NULL;
+			cfglu_unlock(ctrl->info_list_lock);
+			return ret;
+		}
+	}
+	cfglu_unlock(ctrl->info_list_lock);
+	return NULL;
+}
+
+/*
+ * Free all queued requests whose sequence number is older than the
+ * last response seen (rsp_seq_no).
+ * The original loop advanced into the node it had just freed
+ * (use-after-free), never relinked the list around freed nodes, and
+ * compared the head against req_seq_no instead of rsp_seq_no; all
+ * three are fixed here.
+ */
+void cfctrl_prune_req(struct cfctrl *ctrl)
+{
+	struct cfctrl_request_info *p;
+	struct cfctrl_request_info *del;
+
+	cfglu_lock(ctrl->info_list_lock);
+
+	/* Drop stale nodes from the head of the list. */
+	while (ctrl->first_req != NULL &&
+	       ctrl->first_req->sequence_no <
+	       cfglu_atomic_read(ctrl->rsp_seq_no)) {
+		del = ctrl->first_req;
+		ctrl->first_req = del->next;
+		cfglu_free(del);
+	}
+
+	/* Then from the interior, keeping the list linked. */
+	p = ctrl->first_req;
+	while (p != NULL && p->next != NULL) {
+		if (p->next->sequence_no <
+		    cfglu_atomic_read(ctrl->rsp_seq_no)) {
+			del = p->next;
+			p->next = del->next;
+			cfglu_free(del);
+		} else {
+			p = p->next;
+		}
+	}
+	cfglu_unlock(ctrl->info_list_lock);
+}
+
+/* Return the response-callback table embedded in the control layer. */
+struct cfctrl_rsp *cfctrl_get_respfuncs(struct layer *layer)
+{
+	return &container_obj(layer)->res;
+}
+
+/* Attach the layer below the control channel. */
+void cfctrl_set_dnlayer(struct layer *this, struct layer *dn)
+{
+	this->dn = dn;
+}
+
+/* Attach the layer above the control channel. */
+void cfctrl_set_uplayer(struct layer *this, struct layer *up)
+{
+	this->up = up;
+}
+
+/* Prime a packet's payload info for the control channel: no header
+ * bytes yet, control channel id, control device info. */
+void init_info(struct payload_info *info, struct cfctrl *cfctrl)
+{
+	info->hdr_len = 0;
+	info->channel_id = cfctrl->serv.layer.id;
+	info->dev_info = &cfctrl->serv.dev_info;
+}
+
+/* Send an ENUM request on physical link @physlinkid; the packet is
+ * destroyed locally if the lower layer refuses it. */
+void cfctrl_enum_req(struct layer *layer, uint8 physlinkid)
+{
+	struct cfctrl *cfctrl = container_obj(layer);
+	struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+	int ret;
+
+	if (pkt == NULL) {
+		pr_warning("CAIF: %s(): Out of memory\n", __func__);
+		return;
+	}
+	caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
+	init_info(cfpkt_info(pkt), cfctrl);
+	cfpkt_info(pkt)->dev_info->id = physlinkid;
+	cfctrl->serv.dev_info.id = physlinkid;
+	cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM);
+	cfpkt_addbdy(pkt, physlinkid);
+	ret = cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+	if (ret < 0) {
+		pr_err("CAIF: %s(): Could not transmit enum message\n",
+		       __func__);
+		cfpkt_destroy(pkt);
+	}
+}
+
+/*
+ * Send a link-setup request for the channel described by @param on
+ * behalf of @user_layer.  A matching request record is queued so the
+ * asynchronous response can be paired with the requester.
+ */
+void cfctrl_linkup_request(struct layer *layer, struct cfctrl_link_param *param,
+			   struct layer *user_layer)
+{
+	struct cfctrl *cfctrl = container_obj(layer);
+	uint32 tmp32;
+	uint16 tmp16;
+	uint8 tmp8;
+	struct cfctrl_request_info *req;
+	int ret;
+	char utility_name[16];
+	struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+	if (!pkt) {
+		pr_warning("CAIF: %s(): Out of memory\n", __func__);
+		return;
+	}
+	cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP);
+	cfpkt_addbdy(pkt, (param->chtype << 4) + param->linktype);
+	cfpkt_addbdy(pkt, (param->priority << 3) + param->phyid);
+	cfpkt_addbdy(pkt, param->endpoint & 0x03);
+	/* Service-specific part of the request. */
+	switch (param->linktype) {
+	case CFCTRL_SRV_VEI:
+		break;
+	case CFCTRL_SRV_VIDEO:
+		cfpkt_addbdy(pkt, (uint8) param->u.video.connid);
+		break;
+	case CFCTRL_SRV_DBG:
+		break;
+	case CFCTRL_SRV_DATAGRAM:
+		tmp32 = cfglu_cpu_to_le32(param->u.datagram.connid);
+		cfpkt_add_body(pkt, &tmp32, 4);
+		break;
+	case CFCTRL_SRV_RFM:
+		/* Construct a frame, convert DatagramConnectionID to network
+		 * format long and copy it out...
+		 */
+		tmp32 = cfglu_cpu_to_le32(param->u.rfm.connid);
+		cfpkt_add_body(pkt, &tmp32, 4);
+		/* Add volume name, including zero termination... */
+		cfpkt_add_body(pkt, param->u.rfm.volume,
+			       strlen(param->u.rfm.volume) + 1);
+		break;
+	case CFCTRL_SRV_UTIL:
+		tmp16 = cfglu_cpu_to_le16(param->u.utility.fifosize_kb);
+		cfpkt_add_body(pkt, &tmp16, 2);
+		tmp16 = cfglu_cpu_to_le16(param->u.utility.fifosize_bufs);
+		cfpkt_add_body(pkt, &tmp16, 2);
+		/* Name field is fixed-width and zero-padded. */
+		memset(utility_name, 0, sizeof(utility_name));
+		strncpy(utility_name, param->u.utility.name,
+			UTILITY_NAME_LENGTH - 1);
+		cfpkt_add_body(pkt, utility_name, UTILITY_NAME_LENGTH);
+		tmp8 = param->u.utility.paramlen;
+		cfpkt_add_body(pkt, &tmp8, 1);
+		cfpkt_add_body(pkt, param->u.utility.params,
+			       param->u.utility.paramlen);
+		break;
+	default:
+		pr_warning("CAIF: %s():Request setup of bad link type = %d\n",
+			   __func__, param->linktype);
+	}
+	req = cfglu_alloc(sizeof(*req));
+	if (!req) {
+		pr_warning("CAIF: %s(): Out of memory\n", __func__);
+		/* The original leaked the packet on this path. */
+		cfpkt_destroy(pkt);
+		return;
+	}
+	memset(req, 0, sizeof(*req));
+	req->client_layer = user_layer;
+	req->cmd = CFCTRL_CMD_LINK_SETUP;
+	req->param = *param;
+	cfctrl_insert_req(cfctrl, req);
+	init_info(cfpkt_info(pkt), cfctrl);
+	cfpkt_info(pkt)->dev_info->id = param->phyid;
+	ret =
+	    cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+	if (ret < 0) {
+		pr_err("CAIF: %s(): Could not transmit linksetup request\n",
+		       __func__);
+		cfpkt_destroy(pkt);
+	}
+}
+
+/* Send a link-destroy request for @channelid on behalf of @client.
+ * Returns the transmit result, or CFGLU_ENOMEM if no packet could be
+ * allocated. */
+int cfctrl_linkdown_req(struct layer *layer, uint8 channelid,
+			struct layer *client)
+{
+	struct cfctrl *cfctrl = container_obj(layer);
+	struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+	int ret;
+
+	if (pkt == NULL) {
+		pr_warning("CAIF: %s(): Out of memory\n", __func__);
+		return CFGLU_ENOMEM;
+	}
+	/* Queue a record so the asynchronous response can be matched. */
+	cfctrl_insert_req2(cfctrl, CFCTRL_CMD_LINK_DESTROY, channelid, client);
+	cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
+	cfpkt_addbdy(pkt, channelid);
+	init_info(cfpkt_info(pkt), cfctrl);
+	ret = cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+	if (ret < 0) {
+		pr_err("CAIF: %s(): Could not transmit link-down request\n",
+		       __func__);
+		cfpkt_destroy(pkt);
+	}
+	return ret;
+}
+
+/* Build and send a control frame consisting of @cmd and, when
+ * @has_mode is set, one extra mode byte.  The packet is destroyed
+ * locally if the lower layer refuses it. */
+static void cfctrl_xmit_simple(struct cfctrl *cfctrl, uint8 cmd,
+			       int has_mode, uint8 mode)
+{
+	struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+	int ret;
+
+	if (pkt == NULL) {
+		pr_warning("CAIF: %s(): Out of memory\n", __func__);
+		return;
+	}
+	cfpkt_addbdy(pkt, cmd);
+	if (has_mode)
+		cfpkt_addbdy(pkt, mode);
+	init_info(cfpkt_info(pkt), cfctrl);
+	ret = cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+	if (ret < 0)
+		cfpkt_destroy(pkt);
+}
+
+/* Request the modem to enter sleep state. */
+void cfctrl_sleep_req(struct layer *layer)
+{
+	cfctrl_xmit_simple(container_obj(layer), CFCTRL_CMD_SLEEP, 0, 0);
+}
+
+/* Request the modem to wake up. */
+void cfctrl_wake_req(struct layer *layer)
+{
+	cfctrl_xmit_simple(container_obj(layer), CFCTRL_CMD_WAKE, 0, 0);
+}
+
+/* Ask the modem for the start (boot) reason. */
+void cfctrl_getstartreason_req(struct layer *layer)
+{
+	cfctrl_xmit_simple(container_obj(layer), CFCTRL_CMD_START_REASON,
+			   0, 0);
+}
+
+/* Request a new radio mode setting. */
+void cfctrl_setmode_req(struct layer *layer, uint8 mode)
+{
+	cfctrl_xmit_simple(container_obj(layer), CFCTRL_CMD_RADIO_SET,
+			   1, mode);
+}
+
+/*
+ * Receive handler for the CAIF control channel.  Parses the response
+ * command, pairs it with the queue of outstanding requests, and
+ * dispatches to the registered callbacks in cfctrl->res.
+ * The packet is always destroyed before returning.
+ * Returns 0 on success, -1 on protocol/parse error.
+ */
+static int cfctrl_recv(struct layer *layer, struct cfpkt *pkt)
+{
+	uint8 cmdrsp;
+	uint8 cmd;
+	int ret = -1;
+	uint16 tmp16;
+	uint8 len;
+	uint8 param[255];
+	uint8 linkid;
+	struct cfctrl *cfctrl = container_obj(layer);
+	struct cfctrl_request_info rsp, *req;
+
+
+	cfpkt_extr_head(pkt, &cmdrsp, 1);
+	cmd = cmdrsp & CFCTRL_CMD_MASK;
+	/* A frame without the response bit is only legal when the
+	 * loopback test mode accepts it. */
+	if (cmd != CFCTRL_CMD_LINK_ERR
+	    && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)) {
+		if (handle_loop(cfctrl, cmd, pkt) == CAIF_FAILURE) {
+			pr_info("CAIF: %s() CAIF Protocol error:"
+				"Response bit not set\n", __func__);
+			goto error;
+		}
+	}
+
+	switch (cmd) {
+	case CFCTRL_CMD_LINK_SETUP:
+		{
+			enum cfctrl_srv serv;
+			enum cfctrl_srv servtype;
+			uint8 endpoint;
+			uint8 physlinkid;
+			uint8 prio;
+			uint8 tmp;
+			uint32 tmp32;
+			uint8 *cp;
+			int i;
+			struct cfctrl_link_param linkparam;
+			memset(&linkparam, 0, sizeof(linkparam));
+
+			/* Byte 1: service type (low nibble) and channel
+			 * type (high nibble). */
+			cfpkt_extr_head(pkt, &tmp, 1);
+
+			serv = tmp & CFCTRL_SRV_MASK;
+			linkparam.linktype = serv;
+
+			servtype = tmp >> 4;
+			linkparam.chtype = servtype;
+
+			/* Byte 2: physical link id (3 bits) + priority. */
+			cfpkt_extr_head(pkt, &tmp, 1);
+			physlinkid = tmp & 0x07;
+			prio = tmp >> 3;
+
+			linkparam.priority = prio;
+			linkparam.phyid = physlinkid;
+			cfpkt_extr_head(pkt, &endpoint, 1);
+			linkparam.endpoint = endpoint & 0x03;
+
+			/* Service-specific part of the response. */
+			switch (serv) {
+			case CFCTRL_SRV_VEI:
+			case CFCTRL_SRV_DBG:
+				/* Link ID */
+				cfpkt_extr_head(pkt, &linkid, 1);
+				break;
+			case CFCTRL_SRV_VIDEO:
+				cfpkt_extr_head(pkt, &tmp, 1);
+				linkparam.u.video.connid = tmp;
+				/* Link ID */
+				cfpkt_extr_head(pkt, &linkid, 1);
+				break;
+
+			case CFCTRL_SRV_DATAGRAM:
+				cfpkt_extr_head(pkt, &tmp32, 4);
+				linkparam.u.datagram.connid =
+				    cfglu_le32_to_cpu(tmp32);
+				/* Link ID */
+				cfpkt_extr_head(pkt, &linkid, 1);
+				break;
+			case CFCTRL_SRV_RFM:
+				/* Construct a frame, convert
+				 * DatagramConnectionID
+				 * to network format long and copy it out...
+				 */
+				cfpkt_extr_head(pkt, &tmp32, 4);
+				linkparam.u.rfm.connid =
+				    cfglu_le32_to_cpu(tmp32);
+				/* NOTE(review): this copy is bounded only
+				 * by the packet length, not by
+				 * sizeof(linkparam.u.rfm.volume); a long
+				 * volume string from the peer could
+				 * overflow it - verify buffer size. */
+				cp = (uint8 *) linkparam.u.rfm.volume;
+				for (cfpkt_extr_head(pkt, &tmp, 1);
+				     cfpkt_more(pkt) && tmp != '\0';
+				     cfpkt_extr_head(pkt, &tmp, 1))
+					*cp++ = tmp;
+				*cp = '\0';
+
+				/* Link ID */
+				cfpkt_extr_head(pkt, &linkid, 1);
+
+				break;
+			case CFCTRL_SRV_UTIL:
+				/* Construct a frame, convert
+				 * DatagramConnectionID
+				 * to network format long and copy it out...
+				 */
+				/* Fifosize KB */
+				cfpkt_extr_head(pkt, &tmp16, 2);
+				linkparam.u.utility.fifosize_kb =
+				    cfglu_le16_to_cpu(tmp16);
+				/* Fifosize bufs */
+				cfpkt_extr_head(pkt, &tmp16, 2);
+				linkparam.u.utility.fifosize_bufs =
+				    cfglu_le16_to_cpu(tmp16);
+				/* name */
+				cp = (uint8 *) linkparam.u.utility.name;
+				caif_assert(sizeof(linkparam.u.utility.name)
+					     >= UTILITY_NAME_LENGTH);
+				for (i = 0;
+				     i < UTILITY_NAME_LENGTH
+				     && cfpkt_more(pkt); i++) {
+					cfpkt_extr_head(pkt, &tmp, 1);
+					*cp++ = tmp;
+				}
+				/* Length */
+				cfpkt_extr_head(pkt, &len, 1);
+				linkparam.u.utility.paramlen = len;
+				/* Param Data */
+				cp = linkparam.u.utility.params;
+				while (cfpkt_more(pkt) && len--) {
+					cfpkt_extr_head(pkt, &tmp, 1);
+					*cp++ = tmp;
+				}
+				/* Link ID */
+				cfpkt_extr_head(pkt, &linkid, 1);
+				/* Length */
+				cfpkt_extr_head(pkt, &len, 1);
+				/* Param Data (len <= 255, fits param[]) */
+				cfpkt_extr_head(pkt, &param, len);
+				break;
+			default:
+				pr_warning("CAIF: %s(): Request setup "
+					   "- invalid link type (%d)",
+					   __func__, serv);
+				goto error;
+			}
+			if (cfpkt_erroneous(pkt)) {
+				pr_err("CAIF: %s(): Packet is erroneous!",
+				       __func__);
+				goto error;
+			}
+			/* Pair the response with the queued request; the
+			 * requester's layer is handed to the callback. */
+			rsp.cmd = cmd;
+			rsp.param = linkparam;
+			req = cfctrl_remove_req(cfctrl, &rsp);
+
+			if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp)) {
+				pr_err("CAIF: %s(): Invalid O/E bit "
+				       "on CAIF control channel", __func__);
+				cfctrl->res.reject_rsp(cfctrl->serv.layer.up,
+						       0,
+						       req ? req->client_layer
+						       : NULL);
+			} else {
+				cfctrl->res.linksetup_rsp(cfctrl->serv.
+							  layer.up, linkid,
+							  serv, physlinkid,
+							  req ? req->
+							  client_layer : NULL);
+			}
+
+			if (req != NULL)
+				cfglu_free(req);
+		}
+		break;
+	case CFCTRL_CMD_LINK_DESTROY:
+		cfpkt_extr_head(pkt, &linkid, 1);
+		rsp.cmd = cmd;
+		rsp.channel_id = linkid;
+		req = cfctrl_remove_req(cfctrl, &rsp);
+		cfctrl->res.linkdestroy_rsp(cfctrl->serv.layer.up, linkid,
+					    req ? req->client_layer : NULL);
+		if (req != NULL)
+			cfglu_free(req);
+		break;
+	case CFCTRL_CMD_LINK_ERR:
+		pr_err("CAIF: %s(): Frame Error Indication received \n",
+		       __func__);
+		cfctrl->res.linkerror_ind();
+		break;
+	case CFCTRL_CMD_ENUM:
+		cfctrl->res.enum_rsp();
+		break;
+	case CFCTRL_CMD_SLEEP:
+		cfctrl->res.sleep_rsp();
+		break;
+	case CFCTRL_CMD_WAKE:
+		cfctrl->res.wake_rsp();
+		break;
+	case CFCTRL_CMD_LINK_RECONF:
+		cfctrl->res.restart_rsp();
+		break;
+	case CFCTRL_CMD_RADIO_SET:
+		cfctrl->res.radioset_rsp();
+		break;
+	default:
+		pr_err("CAIF: %s(): Unrecognized Control Frame\n", __func__);
+		goto error;
+		break;
+	}
+	ret = 0;
+error:
+	cfpkt_destroy(pkt);
+	return ret;
+}
+
+/* Control events toward the control channel itself: flow-off while
+ * requests are outstanding is only logged; all else is ignored. */
+static void cfctrl_ctrlcmd(struct layer *layr, enum caif_ctrlcmd ctrl,
+			   int phyid)
+{
+	struct cfctrl *this = container_obj(layr);
+
+	if (ctrl != _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND &&
+	    ctrl != CAIF_CTRLCMD_FLOW_OFF_IND)
+		return;
+
+	cfglu_lock(this->info_list_lock);
+	if (this->first_req != NULL) {
+		pr_warning("CAIF: %s(): Received flow off in "
+			   "control layer", __func__);
+	}
+	cfglu_unlock(this->info_list_lock);
+}
+
+#ifndef CAIF_NO_LOOP
+/*
+ * Loopback support for testing without a modem: fabricates the bytes a
+ * real modem would append to a request, so cfctrl_recv() can treat the
+ * looped frame as a response.  Allocates/releases loop link ids.
+ * Always returns CAIF_SUCCESS.
+ */
+int handle_loop(struct cfctrl *ctrl, int cmd, struct cfpkt *pkt)
+{
+	uint8 linkid, linktype, tmp;
+	switch (cmd) {
+	case CFCTRL_CMD_LINK_SETUP:
+		cfglu_lock(ctrl->loop_linkid_lock);
+		/* NOTE(review): if all ids 1..254 are taken the loop
+		 * ends with linkid == 255 without reserving it - TODO
+		 * confirm exhaustion handling. */
+		for (linkid = 0x1; linkid < 255; linkid++) {
+			if (!ctrl->loop_linkused[linkid]) {
+				ctrl->loop_linkused[linkid] = 1;
+				break;
+			}
+		}
+		cfpkt_add_trail(pkt, &linkid, 1);
+		cfglu_unlock(ctrl->loop_linkid_lock);
+		cfpkt_peek_head(pkt, &linktype, 1);
+		if (linktype == CFCTRL_SRV_UTIL) {
+			/* Utility responses carry two extra bytes -
+			 * presumably the length fields a modem adds;
+			 * confirm against the UTIL parse path. */
+			tmp = 0x01;
+			cfpkt_add_trail(pkt, &tmp, 1);
+			cfpkt_add_trail(pkt, &tmp, 1);
+		}
+		break;
+
+	case CFCTRL_CMD_LINK_DESTROY:
+		cfglu_lock(ctrl->loop_linkid_lock);
+		cfpkt_peek_head(pkt, &linkid, 1);
+		ctrl->loop_linkused[linkid] = 0;
+		cfglu_unlock(ctrl->loop_linkid_lock);
+		break;
+	default:
+		break;
+	}
+	return CAIF_SUCCESS;
+}
+#endif
diff --git a/net/caif/generic/cfdgml.c b/net/caif/generic/cfdgml.c
new file mode 100644
index 00000000000..f9b1bed7128
--- /dev/null
+++ b/net/caif/generic/cfdgml.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2009
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <net/caif/generic/cfglue.h>
+#include <net/caif/generic/caif_layer.h>
+#include <net/caif/generic/cfsrvl.h>
+#include <net/caif/generic/cfpkt.h>
+
+#define container_obj(layr) ((struct cfsrvl *) layr)
+
+#define DGM_CMD_BIT 0x80
+#define DGM_FLOW_OFF 0x81
+#define DGM_FLOW_ON 0x80
+#define DGM_CTRL_PKT_SIZE 1
+
+static int cfdgml_receive(struct layer *layr, struct cfpkt *pkt);
+static int cfdgml_transmit(struct layer *layr, struct cfpkt *pkt);
+
+/* Create a datagram service layer bound to @channel_id, or NULL on
+ * allocation failure. */
+struct layer *cfdgml_create(uint8 channel_id, struct dev_info *dev_info)
+{
+	struct cfsrvl *dgm = cfglu_alloc(sizeof(struct cfsrvl));
+
+	if (dgm == NULL) {
+		pr_warning("CAIF: %s(): Out of memory\n", __func__);
+		return NULL;
+	}
+	/* container_obj() casts the layer pointer back to cfsrvl. */
+	caif_assert(offsetof(struct cfsrvl, layer) == 0);
+	memset(dgm, 0, sizeof(struct cfsrvl));
+	cfsrvl_init(dgm, channel_id, dev_info);
+	dgm->layer.receive = cfdgml_receive;
+	dgm->layer.transmit = cfdgml_transmit;
+	snprintf(dgm->layer.name, CAIF_LAYER_NAME_SZ - 1, "dgm%d", channel_id);
+	dgm->layer.name[CAIF_LAYER_NAME_SZ - 1] = '\0';
+	return &dgm->layer;
+}
+
+/*
+ * Receive a datagram-service frame.  The first byte is either part of
+ * a data frame (command bit clear - a further 3 header bytes are
+ * stripped before delivery upward) or a flow-control command.
+ * The packet is consumed on every path except successful delivery.
+ */
+static int cfdgml_receive(struct layer *layr, struct cfpkt *pkt)
+{
+	uint8 cmd = -1;	/* wraps to 0xff: poison value if extraction fails */
+	uint8 dgmhdr[3];
+	int ret;
+	caif_assert(layr->up != NULL);
+	caif_assert(layr->receive != NULL);
+	caif_assert(layr->ctrlcmd != NULL);
+
+	if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
+		pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+		cfpkt_destroy(pkt);
+		return CFGLU_EPROTO;
+	}
+
+	/* Data frame: strip the remaining datagram header bytes and
+	 * hand the payload to the layer above. */
+	if ((cmd & DGM_CMD_BIT) == 0) {
+		if (cfpkt_extr_head(pkt, &dgmhdr, 3) < 0) {
+			pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+			cfpkt_destroy(pkt);
+			return CFGLU_EPROTO;
+		}
+		ret = layr->up->receive(layr->up, pkt);
+		return ret;
+	}
+
+	switch (cmd) {
+	case DGM_FLOW_OFF:	/* FLOW OFF */
+		layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0);
+		cfpkt_destroy(pkt);
+		return 0;
+	case DGM_FLOW_ON:	/* FLOW ON */
+		layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0);
+		cfpkt_destroy(pkt);
+		return 0;
+	default:
+		cfpkt_destroy(pkt);
+		pr_info("CAIF: %s(): Unknown datagram control %d (0x%x)\n",
+			__func__, cmd, cmd);
+		return CFGLU_EPROTO;
+	}
+}
+
+/* Send a datagram: prepend the 4-byte (all-zero) datagram header, tag
+ * routing info for the MUX, and hand the packet down.  On failure the
+ * header is stripped again so the caller gets its packet back. */
+static int cfdgml_transmit(struct layer *layr, struct cfpkt *pkt)
+{
+	struct cfsrvl *service = container_obj(layr);
+	struct payload_info *info;
+	uint32 zero = 0;
+	int ret;
+
+	if (!cfsrvl_ready(service, &ret))
+		return ret;
+
+	cfpkt_add_head(pkt, &zero, 4);
+
+	/* Routing info for the MUX layer. */
+	info = cfpkt_info(pkt);
+	info->channel_id = service->layer.id;
+	/* Record the CAIF header size before the payload so lower
+	 * layers can optimize alignment. */
+	info->hdr_len = 4;
+	info->dev_info = &service->dev_info;
+	ret = layr->dn->transmit(layr->dn, pkt);
+	if (ret < 0) {
+		uint32 discard;
+		cfpkt_extr_head(pkt, &discard, 4);
+	}
+	return ret;
+}
diff --git a/net/caif/generic/cffrml.c b/net/caif/generic/cffrml.c
new file mode 100644
index 00000000000..25c78a9dd77
--- /dev/null
+++ b/net/caif/generic/cffrml.c
@@ -0,0 +1,148 @@
+/*
+ * CAIF Framing Layer.
+ *
+ * Copyright (C) ST-Ericsson AB 2009
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#define container_obj(layr) cfglu_container_of(layr, struct cffrml, layer)
+
+#include <net/caif/generic/cfglue.h>
+#include <net/caif/generic/caif_layer.h>
+#include <net/caif/generic/cfpkt.h>
+#include <net/caif/generic/cffrml.h>
+
+/* Per-PHY framing layer state. */
+struct cffrml {
+	struct layer layer;	/* must be first; container_obj() relies on it */
+	bool dofcs;		/* !< FCS active */
+};
+
+static int cffrml_receive(struct layer *layr, struct cfpkt *pkt);
+static int cffrml_transmit(struct layer *layr, struct cfpkt *pkt);
+static void cffrml_ctrlcmd(struct layer *layr, enum caif_ctrlcmd ctrl,
+ int phyid);
+
+uint32 cffrml_rcv_error;
+uint32 cffrml_rcv_checsum_error;
+/*
+ * Create a framing layer for physical interface @phyid.
+ * @use_fcs selects whether a frame checksum is appended/verified.
+ * Returns NULL on allocation failure.
+ */
+struct layer *cffrml_create(uint16 phyid, bool use_fcs)
+{
+	struct cffrml *this = cfglu_alloc(sizeof(struct cffrml));
+	if (!this) {
+		pr_warning("CAIF: %s(): Out of memory\n", __func__);
+		return NULL;
+	}
+	caif_assert(offsetof(struct cffrml, layer) == 0);
+
+	/* Clear the whole object; the original cleared only
+	 * sizeof(struct layer), leaving trailing members untouched. */
+	memset(this, 0, sizeof(*this));
+	this->layer.receive = cffrml_receive;
+	this->layer.transmit = cffrml_transmit;
+	this->layer.ctrlcmd = cffrml_ctrlcmd;
+	snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "frm%d", phyid);
+	this->dofcs = use_fcs;
+	this->layer.id = phyid;
+	return &this->layer;
+}
+
+/* Attach the layer above the framing layer. */
+void cffrml_set_uplayer(struct layer *this, struct layer *up)
+{
+	this->up = up;
+}
+
+/* Attach the layer below the framing layer. */
+void cffrml_set_dnlayer(struct layer *this, struct layer *dn)
+{
+	this->dn = dn;
+}
+
+/* Checksum callback for cfpkt_iterate(); delegates to fcs16.
+ * FIXME: FCS should be moved to glue in order to use OS-specific
+ * solutions. */
+static uint16 cffrml_checksum(uint16 chks, void *buf, uint16 len)
+{
+	return fcs16(chks, buf, len);
+}
+
+/*
+ * Receive one framed packet: strip the little-endian length field,
+ * trim the packet to that length and - when FCS is enabled - verify
+ * the trailing checksum before passing the payload upward.
+ * The packet is consumed on every error path.
+ */
+static int cffrml_receive(struct layer *layr, struct cfpkt *pkt)
+{
+	uint16 tmp;
+	uint16 len;
+	uint16 hdrchks;
+	uint16 pktchks;
+	struct cffrml *this;
+	this = container_obj(layr);
+
+	cfpkt_extr_head(pkt, &tmp, 2);
+	len = cfglu_le16_to_cpu(tmp);
+
+	/* Subtract for FCS on length if FCS is not used. */
+	if (!this->dofcs)
+		len -= 2;
+
+	if (cfpkt_setlen(pkt, len) < 0) {
+		++cffrml_rcv_error;
+		pr_err("CAIF: %s():Framing length error (%d)\n", __func__, len);
+		cfpkt_destroy(pkt);
+		return CFGLU_EPKT;
+	}
+	/*
+	 * Don't do extract if FCS is false, rather do setlen - then we don't
+	 * get a cache-miss.
+	 */
+	if (this->dofcs) {
+		cfpkt_extr_trail(pkt, &tmp, 2);
+		hdrchks = cfglu_le16_to_cpu(tmp);
+		pktchks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
+		if (pktchks != hdrchks) {
+			cfpkt_add_trail(pkt, &tmp, 2);
+			++cffrml_rcv_error;
+			++cffrml_rcv_checsum_error;
+			pr_info("CAIF: %s(): Frame checksum error "
+				"(0x%x != 0x%x)\n", __func__, hdrchks, pktchks);
+			/* Consume the bad frame like the other error
+			 * paths do; it was previously leaked here. */
+			cfpkt_destroy(pkt);
+			return CFGLU_EFCS;
+		}
+	}
+	if (cfpkt_erroneous(pkt)) {
+		++cffrml_rcv_error;
+		pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+		cfpkt_destroy(pkt);
+		return CFGLU_EPKT;
+	}
+	return layr->up->receive(layr->up, pkt);
+}
+
+/*
+ * Frame an outgoing packet: append the FCS (or pad), then prepend the
+ * little-endian length field.  On transmit failure the length header
+ * is stripped again so the caller's packet is restored.
+ */
+static int cffrml_transmit(struct layer *layr, struct cfpkt *pkt)
+{
+	/* Was "int tmp": only two bytes are ever added/extracted, and
+	 * taking &tmp of a 32-bit int writes the wrong half of the
+	 * value on big-endian machines.  A 16-bit scratch is correct. */
+	uint16 tmp;
+	uint16 chks;
+	uint16 len;
+	int ret;
+	struct cffrml *this = container_obj(layr);
+	if (this->dofcs) {
+		chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
+		tmp = cfglu_cpu_to_le16(chks);
+		cfpkt_add_trail(pkt, &tmp, 2);
+	} else {
+		cfpkt_pad_trail(pkt, 2);
+	}
+	len = cfpkt_getlen(pkt);
+	tmp = cfglu_cpu_to_le16(len);
+	cfpkt_add_head(pkt, &tmp, 2);
+	cfpkt_info(pkt)->hdr_len += 2;
+	if (cfpkt_erroneous(pkt)) {
+		pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+		return CFGLU_EPROTO;
+	}
+	ret = layr->dn->transmit(layr->dn, pkt);
+	if (ret < 0) {
+		/* Remove header on faulty packet. */
+		cfpkt_extr_head(pkt, &tmp, 2);
+	}
+	return ret;
+}
+
+/* Forward control events upward, substituting this layer's phy id. */
+static void cffrml_ctrlcmd(struct layer *layr, enum caif_ctrlcmd ctrl,
+			   int phyid)
+{
+	struct layer *up = layr->up;
+
+	if (up->ctrlcmd != NULL)
+		up->ctrlcmd(up, ctrl, layr->id);
+}
diff --git a/net/caif/generic/cflist.c b/net/caif/generic/cflist.c
new file mode 100644
index 00000000000..1be38d48a14
--- /dev/null
+++ b/net/caif/generic/cflist.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2009
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <net/caif/generic/cfglue.h>
+#include <net/caif/generic/cfpkt.h>
+#include <net/caif/generic/cflst.h>
+
+/* Initialise an empty layer list. */
+void cflst_init(struct layer **lst)
+{
+	*lst = NULL;
+}
+
+/*
+ * Unlink @elem from the list and return it, or NULL if it is not on
+ * the list.  The original implementation ignored @elem and always
+ * popped the head, silently removing the wrong node.
+ */
+struct layer *cflst_remove(struct layer **lst, struct layer *elem)
+{
+	struct layer **pp;
+
+	for (pp = lst; *pp != NULL; pp = &(*pp)->next) {
+		if (*pp == elem) {
+			*pp = elem->next;
+			elem->next = NULL;
+			return elem;
+		}
+	}
+	return NULL;
+}
+
+/* Append @node at the tail of the list. */
+void cflst_insert(struct layer **lst, struct layer *node)
+{
+	struct layer **pp;
+
+	node->next = NULL;
+	for (pp = lst; *pp != NULL; pp = &(*pp)->next)
+		;
+	*pp = node;
+}
+
+/* Insert @node under key @id; rejects duplicate ids. */
+int cflst_put(struct layer **lst, uint8 id, struct layer *node)
+{
+	if (cflst_get(lst, id) != NULL) {
+		pr_err("CAIF: %s(): cflst_put duplicate key\n", __func__);
+		return CFGLU_EINVAL;
+	}
+	node->id = id;
+	cflst_insert(lst, node);
+	return CFGLU_EOK;
+}
+
+/* Look up the node with id @id; NULL if absent. */
+struct layer *cflst_get(struct layer * *lst, uint8 id)
+{
+	struct layer *it;
+
+	for (it = *lst; it != NULL; it = it->next) {
+		if (it->id == id)
+			return it;
+	}
+	return NULL;
+}
+
+/* Unlink and return the node with id @id, or NULL if absent.  A
+ * pointer-to-pointer walk handles head and interior nodes uniformly. */
+struct layer *cflst_del(struct layer * *lst, uint8 id)
+{
+	struct layer **pp;
+	struct layer *node;
+
+	for (pp = lst; *pp != NULL; pp = &(*pp)->next) {
+		if ((*pp)->id == id) {
+			node = *pp;
+			*pp = node->next;
+			node->next = NULL;
+			return node;
+		}
+	}
+	return NULL;
+}
diff --git a/net/caif/generic/cfmuxl.c b/net/caif/generic/cfmuxl.c
new file mode 100644
index 00000000000..f68ad8ead1d
--- /dev/null
+++ b/net/caif/generic/cfmuxl.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2009
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <net/caif/generic/cfglue.h>
+#include <net/caif/generic/cfpkt.h>
+#include <net/caif/generic/cflst.h>
+#include <net/caif/generic/cfmuxl.h>
+#include <net/caif/generic/cfsrvl.h>
+#include <net/caif/generic/cffrml.h>
+
+#define container_obj(layr) cfglu_container_of(layr, struct cfmuxl, layer)
+
+#define CAIF_CTRL_CHANNEL 0
+#define UP_CACHE_SIZE 8
+#define DN_CACHE_SIZE 8
+
+struct cfmuxl {
+ struct layer layer;
+ struct layer *up_cache[UP_CACHE_SIZE];
+ struct layer *dn_cache[DN_CACHE_SIZE];
+ /*
+ * Set when inserting or removing downwards layers.
+ */
+ cfglu_lock_t transmit_lock;
+
+ /*
+ * Set when inserting or removing upwards layers.
+ */
+ cfglu_lock_t receive_lock;
+
+};
+
+static int cfmuxl_receive(struct layer *layr, struct cfpkt *pkt);
+static int cfmuxl_transmit(struct layer *layr, struct cfpkt *pkt);
+static void cfmuxl_ctrlcmd(struct layer *layr, enum caif_ctrlcmd ctrl,
+ int phyid);
+static struct layer *get_up(struct cfmuxl *muxl, int id);
+
+struct layer *cfmuxl_create()
+{
+ struct cfmuxl *this = cfglu_alloc(sizeof(struct cfmuxl));
+ if (!this) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ memset(this, 0, sizeof(*this));
+ this->layer.receive = cfmuxl_receive;
+ this->layer.transmit = cfmuxl_transmit;
+ this->layer.ctrlcmd = cfmuxl_ctrlcmd;
+ cfglu_init_lock(this->transmit_lock);
+ cfglu_init_lock(this->receive_lock);
+ snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "mux");
+ return &this->layer;
+}
+
+int cfmuxl_set_uplayer(struct layer *layr, struct layer *up, uint8 linkid)
+{
+ int res;
+ struct cfmuxl *muxl = container_obj(layr);
+ cfglu_lock(muxl->receive_lock);
+ res = cflst_put(&muxl->layer.up, linkid, up);
+ cfglu_unlock(muxl->receive_lock);
+ return res;
+}
+
+bool cfmuxl_is_phy_inuse(struct layer *layr, uint8 phyid)
+{
+ struct layer *p;
+ struct cfmuxl *muxl = container_obj(layr);
+ bool match = false;
+ cfglu_lock(muxl->receive_lock);
+
+ for (p = layr->up; p != NULL; p = p->next) {
+ if (cfsrvl_phyid_match(p, phyid)) {
+ match = true;
+ break;
+ }
+ }
+ cfglu_unlock(muxl->receive_lock);
+ return match;
+}
+
+uint8 cfmuxl_get_phyid(struct layer *layr, uint8 channel_id)
+{
+ struct layer *up;
+ int phyid;
+ struct cfmuxl *muxl = container_obj(layr);
+ cfglu_lock(muxl->receive_lock);
+ up = get_up(muxl, channel_id);
+ if (up != NULL)
+ phyid = cfsrvl_getphyid(up);
+ else
+ phyid = 0;
+ cfglu_unlock(muxl->receive_lock);
+ return phyid;
+}
+
+int cfmuxl_set_dnlayer(struct layer *layr, struct layer *dn, uint8 phyid)
+{
+ int ret;
+ struct cfmuxl *muxl = (struct cfmuxl *) layr;
+ cfglu_lock(muxl->transmit_lock);
+ ret = cflst_put(&muxl->layer.dn, phyid, dn);
+ cfglu_unlock(muxl->transmit_lock);
+ return ret;
+}
+
+struct layer *cfmuxl_remove_dnlayer(struct layer *layr, uint8 phyid)
+{
+ struct cfmuxl *muxl = container_obj(layr);
+ struct layer *dn;
+ cfglu_lock(muxl->transmit_lock);
+ memset(muxl->dn_cache, 0, sizeof(muxl->dn_cache));
+ dn = cflst_del(&muxl->layer.dn, phyid);
+ caif_assert(dn != NULL);
+ cfglu_unlock(muxl->transmit_lock);
+ return dn;
+}
+
+/* Invariant: lock is taken */
+static struct layer *get_up(struct cfmuxl *muxl, int id)
+{
+ struct layer *up;
+ int idx = id % UP_CACHE_SIZE;
+ up = muxl->up_cache[idx];
+ if (up == NULL || up->id != id) {
+ up = cflst_get(&muxl->layer.up, id);
+ muxl->up_cache[idx] = up;
+ }
+ return up;
+}
+
+/* Invariant: lock is taken */
+static struct layer *get_dn(struct cfmuxl *muxl, struct dev_info *dev_info)
+{
+ struct layer *dn;
+ int idx = dev_info->id % DN_CACHE_SIZE;
+ dn = muxl->dn_cache[idx];
+ if (dn == NULL || dn->id != dev_info->id) {
+ dn = cflst_get(&muxl->layer.dn, dev_info->id);
+ muxl->dn_cache[idx] = dn;
+ }
+ return dn;
+}
+
+struct layer *cfmuxl_remove_uplayer(struct layer *layr, uint8 id)
+{
+ struct layer *up;
+ struct cfmuxl *muxl = container_obj(layr);
+ cfglu_lock(muxl->receive_lock);
+ memset(muxl->up_cache, 0, sizeof(muxl->up_cache));
+ up = cflst_del(&muxl->layer.up, id);
+ cfglu_unlock(muxl->receive_lock);
+ return up;
+}
+
+static int cfmuxl_receive(struct layer *layr, struct cfpkt *pkt)
+{
+ int ret;
+ struct cfmuxl *muxl = container_obj(layr);
+ uint8 id;
+ struct layer *up;
+ if (cfpkt_extr_head(pkt, &id, 1) < 0) {
+ pr_err("CAIF: %s(): erroneous Caif Packet\n", __func__);
+ cfpkt_destroy(pkt);
+ return CFGLU_EPKT;
+ }
+
+ up = get_up(muxl, id);
+ if (up == NULL) {
+ pr_info("CAIF: %s():Received data on unknown link ID = %d "
+ "(0x%x) up == NULL", __func__, id, id);
+ cfpkt_destroy(pkt);
+ /*
+ * Don't return ERROR, since modem misbehaves and sends out
+ * flow before linksetup response.
+ */
+ return /* CFGLU_EPROT; */ CFGLU_EOK;
+ }
+
+ ret = up->receive(up, pkt);
+
+
+ return ret;
+}
+
+static int cfmuxl_transmit(struct layer *layr, struct cfpkt *pkt)
+{
+ int ret;
+ struct cfmuxl *muxl = container_obj(layr);
+ uint8 linkid;
+ struct layer *dn;
+ struct payload_info *info = cfpkt_info(pkt);
+ dn = get_dn(muxl, cfpkt_info(pkt)->dev_info);
+ if (dn == NULL) {
+ pr_warning("CAIF: %s(): Send data on unknown phy "
+ "ID = %d (0x%x)\n",
+ __func__, info->dev_info->id, info->dev_info->id);
+ return CFGLU_ENOTCONN;
+ }
+ info->hdr_len += 1;
+ linkid = info->channel_id;
+ cfpkt_add_head(pkt, &linkid, 1);
+ ret = dn->transmit(dn, pkt);
+ if (ret < 0) {
+ /* Remove MUX protocol header upon error. */
+ cfpkt_extr_head(pkt, &linkid, 1);
+ }
+
+ return ret;
+}
+
+static void cfmuxl_ctrlcmd(struct layer *layr, enum caif_ctrlcmd ctrl,
+ int phyid)
+{
+ struct layer *p;
+ struct cfmuxl *muxl = container_obj(layr);
+ for (p = muxl->layer.up; p != NULL; p = p->next) {
+ if (cfsrvl_phyid_match(p, phyid))
+ p->ctrlcmd(p, ctrl, phyid);
+ }
+}
diff --git a/net/caif/generic/cfpkt_plain.c b/net/caif/generic/cfpkt_plain.c
new file mode 100644
index 00000000000..906df250adf
--- /dev/null
+++ b/net/caif/generic/cfpkt_plain.c
@@ -0,0 +1,553 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2009
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <net/caif/generic/cfglue.h>
+#include <net/caif/generic/cfpkt.h>
+
+/* NOTE: Excluding physical layer, max is 10 bytes. Physical layer is
+ * uncertain.
+ */
+#define PKT_PREFIX 16
+#define PKT_POSTFIX 2
+
+/* Minimum size of payload buffer */
+#define PKT_MINSIZE 128
+
+#define MAGIC_VALUE 0xbaadf00d
+/* Return closest 32bit aligned address (by adding) */
+#define ALIGN32(p) (((((uint32)p)&0x3) == 0) ?\
+ ((uint32) p) :\
+ (((uint32)p) + (0x4 - (((uint32)p)&0x3))))
+
+/* Do division by 0 on failure - CRASH */
+#define CHECK_MAGIC(pkt) \
+ caif_assert(((pkt) != NULL && \
+ ((pkt)->magic1 == MAGIC_VALUE && \
+ *(pkt)->magic2 == MAGIC_VALUE)))
+
+/* Memory Layout:
+ * Magic2
+ * |   Buffer starts at start of page
+ * |   |                 Pad until next 32bit aligned offset
+ * |   |                 |   Magic1
+ * | | | | Packet struct starts on first 32bit
+ * | | | | | aligned offset after end of packet
+ * V V V V V
+ * +---+-----------------+---+---+-----------------+
+ * |M2 | packet buffer ..|pad|M1 | struct cfpkt |
+ * +---+-----------------+---+---+-----------------+
+ * ^ ^
+ * | |
+ * | cfpkt._end_
+ * cfpkt._head_
+ */
+
+struct _payload {
+ uint32 magic2;
+ uint8 buf[1];
+};
+struct cfpkt {
+ uint32 magic1;
+ uint32 *magic2; /* This will point to location before _head_ */
+ struct payload_info info;
+ void *blob;
+ struct cfpkt *next;
+ const uint8 *_head_; /* Start of buffer, i.e. first legal
+ * pos for _data_
+ */
+ uint8 *_data_; /* Start of payload */
+ uint8 *_tail_; /* End of payload data */
+ uint8 *_end_; /* End of buffer, i.e. last legal pos
+ * for _tail_
+ */
+};
+
+#define PKT_ERROR(pkt, errmsg) do {\
+ pkt->_data_ = pkt->_tail_ = pkt->_end_ = (uint8 *)pkt->_head_;\
+ pr_err(errmsg); } while (0)
+
+struct cfpktq {
+ struct cfpkt *head;
+ cfglu_lock_t qlock;
+ int count;
+};
+
+cfglu_atomic_t cfpkt_packet_count;
+EXPORT_SYMBOL(cfpkt_packet_count);
+
+struct cfpkt *cfpkt_create_pfx(uint16 len, uint16 pfx)
+{
+ int pldlen;
+ int bloblen;
+ struct cfpkt *pkt;
+ void *blob;
+ struct _payload *pld;
+ uint8 *pktstruct;
+ void *blobend, *structend;
+ cfglu_atomic_inc(cfpkt_packet_count);
+
+ /* (1) Compute payload length */
+ pldlen = len + pfx;
+ if (pldlen < PKT_MINSIZE)
+ pldlen = PKT_MINSIZE;
+ /* Make room for Magic before & after payload */
+ pldlen += 2 * sizeof(uint32);
+ pldlen = ALIGN32(pldlen);
+
+ /* (2) Compute blob length, payload + packet struct */
+ bloblen = sizeof(struct _payload) + pldlen + sizeof(struct cfpkt);
+
+ bloblen = ALIGN32(bloblen);
+
+ /* (3) Allocate the blob */
+ blob = cfglu_alloc(bloblen);
+
+ blobend = (uint8 *) blob + bloblen;
+
+ /* Initialize payload struct */
+ pld = (struct _payload *) blob;
+ pld->magic2 = MAGIC_VALUE;
+
+ /* Initialize packet struct */
+ pktstruct = pld->buf + pldlen;
+ pktstruct = (uint8 *) ALIGN32(pktstruct);
+ structend = pktstruct + sizeof(struct cfpkt);
+ memset(pktstruct, 0, sizeof(struct cfpkt));
+ caif_assert(structend <= blobend);
+ pkt = (struct cfpkt *) pktstruct;
+ pkt->blob = blob;
+ pkt->_end_ = &pld->buf[pldlen];
+ pkt->_head_ = &pld->buf[0];
+ pkt->_data_ = (uint8 *) pkt->_head_ + pfx;
+ pkt->_tail_ = pkt->_data_;
+
+ pkt->magic1 = MAGIC_VALUE;
+ pkt->magic2 = (uint32 *) &pld->buf[pldlen];
+ *pkt->magic2 = MAGIC_VALUE;
+ CHECK_MAGIC(pkt);
+ return pkt;
+}
+
+struct cfpkt *cfpkt_create(uint16 len)
+{
+ return cfpkt_create_pfx(len, PKT_PREFIX);
+}
+
+void cfpkt_destroy(struct cfpkt *pkt)
+{
+ CHECK_MAGIC(pkt);
+ cfglu_atomic_dec(cfpkt_packet_count);
+ caif_assert(cfglu_atomic_read(cfpkt_packet_count) >= 0);
+ cfglu_free(pkt->blob);
+}
+
+bool cfpkt_more(struct cfpkt *pkt)
+{
+ CHECK_MAGIC(pkt);
+ return pkt->_data_ < pkt->_tail_;
+}
+
+int cfpkt_extr_head(struct cfpkt *pkt, void *dta, uint16 len)
+{
+ register int i;
+ uint8 *data = dta;
+ caif_assert(data != NULL);
+ CHECK_MAGIC(pkt);
+ if (pkt->_data_ + len > pkt->_tail_) {
+ PKT_ERROR(pkt,
+ "cfpkt_extr_head would read beyond end of packet\n");
+ return CFGLU_EPKT;
+ }
+ for (i = 0; i < len; i++) {
+ data[i] = *pkt->_data_;
+ pkt->_data_++;
+ }
+ CHECK_MAGIC(pkt);
+ return CFGLU_EOK;
+}
+
+int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, uint16 len)
+{
+ int i;
+ uint8 *data = dta;
+ caif_assert(data != NULL);
+ CHECK_MAGIC(pkt);
+ if (pkt->_data_ + len > pkt->_tail_) {
+ PKT_ERROR(pkt,
+ "cfpkt_extr_trail would read beyond start of packet\n");
+ return CFGLU_EPKT;
+ }
+ data += len;
+ for (i = 0; i < len; i++) {
+ data--;
+ pkt->_tail_--;
+ *data = *pkt->_tail_;
+ }
+ CHECK_MAGIC(pkt);
+ return CFGLU_EOK;
+}
+
+char *cfpkt_log_pkt(struct cfpkt *pkt, char *buf, int buflen)
+{
+ char *p = buf;
+ int i;
+ sprintf(buf, " pkt:%p len:%d {%d,%d} data: [",
+ (void *) pkt,
+ pkt->_tail_ - pkt->_data_,
+ pkt->_data_ - pkt->_head_, pkt->_tail_ - pkt->_head_);
+ p = buf + strlen(buf);
+
+ for (i = 0; i < pkt->_tail_ - pkt->_data_; i++) {
+ if (p > buf + buflen - 10) {
+ sprintf(p, "...");
+ p = buf + strlen(buf);
+ break;
+ }
+ sprintf(p, "%02x,", pkt->_data_[i]);
+ p = buf + strlen(buf);
+ }
+ sprintf(p, "]");
+ return buf;
+}
+
+int cfpkt_add_body(struct cfpkt *pkt, const void *dta, uint16 len)
+{
+ register int i;
+ const uint8 *data = dta;
+ caif_assert(data != NULL);
+ CHECK_MAGIC(pkt);
+ if (pkt->_tail_ + len > pkt->_end_) {
+ PKT_ERROR(pkt,
+ "cfpkt_add_body would write beyond end of packet\n");
+ return CFGLU_EPKT;
+ }
+
+ for (i = 0; i < len; i++) {
+ *pkt->_tail_ = data[i];
+ pkt->_tail_++;
+ }
+ CHECK_MAGIC(pkt);
+ return CFGLU_EOK;
+}
+
+int cfpkt_addbdy(struct cfpkt *pkt, uint8 data)
+{
+ CHECK_MAGIC(pkt);
+ return cfpkt_add_body(pkt, &data, 1);
+}
+
+int cfpkt_add_head(struct cfpkt *pkt, const void *dta, uint16 len)
+{
+ register int i;
+ const uint8 *data = dta;
+ caif_assert(data != NULL);
+ CHECK_MAGIC(pkt);
+ if (pkt->_data_ - len < pkt->_head_) {
+ PKT_ERROR(pkt, "cfpkt_add_head: write beyond start of packet\n");
+ return CFGLU_EPKT;
+ }
+ for (i = len - 1; i >= 0; i--) {
+ --pkt->_data_;
+ *pkt->_data_ = data[i];
+ }
+ CHECK_MAGIC(pkt);
+ return CFGLU_EOK;
+}
+
+int cfpkt_add_trail(struct cfpkt *pkt, const void *data, uint16 len)
+{
+ CHECK_MAGIC(pkt);
+ caif_assert(data != NULL);
+ return cfpkt_add_body(pkt, data, len);
+
+}
+
+uint16 cfpkt_iterate(struct cfpkt *pkt,
+ uint16 (*func)(uint16 chks, void *buf, uint16 len),
+ uint16 data)
+{
+ return func(data, pkt->_data_, cfpkt_getlen(pkt));
+}
+
+int cfpkt_setlen(struct cfpkt *pkt, uint16 len)
+{
+ CHECK_MAGIC(pkt);
+ if (pkt->_data_ + len > pkt->_end_) {
+ PKT_ERROR(pkt, "cfpkt_setlen: Erroneous packet\n");
+ return CFGLU_EPKT;
+ }
+ pkt->_tail_ = pkt->_data_ + len;
+ return cfpkt_getlen(pkt);
+}
+
+uint16 cfpkt_getlen(struct cfpkt *pkt)
+{
+ CHECK_MAGIC(pkt);
+ return pkt->_tail_ - pkt->_data_;
+}
+
+void cfpkt_extract(struct cfpkt *cfpkt, void *buf, unsigned int buflen,
+ unsigned int *actual_len)
+{
+ uint16 pklen = cfpkt_getlen(cfpkt);
+ caif_assert(buf != NULL);
+ caif_assert(actual_len != NULL);
+ CHECK_MAGIC(cfpkt);
+ if (buflen < pklen)
+ pklen = buflen;
+ *actual_len = pklen;
+ cfpkt_extr_head(cfpkt, buf, pklen);
+ CHECK_MAGIC(cfpkt);
+}
+
+struct cfpkt *cfpkt_create_uplink(const unsigned char *data, unsigned int len)
+{
+ struct cfpkt *pkt = cfpkt_create_pfx(len, PKT_PREFIX);
+ if (data != NULL)
+ cfpkt_add_body(pkt, data, len);
+ CHECK_MAGIC(pkt);
+ return pkt;
+}
+
+struct cfpkt *cfpkt_append(struct cfpkt *dstpkt, struct cfpkt *addpkt,
+ uint16 expectlen)
+{
+ uint16 addpktlen = addpkt->_tail_ - addpkt->_data_;
+ uint16 neededtailspace;
+ CHECK_MAGIC(dstpkt);
+ CHECK_MAGIC(addpkt);
+ if (expectlen > addpktlen)
+ neededtailspace = expectlen;
+ else
+ neededtailspace = addpktlen;
+ if (dstpkt->_tail_ + neededtailspace > dstpkt->_end_) {
+ struct cfpkt *tmppkt;
+ uint16 dstlen;
+ uint16 createlen;
+ dstlen = dstpkt->_tail_ - dstpkt->_data_;
+ createlen = dstlen + addpktlen;
+ if (expectlen > createlen)
+ createlen = expectlen;
+ tmppkt = cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX);
+ tmppkt->_tail_ = tmppkt->_data_ + dstlen;
+ memcpy(tmppkt->_data_, dstpkt->_data_, dstlen);
+ cfpkt_destroy(dstpkt);
+ dstpkt = tmppkt;
+ }
+ memcpy(dstpkt->_tail_, addpkt->_data_,
+ addpkt->_tail_ - addpkt->_data_);
+ cfpkt_destroy(addpkt);
+ dstpkt->_tail_ += addpktlen;
+ CHECK_MAGIC(dstpkt);
+ return dstpkt;
+}
+
+struct cfpkt *cfpkt_split(struct cfpkt *pkt, uint16 pos)
+{
+ struct cfpkt *half; /* FIXME: Rename half to pkt2 */
+ uint8 *split = pkt->_data_ + pos;
+ uint16 len2nd = pkt->_tail_ - split;
+
+
+ CHECK_MAGIC(pkt);
+ if (pkt->_data_ + pos > pkt->_tail_) {
+ PKT_ERROR(pkt,
+ "cfpkt_split: trying to split beyond end of packet\n");
+ return NULL;
+ }
+
+ /* Create a new packet for the second part of the data */
+ half = cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX, PKT_PREFIX);
+
+
+ if (half == NULL)
+ return NULL;
+
+ /* Reduce the length of the original packet */
+ pkt->_tail_ = split;
+
+ memcpy(half->_data_, split, len2nd);
+ half->_tail_ += len2nd;
+ CHECK_MAGIC(pkt);
+ return half;
+}
+
+bool cfpkt_erroneous(struct cfpkt *pkt)
+{
+ /* Errors are marked by setting _end_ equal to _head_ (zero sized
+ * packet)
+ */
+ return pkt->_end_ == (uint8 *) pkt->_head_;
+}
+
+int cfpkt_pad_trail(struct cfpkt *pkt, uint16 len)
+{
+ CHECK_MAGIC(pkt);
+ if (pkt->_tail_ + len > pkt->_end_) {
+ PKT_ERROR(pkt, "cfpkt_pad_trail pads beyond end of packet\n");
+ return CFGLU_EPKT;
+ }
+#if 1
+ /* We're assuming that the modem doesn't require zero-padding */
+ pkt->_tail_ += len;
+#else
+ while (len--)
+ *pkt->_tail_++ = 0;
+#endif
+
+ CHECK_MAGIC(pkt);
+ return CFGLU_EOK;
+}
+
+int cfpkt_peek_head(struct cfpkt *const pkt, void *dta, uint16 len)
+{
+ register int i;
+ uint8 *data = (uint8 *) dta;
+ CHECK_MAGIC(pkt);
+ if (pkt->_data_ + len > pkt->_tail_) {
+ PKT_ERROR(pkt,
+ "cfpkt_peek_head would read beyond end of packet\n");
+ return CFGLU_EPKT;
+ }
+ for (i = 0; i < len; i++)
+ data[i] = pkt->_data_[i];
+ CHECK_MAGIC(pkt);
+ return CFGLU_EOK;
+
+}
+
+int cfpkt_raw_append(struct cfpkt *cfpkt, void **buf, unsigned int buflen)
+{
+ caif_assert(buf != NULL);
+ if (cfpkt->_tail_ + buflen > cfpkt->_end_) {
+ PKT_ERROR(cfpkt,
+ "cfpkt_raw_append would append beyond end of packet\n");
+ return CFGLU_EPKT;
+ }
+
+ *buf = cfpkt->_tail_;
+ cfpkt->_tail_ += buflen;
+ return CFGLU_EOK;
+}
+
+int cfpkt_raw_extract(struct cfpkt *cfpkt, void **buf, unsigned int buflen)
+{
+ caif_assert(buf != NULL);
+ if (cfpkt->_data_ + buflen > cfpkt->_tail_) {
+ PKT_ERROR(cfpkt,
+ "cfpkt_raw_extact would read beyond end of packet\n");
+ return CFGLU_EPKT;
+ }
+
+ *buf = cfpkt->_data_;
+ cfpkt->_data_ += buflen;
+ return CFGLU_EOK;
+}
+
+struct cfpktq *cfpktq_create()
+{
+ struct cfpktq *q = (struct cfpktq *) cfglu_alloc(sizeof(struct cfpktq));
+ cfglu_init_lock(q->qlock);
+ q->head = NULL;
+ q->count = 0;
+ return q;
+}
+
+void cfpkt_queue(struct cfpktq *pktq, struct cfpkt *pkt, unsigned short prio)
+{
+ CHECK_MAGIC(pkt);
+ cfglu_lock(pktq->qlock);
+ pkt->next = NULL;
+ if (pktq->head == NULL)
+ pktq->head = pkt;
+ else {
+ /* NOTE: Consider having a tail pointer in order to improve
+ * performance
+ */
+ struct cfpkt *p = pktq->head;
+ while (p->next != NULL) {
+ CHECK_MAGIC(p);
+ p = p->next;
+ }
+ p->next = pkt;
+ }
+ pktq->count++;
+ cfglu_unlock(pktq->qlock);
+}
+
+struct cfpkt *cfpkt_qpeek(struct cfpktq *pktq)
+{
+ struct cfpkt *pkt;
+
+ cfglu_lock(pktq->qlock);
+ if (pktq->head != NULL) {
+ /* NOTE: Sync is only needed due to this CHECK_MAGIC... */
+ CHECK_MAGIC(pktq->head);
+ }
+ pkt = pktq->head;
+ cfglu_unlock(pktq->qlock);
+ return pkt;
+}
+
+struct cfpkt *cfpkt_dequeue(struct cfpktq *pktq)
+{
+ struct cfpkt *ret;
+ cfglu_lock(pktq->qlock);
+ if (pktq->head == NULL) {
+ cfglu_unlock(pktq->qlock);
+ return NULL;
+ }
+ ret = pktq->head;
+ pktq->head = pktq->head->next;
+ CHECK_MAGIC(ret);
+ pktq->count--;
+ caif_assert(pktq->count >= 0);
+ cfglu_unlock(pktq->qlock);
+ return ret;
+}
+
+int cfpkt_qcount(struct cfpktq *pktq)
+{
+ int count;
+
+ cfglu_lock(pktq->qlock);
+ count = pktq->count;
+ cfglu_unlock(pktq->qlock);
+
+ return count;
+}
+
+inline struct cfpkt *cfpkt_clone_release(struct cfpkt *pkt)
+{
+ /* Plain packets have no ownership. */
+ return pkt;
+}
+#if 0
+struct caif_packet_funcs cfpkt_get_packet_funcs()
+{
+ struct caif_packet_funcs f;
+ memset(&f, 0, sizeof(f));
+ f.cfpkt_destroy = cfpkt_destroy;
+ f.cfpkt_extract = cfpkt_extract;
+ f.cfpkt_create_xmit_pkt = cfpkt_create_uplink; /* FIXME: Decide upon
+ which create
+ functions to export
+ */
+ f.cfpkt_create_recv_pkt = cfpkt_create_uplink;
+ f.cfpkt_raw_append = cfpkt_raw_append;
+ f.cfpkt_raw_extract = cfpkt_raw_extract;
+ f.cfpktq_create = cfpktq_create;
+ f.cfpkt_queue = cfpkt_queue;
+ f.cfpkt_qpeek = cfpkt_qpeek;
+ f.cfpkt_dequeue = cfpkt_dequeue;
+ f.cfpkt_getlen = cfpkt_getlen;
+ return f;
+}
+#endif
+struct payload_info *cfpkt_info(struct cfpkt *pkt)
+{
+ return &pkt->info;
+}
diff --git a/net/caif/generic/cfpkt_skbuff.c b/net/caif/generic/cfpkt_skbuff.c
new file mode 100644
index 00000000000..32f23ca02fb
--- /dev/null
+++ b/net/caif/generic/cfpkt_skbuff.c
@@ -0,0 +1,596 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2009
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <linux/hardirq.h>
+#include <net/caif/generic/cfglue.h>
+#include <net/caif/generic/cfpkt.h>
+
+#define PKT_PREFIX CAIF_NEEDED_HEADROOM
+#define PKT_POSTFIX CAIF_NEEDED_TAILROOM
+#define PKT_LEN_WHEN_EXTENDING 128
+#define PKT_ERROR(pkt, errmsg) do { \
+ cfpkt_priv(pkt)->erronous = true; \
+ skb_reset_tail_pointer(&pkt->skb); \
+ pr_warning("CAIF: " errmsg);\
+ } while (0)
+
+struct cfpktq {
+ struct sk_buff_head head;
+ cfglu_atomic_t count;
+ spinlock_t lock;
+};
+
+/*
+ * net/caif/generic/ is generic and does not
+ * understand SKB, so we do this typecast
+ */
+struct cfpkt {
+ struct sk_buff skb;
+};
+
+/* Private data inside SKB */
+struct cfpkt_priv_data {
+ struct dev_info dev_info;
+ bool erronous;
+};
+
+inline struct cfpkt_priv_data *cfpkt_priv(struct cfpkt *pkt)
+{
+ return (struct cfpkt_priv_data *) pkt->skb.cb;
+}
+
+inline bool is_erronous(struct cfpkt *pkt)
+{
+ return cfpkt_priv(pkt)->erronous;
+}
+
+inline struct sk_buff *pkt_to_skb(struct cfpkt *pkt)
+{
+ return &pkt->skb;
+}
+
+inline struct cfpkt *skb_to_pkt(struct sk_buff *skb)
+{
+ return (struct cfpkt *) skb;
+}
+
+cfglu_atomic_t cfpkt_packet_count;
+EXPORT_SYMBOL(cfpkt_packet_count);
+
+struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt)
+{
+ struct cfpkt *pkt = skb_to_pkt(nativepkt);
+ cfpkt_priv(pkt)->erronous = false;
+ cfglu_atomic_inc(cfpkt_packet_count);
+ return pkt;
+}
+EXPORT_SYMBOL(cfpkt_fromnative);
+
+void *cfpkt_tonative(struct cfpkt *pkt)
+{
+ return (void *) pkt;
+}
+EXPORT_SYMBOL(cfpkt_tonative);
+
+struct cfpkt *cfpkt_create_pfx(uint16 len, uint16 pfx)
+{
+ struct sk_buff *skb;
+
+ if (likely(in_interrupt()))
+ skb = alloc_skb(len + pfx, GFP_ATOMIC);
+ else
+ skb = alloc_skb(len + pfx, GFP_KERNEL);
+
+ if (unlikely(skb == NULL))
+ return NULL;
+
+ skb_reserve(skb, pfx);
+ cfglu_atomic_inc(cfpkt_packet_count);
+ return skb_to_pkt(skb);
+}
+
+inline struct cfpkt *cfpkt_create(uint16 len)
+{
+ return cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX);
+}
+EXPORT_SYMBOL(cfpkt_create);
+
+void cfpkt_destroy(struct cfpkt *pkt)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ cfglu_atomic_dec(cfpkt_packet_count);
+ caif_assert(cfglu_atomic_read(cfpkt_packet_count) >= 0);
+ kfree_skb(skb);
+}
+EXPORT_SYMBOL(cfpkt_destroy);
+
+inline bool cfpkt_more(struct cfpkt *pkt)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ return skb->len > 0;
+}
+EXPORT_SYMBOL(cfpkt_more);
+
+int cfpkt_peek_head(struct cfpkt *pkt, void *data, uint16 len)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ if (skb->tail - skb->data >= len) {
+ memcpy(data, skb->data, len);
+ return CFGLU_EOK;
+ }
+ return !cfpkt_extr_head(pkt, data, len) &&
+ !cfpkt_add_head(pkt, data, len);
+}
+EXPORT_SYMBOL(cfpkt_peek_head);
+
+int cfpkt_extr_head(struct cfpkt *pkt, void *data, uint16 len)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ uint8 *from;
+ if (unlikely(is_erronous(pkt)))
+ return CFGLU_EPKT;
+
+ if (unlikely(len > skb->len)) {
+ PKT_ERROR(pkt, "cfpkt_extr_head read beyond end of packet\n");
+ return CFGLU_EPKT;
+ }
+
+ if (unlikely(len > skb_headlen(skb))) {
+ if (unlikely(skb_linearize(skb) != 0)) {
+ PKT_ERROR(pkt, "cfpkt_extr_head linearize failed\n");
+ return CFGLU_EPKT;
+ }
+ }
+ from = skb_pull(skb, len);
+ from -= len;
+ memcpy(data, from, len);
+ return CFGLU_EOK;
+}
+EXPORT_SYMBOL(cfpkt_extr_head);
+
+int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, uint16 len)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ uint8 *data = dta;
+ uint8 *from;
+ if (unlikely(is_erronous(pkt)))
+ return CFGLU_EPKT;
+
+ if (unlikely(skb_linearize(skb) != 0)) {
+ PKT_ERROR(pkt, "cfpkt_extr_trail linearize failed\n");
+ return CFGLU_EPKT;
+ }
+ if (unlikely(skb->data + len > skb_tail_pointer(skb))) {
+ PKT_ERROR(pkt, "cfpkt_extr_trail read beyond end of packet\n");
+ return CFGLU_EPKT;
+ }
+ from = skb_tail_pointer(skb) - len;
+ skb_trim(skb, skb->len - len);
+ memcpy(data, from, len);
+ return CFGLU_EOK;
+}
+EXPORT_SYMBOL(cfpkt_extr_trail);
+
+int cfpkt_pad_trail(struct cfpkt *pkt, uint16 len)
+{
+ return cfpkt_add_body(pkt, NULL, len);
+}
+EXPORT_SYMBOL(cfpkt_pad_trail);
+
+int cfpkt_add_body(struct cfpkt *pkt, const void *data, uint16 len)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ struct sk_buff *lastskb;
+ uint8 *to;
+ uint16 addlen = 0;
+
+
+ if (unlikely(is_erronous(pkt)))
+ return CFGLU_EPKT;
+
+ lastskb = skb;
+
+ /* Check whether we need to add space at the tail */
+ if (unlikely(skb_tailroom(skb) < len)) {
+ if (likely(len < PKT_LEN_WHEN_EXTENDING))
+ addlen = PKT_LEN_WHEN_EXTENDING;
+ else
+ addlen = len;
+ }
+
+ /* Check whether we need to change the SKB before writing to the tail */
+ if (unlikely((addlen > 0) || skb_cloned(skb) || skb_shared(skb))) {
+
+ /* Make sure data is writable */
+ if (unlikely(skb_cow_data(skb, addlen, &lastskb) < 0)) {
+ PKT_ERROR(pkt, "cfpkt_add_body: cow failed\n");
+ return CFGLU_EPKT;
+ }
+ /*
+ * Is the SKB non-linear after skb_cow_data()? If so, we are
+ * going to add data to the last SKB, so we need to adjust
+ * lengths of the top SKB.
+ */
+ if (lastskb != skb) {
+ pr_warning("CAIF: %s(): Packet is non-linear\n",
+ __func__);
+ skb->len += len;
+ skb->data_len += len;
+ }
+ }
+
+ /* All set to put the last SKB and optionally write data there. */
+ to = skb_put(lastskb, len);
+ if (likely(data))
+ memcpy(to, data, len);
+ return CFGLU_EOK;
+}
+EXPORT_SYMBOL(cfpkt_add_body);
+
+inline int cfpkt_addbdy(struct cfpkt *pkt, uint8 data)
+{
+ return cfpkt_add_body(pkt, &data, 1);
+}
+EXPORT_SYMBOL(cfpkt_addbdy);
+
+int cfpkt_add_head(struct cfpkt *pkt, const void *data2, uint16 len)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ struct sk_buff *lastskb;
+ uint8 *to;
+ const uint8 *data = data2;
+ if (unlikely(is_erronous(pkt)))
+ return CFGLU_EPKT;
+ if (unlikely(skb_headroom(skb) < len)) {
+ PKT_ERROR(pkt, "cfpkt_add_head: no headroom\n");
+ return CFGLU_EPKT;
+ }
+
+ /* Make sure data is writable */
+ if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) {
+ PKT_ERROR(pkt, "cfpkt_add_head: cow failed\n");
+ return CFGLU_EPKT;
+ }
+
+ to = skb_push(skb, len);
+ memcpy(to, data, len);
+ return CFGLU_EOK;
+}
+EXPORT_SYMBOL(cfpkt_add_head);
+
+inline int cfpkt_add_trail(struct cfpkt *pkt, const void *data, uint16 len)
+{
+ return cfpkt_add_body(pkt, data, len);
+}
+EXPORT_SYMBOL(cfpkt_add_trail);
+
+inline uint16 cfpkt_getlen(struct cfpkt *pkt)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ return skb->len;
+}
+EXPORT_SYMBOL(cfpkt_getlen);
+
+inline uint16 cfpkt_iterate(struct cfpkt *pkt,
+ uint16 (*iter_func)(uint16, void *, uint16),
+ uint16 data)
+{
+ /*
+ * Don't care about the performance hit of linearizing,
+ * Checksum should not be used on high-speed interfaces anyway.
+ */
+ if (unlikely(is_erronous(pkt)))
+ return CFGLU_EPKT;
+ if (unlikely(skb_linearize(&pkt->skb) != 0)) {
+ PKT_ERROR(pkt, "cfpkt_iterate: linearize failed\n");
+ return CFGLU_EPKT;
+ }
+ return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt));
+}
+EXPORT_SYMBOL(cfpkt_iterate);
+
+int cfpkt_setlen(struct cfpkt *pkt, uint16 len)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+
+
+ if (unlikely(is_erronous(pkt)))
+ return CFGLU_EPKT;
+
+ if (likely(len <= skb->len)) {
+ if (unlikely(skb->data_len))
+ ___pskb_trim(skb, len);
+ else
+ skb_trim(skb, len);
+
+ return cfpkt_getlen(pkt);
+ }
+
+ /* Need to expand SKB */
+ if (unlikely(!cfpkt_pad_trail(pkt, len - skb->len)))
+ PKT_ERROR(pkt, "cfpkt_setlen: skb_pad_trail failed\n");
+
+ return cfpkt_getlen(pkt);
+}
+EXPORT_SYMBOL(cfpkt_setlen);
+
+struct cfpkt *cfpkt_create_uplink(const unsigned char *data, unsigned int len)
+{
+ struct cfpkt *pkt = cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX);
+ if (unlikely(data != NULL))
+ cfpkt_add_body(pkt, data, len);
+ return pkt;
+}
+EXPORT_SYMBOL(cfpkt_create_uplink);
+
+
+struct cfpkt *cfpkt_append(struct cfpkt *dstpkt,
+ struct cfpkt *addpkt,
+ uint16 expectlen)
+{
+ struct sk_buff *dst = pkt_to_skb(dstpkt);
+ struct sk_buff *add = pkt_to_skb(addpkt);
+ uint16 addlen = add->tail - add->data;
+ uint16 neededtailspace;
+ struct sk_buff *tmp;
+ uint16 dstlen;
+ uint16 createlen;
+ if (unlikely(is_erronous(dstpkt) || is_erronous(addpkt))) {
+ cfpkt_destroy(addpkt);
+ return dstpkt;
+ }
+ if (expectlen > addlen)
+ neededtailspace = expectlen;
+ else
+ neededtailspace = addlen;
+
+ if (dst->tail + neededtailspace > dst->end) {
+		/* Create a duplicate of 'dst' with more tail space */
+ dstlen = dst->tail - dst->data;
+ createlen = dstlen + neededtailspace;
+ tmp = pkt_to_skb(
+ cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX));
+ if (!tmp)
+ return NULL;
+ tmp->tail = tmp->data + dstlen;
+ tmp->len = dstlen;
+ memcpy(tmp->data, dst->data, dstlen);
+ cfpkt_destroy(dstpkt);
+ dst = tmp;
+ }
+ memcpy(dst->tail, add->data, add->tail - add->data);
+ cfpkt_destroy(addpkt);
+ dst->tail += addlen;
+ dst->len += addlen;
+ return skb_to_pkt(dst);
+}
+EXPORT_SYMBOL(cfpkt_append);
+
+struct cfpkt *cfpkt_split(struct cfpkt *pkt, uint16 pos)
+{
+ struct sk_buff *skb2;
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ uint8 *split = skb->data + pos;
+ uint16 len2nd = skb->tail - split;
+
+ if (unlikely(is_erronous(pkt)))
+ return NULL;
+
+ if (skb->data + pos > skb->tail) {
+ PKT_ERROR(pkt,
+ "cfpkt_split: trying to split beyond end of packet");
+ return NULL;
+ }
+
+ /* Create a new packet for the second part of the data */
+ skb2 = pkt_to_skb(
+ cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX,
+ PKT_PREFIX));
+
+ if (skb2 == NULL)
+ return NULL;
+
+ /* Reduce the length of the original packet */
+ skb->tail = split;
+ skb->len = pos;
+
+ memcpy(skb2->data, split, len2nd);
+ skb2->tail += len2nd;
+ skb2->len += len2nd;
+ return skb_to_pkt(skb2);
+}
+EXPORT_SYMBOL(cfpkt_split);
+
+
+char *cfpkt_log_pkt(struct cfpkt *pkt, char *buf, int buflen)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ char *p = buf;
+ int i;
+
+ /*
+ * Sanity check buffer length, it needs to be at least as large as
+ * the header info: ~=50+ bytes
+ */
+ if (buflen < 50)
+ return NULL;
+
+ snprintf(buf, buflen, "%s: pkt:%p len:%d(%d+%d) {%d,%d} data: [",
+ is_erronous(pkt) ? "ERRONOUS-SKB" :
+ (skb->data_len != 0 ? "COMPLEX-SKB" : "SKB"),
+ skb,
+ skb->len,
+ skb->tail - skb->data,
+ skb->data_len,
+ skb->data - skb->head, skb->tail - skb->head);
+ p = buf + strlen(buf);
+
+ for (i = 0; i < skb->tail - skb->data && i < 300; i++) {
+ if (p > buf + buflen - 10) {
+ sprintf(p, "...");
+ p = buf + strlen(buf);
+ break;
+ }
+ sprintf(p, "%02x,", skb->data[i]);
+ p = buf + strlen(buf);
+ }
+ sprintf(p, "]\n");
+ return buf;
+}
+EXPORT_SYMBOL(cfpkt_log_pkt);
+
+int cfpkt_raw_append(struct cfpkt *pkt, void **buf, unsigned int buflen)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ struct sk_buff *lastskb;
+
+ caif_assert(buf != NULL);
+ if (unlikely(is_erronous(pkt)))
+ return CFGLU_EPKT;
+ /* Make sure SKB is writable */
+ if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) {
+ PKT_ERROR(pkt, "cfpkt_raw_append: skb_cow_data failed\n");
+ return CFGLU_EPKT;
+ }
+
+ if (unlikely(skb_linearize(skb) != 0)) {
+ PKT_ERROR(pkt, "cfpkt_raw_append: linearize failed\n");
+ return CFGLU_EPKT;
+ }
+
+ if (unlikely(skb_tailroom(skb) < buflen)) {
+ PKT_ERROR(pkt, "cfpkt_raw_append: buffer too short - failed\n");
+ return CFGLU_EPKT;
+ }
+
+ *buf = skb_put(skb, buflen);
+ return 1;
+}
+EXPORT_SYMBOL(cfpkt_raw_append);
+
+int cfpkt_raw_extract(struct cfpkt *pkt, void **buf, unsigned int buflen)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+
+ caif_assert(buf != NULL);
+ if (unlikely(is_erronous(pkt)))
+ return CFGLU_EPKT;
+
+ if (unlikely(buflen > skb->len)) {
+ PKT_ERROR(pkt, "cfpkt_raw_extract: buflen too large "
+ "- failed\n");
+ return CFGLU_EPKT;
+ }
+
+ if (unlikely(buflen > skb_headlen(skb))) {
+ if (unlikely(skb_linearize(skb) != 0)) {
+ PKT_ERROR(pkt, "cfpkt_raw_extract: linearize failed\n");
+ return CFGLU_EPKT;
+ }
+ }
+
+ *buf = skb->data;
+ skb_pull(skb, buflen);
+
+ return 1;
+}
+EXPORT_SYMBOL(cfpkt_raw_extract);
+
+inline bool cfpkt_erroneous(struct cfpkt *pkt)
+{
+ return cfpkt_priv(pkt)->erronous;
+}
+EXPORT_SYMBOL(cfpkt_erroneous);
+
+struct cfpkt *cfpkt_create_pkt(enum caif_direction dir,
+ const unsigned char *data, unsigned int len)
+{
+ struct cfpkt *pkt;
+ if (dir == CAIF_DIR_OUT)
+ pkt = cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX);
+ else
+ pkt = cfpkt_create_pfx(len, 0);
+ if (unlikely(!pkt))
+ return NULL;
+ if (unlikely(data))
+ cfpkt_add_body(pkt, data, len);
+ cfpkt_priv(pkt)->erronous = false;
+ return pkt;
+}
+EXPORT_SYMBOL(cfpkt_create_pkt);
+
+struct cfpktq *cfpktq_create()
+{
+ struct cfpktq *q = cfglu_alloc(sizeof(struct cfpktq));
+ if (!q)
+ return NULL;
+ skb_queue_head_init(&q->head);
+ cfglu_atomic_set(q->count, 0);
+ spin_lock_init(&q->lock);
+ return q;
+}
+EXPORT_SYMBOL(cfpktq_create);
+
+void cfpkt_queue(struct cfpktq *pktq, struct cfpkt *pkt, unsigned short prio)
+{
+ cfglu_atomic_inc(pktq->count);
+ spin_lock(&pktq->lock);
+ skb_queue_tail(&pktq->head, pkt_to_skb(pkt));
+ spin_unlock(&pktq->lock);
+
+}
+EXPORT_SYMBOL(cfpkt_queue);
+
+struct cfpkt *cfpkt_qpeek(struct cfpktq *pktq)
+{
+ struct cfpkt *tmp;
+ spin_lock(&pktq->lock);
+ tmp = skb_to_pkt(skb_peek(&pktq->head));
+ spin_unlock(&pktq->lock);
+ return tmp;
+}
+EXPORT_SYMBOL(cfpkt_qpeek);
+
+struct cfpkt *cfpkt_dequeue(struct cfpktq *pktq)
+{
+ struct cfpkt *pkt;
+ spin_lock(&pktq->lock);
+ pkt = skb_to_pkt(skb_dequeue(&pktq->head));
+ if (pkt) {
+ cfglu_atomic_dec(pktq->count);
+ caif_assert(cfglu_atomic_read(pktq->count) >= 0);
+ }
+ spin_unlock(&pktq->lock);
+ return pkt;
+}
+EXPORT_SYMBOL(cfpkt_dequeue);
+
+int cfpkt_qcount(struct cfpktq *pktq)
+{
+ return cfglu_atomic_read(pktq->count);
+}
+EXPORT_SYMBOL(cfpkt_qcount);
+
+struct cfpkt *cfpkt_clone_release(struct cfpkt *pkt)
+{
+ struct cfpkt *clone;
+ clone = skb_to_pkt(skb_clone(pkt_to_skb(pkt), GFP_ATOMIC));
+ /* Free original packet. */
+ cfpkt_destroy(pkt);
+ if (!clone)
+ return NULL;
+ cfglu_atomic_inc(cfpkt_packet_count);
+ return clone;
+}
+EXPORT_SYMBOL(cfpkt_clone_release);
+
+struct payload_info *cfpkt_info(struct cfpkt *pkt)
+{
+ return (struct payload_info *)&pkt_to_skb(pkt)->cb;
+}
+EXPORT_SYMBOL(cfpkt_info);
diff --git a/net/caif/generic/cfrfml.c b/net/caif/generic/cfrfml.c
new file mode 100644
index 00000000000..aed541afea9
--- /dev/null
+++ b/net/caif/generic/cfrfml.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2009
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <net/caif/generic/cfglue.h>
+#include <net/caif/generic/caif_layer.h>
+#include <net/caif/generic/cfsrvl.h>
+#include <net/caif/generic/cfpkt.h>
+
+#define container_obj(layr) cfglu_container_of(layr, struct cfsrvl, layer)
+
+#define RFM_SEGMENTATION_BIT 0x01
+#define RFM_PAYLOAD 0x00
+#define RFM_CMD_BIT 0x80
+#define RFM_FLOW_OFF 0x81
+#define RFM_FLOW_ON 0x80
+#define RFM_SET_PIN 0x82
+#define RFM_CTRL_PKT_SIZE 1
+
+static int cfrfml_receive(struct layer *layr, struct cfpkt *pkt);
+static int cfrfml_transmit(struct layer *layr, struct cfpkt *pkt);
+
+/*
+ * cfrfml_create - allocate and set up an RFM service layer instance.
+ * @channel_id: CAIF channel id, used both to address and to name the layer.
+ * @dev_info:   physical device info copied into the service layer.
+ *
+ * Returns the embedded struct layer, or NULL on allocation failure.
+ */
+struct layer *cfrfml_create(uint8 channel_id, struct dev_info *dev_info)
+{
+	struct cfsrvl *svc;
+
+	svc = cfglu_alloc(sizeof(struct cfsrvl));
+	if (svc == NULL) {
+		pr_warning("CAIF: %s(): Out of memory\n", __func__);
+		return NULL;
+	}
+	caif_assert(offsetof(struct cfsrvl, layer) == 0);
+	memset(svc, 0, sizeof(struct cfsrvl));
+	cfsrvl_init(svc, channel_id, dev_info);
+	snprintf(svc->layer.name, CAIF_LAYER_NAME_SZ, "rfm%d", channel_id);
+	svc->layer.receive = cfrfml_receive;
+	svc->layer.transmit = cfrfml_transmit;
+	return &svc->layer;
+}
+
+/*
+ * Free a service layer allocated by cfrfml_create().
+ * NOTE(review): the name says "cffrml" but this function lives in
+ * cfrfml.c and frees cfrfml_create() objects; it looks like it was
+ * meant to be cfrfml_destroy and may collide with a destroy function
+ * in cffrml.c — confirm at link time before renaming.
+ */
+void cffrml_destroy(struct layer *layer)
+{
+	cfglu_free(layer);
+}
+
+/*
+ * cfrfml_receive - strip the one-byte RFM header and pass the packet
+ * upwards.  Consumes @pkt on header-extraction error.  Segmented
+ * frames are not handled here; the segmentation bit is asserted clear.
+ */
+static int cfrfml_receive(struct layer *layr, struct cfpkt *pkt)
+{
+	uint8 tmp;
+	bool segmented;
+	int ret;
+	caif_assert(layr->up != NULL);
+	/*
+	 * Check the upstream receive hook; asserting layr->receive would
+	 * be vacuous, as it is the function currently executing.
+	 */
+	caif_assert(layr->up->receive != NULL);
+
+	/*
+	 * RFM is taking care of segmentation and stripping of
+	 * segmentation bit.
+	 */
+	if (cfpkt_extr_head(pkt, &tmp, 1) < 0) {
+		pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+		cfpkt_destroy(pkt);
+		return CFGLU_EPROTO;
+	}
+	segmented = tmp & RFM_SEGMENTATION_BIT;
+	caif_assert(!segmented);
+
+	ret = layr->up->receive(layr->up, pkt);
+	return ret;
+}
+
+/*
+ * cfrfml_transmit - prepend the one-byte RFM payload header and send
+ * the packet downwards.  Rejects packets while the service is not
+ * ready, and packets larger than CAIF_MAX_PAYLOAD_SIZE.  On transmit
+ * failure the header byte is stripped again so the caller gets the
+ * packet back unchanged.
+ */
+static int cfrfml_transmit(struct layer *layr, struct cfpkt *pkt)
+{
+	uint8 tmp = 0;
+	int ret;
+	struct cfsrvl *service = container_obj(layr);
+
+	caif_assert(layr->dn != NULL);
+	caif_assert(layr->dn->transmit != NULL);
+
+	if (!cfsrvl_ready(service, &ret))
+		return ret;
+
+	/*
+	 * Size guard: the original "!cfpkt_getlen(pkt) > ..." applied the
+	 * logical NOT before the comparison and so never triggered.
+	 */
+	if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+		pr_err("CAIF: %s():Packet too large - size=%d\n",
+			__func__, cfpkt_getlen(pkt));
+		return CFGLU_EOVERFLOW;
+	}
+	if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
+		pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+		return CFGLU_EPKT;
+	}
+
+	/* Add info for MUX-layer to route the packet out. */
+	cfpkt_info(pkt)->channel_id = service->layer.id;
+	/*
+	 * To optimize alignment, we add up the size of CAIF header before
+	 * payload.
+	 */
+	cfpkt_info(pkt)->hdr_len = 1;
+	cfpkt_info(pkt)->dev_info = &service->dev_info;
+	ret = layr->dn->transmit(layr->dn, pkt);
+	if (ret < 0)
+		cfpkt_extr_head(pkt, &tmp, 1);
+	return ret;
+}
diff --git a/net/caif/generic/cfserl.c b/net/caif/generic/cfserl.c
new file mode 100644
index 00000000000..a8426ed9d3e
--- /dev/null
+++ b/net/caif/generic/cfserl.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2009
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <net/caif/generic/cfglue.h>
+#include <net/caif/generic/caif_layer.h>
+#include <net/caif/generic/cfpkt.h>
+
+#define container_obj(layr) ((struct cfserl *) layr)
+
+#define CFSERL_STX 0x02
+#define CAIF_MINIUM_PACKET_SIZE 4
+/* State for the serial framing layer. */
+struct cfserl {
+	struct layer layer;		/* must be first: container_obj() casts */
+	struct cfpkt *incomplete_frm;	/* partially received frame, if any */
+	cfglu_lock_t sync;		/* protects incomplete_frm / reassembly */
+	bool usestx;			/* frames are delimited by an STX byte */
+};
+#define STXLEN(layr) (layr->usestx ? 1 : 0)
+
+static int cfserl_receive(struct layer *layr, struct cfpkt *pkt);
+static int cfserl_transmit(struct layer *layr, struct cfpkt *pkt);
+static void cfserl_ctrlcmd(struct layer *layr, enum caif_ctrlcmd ctrl,
+ int phyid);
+
+/*
+ * cfserl_create - allocate and initialise a serial framing layer.
+ * @type:     stored in layer.type.
+ * @instance: currently unused; the layer name is always "ser1"
+ *            (the format string below contains no conversion).
+ * @use_stx:  enable STX byte framing/resynchronisation.
+ *
+ * Returns the new layer, or NULL on allocation failure.  Up/down
+ * layers must be attached with cfserl_set_uplayer()/_set_dnlayer().
+ */
+struct cfserl *cfserl_create(int type, int instance, bool use_stx)
+{
+	struct cfserl *this = cfglu_alloc(sizeof(struct cfserl));
+	if (!this) {
+		pr_warning("CAIF: %s(): Out of memory\n", __func__);
+		return NULL;
+	}
+	caif_assert(offsetof(struct cfserl, layer) == 0);
+	memset(this, 0, sizeof(struct cfserl));
+	this->layer.receive = cfserl_receive;
+	this->layer.transmit = cfserl_transmit;
+	this->layer.ctrlcmd = cfserl_ctrlcmd;
+	this->layer.type = type;
+	this->usestx = use_stx;
+	cfglu_init_lock(this->sync);
+	snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "ser1");
+	return this;
+}
+
+/* Attach the layer that receives deframed packets from us. */
+void cfserl_set_uplayer(struct cfserl *this, struct layer *up)
+{
+	this->layer.up = up;
+}
+
+/* Attach the layer we transmit framed packets to. */
+void cfserl_set_dnlayer(struct cfserl *this, struct layer *dn)
+{
+	this->layer.dn = dn;
+}
+
+/*
+ * cfserl_receive - reassemble and deliver CAIF frames from a serial
+ * byte stream.
+ *
+ * Incoming data is appended to any previously stored partial frame.
+ * The loop then repeatedly: (optionally) hunts for the STX delimiter,
+ * reads the 2-byte little-endian length field, and either stores the
+ * data back in incomplete_frm (too short), splits off a complete
+ * frame and delivers it upwards, or resynchronises on framing errors.
+ * The sync lock is held throughout, except around the call up the
+ * stack.
+ */
+static int cfserl_receive(struct layer *l, struct cfpkt *newpkt)
+{
+	struct cfserl *layr = container_obj(l);
+	uint16 pkt_len;
+	struct cfpkt *pkt = NULL;
+	struct cfpkt *tail_pkt = NULL;
+	uint8 tmp8;
+	uint16 tmp;
+	uint8 stx = CFSERL_STX;
+	int ret;
+	uint16 expectlen = 0;
+	caif_assert(newpkt != NULL);
+	cfglu_lock(layr->sync);
+
+	if (layr->incomplete_frm != NULL) {
+
+		/* Note: expectlen is still 0 (its initial value) here. */
+		layr->incomplete_frm =
+		    cfpkt_append(layr->incomplete_frm, newpkt, expectlen);
+		pkt = layr->incomplete_frm;
+	} else {
+		pkt = newpkt;
+	}
+	layr->incomplete_frm = NULL;
+
+	do {
+		/* Search for STX at start of pkt if STX is used */
+		if (layr->usestx) {
+			cfpkt_extr_head(pkt, &tmp8, 1);
+			if (tmp8 != CFSERL_STX) {
+				/* Discard bytes until the next STX. */
+				while (cfpkt_more(pkt)
+				       && tmp8 != CFSERL_STX) {
+					cfpkt_extr_head(pkt, &tmp8, 1);
+				}
+				if (!cfpkt_more(pkt)) {
+					cfpkt_destroy(pkt);
+					layr->incomplete_frm = NULL;
+					cfglu_unlock(layr->sync);
+					return CFGLU_EPROTO;
+				}
+			}
+		}
+
+		pkt_len = cfpkt_getlen(pkt);
+
+		/*
+		 * pkt_len is the accumulated length of the packet data
+		 * we have received so far.
+		 * Exit if frame doesn't hold length.
+		 */
+
+		if (pkt_len < 2) {
+			/* Re-prepend the consumed STX before storing. */
+			if (layr->usestx)
+				cfpkt_add_head(pkt, &stx, 1);
+			layr->incomplete_frm = pkt;
+			cfglu_unlock(layr->sync);
+			return 0;
+		}
+
+		/*
+		 * Find length of frame.
+		 * expectlen is the length we need for a full frame.
+		 */
+		cfpkt_peek_head(pkt, &tmp, 2);
+		expectlen = cfglu_le16_to_cpu(tmp) + 2;
+		/*
+		 * Frame error handling
+		 */
+		if (expectlen < CAIF_MINIUM_PACKET_SIZE
+		    || expectlen > CAIF_MAX_FRAMESIZE) {
+			/* Without STX there is no way to resynchronise. */
+			if (!layr->usestx) {
+				if (pkt != NULL)
+					cfpkt_destroy(pkt);
+				layr->incomplete_frm = NULL;
+				expectlen = 0;
+				cfglu_unlock(layr->sync);
+				return CFGLU_EPROTO;
+			}
+			continue;
+		}
+
+		if (pkt_len < expectlen) {
+			/* Too little received data */
+			if (layr->usestx)
+				cfpkt_add_head(pkt, &stx, 1);
+			layr->incomplete_frm = pkt;
+			cfglu_unlock(layr->sync);
+			return 0;
+		}
+
+		/*
+		 * Enough data for at least one frame.
+		 * Split the frame, if too long
+		 */
+		if (pkt_len > expectlen)
+			tail_pkt = cfpkt_split(pkt, expectlen);
+		else
+			tail_pkt = NULL;
+
+		/* Send the first part of packet upwards.*/
+		/* Drop the lock while calling up the stack. */
+		cfglu_unlock(layr->sync);
+		ret = layr->layer.up->receive(layr->layer.up, pkt);
+		cfglu_lock(layr->sync);
+		if (ret == CFGLU_EFCS) {
+			if (layr->usestx) {
+				if (tail_pkt != NULL)
+					pkt = cfpkt_append(pkt, tail_pkt, 0);
+
+				/* Start search for next STX if frame failed */
+				continue;
+			} else {
+				cfpkt_destroy(pkt);
+				pkt = NULL;
+			}
+		}
+
+		pkt = tail_pkt;
+
+	} while (pkt != NULL);
+
+	cfglu_unlock(layr->sync);
+	return 0;
+}
+
+/*
+ * cfserl_transmit - prepend the STX delimiter (when enabled) and send
+ * the packet downwards.  On transmit failure the STX byte is stripped
+ * again so the caller's packet is left unchanged.
+ */
+static int cfserl_transmit(struct layer *layer, struct cfpkt *newpkt)
+{
+	struct cfserl *layr = container_obj(layer);
+	int ret;
+	uint8 tmp8 = CFSERL_STX;
+	if (layr->usestx)
+		cfpkt_add_head(newpkt, &tmp8, 1);
+	ret = layer->dn->transmit(layer->dn, newpkt);
+	/*
+	 * Only undo the STX byte we actually added; stripping a byte
+	 * unconditionally would remove payload data when !usestx.
+	 */
+	if (ret < 0 && layr->usestx)
+		cfpkt_extr_head(newpkt, &tmp8, 1);
+
+	return ret;
+}
+
+/* Forward control commands transparently to the layer above. */
+static void cfserl_ctrlcmd(struct layer *layr, enum caif_ctrlcmd ctrl,
+				int phyid)
+{
+	layr->up->ctrlcmd(layr->up, ctrl, phyid);
+}
diff --git a/net/caif/generic/cfsrvl.c b/net/caif/generic/cfsrvl.c
new file mode 100644
index 00000000000..aa62b794b1f
--- /dev/null
+++ b/net/caif/generic/cfsrvl.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2009
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <net/caif/generic/cfglue.h>
+#include <net/caif/generic/caif_layer.h>
+#include <net/caif/generic/cfsrvl.h>
+#include <net/caif/generic/cfpkt.h>
+
+#define SRVL_CTRL_PKT_SIZE 1
+#define SRVL_FLOW_OFF 0x81
+#define SRVL_FLOW_ON 0x80
+#define SRVL_SET_PIN 0x82
+#define SRVL_CTRL_PKT_SIZE 1
+
+#define container_obj(layr) cfglu_container_of(layr, struct cfsrvl, layer)
+
+/*
+ * cfservl_ctrlcmd - track service state and forward flow/control
+ * indications to the layer above.
+ *
+ * Maintains three pieces of state: open (set/cleared by INIT/DEINIT
+ * responses), phy_flow_on and modem_flow_on.  Physical-interface flow
+ * events are forwarded only while modem flow is on, and modem flow
+ * events only while physical flow is on, so the upper layer sees a
+ * single combined flow state.
+ */
+static void cfservl_ctrlcmd(struct layer *layr, enum caif_ctrlcmd ctrl,
+				int phyid)
+{
+	struct cfsrvl *service = container_obj(layr);
+	caif_assert(layr->up != NULL);
+	caif_assert(layr->up->ctrlcmd != NULL);
+	switch (ctrl) {
+	case CAIF_CTRLCMD_INIT_RSP:
+		service->open = true;
+		layr->up->ctrlcmd(layr->up, ctrl, phyid);
+		break;
+	case CAIF_CTRLCMD_DEINIT_RSP:
+	case CAIF_CTRLCMD_INIT_FAIL_RSP:
+		service->open = false;
+		layr->up->ctrlcmd(layr->up, ctrl, phyid);
+		break;
+	case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND:
+		/* Ignore events for other physical interfaces.
+		 * NOTE(review): this arm uses "break" on a phyid mismatch
+		 * while the FLOW_ON arm below uses "return" — same effect
+		 * here, but confirm the asymmetry is intentional. */
+		if (phyid != service->dev_info.id)
+			break;
+		if (service->modem_flow_on)
+			layr->up->ctrlcmd(layr->up,
+					  CAIF_CTRLCMD_FLOW_OFF_IND, phyid);
+		service->phy_flow_on = false;
+		break;
+	case _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND:
+		if (phyid != service->dev_info.id)
+			return;
+		if (service->modem_flow_on) {
+			layr->up->ctrlcmd(layr->up,
+					  CAIF_CTRLCMD_FLOW_ON_IND,
+					  phyid);
+		}
+		service->phy_flow_on = true;
+		break;
+	case CAIF_CTRLCMD_FLOW_OFF_IND:
+		if (service->phy_flow_on) {
+			layr->up->ctrlcmd(layr->up,
+					  CAIF_CTRLCMD_FLOW_OFF_IND, phyid);
+		}
+		service->modem_flow_on = false;
+		break;
+	case CAIF_CTRLCMD_FLOW_ON_IND:
+		if (service->phy_flow_on) {
+			layr->up->ctrlcmd(layr->up,
+					  CAIF_CTRLCMD_FLOW_ON_IND, phyid);
+		}
+		service->modem_flow_on = true;
+		break;
+	case _CAIF_CTRLCMD_PHYIF_DOWN_IND:
+		/* In case interface is down, let's fake a remove shutdown */
+		layr->up->ctrlcmd(layr->up,
+				  CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, phyid);
+		break;
+	case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
+		layr->up->ctrlcmd(layr->up, ctrl, phyid);
+		break;
+	default:
+		pr_warning("CAIF: %s(): "
+			   "Unexpected ctrl in cfsrvl (%d)\n", __func__, ctrl);
+		/* We have both modem and phy flow on, send flow on */
+		layr->up->ctrlcmd(layr->up, ctrl, phyid);
+		service->phy_flow_on = true;
+		break;
+	}
+}
+
+/*
+ * cfservl_modemcmd - handle modem commands from the layer above.
+ *
+ * For flow-on/flow-off requests a one-byte service control packet is
+ * built and transmitted downwards; other commands yield CFGLU_EINVAL.
+ */
+static int cfservl_modemcmd(struct layer *layr, enum caif_modemcmd ctrl)
+{
+	struct cfsrvl *service = container_obj(layr);
+	caif_assert(layr != NULL);
+	caif_assert(layr->dn != NULL);
+	caif_assert(layr->dn->transmit != NULL);
+	switch (ctrl) {
+	case CAIF_MODEMCMD_FLOW_ON_REQ:
+		{
+			struct cfpkt *pkt;
+			struct payload_info *info;
+			uint8 flow_on = SRVL_FLOW_ON;
+			pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
+			if (!pkt) {
+				pr_warning("CAIF: %s(): Out of memory\n",
+					__func__);
+				return CFGLU_ENOMEM;
+			}
+
+			if (cfpkt_add_head(pkt, &flow_on, 1) < 0) {
+				pr_err("CAIF: %s(): Packet is erroneous!\n",
+					__func__);
+				cfpkt_destroy(pkt);
+				return CFGLU_EPROTO;
+			}
+			info = cfpkt_info(pkt);
+			info->channel_id = service->layer.id;
+			info->hdr_len = 1;
+			info->dev_info = &service->dev_info;
+			return layr->dn->transmit(layr->dn, pkt);
+		}
+	case CAIF_MODEMCMD_FLOW_OFF_REQ:
+		{
+			struct cfpkt *pkt;
+			struct payload_info *info;
+			uint8 flow_off = SRVL_FLOW_OFF;
+			pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
+			/* Check the allocation, as the FLOW_ON case does. */
+			if (!pkt) {
+				pr_warning("CAIF: %s(): Out of memory\n",
+					__func__);
+				return CFGLU_ENOMEM;
+			}
+			if (cfpkt_add_head(pkt, &flow_off, 1) < 0) {
+				pr_err("CAIF: %s(): Packet is erroneous!\n",
+					__func__);
+				cfpkt_destroy(pkt);
+				return CFGLU_EPROTO;
+			}
+			info = cfpkt_info(pkt);
+			info->channel_id = service->layer.id;
+			info->hdr_len = 1;
+			info->dev_info = &service->dev_info;
+			return layr->dn->transmit(layr->dn, pkt);
+		}
+	default:
+		break;
+	}
+	return CFGLU_EINVAL;
+}
+
+/* Free a service layer allocated by one of the *_create() functions. */
+void cfservl_destroy(struct layer *layer)
+{
+	cfglu_free(layer);
+}
+
+/*
+ * cfsrvl_init - common initialisation shared by all CAIF service
+ * layers: install the ctrl/modem command handlers, record channel id
+ * and device info, and start in the closed state with both flow
+ * directions enabled.
+ */
+void cfsrvl_init(struct cfsrvl *service,
+		 uint8 channel_id,
+		 struct dev_info *dev_info)
+{
+	caif_assert(offsetof(struct cfsrvl, layer) == 0);
+	service->layer.id = channel_id;
+	service->layer.ctrlcmd = cfservl_ctrlcmd;
+	service->layer.modemcmd = cfservl_modemcmd;
+	service->dev_info = *dev_info;
+	/* Closed until CAIF_CTRLCMD_INIT_RSP is received. */
+	service->open = false;
+	service->modem_flow_on = true;
+	service->phy_flow_on = true;
+}
+
+/*
+ * cfsrvl_ready - check whether the service can transmit.
+ *
+ * Returns true only when the channel is open and both modem and
+ * physical flow control allow transmission.  Otherwise *err is set to
+ * CFGLU_ENOTCONN (channel closed) or CFGLU_ERETRY (flow stopped).
+ */
+bool cfsrvl_ready(struct cfsrvl *service, int *err)
+{
+	if (!service->open) {
+		*err = CFGLU_ENOTCONN;
+		return false;
+	}
+	if (service->modem_flow_on && service->phy_flow_on)
+		return true;
+	caif_assert(!(service->modem_flow_on && service->phy_flow_on));
+	*err = CFGLU_ERETRY;
+	return false;
+}
+/* Return the physical interface id this service layer is bound to. */
+uint8 cfsrvl_getphyid(struct layer *layer)
+{
+	return container_obj(layer)->dev_info.id;
+}
+
+/* True when this service layer is bound to physical interface @phyid. */
+bool cfsrvl_phyid_match(struct layer *layer, int phyid)
+{
+	return container_obj(layer)->dev_info.id == phyid;
+}
diff --git a/net/caif/generic/cfutill.c b/net/caif/generic/cfutill.c
new file mode 100644
index 00000000000..351860ef33b
--- /dev/null
+++ b/net/caif/generic/cfutill.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2009
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <net/caif/generic/cfglue.h>
+#include <net/caif/generic/caif_layer.h>
+#include <net/caif/generic/cfsrvl.h>
+#include <net/caif/generic/cfpkt.h>
+
+#define container_obj(layr) ((struct cfsrvl *) layr)
+#define UTIL_PAYLOAD 0x00
+#define UTIL_CMD_BIT 0x80
+#define UTIL_REMOTE_SHUTDOWN 0x82
+#define UTIL_FLOW_OFF 0x81
+#define UTIL_FLOW_ON 0x80
+#define UTIL_CTRL_PKT_SIZE 1
+static int cfutill_receive(struct layer *layr, struct cfpkt *pkt);
+static int cfutill_transmit(struct layer *layr, struct cfpkt *pkt);
+
+/*
+ * cfutill_create - allocate and set up a utility service layer.
+ * @channel_id: CAIF channel id used to address the layer.
+ * @dev_info:   physical device info copied into the service layer.
+ *
+ * Returns the embedded struct layer, or NULL on allocation failure.
+ */
+struct layer *cfutill_create(uint8 channel_id, struct dev_info *dev_info)
+{
+	struct cfsrvl *svc;
+
+	svc = cfglu_alloc(sizeof(struct cfsrvl));
+	if (svc == NULL) {
+		pr_warning("CAIF: %s(): Out of memory\n", __func__);
+		return NULL;
+	}
+	caif_assert(offsetof(struct cfsrvl, layer) == 0);
+	memset(svc, 0, sizeof(struct cfsrvl));
+	cfsrvl_init(svc, channel_id, dev_info);
+	snprintf(svc->layer.name, CAIF_LAYER_NAME_SZ - 1, "util1");
+	svc->layer.receive = cfutill_receive;
+	svc->layer.transmit = cfutill_transmit;
+	return &svc->layer;
+}
+
+/*
+ * cfutill_receive - demultiplex the one-byte utility service header.
+ *
+ * Payload packets are passed upwards (ownership transfers to the upper
+ * layer); all control packets are consumed here after signalling the
+ * corresponding control command upwards.
+ */
+static int cfutill_receive(struct layer *layr, struct cfpkt *pkt)
+{
+	uint8 cmd = -1;
+	struct cfsrvl *service = container_obj(layr);
+	caif_assert(layr != NULL);
+	caif_assert(layr->up != NULL);
+	caif_assert(layr->up->receive != NULL);
+	caif_assert(layr->up->ctrlcmd != NULL);
+	if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
+		pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+		cfpkt_destroy(pkt);
+		return CFGLU_EPROTO;
+	}
+
+	switch (cmd) {
+	case UTIL_PAYLOAD:
+		return layr->up->receive(layr->up, pkt);
+	case UTIL_FLOW_OFF:
+		layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0);
+		cfpkt_destroy(pkt);
+		return 0;
+	case UTIL_FLOW_ON:
+		layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0);
+		cfpkt_destroy(pkt);
+		return 0;
+	case UTIL_REMOTE_SHUTDOWN:	/* Remote Shutdown Request */
+		pr_err("CAIF: %s(): REMOTE SHUTDOWN REQUEST RECEIVED\n",
+			__func__);
+		layr->ctrlcmd(layr, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, 0);
+		/* Close the service; cfsrvl_ready() will now fail. */
+		service->open = false;
+		cfpkt_destroy(pkt);
+		return 0;
+	default:
+		cfpkt_destroy(pkt);
+		pr_warning("CAIF: %s(): Unknown service control %d (0x%x)\n",
+			   __func__, cmd, cmd);
+		return CFGLU_EPROTO;
+	}
+}
+
+/*
+ * cfutill_transmit - prepend the one-byte utility payload header and
+ * send the packet downwards.  On transmit failure the header byte is
+ * stripped again so the caller gets the packet back unchanged.
+ */
+static int cfutill_transmit(struct layer *layr, struct cfpkt *pkt)
+{
+	uint8 zero = 0;
+	struct payload_info *info;
+	int ret;
+	struct cfsrvl *service = container_obj(layr);
+	caif_assert(layr != NULL);
+	caif_assert(layr->dn != NULL);
+	caif_assert(layr->dn->transmit != NULL);
+	if (!cfsrvl_ready(service, &ret))
+		return ret;
+
+	if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+		pr_err("CAIF: %s(): packet too large size=%d\n",
+			__func__, cfpkt_getlen(pkt));
+		return CFGLU_EOVERFLOW;
+	}
+
+	cfpkt_add_head(pkt, &zero, 1);
+	/* Add info for MUX-layer to route the packet out. */
+	info = cfpkt_info(pkt);
+	info->channel_id = service->layer.id;
+	/*
+	 * To optimize alignment, we add up the size of CAIF header before
+	 * payload.
+	 */
+	info->hdr_len = 1;
+	info->dev_info = &service->dev_info;
+	ret = layr->dn->transmit(layr->dn, pkt);
+	/*
+	 * Roll back exactly the one header byte added above; the
+	 * original code extracted four bytes here, removing three
+	 * bytes of payload on a failed transmit.
+	 */
+	if (ret < 0)
+		cfpkt_extr_head(pkt, &zero, 1);
+	return ret;
+}
diff --git a/net/caif/generic/cfveil.c b/net/caif/generic/cfveil.c
new file mode 100644
index 00000000000..283012895b6
--- /dev/null
+++ b/net/caif/generic/cfveil.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2009
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <net/caif/generic/cfglue.h>
+#include <net/caif/generic/caif_layer.h>
+#include <net/caif/generic/cfsrvl.h>
+#include <net/caif/generic/cfpkt.h>
+
+#define VEI_PAYLOAD 0x00
+#define VEI_CMD_BIT 0x80
+#define VEI_FLOW_OFF 0x81
+#define VEI_FLOW_ON 0x80
+#define VEI_SET_PIN 0x82
+#define VEI_CTRL_PKT_SIZE 1
+#define container_obj(layr) cfglu_container_of(layr, struct cfsrvl, layer)
+
+static int cfvei_receive(struct layer *layr, struct cfpkt *pkt);
+static int cfvei_transmit(struct layer *layr, struct cfpkt *pkt);
+
+/*
+ * cfvei_create - allocate and set up a VEI (AT channel) service layer.
+ * @channel_id: CAIF channel id, used both to address and to name the layer.
+ * @dev_info:   physical device info copied into the service layer.
+ *
+ * Returns the embedded struct layer, or NULL on allocation failure.
+ */
+struct layer *cfvei_create(uint8 channel_id, struct dev_info *dev_info)
+{
+	struct cfsrvl *svc;
+
+	svc = cfglu_alloc(sizeof(struct cfsrvl));
+	if (svc == NULL) {
+		pr_warning("CAIF: %s(): Out of memory\n", __func__);
+		return NULL;
+	}
+	caif_assert(offsetof(struct cfsrvl, layer) == 0);
+	memset(svc, 0, sizeof(struct cfsrvl));
+	cfsrvl_init(svc, channel_id, dev_info);
+	snprintf(svc->layer.name, CAIF_LAYER_NAME_SZ - 1, "vei%d", channel_id);
+	svc->layer.receive = cfvei_receive;
+	svc->layer.transmit = cfvei_transmit;
+	return &svc->layer;
+}
+
+/*
+ * cfvei_receive - demultiplex the one-byte VEI header.
+ *
+ * Payload packets are passed upwards (ownership transfers to the upper
+ * layer); all control packets are consumed here.
+ */
+static int cfvei_receive(struct layer *layr, struct cfpkt *pkt)
+{
+	uint8 cmd;
+	int ret;
+	caif_assert(layr->up != NULL);
+	/*
+	 * Check the upstream receive hook; asserting layr->receive would
+	 * be vacuous, as it is the function currently executing.
+	 */
+	caif_assert(layr->up->receive != NULL);
+	caif_assert(layr->ctrlcmd != NULL);
+
+
+	if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
+		pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+		cfpkt_destroy(pkt);
+		return CFGLU_EPROTO;
+	}
+	switch (cmd) {
+	case VEI_PAYLOAD:
+		ret = layr->up->receive(layr->up, pkt);
+		return ret;
+	case VEI_FLOW_OFF:
+		layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0);
+		cfpkt_destroy(pkt);
+		return CFGLU_EOK;
+	case VEI_FLOW_ON:
+		layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0);
+		cfpkt_destroy(pkt);
+		return CFGLU_EOK;
+	case VEI_SET_PIN:	/* SET RS232 PIN */
+		cfpkt_destroy(pkt);
+		return CFGLU_EOK;
+	default:		/* unknown VEI control command */
+		pr_warning("CAIF: %s():Unknown VEI control packet %d (0x%x)!\n",
+			   __func__, cmd, cmd);
+		cfpkt_destroy(pkt);
+		return CFGLU_EPROTO;
+	}
+}
+
+/*
+ * cfvei_transmit - prepend the one-byte VEI payload header and send
+ * the packet downwards.  Rejects packets while the service is not
+ * ready, and packets larger than CAIF_MAX_PAYLOAD_SIZE.  On transmit
+ * failure the header byte is stripped again.
+ */
+static int cfvei_transmit(struct layer *layr, struct cfpkt *pkt)
+{
+	uint8 tmp = 0;
+	struct payload_info *info;
+	int ret;
+	struct cfsrvl *service = container_obj(layr);
+	if (!cfsrvl_ready(service, &ret))
+		return ret;
+	caif_assert(layr->dn != NULL);
+	caif_assert(layr->dn->transmit != NULL);
+	/*
+	 * Size guard: the original "!cfpkt_getlen(pkt) > ..." applied the
+	 * logical NOT before the comparison and so never triggered.
+	 */
+	if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+		pr_warning("CAIF: %s(): Packet too large - size=%d\n",
+			   __func__, cfpkt_getlen(pkt));
+		return CFGLU_EOVERFLOW;
+	}
+
+	if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
+		pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+		return CFGLU_EPKT;
+	}
+
+	/* Add info-> for MUX-layer to route the packet out. */
+	info = cfpkt_info(pkt);
+	info->channel_id = service->layer.id;
+	info->hdr_len = 1;
+	info->dev_info = &service->dev_info;
+	ret = layr->dn->transmit(layr->dn, pkt);
+	if (ret < 0)
+		cfpkt_extr_head(pkt, &tmp, 1);
+	return ret;
+}
diff --git a/net/caif/generic/cfvidl.c b/net/caif/generic/cfvidl.c
new file mode 100644
index 00000000000..bad3cd15b51
--- /dev/null
+++ b/net/caif/generic/cfvidl.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2009
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <net/caif/generic/caif_layer.h>
+#include <net/caif/generic/cfglue.h>
+#include <net/caif/generic/cfsrvl.h>
+#include <net/caif/generic/cfpkt.h>
+
+#define container_obj(layr) ((struct cfsrvl *) layr)
+
+static int cfvidl_receive(struct layer *layr, struct cfpkt *pkt);
+static int cfvidl_transmit(struct layer *layr, struct cfpkt *pkt);
+
+/*
+ * cfvidl_create - allocate and set up a video service layer.
+ * @channel_id: CAIF channel id used to address the layer.
+ * @dev_info:   physical device info copied into the service layer.
+ *
+ * Returns the embedded struct layer, or NULL on allocation failure.
+ */
+struct layer *cfvidl_create(uint8 channel_id, struct dev_info *dev_info)
+{
+	struct cfsrvl *svc;
+
+	svc = cfglu_alloc(sizeof(struct cfsrvl));
+	if (svc == NULL) {
+		pr_warning("CAIF: %s(): Out of memory\n", __func__);
+		return NULL;
+	}
+	caif_assert(offsetof(struct cfsrvl, layer) == 0);
+
+	memset(svc, 0, sizeof(struct cfsrvl));
+	cfsrvl_init(svc, channel_id, dev_info);
+	snprintf(svc->layer.name, CAIF_LAYER_NAME_SZ - 1, "vid1");
+	svc->layer.receive = cfvidl_receive;
+	svc->layer.transmit = cfvidl_transmit;
+	return &svc->layer;
+}
+
+/*
+ * cfvidl_receive - strip the 4-byte video header and deliver the
+ * packet upwards.  Consumes @pkt on header-extraction error.
+ */
+static int cfvidl_receive(struct layer *layr, struct cfpkt *pkt)
+{
+	uint32 videoheader;
+
+	if (cfpkt_extr_head(pkt, &videoheader, 4) < 0) {
+		pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+		cfpkt_destroy(pkt);
+		return CFGLU_EPROTO;
+	}
+	return layr->up->receive(layr->up, pkt);
+}
+
+/*
+ * cfvidl_transmit - prepend a zeroed 4-byte video header and send the
+ * packet downwards.  On transmit failure the header is stripped again
+ * so the caller gets the packet back unchanged.
+ *
+ * NOTE(review): unlike the other service layers, info->hdr_len is not
+ * set here — confirm whether the MUX layer needs it for this service.
+ */
+static int cfvidl_transmit(struct layer *layr, struct cfpkt *pkt)
+{
+	struct cfsrvl *service = container_obj(layr);
+	struct payload_info *info;
+	uint32 videoheader = 0;
+	int ret;
+	if (!cfsrvl_ready(service, &ret))
+		return ret;
+	/* Check the header insertion, as the sibling services do. */
+	if (cfpkt_add_head(pkt, &videoheader, 4) < 0) {
+		pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+		return CFGLU_EPKT;
+	}
+	/* Add info for MUX-layer to route the packet out */
+	info = cfpkt_info(pkt);
+	info->channel_id = service->layer.id;
+	info->dev_info = &service->dev_info;
+	ret = layr->dn->transmit(layr->dn, pkt);
+	if (ret < 0)
+		cfpkt_extr_head(pkt, &videoheader, 4);
+	return ret;
+}