author    Philippe Langlais <philippe.langlais@stericsson.com>  2012-06-04 19:45:37 +0800
committer Philippe Langlais <philippe.langlais@stericsson.com>  2012-06-04 19:45:37 +0800
commit    78b63e0cee40de1adbe69ceee752ae3b6a6d36e2 (patch)
tree      eb40be5a6a78d4b4d845d7befbc99d3eaee6b0b2
parent    5f5b8f72f5b5d14b2b68e9b0cdf0909cf15ad178 (diff)
parent    2226b173a9085faf3da513181c7e177cc7c66db1 (diff)
Merge topic branch 'caif' into integration-linux-ux500
 drivers/net/caif/caif_serial.c  |  1 +
 drivers/net/caif/caif_shmcore.c | 52 ++++++++++++++++++++++++++++++----------
 2 files changed, 43 insertions(+), 10 deletions(-)
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 8a3054b8481..957363ceae4 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -182,6 +182,7 @@ static void ldisc_receive(struct tty_struct *tty, const u8 *data,
 	 * This is not yet handled.
 	 */
+	BUG_ON(ser->dev == NULL);
 	/*
 	 * Workaround for garbage at start of transmission,
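
The added BUG_ON halts the kernel if the line discipline delivers bytes before the CAIF net device has been attached. A softer alternative, sketched here with the field names used by caif_serial.c (a hypothetical variant, not what the patch does; ldisc_receive_checked is an invented name), would warn and drop the input instead:

static void ldisc_receive_checked(struct tty_struct *tty, const u8 *data,
				char *flags, int count)
{
	struct ser_device *ser = tty->disc_data;

	/* Tolerate early delivery instead of panicking the machine. */
	if (WARN_ON(!ser || !ser->dev))
		return;

	/* ...normal receive path as in ldisc_receive()... */
}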
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
index 5b2041319a3..fc55bf65f1c 100644
--- a/drivers/net/caif/caif_shmcore.c
+++ b/drivers/net/caif/caif_shmcore.c
@@ -13,6 +13,7 @@
 #include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/if_arp.h>
+#include <linux/kthread.h>
 #include <net/caif/caif_device.h>
 #include <net/caif/caif_shm.h>
@@ -107,8 +108,13 @@ struct shmdrv_layer {
 	struct workqueue_struct *pshm_tx_workqueue;
 	struct workqueue_struct *pshm_rx_workqueue;
+	struct kthread_worker pshm_flow_ctrl_kw;
+	struct task_struct *pshm_flow_ctrl_kw_task;
+
 	struct work_struct shm_tx_work;
 	struct work_struct shm_rx_work;
+	struct kthread_work shm_flow_on_work;
+	struct kthread_work shm_flow_off_work;
 	struct sk_buff_head sk_qhead;
 	struct shmdev_layer *pshm_dev;
@@ -126,6 +132,24 @@ static int shm_netdev_close(struct net_device *shm_netdev)
 	return 0;
 }
+static void shm_flow_on_work_func(struct kthread_work *work)
+{
+	struct shmdrv_layer *pshm_drv = container_of(work,
+			struct shmdrv_layer, shm_flow_on_work);
+
+	pshm_drv->cfdev.flowctrl
+			(pshm_drv->pshm_dev->pshm_netdev,
+			CAIF_FLOW_ON);
+}
+
+static void shm_flow_off_work_func(struct kthread_work *work)
+{
+	struct shmdrv_layer *pshm_drv = container_of(work,
+			struct shmdrv_layer, shm_flow_off_work);
+
+	pshm_drv->cfdev.flowctrl
+			(pshm_drv->pshm_dev->pshm_netdev,
+			CAIF_FLOW_OFF);
+}
+
 int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
 {
 	struct buf_list *pbuf;
@@ -238,11 +262,9 @@ int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
 		if ((avail_emptybuff > HIGH_WATERMARK) &&
 				(!pshm_drv->tx_empty_available)) {
 			pshm_drv->tx_empty_available = 1;
+			queue_kthread_work(&pshm_drv->pshm_flow_ctrl_kw,
+					&pshm_drv->shm_flow_on_work);
 			spin_unlock_irqrestore(&pshm_drv->lock, flags);
-			pshm_drv->cfdev.flowctrl
-					(pshm_drv->pshm_dev->pshm_netdev,
-					CAIF_FLOW_ON);
-
 			/* Schedule the work queue. if required */
 			if (!work_pending(&pshm_drv->shm_tx_work))
@@ -426,11 +448,8 @@ static void shm_tx_work_func(struct work_struct *tx_work)
 				pshm_drv->tx_empty_available) {
 			/* Update blocking condition. */
 			pshm_drv->tx_empty_available = 0;
-			spin_unlock_irqrestore(&pshm_drv->lock, flags);
-			pshm_drv->cfdev.flowctrl
-					(pshm_drv->pshm_dev->pshm_netdev,
-					CAIF_FLOW_OFF);
-			spin_lock_irqsave(&pshm_drv->lock, flags);
+			queue_kthread_work(&pshm_drv->pshm_flow_ctrl_kw,
+					&pshm_drv->shm_flow_off_work);
 		}
 		/*
 		 * We simply return back to the caller if we do not have space
@@ -503,7 +522,8 @@ static void shm_tx_work_func(struct work_struct *tx_work)
 			pbuf->frames++;
 			pbuf->frm_ofs += frmlen + (frmlen % 32);
-		} while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);
+		} while (pbuf->frames < SHM_MAX_FRMS_PER_BUF &&
+				pbuf->frm_ofs < pbuf->len);
 		/* Assign buffer as full. */
 		list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
@@ -562,6 +582,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
 {
 	int result, j;
 	struct shmdrv_layer *pshm_drv = NULL;
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 	pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
 			"cfshm%d", shm_netdev_setup);
@@ -622,11 +643,20 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
 	INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
 	INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);
+	init_kthread_work(&pshm_drv->shm_flow_on_work, shm_flow_on_work_func);
+	init_kthread_work(&pshm_drv->shm_flow_off_work, shm_flow_off_work_func);
+
 	pshm_drv->pshm_tx_workqueue =
 			create_singlethread_workqueue("shm_tx_work");
 	pshm_drv->pshm_rx_workqueue =
 			create_singlethread_workqueue("shm_rx_work");
+	init_kthread_worker(&pshm_drv->pshm_flow_ctrl_kw);
+	pshm_drv->pshm_flow_ctrl_kw_task = kthread_run(kthread_worker_fn,
+			&pshm_drv->pshm_flow_ctrl_kw, "pshm_caif_flow_ctrl");
+	/* must use the FIFO scheduler as it is realtime sensitive */
+	sched_setscheduler(pshm_drv->pshm_flow_ctrl_kw_task, SCHED_FIFO, &param);
+
 	for (j = 0; j < NR_TX_BUF; j++) {
 		struct buf_list *tx_buf =
 				kmalloc(sizeof(struct buf_list), GFP_KERNEL);
@@ -744,6 +774,8 @@ void caif_shmcore_remove(struct net_device *pshm_netdev)
 	/* Destroy work queues. */
 	destroy_workqueue(pshm_drv->pshm_tx_workqueue);
 	destroy_workqueue(pshm_drv->pshm_rx_workqueue);
+	flush_kthread_worker(&pshm_drv->pshm_flow_ctrl_kw);
+	kthread_stop(pshm_drv->pshm_flow_ctrl_kw_task);
 	unregister_netdev(pshm_netdev);
 }
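
Taken together, the shmcore changes stop invoking the CAIF flow-control callback directly from the rx/tx paths, where the driver had to drop and retake pshm_drv->lock around a callback that may sleep, and instead queue the call to a dedicated realtime kthread_worker. Below is a minimal, self-contained sketch of that pattern using the same API generation as this tree (init_kthread_worker() and friends; later kernels rename these kthread_init_worker(), kthread_queue_work(), kthread_flush_worker()); all demo_* identifiers are hypothetical:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

struct demo_ctx {
	struct kthread_worker kw;	/* dedicated worker thread */
	struct task_struct *kw_task;
	struct kthread_work flow_work;	/* deferred flow-ctrl call */
};

static void demo_flow_work_func(struct kthread_work *work)
{
	struct demo_ctx *ctx = container_of(work, struct demo_ctx, flow_work);

	/* Runs in the worker thread: free to sleep, no caller locks held. */
	(void)ctx;
}

static int demo_start(struct demo_ctx *ctx)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	init_kthread_worker(&ctx->kw);
	init_kthread_work(&ctx->flow_work, demo_flow_work_func);

	ctx->kw_task = kthread_run(kthread_worker_fn, &ctx->kw,
			"demo_flow_ctrl");
	if (IS_ERR(ctx->kw_task))
		return PTR_ERR(ctx->kw_task);

	/* FIFO scheduling, as in the probe hunk above, bounds latency. */
	sched_setscheduler(ctx->kw_task, SCHED_FIFO, &param);
	return 0;
}

/*
 * Producer side: queue_kthread_work() only links the work item in under
 * the worker's own spinlock, so it is safe to call with a spinlock held
 * and IRQs off, which is why the patch can queue before
 * spin_unlock_irqrestore().
 */
static void demo_kick(struct demo_ctx *ctx)
{
	queue_kthread_work(&ctx->kw, &ctx->flow_work);
}

static void demo_stop(struct demo_ctx *ctx)
{
	flush_kthread_worker(&ctx->kw);	/* drain pending work */
	kthread_stop(ctx->kw_task);	/* then stop the thread */
}

One design note: a kthread_work, unlike a work_struct on a shared workqueue, is executed by one known thread, so its scheduling class and priority can be pinned; that is what the SCHED_FIFO call in the probe path buys over the existing singlethread workqueues.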