summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>2011-10-06 19:05:02 +0200
committerPhilippe LANGLAIS <philippe.langlais@stericsson.com>2011-10-13 10:23:49 +0200
commit847817a7ac38983d030d87d4c1b38e379a26fe8e (patch)
treeaf0b449b8359ed45a234a377c0a81bde69a36653
parent5a3348f06558026c5c842112b83674e00903064c (diff)
cw1200: Accurate reporting of TX status.
* Accurate reporting of TX status is implemented (needed for UAPSD and PSPOLL). * Leaking of TX rate policies is fixed. * skb destructor is implemented. * Time to live for queued frames is implemented. * cw1200_tx is split by separate TX handlers (like in mac80211). * cw1200_skb_to_wsm is not existing anymore. * BT coex: null frames are prioritized as management frames. * Debug: added printing of rate policies in use. ST-Ericsson ID: 354950 ST-Ericsson ID: 360749 Change-Id: I920d398418df99c21b37a16ef16591e58a82151d Signed-off-by: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com> Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/33542 Reviewed-by: Bartosz MARKOWSKI <bartosz.markowski@tieto.com> Reviewed-by: Philippe LANGLAIS <philippe.langlais@stericsson.com>
-rwxr-xr-xdrivers/staging/cw1200/ap.c4
-rw-r--r--drivers/staging/cw1200/debug.c9
-rw-r--r--drivers/staging/cw1200/debug.h10
-rw-r--r--drivers/staging/cw1200/main.c19
-rw-r--r--drivers/staging/cw1200/pm.c5
-rw-r--r--drivers/staging/cw1200/queue.c203
-rw-r--r--drivers/staging/cw1200/queue.h40
-rw-r--r--drivers/staging/cw1200/sta.c8
-rw-r--r--drivers/staging/cw1200/txrx.c617
-rw-r--r--drivers/staging/cw1200/txrx.h10
-rw-r--r--drivers/staging/cw1200/wsm.c30
-rw-r--r--drivers/staging/cw1200/wsm.h4
12 files changed, 566 insertions, 393 deletions
diff --git a/drivers/staging/cw1200/ap.c b/drivers/staging/cw1200/ap.c
index 5a75ac508d1..7b1ac0dfada 100755
--- a/drivers/staging/cw1200/ap.c
+++ b/drivers/staging/cw1200/ap.c
@@ -77,10 +77,6 @@ int cw1200_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (priv->mode != NL80211_IFTYPE_AP || !sta_priv->link_id)
return 0;
- /* HACK! To be removed when accurate TX ststus
- * reporting for dropped frames is implemented. */
- ieee80211_sta_eosp_irqsafe(sta);
-
entry = &priv->link_id_db[sta_priv->link_id - 1];
spin_lock_bh(&priv->ps_state_lock);
entry->status = CW1200_LINK_SOFT;
diff --git a/drivers/staging/cw1200/debug.c b/drivers/staging/cw1200/debug.c
index db927620f26..91c690ad4e6 100644
--- a/drivers/staging/cw1200/debug.c
+++ b/drivers/staging/cw1200/debug.c
@@ -101,6 +101,7 @@ static void cw1200_debug_print_map(struct seq_file *seq,
static int cw1200_status_show(struct seq_file *seq, void *v)
{
int i;
+ struct list_head *item;
struct cw1200_common *priv = seq->private;
struct cw1200_debug_priv *d = priv->debug;
seq_puts(seq, "CW1200 Wireless LAN driver status\n");
@@ -208,6 +209,12 @@ static int cw1200_status_show(struct seq_file *seq, void *v)
priv->long_frame_max_tx_count);
seq_printf(seq, "Short retr: %d\n",
priv->short_frame_max_tx_count);
+ spin_lock_bh(&priv->tx_policy_cache.lock);
+ i = 0;
+ list_for_each(item, &priv->tx_policy_cache.used)
+ ++i;
+ spin_unlock_bh(&priv->tx_policy_cache.lock);
+ seq_printf(seq, "RC in use: %d\n", i);
seq_puts(seq, "\n");
for (i = 0; i < 4; ++i) {
@@ -283,6 +290,8 @@ static int cw1200_status_show(struct seq_file *seq, void *v)
d->tx_cache_miss);
seq_printf(seq, "TX copy: %d\n",
d->tx_copy);
+ seq_printf(seq, "TX TTL: %d\n",
+ d->tx_ttl);
seq_printf(seq, "Scan: %s\n",
atomic_read(&priv->scan.in_progress) ? "active" : "idle");
seq_printf(seq, "Led state: 0x%.2X\n",
diff --git a/drivers/staging/cw1200/debug.h b/drivers/staging/cw1200/debug.h
index e7fc4d2daef..6f7d8acab00 100644
--- a/drivers/staging/cw1200/debug.h
+++ b/drivers/staging/cw1200/debug.h
@@ -15,6 +15,7 @@ struct cw1200_debug_priv {
int tx_multi_frames;
int tx_cache_miss;
int tx_copy;
+ int tx_ttl;
};
int cw1200_debug_init(struct cw1200_common *priv);
@@ -57,6 +58,11 @@ static inline void cw1200_debug_tx_copy(struct cw1200_common *priv)
++priv->debug->tx_copy;
}
+static inline void cw1200_debug_tx_ttl(struct cw1200_common *priv)
+{
+ ++priv->debug->tx_ttl;
+}
+
#else /* CONFIG_CW1200_DEBUGFS */
static inline int cw1200_debug_init(struct cw1200_common *priv)
@@ -97,6 +103,10 @@ static inline void cw1200_debug_tx_copy(struct cw1200_common *priv)
{
}
+static inline void cw1200_debug_tx_ttl(struct cw1200_common *priv)
+{
+}
+
#endif /* CONFIG_CW1200_DEBUGFS */
#endif /* CW1200_DEBUG_H_INCLUDED */
diff --git a/drivers/staging/cw1200/main.c b/drivers/staging/cw1200/main.c
index d2f6cee19ee..94398fb222f 100644
--- a/drivers/staging/cw1200/main.c
+++ b/drivers/staging/cw1200/main.c
@@ -199,6 +199,13 @@ static struct ieee80211_supported_band cw1200_band_5ghz = {
};
#endif /* CONFIG_CW1200_5GHZ_SUPPORT */
+static const unsigned long cw1200_ttl[] = {
+ 1 * HZ, /* VO */
+ 2 * HZ, /* VI */
+ 5 * HZ, /* BE */
+ 10 * HZ /* BK */
+};
+
static const struct ieee80211_ops cw1200_ops = {
.start = cw1200_start,
.stop = cw1200_stop,
@@ -342,17 +349,19 @@ struct ieee80211_hw *cw1200_init_common(size_t priv_data_len)
}
if (unlikely(cw1200_queue_stats_init(&priv->tx_queue_stats,
- CW1200_LINK_ID_MAX))) {
+ CW1200_LINK_ID_MAX,
+ cw1200_skb_dtor,
+ priv))) {
ieee80211_free_hw(hw);
return NULL;
}
for (i = 0; i < 4; ++i) {
if (unlikely(cw1200_queue_init(&priv->tx_queue[i],
- &priv->tx_queue_stats, i, 16))) {
+ &priv->tx_queue_stats, i, 16,
+ cw1200_ttl[i]))) {
for (; i > 0; i--)
- cw1200_queue_deinit(&priv->tx_queue[i - 1],
- priv);
+ cw1200_queue_deinit(&priv->tx_queue[i - 1]);
cw1200_queue_stats_deinit(&priv->tx_queue_stats);
ieee80211_free_hw(hw);
return NULL;
@@ -442,7 +451,7 @@ void cw1200_unregister_common(struct ieee80211_hw *dev)
}
for (i = 0; i < 4; ++i)
- cw1200_queue_deinit(&priv->tx_queue[i], priv);
+ cw1200_queue_deinit(&priv->tx_queue[i]);
cw1200_queue_stats_deinit(&priv->tx_queue_stats);
cw1200_pm_deinit(&priv->pm_state);
}
diff --git a/drivers/staging/cw1200/pm.c b/drivers/staging/cw1200/pm.c
index 3cb6b62b530..3d7570767ed 100644
--- a/drivers/staging/cw1200/pm.c
+++ b/drivers/staging/cw1200/pm.c
@@ -95,6 +95,7 @@ void cw1200_pm_deinit(struct cw1200_pm_state *pm)
wake_lock_destroy(&pm->wakelock);
cw1200_pm_deinit_common(pm);
}
+
void cw1200_pm_stay_awake(struct cw1200_pm_state *pm,
unsigned long tmo)
{
@@ -205,6 +206,10 @@ int cw1200_wow_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
return -EAGAIN;
#endif
+ /* Do not suspend when datapath is not idle */
+ if (priv->tx_queue_stats.num_queued)
+ return -EBUSY;
+
/* Make sure there is no configuration requests in progress. */
if (!mutex_trylock(&priv->conf_mutex))
return -EBUSY;
diff --git a/drivers/staging/cw1200/queue.c b/drivers/staging/cw1200/queue.c
index 83b294dc1da..d19f87acf4d 100644
--- a/drivers/staging/cw1200/queue.c
+++ b/drivers/staging/cw1200/queue.c
@@ -11,37 +11,37 @@
#include "net/mac80211.h"
#include "queue.h"
-#include "wsm.h"
-#include "txrx.h"
#include "cw1200.h"
+#include "debug.h"
/* private */ struct cw1200_queue_item
{
struct list_head head;
struct sk_buff *skb;
u32 packetID;
+ unsigned long timestamp;
struct cw1200_txpriv txpriv;
u8 generation;
};
-static inline void __cw1200_queue_lock(struct cw1200_queue *queue,
- struct cw1200_common *cw1200)
+static inline void __cw1200_queue_lock(struct cw1200_queue *queue)
{
+ struct cw1200_queue_stats *stats = queue->stats;
if (queue->tx_locked_cnt++ == 0) {
txrx_printk(KERN_DEBUG "[TX] Queue %d is locked.\n",
queue->queue_id);
- ieee80211_stop_queue(cw1200->hw, queue->queue_id);
+ ieee80211_stop_queue(stats->priv->hw, queue->queue_id);
}
}
-static inline void __cw1200_queue_unlock(struct cw1200_queue *queue,
- struct cw1200_common *cw1200)
+static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)
{
+ struct cw1200_queue_stats *stats = queue->stats;
BUG_ON(!queue->tx_locked_cnt);
if (--queue->tx_locked_cnt == 0) {
txrx_printk(KERN_DEBUG "[TX] Queue %d is unlocked.\n",
queue->queue_id);
- ieee80211_wake_queue(cw1200->hw, queue->queue_id);
+ ieee80211_wake_queue(stats->priv->hw, queue->queue_id);
}
}
@@ -65,11 +65,95 @@ static inline u32 cw1200_queue_make_packet_id(u8 queue_generation, u8 queue_id,
((u32)queue_generation << 24);
}
+static void cw1200_queue_post_gc(struct cw1200_queue_stats *stats,
+ struct list_head *gc_list)
+{
+ struct cw1200_queue_item *item;
+
+ while (!list_empty(gc_list)) {
+ item = list_first_entry(
+ gc_list, struct cw1200_queue_item, head);
+ list_del(&item->head);
+ stats->skb_dtor(stats->priv, item->skb, &item->txpriv);
+ kfree(item);
+ }
+}
+
+static void cw1200_queue_register_post_gc(struct list_head *gc_list,
+ struct cw1200_queue_item *item)
+{
+ struct cw1200_queue_item *gc_item;
+ gc_item = kmalloc(sizeof(struct cw1200_queue_item),
+ GFP_KERNEL | GFP_ATOMIC);
+ BUG_ON(!gc_item);
+ memcpy(gc_item, item, sizeof(struct cw1200_queue_item));
+ list_move_tail(&gc_item->head, gc_list);
+}
+
+static void __cw1200_queue_gc(struct cw1200_queue *queue,
+ struct list_head *head,
+ bool unlock)
+{
+ struct cw1200_queue_stats *stats = queue->stats;
+ struct cw1200_queue_item *item = NULL;
+ bool wakeup_stats = false;
+
+ while (!list_empty(&queue->queue)) {
+ item = list_first_entry(
+ &queue->queue, struct cw1200_queue_item, head);
+ if (jiffies - item->timestamp < queue->ttl)
+ break;
+ --queue->num_queued;
+ --queue->link_map_cache[item->txpriv.link_id];
+ spin_lock_bh(&stats->lock);
+ --stats->num_queued;
+ if (!--stats->link_map_cache[item->txpriv.link_id])
+ wakeup_stats = true;
+ spin_unlock_bh(&stats->lock);
+ cw1200_debug_tx_ttl(stats->priv);
+ cw1200_queue_register_post_gc(head, item);
+ item->skb = NULL;
+ list_move_tail(&item->head, &queue->free_pool);
+ }
+
+ if (wakeup_stats)
+ wake_up(&stats->wait_link_id_empty);
+
+ if (queue->overfull) {
+ if (queue->num_queued <= (queue->capacity >> 1)) {
+ queue->overfull = false;
+ if (unlock)
+ __cw1200_queue_unlock(queue);
+ } else {
+ unsigned long tmo = item->timestamp + queue->ttl;
+ mod_timer(&queue->gc, tmo);
+ cw1200_pm_stay_awake(&stats->priv->pm_state,
+ tmo - jiffies);
+ }
+ }
+}
+
+static void cw1200_queue_gc(unsigned long arg)
+{
+ LIST_HEAD(list);
+ struct cw1200_queue *queue =
+ (struct cw1200_queue *)arg;
+
+ spin_lock_bh(&queue->lock);
+ __cw1200_queue_gc(queue, &list, true);
+ spin_unlock_bh(&queue->lock);
+ cw1200_queue_post_gc(queue->stats, &list);
+}
+
int cw1200_queue_stats_init(struct cw1200_queue_stats *stats,
- size_t map_capacity)
+ size_t map_capacity,
+ cw1200_queue_skb_dtor_t skb_dtor,
+ struct cw1200_common *priv)
{
memset(stats, 0, sizeof(*stats));
stats->map_capacity = map_capacity;
+ stats->skb_dtor = skb_dtor;
+ stats->priv = priv;
spin_lock_init(&stats->lock);
init_waitqueue_head(&stats->wait_link_id_empty);
@@ -84,7 +168,8 @@ int cw1200_queue_stats_init(struct cw1200_queue_stats *stats,
int cw1200_queue_init(struct cw1200_queue *queue,
struct cw1200_queue_stats *stats,
u8 queue_id,
- size_t capacity)
+ size_t capacity,
+ unsigned long ttl)
{
size_t i;
@@ -92,10 +177,14 @@ int cw1200_queue_init(struct cw1200_queue *queue,
queue->stats = stats;
queue->capacity = capacity;
queue->queue_id = queue_id;
+ queue->ttl = ttl;
INIT_LIST_HEAD(&queue->queue);
INIT_LIST_HEAD(&queue->pending);
INIT_LIST_HEAD(&queue->free_pool);
spin_lock_init(&queue->lock);
+ init_timer(&queue->gc);
+ queue->gc.data = (unsigned long)queue;
+ queue->gc.function = cw1200_queue_gc;
queue->pool = kzalloc(sizeof(struct cw1200_queue_item) * capacity,
GFP_KERNEL);
@@ -116,9 +205,10 @@ int cw1200_queue_init(struct cw1200_queue *queue,
return 0;
}
-int cw1200_queue_clear(struct cw1200_queue *queue, struct cw1200_common *priv)
+int cw1200_queue_clear(struct cw1200_queue *queue)
{
int i;
+ LIST_HEAD(gc_list);
struct cw1200_queue_stats *stats = queue->stats;
spin_lock_bh(&queue->lock);
@@ -128,15 +218,12 @@ int cw1200_queue_clear(struct cw1200_queue *queue, struct cw1200_common *priv)
struct cw1200_queue_item *item = list_first_entry(
&queue->pending, struct cw1200_queue_item, head);
WARN_ON(!item->skb);
- if (likely(item->skb)) {
- dev_kfree_skb_any(item->skb);
- item->skb = NULL;
- }
+ cw1200_queue_register_post_gc(&gc_list, item);
+ item->skb = NULL;
list_move_tail(&item->head, &queue->free_pool);
}
queue->num_queued = 0;
queue->num_pending = 0;
- queue->num_sent = 0;
spin_lock_bh(&stats->lock);
for (i = 0; i < stats->map_capacity; ++i) {
@@ -147,10 +234,11 @@ int cw1200_queue_clear(struct cw1200_queue *queue, struct cw1200_common *priv)
spin_unlock_bh(&stats->lock);
if (unlikely(queue->overfull)) {
queue->overfull = false;
- __cw1200_queue_unlock(queue, priv);
+ __cw1200_queue_unlock(queue);
}
spin_unlock_bh(&queue->lock);
wake_up(&stats->wait_link_id_empty);
+ cw1200_queue_post_gc(stats, &gc_list);
return 0;
}
@@ -160,10 +248,10 @@ void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats)
stats->link_map_cache = NULL;
}
-void cw1200_queue_deinit(struct cw1200_queue *queue,
- struct cw1200_common *priv)
+void cw1200_queue_deinit(struct cw1200_queue *queue)
{
- cw1200_queue_clear(queue, priv);
+ cw1200_queue_clear(queue);
+ del_timer_sync(&queue->gc);
INIT_LIST_HEAD(&queue->free_pool);
kfree(queue->pool);
kfree(queue->link_map_cache);
@@ -196,18 +284,14 @@ size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
return ret;
}
-int cw1200_queue_put(struct cw1200_queue *queue, struct cw1200_common *priv,
- struct sk_buff *skb, struct cw1200_txpriv *txpriv)
+int cw1200_queue_put(struct cw1200_queue *queue,
+ struct sk_buff *skb,
+ struct cw1200_txpriv *txpriv)
{
- int ret;
- struct wsm_tx *wsm;
+ int ret = 0;
+ LIST_HEAD(gc_list);
struct cw1200_queue_stats *stats = queue->stats;
- wsm = (struct wsm_tx *)skb_push(skb, sizeof(struct wsm_tx));
- ret = cw1200_skb_to_wsm(priv, skb, wsm, txpriv);
- if (ret)
- return ret;
-
if (txpriv->link_id >= queue->stats->map_capacity)
return -EINVAL;
@@ -224,7 +308,7 @@ int cw1200_queue_put(struct cw1200_queue *queue, struct cw1200_common *priv,
item->packetID = cw1200_queue_make_packet_id(
queue->generation, queue->queue_id,
item->generation, item - queue->pool);
- wsm->packetID = __cpu_to_le32(item->packetID);
+ item->timestamp = jiffies;
++queue->num_queued;
++queue->link_map_cache[txpriv->link_id];
@@ -236,12 +320,17 @@ int cw1200_queue_put(struct cw1200_queue *queue, struct cw1200_common *priv,
if (queue->num_queued >= queue->capacity) {
queue->overfull = true;
- __cw1200_queue_lock(queue, priv);
+ __cw1200_queue_gc(queue, &gc_list, false);
+ if (queue->overfull)
+ __cw1200_queue_lock(queue);
+
}
} else {
ret = -ENOENT;
}
spin_unlock_bh(&queue->lock);
+ if (unlikely(!list_empty(&gc_list)))
+ cw1200_queue_post_gc(stats, &gc_list);
return ret;
}
@@ -268,6 +357,7 @@ int cw1200_queue_get(struct cw1200_queue *queue,
*tx = (struct wsm_tx *)item->skb->data;
*tx_info = IEEE80211_SKB_CB(item->skb);
*txpriv = &item->txpriv;
+ (*tx)->packetID = __cpu_to_le32(item->packetID);
list_move_tail(&item->head, &queue->pending);
++queue->num_pending;
--queue->link_map_cache[item->txpriv.link_id];
@@ -307,7 +397,6 @@ int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packetID)
WARN_ON(1);
ret = -ENOENT;
} else {
- struct wsm_tx *wsm = (struct wsm_tx *)item->skb->data;
--queue->num_pending;
++queue->link_map_cache[item->txpriv.link_id];
@@ -319,7 +408,6 @@ int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packetID)
item->generation = ++item_generation;
item->packetID = cw1200_queue_make_packet_id(
queue_generation, queue_id, item_generation, item_id);
- wsm->packetID = __cpu_to_le32(item->packetID);
list_move(&item->head, &queue->queue);
}
spin_unlock_bh(&queue->lock);
@@ -333,7 +421,6 @@ int cw1200_queue_requeue_all(struct cw1200_queue *queue)
while (!list_empty(&queue->pending)) {
struct cw1200_queue_item *item = list_entry(
queue->pending.prev, struct cw1200_queue_item, head);
- struct wsm_tx *wsm = (struct wsm_tx *)item->skb->data;
--queue->num_pending;
++queue->link_map_cache[item->txpriv.link_id];
@@ -347,7 +434,6 @@ int cw1200_queue_requeue_all(struct cw1200_queue *queue)
item->packetID = cw1200_queue_make_packet_id(
queue->generation, queue->queue_id,
item->generation, item - queue->pool);
- wsm->packetID = __cpu_to_le32(item->packetID);
list_move(&item->head, &queue->queue);
}
spin_unlock_bh(&queue->lock);
@@ -355,13 +441,15 @@ int cw1200_queue_requeue_all(struct cw1200_queue *queue)
return 0;
}
-int cw1200_queue_remove(struct cw1200_queue *queue, struct cw1200_common *priv,
- u32 packetID)
+int cw1200_queue_remove(struct cw1200_queue *queue, u32 packetID)
{
int ret = 0;
u8 queue_generation, queue_id, item_generation, item_id;
struct cw1200_queue_item *item;
- struct sk_buff *skb_to_free = NULL;
+ struct cw1200_queue_stats *stats = queue->stats;
+ struct sk_buff *gc_skb = NULL;
+ struct cw1200_txpriv gc_txpriv;
+
cw1200_queue_parse_id(packetID, &queue_generation, &queue_id,
&item_generation, &item_id);
@@ -378,12 +466,13 @@ int cw1200_queue_remove(struct cw1200_queue *queue, struct cw1200_common *priv,
WARN_ON(1);
ret = -ENOENT;
} else {
+ gc_txpriv = item->txpriv;
+ gc_skb = item->skb;
+ item->skb = NULL;
--queue->num_pending;
--queue->num_queued;
++queue->num_sent;
++item->generation;
- skb_to_free = item->skb;
- item->skb = NULL;
/* Do not use list_move_tail here, but list_move:
* try to utilize cache row.
*/
@@ -392,20 +481,19 @@ int cw1200_queue_remove(struct cw1200_queue *queue, struct cw1200_common *priv,
if (unlikely(queue->overfull) &&
(queue->num_queued <= (queue->capacity >> 1))) {
queue->overfull = false;
- __cw1200_queue_unlock(queue, priv);
+ __cw1200_queue_unlock(queue);
}
}
spin_unlock_bh(&queue->lock);
- if (skb_to_free)
- dev_kfree_skb_any(item->skb);
+ if (gc_skb)
+ stats->skb_dtor(stats->priv, gc_skb, &gc_txpriv);
return ret;
}
int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packetID,
- struct sk_buff **skb,
- const struct cw1200_txpriv **txpriv)
+ struct sk_buff **skb)
{
int ret = 0;
u8 queue_generation, queue_id, item_generation, item_id;
@@ -427,42 +515,25 @@ int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packetID,
ret = -ENOENT;
} else {
*skb = item->skb;
- *txpriv = &item->txpriv;
- item->skb = NULL;
}
spin_unlock_bh(&queue->lock);
return ret;
}
-void cw1200_queue_lock(struct cw1200_queue *queue, struct cw1200_common *cw1200)
+void cw1200_queue_lock(struct cw1200_queue *queue)
{
spin_lock_bh(&queue->lock);
- __cw1200_queue_lock(queue, cw1200);
+ __cw1200_queue_lock(queue);
spin_unlock_bh(&queue->lock);
}
-void cw1200_queue_unlock(struct cw1200_queue *queue,
- struct cw1200_common *cw1200)
+void cw1200_queue_unlock(struct cw1200_queue *queue)
{
spin_lock_bh(&queue->lock);
- __cw1200_queue_unlock(queue, cw1200);
+ __cw1200_queue_unlock(queue);
spin_unlock_bh(&queue->lock);
}
-/*
-int cw1200_queue_get_stats(struct cw1200_queue *queue,
- struct ieee80211_tx_queue_stats *stats)
-{
- spin_lock_bh(&queue->lock);
- stats->len = queue->num_queued;
- stats->limit = queue->capacity;
- stats->count = queue->num_sent;
- spin_unlock_bh(&queue->lock);
-
- return 0;
-}
-*/
-
bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats,
u32 link_id_map)
{
diff --git a/drivers/staging/cw1200/queue.h b/drivers/staging/cw1200/queue.h
index 502496b142b..bff33625c8a 100644
--- a/drivers/staging/cw1200/queue.h
+++ b/drivers/staging/cw1200/queue.h
@@ -22,6 +22,10 @@
/* forward */ struct cw1200_queue_stats;
+typedef void (*cw1200_queue_skb_dtor_t)(struct cw1200_common *priv,
+ struct sk_buff *skb,
+ const struct cw1200_txpriv *txpriv);
+
struct cw1200_queue {
struct cw1200_queue_stats *stats;
size_t capacity;
@@ -38,6 +42,8 @@ struct cw1200_queue {
spinlock_t lock;
u8 queue_id;
u8 generation;
+ struct timer_list gc;
+ unsigned long ttl;
};
struct cw1200_queue_stats {
@@ -46,29 +52,36 @@ struct cw1200_queue_stats {
int num_queued;
size_t map_capacity;
wait_queue_head_t wait_link_id_empty;
+ cw1200_queue_skb_dtor_t skb_dtor;
+ struct cw1200_common *priv;
};
struct cw1200_txpriv {
u8 link_id;
u8 raw_link_id;
u8 tid;
+ u8 rate_id;
+ u8 offset;
};
int cw1200_queue_stats_init(struct cw1200_queue_stats *stats,
- size_t map_capacity);
+ size_t map_capacity,
+ cw1200_queue_skb_dtor_t skb_dtor,
+ struct cw1200_common *priv);
int cw1200_queue_init(struct cw1200_queue *queue,
struct cw1200_queue_stats *stats,
u8 queue_id,
- size_t capacity);
-int cw1200_queue_clear(struct cw1200_queue *queue, struct cw1200_common *priv);
+ size_t capacity,
+ unsigned long ttl);
+int cw1200_queue_clear(struct cw1200_queue *queue);
void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats);
-void cw1200_queue_deinit(struct cw1200_queue *queue,
- struct cw1200_common *priv);
+void cw1200_queue_deinit(struct cw1200_queue *queue);
size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
u32 link_id_map);
-int cw1200_queue_put(struct cw1200_queue *queue, struct cw1200_common *cw1200,
- struct sk_buff *skb, struct cw1200_txpriv *txpriv);
+int cw1200_queue_put(struct cw1200_queue *queue,
+ struct sk_buff *skb,
+ struct cw1200_txpriv *txpriv);
int cw1200_queue_get(struct cw1200_queue *queue,
u32 link_id_map,
struct wsm_tx **tx,
@@ -76,15 +89,12 @@ int cw1200_queue_get(struct cw1200_queue *queue,
const struct cw1200_txpriv **txpriv);
int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packetID);
int cw1200_queue_requeue_all(struct cw1200_queue *queue);
-int cw1200_queue_remove(struct cw1200_queue *queue, struct cw1200_common *priv,
- u32 packetID);
+int cw1200_queue_remove(struct cw1200_queue *queue,
+ u32 packetID);
int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packetID,
- struct sk_buff **skb,
- const struct cw1200_txpriv **txpriv);
-void cw1200_queue_lock(struct cw1200_queue *queue,
- struct cw1200_common *cw1200);
-void cw1200_queue_unlock(struct cw1200_queue *queue,
- struct cw1200_common *cw1200);
+ struct sk_buff **skb);
+void cw1200_queue_lock(struct cw1200_queue *queue);
+void cw1200_queue_unlock(struct cw1200_queue *queue);
bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats,
u32 link_id_map);
diff --git a/drivers/staging/cw1200/sta.c b/drivers/staging/cw1200/sta.c
index b6ea70f3418..1ba769df65d 100644
--- a/drivers/staging/cw1200/sta.c
+++ b/drivers/staging/cw1200/sta.c
@@ -136,7 +136,7 @@ void cw1200_stop(struct ieee80211_hw *dev)
priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
for (i = 0; i < 4; i++)
- cw1200_queue_clear(&priv->tx_queue[i], priv);
+ cw1200_queue_clear(&priv->tx_queue[i]);
/* HACK! */
if (atomic_xchg(&priv->tx_lock, 1) != 1)
@@ -760,7 +760,7 @@ int __cw1200_flush(struct cw1200_common *priv, bool drop)
*/
if (drop) {
for (i = 0; i < 4; ++i)
- cw1200_queue_clear(&priv->tx_queue[i], priv);
+ cw1200_queue_clear(&priv->tx_queue[i]);
} else {
ret = wait_event_timeout(
priv->tx_queue_stats.wait_link_id_empty,
@@ -1136,7 +1136,7 @@ void cw1200_join_work(struct work_struct *work)
bssid, NULL, 0, 0, 0);
if (!bss) {
cw1200_queue_remove(&priv->tx_queue[queueId],
- priv, __le32_to_cpu(wsm->packetID));
+ __le32_to_cpu(wsm->packetID));
wsm_unlock_tx(priv);
return;
}
@@ -1211,7 +1211,7 @@ void cw1200_join_work(struct work_struct *work)
memset(&priv->join_bssid[0],
0, sizeof(priv->join_bssid));
cw1200_queue_remove(&priv->tx_queue[queueId],
- priv, __le32_to_cpu(wsm->packetID));
+ __le32_to_cpu(wsm->packetID));
cancel_delayed_work_sync(&priv->join_timeout);
cw1200_update_listening(priv, priv->listening);
} else {
diff --git a/drivers/staging/cw1200/txrx.c b/drivers/staging/cw1200/txrx.c
index 04b7b0d091e..72f5acdccdc 100644
--- a/drivers/staging/cw1200/txrx.c
+++ b/drivers/staging/cw1200/txrx.c
@@ -24,16 +24,10 @@
#define tx_policy_printk(...)
#endif
-/* txrx private */
-struct __cw1200_txpriv {
- struct cw1200_txpriv super;
- u16 ethertype;
-};
+#define CW1200_INVALID_RATE_ID (0xFF)
static int cw1200_handle_action_rx(struct cw1200_common *priv,
struct sk_buff *skb);
-static int cw1200_handle_action_tx(struct cw1200_common *priv,
- struct sk_buff *skb);
static const struct ieee80211_rate *
cw1200_get_tx_rate(const struct cw1200_common *priv,
const struct ieee80211_tx_rate *rate);
@@ -45,14 +39,14 @@ static inline void cw1200_tx_queues_lock(struct cw1200_common *priv)
{
int i;
for (i = 0; i < 4; ++i)
- cw1200_queue_lock(&priv->tx_queue[i], priv);
+ cw1200_queue_lock(&priv->tx_queue[i]);
}
static inline void cw1200_tx_queues_unlock(struct cw1200_common *priv)
{
int i;
for (i = 0; i < 4; ++i)
- cw1200_queue_unlock(&priv->tx_queue[i], priv);
+ cw1200_queue_unlock(&priv->tx_queue[i]);
}
/* ******************************************************************** */
@@ -269,7 +263,7 @@ static int tx_policy_get(struct cw1200_common *priv,
return idx;
}
-void tx_policy_put(struct cw1200_common *priv, int idx)
+static void tx_policy_put(struct cw1200_common *priv, int idx)
{
int usage, locked;
struct tx_policy_cache *cache = &priv->tx_policy_cache;
@@ -352,6 +346,18 @@ void tx_policy_upload_work(struct work_struct *work)
/* ******************************************************************** */
/* cw1200 TX implementation */
+struct cw1200_txinfo {
+ struct sk_buff *skb;
+ unsigned queue;
+ struct ieee80211_tx_info *tx_info;
+ const struct ieee80211_rate *rate;
+ struct ieee80211_hdr *hdr;
+ size_t hdrlen;
+ const u8 *da;
+ struct cw1200_sta_priv *sta_priv;
+ struct cw1200_txpriv txpriv;
+};
+
u32 cw1200_rate_mask_to_wsm(struct cw1200_common *priv, u32 rates)
{
u32 ret = 0;
@@ -375,252 +381,292 @@ cw1200_get_tx_rate(const struct cw1200_common *priv,
bitrates[rate->idx];
}
-/* NOTE: cw1200_skb_to_wsm executes in atomic context. */
-int cw1200_skb_to_wsm(struct cw1200_common *priv, struct sk_buff *skb,
- struct wsm_tx *wsm, struct cw1200_txpriv *txpriv)
+static int
+cw1200_tx_h_calc_link_ids(struct cw1200_common *priv,
+ struct cw1200_txinfo *t)
{
- struct __cw1200_txpriv *info =
- container_of(txpriv, struct __cw1200_txpriv, super);
- bool tx_policy_renew = false;
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- const struct ieee80211_rate *rate = cw1200_get_tx_rate(priv,
- &tx_info->control.rates[0]);
- u8 priority = 0;
- memset(wsm, 0, sizeof(*wsm));
- wsm->hdr.len = __cpu_to_le16(skb->len);
- wsm->hdr.id = __cpu_to_le16(0x0004);
- if (rate) {
- wsm->maxTxRate = rate->hw_value;
- if (rate->flags & IEEE80211_TX_RC_MCS) {
- if (cw1200_ht_greenfield(&priv->ht_info))
- wsm->htTxParameters |=
- __cpu_to_le32(WSM_HT_TX_GREENFIELD);
- else
- wsm->htTxParameters |=
- __cpu_to_le32(WSM_HT_TX_MIXED);
+ if (likely(t->tx_info->control.sta && t->sta_priv->link_id))
+ t->txpriv.raw_link_id =
+ t->txpriv.link_id =
+ t->sta_priv->link_id;
+ else if (priv->mode != NL80211_IFTYPE_AP)
+ t->txpriv.raw_link_id =
+ t->txpriv.link_id = 0;
+ else if (is_multicast_ether_addr(t->da)) {
+ if (priv->enable_beacon) {
+ t->txpriv.raw_link_id = 0;
+ t->txpriv.link_id = CW1200_LINK_ID_AFTER_DTIM;
+ } else {
+ t->txpriv.raw_link_id = 0;
+ t->txpriv.link_id = 0;
}
+ } else {
+ t->txpriv.link_id = cw1200_find_link_id(priv, t->da);
+ if (!t->txpriv.link_id)
+ t->txpriv.link_id = cw1200_alloc_link_id(priv, t->da);
+ if (!t->txpriv.link_id) {
+ wiphy_err(priv->hw->wiphy,
+ "%s: No more link IDs available.\n",
+ __func__);
+ return -ENOENT;
+ }
+ t->txpriv.raw_link_id = t->txpriv.link_id;
}
- wsm->flags = tx_policy_get(priv,
- tx_info->control.rates, IEEE80211_TX_MAX_RATES,
- &tx_policy_renew) << 4;
+ if (t->txpriv.raw_link_id)
+ priv->link_id_db[t->txpriv.raw_link_id - 1].timestamp =
+ jiffies;
- if (tx_policy_renew) {
- tx_policy_printk(KERN_DEBUG "[TX] TX policy renew.\n");
- /* It's not so optimal to stop TX queues every now and then.
- * Maybe it's better to reimplement task scheduling with
- * a counter. */
- /* cw1200_tx_queues_lock(priv); */
- /* Definetly better. TODO. */
- wsm_lock_tx_async(priv);
- cw1200_tx_queues_lock(priv);
- queue_work(priv->workqueue, &priv->tx_policy_upload_work);
+ if (t->tx_info->control.sta &&
+ (t->tx_info->control.sta->uapsd_queues & BIT(t->queue)))
+ t->txpriv.link_id = CW1200_LINK_ID_UAPSD;
+ return 0;
+}
+
+static void
+cw1200_tx_h_pm(struct cw1200_common *priv,
+ struct cw1200_txinfo *t)
+{
+ if (unlikely(ieee80211_is_auth(t->hdr->frame_control))) {
+ u32 mask = ~BIT(t->txpriv.raw_link_id);
+ spin_lock_bh(&priv->ps_state_lock);
+ priv->sta_asleep_mask &= mask;
+ priv->pspoll_mask &= mask;
+ spin_unlock_bh(&priv->ps_state_lock);
}
+}
- wsm->queueId = wsm_queue_id_to_wsm(skb_get_queue_mapping(skb));
+static void
+cw1200_tx_h_calc_tid(struct cw1200_common *priv,
+ struct cw1200_txinfo *t)
+{
+ if (ieee80211_is_data_qos(t->hdr->frame_control)) {
+ u8 *qos = ieee80211_get_qos_ctl(t->hdr);
+ t->txpriv.tid = qos[0] & IEEE80211_QOS_CTL_TID_MASK;
+ } else if (ieee80211_is_data(t->hdr->frame_control)) {
+ t->txpriv.tid = 0;
+ }
+}
- /* BT Coex specific handling */
- if (priv->is_BT_Present) {
- struct ieee80211_hdr *hdr =
- (struct ieee80211_hdr *)(skb->data + sizeof(struct wsm_tx));
+/* IV/ICV injection. */
+/* TODO: Quite suboptimal. It's better to modify mac80211
+ * to reserve space for IV */
+static int
+cw1200_tx_h_crypt(struct cw1200_common *priv,
+ struct cw1200_txinfo *t)
+{
+ size_t iv_len;
+ size_t icv_len;
+ u8 *icv;
+ u8 *newhdr;
- if (cpu_to_be16(info->ethertype) == ETH_P_PAE)
- priority = WSM_EPTA_PRIORITY_EAPOL;
- else if (ieee80211_is_action(hdr->frame_control))
- priority = WSM_EPTA_PRIORITY_ACTION;
- else if (ieee80211_is_mgmt(hdr->frame_control))
- priority = WSM_EPTA_PRIORITY_MGT;
- else if ((wsm->queueId == WSM_QUEUE_VOICE))
- priority = WSM_EPTA_PRIORITY_VOICE;
- else if ((wsm->queueId == WSM_QUEUE_VIDEO))
- priority = WSM_EPTA_PRIORITY_VIDEO;
- else
- priority = WSM_EPTA_PRIORITY_DATA;
+ if (!t->tx_info->control.hw_key ||
+ !(t->hdr->frame_control &
+ __cpu_to_le32(IEEE80211_FCTL_PROTECTED)))
+ return 0;
- txrx_printk(KERN_DEBUG "[TX] EPTA priority %x.\n",
- ((priority) & 0x7));
+ iv_len = t->tx_info->control.hw_key->iv_len;
+ icv_len = t->tx_info->control.hw_key->icv_len;
- /* Set EPTA priority */
- wsm->flags |= (((priority) & 0x7) << 1);
+ if (t->tx_info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
+ icv_len += 8; /* MIC */
}
+ if ((skb_headroom(t->skb) + skb_tailroom(t->skb) <
+ iv_len + icv_len + WSM_TX_EXTRA_HEADROOM) ||
+ (skb_headroom(t->skb) <
+ iv_len + WSM_TX_EXTRA_HEADROOM)) {
+ wiphy_err(priv->hw->wiphy,
+ "Bug: no space allocated for crypto headers.\n"
+ "headroom: %d, tailroom: %d, "
+ "req_headroom: %d, req_tailroom: %d\n"
+ "Please fix it in cw1200_get_skb().\n",
+ skb_headroom(t->skb), skb_tailroom(t->skb),
+ iv_len + WSM_TX_EXTRA_HEADROOM, icv_len);
+ return -ENOMEM;
+ } else if (skb_tailroom(t->skb) < icv_len) {
+ size_t offset = icv_len - skb_tailroom(t->skb);
+ u8 *p;
+ wiphy_warn(priv->hw->wiphy,
+ "Slowpath: tailroom is not big enough. "
+ "Req: %d, got: %d.\n",
+ icv_len, skb_tailroom(t->skb));
+
+ p = skb_push(t->skb, offset);
+ memmove(p, &p[offset], t->skb->len - offset);
+ skb_trim(t->skb, t->skb->len - offset);
+ }
+
+ newhdr = skb_push(t->skb, iv_len);
+ memmove(newhdr, newhdr + iv_len, t->hdrlen);
+ t->hdrlen += iv_len;
+ icv = skb_put(t->skb, icv_len);
+
return 0;
}
-/* ******************************************************************** */
+static int
+cw1200_tx_h_align(struct cw1200_common *priv,
+ struct cw1200_txinfo *t)
+{
+ size_t offset = (size_t)t->skb->data & 3;
+ u8 *p;
+
+ if (!offset)
+ return 0;
+
+ if (skb_headroom(t->skb) < offset) {
+ wiphy_err(priv->hw->wiphy,
+ "Bug: no space allocated "
+ "for DMA alignment.\n"
+ "headroom: %d\n",
+ skb_headroom(t->skb));
+ return -ENOMEM;
+ }
+ p = skb_push(t->skb, offset);
+ memmove(p, &p[offset], t->skb->len - offset);
+ skb_trim(t->skb, t->skb->len - offset);
+ cw1200_debug_tx_copy(priv);
+ return 0;
+}
-void cw1200_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static int
+cw1200_tx_h_action(struct cw1200_common *priv,
+ struct cw1200_txinfo *t)
{
- struct cw1200_common *priv = dev->priv;
- unsigned queue = skb_get_queue_mapping(skb);
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ieee80211_hdr *hdr =
- (struct ieee80211_hdr *)skb->data;
- int was_buffered = 0;
- const u8 *da = ieee80211_get_DA(hdr);
- struct cw1200_sta_priv *sta_priv =
- (struct cw1200_sta_priv *)&tx_info->control.sta->drv_priv;
- int link_id, raw_link_id;
- int ret;
- struct __cw1200_txpriv txpriv = {
- .super.tid = CW1200_MAX_TID,
- };
+ struct ieee80211_mgmt *mgmt =
+ (struct ieee80211_mgmt *)t->hdr;
+ if (ieee80211_is_action(t->hdr->frame_control) &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK)
+ return 1;
+ else
+ return 0;
+}
- if (likely(tx_info->control.sta && sta_priv->link_id))
- raw_link_id = link_id = sta_priv->link_id;
- else if (priv->mode != NL80211_IFTYPE_AP)
- raw_link_id = link_id = 0;
- else if (is_multicast_ether_addr(da)) {
- if (priv->enable_beacon) {
- raw_link_id = 0;
- link_id = CW1200_LINK_ID_AFTER_DTIM;
- } else {
- raw_link_id = link_id = 0;
- }
- } else {
- raw_link_id = cw1200_find_link_id(priv, da);
- if (!raw_link_id)
- raw_link_id = cw1200_alloc_link_id(priv, da);
- if (!raw_link_id) {
- wiphy_err(priv->hw->wiphy,
- "%s: No more link IDs available.\n",
- __func__);
- goto err;
- }
- link_id = raw_link_id;
+/* Add WSM header */
+static struct wsm_tx *
+cw1200_tx_h_wsm(struct cw1200_common *priv,
+ struct cw1200_txinfo *t)
+{
+ struct wsm_tx *wsm;
+
+ if (skb_headroom(t->skb) < sizeof(struct wsm_tx)) {
+ wiphy_err(priv->hw->wiphy,
+ "Bug: no space allocated "
+ "for WSM header.\n"
+ "headroom: %d\n",
+ skb_headroom(t->skb));
+ return NULL;
}
- if (raw_link_id)
- priv->link_id_db[raw_link_id - 1].timestamp = jiffies;
- if (tx_info->control.sta &&
- (tx_info->control.sta->uapsd_queues & BIT(queue)))
- link_id = CW1200_LINK_ID_UAPSD;
+ wsm = (struct wsm_tx *)skb_push(t->skb, sizeof(struct wsm_tx));
+ t->txpriv.offset += sizeof(struct wsm_tx);
+ memset(wsm, 0, sizeof(*wsm));
+ wsm->hdr.len = __cpu_to_le16(t->skb->len);
+ wsm->hdr.id = __cpu_to_le16(0x0004);
+ wsm->queueId = wsm_queue_id_to_wsm(t->queue);
+ return wsm;
+}
- txrx_printk(KERN_DEBUG "[TX] TX %d bytes (queue: %d, link_id: %d (%d)).\n",
- skb->len, queue, link_id, raw_link_id);
+/* BT Coex specific handling */
+static void
+cw1200_tx_h_bt(struct cw1200_common *priv,
+ struct cw1200_txinfo *t,
+ struct wsm_tx *wsm)
+{
+ u8 priority = 0;
- if (WARN_ON(queue >= 4))
- goto err;
+ if (!priv->is_BT_Present)
+ return;
- if (unlikely(ieee80211_is_auth(hdr->frame_control))) {
- spin_lock_bh(&priv->ps_state_lock);
- priv->sta_asleep_mask &= ~BIT(raw_link_id);
- priv->pspoll_mask &= ~BIT(raw_link_id);
- spin_unlock_bh(&priv->ps_state_lock);
- } else if (ieee80211_is_data_qos(hdr->frame_control) ||
- ieee80211_is_qos_nullfunc(hdr->frame_control)) {
- u8 *qos = ieee80211_get_qos_ctl(hdr);
- txpriv.super.tid = qos[0] & IEEE80211_QOS_CTL_TID_MASK;
- } else if (ieee80211_is_data(hdr->frame_control) ||
- ieee80211_is_nullfunc(hdr->frame_control)) {
- txpriv.super.tid = 0;
+ if (unlikely(ieee80211_is_nullfunc(t->hdr->frame_control)))
+ priority = WSM_EPTA_PRIORITY_MGT;
+ else if (ieee80211_is_data(t->hdr->frame_control)) {
+ /* Skip LLC SNAP header (+6) */
+ u8 *payload = &t->skb->data[t->hdrlen];
+ u16 *ethertype = (u16 *) &payload[6];
+ if (unlikely(*ethertype == __be16_to_cpu(ETH_P_PAE)))
+ priority = WSM_EPTA_PRIORITY_EAPOL;
}
-
- /* BT Coex support related configuration */
- if (priv->is_BT_Present) {
- txpriv.ethertype = 0;
-
- if (ieee80211_is_data_qos(hdr->frame_control) ||
- ieee80211_is_data(hdr->frame_control)) {
- unsigned int headerlen =
- ieee80211_get_hdrlen_from_skb(skb);
-
- /* Skip LLC SNAP header (+6) */
- if (headerlen > 0)
- txpriv.ethertype =
- *((u16 *)(skb->data + headerlen + 6));
- }
- else if (ieee80211_is_assoc_req(hdr->frame_control) ||
- ieee80211_is_reassoc_req(hdr->frame_control)) {
- struct ieee80211_mgmt *mgt_frame =
- (struct ieee80211_mgmt *)skb->data;
-
- if (mgt_frame->u.assoc_req.listen_interval <
- priv->listen_interval) {
- txrx_printk(KERN_DEBUG
- "Modified Listen Interval to %x from %x\n",
+ else if (unlikely(ieee80211_is_assoc_req(t->hdr->frame_control) ||
+ ieee80211_is_reassoc_req(t->hdr->frame_control))) {
+ struct ieee80211_mgmt *mgt_frame =
+ (struct ieee80211_mgmt *)t->hdr;
+
+ if (mgt_frame->u.assoc_req.listen_interval <
+ priv->listen_interval) {
+ txrx_printk(KERN_DEBUG
+ "Modified Listen Interval to %d from %d\n",
priv->listen_interval,
mgt_frame->u.assoc_req.listen_interval);
- /* Replace listen interval derieved from
- the one read from SDD */
- mgt_frame->u.assoc_req.listen_interval =
- priv->listen_interval;
- }
+ /* Replace listen interval derived from
+ * the one read from SDD */
+ mgt_frame->u.assoc_req.listen_interval =
+ priv->listen_interval;
}
}
- /* IV/ICV injection. */
- /* TODO: Quite unoptimal. It's better co modify mac80211
- * to reserve space for IV */
- if (tx_info->control.hw_key &&
- (hdr->frame_control &
- __cpu_to_le32(IEEE80211_FCTL_PROTECTED))) {
- size_t hdrlen = ieee80211_hdrlen(hdr->frame_control);
- size_t iv_len = tx_info->control.hw_key->iv_len;
- size_t icv_len = tx_info->control.hw_key->icv_len;
- u8 *icv;
- u8 *newhdr;
-
- if (tx_info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
- icv_len += 8; /* MIC */
- }
+ if (likely(!priority)) {
+ if (ieee80211_is_action(t->hdr->frame_control))
+ priority = WSM_EPTA_PRIORITY_ACTION;
+ else if (ieee80211_is_mgmt(t->hdr->frame_control))
+ priority = WSM_EPTA_PRIORITY_MGT;
+ else if ((wsm->queueId == WSM_QUEUE_VOICE))
+ priority = WSM_EPTA_PRIORITY_VOICE;
+ else if ((wsm->queueId == WSM_QUEUE_VIDEO))
+ priority = WSM_EPTA_PRIORITY_VIDEO;
+ else
+ priority = WSM_EPTA_PRIORITY_DATA;
+ }
- if ((skb_headroom(skb) + skb_tailroom(skb) <
- iv_len + icv_len + WSM_TX_EXTRA_HEADROOM) ||
- (skb_headroom(skb) < iv_len + WSM_TX_EXTRA_HEADROOM)) {
- wiphy_err(priv->hw->wiphy,
- "Bug: no space allocated "
- "for crypto headers.\n"
- "headroom: %d, tailroom: %d, "
- "req_headroom: %d, req_tailroom: %d\n"
- "Please fix it in cw1200_get_skb().\n",
- skb_headroom(skb), skb_tailroom(skb),
- iv_len + WSM_TX_EXTRA_HEADROOM, icv_len);
- goto err;
- } else if (skb_tailroom(skb) < icv_len) {
- size_t offset = icv_len - skb_tailroom(skb);
- u8 *p;
- wiphy_warn(priv->hw->wiphy,
- "Slowpath: tailroom is not big enough. "
- "Req: %d, got: %d.\n",
- icv_len, skb_tailroom(skb));
-
- p = skb_push(skb, offset);
- memmove(p, &p[offset], skb->len - offset);
- skb_trim(skb, skb->len - offset);
- }
+ txrx_printk(KERN_DEBUG "[TX] EPTA priority %d.\n",
+ priority);
- newhdr = skb_push(skb, iv_len);
- memmove(newhdr, newhdr + iv_len, hdrlen);
- memset(&newhdr[hdrlen], 0, iv_len);
- icv = skb_put(skb, icv_len);
- memset(icv, 0, icv_len);
- }
+ wsm->flags |= priority << 1;
+}
- if ((size_t)skb->data & 3) {
- size_t offset = (size_t)skb->data & 3;
- u8 *p;
- if (skb_headroom(skb) < 4) {
- wiphy_err(priv->hw->wiphy,
- "Bug: no space allocated "
- "for DMA alignment.\n"
- "headroom: %d\n",
- skb_headroom(skb));
- goto err;
- }
- p = skb_push(skb, offset);
- memmove(p, &p[offset], skb->len - offset);
- skb_trim(skb, skb->len - offset);
- cw1200_debug_tx_copy(priv);
+static void
+cw1200_tx_h_rate_policy(struct cw1200_common *priv,
+ struct cw1200_txinfo *t,
+ struct wsm_tx *wsm)
+{
+ bool tx_policy_renew = false;
+
+ wsm->maxTxRate = t->rate->hw_value;
+ if (t->rate->flags & IEEE80211_TX_RC_MCS) {
+ if (cw1200_ht_greenfield(&priv->ht_info))
+ wsm->htTxParameters |=
+ __cpu_to_le32(WSM_HT_TX_GREENFIELD);
+ else
+ wsm->htTxParameters |=
+ __cpu_to_le32(WSM_HT_TX_MIXED);
}
- if (ieee80211_is_action(hdr->frame_control))
- if (cw1200_handle_action_tx(priv, skb))
- goto drop;
+ t->txpriv.rate_id = tx_policy_get(priv,
+ t->tx_info->control.rates, IEEE80211_TX_MAX_RATES,
+ &tx_policy_renew);
+ wsm->flags = t->txpriv.rate_id << 4;
- spin_lock_bh(&priv->ps_state_lock);
+ if (tx_policy_renew) {
+ tx_policy_printk(KERN_DEBUG "[TX] TX policy renew.\n");
+ /* It's not so optimal to stop TX queues every now and then.
+ * Maybe it's better to reimplement task scheduling with
+ * a counter. */
+ /* cw1200_tx_queues_lock(priv); */
+ /* Definitely better. TODO. */
+ wsm_lock_tx_async(priv);
+ cw1200_tx_queues_lock(priv);
+ queue_work(priv->workqueue, &priv->tx_policy_upload_work);
+ }
+}
- if (link_id == CW1200_LINK_ID_AFTER_DTIM &&
+static bool
+cw1200_tx_h_pm_state(struct cw1200_common *priv,
+ struct cw1200_txinfo *t)
+{
+ int was_buffered = 1;
+
+ if (t->txpriv.link_id == CW1200_LINK_ID_AFTER_DTIM &&
!priv->buffered_multicasts) {
priv->buffered_multicasts = true;
if (priv->sta_asleep_mask)
@@ -628,35 +674,82 @@ void cw1200_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
&priv->multicast_start_work);
}
- if (raw_link_id && txpriv.super.tid < CW1200_MAX_TID)
- was_buffered = priv->link_id_db[raw_link_id - 1]
- .buffered[txpriv.super.tid]++;
+ if (t->txpriv.raw_link_id && t->txpriv.tid < CW1200_MAX_TID)
+ was_buffered = priv->link_id_db[t->txpriv.raw_link_id - 1]
+ .buffered[t->txpriv.tid]++;
- txpriv.super.link_id = link_id;
- txpriv.super.raw_link_id = raw_link_id;
- ret = cw1200_queue_put(&priv->tx_queue[queue], priv, skb,
- &txpriv.super);
+ return !was_buffered;
+}
- spin_unlock_bh(&priv->ps_state_lock);
+/* ******************************************************************** */
- if (raw_link_id && !was_buffered && txpriv.super.tid < CW1200_MAX_TID)
- ieee80211_sta_set_buffered(tx_info->control.sta,
- txpriv.super.tid, true);
+void cw1200_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+{
+ struct cw1200_common *priv = dev->priv;
+ struct cw1200_txinfo t = {
+ .skb = skb,
+ .queue = skb_get_queue_mapping(skb),
+ .tx_info = IEEE80211_SKB_CB(skb),
+ .hdr = (struct ieee80211_hdr *)skb->data,
+ .txpriv.tid = CW1200_MAX_TID,
+ .txpriv.rate_id = CW1200_INVALID_RATE_ID,
+ };
+ struct wsm_tx *wsm;
+ bool tid_update = 0;
+ int ret;
- if (!WARN_ON(ret))
- cw1200_bh_wakeup(priv);
- else
- goto err;
+ t.rate = cw1200_get_tx_rate(priv,
+ &t.tx_info->control.rates[0]),
+ t.hdrlen = ieee80211_hdrlen(t.hdr->frame_control);
+ t.da = ieee80211_get_DA(t.hdr);
+ t.sta_priv =
+ (struct cw1200_sta_priv *)&t.tx_info->control.sta->drv_priv;
- return;
+ if (WARN_ON(t.queue >= 4 || !t.rate))
+ goto drop;
+
+ if ((ret = cw1200_tx_h_calc_link_ids(priv, &t)))
+ goto drop;
+
+ txrx_printk(KERN_DEBUG "[TX] TX %d bytes "
+ "(queue: %d, link_id: %d (%d)).\n",
+ skb->len, t.queue, t.txpriv.link_id,
+ t.txpriv.raw_link_id);
+
+ cw1200_tx_h_pm(priv, &t);
+ cw1200_tx_h_calc_tid(priv, &t);
+ if ((ret = cw1200_tx_h_crypt(priv, &t)))
+ goto drop;
+ if ((ret = cw1200_tx_h_align(priv, &t)))
+ goto drop;
+ if ((ret = cw1200_tx_h_action(priv, &t)))
+ goto drop;
+ wsm = cw1200_tx_h_wsm(priv, &t);
+ if (!wsm) {
+ ret = -ENOMEM;
+ goto drop;
+ }
+ cw1200_tx_h_bt(priv, &t, wsm);
+ cw1200_tx_h_rate_policy(priv, &t, wsm);
+
+ spin_lock_bh(&priv->ps_state_lock);
+ {
+ tid_update = cw1200_tx_h_pm_state(priv, &t);
+ BUG_ON(cw1200_queue_put(&priv->tx_queue[t.queue],
+ t.skb, &t.txpriv));
+ }
+ spin_unlock_bh(&priv->ps_state_lock);
+
+ if (tid_update)
+ ieee80211_sta_set_buffered(t.tx_info->control.sta,
+ t.txpriv.tid, true);
+
+ cw1200_bh_wakeup(priv);
-err:
- /* TODO: Update TX failure counters */
- dev_kfree_skb_any(skb);
return;
drop:
- dev_kfree_skb_any(skb);
+ cw1200_skb_dtor(priv, skb, &t.txpriv);
return;
}
@@ -674,18 +767,6 @@ static int cw1200_handle_action_rx(struct cw1200_common *priv,
return 0;
}
-static int cw1200_handle_action_tx(struct cw1200_common *priv,
- struct sk_buff *skb)
-{
- struct ieee80211_mgmt *mgmt = (void *)skb->data;
-
- /* Filter block ACK negotiation: fully controlled by firmware */
- if (mgmt->u.action.category == WLAN_CATEGORY_BACK)
- return 1;
-
- return 0;
-}
-
static int cw1200_handle_pspoll(struct cw1200_common *priv,
struct sk_buff *skb)
{
@@ -709,11 +790,6 @@ static int cw1200_handle_pspoll(struct cw1200_common *priv,
sta_priv = (struct cw1200_sta_priv *)&sta->drv_priv;
link_id = sta_priv->link_id;
pspoll_mask = BIT(sta_priv->link_id);
-
- /* HACK! To be removed when accurate TX ststus
- * reporting for dropped frames is implemented. */
- if (priv->pspoll_mask & pspoll_mask)
- ieee80211_sta_eosp_irqsafe(sta);
}
rcu_read_unlock();
if (!link_id)
@@ -746,7 +822,6 @@ void cw1200_tx_confirm_cb(struct cw1200_common *priv,
u8 queue_id = cw1200_queue_get_queue_id(arg->packetID);
struct cw1200_queue *queue = &priv->tx_queue[queue_id];
struct sk_buff *skb;
- const struct cw1200_txpriv *txpriv = NULL;
txrx_printk(KERN_DEBUG "[TX] TX confirm: %d, %d.\n",
arg->status, arg->ackFailures);
@@ -780,10 +855,8 @@ void cw1200_tx_confirm_cb(struct cw1200_common *priv,
WARN_ON(cw1200_queue_requeue(queue,
arg->packetID));
} else if (!WARN_ON(cw1200_queue_get_skb(
- queue, arg->packetID, &skb, &txpriv))) {
+ queue, arg->packetID, &skb))) {
struct ieee80211_tx_info *tx = IEEE80211_SKB_CB(skb);
- struct wsm_tx *wsm_tx = (struct wsm_tx *)skb->data;
- int rate_id = (wsm_tx->flags >> 4) & 0x07;
int tx_count = arg->ackFailures;
u8 ht_flags = 0;
int i;
@@ -791,9 +864,6 @@ void cw1200_tx_confirm_cb(struct cw1200_common *priv,
if (cw1200_ht_greenfield(&priv->ht_info))
ht_flags |= IEEE80211_TX_RC_GREEN_FIELD;
- /* Release used TX rate policy */
- tx_policy_put(priv, rate_id);
-
if (likely(!arg->status)) {
tx->flags |= IEEE80211_TX_STAT_ACK;
priv->cqm_tx_failure_count = 0;
@@ -833,15 +903,11 @@ void cw1200_tx_confirm_cb(struct cw1200_common *priv,
tx->status.rates[i].idx = -1;
}
- skb_pull(skb, sizeof(struct wsm_tx));
- cw1200_notify_buffered_tx(priv, skb, arg->link_id,
- txpriv->tid);
- ieee80211_tx_status(priv->hw, skb);
- WARN_ON(cw1200_queue_remove(queue, priv, arg->packetID));
+ cw1200_queue_remove(queue, arg->packetID);
}
}
-void cw1200_notify_buffered_tx(struct cw1200_common *priv,
+static void cw1200_notify_buffered_tx(struct cw1200_common *priv,
struct sk_buff *skb, int link_id, int tid)
{
struct ieee80211_sta *sta;
@@ -870,6 +936,19 @@ void cw1200_notify_buffered_tx(struct cw1200_common *priv,
}
+void cw1200_skb_dtor(struct cw1200_common *priv,
+ struct sk_buff *skb,
+ const struct cw1200_txpriv *txpriv)
+{
+ skb_pull(skb, txpriv->offset);
+ if (txpriv->rate_id != CW1200_INVALID_RATE_ID) {
+ cw1200_notify_buffered_tx(priv, skb,
+ txpriv->raw_link_id, txpriv->tid);
+ tx_policy_put(priv, txpriv->rate_id);
+ }
+ ieee80211_tx_status(priv->hw, skb);
+}
+
void cw1200_rx_cb(struct cw1200_common *priv,
struct wsm_rx *arg,
struct sk_buff **skb_p)
diff --git a/drivers/staging/cw1200/txrx.h b/drivers/staging/cw1200/txrx.h
index 87dbdfa949c..9f4f40ea31c 100644
--- a/drivers/staging/cw1200/txrx.h
+++ b/drivers/staging/cw1200/txrx.h
@@ -17,6 +17,7 @@
/* extern */ struct ieee80211_hw;
/* extern */ struct sk_buff;
/* extern */ struct wsm_tx;
+/* extern */ struct wsm_rx;
/* extern */ struct wsm_tx_confirm;
/* extern */ struct cw1200_txpriv;
@@ -53,19 +54,16 @@ struct tx_policy_cache {
*/
void tx_policy_init(struct cw1200_common *priv);
void tx_policy_upload_work(struct work_struct *work);
-void tx_policy_put(struct cw1200_common *priv, int idx);
/* ******************************************************************** */
/* TX implementation */
u32 cw1200_rate_mask_to_wsm(struct cw1200_common *priv,
u32 rates);
-int cw1200_skb_to_wsm(struct cw1200_common *priv,
- struct sk_buff *skb, struct wsm_tx *wsm,
- struct cw1200_txpriv *txpriv);
void cw1200_tx(struct ieee80211_hw *dev, struct sk_buff *skb);
-void cw1200_notify_buffered_tx(struct cw1200_common *priv,
- struct sk_buff *skb, int link_id, int tid);
+void cw1200_skb_dtor(struct cw1200_common *priv,
+ struct sk_buff *skb,
+ const struct cw1200_txpriv *txpriv);
/* ******************************************************************** */
/* WSM callbacks */
diff --git a/drivers/staging/cw1200/wsm.c b/drivers/staging/cw1200/wsm.c
index d0bcffa1654..16996def7b2 100644
--- a/drivers/staging/cw1200/wsm.c
+++ b/drivers/staging/cw1200/wsm.c
@@ -1399,8 +1399,6 @@ static bool wsm_handle_tx_data(struct cw1200_common *priv,
* probe responses.
* The easiest way to get it back is to convert
* probe request into WSM start_scan command. */
- const struct cw1200_txpriv *txpriv;
- int rate_id = (wsm->flags >> 4) & 0x07;
struct cw1200_queue *queue =
&priv->tx_queue[cw1200_queue_get_queue_id(
wsm->packetID)];
@@ -1409,12 +1407,13 @@ static bool wsm_handle_tx_data(struct cw1200_common *priv,
wsm_lock_tx_async(priv);
BUG_ON(priv->scan.probe_skb);
BUG_ON(cw1200_queue_get_skb(queue,
- wsm->packetID,
- &priv->scan.probe_skb, &txpriv));
- BUG_ON(cw1200_queue_remove(queue, priv,
- wsm->packetID));
+ wsm->packetID,
+ &priv->scan.probe_skb));
+ skb_get(priv->scan.probe_skb);
+ IEEE80211_SKB_CB(priv->scan.probe_skb)->flags |=
+ IEEE80211_TX_STAT_ACK;
+ BUG_ON(cw1200_queue_remove(queue, wsm->packetID));
/* Release used TX rate policy */
- tx_policy_put(priv, rate_id);
queue_delayed_work(priv->workqueue,
&priv->scan.probe_work, 0);
handled = true;
@@ -1424,23 +1423,11 @@ static bool wsm_handle_tx_data(struct cw1200_common *priv,
{
/* See detailed description of "join" below.
* We are dropping everything except AUTH in non-joined mode. */
- struct sk_buff *skb;
- int rate_id = (wsm->flags >> 4) & 0x07;
- const struct cw1200_txpriv *txpriv = NULL;
struct cw1200_queue *queue =
&priv->tx_queue[cw1200_queue_get_queue_id(
wsm->packetID)];
- wsm_printk(KERN_DEBUG "[WSM] Drop frame (0x%.4X):"
- " not joined.\n", fctl);
- BUG_ON(cw1200_queue_get_skb(queue, wsm->packetID,
- &skb, &txpriv));
- skb_pull(skb, sizeof(struct wsm_tx));
- cw1200_notify_buffered_tx(priv, skb, link_id, txpriv->tid);
- BUG_ON(cw1200_queue_remove(queue, priv, wsm->packetID));
- /* Release used TX rate policy */
- tx_policy_put(priv, rate_id);
- /* Release SKB. TODO: report TX failure. */
- dev_kfree_skb(skb);
+ wsm_printk(KERN_DEBUG "[WSM] Drop frame (0x%.4X).\n", fctl);
+ BUG_ON(cw1200_queue_remove(queue, wsm->packetID));
handled = true;
}
break;
@@ -1626,7 +1613,6 @@ int wsm_get_tx(struct cw1200_common *priv, u8 **data,
if (ret)
break;
-
if (cw1200_queue_get(queue,
tx_allowed_mask,
&wsm, &tx_info, &txpriv))
diff --git a/drivers/staging/cw1200/wsm.h b/drivers/staging/cw1200/wsm.h
index 11e18005200..f0b07dc08a2 100644
--- a/drivers/staging/cw1200/wsm.h
+++ b/drivers/staging/cw1200/wsm.h
@@ -753,11 +753,11 @@ typedef void (*wsm_tx_confirm_cb) (struct cw1200_common *priv,
/* Note that ideology of wsm_tx struct is different against the rest of
* WSM API. wsm_hdr is /not/ a caller-adapted struct to be used as an input
* argument for WSM call, but a prepared bytestream to be sent to firmware.
- * It is filled partly in cw1200_skb_to_wsm, partly in low-level WSM code.
+ * It is filled partly in cw1200_tx, partly in low-level WSM code.
* Please pay attention once again: ideology is different.
*
* Legend:
- * - [in]: cw1200_skb_to_wsm must fill this field.
+ * - [in]: cw1200_tx must fill this field.
* - [wsm]: the field is filled by low-level WSM.
*/
struct wsm_tx {