author		Kevin Hilman <khilman@deeprootsystems.com>	2010-10-21 11:21:55 -0700
committer	Kevin Hilman <khilman@deeprootsystems.com>	2010-10-21 11:21:55 -0700
commit		7940a34b2e1e0485211a17d8c3ab4da1ea3e1330 (patch)
tree		fe08ba701cbf23ae44a16d3b7afa521fce943a8a /drivers
parent		6451d7783ba5ff24eb1a544eaa6665b890f30466 (diff)
parent		8939b3504dc35224cb9c88e5af925b22ea9eee71 (diff)
Merge branch 'davinci-next' into davinci-for-linus

Conflicts:
	arch/arm/mach-davinci/board-da830-evm.c
	arch/arm/mach-davinci/board-da850-evm.c
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/input/keyboard/Kconfig             |    9
-rw-r--r--	drivers/input/keyboard/Makefile            |    1
-rw-r--r--	drivers/input/keyboard/tnetv107x-keypad.c  |  340
-rw-r--r--	drivers/input/touchscreen/Kconfig          |    9
-rw-r--r--	drivers/input/touchscreen/Makefile         |    1
-rw-r--r--	drivers/input/touchscreen/tnetv107x-ts.c   |  396
-rw-r--r--	drivers/mtd/nand/davinci_nand.c            |   61
-rw-r--r--	drivers/net/Kconfig                        |   21
-rw-r--r--	drivers/net/Makefile                       |    2
-rw-r--r--	drivers/net/davinci_cpdma.c                |  965
-rw-r--r--	drivers/net/davinci_cpdma.h                |  108
-rw-r--r--	drivers/net/davinci_emac.c                 | 1338
-rw-r--r--	drivers/net/davinci_mdio.c                 |  475
13 files changed, 2526 insertions(+), 1200 deletions(-)
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index aa037fec2f86..48cf9ec8baf2 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -424,6 +424,15 @@ config KEYBOARD_OMAP
To compile this driver as a module, choose M here: the
module will be called omap-keypad.
+config KEYBOARD_TNETV107X
+ tristate "TI TNETV107X keypad support"
+ depends on ARCH_DAVINCI_TNETV107X
+ help
+ Say Y here if you want to use the TNETV107X keypad.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tnetv107x-keypad.
+
config KEYBOARD_TWL4030
tristate "TI TWL4030/TWL5030/TPS659x0 keypad support"
depends on TWL4030_CORE
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 504b591be0cd..dc0451824dbf 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_KEYBOARD_SH_KEYSC) += sh_keysc.o
obj-$(CONFIG_KEYBOARD_STMPE) += stmpe-keypad.o
obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o
obj-$(CONFIG_KEYBOARD_SUNKBD) += sunkbd.o
+obj-$(CONFIG_KEYBOARD_TNETV107X) += tnetv107x-keypad.o
obj-$(CONFIG_KEYBOARD_TWL4030) += twl4030_keypad.o
obj-$(CONFIG_KEYBOARD_XTKBD) += xtkbd.o
obj-$(CONFIG_KEYBOARD_W90P910) += w90p910_keypad.o
diff --git a/drivers/input/keyboard/tnetv107x-keypad.c b/drivers/input/keyboard/tnetv107x-keypad.c
new file mode 100644
index 000000000000..b4a81ebfab92
--- /dev/null
+++ b/drivers/input/keyboard/tnetv107x-keypad.c
@@ -0,0 +1,340 @@
+/*
+ * Texas Instruments TNETV107X Keypad Driver
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/input/matrix_keypad.h>
+
+#define BITS(x) (BIT(x) - 1)
+
+#define KEYPAD_ROWS 9
+#define KEYPAD_COLS 9
+
+#define DEBOUNCE_MIN 0x400ul
+#define DEBOUNCE_MAX 0x3ffffffful
+
+struct keypad_regs {
+ u32 rev;
+ u32 mode;
+ u32 mask;
+ u32 pol;
+ u32 dclock;
+ u32 rclock;
+ u32 stable_cnt;
+ u32 in_en;
+ u32 out;
+ u32 out_en;
+ u32 in;
+ u32 lock;
+ u32 pres[3];
+};
+
+#define keypad_read(kp, reg) __raw_readl(&(kp)->regs->reg)
+#define keypad_write(kp, reg, val) __raw_writel(val, &(kp)->regs->reg)
+
+struct keypad_data {
+ struct input_dev *input_dev;
+ struct resource *res;
+ struct keypad_regs __iomem *regs;
+ struct clk *clk;
+ struct device *dev;
+ spinlock_t lock;
+ int irq_press;
+ int irq_release;
+ int rows, cols, row_shift;
+ int debounce_ms, active_low;
+ u32 prev_keys[3];
+ unsigned short keycodes[];
+};
+
+static irqreturn_t keypad_irq(int irq, void *data)
+{
+ struct keypad_data *kp = data;
+ int i, bit, val, row, col, code;
+ unsigned long flags;
+ u32 curr_keys[3];
+ u32 change;
+
+ spin_lock_irqsave(&kp->lock, flags);
+
+ memset(curr_keys, 0, sizeof(curr_keys));
+ if (irq == kp->irq_press)
+ for (i = 0; i < 3; i++)
+ curr_keys[i] = keypad_read(kp, pres[i]);
+
+ for (i = 0; i < 3; i++) {
+ change = curr_keys[i] ^ kp->prev_keys[i];
+
+ while (change) {
+ bit = fls(change) - 1;
+ change ^= BIT(bit);
+ val = curr_keys[i] & BIT(bit);
+ bit += i * 32;
+ row = bit / KEYPAD_COLS;
+ col = bit % KEYPAD_COLS;
+
+ code = MATRIX_SCAN_CODE(row, col, kp->row_shift);
+ input_event(kp->input_dev, EV_MSC, MSC_SCAN, code);
+ input_report_key(kp->input_dev, kp->keycodes[code],
+ val);
+ }
+ }
+ input_sync(kp->input_dev);
+ memcpy(kp->prev_keys, curr_keys, sizeof(curr_keys));
+
+ if (irq == kp->irq_press)
+ keypad_write(kp, lock, 0); /* Allow hardware updates */
+
+ spin_unlock_irqrestore(&kp->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int keypad_start(struct input_dev *dev)
+{
+ struct keypad_data *kp = input_get_drvdata(dev);
+ unsigned long mask, debounce, clk_rate_khz;
+ unsigned long flags;
+
+ clk_enable(kp->clk);
+ clk_rate_khz = clk_get_rate(kp->clk) / 1000;
+
+ spin_lock_irqsave(&kp->lock, flags);
+
+ /* Initialize device registers */
+ keypad_write(kp, mode, 0);
+
+ mask = BITS(kp->rows) << KEYPAD_COLS;
+ mask |= BITS(kp->cols);
+ keypad_write(kp, mask, ~mask);
+
+ keypad_write(kp, pol, kp->active_low ? 0 : 0x3ffff);
+ keypad_write(kp, stable_cnt, 3);
+
+ debounce = kp->debounce_ms * clk_rate_khz;
+ debounce = clamp(debounce, DEBOUNCE_MIN, DEBOUNCE_MAX);
+ keypad_write(kp, dclock, debounce);
+ keypad_write(kp, rclock, 4 * debounce);
+
+ keypad_write(kp, in_en, 1);
+
+ spin_unlock_irqrestore(&kp->lock, flags);
+
+ return 0;
+}
+
+static void keypad_stop(struct input_dev *dev)
+{
+ struct keypad_data *kp = input_get_drvdata(dev);
+
+ synchronize_irq(kp->irq_press);
+ synchronize_irq(kp->irq_release);
+ clk_disable(kp->clk);
+}
+
+static int __devinit keypad_probe(struct platform_device *pdev)
+{
+ const struct matrix_keypad_platform_data *pdata;
+ const struct matrix_keymap_data *keymap_data;
+ struct device *dev = &pdev->dev;
+ struct keypad_data *kp;
+ int error = 0, sz, row_shift;
+ u32 rev = 0;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(dev, "cannot find device data\n");
+ return -EINVAL;
+ }
+
+ keymap_data = pdata->keymap_data;
+ if (!keymap_data) {
+ dev_err(dev, "cannot find keymap data\n");
+ return -EINVAL;
+ }
+
+ row_shift = get_count_order(pdata->num_col_gpios);
+ sz = offsetof(struct keypad_data, keycodes);
+ sz += (pdata->num_row_gpios << row_shift) * sizeof(kp->keycodes[0]);
+ kp = kzalloc(sz, GFP_KERNEL);
+ if (!kp) {
+ dev_err(dev, "cannot allocate device info\n");
+ return -ENOMEM;
+ }
+
+ kp->dev = dev;
+ kp->rows = pdata->num_row_gpios;
+ kp->cols = pdata->num_col_gpios;
+ kp->row_shift = row_shift;
+ kp->debounce_ms = pdata->debounce_ms;
+ kp->active_low = pdata->active_low;
+ platform_set_drvdata(pdev, kp);
+ spin_lock_init(&kp->lock);
+
+ kp->irq_press = platform_get_irq_byname(pdev, "press");
+ kp->irq_release = platform_get_irq_byname(pdev, "release");
+ if (kp->irq_press < 0 || kp->irq_release < 0) {
+ dev_err(dev, "cannot determine device interrupts\n");
+ error = -ENODEV;
+ goto error_res;
+ }
+
+ kp->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!kp->res) {
+ dev_err(dev, "cannot determine register area\n");
+ error = -ENODEV;
+ goto error_res;
+ }
+
+ if (!request_mem_region(kp->res->start, resource_size(kp->res),
+ pdev->name)) {
+ dev_err(dev, "cannot claim register memory\n");
+ kp->res = NULL;
+ error = -EINVAL;
+ goto error_res;
+ }
+
+ kp->regs = ioremap(kp->res->start, resource_size(kp->res));
+ if (!kp->regs) {
+ dev_err(dev, "cannot map register memory\n");
+ error = -ENOMEM;
+ goto error_map;
+ }
+
+ kp->clk = clk_get(dev, NULL);
+ if (IS_ERR(kp->clk)) {
+ dev_err(dev, "cannot claim device clock\n");
+ error = PTR_ERR(kp->clk);
+ goto error_clk;
+ }
+
+ error = request_threaded_irq(kp->irq_press, NULL, keypad_irq, 0,
+ dev_name(dev), kp);
+ if (error < 0) {
+ dev_err(kp->dev, "Could not allocate keypad press key irq\n");
+ goto error_irq_press;
+ }
+
+ error = request_threaded_irq(kp->irq_release, NULL, keypad_irq, 0,
+ dev_name(dev), kp);
+ if (error < 0) {
+ dev_err(kp->dev, "Could not allocate keypad release key irq\n");
+ goto error_irq_release;
+ }
+
+ kp->input_dev = input_allocate_device();
+ if (!kp->input_dev) {
+ dev_err(dev, "cannot allocate input device\n");
+ error = -ENOMEM;
+ goto error_input;
+ }
+ input_set_drvdata(kp->input_dev, kp);
+
+ kp->input_dev->name = pdev->name;
+ kp->input_dev->dev.parent = &pdev->dev;
+ kp->input_dev->open = keypad_start;
+ kp->input_dev->close = keypad_stop;
+ kp->input_dev->evbit[0] = BIT_MASK(EV_KEY);
+ if (!pdata->no_autorepeat)
+ kp->input_dev->evbit[0] |= BIT_MASK(EV_REP);
+
+ clk_enable(kp->clk);
+ rev = keypad_read(kp, rev);
+ kp->input_dev->id.bustype = BUS_HOST;
+ kp->input_dev->id.product = ((rev >> 8) & 0x07);
+ kp->input_dev->id.version = ((rev >> 16) & 0xfff);
+ clk_disable(kp->clk);
+
+ kp->input_dev->keycode = kp->keycodes;
+ kp->input_dev->keycodesize = sizeof(kp->keycodes[0]);
+ kp->input_dev->keycodemax = kp->rows << kp->row_shift;
+
+ matrix_keypad_build_keymap(keymap_data, kp->row_shift, kp->keycodes,
+ kp->input_dev->keybit);
+
+ input_set_capability(kp->input_dev, EV_MSC, MSC_SCAN);
+
+ error = input_register_device(kp->input_dev);
+ if (error < 0) {
+ dev_err(dev, "Could not register input device\n");
+ goto error_reg;
+ }
+
+ return 0;
+
+error_reg:
+ input_free_device(kp->input_dev);
+error_input:
+ free_irq(kp->irq_release, kp);
+error_irq_release:
+ free_irq(kp->irq_press, kp);
+error_irq_press:
+ clk_put(kp->clk);
+error_clk:
+ iounmap(kp->regs);
+error_map:
+ release_mem_region(kp->res->start, resource_size(kp->res));
+error_res:
+ platform_set_drvdata(pdev, NULL);
+ kfree(kp);
+ return error;
+}
+
+static int __devexit keypad_remove(struct platform_device *pdev)
+{
+ struct keypad_data *kp = platform_get_drvdata(pdev);
+
+ free_irq(kp->irq_press, kp);
+ free_irq(kp->irq_release, kp);
+ input_unregister_device(kp->input_dev);
+ clk_put(kp->clk);
+ iounmap(kp->regs);
+ release_mem_region(kp->res->start, resource_size(kp->res));
+ platform_set_drvdata(pdev, NULL);
+ kfree(kp);
+
+ return 0;
+}
+
+static struct platform_driver keypad_driver = {
+ .probe = keypad_probe,
+ .remove = __devexit_p(keypad_remove),
+ .driver.name = "tnetv107x-keypad",
+ .driver.owner = THIS_MODULE,
+};
+
+static int __init keypad_init(void)
+{
+ return platform_driver_register(&keypad_driver);
+}
+
+static void __exit keypad_exit(void)
+{
+ platform_driver_unregister(&keypad_driver);
+}
+
+module_init(keypad_init);
+module_exit(keypad_exit);
+
+MODULE_AUTHOR("Cyril Chemparathy");
+MODULE_DESCRIPTION("TNETV107X Keypad Driver");
+MODULE_ALIAS("platform: tnetv107x-keypad");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 0069d9703fda..8d320289abf8 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -328,6 +328,15 @@ config TOUCHSCREEN_MIGOR
To compile this driver as a module, choose M here: the
module will be called migor_ts.
+config TOUCHSCREEN_TNETV107X
+ tristate "TI TNETV107X touchscreen support"
+ depends on ARCH_DAVINCI_TNETV107X
+ help
+ Say Y here if you want to use the TNETV107X touchscreen.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tnetv107x-ts.
+
config TOUCHSCREEN_TOUCHRIGHT
tristate "Touchright serial touchscreen"
select SERIO
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 28217e1dcafd..d41a964e673b 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_TOUCHSCREEN_PENMOUNT) += penmount.o
obj-$(CONFIG_TOUCHSCREEN_QT602240) += qt602240_ts.o
obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o
obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o
+obj-$(CONFIG_TOUCHSCREEN_TNETV107X) += tnetv107x-ts.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT) += touchright.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHWIN) += touchwin.o
diff --git a/drivers/input/touchscreen/tnetv107x-ts.c b/drivers/input/touchscreen/tnetv107x-ts.c
new file mode 100644
index 000000000000..cf1dba2e267c
--- /dev/null
+++ b/drivers/input/touchscreen/tnetv107x-ts.c
@@ -0,0 +1,396 @@
+/*
+ * Texas Instruments TNETV107X Touchscreen Driver
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/ctype.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+
+#include <mach/tnetv107x.h>
+
+#define TSC_PENUP_POLL (HZ / 5)
+#define IDLE_TIMEOUT 100 /* msec */
+
+/*
+ * The first and last samples of a touch interval are usually garbage and need
+ * to be filtered out with these devices. The following definitions control
+ * the number of samples skipped.
+ */
+#define TSC_HEAD_SKIP 1
+#define TSC_TAIL_SKIP 1
+#define TSC_SKIP (TSC_HEAD_SKIP + TSC_TAIL_SKIP + 1)
+#define TSC_SAMPLES (TSC_SKIP + 1)
+
+/* Register Offsets */
+struct tsc_regs {
+ u32 rev;
+ u32 tscm;
+ u32 bwcm;
+ u32 swc;
+ u32 adcchnl;
+ u32 adcdata;
+ u32 chval[4];
+};
+
+/* TSC Mode Configuration Register (tscm) bits */
+#define WMODE BIT(0)
+#define TSKIND BIT(1)
+#define ZMEASURE_EN BIT(2)
+#define IDLE BIT(3)
+#define TSC_EN BIT(4)
+#define STOP BIT(5)
+#define ONE_SHOT BIT(6)
+#define SINGLE BIT(7)
+#define AVG BIT(8)
+#define AVGNUM(x) (((x) & 0x03) << 9)
+#define PVSTC(x) (((x) & 0x07) << 11)
+#define PON BIT(14)
+#define PONBG BIT(15)
+#define AFERST BIT(16)
+
+/* ADC DATA Capture Register bits */
+#define DATA_VALID BIT(16)
+
+/* Register Access Macros */
+#define tsc_read(ts, reg) __raw_readl(&(ts)->regs->reg)
+#define tsc_write(ts, reg, val) __raw_writel(val, &(ts)->regs->reg)
+#define tsc_set_bits(ts, reg, val) \
+ tsc_write(ts, reg, tsc_read(ts, reg) | (val))
+#define tsc_clr_bits(ts, reg, val) \
+ tsc_write(ts, reg, tsc_read(ts, reg) & ~(val))
+
+struct sample {
+ int x, y, p;
+};
+
+struct tsc_data {
+ struct input_dev *input_dev;
+ struct resource *res;
+ struct tsc_regs __iomem *regs;
+ struct timer_list timer;
+ spinlock_t lock;
+ struct clk *clk;
+ struct device *dev;
+ int sample_count;
+ struct sample samples[TSC_SAMPLES];
+ int tsc_irq;
+};
+
+static int tsc_read_sample(struct tsc_data *ts, struct sample *sample)
+{
+ int x, y, z1, z2, t, p = 0;
+ u32 val;
+
+ val = tsc_read(ts, chval[0]);
+ if (val & DATA_VALID)
+ x = val & 0xffff;
+ else
+ return -EINVAL;
+
+ y = tsc_read(ts, chval[1]) & 0xffff;
+ z1 = tsc_read(ts, chval[2]) & 0xffff;
+ z2 = tsc_read(ts, chval[3]) & 0xffff;
+
+ if (z1) {
+ t = ((600 * x) * (z2 - z1));
+ p = t / (u32) (z1 << 12);
+ if (p < 0)
+ p = 0;
+ }
+
+ sample->x = x;
+ sample->y = y;
+ sample->p = p;
+
+ return 0;
+}
+
+static void tsc_poll(unsigned long data)
+{
+ struct tsc_data *ts = (struct tsc_data *)data;
+ unsigned long flags;
+ int i, val, x, y, p;
+
+ spin_lock_irqsave(&ts->lock, flags);
+
+ if (ts->sample_count >= TSC_SKIP) {
+ input_report_abs(ts->input_dev, ABS_PRESSURE, 0);
+ input_report_key(ts->input_dev, BTN_TOUCH, 0);
+ input_sync(ts->input_dev);
+ } else if (ts->sample_count > 0) {
+ /*
+ * A touch event lasted less than our skip count. Salvage and
+ * report anyway.
+ */
+ for (i = 0, val = 0; i < ts->sample_count; i++)
+ val += ts->samples[i].x;
+ x = val / ts->sample_count;
+
+ for (i = 0, val = 0; i < ts->sample_count; i++)
+ val += ts->samples[i].y;
+ y = val / ts->sample_count;
+
+ for (i = 0, val = 0; i < ts->sample_count; i++)
+ val += ts->samples[i].p;
+ p = val / ts->sample_count;
+
+ input_report_abs(ts->input_dev, ABS_X, x);
+ input_report_abs(ts->input_dev, ABS_Y, y);
+ input_report_abs(ts->input_dev, ABS_PRESSURE, p);
+ input_report_key(ts->input_dev, BTN_TOUCH, 1);
+ input_sync(ts->input_dev);
+ }
+
+ ts->sample_count = 0;
+
+ spin_unlock_irqrestore(&ts->lock, flags);
+}
+
+static irqreturn_t tsc_irq(int irq, void *dev_id)
+{
+ struct tsc_data *ts = (struct tsc_data *)dev_id;
+ struct sample *sample;
+ int index;
+
+ spin_lock(&ts->lock);
+
+ index = ts->sample_count % TSC_SAMPLES;
+ sample = &ts->samples[index];
+ if (tsc_read_sample(ts, sample) < 0)
+ goto out;
+
+ if (++ts->sample_count >= TSC_SKIP) {
+ index = (ts->sample_count - TSC_TAIL_SKIP - 1) % TSC_SAMPLES;
+ sample = &ts->samples[index];
+
+ input_report_abs(ts->input_dev, ABS_X, sample->x);
+ input_report_abs(ts->input_dev, ABS_Y, sample->y);
+ input_report_abs(ts->input_dev, ABS_PRESSURE, sample->p);
+ if (ts->sample_count == TSC_SKIP)
+ input_report_key(ts->input_dev, BTN_TOUCH, 1);
+ input_sync(ts->input_dev);
+ }
+ mod_timer(&ts->timer, jiffies + TSC_PENUP_POLL);
+out:
+ spin_unlock(&ts->lock);
+ return IRQ_HANDLED;
+}
+
+static int tsc_start(struct input_dev *dev)
+{
+ struct tsc_data *ts = input_get_drvdata(dev);
+ unsigned long timeout = jiffies + msecs_to_jiffies(IDLE_TIMEOUT);
+ u32 val;
+
+ clk_enable(ts->clk);
+
+ /* Go to idle mode, before any initialization */
+ while (time_after(timeout, jiffies)) {
+ if (tsc_read(ts, tscm) & IDLE)
+ break;
+ }
+
+ if (time_before(timeout, jiffies)) {
+ dev_warn(ts->dev, "timeout waiting for idle\n");
+ clk_disable(ts->clk);
+ return -EIO;
+ }
+
+ /* Configure TSC Control register*/
+ val = (PONBG | PON | PVSTC(4) | ONE_SHOT | ZMEASURE_EN);
+ tsc_write(ts, tscm, val);
+
+ /* Bring TSC out of reset: Clear AFE reset bit */
+ val &= ~(AFERST);
+ tsc_write(ts, tscm, val);
+
+ /* Configure all pins for hardware control*/
+ tsc_write(ts, bwcm, 0);
+
+ /* Finally enable the TSC */
+ tsc_set_bits(ts, tscm, TSC_EN);
+
+ return 0;
+}
+
+static void tsc_stop(struct input_dev *dev)
+{
+ struct tsc_data *ts = input_get_drvdata(dev);
+
+ tsc_clr_bits(ts, tscm, TSC_EN);
+ synchronize_irq(ts->tsc_irq);
+ del_timer_sync(&ts->timer);
+ clk_disable(ts->clk);
+}
+
+static int __devinit tsc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tsc_data *ts;
+ int error = 0;
+ u32 rev = 0;
+
+ ts = kzalloc(sizeof(struct tsc_data), GFP_KERNEL);
+ if (!ts) {
+ dev_err(dev, "cannot allocate device info\n");
+ return -ENOMEM;
+ }
+
+ ts->dev = dev;
+ spin_lock_init(&ts->lock);
+ setup_timer(&ts->timer, tsc_poll, (unsigned long)ts);
+ platform_set_drvdata(pdev, ts);
+
+ ts->tsc_irq = platform_get_irq(pdev, 0);
+ if (ts->tsc_irq < 0) {
+ dev_err(dev, "cannot determine device interrupt\n");
+ error = -ENODEV;
+ goto error_res;
+ }
+
+ ts->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!ts->res) {
+ dev_err(dev, "cannot determine register area\n");
+ error = -ENODEV;
+ goto error_res;
+ }
+
+ if (!request_mem_region(ts->res->start, resource_size(ts->res),
+ pdev->name)) {
+ dev_err(dev, "cannot claim register memory\n");
+ ts->res = NULL;
+ error = -EINVAL;
+ goto error_res;
+ }
+
+ ts->regs = ioremap(ts->res->start, resource_size(ts->res));
+ if (!ts->regs) {
+ dev_err(dev, "cannot map register memory\n");
+ error = -ENOMEM;
+ goto error_map;
+ }
+
+ ts->clk = clk_get(dev, NULL);
+ if (IS_ERR(ts->clk)) {
+ dev_err(dev, "cannot claim device clock\n");
+ error = PTR_ERR(ts->clk);
+ goto error_clk;
+ }
+
+ error = request_threaded_irq(ts->tsc_irq, NULL, tsc_irq, 0,
+ dev_name(dev), ts);
+ if (error < 0) {
+ dev_err(ts->dev, "Could not allocate ts irq\n");
+ goto error_irq;
+ }
+
+ ts->input_dev = input_allocate_device();
+ if (!ts->input_dev) {
+ dev_err(dev, "cannot allocate input device\n");
+ error = -ENOMEM;
+ goto error_input;
+ }
+ input_set_drvdata(ts->input_dev, ts);
+
+ ts->input_dev->name = pdev->name;
+ ts->input_dev->id.bustype = BUS_HOST;
+ ts->input_dev->dev.parent = &pdev->dev;
+ ts->input_dev->open = tsc_start;
+ ts->input_dev->close = tsc_stop;
+
+ clk_enable(ts->clk);
+ rev = tsc_read(ts, rev);
+ ts->input_dev->id.product = ((rev >> 8) & 0x07);
+ ts->input_dev->id.version = ((rev >> 16) & 0xfff);
+ clk_disable(ts->clk);
+
+ __set_bit(EV_KEY, ts->input_dev->evbit);
+ __set_bit(EV_ABS, ts->input_dev->evbit);
+ __set_bit(BTN_TOUCH, ts->input_dev->keybit);
+
+ input_set_abs_params(ts->input_dev, ABS_X, 0, 0xffff, 5, 0);
+ input_set_abs_params(ts->input_dev, ABS_Y, 0, 0xffff, 5, 0);
+ input_set_abs_params(ts->input_dev, ABS_PRESSURE, 0, 4095, 128, 0);
+
+ error = input_register_device(ts->input_dev);
+ if (error < 0) {
+ dev_err(dev, "failed input device registration\n");
+ goto error_reg;
+ }
+
+ return 0;
+
+error_reg:
+ input_free_device(ts->input_dev);
+error_input:
+ free_irq(ts->tsc_irq, ts);
+error_irq:
+ clk_put(ts->clk);
+error_clk:
+ iounmap(ts->regs);
+error_map:
+ release_mem_region(ts->res->start, resource_size(ts->res));
+error_res:
+ platform_set_drvdata(pdev, NULL);
+ kfree(ts);
+
+ return error;
+}
+
+static int __devexit tsc_remove(struct platform_device *pdev)
+{
+ struct tsc_data *ts = platform_get_drvdata(pdev);
+
+ input_unregister_device(ts->input_dev);
+ free_irq(ts->tsc_irq, ts);
+ clk_put(ts->clk);
+ iounmap(ts->regs);
+ release_mem_region(ts->res->start, resource_size(ts->res));
+ platform_set_drvdata(pdev, NULL);
+ kfree(ts);
+
+ return 0;
+}
+
+static struct platform_driver tsc_driver = {
+ .probe = tsc_probe,
+ .remove = __devexit_p(tsc_remove),
+ .driver.name = "tnetv107x-ts",
+ .driver.owner = THIS_MODULE,
+};
+
+static int __init tsc_init(void)
+{
+ return platform_driver_register(&tsc_driver);
+}
+
+static void __exit tsc_exit(void)
+{
+ platform_driver_unregister(&tsc_driver);
+}
+
+module_init(tsc_init);
+module_exit(tsc_exit);
+
+MODULE_AUTHOR("Cyril Chemparathy");
+MODULE_DESCRIPTION("TNETV107X Touchscreen Driver");
+MODULE_ALIAS("platform: tnetv107x-ts");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 2ac7367afe77..8beb0d0233b5 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <mach/nand.h>
+#include <mach/aemif.h>
#include <asm/mach-types.h>
@@ -74,6 +75,8 @@ struct davinci_nand_info {
uint32_t mask_cle;
uint32_t core_chipsel;
+
+ struct davinci_aemif_timing *timing;
};
static DEFINE_SPINLOCK(davinci_nand_lock);
@@ -478,36 +481,6 @@ static int nand_davinci_dev_ready(struct mtd_info *mtd)
return davinci_nand_readl(info, NANDFSR_OFFSET) & BIT(0);
}
-static void __init nand_dm6446evm_flash_init(struct davinci_nand_info *info)
-{
- uint32_t regval, a1cr;
-
- /*
- * NAND FLASH timings @ PLL1 == 459 MHz
- * - AEMIF.CLK freq = PLL1/6 = 459/6 = 76.5 MHz
- * - AEMIF.CLK period = 1/76.5 MHz = 13.1 ns
- */
- regval = 0
- | (0 << 31) /* selectStrobe */
- | (0 << 30) /* extWait (never with NAND) */
- | (1 << 26) /* writeSetup 10 ns */
- | (3 << 20) /* writeStrobe 40 ns */
- | (1 << 17) /* writeHold 10 ns */
- | (0 << 13) /* readSetup 10 ns */
- | (3 << 7) /* readStrobe 60 ns */
- | (0 << 4) /* readHold 10 ns */
- | (3 << 2) /* turnAround ?? ns */
- | (0 << 0) /* asyncSize 8-bit bus */
- ;
- a1cr = davinci_nand_readl(info, A1CR_OFFSET);
- if (a1cr != regval) {
- dev_dbg(info->dev, "Warning: NAND config: Set A1CR " \
- "reg to 0x%08x, was 0x%08x, should be done by " \
- "bootloader.\n", regval, a1cr);
- davinci_nand_writel(info, A1CR_OFFSET, regval);
- }
-}
-
/*----------------------------------------------------------------------*/
/* An ECC layout for using 4-bit ECC with small-page flash, storing
@@ -611,6 +584,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->chip.options = pdata->options;
info->chip.bbt_td = pdata->bbt_td;
info->chip.bbt_md = pdata->bbt_md;
+ info->timing = pdata->timing;
info->ioaddr = (uint32_t __force) vaddr;
@@ -688,15 +662,25 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
goto err_clk_enable;
}
- /* EMIF timings should normally be set by the boot loader,
- * especially after boot-from-NAND. The *only* reason to
- * have this special casing for the DM6446 EVM is to work
- * with boot-from-NOR ... with CS0 manually re-jumpered
- * (after startup) so it addresses the NAND flash, not NOR.
- * Even for dev boards, that's unusually rude...
+ /*
+ * Set up the async configuration register in case we did not boot
+ * from NAND and the bootloader therefore did not configure it.
*/
- if (machine_is_davinci_evm())
- nand_dm6446evm_flash_init(info);
+ val = davinci_nand_readl(info, A1CR_OFFSET + info->core_chipsel * 4);
+
+ /* Extended Wait is not valid and Select Strobe mode is not used */
+ val &= ~(ACR_ASIZE_MASK | ACR_EW_MASK | ACR_SS_MASK);
+ if (info->chip.options & NAND_BUSWIDTH_16)
+ val |= 0x1;
+
+ davinci_nand_writel(info, A1CR_OFFSET + info->core_chipsel * 4, val);
+
+ ret = davinci_aemif_setup_timing(info->timing, info->base,
+ info->core_chipsel);
+ if (ret < 0) {
+ dev_dbg(&pdev->dev, "failed to set up NAND timing values\n");
+ goto err_timing;
+ }
spin_lock_irq(&davinci_nand_lock);
@@ -809,6 +793,7 @@ syndrome_done:
return 0;
err_scan:
+err_timing:
clk_disable(info->clk);
err_clk_enable:
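
The timing values themselves come from NAND platform data (pdata->timing, copied into info->timing above). A hedged sketch of what a board might pass — field names as defined by struct davinci_aemif_timing in <mach/aemif.h>, values purely illustrative nanoseconds:

/* Hypothetical board-side AEMIF timings for a NAND chip select. */
#include <mach/aemif.h>

static struct davinci_aemif_timing board_nand_timing = {
	.wsetup		= 10,	/* write setup, ns */
	.wstrobe	= 40,	/* write strobe */
	.whold		= 10,	/* write hold */
	.rsetup		= 10,	/* read setup */
	.rstrobe	= 60,	/* read strobe */
	.rhold		= 10,	/* read hold */
	.ta		= 40,	/* turnaround */
};
/* handed to the driver via the NAND platform data's .timing field */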
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 77efe462b921..6011935d757c 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -954,6 +954,8 @@ config NET_NETX
config TI_DAVINCI_EMAC
tristate "TI DaVinci EMAC Support"
depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+ select TI_DAVINCI_MDIO
+ select TI_DAVINCI_CPDMA
select PHYLIB
help
This driver supports TI's DaVinci Ethernet controller.
@@ -961,6 +963,25 @@ config TI_DAVINCI_EMAC
To compile this driver as a module, choose M here: the module
will be called davinci_emac. This is recommended.
+config TI_DAVINCI_MDIO
+ tristate "TI DaVinci MDIO Support"
+ depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+ select PHYLIB
+ help
+ This driver supports TI's DaVinci MDIO module.
+
+ To compile this driver as a module, choose M here: the module
+ will be called davinci_mdio. This is recommended.
+
+config TI_DAVINCI_CPDMA
+ tristate "TI DaVinci CPDMA Support"
+ depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+ help
+ This driver supports TI's DaVinci CPDMA DMA engine.
+
+ To compile this driver as a module, choose M here: the module
+ will be called davinci_cpdma. This is recommended.
+
config DM9000
tristate "DM9000 support"
depends on ARM || BLACKFIN || MIPS
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 3e8f150c4b14..65da885a3c0c 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -7,6 +7,8 @@ obj-$(CONFIG_MDIO) += mdio.o
obj-$(CONFIG_PHYLIB) += phy/
obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
+obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
+obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
obj-$(CONFIG_E1000) += e1000/
obj-$(CONFIG_E1000E) += e1000e/
diff --git a/drivers/net/davinci_cpdma.c b/drivers/net/davinci_cpdma.c
new file mode 100644
index 000000000000..e92b2b6cd8c4
--- /dev/null
+++ b/drivers/net/davinci_cpdma.c
@@ -0,0 +1,965 @@
+/*
+ * Texas Instruments CPDMA Driver
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+
+#include "davinci_cpdma.h"
+
+/* DMA Registers */
+#define CPDMA_TXIDVER 0x00
+#define CPDMA_TXCONTROL 0x04
+#define CPDMA_TXTEARDOWN 0x08
+#define CPDMA_RXIDVER 0x10
+#define CPDMA_RXCONTROL 0x14
+#define CPDMA_SOFTRESET 0x1c
+#define CPDMA_RXTEARDOWN 0x18
+#define CPDMA_TXINTSTATRAW 0x80
+#define CPDMA_TXINTSTATMASKED 0x84
+#define CPDMA_TXINTMASKSET 0x88
+#define CPDMA_TXINTMASKCLEAR 0x8c
+#define CPDMA_MACINVECTOR 0x90
+#define CPDMA_MACEOIVECTOR 0x94
+#define CPDMA_RXINTSTATRAW 0xa0
+#define CPDMA_RXINTSTATMASKED 0xa4
+#define CPDMA_RXINTMASKSET 0xa8
+#define CPDMA_RXINTMASKCLEAR 0xac
+#define CPDMA_DMAINTSTATRAW 0xb0
+#define CPDMA_DMAINTSTATMASKED 0xb4
+#define CPDMA_DMAINTMASKSET 0xb8
+#define CPDMA_DMAINTMASKCLEAR 0xbc
+#define CPDMA_DMAINT_HOSTERR BIT(1)
+
+/* the following exist only if has_ext_regs is set */
+#define CPDMA_DMACONTROL 0x20
+#define CPDMA_DMASTATUS 0x24
+#define CPDMA_RXBUFFOFS 0x28
+#define CPDMA_EM_CONTROL 0x2c
+
+/* Descriptor mode bits */
+#define CPDMA_DESC_SOP BIT(31)
+#define CPDMA_DESC_EOP BIT(30)
+#define CPDMA_DESC_OWNER BIT(29)
+#define CPDMA_DESC_EOQ BIT(28)
+#define CPDMA_DESC_TD_COMPLETE BIT(27)
+#define CPDMA_DESC_PASS_CRC BIT(26)
+
+#define CPDMA_TEARDOWN_VALUE 0xfffffffc
+
+struct cpdma_desc {
+ /* hardware fields */
+ u32 hw_next;
+ u32 hw_buffer;
+ u32 hw_len;
+ u32 hw_mode;
+ /* software fields */
+ void *sw_token;
+ u32 sw_buffer;
+ u32 sw_len;
+};
+
+struct cpdma_desc_pool {
+ u32 phys;
+ void __iomem *iomap; /* ioremap map */
+ void *cpumap; /* dma_alloc map */
+ int desc_size, mem_size;
+ int num_desc, used_desc;
+ unsigned long *bitmap;
+ struct device *dev;
+ spinlock_t lock;
+};
+
+enum cpdma_state {
+ CPDMA_STATE_IDLE,
+ CPDMA_STATE_ACTIVE,
+ CPDMA_STATE_TEARDOWN,
+};
+
+static const char *cpdma_state_str[] = { "idle", "active", "teardown" };
+
+struct cpdma_ctlr {
+ enum cpdma_state state;
+ struct cpdma_params params;
+ struct device *dev;
+ struct cpdma_desc_pool *pool;
+ spinlock_t lock;
+ struct cpdma_chan *channels[2 * CPDMA_MAX_CHANNELS];
+};
+
+struct cpdma_chan {
+ enum cpdma_state state;
+ struct cpdma_ctlr *ctlr;
+ int chan_num;
+ spinlock_t lock;
+ struct cpdma_desc __iomem *head, *tail;
+ int count;
+ void __iomem *hdp, *cp, *rxfree;
+ u32 mask;
+ cpdma_handler_fn handler;
+ enum dma_data_direction dir;
+ struct cpdma_chan_stats stats;
+ /* offsets into dmaregs */
+ int int_set, int_clear, td;
+};
+
+/* The following make access to common cpdma_ctlr params more readable */
+#define dmaregs params.dmaregs
+#define num_chan params.num_chan
+
+/* various accessors */
+#define dma_reg_read(ctlr, ofs) __raw_readl((ctlr)->dmaregs + (ofs))
+#define chan_read(chan, fld) __raw_readl((chan)->fld)
+#define desc_read(desc, fld) __raw_readl(&(desc)->fld)
+#define dma_reg_write(ctlr, ofs, v) __raw_writel(v, (ctlr)->dmaregs + (ofs))
+#define chan_write(chan, fld, v) __raw_writel(v, (chan)->fld)
+#define desc_write(desc, fld, v) __raw_writel((u32)(v), &(desc)->fld)
+
+/*
+ * Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci
+ * emac) have dedicated on-chip memory for these descriptors. Some other
+ * devices (e.g. cpsw switches) use plain old memory. Descriptor pools
+ * abstract out these details
+ */
+static struct cpdma_desc_pool *
+cpdma_desc_pool_create(struct device *dev, u32 phys, int size, int align)
+{
+ int bitmap_size;
+ struct cpdma_desc_pool *pool;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ spin_lock_init(&pool->lock);
+
+ pool->dev = dev;
+ pool->mem_size = size;
+ pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
+ pool->num_desc = size / pool->desc_size;
+
+ bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long);
+ pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!pool->bitmap)
+ goto fail;
+
+ if (phys) {
+ pool->phys = phys;
+ pool->iomap = ioremap(phys, size);
+ } else {
+ pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
+ GFP_KERNEL);
+ pool->iomap = (void __force __iomem *)pool->cpumap;
+ }
+
+ if (pool->iomap)
+ return pool;
+
+fail:
+ kfree(pool->bitmap);
+ kfree(pool);
+ return NULL;
+}
+
+static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
+{
+ unsigned long flags;
+
+ if (!pool)
+ return;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ WARN_ON(pool->used_desc);
+ kfree(pool->bitmap);
+ if (pool->cpumap) {
+ dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
+ pool->phys);
+ } else {
+ iounmap(pool->iomap);
+ }
+ spin_unlock_irqrestore(&pool->lock, flags);
+ kfree(pool);
+}
+
+static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
+ struct cpdma_desc __iomem *desc)
+{
+ if (!desc)
+ return 0;
+ return pool->phys + (__force dma_addr_t)desc -
+ (__force dma_addr_t)pool->iomap;
+}
+
+static inline struct cpdma_desc __iomem *
+desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
+{
+ return dma ? pool->iomap + dma - pool->phys : NULL;
+}
+
+static struct cpdma_desc __iomem *
+cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc)
+{
+ unsigned long flags;
+ int index;
+ struct cpdma_desc __iomem *desc = NULL;
+
+ spin_lock_irqsave(&pool->lock, flags);
+
+ index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0,
+ num_desc, 0);
+ if (index < pool->num_desc) {
+ bitmap_set(pool->bitmap, index, num_desc);
+ desc = pool->iomap + pool->desc_size * index;
+ pool->used_desc++;
+ }
+
+ spin_unlock_irqrestore(&pool->lock, flags);
+ return desc;
+}
+
+static void cpdma_desc_free(struct cpdma_desc_pool *pool,
+ struct cpdma_desc __iomem *desc, int num_desc)
+{
+ unsigned long flags, index;
+
+ index = ((unsigned long)desc - (unsigned long)pool->iomap) /
+ pool->desc_size;
+ spin_lock_irqsave(&pool->lock, flags);
+ bitmap_clear(pool->bitmap, index, num_desc);
+ pool->used_desc--;
+ spin_unlock_irqrestore(&pool->lock, flags);
+}
+
+struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
+{
+ struct cpdma_ctlr *ctlr;
+
+ ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL);
+ if (!ctlr)
+ return NULL;
+
+ ctlr->state = CPDMA_STATE_IDLE;
+ ctlr->params = *params;
+ ctlr->dev = params->dev;
+ spin_lock_init(&ctlr->lock);
+
+ ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
+ ctlr->params.desc_mem_phys,
+ ctlr->params.desc_mem_size,
+ ctlr->params.desc_align);
+ if (!ctlr->pool) {
+ kfree(ctlr);
+ return NULL;
+ }
+
+ if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
+ ctlr->num_chan = CPDMA_MAX_CHANNELS;
+ return ctlr;
+}
+
+int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (ctlr->state != CPDMA_STATE_IDLE) {
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return -EBUSY;
+ }
+
+ if (ctlr->params.has_soft_reset) {
+ unsigned long timeout = jiffies + HZ/10;
+
+ dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
+ while (time_before(jiffies, timeout)) {
+ if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
+ break;
+ }
+ WARN_ON(!time_before(jiffies, timeout));
+ }
+
+ for (i = 0; i < ctlr->num_chan; i++) {
+ __raw_writel(0, ctlr->params.txhdp + 4 * i);
+ __raw_writel(0, ctlr->params.rxhdp + 4 * i);
+ __raw_writel(0, ctlr->params.txcp + 4 * i);
+ __raw_writel(0, ctlr->params.rxcp + 4 * i);
+ }
+
+ dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
+ dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
+
+ dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
+ dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);
+
+ ctlr->state = CPDMA_STATE_ACTIVE;
+
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+ if (ctlr->channels[i])
+ cpdma_chan_start(ctlr->channels[i]);
+ }
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
+}
+
+int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (ctlr->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return -EINVAL;
+ }
+
+ ctlr->state = CPDMA_STATE_TEARDOWN;
+
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+ if (ctlr->channels[i])
+ cpdma_chan_stop(ctlr->channels[i]);
+ }
+
+ dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
+ dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
+
+ dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
+ dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);
+
+ ctlr->state = CPDMA_STATE_IDLE;
+
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
+}
+
+int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
+{
+ struct device *dev = ctlr->dev;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+
+ dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]);
+
+ dev_info(dev, "CPDMA: txidver: %x",
+ dma_reg_read(ctlr, CPDMA_TXIDVER));
+ dev_info(dev, "CPDMA: txcontrol: %x",
+ dma_reg_read(ctlr, CPDMA_TXCONTROL));
+ dev_info(dev, "CPDMA: txteardown: %x",
+ dma_reg_read(ctlr, CPDMA_TXTEARDOWN));
+ dev_info(dev, "CPDMA: rxidver: %x",
+ dma_reg_read(ctlr, CPDMA_RXIDVER));
+ dev_info(dev, "CPDMA: rxcontrol: %x",
+ dma_reg_read(ctlr, CPDMA_RXCONTROL));
+ dev_info(dev, "CPDMA: softreset: %x",
+ dma_reg_read(ctlr, CPDMA_SOFTRESET));
+ dev_info(dev, "CPDMA: rxteardown: %x",
+ dma_reg_read(ctlr, CPDMA_RXTEARDOWN));
+ dev_info(dev, "CPDMA: txintstatraw: %x",
+ dma_reg_read(ctlr, CPDMA_TXINTSTATRAW));
+ dev_info(dev, "CPDMA: txintstatmasked: %x",
+ dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED));
+ dev_info(dev, "CPDMA: txintmaskset: %x",
+ dma_reg_read(ctlr, CPDMA_TXINTMASKSET));
+ dev_info(dev, "CPDMA: txintmaskclear: %x",
+ dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR));
+ dev_info(dev, "CPDMA: macinvector: %x",
+ dma_reg_read(ctlr, CPDMA_MACINVECTOR));
+ dev_info(dev, "CPDMA: maceoivector: %x",
+ dma_reg_read(ctlr, CPDMA_MACEOIVECTOR));
+ dev_info(dev, "CPDMA: rxintstatraw: %x",
+ dma_reg_read(ctlr, CPDMA_RXINTSTATRAW));
+ dev_info(dev, "CPDMA: rxintstatmasked: %x",
+ dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED));
+ dev_info(dev, "CPDMA: rxintmaskset: %x",
+ dma_reg_read(ctlr, CPDMA_RXINTMASKSET));
+ dev_info(dev, "CPDMA: rxintmaskclear: %x",
+ dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR));
+ dev_info(dev, "CPDMA: dmaintstatraw: %x",
+ dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW));
+ dev_info(dev, "CPDMA: dmaintstatmasked: %x",
+ dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED));
+ dev_info(dev, "CPDMA: dmaintmaskset: %x",
+ dma_reg_read(ctlr, CPDMA_DMAINTMASKSET));
+ dev_info(dev, "CPDMA: dmaintmaskclear: %x",
+ dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR));
+
+ if (ctlr->params.has_ext_regs) {
+ dev_info(dev, "CPDMA: dmacontrol: %x",
+ dma_reg_read(ctlr, CPDMA_DMACONTROL));
+ dev_info(dev, "CPDMA: dmastatus: %x",
+ dma_reg_read(ctlr, CPDMA_DMASTATUS));
+ dev_info(dev, "CPDMA: rxbuffofs: %x",
+ dma_reg_read(ctlr, CPDMA_RXBUFFOFS));
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
+ if (ctlr->channels[i])
+ cpdma_chan_dump(ctlr->channels[i]);
+
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
+}
+
+int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
+{
+ unsigned long flags;
+ int ret = 0, i;
+
+ if (!ctlr)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (ctlr->state != CPDMA_STATE_IDLE)
+ cpdma_ctlr_stop(ctlr);
+
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+ if (ctlr->channels[i])
+ cpdma_chan_destroy(ctlr->channels[i]);
+ }
+
+ cpdma_desc_pool_destroy(ctlr->pool);
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ kfree(ctlr);
+ return ret;
+}
+
+int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
+{
+ unsigned long flags;
+ int i, reg;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (ctlr->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return -EINVAL;
+ }
+
+ reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
+ dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);
+
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+ if (ctlr->channels[i])
+ cpdma_chan_int_ctrl(ctlr->channels[i], enable);
+ }
+
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
+}
+
+void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr)
+{
+ dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 0);
+}
+
+struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
+ cpdma_handler_fn handler)
+{
+ struct cpdma_chan *chan;
+ int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
+ unsigned long flags;
+
+ if (__chan_linear(chan_num) >= ctlr->num_chan)
+ return NULL;
+
+ ret = -ENOMEM;
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ goto err_chan_alloc;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ ret = -EBUSY;
+ if (ctlr->channels[chan_num])
+ goto err_chan_busy;
+
+ chan->ctlr = ctlr;
+ chan->state = CPDMA_STATE_IDLE;
+ chan->chan_num = chan_num;
+ chan->handler = handler;
+
+ if (is_rx_chan(chan)) {
+ chan->hdp = ctlr->params.rxhdp + offset;
+ chan->cp = ctlr->params.rxcp + offset;
+ chan->rxfree = ctlr->params.rxfree + offset;
+ chan->int_set = CPDMA_RXINTMASKSET;
+ chan->int_clear = CPDMA_RXINTMASKCLEAR;
+ chan->td = CPDMA_RXTEARDOWN;
+ chan->dir = DMA_FROM_DEVICE;
+ } else {
+ chan->hdp = ctlr->params.txhdp + offset;
+ chan->cp = ctlr->params.txcp + offset;
+ chan->int_set = CPDMA_TXINTMASKSET;
+ chan->int_clear = CPDMA_TXINTMASKCLEAR;
+ chan->td = CPDMA_TXTEARDOWN;
+ chan->dir = DMA_TO_DEVICE;
+ }
+ chan->mask = BIT(chan_linear(chan));
+
+ spin_lock_init(&chan->lock);
+
+ ctlr->channels[chan_num] = chan;
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return chan;
+
+err_chan_busy:
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ kfree(chan);
+err_chan_alloc:
+ return ERR_PTR(ret);
+}
+
+int cpdma_chan_destroy(struct cpdma_chan *chan)
+{
+ struct cpdma_ctlr *ctlr;
+ unsigned long flags;
+
+ if (!chan)
+ return -EINVAL;
+ ctlr = chan->ctlr;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (chan->state != CPDMA_STATE_IDLE)
+ cpdma_chan_stop(chan);
+ ctlr->channels[chan->chan_num] = NULL;
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ kfree(chan);
+ return 0;
+}
+
+int cpdma_chan_get_stats(struct cpdma_chan *chan,
+ struct cpdma_chan_stats *stats)
+{
+ unsigned long flags;
+ if (!chan)
+ return -EINVAL;
+ spin_lock_irqsave(&chan->lock, flags);
+ memcpy(stats, &chan->stats, sizeof(*stats));
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+}
+
+int cpdma_chan_dump(struct cpdma_chan *chan)
+{
+ unsigned long flags;
+ struct device *dev = chan->ctlr->dev;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ dev_info(dev, "channel %d (%s %d) state %s",
+ chan->chan_num, is_rx_chan(chan) ? "rx" : "tx",
+ chan_linear(chan), cpdma_state_str[chan->state]);
+ dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp));
+ dev_info(dev, "\tcp: %x\n", chan_read(chan, cp));
+ if (chan->rxfree) {
+ dev_info(dev, "\trxfree: %x\n",
+ chan_read(chan, rxfree));
+ }
+
+ dev_info(dev, "\tstats head_enqueue: %d\n",
+ chan->stats.head_enqueue);
+ dev_info(dev, "\tstats tail_enqueue: %d\n",
+ chan->stats.tail_enqueue);
+ dev_info(dev, "\tstats pad_enqueue: %d\n",
+ chan->stats.pad_enqueue);
+ dev_info(dev, "\tstats misqueued: %d\n",
+ chan->stats.misqueued);
+ dev_info(dev, "\tstats desc_alloc_fail: %d\n",
+ chan->stats.desc_alloc_fail);
+ dev_info(dev, "\tstats pad_alloc_fail: %d\n",
+ chan->stats.pad_alloc_fail);
+ dev_info(dev, "\tstats runt_receive_buff: %d\n",
+ chan->stats.runt_receive_buff);
+ dev_info(dev, "\tstats runt_transmit_buff: %d\n",
+ chan->stats.runt_transmit_buff);
+ dev_info(dev, "\tstats empty_dequeue: %d\n",
+ chan->stats.empty_dequeue);
+ dev_info(dev, "\tstats busy_dequeue: %d\n",
+ chan->stats.busy_dequeue);
+ dev_info(dev, "\tstats good_dequeue: %d\n",
+ chan->stats.good_dequeue);
+ dev_info(dev, "\tstats requeue: %d\n",
+ chan->stats.requeue);
+ dev_info(dev, "\tstats teardown_dequeue: %d\n",
+ chan->stats.teardown_dequeue);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+}
+
+static void __cpdma_chan_submit(struct cpdma_chan *chan,
+ struct cpdma_desc __iomem *desc)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc __iomem *prev = chan->tail;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ dma_addr_t desc_dma;
+ u32 mode;
+
+ desc_dma = desc_phys(pool, desc);
+
+ /* simple case - idle channel */
+ if (!chan->head) {
+ chan->stats.head_enqueue++;
+ chan->head = desc;
+ chan->tail = desc;
+ if (chan->state == CPDMA_STATE_ACTIVE)
+ chan_write(chan, hdp, desc_dma);
+ return;
+ }
+
+ /* first chain the descriptor at the tail of the list */
+ desc_write(prev, hw_next, desc_dma);
+ chan->tail = desc;
+ chan->stats.tail_enqueue++;
+
+ /* next check if EOQ has been triggered already */
+ mode = desc_read(prev, hw_mode);
+ if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
+ (chan->state == CPDMA_STATE_ACTIVE)) {
+ desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
+ chan_write(chan, hdp, desc_dma);
+ chan->stats.misqueued++;
+ }
+}
+
+int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
+ int len, gfp_t gfp_mask)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc __iomem *desc;
+ dma_addr_t buffer;
+ unsigned long flags;
+ u32 mode;
+ int ret = 0;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ if (chan->state == CPDMA_STATE_TEARDOWN) {
+ ret = -EINVAL;
+ goto unlock_ret;
+ }
+
+ desc = cpdma_desc_alloc(ctlr->pool, 1);
+ if (!desc) {
+ chan->stats.desc_alloc_fail++;
+ ret = -ENOMEM;
+ goto unlock_ret;
+ }
+
+ if (len < ctlr->params.min_packet_size) {
+ len = ctlr->params.min_packet_size;
+ chan->stats.runt_transmit_buff++;
+ }
+
+ buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
+ mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
+
+ desc_write(desc, hw_next, 0);
+ desc_write(desc, hw_buffer, buffer);
+ desc_write(desc, hw_len, len);
+ desc_write(desc, hw_mode, mode | len);
+ desc_write(desc, sw_token, token);
+ desc_write(desc, sw_buffer, buffer);
+ desc_write(desc, sw_len, len);
+
+ __cpdma_chan_submit(chan, desc);
+
+ if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
+ chan_write(chan, rxfree, 1);
+
+ chan->count++;
+
+unlock_ret:
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return ret;
+}
+
+static void __cpdma_chan_free(struct cpdma_chan *chan,
+ struct cpdma_desc __iomem *desc,
+ int outlen, int status)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ dma_addr_t buff_dma;
+ int origlen;
+ void *token;
+
+ token = (void *)desc_read(desc, sw_token);
+ buff_dma = desc_read(desc, sw_buffer);
+ origlen = desc_read(desc, sw_len);
+
+ dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
+ cpdma_desc_free(pool, desc, 1);
+ (*chan->handler)(token, outlen, status);
+}
+
+static int __cpdma_chan_process(struct cpdma_chan *chan)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc __iomem *desc;
+ int status, outlen;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ dma_addr_t desc_dma;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ desc = chan->head;
+ if (!desc) {
+ chan->stats.empty_dequeue++;
+ status = -ENOENT;
+ goto unlock_ret;
+ }
+ desc_dma = desc_phys(pool, desc);
+
+ status = __raw_readl(&desc->hw_mode);
+ outlen = status & 0x7ff;
+ if (status & CPDMA_DESC_OWNER) {
+ chan->stats.busy_dequeue++;
+ status = -EBUSY;
+ goto unlock_ret;
+ }
+ status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE);
+
+ chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
+ chan_write(chan, cp, desc_dma);
+ chan->count--;
+ chan->stats.good_dequeue++;
+
+ if (status & CPDMA_DESC_EOQ) {
+ chan->stats.requeue++;
+ chan_write(chan, hdp, desc_phys(pool, chan->head));
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ __cpdma_chan_free(chan, desc, outlen, status);
+ return status;
+
+unlock_ret:
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return status;
+}
+
+int cpdma_chan_process(struct cpdma_chan *chan, int quota)
+{
+ int used = 0, ret = 0;
+
+ if (chan->state != CPDMA_STATE_ACTIVE)
+ return -EINVAL;
+
+ while (used < quota) {
+ ret = __cpdma_chan_process(chan);
+ if (ret < 0)
+ break;
+ used++;
+ }
+ return used;
+}
+
+int cpdma_chan_start(struct cpdma_chan *chan)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state != CPDMA_STATE_IDLE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EBUSY;
+ }
+ if (ctlr->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+ dma_reg_write(ctlr, chan->int_set, chan->mask);
+ chan->state = CPDMA_STATE_ACTIVE;
+ if (chan->head) {
+ chan_write(chan, hdp, desc_phys(pool, chan->head));
+ if (chan->rxfree)
+ chan_write(chan, rxfree, chan->count);
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+}
+
+int cpdma_chan_stop(struct cpdma_chan *chan)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ unsigned long flags;
+ int ret;
+ unsigned long timeout;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+
+ chan->state = CPDMA_STATE_TEARDOWN;
+ dma_reg_write(ctlr, chan->int_clear, chan->mask);
+
+ /* trigger teardown */
+ dma_reg_write(ctlr, chan->td, chan->chan_num);
+
+ /* wait for teardown complete */
+ timeout = jiffies + HZ/10; /* 100 msec */
+ while (time_before(jiffies, timeout)) {
+ u32 cp = chan_read(chan, cp);
+ if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
+ break;
+ cpu_relax();
+ }
+ WARN_ON(!time_before(jiffies, timeout));
+ chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
+
+ /* handle completed packets */
+ do {
+ ret = __cpdma_chan_process(chan);
+ if (ret < 0)
+ break;
+ } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
+
+ /* remaining packets haven't been tx/rx'ed, clean them up */
+ while (chan->head) {
+ struct cpdma_desc __iomem *desc = chan->head;
+ dma_addr_t next_dma;
+
+ next_dma = desc_read(desc, hw_next);
+ chan->head = desc_from_phys(pool, next_dma);
+ chan->stats.teardown_dequeue++;
+
+ /* issue callback without locks held */
+ spin_unlock_irqrestore(&chan->lock, flags);
+ __cpdma_chan_free(chan, desc, 0, -ENOSYS);
+ spin_lock_irqsave(&chan->lock, flags);
+ }
+
+ chan->state = CPDMA_STATE_IDLE;
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+}
+
+int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+
+ dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
+ chan->mask);
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return 0;
+}
+
+struct cpdma_control_info {
+ u32 reg;
+ u32 shift, mask;
+ int access;
+#define ACCESS_RO BIT(0)
+#define ACCESS_WO BIT(1)
+#define ACCESS_RW (ACCESS_RO | ACCESS_WO)
+};
+
+static struct cpdma_control_info controls[] = {
+ [CPDMA_CMD_IDLE] = {CPDMA_DMACONTROL, 3, 1, ACCESS_WO},
+ [CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL, 4, 1, ACCESS_RW},
+ [CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL, 2, 1, ACCESS_RW},
+ [CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL, 1, 1, ACCESS_RW},
+ [CPDMA_TX_PRIO_FIXED] = {CPDMA_DMACONTROL, 0, 1, ACCESS_RW},
+ [CPDMA_STAT_IDLE] = {CPDMA_DMASTATUS, 31, 1, ACCESS_RO},
+ [CPDMA_STAT_TX_ERR_CODE] = {CPDMA_DMASTATUS, 20, 0xf, ACCESS_RW},
+ [CPDMA_STAT_TX_ERR_CHAN] = {CPDMA_DMASTATUS, 16, 0x7, ACCESS_RW},
+ [CPDMA_STAT_RX_ERR_CODE] = {CPDMA_DMASTATUS, 12, 0xf, ACCESS_RW},
+ [CPDMA_STAT_RX_ERR_CHAN] = {CPDMA_DMASTATUS, 8, 0x7, ACCESS_RW},
+ [CPDMA_RX_BUFFER_OFFSET] = {CPDMA_RXBUFFOFS, 0, 0xffff, ACCESS_RW},
+};
+
+int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
+{
+ unsigned long flags;
+ struct cpdma_control_info *info = &controls[control];
+ int ret;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+
+ ret = -ENOTSUPP;
+ if (!ctlr->params.has_ext_regs)
+ goto unlock_ret;
+
+ ret = -EINVAL;
+ if (ctlr->state != CPDMA_STATE_ACTIVE)
+ goto unlock_ret;
+
+ ret = -ENOENT;
+ if (control < 0 || control >= ARRAY_SIZE(controls))
+ goto unlock_ret;
+
+ ret = -EPERM;
+ if ((info->access & ACCESS_RO) != ACCESS_RO)
+ goto unlock_ret;
+
+ ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;
+
+unlock_ret:
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return ret;
+}
+
+int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
+{
+ unsigned long flags;
+ struct cpdma_control_info *info = &controls[control];
+ int ret;
+ u32 val;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+
+ ret = -ENOTSUPP;
+ if (!ctlr->params.has_ext_regs)
+ goto unlock_ret;
+
+ ret = -EINVAL;
+ if (ctlr->state != CPDMA_STATE_ACTIVE)
+ goto unlock_ret;
+
+ ret = -ENOENT;
+ if (control < 0 || control >= ARRAY_SIZE(controls))
+ goto unlock_ret;
+
+ ret = -EPERM;
+ if ((info->access & ACCESS_WO) != ACCESS_WO)
+ goto unlock_ret;
+
+ val = dma_reg_read(ctlr, info->reg);
+ val &= ~(info->mask << info->shift);
+ val |= (value & info->mask) << info->shift;
+ dma_reg_write(ctlr, info->reg, val);
+ ret = 0;
+
+unlock_ret:
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return ret;
+}
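
To make the dequeue path concrete: __cpdma_chan_process() is meant to be driven from softirq context, for example a NAPI poll loop. A sketch follows; the priv structure and its fields are hypothetical, not part of this patch:

/* Sketch: draining completed rx descriptors from NAPI poll.
 * cpdma_chan_process() returns how many descriptors it handled,
 * stopping at the quota or when the channel runs dry.
 */
#include <linux/netdevice.h>
#include "davinci_cpdma.h"

struct my_priv {
	struct napi_struct	napi;
	struct cpdma_ctlr	*dma;
	struct cpdma_chan	*rxchan;
};

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int rx_done = cpdma_chan_process(priv->rxchan, budget);

	if (rx_done < budget) {
		napi_complete(napi);
		cpdma_ctlr_int_ctrl(priv->dma, true);	/* re-arm interrupts */
	}
	return rx_done;
}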
diff --git a/drivers/net/davinci_cpdma.h b/drivers/net/davinci_cpdma.h
new file mode 100644
index 000000000000..868e50ebde45
--- /dev/null
+++ b/drivers/net/davinci_cpdma.h
@@ -0,0 +1,108 @@
+/*
+ * Texas Instruments CPDMA Driver
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __DAVINCI_CPDMA_H__
+#define __DAVINCI_CPDMA_H__
+
+#define CPDMA_MAX_CHANNELS BITS_PER_LONG
+
+#define tx_chan_num(chan) (chan)
+#define rx_chan_num(chan) ((chan) + CPDMA_MAX_CHANNELS)
+#define is_rx_chan(chan) ((chan)->chan_num >= CPDMA_MAX_CHANNELS)
+#define is_tx_chan(chan) (!is_rx_chan(chan))
+#define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1))
+#define chan_linear(chan) __chan_linear((chan)->chan_num)
+
+struct cpdma_params {
+ struct device *dev;
+ void __iomem *dmaregs;
+ void __iomem *txhdp, *rxhdp, *txcp, *rxcp;
+ void __iomem *rxthresh, *rxfree;
+ int num_chan;
+ bool has_soft_reset;
+ int min_packet_size;
+ u32 desc_mem_phys;
+ int desc_mem_size;
+ int desc_align;
+
+ /*
+ * Some instances of embedded cpdma controllers have extra control and
+ * status registers. The following flag enables access to these
+ * "extended" registers.
+ */
+ bool has_ext_regs;
+};
+
+struct cpdma_chan_stats {
+ u32 head_enqueue;
+ u32 tail_enqueue;
+ u32 pad_enqueue;
+ u32 misqueued;
+ u32 desc_alloc_fail;
+ u32 pad_alloc_fail;
+ u32 runt_receive_buff;
+ u32 runt_transmit_buff;
+ u32 empty_dequeue;
+ u32 busy_dequeue;
+ u32 good_dequeue;
+ u32 requeue;
+ u32 teardown_dequeue;
+};
+
+struct cpdma_ctlr;
+struct cpdma_chan;
+
+typedef void (*cpdma_handler_fn)(void *token, int len, int status);
+
+struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params);
+int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr);
+int cpdma_ctlr_start(struct cpdma_ctlr *ctlr);
+int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr);
+int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr);
+
+struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
+ cpdma_handler_fn handler);
+int cpdma_chan_destroy(struct cpdma_chan *chan);
+int cpdma_chan_start(struct cpdma_chan *chan);
+int cpdma_chan_stop(struct cpdma_chan *chan);
+int cpdma_chan_dump(struct cpdma_chan *chan);
+
+int cpdma_chan_get_stats(struct cpdma_chan *chan,
+ struct cpdma_chan_stats *stats);
+int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
+ int len, gfp_t gfp_mask);
+int cpdma_chan_process(struct cpdma_chan *chan, int quota);
+
+int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable);
+void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr);
+int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable);
+
+enum cpdma_control {
+ CPDMA_CMD_IDLE, /* write-only */
+ CPDMA_COPY_ERROR_FRAMES, /* read-write */
+ CPDMA_RX_OFF_LEN_UPDATE, /* read-write */
+ CPDMA_RX_OWNERSHIP_FLIP, /* read-write */
+ CPDMA_TX_PRIO_FIXED, /* read-write */
+ CPDMA_STAT_IDLE, /* read-only */
+ CPDMA_STAT_TX_ERR_CHAN, /* read-only */
+ CPDMA_STAT_TX_ERR_CODE, /* read-only */
+ CPDMA_STAT_RX_ERR_CHAN, /* read-only */
+ CPDMA_STAT_RX_ERR_CODE, /* read-only */
+ CPDMA_RX_BUFFER_OFFSET, /* read-write */
+};
+
+int cpdma_control_get(struct cpdma_ctlr *ctlr, int control);
+int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value);
+
+#endif
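
The header above is the complete public contract of the new cpdma layer. As a reading aid, here is a minimal usage sketch; it is hypothetical and not part of this patch, the example_* names are placeholders, and cleanup is abbreviated:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include "davinci_cpdma.h"

	static void example_tx_done(void *token, int len, int status)
	{
		/* token is whatever the caller passed to cpdma_chan_submit() */
		dev_kfree_skb_any(token);
	}

	static int example_setup(struct cpdma_params *params, struct sk_buff *skb)
	{
		struct cpdma_ctlr *ctlr;
		struct cpdma_chan *txchan;

		ctlr = cpdma_ctlr_create(params);
		if (!ctlr)
			return -ENOMEM;

		/* tx_chan_num()/rx_chan_num() fold both directions into one
		 * linear space: tx channel 0 maps to 0, rx channel 0 maps to
		 * CPDMA_MAX_CHANNELS */
		txchan = cpdma_chan_create(ctlr, tx_chan_num(0), example_tx_done);
		if (!txchan) {
			cpdma_ctlr_destroy(ctlr);
			return -ENOMEM;
		}

		cpdma_ctlr_start(ctlr);

		/* queue one buffer; example_tx_done() runs on completion, and
		 * cpdma_chan_process() drains completions (e.g. from NAPI poll) */
		return cpdma_chan_submit(txchan, skb, skb->data, skb->len,
					 GFP_KERNEL);
	}

This mirrors how the reworked davinci_emac.c below wires up its transmit path.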
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 7fbd052ddb0a..2a628d17d178 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -63,6 +63,8 @@
#include <asm/irq.h>
#include <asm/page.h>
+#include "davinci_cpdma.h"
+
static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "DaVinci EMAC debug level (NETIF_MSG bits)");
@@ -113,7 +115,7 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_DEF_MAX_FRAME_SIZE (1500 + 14 + 4 + 4)
#define EMAC_DEF_TX_CH (0) /* Default 0th channel */
#define EMAC_DEF_RX_CH (0) /* Default 0th channel */
-#define EMAC_DEF_MDIO_TICK_MS (10) /* typically 1 tick=1 ms) */
+#define EMAC_DEF_RX_NUM_DESC (128)
#define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */
#define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */
#define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */
@@ -125,7 +127,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
/* EMAC register related defines */
#define EMAC_ALL_MULTI_REG_VALUE (0xFFFFFFFF)
#define EMAC_NUM_MULTICAST_BITS (64)
-#define EMAC_TEARDOWN_VALUE (0xFFFFFFFC)
#define EMAC_TX_CONTROL_TX_ENABLE_VAL (0x1)
#define EMAC_RX_CONTROL_RX_ENABLE_VAL (0x1)
#define EMAC_MAC_HOST_ERR_INTMASK_VAL (0x2)
@@ -212,24 +213,10 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_DEF_MAX_MULTICAST_ADDRESSES (64) /* Max mcast addr's */
/* EMAC Peripheral Device Register Memory Layout structure */
-#define EMAC_TXIDVER 0x0
-#define EMAC_TXCONTROL 0x4
-#define EMAC_TXTEARDOWN 0x8
-#define EMAC_RXIDVER 0x10
-#define EMAC_RXCONTROL 0x14
-#define EMAC_RXTEARDOWN 0x18
-#define EMAC_TXINTSTATRAW 0x80
-#define EMAC_TXINTSTATMASKED 0x84
-#define EMAC_TXINTMASKSET 0x88
-#define EMAC_TXINTMASKCLEAR 0x8C
#define EMAC_MACINVECTOR 0x90
#define EMAC_DM646X_MACEOIVECTOR 0x94
-#define EMAC_RXINTSTATRAW 0xA0
-#define EMAC_RXINTSTATMASKED 0xA4
-#define EMAC_RXINTMASKSET 0xA8
-#define EMAC_RXINTMASKCLEAR 0xAC
#define EMAC_MACINTSTATRAW 0xB0
#define EMAC_MACINTSTATMASKED 0xB4
#define EMAC_MACINTMASKSET 0xB8
@@ -256,12 +243,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_MACADDRHI 0x504
#define EMAC_MACINDEX 0x508
-/* EMAC HDP and Completion registors */
-#define EMAC_TXHDP(ch) (0x600 + (ch * 4))
-#define EMAC_RXHDP(ch) (0x620 + (ch * 4))
-#define EMAC_TXCP(ch) (0x640 + (ch * 4))
-#define EMAC_RXCP(ch) (0x660 + (ch * 4))
-
/* EMAC statistics registers */
#define EMAC_RXGOODFRAMES 0x200
#define EMAC_RXBCASTFRAMES 0x204
@@ -303,25 +284,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_DM644X_INTMIN_INTVL 0x1
#define EMAC_DM644X_INTMAX_INTVL (EMAC_DM644X_EWINTCNT_MASK)
-/* EMAC MDIO related */
-/* Mask & Control defines */
-#define MDIO_CONTROL_CLKDIV (0xFF)
-#define MDIO_CONTROL_ENABLE BIT(30)
-#define MDIO_USERACCESS_GO BIT(31)
-#define MDIO_USERACCESS_WRITE BIT(30)
-#define MDIO_USERACCESS_READ (0)
-#define MDIO_USERACCESS_REGADR (0x1F << 21)
-#define MDIO_USERACCESS_PHYADR (0x1F << 16)
-#define MDIO_USERACCESS_DATA (0xFFFF)
-#define MDIO_USERPHYSEL_LINKSEL BIT(7)
-#define MDIO_VER_MODID (0xFFFF << 16)
-#define MDIO_VER_REVMAJ (0xFF << 8)
-#define MDIO_VER_REVMIN (0xFF)
-
-#define MDIO_USERACCESS(inst) (0x80 + (inst * 8))
-#define MDIO_USERPHYSEL(inst) (0x84 + (inst * 8))
-#define MDIO_CONTROL (0x04)
-
/* EMAC DM646X control module registers */
#define EMAC_DM646X_CMINTCTRL 0x0C
#define EMAC_DM646X_CMRXINTEN 0x14
@@ -345,120 +307,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
/* EMAC Stats Clear Mask */
#define EMAC_STATS_CLR_MASK (0xFFFFFFFF)
-/** net_buf_obj: EMAC network bufferdata structure
- *
- * EMAC network buffer data structure
- */
-struct emac_netbufobj {
- void *buf_token;
- char *data_ptr;
- int length;
-};
-
-/** net_pkt_obj: EMAC network packet data structure
- *
- * EMAC network packet data structure - supports buffer list (for future)
- */
-struct emac_netpktobj {
- void *pkt_token; /* data token may hold tx/rx chan id */
- struct emac_netbufobj *buf_list; /* array of network buffer objects */
- int num_bufs;
- int pkt_length;
-};
-
-/** emac_tx_bd: EMAC TX Buffer descriptor data structure
- *
- * EMAC TX Buffer descriptor data structure
- */
-struct emac_tx_bd {
- int h_next;
- int buff_ptr;
- int off_b_len;
- int mode; /* SOP, EOP, ownership, EOQ, teardown,Qstarv, length */
- struct emac_tx_bd __iomem *next;
- void *buf_token;
-};
-
-/** emac_txch: EMAC TX Channel data structure
- *
- * EMAC TX Channel data structure
- */
-struct emac_txch {
- /* Config related */
- u32 num_bd;
- u32 service_max;
-
- /* CPPI specific */
- u32 alloc_size;
- void __iomem *bd_mem;
- struct emac_tx_bd __iomem *bd_pool_head;
- struct emac_tx_bd __iomem *active_queue_head;
- struct emac_tx_bd __iomem *active_queue_tail;
- struct emac_tx_bd __iomem *last_hw_bdprocessed;
- u32 queue_active;
- u32 teardown_pending;
- u32 *tx_complete;
-
- /** statistics */
- u32 proc_count; /* TX: # of times emac_tx_bdproc is called */
- u32 mis_queued_packets;
- u32 queue_reinit;
- u32 end_of_queue_add;
- u32 out_of_tx_bd;
- u32 no_active_pkts; /* IRQ when there were no packets to process */
- u32 active_queue_count;
-};
-
-/** emac_rx_bd: EMAC RX Buffer descriptor data structure
- *
- * EMAC RX Buffer descriptor data structure
- */
-struct emac_rx_bd {
- int h_next;
- int buff_ptr;
- int off_b_len;
- int mode;
- struct emac_rx_bd __iomem *next;
- void *data_ptr;
- void *buf_token;
-};
-
-/** emac_rxch: EMAC RX Channel data structure
- *
- * EMAC RX Channel data structure
- */
-struct emac_rxch {
- /* configuration info */
- u32 num_bd;
- u32 service_max;
- u32 buf_size;
- char mac_addr[6];
-
- /** CPPI specific */
- u32 alloc_size;
- void __iomem *bd_mem;
- struct emac_rx_bd __iomem *bd_pool_head;
- struct emac_rx_bd __iomem *active_queue_head;
- struct emac_rx_bd __iomem *active_queue_tail;
- u32 queue_active;
- u32 teardown_pending;
-
- /* packet and buffer objects */
- struct emac_netpktobj pkt_queue;
- struct emac_netbufobj buf_queue;
-
- /** statistics */
- u32 proc_count; /* number of times emac_rx_bdproc is called */
- u32 processed_bd;
- u32 recycled_bd;
- u32 out_of_rx_bd;
- u32 out_of_rx_buffers;
- u32 queue_reinit;
- u32 end_of_queue_add;
- u32 end_of_queue;
- u32 mis_queued_packets;
-};
-
/* emac_priv: EMAC private data structure
*
* EMAC adapter private data structure
@@ -469,17 +317,13 @@ struct emac_priv {
struct platform_device *pdev;
struct napi_struct napi;
char mac_addr[6];
- spinlock_t tx_lock;
- spinlock_t rx_lock;
void __iomem *remap_addr;
u32 emac_base_phys;
void __iomem *emac_base;
void __iomem *ctrl_base;
- void __iomem *emac_ctrl_ram;
- u32 ctrl_ram_size;
- u32 hw_ram_addr;
- struct emac_txch *txch[EMAC_DEF_MAX_TX_CH];
- struct emac_rxch *rxch[EMAC_DEF_MAX_RX_CH];
+ struct cpdma_ctlr *dma;
+ struct cpdma_chan *txchan;
+ struct cpdma_chan *rxchan;
u32 link; /* 1=link on, 0=link off */
u32 speed; /* 0=Auto Neg, 1=No PHY, 10,100, 1000 - mbps */
u32 duplex; /* Link duplex: 0=Half, 1=Full */
@@ -493,13 +337,7 @@ struct emac_priv {
u32 mac_hash2;
u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
u32 rx_addr_type;
- /* periodic timer required for MDIO polling */
- struct timer_list periodic_timer;
- u32 periodic_ticks;
- u32 timer_active;
- u32 phy_mask;
- /* mii_bus,phy members */
- struct mii_bus *mii_bus;
+ const char *phy_id;
struct phy_device *phydev;
spinlock_t lock;
/*platform specific members*/
@@ -510,19 +348,6 @@ struct emac_priv {
/* clock frequency for EMAC */
static struct clk *emac_clk;
static unsigned long emac_bus_frequency;
-static unsigned long mdio_max_freq;
-
-#define emac_virt_to_phys(addr, priv) \
- (((u32 __force)(addr) - (u32 __force)(priv->emac_ctrl_ram)) \
- + priv->hw_ram_addr)
-
-/* Cache macros - Packet buffers would be from skb pool which is cached */
-#define EMAC_VIRT_NOCACHE(addr) (addr)
-
-/* DM644x does not have BD's in cached memory - so no cache functions */
-#define BD_CACHE_INVALIDATE(addr, size)
-#define BD_CACHE_WRITEBACK(addr, size)
-#define BD_CACHE_WRITEBACK_INVALIDATE(addr, size)
/* EMAC TX Host Error description strings */
static char *emac_txhost_errcodes[16] = {
@@ -548,9 +373,6 @@ static char *emac_rxhost_errcodes[16] = {
#define emac_ctrl_read(reg) ioread32((priv->ctrl_base + (reg)))
#define emac_ctrl_write(reg, val) iowrite32(val, (priv->ctrl_base + (reg)))
-#define emac_mdio_read(reg) ioread32(bus->priv + (reg))
-#define emac_mdio_write(reg, val) iowrite32(val, (bus->priv + (reg)))
-
/**
* emac_dump_regs: Dump important EMAC registers to debug terminal
* @priv: The DaVinci EMAC private adapter structure
@@ -569,20 +391,6 @@ static void emac_dump_regs(struct emac_priv *priv)
emac_ctrl_read(EMAC_CTRL_EWCTL),
emac_ctrl_read(EMAC_CTRL_EWINTTCNT));
}
- dev_info(emac_dev, "EMAC: TXID: %08X %s, RXID: %08X %s\n",
- emac_read(EMAC_TXIDVER),
- ((emac_read(EMAC_TXCONTROL)) ? "enabled" : "disabled"),
- emac_read(EMAC_RXIDVER),
- ((emac_read(EMAC_RXCONTROL)) ? "enabled" : "disabled"));
- dev_info(emac_dev, "EMAC: TXIntRaw:%08X, TxIntMasked: %08X, "\
- "TxIntMasSet: %08X\n", emac_read(EMAC_TXINTSTATRAW),
- emac_read(EMAC_TXINTSTATMASKED), emac_read(EMAC_TXINTMASKSET));
- dev_info(emac_dev, "EMAC: RXIntRaw:%08X, RxIntMasked: %08X, "\
- "RxIntMasSet: %08X\n", emac_read(EMAC_RXINTSTATRAW),
- emac_read(EMAC_RXINTSTATMASKED), emac_read(EMAC_RXINTMASKSET));
- dev_info(emac_dev, "EMAC: MacIntRaw:%08X, MacIntMasked: %08X, "\
- "MacInVector=%08X\n", emac_read(EMAC_MACINTSTATRAW),
- emac_read(EMAC_MACINTSTATMASKED), emac_read(EMAC_MACINVECTOR));
dev_info(emac_dev, "EMAC: EmuControl:%08X, FifoControl: %08X\n",
emac_read(EMAC_EMCONTROL), emac_read(EMAC_FIFOCONTROL));
dev_info(emac_dev, "EMAC: MBPEnable:%08X, RXUnicastSet: %08X, "\
@@ -591,8 +399,6 @@ static void emac_dump_regs(struct emac_priv *priv)
dev_info(emac_dev, "EMAC: MacControl:%08X, MacStatus: %08X, "\
"MacConfig=%08X\n", emac_read(EMAC_MACCONTROL),
emac_read(EMAC_MACSTATUS), emac_read(EMAC_MACCONFIG));
- dev_info(emac_dev, "EMAC: TXHDP[0]:%08X, RXHDP[0]: %08X\n",
- emac_read(EMAC_TXHDP(0)), emac_read(EMAC_RXHDP(0)));
dev_info(emac_dev, "EMAC Statistics\n");
dev_info(emac_dev, "EMAC: rx_good_frames:%d\n",
emac_read(EMAC_RXGOODFRAMES));
@@ -654,11 +460,10 @@ static void emac_dump_regs(struct emac_priv *priv)
emac_read(EMAC_RXMOFOVERRUNS));
dev_info(emac_dev, "EMAC: rx_dma_overruns:%d\n",
emac_read(EMAC_RXDMAOVERRUNS));
+
+ cpdma_ctlr_dump(priv->dma);
}
-/*************************************************************************
- * EMAC MDIO/Phy Functionality
- *************************************************************************/
/**
* emac_get_drvinfo: Get EMAC driver information
* @ndev: The DaVinci EMAC network adapter
@@ -686,7 +491,7 @@ static int emac_get_settings(struct net_device *ndev,
struct ethtool_cmd *ecmd)
{
struct emac_priv *priv = netdev_priv(ndev);
- if (priv->phy_mask)
+ if (priv->phydev)
return phy_ethtool_gset(priv->phydev, ecmd);
else
return -EOPNOTSUPP;
@@ -704,7 +509,7 @@ static int emac_get_settings(struct net_device *ndev,
static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
struct emac_priv *priv = netdev_priv(ndev);
- if (priv->phy_mask)
+ if (priv->phydev)
return phy_ethtool_sset(priv->phydev, ecmd);
else
return -EOPNOTSUPP;
@@ -841,7 +646,7 @@ static void emac_update_phystatus(struct emac_priv *priv)
mac_control = emac_read(EMAC_MACCONTROL);
cur_duplex = (mac_control & EMAC_MACCONTROL_FULLDUPLEXEN) ?
DUPLEX_FULL : DUPLEX_HALF;
- if (priv->phy_mask)
+ if (priv->phydev)
new_duplex = priv->phydev->duplex;
else
new_duplex = DUPLEX_FULL;
@@ -1184,371 +989,68 @@ static irqreturn_t emac_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/** EMAC on-chip buffer descriptor memory
- *
- * WARNING: Please note that the on chip memory is used for both TX and RX
- * buffer descriptor queues and is equally divided between TX and RX desc's
- * If the number of TX or RX descriptors change this memory pointers need
- * to be adjusted. If external memory is allocated then these pointers can
- * pointer to the memory
- *
- */
-#define EMAC_TX_BD_MEM(priv) ((priv)->emac_ctrl_ram)
-#define EMAC_RX_BD_MEM(priv) ((priv)->emac_ctrl_ram + \
- (((priv)->ctrl_ram_size) >> 1))
-
-/**
- * emac_init_txch: TX channel initialization
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: RX channel number
- *
- * Called during device init to setup a TX channel (allocate buffer desc
- * create free pool and keep ready for transmission
- *
- * Returns success(0) or mem alloc failures error code
- */
-static int emac_init_txch(struct emac_priv *priv, u32 ch)
-{
- struct device *emac_dev = &priv->ndev->dev;
- u32 cnt, bd_size;
- void __iomem *mem;
- struct emac_tx_bd __iomem *curr_bd;
- struct emac_txch *txch = NULL;
-
- txch = kzalloc(sizeof(struct emac_txch), GFP_KERNEL);
- if (NULL == txch) {
- dev_err(emac_dev, "DaVinci EMAC: TX Ch mem alloc failed");
- return -ENOMEM;
- }
- priv->txch[ch] = txch;
- txch->service_max = EMAC_DEF_TX_MAX_SERVICE;
- txch->active_queue_head = NULL;
- txch->active_queue_tail = NULL;
- txch->queue_active = 0;
- txch->teardown_pending = 0;
-
- /* allocate memory for TX CPPI channel on a 4 byte boundry */
- txch->tx_complete = kzalloc(txch->service_max * sizeof(u32),
- GFP_KERNEL);
- if (NULL == txch->tx_complete) {
- dev_err(emac_dev, "DaVinci EMAC: Tx service mem alloc failed");
- kfree(txch);
- return -ENOMEM;
- }
-
- /* allocate buffer descriptor pool align every BD on four word
- * boundry for future requirements */
- bd_size = (sizeof(struct emac_tx_bd) + 0xF) & ~0xF;
- txch->num_bd = (priv->ctrl_ram_size >> 1) / bd_size;
- txch->alloc_size = (((bd_size * txch->num_bd) + 0xF) & ~0xF);
-
- /* alloc TX BD memory */
- txch->bd_mem = EMAC_TX_BD_MEM(priv);
- __memzero((void __force *)txch->bd_mem, txch->alloc_size);
-
- /* initialize the BD linked list */
- mem = (void __force __iomem *)
- (((u32 __force) txch->bd_mem + 0xF) & ~0xF);
- txch->bd_pool_head = NULL;
- for (cnt = 0; cnt < txch->num_bd; cnt++) {
- curr_bd = mem + (cnt * bd_size);
- curr_bd->next = txch->bd_pool_head;
- txch->bd_pool_head = curr_bd;
- }
-
- /* reset statistics counters */
- txch->out_of_tx_bd = 0;
- txch->no_active_pkts = 0;
- txch->active_queue_count = 0;
-
- return 0;
-}
-
-/**
- * emac_cleanup_txch: Book-keep function to clean TX channel resources
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: TX channel number
- *
- * Called to clean up TX channel resources
- *
- */
-static void emac_cleanup_txch(struct emac_priv *priv, u32 ch)
+static struct sk_buff *emac_rx_alloc(struct emac_priv *priv)
{
- struct emac_txch *txch = priv->txch[ch];
-
- if (txch) {
- if (txch->bd_mem)
- txch->bd_mem = NULL;
- kfree(txch->tx_complete);
- kfree(txch);
- priv->txch[ch] = NULL;
- }
+ struct sk_buff *skb = dev_alloc_skb(priv->rx_buf_size);
+ if (WARN_ON(!skb))
+ return NULL;
+ skb->dev = priv->ndev;
+ skb_reserve(skb, NET_IP_ALIGN);
+ return skb;
}
-/**
- * emac_net_tx_complete: TX packet completion function
- * @priv: The DaVinci EMAC private adapter structure
- * @net_data_tokens: packet token - skb pointer
- * @num_tokens: number of skb's to free
- * @ch: TX channel number
- *
- * Frees the skb once packet is transmitted
- *
- */
-static int emac_net_tx_complete(struct emac_priv *priv,
- void **net_data_tokens,
- int num_tokens, u32 ch)
+static void emac_rx_handler(void *token, int len, int status)
{
- struct net_device *ndev = priv->ndev;
- u32 cnt;
-
- if (unlikely(num_tokens && netif_queue_stopped(ndev)))
- netif_start_queue(ndev);
- for (cnt = 0; cnt < num_tokens; cnt++) {
- struct sk_buff *skb = (struct sk_buff *)net_data_tokens[cnt];
- if (skb == NULL)
- continue;
- ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += skb->len;
+ struct sk_buff *skb = token;
+ struct net_device *ndev = skb->dev;
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct device *emac_dev = &ndev->dev;
+ int ret;
+
+ /* free and bail if we are shutting down */
+ if (unlikely(!netif_running(ndev))) {
dev_kfree_skb_any(skb);
+ return;
}
- return 0;
-}
-
-/**
- * emac_txch_teardown: TX channel teardown
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: TX channel number
- *
- * Called to teardown TX channel
- *
- */
-static void emac_txch_teardown(struct emac_priv *priv, u32 ch)
-{
- struct device *emac_dev = &priv->ndev->dev;
- u32 teardown_cnt = 0xFFFFFFF0; /* Some high value */
- struct emac_txch *txch = priv->txch[ch];
- struct emac_tx_bd __iomem *curr_bd;
-
- while ((emac_read(EMAC_TXCP(ch)) & EMAC_TEARDOWN_VALUE) !=
- EMAC_TEARDOWN_VALUE) {
- /* wait till tx teardown complete */
- cpu_relax(); /* TODO: check if this helps ... */
- --teardown_cnt;
- if (0 == teardown_cnt) {
- dev_err(emac_dev, "EMAC: TX teardown aborted\n");
- break;
- }
- }
- emac_write(EMAC_TXCP(ch), EMAC_TEARDOWN_VALUE);
-
- /* process sent packets and return skb's to upper layer */
- if (1 == txch->queue_active) {
- curr_bd = txch->active_queue_head;
- while (curr_bd != NULL) {
- dma_unmap_single(emac_dev, curr_bd->buff_ptr,
- curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE,
- DMA_TO_DEVICE);
-
- emac_net_tx_complete(priv, (void __force *)
- &curr_bd->buf_token, 1, ch);
- if (curr_bd != txch->active_queue_tail)
- curr_bd = curr_bd->next;
- else
- break;
- }
- txch->bd_pool_head = txch->active_queue_head;
- txch->active_queue_head =
- txch->active_queue_tail = NULL;
- }
-}
-/**
- * emac_stop_txch: Stop TX channel operation
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: TX channel number
- *
- * Called to stop TX channel operation
- *
- */
-static void emac_stop_txch(struct emac_priv *priv, u32 ch)
-{
- struct emac_txch *txch = priv->txch[ch];
-
- if (txch) {
- txch->teardown_pending = 1;
- emac_write(EMAC_TXTEARDOWN, 0);
- emac_txch_teardown(priv, ch);
- txch->teardown_pending = 0;
- emac_write(EMAC_TXINTMASKCLEAR, BIT(ch));
+	/* recycle on receive error */
+ if (status < 0) {
+ ndev->stats.rx_errors++;
+ goto recycle;
}
-}
-/**
- * emac_tx_bdproc: TX buffer descriptor (packet) processing
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: TX channel number to process buffer descriptors for
- * @budget: number of packets allowed to process
- * @pending: indication to caller that packets are pending to process
- *
- * Processes TX buffer descriptors after packets are transmitted - checks
- * ownership bit on the TX * descriptor and requeues it to free pool & frees
- * the SKB buffer. Only "budget" number of packets are processed and
- * indication of pending packets provided to the caller
- *
- * Returns number of packets processed
- */
-static int emac_tx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
-{
- struct device *emac_dev = &priv->ndev->dev;
- unsigned long flags;
- u32 frame_status;
- u32 pkts_processed = 0;
- u32 tx_complete_cnt = 0;
- struct emac_tx_bd __iomem *curr_bd;
- struct emac_txch *txch = priv->txch[ch];
- u32 *tx_complete_ptr = txch->tx_complete;
-
- if (unlikely(1 == txch->teardown_pending)) {
- if (netif_msg_tx_err(priv) && net_ratelimit()) {
- dev_err(emac_dev, "DaVinci EMAC:emac_tx_bdproc: "\
- "teardown pending\n");
- }
- return 0; /* dont handle any pkt completions */
- }
+ /* feed received packet up the stack */
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ netif_receive_skb(skb);
+ ndev->stats.rx_bytes += len;
+ ndev->stats.rx_packets++;
- ++txch->proc_count;
- spin_lock_irqsave(&priv->tx_lock, flags);
- curr_bd = txch->active_queue_head;
- if (NULL == curr_bd) {
- emac_write(EMAC_TXCP(ch),
- emac_virt_to_phys(txch->last_hw_bdprocessed, priv));
- txch->no_active_pkts++;
- spin_unlock_irqrestore(&priv->tx_lock, flags);
- return 0;
+ /* alloc a new packet for receive */
+ skb = emac_rx_alloc(priv);
+ if (!skb) {
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ dev_err(emac_dev, "failed rx buffer alloc\n");
+ return;
}
- BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
- frame_status = curr_bd->mode;
- while ((curr_bd) &&
- ((frame_status & EMAC_CPPI_OWNERSHIP_BIT) == 0) &&
- (pkts_processed < budget)) {
- emac_write(EMAC_TXCP(ch), emac_virt_to_phys(curr_bd, priv));
- txch->active_queue_head = curr_bd->next;
- if (frame_status & EMAC_CPPI_EOQ_BIT) {
- if (curr_bd->next) { /* misqueued packet */
- emac_write(EMAC_TXHDP(ch), curr_bd->h_next);
- ++txch->mis_queued_packets;
- } else {
- txch->queue_active = 0; /* end of queue */
- }
- }
- dma_unmap_single(emac_dev, curr_bd->buff_ptr,
- curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE,
- DMA_TO_DEVICE);
-
- *tx_complete_ptr = (u32) curr_bd->buf_token;
- ++tx_complete_ptr;
- ++tx_complete_cnt;
- curr_bd->next = txch->bd_pool_head;
- txch->bd_pool_head = curr_bd;
- --txch->active_queue_count;
- pkts_processed++;
- txch->last_hw_bdprocessed = curr_bd;
- curr_bd = txch->active_queue_head;
- if (curr_bd) {
- BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
- frame_status = curr_bd->mode;
- }
- } /* end of pkt processing loop */
-
- emac_net_tx_complete(priv,
- (void *)&txch->tx_complete[0],
- tx_complete_cnt, ch);
- spin_unlock_irqrestore(&priv->tx_lock, flags);
- return pkts_processed;
+recycle:
+ ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
+			skb_tailroom(skb), GFP_ATOMIC);
+ if (WARN_ON(ret < 0))
+ dev_kfree_skb_any(skb);
}
-#define EMAC_ERR_TX_OUT_OF_BD -1
-
-/**
- * emac_send: EMAC Transmit function (internal)
- * @priv: The DaVinci EMAC private adapter structure
- * @pkt: packet pointer (contains skb ptr)
- * @ch: TX channel number
- *
- * Called by the transmit function to queue the packet in EMAC hardware queue
- *
- * Returns success(0) or error code (typically out of desc's)
- */
-static int emac_send(struct emac_priv *priv, struct emac_netpktobj *pkt, u32 ch)
+static void emac_tx_handler(void *token, int len, int status)
{
- unsigned long flags;
- struct emac_tx_bd __iomem *curr_bd;
- struct emac_txch *txch;
- struct emac_netbufobj *buf_list;
-
- txch = priv->txch[ch];
- buf_list = pkt->buf_list; /* get handle to the buffer array */
-
- /* check packet size and pad if short */
- if (pkt->pkt_length < EMAC_DEF_MIN_ETHPKTSIZE) {
- buf_list->length += (EMAC_DEF_MIN_ETHPKTSIZE - pkt->pkt_length);
- pkt->pkt_length = EMAC_DEF_MIN_ETHPKTSIZE;
- }
+ struct sk_buff *skb = token;
+ struct net_device *ndev = skb->dev;
- spin_lock_irqsave(&priv->tx_lock, flags);
- curr_bd = txch->bd_pool_head;
- if (curr_bd == NULL) {
- txch->out_of_tx_bd++;
- spin_unlock_irqrestore(&priv->tx_lock, flags);
- return EMAC_ERR_TX_OUT_OF_BD;
- }
-
- txch->bd_pool_head = curr_bd->next;
- curr_bd->buf_token = buf_list->buf_token;
- curr_bd->buff_ptr = dma_map_single(&priv->ndev->dev, buf_list->data_ptr,
- buf_list->length, DMA_TO_DEVICE);
- curr_bd->off_b_len = buf_list->length;
- curr_bd->h_next = 0;
- curr_bd->next = NULL;
- curr_bd->mode = (EMAC_CPPI_SOP_BIT | EMAC_CPPI_OWNERSHIP_BIT |
- EMAC_CPPI_EOP_BIT | pkt->pkt_length);
-
- /* flush the packet from cache if write back cache is present */
- BD_CACHE_WRITEBACK_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
-
- /* send the packet */
- if (txch->active_queue_head == NULL) {
- txch->active_queue_head = curr_bd;
- txch->active_queue_tail = curr_bd;
- if (1 != txch->queue_active) {
- emac_write(EMAC_TXHDP(ch),
- emac_virt_to_phys(curr_bd, priv));
- txch->queue_active = 1;
- }
- ++txch->queue_reinit;
- } else {
- register struct emac_tx_bd __iomem *tail_bd;
- register u32 frame_status;
-
- tail_bd = txch->active_queue_tail;
- tail_bd->next = curr_bd;
- txch->active_queue_tail = curr_bd;
- tail_bd = EMAC_VIRT_NOCACHE(tail_bd);
- tail_bd->h_next = (int)emac_virt_to_phys(curr_bd, priv);
- frame_status = tail_bd->mode;
- if (frame_status & EMAC_CPPI_EOQ_BIT) {
- emac_write(EMAC_TXHDP(ch),
- emac_virt_to_phys(curr_bd, priv));
- frame_status &= ~(EMAC_CPPI_EOQ_BIT);
- tail_bd->mode = frame_status;
- ++txch->end_of_queue_add;
- }
- }
- txch->active_queue_count++;
- spin_unlock_irqrestore(&priv->tx_lock, flags);
- return 0;
+ if (unlikely(netif_queue_stopped(ndev)))
+ netif_start_queue(ndev);
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += len;
+ dev_kfree_skb_any(skb);
}
/**
@@ -1565,42 +1067,36 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct device *emac_dev = &ndev->dev;
int ret_code;
- struct emac_netbufobj tx_buf; /* buffer obj-only single frame support */
- struct emac_netpktobj tx_packet; /* packet object */
struct emac_priv *priv = netdev_priv(ndev);
/* If no link, return */
if (unlikely(!priv->link)) {
if (netif_msg_tx_err(priv) && net_ratelimit())
dev_err(emac_dev, "DaVinci EMAC: No link to transmit");
- return NETDEV_TX_BUSY;
+ goto fail_tx;
+ }
+
+ ret_code = skb_padto(skb, EMAC_DEF_MIN_ETHPKTSIZE);
+ if (unlikely(ret_code < 0)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ dev_err(emac_dev, "DaVinci EMAC: packet pad failed");
+ goto fail_tx;
}
- /* Build the buffer and packet objects - Since only single fragment is
- * supported, need not set length and token in both packet & object.
- * Doing so for completeness sake & to show that this needs to be done
- * in multifragment case
- */
- tx_packet.buf_list = &tx_buf;
- tx_packet.num_bufs = 1; /* only single fragment supported */
- tx_packet.pkt_length = skb->len;
- tx_packet.pkt_token = (void *)skb;
- tx_buf.length = skb->len;
- tx_buf.buf_token = (void *)skb;
- tx_buf.data_ptr = skb->data;
- ret_code = emac_send(priv, &tx_packet, EMAC_DEF_TX_CH);
+ ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len,
+			      GFP_ATOMIC);
if (unlikely(ret_code != 0)) {
- if (ret_code == EMAC_ERR_TX_OUT_OF_BD) {
- if (netif_msg_tx_err(priv) && net_ratelimit())
- dev_err(emac_dev, "DaVinci EMAC: xmit() fatal"\
- " err. Out of TX BD's");
- netif_stop_queue(priv->ndev);
- }
- ndev->stats.tx_dropped++;
- return NETDEV_TX_BUSY;
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ dev_err(emac_dev, "DaVinci EMAC: desc submit failed");
+ goto fail_tx;
}
return NETDEV_TX_OK;
+
+fail_tx:
+ ndev->stats.tx_dropped++;
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
}
/**
@@ -1621,218 +1117,16 @@ static void emac_dev_tx_timeout(struct net_device *ndev)
if (netif_msg_tx_err(priv))
dev_err(emac_dev, "DaVinci EMAC: xmit timeout, restarting TX");
+ emac_dump_regs(priv);
+
ndev->stats.tx_errors++;
emac_int_disable(priv);
- emac_stop_txch(priv, EMAC_DEF_TX_CH);
- emac_cleanup_txch(priv, EMAC_DEF_TX_CH);
- emac_init_txch(priv, EMAC_DEF_TX_CH);
- emac_write(EMAC_TXHDP(0), 0);
- emac_write(EMAC_TXINTMASKSET, BIT(EMAC_DEF_TX_CH));
+ cpdma_chan_stop(priv->txchan);
+ cpdma_chan_start(priv->txchan);
emac_int_enable(priv);
}
/**
- * emac_net_alloc_rx_buf: Allocate a skb for RX
- * @priv: The DaVinci EMAC private adapter structure
- * @buf_size: size of SKB data buffer to allocate
- * @data_token: data token returned (skb handle for storing in buffer desc)
- * @ch: RX channel number
- *
- * Called during RX channel setup - allocates skb buffer of required size
- * and provides the skb handle and allocated buffer data pointer to caller
- *
- * Returns skb data pointer or 0 on failure to alloc skb
- */
-static void *emac_net_alloc_rx_buf(struct emac_priv *priv, int buf_size,
- void **data_token, u32 ch)
-{
- struct net_device *ndev = priv->ndev;
- struct device *emac_dev = &ndev->dev;
- struct sk_buff *p_skb;
-
- p_skb = dev_alloc_skb(buf_size);
- if (unlikely(NULL == p_skb)) {
- if (netif_msg_rx_err(priv) && net_ratelimit())
- dev_err(emac_dev, "DaVinci EMAC: failed to alloc skb");
- return NULL;
- }
-
- /* set device pointer in skb and reserve space for extra bytes */
- p_skb->dev = ndev;
- skb_reserve(p_skb, NET_IP_ALIGN);
- *data_token = (void *) p_skb;
- return p_skb->data;
-}
-
-/**
- * emac_init_rxch: RX channel initialization
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: RX channel number
- * @param: mac address for RX channel
- *
- * Called during device init to setup a RX channel (allocate buffers and
- * buffer descriptors, create queue and keep ready for reception
- *
- * Returns success(0) or mem alloc failures error code
- */
-static int emac_init_rxch(struct emac_priv *priv, u32 ch, char *param)
-{
- struct device *emac_dev = &priv->ndev->dev;
- u32 cnt, bd_size;
- void __iomem *mem;
- struct emac_rx_bd __iomem *curr_bd;
- struct emac_rxch *rxch = NULL;
-
- rxch = kzalloc(sizeof(struct emac_rxch), GFP_KERNEL);
- if (NULL == rxch) {
- dev_err(emac_dev, "DaVinci EMAC: RX Ch mem alloc failed");
- return -ENOMEM;
- }
- priv->rxch[ch] = rxch;
- rxch->buf_size = priv->rx_buf_size;
- rxch->service_max = EMAC_DEF_RX_MAX_SERVICE;
- rxch->queue_active = 0;
- rxch->teardown_pending = 0;
-
- /* save mac address */
- for (cnt = 0; cnt < 6; cnt++)
- rxch->mac_addr[cnt] = param[cnt];
-
- /* allocate buffer descriptor pool align every BD on four word
- * boundry for future requirements */
- bd_size = (sizeof(struct emac_rx_bd) + 0xF) & ~0xF;
- rxch->num_bd = (priv->ctrl_ram_size >> 1) / bd_size;
- rxch->alloc_size = (((bd_size * rxch->num_bd) + 0xF) & ~0xF);
- rxch->bd_mem = EMAC_RX_BD_MEM(priv);
- __memzero((void __force *)rxch->bd_mem, rxch->alloc_size);
- rxch->pkt_queue.buf_list = &rxch->buf_queue;
-
- /* allocate RX buffer and initialize the BD linked list */
- mem = (void __force __iomem *)
- (((u32 __force) rxch->bd_mem + 0xF) & ~0xF);
- rxch->active_queue_head = NULL;
- rxch->active_queue_tail = mem;
- for (cnt = 0; cnt < rxch->num_bd; cnt++) {
- curr_bd = mem + (cnt * bd_size);
- /* for future use the last parameter contains the BD ptr */
- curr_bd->data_ptr = emac_net_alloc_rx_buf(priv,
- rxch->buf_size,
- (void __force **)&curr_bd->buf_token,
- EMAC_DEF_RX_CH);
- if (curr_bd->data_ptr == NULL) {
- dev_err(emac_dev, "DaVinci EMAC: RX buf mem alloc " \
- "failed for ch %d\n", ch);
- kfree(rxch);
- return -ENOMEM;
- }
-
- /* populate the hardware descriptor */
- curr_bd->h_next = emac_virt_to_phys(rxch->active_queue_head,
- priv);
- curr_bd->buff_ptr = dma_map_single(emac_dev, curr_bd->data_ptr,
- rxch->buf_size, DMA_FROM_DEVICE);
- curr_bd->off_b_len = rxch->buf_size;
- curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT;
-
- /* write back to hardware memory */
- BD_CACHE_WRITEBACK_INVALIDATE((u32) curr_bd,
- EMAC_BD_LENGTH_FOR_CACHE);
- curr_bd->next = rxch->active_queue_head;
- rxch->active_queue_head = curr_bd;
- }
-
- /* At this point rxCppi->activeQueueHead points to the first
- RX BD ready to be given to RX HDP and rxch->active_queue_tail
- points to the last RX BD
- */
- return 0;
-}
-
-/**
- * emac_rxch_teardown: RX channel teardown
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: RX channel number
- *
- * Called during device stop to teardown RX channel
- *
- */
-static void emac_rxch_teardown(struct emac_priv *priv, u32 ch)
-{
- struct device *emac_dev = &priv->ndev->dev;
- u32 teardown_cnt = 0xFFFFFFF0; /* Some high value */
-
- while ((emac_read(EMAC_RXCP(ch)) & EMAC_TEARDOWN_VALUE) !=
- EMAC_TEARDOWN_VALUE) {
- /* wait till tx teardown complete */
- cpu_relax(); /* TODO: check if this helps ... */
- --teardown_cnt;
- if (0 == teardown_cnt) {
- dev_err(emac_dev, "EMAC: RX teardown aborted\n");
- break;
- }
- }
- emac_write(EMAC_RXCP(ch), EMAC_TEARDOWN_VALUE);
-}
-
-/**
- * emac_stop_rxch: Stop RX channel operation
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: RX channel number
- *
- * Called during device stop to stop RX channel operation
- *
- */
-static void emac_stop_rxch(struct emac_priv *priv, u32 ch)
-{
- struct emac_rxch *rxch = priv->rxch[ch];
-
- if (rxch) {
- rxch->teardown_pending = 1;
- emac_write(EMAC_RXTEARDOWN, ch);
- /* wait for teardown complete */
- emac_rxch_teardown(priv, ch);
- rxch->teardown_pending = 0;
- emac_write(EMAC_RXINTMASKCLEAR, BIT(ch));
- }
-}
-
-/**
- * emac_cleanup_rxch: Book-keep function to clean RX channel resources
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: RX channel number
- *
- * Called during device stop to clean up RX channel resources
- *
- */
-static void emac_cleanup_rxch(struct emac_priv *priv, u32 ch)
-{
- struct emac_rxch *rxch = priv->rxch[ch];
- struct emac_rx_bd __iomem *curr_bd;
-
- if (rxch) {
- /* free the receive buffers previously allocated */
- curr_bd = rxch->active_queue_head;
- while (curr_bd) {
- if (curr_bd->buf_token) {
- dma_unmap_single(&priv->ndev->dev,
- curr_bd->buff_ptr,
- curr_bd->off_b_len
- & EMAC_RX_BD_BUF_SIZE,
- DMA_FROM_DEVICE);
-
- dev_kfree_skb_any((struct sk_buff *)\
- curr_bd->buf_token);
- }
- curr_bd = curr_bd->next;
- }
- if (rxch->bd_mem)
- rxch->bd_mem = NULL;
- kfree(rxch);
- priv->rxch[ch] = NULL;
- }
-}
-
-/**
* emac_set_type0addr: Set EMAC Type0 mac address
* @priv: The DaVinci EMAC private adapter structure
* @ch: RX channel number
@@ -1948,7 +1242,6 @@ static void emac_setmac(struct emac_priv *priv, u32 ch, char *mac_addr)
static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
{
struct emac_priv *priv = netdev_priv(ndev);
- struct emac_rxch *rxch = priv->rxch[EMAC_DEF_RX_CH];
struct device *emac_dev = &priv->ndev->dev;
struct sockaddr *sa = addr;
@@ -1959,11 +1252,10 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len);
- /* If the interface is down - rxch is NULL. */
/* MAC address is configured only after the interface is enabled. */
if (netif_running(ndev)) {
- memcpy(rxch->mac_addr, sa->sa_data, ndev->addr_len);
- emac_setmac(priv, EMAC_DEF_RX_CH, rxch->mac_addr);
+ memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
+ emac_setmac(priv, EMAC_DEF_RX_CH, priv->mac_addr);
}
if (netif_msg_drv(priv))
@@ -1974,194 +1266,6 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
}
/**
- * emac_addbd_to_rx_queue: Recycle RX buffer descriptor
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: RX channel number to process buffer descriptors for
- * @curr_bd: current buffer descriptor
- * @buffer: buffer pointer for descriptor
- * @buf_token: buffer token (stores skb information)
- *
- * Prepares the recycled buffer descriptor and addes it to hardware
- * receive queue - if queue empty this descriptor becomes the head
- * else addes the descriptor to end of queue
- *
- */
-static void emac_addbd_to_rx_queue(struct emac_priv *priv, u32 ch,
- struct emac_rx_bd __iomem *curr_bd,
- char *buffer, void *buf_token)
-{
- struct emac_rxch *rxch = priv->rxch[ch];
-
- /* populate the hardware descriptor */
- curr_bd->h_next = 0;
- curr_bd->buff_ptr = dma_map_single(&priv->ndev->dev, buffer,
- rxch->buf_size, DMA_FROM_DEVICE);
- curr_bd->off_b_len = rxch->buf_size;
- curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT;
- curr_bd->next = NULL;
- curr_bd->data_ptr = buffer;
- curr_bd->buf_token = buf_token;
-
- /* write back */
- BD_CACHE_WRITEBACK_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
- if (rxch->active_queue_head == NULL) {
- rxch->active_queue_head = curr_bd;
- rxch->active_queue_tail = curr_bd;
- if (0 != rxch->queue_active) {
- emac_write(EMAC_RXHDP(ch),
- emac_virt_to_phys(rxch->active_queue_head, priv));
- rxch->queue_active = 1;
- }
- } else {
- struct emac_rx_bd __iomem *tail_bd;
- u32 frame_status;
-
- tail_bd = rxch->active_queue_tail;
- rxch->active_queue_tail = curr_bd;
- tail_bd->next = curr_bd;
- tail_bd = EMAC_VIRT_NOCACHE(tail_bd);
- tail_bd->h_next = emac_virt_to_phys(curr_bd, priv);
- frame_status = tail_bd->mode;
- if (frame_status & EMAC_CPPI_EOQ_BIT) {
- emac_write(EMAC_RXHDP(ch),
- emac_virt_to_phys(curr_bd, priv));
- frame_status &= ~(EMAC_CPPI_EOQ_BIT);
- tail_bd->mode = frame_status;
- ++rxch->end_of_queue_add;
- }
- }
- ++rxch->recycled_bd;
-}
-
-/**
- * emac_net_rx_cb: Prepares packet and sends to upper layer
- * @priv: The DaVinci EMAC private adapter structure
- * @net_pkt_list: Network packet list (received packets)
- *
- * Invalidates packet buffer memory and sends the received packet to upper
- * layer
- *
- * Returns success or appropriate error code (none as of now)
- */
-static int emac_net_rx_cb(struct emac_priv *priv,
- struct emac_netpktobj *net_pkt_list)
-{
- struct net_device *ndev = priv->ndev;
- struct sk_buff *p_skb = net_pkt_list->pkt_token;
- /* set length of packet */
- skb_put(p_skb, net_pkt_list->pkt_length);
- p_skb->protocol = eth_type_trans(p_skb, priv->ndev);
- netif_receive_skb(p_skb);
- ndev->stats.rx_bytes += net_pkt_list->pkt_length;
- ndev->stats.rx_packets++;
- return 0;
-}
-
-/**
- * emac_rx_bdproc: RX buffer descriptor (packet) processing
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: RX channel number to process buffer descriptors for
- * @budget: number of packets allowed to process
- * @pending: indication to caller that packets are pending to process
- *
- * Processes RX buffer descriptors - checks ownership bit on the RX buffer
- * descriptor, sends the receive packet to upper layer, allocates a new SKB
- * and recycles the buffer descriptor (requeues it in hardware RX queue).
- * Only "budget" number of packets are processed and indication of pending
- * packets provided to the caller.
- *
- * Returns number of packets processed (and indication of pending packets)
- */
-static int emac_rx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
-{
- unsigned long flags;
- u32 frame_status;
- u32 pkts_processed = 0;
- char *new_buffer;
- struct emac_rx_bd __iomem *curr_bd;
- struct emac_rx_bd __iomem *last_bd;
- struct emac_netpktobj *curr_pkt, pkt_obj;
- struct emac_netbufobj buf_obj;
- struct emac_netbufobj *rx_buf_obj;
- void *new_buf_token;
- struct emac_rxch *rxch = priv->rxch[ch];
-
- if (unlikely(1 == rxch->teardown_pending))
- return 0;
- ++rxch->proc_count;
- spin_lock_irqsave(&priv->rx_lock, flags);
- pkt_obj.buf_list = &buf_obj;
- curr_pkt = &pkt_obj;
- curr_bd = rxch->active_queue_head;
- BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
- frame_status = curr_bd->mode;
-
- while ((curr_bd) &&
- ((frame_status & EMAC_CPPI_OWNERSHIP_BIT) == 0) &&
- (pkts_processed < budget)) {
-
- new_buffer = emac_net_alloc_rx_buf(priv, rxch->buf_size,
- &new_buf_token, EMAC_DEF_RX_CH);
- if (unlikely(NULL == new_buffer)) {
- ++rxch->out_of_rx_buffers;
- goto end_emac_rx_bdproc;
- }
-
- /* populate received packet data structure */
- rx_buf_obj = &curr_pkt->buf_list[0];
- rx_buf_obj->data_ptr = (char *)curr_bd->data_ptr;
- rx_buf_obj->length = curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE;
- rx_buf_obj->buf_token = curr_bd->buf_token;
-
- dma_unmap_single(&priv->ndev->dev, curr_bd->buff_ptr,
- curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE,
- DMA_FROM_DEVICE);
-
- curr_pkt->pkt_token = curr_pkt->buf_list->buf_token;
- curr_pkt->num_bufs = 1;
- curr_pkt->pkt_length =
- (frame_status & EMAC_RX_BD_PKT_LENGTH_MASK);
- emac_write(EMAC_RXCP(ch), emac_virt_to_phys(curr_bd, priv));
- ++rxch->processed_bd;
- last_bd = curr_bd;
- curr_bd = last_bd->next;
- rxch->active_queue_head = curr_bd;
-
- /* check if end of RX queue ? */
- if (frame_status & EMAC_CPPI_EOQ_BIT) {
- if (curr_bd) {
- ++rxch->mis_queued_packets;
- emac_write(EMAC_RXHDP(ch),
- emac_virt_to_phys(curr_bd, priv));
- } else {
- ++rxch->end_of_queue;
- rxch->queue_active = 0;
- }
- }
-
- /* recycle BD */
- emac_addbd_to_rx_queue(priv, ch, last_bd, new_buffer,
- new_buf_token);
-
- /* return the packet to the user - BD ptr passed in
- * last parameter for potential *future* use */
- spin_unlock_irqrestore(&priv->rx_lock, flags);
- emac_net_rx_cb(priv, curr_pkt);
- spin_lock_irqsave(&priv->rx_lock, flags);
- curr_bd = rxch->active_queue_head;
- if (curr_bd) {
- BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
- frame_status = curr_bd->mode;
- }
- ++pkts_processed;
- }
-
-end_emac_rx_bdproc:
- spin_unlock_irqrestore(&priv->rx_lock, flags);
- return pkts_processed;
-}
-
-/**
* emac_hw_enable: Enable EMAC hardware for packet transmission/reception
* @priv: The DaVinci EMAC private adapter structure
*
@@ -2172,7 +1276,7 @@ end_emac_rx_bdproc:
*/
static int emac_hw_enable(struct emac_priv *priv)
{
- u32 ch, val, mbp_enable, mac_control;
+ u32 val, mbp_enable, mac_control;
/* Soft reset */
emac_write(EMAC_SOFTRESET, 1);
@@ -2215,26 +1319,9 @@ static int emac_hw_enable(struct emac_priv *priv)
emac_write(EMAC_RXUNICASTCLEAR, EMAC_RX_UNICAST_CLEAR_ALL);
priv->rx_addr_type = (emac_read(EMAC_MACCONFIG) >> 8) & 0xFF;
- val = emac_read(EMAC_TXCONTROL);
- val |= EMAC_TX_CONTROL_TX_ENABLE_VAL;
- emac_write(EMAC_TXCONTROL, val);
- val = emac_read(EMAC_RXCONTROL);
- val |= EMAC_RX_CONTROL_RX_ENABLE_VAL;
- emac_write(EMAC_RXCONTROL, val);
emac_write(EMAC_MACINTMASKSET, EMAC_MAC_HOST_ERR_INTMASK_VAL);
- for (ch = 0; ch < EMAC_DEF_MAX_TX_CH; ch++) {
- emac_write(EMAC_TXHDP(ch), 0);
- emac_write(EMAC_TXINTMASKSET, BIT(ch));
- }
- for (ch = 0; ch < EMAC_DEF_MAX_RX_CH; ch++) {
- struct emac_rxch *rxch = priv->rxch[ch];
- emac_setmac(priv, ch, rxch->mac_addr);
- emac_write(EMAC_RXINTMASKSET, BIT(ch));
- rxch->queue_active = 1;
- emac_write(EMAC_RXHDP(ch),
- emac_virt_to_phys(rxch->active_queue_head, priv));
- }
+ emac_setmac(priv, EMAC_DEF_RX_CH, priv->mac_addr);
/* Enable MII */
val = emac_read(EMAC_MACCONTROL);
@@ -2279,8 +1366,8 @@ static int emac_poll(struct napi_struct *napi, int budget)
mask = EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC;
if (status & mask) {
- num_tx_pkts = emac_tx_bdproc(priv, EMAC_DEF_TX_CH,
- EMAC_DEF_TX_MAX_SERVICE);
+ num_tx_pkts = cpdma_chan_process(priv->txchan,
+ EMAC_DEF_TX_MAX_SERVICE);
} /* TX processing */
mask = EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC;
@@ -2289,7 +1376,7 @@ static int emac_poll(struct napi_struct *napi, int budget)
mask = EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC;
if (status & mask) {
- num_rx_pkts = emac_rx_bdproc(priv, EMAC_DEF_RX_CH, budget);
+ num_rx_pkts = cpdma_chan_process(priv->rxchan, budget);
} /* RX processing */
mask = EMAC_DM644X_MAC_IN_VECTOR_HOST_INT;
@@ -2348,79 +1435,6 @@ void emac_poll_controller(struct net_device *ndev)
}
#endif
-/* PHY/MII bus related */
-
-/* Wait until mdio is ready for next command */
-#define MDIO_WAIT_FOR_USER_ACCESS\
- while ((emac_mdio_read((MDIO_USERACCESS(0))) &\
- MDIO_USERACCESS_GO) != 0)
-
-static int emac_mii_read(struct mii_bus *bus, int phy_id, int phy_reg)
-{
- unsigned int phy_data = 0;
- unsigned int phy_control;
-
- /* Wait until mdio is ready for next command */
- MDIO_WAIT_FOR_USER_ACCESS;
-
- phy_control = (MDIO_USERACCESS_GO |
- MDIO_USERACCESS_READ |
- ((phy_reg << 21) & MDIO_USERACCESS_REGADR) |
- ((phy_id << 16) & MDIO_USERACCESS_PHYADR) |
- (phy_data & MDIO_USERACCESS_DATA));
- emac_mdio_write(MDIO_USERACCESS(0), phy_control);
-
- /* Wait until mdio is ready for next command */
- MDIO_WAIT_FOR_USER_ACCESS;
-
- return emac_mdio_read(MDIO_USERACCESS(0)) & MDIO_USERACCESS_DATA;
-
-}
-
-static int emac_mii_write(struct mii_bus *bus, int phy_id,
- int phy_reg, u16 phy_data)
-{
-
- unsigned int control;
-
- /* until mdio is ready for next command */
- MDIO_WAIT_FOR_USER_ACCESS;
-
- control = (MDIO_USERACCESS_GO |
- MDIO_USERACCESS_WRITE |
- ((phy_reg << 21) & MDIO_USERACCESS_REGADR) |
- ((phy_id << 16) & MDIO_USERACCESS_PHYADR) |
- (phy_data & MDIO_USERACCESS_DATA));
- emac_mdio_write(MDIO_USERACCESS(0), control);
-
- return 0;
-}
-
-static int emac_mii_reset(struct mii_bus *bus)
-{
- unsigned int clk_div;
- int mdio_bus_freq = emac_bus_frequency;
-
- if (mdio_max_freq && mdio_bus_freq)
- clk_div = ((mdio_bus_freq / mdio_max_freq) - 1);
- else
- clk_div = 0xFF;
-
- clk_div &= MDIO_CONTROL_CLKDIV;
-
- /* Set enable and clock divider in MDIOControl */
- emac_mdio_write(MDIO_CONTROL, (clk_div | MDIO_CONTROL_ENABLE));
-
- return 0;
-
-}
-
-static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, PHY_POLL };
-
-/* emac_driver: EMAC MII bus structure */
-
-static struct mii_bus *emac_mii;
-
static void emac_adjust_link(struct net_device *ndev)
{
struct emac_priv *priv = netdev_priv(ndev);
@@ -2485,6 +1499,11 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
return -EOPNOTSUPP;
}
+static int match_first_device(struct device *dev, void *data)
+{
+ return 1;
+}
+
/**
* emac_dev_open: EMAC device open
* @ndev: The DaVinci EMAC network adapter
@@ -2498,10 +1517,9 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
static int emac_dev_open(struct net_device *ndev)
{
struct device *emac_dev = &ndev->dev;
- u32 rc, cnt, ch;
- int phy_addr;
+ u32 cnt;
struct resource *res;
- int q, m;
+ int q, m, ret;
int i = 0;
int k = 0;
struct emac_priv *priv = netdev_priv(ndev);
@@ -2513,29 +1531,21 @@ static int emac_dev_open(struct net_device *ndev)
/* Configuration items */
priv->rx_buf_size = EMAC_DEF_MAX_FRAME_SIZE + NET_IP_ALIGN;
- /* Clear basic hardware */
- for (ch = 0; ch < EMAC_MAX_TXRX_CHANNELS; ch++) {
- emac_write(EMAC_TXHDP(ch), 0);
- emac_write(EMAC_RXHDP(ch), 0);
- emac_write(EMAC_RXHDP(ch), 0);
- emac_write(EMAC_RXINTMASKCLEAR, EMAC_INT_MASK_CLEAR);
- emac_write(EMAC_TXINTMASKCLEAR, EMAC_INT_MASK_CLEAR);
- }
priv->mac_hash1 = 0;
priv->mac_hash2 = 0;
emac_write(EMAC_MACHASH1, 0);
emac_write(EMAC_MACHASH2, 0);
- /* multi ch not supported - open 1 TX, 1RX ch by default */
- rc = emac_init_txch(priv, EMAC_DEF_TX_CH);
- if (0 != rc) {
- dev_err(emac_dev, "DaVinci EMAC: emac_init_txch() failed");
- return rc;
- }
- rc = emac_init_rxch(priv, EMAC_DEF_RX_CH, priv->mac_addr);
- if (0 != rc) {
- dev_err(emac_dev, "DaVinci EMAC: emac_init_rxch() failed");
- return rc;
+ for (i = 0; i < EMAC_DEF_RX_NUM_DESC; i++) {
+ struct sk_buff *skb = emac_rx_alloc(priv);
+
+ if (!skb)
+ break;
+
+ ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
+ skb_tailroom(skb), GFP_KERNEL);
+ if (WARN_ON(ret < 0))
+ break;
}
/* Request IRQ */
@@ -2560,28 +1570,28 @@ static int emac_dev_open(struct net_device *ndev)
emac_set_coalesce(ndev, &coal);
}
- /* find the first phy */
+ cpdma_ctlr_start(priv->dma);
+
priv->phydev = NULL;
- if (priv->phy_mask) {
- emac_mii_reset(priv->mii_bus);
- for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
- if (priv->mii_bus->phy_map[phy_addr]) {
- priv->phydev = priv->mii_bus->phy_map[phy_addr];
- break;
- }
- }
+ /* use the first phy on the bus if pdata did not give us a phy id */
+ if (!priv->phy_id) {
+ struct device *phy;
- if (!priv->phydev) {
- printk(KERN_ERR "%s: no PHY found\n", ndev->name);
- return -1;
- }
+ phy = bus_find_device(&mdio_bus_type, NULL, NULL,
+ match_first_device);
+ if (phy)
+ priv->phy_id = dev_name(phy);
+ }
- priv->phydev = phy_connect(ndev, dev_name(&priv->phydev->dev),
- &emac_adjust_link, 0, PHY_INTERFACE_MODE_MII);
+ if (priv->phy_id && *priv->phy_id) {
+ priv->phydev = phy_connect(ndev, priv->phy_id,
+ &emac_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
if (IS_ERR(priv->phydev)) {
- printk(KERN_ERR "%s: Could not attach to PHY\n",
- ndev->name);
+ dev_err(emac_dev, "could not connect to phy %s\n",
+ priv->phy_id);
+ priv->phydev = NULL;
return PTR_ERR(priv->phydev);
}
@@ -2589,12 +1599,13 @@ static int emac_dev_open(struct net_device *ndev)
priv->speed = 0;
priv->duplex = ~0;
- printk(KERN_INFO "%s: attached PHY driver [%s] "
- "(mii_bus:phy_addr=%s, id=%x)\n", ndev->name,
+ dev_info(emac_dev, "attached PHY driver [%s] "
+ "(mii_bus:phy_addr=%s, id=%x)\n",
priv->phydev->drv->name, dev_name(&priv->phydev->dev),
priv->phydev->phy_id);
- } else{
+ } else {
/* No PHY , fix the link, speed and duplex settings */
+ dev_notice(emac_dev, "no phy, defaulting to 100/full\n");
priv->link = 1;
priv->speed = SPEED_100;
priv->duplex = DUPLEX_FULL;
@@ -2607,7 +1618,7 @@ static int emac_dev_open(struct net_device *ndev)
if (netif_msg_drv(priv))
dev_notice(emac_dev, "DaVinci EMAC: Opened %s\n", ndev->name);
- if (priv->phy_mask)
+ if (priv->phydev)
phy_start(priv->phydev);
return 0;
@@ -2648,10 +1659,7 @@ static int emac_dev_stop(struct net_device *ndev)
netif_carrier_off(ndev);
emac_int_disable(priv);
- emac_stop_txch(priv, EMAC_DEF_TX_CH);
- emac_stop_rxch(priv, EMAC_DEF_RX_CH);
- emac_cleanup_txch(priv, EMAC_DEF_TX_CH);
- emac_cleanup_rxch(priv, EMAC_DEF_RX_CH);
+ cpdma_ctlr_stop(priv->dma);
emac_write(EMAC_SOFTRESET, 1);
if (priv->phydev)
@@ -2756,9 +1764,10 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
struct resource *res;
struct net_device *ndev;
struct emac_priv *priv;
- unsigned long size;
+ unsigned long size, hw_ram_addr;
struct emac_platform_data *pdata;
struct device *emac_dev;
+ struct cpdma_params dma_params;
/* obtain emac clock from kernel */
emac_clk = clk_get(&pdev->dev, NULL);
@@ -2782,8 +1791,6 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
priv->ndev = ndev;
priv->msg_enable = netif_msg_init(debug_level, DAVINCI_EMAC_DEBUG);
- spin_lock_init(&priv->tx_lock);
- spin_lock_init(&priv->rx_lock);
spin_lock_init(&priv->lock);
pdata = pdev->dev.platform_data;
@@ -2794,7 +1801,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
/* MAC addr and PHY mask , RMII enable info from platform_data */
memcpy(priv->mac_addr, pdata->mac_addr, 6);
- priv->phy_mask = pdata->phy_mask;
+ priv->phy_id = pdata->phy_id;
priv->rmii_en = pdata->rmii_en;
priv->version = pdata->version;
priv->int_enable = pdata->interrupt_enable;
@@ -2831,14 +1838,41 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
ndev->base_addr = (unsigned long)priv->remap_addr;
priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset;
- priv->ctrl_ram_size = pdata->ctrl_ram_size;
- priv->emac_ctrl_ram = priv->remap_addr + pdata->ctrl_ram_offset;
- if (pdata->hw_ram_addr)
- priv->hw_ram_addr = pdata->hw_ram_addr;
- else
- priv->hw_ram_addr = (u32 __force)res->start +
- pdata->ctrl_ram_offset;
+ hw_ram_addr = pdata->hw_ram_addr;
+ if (!hw_ram_addr)
+ hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset;
+
+ memset(&dma_params, 0, sizeof(dma_params));
+ dma_params.dev = emac_dev;
+ dma_params.dmaregs = priv->emac_base;
+ dma_params.rxthresh = priv->emac_base + 0x120;
+ dma_params.rxfree = priv->emac_base + 0x140;
+ dma_params.txhdp = priv->emac_base + 0x600;
+ dma_params.rxhdp = priv->emac_base + 0x620;
+ dma_params.txcp = priv->emac_base + 0x640;
+ dma_params.rxcp = priv->emac_base + 0x660;
+ dma_params.num_chan = EMAC_MAX_TXRX_CHANNELS;
+ dma_params.min_packet_size = EMAC_DEF_MIN_ETHPKTSIZE;
+ dma_params.desc_mem_phys = hw_ram_addr;
+ dma_params.desc_mem_size = pdata->ctrl_ram_size;
+ dma_params.desc_align = 16;
+
+ priv->dma = cpdma_ctlr_create(&dma_params);
+ if (!priv->dma) {
+ dev_err(emac_dev, "DaVinci EMAC: Error initializing DMA\n");
+ rc = -ENOMEM;
+ goto no_dma;
+ }
+
+ priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH),
+ emac_tx_handler);
+ priv->rxchan = cpdma_chan_create(priv->dma, rx_chan_num(EMAC_DEF_RX_CH),
+ emac_rx_handler);
+ if (WARN_ON(!priv->txchan || !priv->rxchan)) {
+ rc = -ENOMEM;
+ goto no_irq_res;
+ }
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
@@ -2871,32 +1905,6 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
}
- /* MII/Phy intialisation, mdio bus registration */
- emac_mii = mdiobus_alloc();
- if (emac_mii == NULL) {
- dev_err(emac_dev, "DaVinci EMAC: Error allocating mii_bus\n");
- rc = -ENOMEM;
- goto mdio_alloc_err;
- }
-
- priv->mii_bus = emac_mii;
- emac_mii->name = "emac-mii",
- emac_mii->read = emac_mii_read,
- emac_mii->write = emac_mii_write,
- emac_mii->reset = emac_mii_reset,
- emac_mii->irq = mii_irqs,
- emac_mii->phy_mask = ~(priv->phy_mask);
- emac_mii->parent = &pdev->dev;
- emac_mii->priv = priv->remap_addr + pdata->mdio_reg_offset;
- snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", priv->pdev->id);
- mdio_max_freq = pdata->mdio_max_freq;
- emac_mii->reset(emac_mii);
-
- /* Register the MII bus */
- rc = mdiobus_register(emac_mii);
- if (rc)
- goto mdiobus_quit;
-
if (netif_msg_probe(priv)) {
dev_notice(emac_dev, "DaVinci EMAC Probe found device "\
"(regs: %p, irq: %d)\n",
@@ -2904,13 +1912,15 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
}
return 0;
-mdiobus_quit:
- mdiobus_free(emac_mii);
-
netdev_reg_err:
-mdio_alloc_err:
clk_disable(emac_clk);
no_irq_res:
+ if (priv->txchan)
+ cpdma_chan_destroy(priv->txchan);
+ if (priv->rxchan)
+ cpdma_chan_destroy(priv->rxchan);
+ cpdma_ctlr_destroy(priv->dma);
+no_dma:
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, res->end - res->start + 1);
iounmap(priv->remap_addr);
@@ -2938,8 +1948,12 @@ static int __devexit davinci_emac_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mdiobus_unregister(priv->mii_bus);
- mdiobus_free(priv->mii_bus);
+
+ if (priv->txchan)
+ cpdma_chan_destroy(priv->txchan);
+ if (priv->rxchan)
+ cpdma_chan_destroy(priv->rxchan);
+ cpdma_ctlr_destroy(priv->dma);
release_mem_region(res->start, res->end - res->start + 1);
diff --git a/drivers/net/davinci_mdio.c b/drivers/net/davinci_mdio.c
new file mode 100644
index 000000000000..7615040df756
--- /dev/null
+++ b/drivers/net/davinci_mdio.c
@@ -0,0 +1,475 @@
+/*
+ * DaVinci MDIO Module driver
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * Shamelessly ripped out of davinci_emac.c, original copyrights follow:
+ *
+ * Copyright (C) 2009 Texas Instruments.
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * ---------------------------------------------------------------------------
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/phy.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/davinci_emac.h>
+
+/*
+ * This timeout is a worst-case, purely defensive measure against
+ * unexpected controller lockups. Ideally, we should never hit this
+ * scenario in practice.
+ */
+#define MDIO_TIMEOUT 100 /* msecs */
+
+#define PHY_REG_MASK 0x1f
+#define PHY_ID_MASK 0x1f
+
+#define DEF_OUT_FREQ 2200000 /* 2.2 MHz */
+
+struct davinci_mdio_regs {
+ u32 version;
+ u32 control;
+#define CONTROL_IDLE BIT(31)
+#define CONTROL_ENABLE BIT(30)
+#define CONTROL_MAX_DIV (0xff)
+
+ u32 alive;
+ u32 link;
+ u32 linkintraw;
+ u32 linkintmasked;
+ u32 __reserved_0[2];
+ u32 userintraw;
+ u32 userintmasked;
+ u32 userintmaskset;
+ u32 userintmaskclr;
+ u32 __reserved_1[20];
+
+ struct {
+ u32 access;
+#define USERACCESS_GO BIT(31)
+#define USERACCESS_WRITE BIT(30)
+#define USERACCESS_ACK BIT(29)
+#define USERACCESS_READ (0)
+#define USERACCESS_DATA (0xffff)
+
+ u32 physel;
+ } user[0];
+};
+
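
This struct overlays the MDIO register block directly, replacing the open-coded offsets that davinci_emac.c used to carry: summing the fields and the two reserved runs puts user[0] at offset 0x80 with an 8-byte stride, which is exactly the old MDIO_USERACCESS(inst) (0x80 + inst * 8) and MDIO_USERPHYSEL(inst) (0x84 + inst * 8) arithmetic removed above. A compile-time check along these lines (hypothetical, not in the patch) would pin the layout down:

	/* hypothetical layout checks; the offsets follow from the struct above */
	static inline void davinci_mdio_check_layout(void)
	{
		BUILD_BUG_ON(offsetof(struct davinci_mdio_regs, control) != 0x04);
		BUILD_BUG_ON(offsetof(struct davinci_mdio_regs, user) != 0x80);
		BUILD_BUG_ON(sizeof(((struct davinci_mdio_regs *)0)->user[0]) != 8);
	}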
+static struct mdio_platform_data default_pdata = {
+ .bus_freq = DEF_OUT_FREQ,
+};
+
+struct davinci_mdio_data {
+ struct mdio_platform_data pdata;
+ struct davinci_mdio_regs __iomem *regs;
+ spinlock_t lock;
+ struct clk *clk;
+ struct device *dev;
+ struct mii_bus *bus;
+ bool suspended;
+ unsigned long access_time; /* jiffies */
+};
+
+static void __davinci_mdio_reset(struct davinci_mdio_data *data)
+{
+ u32 mdio_in, div, mdio_out_khz, access_time;
+
+ mdio_in = clk_get_rate(data->clk);
+ div = (mdio_in / data->pdata.bus_freq) - 1;
+ if (div > CONTROL_MAX_DIV)
+ div = CONTROL_MAX_DIV;
+
+ /* set enable and clock divider */
+ __raw_writel(div | CONTROL_ENABLE, &data->regs->control);
+
+ /*
+ * One mdio transaction consists of:
+ * 32 bits of preamble
+ * 32 bits of transferred data
+ * 24 bits of bus yield (not needed unless shared?)
+ */
+ mdio_out_khz = mdio_in / (1000 * (div + 1));
+ access_time = (88 * 1000) / mdio_out_khz;
+
+ /*
+ * In the worst case, we could be kicking off a user-access immediately
+ * after the mdio bus scan state-machine triggered its own read. If
+ * so, our request could get deferred by one access cycle. We
+ * defensively allow for 4 access cycles.
+ */
+ data->access_time = usecs_to_jiffies(access_time * 4);
+ if (!data->access_time)
+ data->access_time = 1;
+}
+
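
To make the timing math concrete with assumed numbers (a 100 MHz mdio input clock against the 2.2 MHz default bus_freq; neither figure comes from this patch): div = 100000000 / 2200000 - 1 = 44, mdio_out_khz = 100000 / 45 = 2222, and one 88-bit transaction takes 88000 / 2222, roughly 39 usecs. The four-cycle allowance stored in access_time therefore comes to about 158 usecs, before the code pins it to at least one jiffy.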
+static int davinci_mdio_reset(struct mii_bus *bus)
+{
+ struct davinci_mdio_data *data = bus->priv;
+ u32 phy_mask, ver;
+
+ __davinci_mdio_reset(data);
+
+ /* wait for scan logic to settle */
+ msleep(PHY_MAX_ADDR * data->access_time);
+
+ /* dump hardware version info */
+ ver = __raw_readl(&data->regs->version);
+ dev_info(data->dev, "davinci mdio revision %d.%d\n",
+ (ver >> 8) & 0xff, ver & 0xff);
+
+ /* get phy mask from the alive register */
+ phy_mask = __raw_readl(&data->regs->alive);
+ if (phy_mask) {
+ /* restrict mdio bus to live phys only */
+ dev_info(data->dev, "detected phy mask %x\n", ~phy_mask);
+ phy_mask = ~phy_mask;
+ } else {
+ /* desperately scan all phys */
+ dev_warn(data->dev, "no live phy, scanning all\n");
+ phy_mask = 0;
+ }
+ data->bus->phy_mask = phy_mask;
+
+ return 0;
+}
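+
+/*
+ * Note the inversion above: the ALIVE register sets one bit per phy
+ * address that responded to the scan, while mii_bus->phy_mask uses
+ * the opposite convention (set bits mark addresses to skip).
+ */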
+
+/* wait until hardware is ready for another user access */
+static inline int wait_for_user_access(struct davinci_mdio_data *data)
+{
+ struct davinci_mdio_regs __iomem *regs = data->regs;
+ unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);
+ u32 reg;
+
+ while (time_after(timeout, jiffies)) {
+ reg = __raw_readl(&regs->user[0].access);
+ if ((reg & USERACCESS_GO) == 0)
+ return 0;
+
+ reg = __raw_readl(&regs->control);
+ if ((reg & CONTROL_IDLE) == 0)
+ continue;
+
+ /*
+ * An emac soft_reset may have clobbered the mdio controller's
+ * state machine. We need to reset and retry the current
+ * operation
+ */
+ dev_warn(data->dev, "resetting idled controller\n");
+ __davinci_mdio_reset(data);
+ return -EAGAIN;
+ }
+ dev_err(data->dev, "timed out waiting for user access\n");
+ return -ETIMEDOUT;
+}
+
+/* wait until hardware state machine is idle */
+static inline int wait_for_idle(struct davinci_mdio_data *data)
+{
+ struct davinci_mdio_regs __iomem *regs = data->regs;
+ unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);
+
+ while (time_after(timeout, jiffies)) {
+ if (__raw_readl(&regs->control) & CONTROL_IDLE)
+ return 0;
+ }
+ dev_err(data->dev, "timed out waiting for idle\n");
+ return -ETIMEDOUT;
+}
+
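+/*
+ * User access protocol, as implemented below: arm a transaction by
+ * writing USERACCESS_GO together with the direction, register and
+ * phy id fields, then poll until the hardware clears GO. On reads,
+ * USERACCESS_ACK indicates the phy answered and the low 16 bits of
+ * the access register hold the data; a missing ACK becomes -EIO.
+ * A -EAGAIN from wait_for_user_access() means the controller was
+ * reset underneath us, so the transaction must be re-armed.
+ */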
+static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
+{
+ struct davinci_mdio_data *data = bus->priv;
+ u32 reg;
+ int ret;
+
+ if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
+ return -EINVAL;
+
+ spin_lock(&data->lock);
+
+ if (data->suspended) {
+ spin_unlock(&data->lock);
+ return -ENODEV;
+ }
+
+ reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
+ (phy_id << 16));
+
+ while (1) {
+ ret = wait_for_user_access(data);
+ if (ret == -EAGAIN)
+ continue;
+ if (ret < 0)
+ break;
+
+ __raw_writel(reg, &data->regs->user[0].access);
+
+ ret = wait_for_user_access(data);
+ if (ret == -EAGAIN)
+ continue;
+ if (ret < 0)
+ break;
+
+ reg = __raw_readl(&data->regs->user[0].access);
+ ret = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -EIO;
+ break;
+ }
+
+ spin_unlock(&data->lock);
+
+ return ret;
+}
+
+static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
+ int phy_reg, u16 phy_data)
+{
+ struct davinci_mdio_data *data = bus->priv;
+ u32 reg;
+ int ret;
+
+ if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
+ return -EINVAL;
+
+ spin_lock(&data->lock);
+
+ if (data->suspended) {
+ spin_unlock(&data->lock);
+ return -ENODEV;
+ }
+
+ reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
+ (phy_id << 16) | (phy_data & USERACCESS_DATA));
+
+ while (1) {
+ ret = wait_for_user_access(data);
+ if (ret == -EAGAIN)
+ continue;
+ if (ret < 0)
+ break;
+
+ __raw_writel(reg, &data->regs->user[0].access);
+
+ ret = wait_for_user_access(data);
+ if (ret == -EAGAIN)
+ continue;
+ break;
+ }
+
+ spin_unlock(&data->lock);
+
+	return ret;
+}
+
+static int __devinit davinci_mdio_probe(struct platform_device *pdev)
+{
+ struct mdio_platform_data *pdata = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct davinci_mdio_data *data;
+ struct resource *res;
+ struct phy_device *phy;
+ int ret, addr;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ dev_err(dev, "failed to alloc device data\n");
+ return -ENOMEM;
+ }
+
+ data->pdata = pdata ? (*pdata) : default_pdata;
+
+ data->bus = mdiobus_alloc();
+ if (!data->bus) {
+ dev_err(dev, "failed to alloc mii bus\n");
+ ret = -ENOMEM;
+ goto bail_out;
+ }
+
+ data->bus->name = dev_name(dev);
+	data->bus->read = davinci_mdio_read;
+	data->bus->write = davinci_mdio_write;
+	data->bus->reset = davinci_mdio_reset;
+ data->bus->parent = dev;
+ data->bus->priv = data;
+ snprintf(data->bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+
+ data->clk = clk_get(dev, NULL);
+ if (IS_ERR(data->clk)) {
+		dev_err(dev, "failed to get device clock\n");
+		ret = PTR_ERR(data->clk);
+		data->clk = NULL;
+ goto bail_out;
+ }
+
+ clk_enable(data->clk);
+
+ dev_set_drvdata(dev, data);
+ data->dev = dev;
+ spin_lock_init(&data->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "could not find register map resource\n");
+ ret = -ENOENT;
+ goto bail_out;
+ }
+
+ res = devm_request_mem_region(dev, res->start, resource_size(res),
+ dev_name(dev));
+ if (!res) {
+ dev_err(dev, "could not allocate register map resource\n");
+ ret = -ENXIO;
+ goto bail_out;
+ }
+
+ data->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ if (!data->regs) {
+ dev_err(dev, "could not map mdio registers\n");
+ ret = -ENOMEM;
+ goto bail_out;
+ }
+
+ /* register the mii bus */
+ ret = mdiobus_register(data->bus);
+ if (ret)
+ goto bail_out;
+
+ /* scan and dump the bus */
+ for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+ phy = data->bus->phy_map[addr];
+ if (phy) {
+ dev_info(dev, "phy[%d]: device %s, driver %s\n",
+ phy->addr, dev_name(&phy->dev),
+ phy->drv ? phy->drv->name : "unknown");
+ }
+ }
+
+ return 0;
+
+bail_out:
+ if (data->bus)
+ mdiobus_free(data->bus);
+
+ if (data->clk) {
+ clk_disable(data->clk);
+ clk_put(data->clk);
+ }
+
+ kfree(data);
+
+ return ret;
+}
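+
+/*
+ * Note on the error path above: the memory region and mapping are
+ * devm-managed and released automatically when probe fails, so
+ * bail_out only has to undo the manually acquired bus and clock.
+ */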
+
+static int __devexit davinci_mdio_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct davinci_mdio_data *data = dev_get_drvdata(dev);
+
+	if (data->bus) {
+		mdiobus_unregister(data->bus);
+		mdiobus_free(data->bus);
+	}
+
+ if (data->clk) {
+ clk_disable(data->clk);
+ clk_put(data->clk);
+ }
+
+ dev_set_drvdata(dev, NULL);
+
+ kfree(data);
+
+ return 0;
+}
+
+static int davinci_mdio_suspend(struct device *dev)
+{
+ struct davinci_mdio_data *data = dev_get_drvdata(dev);
+ u32 ctrl;
+
+ spin_lock(&data->lock);
+
+ /* shutdown the scan state machine */
+ ctrl = __raw_readl(&data->regs->control);
+ ctrl &= ~CONTROL_ENABLE;
+ __raw_writel(ctrl, &data->regs->control);
+ wait_for_idle(data);
+
+ if (data->clk)
+ clk_disable(data->clk);
+
+ data->suspended = true;
+ spin_unlock(&data->lock);
+
+ return 0;
+}
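+
+/*
+ * While suspended, davinci_mdio_read()/davinci_mdio_write() fail
+ * early with -ENODEV; the data->suspended checks are made under the
+ * same lock taken here, so no user access races with the disabled
+ * state machine.
+ */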
+
+static int davinci_mdio_resume(struct device *dev)
+{
+ struct davinci_mdio_data *data = dev_get_drvdata(dev);
+ u32 ctrl;
+
+ spin_lock(&data->lock);
+ if (data->clk)
+ clk_enable(data->clk);
+
+ /* restart the scan state machine */
+ ctrl = __raw_readl(&data->regs->control);
+ ctrl |= CONTROL_ENABLE;
+ __raw_writel(ctrl, &data->regs->control);
+
+ data->suspended = false;
+ spin_unlock(&data->lock);
+
+ return 0;
+}
+
+static const struct dev_pm_ops davinci_mdio_pm_ops = {
+ .suspend = davinci_mdio_suspend,
+ .resume = davinci_mdio_resume,
+};
+
+static struct platform_driver davinci_mdio_driver = {
+ .driver = {
+ .name = "davinci_mdio",
+ .owner = THIS_MODULE,
+ .pm = &davinci_mdio_pm_ops,
+ },
+ .probe = davinci_mdio_probe,
+ .remove = __devexit_p(davinci_mdio_remove),
+};
+
+static int __init davinci_mdio_init(void)
+{
+ return platform_driver_register(&davinci_mdio_driver);
+}
+device_initcall(davinci_mdio_init);
+
+static void __exit davinci_mdio_exit(void)
+{
+ platform_driver_unregister(&davinci_mdio_driver);
+}
+module_exit(davinci_mdio_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DaVinci MDIO driver");