author     Linus Torvalds <torvalds@linux-foundation.org>  2023-02-24 12:07:00 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-02-24 12:07:00 -0800
commit     72bffe7e1eb6cb82b90aa14cd786f3f5ede9e0ae (patch)
tree       4af7472d4342f61bba8d86e8d376bbd049ee8126 /drivers
parent     ab7362d04d7c14923420c1e19e889da512a65cd7 (diff)
parent     1243741f6b02b5f2c06bca910f894c333838f994 (diff)
Merge tag 'usb-6.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb
Pull USB / Thunderbolt driver updates from Greg KH:
 "Here is the big set of USB and Thunderbolt driver changes for 6.3-rc1.
  Nothing major in here, just lots of good development, including:

   - Thunderbolt additions for new device support and features
   - xhci driver updates and cleanups
   - USB gadget media driver updates (includes media core changes that
     were acked by the v4l2 maintainers)
   - lots of other USB gadget driver updates for new features
   - dwc3 driver updates and fixes
   - minor debugfs leak fixes
   - typec driver updates and additions
   - dt-bindings conversions to yaml
   - other small bugfixes and driver updates

  All have been in linux-next for a while with no reported issues"

* tag 'usb-6.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb: (237 commits)
  usb: dwc3: xilinx: Remove unused of_gpio,h
  usb: typec: pd: Add higher capability sysfs for sink PDO
  usb: typec: pd: Remove usb_suspend_supported sysfs from sink PDO
  usb: dwc3: pci: add support for the Intel Meteor Lake-M
  usb: gadget: u_ether: Don't warn in gether_setup_name_default()
  usb: gadget: u_ether: Convert prints to device prints
  usb: gadget: u_serial: Add null pointer check in gserial_resume
  usb: gadget: uvc: fix missing mutex_unlock() if kstrtou8() fails
  xhci: host: potential NULL dereference in xhci_generic_plat_probe()
  dt-bindings: usb: amlogic,meson-g12a-usb-ctrl: make G12A usb3-phy0 optional
  usb: host: fsl-mph-dr-of: reuse device_set_of_node_from_dev
  of: device: Do not ignore error code in of_device_uevent_modalias
  of: device: Ignore modalias of reused nodes
  usb: gadget: configfs: Fix set but not used variable warning
  usb: gadget: uvc: Use custom strings if available
  usb: gadget: uvc: Allow linking function to string descs
  usb: gadget: uvc: Pick up custom string descriptor IDs
  usb: gadget: uvc: Allow linking XUs to string descriptors
  usb: gadget: configfs: Attach arbitrary strings to cdev
  usb: gadget: configfs: Support arbitrary string descriptors
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/clk/renesas/r9a06g032-clocks.c | 28
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 15
-rw-r--r--  drivers/i2c/busses/i2c-nvidia-gpu.c | 4
-rw-r--r--  drivers/media/common/Kconfig | 3
-rw-r--r--  drivers/media/common/Makefile | 1
-rw-r--r--  drivers/media/common/uvc.c | 183
-rw-r--r--  drivers/media/usb/uvc/Kconfig | 1
-rw-r--r--  drivers/media/usb/uvc/uvc_ctrl.c | 342
-rw-r--r--  drivers/media/usb/uvc/uvc_driver.c | 185
-rw-r--r--  drivers/media/usb/uvc/uvc_entity.c | 2
-rw-r--r--  drivers/media/usb/uvc/uvc_status.c | 125
-rw-r--r--  drivers/media/usb/uvc/uvc_v4l2.c | 111
-rw-r--r--  drivers/media/usb/uvc/uvc_video.c | 58
-rw-r--r--  drivers/media/usb/uvc/uvcvideo.h | 39
-rw-r--r--  drivers/of/device.c | 6
-rw-r--r--  drivers/phy/tegra/Makefile | 1
-rw-r--r--  drivers/phy/tegra/xusb-tegra186.c | 64
-rw-r--r--  drivers/phy/tegra/xusb.c | 6
-rw-r--r--  drivers/phy/tegra/xusb.h | 23
-rw-r--r--  drivers/thunderbolt/acpi.c | 13
-rw-r--r--  drivers/thunderbolt/ctl.c | 52
-rw-r--r--  drivers/thunderbolt/ctl.h | 2
-rw-r--r--  drivers/thunderbolt/debugfs.c | 5
-rw-r--r--  drivers/thunderbolt/switch.c | 42
-rw-r--r--  drivers/thunderbolt/tb.c | 508
-rw-r--r--  drivers/thunderbolt/tb.h | 39
-rw-r--r--  drivers/thunderbolt/tb_msgs.h | 11
-rw-r--r--  drivers/thunderbolt/tb_regs.h | 36
-rw-r--r--  drivers/thunderbolt/tunnel.c | 506
-rw-r--r--  drivers/thunderbolt/tunnel.h | 18
-rw-r--r--  drivers/thunderbolt/usb4.c | 572
-rw-r--r--  drivers/usb/cdns3/cdnsp-gadget.c | 2
-rw-r--r--  drivers/usb/cdns3/cdnsp-gadget.h | 4
-rw-r--r--  drivers/usb/cdns3/cdnsp-ring.c | 110
-rw-r--r--  drivers/usb/chipidea/ci_hdrc_imx.c | 10
-rw-r--r--  drivers/usb/chipidea/debug.c | 2
-rw-r--r--  drivers/usb/chipidea/usbmisc_imx.c | 6
-rw-r--r--  drivers/usb/common/ulpi.c | 14
-rw-r--r--  drivers/usb/core/hub.c | 5
-rw-r--r--  drivers/usb/core/sysfs.c | 5
-rw-r--r--  drivers/usb/core/usb.c | 2
-rw-r--r--  drivers/usb/dwc3/core.h | 2
-rw-r--r--  drivers/usb/dwc3/debug.h | 3
-rw-r--r--  drivers/usb/dwc3/debugfs.c | 19
-rw-r--r--  drivers/usb/dwc3/dwc3-pci.c | 4
-rw-r--r--  drivers/usb/dwc3/dwc3-xilinx.c | 1
-rw-r--r--  drivers/usb/dwc3/gadget.c | 4
-rw-r--r--  drivers/usb/early/xhci-dbc.c | 8
-rw-r--r--  drivers/usb/fotg210/Kconfig | 2
-rw-r--r--  drivers/usb/fotg210/fotg210-core.c | 83
-rw-r--r--  drivers/usb/fotg210/fotg210-hcd.c | 69
-rw-r--r--  drivers/usb/fotg210/fotg210-hcd.h | 1
-rw-r--r--  drivers/usb/fotg210/fotg210-udc.c | 161
-rw-r--r--  drivers/usb/fotg210/fotg210-udc.h | 4
-rw-r--r--  drivers/usb/fotg210/fotg210.h | 27
-rw-r--r--  drivers/usb/gadget/Kconfig | 1
-rw-r--r--  drivers/usb/gadget/composite.c | 102
-rw-r--r--  drivers/usb/gadget/configfs.c | 500
-rw-r--r--  drivers/usb/gadget/function/f_fs.c | 8
-rw-r--r--  drivers/usb/gadget/function/f_uvc.c | 150
-rw-r--r--  drivers/usb/gadget/function/u_ether.c | 38
-rw-r--r--  drivers/usb/gadget/function/u_serial.c | 23
-rw-r--r--  drivers/usb/gadget/function/u_uvc.h | 18
-rw-r--r--  drivers/usb/gadget/function/uvc.h | 4
-rw-r--r--  drivers/usb/gadget/function/uvc_configfs.c | 1106
-rw-r--r--  drivers/usb/gadget/function/uvc_configfs.h | 52
-rw-r--r--  drivers/usb/gadget/function/uvc_v4l2.c | 16
-rw-r--r--  drivers/usb/gadget/legacy/hid.c | 7
-rw-r--r--  drivers/usb/gadget/udc/Kconfig | 22
-rw-r--r--  drivers/usb/gadget/udc/Makefile | 2
-rw-r--r--  drivers/usb/gadget/udc/bcm63xx_udc.c | 13
-rw-r--r--  drivers/usb/gadget/udc/fusb300_udc.c | 10
-rw-r--r--  drivers/usb/gadget/udc/gr_udc.c | 2
-rw-r--r--  drivers/usb/gadget/udc/lpc32xx_udc.c | 2
-rw-r--r--  drivers/usb/gadget/udc/pxa25x_udc.c | 2
-rw-r--r--  drivers/usb/gadget/udc/pxa27x_udc.c | 2
-rw-r--r--  drivers/usb/gadget/udc/renesas_usb3.c | 136
-rw-r--r--  drivers/usb/gadget/udc/renesas_usbf.c | 3406
-rw-r--r--  drivers/usb/gadget/udc/rzv2m_usb3drd.c | 139
-rw-r--r--  drivers/usb/gadget/udc/tegra-xudc.c | 50
-rw-r--r--  drivers/usb/host/Kconfig | 22
-rw-r--r--  drivers/usb/host/Makefile | 8
-rw-r--r--  drivers/usb/host/ehci-fsl.c | 2
-rw-r--r--  drivers/usb/host/fsl-mph-dr-of.c | 3
-rw-r--r--  drivers/usb/host/isp116x-hcd.c | 2
-rw-r--r--  drivers/usb/host/isp1362-hcd.c | 2
-rw-r--r--  drivers/usb/host/max3421-hcd.c | 15
-rw-r--r--  drivers/usb/host/sl811-hcd.c | 2
-rw-r--r--  drivers/usb/host/uhci-hcd.c | 6
-rw-r--r--  drivers/usb/host/xhci-debugfs.c | 2
-rw-r--r--  drivers/usb/host/xhci-hub.c | 257
-rw-r--r--  drivers/usb/host/xhci-mem.c | 338
-rw-r--r--  drivers/usb/host/xhci-mvebu.c | 2
-rw-r--r--  drivers/usb/host/xhci-plat.c | 144
-rw-r--r--  drivers/usb/host/xhci-plat.h | 7
-rw-r--r--  drivers/usb/host/xhci-rcar.c | 102
-rw-r--r--  drivers/usb/host/xhci-rcar.h | 55
-rw-r--r--  drivers/usb/host/xhci-ring.c | 88
-rw-r--r--  drivers/usb/host/xhci-rzv2m.c | 38
-rw-r--r--  drivers/usb/host/xhci-rzv2m.h | 16
-rw-r--r--  drivers/usb/host/xhci-tegra.c | 392
-rw-r--r--  drivers/usb/host/xhci.c | 81
-rw-r--r--  drivers/usb/host/xhci.h | 40
-rw-r--r--  drivers/usb/misc/onboard_usb_hub.c | 4
-rw-r--r--  drivers/usb/misc/onboard_usb_hub.h | 11
-rw-r--r--  drivers/usb/mtu3/mtu3_gadget.c | 3
-rw-r--r--  drivers/usb/mtu3/mtu3_hw_regs.h | 1
-rw-r--r--  drivers/usb/mtu3/mtu3_qmu.c | 7
-rw-r--r--  drivers/usb/musb/da8xx.c | 4
-rw-r--r--  drivers/usb/musb/mediatek.c | 3
-rw-r--r--  drivers/usb/musb/sunxi.c | 99
-rw-r--r--  drivers/usb/serial/option.c | 4
-rw-r--r--  drivers/usb/storage/ene_ub6250.c | 2
-rw-r--r--  drivers/usb/typec/altmodes/displayport.c | 12
-rw-r--r--  drivers/usb/typec/bus.c | 33
-rw-r--r--  drivers/usb/typec/bus.h | 2
-rw-r--r--  drivers/usb/typec/class.c | 15
-rw-r--r--  drivers/usb/typec/hd3ss3220.c | 29
-rw-r--r--  drivers/usb/typec/mux/Kconfig | 6
-rw-r--r--  drivers/usb/typec/mux/Makefile | 1
-rw-r--r--  drivers/usb/typec/mux/gpio-sbu-mux.c | 172
-rw-r--r--  drivers/usb/typec/mux/intel_pmc_mux.c | 13
-rw-r--r--  drivers/usb/typec/pd.c | 9
-rw-r--r--  drivers/usb/typec/retimer.h | 2
-rw-r--r--  drivers/usb/typec/tcpm/Makefile | 1
-rw-r--r--  drivers/usb/typec/tcpm/maxim_contaminant.c | 387
-rw-r--r--  drivers/usb/typec/tcpm/tcpci.c | 19
-rw-r--r--  drivers/usb/typec/tcpm/tcpci_maxim.h | 89
-rw-r--r--  drivers/usb/typec/tcpm/tcpci_maxim_core.c (renamed from drivers/usb/typec/tcpm/tcpci_maxim.c) | 53
-rw-r--r--  drivers/usb/typec/tcpm/tcpm.c | 60
-rw-r--r--  drivers/usb/typec/tipd/core.c | 38
-rw-r--r--  drivers/usb/typec/ucsi/ucsi.c | 164
-rw-r--r--  drivers/usb/typec/ucsi/ucsi.h | 8
-rw-r--r--  drivers/usb/typec/ucsi/ucsi_ccg.c | 22
134 files changed, 10555 insertions, 1702 deletions
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
index 983faa5707b9..087146f2ee06 100644
--- a/drivers/clk/renesas/r9a06g032-clocks.c
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -25,6 +25,8 @@
#include <linux/spinlock.h>
#include <dt-bindings/clock/r9a06g032-sysctrl.h>
+#define R9A06G032_SYSCTRL_USB 0x00
+#define R9A06G032_SYSCTRL_USB_H2MODE (1<<1)
#define R9A06G032_SYSCTRL_DMAMUX 0xA0
struct r9a06g032_gate {
@@ -918,6 +920,29 @@ static void r9a06g032_clocks_del_clk_provider(void *data)
of_clk_del_provider(data);
}
+static void __init r9a06g032_init_h2mode(struct r9a06g032_priv *clocks)
+{
+ struct device_node *usbf_np = NULL;
+ u32 usb;
+
+ while ((usbf_np = of_find_compatible_node(usbf_np, NULL,
+ "renesas,rzn1-usbf"))) {
+ if (of_device_is_available(usbf_np))
+ break;
+ }
+
+ usb = readl(clocks->reg + R9A06G032_SYSCTRL_USB);
+ if (usbf_np) {
+ /* 1 host and 1 device mode */
+ usb &= ~R9A06G032_SYSCTRL_USB_H2MODE;
+ of_node_put(usbf_np);
+ } else {
+ /* 2 hosts mode */
+ usb |= R9A06G032_SYSCTRL_USB_H2MODE;
+ }
+ writel(usb, clocks->reg + R9A06G032_SYSCTRL_USB);
+}
+
static int __init r9a06g032_clocks_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -947,6 +972,9 @@ static int __init r9a06g032_clocks_probe(struct platform_device *pdev)
clocks->reg = of_iomap(np, 0);
if (WARN_ON(!clocks->reg))
return -ENOMEM;
+
+ r9a06g032_init_h2mode(clocks);
+
for (i = 0; i < ARRAY_SIZE(r9a06g032_clocks); ++i) {
const struct r9a06g032_clkdesc *d = &r9a06g032_clocks[i];
const char *parent_name = d->source ?
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 3c573d41d404..1bbe6708d0a7 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -4150,17 +4150,6 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
spin_unlock_irqrestore(&sched_engine->lock, flags);
}
-static unsigned long list_count(struct list_head *list)
-{
- struct list_head *pos;
- unsigned long count = 0;
-
- list_for_each(pos, list)
- count++;
-
- return count;
-}
-
void intel_execlists_dump_active_requests(struct intel_engine_cs *engine,
struct i915_request *hung_rq,
struct drm_printer *m)
@@ -4171,8 +4160,8 @@ void intel_execlists_dump_active_requests(struct intel_engine_cs *engine,
intel_engine_dump_active_requests(&engine->sched_engine->requests, hung_rq, m);
- drm_printf(m, "\tOn hold?: %lu\n",
- list_count(&engine->sched_engine->hold));
+ drm_printf(m, "\tOn hold?: %zu\n",
+ list_count_nodes(&engine->sched_engine->hold));
spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}
diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c
index 12e330cd7635..a8b99e7f6262 100644
--- a/drivers/i2c/busses/i2c-nvidia-gpu.c
+++ b/drivers/i2c/busses/i2c-nvidia-gpu.c
@@ -259,8 +259,8 @@ static const struct pci_device_id gpu_i2c_ids[] = {
MODULE_DEVICE_TABLE(pci, gpu_i2c_ids);
static const struct property_entry ccgx_props[] = {
- /* Use FW built for NVIDIA (nv) only */
- PROPERTY_ENTRY_U16("ccgx,firmware-build", ('n' << 8) | 'v'),
+ /* Use FW built for NVIDIA GPU only */
+ PROPERTY_ENTRY_STRING("firmware-name", "nvidia,gpu"),
{ }
};
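The string property registered here is consumed by the CCGx UCSI driver (ucsi_ccg.c is also touched in this merge but not shown in this excerpt). As an illustrative sketch only, not code from the patch, a consumer could read such a property through the generic device-property API:

#include <linux/device.h>
#include <linux/property.h>

/* Illustrative helper (assumed name): fetch the requested firmware build. */
static const char *example_ccgx_fw_build(struct device *dev)
{
        const char *fw_name = NULL;

        /* "firmware-name" is the string property added above. */
        device_property_read_string(dev, "firmware-name", &fw_name);
        return fw_name;
}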
diff --git a/drivers/media/common/Kconfig b/drivers/media/common/Kconfig
index 852b7d92fbdd..b1bc58da27fc 100644
--- a/drivers/media/common/Kconfig
+++ b/drivers/media/common/Kconfig
@@ -14,6 +14,9 @@ config TTPCI_EEPROM
tristate
depends on I2C
+config UVC_COMMON
+ tristate
+
config VIDEO_CX2341X
tristate
diff --git a/drivers/media/common/Makefile b/drivers/media/common/Makefile
index d78a0df15478..3f17d696feb2 100644
--- a/drivers/media/common/Makefile
+++ b/drivers/media/common/Makefile
@@ -5,5 +5,6 @@ obj-y += b2c2/ siano/ v4l2-tpg/ videobuf2/
# (e. g. LC_ALL=C sort Makefile)
obj-$(CONFIG_CYPRESS_FIRMWARE) += cypress_firmware.o
obj-$(CONFIG_TTPCI_EEPROM) += ttpci-eeprom.o
+obj-$(CONFIG_UVC_COMMON) += uvc.o
obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o
obj-$(CONFIG_VIDEO_TVEEPROM) += tveeprom.o
diff --git a/drivers/media/common/uvc.c b/drivers/media/common/uvc.c
new file mode 100644
index 000000000000..9c0ba7a6c185
--- /dev/null
+++ b/drivers/media/common/uvc.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/usb/uvc.h>
+#include <linux/videodev2.h>
+
+/* ------------------------------------------------------------------------
+ * Video formats
+ */
+
+static const struct uvc_format_desc uvc_fmts[] = {
+ {
+ .guid = UVC_GUID_FORMAT_YUY2,
+ .fcc = V4L2_PIX_FMT_YUYV,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_YUY2_ISIGHT,
+ .fcc = V4L2_PIX_FMT_YUYV,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_NV12,
+ .fcc = V4L2_PIX_FMT_NV12,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_MJPEG,
+ .fcc = V4L2_PIX_FMT_MJPEG,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_YV12,
+ .fcc = V4L2_PIX_FMT_YVU420,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_I420,
+ .fcc = V4L2_PIX_FMT_YUV420,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_M420,
+ .fcc = V4L2_PIX_FMT_M420,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_UYVY,
+ .fcc = V4L2_PIX_FMT_UYVY,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_Y800,
+ .fcc = V4L2_PIX_FMT_GREY,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_Y8,
+ .fcc = V4L2_PIX_FMT_GREY,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_D3DFMT_L8,
+ .fcc = V4L2_PIX_FMT_GREY,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_KSMEDIA_L8_IR,
+ .fcc = V4L2_PIX_FMT_GREY,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_Y10,
+ .fcc = V4L2_PIX_FMT_Y10,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_Y12,
+ .fcc = V4L2_PIX_FMT_Y12,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_Y16,
+ .fcc = V4L2_PIX_FMT_Y16,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_BY8,
+ .fcc = V4L2_PIX_FMT_SBGGR8,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_BA81,
+ .fcc = V4L2_PIX_FMT_SBGGR8,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_GBRG,
+ .fcc = V4L2_PIX_FMT_SGBRG8,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_GRBG,
+ .fcc = V4L2_PIX_FMT_SGRBG8,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_RGGB,
+ .fcc = V4L2_PIX_FMT_SRGGB8,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_RGBP,
+ .fcc = V4L2_PIX_FMT_RGB565,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_BGR3,
+ .fcc = V4L2_PIX_FMT_BGR24,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_BGR4,
+ .fcc = V4L2_PIX_FMT_XBGR32,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_H264,
+ .fcc = V4L2_PIX_FMT_H264,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_H265,
+ .fcc = V4L2_PIX_FMT_HEVC,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_Y8I,
+ .fcc = V4L2_PIX_FMT_Y8I,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_Y12I,
+ .fcc = V4L2_PIX_FMT_Y12I,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_Z16,
+ .fcc = V4L2_PIX_FMT_Z16,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_RW10,
+ .fcc = V4L2_PIX_FMT_SRGGB10P,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_BG16,
+ .fcc = V4L2_PIX_FMT_SBGGR16,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_GB16,
+ .fcc = V4L2_PIX_FMT_SGBRG16,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_RG16,
+ .fcc = V4L2_PIX_FMT_SRGGB16,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_GR16,
+ .fcc = V4L2_PIX_FMT_SGRBG16,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_INVZ,
+ .fcc = V4L2_PIX_FMT_Z16,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_INVI,
+ .fcc = V4L2_PIX_FMT_Y10,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_INZI,
+ .fcc = V4L2_PIX_FMT_INZI,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_CNF4,
+ .fcc = V4L2_PIX_FMT_CNF4,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_HEVC,
+ .fcc = V4L2_PIX_FMT_HEVC,
+ },
+};
+
+const struct uvc_format_desc *uvc_format_by_guid(const u8 guid[16])
+{
+ unsigned int len = ARRAY_SIZE(uvc_fmts);
+ unsigned int i;
+
+ for (i = 0; i < len; ++i) {
+ if (memcmp(guid, uvc_fmts[i].guid, 16) == 0)
+ return &uvc_fmts[i];
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(uvc_format_by_guid);
+
+MODULE_LICENSE("GPL");
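The new helper is shared by the uvcvideo host driver and the UVC gadget. A minimal usage sketch, mirroring what uvc_parse_format() does further down with the returned descriptor (illustrative only, not part of the patch):

#include <linux/usb/uvc.h>

/* Illustrative only: translate a 16-byte UVC format GUID into a V4L2 fourcc. */
static u32 example_guid_to_fcc(const u8 guid[16])
{
        const struct uvc_format_desc *desc = uvc_format_by_guid(guid);

        /* Unknown GUIDs yield 0, as in uvc_parse_format(). */
        return desc ? desc->fcc : 0;
}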
diff --git a/drivers/media/usb/uvc/Kconfig b/drivers/media/usb/uvc/Kconfig
index ca51ee8e45f3..579532272fd6 100644
--- a/drivers/media/usb/uvc/Kconfig
+++ b/drivers/media/usb/uvc/Kconfig
@@ -3,6 +3,7 @@ config USB_VIDEO_CLASS
tristate "USB Video Class (UVC)"
depends on VIDEO_DEV
select VIDEOBUF2_VMALLOC
+ select UVC_COMMON
help
Support for the USB Video Class (UVC). Currently only video
input devices, such as webcams, are supported.
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index c95a2229f4fa..5e9d3da862dd 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -6,19 +6,21 @@
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*/
+#include <asm/barrier.h>
+#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
+#include <linux/usb/uvc.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <media/v4l2-ctrls.h>
-#include <media/v4l2-uvc.h>
#include "uvcvideo.h"
@@ -363,19 +365,45 @@ static const u32 uvc_control_classes[] = {
V4L2_CID_USER_CLASS,
};
-static const struct uvc_menu_info power_line_frequency_controls[] = {
- { 0, "Disabled" },
- { 1, "50 Hz" },
- { 2, "60 Hz" },
- { 3, "Auto" },
-};
+static const int exposure_auto_mapping[] = { 2, 1, 4, 8 };
-static const struct uvc_menu_info exposure_auto_controls[] = {
- { 2, "Auto Mode" },
- { 1, "Manual Mode" },
- { 4, "Shutter Priority Mode" },
- { 8, "Aperture Priority Mode" },
-};
+/*
+ * This function translates the V4L2 menu index @idx, as exposed to userspace as
+ * the V4L2 control value, to the corresponding UVC control value used by the
+ * device. The custom menu_mapping in the control @mapping is used when
+ * available, otherwise the function assumes that the V4L2 and UVC values are
+ * identical.
+ *
+ * For controls of type UVC_CTRL_DATA_TYPE_BITMASK, the UVC control value is
+ * expressed as a bitmask and is thus guaranteed to have a single bit set.
+ *
+ * The function returns -EINVAL if the V4L2 menu index @idx isn't valid for the
+ * control, which includes all controls whose type isn't UVC_CTRL_DATA_TYPE_ENUM
+ * or UVC_CTRL_DATA_TYPE_BITMASK.
+ */
+static int uvc_mapping_get_menu_value(const struct uvc_control_mapping *mapping,
+ u32 idx)
+{
+ if (!test_bit(idx, &mapping->menu_mask))
+ return -EINVAL;
+
+ if (mapping->menu_mapping)
+ return mapping->menu_mapping[idx];
+
+ return idx;
+}
+
+static const char *
+uvc_mapping_get_menu_name(const struct uvc_control_mapping *mapping, u32 idx)
+{
+ if (!test_bit(idx, &mapping->menu_mask))
+ return NULL;
+
+ if (mapping->menu_names)
+ return mapping->menu_names[idx];
+
+ return v4l2_ctrl_get_menu(mapping->id)[idx];
+}
static s32 uvc_ctrl_get_zoom(struct uvc_control_mapping *mapping,
u8 query, const u8 *data)
@@ -524,8 +552,9 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_MENU,
.data_type = UVC_CTRL_DATA_TYPE_BITMASK,
- .menu_info = exposure_auto_controls,
- .menu_count = ARRAY_SIZE(exposure_auto_controls),
+ .menu_mapping = exposure_auto_mapping,
+ .menu_mask = GENMASK(V4L2_EXPOSURE_APERTURE_PRIORITY,
+ V4L2_EXPOSURE_AUTO),
.slave_ids = { V4L2_CID_EXPOSURE_ABSOLUTE, },
},
{
@@ -721,32 +750,50 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
};
-static const struct uvc_control_mapping uvc_ctrl_mappings_uvc11[] = {
- {
- .id = V4L2_CID_POWER_LINE_FREQUENCY,
- .entity = UVC_GUID_UVC_PROCESSING,
- .selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
- .size = 2,
- .offset = 0,
- .v4l2_type = V4L2_CTRL_TYPE_MENU,
- .data_type = UVC_CTRL_DATA_TYPE_ENUM,
- .menu_info = power_line_frequency_controls,
- .menu_count = ARRAY_SIZE(power_line_frequency_controls) - 1,
- },
+const struct uvc_control_mapping uvc_ctrl_power_line_mapping_limited = {
+ .id = V4L2_CID_POWER_LINE_FREQUENCY,
+ .entity = UVC_GUID_UVC_PROCESSING,
+ .selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
+ .size = 2,
+ .offset = 0,
+ .v4l2_type = V4L2_CTRL_TYPE_MENU,
+ .data_type = UVC_CTRL_DATA_TYPE_ENUM,
+ .menu_mask = GENMASK(V4L2_CID_POWER_LINE_FREQUENCY_60HZ,
+ V4L2_CID_POWER_LINE_FREQUENCY_50HZ),
};
-static const struct uvc_control_mapping uvc_ctrl_mappings_uvc15[] = {
- {
- .id = V4L2_CID_POWER_LINE_FREQUENCY,
- .entity = UVC_GUID_UVC_PROCESSING,
- .selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
- .size = 2,
- .offset = 0,
- .v4l2_type = V4L2_CTRL_TYPE_MENU,
- .data_type = UVC_CTRL_DATA_TYPE_ENUM,
- .menu_info = power_line_frequency_controls,
- .menu_count = ARRAY_SIZE(power_line_frequency_controls),
- },
+const struct uvc_control_mapping uvc_ctrl_power_line_mapping_uvc11 = {
+ .id = V4L2_CID_POWER_LINE_FREQUENCY,
+ .entity = UVC_GUID_UVC_PROCESSING,
+ .selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
+ .size = 2,
+ .offset = 0,
+ .v4l2_type = V4L2_CTRL_TYPE_MENU,
+ .data_type = UVC_CTRL_DATA_TYPE_ENUM,
+ .menu_mask = GENMASK(V4L2_CID_POWER_LINE_FREQUENCY_60HZ,
+ V4L2_CID_POWER_LINE_FREQUENCY_DISABLED),
+};
+
+static const struct uvc_control_mapping *uvc_ctrl_mappings_uvc11[] = {
+ &uvc_ctrl_power_line_mapping_uvc11,
+ NULL, /* Sentinel */
+};
+
+static const struct uvc_control_mapping uvc_ctrl_power_line_mapping_uvc15 = {
+ .id = V4L2_CID_POWER_LINE_FREQUENCY,
+ .entity = UVC_GUID_UVC_PROCESSING,
+ .selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
+ .size = 2,
+ .offset = 0,
+ .v4l2_type = V4L2_CTRL_TYPE_MENU,
+ .data_type = UVC_CTRL_DATA_TYPE_ENUM,
+ .menu_mask = GENMASK(V4L2_CID_POWER_LINE_FREQUENCY_AUTO,
+ V4L2_CID_POWER_LINE_FREQUENCY_DISABLED),
+};
+
+static const struct uvc_control_mapping *uvc_ctrl_mappings_uvc15[] = {
+ &uvc_ctrl_power_line_mapping_uvc15,
+ NULL, /* Sentinel */
};
/* ------------------------------------------------------------------------
@@ -972,11 +1019,17 @@ static s32 __uvc_ctrl_get_value(struct uvc_control_mapping *mapping,
s32 value = mapping->get(mapping, UVC_GET_CUR, data);
if (mapping->v4l2_type == V4L2_CTRL_TYPE_MENU) {
- const struct uvc_menu_info *menu = mapping->menu_info;
unsigned int i;
- for (i = 0; i < mapping->menu_count; ++i, ++menu) {
- if (menu->value == value) {
+ for (i = 0; BIT(i) <= mapping->menu_mask; ++i) {
+ u32 menu_value;
+
+ if (!test_bit(i, &mapping->menu_mask))
+ continue;
+
+ menu_value = uvc_mapping_get_menu_value(mapping, i);
+
+ if (menu_value == value) {
value = i;
break;
}
@@ -1085,11 +1138,28 @@ static int uvc_query_v4l2_class(struct uvc_video_chain *chain, u32 req_id,
return 0;
}
+/*
+ * Check if control @v4l2_id can be accessed by the given control @ioctl
+ * (VIDIOC_G_EXT_CTRLS, VIDIOC_TRY_EXT_CTRLS or VIDIOC_S_EXT_CTRLS).
+ *
+ * For set operations on slave controls, check if the master's value is set to
+ * manual, either in the others controls set in the same ioctl call, or from
+ * the master's current value. This catches VIDIOC_S_EXT_CTRLS calls that set
+ * both the master and slave control, such as for instance setting
+ * auto_exposure=1, exposure_time_absolute=251.
+ */
int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
- bool read)
+ const struct v4l2_ext_controls *ctrls,
+ unsigned long ioctl)
{
+ struct uvc_control_mapping *master_map = NULL;
+ struct uvc_control *master_ctrl = NULL;
struct uvc_control_mapping *mapping;
struct uvc_control *ctrl;
+ bool read = ioctl == VIDIOC_G_EXT_CTRLS;
+ s32 val;
+ int ret;
+ int i;
if (__uvc_query_v4l2_class(chain, v4l2_id, 0) >= 0)
return -EACCES;
@@ -1104,6 +1174,29 @@ int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR) && !read)
return -EACCES;
+ if (ioctl != VIDIOC_S_EXT_CTRLS || !mapping->master_id)
+ return 0;
+
+ /*
+ * Iterate backwards in cases where the master control is accessed
+ * multiple times in the same ioctl. We want the last value.
+ */
+ for (i = ctrls->count - 1; i >= 0; i--) {
+ if (ctrls->controls[i].id == mapping->master_id)
+ return ctrls->controls[i].value ==
+ mapping->master_manual ? 0 : -EACCES;
+ }
+
+ __uvc_find_control(ctrl->entity, mapping->master_id, &master_map,
+ &master_ctrl, 0);
+
+ if (!master_ctrl || !(master_ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR))
+ return 0;
+
+ ret = __uvc_ctrl_get(chain, master_ctrl, master_map, &val);
+ if (ret >= 0 && val != mapping->master_manual)
+ return -EACCES;
+
return 0;
}
@@ -1121,6 +1214,25 @@ static const char *uvc_map_get_name(const struct uvc_control_mapping *map)
return "Unknown Control";
}
+static u32 uvc_get_ctrl_bitmap(struct uvc_control *ctrl,
+ struct uvc_control_mapping *mapping)
+{
+ /*
+ * Some controls, like CT_AE_MODE_CONTROL, use GET_RES to represent
+ * the number of bits supported. Those controls do not list GET_MAX
+ * as supported.
+ */
+ if (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES)
+ return mapping->get(mapping, UVC_GET_RES,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
+
+ if (ctrl->info.flags & UVC_CTRL_FLAG_GET_MAX)
+ return mapping->get(mapping, UVC_GET_MAX,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX));
+
+ return ~0;
+}
+
static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
struct uvc_control *ctrl,
struct uvc_control_mapping *mapping,
@@ -1128,7 +1240,6 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
{
struct uvc_control_mapping *master_map = NULL;
struct uvc_control *master_ctrl = NULL;
- const struct uvc_menu_info *menu;
unsigned int i;
memset(v4l2_ctrl, 0, sizeof(*v4l2_ctrl));
@@ -1169,13 +1280,19 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
switch (mapping->v4l2_type) {
case V4L2_CTRL_TYPE_MENU:
- v4l2_ctrl->minimum = 0;
- v4l2_ctrl->maximum = mapping->menu_count - 1;
+ v4l2_ctrl->minimum = ffs(mapping->menu_mask) - 1;
+ v4l2_ctrl->maximum = fls(mapping->menu_mask) - 1;
v4l2_ctrl->step = 1;
- menu = mapping->menu_info;
- for (i = 0; i < mapping->menu_count; ++i, ++menu) {
- if (menu->value == v4l2_ctrl->default_value) {
+ for (i = 0; BIT(i) <= mapping->menu_mask; ++i) {
+ u32 menu_value;
+
+ if (!test_bit(i, &mapping->menu_mask))
+ continue;
+
+ menu_value = uvc_mapping_get_menu_value(mapping, i);
+
+ if (menu_value == v4l2_ctrl->default_value) {
v4l2_ctrl->default_value = i;
break;
}
@@ -1195,6 +1312,12 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
v4l2_ctrl->step = 0;
return 0;
+ case V4L2_CTRL_TYPE_BITMASK:
+ v4l2_ctrl->minimum = 0;
+ v4l2_ctrl->maximum = uvc_get_ctrl_bitmap(ctrl, mapping);
+ v4l2_ctrl->step = 0;
+ return 0;
+
default:
break;
}
@@ -1268,11 +1391,11 @@ done:
int uvc_query_v4l2_menu(struct uvc_video_chain *chain,
struct v4l2_querymenu *query_menu)
{
- const struct uvc_menu_info *menu_info;
struct uvc_control_mapping *mapping;
struct uvc_control *ctrl;
u32 index = query_menu->index;
u32 id = query_menu->id;
+ const char *name;
int ret;
memset(query_menu, 0, sizeof(*query_menu));
@@ -1289,16 +1412,13 @@ int uvc_query_v4l2_menu(struct uvc_video_chain *chain,
goto done;
}
- if (query_menu->index >= mapping->menu_count) {
+ if (!test_bit(query_menu->index, &mapping->menu_mask)) {
ret = -EINVAL;
goto done;
}
- menu_info = &mapping->menu_info[query_menu->index];
-
- if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK &&
- (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES)) {
- s32 bitmap;
+ if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK) {
+ int mask;
if (!ctrl->cached) {
ret = uvc_ctrl_populate_cache(chain, ctrl);
@@ -1306,15 +1426,25 @@ int uvc_query_v4l2_menu(struct uvc_video_chain *chain,
goto done;
}
- bitmap = mapping->get(mapping, UVC_GET_RES,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
- if (!(bitmap & menu_info->value)) {
+ mask = uvc_mapping_get_menu_value(mapping, query_menu->index);
+ if (mask < 0) {
+ ret = mask;
+ goto done;
+ }
+
+ if (!(uvc_get_ctrl_bitmap(ctrl, mapping) & mask)) {
ret = -EINVAL;
goto done;
}
}
- strscpy(query_menu->name, menu_info->name, sizeof(query_menu->name));
+ name = uvc_mapping_get_menu_name(mapping, query_menu->index);
+ if (!name) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ strscpy(query_menu->name, name, sizeof(query_menu->name));
done:
mutex_unlock(&chain->ctrl_mutex);
@@ -1442,6 +1572,10 @@ static void uvc_ctrl_status_event_work(struct work_struct *work)
uvc_ctrl_status_event(w->chain, w->ctrl, w->data);
+ /* The barrier is needed to synchronize with uvc_status_stop(). */
+ if (smp_load_acquire(&dev->flush_status))
+ return;
+
/* Resubmit the URB. */
w->urb->interval = dev->int_ep->desc.bInterval;
ret = usb_submit_urb(w->urb, GFP_KERNEL);
@@ -1791,31 +1925,44 @@ int uvc_ctrl_set(struct uvc_fh *handle,
value = xctrl->value;
break;
+ case V4L2_CTRL_TYPE_BITMASK:
+ if (!ctrl->cached) {
+ ret = uvc_ctrl_populate_cache(chain, ctrl);
+ if (ret < 0)
+ return ret;
+ }
+
+ xctrl->value &= uvc_get_ctrl_bitmap(ctrl, mapping);
+ value = xctrl->value;
+ break;
+
case V4L2_CTRL_TYPE_BOOLEAN:
xctrl->value = clamp(xctrl->value, 0, 1);
value = xctrl->value;
break;
case V4L2_CTRL_TYPE_MENU:
- if (xctrl->value < 0 || xctrl->value >= mapping->menu_count)
+ if (xctrl->value < (ffs(mapping->menu_mask) - 1) ||
+ xctrl->value > (fls(mapping->menu_mask) - 1))
return -ERANGE;
- value = mapping->menu_info[xctrl->value].value;
+
+ if (!test_bit(xctrl->value, &mapping->menu_mask))
+ return -EINVAL;
+
+ value = uvc_mapping_get_menu_value(mapping, xctrl->value);
/*
* Valid menu indices are reported by the GET_RES request for
* UVC controls that support it.
*/
- if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK &&
- (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES)) {
+ if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK) {
if (!ctrl->cached) {
ret = uvc_ctrl_populate_cache(chain, ctrl);
if (ret < 0)
return ret;
}
- step = mapping->get(mapping, UVC_GET_RES,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
- if (!(step & value))
+ if (!(uvc_get_ctrl_bitmap(ctrl, mapping) & value))
return -EINVAL;
}
@@ -2218,31 +2365,42 @@ static int __uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
unsigned int i;
/*
- * Most mappings come from static kernel data and need to be duplicated.
+ * Most mappings come from static kernel data, and need to be duplicated.
* Mappings that come from userspace will be unnecessarily duplicated,
* this could be optimized.
*/
map = kmemdup(mapping, sizeof(*mapping), GFP_KERNEL);
- if (map == NULL)
+ if (!map)
return -ENOMEM;
+ map->name = NULL;
+ map->menu_names = NULL;
+ map->menu_mapping = NULL;
+
/* For UVCIOC_CTRL_MAP custom control */
if (mapping->name) {
map->name = kstrdup(mapping->name, GFP_KERNEL);
- if (!map->name) {
- kfree(map);
- return -ENOMEM;
- }
+ if (!map->name)
+ goto err_nomem;
}
INIT_LIST_HEAD(&map->ev_subs);
- size = sizeof(*mapping->menu_info) * mapping->menu_count;
- map->menu_info = kmemdup(mapping->menu_info, size, GFP_KERNEL);
- if (map->menu_info == NULL) {
- kfree(map->name);
- kfree(map);
- return -ENOMEM;
+ if (mapping->menu_mapping && mapping->menu_mask) {
+ size = sizeof(mapping->menu_mapping[0])
+ * fls(mapping->menu_mask);
+ map->menu_mapping = kmemdup(mapping->menu_mapping, size,
+ GFP_KERNEL);
+ if (!map->menu_mapping)
+ goto err_nomem;
+ }
+ if (mapping->menu_names && mapping->menu_mask) {
+ size = sizeof(mapping->menu_names[0])
+ * fls(mapping->menu_mask);
+ map->menu_names = kmemdup(mapping->menu_names, size,
+ GFP_KERNEL);
+ if (!map->menu_names)
+ goto err_nomem;
}
if (map->get == NULL)
@@ -2264,6 +2422,13 @@ static int __uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
ctrl->info.selector);
return 0;
+
+err_nomem:
+ kfree(map->menu_names);
+ kfree(map->menu_mapping);
+ kfree(map->name);
+ kfree(map);
+ return -ENOMEM;
}
int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
@@ -2421,8 +2586,7 @@ static void uvc_ctrl_prune_entity(struct uvc_device *dev,
static void uvc_ctrl_init_ctrl(struct uvc_video_chain *chain,
struct uvc_control *ctrl)
{
- const struct uvc_control_mapping *mappings;
- unsigned int num_mappings;
+ const struct uvc_control_mapping **mappings;
unsigned int i;
/*
@@ -2489,16 +2653,11 @@ static void uvc_ctrl_init_ctrl(struct uvc_video_chain *chain,
}
/* Finally process version-specific mappings. */
- if (chain->dev->uvc_version < 0x0150) {
- mappings = uvc_ctrl_mappings_uvc11;
- num_mappings = ARRAY_SIZE(uvc_ctrl_mappings_uvc11);
- } else {
- mappings = uvc_ctrl_mappings_uvc15;
- num_mappings = ARRAY_SIZE(uvc_ctrl_mappings_uvc15);
- }
+ mappings = chain->dev->uvc_version < 0x0150
+ ? uvc_ctrl_mappings_uvc11 : uvc_ctrl_mappings_uvc15;
- for (i = 0; i < num_mappings; ++i) {
- const struct uvc_control_mapping *mapping = &mappings[i];
+ for (i = 0; mappings[i]; ++i) {
+ const struct uvc_control_mapping *mapping = mappings[i];
if (uvc_entity_match_guid(ctrl->entity, mapping->entity) &&
ctrl->info.selector == mapping->selector)
@@ -2591,7 +2750,8 @@ static void uvc_ctrl_cleanup_mappings(struct uvc_device *dev,
list_for_each_entry_safe(mapping, nm, &ctrl->info.mappings, list) {
list_del(&mapping->list);
- kfree(mapping->menu_info);
+ kfree(mapping->menu_names);
+ kfree(mapping->menu_mapping);
kfree(mapping->name);
kfree(mapping);
}
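To make the new bitmap-based menu representation concrete, here is an illustrative expansion of the power line frequency masks defined above (the macro names are made up, not from the patch). The menu index doubles as the V4L2 control value: Disabled=0, 50 Hz=1, 60 Hz=2, Auto=3, and a set bit means that index is selectable.

#include <linux/bits.h>

/* Illustrative only: how the GENMASK()-based menu_mask values expand. */
#define EXAMPLE_PLF_MASK_LIMITED  GENMASK(2, 1)  /* 0x6: 50 Hz, 60 Hz           */
#define EXAMPLE_PLF_MASK_UVC11    GENMASK(2, 0)  /* 0x7: adds Disabled          */
#define EXAMPLE_PLF_MASK_UVC15    GENMASK(3, 0)  /* 0xf: adds Auto (UVC 1.5)    */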
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index e4bcb5011360..7aefa76a42b3 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -7,12 +7,14 @@
*/
#include <linux/atomic.h>
+#include <linux/bits.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
+#include <linux/usb/uvc.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
@@ -20,7 +22,6 @@
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
-#include <media/v4l2-uvc.h>
#include "uvcvideo.h"
@@ -224,7 +225,7 @@ static int uvc_parse_format(struct uvc_device *dev,
{
struct usb_interface *intf = streaming->intf;
struct usb_host_interface *alts = intf->cur_altsetting;
- struct uvc_format_desc *fmtdesc;
+ const struct uvc_format_desc *fmtdesc;
struct uvc_frame *frame;
const unsigned char *start = buffer;
unsigned int width_multiplier = 1;
@@ -251,14 +252,10 @@ static int uvc_parse_format(struct uvc_device *dev,
fmtdesc = uvc_format_by_guid(&buffer[5]);
if (fmtdesc != NULL) {
- strscpy(format->name, fmtdesc->name,
- sizeof(format->name));
format->fcc = fmtdesc->fcc;
} else {
dev_info(&streaming->intf->dev,
"Unknown video format %pUl\n", &buffer[5]);
- snprintf(format->name, sizeof(format->name), "%pUl\n",
- &buffer[5]);
format->fcc = 0;
}
@@ -270,8 +267,6 @@ static int uvc_parse_format(struct uvc_device *dev,
*/
if (dev->quirks & UVC_QUIRK_FORCE_Y8) {
if (format->fcc == V4L2_PIX_FMT_YUYV) {
- strscpy(format->name, "Greyscale 8-bit (Y8 )",
- sizeof(format->name));
format->fcc = V4L2_PIX_FMT_GREY;
format->bpp = 8;
width_multiplier = 2;
@@ -312,7 +307,6 @@ static int uvc_parse_format(struct uvc_device *dev,
return -EINVAL;
}
- strscpy(format->name, "MJPEG", sizeof(format->name));
format->fcc = V4L2_PIX_FMT_MJPEG;
format->flags = UVC_FMT_FLAG_COMPRESSED;
format->bpp = 0;
@@ -328,17 +322,7 @@ static int uvc_parse_format(struct uvc_device *dev,
return -EINVAL;
}
- switch (buffer[8] & 0x7f) {
- case 0:
- strscpy(format->name, "SD-DV", sizeof(format->name));
- break;
- case 1:
- strscpy(format->name, "SDL-DV", sizeof(format->name));
- break;
- case 2:
- strscpy(format->name, "HD-DV", sizeof(format->name));
- break;
- default:
+ if ((buffer[8] & 0x7f) > 2) {
uvc_dbg(dev, DESCR,
"device %d videostreaming interface %d: unknown DV format %u\n",
dev->udev->devnum,
@@ -346,9 +330,6 @@ static int uvc_parse_format(struct uvc_device *dev,
return -EINVAL;
}
- strlcat(format->name, buffer[8] & (1 << 7) ? " 60Hz" : " 50Hz",
- sizeof(format->name));
-
format->fcc = V4L2_PIX_FMT_DV;
format->flags = UVC_FMT_FLAG_COMPRESSED | UVC_FMT_FLAG_STREAM;
format->bpp = 0;
@@ -375,7 +356,7 @@ static int uvc_parse_format(struct uvc_device *dev,
return -EINVAL;
}
- uvc_dbg(dev, DESCR, "Found format %s\n", format->name);
+ uvc_dbg(dev, DESCR, "Found format %p4cc", &format->fcc);
buflen -= buffer[0];
buffer += buffer[0];
@@ -732,6 +713,7 @@ static int uvc_parse_streaming(struct uvc_device *dev,
/* Parse the alternate settings to find the maximum bandwidth. */
for (i = 0; i < intf->num_altsetting; ++i) {
struct usb_host_endpoint *ep;
+
alts = &intf->altsetting[i];
ep = uvc_find_endpoint(alts,
streaming->header.bEndpointAddress);
@@ -813,6 +795,27 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id,
return entity;
}
+static void uvc_entity_set_name(struct uvc_device *dev, struct uvc_entity *entity,
+ const char *type_name, u8 string_id)
+{
+ int ret;
+
+ /*
+ * First attempt to read the entity name from the device. If the entity
+ * has no associated string, or if reading the string fails (most
+ * likely due to a buggy firmware), fall back to default names based on
+ * the entity type.
+ */
+ if (string_id) {
+ ret = usb_string(dev->udev, string_id, entity->name,
+ sizeof(entity->name));
+ if (!ret)
+ return;
+ }
+
+ sprintf(entity->name, "%s %u", type_name, entity->id);
+}
+
/* Parse vendor-specific extensions. */
static int uvc_parse_vendor_control(struct uvc_device *dev,
const unsigned char *buffer, int buflen)
@@ -879,11 +882,7 @@ static int uvc_parse_vendor_control(struct uvc_device *dev,
+ n;
memcpy(unit->extension.bmControls, &buffer[23+p], 2*n);
- if (buffer[24+p+2*n] != 0)
- usb_string(udev, buffer[24+p+2*n], unit->name,
- sizeof(unit->name));
- else
- sprintf(unit->name, "Extension %u", buffer[3]);
+ uvc_entity_set_name(dev, unit, "Extension", buffer[24+p+2*n]);
list_add_tail(&unit->list, &dev->entities);
handled = 1;
@@ -901,6 +900,7 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
struct usb_interface *intf;
struct usb_host_interface *alts = dev->intf->cur_altsetting;
unsigned int i, n, p, len;
+ const char *type_name;
u16 type;
switch (buffer[2]) {
@@ -1006,15 +1006,14 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
memcpy(term->media.bmTransportModes, &buffer[10+n], p);
}
- if (buffer[7] != 0)
- usb_string(udev, buffer[7], term->name,
- sizeof(term->name));
- else if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA)
- sprintf(term->name, "Camera %u", buffer[3]);
+ if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA)
+ type_name = "Camera";
else if (UVC_ENTITY_TYPE(term) == UVC_ITT_MEDIA_TRANSPORT_INPUT)
- sprintf(term->name, "Media %u", buffer[3]);
+ type_name = "Media";
else
- sprintf(term->name, "Input %u", buffer[3]);
+ type_name = "Input";
+
+ uvc_entity_set_name(dev, term, type_name, buffer[7]);
list_add_tail(&term->list, &dev->entities);
break;
@@ -1047,11 +1046,7 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
memcpy(term->baSourceID, &buffer[7], 1);
- if (buffer[8] != 0)
- usb_string(udev, buffer[8], term->name,
- sizeof(term->name));
- else
- sprintf(term->name, "Output %u", buffer[3]);
+ uvc_entity_set_name(dev, term, "Output", buffer[8]);
list_add_tail(&term->list, &dev->entities);
break;
@@ -1072,11 +1067,7 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
memcpy(unit->baSourceID, &buffer[5], p);
- if (buffer[5+p] != 0)
- usb_string(udev, buffer[5+p], unit->name,
- sizeof(unit->name));
- else
- sprintf(unit->name, "Selector %u", buffer[3]);
+ uvc_entity_set_name(dev, unit, "Selector", buffer[5+p]);
list_add_tail(&unit->list, &dev->entities);
break;
@@ -1105,11 +1096,7 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
if (dev->uvc_version >= 0x0110)
unit->processing.bmVideoStandards = buffer[9+n];
- if (buffer[8+n] != 0)
- usb_string(udev, buffer[8+n], unit->name,
- sizeof(unit->name));
- else
- sprintf(unit->name, "Processing %u", buffer[3]);
+ uvc_entity_set_name(dev, unit, "Processing", buffer[8+n]);
list_add_tail(&unit->list, &dev->entities);
break;
@@ -1136,11 +1123,7 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
unit->extension.bmControls = (u8 *)unit + sizeof(*unit);
memcpy(unit->extension.bmControls, &buffer[23+p], n);
- if (buffer[23+p+n] != 0)
- usb_string(udev, buffer[23+p+n], unit->name,
- sizeof(unit->name));
- else
- sprintf(unit->name, "Extension %u", buffer[3]);
+ uvc_entity_set_name(dev, unit, "Extension", buffer[23+p+n]);
list_add_tail(&unit->list, &dev->entities);
break;
@@ -1173,7 +1156,8 @@ static int uvc_parse_control(struct uvc_device *dev)
buffer[1] != USB_DT_CS_INTERFACE)
goto next_descriptor;
- if ((ret = uvc_parse_standard_control(dev, buffer, buflen)) < 0)
+ ret = uvc_parse_standard_control(dev, buffer, buflen);
+ if (ret < 0)
return ret;
next_descriptor:
@@ -1856,12 +1840,14 @@ static void uvc_delete(struct kref *kref)
list_for_each_safe(p, n, &dev->chains) {
struct uvc_video_chain *chain;
+
chain = list_entry(p, struct uvc_video_chain, list);
kfree(chain);
}
list_for_each_safe(p, n, &dev->entities) {
struct uvc_entity *entity;
+
entity = list_entry(p, struct uvc_entity, list);
#ifdef CONFIG_MEDIA_CONTROLLER
uvc_mc_cleanup_entity(entity);
@@ -1871,6 +1857,7 @@ static void uvc_delete(struct kref *kref)
list_for_each_safe(p, n, &dev->streams) {
struct uvc_streaming *streaming;
+
streaming = list_entry(p, struct uvc_streaming, list);
usb_driver_release_interface(&uvc_driver.driver,
streaming->intf);
@@ -2206,7 +2193,8 @@ static int uvc_probe(struct usb_interface *intf,
usb_set_intfdata(intf, dev);
/* Initialize the interrupt URB. */
- if ((ret = uvc_status_init(dev)) < 0) {
+ ret = uvc_status_init(dev);
+ if (ret < 0) {
dev_info(&dev->udev->dev,
"Unable to initialize the status endpoint (%d), status interrupt will not be supported.\n",
ret);
@@ -2353,40 +2341,23 @@ static int uvc_clock_param_set(const char *val, const struct kernel_param *kp)
}
module_param_call(clock, uvc_clock_param_set, uvc_clock_param_get,
- &uvc_clock_param, S_IRUGO|S_IWUSR);
+ &uvc_clock_param, 0644);
MODULE_PARM_DESC(clock, "Video buffers timestamp clock");
-module_param_named(hwtimestamps, uvc_hw_timestamps_param, uint, S_IRUGO|S_IWUSR);
+module_param_named(hwtimestamps, uvc_hw_timestamps_param, uint, 0644);
MODULE_PARM_DESC(hwtimestamps, "Use hardware timestamps");
-module_param_named(nodrop, uvc_no_drop_param, uint, S_IRUGO|S_IWUSR);
+module_param_named(nodrop, uvc_no_drop_param, uint, 0644);
MODULE_PARM_DESC(nodrop, "Don't drop incomplete frames");
-module_param_named(quirks, uvc_quirks_param, uint, S_IRUGO|S_IWUSR);
+module_param_named(quirks, uvc_quirks_param, uint, 0644);
MODULE_PARM_DESC(quirks, "Forced device quirks");
-module_param_named(trace, uvc_dbg_param, uint, S_IRUGO|S_IWUSR);
+module_param_named(trace, uvc_dbg_param, uint, 0644);
MODULE_PARM_DESC(trace, "Trace level bitmask");
-module_param_named(timeout, uvc_timeout_param, uint, S_IRUGO|S_IWUSR);
+module_param_named(timeout, uvc_timeout_param, uint, 0644);
MODULE_PARM_DESC(timeout, "Streaming control requests timeout");
/* ------------------------------------------------------------------------
* Driver initialization and cleanup
*/
-static const struct uvc_menu_info power_line_frequency_controls_limited[] = {
- { 1, "50 Hz" },
- { 2, "60 Hz" },
-};
-
-static const struct uvc_control_mapping uvc_ctrl_power_line_mapping_limited = {
- .id = V4L2_CID_POWER_LINE_FREQUENCY,
- .entity = UVC_GUID_UVC_PROCESSING,
- .selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
- .size = 2,
- .offset = 0,
- .v4l2_type = V4L2_CTRL_TYPE_MENU,
- .data_type = UVC_CTRL_DATA_TYPE_ENUM,
- .menu_info = power_line_frequency_controls_limited,
- .menu_count = ARRAY_SIZE(power_line_frequency_controls_limited),
-};
-
static const struct uvc_device_info uvc_ctrl_power_line_limited = {
.mappings = (const struct uvc_control_mapping *[]) {
&uvc_ctrl_power_line_mapping_limited,
@@ -2394,6 +2365,13 @@ static const struct uvc_device_info uvc_ctrl_power_line_limited = {
},
};
+static const struct uvc_device_info uvc_ctrl_power_line_uvc11 = {
+ .mappings = (const struct uvc_control_mapping *[]) {
+ &uvc_ctrl_power_line_mapping_uvc11,
+ NULL, /* Sentinel */
+ },
+};
+
static const struct uvc_device_info uvc_quirk_probe_minmax = {
.quirks = UVC_QUIRK_PROBE_MINMAX,
};
@@ -2496,6 +2474,24 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax },
+ /* Logitech, Webcam C910 */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x046d,
+ .idProduct = 0x0821,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_WAKE_AUTOSUSPEND)},
+ /* Logitech, Webcam B910 */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x046d,
+ .idProduct = 0x0823,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_WAKE_AUTOSUSPEND)},
/* Logitech Quickcam Fusion */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
@@ -2973,6 +2969,15 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_INFO_QUIRK(UVC_QUIRK_FORCE_BPP) },
+ /* Lenovo Integrated Camera */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x30c9,
+ .idProduct = 0x0093,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = UVC_PC_PROTOCOL_15,
+ .driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_uvc11 },
/* Sonix Technology USB 2.0 Camera */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
@@ -2991,6 +2996,24 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_limited },
+ /* Acer EasyCamera */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x5986,
+ .idProduct = 0x1180,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_limited },
+ /* Acer EasyCamera */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x5986,
+ .idProduct = 0x1180,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_limited },
/* Intel RealSense D4M */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
diff --git a/drivers/media/usb/uvc/uvc_entity.c b/drivers/media/usb/uvc/uvc_entity.c
index 7c4d2f93d351..cc68dd24eb42 100644
--- a/drivers/media/usb/uvc/uvc_entity.c
+++ b/drivers/media/usb/uvc/uvc_entity.c
@@ -37,7 +37,7 @@ static int uvc_mc_create_links(struct uvc_video_chain *chain,
continue;
remote = uvc_entity_by_id(chain->dev, entity->baSourceID[i]);
- if (remote == NULL)
+ if (remote == NULL || remote->num_pads == 0)
return -EINVAL;
source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING)
diff --git a/drivers/media/usb/uvc/uvc_status.c b/drivers/media/usb/uvc/uvc_status.c
index 7518ffce22ed..a78a88c710e2 100644
--- a/drivers/media/usb/uvc/uvc_status.c
+++ b/drivers/media/usb/uvc/uvc_status.c
@@ -6,6 +6,7 @@
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*/
+#include <asm/barrier.h>
#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/slab.h>
@@ -18,11 +19,34 @@
* Input device
*/
#ifdef CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV
+
+static bool uvc_input_has_button(struct uvc_device *dev)
+{
+ struct uvc_streaming *stream;
+
+ /*
+ * The device has button events if both bTriggerSupport and
+ * bTriggerUsage are one. Otherwise the camera button does not
+ * exist or is handled automatically by the camera without host
+ * driver or client application intervention.
+ */
+ list_for_each_entry(stream, &dev->streams, list) {
+ if (stream->header.bTriggerSupport == 1 &&
+ stream->header.bTriggerUsage == 1)
+ return true;
+ }
+
+ return false;
+}
+
static int uvc_input_init(struct uvc_device *dev)
{
struct input_dev *input;
int ret;
+ if (!uvc_input_has_button(dev))
+ return 0;
+
input = input_allocate_device();
if (input == NULL)
return -ENOMEM;
@@ -73,38 +97,23 @@ static void uvc_input_report_key(struct uvc_device *dev, unsigned int code,
/* --------------------------------------------------------------------------
* Status interrupt endpoint
*/
-struct uvc_streaming_status {
- u8 bStatusType;
- u8 bOriginator;
- u8 bEvent;
- u8 bValue[];
-} __packed;
-
-struct uvc_control_status {
- u8 bStatusType;
- u8 bOriginator;
- u8 bEvent;
- u8 bSelector;
- u8 bAttribute;
- u8 bValue[];
-} __packed;
-
static void uvc_event_streaming(struct uvc_device *dev,
- struct uvc_streaming_status *status, int len)
+ struct uvc_status *status, int len)
{
- if (len < 3) {
+ if (len <= offsetof(struct uvc_status, bEvent)) {
uvc_dbg(dev, STATUS,
"Invalid streaming status event received\n");
return;
}
if (status->bEvent == 0) {
- if (len < 4)
+ if (len <= offsetof(struct uvc_status, streaming))
return;
+
uvc_dbg(dev, STATUS, "Button (intf %u) %s len %d\n",
status->bOriginator,
- status->bValue[0] ? "pressed" : "released", len);
- uvc_input_report_key(dev, KEY_CAMERA, status->bValue[0]);
+ status->streaming.button ? "pressed" : "released", len);
+ uvc_input_report_key(dev, KEY_CAMERA, status->streaming.button);
} else {
uvc_dbg(dev, STATUS, "Stream %u error event %02x len %d\n",
status->bOriginator, status->bEvent, len);
@@ -131,7 +140,7 @@ static struct uvc_control *uvc_event_entity_find_ctrl(struct uvc_entity *entity,
}
static struct uvc_control *uvc_event_find_ctrl(struct uvc_device *dev,
- const struct uvc_control_status *status,
+ const struct uvc_status *status,
struct uvc_video_chain **chain)
{
list_for_each_entry((*chain), &dev->chains, list) {
@@ -143,7 +152,7 @@ static struct uvc_control *uvc_event_find_ctrl(struct uvc_device *dev,
continue;
ctrl = uvc_event_entity_find_ctrl(entity,
- status->bSelector);
+ status->control.bSelector);
if (ctrl)
return ctrl;
}
@@ -153,7 +162,7 @@ static struct uvc_control *uvc_event_find_ctrl(struct uvc_device *dev,
}
static bool uvc_event_control(struct urb *urb,
- const struct uvc_control_status *status, int len)
+ const struct uvc_status *status, int len)
{
static const char *attrs[] = { "value", "info", "failure", "min", "max" };
struct uvc_device *dev = urb->context;
@@ -161,24 +170,24 @@ static bool uvc_event_control(struct urb *urb,
struct uvc_control *ctrl;
if (len < 6 || status->bEvent != 0 ||
- status->bAttribute >= ARRAY_SIZE(attrs)) {
+ status->control.bAttribute >= ARRAY_SIZE(attrs)) {
uvc_dbg(dev, STATUS, "Invalid control status event received\n");
return false;
}
uvc_dbg(dev, STATUS, "Control %u/%u %s change len %d\n",
- status->bOriginator, status->bSelector,
- attrs[status->bAttribute], len);
+ status->bOriginator, status->control.bSelector,
+ attrs[status->control.bAttribute], len);
/* Find the control. */
ctrl = uvc_event_find_ctrl(dev, status, &chain);
if (!ctrl)
return false;
- switch (status->bAttribute) {
+ switch (status->control.bAttribute) {
case UVC_CTRL_VALUE_CHANGE:
return uvc_ctrl_status_event_async(urb, chain, ctrl,
- status->bValue);
+ status->control.bValue);
case UVC_CTRL_INFO_CHANGE:
case UVC_CTRL_FAILURE_CHANGE:
@@ -214,28 +223,22 @@ static void uvc_status_complete(struct urb *urb)
len = urb->actual_length;
if (len > 0) {
- switch (dev->status[0] & 0x0f) {
+ switch (dev->status->bStatusType & 0x0f) {
case UVC_STATUS_TYPE_CONTROL: {
- struct uvc_control_status *status =
- (struct uvc_control_status *)dev->status;
-
- if (uvc_event_control(urb, status, len))
+ if (uvc_event_control(urb, dev->status, len))
/* The URB will be resubmitted in work context. */
return;
break;
}
case UVC_STATUS_TYPE_STREAMING: {
- struct uvc_streaming_status *status =
- (struct uvc_streaming_status *)dev->status;
-
- uvc_event_streaming(dev, status, len);
+ uvc_event_streaming(dev, dev->status, len);
break;
}
default:
uvc_dbg(dev, STATUS, "Unknown status event type %u\n",
- dev->status[0]);
+ dev->status->bStatusType);
break;
}
}
@@ -259,12 +262,12 @@ int uvc_status_init(struct uvc_device *dev)
uvc_input_init(dev);
- dev->status = kzalloc(UVC_MAX_STATUS_SIZE, GFP_KERNEL);
- if (dev->status == NULL)
+ dev->status = kzalloc(sizeof(*dev->status), GFP_KERNEL);
+ if (!dev->status)
return -ENOMEM;
dev->int_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (dev->int_urb == NULL) {
+ if (!dev->int_urb) {
kfree(dev->status);
return -ENOMEM;
}
@@ -281,7 +284,7 @@ int uvc_status_init(struct uvc_device *dev)
interval = fls(interval) - 1;
usb_fill_int_urb(dev->int_urb, dev->udev, pipe,
- dev->status, UVC_MAX_STATUS_SIZE, uvc_status_complete,
+ dev->status, sizeof(*dev->status), uvc_status_complete,
dev, interval);
return 0;
@@ -309,5 +312,41 @@ int uvc_status_start(struct uvc_device *dev, gfp_t flags)
void uvc_status_stop(struct uvc_device *dev)
{
+ struct uvc_ctrl_work *w = &dev->async_ctrl;
+
+ /*
+ * Prevent the asynchronous control handler from requeing the URB. The
+ * barrier is needed so the flush_status change is visible to other
+ * CPUs running the asynchronous handler before usb_kill_urb() is
+ * called below.
+ */
+ smp_store_release(&dev->flush_status, true);
+
+ /*
+ * Cancel any pending asynchronous work. If any status event was queued,
+ * process it synchronously.
+ */
+ if (cancel_work_sync(&w->work))
+ uvc_ctrl_status_event(w->chain, w->ctrl, w->data);
+
+ /* Kill the urb. */
usb_kill_urb(dev->int_urb);
+
+ /*
+ * The URB completion handler may have queued asynchronous work. This
+ * won't resubmit the URB as flush_status is set, but it needs to be
+ * cancelled before returning or it could then race with a future
+ * uvc_status_start() call.
+ */
+ if (cancel_work_sync(&w->work))
+ uvc_ctrl_status_event(w->chain, w->ctrl, w->data);
+
+ /*
+ * From this point, there are no events on the queue and the status URB
+ * is dead. No events will be queued until uvc_status_start() is called.
+ * The barrier is needed to make sure that flush_status is visible to
+ * uvc_ctrl_status_event_work() when uvc_status_start() will be called
+ * again.
+ */
+ smp_store_release(&dev->flush_status, false);
}
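The comments above describe a store-release/load-acquire pairing between uvc_status_stop() and the asynchronous control work; reduced to its two sides (both already visible in the hunks above, shown together here purely for clarity) it is:

/* uvc_status_stop(): publish the flag before cancelling work and killing the URB. */
smp_store_release(&dev->flush_status, true);

/* uvc_ctrl_status_event_work(): observe the flag before resubmitting the URB. */
if (smp_load_acquire(&dev->flush_status))
        return;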
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index f4d4c33b6dfb..35453f81c1d9 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -6,6 +6,7 @@
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*/
+#include <linux/bits.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -25,14 +26,84 @@
#include "uvcvideo.h"
+static int uvc_control_add_xu_mapping(struct uvc_video_chain *chain,
+ struct uvc_control_mapping *map,
+ const struct uvc_xu_control_mapping *xmap)
+{
+ unsigned int i;
+ size_t size;
+ int ret;
+
+ /*
+ * Prevent excessive memory consumption, as well as integer
+ * overflows.
+ */
+ if (xmap->menu_count == 0 ||
+ xmap->menu_count > UVC_MAX_CONTROL_MENU_ENTRIES)
+ return -EINVAL;
+
+ map->menu_names = NULL;
+ map->menu_mapping = NULL;
+
+ map->menu_mask = BIT_MASK(xmap->menu_count);
+
+ size = xmap->menu_count * sizeof(*map->menu_mapping);
+ map->menu_mapping = kzalloc(size, GFP_KERNEL);
+ if (!map->menu_mapping) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ for (i = 0; i < xmap->menu_count ; i++) {
+ if (copy_from_user((u32 *)&map->menu_mapping[i],
+ &xmap->menu_info[i].value,
+ sizeof(map->menu_mapping[i]))) {
+ ret = -EACCES;
+ goto done;
+ }
+ }
+
+ /*
+ * Always use the standard naming if available, otherwise copy the
+ * names supplied by userspace.
+ */
+ if (!v4l2_ctrl_get_menu(map->id)) {
+ size = xmap->menu_count * sizeof(map->menu_names[0]);
+ map->menu_names = kzalloc(size, GFP_KERNEL);
+ if (!map->menu_names) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ for (i = 0; i < xmap->menu_count ; i++) {
+ /* sizeof(names[i]) - 1: to take care of \0 */
+ if (copy_from_user((char *)map->menu_names[i],
+ xmap->menu_info[i].name,
+ sizeof(map->menu_names[i]) - 1)) {
+ ret = -EACCES;
+ goto done;
+ }
+ }
+ }
+
+ ret = uvc_ctrl_add_mapping(chain, map);
+
+done:
+ kfree(map->menu_names);
+ map->menu_names = NULL;
+ kfree(map->menu_mapping);
+ map->menu_mapping = NULL;
+
+ return ret;
+}
+
/* ------------------------------------------------------------------------
* UVC ioctls
*/
-static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
- struct uvc_xu_control_mapping *xmap)
+static int uvc_ioctl_xu_ctrl_map(struct uvc_video_chain *chain,
+ struct uvc_xu_control_mapping *xmap)
{
struct uvc_control_mapping *map;
- unsigned int size;
int ret;
map = kzalloc(sizeof(*map), GFP_KERNEL);
@@ -60,39 +131,20 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
case V4L2_CTRL_TYPE_INTEGER:
case V4L2_CTRL_TYPE_BOOLEAN:
case V4L2_CTRL_TYPE_BUTTON:
+ ret = uvc_ctrl_add_mapping(chain, map);
break;
case V4L2_CTRL_TYPE_MENU:
- /*
- * Prevent excessive memory consumption, as well as integer
- * overflows.
- */
- if (xmap->menu_count == 0 ||
- xmap->menu_count > UVC_MAX_CONTROL_MENU_ENTRIES) {
- ret = -EINVAL;
- goto free_map;
- }
-
- size = xmap->menu_count * sizeof(*map->menu_info);
- map->menu_info = memdup_user(xmap->menu_info, size);
- if (IS_ERR(map->menu_info)) {
- ret = PTR_ERR(map->menu_info);
- goto free_map;
- }
-
- map->menu_count = xmap->menu_count;
+ ret = uvc_control_add_xu_mapping(chain, map, xmap);
break;
default:
uvc_dbg(chain->dev, CONTROL,
"Unsupported V4L2 control type %u\n", xmap->v4l2_type);
ret = -ENOTTY;
- goto free_map;
+ break;
}
- ret = uvc_ctrl_add_mapping(chain, map);
-
- kfree(map->menu_info);
free_map:
kfree(map);
@@ -660,8 +712,6 @@ static int uvc_ioctl_enum_fmt(struct uvc_streaming *stream,
fmt->flags = 0;
if (format->flags & UVC_FMT_FLAG_COMPRESSED)
fmt->flags |= V4L2_FMT_FLAG_COMPRESSED;
- strscpy(fmt->description, format->name, sizeof(fmt->description));
- fmt->description[sizeof(fmt->description) - 1] = 0;
fmt->pixelformat = format->fcc;
return 0;
}
@@ -1020,8 +1070,7 @@ static int uvc_ctrl_check_access(struct uvc_video_chain *chain,
int ret = 0;
for (i = 0; i < ctrls->count; ++ctrl, ++i) {
- ret = uvc_ctrl_is_accessible(chain, ctrl->id,
- ioctl == VIDIOC_G_EXT_CTRLS);
+ ret = uvc_ctrl_is_accessible(chain, ctrl->id, ctrls, ioctl);
if (ret)
break;
}
@@ -1316,7 +1365,7 @@ static long uvc_ioctl_default(struct file *file, void *fh, bool valid_prio,
switch (cmd) {
/* Dynamic controls. */
case UVCIOC_CTRL_MAP:
- return uvc_ioctl_ctrl_map(chain, arg);
+ return uvc_ioctl_xu_ctrl_map(chain, arg);
case UVCIOC_CTRL_QUERY:
return uvc_xu_ctrl_query(chain, arg);
@@ -1429,7 +1478,7 @@ static long uvc_v4l2_compat_ioctl32(struct file *file,
ret = uvc_v4l2_get_xu_mapping(&karg.xmap, up);
if (ret)
return ret;
- ret = uvc_ioctl_ctrl_map(handle->chain, &karg.xmap);
+ ret = uvc_ioctl_xu_ctrl_map(handle->chain, &karg.xmap);
if (ret)
return ret;
ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index d2eb9066e4dc..d4b023d4de7c 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -79,13 +79,14 @@ int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
if (likely(ret == size))
return 0;
- dev_err(&dev->udev->dev,
- "Failed to query (%s) UVC control %u on unit %u: %d (exp. %u).\n",
- uvc_query_name(query), cs, unit, ret, size);
-
- if (ret != -EPIPE)
- return ret;
+ if (ret != -EPIPE) {
+ dev_err(&dev->udev->dev,
+ "Failed to query (%s) UVC control %u on unit %u: %d (exp. %u).\n",
+ uvc_query_name(query), cs, unit, ret, size);
+ return ret < 0 ? ret : -EPIPE;
+ }
+ /* Reuse data[0] to request the error code. */
tmp = *(u8 *)data;
ret = __uvc_query_ctrl(dev, UVC_GET_CUR, 0, intfnum,
@@ -107,7 +108,7 @@ int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
case 1: /* Not ready */
return -EBUSY;
case 2: /* Wrong state */
- return -EILSEQ;
+ return -EACCES;
case 3: /* Power */
return -EREMOTE;
case 4: /* Out of range */
@@ -129,12 +130,13 @@ int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
return -EPIPE;
}
+static const struct usb_device_id elgato_cam_link_4k = {
+ USB_DEVICE(0x0fd9, 0x0066)
+};
+
static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
struct uvc_streaming_control *ctrl)
{
- static const struct usb_device_id elgato_cam_link_4k = {
- USB_DEVICE(0x0fd9, 0x0066)
- };
struct uvc_format *format = NULL;
struct uvc_frame *frame = NULL;
unsigned int i;
@@ -297,7 +299,7 @@ static int uvc_get_video_ctrl(struct uvc_streaming *stream,
dev_err(&stream->intf->dev,
"Failed to query (%u) UVC %s control : %d (exp. %u).\n",
query, probe ? "probe" : "commit", ret, size);
- ret = -EIO;
+ ret = (ret == -EPROTO) ? -EPROTO : -EIO;
goto out;
}
@@ -516,7 +518,9 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
/*
* To limit the amount of data, drop SCRs with an SOF identical to the
- * previous one.
+ * previous one. This filtering is also needed to support UVC 1.5, where
+	 * all the data packets of the same frame contain the same SOF. In that
+ * case only the first one will match the host_sof.
*/
dev_sof = get_unaligned_le16(&data[header_size - 2]);
if (dev_sof == stream->clock.last_sof)
@@ -1352,7 +1356,9 @@ static void uvc_video_decode_meta(struct uvc_streaming *stream,
if (has_scr)
memcpy(stream->clock.last_scr, scr, 6);
- memcpy(&meta->length, mem, length);
+ meta->length = mem[0];
+ meta->flags = mem[1];
+ memcpy(meta->buf, &mem[2], length - 2);
meta_buf->bytesused += length + sizeof(meta->ns) + sizeof(meta->sof);
uvc_dbg(stream->dev, FRAME,
@@ -1965,6 +1971,17 @@ static int uvc_video_start_transfer(struct uvc_streaming *stream,
"Selecting alternate setting %u (%u B/frame bandwidth)\n",
altsetting, best_psize);
+ /*
+ * Some devices, namely the Logitech C910 and B910, are unable
+ * to recover from a USB autosuspend, unless the alternate
+ * setting of the streaming interface is toggled.
+ */
+ if (stream->dev->quirks & UVC_QUIRK_WAKE_AUTOSUSPEND) {
+ usb_set_interface(stream->dev->udev, intfnum,
+ altsetting);
+ usb_set_interface(stream->dev->udev, intfnum, 0);
+ }
+
ret = usb_set_interface(stream->dev->udev, intfnum, altsetting);
if (ret < 0)
return ret;
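
The new quirk bit is wired up to affected devices through the driver's usb_device_id table in uvc_driver.c, which is outside this hunk. A hypothetical entry is sketched below; the VID/PID pair (assumed here to be the Logitech C910) is given only for illustration:

/* Hypothetical uvc_ids[] entry; the device IDs are an assumption, not from this diff. */
{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
			| USB_DEVICE_ID_MATCH_INT_INFO,
  .idVendor		= 0x046d,
  .idProduct		= 0x0821,
  .bInterfaceClass	= USB_CLASS_VIDEO,
  .bInterfaceSubClass	= 1,
  .bInterfaceProtocol	= 0,
  .driver_info		= UVC_INFO_QUIRK(UVC_QUIRK_WAKE_AUTOSUSPEND) },
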
@@ -2121,6 +2138,21 @@ int uvc_video_init(struct uvc_streaming *stream)
* request on the probe control, as required by the UVC specification.
*/
ret = uvc_get_video_ctrl(stream, probe, 1, UVC_GET_CUR);
+
+ /*
+ * Elgato Cam Link 4k can be in a stalled state if the resolution of
+ * the external source has changed while the firmware initializes.
+ * Once in this state, the device is useless until it receives a
+ * USB reset. It has even been observed that the stalled state will
+ * continue even after unplugging the device.
+ */
+ if (ret == -EPROTO &&
+ usb_match_one_id(stream->dev->intf, &elgato_cam_link_4k)) {
+ dev_err(&stream->intf->dev, "Elgato Cam Link 4K firmware crash detected\n");
+ dev_err(&stream->intf->dev, "Resetting the device, unplug and replug to recover\n");
+ usb_reset_device(stream->dev->udev);
+ }
+
if (ret < 0)
return ret;
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index df93db259312..9a596c8d894a 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -51,8 +51,6 @@
#define UVC_URBS 5
/* Maximum number of packets per URB. */
#define UVC_MAX_PACKETS 32
-/* Maximum status buffer size in bytes of interrupt URB. */
-#define UVC_MAX_STATUS_SIZE 16
#define UVC_CTRL_CONTROL_TIMEOUT 5000
#define UVC_CTRL_STREAMING_TIMEOUT 5000
@@ -74,6 +72,7 @@
#define UVC_QUIRK_RESTORE_CTRLS_ON_INIT 0x00000400
#define UVC_QUIRK_FORCE_Y8 0x00000800
#define UVC_QUIRK_FORCE_BPP 0x00001000
+#define UVC_QUIRK_WAKE_AUTOSUSPEND 0x00002000
/* Format flags */
#define UVC_FMT_FLAG_COMPRESSED 0x00000001
@@ -116,8 +115,9 @@ struct uvc_control_mapping {
enum v4l2_ctrl_type v4l2_type;
u32 data_type;
- const struct uvc_menu_info *menu_info;
- u32 menu_count;
+ const u32 *menu_mapping;
+ const char (*menu_names)[UVC_MENU_NAME_LEN];
+ unsigned long menu_mask;
u32 master_id;
s32 master_manual;
@@ -264,8 +264,6 @@ struct uvc_format {
u32 fcc;
u32 flags;
- char name[32];
-
unsigned int nframes;
struct uvc_frame *frame;
};
@@ -527,6 +525,26 @@ struct uvc_device_info {
const struct uvc_control_mapping **mappings;
};
+struct uvc_status_streaming {
+ u8 button;
+} __packed;
+
+struct uvc_status_control {
+ u8 bSelector;
+ u8 bAttribute;
+ u8 bValue[11];
+} __packed;
+
+struct uvc_status {
+ u8 bStatusType;
+ u8 bOriginator;
+ u8 bEvent;
+ union {
+ struct uvc_status_control control;
+ struct uvc_status_streaming streaming;
+ };
+} __packed;
+
struct uvc_device {
struct usb_device *udev;
struct usb_interface *intf;
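
For orientation, the struct added above mirrors the layout of a UVC status interrupt packet. An illustrative decode with fabricated bytes (bStatusType 1 is the VideoControl interface per the UVC specification):

static void example_status_decode(void)
{
	static const u8 pkt[] = { 0x01, 0x02, 0x00, 0x04, 0x00, 0x80 };
	const struct uvc_status *s = (const struct uvc_status *)pkt;

	/*
	 * s->bStatusType == 1 (VideoControl), s->bOriginator == 2 (entity ID),
	 * s->bEvent == 0 (control change), s->control.bSelector == 4,
	 * s->control.bAttribute == 0 (value change), value bytes follow.
	 */
	(void)s;
}
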
@@ -559,7 +577,9 @@ struct uvc_device {
/* Status Interrupt Endpoint */
struct usb_host_endpoint *int_ep;
struct urb *int_urb;
- u8 *status;
+ struct uvc_status *status;
+ bool flush_status;
+
struct input_dev *input;
char input_phys[64];
@@ -728,6 +748,8 @@ int uvc_status_start(struct uvc_device *dev, gfp_t flags);
void uvc_status_stop(struct uvc_device *dev);
/* Controls */
+extern const struct uvc_control_mapping uvc_ctrl_power_line_mapping_limited;
+extern const struct uvc_control_mapping uvc_ctrl_power_line_mapping_uvc11;
extern const struct v4l2_subscribed_event_ops uvc_ctrl_sub_ev_ops;
int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
@@ -761,7 +783,8 @@ static inline int uvc_ctrl_rollback(struct uvc_fh *handle)
int uvc_ctrl_get(struct uvc_video_chain *chain, struct v4l2_ext_control *xctrl);
int uvc_ctrl_set(struct uvc_fh *handle, struct v4l2_ext_control *xctrl);
int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
- bool read);
+ const struct v4l2_ext_controls *ctrls,
+ unsigned long ioctl);
int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
struct uvc_xu_control_query *xqry);
diff --git a/drivers/of/device.c b/drivers/of/device.c
index c674a13c3055..8271793ef379 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -256,7 +256,7 @@ static ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len
ssize_t csize;
ssize_t tsize;
- if ((!dev) || (!dev->of_node))
+ if ((!dev) || (!dev->of_node) || dev->of_node_reused)
return -ENODEV;
/* Name & Type */
@@ -376,7 +376,7 @@ int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
{
int sl;
- if ((!dev) || (!dev->of_node))
+ if ((!dev) || (!dev->of_node) || dev->of_node_reused)
return -ENODEV;
/* Devicetree modalias is tricky, we add it in 2 steps */
@@ -385,6 +385,8 @@ int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
sl = of_device_get_modalias(dev, &env->buf[env->buflen-1],
sizeof(env->buf) - env->buflen);
+ if (sl < 0)
+ return sl;
if (sl >= (sizeof(env->buf) - env->buflen))
return -ENOMEM;
env->buflen += sl;
diff --git a/drivers/phy/tegra/Makefile b/drivers/phy/tegra/Makefile
index 89b84067cb4c..eeeea72de117 100644
--- a/drivers/phy/tegra/Makefile
+++ b/drivers/phy/tegra/Makefile
@@ -7,4 +7,5 @@ phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_132_SOC) += xusb-tegra124.o
phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_210_SOC) += xusb-tegra210.o
phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_186_SOC) += xusb-tegra186.o
phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_194_SOC) += xusb-tegra186.o
+phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_234_SOC) += xusb-tegra186.o
obj-$(CONFIG_PHY_TEGRA194_P2U) += phy-tegra194-p2u.o
diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c
index 6a8bd87cfdbd..1aae8535f452 100644
--- a/drivers/phy/tegra/xusb-tegra186.c
+++ b/drivers/phy/tegra/xusb-tegra186.c
@@ -89,6 +89,11 @@
#define USB2_TRK_START_TIMER(x) (((x) & 0x7f) << 12)
#define USB2_TRK_DONE_RESET_TIMER(x) (((x) & 0x7f) << 19)
#define USB2_PD_TRK BIT(26)
+#define USB2_TRK_COMPLETED BIT(31)
+
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL2 0x28c
+#define USB2_TRK_HW_MODE BIT(0)
+#define CYA_TRK_CODE_UPDATE_ON_IDLE BIT(31)
#define XUSB_PADCTL_HSIC_PADX_CTL0(x) (0x300 + (x) * 0x20)
#define HSIC_PD_TX_DATA0 BIT(1)
@@ -609,6 +614,32 @@ static void tegra186_utmi_bias_pad_power_on(struct tegra_xusb_padctl *padctl)
value &= ~USB2_PD_TRK;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
+ if (padctl->soc->poll_trk_completed) {
+ err = padctl_readl_poll(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL1,
+ USB2_TRK_COMPLETED, USB2_TRK_COMPLETED, 100);
+ if (err) {
+			/* A failure to poll for tracking completion does not
+			 * prevent the bias pad from being powered on.
+			 */
+ dev_warn(dev, "failed to poll USB2 trk completed: %d\n", err);
+ }
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
+ value |= USB2_TRK_COMPLETED;
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
+ } else {
+ udelay(100);
+ }
+
+ if (padctl->soc->trk_hw_mode) {
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL2);
+ value |= USB2_TRK_HW_MODE;
+ value &= ~CYA_TRK_CODE_UPDATE_ON_IDLE;
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL2);
+ } else {
+ clk_disable_unprepare(priv->usb2_trk_clk);
+ }
+
mutex_unlock(&padctl->lock);
}
@@ -633,7 +664,12 @@ static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl)
value |= USB2_PD_TRK;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
- clk_disable_unprepare(priv->usb2_trk_clk);
+ if (padctl->soc->trk_hw_mode) {
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL2);
+ value &= ~USB2_TRK_HW_MODE;
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL2);
+ clk_disable_unprepare(priv->usb2_trk_clk);
+ }
mutex_unlock(&padctl->lock);
}
@@ -1557,7 +1593,8 @@ const struct tegra_xusb_padctl_soc tegra186_xusb_padctl_soc = {
EXPORT_SYMBOL_GPL(tegra186_xusb_padctl_soc);
#endif
-#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
static const char * const tegra194_xusb_padctl_supply_names[] = {
"avdd-usb",
"vclamp-usb",
@@ -1613,8 +1650,31 @@ const struct tegra_xusb_padctl_soc tegra194_xusb_padctl_soc = {
.supply_names = tegra194_xusb_padctl_supply_names,
.num_supplies = ARRAY_SIZE(tegra194_xusb_padctl_supply_names),
.supports_gen2 = true,
+ .poll_trk_completed = true,
};
EXPORT_SYMBOL_GPL(tegra194_xusb_padctl_soc);
+
+const struct tegra_xusb_padctl_soc tegra234_xusb_padctl_soc = {
+ .num_pads = ARRAY_SIZE(tegra194_pads),
+ .pads = tegra194_pads,
+ .ports = {
+ .usb2 = {
+ .ops = &tegra186_usb2_port_ops,
+ .count = 4,
+ },
+ .usb3 = {
+ .ops = &tegra186_usb3_port_ops,
+ .count = 4,
+ },
+ },
+ .ops = &tegra186_xusb_padctl_ops,
+ .supply_names = tegra194_xusb_padctl_supply_names,
+ .num_supplies = ARRAY_SIZE(tegra194_xusb_padctl_supply_names),
+ .supports_gen2 = true,
+ .poll_trk_completed = true,
+ .trk_hw_mode = true,
+};
+EXPORT_SYMBOL_GPL(tegra234_xusb_padctl_soc);
#endif
MODULE_AUTHOR("JC Kuo <jckuo@nvidia.com>");
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index ff4b930879f3..3707a0b5c1ae 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -72,6 +72,12 @@ static const struct of_device_id tegra_xusb_padctl_of_match[] = {
.data = &tegra194_xusb_padctl_soc,
},
#endif
+#if defined(CONFIG_ARCH_TEGRA_234_SOC)
+ {
+ .compatible = "nvidia,tegra234-xusb-padctl",
+ .data = &tegra234_xusb_padctl_soc,
+ },
+#endif
{ }
};
MODULE_DEVICE_TABLE(of, tegra_xusb_padctl_of_match);
diff --git a/drivers/phy/tegra/xusb.h b/drivers/phy/tegra/xusb.h
index c384734a61c2..8bd6cd281119 100644
--- a/drivers/phy/tegra/xusb.h
+++ b/drivers/phy/tegra/xusb.h
@@ -8,6 +8,7 @@
#define __PHY_TEGRA_XUSB_H
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
@@ -431,6 +432,8 @@ struct tegra_xusb_padctl_soc {
unsigned int num_supplies;
bool supports_gen2;
bool need_fake_usb3_port;
+ bool poll_trk_completed;
+ bool trk_hw_mode;
};
struct tegra_xusb_padctl {
@@ -473,6 +476,23 @@ static inline u32 padctl_readl(struct tegra_xusb_padctl *padctl,
return value;
}
+static inline u32 padctl_readl_poll(struct tegra_xusb_padctl *padctl,
+ unsigned long offset, u32 val, u32 mask,
+ int us)
+{
+ u32 regval;
+ int err;
+
+ err = readl_poll_timeout(padctl->regs + offset, regval,
+ (regval & mask) == val, 1, us);
+ if (err) {
+ dev_err(padctl->dev, "%08lx poll timeout > %08x\n", offset,
+ regval);
+ }
+
+ return err;
+}
+
struct tegra_xusb_lane *tegra_xusb_find_lane(struct tegra_xusb_padctl *padctl,
const char *name,
unsigned int index);
@@ -489,5 +509,8 @@ extern const struct tegra_xusb_padctl_soc tegra186_xusb_padctl_soc;
#if defined(CONFIG_ARCH_TEGRA_194_SOC)
extern const struct tegra_xusb_padctl_soc tegra194_xusb_padctl_soc;
#endif
+#if defined(CONFIG_ARCH_TEGRA_234_SOC)
+extern const struct tegra_xusb_padctl_soc tegra234_xusb_padctl_soc;
+#endif
#endif /* __PHY_TEGRA_XUSB_H */
diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c
index 317e4f5fdb97..628225deb8fe 100644
--- a/drivers/thunderbolt/acpi.c
+++ b/drivers/thunderbolt/acpi.c
@@ -36,16 +36,13 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
* We need to do this because the xHCI driver might not yet be
* bound so the USB3 SuperSpeed ports are not yet created.
*/
- dev = acpi_get_first_physical_node(adev);
- while (!dev) {
- adev = acpi_dev_parent(adev);
- if (!adev)
- break;
+ do {
dev = acpi_get_first_physical_node(adev);
- }
+ if (dev)
+ break;
- if (!dev)
- goto out_put;
+ adev = acpi_dev_parent(adev);
+ } while (adev);
/*
* Check that the device is PCIe. This is because USB3
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 0c661a706160..6e7d28e8d81a 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -230,7 +230,6 @@ static int check_config_address(struct tb_cfg_address addr,
static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
{
struct cfg_error_pkg *pkg = response->buffer;
- struct tb_ctl *ctl = response->ctl;
struct tb_cfg_result res = { 0 };
res.response_route = tb_cfg_get_route(&pkg->header);
res.response_port = 0;
@@ -239,13 +238,6 @@ static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
if (res.err)
return res;
- if (pkg->zero1)
- tb_ctl_warn(ctl, "pkg->zero1 is %#x\n", pkg->zero1);
- if (pkg->zero2)
- tb_ctl_warn(ctl, "pkg->zero2 is %#x\n", pkg->zero2);
- if (pkg->zero3)
- tb_ctl_warn(ctl, "pkg->zero3 is %#x\n", pkg->zero3);
-
res.err = 1;
res.tb_error = pkg->error;
res.response_port = pkg->port;
@@ -416,6 +408,7 @@ static int tb_async_error(const struct ctl_pkg *pkg)
case TB_CFG_ERROR_LINK_ERROR:
case TB_CFG_ERROR_HEC_ERROR_DETECTED:
case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
+ case TB_CFG_ERROR_DP_BW:
return true;
default:
@@ -736,6 +729,47 @@ void tb_ctl_stop(struct tb_ctl *ctl)
/* public interface, commands */
/**
+ * tb_cfg_ack_notification() - Ack notification
+ * @ctl: Control channel to use
+ * @route: Router that originated the event
+ * @error: Pointer to the notification package
+ *
+ * Call this in response to a non-plug notification to ack it. Returns
+ * %0 on success or an error code on failure.
+ */
+int tb_cfg_ack_notification(struct tb_ctl *ctl, u64 route,
+ const struct cfg_error_pkg *error)
+{
+ struct cfg_ack_pkg pkg = {
+ .header = tb_cfg_make_header(route),
+ };
+ const char *name;
+
+ switch (error->error) {
+ case TB_CFG_ERROR_LINK_ERROR:
+ name = "link error";
+ break;
+ case TB_CFG_ERROR_HEC_ERROR_DETECTED:
+ name = "HEC error";
+ break;
+ case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
+ name = "flow control error";
+ break;
+ case TB_CFG_ERROR_DP_BW:
+ name = "DP_BW";
+ break;
+ default:
+ name = "unknown";
+ break;
+ }
+
+ tb_ctl_dbg(ctl, "acking %s (%#x) notification on %llx\n", name,
+ error->error, route);
+
+ return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_NOTIFY_ACK);
+}
+
+/**
* tb_cfg_ack_plug() - Ack hot plug/unplug event
* @ctl: Control channel to use
* @route: Router that originated the event
@@ -754,7 +788,7 @@ int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
.pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG
: TB_CFG_ERROR_PG_HOT_PLUG,
};
- tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%x\n",
+ tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%u\n",
unplug ? "un" : "", route, port);
return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
index 7c7d80f96c0c..eec5c953c743 100644
--- a/drivers/thunderbolt/ctl.h
+++ b/drivers/thunderbolt/ctl.h
@@ -122,6 +122,8 @@ static inline struct tb_cfg_header tb_cfg_make_header(u64 route)
return header;
}
+int tb_cfg_ack_notification(struct tb_ctl *ctl, u64 route,
+ const struct cfg_error_pkg *error);
int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug);
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route);
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c
index 834bcad42e9f..4339e706cc3a 100644
--- a/drivers/thunderbolt/debugfs.c
+++ b/drivers/thunderbolt/debugfs.c
@@ -1159,7 +1159,10 @@ static void port_cap_show(struct tb_port *port, struct seq_file *s,
if (tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) {
length = PORT_CAP_PCIE_LEN;
} else if (tb_port_is_dpin(port) || tb_port_is_dpout(port)) {
- length = PORT_CAP_DP_LEN;
+ if (usb4_dp_port_bw_mode_supported(port))
+ length = PORT_CAP_DP_LEN + 1;
+ else
+ length = PORT_CAP_DP_LEN;
} else if (tb_port_is_usb3_down(port) ||
tb_port_is_usb3_up(port)) {
length = PORT_CAP_USB3_LEN;
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 363d712aa364..e1ad1b437291 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -513,36 +513,44 @@ int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
while (retries--) {
state = tb_port_state(port);
- if (state < 0)
- return state;
- if (state == TB_PORT_DISABLED) {
+ switch (state) {
+ case TB_PORT_DISABLED:
tb_port_dbg(port, "is disabled (state: 0)\n");
return 0;
- }
- if (state == TB_PORT_UNPLUGGED) {
+
+ case TB_PORT_UNPLUGGED:
if (wait_if_unplugged) {
/* used during resume */
tb_port_dbg(port,
"is unplugged (state: 7), retrying...\n");
msleep(100);
- continue;
+ break;
}
tb_port_dbg(port, "is unplugged (state: 7)\n");
return 0;
- }
- if (state == TB_PORT_UP) {
- tb_port_dbg(port, "is connected, link is up (state: 2)\n");
+
+ case TB_PORT_UP:
+ case TB_PORT_TX_CL0S:
+ case TB_PORT_RX_CL0S:
+ case TB_PORT_CL1:
+ case TB_PORT_CL2:
+ tb_port_dbg(port, "is connected, link is up (state: %d)\n", state);
return 1;
+
+ default:
+ if (state < 0)
+ return state;
+
+ /*
+ * After plug-in the state is TB_PORT_CONNECTING. Give it some
+ * time.
+ */
+ tb_port_dbg(port,
+ "is connected, link is not up (state: %d), retrying...\n",
+ state);
+ msleep(100);
}
- /*
- * After plug-in the state is TB_PORT_CONNECTING. Give it some
- * time.
- */
- tb_port_dbg(port,
- "is connected, link is not up (state: %d), retrying...\n",
- state);
- msleep(100);
}
tb_port_warn(port,
"failed to reach state TB_PORT_UP. Ignoring port...\n");
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 3f1ab30c4fb1..7bfbc9ca9ba4 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -16,7 +16,8 @@
#include "tb_regs.h"
#include "tunnel.h"
-#define TB_TIMEOUT 100 /* ms */
+#define TB_TIMEOUT 100 /* ms */
+#define MAX_GROUPS 7 /* max Group_ID is 7 */
/**
* struct tb_cm - Simple Thunderbolt connection manager
@@ -28,12 +29,14 @@
* after cfg has been paused.
* @remove_work: Work used to remove any unplugged routers after
* runtime resume
+ * @groups: Bandwidth groups used in this domain.
*/
struct tb_cm {
struct list_head tunnel_list;
struct list_head dp_resources;
bool hotplug_active;
struct delayed_work remove_work;
+ struct tb_bandwidth_group groups[MAX_GROUPS];
};
static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
@@ -49,6 +52,112 @@ struct tb_hotplug_event {
bool unplug;
};
+static void tb_init_bandwidth_groups(struct tb_cm *tcm)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ struct tb_bandwidth_group *group = &tcm->groups[i];
+
+ group->tb = tcm_to_tb(tcm);
+ group->index = i + 1;
+ INIT_LIST_HEAD(&group->ports);
+ }
+}
+
+static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
+ struct tb_port *in)
+{
+ if (!group || WARN_ON(in->group))
+ return;
+
+ in->group = group;
+ list_add_tail(&in->group_list, &group->ports);
+
+ tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
+}
+
+static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ struct tb_bandwidth_group *group = &tcm->groups[i];
+
+ if (list_empty(&group->ports))
+ return group;
+ }
+
+ return NULL;
+}
+
+static struct tb_bandwidth_group *
+tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
+ struct tb_port *out)
+{
+ struct tb_bandwidth_group *group;
+ struct tb_tunnel *tunnel;
+
+ /*
+ * Find all DP tunnels that go through all the same USB4 links
+ * as this one. Because we always setup tunnels the same way we
+ * can just check for the routers at both ends of the tunnels
+ * and if they are the same we have a match.
+ */
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ if (!tb_tunnel_is_dp(tunnel))
+ continue;
+
+ if (tunnel->src_port->sw == in->sw &&
+ tunnel->dst_port->sw == out->sw) {
+ group = tunnel->src_port->group;
+ if (group) {
+ tb_bandwidth_group_attach_port(group, in);
+ return group;
+ }
+ }
+ }
+
+ /* Pick up next available group then */
+ group = tb_find_free_bandwidth_group(tcm);
+ if (group)
+ tb_bandwidth_group_attach_port(group, in);
+ else
+ tb_port_warn(in, "no available bandwidth groups\n");
+
+ return group;
+}
+
+static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
+ struct tb_port *out)
+{
+ if (usb4_dp_port_bw_mode_enabled(in)) {
+ int index, i;
+
+ index = usb4_dp_port_group_id(in);
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ if (tcm->groups[i].index == index) {
+ tb_bandwidth_group_attach_port(&tcm->groups[i], in);
+ return;
+ }
+ }
+ }
+
+ tb_attach_bandwidth_group(tcm, in, out);
+}
+
+static void tb_detach_bandwidth_group(struct tb_port *in)
+{
+ struct tb_bandwidth_group *group = in->group;
+
+ if (group) {
+ in->group = NULL;
+ list_del_init(&in->group_list);
+
+ tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
+ }
+}
+
static void tb_handle_hotplug(struct work_struct *work);
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
@@ -193,9 +302,14 @@ static void tb_discover_tunnels(struct tb *tb)
parent = tb_switch_parent(parent);
}
} else if (tb_tunnel_is_dp(tunnel)) {
+ struct tb_port *in = tunnel->src_port;
+ struct tb_port *out = tunnel->dst_port;
+
/* Keep the domain from powering down */
- pm_runtime_get_sync(&tunnel->src_port->sw->dev);
- pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
+ pm_runtime_get_sync(&in->sw->dev);
+ pm_runtime_get_sync(&out->sw->dev);
+
+ tb_discover_bandwidth_group(tcm, in, out);
}
}
}
@@ -350,10 +464,13 @@ static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
struct tb_tunnel *tunnel;
struct tb_port *port;
- tb_port_dbg(dst_port, "calculating available bandwidth\n");
+ tb_dbg(tb, "calculating available bandwidth between %llx:%u <-> %llx:%u\n",
+ tb_route(src_port->sw), src_port->port, tb_route(dst_port->sw),
+ dst_port->port);
tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
- if (tunnel) {
+ if (tunnel && tunnel->src_port != src_port &&
+ tunnel->dst_port != dst_port) {
ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
&usb3_consumed_down);
if (ret)
@@ -387,7 +504,8 @@ static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
up_bw -= up_bw / 10;
down_bw = up_bw;
- tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);
+ tb_port_dbg(port, "link total bandwidth %d/%d Mb/s\n", up_bw,
+ down_bw);
/*
* Find all DP tunnels that cross the port and reduce
@@ -396,12 +514,24 @@ static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
int dp_consumed_up, dp_consumed_down;
+ if (tb_tunnel_is_invalid(tunnel))
+ continue;
+
if (!tb_tunnel_is_dp(tunnel))
continue;
if (!tb_tunnel_port_on_path(tunnel, port))
continue;
+ /*
+ * Ignore the DP tunnel between src_port and
+ * dst_port because it is the same tunnel and we
+ * may be re-calculating estimated bandwidth.
+ */
+ if (tunnel->src_port == src_port &&
+ tunnel->dst_port == dst_port)
+ continue;
+
ret = tb_tunnel_consumed_bandwidth(tunnel,
&dp_consumed_up,
&dp_consumed_down);
@@ -762,6 +892,7 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
switch (tunnel->type) {
case TB_TUNNEL_DP:
+ tb_detach_bandwidth_group(src_port);
/*
* In case of DP tunnel make sure the DP IN resource is
* deallocated properly.
@@ -879,6 +1010,99 @@ out:
return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}
+static void
+tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
+{
+ struct tb_tunnel *first_tunnel;
+ struct tb *tb = group->tb;
+ struct tb_port *in;
+ int ret;
+
+ tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
+ group->index);
+
+ first_tunnel = NULL;
+ list_for_each_entry(in, &group->ports, group_list) {
+ int estimated_bw, estimated_up, estimated_down;
+ struct tb_tunnel *tunnel;
+ struct tb_port *out;
+
+ if (!usb4_dp_port_bw_mode_enabled(in))
+ continue;
+
+ tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
+ if (WARN_ON(!tunnel))
+ break;
+
+ if (!first_tunnel) {
+ /*
+ * Since USB3 bandwidth is shared by all DP
+ * tunnels under the host router USB4 port, even
+ * if they do not begin from the host router, we
+ * can release USB3 bandwidth just once and not
+ * for each tunnel separately.
+ */
+ first_tunnel = tunnel;
+ ret = tb_release_unused_usb3_bandwidth(tb,
+ first_tunnel->src_port, first_tunnel->dst_port);
+ if (ret) {
+ tb_port_warn(in,
+ "failed to release unused bandwidth\n");
+ break;
+ }
+ }
+
+ out = tunnel->dst_port;
+ ret = tb_available_bandwidth(tb, in, out, &estimated_up,
+ &estimated_down);
+ if (ret) {
+ tb_port_warn(in,
+ "failed to re-calculate estimated bandwidth\n");
+ break;
+ }
+
+ /*
+ * Estimated bandwidth includes:
+ * - already allocated bandwidth for the DP tunnel
+ * - available bandwidth along the path
+ * - bandwidth allocated for USB 3.x but not used.
+ */
+ tb_port_dbg(in, "re-calculated estimated bandwidth %u/%u Mb/s\n",
+ estimated_up, estimated_down);
+
+ if (in->sw->config.depth < out->sw->config.depth)
+ estimated_bw = estimated_down;
+ else
+ estimated_bw = estimated_up;
+
+ if (usb4_dp_port_set_estimated_bw(in, estimated_bw))
+ tb_port_warn(in, "failed to update estimated bandwidth\n");
+ }
+
+ if (first_tunnel)
+ tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
+ first_tunnel->dst_port);
+
+ tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
+}
+
+static void tb_recalc_estimated_bandwidth(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ int i;
+
+ tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
+
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ struct tb_bandwidth_group *group = &tcm->groups[i];
+
+ if (!list_empty(&group->ports))
+ tb_recalc_estimated_bandwidth_for_group(group);
+ }
+
+ tb_dbg(tb, "bandwidth re-calculation done\n");
+}
+
static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
{
struct tb_port *host_port, *port;
@@ -892,7 +1116,7 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
continue;
if (tb_port_is_enabled(port)) {
- tb_port_dbg(port, "in use\n");
+ tb_port_dbg(port, "DP OUT in use\n");
continue;
}
@@ -941,7 +1165,7 @@ static void tb_tunnel_dp(struct tb *tb)
continue;
if (tb_port_is_enabled(port)) {
- tb_port_dbg(port, "in use\n");
+ tb_port_dbg(port, "DP IN in use\n");
continue;
}
@@ -993,17 +1217,19 @@ static void tb_tunnel_dp(struct tb *tb)
goto err_rpm_put;
}
+ if (!tb_attach_bandwidth_group(tcm, in, out))
+ goto err_dealloc_dp;
+
/* Make all unused USB3 bandwidth available for the new DP tunnel */
ret = tb_release_unused_usb3_bandwidth(tb, in, out);
if (ret) {
tb_warn(tb, "failed to release unused bandwidth\n");
- goto err_dealloc_dp;
+ goto err_detach_group;
}
- ret = tb_available_bandwidth(tb, in, out, &available_up,
- &available_down);
+ ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
if (ret)
- goto err_reclaim;
+ goto err_reclaim_usb;
tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
available_up, available_down);
@@ -1012,7 +1238,7 @@ static void tb_tunnel_dp(struct tb *tb)
available_down);
if (!tunnel) {
tb_port_dbg(out, "could not allocate DP tunnel\n");
- goto err_reclaim;
+ goto err_reclaim_usb;
}
if (tb_tunnel_activate(tunnel)) {
@@ -1022,6 +1248,10 @@ static void tb_tunnel_dp(struct tb *tb)
list_add_tail(&tunnel->list, &tcm->tunnel_list);
tb_reclaim_usb3_bandwidth(tb, in, out);
+
+ /* Update the domain with the new bandwidth estimation */
+ tb_recalc_estimated_bandwidth(tb);
+
/*
* In case of DP tunnel exists, change host router's 1st children
* TMU mode to HiFi for CL0s to work.
@@ -1032,8 +1262,10 @@ static void tb_tunnel_dp(struct tb *tb)
err_free:
tb_tunnel_free(tunnel);
-err_reclaim:
+err_reclaim_usb:
tb_reclaim_usb3_bandwidth(tb, in, out);
+err_detach_group:
+ tb_detach_bandwidth_group(in);
err_dealloc_dp:
tb_switch_dealloc_dp_resource(in->sw, in);
err_rpm_put:
@@ -1066,6 +1298,7 @@ static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
 	 * See if there is another DP OUT port that can be used
 	 * to create another tunnel.
*/
+ tb_recalc_estimated_bandwidth(tb);
tb_tunnel_dp(tb);
}
@@ -1313,6 +1546,7 @@ static void tb_handle_hotplug(struct work_struct *work)
if (port->dual_link_port)
port->dual_link_port->remote = NULL;
/* Maybe we can create another DP tunnel */
+ tb_recalc_estimated_bandwidth(tb);
tb_tunnel_dp(tb);
} else if (port->xdomain) {
struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
@@ -1370,6 +1604,239 @@ out:
kfree(ev);
}
+static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
+ int *requested_down)
+{
+ int allocated_up, allocated_down, available_up, available_down, ret;
+ int requested_up_corrected, requested_down_corrected, granularity;
+ int max_up, max_down, max_up_rounded, max_down_rounded;
+ struct tb *tb = tunnel->tb;
+ struct tb_port *in, *out;
+
+ ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
+ if (ret)
+ return ret;
+
+ in = tunnel->src_port;
+ out = tunnel->dst_port;
+
+ tb_port_dbg(in, "bandwidth allocated currently %d/%d Mb/s\n",
+ allocated_up, allocated_down);
+
+ /*
+ * If we get rounded up request from graphics side, say HBR2 x 4
+ * that is 17500 instead of 17280 (this is because of the
+ * granularity), we allow it too. Here the graphics has already
+ * negotiated with the DPRX the maximum possible rates (which is
+ * 17280 in this case).
+ *
+	 * Since the link cannot go higher than 17280 we use that in our
+	 * calculations, but the DP IN adapter Allocated BW write must use
+	 * the same value (17500), otherwise the adapter will mark the
+	 * request as failed for graphics.
+ */
+ ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
+ if (ret)
+ return ret;
+
+ ret = usb4_dp_port_granularity(in);
+ if (ret < 0)
+ return ret;
+ granularity = ret;
+
+ max_up_rounded = roundup(max_up, granularity);
+ max_down_rounded = roundup(max_down, granularity);
+
+ /*
+ * This will "fix" the request down to the maximum supported
+ * rate * lanes if it is at the maximum rounded up level.
+ */
+ requested_up_corrected = *requested_up;
+ if (requested_up_corrected == max_up_rounded)
+ requested_up_corrected = max_up;
+ else if (requested_up_corrected < 0)
+ requested_up_corrected = 0;
+ requested_down_corrected = *requested_down;
+ if (requested_down_corrected == max_down_rounded)
+ requested_down_corrected = max_down;
+ else if (requested_down_corrected < 0)
+ requested_down_corrected = 0;
+
+ tb_port_dbg(in, "corrected bandwidth request %d/%d Mb/s\n",
+ requested_up_corrected, requested_down_corrected);
+
+ if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) ||
+ (*requested_down >= 0 && requested_down_corrected > max_down_rounded)) {
+ tb_port_dbg(in, "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
+ requested_up_corrected, requested_down_corrected,
+ max_up_rounded, max_down_rounded);
+ return -ENOBUFS;
+ }
+
+ if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
+ (*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
+ /*
+ * If requested bandwidth is less or equal than what is
+ * currently allocated to that tunnel we simply change
+ * the reservation of the tunnel. Since all the tunnels
+ * going out from the same USB4 port are in the same
+ * group the released bandwidth will be taken into
+ * account for the other tunnels automatically below.
+ */
+ return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
+ requested_down);
+ }
+
+ /*
+ * More bandwidth is requested. Release all the potential
+ * bandwidth from USB3 first.
+ */
+ ret = tb_release_unused_usb3_bandwidth(tb, in, out);
+ if (ret)
+ return ret;
+
+ /*
+ * Then go over all tunnels that cross the same USB4 ports (they
+ * are also in the same group but we use the same function here
+ * that we use with the normal bandwidth allocation).
+ */
+ ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
+ if (ret)
+ goto reclaim;
+
+ tb_port_dbg(in, "bandwidth available for allocation %d/%d Mb/s\n",
+ available_up, available_down);
+
+ if ((*requested_up >= 0 && available_up >= requested_up_corrected) ||
+ (*requested_down >= 0 && available_down >= requested_down_corrected)) {
+ ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
+ requested_down);
+ } else {
+ ret = -ENOBUFS;
+ }
+
+reclaim:
+ tb_reclaim_usb3_bandwidth(tb, in, out);
+ return ret;
+}
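
A worked instance of the rounded-up case described in the comment above, assuming the usual 8b/10b factor that tb_dp_bandwidth() applies elsewhere in the driver (HBR2 is 5.4 Gb/s per lane):

/* Sketch only: shows why graphics may request 17500 Mb/s when the link tops
 * out at 17280 Mb/s and the granularity is 250 Mb/s. */
static int example_hbr2_x4_request(void)
{
	int rate = 5400, lanes = 4, granularity = 250;
	int max_bw = rate * lanes * 8 / 10;	/* 17280 Mb/s */

	return roundup(max_bw, granularity);	/* 17500 Mb/s */
}

The correction in tb_alloc_dp_bandwidth() then maps such a 17500 Mb/s request back down to the real 17280 Mb/s maximum, while the Allocated BW write keeps the rounded 17500 value.
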
+
+static void tb_handle_dp_bandwidth_request(struct work_struct *work)
+{
+ struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
+ int requested_bw, requested_up, requested_down, ret;
+ struct tb_port *in, *out;
+ struct tb_tunnel *tunnel;
+ struct tb *tb = ev->tb;
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_switch *sw;
+
+ pm_runtime_get_sync(&tb->dev);
+
+ mutex_lock(&tb->lock);
+ if (!tcm->hotplug_active)
+ goto unlock;
+
+ sw = tb_switch_find_by_route(tb, ev->route);
+ if (!sw) {
+ tb_warn(tb, "bandwidth request from non-existent router %llx\n",
+ ev->route);
+ goto unlock;
+ }
+
+ in = &sw->ports[ev->port];
+ if (!tb_port_is_dpin(in)) {
+ tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
+ goto unlock;
+ }
+
+ tb_port_dbg(in, "handling bandwidth allocation request\n");
+
+ if (!usb4_dp_port_bw_mode_enabled(in)) {
+ tb_port_warn(in, "bandwidth allocation mode not enabled\n");
+ goto unlock;
+ }
+
+ ret = usb4_dp_port_requested_bw(in);
+ if (ret < 0) {
+ if (ret == -ENODATA)
+ tb_port_dbg(in, "no bandwidth request active\n");
+ else
+ tb_port_warn(in, "failed to read requested bandwidth\n");
+ goto unlock;
+ }
+ requested_bw = ret;
+
+ tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);
+
+ tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
+ if (!tunnel) {
+ tb_port_warn(in, "failed to find tunnel\n");
+ goto unlock;
+ }
+
+ out = tunnel->dst_port;
+
+ if (in->sw->config.depth < out->sw->config.depth) {
+ requested_up = -1;
+ requested_down = requested_bw;
+ } else {
+ requested_up = requested_bw;
+ requested_down = -1;
+ }
+
+ ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
+ if (ret) {
+ if (ret == -ENOBUFS)
+ tb_port_warn(in, "not enough bandwidth available\n");
+ else
+ tb_port_warn(in, "failed to change bandwidth allocation\n");
+ } else {
+ tb_port_dbg(in, "bandwidth allocation changed to %d/%d Mb/s\n",
+ requested_up, requested_down);
+
+ /* Update other clients about the allocation change */
+ tb_recalc_estimated_bandwidth(tb);
+ }
+
+unlock:
+ mutex_unlock(&tb->lock);
+
+ pm_runtime_mark_last_busy(&tb->dev);
+ pm_runtime_put_autosuspend(&tb->dev);
+}
+
+static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
+{
+ struct tb_hotplug_event *ev;
+
+ ev = kmalloc(sizeof(*ev), GFP_KERNEL);
+ if (!ev)
+ return;
+
+ ev->tb = tb;
+ ev->route = route;
+ ev->port = port;
+ INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
+ queue_work(tb->wq, &ev->work);
+}
+
+static void tb_handle_notification(struct tb *tb, u64 route,
+ const struct cfg_error_pkg *error)
+{
+ if (tb_cfg_ack_notification(tb->ctl, route, error))
+ tb_warn(tb, "could not ack notification on %llx\n", route);
+
+ switch (error->error) {
+ case TB_CFG_ERROR_DP_BW:
+ tb_queue_dp_bandwidth_request(tb, route, error->port);
+ break;
+
+ default:
+ /* Ack is enough */
+ return;
+ }
+}
+
/*
* tb_schedule_hotplug_handler() - callback function for the control channel
*
@@ -1379,15 +1846,19 @@ static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
const void *buf, size_t size)
{
const struct cfg_event_pkg *pkg = buf;
- u64 route;
+ u64 route = tb_cfg_get_route(&pkg->header);
- if (type != TB_CFG_PKG_EVENT) {
+ switch (type) {
+ case TB_CFG_PKG_ERROR:
+ tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf);
+ return;
+ case TB_CFG_PKG_EVENT:
+ break;
+ default:
tb_warn(tb, "unexpected event %#x, ignoring\n", type);
return;
}
- route = tb_cfg_get_route(&pkg->header);
-
if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
pkg->port);
@@ -1817,6 +2288,7 @@ struct tb *tb_probe(struct tb_nhi *nhi)
INIT_LIST_HEAD(&tcm->tunnel_list);
INIT_LIST_HEAD(&tcm->dp_resources);
INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
+ tb_init_bandwidth_groups(tcm);
tb_dbg(tb, "using software connection manager\n");
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index f9786976f5ec..2953cf38c29e 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -224,6 +224,23 @@ struct tb_switch {
};
/**
+ * struct tb_bandwidth_group - Bandwidth management group
+ * @tb: Pointer to the domain the group belongs to
+ * @index: Index of the group (aka Group_ID). Valid values %1-%7
+ * @ports: DP IN adapters belonging to this group are linked here
+ *
+ * Any tunnel that requires isochronous bandwidth (that's DP for now) is
+ * attached to a bandwidth group. All tunnels going through the same
+ * USB4 links share the same group and can dynamically distribute the
+ * bandwidth within the group.
+ */
+struct tb_bandwidth_group {
+ struct tb *tb;
+ int index;
+ struct list_head ports;
+};
+
+/**
* struct tb_port - a thunderbolt port, part of a tb_switch
* @config: Cached port configuration read from registers
* @sw: Switch the port belongs to
@@ -247,6 +264,9 @@ struct tb_switch {
* @ctl_credits: Buffers reserved for control path
* @dma_credits: Number of credits allocated for DMA tunneling for all
* DMA paths through this port.
+ * @group: Bandwidth allocation group the adapter is assigned to. Only
+ * used for DP IN adapters for now.
+ * @group_list: The adapter is linked to the group's list of ports through this
*
* In USB4 terminology this structure represents an adapter (protocol or
* lane adapter).
@@ -272,6 +292,8 @@ struct tb_port {
unsigned int total_credits;
unsigned int ctl_credits;
unsigned int dma_credits;
+ struct tb_bandwidth_group *group;
+ struct list_head group_list;
};
/**
@@ -1047,7 +1069,7 @@ void tb_port_lane_bonding_disable(struct tb_port *port);
int tb_port_wait_for_link_width(struct tb_port *port, int width,
int timeout_msec);
int tb_port_update_credits(struct tb_port *port);
-bool tb_port_is_clx_enabled(struct tb_port *port, enum tb_clx clx);
+bool tb_port_is_clx_enabled(struct tb_port *port, unsigned int clx);
int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap);
@@ -1238,6 +1260,21 @@ int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
int *downstream_bw);
+int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id);
+bool usb4_dp_port_bw_mode_supported(struct tb_port *port);
+bool usb4_dp_port_bw_mode_enabled(struct tb_port *port);
+int usb4_dp_port_set_cm_bw_mode_supported(struct tb_port *port, bool supported);
+int usb4_dp_port_group_id(struct tb_port *port);
+int usb4_dp_port_set_group_id(struct tb_port *port, int group_id);
+int usb4_dp_port_nrd(struct tb_port *port, int *rate, int *lanes);
+int usb4_dp_port_set_nrd(struct tb_port *port, int rate, int lanes);
+int usb4_dp_port_granularity(struct tb_port *port);
+int usb4_dp_port_set_granularity(struct tb_port *port, int granularity);
+int usb4_dp_port_set_estimated_bw(struct tb_port *port, int bw);
+int usb4_dp_port_allocated_bw(struct tb_port *port);
+int usb4_dp_port_allocate_bw(struct tb_port *port, int bw);
+int usb4_dp_port_requested_bw(struct tb_port *port);
+
static inline bool tb_is_usb4_port_device(const struct device *dev)
{
return dev->type == &usb4_port_device_type;
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index 33c4c7aed56d..3234bff07899 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -29,6 +29,7 @@ enum tb_cfg_error {
TB_CFG_ERROR_HEC_ERROR_DETECTED = 12,
TB_CFG_ERROR_FLOW_CONTROL_ERROR = 13,
TB_CFG_ERROR_LOCK = 15,
+ TB_CFG_ERROR_DP_BW = 32,
};
/* common header */
@@ -64,14 +65,16 @@ struct cfg_write_pkg {
/* TB_CFG_PKG_ERROR */
struct cfg_error_pkg {
struct tb_cfg_header header;
- enum tb_cfg_error error:4;
- u32 zero1:4;
+ enum tb_cfg_error error:8;
u32 port:6;
- u32 zero2:2; /* Both should be zero, still they are different fields. */
- u32 zero3:14;
+ u32 reserved:16;
u32 pg:2;
} __packed;
+struct cfg_ack_pkg {
+ struct tb_cfg_header header;
+};
+
#define TB_CFG_ERROR_PG_HOT_PLUG 0x2
#define TB_CFG_ERROR_PG_HOT_UNPLUG 0x3
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 3c38b0cb8f74..2636423748cd 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -50,6 +50,10 @@ enum tb_port_state {
TB_PORT_DISABLED = 0, /* tb_cap_phy.disable == 1 */
TB_PORT_CONNECTING = 1, /* retry */
TB_PORT_UP = 2,
+ TB_PORT_TX_CL0S = 3,
+ TB_PORT_RX_CL0S = 4,
+ TB_PORT_CL1 = 5,
+ TB_PORT_CL2 = 6,
TB_PORT_UNPLUGGED = 7,
};
@@ -381,15 +385,42 @@ struct tb_regs_port_header {
#define ADP_DP_CS_1_AUX_RX_HOPID_MASK GENMASK(21, 11)
#define ADP_DP_CS_1_AUX_RX_HOPID_SHIFT 11
#define ADP_DP_CS_2 0x02
+#define ADP_DP_CS_2_NRD_MLC_MASK GENMASK(2, 0)
#define ADP_DP_CS_2_HDP BIT(6)
+#define ADP_DP_CS_2_NRD_MLR_MASK GENMASK(9, 7)
+#define ADP_DP_CS_2_NRD_MLR_SHIFT 7
+#define ADP_DP_CS_2_CA BIT(10)
+#define ADP_DP_CS_2_GR_MASK GENMASK(12, 11)
+#define ADP_DP_CS_2_GR_SHIFT 11
+#define ADP_DP_CS_2_GR_0_25G 0x0
+#define ADP_DP_CS_2_GR_0_5G 0x1
+#define ADP_DP_CS_2_GR_1G 0x2
+#define ADP_DP_CS_2_GROUP_ID_MASK GENMASK(15, 13)
+#define ADP_DP_CS_2_GROUP_ID_SHIFT 13
+#define ADP_DP_CS_2_CM_ID_MASK GENMASK(19, 16)
+#define ADP_DP_CS_2_CM_ID_SHIFT 16
+#define ADP_DP_CS_2_CMMS BIT(20)
+#define ADP_DP_CS_2_ESTIMATED_BW_MASK GENMASK(31, 24)
+#define ADP_DP_CS_2_ESTIMATED_BW_SHIFT 24
#define ADP_DP_CS_3 0x03
#define ADP_DP_CS_3_HDPC BIT(9)
#define DP_LOCAL_CAP 0x04
#define DP_REMOTE_CAP 0x05
+/* For DP IN adapter */
+#define DP_STATUS 0x06
+#define DP_STATUS_ALLOCATED_BW_MASK GENMASK(31, 24)
+#define DP_STATUS_ALLOCATED_BW_SHIFT 24
+/* For DP OUT adapter */
#define DP_STATUS_CTRL 0x06
#define DP_STATUS_CTRL_CMHS BIT(25)
#define DP_STATUS_CTRL_UF BIT(26)
#define DP_COMMON_CAP 0x07
+/* Only if DP IN supports BW allocation mode */
+#define ADP_DP_CS_8 0x08
+#define ADP_DP_CS_8_REQUESTED_BW_MASK GENMASK(7, 0)
+#define ADP_DP_CS_8_DPME BIT(30)
+#define ADP_DP_CS_8_DR BIT(31)
+
/*
* DP_COMMON_CAP offsets work also for DP_LOCAL_CAP and DP_REMOTE_CAP
* with exception of DPRX done.
@@ -406,7 +437,12 @@ struct tb_regs_port_header {
#define DP_COMMON_CAP_2_LANES 0x1
#define DP_COMMON_CAP_4_LANES 0x2
#define DP_COMMON_CAP_LTTPR_NS BIT(27)
+#define DP_COMMON_CAP_BW_MODE BIT(28)
#define DP_COMMON_CAP_DPRX_DONE BIT(31)
/* PCIe adapter registers */
#define ADP_PCIE_CS_0 0x00
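
The two-bit ADP_DP_CS_2_GR field above encodes the bandwidth granularity in 0.25 Gb/s steps. A hedged helper showing the decode the connection manager is expected to perform (mirroring usb4_dp_port_granularity(), which is not part of this section):

static int example_granularity_to_mbps(u32 adp_dp_cs_2)
{
	switch ((adp_dp_cs_2 & ADP_DP_CS_2_GR_MASK) >> ADP_DP_CS_2_GR_SHIFT) {
	case ADP_DP_CS_2_GR_0_25G:
		return 250;
	case ADP_DP_CS_2_GR_0_5G:
		return 500;
	case ADP_DP_CS_2_GR_1G:
		return 1000;
	default:
		return -EINVAL;		/* reserved encoding */
	}
}
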
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 1fc3c29b24f8..9099ae73e78f 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>
+#include <linux/ktime.h>
#include "tunnel.h"
#include "tb.h"
@@ -44,12 +45,17 @@
/* Minimum number of credits for DMA path */
#define TB_MIN_DMA_CREDITS 1U
+static bool bw_alloc_mode = true;
+module_param(bw_alloc_mode, bool, 0444);
+MODULE_PARM_DESC(bw_alloc_mode,
+ "enable bandwidth allocation mode if supported (default: true)");
+
static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
do { \
struct tb_tunnel *__tunnel = (tunnel); \
- level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt, \
+ level(__tunnel->tb, "%llx:%u <-> %llx:%u (%s): " fmt, \
tb_route(__tunnel->src_port->sw), \
__tunnel->src_port->port, \
tb_route(__tunnel->dst_port->sw), \
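
The new bw_alloc_mode parameter is read-only at runtime (mode 0444), so it has to be set at load time: on the kernel command line when the driver is built in, or via modprobe when built as a module (assuming the module name stays thunderbolt). For example, to disable the allocation mode:

    thunderbolt.bw_alloc_mode=0          # kernel command line
    modprobe thunderbolt bw_alloc_mode=0
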
@@ -339,9 +345,10 @@ static bool tb_dp_is_usb4(const struct tb_switch *sw)
return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}
-static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
+static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out,
+ int timeout_msec)
{
- int timeout = 10;
+ ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
u32 val;
int ret;
@@ -368,8 +375,8 @@ static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
return ret;
if (!(val & DP_STATUS_CTRL_CMHS))
return 0;
- usleep_range(10, 100);
- } while (timeout--);
+ usleep_range(100, 150);
+ } while (ktime_before(ktime_get(), timeout));
return -ETIMEDOUT;
}
@@ -519,7 +526,7 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
* Perform connection manager handshake between IN and OUT ports
* before capabilities exchange can take place.
*/
- ret = tb_dp_cm_handshake(in, out);
+ ret = tb_dp_cm_handshake(in, out, 1500);
if (ret)
return ret;
@@ -597,6 +604,133 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
in->cap_adap + DP_REMOTE_CAP, 1);
}
+static int tb_dp_bw_alloc_mode_enable(struct tb_tunnel *tunnel)
+{
+ int ret, estimated_bw, granularity, tmp;
+ struct tb_port *out = tunnel->dst_port;
+ struct tb_port *in = tunnel->src_port;
+ u32 out_dp_cap, out_rate, out_lanes;
+ u32 in_dp_cap, in_rate, in_lanes;
+ u32 rate, lanes;
+
+ if (!bw_alloc_mode)
+ return 0;
+
+ ret = usb4_dp_port_set_cm_bw_mode_supported(in, true);
+ if (ret)
+ return ret;
+
+ ret = usb4_dp_port_set_group_id(in, in->group->index);
+ if (ret)
+ return ret;
+
+ /*
+ * Get the non-reduced rate and lanes based on the lowest
+ * capability of both adapters.
+ */
+ ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
+ in->cap_adap + DP_LOCAL_CAP, 1);
+ if (ret)
+ return ret;
+
+ ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
+ out->cap_adap + DP_LOCAL_CAP, 1);
+ if (ret)
+ return ret;
+
+ in_rate = tb_dp_cap_get_rate(in_dp_cap);
+ in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
+ out_rate = tb_dp_cap_get_rate(out_dp_cap);
+ out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
+
+ rate = min(in_rate, out_rate);
+ lanes = min(in_lanes, out_lanes);
+ tmp = tb_dp_bandwidth(rate, lanes);
+
+ tb_port_dbg(in, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n", rate,
+ lanes, tmp);
+
+ ret = usb4_dp_port_set_nrd(in, rate, lanes);
+ if (ret)
+ return ret;
+
+ for (granularity = 250; tmp / granularity > 255 && granularity <= 1000;
+ granularity *= 2)
+ ;
+
+ tb_port_dbg(in, "granularity %d Mb/s\n", granularity);
+
+ /*
+ * Returns -EINVAL if granularity above is outside of the
+ * accepted ranges.
+ */
+ ret = usb4_dp_port_set_granularity(in, granularity);
+ if (ret)
+ return ret;
+
+ /*
+ * Bandwidth estimation is pretty much what we have in
+ * max_up/down fields. For discovery we just read what the
+ * estimation was set to.
+ */
+ if (in->sw->config.depth < out->sw->config.depth)
+ estimated_bw = tunnel->max_down;
+ else
+ estimated_bw = tunnel->max_up;
+
+ tb_port_dbg(in, "estimated bandwidth %d Mb/s\n", estimated_bw);
+
+ ret = usb4_dp_port_set_estimated_bw(in, estimated_bw);
+ if (ret)
+ return ret;
+
+	/* Initial allocation should be 0 according to the spec */
+ ret = usb4_dp_port_allocate_bw(in, 0);
+ if (ret)
+ return ret;
+
+ tb_port_dbg(in, "bandwidth allocation mode enabled\n");
+ return 0;
+}
+
+static int tb_dp_init(struct tb_tunnel *tunnel)
+{
+ struct tb_port *in = tunnel->src_port;
+ struct tb_switch *sw = in->sw;
+ struct tb *tb = in->sw->tb;
+ int ret;
+
+ ret = tb_dp_xchg_caps(tunnel);
+ if (ret)
+ return ret;
+
+ if (!tb_switch_is_usb4(sw))
+ return 0;
+
+ if (!usb4_dp_port_bw_mode_supported(in))
+ return 0;
+
+ tb_port_dbg(in, "bandwidth allocation mode supported\n");
+
+ ret = usb4_dp_port_set_cm_id(in, tb->index);
+ if (ret)
+ return ret;
+
+ return tb_dp_bw_alloc_mode_enable(tunnel);
+}
+
+static void tb_dp_deinit(struct tb_tunnel *tunnel)
+{
+ struct tb_port *in = tunnel->src_port;
+
+ if (!usb4_dp_port_bw_mode_supported(in))
+ return;
+ if (usb4_dp_port_bw_mode_enabled(in)) {
+ usb4_dp_port_set_cm_bw_mode_supported(in, false);
+ tb_port_dbg(in, "bandwidth allocation mode disabled\n");
+ }
+}
+
static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
int ret;
@@ -634,49 +768,275 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
return 0;
}
+/* max_bw is rounded up to next granularity */
+static int tb_dp_nrd_bandwidth(struct tb_tunnel *tunnel, int *max_bw)
+{
+ struct tb_port *in = tunnel->src_port;
+ int ret, rate, lanes, nrd_bw;
+
+ ret = usb4_dp_port_nrd(in, &rate, &lanes);
+ if (ret)
+ return ret;
+
+ nrd_bw = tb_dp_bandwidth(rate, lanes);
+
+ if (max_bw) {
+ ret = usb4_dp_port_granularity(in);
+ if (ret < 0)
+ return ret;
+ *max_bw = roundup(nrd_bw, ret);
+ }
+
+ return nrd_bw;
+}
+
+static int tb_dp_bw_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
+ int *consumed_up, int *consumed_down)
+{
+ struct tb_port *out = tunnel->dst_port;
+ struct tb_port *in = tunnel->src_port;
+ int ret, allocated_bw, max_bw;
+
+ if (!usb4_dp_port_bw_mode_enabled(in))
+ return -EOPNOTSUPP;
+
+ if (!tunnel->bw_mode)
+ return -EOPNOTSUPP;
+
+ /* Read what was allocated previously if any */
+ ret = usb4_dp_port_allocated_bw(in);
+ if (ret < 0)
+ return ret;
+ allocated_bw = ret;
+
+ ret = tb_dp_nrd_bandwidth(tunnel, &max_bw);
+ if (ret < 0)
+ return ret;
+ if (allocated_bw == max_bw)
+ allocated_bw = ret;
+
+ tb_port_dbg(in, "consumed bandwidth through allocation mode %d Mb/s\n",
+ allocated_bw);
+
+ if (in->sw->config.depth < out->sw->config.depth) {
+ *consumed_up = 0;
+ *consumed_down = allocated_bw;
+ } else {
+ *consumed_up = allocated_bw;
+ *consumed_down = 0;
+ }
+
+ return 0;
+}
+
+static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
+ int *allocated_down)
+{
+ struct tb_port *out = tunnel->dst_port;
+ struct tb_port *in = tunnel->src_port;
+
+ /*
+ * If we have already set the allocated bandwidth then use that.
+ * Otherwise we read it from the DPRX.
+ */
+ if (usb4_dp_port_bw_mode_enabled(in) && tunnel->bw_mode) {
+ int ret, allocated_bw, max_bw;
+
+ ret = usb4_dp_port_allocated_bw(in);
+ if (ret < 0)
+ return ret;
+ allocated_bw = ret;
+
+ ret = tb_dp_nrd_bandwidth(tunnel, &max_bw);
+ if (ret < 0)
+ return ret;
+ if (allocated_bw == max_bw)
+ allocated_bw = ret;
+
+ if (in->sw->config.depth < out->sw->config.depth) {
+ *allocated_up = 0;
+ *allocated_down = allocated_bw;
+ } else {
+ *allocated_up = allocated_bw;
+ *allocated_down = 0;
+ }
+ return 0;
+ }
+
+ return tunnel->consumed_bandwidth(tunnel, allocated_up,
+ allocated_down);
+}
+
+static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
+ int *alloc_down)
+{
+ struct tb_port *out = tunnel->dst_port;
+ struct tb_port *in = tunnel->src_port;
+ int max_bw, ret, tmp;
+
+ if (!usb4_dp_port_bw_mode_enabled(in))
+ return -EOPNOTSUPP;
+
+ ret = tb_dp_nrd_bandwidth(tunnel, &max_bw);
+ if (ret < 0)
+ return ret;
+
+ if (in->sw->config.depth < out->sw->config.depth) {
+ tmp = min(*alloc_down, max_bw);
+ ret = usb4_dp_port_allocate_bw(in, tmp);
+ if (ret)
+ return ret;
+ *alloc_down = tmp;
+ *alloc_up = 0;
+ } else {
+ tmp = min(*alloc_up, max_bw);
+ ret = usb4_dp_port_allocate_bw(in, tmp);
+ if (ret)
+ return ret;
+ *alloc_down = 0;
+ *alloc_up = tmp;
+ }
+
+ /* Now we can use BW mode registers to figure out the bandwidth */
+ /* TODO: need to handle discovery too */
+ tunnel->bw_mode = true;
+ return 0;
+}
+
+static int tb_dp_read_dprx(struct tb_tunnel *tunnel, u32 *rate, u32 *lanes,
+ int timeout_msec)
+{
+ ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
+ struct tb_port *in = tunnel->src_port;
+
+ /*
+ * Wait for DPRX done. Normally it should be already set for
+ * active tunnel.
+ */
+ do {
+ u32 val;
+ int ret;
+
+ ret = tb_port_read(in, &val, TB_CFG_PORT,
+ in->cap_adap + DP_COMMON_CAP, 1);
+ if (ret)
+ return ret;
+
+ if (val & DP_COMMON_CAP_DPRX_DONE) {
+ *rate = tb_dp_cap_get_rate(val);
+ *lanes = tb_dp_cap_get_lanes(val);
+
+ tb_port_dbg(in, "consumed bandwidth through DPRX %d Mb/s\n",
+ tb_dp_bandwidth(*rate, *lanes));
+ return 0;
+ }
+ usleep_range(100, 150);
+ } while (ktime_before(ktime_get(), timeout));
+
+ return -ETIMEDOUT;
+}
+
+/* Read cap from tunnel DP IN */
+static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
+ u32 *lanes)
+{
+ struct tb_port *in = tunnel->src_port;
+ u32 val;
+ int ret;
+
+ switch (cap) {
+ case DP_LOCAL_CAP:
+ case DP_REMOTE_CAP:
+ break;
+
+ default:
+ tb_tunnel_WARN(tunnel, "invalid capability index %#x\n", cap);
+ return -EINVAL;
+ }
+
+ /*
+ * Read from the copied remote cap so that we take into account
+ * if capabilities were reduced during exchange.
+ */
+ ret = tb_port_read(in, &val, TB_CFG_PORT, in->cap_adap + cap, 1);
+ if (ret)
+ return ret;
+
+ *rate = tb_dp_cap_get_rate(val);
+ *lanes = tb_dp_cap_get_lanes(val);
+
+ tb_port_dbg(in, "bandwidth from %#x capability %d Mb/s\n", cap,
+ tb_dp_bandwidth(*rate, *lanes));
+ return 0;
+}
+
+static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
+ int *max_down)
+{
+ struct tb_port *in = tunnel->src_port;
+ u32 rate, lanes;
+ int ret;
+
+ /*
+ * DP IN adapter DP_LOCAL_CAP gets updated to the lowest AUX read
+ * parameter values, so we can use this to determine the
+ * maximum possible bandwidth over this link.
+ */
+ ret = tb_dp_read_cap(tunnel, DP_LOCAL_CAP, &rate, &lanes);
+ if (ret)
+ return ret;
+
+ if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
+ *max_up = 0;
+ *max_down = tb_dp_bandwidth(rate, lanes);
+ } else {
+ *max_up = tb_dp_bandwidth(rate, lanes);
+ *max_down = 0;
+ }
+
+ return 0;
+}
+
static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
int *consumed_down)
{
struct tb_port *in = tunnel->src_port;
const struct tb_switch *sw = in->sw;
- u32 val, rate = 0, lanes = 0;
+ u32 rate = 0, lanes = 0;
int ret;
if (tb_dp_is_usb4(sw)) {
- int timeout = 20;
-
/*
- * Wait for DPRX done. Normally it should be already set
- * for active tunnel.
+ * On USB4 routers check if the bandwidth allocation
+ * mode is enabled first and then read the bandwidth
+ * through those registers.
*/
- do {
- ret = tb_port_read(in, &val, TB_CFG_PORT,
- in->cap_adap + DP_COMMON_CAP, 1);
- if (ret)
+ ret = tb_dp_bw_mode_consumed_bandwidth(tunnel, consumed_up,
+ consumed_down);
+ if (ret < 0) {
+ if (ret != -EOPNOTSUPP)
return ret;
-
- if (val & DP_COMMON_CAP_DPRX_DONE) {
- rate = tb_dp_cap_get_rate(val);
- lanes = tb_dp_cap_get_lanes(val);
- break;
- }
- msleep(250);
- } while (timeout--);
-
- if (!timeout)
- return -ETIMEDOUT;
- } else if (sw->generation >= 2) {
+ } else if (!ret) {
+ return 0;
+ }
/*
- * Read from the copied remote cap so that we take into
- * account if capabilities were reduced during exchange.
+ * Then see if the DPRX negotiation is ready and if so
+ * return that bandwidth (it may be smaller than the
+ * reduced one). Otherwise return the remote (possibly
+ * reduced) caps.
*/
- ret = tb_port_read(in, &val, TB_CFG_PORT,
- in->cap_adap + DP_REMOTE_CAP, 1);
+ ret = tb_dp_read_dprx(tunnel, &rate, &lanes, 150);
+ if (ret) {
+ if (ret == -ETIMEDOUT)
+ ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
+ &rate, &lanes);
+ if (ret)
+ return ret;
+ }
+ } else if (sw->generation >= 2) {
+ ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
if (ret)
return ret;
-
- rate = tb_dp_cap_get_rate(val);
- lanes = tb_dp_cap_get_lanes(val);
} else {
/* No bandwidth management for legacy devices */
*consumed_up = 0;
@@ -798,8 +1158,12 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
if (!tunnel)
return NULL;
- tunnel->init = tb_dp_xchg_caps;
+ tunnel->init = tb_dp_init;
+ tunnel->deinit = tb_dp_deinit;
tunnel->activate = tb_dp_activate;
+ tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
+ tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
+ tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
tunnel->src_port = in;
@@ -887,8 +1251,12 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
if (!tunnel)
return NULL;
- tunnel->init = tb_dp_xchg_caps;
+ tunnel->init = tb_dp_init;
+ tunnel->deinit = tb_dp_deinit;
tunnel->activate = tb_dp_activate;
+ tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
+ tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
+ tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
tunnel->src_port = in;
tunnel->dst_port = out;
@@ -1714,6 +2082,72 @@ static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
}
/**
+ * tb_tunnel_maximum_bandwidth() - Return maximum possible bandwidth
+ * @tunnel: Tunnel to check
+ * @max_up: Maximum upstream bandwidth in Mb/s
+ * @max_down: Maximum downstream bandwidth in Mb/s
+ *
+ * Returns the maximum possible bandwidth this tunnel can use if not limited
+ * by other bandwidth clients. If the tunnel does not support this
+ * returns %-EOPNOTSUPP.
+ */
+int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
+ int *max_down)
+{
+ if (!tb_tunnel_is_active(tunnel))
+ return -EINVAL;
+
+ if (tunnel->maximum_bandwidth)
+ return tunnel->maximum_bandwidth(tunnel, max_up, max_down);
+ return -EOPNOTSUPP;
+}
+
+/**
+ * tb_tunnel_allocated_bandwidth() - Return bandwidth allocated for the tunnel
+ * @tunnel: Tunnel to check
+ * @allocated_up: Currently allocated upstream bandwidth in Mb/s is stored here
+ * @allocated_down: Currently allocated downstream bandwidth in Mb/s is
+ * stored here
+ *
+ * Returns the bandwidth allocated for the tunnel. This may be higher
+ * than what the tunnel actually consumes.
+ */
+int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
+ int *allocated_down)
+{
+ if (!tb_tunnel_is_active(tunnel))
+ return -EINVAL;
+
+ if (tunnel->allocated_bandwidth)
+ return tunnel->allocated_bandwidth(tunnel, allocated_up,
+ allocated_down);
+ return -EOPNOTSUPP;
+}
+
+/**
+ * tb_tunnel_alloc_bandwidth() - Change tunnel bandwidth allocation
+ * @tunnel: Tunnel whose bandwidth allocation to change
+ * @alloc_up: New upstream bandwidth in Mb/s
+ * @alloc_down: New downstream bandwidth in Mb/s
+ *
+ * Tries to change tunnel bandwidth allocation. If it succeeds, returns %0
+ * and updates @alloc_up and @alloc_down to what was actually allocated
+ * (it may not be the same as passed originally). Returns negative errno
+ * in case of failure.
+ */
+int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
+ int *alloc_down)
+{
+ if (!tb_tunnel_is_active(tunnel))
+ return -EINVAL;
+
+ if (tunnel->alloc_bandwidth)
+ return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);
+
+ return -EOPNOTSUPP;
+}
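+
+/*
+ * Illustrative usage sketch (hypothetical caller, not taken from the
+ * driver): a connection manager could combine the helpers above roughly
+ * like this when re-allocating bandwidth for a DP tunnel:
+ *
+ *	int max_up, max_down, up = 0, down = 2700;
+ *
+ *	if (!tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down)) {
+ *		down = min(down, max_down);
+ *		if (!tb_tunnel_alloc_bandwidth(tunnel, &up, &down))
+ *			... up and down now hold what was actually granted
+ *	}
+ */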
+
+/**
* tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
* @tunnel: Tunnel to check
* @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index bb4d1f1d6d0b..bf690f7beeee 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -29,6 +29,9 @@ enum tb_tunnel_type {
* @init: Optional tunnel specific initialization
* @deinit: Optional tunnel specific de-initialization
* @activate: Optional tunnel specific activation/deactivation
+ * @maximum_bandwidth: Returns maximum possible bandwidth for this tunnel
+ * @allocated_bandwidth: Return how much bandwidth is allocated for the tunnel
+ * @alloc_bandwidth: Change tunnel bandwidth allocation
* @consumed_bandwidth: Return how much bandwidth the tunnel consumes
* @release_unused_bandwidth: Release all unused bandwidth
* @reclaim_available_bandwidth: Reclaim back available bandwidth
@@ -40,6 +43,8 @@ enum tb_tunnel_type {
* Only set if the bandwidth needs to be limited.
* @allocated_up: Allocated upstream bandwidth (only for USB3)
* @allocated_down: Allocated downstream bandwidth (only for USB3)
+ * @bw_mode: DP bandwidth allocation mode registers can be used to
+ * determine consumed and allocated bandwidth
*/
struct tb_tunnel {
struct tb *tb;
@@ -50,6 +55,12 @@ struct tb_tunnel {
int (*init)(struct tb_tunnel *tunnel);
void (*deinit)(struct tb_tunnel *tunnel);
int (*activate)(struct tb_tunnel *tunnel, bool activate);
+ int (*maximum_bandwidth)(struct tb_tunnel *tunnel, int *max_up,
+ int *max_down);
+ int (*allocated_bandwidth)(struct tb_tunnel *tunnel, int *allocated_up,
+ int *allocated_down);
+ int (*alloc_bandwidth)(struct tb_tunnel *tunnel, int *alloc_up,
+ int *alloc_down);
int (*consumed_bandwidth)(struct tb_tunnel *tunnel, int *consumed_up,
int *consumed_down);
int (*release_unused_bandwidth)(struct tb_tunnel *tunnel);
@@ -62,6 +73,7 @@ struct tb_tunnel {
int max_down;
int allocated_up;
int allocated_down;
+ bool bw_mode;
};
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
@@ -92,6 +104,12 @@ void tb_tunnel_deactivate(struct tb_tunnel *tunnel);
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel);
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
const struct tb_port *port);
+int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
+ int *max_down);
+int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
+ int *allocated_down);
+int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
+ int *alloc_down);
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
int *consumed_down);
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel);
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 2ed50fcbcca7..1e5e9c147a31 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -2186,3 +2186,575 @@ err_request:
usb4_usb3_port_clear_cm_request(port);
return ret;
}
+
+static bool is_usb4_dpin(const struct tb_port *port)
+{
+ if (!tb_port_is_dpin(port))
+ return false;
+ if (!tb_switch_is_usb4(port->sw))
+ return false;
+ return true;
+}
+
+/**
+ * usb4_dp_port_set_cm_id() - Assign CM ID to the DP IN adapter
+ * @port: DP IN adapter
+ * @cm_id: CM ID to assign
+ *
+ * Sets CM ID for the @port. Returns %0 on success and negative errno
+ * otherwise. Specifically returns %-EOPNOTSUPP if the @port does not
+ * support this.
+ */
+int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id)
+{
+ u32 val;
+ int ret;
+
+ if (!is_usb4_dpin(port))
+ return -EOPNOTSUPP;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+ if (ret)
+ return ret;
+
+ val &= ~ADP_DP_CS_2_CM_ID_MASK;
+ val |= cm_id << ADP_DP_CS_2_CM_ID_SHIFT;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+}
+
+/**
+ * usb4_dp_port_bw_mode_supported() - Is the bandwidth allocation mode supported
+ * @port: DP IN adapter to check
+ *
+ * Can be called for any DP IN adapter. Returns true if the adapter
+ * supports USB4 bandwidth allocation mode, false otherwise.
+ */
+bool usb4_dp_port_bw_mode_supported(struct tb_port *port)
+{
+ int ret;
+ u32 val;
+
+ if (!is_usb4_dpin(port))
+ return false;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + DP_LOCAL_CAP, 1);
+ if (ret)
+ return false;
+
+ return !!(val & DP_COMMON_CAP_BW_MODE);
+}
+
+/**
+ * usb4_dp_port_bw_mode_enabled() - Is the bandwidth allocation mode enabled
+ * @port: DP IN adapter to check
+ *
+ * Can be called for any DP IN adapter. Returns true if the bandwidth
+ * allocation mode has been enabled, false otherwise.
+ */
+bool usb4_dp_port_bw_mode_enabled(struct tb_port *port)
+{
+ int ret;
+ u32 val;
+
+ if (!is_usb4_dpin(port))
+ return false;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_8, 1);
+ if (ret)
+ return false;
+
+ return !!(val & ADP_DP_CS_8_DPME);
+}
+
+/**
+ * usb4_dp_port_set_cm_bw_mode_supported() - Set/clear CM support for bandwidth allocation mode
+ * @port: DP IN adapter
+ * @supported: Does the CM support bandwidth allocation mode
+ *
+ * Can be called for any DP IN adapter. Sets or clears the CM support bit
+ * of the DP IN adapter. Returns %0 on success and negative errno
+ * otherwise. Specifically returns %-EOPNOTSUPP if the passed in adapter
+ * does not support this.
+ */
+int usb4_dp_port_set_cm_bw_mode_supported(struct tb_port *port, bool supported)
+{
+ u32 val;
+ int ret;
+
+ if (!is_usb4_dpin(port))
+ return -EOPNOTSUPP;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+ if (ret)
+ return ret;
+
+ if (supported)
+ val |= ADP_DP_CS_2_CMMS;
+ else
+ val &= ~ADP_DP_CS_2_CMMS;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+}
+
+/**
+ * usb4_dp_port_group_id() - Return Group ID assigned for the adapter
+ * @port: DP IN adapter
+ *
+ * Reads bandwidth allocation Group ID from the DP IN adapter and
+ * returns it. If the adapter does not support setting Group_ID
+ * %-EOPNOTSUPP is returned.
+ */
+int usb4_dp_port_group_id(struct tb_port *port)
+{
+ u32 val;
+ int ret;
+
+ if (!is_usb4_dpin(port))
+ return -EOPNOTSUPP;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+ if (ret)
+ return ret;
+
+ return (val & ADP_DP_CS_2_GROUP_ID_MASK) >> ADP_DP_CS_2_GROUP_ID_SHIFT;
+}
+
+/**
+ * usb4_dp_port_set_group_id() - Set adapter Group ID
+ * @port: DP IN adapter
+ * @group_id: Group ID for the adapter
+ *
+ * Sets bandwidth allocation mode Group ID for the DP IN adapter.
+ * Returns %0 in case of success and negative errno otherwise.
+ * Specifically returns %-EOPNOTSUPP if the adapter does not support
+ * this.
+ */
+int usb4_dp_port_set_group_id(struct tb_port *port, int group_id)
+{
+ u32 val;
+ int ret;
+
+ if (!is_usb4_dpin(port))
+ return -EOPNOTSUPP;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+ if (ret)
+ return ret;
+
+ val &= ~ADP_DP_CS_2_GROUP_ID_MASK;
+ val |= group_id << ADP_DP_CS_2_GROUP_ID_SHIFT;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+}
+
+/**
+ * usb4_dp_port_nrd() - Read non-reduced rate and lanes
+ * @port: DP IN adapter
+ * @rate: Non-reduced rate in Mb/s is placed here
+ * @lanes: Non-reduced lanes are placed here
+ *
+ * Reads the non-reduced rate and lanes from the DP IN adapter. Returns
+ * %0 on success and negative errno otherwise. Specifically returns
+ * %-EOPNOTSUPP if the adapter does not support this.
+ */
+int usb4_dp_port_nrd(struct tb_port *port, int *rate, int *lanes)
+{
+ u32 val, tmp;
+ int ret;
+
+ if (!is_usb4_dpin(port))
+ return -EOPNOTSUPP;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+ if (ret)
+ return ret;
+
+ tmp = (val & ADP_DP_CS_2_NRD_MLR_MASK) >> ADP_DP_CS_2_NRD_MLR_SHIFT;
+ switch (tmp) {
+ case DP_COMMON_CAP_RATE_RBR:
+ *rate = 1620;
+ break;
+ case DP_COMMON_CAP_RATE_HBR:
+ *rate = 2700;
+ break;
+ case DP_COMMON_CAP_RATE_HBR2:
+ *rate = 5400;
+ break;
+ case DP_COMMON_CAP_RATE_HBR3:
+ *rate = 8100;
+ break;
+ }
+
+ tmp = val & ADP_DP_CS_2_NRD_MLC_MASK;
+ switch (tmp) {
+ case DP_COMMON_CAP_1_LANE:
+ *lanes = 1;
+ break;
+ case DP_COMMON_CAP_2_LANES:
+ *lanes = 2;
+ break;
+ case DP_COMMON_CAP_4_LANES:
+ *lanes = 4;
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * usb4_dp_port_set_nrd() - Set non-reduced rate and lanes
+ * @port: DP IN adapter
+ * @rate: Non-reduced rate in Mb/s
+ * @lanes: Non-reduced lanes
+ *
+ * Before the capabilities reduction this function can be used to set
+ * the non-reduced values for the DP IN adapter. Returns %0 on success
+ * and negative errno otherwise. If the adapter does not support this
+ * %-EOPNOTSUPP is returned.
+ */
+int usb4_dp_port_set_nrd(struct tb_port *port, int rate, int lanes)
+{
+ u32 val;
+ int ret;
+
+ if (!is_usb4_dpin(port))
+ return -EOPNOTSUPP;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+ if (ret)
+ return ret;
+
+ val &= ~ADP_DP_CS_2_NRD_MLR_MASK;
+
+ switch (rate) {
+ case 1620:
+ break;
+ case 2700:
+ val |= (DP_COMMON_CAP_RATE_HBR << ADP_DP_CS_2_NRD_MLR_SHIFT)
+ & ADP_DP_CS_2_NRD_MLR_MASK;
+ break;
+ case 5400:
+ val |= (DP_COMMON_CAP_RATE_HBR2 << ADP_DP_CS_2_NRD_MLR_SHIFT)
+ & ADP_DP_CS_2_NRD_MLR_MASK;
+ break;
+ case 8100:
+ val |= (DP_COMMON_CAP_RATE_HBR3 << ADP_DP_CS_2_NRD_MLR_SHIFT)
+ & ADP_DP_CS_2_NRD_MLR_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val &= ~ADP_DP_CS_2_NRD_MLC_MASK;
+
+ switch (lanes) {
+ case 1:
+ break;
+ case 2:
+ val |= DP_COMMON_CAP_2_LANES;
+ break;
+ case 4:
+ val |= DP_COMMON_CAP_4_LANES;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+}
+
+/**
+ * usb4_dp_port_granularity() - Return granularity for the bandwidth values
+ * @port: DP IN adapter
+ *
+ * Reads the programmed granularity from @port. If the DP IN adapter does
+ * not support bandwidth allocation mode, %-EOPNOTSUPP is returned;
+ * negative errno is returned in other error cases.
+ */
+int usb4_dp_port_granularity(struct tb_port *port)
+{
+ u32 val;
+ int ret;
+
+ if (!is_usb4_dpin(port))
+ return -EOPNOTSUPP;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+ if (ret)
+ return ret;
+
+ val &= ADP_DP_CS_2_GR_MASK;
+ val >>= ADP_DP_CS_2_GR_SHIFT;
+
+ switch (val) {
+ case ADP_DP_CS_2_GR_0_25G:
+ return 250;
+ case ADP_DP_CS_2_GR_0_5G:
+ return 500;
+ case ADP_DP_CS_2_GR_1G:
+ return 1000;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * usb4_dp_port_set_granularity() - Set granularity for the bandwidth values
+ * @port: DP IN adapter
+ * @granularity: Granularity in Mb/s. Supported values: 1000, 500 and 250.
+ *
+ * Sets the granularity used with the estimated, allocated and requested
+ * bandwidth. Returns %0 on success and negative errno otherwise. If the
+ * adapter does not support this, %-EOPNOTSUPP is returned.
+ */
+int usb4_dp_port_set_granularity(struct tb_port *port, int granularity)
+{
+ u32 val;
+ int ret;
+
+ if (!is_usb4_dpin(port))
+ return -EOPNOTSUPP;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+ if (ret)
+ return ret;
+
+ val &= ~ADP_DP_CS_2_GR_MASK;
+
+ switch (granularity) {
+ case 250:
+ val |= ADP_DP_CS_2_GR_0_25G << ADP_DP_CS_2_GR_SHIFT;
+ break;
+ case 500:
+ val |= ADP_DP_CS_2_GR_0_5G << ADP_DP_CS_2_GR_SHIFT;
+ break;
+ case 1000:
+ val |= ADP_DP_CS_2_GR_1G << ADP_DP_CS_2_GR_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+}
+
+/**
+ * usb4_dp_port_set_estimated_bw() - Set estimated bandwidth
+ * @port: DP IN adapter
+ * @bw: Estimated bandwidth in Mb/s.
+ *
+ * Sets the estimated bandwidth to @bw. Set the granularity by calling
+ * usb4_dp_port_set_granularity() before calling this. The @bw is rounded
+ * down to the closest granularity multiple. Returns %0 on success
+ * and negative errno otherwise. Specifically returns %-EOPNOTSUPP if
+ * the adapter does not support this.
+ */
+int usb4_dp_port_set_estimated_bw(struct tb_port *port, int bw)
+{
+ u32 val, granularity;
+ int ret;
+
+ if (!is_usb4_dpin(port))
+ return -EOPNOTSUPP;
+
+ ret = usb4_dp_port_granularity(port);
+ if (ret < 0)
+ return ret;
+ granularity = ret;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+ if (ret)
+ return ret;
+
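+ /*
+ * The register stores the bandwidth in granularity units, so the value
+ * is rounded down: e.g. bw = 2700 Mb/s with a 250 Mb/s granularity is
+ * programmed as 2700 / 250 = 10 units (2500 Mb/s effective).
+ */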
+ val &= ~ADP_DP_CS_2_ESTIMATED_BW_MASK;
+ val |= (bw / granularity) << ADP_DP_CS_2_ESTIMATED_BW_SHIFT;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+}
+
+/**
+ * usb4_dp_port_allocated_bw() - Return allocated bandwidth
+ * @port: DP IN adapter
+ *
+ * Reads and returns allocated bandwidth for @port in Mb/s (taking into
+ * account the programmed granularity). Returns negative errno in case
+ * of error.
+ */
+int usb4_dp_port_allocated_bw(struct tb_port *port)
+{
+ u32 val, granularity;
+ int ret;
+
+ if (!is_usb4_dpin(port))
+ return -EOPNOTSUPP;
+
+ ret = usb4_dp_port_granularity(port);
+ if (ret < 0)
+ return ret;
+ granularity = ret;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + DP_STATUS, 1);
+ if (ret)
+ return ret;
+
+ val &= DP_STATUS_ALLOCATED_BW_MASK;
+ val >>= DP_STATUS_ALLOCATED_BW_SHIFT;
+
+ return val * granularity;
+}
+
+static int __usb4_dp_port_set_cm_ack(struct tb_port *port, bool ack)
+{
+ u32 val;
+ int ret;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+ if (ret)
+ return ret;
+
+ if (ack)
+ val |= ADP_DP_CS_2_CA;
+ else
+ val &= ~ADP_DP_CS_2_CA;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+}
+
+static inline int usb4_dp_port_set_cm_ack(struct tb_port *port)
+{
+ return __usb4_dp_port_set_cm_ack(port, true);
+}
+
+static int usb4_dp_port_wait_and_clear_cm_ack(struct tb_port *port,
+ int timeout_msec)
+{
+ ktime_t end;
+ u32 val;
+ int ret;
+
+ ret = __usb4_dp_port_set_cm_ack(port, false);
+ if (ret)
+ return ret;
+
+ end = ktime_add_ms(ktime_get(), timeout_msec);
+ do {
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_8, 1);
+ if (ret)
+ return ret;
+
+ if (!(val & ADP_DP_CS_8_DR))
+ break;
+
+ usleep_range(50, 100);
+ } while (ktime_before(ktime_get(), end));
+
+ if (val & ADP_DP_CS_8_DR)
+ return -ETIMEDOUT;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+ if (ret)
+ return ret;
+
+ val &= ~ADP_DP_CS_2_CA;
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+}
+
+/**
+ * usb4_dp_port_allocate_bw() - Set allocated bandwidth
+ * @port: DP IN adapter
+ * @bw: New allocated bandwidth in Mb/s
+ *
+ * Communicates the new allocated bandwidth with the DPCD (graphics
+ * driver). Takes into account the programmed granularity. Returns %0 on
+ * success and negative errno in case of error.
+ */
+int usb4_dp_port_allocate_bw(struct tb_port *port, int bw)
+{
+ u32 val, granularity;
+ int ret;
+
+ if (!is_usb4_dpin(port))
+ return -EOPNOTSUPP;
+
+ ret = usb4_dp_port_granularity(port);
+ if (ret < 0)
+ return ret;
+ granularity = ret;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + DP_STATUS, 1);
+ if (ret)
+ return ret;
+
+ val &= ~DP_STATUS_ALLOCATED_BW_MASK;
+ val |= (bw / granularity) << DP_STATUS_ALLOCATED_BW_SHIFT;
+
+ ret = tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_adap + DP_STATUS, 1);
+ if (ret)
+ return ret;
+
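+ /* Ack the new allocation and wait for the DR (request) bit to clear */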
+ ret = usb4_dp_port_set_cm_ack(port);
+ if (ret)
+ return ret;
+
+ return usb4_dp_port_wait_and_clear_cm_ack(port, 500);
+}
+
+/**
+ * usb4_dp_port_requested_bw() - Read requested bandwidth
+ * @port: DP IN adapter
+ *
+ * Reads the DPCD (graphics driver) requested bandwidth and returns it
+ * in Mb/s. Takes the programmed granularity into account. In case of
+ * error returns negative errno. Specifically returns %-EOPNOTSUPP if
+ * the adapter does not support bandwidth allocation mode, and %ENODATA
+ * if there is no active bandwidth request from the graphics driver.
+ */
+int usb4_dp_port_requested_bw(struct tb_port *port)
+{
+ u32 val, granularity;
+ int ret;
+
+ if (!is_usb4_dpin(port))
+ return -EOPNOTSUPP;
+
+ ret = usb4_dp_port_granularity(port);
+ if (ret < 0)
+ return ret;
+ granularity = ret;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_8, 1);
+ if (ret)
+ return ret;
+
+ if (!(val & ADP_DP_CS_8_DR))
+ return -ENODATA;
+
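+ /* E.g. raw value 8 with 250 Mb/s granularity means a 2000 Mb/s request */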
+ return (val & ADP_DP_CS_8_REQUESTED_BW_MASK) * granularity;
+}
diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
index f9aa50ff14d4..fff9ec9c391f 100644
--- a/drivers/usb/cdns3/cdnsp-gadget.c
+++ b/drivers/usb/cdns3/cdnsp-gadget.c
@@ -378,7 +378,7 @@ int cdnsp_ep_enqueue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
ret = cdnsp_queue_bulk_tx(pdev, preq);
break;
case USB_ENDPOINT_XFER_ISOC:
- ret = cdnsp_queue_isoc_tx_prepare(pdev, preq);
+ ret = cdnsp_queue_isoc_tx(pdev, preq);
}
if (ret)
diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h
index f740fa6089d8..e1b5801fdddf 100644
--- a/drivers/usb/cdns3/cdnsp-gadget.h
+++ b/drivers/usb/cdns3/cdnsp-gadget.h
@@ -1532,8 +1532,8 @@ void cdnsp_queue_stop_endpoint(struct cdnsp_device *pdev,
unsigned int ep_index);
int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq);
int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq);
-int cdnsp_queue_isoc_tx_prepare(struct cdnsp_device *pdev,
- struct cdnsp_request *preq);
+int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev,
+ struct cdnsp_request *preq);
void cdnsp_queue_configure_endpoint(struct cdnsp_device *pdev,
dma_addr_t in_ctx_ptr);
void cdnsp_queue_reset_ep(struct cdnsp_device *pdev, unsigned int ep_index);
diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
index b23e543b3a3d..07f6068342d4 100644
--- a/drivers/usb/cdns3/cdnsp-ring.c
+++ b/drivers/usb/cdns3/cdnsp-ring.c
@@ -1333,6 +1333,20 @@ static int cdnsp_handle_tx_event(struct cdnsp_device *pdev,
ep_ring->dequeue, td->last_trb,
ep_trb_dma);
+ desc = td->preq->pep->endpoint.desc;
+
+ if (ep_seg) {
+ ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma)
+ / sizeof(*ep_trb)];
+
+ trace_cdnsp_handle_transfer(ep_ring,
+ (struct cdnsp_generic_trb *)ep_trb);
+
+ if (pep->skip && usb_endpoint_xfer_isoc(desc) &&
+ td->last_trb != ep_trb)
+ return -EAGAIN;
+ }
+
/*
* Skip the Force Stopped Event. The event_trb(ep_trb_dma)
* of FSE is not in the current TD pointed by ep_ring->dequeue
@@ -1347,7 +1361,6 @@ static int cdnsp_handle_tx_event(struct cdnsp_device *pdev,
goto cleanup;
}
- desc = td->preq->pep->endpoint.desc;
if (!ep_seg) {
if (!pep->skip || !usb_endpoint_xfer_isoc(desc)) {
/* Something is busted, give up! */
@@ -1374,12 +1387,6 @@ static int cdnsp_handle_tx_event(struct cdnsp_device *pdev,
goto cleanup;
}
- ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma)
- / sizeof(*ep_trb)];
-
- trace_cdnsp_handle_transfer(ep_ring,
- (struct cdnsp_generic_trb *)ep_trb);
-
if (cdnsp_trb_is_noop(ep_trb))
goto cleanup;
@@ -1726,11 +1733,6 @@ static unsigned int count_sg_trbs_needed(struct cdnsp_request *preq)
return num_trbs;
}
-static unsigned int count_isoc_trbs_needed(struct cdnsp_request *preq)
-{
- return cdnsp_count_trbs(preq->request.dma, preq->request.length);
-}
-
static void cdnsp_check_trb_math(struct cdnsp_request *preq, int running_total)
{
if (running_total != preq->request.length)
@@ -2192,28 +2194,48 @@ static unsigned int
}
/* Queue function isoc transfer */
-static int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev,
- struct cdnsp_request *preq)
+int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev,
+ struct cdnsp_request *preq)
{
- int trb_buff_len, td_len, td_remain_len, ret;
+ unsigned int trb_buff_len, td_len, td_remain_len, block_len;
unsigned int burst_count, last_burst_pkt;
unsigned int total_pkt_count, max_pkt;
struct cdnsp_generic_trb *start_trb;
+ struct scatterlist *sg = NULL;
bool more_trbs_coming = true;
struct cdnsp_ring *ep_ring;
+ unsigned int num_sgs = 0;
int running_total = 0;
u32 field, length_field;
+ u64 addr, send_addr;
int start_cycle;
int trbs_per_td;
- u64 addr;
- int i;
+ int i, sent_len, ret;
ep_ring = preq->pep->ring;
+
+ td_len = preq->request.length;
+
+ if (preq->request.num_sgs) {
+ num_sgs = preq->request.num_sgs;
+ sg = preq->request.sg;
+ addr = (u64)sg_dma_address(sg);
+ block_len = sg_dma_len(sg);
+ trbs_per_td = count_sg_trbs_needed(preq);
+ } else {
+ addr = (u64)preq->request.dma;
+ block_len = td_len;
+ trbs_per_td = count_trbs_needed(preq);
+ }
+
+ ret = cdnsp_prepare_transfer(pdev, preq, trbs_per_td);
+ if (ret)
+ return ret;
+
start_trb = &ep_ring->enqueue->generic;
start_cycle = ep_ring->cycle_state;
- td_len = preq->request.length;
- addr = (u64)preq->request.dma;
td_remain_len = td_len;
+ send_addr = addr;
max_pkt = usb_endpoint_maxp(preq->pep->endpoint.desc);
total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);
@@ -2225,11 +2247,6 @@ static int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev,
burst_count = cdnsp_get_burst_count(pdev, preq, total_pkt_count);
last_burst_pkt = cdnsp_get_last_burst_packet_count(pdev, preq,
total_pkt_count);
- trbs_per_td = count_isoc_trbs_needed(preq);
-
- ret = cdnsp_prepare_transfer(pdev, preq, trbs_per_td);
- if (ret)
- goto cleanup;
/*
* Set isoc specific data for the first TRB in a TD.
@@ -2248,6 +2265,7 @@ static int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev,
/* Calculate TRB length. */
trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
+ trb_buff_len = min(trb_buff_len, block_len);
if (trb_buff_len > td_remain_len)
trb_buff_len = td_remain_len;
@@ -2256,7 +2274,8 @@ static int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev,
trb_buff_len, td_len, preq,
more_trbs_coming, 0);
- length_field = TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0);
+ length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) |
+ TRB_INTR_TARGET(0);
/* Only first TRB is isoc, overwrite otherwise. */
if (i) {
@@ -2281,12 +2300,27 @@ static int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev,
}
cdnsp_queue_trb(pdev, ep_ring, more_trbs_coming,
- lower_32_bits(addr), upper_32_bits(addr),
+ lower_32_bits(send_addr), upper_32_bits(send_addr),
length_field, field);
running_total += trb_buff_len;
addr += trb_buff_len;
td_remain_len -= trb_buff_len;
+
+ sent_len = trb_buff_len;
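+ /*
+ * Move to the next sg entry once the current one has been fully
+ * queued; any remaining sent_len becomes the offset into it.
+ */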
+ while (sg && sent_len >= block_len) {
+ /* New sg entry */
+ --num_sgs;
+ sent_len -= block_len;
+ if (num_sgs != 0) {
+ sg = sg_next(sg);
+ block_len = sg_dma_len(sg);
+ addr = (u64)sg_dma_address(sg);
+ addr += sent_len;
+ }
+ }
+ block_len -= sent_len;
+ send_addr = addr;
}
/* Check TD length */
@@ -2324,30 +2358,6 @@ cleanup:
return ret;
}
-int cdnsp_queue_isoc_tx_prepare(struct cdnsp_device *pdev,
- struct cdnsp_request *preq)
-{
- struct cdnsp_ring *ep_ring;
- u32 ep_state;
- int num_trbs;
- int ret;
-
- ep_ring = preq->pep->ring;
- ep_state = GET_EP_CTX_STATE(preq->pep->out_ctx);
- num_trbs = count_isoc_trbs_needed(preq);
-
- /*
- * Check the ring to guarantee there is enough room for the whole
- * request. Do not insert any td of the USB Request to the ring if the
- * check failed.
- */
- ret = cdnsp_prepare_ring(pdev, ep_ring, ep_state, num_trbs, GFP_ATOMIC);
- if (ret)
- return ret;
-
- return cdnsp_queue_isoc_tx(pdev, preq);
-}
-
/**** Command Ring Operations ****/
/*
* Generic function for queuing a command TRB on the command ring.
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 0dc482542d85..2eeccf4ec9d6 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -413,15 +413,19 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
data->phy = devm_usb_get_phy_by_phandle(dev, "fsl,usbphy", 0);
if (IS_ERR(data->phy)) {
ret = PTR_ERR(data->phy);
- if (ret != -ENODEV)
+ if (ret != -ENODEV) {
+ dev_err_probe(dev, ret, "Failed to parse fsl,usbphy\n");
goto err_clk;
+ }
data->phy = devm_usb_get_phy_by_phandle(dev, "phys", 0);
if (IS_ERR(data->phy)) {
ret = PTR_ERR(data->phy);
- if (ret == -ENODEV)
+ if (ret == -ENODEV) {
data->phy = NULL;
- else
+ } else {
+ dev_err_probe(dev, ret, "Failed to parse phys\n");
goto err_clk;
+ }
}
}
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index faf6b078b6c4..bbc610e5bd69 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -364,5 +364,5 @@ void dbg_create_files(struct ci_hdrc *ci)
*/
void dbg_remove_files(struct ci_hdrc *ci)
{
- debugfs_remove(debugfs_lookup(dev_name(ci->dev), usb_debug_root));
+ debugfs_lookup_and_remove(dev_name(ci->dev), usb_debug_root);
}
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index acdb13316cd0..c57c1a71a513 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -1263,14 +1263,8 @@ static int usbmisc_imx_probe(struct platform_device *pdev)
return 0;
}
-static int usbmisc_imx_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
static struct platform_driver usbmisc_imx_driver = {
.probe = usbmisc_imx_probe,
- .remove = usbmisc_imx_remove,
.driver = {
.name = "usbmisc_imx",
.of_match_table = usbmisc_imx_dt_ids,
diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
index d7c8461976ce..38703781ee2d 100644
--- a/drivers/usb/common/ulpi.c
+++ b/drivers/usb/common/ulpi.c
@@ -271,7 +271,7 @@ static int ulpi_regs_show(struct seq_file *seq, void *data)
}
DEFINE_SHOW_ATTRIBUTE(ulpi_regs);
-#define ULPI_ROOT debugfs_lookup(KBUILD_MODNAME, NULL)
+static struct dentry *ulpi_root;
static int ulpi_register(struct device *dev, struct ulpi *ulpi)
{
@@ -301,7 +301,7 @@ static int ulpi_register(struct device *dev, struct ulpi *ulpi)
return ret;
}
- root = debugfs_create_dir(dev_name(dev), ULPI_ROOT);
+ root = debugfs_create_dir(dev_name(dev), ulpi_root);
debugfs_create_file("regs", 0444, root, ulpi, &ulpi_regs_fops);
dev_dbg(&ulpi->dev, "registered ULPI PHY: vendor %04x, product %04x\n",
@@ -349,8 +349,7 @@ EXPORT_SYMBOL_GPL(ulpi_register_interface);
*/
void ulpi_unregister_interface(struct ulpi *ulpi)
{
- debugfs_remove_recursive(debugfs_lookup(dev_name(&ulpi->dev),
- ULPI_ROOT));
+ debugfs_lookup_and_remove(dev_name(&ulpi->dev), ulpi_root);
device_unregister(&ulpi->dev);
}
EXPORT_SYMBOL_GPL(ulpi_unregister_interface);
@@ -360,12 +359,11 @@ EXPORT_SYMBOL_GPL(ulpi_unregister_interface);
static int __init ulpi_init(void)
{
int ret;
- struct dentry *root;
- root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ ulpi_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
ret = bus_register(&ulpi_bus);
if (ret)
- debugfs_remove(root);
+ debugfs_remove(ulpi_root);
return ret;
}
subsys_initcall(ulpi_init);
@@ -373,7 +371,7 @@ subsys_initcall(ulpi_init);
static void __exit ulpi_exit(void)
{
bus_unregister(&ulpi_bus);
- debugfs_remove_recursive(ULPI_ROOT);
+ debugfs_remove(ulpi_root);
}
module_exit(ulpi_exit);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 9eca403af2a8..97a0f8faea6e 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2389,9 +2389,8 @@ static int usb_enumerate_device_otg(struct usb_device *udev)
* usb_enumerate_device - Read device configs/intfs/otg (usbcore-internal)
* @udev: newly addressed device (in ADDRESS state)
*
- * This is only called by usb_new_device() and usb_authorize_device()
- * and FIXME -- all comments that apply to them apply here wrt to
- * environment.
+ * This is only called by usb_new_device() -- all comments that apply there
+ * apply here wrt the environment.
*
* If the device is WUSB and not authorized, we don't attempt to read
* the string descriptors, as they will be errored out by the device
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 8217032dfb85..b63f78e48c74 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -869,11 +869,7 @@ read_descriptors(struct file *filp, struct kobject *kobj,
size_t srclen, n;
int cfgno;
void *src;
- int retval;
- retval = usb_lock_device_interruptible(udev);
- if (retval < 0)
- return -EINTR;
/* The binary attribute begins with the device descriptor.
* Following that are the raw descriptor entries for all the
* configurations (config plus subsidiary descriptors).
@@ -898,7 +894,6 @@ read_descriptors(struct file *filp, struct kobject *kobj,
off -= srclen;
}
}
- usb_unlock_device(udev);
return count - nleft;
}
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 11b15d7b357a..a415206cab04 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -998,7 +998,7 @@ static void usb_debugfs_init(void)
static void usb_debugfs_cleanup(void)
{
- debugfs_remove(debugfs_lookup("devices", usb_debug_root));
+ debugfs_lookup_and_remove("devices", usb_debug_root);
}
/*
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 8f9959ba9fd4..582ebd9cf9c2 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1117,6 +1117,7 @@ struct dwc3_scratchpad_array {
* address.
* @num_ep_resized: carries the current number endpoints which have had its tx
* fifo resized.
+ * @debug_root: root debugfs directory for this device to put its files in.
*/
struct dwc3 {
struct work_struct drd_work;
@@ -1332,6 +1333,7 @@ struct dwc3 {
int max_cfg_eps;
int last_fifo_depth;
int num_ep_resized;
+ struct dentry *debug_root;
};
#define INCRX_BURST_MODE 0
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index 48b44b88dc25..8bb2c9e3b9ac 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -414,11 +414,14 @@ static inline const char *dwc3_gadget_generic_cmd_status_string(int status)
#ifdef CONFIG_DEBUG_FS
extern void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep);
+extern void dwc3_debugfs_remove_endpoint_dir(struct dwc3_ep *dep);
extern void dwc3_debugfs_init(struct dwc3 *d);
extern void dwc3_debugfs_exit(struct dwc3 *d);
#else
static inline void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
{ }
+static inline void dwc3_debugfs_remove_endpoint_dir(struct dwc3_ep *dep)
+{ }
static inline void dwc3_debugfs_init(struct dwc3 *d)
{ }
static inline void dwc3_debugfs_exit(struct dwc3 *d)
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index f2b7675c7f62..850df0e6bcab 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -873,27 +873,23 @@ static const struct dwc3_ep_file_map dwc3_ep_file_map[] = {
{ "GDBGEPINFO", &dwc3_ep_info_register_fops, },
};
-static void dwc3_debugfs_create_endpoint_files(struct dwc3_ep *dep,
- struct dentry *parent)
+void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
{
+ struct dentry *dir;
int i;
+ dir = debugfs_create_dir(dep->name, dep->dwc->debug_root);
for (i = 0; i < ARRAY_SIZE(dwc3_ep_file_map); i++) {
const struct file_operations *fops = dwc3_ep_file_map[i].fops;
const char *name = dwc3_ep_file_map[i].name;
- debugfs_create_file(name, 0444, parent, dep, fops);
+ debugfs_create_file(name, 0444, dir, dep, fops);
}
}
-void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
+void dwc3_debugfs_remove_endpoint_dir(struct dwc3_ep *dep)
{
- struct dentry *dir;
- struct dentry *root;
-
- root = debugfs_lookup(dev_name(dep->dwc->dev), usb_debug_root);
- dir = debugfs_create_dir(dep->name, root);
- dwc3_debugfs_create_endpoint_files(dep, dir);
+ debugfs_lookup_and_remove(dep->name, dep->dwc->debug_root);
}
void dwc3_debugfs_init(struct dwc3 *dwc)
@@ -911,6 +907,7 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
dwc->regset->base = dwc->regs - DWC3_GLOBALS_REGS_START;
root = debugfs_create_dir(dev_name(dwc->dev), usb_debug_root);
+ dwc->debug_root = root;
debugfs_create_regset32("regdump", 0444, root, dwc->regset);
debugfs_create_file("lsp_dump", 0644, root, dwc, &dwc3_lsp_fops);
@@ -929,6 +926,6 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
void dwc3_debugfs_exit(struct dwc3 *dwc)
{
- debugfs_remove(debugfs_lookup(dev_name(dwc->dev), usb_debug_root));
+ debugfs_lookup_and_remove(dev_name(dwc->dev), usb_debug_root);
kfree(dwc->regset);
}
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 89c9ab2b19f8..a23ddbb81979 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -47,6 +47,7 @@
#define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1
#define PCI_DEVICE_ID_INTEL_RPL 0xa70e
#define PCI_DEVICE_ID_INTEL_RPLS 0x7a61
+#define PCI_DEVICE_ID_INTEL_MTLM 0x7eb1
#define PCI_DEVICE_ID_INTEL_MTLP 0x7ec1
#define PCI_DEVICE_ID_INTEL_MTL 0x7e7e
#define PCI_DEVICE_ID_INTEL_TGL 0x9a15
@@ -467,6 +468,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPLS),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTLM),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTLP),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c
index 0745e9f11b2e..2c36f97652ca 100644
--- a/drivers/usb/dwc3/dwc3-xilinx.c
+++ b/drivers/usb/dwc3/dwc3-xilinx.c
@@ -14,7 +14,6 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/gpio/consumer.h>
-#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 89dcfac01235..3c63fa97a680 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -3194,9 +3194,7 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
list_del(&dep->endpoint.ep_list);
}
- debugfs_remove_recursive(debugfs_lookup(dep->name,
- debugfs_lookup(dev_name(dep->dwc->dev),
- usb_debug_root)));
+ dwc3_debugfs_remove_endpoint_dir(dep);
kfree(dep);
}
}
diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
index 797047154820..341408410ed9 100644
--- a/drivers/usb/early/xhci-dbc.c
+++ b/drivers/usb/early/xhci-dbc.c
@@ -499,8 +499,7 @@ static int xdbc_bulk_transfer(void *data, int size, bool read)
addr = xdbc.in_dma;
xdbc.flags |= XDBC_FLAGS_IN_PROCESS;
} else {
- memset(xdbc.out_buf, 0, XDBC_MAX_PACKET);
- memcpy(xdbc.out_buf, data, size);
+ memcpy_and_pad(xdbc.out_buf, XDBC_MAX_PACKET, data, size, 0);
addr = xdbc.out_dma;
xdbc.flags |= XDBC_FLAGS_OUT_PROCESS;
}
@@ -874,13 +873,14 @@ retry:
static void early_xdbc_write(struct console *con, const char *str, u32 n)
{
- static char buf[XDBC_MAX_PACKET];
+ /* static variables are zeroed, so buf is always NULL terminated */
+ static char buf[XDBC_MAX_PACKET + 1];
int chunk, ret;
int use_cr = 0;
if (!xdbc.xdbc_reg)
return;
- memset(buf, 0, XDBC_MAX_PACKET);
+
while (n > 0) {
for (chunk = 0; chunk < XDBC_MAX_PACKET && n > 0; str++, chunk++, n--) {
diff --git a/drivers/usb/fotg210/Kconfig b/drivers/usb/fotg210/Kconfig
index 2b05968735ba..87a16258274e 100644
--- a/drivers/usb/fotg210/Kconfig
+++ b/drivers/usb/fotg210/Kconfig
@@ -29,7 +29,7 @@ config USB_FOTG210_UDC
bool "Faraday FOTG210 USB Peripheral Controller support"
help
Faraday USB2.0 OTG controller which can be configured as
- high speed or full speed USB device. This driver suppports
+ high speed or full speed USB device. This driver supports
Bulk Transfer so far.
Say "y" to link the driver statically, or "m" to build a
diff --git a/drivers/usb/fotg210/fotg210-core.c b/drivers/usb/fotg210/fotg210-core.c
index ee740a6da463..cb75464ab290 100644
--- a/drivers/usb/fotg210/fotg210-core.c
+++ b/drivers/usb/fotg210/fotg210-core.c
@@ -6,6 +6,7 @@
* driver.
*/
#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/device.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
@@ -17,6 +18,11 @@
#include "fotg210.h"
+/* Role Register 0x80 */
+#define FOTG210_RR 0x80
+#define FOTG210_RR_ID BIT(21) /* 1 = B-device, 0 = A-device */
+#define FOTG210_RR_CROLE BIT(20) /* 1 = device, 0 = host */
+
/*
* Gemini-specific initialization function, only executed on the
* Gemini SoC using the global misc control register.
@@ -33,9 +39,10 @@
#define GEMINI_MISC_USB0_MINI_B BIT(29)
#define GEMINI_MISC_USB1_MINI_B BIT(30)
-static int fotg210_gemini_init(struct device *dev, struct resource *res,
+static int fotg210_gemini_init(struct fotg210 *fotg, struct resource *res,
enum usb_dr_mode mode)
{
+ struct device *dev = fotg->dev;
struct device_node *np = dev->of_node;
struct regmap *map;
bool wakeup;
@@ -43,10 +50,9 @@ static int fotg210_gemini_init(struct device *dev, struct resource *res,
int ret;
map = syscon_regmap_lookup_by_phandle(np, "syscon");
- if (IS_ERR(map)) {
- dev_err(dev, "no syscon\n");
- return PTR_ERR(map);
- }
+ if (IS_ERR(map))
+ return dev_err_probe(dev, PTR_ERR(map), "no syscon\n");
+ fotg->map = map;
wakeup = of_property_read_bool(np, "wakeup-source");
/*
@@ -55,6 +61,7 @@ static int fotg210_gemini_init(struct device *dev, struct resource *res,
*/
mask = 0;
if (res->start == 0x69000000) {
+ fotg->port = GEMINI_PORT_1;
mask = GEMINI_MISC_USB1_VBUS_ON | GEMINI_MISC_USB1_MINI_B |
GEMINI_MISC_USB1_WAKEUP;
if (mode == USB_DR_MODE_HOST)
@@ -64,6 +71,7 @@ static int fotg210_gemini_init(struct device *dev, struct resource *res,
if (wakeup)
val |= GEMINI_MISC_USB1_WAKEUP;
} else {
+ fotg->port = GEMINI_PORT_0;
mask = GEMINI_MISC_USB0_VBUS_ON | GEMINI_MISC_USB0_MINI_B |
GEMINI_MISC_USB0_WAKEUP;
if (mode == USB_DR_MODE_HOST)
@@ -85,27 +93,74 @@ static int fotg210_gemini_init(struct device *dev, struct resource *res,
return 0;
}
+/**
+ * fotg210_vbus() - Called by gadget driver to enable/disable VBUS
+ * @fotg: FOTG210 device
+ * @enable: true to enable VBUS, false to disable VBUS
+ */
+void fotg210_vbus(struct fotg210 *fotg, bool enable)
+{
+ u32 mask;
+ u32 val;
+ int ret;
+
+ switch (fotg->port) {
+ case GEMINI_PORT_0:
+ mask = GEMINI_MISC_USB0_VBUS_ON;
+ val = enable ? GEMINI_MISC_USB0_VBUS_ON : 0;
+ break;
+ case GEMINI_PORT_1:
+ mask = GEMINI_MISC_USB1_VBUS_ON;
+ val = enable ? GEMINI_MISC_USB1_VBUS_ON : 0;
+ break;
+ default:
+ return;
+ }
+ ret = regmap_update_bits(fotg->map, GEMINI_GLOBAL_MISC_CTRL, mask, val);
+ if (ret)
+ dev_err(fotg->dev, "failed to %s VBUS\n",
+ enable ? "enable" : "disable");
+ dev_info(fotg->dev, "%s: %s VBUS\n", __func__, enable ? "enable" : "disable");
+}
+
static int fotg210_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
enum usb_dr_mode mode;
+ struct fotg210 *fotg;
+ u32 val;
int ret;
+ fotg = devm_kzalloc(dev, sizeof(*fotg), GFP_KERNEL);
+ if (!fotg)
+ return -ENOMEM;
+ fotg->dev = dev;
+
+ fotg->base = devm_platform_get_and_ioremap_resource(pdev, 0, &fotg->res);
+ if (IS_ERR(fotg->base))
+ return PTR_ERR(fotg->base);
+
+ fotg->pclk = devm_clk_get_optional_enabled(dev, "PCLK");
+ if (IS_ERR(fotg->pclk))
+ return PTR_ERR(fotg->pclk);
+
mode = usb_get_dr_mode(dev);
if (of_device_is_compatible(dev->of_node, "cortina,gemini-usb")) {
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ret = fotg210_gemini_init(dev, res, mode);
+ ret = fotg210_gemini_init(fotg, fotg->res, mode);
if (ret)
return ret;
}
- if (mode == USB_DR_MODE_PERIPHERAL)
- ret = fotg210_udc_probe(pdev);
- else
- ret = fotg210_hcd_probe(pdev);
+ val = readl(fotg->base + FOTG210_RR);
+ if (mode == USB_DR_MODE_PERIPHERAL) {
+ if (!(val & FOTG210_RR_CROLE))
+ dev_err(dev, "block not in device role\n");
+ ret = fotg210_udc_probe(pdev, fotg);
+ } else {
+ if (val & FOTG210_RR_CROLE)
+ dev_err(dev, "block not in host role\n");
+ ret = fotg210_hcd_probe(pdev, fotg);
+ }
return ret;
}
@@ -127,7 +182,9 @@ static int fotg210_remove(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id fotg210_of_match[] = {
+ { .compatible = "faraday,fotg200" },
{ .compatible = "faraday,fotg210" },
+ /* TODO: can we also handle FUSB220? */
{},
};
MODULE_DEVICE_TABLE(of, fotg210_of_match);
diff --git a/drivers/usb/fotg210/fotg210-hcd.c b/drivers/usb/fotg210/fotg210-hcd.c
index 51ac93a2eb98..929106c16b29 100644
--- a/drivers/usb/fotg210/fotg210-hcd.c
+++ b/drivers/usb/fotg210/fotg210-hcd.c
@@ -33,7 +33,6 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
-#include <linux/clk.h>
#include <asm/byteorder.h>
#include <asm/irq.h>
@@ -862,7 +861,7 @@ static inline void remove_debug_files(struct fotg210_hcd *fotg210)
{
struct usb_bus *bus = &fotg210_to_hcd(fotg210)->self;
- debugfs_remove(debugfs_lookup(bus->bus_name, fotg210_debug_root));
+ debugfs_lookup_and_remove(bus->bus_name, fotg210_debug_root);
}
/* handshake - spin reading hc until handshake completes or fails
@@ -4687,14 +4686,11 @@ static ssize_t uframe_periodic_max_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fotg210_hcd *fotg210;
- int n;
fotg210 = hcd_to_fotg210(bus_to_hcd(dev_get_drvdata(dev)));
- n = scnprintf(buf, PAGE_SIZE, "%d\n", fotg210->uframe_periodic_max);
- return n;
+ return sysfs_emit(buf, "%d\n", fotg210->uframe_periodic_max);
}
-
static ssize_t uframe_periodic_max_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
@@ -4706,8 +4702,10 @@ static ssize_t uframe_periodic_max_store(struct device *dev,
ssize_t ret;
fotg210 = hcd_to_fotg210(bus_to_hcd(dev_get_drvdata(dev)));
- if (kstrtouint(buf, 0, &uframe_periodic_max) < 0)
- return -EINVAL;
+
+ ret = kstrtouint(buf, 0, &uframe_periodic_max);
+ if (ret)
+ return ret;
if (uframe_periodic_max < 100 || uframe_periodic_max >= 125) {
fotg210_info(fotg210, "rejecting invalid request for uframe_periodic_max=%u\n",
@@ -5557,11 +5555,10 @@ static void fotg210_init(struct fotg210_hcd *fotg210)
* then invokes the start() method for the HCD associated with it
* through the hotplug entry's driver_data.
*/
-int fotg210_hcd_probe(struct platform_device *pdev)
+int fotg210_hcd_probe(struct platform_device *pdev, struct fotg210 *fotg)
{
struct device *dev = &pdev->dev;
struct usb_hcd *hcd;
- struct resource *res;
int irq;
int retval;
struct fotg210_hcd *fotg210;
@@ -5578,70 +5575,42 @@ int fotg210_hcd_probe(struct platform_device *pdev)
hcd = usb_create_hcd(&fotg210_fotg210_hc_driver, dev,
dev_name(dev));
if (!hcd) {
- dev_err(dev, "failed to create hcd\n");
- retval = -ENOMEM;
+ retval = dev_err_probe(dev, -ENOMEM, "failed to create hcd\n");
goto fail_create_hcd;
}
hcd->has_tt = 1;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(hcd->regs)) {
- retval = PTR_ERR(hcd->regs);
- goto failed_put_hcd;
- }
+ hcd->regs = fotg->base;
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
+ hcd->rsrc_start = fotg->res->start;
+ hcd->rsrc_len = resource_size(fotg->res);
fotg210 = hcd_to_fotg210(hcd);
+ fotg210->fotg = fotg;
fotg210->caps = hcd->regs;
- /* It's OK not to supply this clock */
- fotg210->pclk = clk_get(dev, "PCLK");
- if (!IS_ERR(fotg210->pclk)) {
- retval = clk_prepare_enable(fotg210->pclk);
- if (retval) {
- dev_err(dev, "failed to enable PCLK\n");
- goto failed_put_hcd;
- }
- } else if (PTR_ERR(fotg210->pclk) == -EPROBE_DEFER) {
- /*
- * Percolate deferrals, for anything else,
- * just live without the clocking.
- */
- retval = PTR_ERR(fotg210->pclk);
- goto failed_dis_clk;
- }
-
retval = fotg210_setup(hcd);
if (retval)
- goto failed_dis_clk;
+ goto failed_put_hcd;
fotg210_init(fotg210);
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval) {
- dev_err(dev, "failed to add hcd with err %d\n", retval);
- goto failed_dis_clk;
+ dev_err_probe(dev, retval, "failed to add hcd\n");
+ goto failed_put_hcd;
}
device_wakeup_enable(hcd->self.controller);
platform_set_drvdata(pdev, hcd);
return retval;
-failed_dis_clk:
- if (!IS_ERR(fotg210->pclk)) {
- clk_disable_unprepare(fotg210->pclk);
- clk_put(fotg210->pclk);
- }
failed_put_hcd:
usb_put_hcd(hcd);
fail_create_hcd:
- dev_err(dev, "init %s fail, %d\n", dev_name(dev), retval);
- return retval;
+ return dev_err_probe(dev, retval, "init %s fail\n", dev_name(dev));
}
/*
@@ -5652,12 +5621,6 @@ fail_create_hcd:
int fotg210_hcd_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
- struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
-
- if (!IS_ERR(fotg210->pclk)) {
- clk_disable_unprepare(fotg210->pclk);
- clk_put(fotg210->pclk);
- }
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
diff --git a/drivers/usb/fotg210/fotg210-hcd.h b/drivers/usb/fotg210/fotg210-hcd.h
index 0781442b7a24..13c9342982ee 100644
--- a/drivers/usb/fotg210/fotg210-hcd.h
+++ b/drivers/usb/fotg210/fotg210-hcd.h
@@ -182,6 +182,7 @@ struct fotg210_hcd { /* one per controller */
# define INCR(x) do {} while (0)
#endif
+ struct fotg210 *fotg; /* Overarching FOTG210 device */
/* silicon clock */
struct clk *pclk;
};
diff --git a/drivers/usb/fotg210/fotg210-udc.c b/drivers/usb/fotg210/fotg210-udc.c
index eb076746f032..f7ea84070554 100644
--- a/drivers/usb/fotg210/fotg210-udc.c
+++ b/drivers/usb/fotg210/fotg210-udc.c
@@ -7,6 +7,7 @@
* Author : Yuan-Hsin Chen <yhchen@faraday-tech.com>
*/
+#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -15,7 +16,6 @@
#include <linux/platform_device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
-#include <linux/clk.h>
#include <linux/usb/otg.h>
#include <linux/usb/phy.h>
@@ -29,6 +29,14 @@ static const char udc_name[] = "fotg210_udc";
static const char * const fotg210_ep_name[] = {
"ep0", "ep1", "ep2", "ep3", "ep4"};
+static void fotg210_ack_int(struct fotg210_udc *fotg210, u32 offset, u32 mask)
+{
+ u32 value = ioread32(fotg210->reg + offset);
+
+ value &= ~mask;
+ iowrite32(value, fotg210->reg + offset);
+}
+
static void fotg210_disable_fifo_int(struct fotg210_ep *ep)
{
u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR1);
@@ -304,8 +312,7 @@ static void fotg210_wait_dma_done(struct fotg210_ep *ep)
goto dma_reset;
} while (!(value & DISGR2_DMA_CMPLT));
- value &= ~DISGR2_DMA_CMPLT;
- iowrite32(value, ep->fotg210->reg + FOTG210_DISGR2);
+ fotg210_ack_int(ep->fotg210, FOTG210_DISGR2, DISGR2_DMA_CMPLT);
return;
dma_reset:
@@ -710,6 +717,20 @@ static int fotg210_is_epnstall(struct fotg210_ep *ep)
return value & INOUTEPMPSR_STL_EP ? 1 : 0;
}
+/* For EP0 requests triggered by this driver (currently GET_STATUS response) */
+static void fotg210_ep0_complete(struct usb_ep *_ep, struct usb_request *req)
+{
+ struct fotg210_ep *ep;
+ struct fotg210_udc *fotg210;
+
+ ep = container_of(_ep, struct fotg210_ep, ep);
+ fotg210 = ep->fotg210;
+
+ if (req->status || req->actual != req->length) {
+ dev_warn(&fotg210->gadget.dev, "EP0 request failed: %d\n", req->status);
+ }
+}
+
static void fotg210_get_status(struct fotg210_udc *fotg210,
struct usb_ctrlrequest *ctrl)
{
@@ -831,14 +852,6 @@ static void fotg210_ep0in(struct fotg210_udc *fotg210)
}
}
-static void fotg210_clear_comabt_int(struct fotg210_udc *fotg210)
-{
- u32 value = ioread32(fotg210->reg + FOTG210_DISGR0);
-
- value &= ~DISGR0_CX_COMABT_INT;
- iowrite32(value, fotg210->reg + FOTG210_DISGR0);
-}
-
static void fotg210_in_fifo_handler(struct fotg210_ep *ep)
{
struct fotg210_request *req = list_entry(ep->queue.next,
@@ -880,60 +893,43 @@ static irqreturn_t fotg210_irq(int irq, void *_fotg210)
void __iomem *reg = fotg210->reg + FOTG210_DISGR2;
u32 int_grp2 = ioread32(reg);
u32 int_msk2 = ioread32(fotg210->reg + FOTG210_DMISGR2);
- u32 value;
int_grp2 &= ~int_msk2;
if (int_grp2 & DISGR2_USBRST_INT) {
usb_gadget_udc_reset(&fotg210->gadget,
fotg210->driver);
- value = ioread32(reg);
- value &= ~DISGR2_USBRST_INT;
- iowrite32(value, reg);
+ fotg210_ack_int(fotg210, FOTG210_DISGR2, DISGR2_USBRST_INT);
pr_info("fotg210 udc reset\n");
}
if (int_grp2 & DISGR2_SUSP_INT) {
- value = ioread32(reg);
- value &= ~DISGR2_SUSP_INT;
- iowrite32(value, reg);
+ fotg210_ack_int(fotg210, FOTG210_DISGR2, DISGR2_SUSP_INT);
pr_info("fotg210 udc suspend\n");
}
if (int_grp2 & DISGR2_RESM_INT) {
- value = ioread32(reg);
- value &= ~DISGR2_RESM_INT;
- iowrite32(value, reg);
+ fotg210_ack_int(fotg210, FOTG210_DISGR2, DISGR2_RESM_INT);
pr_info("fotg210 udc resume\n");
}
if (int_grp2 & DISGR2_ISO_SEQ_ERR_INT) {
- value = ioread32(reg);
- value &= ~DISGR2_ISO_SEQ_ERR_INT;
- iowrite32(value, reg);
+ fotg210_ack_int(fotg210, FOTG210_DISGR2, DISGR2_ISO_SEQ_ERR_INT);
pr_info("fotg210 iso sequence error\n");
}
if (int_grp2 & DISGR2_ISO_SEQ_ABORT_INT) {
- value = ioread32(reg);
- value &= ~DISGR2_ISO_SEQ_ABORT_INT;
- iowrite32(value, reg);
+ fotg210_ack_int(fotg210, FOTG210_DISGR2, DISGR2_ISO_SEQ_ABORT_INT);
pr_info("fotg210 iso sequence abort\n");
}
if (int_grp2 & DISGR2_TX0BYTE_INT) {
fotg210_clear_tx0byte(fotg210);
- value = ioread32(reg);
- value &= ~DISGR2_TX0BYTE_INT;
- iowrite32(value, reg);
+ fotg210_ack_int(fotg210, FOTG210_DISGR2, DISGR2_TX0BYTE_INT);
pr_info("fotg210 transferred 0 byte\n");
}
if (int_grp2 & DISGR2_RX0BYTE_INT) {
fotg210_clear_rx0byte(fotg210);
- value = ioread32(reg);
- value &= ~DISGR2_RX0BYTE_INT;
- iowrite32(value, reg);
+ fotg210_ack_int(fotg210, FOTG210_DISGR2, DISGR2_RX0BYTE_INT);
pr_info("fotg210 received 0 byte\n");
}
if (int_grp2 & DISGR2_DMA_ERROR) {
- value = ioread32(reg);
- value &= ~DISGR2_DMA_ERROR;
- iowrite32(value, reg);
+ fotg210_ack_int(fotg210, FOTG210_DISGR2, DISGR2_DMA_ERROR);
}
}
@@ -947,7 +943,7 @@ static irqreturn_t fotg210_irq(int irq, void *_fotg210)
/* the highest priority in this source register */
if (int_grp0 & DISGR0_CX_COMABT_INT) {
- fotg210_clear_comabt_int(fotg210);
+ fotg210_ack_int(fotg210, FOTG210_DISGR0, DISGR0_CX_COMABT_INT);
pr_info("fotg210 CX command abort\n");
}
@@ -1015,6 +1011,10 @@ static int fotg210_udc_start(struct usb_gadget *g,
/* hook up the driver */
fotg210->driver = driver;
+ fotg210->gadget.dev.of_node = fotg210->dev->of_node;
+ fotg210->gadget.speed = USB_SPEED_UNKNOWN;
+
+ dev_info(fotg210->dev, "bound driver %s\n", driver->driver.name);
if (!IS_ERR_OR_NULL(fotg210->phy)) {
ret = otg_set_peripheral(fotg210->phy->otg,
@@ -1023,6 +1023,11 @@ static int fotg210_udc_start(struct usb_gadget *g,
dev_err(fotg210->dev, "can't bind to phy\n");
}
+ /* chip enable */
+ value = ioread32(fotg210->reg + FOTG210_DMCR);
+ value |= DMCR_CHIP_EN;
+ iowrite32(value, fotg210->reg + FOTG210_DMCR);
+
/* enable device global interrupt */
value = ioread32(fotg210->reg + FOTG210_DMCR);
value |= DMCR_GLINT_EN;
@@ -1039,6 +1044,15 @@ static void fotg210_init(struct fotg210_udc *fotg210)
iowrite32(GMIR_MHC_INT | GMIR_MOTG_INT | GMIR_INT_POLARITY,
fotg210->reg + FOTG210_GMIR);
+ /* mask interrupts for groups other than 0-2 */
+ iowrite32(~(DMIGR_MINT_G0 | DMIGR_MINT_G1 | DMIGR_MINT_G2),
+ fotg210->reg + FOTG210_DMIGR);
+
+ /* udc software reset */
+ iowrite32(DMCR_SFRST, fotg210->reg + FOTG210_DMCR);
+ /* Better wait a bit, but without a datasheet, no idea how long. */
+ usleep_range(100, 200);
+
/* disable device global interrupt */
value = ioread32(fotg210->reg + FOTG210_DMCR);
value &= ~DMCR_GLINT_EN;
@@ -1071,15 +1085,33 @@ static int fotg210_udc_stop(struct usb_gadget *g)
fotg210_init(fotg210);
fotg210->driver = NULL;
+ fotg210->gadget.speed = USB_SPEED_UNKNOWN;
spin_unlock_irqrestore(&fotg210->lock, flags);
return 0;
}
+/**
+ * fotg210_vbus_session - Called by external transceiver to enable/disable udc
+ * @g: usb gadget
+ * @is_active: 0 to disable UDC VBUS, 1 to enable it
+ *
+ * Returns 0
+ */
+static int fotg210_vbus_session(struct usb_gadget *g, int is_active)
+{
+ struct fotg210_udc *fotg210 = gadget_to_fotg210(g);
+
+ /* Call down to core integration layer to drive or disable VBUS */
+ fotg210_vbus(fotg210->fotg, is_active);
+ return 0;
+}
+
static const struct usb_gadget_ops fotg210_gadget_ops = {
.udc_start = fotg210_udc_start,
.udc_stop = fotg210_udc_stop,
+ .vbus_session = fotg210_vbus_session,
};
/**
@@ -1133,34 +1165,22 @@ int fotg210_udc_remove(struct platform_device *pdev)
for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
kfree(fotg210->ep[i]);
- if (!IS_ERR(fotg210->pclk))
- clk_disable_unprepare(fotg210->pclk);
-
kfree(fotg210);
return 0;
}
-int fotg210_udc_probe(struct platform_device *pdev)
+int fotg210_udc_probe(struct platform_device *pdev, struct fotg210 *fotg)
{
- struct resource *res;
struct fotg210_udc *fotg210 = NULL;
struct device *dev = &pdev->dev;
int irq;
int ret = 0;
int i;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- pr_err("platform_get_resource error.\n");
- return -ENODEV;
- }
-
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- pr_err("could not get irq\n");
- return -ENODEV;
- }
+ if (irq < 0)
+ return irq;
/* initialize udc */
fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL);
@@ -1168,35 +1188,19 @@ int fotg210_udc_probe(struct platform_device *pdev)
return -ENOMEM;
fotg210->dev = dev;
-
- /* It's OK not to supply this clock */
- fotg210->pclk = devm_clk_get(dev, "PCLK");
- if (!IS_ERR(fotg210->pclk)) {
- ret = clk_prepare_enable(fotg210->pclk);
- if (ret) {
- dev_err(dev, "failed to enable PCLK\n");
- goto err;
- }
- } else if (PTR_ERR(fotg210->pclk) == -EPROBE_DEFER) {
- /*
- * Percolate deferrals, for anything else,
- * just live without the clocking.
- */
- ret = -EPROBE_DEFER;
- goto err;
- }
+ fotg210->fotg = fotg;
fotg210->phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0);
if (IS_ERR(fotg210->phy)) {
ret = PTR_ERR(fotg210->phy);
if (ret == -EPROBE_DEFER)
- goto err_pclk;
+ goto err_free;
dev_info(dev, "no PHY found\n");
fotg210->phy = NULL;
} else {
ret = usb_phy_init(fotg210->phy);
if (ret)
- goto err_pclk;
+ goto err_free;
dev_info(dev, "found and initialized PHY\n");
}
@@ -1208,11 +1212,7 @@ int fotg210_udc_probe(struct platform_device *pdev)
goto err_alloc;
}
- fotg210->reg = ioremap(res->start, resource_size(res));
- if (fotg210->reg == NULL) {
- dev_err(dev, "ioremap error\n");
- goto err_alloc;
- }
+ fotg210->reg = fotg->base;
spin_lock_init(&fotg210->lock);
@@ -1261,6 +1261,8 @@ int fotg210_udc_probe(struct platform_device *pdev)
if (fotg210->ep0_req == NULL)
goto err_map;
+ fotg210->ep0_req->complete = fotg210_ep0_complete;
+
fotg210_init(fotg210);
fotg210_disable_unplug(fotg210);
@@ -1268,7 +1270,7 @@ int fotg210_udc_probe(struct platform_device *pdev)
ret = request_irq(irq, fotg210_irq, IRQF_SHARED,
udc_name, fotg210);
if (ret < 0) {
- dev_err(dev, "request_irq error (%d)\n", ret);
+ dev_err_probe(dev, ret, "request_irq error\n");
goto err_req;
}
@@ -1297,11 +1299,8 @@ err_map:
err_alloc:
for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
kfree(fotg210->ep[i]);
-err_pclk:
- if (!IS_ERR(fotg210->pclk))
- clk_disable_unprepare(fotg210->pclk);
-err:
+err_free:
kfree(fotg210);
return ret;
}
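Editor's note: the converted interrupt paths above call a new fotg210_ack_int() helper whose definition falls outside this excerpt. A minimal sketch consistent with the read-modify-write sequences it replaces (read the interrupt group register, clear only the handled bit, write it back) would be:

static void fotg210_ack_int(struct fotg210_udc *fotg210, u32 offset, u32 mask)
{
	/* sketch only: acknowledge one interrupt bit in the given group register */
	u32 value = ioread32(fotg210->reg + offset);

	value &= ~mask;
	iowrite32(value, fotg210->reg + offset);
}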
diff --git a/drivers/usb/fotg210/fotg210-udc.h b/drivers/usb/fotg210/fotg210-udc.h
index fadb57ca8d78..252cb2b8e2fe 100644
--- a/drivers/usb/fotg210/fotg210-udc.h
+++ b/drivers/usb/fotg210/fotg210-udc.h
@@ -58,6 +58,8 @@
/* Device Mask of Interrupt Group Register (0x130) */
#define FOTG210_DMIGR 0x130
+#define DMIGR_MINT_G2 (1 << 2)
+#define DMIGR_MINT_G1 (1 << 1)
#define DMIGR_MINT_G0 (1 << 0)
/* Device Mask of Interrupt Source Group 0(0x134) */
@@ -231,11 +233,11 @@ struct fotg210_ep {
struct fotg210_udc {
spinlock_t lock; /* protect the struct */
void __iomem *reg;
- struct clk *pclk;
unsigned long irq_trigger;
struct device *dev;
+ struct fotg210 *fotg;
struct usb_phy *phy;
struct usb_gadget gadget;
struct usb_gadget_driver *driver;
diff --git a/drivers/usb/fotg210/fotg210.h b/drivers/usb/fotg210/fotg210.h
index ef79d8323d89..c44c0afe2956 100644
--- a/drivers/usb/fotg210/fotg210.h
+++ b/drivers/usb/fotg210/fotg210.h
@@ -2,13 +2,31 @@
#ifndef __FOTG210_H
#define __FOTG210_H
+enum gemini_port {
+ GEMINI_PORT_NONE = 0,
+ GEMINI_PORT_0,
+ GEMINI_PORT_1,
+};
+
+struct fotg210 {
+ struct device *dev;
+ struct resource *res;
+ void __iomem *base;
+ struct clk *pclk;
+ struct regmap *map;
+ enum gemini_port port;
+};
+
+void fotg210_vbus(struct fotg210 *fotg, bool enable);
+
#ifdef CONFIG_USB_FOTG210_HCD
-int fotg210_hcd_probe(struct platform_device *pdev);
+int fotg210_hcd_probe(struct platform_device *pdev, struct fotg210 *fotg);
int fotg210_hcd_remove(struct platform_device *pdev);
int fotg210_hcd_init(void);
void fotg210_hcd_cleanup(void);
#else
-static inline int fotg210_hcd_probe(struct platform_device *pdev)
+static inline int fotg210_hcd_probe(struct platform_device *pdev,
+ struct fotg210 *fotg)
{
return 0;
}
@@ -26,10 +44,11 @@ static inline void fotg210_hcd_cleanup(void)
#endif
#ifdef CONFIG_USB_FOTG210_UDC
-int fotg210_udc_probe(struct platform_device *pdev);
+int fotg210_udc_probe(struct platform_device *pdev, struct fotg210 *fotg);
int fotg210_udc_remove(struct platform_device *pdev);
#else
-static inline int fotg210_udc_probe(struct platform_device *pdev)
+static inline int fotg210_udc_probe(struct platform_device *pdev,
+ struct fotg210 *fotg)
{
return 0;
}
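Editor's note: the struct fotg210 added here, together with the extra probe parameter, implies a shared core driver that maps the register window, takes the optional PCLK and then dispatches to the host or gadget half; that core file is not part of this excerpt. A rough sketch under those assumptions (role selection simplified to a Kconfig check; the real driver would consult the DT dr_mode):

static int fotg210_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct fotg210 *fotg;
	int ret;

	fotg = devm_kzalloc(dev, sizeof(*fotg), GFP_KERNEL);
	if (!fotg)
		return -ENOMEM;
	fotg->dev = dev;

	fotg->base = devm_platform_get_and_ioremap_resource(pdev, 0, &fotg->res);
	if (IS_ERR(fotg->base))
		return PTR_ERR(fotg->base);

	/* the clock is optional, as it was in the old UDC probe */
	fotg->pclk = devm_clk_get_optional(dev, "PCLK");
	if (IS_ERR(fotg->pclk))
		return PTR_ERR(fotg->pclk);
	ret = clk_prepare_enable(fotg->pclk);
	if (ret)
		return ret;

	if (IS_ENABLED(CONFIG_USB_FOTG210_UDC))
		return fotg210_udc_probe(pdev, fotg);

	return fotg210_hcd_probe(pdev, fotg);
}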
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 4fa2ddf322b4..336db8f92afa 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -203,6 +203,7 @@ config USB_F_UAC2
config USB_F_UVC
tristate
+ select UVC_COMMON
config USB_F_MIDI
tristate
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 403563c06477..fa7dd6cf014d 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -14,9 +14,11 @@
#include <linux/device.h>
#include <linux/utsname.h>
#include <linux/bitfield.h>
+#include <linux/uuid.h>
#include <linux/usb/composite.h>
#include <linux/usb/otg.h>
+#include <linux/usb/webusb.h>
#include <asm/unaligned.h>
#include "u_os_desc.h"
@@ -713,14 +715,16 @@ static int bos_desc(struct usb_composite_dev *cdev)
* A SuperSpeed device shall include the USB2.0 extension descriptor
* and shall support LPM when operating in USB2.0 HS mode.
*/
- usb_ext = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
- bos->bNumDeviceCaps++;
- le16_add_cpu(&bos->wTotalLength, USB_DT_USB_EXT_CAP_SIZE);
- usb_ext->bLength = USB_DT_USB_EXT_CAP_SIZE;
- usb_ext->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
- usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT;
- usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT |
- USB_BESL_SUPPORT | besl);
+ if (cdev->gadget->lpm_capable) {
+ usb_ext = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
+ bos->bNumDeviceCaps++;
+ le16_add_cpu(&bos->wTotalLength, USB_DT_USB_EXT_CAP_SIZE);
+ usb_ext->bLength = USB_DT_USB_EXT_CAP_SIZE;
+ usb_ext->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
+ usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT;
+ usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT |
+ USB_BESL_SUPPORT | besl);
+ }
/*
* The Superspeed USB Capability descriptor shall be implemented by all
@@ -821,6 +825,37 @@ static int bos_desc(struct usb_composite_dev *cdev)
}
}
+ /* The WebUSB Platform Capability descriptor */
+ if (cdev->use_webusb) {
+ struct usb_plat_dev_cap_descriptor *webusb_cap;
+ struct usb_webusb_cap_data *webusb_cap_data;
+ guid_t webusb_uuid = WEBUSB_UUID;
+
+ webusb_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
+ webusb_cap_data = (struct usb_webusb_cap_data *) webusb_cap->CapabilityData;
+ bos->bNumDeviceCaps++;
+ le16_add_cpu(&bos->wTotalLength,
+ USB_DT_USB_PLAT_DEV_CAP_SIZE(USB_WEBUSB_CAP_DATA_SIZE));
+
+ webusb_cap->bLength = USB_DT_USB_PLAT_DEV_CAP_SIZE(USB_WEBUSB_CAP_DATA_SIZE);
+ webusb_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
+ webusb_cap->bDevCapabilityType = USB_PLAT_DEV_CAP_TYPE;
+ webusb_cap->bReserved = 0;
+ export_guid(webusb_cap->UUID, &webusb_uuid);
+
+ if (cdev->bcd_webusb_version != 0)
+ webusb_cap_data->bcdVersion = cpu_to_le16(cdev->bcd_webusb_version);
+ else
+ webusb_cap_data->bcdVersion = WEBUSB_VERSION_1_00;
+
+ webusb_cap_data->bVendorCode = cdev->b_webusb_vendor_code;
+
+ if (strnlen(cdev->landing_page, sizeof(cdev->landing_page)) > 0)
+ webusb_cap_data->iLandingPage = WEBUSB_LANDING_PAGE_PRESENT;
+ else
+ webusb_cap_data->iLandingPage = WEBUSB_LANDING_PAGE_NOT_PRESENT;
+ }
+
return le16_to_cpu(bos->wTotalLength);
}
@@ -1744,7 +1779,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
cdev->desc.bcdUSB = cpu_to_le16(0x0210);
}
} else {
- if (gadget->lpm_capable)
+ if (gadget->lpm_capable || cdev->use_webusb)
cdev->desc.bcdUSB = cpu_to_le16(0x0201);
else
cdev->desc.bcdUSB = cpu_to_le16(0x0200);
@@ -1779,7 +1814,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
break;
case USB_DT_BOS:
if (gadget_is_superspeed(gadget) ||
- gadget->lpm_capable) {
+ gadget->lpm_capable || cdev->use_webusb) {
value = bos_desc(cdev);
value = min(w_length, (u16) value);
}
@@ -2013,6 +2048,53 @@ unknown:
goto check_value;
}
+ /*
+ * WebUSB URL descriptor handling, following:
+ * https://wicg.github.io/webusb/#device-requests
+ */
+ if (cdev->use_webusb &&
+ ctrl->bRequestType == (USB_DIR_IN | USB_TYPE_VENDOR) &&
+ w_index == WEBUSB_GET_URL &&
+ w_value == WEBUSB_LANDING_PAGE_PRESENT &&
+ ctrl->bRequest == cdev->b_webusb_vendor_code) {
+ unsigned int landing_page_length;
+ unsigned int landing_page_offset;
+ struct webusb_url_descriptor *url_descriptor =
+ (struct webusb_url_descriptor *)cdev->req->buf;
+
+ url_descriptor->bDescriptorType = WEBUSB_URL_DESCRIPTOR_TYPE;
+
+ if (strncasecmp(cdev->landing_page, "https://", 8) == 0) {
+ landing_page_offset = 8;
+ url_descriptor->bScheme = WEBUSB_URL_SCHEME_HTTPS;
+ } else if (strncasecmp(cdev->landing_page, "http://", 7) == 0) {
+ landing_page_offset = 7;
+ url_descriptor->bScheme = WEBUSB_URL_SCHEME_HTTP;
+ } else {
+ landing_page_offset = 0;
+ url_descriptor->bScheme = WEBUSB_URL_SCHEME_NONE;
+ }
+
+ landing_page_length = strnlen(cdev->landing_page,
+ sizeof(url_descriptor->URL)
+ - WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + landing_page_offset);
+
+ if (ctrl->wLength < WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH
+ + landing_page_length)
+ landing_page_length = ctrl->wLength
+ - WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + landing_page_offset;
+
+ memcpy(url_descriptor->URL,
+ cdev->landing_page + landing_page_offset,
+ landing_page_length - landing_page_offset);
+ url_descriptor->bLength = landing_page_length
+ - landing_page_offset + WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH;
+
+ value = url_descriptor->bLength;
+
+ goto check_value;
+ }
+
VDBG(cdev,
"non-core control req%02x.%02x v%04x i%04x l%d\n",
ctrl->bRequestType, ctrl->bRequest,
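Editor's note: the URL descriptor returned by the handler above is tiny: a three-byte header (bLength, bDescriptorType, bScheme) followed by the landing page with its scheme prefix stripped, since the scheme is carried in bScheme. A standalone userspace illustration of that layout, using the hypothetical landing page "https://example.com":

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WEBUSB_URL_DESCRIPTOR_TYPE	3
#define WEBUSB_URL_SCHEME_HTTPS		1

int main(void)
{
	const char *landing_page = "https://example.com";	/* hypothetical */
	const char *rest = landing_page + strlen("https://");
	uint8_t desc[255];

	desc[0] = 3 + strlen(rest);			/* bLength */
	desc[1] = WEBUSB_URL_DESCRIPTOR_TYPE;		/* bDescriptorType */
	desc[2] = WEBUSB_URL_SCHEME_HTTPS;		/* bScheme encodes "https://" */
	memcpy(&desc[3], rest, strlen(rest));		/* URL field, scheme stripped */

	printf("bLength=%u bScheme=%u URL=%.*s\n",
	       desc[0], desc[2], (int)strlen(rest), (char *)&desc[3]);
	return 0;
}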
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 0853536cbf2e..b9f1136aa0a2 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -7,6 +7,7 @@
#include <linux/nls.h>
#include <linux/usb/composite.h>
#include <linux/usb/gadget_configfs.h>
+#include <linux/usb/webusb.h>
#include "configfs.h"
#include "u_f.h"
#include "u_os_desc.h"
@@ -39,6 +40,7 @@ struct gadget_info {
struct config_group configs_group;
struct config_group strings_group;
struct config_group os_desc_group;
+ struct config_group webusb_group;
struct mutex lock;
struct usb_gadget_strings *gstrings[MAX_USB_STRING_LANGS + 1];
@@ -50,6 +52,11 @@ struct gadget_info {
bool use_os_desc;
char b_vendor_code;
char qw_sign[OS_STRING_QW_SIGN_LEN];
+ bool use_webusb;
+ u16 bcd_webusb_version;
+ u8 b_webusb_vendor_code;
+ char landing_page[WEBUSB_URL_RAW_MAX_LENGTH];
+
spinlock_t spinlock;
bool unbind;
};
@@ -79,7 +86,7 @@ static inline struct gadget_info *cfg_to_gadget_info(struct config_usb_cfg *cfg)
return container_of(cfg->c.cdev, struct gadget_info, cdev);
}
-struct gadget_strings {
+struct gadget_language {
struct usb_gadget_strings stringtab_dev;
struct usb_string strings[USB_GADGET_FIRST_AVAIL_IDX];
char *manufacturer;
@@ -88,6 +95,8 @@ struct gadget_strings {
struct config_group group;
struct list_head list;
+ struct list_head gadget_strings;
+ unsigned int nstrings;
};
struct gadget_config_name {
@@ -365,9 +374,9 @@ static struct configfs_attribute *gadget_root_attrs[] = {
NULL,
};
-static inline struct gadget_strings *to_gadget_strings(struct config_item *item)
+static inline struct gadget_language *to_gadget_language(struct config_item *item)
{
- return container_of(to_config_group(item), struct gadget_strings,
+ return container_of(to_config_group(item), struct gadget_language,
group);
}
@@ -430,6 +439,12 @@ static int config_usb_cfg_link(
* from another gadget or a random directory.
* Also a function instance can only be linked once.
*/
+
+ if (gi->composite.gadget_driver.udc_name) {
+ ret = -EINVAL;
+ goto out;
+ }
+
list_for_each_entry(iter, &gi->available_func, cfs_list) {
if (iter != fi)
continue;
@@ -755,20 +770,20 @@ static const struct config_item_type config_desc_type = {
.ct_owner = THIS_MODULE,
};
-GS_STRINGS_RW(gadget_strings, manufacturer);
-GS_STRINGS_RW(gadget_strings, product);
-GS_STRINGS_RW(gadget_strings, serialnumber);
+GS_STRINGS_RW(gadget_language, manufacturer);
+GS_STRINGS_RW(gadget_language, product);
+GS_STRINGS_RW(gadget_language, serialnumber);
-static struct configfs_attribute *gadget_strings_langid_attrs[] = {
- &gadget_strings_attr_manufacturer,
- &gadget_strings_attr_product,
- &gadget_strings_attr_serialnumber,
+static struct configfs_attribute *gadget_language_langid_attrs[] = {
+ &gadget_language_attr_manufacturer,
+ &gadget_language_attr_product,
+ &gadget_language_attr_serialnumber,
NULL,
};
-static void gadget_strings_attr_release(struct config_item *item)
+static void gadget_language_attr_release(struct config_item *item)
{
- struct gadget_strings *gs = to_gadget_strings(item);
+ struct gadget_language *gs = to_gadget_language(item);
kfree(gs->manufacturer);
kfree(gs->product);
@@ -778,8 +793,317 @@ static void gadget_strings_attr_release(struct config_item *item)
kfree(gs);
}
-USB_CONFIG_STRING_RW_OPS(gadget_strings);
-USB_CONFIG_STRINGS_LANG(gadget_strings, gadget_info);
+static struct configfs_item_operations gadget_language_langid_item_ops = {
+ .release = gadget_language_attr_release,
+};
+
+static ssize_t gadget_string_id_show(struct config_item *item, char *page)
+{
+ struct gadget_string *string = to_gadget_string(item);
+ int ret;
+
+ ret = sprintf(page, "%u\n", string->usb_string.id);
+ return ret;
+}
+CONFIGFS_ATTR_RO(gadget_string_, id);
+
+static ssize_t gadget_string_s_show(struct config_item *item, char *page)
+{
+ struct gadget_string *string = to_gadget_string(item);
+ int ret;
+
+ ret = snprintf(page, sizeof(string->string), "%s\n", string->string);
+ return ret;
+}
+
+static ssize_t gadget_string_s_store(struct config_item *item, const char *page,
+ size_t len)
+{
+ struct gadget_string *string = to_gadget_string(item);
+ int size = min(sizeof(string->string), len + 1);
+
+ if (len > USB_MAX_STRING_LEN)
+ return -EINVAL;
+
+ return strscpy(string->string, page, size);
+}
+CONFIGFS_ATTR(gadget_string_, s);
+
+static struct configfs_attribute *gadget_string_attrs[] = {
+ &gadget_string_attr_id,
+ &gadget_string_attr_s,
+ NULL,
+};
+
+static void gadget_string_release(struct config_item *item)
+{
+ struct gadget_string *string = to_gadget_string(item);
+
+ kfree(string);
+}
+
+static struct configfs_item_operations gadget_string_item_ops = {
+ .release = gadget_string_release,
+};
+
+static const struct config_item_type gadget_string_type = {
+ .ct_item_ops = &gadget_string_item_ops,
+ .ct_attrs = gadget_string_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_item *gadget_language_string_make(struct config_group *group,
+ const char *name)
+{
+ struct gadget_language *language;
+ struct gadget_string *string;
+
+ language = to_gadget_language(&group->cg_item);
+
+ string = kzalloc(sizeof(*string), GFP_KERNEL);
+ if (!string)
+ return ERR_PTR(-ENOMEM);
+
+ string->usb_string.id = language->nstrings++;
+ string->usb_string.s = string->string;
+ list_add_tail(&string->list, &language->gadget_strings);
+
+ config_item_init_type_name(&string->item, name, &gadget_string_type);
+
+ return &string->item;
+}
+
+static void gadget_language_string_drop(struct config_group *group,
+ struct config_item *item)
+{
+ struct gadget_language *language;
+ struct gadget_string *string;
+ unsigned int i = USB_GADGET_FIRST_AVAIL_IDX;
+
+ language = to_gadget_language(&group->cg_item);
+ string = to_gadget_string(item);
+
+ list_del(&string->list);
+ language->nstrings--;
+
+ /* Reset the ids for the language's strings to guarantee a continuous set */
+ list_for_each_entry(string, &language->gadget_strings, list)
+ string->usb_string.id = i++;
+}
+
+static struct configfs_group_operations gadget_language_langid_group_ops = {
+ .make_item = gadget_language_string_make,
+ .drop_item = gadget_language_string_drop,
+};
+
+static struct config_item_type gadget_language_type = {
+ .ct_item_ops = &gadget_language_langid_item_ops,
+ .ct_group_ops = &gadget_language_langid_group_ops,
+ .ct_attrs = gadget_language_langid_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *gadget_language_make(struct config_group *group,
+ const char *name)
+{
+ struct gadget_info *gi;
+ struct gadget_language *gs;
+ struct gadget_language *new;
+ int langs = 0;
+ int ret;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return ERR_PTR(-ENOMEM);
+
+ ret = check_user_usb_string(name, &new->stringtab_dev);
+ if (ret)
+ goto err;
+ config_group_init_type_name(&new->group, name,
+ &gadget_language_type);
+
+ gi = container_of(group, struct gadget_info, strings_group);
+ ret = -EEXIST;
+ list_for_each_entry(gs, &gi->string_list, list) {
+ if (gs->stringtab_dev.language == new->stringtab_dev.language)
+ goto err;
+ langs++;
+ }
+ ret = -EOVERFLOW;
+ if (langs >= MAX_USB_STRING_LANGS)
+ goto err;
+
+ list_add_tail(&new->list, &gi->string_list);
+ INIT_LIST_HEAD(&new->gadget_strings);
+
+ /* We have the default manufacturer, product and serialnumber strings */
+ new->nstrings = 3;
+ return &new->group;
+err:
+ kfree(new);
+ return ERR_PTR(ret);
+}
+
+static void gadget_language_drop(struct config_group *group,
+ struct config_item *item)
+{
+ config_item_put(item);
+}
+
+static struct configfs_group_operations gadget_language_group_ops = {
+ .make_group = &gadget_language_make,
+ .drop_item = &gadget_language_drop,
+};
+
+static struct config_item_type gadget_language_strings_type = {
+ .ct_group_ops = &gadget_language_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static inline struct gadget_info *webusb_item_to_gadget_info(
+ struct config_item *item)
+{
+ return container_of(to_config_group(item),
+ struct gadget_info, webusb_group);
+}
+
+static ssize_t webusb_use_show(struct config_item *item, char *page)
+{
+ return sysfs_emit(page, "%d\n",
+ webusb_item_to_gadget_info(item)->use_webusb);
+}
+
+static ssize_t webusb_use_store(struct config_item *item, const char *page,
+ size_t len)
+{
+ struct gadget_info *gi = webusb_item_to_gadget_info(item);
+ int ret;
+ bool use;
+
+ ret = kstrtobool(page, &use);
+ if (ret)
+ return ret;
+
+ mutex_lock(&gi->lock);
+ gi->use_webusb = use;
+ mutex_unlock(&gi->lock);
+
+ return len;
+}
+
+static ssize_t webusb_bcdVersion_show(struct config_item *item, char *page)
+{
+ return sysfs_emit(page, "0x%04x\n",
+ webusb_item_to_gadget_info(item)->bcd_webusb_version);
+}
+
+static ssize_t webusb_bcdVersion_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct gadget_info *gi = webusb_item_to_gadget_info(item);
+ u16 bcdVersion;
+ int ret;
+
+ ret = kstrtou16(page, 0, &bcdVersion);
+ if (ret)
+ return ret;
+
+ ret = is_valid_bcd(bcdVersion);
+ if (ret)
+ return ret;
+
+ mutex_lock(&gi->lock);
+ gi->bcd_webusb_version = bcdVersion;
+ mutex_unlock(&gi->lock);
+
+ return len;
+}
+
+static ssize_t webusb_bVendorCode_show(struct config_item *item, char *page)
+{
+ return sysfs_emit(page, "0x%02x\n",
+ webusb_item_to_gadget_info(item)->b_webusb_vendor_code);
+}
+
+static ssize_t webusb_bVendorCode_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct gadget_info *gi = webusb_item_to_gadget_info(item);
+ int ret;
+ u8 b_vendor_code;
+
+ ret = kstrtou8(page, 0, &b_vendor_code);
+ if (ret)
+ return ret;
+
+ mutex_lock(&gi->lock);
+ gi->b_webusb_vendor_code = b_vendor_code;
+ mutex_unlock(&gi->lock);
+
+ return len;
+}
+
+static ssize_t webusb_landingPage_show(struct config_item *item, char *page)
+{
+ return sysfs_emit(page, "%s\n", webusb_item_to_gadget_info(item)->landing_page);
+}
+
+static ssize_t webusb_landingPage_store(struct config_item *item, const char *page,
+ size_t len)
+{
+ struct gadget_info *gi = webusb_item_to_gadget_info(item);
+ unsigned int bytes_to_strip = 0;
+ int l = len;
+
+ if (page[l - 1] == '\n') {
+ --l;
+ ++bytes_to_strip;
+ }
+
+ if (l > sizeof(gi->landing_page)) {
+ pr_err("webusb: landingPage URL too long\n");
+ return -EINVAL;
+ }
+
+ /* the scheme prefix does not count against the descriptor length limit */
+ if (strncasecmp(page, "https://", 8) == 0)
+ bytes_to_strip = 8;
+ else if (strncasecmp(page, "http://", 7) == 0)
+ bytes_to_strip = 7;
+ else
+ bytes_to_strip = 0;
+
+ if (l > U8_MAX - WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + bytes_to_strip) {
+ pr_err("webusb: landingPage URL %d bytes too long for given URL scheme\n",
+ l - U8_MAX + WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH - bytes_to_strip);
+ return -EINVAL;
+ }
+
+ mutex_lock(&gi->lock);
+ // ensure 0 bytes are set, in case the new landing page is shorter than the old one.
+ memcpy_and_pad(gi->landing_page, sizeof(gi->landing_page), page, l, 0);
+ mutex_unlock(&gi->lock);
+
+ return len;
+}
+
+CONFIGFS_ATTR(webusb_, use);
+CONFIGFS_ATTR(webusb_, bVendorCode);
+CONFIGFS_ATTR(webusb_, bcdVersion);
+CONFIGFS_ATTR(webusb_, landingPage);
+
+static struct configfs_attribute *webusb_attrs[] = {
+ &webusb_attr_use,
+ &webusb_attr_bcdVersion,
+ &webusb_attr_bVendorCode,
+ &webusb_attr_landingPage,
+ NULL,
+};
+
+static struct config_item_type webusb_type = {
+ .ct_attrs = webusb_attrs,
+ .ct_owner = THIS_MODULE,
+};
static inline struct gadget_info *os_desc_item_to_gadget_info(
struct config_item *item)
@@ -801,15 +1125,15 @@ static ssize_t os_desc_use_store(struct config_item *item, const char *page,
int ret;
bool use;
- mutex_lock(&gi->lock);
ret = kstrtobool(page, &use);
- if (!ret) {
- gi->use_os_desc = use;
- ret = len;
- }
+ if (ret)
+ return ret;
+
+ mutex_lock(&gi->lock);
+ gi->use_os_desc = use;
mutex_unlock(&gi->lock);
- return ret;
+ return len;
}
static ssize_t os_desc_b_vendor_code_show(struct config_item *item, char *page)
@@ -825,15 +1149,15 @@ static ssize_t os_desc_b_vendor_code_store(struct config_item *item,
int ret;
u8 b_vendor_code;
- mutex_lock(&gi->lock);
ret = kstrtou8(page, 0, &b_vendor_code);
- if (!ret) {
- gi->b_vendor_code = b_vendor_code;
- ret = len;
- }
+ if (ret)
+ return ret;
+
+ mutex_lock(&gi->lock);
+ gi->b_vendor_code = b_vendor_code;
mutex_unlock(&gi->lock);
- return ret;
+ return len;
}
static ssize_t os_desc_qw_sign_show(struct config_item *item, char *page)
@@ -958,15 +1282,15 @@ static ssize_t ext_prop_type_store(struct config_item *item,
u8 type;
int ret;
- if (desc->opts_mutex)
- mutex_lock(desc->opts_mutex);
ret = kstrtou8(page, 0, &type);
if (ret)
- goto end;
- if (type < USB_EXT_PROP_UNICODE || type > USB_EXT_PROP_UNICODE_MULTI) {
- ret = -EINVAL;
- goto end;
- }
+ return ret;
+
+ if (type < USB_EXT_PROP_UNICODE || type > USB_EXT_PROP_UNICODE_MULTI)
+ return -EINVAL;
+
+ if (desc->opts_mutex)
+ mutex_lock(desc->opts_mutex);
if ((ext_prop->type == USB_EXT_PROP_BINARY ||
ext_prop->type == USB_EXT_PROP_LE32 ||
@@ -983,12 +1307,10 @@ static ssize_t ext_prop_type_store(struct config_item *item,
type == USB_EXT_PROP_BE32))
ext_prop->data_len >>= 1;
ext_prop->type = type;
- ret = len;
-end:
if (desc->opts_mutex)
mutex_unlock(desc->opts_mutex);
- return ret;
+ return len;
}
static ssize_t ext_prop_data_show(struct config_item *item, char *page)
@@ -1273,6 +1595,80 @@ static void purge_configs_funcs(struct gadget_info *gi)
}
}
+static struct usb_string *
+configfs_attach_gadget_strings(struct gadget_info *gi)
+{
+ struct usb_gadget_strings **gadget_strings;
+ struct gadget_language *language;
+ struct gadget_string *string;
+ unsigned int nlangs = 0;
+ struct list_head *iter;
+ struct usb_string *us;
+ unsigned int i = 0;
+ int nstrings = -1;
+ unsigned int j;
+
+ list_for_each(iter, &gi->string_list)
+ nlangs++;
+
+ /* Bail out early if no languages are configured */
+ if (!nlangs)
+ return NULL;
+
+ gadget_strings = kcalloc(nlangs + 1, /* including NULL terminator */
+ sizeof(struct usb_gadget_strings *), GFP_KERNEL);
+ if (!gadget_strings)
+ return ERR_PTR(-ENOMEM);
+
+ list_for_each_entry(language, &gi->string_list, list) {
+ struct usb_string *stringtab;
+
+ if (nstrings == -1) {
+ nstrings = language->nstrings;
+ } else if (nstrings != language->nstrings) {
+ pr_err("languages must contain the same number of strings\n");
+ us = ERR_PTR(-EINVAL);
+ goto cleanup;
+ }
+
+ stringtab = kcalloc(language->nstrings + 1, sizeof(struct usb_string),
+ GFP_KERNEL);
+ if (!stringtab) {
+ us = ERR_PTR(-ENOMEM);
+ goto cleanup;
+ }
+
+ stringtab[USB_GADGET_MANUFACTURER_IDX].id = USB_GADGET_MANUFACTURER_IDX;
+ stringtab[USB_GADGET_MANUFACTURER_IDX].s = language->manufacturer;
+ stringtab[USB_GADGET_PRODUCT_IDX].id = USB_GADGET_PRODUCT_IDX;
+ stringtab[USB_GADGET_PRODUCT_IDX].s = language->product;
+ stringtab[USB_GADGET_SERIAL_IDX].id = USB_GADGET_SERIAL_IDX;
+ stringtab[USB_GADGET_SERIAL_IDX].s = language->serialnumber;
+
+ j = USB_GADGET_FIRST_AVAIL_IDX;
+ list_for_each_entry(string, &language->gadget_strings, list) {
+ memcpy(&stringtab[j], &string->usb_string, sizeof(struct usb_string));
+ j++;
+ }
+
+ language->stringtab_dev.strings = stringtab;
+ gadget_strings[i] = &language->stringtab_dev;
+ i++;
+ }
+
+ us = usb_gstrings_attach(&gi->cdev, gadget_strings, nstrings);
+
+cleanup:
+ list_for_each_entry(language, &gi->string_list, list) {
+ kfree(language->stringtab_dev.strings);
+ language->stringtab_dev.strings = NULL;
+ }
+
+ kfree(gadget_strings);
+
+ return us;
+}
+
static int configfs_composite_bind(struct usb_gadget *gadget,
struct usb_gadget_driver *gdriver)
{
@@ -1316,22 +1712,7 @@ static int configfs_composite_bind(struct usb_gadget *gadget,
/* init all strings */
if (!list_empty(&gi->string_list)) {
- struct gadget_strings *gs;
-
- i = 0;
- list_for_each_entry(gs, &gi->string_list, list) {
-
- gi->gstrings[i] = &gs->stringtab_dev;
- gs->stringtab_dev.strings = gs->strings;
- gs->strings[USB_GADGET_MANUFACTURER_IDX].s =
- gs->manufacturer;
- gs->strings[USB_GADGET_PRODUCT_IDX].s = gs->product;
- gs->strings[USB_GADGET_SERIAL_IDX].s = gs->serialnumber;
- i++;
- }
- gi->gstrings[i] = NULL;
- s = usb_gstrings_attach(&gi->cdev, gi->gstrings,
- USB_GADGET_FIRST_AVAIL_IDX);
+ s = configfs_attach_gadget_strings(gi);
if (IS_ERR(s)) {
ret = PTR_ERR(s);
goto err_comp_cleanup;
@@ -1340,6 +1721,15 @@ static int configfs_composite_bind(struct usb_gadget *gadget,
gi->cdev.desc.iManufacturer = s[USB_GADGET_MANUFACTURER_IDX].id;
gi->cdev.desc.iProduct = s[USB_GADGET_PRODUCT_IDX].id;
gi->cdev.desc.iSerialNumber = s[USB_GADGET_SERIAL_IDX].id;
+
+ gi->cdev.usb_strings = s;
+ }
+
+ if (gi->use_webusb) {
+ cdev->use_webusb = true;
+ cdev->bcd_webusb_version = gi->bcd_webusb_version;
+ cdev->b_webusb_vendor_code = gi->b_webusb_vendor_code;
+ memcpy(cdev->landing_page, gi->landing_page, WEBUSB_URL_RAW_MAX_LENGTH);
}
if (gi->use_os_desc) {
@@ -1598,13 +1988,17 @@ static struct config_group *gadgets_make(
configfs_add_default_group(&gi->configs_group, &gi->group);
config_group_init_type_name(&gi->strings_group, "strings",
- &gadget_strings_strings_type);
+ &gadget_language_strings_type);
configfs_add_default_group(&gi->strings_group, &gi->group);
config_group_init_type_name(&gi->os_desc_group, "os_desc",
&os_desc_type);
configfs_add_default_group(&gi->os_desc_group, &gi->group);
+ config_group_init_type_name(&gi->webusb_group, "webusb",
+ &webusb_type);
+ configfs_add_default_group(&gi->webusb_group, &gi->group);
+
gi->composite.bind = configfs_do_nothing;
gi->composite.unbind = configfs_do_nothing;
gi->composite.suspend = NULL;
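Editor's note: the new webusb default group and the per-language string items above are both driven from userspace through configfs. A small standalone C sketch of that usage; it assumes configfs is mounted in the usual place and that a gadget named g1 already exists, and the vendor code, URL and item name are made up for the example:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

#define G "/sys/kernel/config/usb_gadget/g1"	/* hypothetical gadget */

static void put(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror(path);
	close(fd);
}

int main(void)
{
	/* WebUSB platform capability and landing page */
	put(G "/webusb/bcdVersion", "0x0100");
	put(G "/webusb/bVendorCode", "0x21");
	put(G "/webusb/landingPage", "https://example.com");
	put(G "/webusb/use", "1");

	/* an arbitrary extra string in the en-US language table */
	mkdir(G "/strings/0x409", 0755);
	mkdir(G "/strings/0x409/xu_name", 0755);
	put(G "/strings/0x409/xu_name/s", "My extension unit");
	return 0;
}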
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 8ad354741380..ddfc537c7526 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -830,8 +830,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
{
struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
work);
- int ret = io_data->req->status ? io_data->req->status :
- io_data->req->actual;
+ int ret = io_data->status;
bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
if (io_data->read && ret > 0) {
@@ -845,8 +844,6 @@ static void ffs_user_copy_worker(struct work_struct *work)
if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
eventfd_signal(io_data->ffs->ffs_eventfd, 1);
- usb_ep_free_request(io_data->ep, io_data->req);
-
if (io_data->read)
kfree(io_data->to_free);
ffs_free_buffer(io_data);
@@ -861,6 +858,9 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
ENTER();
+ io_data->status = req->status ? req->status : req->actual;
+ usb_ep_free_request(_ep, req);
+
INIT_WORK(&io_data->work, ffs_user_copy_worker);
queue_work(ffs->io_completion_wq, &io_data->work);
}
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index 32f2c1645467..5e919fb65833 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -76,14 +76,14 @@ static struct usb_interface_descriptor uvc_control_intf = {
.bDescriptorType = USB_DT_INTERFACE,
.bInterfaceNumber = UVC_INTF_VIDEO_CONTROL,
.bAlternateSetting = 0,
- .bNumEndpoints = 1,
+ .bNumEndpoints = 0,
.bInterfaceClass = USB_CLASS_VIDEO,
.bInterfaceSubClass = UVC_SC_VIDEOCONTROL,
.bInterfaceProtocol = 0x00,
.iInterface = 0,
};
-static struct usb_endpoint_descriptor uvc_control_ep = {
+static struct usb_endpoint_descriptor uvc_interrupt_ep = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
@@ -92,8 +92,8 @@ static struct usb_endpoint_descriptor uvc_control_ep = {
.bInterval = 8,
};
-static struct usb_ss_ep_comp_descriptor uvc_ss_control_comp = {
- .bLength = sizeof(uvc_ss_control_comp),
+static struct usb_ss_ep_comp_descriptor uvc_ss_interrupt_comp = {
+ .bLength = sizeof(uvc_ss_interrupt_comp),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
/* The following 3 values can be tweaked if necessary. */
.bMaxBurst = 0,
@@ -101,7 +101,7 @@ static struct usb_ss_ep_comp_descriptor uvc_ss_control_comp = {
.wBytesPerInterval = cpu_to_le16(UVC_STATUS_MAX_PACKET_SIZE),
};
-static struct uvc_control_endpoint_descriptor uvc_control_cs_ep = {
+static struct uvc_control_endpoint_descriptor uvc_interrupt_cs_ep = {
.bLength = UVC_DT_CONTROL_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_CS_ENDPOINT,
.bDescriptorSubType = UVC_EP_INTERRUPT,
@@ -300,14 +300,17 @@ uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
if (alt)
return -EINVAL;
- uvcg_info(f, "reset UVC Control\n");
- usb_ep_disable(uvc->control_ep);
+ if (uvc->enable_interrupt_ep) {
+ uvcg_info(f, "reset UVC interrupt endpoint\n");
+ usb_ep_disable(uvc->interrupt_ep);
- if (!uvc->control_ep->desc)
- if (config_ep_by_speed(cdev->gadget, f, uvc->control_ep))
- return -EINVAL;
+ if (!uvc->interrupt_ep->desc)
+ if (config_ep_by_speed(cdev->gadget, f,
+ uvc->interrupt_ep))
+ return -EINVAL;
- usb_ep_enable(uvc->control_ep);
+ usb_ep_enable(uvc->interrupt_ep);
+ }
if (uvc->state == UVC_STATE_DISCONNECTED) {
memset(&v4l2_event, 0, sizeof(v4l2_event));
@@ -385,7 +388,8 @@ uvc_function_disable(struct usb_function *f)
uvc->state = UVC_STATE_DISCONNECTED;
usb_ep_disable(uvc->video.ep);
- usb_ep_disable(uvc->control_ep);
+ if (uvc->enable_interrupt_ep)
+ usb_ep_disable(uvc->interrupt_ep);
}
/* --------------------------------------------------------------------------
@@ -474,6 +478,25 @@ uvc_register_video(struct uvc_device *uvc)
} \
} while (0)
+#define UVC_COPY_XU_DESCRIPTOR(mem, dst, desc) \
+ do { \
+ *(dst)++ = mem; \
+ memcpy(mem, desc, 22); /* bLength to bNrInPins */ \
+ mem += 22; \
+ \
+ memcpy(mem, (desc)->baSourceID, (desc)->bNrInPins); \
+ mem += (desc)->bNrInPins; \
+ \
+ memcpy(mem, &(desc)->bControlSize, 1); \
+ mem++; \
+ \
+ memcpy(mem, (desc)->bmControls, (desc)->bControlSize); \
+ mem += (desc)->bControlSize; \
+ \
+ memcpy(mem, &(desc)->iExtension, 1); \
+ mem++; \
+ } while (0)
+
static struct usb_descriptor_header **
uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
{
@@ -485,6 +508,7 @@ uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
const struct usb_descriptor_header * const *src;
struct usb_descriptor_header **dst;
struct usb_descriptor_header **hdr;
+ struct uvcg_extension *xu;
unsigned int control_size;
unsigned int streaming_size;
unsigned int n_desc;
@@ -521,9 +545,9 @@ uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
* uvc_iad
* uvc_control_intf
* Class-specific UVC control descriptors
- * uvc_control_ep
- * uvc_control_cs_ep
- * uvc_ss_control_comp (for SS only)
+ * uvc_interrupt_ep
+ * uvc_interrupt_cs_ep
+ * uvc_ss_interrupt_comp (for SS only)
* uvc_streaming_intf_alt0
* Class-specific UVC streaming descriptors
* uvc_{fs|hs}_streaming
@@ -533,14 +557,17 @@ uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
control_size = 0;
streaming_size = 0;
bytes = uvc_iad.bLength + uvc_control_intf.bLength
- + uvc_control_ep.bLength + uvc_control_cs_ep.bLength
+ uvc_streaming_intf_alt0.bLength;
- if (speed == USB_SPEED_SUPER) {
- bytes += uvc_ss_control_comp.bLength;
- n_desc = 6;
- } else {
- n_desc = 5;
+ n_desc = 3;
+ if (uvc->enable_interrupt_ep) {
+ bytes += uvc_interrupt_ep.bLength + uvc_interrupt_cs_ep.bLength;
+ n_desc += 2;
+
+ if (speed == USB_SPEED_SUPER) {
+ bytes += uvc_ss_interrupt_comp.bLength;
+ n_desc += 1;
+ }
}
for (src = (const struct usb_descriptor_header **)uvc_control_desc;
@@ -549,6 +576,13 @@ uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
bytes += (*src)->bLength;
n_desc++;
}
+
+ list_for_each_entry(xu, uvc->desc.extension_units, list) {
+ control_size += xu->desc.bLength;
+ bytes += xu->desc.bLength;
+ n_desc++;
+ }
+
for (src = (const struct usb_descriptor_header **)uvc_streaming_cls;
*src; ++src) {
streaming_size += (*src)->bLength;
@@ -575,15 +609,22 @@ uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
uvc_control_header = mem;
UVC_COPY_DESCRIPTORS(mem, dst,
(const struct usb_descriptor_header **)uvc_control_desc);
+
+ list_for_each_entry(xu, uvc->desc.extension_units, list)
+ UVC_COPY_XU_DESCRIPTOR(mem, dst, &xu->desc);
+
uvc_control_header->wTotalLength = cpu_to_le16(control_size);
uvc_control_header->bInCollection = 1;
uvc_control_header->baInterfaceNr[0] = uvc->streaming_intf;
- UVC_COPY_DESCRIPTOR(mem, dst, &uvc_control_ep);
- if (speed == USB_SPEED_SUPER)
- UVC_COPY_DESCRIPTOR(mem, dst, &uvc_ss_control_comp);
+ if (uvc->enable_interrupt_ep) {
+ UVC_COPY_DESCRIPTOR(mem, dst, &uvc_interrupt_ep);
+ if (speed == USB_SPEED_SUPER)
+ UVC_COPY_DESCRIPTOR(mem, dst, &uvc_ss_interrupt_comp);
+
+ UVC_COPY_DESCRIPTOR(mem, dst, &uvc_interrupt_cs_ep);
+ }
- UVC_COPY_DESCRIPTOR(mem, dst, &uvc_control_cs_ep);
UVC_COPY_DESCRIPTOR(mem, dst, &uvc_streaming_intf_alt0);
uvc_streaming_header = mem;
@@ -603,6 +644,7 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_composite_dev *cdev = c->cdev;
struct uvc_device *uvc = to_uvc(f);
+ struct uvcg_extension *xu;
struct usb_string *us;
unsigned int max_packet_mult;
unsigned int max_packet_size;
@@ -666,12 +708,16 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
(opts->streaming_maxburst + 1));
/* Allocate endpoints. */
- ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep);
- if (!ep) {
- uvcg_info(f, "Unable to allocate control EP\n");
- goto error;
+ if (opts->enable_interrupt_ep) {
+ ep = usb_ep_autoconfig(cdev->gadget, &uvc_interrupt_ep);
+ if (!ep) {
+ uvcg_info(f, "Unable to allocate interrupt EP\n");
+ goto error;
+ }
+ uvc->interrupt_ep = ep;
+ uvc_control_intf.bNumEndpoints = 1;
}
- uvc->control_ep = ep;
+ uvc->enable_interrupt_ep = opts->enable_interrupt_ep;
if (gadget_is_superspeed(c->cdev->gadget))
ep = usb_ep_autoconfig_ss(cdev->gadget, &uvc_ss_streaming_ep,
@@ -691,6 +737,18 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
uvc_hs_streaming_ep.bEndpointAddress = uvc->video.ep->address;
uvc_ss_streaming_ep.bEndpointAddress = uvc->video.ep->address;
+ /*
+ * XUs can have an arbitrary string descriptor describing them. If they
+ * have one pick up the ID.
+ */
+ list_for_each_entry(xu, &opts->extension_units, list)
+ if (xu->string_descriptor_index)
+ xu->desc.iExtension = cdev->usb_strings[xu->string_descriptor_index].id;
+
+ /*
+ * We attach the hard-coded defaults in case the user does not provide
+ * any more appropriate strings through configfs.
+ */
uvc_en_us_strings[UVC_STRING_CONTROL_IDX].s = opts->function_name;
us = usb_gstrings_attach(cdev, uvc_function_strings,
ARRAY_SIZE(uvc_en_us_strings));
@@ -698,11 +756,15 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
ret = PTR_ERR(us);
goto error;
}
- uvc_iad.iFunction = us[UVC_STRING_CONTROL_IDX].id;
- uvc_control_intf.iInterface = us[UVC_STRING_CONTROL_IDX].id;
- ret = us[UVC_STRING_STREAMING_IDX].id;
- uvc_streaming_intf_alt0.iInterface = ret;
- uvc_streaming_intf_alt1.iInterface = ret;
+
+ uvc_iad.iFunction = opts->iad_index ? cdev->usb_strings[opts->iad_index].id :
+ us[UVC_STRING_CONTROL_IDX].id;
+ uvc_streaming_intf_alt0.iInterface = opts->vs0_index ?
+ cdev->usb_strings[opts->vs0_index].id :
+ us[UVC_STRING_STREAMING_IDX].id;
+ uvc_streaming_intf_alt1.iInterface = opts->vs1_index ?
+ cdev->usb_strings[opts->vs1_index].id :
+ us[UVC_STRING_STREAMING_IDX].id;
/* Allocate interface IDs. */
if ((ret = usb_interface_id(c, f)) < 0)
@@ -803,7 +865,6 @@ static struct usb_function_instance *uvc_alloc_inst(void)
struct uvc_camera_terminal_descriptor *cd;
struct uvc_processing_unit_descriptor *pd;
struct uvc_output_terminal_descriptor *od;
- struct uvc_color_matching_descriptor *md;
struct uvc_descriptor_header **ctl_cls;
int ret;
@@ -852,13 +913,12 @@ static struct usb_function_instance *uvc_alloc_inst(void)
od->bSourceID = 2;
od->iTerminal = 0;
- md = &opts->uvc_color_matching;
- md->bLength = UVC_DT_COLOR_MATCHING_SIZE;
- md->bDescriptorType = USB_DT_CS_INTERFACE;
- md->bDescriptorSubType = UVC_VS_COLORFORMAT;
- md->bColorPrimaries = 1;
- md->bTransferCharacteristics = 1;
- md->bMatrixCoefficients = 4;
+ /*
+ * With the ability to add XUs to the UVC function graph, we need to be
+ * able to allocate unique unit IDs to them. The IDs are 1-based, with
+ * the CT, PU and OT above consuming the first 3.
+ */
+ opts->last_unit_id = 3;
/* Prepare fs control class descriptors for configfs-based gadgets */
ctl_cls = opts->uvc_fs_control_cls;
@@ -880,6 +940,8 @@ static struct usb_function_instance *uvc_alloc_inst(void)
opts->ss_control =
(const struct uvc_descriptor_header * const *)ctl_cls;
+ INIT_LIST_HEAD(&opts->extension_units);
+
opts->streaming_interval = 1;
opts->streaming_maxpacket = 1024;
snprintf(opts->function_name, sizeof(opts->function_name), "UVC Camera");
@@ -1011,6 +1073,8 @@ static struct usb_function *uvc_alloc(struct usb_function_instance *fi)
return ERR_PTR(-EBUSY);
}
+ uvc->desc.extension_units = &opts->extension_units;
+
++opts->refcnt;
mutex_unlock(&opts->lock);
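Editor's note: the piecewise UVC_COPY_XU_DESCRIPTOR() copy earlier in this file is needed because an extension unit descriptor carries two variable-length arrays in the middle of it, so its total length depends on both bNrInPins and bControlSize. A standalone arithmetic check of that layout (the pin and control counts are made-up values):

#include <stdio.h>

/* fixed head, bLength through bNrInPins: 1+1+1+1+16+1+1 = 22 bytes */
#define XU_FIXED_HEAD		22
/* ... then baSourceID[n], bControlSize, bmControls[p], iExtension */
#define XU_SIZE(n, p)		(XU_FIXED_HEAD + (n) + 1 + (p) + 1)

int main(void)
{
	/* hypothetical XU with one input pin and one control bitmap byte */
	printf("bLength = %d\n", XU_SIZE(1, 1));	/* prints 26 */
	return 0;
}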
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index e06022873df1..f259975dfba4 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -17,6 +17,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
+#include <linux/usb/composite.h>
#include "u_ether.h"
@@ -103,41 +104,6 @@ static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
/*-------------------------------------------------------------------------*/
-/* REVISIT there must be a better way than having two sets
- * of debug calls ...
- */
-
-#undef DBG
-#undef VDBG
-#undef ERROR
-#undef INFO
-
-#define xprintk(d, level, fmt, args...) \
- printk(level "%s: " fmt , (d)->net->name , ## args)
-
-#ifdef DEBUG
-#undef DEBUG
-#define DBG(dev, fmt, args...) \
- xprintk(dev , KERN_DEBUG , fmt , ## args)
-#else
-#define DBG(dev, fmt, args...) \
- do { } while (0)
-#endif /* DEBUG */
-
-#ifdef VERBOSE_DEBUG
-#define VDBG DBG
-#else
-#define VDBG(dev, fmt, args...) \
- do { } while (0)
-#endif /* DEBUG */
-
-#define ERROR(dev, fmt, args...) \
- xprintk(dev , KERN_ERR , fmt , ## args)
-#define INFO(dev, fmt, args...) \
- xprintk(dev , KERN_INFO , fmt , ## args)
-
-/*-------------------------------------------------------------------------*/
-
/* NETWORK DRIVER HOOKUP (to the layer above this driver) */
static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
@@ -846,13 +812,11 @@ struct net_device *gether_setup_name_default(const char *netname)
snprintf(net->name, sizeof(net->name), "%s%%d", netname);
eth_random_addr(dev->dev_mac);
- pr_warn("using random %s ethernet address\n", "self");
/* by default we always have a random MAC address */
net->addr_assign_type = NET_ADDR_RANDOM;
eth_random_addr(dev->host_mac);
- pr_warn("using random %s ethernet address\n", "host");
net->netdev_ops = &eth_netdev_ops;
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 840626e064e1..a0ca47fbff0f 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -82,6 +82,9 @@
#define WRITE_BUF_SIZE 8192 /* TX only */
#define GS_CONSOLE_BUF_SIZE 8192
+/* Prevents race conditions while accessing gser->ioport */
+static DEFINE_SPINLOCK(serial_port_lock);
+
/* console info */
struct gs_console {
struct console console;
@@ -1375,8 +1378,10 @@ void gserial_disconnect(struct gserial *gser)
if (!port)
return;
+ spin_lock_irqsave(&serial_port_lock, flags);
+
/* tell the TTY glue not to do I/O here any more */
- spin_lock_irqsave(&port->port_lock, flags);
+ spin_lock(&port->port_lock);
gs_console_disconnect(port);
@@ -1391,7 +1396,8 @@ void gserial_disconnect(struct gserial *gser)
tty_hangup(port->port.tty);
}
port->suspended = false;
- spin_unlock_irqrestore(&port->port_lock, flags);
+ spin_unlock(&port->port_lock);
+ spin_unlock_irqrestore(&serial_port_lock, flags);
/* disable endpoints, aborting down any active I/O */
usb_ep_disable(gser->out);
@@ -1425,10 +1431,19 @@ EXPORT_SYMBOL_GPL(gserial_suspend);
void gserial_resume(struct gserial *gser)
{
- struct gs_port *port = gser->ioport;
+ struct gs_port *port;
unsigned long flags;
- spin_lock_irqsave(&port->port_lock, flags);
+ spin_lock_irqsave(&serial_port_lock, flags);
+ port = gser->ioport;
+
+ if (!port) {
+ spin_unlock_irqrestore(&serial_port_lock, flags);
+ return;
+ }
+
+ spin_lock(&port->port_lock);
+ spin_unlock(&serial_port_lock);
port->suspended = false;
if (!port->start_delayed) {
spin_unlock_irqrestore(&port->port_lock, flags);
diff --git a/drivers/usb/gadget/function/u_uvc.h b/drivers/usb/gadget/function/u_uvc.h
index 24b8681b0d6f..1ce58f61253c 100644
--- a/drivers/usb/gadget/function/u_uvc.h
+++ b/drivers/usb/gadget/function/u_uvc.h
@@ -28,6 +28,9 @@ struct f_uvc_opts {
unsigned int control_interface;
unsigned int streaming_interface;
char function_name[32];
+ unsigned int last_unit_id;
+
+ bool enable_interrupt_ep;
/*
* Control descriptors array pointers for full-/high-speed and
@@ -52,7 +55,6 @@ struct f_uvc_opts {
struct uvc_camera_terminal_descriptor uvc_camera_terminal;
struct uvc_processing_unit_descriptor uvc_processing;
struct uvc_output_terminal_descriptor uvc_output_terminal;
- struct uvc_color_matching_descriptor uvc_color_matching;
/*
* Control descriptors pointers arrays for full-/high-speed and
@@ -65,6 +67,12 @@ struct f_uvc_opts {
struct uvc_descriptor_header *uvc_ss_control_cls[5];
/*
+ * Control descriptors for extension units. There could be any number
+ * of these, including none at all.
+ */
+ struct list_head extension_units;
+
+ /*
* Streaming descriptors for full-speed, high-speed and super-speed.
* Used by configfs only, must not be touched by legacy gadgets. The
* arrays are allocated at runtime as the number of descriptors isn't
@@ -75,6 +83,14 @@ struct f_uvc_opts {
struct uvc_descriptor_header **uvc_ss_streaming_cls;
/*
+ * Indexes into the function's string descriptors allowing users to set
+ * custom descriptions rather than the hard-coded defaults.
+ */
+ u8 iad_index;
+ u8 vs0_index;
+ u8 vs1_index;
+
+ /*
* Read/write access to configfs attributes is handled by configfs.
*
* This lock protects the descriptors from concurrent access by
diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
index 40226b1f7e14..100475b1363e 100644
--- a/drivers/usb/gadget/function/uvc.h
+++ b/drivers/usb/gadget/function/uvc.h
@@ -143,12 +143,14 @@ struct uvc_device {
const struct uvc_descriptor_header * const *fs_streaming;
const struct uvc_descriptor_header * const *hs_streaming;
const struct uvc_descriptor_header * const *ss_streaming;
+ struct list_head *extension_units;
} desc;
unsigned int control_intf;
- struct usb_ep *control_ep;
+ struct usb_ep *interrupt_ep;
struct usb_request *control_req;
void *control_buf;
+ bool enable_interrupt_ep;
unsigned int streaming_intf;
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index 76cb60d13049..62b759bb7613 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -13,6 +13,7 @@
#include "uvc_configfs.h"
#include <linux/sort.h>
+#include <linux/usb/video.h>
/* -----------------------------------------------------------------------------
* Global Utility Structures and Macros
@@ -46,6 +47,71 @@ static int uvcg_config_compare_u32(const void *l, const void *r)
return li < ri ? -1 : li == ri ? 0 : 1;
}
+static inline int __uvcg_count_item_entries(char *buf, void *priv, unsigned int size)
+{
+ ++*((int *)priv);
+ return 0;
+}
+
+static inline int __uvcg_fill_item_entries(char *buf, void *priv, unsigned int size)
+{
+ unsigned int num;
+ u8 **values;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &num);
+ if (ret)
+ return ret;
+
+ if (num != (num & GENMASK((size * 8) - 1, 0)))
+ return -ERANGE;
+
+ values = priv;
+ memcpy(*values, &num, size);
+ *values += size;
+
+ return 0;
+}
+
+static int __uvcg_iter_item_entries(const char *page, size_t len,
+ int (*fun)(char *, void *, unsigned int),
+ void *priv, unsigned int size)
+{
+ /* sign, base 2 representation, newline, terminator */
+ unsigned int bufsize = 1 + size * 8 + 1 + 1;
+ const char *pg = page;
+ int i, ret = 0;
+ char *buf;
+
+ if (!fun)
+ return -EINVAL;
+
+ buf = kzalloc(bufsize, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ while (pg - page < len) {
+ i = 0;
+ while (i < bufsize && (pg - page < len) &&
+ *pg != '\0' && *pg != '\n')
+ buf[i++] = *pg++;
+ if (i == bufsize) {
+ ret = -EINVAL;
+ goto out_free_buf;
+ }
+ while ((pg - page < len) && (*pg == '\0' || *pg == '\n'))
+ ++pg;
+ buf[i] = '\0';
+ ret = fun(buf, priv, size);
+ if (ret)
+ goto out_free_buf;
+ }
+
+out_free_buf:
+ kfree(buf);
+ return ret;
+}
+
struct uvcg_config_group_type {
struct config_item_type type;
const char *name;
@@ -483,11 +549,68 @@ UVC_ATTR_RO(uvcg_default_output_, cname, aname)
UVCG_DEFAULT_OUTPUT_ATTR(b_terminal_id, bTerminalID, 8);
UVCG_DEFAULT_OUTPUT_ATTR(w_terminal_type, wTerminalType, 16);
UVCG_DEFAULT_OUTPUT_ATTR(b_assoc_terminal, bAssocTerminal, 8);
-UVCG_DEFAULT_OUTPUT_ATTR(b_source_id, bSourceID, 8);
UVCG_DEFAULT_OUTPUT_ATTR(i_terminal, iTerminal, 8);
#undef UVCG_DEFAULT_OUTPUT_ATTR
+static ssize_t uvcg_default_output_b_source_id_show(struct config_item *item,
+ char *page)
+{
+ struct config_group *group = to_config_group(item);
+ struct f_uvc_opts *opts;
+ struct config_item *opts_item;
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct uvc_output_terminal_descriptor *cd;
+ int result;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ opts_item = group->cg_item.ci_parent->ci_parent->
+ ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+ cd = &opts->uvc_output_terminal;
+
+ mutex_lock(&opts->lock);
+ result = sprintf(page, "%u\n", le8_to_cpu(cd->bSourceID));
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+
+ return result;
+}
+
+static ssize_t uvcg_default_output_b_source_id_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item);
+ struct f_uvc_opts *opts;
+ struct config_item *opts_item;
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct uvc_output_terminal_descriptor *cd;
+ int result;
+ u8 num;
+
+ result = kstrtou8(page, 0, &num);
+ if (result)
+ return result;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ opts_item = group->cg_item.ci_parent->ci_parent->
+ ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+ cd = &opts->uvc_output_terminal;
+
+ mutex_lock(&opts->lock);
+ cd->bSourceID = num;
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+
+ return len;
+}
+UVC_ATTR(uvcg_default_output_, b_source_id, bSourceID);
+
static struct configfs_attribute *uvcg_default_output_attrs[] = {
&uvcg_default_output_attr_b_terminal_id,
&uvcg_default_output_attr_w_terminal_type,
@@ -540,6 +663,537 @@ static const struct uvcg_config_group_type uvcg_terminal_grp_type = {
};
/* -----------------------------------------------------------------------------
+ * control/extensions
+ */
+
+#define UVCG_EXTENSION_ATTR(cname, aname, ro...) \
+static ssize_t uvcg_extension_##cname##_show(struct config_item *item, \
+ char *page) \
+{ \
+ struct config_group *group = to_config_group(item->ci_parent); \
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex; \
+ struct uvcg_extension *xu = to_uvcg_extension(item); \
+ struct config_item *opts_item; \
+ struct f_uvc_opts *opts; \
+ int ret; \
+ \
+ mutex_lock(su_mutex); \
+ \
+ opts_item = item->ci_parent->ci_parent->ci_parent; \
+ opts = to_f_uvc_opts(opts_item); \
+ \
+ mutex_lock(&opts->lock); \
+ ret = sprintf(page, "%u\n", xu->desc.aname); \
+ mutex_unlock(&opts->lock); \
+ \
+ mutex_unlock(su_mutex); \
+ \
+ return ret; \
+} \
+UVC_ATTR##ro(uvcg_extension_, cname, aname)
+
+UVCG_EXTENSION_ATTR(b_length, bLength, _RO);
+UVCG_EXTENSION_ATTR(b_unit_id, bUnitID, _RO);
+UVCG_EXTENSION_ATTR(i_extension, iExtension, _RO);
+
+static ssize_t uvcg_extension_b_num_controls_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item->ci_parent);
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct uvcg_extension *xu = to_uvcg_extension(item);
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+ int ret;
+ u8 num;
+
+ ret = kstrtou8(page, 0, &num);
+ if (ret)
+ return ret;
+
+ mutex_lock(su_mutex);
+
+ opts_item = item->ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ xu->desc.bNumControls = num;
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+
+ return len;
+}
+UVCG_EXTENSION_ATTR(b_num_controls, bNumControls);
+
+/*
+ * In addition to storing bNrInPins, this function needs to realloc the
+ * memory for the baSourceID array and additionally expand bLength.
+ */
+static ssize_t uvcg_extension_b_nr_in_pins_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item->ci_parent);
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct uvcg_extension *xu = to_uvcg_extension(item);
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+ void *tmp_buf;
+ int ret;
+ u8 num;
+
+ ret = kstrtou8(page, 0, &num);
+ if (ret)
+ return ret;
+
+ mutex_lock(su_mutex);
+
+ opts_item = item->ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+
+ if (num == xu->desc.bNrInPins) {
+ ret = len;
+ goto unlock;
+ }
+
+ tmp_buf = krealloc_array(xu->desc.baSourceID, num, sizeof(u8),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!tmp_buf) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ xu->desc.baSourceID = tmp_buf;
+ xu->desc.bNrInPins = num;
+ xu->desc.bLength = UVC_DT_EXTENSION_UNIT_SIZE(xu->desc.bNrInPins,
+ xu->desc.bControlSize);
+
+ ret = len;
+
+unlock:
+ mutex_unlock(&opts->lock);
+ mutex_unlock(su_mutex);
+ return ret;
+}
+UVCG_EXTENSION_ATTR(b_nr_in_pins, bNrInPins);
+
+/*
+ * In addition to storing bControlSize, this function needs to realloc the
+ * memory for the bmControls array and additionally expand bLength.
+ */
+static ssize_t uvcg_extension_b_control_size_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item->ci_parent);
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct uvcg_extension *xu = to_uvcg_extension(item);
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+ void *tmp_buf;
+ int ret;
+ u8 num;
+
+ ret = kstrtou8(page, 0, &num);
+ if (ret)
+ return ret;
+
+ mutex_lock(su_mutex);
+
+ opts_item = item->ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+
+ if (num == xu->desc.bControlSize) {
+ ret = len;
+ goto unlock;
+ }
+
+ tmp_buf = krealloc_array(xu->desc.bmControls, num, sizeof(u8),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!tmp_buf) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ xu->desc.bmControls = tmp_buf;
+ xu->desc.bControlSize = num;
+ xu->desc.bLength = UVC_DT_EXTENSION_UNIT_SIZE(xu->desc.bNrInPins,
+ xu->desc.bControlSize);
+
+ ret = len;
+
+unlock:
+ mutex_unlock(&opts->lock);
+ mutex_unlock(su_mutex);
+ return ret;
+}
+
+UVCG_EXTENSION_ATTR(b_control_size, bControlSize);
+
+static ssize_t uvcg_extension_guid_extension_code_show(struct config_item *item,
+ char *page)
+{
+ struct config_group *group = to_config_group(item->ci_parent);
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct uvcg_extension *xu = to_uvcg_extension(item);
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+
+ mutex_lock(su_mutex);
+
+ opts_item = item->ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ memcpy(page, xu->desc.guidExtensionCode, sizeof(xu->desc.guidExtensionCode));
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+
+ return sizeof(xu->desc.guidExtensionCode);
+}
+
+static ssize_t uvcg_extension_guid_extension_code_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item->ci_parent);
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct uvcg_extension *xu = to_uvcg_extension(item);
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+ int ret;
+
+ mutex_lock(su_mutex);
+
+ opts_item = item->ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ memcpy(xu->desc.guidExtensionCode, page,
+ min(sizeof(xu->desc.guidExtensionCode), len));
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+
+ ret = sizeof(xu->desc.guidExtensionCode);
+
+ return ret;
+}
+
+UVC_ATTR(uvcg_extension_, guid_extension_code, guidExtensionCode);
+
+static ssize_t uvcg_extension_ba_source_id_show(struct config_item *item,
+ char *page)
+{
+ struct config_group *group = to_config_group(item->ci_parent);
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct uvcg_extension *xu = to_uvcg_extension(item);
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+ char *pg = page;
+ int ret, i;
+
+ mutex_lock(su_mutex);
+
+ opts_item = item->ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ for (ret = 0, i = 0; i < xu->desc.bNrInPins; ++i) {
+ ret += sprintf(pg, "%u\n", xu->desc.baSourceID[i]);
+ pg = page + ret;
+ }
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+
+ return ret;
+}
+
+static ssize_t uvcg_extension_ba_source_id_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item->ci_parent);
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct uvcg_extension *xu = to_uvcg_extension(item);
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+ u8 *source_ids, *iter;
+ int ret, n = 0;
+
+ mutex_lock(su_mutex);
+
+ opts_item = item->ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+
+ ret = __uvcg_iter_item_entries(page, len, __uvcg_count_item_entries, &n,
+ sizeof(u8));
+ if (ret)
+ goto unlock;
+
+ iter = source_ids = kcalloc(n, sizeof(u8), GFP_KERNEL);
+ if (!source_ids) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ ret = __uvcg_iter_item_entries(page, len, __uvcg_fill_item_entries, &iter,
+ sizeof(u8));
+ if (ret) {
+ kfree(source_ids);
+ goto unlock;
+ }
+
+ kfree(xu->desc.baSourceID);
+ xu->desc.baSourceID = source_ids;
+ xu->desc.bNrInPins = n;
+ xu->desc.bLength = UVC_DT_EXTENSION_UNIT_SIZE(xu->desc.bNrInPins,
+ xu->desc.bControlSize);
+
+ ret = len;
+
+unlock:
+ mutex_unlock(&opts->lock);
+ mutex_unlock(su_mutex);
+ return ret;
+}
+UVC_ATTR(uvcg_extension_, ba_source_id, baSourceID);
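The store handler above walks the written buffer twice with __uvcg_iter_item_entries(): one pass counts the whitespace/newline-separated entries so the array can be sized exactly, a second pass fills it. A rough userspace sketch of the same count-then-fill approach; the helper and variable names here are invented for illustration:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static size_t count_entries(const char *page)
	{
		char *copy = strdup(page), *tok;
		size_t n = 0;

		for (tok = strtok(copy, " \n"); tok; tok = strtok(NULL, " \n"))
			n++;
		free(copy);
		return n;
	}

	int main(void)
	{
		const char *page = "1 2 5\n";		/* what a configfs write might look like */
		size_t n = count_entries(page);		/* pass 1: size the array */
		unsigned char *ids = calloc(n, 1);
		char *copy = strdup(page), *tok;
		size_t i = 0;

		for (tok = strtok(copy, " \n"); tok; tok = strtok(NULL, " \n"))
			ids[i++] = (unsigned char)strtoul(tok, NULL, 0);	/* pass 2: fill */

		printf("%zu source IDs, first = %u\n", n, ids[0]);
		free(copy);
		free(ids);
		return 0;
	}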
+
+static ssize_t uvcg_extension_bm_controls_show(struct config_item *item,
+ char *page)
+{
+ struct config_group *group = to_config_group(item->ci_parent);
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct uvcg_extension *xu = to_uvcg_extension(item);
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+ char *pg = page;
+ int ret, i;
+
+ mutex_lock(su_mutex);
+
+ opts_item = item->ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ for (ret = 0, i = 0; i < xu->desc.bControlSize; ++i) {
+ ret += sprintf(pg, "0x%02x\n", xu->desc.bmControls[i]);
+ pg = page + ret;
+ }
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+
+ return ret;
+}
+
+static ssize_t uvcg_extension_bm_controls_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item->ci_parent);
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct uvcg_extension *xu = to_uvcg_extension(item);
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+ u8 *bm_controls, *iter;
+ int ret, n = 0;
+
+ mutex_lock(su_mutex);
+
+ opts_item = item->ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+
+ ret = __uvcg_iter_item_entries(page, len, __uvcg_count_item_entries, &n,
+ sizeof(u8));
+ if (ret)
+ goto unlock;
+
+ iter = bm_controls = kcalloc(n, sizeof(u8), GFP_KERNEL);
+ if (!bm_controls) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ ret = __uvcg_iter_item_entries(page, len, __uvcg_fill_item_entries, &iter,
+ sizeof(u8));
+ if (ret) {
+ kfree(bm_controls);
+ goto unlock;
+ }
+
+ kfree(xu->desc.bmControls);
+ xu->desc.bmControls = bm_controls;
+ xu->desc.bControlSize = n;
+ xu->desc.bLength = UVC_DT_EXTENSION_UNIT_SIZE(xu->desc.bNrInPins,
+ xu->desc.bControlSize);
+
+ ret = len;
+
+unlock:
+ mutex_unlock(&opts->lock);
+ mutex_unlock(su_mutex);
+ return ret;
+}
+
+UVC_ATTR(uvcg_extension_, bm_controls, bmControls);
+
+static struct configfs_attribute *uvcg_extension_attrs[] = {
+ &uvcg_extension_attr_b_length,
+ &uvcg_extension_attr_b_unit_id,
+ &uvcg_extension_attr_b_num_controls,
+ &uvcg_extension_attr_b_nr_in_pins,
+ &uvcg_extension_attr_b_control_size,
+ &uvcg_extension_attr_guid_extension_code,
+ &uvcg_extension_attr_ba_source_id,
+ &uvcg_extension_attr_bm_controls,
+ &uvcg_extension_attr_i_extension,
+ NULL,
+};
+
+static void uvcg_extension_release(struct config_item *item)
+{
+ struct uvcg_extension *xu = container_of(item, struct uvcg_extension, item);
+
+ kfree(xu);
+}
+
+static int uvcg_extension_allow_link(struct config_item *src, struct config_item *tgt)
+{
+ struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
+ struct uvcg_extension *xu = to_uvcg_extension(src);
+ struct config_item *gadget_item;
+ struct gadget_string *string;
+ struct config_item *strings;
+ int ret = 0;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ /* Validate that the target of the link is an entry in strings/<langid> */
+ gadget_item = src->ci_parent->ci_parent->ci_parent->ci_parent->ci_parent;
+ strings = config_group_find_item(to_config_group(gadget_item), "strings");
+ if (!strings || tgt->ci_parent->ci_parent != strings) {
+ ret = -EINVAL;
+ goto put_strings;
+ }
+
+ string = to_gadget_string(tgt);
+ xu->string_descriptor_index = string->usb_string.id;
+
+put_strings:
+ config_item_put(strings);
+ mutex_unlock(su_mutex);
+
+ return ret;
+}
+
+static void uvcg_extension_drop_link(struct config_item *src, struct config_item *tgt)
+{
+ struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
+ struct uvcg_extension *xu = to_uvcg_extension(src);
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ opts_item = src->ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+
+ xu->string_descriptor_index = 0;
+
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+}
+
+static struct configfs_item_operations uvcg_extension_item_ops = {
+ .release = uvcg_extension_release,
+ .allow_link = uvcg_extension_allow_link,
+ .drop_link = uvcg_extension_drop_link,
+};
+
+static const struct config_item_type uvcg_extension_type = {
+ .ct_item_ops = &uvcg_extension_item_ops,
+ .ct_attrs = uvcg_extension_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static void uvcg_extension_drop(struct config_group *group, struct config_item *item)
+{
+ struct uvcg_extension *xu = container_of(item, struct uvcg_extension, item);
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+
+ opts_item = group->cg_item.ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+
+ config_item_put(item);
+ list_del(&xu->list);
+ kfree(xu->desc.baSourceID);
+ kfree(xu->desc.bmControls);
+
+ mutex_unlock(&opts->lock);
+}
+
+static struct config_item *uvcg_extension_make(struct config_group *group, const char *name)
+{
+ struct config_item *opts_item;
+ struct uvcg_extension *xu;
+ struct f_uvc_opts *opts;
+
+ opts_item = group->cg_item.ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ xu = kzalloc(sizeof(*xu), GFP_KERNEL);
+ if (!xu)
+ return ERR_PTR(-ENOMEM);
+
+ xu->desc.bLength = UVC_DT_EXTENSION_UNIT_SIZE(0, 0);
+ xu->desc.bDescriptorType = USB_DT_CS_INTERFACE;
+ xu->desc.bDescriptorSubType = UVC_VC_EXTENSION_UNIT;
+ xu->desc.bNumControls = 0;
+ xu->desc.bNrInPins = 0;
+ xu->desc.baSourceID = NULL;
+ xu->desc.bControlSize = 0;
+ xu->desc.bmControls = NULL;
+
+ mutex_lock(&opts->lock);
+
+ xu->desc.bUnitID = ++opts->last_unit_id;
+
+ config_item_init_type_name(&xu->item, name, &uvcg_extension_type);
+ list_add_tail(&xu->list, &opts->extension_units);
+
+ mutex_unlock(&opts->lock);
+
+ return &xu->item;
+}
+
+static struct configfs_group_operations uvcg_extensions_grp_ops = {
+ .make_item = uvcg_extension_make,
+ .drop_item = uvcg_extension_drop,
+};
+
+static const struct uvcg_config_group_type uvcg_extensions_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_group_ops = &uvcg_extensions_grp_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "extensions",
+};
+
+/* -----------------------------------------------------------------------------
* control/class/{fs|ss}
*/
@@ -716,8 +1370,61 @@ static ssize_t uvcg_default_control_b_interface_number_show(
UVC_ATTR_RO(uvcg_default_control_, b_interface_number, bInterfaceNumber);
+static ssize_t uvcg_default_control_enable_interrupt_ep_show(
+ struct config_item *item, char *page)
+{
+ struct config_group *group = to_config_group(item);
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+ int result = 0;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ opts_item = item->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ result += sprintf(page, "%u\n", opts->enable_interrupt_ep);
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+
+ return result;
+}
+
+static ssize_t uvcg_default_control_enable_interrupt_ep_store(
+ struct config_item *item, const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item);
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+ ssize_t ret;
+ u8 num;
+
+ ret = kstrtou8(page, 0, &num);
+ if (ret)
+ return ret;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ opts_item = item->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ opts->enable_interrupt_ep = num;
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+
+ return len;
+}
+UVC_ATTR(uvcg_default_control_, enable_interrupt_ep, enable_interrupt_ep);
+
static struct configfs_attribute *uvcg_default_control_attrs[] = {
&uvcg_default_control_attr_b_interface_number,
+ &uvcg_default_control_attr_enable_interrupt_ep,
NULL,
};
@@ -733,6 +1440,7 @@ static const struct uvcg_config_group_type uvcg_control_grp_type = {
&uvcg_processing_grp_type,
&uvcg_terminal_grp_type,
&uvcg_control_class_grp_type,
+ &uvcg_extensions_grp_type,
NULL,
},
};
@@ -747,6 +1455,100 @@ static const char * const uvcg_format_names[] = {
"mjpeg",
};
+static struct uvcg_color_matching *
+uvcg_format_get_default_color_match(struct config_item *streaming)
+{
+ struct config_item *color_matching_item, *cm_default;
+ struct uvcg_color_matching *color_match;
+
+ color_matching_item = config_group_find_item(to_config_group(streaming),
+ "color_matching");
+ if (!color_matching_item)
+ return NULL;
+
+ cm_default = config_group_find_item(to_config_group(color_matching_item),
+ "default");
+ config_item_put(color_matching_item);
+ if (!cm_default)
+ return NULL;
+
+ color_match = to_uvcg_color_matching(to_config_group(cm_default));
+ config_item_put(cm_default);
+
+ return color_match;
+}
+
+static int uvcg_format_allow_link(struct config_item *src, struct config_item *tgt)
+{
+ struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
+ struct uvcg_color_matching *color_matching_desc;
+ struct config_item *streaming, *color_matching;
+ struct uvcg_format *fmt;
+ int ret = 0;
+
+ mutex_lock(su_mutex);
+
+ streaming = src->ci_parent->ci_parent;
+ color_matching = config_group_find_item(to_config_group(streaming), "color_matching");
+ if (!color_matching || color_matching != tgt->ci_parent) {
+ ret = -EINVAL;
+ goto out_put_cm;
+ }
+
+ fmt = to_uvcg_format(src);
+
+ /*
+ * There's always a color matching descriptor associated with the format
+ * but without a symlink it should only ever be the default one. If it's
+ * not the default, there's already a symlink and we should bail out.
+ */
+ color_matching_desc = uvcg_format_get_default_color_match(streaming);
+ if (fmt->color_matching != color_matching_desc) {
+ ret = -EBUSY;
+ goto out_put_cm;
+ }
+
+ color_matching_desc->refcnt--;
+
+ color_matching_desc = to_uvcg_color_matching(to_config_group(tgt));
+ fmt->color_matching = color_matching_desc;
+ color_matching_desc->refcnt++;
+
+out_put_cm:
+ config_item_put(color_matching);
+ mutex_unlock(su_mutex);
+
+ return ret;
+}
+
+static void uvcg_format_drop_link(struct config_item *src, struct config_item *tgt)
+{
+ struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
+ struct uvcg_color_matching *color_matching_desc;
+ struct config_item *streaming;
+ struct uvcg_format *fmt;
+
+ mutex_lock(su_mutex);
+
+ color_matching_desc = to_uvcg_color_matching(to_config_group(tgt));
+ color_matching_desc->refcnt--;
+
+ streaming = src->ci_parent->ci_parent;
+ color_matching_desc = uvcg_format_get_default_color_match(streaming);
+
+ fmt = to_uvcg_format(src);
+ fmt->color_matching = color_matching_desc;
+ color_matching_desc->refcnt++;
+
+ mutex_unlock(su_mutex);
+}
+
+static struct configfs_item_operations uvcg_format_item_operations = {
+ .release = uvcg_config_item_release,
+ .allow_link = uvcg_format_allow_link,
+ .drop_link = uvcg_format_drop_link,
+};
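The link/unlink pair above moves a single reference from the currently attached color-matching descriptor to the new target, so a descriptor with a non-zero refcnt is known to be in use (which is also why the UVCG_COLOR_MATCHING_ATTR store below rejects writes to an in-use descriptor with -EBUSY). A minimal sketch of that handoff, with plain counters standing in for the configfs objects:

	#include <assert.h>
	#include <stdio.h>

	struct cm { unsigned int refcnt; };

	/* Move the format's reference from the attached descriptor to @new,
	 * loosely mirroring the allow_link/drop_link handoff above. */
	static void relink(struct cm **attached, struct cm *new)
	{
		(*attached)->refcnt--;
		new->refcnt++;
		*attached = new;
	}

	int main(void)
	{
		struct cm def = { .refcnt = 1 }, custom = { .refcnt = 0 };
		struct cm *attached = &def;	/* a format starts on the default descriptor */

		relink(&attached, &custom);	/* allow_link: switch to the custom one */
		assert(def.refcnt == 0 && custom.refcnt == 1);

		relink(&attached, &def);	/* drop_link: fall back to the default */
		printf("default refcnt=%u custom refcnt=%u\n", def.refcnt, custom.refcnt);
		return 0;
	}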
+
static ssize_t uvcg_format_bma_controls_show(struct uvcg_format *f, char *page)
{
struct f_uvc_opts *opts;
@@ -1131,57 +1933,6 @@ static ssize_t uvcg_frame_dw_frame_interval_show(struct config_item *item,
return result;
}
-static inline int __uvcg_count_frm_intrv(char *buf, void *priv)
-{
- ++*((int *)priv);
- return 0;
-}
-
-static inline int __uvcg_fill_frm_intrv(char *buf, void *priv)
-{
- u32 num, **interv;
- int ret;
-
- ret = kstrtou32(buf, 0, &num);
- if (ret)
- return ret;
-
- interv = priv;
- **interv = num;
- ++*interv;
-
- return 0;
-}
-
-static int __uvcg_iter_frm_intrv(const char *page, size_t len,
- int (*fun)(char *, void *), void *priv)
-{
- /* sign, base 2 representation, newline, terminator */
- char buf[1 + sizeof(u32) * 8 + 1 + 1];
- const char *pg = page;
- int i, ret;
-
- if (!fun)
- return -EINVAL;
-
- while (pg - page < len) {
- i = 0;
- while (i < sizeof(buf) && (pg - page < len) &&
- *pg != '\0' && *pg != '\n')
- buf[i++] = *pg++;
- if (i == sizeof(buf))
- return -EINVAL;
- while ((pg - page < len) && (*pg == '\0' || *pg == '\n'))
- ++pg;
- buf[i] = '\0';
- ret = fun(buf, priv);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static ssize_t uvcg_frame_dw_frame_interval_store(struct config_item *item,
const char *page, size_t len)
{
@@ -1205,7 +1956,7 @@ static ssize_t uvcg_frame_dw_frame_interval_store(struct config_item *item,
goto end;
}
- ret = __uvcg_iter_frm_intrv(page, len, __uvcg_count_frm_intrv, &n);
+ ret = __uvcg_iter_item_entries(page, len, __uvcg_count_item_entries, &n, sizeof(u32));
if (ret)
goto end;
@@ -1215,7 +1966,7 @@ static ssize_t uvcg_frame_dw_frame_interval_store(struct config_item *item,
goto end;
}
- ret = __uvcg_iter_frm_intrv(page, len, __uvcg_fill_frm_intrv, &tmp);
+ ret = __uvcg_iter_item_entries(page, len, __uvcg_fill_item_entries, &tmp, sizeof(u32));
if (ret) {
kfree(frm_intrv);
goto end;
@@ -1547,7 +2298,7 @@ static struct configfs_attribute *uvcg_uncompressed_attrs[] = {
};
static const struct config_item_type uvcg_uncompressed_type = {
- .ct_item_ops = &uvcg_config_item_ops,
+ .ct_item_ops = &uvcg_format_item_operations,
.ct_group_ops = &uvcg_uncompressed_group_ops,
.ct_attrs = uvcg_uncompressed_attrs,
.ct_owner = THIS_MODULE,
@@ -1560,8 +2311,15 @@ static struct config_group *uvcg_uncompressed_make(struct config_group *group,
'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00,
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71
};
+ struct uvcg_color_matching *color_match;
+ struct config_item *streaming;
struct uvcg_uncompressed *h;
+ streaming = group->cg_item.ci_parent;
+ color_match = uvcg_format_get_default_color_match(streaming);
+ if (!color_match)
+ return ERR_PTR(-EINVAL);
+
h = kzalloc(sizeof(*h), GFP_KERNEL);
if (!h)
return ERR_PTR(-ENOMEM);
@@ -1579,6 +2337,8 @@ static struct config_group *uvcg_uncompressed_make(struct config_group *group,
INIT_LIST_HEAD(&h->fmt.frames);
h->fmt.type = UVCG_UNCOMPRESSED;
+ h->fmt.color_matching = color_match;
+ color_match->refcnt++;
config_group_init_type_name(&h->fmt.group, name,
&uvcg_uncompressed_type);
@@ -1734,7 +2494,7 @@ static struct configfs_attribute *uvcg_mjpeg_attrs[] = {
};
static const struct config_item_type uvcg_mjpeg_type = {
- .ct_item_ops = &uvcg_config_item_ops,
+ .ct_item_ops = &uvcg_format_item_operations,
.ct_group_ops = &uvcg_mjpeg_group_ops,
.ct_attrs = uvcg_mjpeg_attrs,
.ct_owner = THIS_MODULE,
@@ -1743,8 +2503,15 @@ static const struct config_item_type uvcg_mjpeg_type = {
static struct config_group *uvcg_mjpeg_make(struct config_group *group,
const char *name)
{
+ struct uvcg_color_matching *color_match;
+ struct config_item *streaming;
struct uvcg_mjpeg *h;
+ streaming = group->cg_item.ci_parent;
+ color_match = uvcg_format_get_default_color_match(streaming);
+ if (!color_match)
+ return ERR_PTR(-EINVAL);
+
h = kzalloc(sizeof(*h), GFP_KERNEL);
if (!h)
return ERR_PTR(-ENOMEM);
@@ -1760,6 +2527,8 @@ static struct config_group *uvcg_mjpeg_make(struct config_group *group,
INIT_LIST_HEAD(&h->fmt.frames);
h->fmt.type = UVCG_MJPEG;
+ h->fmt.color_matching = color_match;
+ color_match->refcnt++;
config_group_init_type_name(&h->fmt.group, name,
&uvcg_mjpeg_type);
@@ -1783,70 +2552,159 @@ static const struct uvcg_config_group_type uvcg_mjpeg_grp_type = {
* streaming/color_matching/default
*/
-#define UVCG_DEFAULT_COLOR_MATCHING_ATTR(cname, aname, bits) \
-static ssize_t uvcg_default_color_matching_##cname##_show( \
+#define UVCG_COLOR_MATCHING_ATTR(cname, aname, bits) \
+static ssize_t uvcg_color_matching_##cname##_show( \
struct config_item *item, char *page) \
{ \
struct config_group *group = to_config_group(item); \
+ struct uvcg_color_matching *color_match = \
+ to_uvcg_color_matching(group); \
struct f_uvc_opts *opts; \
struct config_item *opts_item; \
struct mutex *su_mutex = &group->cg_subsys->su_mutex; \
- struct uvc_color_matching_descriptor *cd; \
int result; \
\
mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
\
opts_item = group->cg_item.ci_parent->ci_parent->ci_parent; \
opts = to_f_uvc_opts(opts_item); \
- cd = &opts->uvc_color_matching; \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%u\n", le##bits##_to_cpu(cd->aname)); \
+ result = sprintf(page, "%u\n", \
+ le##bits##_to_cpu(color_match->desc.aname)); \
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
return result; \
} \
\
-UVC_ATTR_RO(uvcg_default_color_matching_, cname, aname)
+static ssize_t uvcg_color_matching_##cname##_store( \
+ struct config_item *item, const char *page, size_t len) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex; \
+ struct uvcg_color_matching *color_match = \
+ to_uvcg_color_matching(group); \
+ struct f_uvc_opts *opts; \
+ struct config_item *opts_item; \
+ int ret; \
+ u##bits num; \
+ \
+ ret = kstrtou##bits(page, 0, &num); \
+ if (ret) \
+ return ret; \
+ \
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
+ \
+ if (color_match->refcnt) { \
+ ret = -EBUSY; \
+ goto unlock_su; \
+ } \
+ \
+ opts_item = group->cg_item.ci_parent->ci_parent->ci_parent; \
+ opts = to_f_uvc_opts(opts_item); \
+ \
+ mutex_lock(&opts->lock); \
+ \
+ color_match->desc.aname = num; \
+ ret = len; \
+ \
+ mutex_unlock(&opts->lock); \
+unlock_su: \
+ mutex_unlock(su_mutex); \
+ \
+ return ret; \
+} \
+UVC_ATTR(uvcg_color_matching_, cname, aname)
-UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_color_primaries, bColorPrimaries, 8);
-UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_transfer_characteristics,
- bTransferCharacteristics, 8);
-UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_matrix_coefficients, bMatrixCoefficients, 8);
+UVCG_COLOR_MATCHING_ATTR(b_color_primaries, bColorPrimaries, 8);
+UVCG_COLOR_MATCHING_ATTR(b_transfer_characteristics, bTransferCharacteristics, 8);
+UVCG_COLOR_MATCHING_ATTR(b_matrix_coefficients, bMatrixCoefficients, 8);
-#undef UVCG_DEFAULT_COLOR_MATCHING_ATTR
+#undef UVCG_COLOR_MATCHING_ATTR
-static struct configfs_attribute *uvcg_default_color_matching_attrs[] = {
- &uvcg_default_color_matching_attr_b_color_primaries,
- &uvcg_default_color_matching_attr_b_transfer_characteristics,
- &uvcg_default_color_matching_attr_b_matrix_coefficients,
+static struct configfs_attribute *uvcg_color_matching_attrs[] = {
+ &uvcg_color_matching_attr_b_color_primaries,
+ &uvcg_color_matching_attr_b_transfer_characteristics,
+ &uvcg_color_matching_attr_b_matrix_coefficients,
NULL,
};
-static const struct uvcg_config_group_type uvcg_default_color_matching_type = {
- .type = {
- .ct_item_ops = &uvcg_config_item_ops,
- .ct_attrs = uvcg_default_color_matching_attrs,
- .ct_owner = THIS_MODULE,
- },
- .name = "default",
+static void uvcg_color_matching_release(struct config_item *item)
+{
+ struct uvcg_color_matching *color_match =
+ to_uvcg_color_matching(to_config_group(item));
+
+ kfree(color_match);
+}
+
+static struct configfs_item_operations uvcg_color_matching_item_ops = {
+ .release = uvcg_color_matching_release,
+};
+
+static const struct config_item_type uvcg_color_matching_type = {
+ .ct_item_ops = &uvcg_color_matching_item_ops,
+ .ct_attrs = uvcg_color_matching_attrs,
+ .ct_owner = THIS_MODULE,
};
/* -----------------------------------------------------------------------------
* streaming/color_matching
*/
+static struct config_group *uvcg_color_matching_make(struct config_group *group,
+ const char *name)
+{
+ struct uvcg_color_matching *color_match;
+
+ color_match = kzalloc(sizeof(*color_match), GFP_KERNEL);
+ if (!color_match)
+ return ERR_PTR(-ENOMEM);
+
+ color_match->desc.bLength = UVC_DT_COLOR_MATCHING_SIZE;
+ color_match->desc.bDescriptorType = USB_DT_CS_INTERFACE;
+ color_match->desc.bDescriptorSubType = UVC_VS_COLORFORMAT;
+
+ config_group_init_type_name(&color_match->group, name,
+ &uvcg_color_matching_type);
+
+ return &color_match->group;
+}
+
+static struct configfs_group_operations uvcg_color_matching_grp_group_ops = {
+ .make_group = uvcg_color_matching_make,
+};
+
+static int uvcg_color_matching_create_children(struct config_group *parent)
+{
+ struct uvcg_color_matching *color_match;
+
+ color_match = kzalloc(sizeof(*color_match), GFP_KERNEL);
+ if (!color_match)
+ return -ENOMEM;
+
+ color_match->desc.bLength = UVC_DT_COLOR_MATCHING_SIZE;
+ color_match->desc.bDescriptorType = USB_DT_CS_INTERFACE;
+ color_match->desc.bDescriptorSubType = UVC_VS_COLORFORMAT;
+ color_match->desc.bColorPrimaries = UVC_COLOR_PRIMARIES_BT_709_SRGB;
+ color_match->desc.bTransferCharacteristics = UVC_TRANSFER_CHARACTERISTICS_BT_709;
+ color_match->desc.bMatrixCoefficients = UVC_MATRIX_COEFFICIENTS_SMPTE_170M;
+
+ config_group_init_type_name(&color_match->group, "default",
+ &uvcg_color_matching_type);
+ configfs_add_default_group(&color_match->group, parent);
+
+ return 0;
+}
+
static const struct uvcg_config_group_type uvcg_color_matching_grp_type = {
.type = {
.ct_item_ops = &uvcg_config_item_ops,
+ .ct_group_ops = &uvcg_color_matching_grp_group_ops,
.ct_owner = THIS_MODULE,
},
.name = "color_matching",
- .children = (const struct uvcg_config_group_type*[]) {
- &uvcg_default_color_matching_type,
- NULL,
- },
+ .create_children = uvcg_color_matching_create_children,
};
/* -----------------------------------------------------------------------------
@@ -1880,7 +2738,8 @@ static inline struct uvc_descriptor_header
enum uvcg_strm_type {
UVCG_HEADER = 0,
UVCG_FORMAT,
- UVCG_FRAME
+ UVCG_FRAME,
+ UVCG_COLOR_MATCHING,
};
/*
@@ -1930,6 +2789,11 @@ static int __uvcg_iter_strm_cls(struct uvcg_streaming_header *h,
if (ret)
return ret;
}
+
+ ret = fun(f->fmt->color_matching, priv2, priv3, 0,
+ UVCG_COLOR_MATCHING);
+ if (ret)
+ return ret;
}
return ret;
@@ -1985,6 +2849,12 @@ static int __uvcg_cnt_strm(void *priv1, void *priv2, void *priv3, int n,
*size += frm->frame.b_frame_interval_type * sz;
}
break;
+ case UVCG_COLOR_MATCHING: {
+ struct uvcg_color_matching *color_match = priv1;
+
+ *size += sizeof(color_match->desc);
+ }
+ break;
}
++*count;
@@ -2070,6 +2940,13 @@ static int __uvcg_fill_strm(void *priv1, void *priv2, void *priv3, int n,
frm->frame.b_frame_interval_type);
}
break;
+ case UVCG_COLOR_MATCHING: {
+ struct uvcg_color_matching *color_match = priv1;
+
+ memcpy(*dest, &color_match->desc, sizeof(color_match->desc));
+ *dest += sizeof(color_match->desc);
+ }
+ break;
}
return 0;
@@ -2109,7 +2986,7 @@ static int uvcg_streaming_class_allow_link(struct config_item *src,
if (ret)
goto unlock;
- count += 2; /* color_matching, NULL */
+ count += 1; /* NULL */
*class_array = kcalloc(count, sizeof(void *), GFP_KERNEL);
if (!*class_array) {
ret = -ENOMEM;
@@ -2136,7 +3013,6 @@ static int uvcg_streaming_class_allow_link(struct config_item *src,
kfree(data_save);
goto unlock;
}
- *cl_arr = (struct uvc_descriptor_header *)&opts->uvc_color_matching;
++target_hdr->linked;
ret = 0;
@@ -2298,8 +3174,68 @@ static void uvc_func_item_release(struct config_item *item)
usb_put_function_instance(&opts->func_inst);
}
+static int uvc_func_allow_link(struct config_item *src, struct config_item *tgt)
+{
+ struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
+ struct gadget_string *string;
+ struct config_item *strings;
+ struct f_uvc_opts *opts;
+ int ret = 0;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ /* Validate that the target is an entry in strings/<langid> */
+ strings = config_group_find_item(to_config_group(src->ci_parent->ci_parent),
+ "strings");
+ if (!strings || tgt->ci_parent->ci_parent != strings) {
+ ret = -EINVAL;
+ goto put_strings;
+ }
+
+ string = to_gadget_string(tgt);
+
+ opts = to_f_uvc_opts(src);
+ mutex_lock(&opts->lock);
+
+ if (!strcmp(tgt->ci_name, "iad_desc"))
+ opts->iad_index = string->usb_string.id;
+ else if (!strcmp(tgt->ci_name, "vs0_desc"))
+ opts->vs0_index = string->usb_string.id;
+ else if (!strcmp(tgt->ci_name, "vs1_desc"))
+ opts->vs1_index = string->usb_string.id;
+ else
+ ret = -EINVAL;
+
+ mutex_unlock(&opts->lock);
+
+put_strings:
+ config_item_put(strings);
+ mutex_unlock(su_mutex);
+
+ return ret;
+}
+
+static void uvc_func_drop_link(struct config_item *src, struct config_item *tgt)
+{
+ struct f_uvc_opts *opts;
+
+ opts = to_f_uvc_opts(src);
+ mutex_lock(&opts->lock);
+
+ if (!strcmp(tgt->ci_name, "iad_desc"))
+ opts->iad_index = 0;
+ else if (!strcmp(tgt->ci_name, "vs0_desc"))
+ opts->vs0_index = 0;
+ else if (!strcmp(tgt->ci_name, "vs1_desc"))
+ opts->vs1_index = 0;
+
+ mutex_unlock(&opts->lock);
+}
+
static struct configfs_item_operations uvc_func_item_ops = {
.release = uvc_func_item_release,
+ .allow_link = uvc_func_allow_link,
+ .drop_link = uvc_func_drop_link,
};
#define UVCG_OPTS_ATTR(cname, aname, limit) \
diff --git a/drivers/usb/gadget/function/uvc_configfs.h b/drivers/usb/gadget/function/uvc_configfs.h
index ad2ec8c4c78c..c6a690158138 100644
--- a/drivers/usb/gadget/function/uvc_configfs.h
+++ b/drivers/usb/gadget/function/uvc_configfs.h
@@ -37,18 +37,28 @@ static inline struct uvcg_control_header *to_uvcg_control_header(struct config_i
return container_of(item, struct uvcg_control_header, item);
}
+struct uvcg_color_matching {
+ struct config_group group;
+ struct uvc_color_matching_descriptor desc;
+ unsigned int refcnt;
+};
+
+#define to_uvcg_color_matching(group_ptr) \
+container_of(group_ptr, struct uvcg_color_matching, group)
+
enum uvcg_format_type {
UVCG_UNCOMPRESSED = 0,
UVCG_MJPEG,
};
struct uvcg_format {
- struct config_group group;
- enum uvcg_format_type type;
- unsigned linked;
- struct list_head frames;
- unsigned num_frames;
- __u8 bmaControls[UVCG_STREAMING_CONTROL_SIZE];
+ struct config_group group;
+ enum uvcg_format_type type;
+ unsigned linked;
+ struct list_head frames;
+ unsigned num_frames;
+ __u8 bmaControls[UVCG_STREAMING_CONTROL_SIZE];
+ struct uvcg_color_matching *color_matching;
};
struct uvcg_format_ptr {
@@ -132,6 +142,36 @@ static inline struct uvcg_mjpeg *to_uvcg_mjpeg(struct config_item *item)
return container_of(to_uvcg_format(item), struct uvcg_mjpeg, fmt);
}
+/* -----------------------------------------------------------------------------
+ * control/extensions/<NAME>
+ */
+
+struct uvcg_extension_unit_descriptor {
+ u8 bLength;
+ u8 bDescriptorType;
+ u8 bDescriptorSubType;
+ u8 bUnitID;
+ u8 guidExtensionCode[16];
+ u8 bNumControls;
+ u8 bNrInPins;
+ u8 *baSourceID;
+ u8 bControlSize;
+ u8 *bmControls;
+ u8 iExtension;
+} __packed;
+
+struct uvcg_extension {
+ struct config_item item;
+ struct list_head list;
+ u8 string_descriptor_index;
+ struct uvcg_extension_unit_descriptor desc;
+};
+
+static inline struct uvcg_extension *to_uvcg_extension(struct config_item *item)
+{
+ return container_of(item, struct uvcg_extension, item);
+}
+
int uvcg_attach_configfs(struct f_uvc_opts *opts);
#endif /* UVC_CONFIGFS_H */
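The to_uvcg_extension() and to_uvcg_color_matching() helpers above are the usual container_of() idiom: configfs hands back a pointer to the embedded item/group member, and the driver recovers the enclosing object from it. A self-contained userspace rendition of that pattern, with invented struct names:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct item { int dummy; };

	struct extension {
		int unit_id;
		struct item item;	/* embedded member handed out to the framework */
	};

	int main(void)
	{
		struct extension xu = { .unit_id = 3 };
		struct item *it = &xu.item;	/* what a callback would receive */
		struct extension *back = container_of(it, struct extension, item);

		printf("unit id = %d\n", back->unit_id);
		return 0;
	}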
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index a189b08bba80..3f0a9795c0d4 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/usb/g_uvc.h>
+#include <linux/usb/uvc.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
@@ -18,7 +19,6 @@
#include <media/v4l2-dev.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
-#include <media/v4l2-uvc.h>
#include "f_uvc.h"
#include "uvc.h"
@@ -27,10 +27,10 @@
#include "uvc_v4l2.h"
#include "uvc_configfs.h"
-static struct uvc_format_desc *to_uvc_format(struct uvcg_format *uformat)
+static const struct uvc_format_desc *to_uvc_format(struct uvcg_format *uformat)
{
char guid[16] = UVC_GUID_FORMAT_MJPEG;
- struct uvc_format_desc *format;
+ const struct uvc_format_desc *format;
struct uvcg_uncompressed *unc;
if (uformat->type == UVCG_UNCOMPRESSED) {
@@ -119,7 +119,7 @@ static struct uvcg_format *find_format_by_pix(struct uvc_device *uvc,
struct uvcg_format *uformat = NULL;
list_for_each_entry(format, &uvc->header->formats, entry) {
- struct uvc_format_desc *fmtdesc = to_uvc_format(format->fmt);
+ const struct uvc_format_desc *fmtdesc = to_uvc_format(format->fmt);
if (fmtdesc->fcc == pixelformat) {
uformat = format->fmt;
@@ -364,7 +364,7 @@ uvc_v4l2_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
- struct uvc_format_desc *fmtdesc;
+ const struct uvc_format_desc *fmtdesc;
struct uvcg_format *uformat;
if (f->index >= uvc->header->num_fmt)
@@ -374,15 +374,9 @@ uvc_v4l2_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
if (!uformat)
return -EINVAL;
- if (uformat->type != UVCG_UNCOMPRESSED)
- f->flags |= V4L2_FMT_FLAG_COMPRESSED;
-
fmtdesc = to_uvc_format(uformat);
f->pixelformat = fmtdesc->fcc;
- strscpy(f->description, fmtdesc->name, sizeof(f->description));
- f->description[strlen(fmtdesc->name) - 1] = 0;
-
return 0;
}
diff --git a/drivers/usb/gadget/legacy/hid.c b/drivers/usb/gadget/legacy/hid.c
index 1187ee4f316a..133daf88162e 100644
--- a/drivers/usb/gadget/legacy/hid.c
+++ b/drivers/usb/gadget/legacy/hid.c
@@ -133,14 +133,11 @@ static struct usb_configuration config_driver = {
static int hid_bind(struct usb_composite_dev *cdev)
{
struct usb_gadget *gadget = cdev->gadget;
- struct list_head *tmp;
struct hidg_func_node *n = NULL, *m, *iter_n;
struct f_hid_opts *hid_opts;
- int status, funcs = 0;
-
- list_for_each(tmp, &hidg_func_list)
- funcs++;
+ int status, funcs;
+ funcs = list_count_nodes(&hidg_func_list);
if (!funcs)
return -ENODEV;
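The hunk above drops an open-coded counting loop in favour of list_count_nodes(), which simply walks the circular list once and returns the number of entries as a size_t. A small standalone stand-in for the kernel list helpers, showing what the replacement does:

	#include <stddef.h>
	#include <stdio.h>

	struct list_head { struct list_head *next, *prev; };

	#define LIST_HEAD_INIT(name) { &(name), &(name) }

	static void list_add_tail(struct list_head *new, struct list_head *head)
	{
		new->prev = head->prev;
		new->next = head;
		head->prev->next = new;
		head->prev = new;
	}

	/* Userspace stand-in for the kernel's list_count_nodes(). */
	static size_t list_count_nodes(const struct list_head *head)
	{
		const struct list_head *pos;
		size_t count = 0;

		for (pos = head->next; pos != head; pos = pos->next)
			count++;
		return count;
	}

	int main(void)
	{
		struct list_head funcs = LIST_HEAD_INIT(funcs);
		struct list_head a, b, c;

		list_add_tail(&a, &funcs);
		list_add_tail(&b, &funcs);
		list_add_tail(&c, &funcs);
		printf("%zu functions queued\n", list_count_nodes(&funcs));
		return 0;
	}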
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 511ab57cdc81..83cae6bb12eb 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -179,9 +179,20 @@ config USB_RENESAS_USBHS_UDC
dynamically linked module called "renesas_usbhs" and force all
gadget drivers to also be dynamically linked.
+config USB_RZV2M_USB3DRD
+ tristate 'Renesas USB3.1 DRD controller'
+ depends on ARCH_R9A09G011 || COMPILE_TEST
+ help
+	  Renesas USB3.1 DRD controller is a dual-role (DRD) controller
+	  that supports switching between host and device operation.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "rzv2m_usb3drd".
+
config USB_RENESAS_USB3
tristate 'Renesas USB3.0 Peripheral controller'
depends on ARCH_RENESAS || COMPILE_TEST
+ depends on USB_RZV2M_USB3DRD || !USB_RZV2M_USB3DRD
depends on EXTCON
select USB_ROLE_SWITCH
help
@@ -192,6 +203,17 @@ config USB_RENESAS_USB3
dynamically linked module called "renesas_usb3" and force all
gadget drivers to also be dynamically linked.
+config USB_RENESAS_USBF
+ tristate 'Renesas USB Function controller'
+ depends on ARCH_RENESAS || COMPILE_TEST
+ help
+ Renesas USB Function controller is a USB peripheral controller
+ available on RZ/N1 Renesas SoCs.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "renesas_usbf" and force all
+ gadget drivers to also be dynamically linked.
+
config USB_PXA27X
tristate "PXA 27x"
depends on HAS_IOMEM
diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile
index 239ea22bdfd9..ee569f63c74a 100644
--- a/drivers/usb/gadget/udc/Makefile
+++ b/drivers/usb/gadget/udc/Makefile
@@ -26,6 +26,8 @@ obj-$(CONFIG_USB_TEGRA_XUDC) += tegra-xudc.o
obj-$(CONFIG_USB_M66592) += m66592-udc.o
obj-$(CONFIG_USB_R8A66597) += r8a66597-udc.o
obj-$(CONFIG_USB_RENESAS_USB3) += renesas_usb3.o
+obj-$(CONFIG_USB_RZV2M_USB3DRD) += rzv2m_usb3drd.o
+obj-$(CONFIG_USB_RENESAS_USBF) += renesas_usbf.o
obj-$(CONFIG_USB_FSL_QE) += fsl_qe_udc.o
obj-$(CONFIG_USB_LPC32XX) += lpc32xx_udc.o
obj-$(CONFIG_USB_EG20T) += pch_udc.o
diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
index d04d72f5816e..a3055dd4acfb 100644
--- a/drivers/usb/gadget/udc/bcm63xx_udc.c
+++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
@@ -2171,7 +2171,6 @@ static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
struct iudma_ch *iudma = &udc->iudma[ch_idx];
- struct list_head *pos;
seq_printf(s, "IUDMA channel %d -- ", ch_idx);
switch (iudma_defaults[ch_idx].ep_type) {
@@ -2204,14 +2203,10 @@ static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
seq_printf(s, " desc: %d/%d used", iudma->n_bds_used,
iudma->n_bds);
- if (iudma->bep) {
- i = 0;
- list_for_each(pos, &iudma->bep->queue)
- i++;
- seq_printf(s, "; %d queued\n", i);
- } else {
+ if (iudma->bep)
+ seq_printf(s, "; %zu queued\n", list_count_nodes(&iudma->bep->queue));
+ else
seq_printf(s, "\n");
- }
for (i = 0; i < iudma->n_bds; i++) {
struct bcm_enet_desc *d = &iudma->bd_ring[i];
@@ -2258,7 +2253,7 @@ static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
*/
static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
{
- debugfs_remove(debugfs_lookup(udc->gadget.name, usb_debug_root));
+ debugfs_lookup_and_remove(udc->gadget.name, usb_debug_root);
}
/***********************************************************************
diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c
index 5954800d652c..08ba9c8c1e67 100644
--- a/drivers/usb/gadget/udc/fusb300_udc.c
+++ b/drivers/usb/gadget/udc/fusb300_udc.c
@@ -1346,6 +1346,7 @@ static int fusb300_remove(struct platform_device *pdev)
usb_del_gadget_udc(&fusb300->gadget);
iounmap(fusb300->reg);
free_irq(platform_get_irq(pdev, 0), fusb300);
+ free_irq(platform_get_irq(pdev, 1), fusb300);
fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req);
for (i = 0; i < FUSB300_MAX_NUM_EP; i++)
@@ -1431,7 +1432,7 @@ static int fusb300_probe(struct platform_device *pdev)
IRQF_SHARED, udc_name, fusb300);
if (ret < 0) {
pr_err("request_irq1 error (%d)\n", ret);
- goto clean_up;
+ goto err_request_irq1;
}
INIT_LIST_HEAD(&fusb300->gadget.ep_list);
@@ -1470,7 +1471,7 @@ static int fusb300_probe(struct platform_device *pdev)
GFP_KERNEL);
if (fusb300->ep0_req == NULL) {
ret = -ENOMEM;
- goto clean_up3;
+ goto err_alloc_request;
}
init_controller(fusb300);
@@ -1485,7 +1486,10 @@ static int fusb300_probe(struct platform_device *pdev)
err_add_udc:
fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req);
-clean_up3:
+err_alloc_request:
+ free_irq(ires1->start, fusb300);
+
+err_request_irq1:
free_irq(ires->start, fusb300);
clean_up:
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 85cdc0af3bf9..09762559912d 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -215,7 +215,7 @@ static void gr_dfs_create(struct gr_udc *dev)
static void gr_dfs_delete(struct gr_udc *dev)
{
- debugfs_remove(debugfs_lookup(dev_name(dev->dev), usb_debug_root));
+ debugfs_lookup_and_remove(dev_name(dev->dev), usb_debug_root);
}
#else /* !CONFIG_USB_GADGET_DEBUG_FS */
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index cea10cdb83ae..fe62db32dd0e 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -532,7 +532,7 @@ static void create_debug_file(struct lpc32xx_udc *udc)
static void remove_debug_file(struct lpc32xx_udc *udc)
{
- debugfs_remove(debugfs_lookup(debug_filename, NULL));
+ debugfs_lookup_and_remove(debug_filename, NULL);
}
#else
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c
index e19b84a46a1e..df0551ecc810 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.c
+++ b/drivers/usb/gadget/udc/pxa25x_udc.c
@@ -1340,7 +1340,7 @@ DEFINE_SHOW_ATTRIBUTE(udc_debug);
debugfs_create_file(dev->gadget.name, \
S_IRUGO, NULL, dev, &udc_debug_fops); \
} while (0)
-#define remove_debug_files(dev) debugfs_remove(debugfs_lookup(dev->gadget.name, NULL))
+#define remove_debug_files(dev) debugfs_lookup_and_remove(dev->gadget.name, NULL)
#else /* !CONFIG_USB_GADGET_DEBUG_FILES */
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index ac980d6a4740..0ecdfd2ba9e9 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -215,7 +215,7 @@ static void pxa_init_debugfs(struct pxa_udc *udc)
static void pxa_cleanup_debugfs(struct pxa_udc *udc)
{
- debugfs_remove(debugfs_lookup(udc->gadget.name, usb_debug_root));
+ debugfs_lookup_and_remove(udc->gadget.name, usb_debug_root);
}
#else
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 615ba0a6fbee..bee6bceafc4f 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -7,6 +7,7 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/extcon-provider.h>
@@ -27,6 +28,7 @@
#include <linux/usb/gadget.h>
#include <linux/usb/of.h>
#include <linux/usb/role.h>
+#include <linux/usb/rzv2m_usb3drd.h>
/* register definitions */
#define USB3_AXI_INT_STA 0x008
@@ -334,7 +336,7 @@ struct renesas_usb3_priv {
struct renesas_usb3 {
void __iomem *reg;
- struct reset_control *drd_rstc;
+ void __iomem *drd_reg;
struct reset_control *usbp_rstc;
struct usb_gadget gadget;
@@ -426,6 +428,46 @@ static void usb3_clear_bit(struct renesas_usb3 *usb3, u32 bits, u32 offs)
usb3_write(usb3, val, offs);
}
+static void usb3_drd_write(struct renesas_usb3 *usb3, u32 data, u32 offs)
+{
+ void __iomem *reg;
+
+ if (usb3->is_rzv2m)
+ reg = usb3->drd_reg + offs - USB3_DRD_CON(usb3);
+ else
+ reg = usb3->reg + offs;
+
+ iowrite32(data, reg);
+}
+
+static u32 usb3_drd_read(struct renesas_usb3 *usb3, u32 offs)
+{
+ void __iomem *reg;
+
+ if (usb3->is_rzv2m)
+ reg = usb3->drd_reg + offs - USB3_DRD_CON(usb3);
+ else
+ reg = usb3->reg + offs;
+
+ return ioread32(reg);
+}
+
+static void usb3_drd_set_bit(struct renesas_usb3 *usb3, u32 bits, u32 offs)
+{
+ u32 val = usb3_drd_read(usb3, offs);
+
+ val |= bits;
+ usb3_drd_write(usb3, val, offs);
+}
+
+static void usb3_drd_clear_bit(struct renesas_usb3 *usb3, u32 bits, u32 offs)
+{
+ u32 val = usb3_drd_read(usb3, offs);
+
+ val &= ~bits;
+ usb3_drd_write(usb3, val, offs);
+}
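The new usb3_drd_set_bit()/usb3_drd_clear_bit() helpers are plain read-modify-write accessors that additionally pick the right register block (the separate DRD region on RZ/V2M, the main block otherwise). A compilable sketch of just the bit-twiddling part, with an ordinary variable standing in for the register and a made-up bit value:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical bit position, for illustration only. */
	#define PERI_CON_BIT	(1u << 24)

	static uint32_t drd_con;	/* pretend memory-mapped register */

	static void reg_set_bits(volatile uint32_t *reg, uint32_t bits)
	{
		*reg |= bits;		/* read-modify-write: set */
	}

	static void reg_clear_bits(volatile uint32_t *reg, uint32_t bits)
	{
		*reg &= ~bits;		/* read-modify-write: clear */
	}

	int main(void)
	{
		reg_set_bits(&drd_con, PERI_CON_BIT);
		printf("peripheral mode: %s\n", (drd_con & PERI_CON_BIT) ? "on" : "off");
		reg_clear_bits(&drd_con, PERI_CON_BIT);
		printf("peripheral mode: %s\n", (drd_con & PERI_CON_BIT) ? "on" : "off");
		return 0;
	}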
+
static int usb3_wait(struct renesas_usb3 *usb3, u32 reg, u32 mask,
u32 expected)
{
@@ -474,7 +516,7 @@ static void usb3_disable_pipe_irq(struct renesas_usb3 *usb3, int num)
static bool usb3_is_host(struct renesas_usb3 *usb3)
{
- return !(usb3_read(usb3, USB3_DRD_CON(usb3)) & DRD_CON_PERI_CON);
+ return !(usb3_drd_read(usb3, USB3_DRD_CON(usb3)) & DRD_CON_PERI_CON);
}
static void usb3_init_axi_bridge(struct renesas_usb3 *usb3)
@@ -683,18 +725,18 @@ static void usb3_set_mode(struct renesas_usb3 *usb3, bool host)
{
if (usb3->is_rzv2m) {
if (host) {
- usb3_set_bit(usb3, DRD_CON_PERI_RST, USB3_DRD_CON(usb3));
- usb3_clear_bit(usb3, DRD_CON_HOST_RST, USB3_DRD_CON(usb3));
+ usb3_drd_set_bit(usb3, DRD_CON_PERI_RST, USB3_DRD_CON(usb3));
+ usb3_drd_clear_bit(usb3, DRD_CON_HOST_RST, USB3_DRD_CON(usb3));
} else {
- usb3_set_bit(usb3, DRD_CON_HOST_RST, USB3_DRD_CON(usb3));
- usb3_clear_bit(usb3, DRD_CON_PERI_RST, USB3_DRD_CON(usb3));
+ usb3_drd_set_bit(usb3, DRD_CON_HOST_RST, USB3_DRD_CON(usb3));
+ usb3_drd_clear_bit(usb3, DRD_CON_PERI_RST, USB3_DRD_CON(usb3));
}
}
if (host)
- usb3_clear_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON(usb3));
+ usb3_drd_clear_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON(usb3));
else
- usb3_set_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON(usb3));
+ usb3_drd_set_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON(usb3));
}
static void usb3_set_mode_by_role_sw(struct renesas_usb3 *usb3, bool host)
@@ -710,9 +752,9 @@ static void usb3_set_mode_by_role_sw(struct renesas_usb3 *usb3, bool host)
static void usb3_vbus_out(struct renesas_usb3 *usb3, bool enable)
{
if (enable)
- usb3_set_bit(usb3, DRD_CON_VBOUT, USB3_DRD_CON(usb3));
+ usb3_drd_set_bit(usb3, DRD_CON_VBOUT, USB3_DRD_CON(usb3));
else
- usb3_clear_bit(usb3, DRD_CON_VBOUT, USB3_DRD_CON(usb3));
+ usb3_drd_clear_bit(usb3, DRD_CON_VBOUT, USB3_DRD_CON(usb3));
}
static void usb3_mode_config(struct renesas_usb3 *usb3, bool host, bool a_dev)
@@ -733,7 +775,7 @@ static void usb3_mode_config(struct renesas_usb3 *usb3, bool host, bool a_dev)
static bool usb3_is_a_device(struct renesas_usb3 *usb3)
{
- return !(usb3_read(usb3, USB3_USB_OTG_STA(usb3)) & USB_OTG_IDMON(usb3));
+ return !(usb3_drd_read(usb3, USB3_USB_OTG_STA(usb3)) & USB_OTG_IDMON(usb3));
}
static void usb3_check_id(struct renesas_usb3 *usb3)
@@ -756,8 +798,8 @@ static void renesas_usb3_init_controller(struct renesas_usb3 *usb3)
usb3_set_bit(usb3, USB_COM_CON_PN_WDATAIF_NL |
USB_COM_CON_PN_RDATAIF_NL | USB_COM_CON_PN_LSTTR_PP,
USB3_USB_COM_CON);
- usb3_write(usb3, USB_OTG_IDMON(usb3), USB3_USB_OTG_INT_STA(usb3));
- usb3_write(usb3, USB_OTG_IDMON(usb3), USB3_USB_OTG_INT_ENA(usb3));
+ usb3_drd_write(usb3, USB_OTG_IDMON(usb3), USB3_USB_OTG_INT_STA(usb3));
+ usb3_drd_write(usb3, USB_OTG_IDMON(usb3), USB3_USB_OTG_INT_ENA(usb3));
usb3_check_id(usb3);
usb3_check_vbus(usb3);
@@ -767,7 +809,7 @@ static void renesas_usb3_stop_controller(struct renesas_usb3 *usb3)
{
usb3_disconnect(usb3);
usb3_write(usb3, 0, USB3_P0_INT_ENA);
- usb3_write(usb3, 0, USB3_USB_OTG_INT_ENA(usb3));
+ usb3_drd_write(usb3, 0, USB3_USB_OTG_INT_ENA(usb3));
usb3_write(usb3, 0, USB3_USB_INT_ENA_1);
usb3_write(usb3, 0, USB3_USB_INT_ENA_2);
usb3_write(usb3, 0, USB3_AXI_INT_ENA);
@@ -2024,11 +2066,11 @@ static void usb3_irq_idmon_change(struct renesas_usb3 *usb3)
static void usb3_irq_otg_int(struct renesas_usb3 *usb3)
{
- u32 otg_int_sta = usb3_read(usb3, USB3_USB_OTG_INT_STA(usb3));
+ u32 otg_int_sta = usb3_drd_read(usb3, USB3_USB_OTG_INT_STA(usb3));
- otg_int_sta &= usb3_read(usb3, USB3_USB_OTG_INT_ENA(usb3));
+ otg_int_sta &= usb3_drd_read(usb3, USB3_USB_OTG_INT_ENA(usb3));
if (otg_int_sta)
- usb3_write(usb3, otg_int_sta, USB3_USB_OTG_INT_STA(usb3));
+ usb3_drd_write(usb3, otg_int_sta, USB3_USB_OTG_INT_STA(usb3));
if (otg_int_sta & USB_OTG_IDMON(usb3))
usb3_irq_idmon_change(usb3);
@@ -2325,6 +2367,9 @@ static int renesas_usb3_start(struct usb_gadget *gadget,
usb3 = gadget_to_renesas_usb3(gadget);
+ if (usb3->is_rzv2m && usb3_is_a_device(usb3))
+ return -EBUSY;
+
/* hook up the driver */
usb3->driver = driver;
@@ -2333,6 +2378,10 @@ static int renesas_usb3_start(struct usb_gadget *gadget,
pm_runtime_get_sync(usb3_to_dev(usb3));
+ /* Peripheral Reset */
+ if (usb3->is_rzv2m)
+ rzv2m_usb3drd_reset(usb3_to_dev(usb3)->parent, false);
+
renesas_usb3_init_controller(usb3);
return 0;
@@ -2345,8 +2394,10 @@ static int renesas_usb3_stop(struct usb_gadget *gadget)
usb3->softconnect = false;
usb3->gadget.speed = USB_SPEED_UNKNOWN;
usb3->driver = NULL;
- renesas_usb3_stop_controller(usb3);
+ if (usb3->is_rzv2m)
+ rzv2m_usb3drd_reset(usb3_to_dev(usb3)->parent, false);
+ renesas_usb3_stop_controller(usb3);
if (usb3->phy)
phy_exit(usb3->phy);
@@ -2406,18 +2457,29 @@ static void handle_ext_role_switch_states(struct device *dev,
switch (role) {
case USB_ROLE_NONE:
usb3->connection_state = USB_ROLE_NONE;
- if (cur_role == USB_ROLE_HOST)
+ if (!usb3->is_rzv2m && cur_role == USB_ROLE_HOST)
device_release_driver(host);
- if (usb3->driver)
+ if (usb3->driver) {
+ if (usb3->is_rzv2m)
+ rzv2m_usb3drd_reset(dev->parent, false);
usb3_disconnect(usb3);
+ }
usb3_vbus_out(usb3, false);
+
+ if (usb3->is_rzv2m) {
+ rzv2m_usb3drd_reset(dev->parent, true);
+ device_release_driver(host);
+ }
break;
case USB_ROLE_DEVICE:
if (usb3->connection_state == USB_ROLE_NONE) {
usb3->connection_state = USB_ROLE_DEVICE;
usb3_set_mode(usb3, false);
- if (usb3->driver)
+ if (usb3->driver) {
+ if (usb3->is_rzv2m)
+ renesas_usb3_init_controller(usb3);
usb3_connect(usb3);
+ }
} else if (cur_role == USB_ROLE_HOST) {
device_release_driver(host);
usb3_set_mode(usb3, false);
@@ -2428,8 +2490,11 @@ static void handle_ext_role_switch_states(struct device *dev,
break;
case USB_ROLE_HOST:
if (usb3->connection_state == USB_ROLE_NONE) {
- if (usb3->driver)
+ if (usb3->driver) {
+ if (usb3->is_rzv2m)
+ rzv2m_usb3drd_reset(dev->parent, false);
usb3_disconnect(usb3);
+ }
usb3->connection_state = USB_ROLE_HOST;
usb3_set_mode(usb3, true);
@@ -2600,7 +2665,6 @@ static int renesas_usb3_remove(struct platform_device *pdev)
usb_del_gadget_udc(&usb3->gadget);
reset_control_assert(usb3->usbp_rstc);
- reset_control_assert(usb3->drd_rstc);
renesas_usb3_dma_free_prd(usb3, &pdev->dev);
__renesas_usb3_ep_free_request(usb3->ep0_req);
@@ -2788,7 +2852,7 @@ static struct usb_role_switch_desc renesas_usb3_role_switch_desc = {
static int renesas_usb3_probe(struct platform_device *pdev)
{
struct renesas_usb3 *usb3;
- int irq, drd_irq, ret;
+ int irq, ret;
const struct renesas_usb3_priv *priv;
const struct soc_device_attribute *attr;
@@ -2802,12 +2866,6 @@ static int renesas_usb3_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- if (priv->is_rzv2m) {
- drd_irq = platform_get_irq_byname(pdev, "drd");
- if (drd_irq < 0)
- return drd_irq;
- }
-
usb3 = devm_kzalloc(&pdev->dev, sizeof(*usb3), GFP_KERNEL);
if (!usb3)
return -ENOMEM;
@@ -2836,9 +2894,12 @@ static int renesas_usb3_probe(struct platform_device *pdev)
return ret;
if (usb3->is_rzv2m) {
- ret = devm_request_irq(&pdev->dev, drd_irq,
+ struct rzv2m_usb3drd *ddata = dev_get_drvdata(pdev->dev.parent);
+
+ usb3->drd_reg = ddata->reg;
+ ret = devm_request_irq(ddata->dev, ddata->drd_irq,
renesas_usb3_otg_irq, 0,
- dev_name(&pdev->dev), usb3);
+ dev_name(ddata->dev), usb3);
if (ret < 0)
return ret;
}
@@ -2873,21 +2934,13 @@ static int renesas_usb3_probe(struct platform_device *pdev)
goto err_add_udc;
}
- usb3->drd_rstc = devm_reset_control_get_optional_shared(&pdev->dev,
- "drd_reset");
- if (IS_ERR(usb3->drd_rstc)) {
- ret = PTR_ERR(usb3->drd_rstc);
- goto err_add_udc;
- }
-
usb3->usbp_rstc = devm_reset_control_get_optional_shared(&pdev->dev,
- "aresetn_p");
+ NULL);
if (IS_ERR(usb3->usbp_rstc)) {
ret = PTR_ERR(usb3->usbp_rstc);
goto err_add_udc;
}
- reset_control_deassert(usb3->drd_rstc);
reset_control_deassert(usb3->usbp_rstc);
pm_runtime_enable(&pdev->dev);
@@ -2933,7 +2986,6 @@ err_dev_create:
err_reset:
reset_control_assert(usb3->usbp_rstc);
- reset_control_assert(usb3->drd_rstc);
err_add_udc:
renesas_usb3_dma_free_prd(usb3, &pdev->dev);
diff --git a/drivers/usb/gadget/udc/renesas_usbf.c b/drivers/usb/gadget/udc/renesas_usbf.c
new file mode 100644
index 000000000000..cb23e62e8a87
--- /dev/null
+++ b/drivers/usb/gadget/udc/renesas_usbf.c
@@ -0,0 +1,3406 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas USBF USB Function driver
+ *
+ * Copyright 2022 Schneider Electric
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/kfifo.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/types.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/role.h>
+
+#define USBF_NUM_ENDPOINTS 16
+#define USBF_EP0_MAX_PCKT_SIZE 64
+
+/* EPC registers */
+#define USBF_REG_USB_CONTROL 0x000
+#define USBF_USB_PUE2 BIT(2)
+#define USBF_USB_CONNECTB BIT(3)
+#define USBF_USB_DEFAULT BIT(4)
+#define USBF_USB_CONF BIT(5)
+#define USBF_USB_SUSPEND BIT(6)
+#define USBF_USB_RSUM_IN BIT(7)
+#define USBF_USB_SOF_RCV BIT(8)
+#define USBF_USB_FORCEFS BIT(9)
+#define USBF_USB_INT_SEL BIT(10)
+#define USBF_USB_SOF_CLK_MODE BIT(11)
+
+#define USBF_REG_USB_STATUS 0x004
+#define USBF_USB_RSUM_OUT BIT(1)
+#define USBF_USB_SPND_OUT BIT(2)
+#define USBF_USB_USB_RST BIT(3)
+#define USBF_USB_DEFAULT_ST BIT(4)
+#define USBF_USB_CONF_ST BIT(5)
+#define USBF_USB_SPEED_MODE BIT(6)
+#define USBF_USB_SOF_DELAY_STATUS BIT(31)
+
+#define USBF_REG_USB_ADDRESS 0x008
+#define USBF_USB_SOF_STATUS BIT(15)
+#define USBF_USB_SET_USB_ADDR(_a) ((_a) << 16)
+#define USBF_USB_GET_FRAME(_r) ((_r) & 0x7FF)
+
+#define USBF_REG_SETUP_DATA0 0x018
+#define USBF_REG_SETUP_DATA1 0x01C
+#define USBF_REG_USB_INT_STA 0x020
+#define USBF_USB_RSUM_INT BIT(1)
+#define USBF_USB_SPND_INT BIT(2)
+#define USBF_USB_USB_RST_INT BIT(3)
+#define USBF_USB_SOF_INT BIT(4)
+#define USBF_USB_SOF_ERROR_INT BIT(5)
+#define USBF_USB_SPEED_MODE_INT BIT(6)
+#define USBF_USB_EPN_INT(_n) (BIT(8) << (_n)) /* n=0..15 */
+
+#define USBF_REG_USB_INT_ENA 0x024
+#define USBF_USB_RSUM_EN BIT(1)
+#define USBF_USB_SPND_EN BIT(2)
+#define USBF_USB_USB_RST_EN BIT(3)
+#define USBF_USB_SOF_EN BIT(4)
+#define USBF_USB_SOF_ERROR_EN BIT(5)
+#define USBF_USB_SPEED_MODE_EN BIT(6)
+#define USBF_USB_EPN_EN(_n) (BIT(8) << (_n)) /* n=0..15 */
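The per-endpoint interrupt flags above are generated by shifting BIT(8) left by the endpoint number, so endpoints 0..15 land on bits 8..23 of the status and enable registers. A small standalone check of that layout:

	#include <stdio.h>

	#define BIT(n)			(1u << (n))
	#define USBF_USB_EPN_INT(_n)	(BIT(8) << (_n))	/* n = 0..15 */

	int main(void)
	{
		/* Endpoint interrupt flags occupy bits 8..23. */
		for (unsigned int n = 0; n < 16; n++)
			printf("EP%-2u mask: 0x%08x\n", n, USBF_USB_EPN_INT(n));
		return 0;
	}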
+
+#define USBF_BASE_EP0 0x028
+/* EP0 registers offsets from Base + USBF_BASE_EP0 (EP0 regs area) */
+#define USBF_REG_EP0_CONTROL 0x00
+#define USBF_EP0_ONAK BIT(0)
+#define USBF_EP0_INAK BIT(1)
+#define USBF_EP0_STL BIT(2)
+#define USBF_EP0_PERR_NAK_CLR BIT(3)
+#define USBF_EP0_INAK_EN BIT(4)
+#define USBF_EP0_DW_MASK (0x3 << 5)
+#define USBF_EP0_DW(_s) ((_s) << 5)
+#define USBF_EP0_DEND BIT(7)
+#define USBF_EP0_BCLR BIT(8)
+#define USBF_EP0_PIDCLR BIT(9)
+#define USBF_EP0_AUTO BIT(16)
+#define USBF_EP0_OVERSEL BIT(17)
+#define USBF_EP0_STGSEL BIT(18)
+
+#define USBF_REG_EP0_STATUS 0x04
+#define USBF_EP0_SETUP_INT BIT(0)
+#define USBF_EP0_STG_START_INT BIT(1)
+#define USBF_EP0_STG_END_INT BIT(2)
+#define USBF_EP0_STALL_INT BIT(3)
+#define USBF_EP0_IN_INT BIT(4)
+#define USBF_EP0_OUT_INT BIT(5)
+#define USBF_EP0_OUT_OR_INT BIT(6)
+#define USBF_EP0_OUT_NULL_INT BIT(7)
+#define USBF_EP0_IN_EMPTY BIT(8)
+#define USBF_EP0_IN_FULL BIT(9)
+#define USBF_EP0_IN_DATA BIT(10)
+#define USBF_EP0_IN_NAK_INT BIT(11)
+#define USBF_EP0_OUT_EMPTY BIT(12)
+#define USBF_EP0_OUT_FULL BIT(13)
+#define USBF_EP0_OUT_NULL BIT(14)
+#define USBF_EP0_OUT_NAK_INT BIT(15)
+#define USBF_EP0_PERR_NAK_INT BIT(16)
+#define USBF_EP0_PERR_NAK BIT(17)
+#define USBF_EP0_PID BIT(18)
+
+#define USBF_REG_EP0_INT_ENA 0x08
+#define USBF_EP0_SETUP_EN BIT(0)
+#define USBF_EP0_STG_START_EN BIT(1)
+#define USBF_EP0_STG_END_EN BIT(2)
+#define USBF_EP0_STALL_EN BIT(3)
+#define USBF_EP0_IN_EN BIT(4)
+#define USBF_EP0_OUT_EN BIT(5)
+#define USBF_EP0_OUT_OR_EN BIT(6)
+#define USBF_EP0_OUT_NULL_EN BIT(7)
+#define USBF_EP0_IN_NAK_EN BIT(11)
+#define USBF_EP0_OUT_NAK_EN BIT(15)
+#define USBF_EP0_PERR_NAK_EN BIT(16)
+
+#define USBF_REG_EP0_LENGTH 0x0C
+#define USBF_EP0_LDATA (0x7FF << 0)
+#define USBF_REG_EP0_READ 0x10
+#define USBF_REG_EP0_WRITE 0x14
+
+#define USBF_BASE_EPN(_n) (0x040 + (_n) * 0x020)
+/* EPn registers offsets from Base + USBF_BASE_EPN(n-1). n=1..15 */
+#define USBF_REG_EPN_CONTROL 0x000
+#define USBF_EPN_ONAK BIT(0)
+#define USBF_EPN_OSTL BIT(2)
+#define USBF_EPN_ISTL BIT(3)
+#define USBF_EPN_OSTL_EN BIT(4)
+#define USBF_EPN_DW_MASK (0x3 << 5)
+#define USBF_EPN_DW(_s) ((_s) << 5)
+#define USBF_EPN_DEND BIT(7)
+#define USBF_EPN_CBCLR BIT(8)
+#define USBF_EPN_BCLR BIT(9)
+#define USBF_EPN_OPIDCLR BIT(10)
+#define USBF_EPN_IPIDCLR BIT(11)
+#define USBF_EPN_AUTO BIT(16)
+#define USBF_EPN_OVERSEL BIT(17)
+#define USBF_EPN_MODE_MASK (0x3 << 24)
+#define USBF_EPN_MODE_BULK (0x0 << 24)
+#define USBF_EPN_MODE_INTR (0x1 << 24)
+#define USBF_EPN_MODE_ISO (0x2 << 24)
+#define USBF_EPN_DIR0 BIT(26)
+#define USBF_EPN_BUF_TYPE_DOUBLE BIT(30)
+#define USBF_EPN_EN BIT(31)
+
+#define USBF_REG_EPN_STATUS 0x004
+#define USBF_EPN_IN_EMPTY BIT(0)
+#define USBF_EPN_IN_FULL BIT(1)
+#define USBF_EPN_IN_DATA BIT(2)
+#define USBF_EPN_IN_INT BIT(3)
+#define USBF_EPN_IN_STALL_INT BIT(4)
+#define USBF_EPN_IN_NAK_ERR_INT BIT(5)
+#define USBF_EPN_IN_END_INT BIT(7)
+#define USBF_EPN_IPID BIT(10)
+#define USBF_EPN_OUT_EMPTY BIT(16)
+#define USBF_EPN_OUT_FULL BIT(17)
+#define USBF_EPN_OUT_NULL_INT BIT(18)
+#define USBF_EPN_OUT_INT BIT(19)
+#define USBF_EPN_OUT_STALL_INT BIT(20)
+#define USBF_EPN_OUT_NAK_ERR_INT BIT(21)
+#define USBF_EPN_OUT_OR_INT BIT(22)
+#define USBF_EPN_OUT_END_INT BIT(23)
+#define USBF_EPN_ISO_CRC BIT(24)
+#define USBF_EPN_ISO_OR BIT(26)
+#define USBF_EPN_OUT_NOTKN BIT(27)
+#define USBF_EPN_ISO_OPID BIT(28)
+#define USBF_EPN_ISO_PIDERR BIT(29)
+
+#define USBF_REG_EPN_INT_ENA 0x008
+#define USBF_EPN_IN_EN BIT(3)
+#define USBF_EPN_IN_STALL_EN BIT(4)
+#define USBF_EPN_IN_NAK_ERR_EN BIT(5)
+#define USBF_EPN_IN_END_EN BIT(7)
+#define USBF_EPN_OUT_NULL_EN BIT(18)
+#define USBF_EPN_OUT_EN BIT(19)
+#define USBF_EPN_OUT_STALL_EN BIT(20)
+#define USBF_EPN_OUT_NAK_ERR_EN BIT(21)
+#define USBF_EPN_OUT_OR_EN BIT(22)
+#define USBF_EPN_OUT_END_EN BIT(23)
+
+#define USBF_REG_EPN_DMA_CTRL 0x00C
+#define USBF_EPN_DMAMODE0 BIT(0)
+#define USBF_EPN_DMA_EN BIT(4)
+#define USBF_EPN_STOP_SET BIT(8)
+#define USBF_EPN_BURST_SET BIT(9)
+#define USBF_EPN_DEND_SET BIT(10)
+#define USBF_EPN_STOP_MODE BIT(11)
+
+#define USBF_REG_EPN_PCKT_ADRS 0x010
+#define USBF_EPN_MPKT(_l) ((_l) << 0)
+#define USBF_EPN_BASEAD(_a) ((_a) << 16)
+
+#define USBF_REG_EPN_LEN_DCNT 0x014
+#define USBF_EPN_GET_LDATA(_r) ((_r) & 0x7FF)
+#define USBF_EPN_SET_DMACNT(_c) ((_c) << 16)
+#define USBF_EPN_GET_DMACNT(_r) (((_r) >> 16) & 0x1ff)
+
+#define USBF_REG_EPN_READ 0x018
+#define USBF_REG_EPN_WRITE 0x01C
+
+/* AHB-EPC Bridge registers */
+#define USBF_REG_AHBSCTR 0x1000
+#define USBF_REG_AHBMCTR 0x1004
+#define USBF_SYS_WBURST_TYPE BIT(2)
+#define USBF_SYS_ARBITER_CTR BIT(31)
+
+#define USBF_REG_AHBBINT 0x1008
+#define USBF_SYS_ERR_MASTER (0x0F << 0)
+#define USBF_SYS_SBUS_ERRINT0 BIT(4)
+#define USBF_SYS_SBUS_ERRINT1 BIT(5)
+#define USBF_SYS_MBUS_ERRINT BIT(6)
+#define USBF_SYS_VBUS_INT BIT(13)
+#define USBF_SYS_DMA_ENDINT_EPN(_n) (BIT(16) << (_n)) /* _n=1..15 */
+
+#define USBF_REG_AHBBINTEN 0x100C
+#define USBF_SYS_SBUS_ERRINT0EN BIT(4)
+#define USBF_SYS_SBUS_ERRINT1EN BIT(5)
+#define USBF_SYS_MBUS_ERRINTEN BIT(6)
+#define USBF_SYS_VBUS_INTEN BIT(13)
+#define USBF_SYS_DMA_ENDINTEN_EPN(_n) (BIT(16) << (_n)) /* _n=1..15 */
+
+#define USBF_REG_EPCTR 0x1010
+#define USBF_SYS_EPC_RST BIT(0)
+#define USBF_SYS_PLL_RST BIT(2)
+#define USBF_SYS_PLL_LOCK BIT(4)
+#define USBF_SYS_PLL_RESUME BIT(5)
+#define USBF_SYS_VBUS_LEVEL BIT(8)
+#define USBF_SYS_DIRPD BIT(12)
+
+#define USBF_REG_USBSSVER 0x1020
+#define USBF_REG_USBSSCONF 0x1024
+#define USBF_SYS_DMA_AVAILABLE(_n) (BIT(0) << (_n)) /* _n=0..15 */
+#define USBF_SYS_EP_AVAILABLE(_n) (BIT(16) << (_n)) /* _n=0..15 */
+
+#define USBF_BASE_DMA_EPN(_n) (0x1110 + (_n) * 0x010)
+/* EPn DMA registers offsets from Base USBF_BASE_DMA_EPN(n-1). n=1..15*/
+#define USBF_REG_DMA_EPN_DCR1 0x00
+#define USBF_SYS_EPN_REQEN BIT(0)
+#define USBF_SYS_EPN_DIR0 BIT(1)
+#define USBF_SYS_EPN_SET_DMACNT(_c) ((_c) << 16)
+#define USBF_SYS_EPN_GET_DMACNT(_r) (((_r) >> 16) & 0x0FF)
+
+#define USBF_REG_DMA_EPN_DCR2 0x04
+#define USBF_SYS_EPN_MPKT(_s) ((_s) << 0)
+#define USBF_SYS_EPN_LMPKT(_l) ((_l) << 16)
+
+#define USBF_REG_DMA_EPN_TADR 0x08
+
+/* USB request */
+struct usbf_req {
+ struct usb_request req;
+ struct list_head queue;
+ unsigned int is_zero_sent : 1;
+ unsigned int is_mapped : 1;
+ enum {
+ USBF_XFER_START,
+ USBF_XFER_WAIT_DMA,
+ USBF_XFER_SEND_NULL,
+ USBF_XFER_WAIT_END,
+ USBF_XFER_WAIT_DMA_SHORT,
+ USBF_XFER_WAIT_BRIDGE,
+ } xfer_step;
+ size_t dma_size;
+};
+
+/* USB Endpoint */
+struct usbf_ep {
+ struct usb_ep ep;
+ char name[32];
+ struct list_head queue;
+ unsigned int is_processing : 1;
+ unsigned int is_in : 1;
+ struct usbf_udc *udc;
+ void __iomem *regs;
+ void __iomem *dma_regs;
+ unsigned int id : 8;
+ unsigned int disabled : 1;
+ unsigned int is_wedged : 1;
+ unsigned int delayed_status : 1;
+ u32 status;
+ void (*bridge_on_dma_end)(struct usbf_ep *ep);
+};
+
+enum usbf_ep0state {
+ EP0_IDLE,
+ EP0_IN_DATA_PHASE,
+ EP0_OUT_DATA_PHASE,
+ EP0_OUT_STATUS_START_PHASE,
+ EP0_OUT_STATUS_PHASE,
+ EP0_OUT_STATUS_END_PHASE,
+ EP0_IN_STATUS_START_PHASE,
+ EP0_IN_STATUS_PHASE,
+ EP0_IN_STATUS_END_PHASE,
+};
+
+struct usbf_udc {
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+ struct device *dev;
+ void __iomem *regs;
+ spinlock_t lock;
+ bool is_remote_wakeup;
+ bool is_usb_suspended;
+ struct usbf_ep ep[USBF_NUM_ENDPOINTS];
+ /* for EP0 control messages */
+ enum usbf_ep0state ep0state;
+ struct usbf_req setup_reply;
+ u8 ep0_buf[USBF_EP0_MAX_PCKT_SIZE];
+};
+
+struct usbf_ep_info {
+ const char *name;
+ struct usb_ep_caps caps;
+ u16 base_addr;
+ unsigned int is_double : 1;
+ u16 maxpacket_limit;
+};
+
+#define USBF_SINGLE_BUFFER 0
+#define USBF_DOUBLE_BUFFER 1
+#define USBF_EP_INFO(_name, _caps, _base_addr, _is_double, _maxpacket_limit) \
+ { \
+ .name = _name, \
+ .caps = _caps, \
+ .base_addr = _base_addr, \
+ .is_double = _is_double, \
+ .maxpacket_limit = _maxpacket_limit, \
+ }
+
+/* This table is computed from the recommended values provided in the SoC
+ * datasheet. The buffer type (single/double) and the endpoint type cannot
+ * be changed. The mapping in internal RAM (base_addr and number of words)
+ * for each endpoint depends on the max packet size and the buffer type.
+ */
+static const struct usbf_ep_info usbf_ep_info[USBF_NUM_ENDPOINTS] = {
+ /* ep0: buf @0x0000 64 bytes, fixed 32 words */
+ [0] = USBF_EP_INFO("ep0-ctrl",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL,
+ USB_EP_CAPS_DIR_ALL),
+ 0x0000, USBF_SINGLE_BUFFER, USBF_EP0_MAX_PCKT_SIZE),
+ /* ep1: buf @0x0020, 2 buffers 512 bytes -> (512 * 2 / 4) words */
+ [1] = USBF_EP_INFO("ep1-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
+ 0x0020, USBF_DOUBLE_BUFFER, 512),
+ /* ep2: buf @0x0120, 2 buffers 512 bytes -> (512 * 2 / 4) words */
+ [2] = USBF_EP_INFO("ep2-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
+ 0x0120, USBF_DOUBLE_BUFFER, 512),
+ /* ep3: buf @0x0220, 1 buffer 512 bytes -> (512 * 1 / 4) words */
+ [3] = USBF_EP_INFO("ep3-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
+ 0x0220, USBF_SINGLE_BUFFER, 512),
+ /* ep4: buf @0x02A0, 1 buffer 512 bytes -> (512 * 1 / 4) words */
+ [4] = USBF_EP_INFO("ep4-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
+ 0x02A0, USBF_SINGLE_BUFFER, 512),
+ /* ep5: buf @0x0320, 1 buffer 512 bytes -> (512 * 1 / 4) words */
+ [5] = USBF_EP_INFO("ep5-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
+ 0x0320, USBF_SINGLE_BUFFER, 512),
+ /* ep6: buf @0x03A0, 1 buffer 1024 bytes -> (1024 * 1 / 4) words */
+ [6] = USBF_EP_INFO("ep6-int",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
+ USB_EP_CAPS_DIR_ALL),
+ 0x03A0, USBF_SINGLE_BUFFER, 1024),
+ /* ep7: buf @0x04A0, 1 buffer 1024 bytes -> (1024 * 1 / 4) words */
+ [7] = USBF_EP_INFO("ep7-int",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
+ USB_EP_CAPS_DIR_ALL),
+ 0x04A0, USBF_SINGLE_BUFFER, 1024),
+ /* ep8: buf @0x0520, 1 buffer 1024 bytes -> (1024 * 1 / 4) words */
+ [8] = USBF_EP_INFO("ep8-int",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
+ USB_EP_CAPS_DIR_ALL),
+ 0x0520, USBF_SINGLE_BUFFER, 1024),
+ /* ep9: buf @0x0620, 1 buffer 1024 bytes -> (1024 * 1 / 4) words */
+ [9] = USBF_EP_INFO("ep9-int",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
+ USB_EP_CAPS_DIR_ALL),
+ 0x0620, USBF_SINGLE_BUFFER, 1024),
+ /* ep10: buf @0x0720, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
+ [10] = USBF_EP_INFO("ep10-iso",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_ALL),
+ 0x0720, USBF_DOUBLE_BUFFER, 1024),
+ /* ep11: buf @0x0920, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
+ [11] = USBF_EP_INFO("ep11-iso",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_ALL),
+ 0x0920, USBF_DOUBLE_BUFFER, 1024),
+ /* ep12: buf @0x0B20, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
+ [12] = USBF_EP_INFO("ep12-iso",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_ALL),
+ 0x0B20, USBF_DOUBLE_BUFFER, 1024),
+ /* ep13: buf @0x0D20, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
+ [13] = USBF_EP_INFO("ep13-iso",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_ALL),
+ 0x0D20, USBF_DOUBLE_BUFFER, 1024),
+ /* ep14: buf @0x0F20, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
+ [14] = USBF_EP_INFO("ep14-iso",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_ALL),
+ 0x0F20, USBF_DOUBLE_BUFFER, 1024),
+ /* ep15: buf @0x1120, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
+ [15] = USBF_EP_INFO("ep15-iso",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_ALL),
+ 0x1120, USBF_DOUBLE_BUFFER, 1024),
+};
+
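+/* Register access helpers. The controller exposes three register banks:
+ * the global controller registers (udc->regs), the per-endpoint registers
+ * (ep->regs) and the per-endpoint AHB-EPC bridge DMA registers
+ * (ep->dma_regs). Each bank gets plain read/write wrappers plus
+ * set/clear/clear-and-set read-modify-write helpers.
+ */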
+static inline u32 usbf_reg_readl(struct usbf_udc *udc, uint offset)
+{
+ return readl(udc->regs + offset);
+}
+
+static inline void usbf_reg_writel(struct usbf_udc *udc, uint offset, u32 val)
+{
+ writel(val, udc->regs + offset);
+}
+
+static inline void usbf_reg_bitset(struct usbf_udc *udc, uint offset, u32 set)
+{
+ u32 tmp;
+
+ tmp = usbf_reg_readl(udc, offset);
+ tmp |= set;
+ usbf_reg_writel(udc, offset, tmp);
+}
+
+static inline void usbf_reg_bitclr(struct usbf_udc *udc, uint offset, u32 clr)
+{
+ u32 tmp;
+
+ tmp = usbf_reg_readl(udc, offset);
+ tmp &= ~clr;
+ usbf_reg_writel(udc, offset, tmp);
+}
+
+static inline void usbf_reg_clrset(struct usbf_udc *udc, uint offset,
+ u32 clr, u32 set)
+{
+ u32 tmp;
+
+ tmp = usbf_reg_readl(udc, offset);
+ tmp &= ~clr;
+ tmp |= set;
+ usbf_reg_writel(udc, offset, tmp);
+}
+
+static inline u32 usbf_ep_reg_readl(struct usbf_ep *ep, uint offset)
+{
+ return readl(ep->regs + offset);
+}
+
+static inline void usbf_ep_reg_read_rep(struct usbf_ep *ep, uint offset,
+ void *dst, uint count)
+{
+ readsl(ep->regs + offset, dst, count);
+}
+
+static inline void usbf_ep_reg_writel(struct usbf_ep *ep, uint offset, u32 val)
+{
+ writel(val, ep->regs + offset);
+}
+
+static inline void usbf_ep_reg_write_rep(struct usbf_ep *ep, uint offset,
+ const void *src, uint count)
+{
+ writesl(ep->regs + offset, src, count);
+}
+
+static inline void usbf_ep_reg_bitset(struct usbf_ep *ep, uint offset, u32 set)
+{
+ u32 tmp;
+
+ tmp = usbf_ep_reg_readl(ep, offset);
+ tmp |= set;
+ usbf_ep_reg_writel(ep, offset, tmp);
+}
+
+static inline void usbf_ep_reg_bitclr(struct usbf_ep *ep, uint offset, u32 clr)
+{
+ u32 tmp;
+
+ tmp = usbf_ep_reg_readl(ep, offset);
+ tmp &= ~clr;
+ usbf_ep_reg_writel(ep, offset, tmp);
+}
+
+static inline void usbf_ep_reg_clrset(struct usbf_ep *ep, uint offset,
+ u32 clr, u32 set)
+{
+ u32 tmp;
+
+ tmp = usbf_ep_reg_readl(ep, offset);
+ tmp &= ~clr;
+ tmp |= set;
+ usbf_ep_reg_writel(ep, offset, tmp);
+}
+
+static inline u32 usbf_ep_dma_reg_readl(struct usbf_ep *ep, uint offset)
+{
+ return readl(ep->dma_regs + offset);
+}
+
+static inline void usbf_ep_dma_reg_writel(struct usbf_ep *ep, uint offset,
+ u32 val)
+{
+ writel(val, ep->dma_regs + offset);
+}
+
+static inline void usbf_ep_dma_reg_bitset(struct usbf_ep *ep, uint offset,
+ u32 set)
+{
+ u32 tmp;
+
+ tmp = usbf_ep_dma_reg_readl(ep, offset);
+ tmp |= set;
+ usbf_ep_dma_reg_writel(ep, offset, tmp);
+}
+
+static inline void usbf_ep_dma_reg_bitclr(struct usbf_ep *ep, uint offset,
+ u32 clr)
+{
+ u32 tmp;
+
+ tmp = usbf_ep_dma_reg_readl(ep, offset);
+ tmp &= ~clr;
+ usbf_ep_dma_reg_writel(ep, offset, tmp);
+}
+
+static inline void usbf_ep_dma_reg_clrset(struct usbf_ep *ep, uint offset,
+ u32 clr, u32 set)
+{
+ u32 tmp;
+
+ tmp = usbf_ep_dma_reg_readl(ep, offset);
+ tmp &= ~clr;
+ tmp |= set;
+ usbf_ep_dma_reg_writel(ep, offset, tmp);
+}
+
+static void usbf_ep0_send_null(struct usbf_ep *ep0, bool is_data1)
+{
+ u32 set;
+
+ set = USBF_EP0_DEND;
+ if (is_data1)
+ set |= USBF_EP0_PIDCLR;
+
+ usbf_ep_reg_bitset(ep0, USBF_REG_EP0_CONTROL, set);
+}
+
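+/* Push up to one max-packet of IN data from the request buffer into the
+ * EP0 write FIFO. Full 32-bit words go through USBF_REG_EP0_WRITE; a
+ * trailing 1-3 byte residue is written with its size encoded in the
+ * EP0_DW field. Returns 0 once everything (including an optional
+ * zero-length packet) has been sent, -EINPROGRESS while more USB traffic
+ * is needed.
+ */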
+static int usbf_ep0_pio_in(struct usbf_ep *ep0, struct usbf_req *req)
+{
+ unsigned int left;
+ unsigned int nb;
+ const void *buf;
+ u32 ctrl;
+ u32 last;
+
+ left = req->req.length - req->req.actual;
+
+ if (left == 0) {
+ if (!req->is_zero_sent) {
+ if (req->req.length == 0) {
+ dev_dbg(ep0->udc->dev, "ep0 send null\n");
+ usbf_ep0_send_null(ep0, false);
+ req->is_zero_sent = 1;
+ return -EINPROGRESS;
+ }
+ if ((req->req.actual % ep0->ep.maxpacket) == 0) {
+ if (req->req.zero) {
+ dev_dbg(ep0->udc->dev, "ep0 send null\n");
+ usbf_ep0_send_null(ep0, false);
+ req->is_zero_sent = 1;
+ return -EINPROGRESS;
+ }
+ }
+ }
+ return 0;
+ }
+
+ if (left > ep0->ep.maxpacket)
+ left = ep0->ep.maxpacket;
+
+ buf = req->req.buf;
+ buf += req->req.actual;
+
+ nb = left / sizeof(u32);
+ if (nb) {
+ usbf_ep_reg_write_rep(ep0, USBF_REG_EP0_WRITE, buf, nb);
+ buf += (nb * sizeof(u32));
+ req->req.actual += (nb * sizeof(u32));
+ left -= (nb * sizeof(u32));
+ }
+ ctrl = usbf_ep_reg_readl(ep0, USBF_REG_EP0_CONTROL);
+ ctrl &= ~USBF_EP0_DW_MASK;
+ if (left) {
+ memcpy(&last, buf, left);
+ usbf_ep_reg_writel(ep0, USBF_REG_EP0_WRITE, last);
+ ctrl |= USBF_EP0_DW(left);
+ req->req.actual += left;
+ }
+ usbf_ep_reg_writel(ep0, USBF_REG_EP0_CONTROL, ctrl | USBF_EP0_DEND);
+
+ dev_dbg(ep0->udc->dev, "ep0 send %u/%u\n",
+ req->req.actual, req->req.length);
+
+ return -EINPROGRESS;
+}
+
+static int usbf_ep0_pio_out(struct usbf_ep *ep0, struct usbf_req *req)
+{
+ int req_status = 0;
+ unsigned int count;
+ unsigned int recv;
+ unsigned int left;
+ unsigned int nb;
+ void *buf;
+ u32 last;
+
+ if (ep0->status & USBF_EP0_OUT_INT) {
+ recv = usbf_ep_reg_readl(ep0, USBF_REG_EP0_LENGTH) & USBF_EP0_LDATA;
+ count = recv;
+
+ buf = req->req.buf;
+ buf += req->req.actual;
+
+ left = req->req.length - req->req.actual;
+
+ dev_dbg(ep0->udc->dev, "ep0 recv %u, left %u\n", count, left);
+
+ if (left > ep0->ep.maxpacket)
+ left = ep0->ep.maxpacket;
+
+ if (count > left) {
+ req_status = -EOVERFLOW;
+ count = left;
+ }
+
+ if (count) {
+ nb = count / sizeof(u32);
+ if (nb) {
+ usbf_ep_reg_read_rep(ep0, USBF_REG_EP0_READ,
+ buf, nb);
+ buf += (nb * sizeof(u32));
+ req->req.actual += (nb * sizeof(u32));
+ count -= (nb * sizeof(u32));
+ }
+ if (count) {
+ last = usbf_ep_reg_readl(ep0, USBF_REG_EP0_READ);
+ memcpy(buf, &last, count);
+ req->req.actual += count;
+ }
+ }
+ dev_dbg(ep0->udc->dev, "ep0 recv %u/%u\n",
+ req->req.actual, req->req.length);
+
+ if (req_status) {
+ dev_dbg(ep0->udc->dev, "ep0 req.status=%d\n", req_status);
+ req->req.status = req_status;
+ return 0;
+ }
+
+ if (recv < ep0->ep.maxpacket) {
+ dev_dbg(ep0->udc->dev, "ep0 short packet\n");
+ /* This is a short packet -> It is the end */
+ req->req.status = 0;
+ return 0;
+ }
+
+ /* The Data stage of a control transfer from an endpoint to the
+ * host is complete when the endpoint does one of the following:
+ * - Has transferred exactly the expected amount of data
+ * - Transfers a packet with a payload size less than
+ * wMaxPacketSize or transfers a zero-length packet
+ */
+ if (req->req.actual == req->req.length) {
+ req->req.status = 0;
+ return 0;
+ }
+ }
+
+ if (ep0->status & USBF_EP0_OUT_NULL_INT) {
+ /* NULL packet received */
+ dev_dbg(ep0->udc->dev, "ep0 null packet\n");
+ if (req->req.actual != req->req.length) {
+ req->req.status = req->req.short_not_ok ?
+ -EREMOTEIO : 0;
+ } else {
+ req->req.status = 0;
+ }
+ return 0;
+ }
+
+ return -EINPROGRESS;
+}
+
+static void usbf_ep0_fifo_flush(struct usbf_ep *ep0)
+{
+ u32 sts;
+ int ret;
+
+ usbf_ep_reg_bitset(ep0, USBF_REG_EP0_CONTROL, USBF_EP0_BCLR);
+
+ ret = readl_poll_timeout_atomic(ep0->regs + USBF_REG_EP0_STATUS, sts,
+ (sts & (USBF_EP0_IN_DATA | USBF_EP0_IN_EMPTY)) == USBF_EP0_IN_EMPTY,
+ 0, 10000);
+ if (ret)
+ dev_err(ep0->udc->dev, "ep0 flush fifo timed out\n");
+
+}
+
+static void usbf_epn_send_null(struct usbf_ep *epn)
+{
+ usbf_ep_reg_bitset(epn, USBF_REG_EPN_CONTROL, USBF_EPN_DEND);
+}
+
+static void usbf_epn_send_residue(struct usbf_ep *epn, const void *buf,
+ unsigned int size)
+{
+ u32 tmp;
+
+ memcpy(&tmp, buf, size);
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_WRITE, tmp);
+
+ usbf_ep_reg_clrset(epn, USBF_REG_EPN_CONTROL,
+ USBF_EPN_DW_MASK,
+ USBF_EPN_DW(size) | USBF_EPN_DEND);
+}
+
+static int usbf_epn_pio_in(struct usbf_ep *epn, struct usbf_req *req)
+{
+ unsigned int left;
+ unsigned int nb;
+ const void *buf;
+
+ left = req->req.length - req->req.actual;
+
+ if (left == 0) {
+ if (!req->is_zero_sent) {
+ if (req->req.length == 0) {
+ dev_dbg(epn->udc->dev, "ep%u send_null\n", epn->id);
+ usbf_epn_send_null(epn);
+ req->is_zero_sent = 1;
+ return -EINPROGRESS;
+ }
+ if ((req->req.actual % epn->ep.maxpacket) == 0) {
+ if (req->req.zero) {
+ dev_dbg(epn->udc->dev, "ep%u send_null\n",
+ epn->id);
+ usbf_epn_send_null(epn);
+ req->is_zero_sent = 1;
+ return -EINPROGRESS;
+ }
+ }
+ }
+ return 0;
+ }
+
+ if (left > epn->ep.maxpacket)
+ left = epn->ep.maxpacket;
+
+ buf = req->req.buf;
+ buf += req->req.actual;
+
+ nb = left / sizeof(u32);
+ if (nb) {
+ usbf_ep_reg_write_rep(epn, USBF_REG_EPN_WRITE, buf, nb);
+ buf += (nb * sizeof(u32));
+ req->req.actual += (nb * sizeof(u32));
+ left -= (nb * sizeof(u32));
+ }
+
+ if (left) {
+ usbf_epn_send_residue(epn, buf, left);
+ req->req.actual += left;
+ } else {
+ usbf_ep_reg_clrset(epn, USBF_REG_EPN_CONTROL,
+ USBF_EPN_DW_MASK,
+ USBF_EPN_DEND);
+ }
+
+ dev_dbg(epn->udc->dev, "ep%u send %u/%u\n", epn->id, req->req.actual,
+ req->req.length);
+
+ return -EINPROGRESS;
+}
+
+static void usbf_epn_enable_in_end_int(struct usbf_ep *epn)
+{
+ usbf_ep_reg_bitset(epn, USBF_REG_EPN_INT_ENA, USBF_EPN_IN_END_EN);
+}
+
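+/* IN transfer using the AHB-EPC bridge DMA. Buffers that are not 32-bit
+ * aligned fall back to PIO. The transfer is driven as a small state
+ * machine (req->xfer_step): program the DMA, wait for the bridge/IN_END
+ * completion, then send any 1-3 byte residue, short packet or trailing
+ * zero-length packet, and finally wait for the IN interrupt that marks
+ * the end of the USB transfer.
+ */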
+static int usbf_epn_dma_in(struct usbf_ep *epn, struct usbf_req *req)
+{
+ unsigned int left;
+ u32 npkt;
+ u32 lastpkt;
+ int ret;
+
+ if (!IS_ALIGNED((uintptr_t)req->req.buf, 4)) {
+ dev_dbg(epn->udc->dev, "ep%u buf unaligned -> fallback pio\n",
+ epn->id);
+ return usbf_epn_pio_in(epn, req);
+ }
+
+ left = req->req.length - req->req.actual;
+
+ switch (req->xfer_step) {
+ default:
+ case USBF_XFER_START:
+ if (left == 0) {
+ dev_dbg(epn->udc->dev, "ep%u send null\n", epn->id);
+ usbf_epn_send_null(epn);
+ req->xfer_step = USBF_XFER_WAIT_END;
+ break;
+ }
+ if (left < 4) {
+ dev_dbg(epn->udc->dev, "ep%u send residue %u\n", epn->id,
+ left);
+ usbf_epn_send_residue(epn,
+ req->req.buf + req->req.actual, left);
+ req->req.actual += left;
+ req->xfer_step = USBF_XFER_WAIT_END;
+ break;
+ }
+
+ ret = usb_gadget_map_request(&epn->udc->gadget, &req->req, 1);
+ if (ret < 0) {
+ dev_err(epn->udc->dev, "usb_gadget_map_request failed (%d)\n",
+ ret);
+ return ret;
+ }
+ req->is_mapped = 1;
+
+ npkt = DIV_ROUND_UP(left, epn->ep.maxpacket);
+ lastpkt = (left % epn->ep.maxpacket);
+ if (lastpkt == 0)
+ lastpkt = epn->ep.maxpacket;
+ lastpkt &= ~0x3; /* DMA is done on 32bit units */
+
+ usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR2,
+ USBF_SYS_EPN_MPKT(epn->ep.maxpacket) | USBF_SYS_EPN_LMPKT(lastpkt));
+ usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_TADR,
+ req->req.dma);
+ usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR1,
+ USBF_SYS_EPN_SET_DMACNT(npkt));
+ usbf_ep_dma_reg_bitset(epn, USBF_REG_DMA_EPN_DCR1,
+ USBF_SYS_EPN_REQEN);
+
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_LEN_DCNT, USBF_EPN_SET_DMACNT(npkt));
+
+ usbf_ep_reg_bitset(epn, USBF_REG_EPN_CONTROL, USBF_EPN_AUTO);
+
+ /* The end of the DMA transfer at the USBF level needs to be
+ * handled after the end of the DMA transfer has been detected
+ * at the bridge level.
+ * To force this sequence, EPN_IN_END_EN will be set by the
+ * detection of the end of transfer at bridge level (i.e. bridge
+ * interrupt).
+ */
+ usbf_ep_reg_bitclr(epn, USBF_REG_EPN_INT_ENA,
+ USBF_EPN_IN_EN | USBF_EPN_IN_END_EN);
+ epn->bridge_on_dma_end = usbf_epn_enable_in_end_int;
+
+ /* Clear any pending IN_END interrupt */
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS, ~(u32)USBF_EPN_IN_END_INT);
+
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_DMA_CTRL,
+ USBF_EPN_BURST_SET | USBF_EPN_DMAMODE0);
+ usbf_ep_reg_bitset(epn, USBF_REG_EPN_DMA_CTRL,
+ USBF_EPN_DMA_EN);
+
+ req->dma_size = (npkt - 1) * epn->ep.maxpacket + lastpkt;
+
+ dev_dbg(epn->udc->dev, "ep%u dma xfer %zu\n", epn->id,
+ req->dma_size);
+
+ req->xfer_step = USBF_XFER_WAIT_DMA;
+ break;
+
+ case USBF_XFER_WAIT_DMA:
+ if (!(epn->status & USBF_EPN_IN_END_INT)) {
+ dev_dbg(epn->udc->dev, "ep%u dma not done\n", epn->id);
+ break;
+ }
+ dev_dbg(epn->udc->dev, "ep%u dma done\n", epn->id);
+
+ usb_gadget_unmap_request(&epn->udc->gadget, &req->req, 1);
+ req->is_mapped = 0;
+
+ usbf_ep_reg_bitclr(epn, USBF_REG_EPN_CONTROL, USBF_EPN_AUTO);
+
+ usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
+ USBF_EPN_IN_END_EN,
+ USBF_EPN_IN_EN);
+
+ req->req.actual += req->dma_size;
+
+ left = req->req.length - req->req.actual;
+ if (left) {
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS, ~(u32)USBF_EPN_IN_INT);
+
+ dev_dbg(epn->udc->dev, "ep%u send residue %u\n", epn->id,
+ left);
+ usbf_epn_send_residue(epn,
+ req->req.buf + req->req.actual, left);
+ req->req.actual += left;
+ req->xfer_step = USBF_XFER_WAIT_END;
+ break;
+ }
+
+ if (req->req.actual % epn->ep.maxpacket) {
+ /* last packet was a short packet. Tell the hardware to
+ * send it right now.
+ */
+ dev_dbg(epn->udc->dev, "ep%u send short\n", epn->id);
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
+ ~(u32)USBF_EPN_IN_INT);
+ usbf_ep_reg_bitset(epn, USBF_REG_EPN_CONTROL,
+ USBF_EPN_DEND);
+
+ req->xfer_step = USBF_XFER_WAIT_END;
+ break;
+ }
+
+ /* The last packet was a full maxpacket-sized packet.
+ * Send a null (zero-length) packet if requested.
+ */
+ if (req->req.zero) {
+ req->xfer_step = USBF_XFER_SEND_NULL;
+ break;
+ }
+
+ /* No more action to do. Wait for the end of the USB transfer */
+ req->xfer_step = USBF_XFER_WAIT_END;
+ break;
+
+ case USBF_XFER_SEND_NULL:
+ dev_dbg(epn->udc->dev, "ep%u send null\n", epn->id);
+ usbf_epn_send_null(epn);
+ req->xfer_step = USBF_XFER_WAIT_END;
+ break;
+
+ case USBF_XFER_WAIT_END:
+ if (!(epn->status & USBF_EPN_IN_INT)) {
+ dev_dbg(epn->udc->dev, "ep%u end not done\n", epn->id);
+ break;
+ }
+ dev_dbg(epn->udc->dev, "ep%u send done %u/%u\n", epn->id,
+ req->req.actual, req->req.length);
+ req->xfer_step = USBF_XFER_START;
+ return 0;
+ }
+
+ return -EINPROGRESS;
+}
+
+static void usbf_epn_recv_residue(struct usbf_ep *epn, void *buf,
+ unsigned int size)
+{
+ u32 last;
+
+ last = usbf_ep_reg_readl(epn, USBF_REG_EPN_READ);
+ memcpy(buf, &last, size);
+}
+
+static int usbf_epn_pio_out(struct usbf_ep *epn, struct usbf_req *req)
+{
+ int req_status = 0;
+ unsigned int count;
+ unsigned int recv;
+ unsigned int left;
+ unsigned int nb;
+ void *buf;
+
+ if (epn->status & USBF_EPN_OUT_INT) {
+ recv = USBF_EPN_GET_LDATA(
+ usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT));
+ count = recv;
+
+ buf = req->req.buf;
+ buf += req->req.actual;
+
+ left = req->req.length - req->req.actual;
+
+ dev_dbg(epn->udc->dev, "ep%u recv %u, left %u, mpkt %u\n", epn->id,
+ recv, left, epn->ep.maxpacket);
+
+ if (left > epn->ep.maxpacket)
+ left = epn->ep.maxpacket;
+
+ if (count > left) {
+ req_status = -EOVERFLOW;
+ count = left;
+ }
+
+ if (count) {
+ nb = count / sizeof(u32);
+ if (nb) {
+ usbf_ep_reg_read_rep(epn, USBF_REG_EPN_READ,
+ buf, nb);
+ buf += (nb * sizeof(u32));
+ req->req.actual += (nb * sizeof(u32));
+ count -= (nb * sizeof(u32));
+ }
+ if (count) {
+ usbf_epn_recv_residue(epn, buf, count);
+ req->req.actual += count;
+ }
+ }
+ dev_dbg(epn->udc->dev, "ep%u recv %u/%u\n", epn->id,
+ req->req.actual, req->req.length);
+
+ if (req_status) {
+ dev_dbg(epn->udc->dev, "ep%u req.status=%d\n", epn->id,
+ req_status);
+ req->req.status = req_status;
+ return 0;
+ }
+
+ if (recv < epn->ep.maxpacket) {
+ dev_dbg(epn->udc->dev, "ep%u short packet\n", epn->id);
+ /* This is a short packet -> It is the end */
+ req->req.status = 0;
+ return 0;
+ }
+
+ /* Request full -> complete */
+ if (req->req.actual == req->req.length) {
+ req->req.status = 0;
+ return 0;
+ }
+ }
+
+ if (epn->status & USBF_EPN_OUT_NULL_INT) {
+ /* NULL packet received */
+ dev_dbg(epn->udc->dev, "ep%u null packet\n", epn->id);
+ if (req->req.actual != req->req.length) {
+ req->req.status = req->req.short_not_ok ?
+ -EREMOTEIO : 0;
+ } else {
+ req->req.status = 0;
+ }
+ return 0;
+ }
+
+ return -EINPROGRESS;
+}
+
+static void usbf_epn_enable_out_end_int(struct usbf_ep *epn)
+{
+ usbf_ep_reg_bitset(epn, USBF_REG_EPN_INT_ENA, USBF_EPN_OUT_END_EN);
+}
+
+static void usbf_epn_process_queue(struct usbf_ep *epn);
+
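+/* Program an OUT DMA transfer towards @addr. For a short packet
+ * (is_short), a single packet is transferred and the end of the transfer
+ * is signalled through the bridge DMA_END interrupt, which then re-arms
+ * the OUT_END endpoint interrupt. Otherwise, npkt max-packet packets are
+ * transferred and only the OUT_END interrupt is kept enabled.
+ */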
+static void usbf_epn_dma_out_send_dma(struct usbf_ep *epn, dma_addr_t addr, u32 npkt, bool is_short)
+{
+ usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR2, USBF_SYS_EPN_MPKT(epn->ep.maxpacket));
+ usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_TADR, addr);
+
+ if (is_short) {
+ usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR1,
+ USBF_SYS_EPN_SET_DMACNT(1) | USBF_SYS_EPN_DIR0);
+ usbf_ep_dma_reg_bitset(epn, USBF_REG_DMA_EPN_DCR1,
+ USBF_SYS_EPN_REQEN);
+
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_LEN_DCNT,
+ USBF_EPN_SET_DMACNT(0));
+
+ /* The end of the DMA transfer at the USBF level needs to be
+ * handled after the end of the DMA transfer has been detected
+ * at the bridge level.
+ * To force this sequence, enabling the OUT_END interrupt will
+ * be done by the detection of the end of transfer at bridge
+ * level (i.e. bridge interrupt).
+ */
+ usbf_ep_reg_bitclr(epn, USBF_REG_EPN_INT_ENA,
+ USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN | USBF_EPN_OUT_END_EN);
+ epn->bridge_on_dma_end = usbf_epn_enable_out_end_int;
+
+ /* Clear any pending OUT_END interrupt */
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
+ ~(u32)USBF_EPN_OUT_END_INT);
+
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_DMA_CTRL,
+ USBF_EPN_STOP_MODE | USBF_EPN_STOP_SET | USBF_EPN_DMAMODE0);
+ usbf_ep_reg_bitset(epn, USBF_REG_EPN_DMA_CTRL,
+ USBF_EPN_DMA_EN);
+ return;
+ }
+
+ usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR1,
+ USBF_SYS_EPN_SET_DMACNT(npkt) | USBF_SYS_EPN_DIR0);
+ usbf_ep_dma_reg_bitset(epn, USBF_REG_DMA_EPN_DCR1,
+ USBF_SYS_EPN_REQEN);
+
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_LEN_DCNT,
+ USBF_EPN_SET_DMACNT(npkt));
+
+ /* Here, the bridge may or may not generate an interrupt to signal the
+ * end of DMA transfer.
+ * Keep only the OUT_END interrupt and handle the bridge later during
+ * the OUT_END processing.
+ */
+ usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
+ USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN,
+ USBF_EPN_OUT_END_EN);
+
+ /* Disable the bridge interrupt. It will be re-enabled later */
+ usbf_reg_bitclr(epn->udc, USBF_REG_AHBBINTEN,
+ USBF_SYS_DMA_ENDINTEN_EPN(epn->id));
+
+ /* Clear any pending DMA_END interrupt at bridge level */
+ usbf_reg_writel(epn->udc, USBF_REG_AHBBINT,
+ USBF_SYS_DMA_ENDINT_EPN(epn->id));
+
+ /* Clear any pending OUT_END interrupt */
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
+ ~(u32)USBF_EPN_OUT_END_INT);
+
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_DMA_CTRL,
+ USBF_EPN_STOP_MODE | USBF_EPN_STOP_SET | USBF_EPN_DMAMODE0 | USBF_EPN_BURST_SET);
+ usbf_ep_reg_bitset(epn, USBF_REG_EPN_DMA_CTRL,
+ USBF_EPN_DMA_EN);
+}
+
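+/* Complete an OUT DMA transfer: restore the endpoint interrupt mask and,
+ * for a full-packet transfer, return the number of bytes that were not
+ * received because the DMA was halted by a short or null packet.
+ */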
+static size_t usbf_epn_dma_out_complete_dma(struct usbf_ep *epn, bool is_short)
+{
+ u32 dmacnt;
+ u32 tmp;
+ int ret;
+
+ /* Restore interrupt mask */
+ usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
+ USBF_EPN_OUT_END_EN,
+ USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN);
+
+ if (is_short) {
+ /* Nothing more to do when the DMA was for a short packet */
+ return 0;
+ }
+
+ /* Enable the bridge interrupt */
+ usbf_reg_bitset(epn->udc, USBF_REG_AHBBINTEN,
+ USBF_SYS_DMA_ENDINTEN_EPN(epn->id));
+
+ tmp = usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT);
+ dmacnt = USBF_EPN_GET_DMACNT(tmp);
+
+ if (dmacnt) {
+ /* Some packets were not received (the DMA was halted by a
+ * short or a null packet).
+ * The bridge never raises an interrupt in this case.
+ * Wait for the end of the transfer at the bridge level.
+ */
+ ret = readl_poll_timeout_atomic(
+ epn->dma_regs + USBF_REG_DMA_EPN_DCR1,
+ tmp, (USBF_SYS_EPN_GET_DMACNT(tmp) == dmacnt),
+ 0, 10000);
+ if (ret) {
+ dev_err(epn->udc->dev, "ep%u wait bridge timed out\n",
+ epn->id);
+ }
+
+ usbf_ep_dma_reg_bitclr(epn, USBF_REG_DMA_EPN_DCR1,
+ USBF_SYS_EPN_REQEN);
+
+ /* The dmacnt value tells how many packets were not transferred
+ * out of the maximum number of packets we set for the DMA
+ * transfer. Compute the remaining DMA size from this value.
+ */
+ return dmacnt * epn->ep.maxpacket;
+ }
+
+ return 0;
+}
+
+static int usbf_epn_dma_out(struct usbf_ep *epn, struct usbf_req *req)
+{
+ unsigned int dma_left;
+ unsigned int count;
+ unsigned int recv;
+ unsigned int left;
+ u32 npkt;
+ int ret;
+
+ if (!IS_ALIGNED((uintptr_t)req->req.buf, 4)) {
+ dev_dbg(epn->udc->dev, "ep%u buf unaligned -> fallback pio\n",
+ epn->id);
+ return usbf_epn_pio_out(epn, req);
+ }
+
+ switch (req->xfer_step) {
+ default:
+ case USBF_XFER_START:
+ if (epn->status & USBF_EPN_OUT_NULL_INT) {
+ dev_dbg(epn->udc->dev, "ep%u null packet\n", epn->id);
+ if (req->req.actual != req->req.length) {
+ req->req.status = req->req.short_not_ok ?
+ -EREMOTEIO : 0;
+ } else {
+ req->req.status = 0;
+ }
+ return 0;
+ }
+
+ if (!(epn->status & USBF_EPN_OUT_INT)) {
+ dev_dbg(epn->udc->dev, "ep%u OUT_INT not set -> spurious\n",
+ epn->id);
+ break;
+ }
+
+ recv = USBF_EPN_GET_LDATA(
+ usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT));
+ if (!recv) {
+ dev_dbg(epn->udc->dev, "ep%u recv = 0 -> spurious\n",
+ epn->id);
+ break;
+ }
+
+ left = req->req.length - req->req.actual;
+
+ dev_dbg(epn->udc->dev, "ep%u recv %u, left %u, mpkt %u\n", epn->id,
+ recv, left, epn->ep.maxpacket);
+
+ if (recv > left) {
+ dev_err(epn->udc->dev, "ep%u overflow (%u/%u)\n",
+ epn->id, recv, left);
+ req->req.status = -EOVERFLOW;
+ return -EOVERFLOW;
+ }
+
+ if (recv < epn->ep.maxpacket) {
+ /* Short packet received */
+ dev_dbg(epn->udc->dev, "ep%u short packet\n", epn->id);
+ if (recv <= 3) {
+ usbf_epn_recv_residue(epn,
+ req->req.buf + req->req.actual, recv);
+ req->req.actual += recv;
+
+ dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n",
+ epn->id, req->req.actual, req->req.length);
+
+ req->xfer_step = USBF_XFER_START;
+ return 0;
+ }
+
+ ret = usb_gadget_map_request(&epn->udc->gadget, &req->req, 0);
+ if (ret < 0) {
+ dev_err(epn->udc->dev, "map request failed (%d)\n",
+ ret);
+ return ret;
+ }
+ req->is_mapped = 1;
+
+ usbf_epn_dma_out_send_dma(epn,
+ req->req.dma + req->req.actual,
+ 1, true);
+ req->dma_size = recv & ~0x3;
+
+ dev_dbg(epn->udc->dev, "ep%u dma short xfer %zu\n", epn->id,
+ req->dma_size);
+
+ req->xfer_step = USBF_XFER_WAIT_DMA_SHORT;
+ break;
+ }
+
+ ret = usb_gadget_map_request(&epn->udc->gadget, &req->req, 0);
+ if (ret < 0) {
+ dev_err(epn->udc->dev, "map request failed (%d)\n",
+ ret);
+ return ret;
+ }
+ req->is_mapped = 1;
+
+ /* Use the maximum DMA size according to the request buffer.
+ * We will adjust the received size later at the end of the DMA
+ * transfer with the left size computed from
+ * usbf_epn_dma_out_complete_dma().
+ */
+ npkt = left / epn->ep.maxpacket;
+ usbf_epn_dma_out_send_dma(epn,
+ req->req.dma + req->req.actual,
+ npkt, false);
+ req->dma_size = npkt * epn->ep.maxpacket;
+
+ dev_dbg(epn->udc->dev, "ep%u dma xfer %zu (%u)\n", epn->id,
+ req->dma_size, npkt);
+
+ req->xfer_step = USBF_XFER_WAIT_DMA;
+ break;
+
+ case USBF_XFER_WAIT_DMA_SHORT:
+ if (!(epn->status & USBF_EPN_OUT_END_INT)) {
+ dev_dbg(epn->udc->dev, "ep%u dma short not done\n", epn->id);
+ break;
+ }
+ dev_dbg(epn->udc->dev, "ep%u dma short done\n", epn->id);
+
+ usbf_epn_dma_out_complete_dma(epn, true);
+
+ usb_gadget_unmap_request(&epn->udc->gadget, &req->req, 0);
+ req->is_mapped = 0;
+
+ req->req.actual += req->dma_size;
+
+ recv = USBF_EPN_GET_LDATA(
+ usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT));
+
+ count = recv & 0x3;
+ if (count) {
+ dev_dbg(epn->udc->dev, "ep%u recv residue %u\n", epn->id,
+ count);
+ usbf_epn_recv_residue(epn,
+ req->req.buf + req->req.actual, count);
+ req->req.actual += count;
+ }
+
+ dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n", epn->id,
+ req->req.actual, req->req.length);
+
+ req->xfer_step = USBF_XFER_START;
+ return 0;
+
+ case USBF_XFER_WAIT_DMA:
+ if (!(epn->status & USBF_EPN_OUT_END_INT)) {
+ dev_dbg(epn->udc->dev, "ep%u dma not done\n", epn->id);
+ break;
+ }
+ dev_dbg(epn->udc->dev, "ep%u dma done\n", epn->id);
+
+ dma_left = usbf_epn_dma_out_complete_dma(epn, false);
+ if (dma_left) {
+ /* Adjust the final DMA size with the size not transferred */
+ count = req->dma_size - dma_left;
+
+ dev_dbg(epn->udc->dev, "ep%u dma xfer done %u\n", epn->id,
+ count);
+
+ req->req.actual += count;
+
+ if (epn->status & USBF_EPN_OUT_NULL_INT) {
+ /* DMA was stopped by a null packet reception */
+ dev_dbg(epn->udc->dev, "ep%u dma stopped by null pckt\n",
+ epn->id);
+ usb_gadget_unmap_request(&epn->udc->gadget,
+ &req->req, 0);
+ req->is_mapped = 0;
+
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
+ ~(u32)USBF_EPN_OUT_NULL_INT);
+
+ if (req->req.actual != req->req.length) {
+ req->req.status = req->req.short_not_ok ?
+ -EREMOTEIO : 0;
+ } else {
+ req->req.status = 0;
+ }
+ dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n",
+ epn->id, req->req.actual, req->req.length);
+ req->xfer_step = USBF_XFER_START;
+ return 0;
+ }
+
+ recv = USBF_EPN_GET_LDATA(
+ usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT));
+ left = req->req.length - req->req.actual;
+ if (recv > left) {
+ dev_err(epn->udc->dev,
+ "ep%u overflow (%u/%u)\n", epn->id,
+ recv, left);
+ req->req.status = -EOVERFLOW;
+ usb_gadget_unmap_request(&epn->udc->gadget,
+ &req->req, 0);
+ req->is_mapped = 0;
+
+ req->xfer_step = USBF_XFER_START;
+ return -EOVERFLOW;
+ }
+
+ if (recv > 3) {
+ usbf_epn_dma_out_send_dma(epn,
+ req->req.dma + req->req.actual,
+ 1, true);
+ req->dma_size = recv & ~0x3;
+
+ dev_dbg(epn->udc->dev, "ep%u dma short xfer %zu\n",
+ epn->id, req->dma_size);
+
+ req->xfer_step = USBF_XFER_WAIT_DMA_SHORT;
+ break;
+ }
+
+ usb_gadget_unmap_request(&epn->udc->gadget, &req->req, 0);
+ req->is_mapped = 0;
+
+ count = recv & 0x3;
+ if (count) {
+ dev_dbg(epn->udc->dev, "ep%u recv residue %u\n",
+ epn->id, count);
+ usbf_epn_recv_residue(epn,
+ req->req.buf + req->req.actual, count);
+ req->req.actual += count;
+ }
+
+ dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n", epn->id,
+ req->req.actual, req->req.length);
+
+ req->xfer_step = USBF_XFER_START;
+ return 0;
+ }
+
+ /* Process queue at bridge interrupt only */
+ usbf_ep_reg_bitclr(epn, USBF_REG_EPN_INT_ENA,
+ USBF_EPN_OUT_END_EN | USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN);
+ epn->status = 0;
+ epn->bridge_on_dma_end = usbf_epn_process_queue;
+
+ req->xfer_step = USBF_XFER_WAIT_BRIDGE;
+ break;
+
+ case USBF_XFER_WAIT_BRIDGE:
+ dev_dbg(epn->udc->dev, "ep%u bridge transfers done\n", epn->id);
+
+ /* Restore interrupt mask */
+ usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
+ USBF_EPN_OUT_END_EN,
+ USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN);
+
+ usb_gadget_unmap_request(&epn->udc->gadget, &req->req, 0);
+ req->is_mapped = 0;
+
+ req->req.actual += req->dma_size;
+
+ req->xfer_step = USBF_XFER_START;
+ left = req->req.length - req->req.actual;
+ if (!left) {
+ /* No more data can be added to the buffer */
+ dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n", epn->id,
+ req->req.actual, req->req.length);
+ return 0;
+ }
+ dev_dbg(epn->udc->dev, "ep%u recv done %u/%u, wait more data\n",
+ epn->id, req->req.actual, req->req.length);
+ break;
+ }
+
+ return -EINPROGRESS;
+}
+
+static void usbf_epn_dma_stop(struct usbf_ep *epn)
+{
+ usbf_ep_dma_reg_bitclr(epn, USBF_REG_DMA_EPN_DCR1, USBF_SYS_EPN_REQEN);
+
+ /* In the datasheet:
+ * If EP[m]_REQEN = 0b is set during DMA transfer, AHB-EPC stops DMA
+ * after 1 packet transfer completed.
+ * Therefore, wait long enough to ensure that the DMA transfer has
+ * completed. The wait time depends on the system, especially on AHB
+ * bus activity, so an arbitrary 10ms should be sufficient.
+ */
+ mdelay(10);
+
+ usbf_ep_reg_bitclr(epn, USBF_REG_EPN_DMA_CTRL, USBF_EPN_DMA_EN);
+}
+
+static void usbf_epn_dma_abort(struct usbf_ep *epn, struct usbf_req *req)
+{
+ dev_dbg(epn->udc->dev, "ep%u %s dma abort\n", epn->id,
+ epn->is_in ? "in" : "out");
+
+ epn->bridge_on_dma_end = NULL;
+
+ usbf_epn_dma_stop(epn);
+
+ usb_gadget_unmap_request(&epn->udc->gadget, &req->req,
+ epn->is_in ? 1 : 0);
+ req->is_mapped = 0;
+
+ usbf_ep_reg_bitclr(epn, USBF_REG_EPN_CONTROL, USBF_EPN_AUTO);
+
+ if (epn->is_in) {
+ usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
+ USBF_EPN_IN_END_EN,
+ USBF_EPN_IN_EN);
+ } else {
+ usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
+ USBF_EPN_OUT_END_EN,
+ USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN);
+ }
+
+ /* As DMA is stopped, make sure that no DMA interrupts are pending */
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
+ USBF_EPN_IN_END_INT | USBF_EPN_OUT_END_INT);
+
+ usbf_reg_writel(epn->udc, USBF_REG_AHBBINT, USBF_SYS_DMA_ENDINT_EPN(epn->id));
+
+ /* Enable the DMA interrupt at the bridge level */
+ usbf_reg_bitset(epn->udc, USBF_REG_AHBBINTEN,
+ USBF_SYS_DMA_ENDINTEN_EPN(epn->id));
+
+ /* Reset transfer step */
+ req->xfer_step = USBF_XFER_START;
+}
+
+static void usbf_epn_fifo_flush(struct usbf_ep *epn)
+{
+ u32 ctrl;
+ u32 sts;
+ int ret;
+
+ dev_dbg(epn->udc->dev, "ep%u %s fifo flush\n", epn->id,
+ epn->is_in ? "in" : "out");
+
+ ctrl = usbf_ep_reg_readl(epn, USBF_REG_EPN_CONTROL);
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_CONTROL, ctrl | USBF_EPN_BCLR);
+
+ if (ctrl & USBF_EPN_DIR0)
+ return;
+
+ ret = readl_poll_timeout_atomic(epn->regs + USBF_REG_EPN_STATUS, sts,
+ (sts & (USBF_EPN_IN_DATA | USBF_EPN_IN_EMPTY)) == USBF_EPN_IN_EMPTY,
+ 0, 10000);
+ if (ret)
+ dev_err(epn->udc->dev, "ep%u flush fifo timed out\n", epn->id);
+}
+
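+/* Complete a request: remove it from the endpoint queue, abort any DMA
+ * transfer still mapped for it and give it back to the gadget driver
+ * with the UDC lock temporarily released.
+ */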
+static void usbf_ep_req_done(struct usbf_ep *ep, struct usbf_req *req,
+ int status)
+{
+ list_del_init(&req->queue);
+
+ if (status) {
+ req->req.status = status;
+ } else {
+ if (req->req.status == -EINPROGRESS)
+ req->req.status = status;
+ }
+
+ dev_dbg(ep->udc->dev, "ep%u %s req done length %u/%u, status=%d\n", ep->id,
+ ep->is_in ? "in" : "out",
+ req->req.actual, req->req.length, req->req.status);
+
+ if (req->is_mapped)
+ usbf_epn_dma_abort(ep, req);
+
+ spin_unlock(&ep->udc->lock);
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+ spin_lock(&ep->udc->lock);
+}
+
+static void usbf_ep_nuke(struct usbf_ep *ep, int status)
+{
+ struct usbf_req *req;
+
+ dev_dbg(ep->udc->dev, "ep%u %s nuke status %d\n", ep->id,
+ ep->is_in ? "in" : "out",
+ status);
+
+ while (!list_empty(&ep->queue)) {
+ req = list_first_entry(&ep->queue, struct usbf_req, queue);
+ usbf_ep_req_done(ep, req, status);
+ }
+
+ if (ep->id == 0)
+ usbf_ep0_fifo_flush(ep);
+ else
+ usbf_epn_fifo_flush(ep);
+}
+
+static bool usbf_ep_is_stalled(struct usbf_ep *ep)
+{
+ u32 ctrl;
+
+ if (ep->id == 0) {
+ ctrl = usbf_ep_reg_readl(ep, USBF_REG_EP0_CONTROL);
+ return (ctrl & USBF_EP0_STL) ? true : false;
+ }
+
+ ctrl = usbf_ep_reg_readl(ep, USBF_REG_EPN_CONTROL);
+ if (ep->is_in)
+ return (ctrl & USBF_EPN_ISTL) ? true : false;
+
+ return (ctrl & USBF_EPN_OSTL) ? true : false;
+}
+
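+/* Start the first queued request on a non-control endpoint: kick the IN
+ * transfer immediately or, for OUT endpoints, clear ONAK and enable the
+ * OUT interrupts so that data can be received. With an empty queue, OUT
+ * endpoints are NAKed and their interrupts disabled.
+ */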
+static int usbf_epn_start_queue(struct usbf_ep *epn)
+{
+ struct usbf_req *req;
+ int ret;
+
+ if (usbf_ep_is_stalled(epn))
+ return 0;
+
+ req = list_first_entry_or_null(&epn->queue, struct usbf_req, queue);
+
+ if (epn->is_in) {
+ if (req && !epn->is_processing) {
+ ret = epn->dma_regs ?
+ usbf_epn_dma_in(epn, req) :
+ usbf_epn_pio_in(epn, req);
+ if (ret != -EINPROGRESS) {
+ dev_err(epn->udc->dev,
+ "queued next request not in progress\n");
+ /* The request cannot be completed (i.e.
+ * ret == 0) on the first call.
+ * Stall and nuke the endpoint.
+ */
+ return ret ? ret : -EIO;
+ }
+ }
+ } else {
+ if (req) {
+ /* Clear ONAK to accept OUT tokens */
+ usbf_ep_reg_bitclr(epn, USBF_REG_EPN_CONTROL,
+ USBF_EPN_ONAK);
+
+ /* Enable interrupts */
+ usbf_ep_reg_bitset(epn, USBF_REG_EPN_INT_ENA,
+ USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT);
+ } else {
+ /* Disable incoming data and interrupts.
+ * They will be enabled on the next usb_ep_queue call
+ */
+ usbf_ep_reg_bitset(epn, USBF_REG_EPN_CONTROL,
+ USBF_EPN_ONAK);
+ usbf_ep_reg_bitclr(epn, USBF_REG_EPN_INT_ENA,
+ USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT);
+ }
+ }
+ return 0;
+}
+
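+/* Process the request queue of an endpoint using the transfer handler
+ * that matches it (ep0 or epN, PIO or DMA, IN or OUT). Requests are
+ * completed one by one until one of them needs more USB traffic
+ * (-EINPROGRESS) or the queue becomes empty.
+ */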
+static int usbf_ep_process_queue(struct usbf_ep *ep)
+{
+ int (*usbf_ep_xfer)(struct usbf_ep *ep, struct usbf_req *req);
+ struct usbf_req *req;
+ int is_processing;
+ int ret;
+
+ if (ep->is_in) {
+ usbf_ep_xfer = usbf_ep0_pio_in;
+ if (ep->id) {
+ usbf_ep_xfer = ep->dma_regs ?
+ usbf_epn_dma_in : usbf_epn_pio_in;
+ }
+ } else {
+ usbf_ep_xfer = usbf_ep0_pio_out;
+ if (ep->id) {
+ usbf_ep_xfer = ep->dma_regs ?
+ usbf_epn_dma_out : usbf_epn_pio_out;
+ }
+ }
+
+ req = list_first_entry_or_null(&ep->queue, struct usbf_req, queue);
+ if (!req) {
+ dev_err(ep->udc->dev,
+ "no request available for ep%u %s process\n", ep->id,
+ ep->is_in ? "in" : "out");
+ return -ENOENT;
+ }
+
+ do {
+ /* We're going to read the FIFO for the current request.
+ * NAK any other incoming data to avoid a race condition if no
+ * more requests are available.
+ */
+ if (!ep->is_in && ep->id != 0) {
+ usbf_ep_reg_bitset(ep, USBF_REG_EPN_CONTROL,
+ USBF_EPN_ONAK);
+ }
+
+ ret = usbf_ep_xfer(ep, req);
+ if (ret == -EINPROGRESS) {
+ if (!ep->is_in && ep->id != 0) {
+ /* The current request needs more data.
+ * Allow incoming data
+ */
+ usbf_ep_reg_bitclr(ep, USBF_REG_EPN_CONTROL,
+ USBF_EPN_ONAK);
+ }
+ return ret;
+ }
+
+ is_processing = ep->is_processing;
+ ep->is_processing = 1;
+ usbf_ep_req_done(ep, req, ret);
+ ep->is_processing = is_processing;
+
+ if (ret) {
+ /* An error was detected during the request transfer.
+ * Any pending DMA transfers were aborted by the
+ * usbf_ep_req_done() call.
+ * It's time to flush the fifo
+ */
+ if (ep->id == 0)
+ usbf_ep0_fifo_flush(ep);
+ else
+ usbf_epn_fifo_flush(ep);
+ }
+
+ req = list_first_entry_or_null(&ep->queue, struct usbf_req,
+ queue);
+
+ if (ep->is_in)
+ continue;
+
+ if (ep->id != 0) {
+ if (req) {
+ /* Another request is available.
+ * Allow incoming data
+ */
+ usbf_ep_reg_bitclr(ep, USBF_REG_EPN_CONTROL,
+ USBF_EPN_ONAK);
+ } else {
+ /* No request queued. Disable interrupts.
+ * They will be enabled on usb_ep_queue
+ */
+ usbf_ep_reg_bitclr(ep, USBF_REG_EPN_INT_ENA,
+ USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT);
+ }
+ }
+ /* Do not recall usbf_ep_xfer() */
+ return req ? -EINPROGRESS : 0;
+
+ } while (req);
+
+ return 0;
+}
+
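+/* Stall or unstall an endpoint. Unstalling a non-control endpoint also
+ * aborts a DMA transfer left mapped by a host-initiated halt, flushes
+ * the FIFO, clears the data toggle (PID) and restarts the request queue.
+ */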
+static void usbf_ep_stall(struct usbf_ep *ep, bool stall)
+{
+ struct usbf_req *first;
+
+ dev_dbg(ep->udc->dev, "ep%u %s %s\n", ep->id,
+ ep->is_in ? "in" : "out",
+ stall ? "stall" : "unstall");
+
+ if (ep->id == 0) {
+ if (stall)
+ usbf_ep_reg_bitset(ep, USBF_REG_EP0_CONTROL, USBF_EP0_STL);
+ else
+ usbf_ep_reg_bitclr(ep, USBF_REG_EP0_CONTROL, USBF_EP0_STL);
+ return;
+ }
+
+ if (stall) {
+ if (ep->is_in)
+ usbf_ep_reg_bitset(ep, USBF_REG_EPN_CONTROL,
+ USBF_EPN_ISTL);
+ else
+ usbf_ep_reg_bitset(ep, USBF_REG_EPN_CONTROL,
+ USBF_EPN_OSTL | USBF_EPN_OSTL_EN);
+ } else {
+ first = list_first_entry_or_null(&ep->queue, struct usbf_req, queue);
+ if (first && first->is_mapped) {
+ /* This can appear if the host halts an endpoint using
+ * SET_FEATURE and then un-halts the endpoint
+ */
+ usbf_epn_dma_abort(ep, first);
+ }
+ usbf_epn_fifo_flush(ep);
+ if (ep->is_in) {
+ usbf_ep_reg_clrset(ep, USBF_REG_EPN_CONTROL,
+ USBF_EPN_ISTL,
+ USBF_EPN_IPIDCLR);
+ } else {
+ usbf_ep_reg_clrset(ep, USBF_REG_EPN_CONTROL,
+ USBF_EPN_OSTL,
+ USBF_EPN_OSTL_EN | USBF_EPN_OPIDCLR);
+ }
+ usbf_epn_start_queue(ep);
+ }
+}
+
+static void usbf_ep0_enable(struct usbf_ep *ep0)
+{
+ usbf_ep_reg_writel(ep0, USBF_REG_EP0_CONTROL, USBF_EP0_INAK_EN | USBF_EP0_BCLR);
+
+ usbf_ep_reg_writel(ep0, USBF_REG_EP0_INT_ENA,
+ USBF_EP0_SETUP_EN | USBF_EP0_STG_START_EN | USBF_EP0_STG_END_EN |
+ USBF_EP0_OUT_EN | USBF_EP0_OUT_NULL_EN | USBF_EP0_IN_EN);
+
+ ep0->udc->ep0state = EP0_IDLE;
+ ep0->disabled = 0;
+
+ /* enable interrupts for the ep0 */
+ usbf_reg_bitset(ep0->udc, USBF_REG_USB_INT_ENA, USBF_USB_EPN_EN(0));
+}
+
+static int usbf_epn_enable(struct usbf_ep *epn)
+{
+ u32 base_addr;
+ u32 ctrl;
+
+ base_addr = usbf_ep_info[epn->id].base_addr;
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_PCKT_ADRS,
+ USBF_EPN_BASEAD(base_addr) | USBF_EPN_MPKT(epn->ep.maxpacket));
+
+ /* OUT transfer interrupts are enabled during usb_ep_queue */
+ if (epn->is_in) {
+ /* Will be changed in DMA processing */
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_INT_ENA, USBF_EPN_IN_EN);
+ }
+
+ /* Clear the buffer, set the endpoint direction, set the IN/OUT
+ * STALL bits and enable the endpoint.
+ * Send NAK for OUT data as no requests are queued yet.
+ */
+ ctrl = USBF_EPN_EN | USBF_EPN_BCLR;
+ if (epn->is_in)
+ ctrl |= USBF_EPN_OSTL | USBF_EPN_OSTL_EN;
+ else
+ ctrl |= USBF_EPN_DIR0 | USBF_EPN_ISTL | USBF_EPN_OSTL_EN | USBF_EPN_ONAK;
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_CONTROL, ctrl);
+
+ return 0;
+}
+
+static int usbf_ep_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
+ struct usbf_udc *udc = ep->udc;
+ unsigned long flags;
+ int ret;
+
+ if (ep->id == 0)
+ return -EINVAL;
+
+ if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
+ return -EINVAL;
+
+ dev_dbg(ep->udc->dev, "ep%u %s mpkts %d\n", ep->id,
+ usb_endpoint_dir_in(desc) ? "in" : "out",
+ usb_endpoint_maxp(desc));
+
+ spin_lock_irqsave(&ep->udc->lock, flags);
+ ep->is_in = usb_endpoint_dir_in(desc);
+ ep->ep.maxpacket = usb_endpoint_maxp(desc);
+
+ ret = usbf_epn_enable(ep);
+ if (ret)
+ goto end;
+
+ ep->disabled = 0;
+
+ /* enable interrupts for this endpoint */
+ usbf_reg_bitset(udc, USBF_REG_USB_INT_ENA, USBF_USB_EPN_EN(ep->id));
+
+ /* enable DMA interrupt at bridge level if DMA is used */
+ if (ep->dma_regs) {
+ ep->bridge_on_dma_end = NULL;
+ usbf_reg_bitset(udc, USBF_REG_AHBBINTEN,
+ USBF_SYS_DMA_ENDINTEN_EPN(ep->id));
+ }
+
+ ret = 0;
+end:
+ spin_unlock_irqrestore(&ep->udc->lock, flags);
+ return ret;
+}
+
+static int usbf_epn_disable(struct usbf_ep *epn)
+{
+ /* Disable interrupts */
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_INT_ENA, 0);
+
+ /* Disable endpoint */
+ usbf_ep_reg_bitclr(epn, USBF_REG_EPN_CONTROL, USBF_EPN_EN);
+
+ /* remove anything that was pending */
+ usbf_ep_nuke(epn, -ESHUTDOWN);
+
+ return 0;
+}
+
+static int usbf_ep_disable(struct usb_ep *_ep)
+{
+ struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
+ struct usbf_udc *udc = ep->udc;
+ unsigned long flags;
+ int ret;
+
+ if (ep->id == 0)
+ return -EINVAL;
+
+ dev_dbg(ep->udc->dev, "ep%u %s mpkts %d\n", ep->id,
+ ep->is_in ? "in" : "out", ep->ep.maxpacket);
+
+ spin_lock_irqsave(&ep->udc->lock, flags);
+ ep->disabled = 1;
+ /* Disable DMA interrupt */
+ if (ep->dma_regs) {
+ usbf_reg_bitclr(udc, USBF_REG_AHBBINTEN,
+ USBF_SYS_DMA_ENDINTEN_EPN(ep->id));
+ ep->bridge_on_dma_end = NULL;
+ }
+ /* disable interrupts for this endpoint */
+ usbf_reg_bitclr(udc, USBF_REG_USB_INT_ENA, USBF_USB_EPN_EN(ep->id));
+ /* and the endpoint itself */
+ ret = usbf_epn_disable(ep);
+ spin_unlock_irqrestore(&ep->udc->lock, flags);
+
+ return ret;
+}
+
+static int usbf_ep0_queue(struct usbf_ep *ep0, struct usbf_req *req,
+ gfp_t gfp_flags)
+{
+ int ret;
+
+ req->req.actual = 0;
+ req->req.status = -EINPROGRESS;
+ req->is_zero_sent = 0;
+
+ list_add_tail(&req->queue, &ep0->queue);
+
+ if (ep0->udc->ep0state == EP0_IN_STATUS_START_PHASE)
+ return 0;
+
+ if (!ep0->is_in)
+ return 0;
+
+ if (ep0->udc->ep0state == EP0_IN_STATUS_PHASE) {
+ if (req->req.length) {
+ dev_err(ep0->udc->dev,
+ "request lng %u for ep0 in status phase\n",
+ req->req.length);
+ return -EINVAL;
+ }
+ ep0->delayed_status = 0;
+ }
+ if (!ep0->is_processing) {
+ ret = usbf_ep0_pio_in(ep0, req);
+ if (ret != -EINPROGRESS) {
+ dev_err(ep0->udc->dev,
+ "queued request not in progress\n");
+ /* The request cannot be completed (i.e.
+ * ret == 0) on the first call
+ */
+ return ret ? ret : -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int usbf_epn_queue(struct usbf_ep *ep, struct usbf_req *req,
+ gfp_t gfp_flags)
+{
+ int was_empty;
+ int ret;
+
+ if (ep->disabled) {
+ dev_err(ep->udc->dev, "ep%u request queued while disabled\n",
+ ep->id);
+ return -ESHUTDOWN;
+ }
+
+ req->req.actual = 0;
+ req->req.status = -EINPROGRESS;
+ req->is_zero_sent = 0;
+ req->xfer_step = USBF_XFER_START;
+
+ was_empty = list_empty(&ep->queue);
+ list_add_tail(&req->queue, &ep->queue);
+ if (was_empty) {
+ ret = usbf_epn_start_queue(ep);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int usbf_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t gfp_flags)
+{
+ struct usbf_req *req = container_of(_req, struct usbf_req, req);
+ struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
+ struct usbf_udc *udc = ep->udc;
+ unsigned long flags;
+ int ret;
+
+ if (!_req || !_req->buf)
+ return -EINVAL;
+
+ if (!udc || !udc->driver)
+ return -EINVAL;
+
+ dev_dbg(ep->udc->dev, "ep%u %s req queue length %u, zero %u, short_not_ok %u\n",
+ ep->id, ep->is_in ? "in" : "out",
+ req->req.length, req->req.zero, req->req.short_not_ok);
+
+ spin_lock_irqsave(&ep->udc->lock, flags);
+ if (ep->id == 0)
+ ret = usbf_ep0_queue(ep, req, gfp_flags);
+ else
+ ret = usbf_epn_queue(ep, req, gfp_flags);
+ spin_unlock_irqrestore(&ep->udc->lock, flags);
+ return ret;
+}
+
+static int usbf_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct usbf_req *req = container_of(_req, struct usbf_req, req);
+ struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
+ unsigned long flags;
+ int is_processing;
+ int first;
+ int ret;
+
+ spin_lock_irqsave(&ep->udc->lock, flags);
+
+ dev_dbg(ep->udc->dev, "ep%u %s req dequeue length %u/%u\n",
+ ep->id, ep->is_in ? "in" : "out",
+ req->req.actual, req->req.length);
+
+ first = list_is_first(&req->queue, &ep->queue);
+
+ /* Complete the request but avoid any operation that could be done
+ * if a new request is queued during the request completion
+ */
+ is_processing = ep->is_processing;
+ ep->is_processing = 1;
+ usbf_ep_req_done(ep, req, -ECONNRESET);
+ ep->is_processing = is_processing;
+
+ if (first) {
+ /* The first item in the list was dequeued.
+ * This item may already have been submitted to the hardware,
+ * so flush the FIFO.
+ */
+ if (ep->id)
+ usbf_epn_fifo_flush(ep);
+ else
+ usbf_ep0_fifo_flush(ep);
+ }
+
+ if (ep->id == 0) {
+ /* We dequeue a request on ep0. On this endpoint, we can have
+ * 1 request related to the data stage and/or 1 request
+ * related to the status stage.
+ * We dequeue one of them, so the USB control transaction is
+ * no longer coherent. The simple way to stay consistent after
+ * dequeuing is to stall and nuke the endpoint and wait for the
+ * next SETUP packet.
+ */
+ usbf_ep_stall(ep, true);
+ usbf_ep_nuke(ep, -ECONNRESET);
+ ep->udc->ep0state = EP0_IDLE;
+ goto end;
+ }
+
+ if (!first)
+ goto end;
+
+ ret = usbf_epn_start_queue(ep);
+ if (ret) {
+ usbf_ep_stall(ep, true);
+ usbf_ep_nuke(ep, -EIO);
+ }
+end:
+ spin_unlock_irqrestore(&ep->udc->lock, flags);
+ return 0;
+}
+
+static struct usb_request *usbf_ep_alloc_request(struct usb_ep *_ep,
+ gfp_t gfp_flags)
+{
+ struct usbf_req *req;
+
+ if (!_ep)
+ return NULL;
+
+ req = kzalloc(sizeof(*req), gfp_flags);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD(&req->queue);
+
+ return &req->req;
+}
+
+static void usbf_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct usbf_req *req;
+ unsigned long flags;
+ struct usbf_ep *ep;
+
+ if (!_ep || !_req)
+ return;
+
+ req = container_of(_req, struct usbf_req, req);
+ ep = container_of(_ep, struct usbf_ep, ep);
+
+ spin_lock_irqsave(&ep->udc->lock, flags);
+ list_del_init(&req->queue);
+ spin_unlock_irqrestore(&ep->udc->lock, flags);
+ kfree(req);
+}
+
+static int usbf_ep_set_halt(struct usb_ep *_ep, int halt)
+{
+ struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
+ unsigned long flags;
+ int ret;
+
+ if (ep->id == 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ep->udc->lock, flags);
+
+ if (!list_empty(&ep->queue)) {
+ ret = -EAGAIN;
+ goto end;
+ }
+
+ usbf_ep_stall(ep, halt);
+ if (!halt)
+ ep->is_wedged = 0;
+
+ ret = 0;
+end:
+ spin_unlock_irqrestore(&ep->udc->lock, flags);
+
+ return ret;
+}
+
+static int usbf_ep_set_wedge(struct usb_ep *_ep)
+{
+ struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
+ unsigned long flags;
+ int ret;
+
+ if (ep->id == 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ep->udc->lock, flags);
+ if (!list_empty(&ep->queue)) {
+ ret = -EAGAIN;
+ goto end;
+ }
+ usbf_ep_stall(ep, 1);
+ ep->is_wedged = 1;
+
+ ret = 0;
+end:
+ spin_unlock_irqrestore(&ep->udc->lock, flags);
+ return ret;
+}
+
+static struct usb_ep_ops usbf_ep_ops = {
+ .enable = usbf_ep_enable,
+ .disable = usbf_ep_disable,
+ .queue = usbf_ep_queue,
+ .dequeue = usbf_ep_dequeue,
+ .set_halt = usbf_ep_set_halt,
+ .set_wedge = usbf_ep_set_wedge,
+ .alloc_request = usbf_ep_alloc_request,
+ .free_request = usbf_ep_free_request,
+};
+
+static void usbf_ep0_req_complete(struct usb_ep *_ep, struct usb_request *_req)
+{
+}
+
+static void usbf_ep0_fill_req(struct usbf_ep *ep0, struct usbf_req *req,
+ void *buf, unsigned int length,
+ void (*complete)(struct usb_ep *_ep,
+ struct usb_request *_req))
+{
+ if (buf && length)
+ memcpy(ep0->udc->ep0_buf, buf, length);
+
+ req->req.buf = ep0->udc->ep0_buf;
+ req->req.length = length;
+ req->req.dma = 0;
+ req->req.zero = true;
+ req->req.complete = complete ? complete : usbf_ep0_req_complete;
+ req->req.status = -EINPROGRESS;
+ req->req.context = NULL;
+ req->req.actual = 0;
+}
+
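+/* Find the endpoint matching a wIndex endpoint address (number and
+ * direction). Address 0 always resolves to ep0; other endpoints are only
+ * found once they have been enabled (ep.desc set).
+ */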
+static struct usbf_ep *usbf_get_ep_by_addr(struct usbf_udc *udc, u8 address)
+{
+ struct usbf_ep *ep;
+ unsigned int i;
+
+ if ((address & USB_ENDPOINT_NUMBER_MASK) == 0)
+ return &udc->ep[0];
+
+ for (i = 1; i < ARRAY_SIZE(udc->ep); i++) {
+ ep = &udc->ep[i];
+
+ if (!ep->ep.desc)
+ continue;
+
+ if (ep->ep.desc->bEndpointAddress == address)
+ return ep;
+ }
+
+ return NULL;
+}
+
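+/* Hand a control request over to the gadget driver's setup() callback,
+ * dropping the UDC lock around the call. A USB_GADGET_DELAYED_STATUS
+ * return value arms the delayed status handling on ep0.
+ */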
+static int usbf_req_delegate(struct usbf_udc *udc,
+ const struct usb_ctrlrequest *ctrlrequest)
+{
+ int ret;
+
+ spin_unlock(&udc->lock);
+ ret = udc->driver->setup(&udc->gadget, ctrlrequest);
+ spin_lock(&udc->lock);
+ if (ret < 0) {
+ dev_dbg(udc->dev, "udc driver setup failed %d\n", ret);
+ return ret;
+ }
+ if (ret == USB_GADGET_DELAYED_STATUS) {
+ dev_dbg(udc->dev, "delayed status set\n");
+ udc->ep[0].delayed_status = 1;
+ return 0;
+ }
+ return ret;
+}
+
+static int usbf_req_get_status(struct usbf_udc *udc,
+ const struct usb_ctrlrequest *ctrlrequest)
+{
+ struct usbf_ep *ep;
+ u16 status_data;
+ u16 wLength;
+ u16 wValue;
+ u16 wIndex;
+
+ wValue = le16_to_cpu(ctrlrequest->wValue);
+ wLength = le16_to_cpu(ctrlrequest->wLength);
+ wIndex = le16_to_cpu(ctrlrequest->wIndex);
+
+ switch (ctrlrequest->bRequestType) {
+ case USB_DIR_IN | USB_RECIP_DEVICE | USB_TYPE_STANDARD:
+ if ((wValue != 0) || (wIndex != 0) || (wLength != 2))
+ goto delegate;
+
+ status_data = 0;
+ if (udc->gadget.is_selfpowered)
+ status_data |= BIT(USB_DEVICE_SELF_POWERED);
+
+ if (udc->is_remote_wakeup)
+ status_data |= BIT(USB_DEVICE_REMOTE_WAKEUP);
+
+ break;
+
+ case USB_DIR_IN | USB_RECIP_ENDPOINT | USB_TYPE_STANDARD:
+ if ((wValue != 0) || (wLength != 2))
+ goto delegate;
+
+ ep = usbf_get_ep_by_addr(udc, wIndex);
+ if (!ep)
+ return -EINVAL;
+
+ status_data = 0;
+ if (usbf_ep_is_stalled(ep))
+ status_data |= cpu_to_le16(1);
+ break;
+
+ case USB_DIR_IN | USB_RECIP_INTERFACE | USB_TYPE_STANDARD:
+ if ((wValue != 0) || (wLength != 2))
+ goto delegate;
+ status_data = 0;
+ break;
+
+ default:
+ goto delegate;
+ }
+
+ usbf_ep0_fill_req(&udc->ep[0], &udc->setup_reply, &status_data,
+ sizeof(status_data), NULL);
+ usbf_ep0_queue(&udc->ep[0], &udc->setup_reply, GFP_ATOMIC);
+
+ return 0;
+
+delegate:
+ return usbf_req_delegate(udc, ctrlrequest);
+}
+
+static int usbf_req_clear_set_feature(struct usbf_udc *udc,
+ const struct usb_ctrlrequest *ctrlrequest,
+ bool is_set)
+{
+ struct usbf_ep *ep;
+ u16 wLength;
+ u16 wValue;
+ u16 wIndex;
+
+ wValue = le16_to_cpu(ctrlrequest->wValue);
+ wLength = le16_to_cpu(ctrlrequest->wLength);
+ wIndex = le16_to_cpu(ctrlrequest->wIndex);
+
+ switch (ctrlrequest->bRequestType) {
+ case USB_DIR_OUT | USB_RECIP_DEVICE:
+ if ((wIndex != 0) || (wLength != 0))
+ goto delegate;
+
+ if (wValue != cpu_to_le16(USB_DEVICE_REMOTE_WAKEUP))
+ goto delegate;
+
+ udc->is_remote_wakeup = is_set;
+ break;
+
+ case USB_DIR_OUT | USB_RECIP_ENDPOINT:
+ if (wLength != 0)
+ goto delegate;
+
+ ep = usbf_get_ep_by_addr(udc, wIndex);
+ if (!ep)
+ return -EINVAL;
+
+ if ((ep->id == 0) && is_set) {
+ /* Endpoint 0 cannot be halted (stalled).
+ * Returning an error code leads to a STALL on this ep0
+ * but keeps the state machine in a consistent state.
+ */
+ return -EINVAL;
+ }
+ if (ep->is_wedged && !is_set) {
+ /* Ignore CLEAR_FEATURE(HALT ENDPOINT) when the
+ * endpoint is wedged
+ */
+ break;
+ }
+ usbf_ep_stall(ep, is_set);
+ break;
+
+ default:
+ goto delegate;
+ }
+
+ return 0;
+
+delegate:
+ return usbf_req_delegate(udc, ctrlrequest);
+}
+
+static void usbf_ep0_req_set_address_complete(struct usb_ep *_ep,
+ struct usb_request *_req)
+{
+ struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
+
+ /* The status phase of the SET_ADDRESS request is completed ... */
+ if (_req->status == 0) {
+ /* ... without any errors -> Signal the new state to the core. */
+ usb_gadget_set_state(&ep->udc->gadget, USB_STATE_ADDRESS);
+ }
+
+ /* In case of request failure, there is no need to revert the address
+ * value set to the hardware as the hardware will take care of the
+ * value only if the status stage is completed normally.
+ */
+}
+
+static int usbf_req_set_address(struct usbf_udc *udc,
+ const struct usb_ctrlrequest *ctrlrequest)
+{
+ u16 wLength;
+ u16 wValue;
+ u16 wIndex;
+ u32 addr;
+
+ wValue = le16_to_cpu(ctrlrequest->wValue);
+ wLength = le16_to_cpu(ctrlrequest->wLength);
+ wIndex = le16_to_cpu(ctrlrequest->wIndex);
+
+ if (ctrlrequest->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE))
+ goto delegate;
+
+ if ((wIndex != 0) || (wLength != 0) || (wValue > 127))
+ return -EINVAL;
+
+ addr = wValue;
+ /* The hardware will take care of this USB address after the status
+ * stage of the SET_ADDRESS request is completed normally.
+ * It is safe to write it now
+ */
+ usbf_reg_writel(udc, USBF_REG_USB_ADDRESS, USBF_USB_SET_USB_ADDR(addr));
+
+ /* Queue the status request */
+ usbf_ep0_fill_req(&udc->ep[0], &udc->setup_reply, NULL, 0,
+ usbf_ep0_req_set_address_complete);
+ usbf_ep0_queue(&udc->ep[0], &udc->setup_reply, GFP_ATOMIC);
+
+ return 0;
+
+delegate:
+ return usbf_req_delegate(udc, ctrlrequest);
+}
+
+static int usbf_req_set_configuration(struct usbf_udc *udc,
+ const struct usb_ctrlrequest *ctrlrequest)
+{
+ u16 wLength;
+ u16 wValue;
+ u16 wIndex;
+ int ret;
+
+ ret = usbf_req_delegate(udc, ctrlrequest);
+ if (ret)
+ return ret;
+
+ wValue = le16_to_cpu(ctrlrequest->wValue);
+ wLength = le16_to_cpu(ctrlrequest->wLength);
+ wIndex = le16_to_cpu(ctrlrequest->wIndex);
+
+ if ((ctrlrequest->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE)) ||
+ (wIndex != 0) || (wLength != 0)) {
+ /* No error detected by driver->setup() but it is not a USB 2.0
+ * Ch9 SET_CONFIGURATION.
+ * Nothing more to do
+ */
+ return 0;
+ }
+
+ if (wValue & 0x00FF) {
+ usbf_reg_bitset(udc, USBF_REG_USB_CONTROL, USBF_USB_CONF);
+ } else {
+ usbf_reg_bitclr(udc, USBF_REG_USB_CONTROL, USBF_USB_CONF);
+ /* Go back to Address State */
+ spin_unlock(&udc->lock);
+ usb_gadget_set_state(&udc->gadget, USB_STATE_ADDRESS);
+ spin_lock(&udc->lock);
+ }
+
+ return 0;
+}
+
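+/* Read the SETUP packet latched in the SETUP_DATA registers, prepare the
+ * EP0 state machine for the data or status stage, handle the standard
+ * chapter 9 requests locally and delegate everything else to the gadget
+ * driver.
+ */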
+static int usbf_handle_ep0_setup(struct usbf_ep *ep0)
+{
+ union {
+ struct usb_ctrlrequest ctrlreq;
+ u32 raw[2];
+ } crq;
+ struct usbf_udc *udc = ep0->udc;
+ int ret;
+
+ /* Read setup data (ie the USB control request) */
+ crq.raw[0] = usbf_reg_readl(udc, USBF_REG_SETUP_DATA0);
+ crq.raw[1] = usbf_reg_readl(udc, USBF_REG_SETUP_DATA1);
+
+ dev_dbg(ep0->udc->dev,
+ "ep0 req%02x.%02x, wValue 0x%04x, wIndex 0x%04x, wLength 0x%04x\n",
+ crq.ctrlreq.bRequestType, crq.ctrlreq.bRequest,
+ crq.ctrlreq.wValue, crq.ctrlreq.wIndex, crq.ctrlreq.wLength);
+
+ /* Set current EP0 state according to the received request */
+ if (crq.ctrlreq.wLength) {
+ if (crq.ctrlreq.bRequestType & USB_DIR_IN) {
+ udc->ep0state = EP0_IN_DATA_PHASE;
+ usbf_ep_reg_clrset(ep0, USBF_REG_EP0_CONTROL,
+ USBF_EP0_INAK,
+ USBF_EP0_INAK_EN);
+ ep0->is_in = 1;
+ } else {
+ udc->ep0state = EP0_OUT_DATA_PHASE;
+ usbf_ep_reg_bitclr(ep0, USBF_REG_EP0_CONTROL,
+ USBF_EP0_ONAK);
+ ep0->is_in = 0;
+ }
+ } else {
+ udc->ep0state = EP0_IN_STATUS_START_PHASE;
+ ep0->is_in = 1;
+ }
+
+ /* We start a new control transfer -> Clear the delayed status flag */
+ ep0->delayed_status = 0;
+
+ if ((crq.ctrlreq.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) {
+ /* This is not a USB standard request -> delegate */
+ goto delegate;
+ }
+
+ switch (crq.ctrlreq.bRequest) {
+ case USB_REQ_GET_STATUS:
+ ret = usbf_req_get_status(udc, &crq.ctrlreq);
+ break;
+
+ case USB_REQ_CLEAR_FEATURE:
+ ret = usbf_req_clear_set_feature(udc, &crq.ctrlreq, false);
+ break;
+
+ case USB_REQ_SET_FEATURE:
+ ret = usbf_req_clear_set_feature(udc, &crq.ctrlreq, true);
+ break;
+
+ case USB_REQ_SET_ADDRESS:
+ ret = usbf_req_set_address(udc, &crq.ctrlreq);
+ break;
+
+ case USB_REQ_SET_CONFIGURATION:
+ ret = usbf_req_set_configuration(udc, &crq.ctrlreq);
+ break;
+
+ default:
+ goto delegate;
+ }
+
+ return ret;
+
+delegate:
+ return usbf_req_delegate(udc, &crq.ctrlreq);
+}
+
+static int usbf_handle_ep0_data_status(struct usbf_ep *ep0,
+ const char *ep0state_name,
+ enum usbf_ep0state next_ep0state)
+{
+ struct usbf_udc *udc = ep0->udc;
+ int ret;
+
+ ret = usbf_ep_process_queue(ep0);
+ switch (ret) {
+ case -ENOENT:
+ dev_err(udc->dev,
+ "no request available for ep0 %s phase\n",
+ ep0state_name);
+ break;
+ case -EINPROGRESS:
+ /* More data needs to be processed */
+ ret = 0;
+ break;
+ case 0:
+ /* All requests in the queue are processed */
+ udc->ep0state = next_ep0state;
+ break;
+ default:
+ dev_err(udc->dev,
+ "process queue failed for ep0 %s phase (%d)\n",
+ ep0state_name, ret);
+ break;
+ }
+ return ret;
+}
+
+static int usbf_handle_ep0_out_status_start(struct usbf_ep *ep0)
+{
+ struct usbf_udc *udc = ep0->udc;
+ struct usbf_req *req;
+
+ usbf_ep_reg_clrset(ep0, USBF_REG_EP0_CONTROL,
+ USBF_EP0_ONAK,
+ USBF_EP0_PIDCLR);
+ ep0->is_in = 0;
+
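+	/* If the gadget driver did not queue a request for the status stage,
+	 * use the internal zero-length setup_reply request instead.
+	 */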
+ req = list_first_entry_or_null(&ep0->queue, struct usbf_req, queue);
+ if (!req) {
+ usbf_ep0_fill_req(ep0, &udc->setup_reply, NULL, 0, NULL);
+ usbf_ep0_queue(ep0, &udc->setup_reply, GFP_ATOMIC);
+ } else {
+ if (req->req.length) {
+ dev_err(udc->dev,
+ "queued request length %u for ep0 out status phase\n",
+ req->req.length);
+ }
+ }
+ udc->ep0state = EP0_OUT_STATUS_PHASE;
+ return 0;
+}
+
+static int usbf_handle_ep0_in_status_start(struct usbf_ep *ep0)
+{
+ struct usbf_udc *udc = ep0->udc;
+ struct usbf_req *req;
+ int ret;
+
+ usbf_ep_reg_clrset(ep0, USBF_REG_EP0_CONTROL,
+ USBF_EP0_INAK,
+ USBF_EP0_INAK_EN | USBF_EP0_PIDCLR);
+ ep0->is_in = 1;
+
+ /* Queue request for status if needed */
+ req = list_first_entry_or_null(&ep0->queue, struct usbf_req, queue);
+ if (!req) {
+ if (ep0->delayed_status) {
+ dev_dbg(ep0->udc->dev,
+ "EP0_IN_STATUS_START_PHASE ep0->delayed_status set\n");
+ udc->ep0state = EP0_IN_STATUS_PHASE;
+ return 0;
+ }
+
+ usbf_ep0_fill_req(ep0, &udc->setup_reply, NULL,
+ 0, NULL);
+ usbf_ep0_queue(ep0, &udc->setup_reply,
+ GFP_ATOMIC);
+
+ req = list_first_entry_or_null(&ep0->queue, struct usbf_req, queue);
+ } else {
+ if (req->req.length) {
+ dev_err(udc->dev,
+ "queued request length %u for ep0 in status phase\n",
+ req->req.length);
+ }
+ }
+
+ ret = usbf_ep0_pio_in(ep0, req);
+ if (ret != -EINPROGRESS) {
+ usbf_ep_req_done(ep0, req, ret);
+ udc->ep0state = EP0_IN_STATUS_END_PHASE;
+ return 0;
+ }
+
+ udc->ep0state = EP0_IN_STATUS_PHASE;
+ return 0;
+}
+
+static void usbf_ep0_interrupt(struct usbf_ep *ep0)
+{
+ struct usbf_udc *udc = ep0->udc;
+ u32 sts, prev_sts;
+ int prev_ep0state;
+ int ret;
+
+ ep0->status = usbf_ep_reg_readl(ep0, USBF_REG_EP0_STATUS);
+ usbf_ep_reg_writel(ep0, USBF_REG_EP0_STATUS, ~ep0->status);
+
+	dev_dbg(ep0->udc->dev, "ep0 status=0x%08x, enable=0x%08x, ctrl=0x%08x\n",
+ ep0->status,
+ usbf_ep_reg_readl(ep0, USBF_REG_EP0_INT_ENA),
+ usbf_ep_reg_readl(ep0, USBF_REG_EP0_CONTROL));
+
+ sts = ep0->status & (USBF_EP0_SETUP_INT | USBF_EP0_IN_INT | USBF_EP0_OUT_INT |
+ USBF_EP0_OUT_NULL_INT | USBF_EP0_STG_START_INT |
+ USBF_EP0_STG_END_INT);
+
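+	/* Walk the EP0 state machine until neither the state nor the pending
+	 * status bits change anymore.
+	 */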
+ ret = 0;
+ do {
+ dev_dbg(ep0->udc->dev, "udc->ep0state=%d\n", udc->ep0state);
+
+ prev_sts = sts;
+ prev_ep0state = udc->ep0state;
+ switch (udc->ep0state) {
+ case EP0_IDLE:
+ if (!(sts & USBF_EP0_SETUP_INT))
+ break;
+
+ sts &= ~USBF_EP0_SETUP_INT;
+ dev_dbg(ep0->udc->dev, "ep0 handle setup\n");
+ ret = usbf_handle_ep0_setup(ep0);
+ break;
+
+ case EP0_IN_DATA_PHASE:
+ if (!(sts & USBF_EP0_IN_INT))
+ break;
+
+ sts &= ~USBF_EP0_IN_INT;
+ dev_dbg(ep0->udc->dev, "ep0 handle in data phase\n");
+ ret = usbf_handle_ep0_data_status(ep0,
+ "in data", EP0_OUT_STATUS_START_PHASE);
+ break;
+
+ case EP0_OUT_STATUS_START_PHASE:
+ if (!(sts & USBF_EP0_STG_START_INT))
+ break;
+
+ sts &= ~USBF_EP0_STG_START_INT;
+ dev_dbg(ep0->udc->dev, "ep0 handle out status start phase\n");
+ ret = usbf_handle_ep0_out_status_start(ep0);
+ break;
+
+ case EP0_OUT_STATUS_PHASE:
+ if (!(sts & (USBF_EP0_OUT_INT | USBF_EP0_OUT_NULL_INT)))
+ break;
+
+ sts &= ~(USBF_EP0_OUT_INT | USBF_EP0_OUT_NULL_INT);
+ dev_dbg(ep0->udc->dev, "ep0 handle out status phase\n");
+ ret = usbf_handle_ep0_data_status(ep0,
+ "out status",
+ EP0_OUT_STATUS_END_PHASE);
+ break;
+
+ case EP0_OUT_STATUS_END_PHASE:
+ if (!(sts & (USBF_EP0_STG_END_INT | USBF_EP0_SETUP_INT)))
+ break;
+
+ sts &= ~USBF_EP0_STG_END_INT;
+ dev_dbg(ep0->udc->dev, "ep0 handle out status end phase\n");
+ udc->ep0state = EP0_IDLE;
+ break;
+
+ case EP0_OUT_DATA_PHASE:
+ if (!(sts & (USBF_EP0_OUT_INT | USBF_EP0_OUT_NULL_INT)))
+ break;
+
+ sts &= ~(USBF_EP0_OUT_INT | USBF_EP0_OUT_NULL_INT);
+ dev_dbg(ep0->udc->dev, "ep0 handle out data phase\n");
+ ret = usbf_handle_ep0_data_status(ep0,
+ "out data", EP0_IN_STATUS_START_PHASE);
+ break;
+
+ case EP0_IN_STATUS_START_PHASE:
+ if (!(sts & USBF_EP0_STG_START_INT))
+ break;
+
+ sts &= ~USBF_EP0_STG_START_INT;
+ dev_dbg(ep0->udc->dev, "ep0 handle in status start phase\n");
+ ret = usbf_handle_ep0_in_status_start(ep0);
+ break;
+
+ case EP0_IN_STATUS_PHASE:
+ if (!(sts & USBF_EP0_IN_INT))
+ break;
+
+ sts &= ~USBF_EP0_IN_INT;
+ dev_dbg(ep0->udc->dev, "ep0 handle in status phase\n");
+ ret = usbf_handle_ep0_data_status(ep0,
+ "in status", EP0_IN_STATUS_END_PHASE);
+ break;
+
+ case EP0_IN_STATUS_END_PHASE:
+ if (!(sts & (USBF_EP0_STG_END_INT | USBF_EP0_SETUP_INT)))
+ break;
+
+ sts &= ~USBF_EP0_STG_END_INT;
+ dev_dbg(ep0->udc->dev, "ep0 handle in status end\n");
+ udc->ep0state = EP0_IDLE;
+ break;
+
+ default:
+ udc->ep0state = EP0_IDLE;
+ break;
+ }
+
+ if (ret) {
+ dev_dbg(ep0->udc->dev, "ep0 failed (%d)\n", ret);
+ /* Failure -> stall.
+ * This stall state will be automatically cleared when
+ * the IP receives the next SETUP packet
+ */
+ usbf_ep_stall(ep0, true);
+
+ /* Remove anything that was pending */
+ usbf_ep_nuke(ep0, -EPROTO);
+
+ udc->ep0state = EP0_IDLE;
+ break;
+ }
+
+ } while ((prev_ep0state != udc->ep0state) || (prev_sts != sts));
+
+ dev_dbg(ep0->udc->dev, "ep0 done udc->ep0state=%d, status=0x%08x. next=0x%08x\n",
+ udc->ep0state, sts,
+ usbf_ep_reg_readl(ep0, USBF_REG_EP0_STATUS));
+}
+
+static void usbf_epn_process_queue(struct usbf_ep *epn)
+{
+ int ret;
+
+ ret = usbf_ep_process_queue(epn);
+ switch (ret) {
+ case -ENOENT:
+ dev_warn(epn->udc->dev, "ep%u %s, no request available\n",
+ epn->id, epn->is_in ? "in" : "out");
+ break;
+ case -EINPROGRESS:
+ /* More data needs to be processed */
+ ret = 0;
+ break;
+ case 0:
+ /* All requests in the queue are processed */
+ break;
+ default:
+ dev_err(epn->udc->dev, "ep%u %s, process queue failed (%d)\n",
+ epn->id, epn->is_in ? "in" : "out", ret);
+ break;
+ }
+
+ if (ret) {
+ dev_dbg(epn->udc->dev, "ep%u %s failed (%d)\n", epn->id,
+ epn->is_in ? "in" : "out", ret);
+ usbf_ep_stall(epn, true);
+ usbf_ep_nuke(epn, ret);
+ }
+}
+
+static void usbf_epn_interrupt(struct usbf_ep *epn)
+{
+ u32 sts;
+ u32 ena;
+
+ epn->status = usbf_ep_reg_readl(epn, USBF_REG_EPN_STATUS);
+ ena = usbf_ep_reg_readl(epn, USBF_REG_EPN_INT_ENA);
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS, ~(epn->status & ena));
+
+	dev_dbg(epn->udc->dev, "ep%u %s status=0x%08x, enable=0x%08x, ctrl=0x%08x\n",
+ epn->id, epn->is_in ? "in" : "out", epn->status, ena,
+ usbf_ep_reg_readl(epn, USBF_REG_EPN_CONTROL));
+
+ if (epn->disabled) {
+ dev_warn(epn->udc->dev, "ep%u %s, interrupt while disabled\n",
+ epn->id, epn->is_in ? "in" : "out");
+ return;
+ }
+
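+	/* Only handle the status bits whose interrupts are enabled */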
+ sts = epn->status & ena;
+
+ if (sts & (USBF_EPN_IN_END_INT | USBF_EPN_IN_INT)) {
+ sts &= ~(USBF_EPN_IN_END_INT | USBF_EPN_IN_INT);
+ dev_dbg(epn->udc->dev, "ep%u %s process queue (in interrupts)\n",
+ epn->id, epn->is_in ? "in" : "out");
+ usbf_epn_process_queue(epn);
+ }
+
+ if (sts & (USBF_EPN_OUT_END_INT | USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT)) {
+ sts &= ~(USBF_EPN_OUT_END_INT | USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT);
+ dev_dbg(epn->udc->dev, "ep%u %s process queue (out interrupts)\n",
+ epn->id, epn->is_in ? "in" : "out");
+ usbf_epn_process_queue(epn);
+ }
+
+ dev_dbg(epn->udc->dev, "ep%u %s done status=0x%08x. next=0x%08x\n",
+ epn->id, epn->is_in ? "in" : "out",
+ sts, usbf_ep_reg_readl(epn, USBF_REG_EPN_STATUS));
+}
+
+static void usbf_ep_reset(struct usbf_ep *ep)
+{
+ ep->status = 0;
+ /* Remove anything that was pending */
+ usbf_ep_nuke(ep, -ESHUTDOWN);
+}
+
+static void usbf_reset(struct usbf_udc *udc)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(udc->ep); i++) {
+ if (udc->ep[i].disabled)
+ continue;
+
+ usbf_ep_reset(&udc->ep[i]);
+ }
+
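+	/* Latch the bus speed reported by the hardware */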
+ if (usbf_reg_readl(udc, USBF_REG_USB_STATUS) & USBF_USB_SPEED_MODE)
+ udc->gadget.speed = USB_SPEED_HIGH;
+ else
+ udc->gadget.speed = USB_SPEED_FULL;
+
+ /* Remote wakeup feature must be disabled on USB bus reset */
+ udc->is_remote_wakeup = false;
+
+ /* Enable endpoint zero */
+ usbf_ep0_enable(&udc->ep[0]);
+
+ if (udc->driver) {
+ /* Signal the reset */
+ spin_unlock(&udc->lock);
+ usb_gadget_udc_reset(&udc->gadget, udc->driver);
+ spin_lock(&udc->lock);
+ }
+}
+
+static void usbf_driver_suspend(struct usbf_udc *udc)
+{
+ if (udc->is_usb_suspended) {
+ dev_dbg(udc->dev, "already suspended\n");
+ return;
+ }
+
+ dev_dbg(udc->dev, "do usb suspend\n");
+ udc->is_usb_suspended = true;
+
+ if (udc->driver && udc->driver->suspend) {
+ spin_unlock(&udc->lock);
+ udc->driver->suspend(&udc->gadget);
+ spin_lock(&udc->lock);
+
+		/* The datasheet says to set the SUSPEND bit of the USB_CONTROL
+		 * register when a USB bus suspend is detected.
+		 * This bit stops the clocks (for EPC, SIE and USBPHY), but
+		 * these clocks do not seem to be used only by the USB device;
+		 * some UARTs can be lost.
+		 * So, do not set the USB_CONTROL register SUSPEND bit.
+		 */
+ }
+}
+
+static void usbf_driver_resume(struct usbf_udc *udc)
+{
+ if (!udc->is_usb_suspended)
+ return;
+
+ dev_dbg(udc->dev, "do usb resume\n");
+ udc->is_usb_suspended = false;
+
+ if (udc->driver && udc->driver->resume) {
+ spin_unlock(&udc->lock);
+ udc->driver->resume(&udc->gadget);
+ spin_lock(&udc->lock);
+ }
+}
+
+static irqreturn_t usbf_epc_irq(int irq, void *_udc)
+{
+ struct usbf_udc *udc = (struct usbf_udc *)_udc;
+ unsigned long flags;
+ struct usbf_ep *ep;
+ u32 int_sts;
+ u32 int_en;
+ int i;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ int_en = usbf_reg_readl(udc, USBF_REG_USB_INT_ENA);
+ int_sts = usbf_reg_readl(udc, USBF_REG_USB_INT_STA) & int_en;
+ usbf_reg_writel(udc, USBF_REG_USB_INT_STA, ~int_sts);
+
+ dev_dbg(udc->dev, "int_sts=0x%08x\n", int_sts);
+
+ if (int_sts & USBF_USB_RSUM_INT) {
+ dev_dbg(udc->dev, "handle resume\n");
+ usbf_driver_resume(udc);
+ }
+
+ if (int_sts & USBF_USB_USB_RST_INT) {
+ dev_dbg(udc->dev, "handle bus reset\n");
+ usbf_driver_resume(udc);
+ usbf_reset(udc);
+ }
+
+ if (int_sts & USBF_USB_SPEED_MODE_INT) {
+ if (usbf_reg_readl(udc, USBF_REG_USB_STATUS) & USBF_USB_SPEED_MODE)
+ udc->gadget.speed = USB_SPEED_HIGH;
+ else
+ udc->gadget.speed = USB_SPEED_FULL;
+ dev_dbg(udc->dev, "handle speed change (%s)\n",
+ udc->gadget.speed == USB_SPEED_HIGH ? "High" : "Full");
+ }
+
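+	/* An endpoint interrupt implies bus activity: make sure the gadget
+	 * driver is resumed before servicing the endpoint.
+	 */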
+ if (int_sts & USBF_USB_EPN_INT(0)) {
+ usbf_driver_resume(udc);
+ usbf_ep0_interrupt(&udc->ep[0]);
+ }
+
+ for (i = 1; i < ARRAY_SIZE(udc->ep); i++) {
+ ep = &udc->ep[i];
+
+ if (int_sts & USBF_USB_EPN_INT(i)) {
+ usbf_driver_resume(udc);
+ usbf_epn_interrupt(ep);
+ }
+ }
+
+ if (int_sts & USBF_USB_SPND_INT) {
+ dev_dbg(udc->dev, "handle suspend\n");
+ usbf_driver_suspend(udc);
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t usbf_ahb_epc_irq(int irq, void *_udc)
+{
+ struct usbf_udc *udc = (struct usbf_udc *)_udc;
+ unsigned long flags;
+ struct usbf_ep *epn;
+ u32 sysbint;
+ void (*ep_action)(struct usbf_ep *epn);
+ int i;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /* Read and ack interrupts */
+ sysbint = usbf_reg_readl(udc, USBF_REG_AHBBINT);
+ usbf_reg_writel(udc, USBF_REG_AHBBINT, sysbint);
+
+ if ((sysbint & USBF_SYS_VBUS_INT) == USBF_SYS_VBUS_INT) {
+ if (usbf_reg_readl(udc, USBF_REG_EPCTR) & USBF_SYS_VBUS_LEVEL) {
+ dev_dbg(udc->dev, "handle vbus (1)\n");
+ spin_unlock(&udc->lock);
+ usb_udc_vbus_handler(&udc->gadget, true);
+ usb_gadget_set_state(&udc->gadget, USB_STATE_POWERED);
+ spin_lock(&udc->lock);
+ } else {
+ dev_dbg(udc->dev, "handle vbus (0)\n");
+ udc->is_usb_suspended = false;
+ spin_unlock(&udc->lock);
+ usb_udc_vbus_handler(&udc->gadget, false);
+ usb_gadget_set_state(&udc->gadget,
+ USB_STATE_NOTATTACHED);
+ spin_lock(&udc->lock);
+ }
+ }
+
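+	/* On DMA completion, run the action registered by the endpoint
+	 * handler for the end of the transfer.
+	 */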
+ for (i = 1; i < ARRAY_SIZE(udc->ep); i++) {
+ if (sysbint & USBF_SYS_DMA_ENDINT_EPN(i)) {
+ epn = &udc->ep[i];
+ dev_dbg(epn->udc->dev,
+ "ep%u handle DMA complete. action=%ps\n",
+ epn->id, epn->bridge_on_dma_end);
+ ep_action = epn->bridge_on_dma_end;
+ if (ep_action) {
+ epn->bridge_on_dma_end = NULL;
+ ep_action(epn);
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int usbf_udc_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
+ unsigned long flags;
+
+ dev_info(udc->dev, "start (driver '%s')\n", driver->driver.name);
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /* hook up the driver */
+ udc->driver = driver;
+
+ /* Enable VBUS interrupt */
+ usbf_reg_writel(udc, USBF_REG_AHBBINTEN, USBF_SYS_VBUS_INTEN);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static int usbf_udc_stop(struct usb_gadget *gadget)
+{
+ struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /* Disable VBUS interrupt */
+ usbf_reg_writel(udc, USBF_REG_AHBBINTEN, 0);
+
+ udc->driver = NULL;
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ dev_info(udc->dev, "stopped\n");
+
+ return 0;
+}
+
+static int usbf_get_frame(struct usb_gadget *gadget)
+{
+ struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
+
+ return USBF_USB_GET_FRAME(usbf_reg_readl(udc, USBF_REG_USB_ADDRESS));
+}
+
+static void usbf_attach(struct usbf_udc *udc)
+{
+	/* Enable the USB signal to the Function PHY and pull up the D+ signal.
+	 * Disable endpoint 0; it will be automatically enabled when a USB
+	 * reset is received.
+	 * Disable the other endpoints.
+	 */
+ usbf_reg_clrset(udc, USBF_REG_USB_CONTROL,
+ USBF_USB_CONNECTB | USBF_USB_DEFAULT | USBF_USB_CONF,
+ USBF_USB_PUE2);
+
+ /* Enable reset and mode change interrupts */
+ usbf_reg_bitset(udc, USBF_REG_USB_INT_ENA,
+ USBF_USB_USB_RST_EN | USBF_USB_SPEED_MODE_EN | USBF_USB_RSUM_EN | USBF_USB_SPND_EN);
+}
+
+static void usbf_detach(struct usbf_udc *udc)
+{
+ int i;
+
+ /* Disable interrupts */
+ usbf_reg_writel(udc, USBF_REG_USB_INT_ENA, 0);
+
+ for (i = 0; i < ARRAY_SIZE(udc->ep); i++) {
+ if (udc->ep[i].disabled)
+ continue;
+
+ usbf_ep_reset(&udc->ep[i]);
+ }
+
+	/* Disable the USB signal to the Function PHY and do not pull up the
+	 * D+ signal.
+	 * Disable endpoint 0 and the other endpoints.
+	 */
+ usbf_reg_clrset(udc, USBF_REG_USB_CONTROL,
+ USBF_USB_PUE2 | USBF_USB_DEFAULT | USBF_USB_CONF,
+ USBF_USB_CONNECTB);
+}
+
+static int usbf_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
+ unsigned long flags;
+
+ dev_dbg(udc->dev, "pullup %d\n", is_on);
+
+ spin_lock_irqsave(&udc->lock, flags);
+ if (is_on)
+ usbf_attach(udc);
+ else
+ usbf_detach(udc);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static int usbf_udc_set_selfpowered(struct usb_gadget *gadget,
+ int is_selfpowered)
+{
+ struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ gadget->is_selfpowered = (is_selfpowered != 0);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static int usbf_udc_wakeup(struct usb_gadget *gadget)
+{
+ struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ if (!udc->is_remote_wakeup) {
+ dev_dbg(udc->dev, "remote wakeup not allowed\n");
+ ret = -EINVAL;
+ goto end;
+ }
+
+ dev_dbg(udc->dev, "do wakeup\n");
+
+ /* Send the resume signal */
+ usbf_reg_bitset(udc, USBF_REG_USB_CONTROL, USBF_USB_RSUM_IN);
+ usbf_reg_bitclr(udc, USBF_REG_USB_CONTROL, USBF_USB_RSUM_IN);
+
+ ret = 0;
+end:
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return ret;
+}
+
+static struct usb_gadget_ops usbf_gadget_ops = {
+ .get_frame = usbf_get_frame,
+ .pullup = usbf_pullup,
+ .udc_start = usbf_udc_start,
+ .udc_stop = usbf_udc_stop,
+ .set_selfpowered = usbf_udc_set_selfpowered,
+ .wakeup = usbf_udc_wakeup,
+};
+
+static int usbf_epn_check(struct usbf_ep *epn)
+{
+ const char *type_txt;
+ const char *buf_txt;
+ int ret = 0;
+ u32 ctrl;
+
+ ctrl = usbf_ep_reg_readl(epn, USBF_REG_EPN_CONTROL);
+
+ switch (ctrl & USBF_EPN_MODE_MASK) {
+ case USBF_EPN_MODE_BULK:
+ type_txt = "bulk";
+ if (epn->ep.caps.type_control || epn->ep.caps.type_iso ||
+ !epn->ep.caps.type_bulk || epn->ep.caps.type_int) {
+ dev_err(epn->udc->dev,
+ "ep%u caps mismatch, bulk expected\n", epn->id);
+ ret = -EINVAL;
+ }
+ break;
+ case USBF_EPN_MODE_INTR:
+ type_txt = "intr";
+ if (epn->ep.caps.type_control || epn->ep.caps.type_iso ||
+ epn->ep.caps.type_bulk || !epn->ep.caps.type_int) {
+ dev_err(epn->udc->dev,
+ "ep%u caps mismatch, int expected\n", epn->id);
+ ret = -EINVAL;
+ }
+ break;
+ case USBF_EPN_MODE_ISO:
+ type_txt = "iso";
+ if (epn->ep.caps.type_control || !epn->ep.caps.type_iso ||
+ epn->ep.caps.type_bulk || epn->ep.caps.type_int) {
+ dev_err(epn->udc->dev,
+ "ep%u caps mismatch, iso expected\n", epn->id);
+ ret = -EINVAL;
+ }
+ break;
+ default:
+ type_txt = "unknown";
+ dev_err(epn->udc->dev, "ep%u unknown type\n", epn->id);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ctrl & USBF_EPN_BUF_TYPE_DOUBLE) {
+ buf_txt = "double";
+ if (!usbf_ep_info[epn->id].is_double) {
+ dev_err(epn->udc->dev,
+ "ep%u buffer mismatch, double expected\n",
+ epn->id);
+ ret = -EINVAL;
+ }
+ } else {
+ buf_txt = "single";
+ if (usbf_ep_info[epn->id].is_double) {
+ dev_err(epn->udc->dev,
+ "ep%u buffer mismatch, single expected\n",
+ epn->id);
+ ret = -EINVAL;
+ }
+ }
+
+ dev_dbg(epn->udc->dev, "ep%u (%s) %s, %s buffer %u, checked %s\n",
+ epn->id, epn->ep.name, type_txt, buf_txt,
+ epn->ep.maxpacket_limit, ret ? "failed" : "ok");
+
+ return ret;
+}
+
+static int usbf_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct usbf_udc *udc;
+ struct usbf_ep *ep;
+ unsigned int i;
+ int irq;
+ int ret;
+
+ udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
+ if (!udc)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, udc);
+
+ udc->dev = dev;
+ spin_lock_init(&udc->lock);
+
+ udc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(udc->regs))
+ return PTR_ERR(udc->regs);
+
+ devm_pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ dev_info(dev, "USBF version: %08x\n",
+ usbf_reg_readl(udc, USBF_REG_USBSSVER));
+
+	/* Resetting the PLL is handled via the clock driver as it shares
+	 * registers with the USB host.
+	 */
+ usbf_reg_bitclr(udc, USBF_REG_EPCTR, USBF_SYS_EPC_RST);
+
+	/* Gadget defaults; the speed is updated on bus reset and speed-mode change */
+ udc->gadget.speed = USB_SPEED_FULL;
+ udc->gadget.max_speed = USB_SPEED_HIGH;
+ udc->gadget.ops = &usbf_gadget_ops;
+
+ udc->gadget.name = dev->driver->name;
+ udc->gadget.dev.parent = dev;
+ udc->gadget.ep0 = &udc->ep[0].ep;
+
+	/* The hardware DMA controller needs DMA addresses aligned on 32 bits.
+	 * A fallback to PIO is done if DMA addresses are not aligned.
+	 */
+ udc->gadget.quirk_avoids_skb_reserve = 1;
+
+ INIT_LIST_HEAD(&udc->gadget.ep_list);
+	/* We have a canned request structure to allow sending packets as a
+	 * reply to GET_STATUS requests.
+	 */
+ INIT_LIST_HEAD(&udc->setup_reply.queue);
+
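+	/* Initialize only the endpoints reported as available by the
+	 * USBSSCONF register.
+	 */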
+ for (i = 0; i < ARRAY_SIZE(udc->ep); i++) {
+ ep = &udc->ep[i];
+
+ if (!(usbf_reg_readl(udc, USBF_REG_USBSSCONF) &
+ USBF_SYS_EP_AVAILABLE(i))) {
+ continue;
+ }
+
+ INIT_LIST_HEAD(&ep->queue);
+
+ ep->id = i;
+ ep->disabled = 1;
+ ep->udc = udc;
+ ep->ep.ops = &usbf_ep_ops;
+ ep->ep.name = usbf_ep_info[i].name;
+ ep->ep.caps = usbf_ep_info[i].caps;
+ usb_ep_set_maxpacket_limit(&ep->ep,
+ usbf_ep_info[i].maxpacket_limit);
+
+ if (ep->id == 0) {
+ ep->regs = ep->udc->regs + USBF_BASE_EP0;
+ } else {
+ ep->regs = ep->udc->regs + USBF_BASE_EPN(ep->id - 1);
+ ret = usbf_epn_check(ep);
+ if (ret)
+ return ret;
+ if (usbf_reg_readl(udc, USBF_REG_USBSSCONF) &
+ USBF_SYS_DMA_AVAILABLE(i)) {
+ ep->dma_regs = ep->udc->regs +
+ USBF_BASE_DMA_EPN(ep->id - 1);
+ }
+ list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
+ }
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+ ret = devm_request_irq(dev, irq, usbf_epc_irq, 0, "usbf-epc", udc);
+ if (ret) {
+ dev_err(dev, "cannot request irq %d err %d\n", irq, ret);
+ return ret;
+ }
+
+ irq = platform_get_irq(pdev, 1);
+ if (irq < 0)
+ return irq;
+ ret = devm_request_irq(dev, irq, usbf_ahb_epc_irq, 0, "usbf-ahb-epc", udc);
+ if (ret) {
+ dev_err(dev, "cannot request irq %d err %d\n", irq, ret);
+ return ret;
+ }
+
+ usbf_reg_bitset(udc, USBF_REG_AHBMCTR, USBF_SYS_WBURST_TYPE);
+
+ usbf_reg_bitset(udc, USBF_REG_USB_CONTROL,
+ USBF_USB_INT_SEL | USBF_USB_SOF_RCV | USBF_USB_SOF_CLK_MODE);
+
+ ret = usb_add_gadget_udc(dev, &udc->gadget);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int usbf_remove(struct platform_device *pdev)
+{
+ struct usbf_udc *udc = platform_get_drvdata(pdev);
+
+ usb_del_gadget_udc(&udc->gadget);
+
+ pm_runtime_put(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id usbf_match[] = {
+ { .compatible = "renesas,rzn1-usbf" },
+ {} /* sentinel */
+};
+MODULE_DEVICE_TABLE(of, usbf_match);
+
+static struct platform_driver udc_driver = {
+ .driver = {
+ .name = "usbf_renesas",
+ .owner = THIS_MODULE,
+ .of_match_table = usbf_match,
+ },
+ .probe = usbf_probe,
+ .remove = usbf_remove,
+};
+
+module_platform_driver(udc_driver);
+
+MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
+MODULE_DESCRIPTION("Renesas R-Car Gen3 & RZ/N1 USB Function driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/udc/rzv2m_usb3drd.c b/drivers/usb/gadget/udc/rzv2m_usb3drd.c
new file mode 100644
index 000000000000..3c8bbf843038
--- /dev/null
+++ b/drivers/usb/gadget/udc/rzv2m_usb3drd.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/V2M USB3DRD driver
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ */
+
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/usb/rzv2m_usb3drd.h>
+
+#define USB_PERI_DRD_CON 0x000
+
+#define USB_PERI_DRD_CON_PERI_RST BIT(31)
+#define USB_PERI_DRD_CON_HOST_RST BIT(30)
+#define USB_PERI_DRD_CON_PERI_CON BIT(24)
+
+static void rzv2m_usb3drd_set_bit(struct rzv2m_usb3drd *usb3, u32 bits,
+ u32 offs)
+{
+ u32 val = readl(usb3->reg + offs);
+
+ val |= bits;
+ writel(val, usb3->reg + offs);
+}
+
+static void rzv2m_usb3drd_clear_bit(struct rzv2m_usb3drd *usb3, u32 bits,
+ u32 offs)
+{
+ u32 val = readl(usb3->reg + offs);
+
+ val &= ~bits;
+ writel(val, usb3->reg + offs);
+}
+
+void rzv2m_usb3drd_reset(struct device *dev, bool host)
+{
+ struct rzv2m_usb3drd *usb3 = dev_get_drvdata(dev);
+
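+	/* Switch the DRD between host and peripheral roles: hold the unused
+	 * side in reset and release the selected one.
+	 */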
+ if (host) {
+ rzv2m_usb3drd_clear_bit(usb3, USB_PERI_DRD_CON_PERI_CON,
+ USB_PERI_DRD_CON);
+ rzv2m_usb3drd_clear_bit(usb3, USB_PERI_DRD_CON_HOST_RST,
+ USB_PERI_DRD_CON);
+ rzv2m_usb3drd_set_bit(usb3, USB_PERI_DRD_CON_PERI_RST,
+ USB_PERI_DRD_CON);
+ } else {
+ rzv2m_usb3drd_set_bit(usb3, USB_PERI_DRD_CON_PERI_CON,
+ USB_PERI_DRD_CON);
+ rzv2m_usb3drd_set_bit(usb3, USB_PERI_DRD_CON_HOST_RST,
+ USB_PERI_DRD_CON);
+ rzv2m_usb3drd_clear_bit(usb3, USB_PERI_DRD_CON_PERI_RST,
+ USB_PERI_DRD_CON);
+ }
+}
+EXPORT_SYMBOL_GPL(rzv2m_usb3drd_reset);
+
+static int rzv2m_usb3drd_remove(struct platform_device *pdev)
+{
+ struct rzv2m_usb3drd *usb3 = platform_get_drvdata(pdev);
+
+ of_platform_depopulate(usb3->dev);
+ pm_runtime_put(usb3->dev);
+ pm_runtime_disable(&pdev->dev);
+ reset_control_assert(usb3->drd_rstc);
+
+ return 0;
+}
+
+static int rzv2m_usb3drd_probe(struct platform_device *pdev)
+{
+ struct rzv2m_usb3drd *usb3;
+ int ret;
+
+ usb3 = devm_kzalloc(&pdev->dev, sizeof(*usb3), GFP_KERNEL);
+ if (!usb3)
+ return -ENOMEM;
+
+ usb3->dev = &pdev->dev;
+
+ usb3->drd_irq = platform_get_irq_byname(pdev, "drd");
+ if (usb3->drd_irq < 0)
+ return usb3->drd_irq;
+
+ usb3->reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(usb3->reg))
+ return PTR_ERR(usb3->reg);
+
+ platform_set_drvdata(pdev, usb3);
+
+ usb3->drd_rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(usb3->drd_rstc))
+ return dev_err_probe(&pdev->dev, PTR_ERR(usb3->drd_rstc),
+ "failed to get drd reset");
+
+ reset_control_deassert(usb3->drd_rstc);
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_resume_and_get(usb3->dev);
+ if (ret)
+ goto err_rst;
+
+ ret = of_platform_populate(usb3->dev->of_node, NULL, NULL, usb3->dev);
+ if (ret)
+ goto err_pm;
+
+ return 0;
+
+err_pm:
+ pm_runtime_put(usb3->dev);
+
+err_rst:
+ pm_runtime_disable(&pdev->dev);
+ reset_control_assert(usb3->drd_rstc);
+ return ret;
+}
+
+static const struct of_device_id rzv2m_usb3drd_of_match[] = {
+ { .compatible = "renesas,rzv2m-usb3drd", },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rzv2m_usb3drd_of_match);
+
+static struct platform_driver rzv2m_usb3drd_driver = {
+ .driver = {
+ .name = "rzv2m-usb3drd",
+ .of_match_table = of_match_ptr(rzv2m_usb3drd_of_match),
+ },
+ .probe = rzv2m_usb3drd_probe,
+ .remove = rzv2m_usb3drd_remove,
+};
+module_platform_driver(rzv2m_usb3drd_driver);
+
+MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>");
+MODULE_DESCRIPTION("Renesas RZ/V2M USB3DRD driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:rzv2m_usb3drd");
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
index 76919d7570d2..2b71b33725f1 100644
--- a/drivers/usb/gadget/udc/tegra-xudc.c
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -796,21 +796,16 @@ static int tegra_xudc_get_phy_index(struct tegra_xudc *xudc,
return -1;
}
-static int tegra_xudc_vbus_notify(struct notifier_block *nb,
- unsigned long action, void *data)
+static void tegra_xudc_update_data_role(struct tegra_xudc *xudc,
+ struct usb_phy *usbphy)
{
- struct tegra_xudc *xudc = container_of(nb, struct tegra_xudc,
- vbus_nb);
- struct usb_phy *usbphy = (struct usb_phy *)data;
int phy_index;
- dev_dbg(xudc->dev, "%s(): event is %d\n", __func__, usbphy->last_event);
-
if ((xudc->device_mode && usbphy->last_event == USB_EVENT_VBUS) ||
(!xudc->device_mode && usbphy->last_event != USB_EVENT_VBUS)) {
dev_dbg(xudc->dev, "Same role(%d) received. Ignore",
xudc->device_mode);
- return NOTIFY_OK;
+ return;
}
xudc->device_mode = (usbphy->last_event == USB_EVENT_VBUS) ? true :
@@ -826,6 +821,18 @@ static int tegra_xudc_vbus_notify(struct notifier_block *nb,
xudc->curr_usbphy = usbphy;
schedule_work(&xudc->usb_role_sw_work);
}
+}
+
+static int tegra_xudc_vbus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct tegra_xudc *xudc = container_of(nb, struct tegra_xudc,
+ vbus_nb);
+ struct usb_phy *usbphy = (struct usb_phy *)data;
+
+ dev_dbg(xudc->dev, "%s(): event is %d\n", __func__, usbphy->last_event);
+
+ tegra_xudc_update_data_role(xudc, usbphy);
return NOTIFY_OK;
}
@@ -3521,7 +3528,7 @@ static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
/* Get usb-phy, if utmi phy is available */
xudc->usbphy[i] = devm_usb_get_phy_by_node(xudc->dev,
xudc->utmi_phy[i]->dev.of_node,
- &xudc->vbus_nb);
+ NULL);
if (IS_ERR(xudc->usbphy[i])) {
err = PTR_ERR(xudc->usbphy[i]);
dev_err_probe(xudc->dev, err,
@@ -3660,6 +3667,19 @@ static struct tegra_xudc_soc tegra194_xudc_soc_data = {
.has_ipfs = false,
};
+static struct tegra_xudc_soc tegra234_xudc_soc_data = {
+ .clock_names = tegra186_xudc_clock_names,
+ .num_clks = ARRAY_SIZE(tegra186_xudc_clock_names),
+ .num_phys = 4,
+ .u1_enable = true,
+ .u2_enable = true,
+ .lpm_enable = true,
+ .invalid_seq_num = false,
+ .pls_quirk = false,
+ .port_reset_quirk = false,
+ .has_ipfs = false,
+};
+
static const struct of_device_id tegra_xudc_of_match[] = {
{
.compatible = "nvidia,tegra210-xudc",
@@ -3673,6 +3693,10 @@ static const struct of_device_id tegra_xudc_of_match[] = {
.compatible = "nvidia,tegra194-xudc",
.data = &tegra194_xudc_soc_data
},
+ {
+ .compatible = "nvidia,tegra234-xudc",
+ .data = &tegra234_xudc_soc_data
+ },
{ }
};
MODULE_DEVICE_TABLE(of, tegra_xudc_of_match);
@@ -3856,6 +3880,14 @@ static int tegra_xudc_probe(struct platform_device *pdev)
goto free_eps;
}
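+	/* Register the VBUS notifier for each available UTMI PHY and sync
+	 * the initial data role.
+	 */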
+ for (i = 0; i < xudc->soc->num_phys; i++) {
+ if (!xudc->usbphy[i])
+ continue;
+
+ usb_register_notifier(xudc->usbphy[i], &xudc->vbus_nb);
+ tegra_xudc_update_data_role(xudc, xudc->usbphy[i]);
+ }
+
return 0;
free_eps:
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index a97923897c8e..eacb603ad1b2 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -53,7 +53,6 @@ config USB_XHCI_PCI_RENESAS
config USB_XHCI_PLATFORM
tristate "Generic xHCI driver for a platform device"
- select USB_XHCI_RCAR if ARCH_RENESAS
help
Adds an xHCI host driver for a generic platform device, which
provides a memory space and an irq.
@@ -91,10 +90,20 @@ config USB_XHCI_RCAR
tristate "xHCI support for Renesas R-Car SoCs"
depends on USB_XHCI_PLATFORM
depends on ARCH_RENESAS || COMPILE_TEST
+ default ARCH_RENESAS
help
Say 'Y' to enable the support for the xHCI host controller
found in Renesas R-Car ARM SoCs.
+config USB_XHCI_RZV2M
+ bool "xHCI support for Renesas RZ/V2M SoC"
+ depends on USB_XHCI_RCAR
+ depends on ARCH_R9A09G011 || COMPILE_TEST
+ depends on USB_RZV2M_USB3DRD=y || (USB_RZV2M_USB3DRD=USB_XHCI_RCAR)
+ help
+ Say 'Y' to enable the support for the xHCI host controller
+ found in Renesas RZ/V2M SoC.
+
config USB_XHCI_TEGRA
tristate "xHCI support for NVIDIA Tegra SoCs"
depends on PHY_TEGRA_XUSB
@@ -535,17 +544,6 @@ config USB_OHCI_HCD_SSB
If unsure, say N.
-config USB_OHCI_SH
- bool "OHCI support for SuperH USB controller (DEPRECATED)"
- depends on SUPERH || COMPILE_TEST
- select USB_OHCI_HCD_PLATFORM
- help
- This option is deprecated now and the driver was removed, use
- USB_OHCI_HCD_PLATFORM instead.
-
- Enables support for the on-chip OHCI controller on the SuperH.
- If you use the PCI OHCI controller, this option is not necessary.
-
config USB_OHCI_EXYNOS
tristate "OHCI support for Samsung S5P/Exynos SoC Series"
depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 6d8ee264c9b2..5a13712f367d 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -25,14 +25,13 @@ xhci-plat-hcd-y := xhci-plat.o
ifneq ($(CONFIG_USB_XHCI_MVEBU), )
xhci-plat-hcd-y += xhci-mvebu.o
endif
-ifneq ($(CONFIG_USB_XHCI_RCAR), )
- xhci-plat-hcd-y += xhci-rcar.o
-endif
-
ifneq ($(CONFIG_DEBUG_FS),)
xhci-hcd-y += xhci-debugfs.o
endif
+xhci-rcar-hcd-y += xhci-rcar.o
+xhci-rcar-hcd-$(CONFIG_USB_XHCI_RZV2M) += xhci-rzv2m.o
+
obj-$(CONFIG_USB_PCI) += pci-quirks.o
obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o
@@ -72,6 +71,7 @@ obj-$(CONFIG_USB_XHCI_PCI) += xhci-pci.o
obj-$(CONFIG_USB_XHCI_PCI_RENESAS) += xhci-pci-renesas.o
obj-$(CONFIG_USB_XHCI_PLATFORM) += xhci-plat-hcd.o
obj-$(CONFIG_USB_XHCI_HISTB) += xhci-histb.o
+obj-$(CONFIG_USB_XHCI_RCAR) += xhci-rcar-hcd.o
obj-$(CONFIG_USB_XHCI_MTK) += xhci-mtk-hcd.o
obj-$(CONFIG_USB_XHCI_TEGRA) += xhci-tegra.o
obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 38d06e5abfbb..d74fa5ba845b 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -712,7 +712,7 @@ static struct platform_driver ehci_fsl_driver = {
.remove = fsl_ehci_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
- .name = "fsl-ehci",
+ .name = DRV_NAME,
.pm = EHCI_FSL_PM_OPS,
},
};
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index e5df17522892..46c6a152b865 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -112,8 +112,7 @@ static struct platform_device *fsl_usb2_device_register(
goto error;
}
- pdev->dev.of_node = ofdev->dev.of_node;
- pdev->dev.of_node_reused = true;
+ device_set_of_node_from_dev(&pdev->dev, &ofdev->dev);
retval = platform_device_add(pdev);
if (retval)
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index 4f564d71bb0b..49ae01487af4 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -1205,7 +1205,7 @@ static void create_debug_file(struct isp116x *isp116x)
static void remove_debug_file(struct isp116x *isp116x)
{
- debugfs_remove(debugfs_lookup(hcd_name, usb_debug_root));
+ debugfs_lookup_and_remove(hcd_name, usb_debug_root);
}
#else
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 0e14d1d07709..b0da143ef4be 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -2170,7 +2170,7 @@ static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
{
- debugfs_remove(debugfs_lookup("isp1362", usb_debug_root));
+ debugfs_lookup_and_remove("isp1362", usb_debug_root);
}
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index 352e3ac2b377..28d1524ee2fa 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -72,12 +72,6 @@
#define USB_MAX_FRAME_NUMBER 0x7ff
#define USB_MAX_RETRIES 3 /* # of retries before error is reported */
-/*
- * Max. # of times we're willing to retransmit a request immediately in
- * resposne to a NAK. Afterwards, we fall back on trying once a frame.
- */
-#define NAK_MAX_FAST_RETRANSMITS 2
-
#define POWER_BUDGET 500 /* in mA; use 8 for low-power port testing */
/* Port-change mask: */
@@ -924,11 +918,8 @@ max3421_handle_error(struct usb_hcd *hcd, u8 hrsl)
* Device wasn't ready for data or has no data
* available: retry the packet again.
*/
- if (max3421_ep->naks++ < NAK_MAX_FAST_RETRANSMITS) {
- max3421_next_transfer(hcd, 1);
- switch_sndfifo = 0;
- } else
- max3421_slow_retransmit(hcd);
+ max3421_next_transfer(hcd, 1);
+ switch_sndfifo = 0;
break;
}
if (switch_sndfifo)
@@ -1436,7 +1427,7 @@ max3421_spi_thread(void *dev_id)
* use spi_wr_buf().
*/
for (i = 0; i < ARRAY_SIZE(max3421_hcd->iopins); ++i) {
- u8 val = spi_rd8(hcd, MAX3421_REG_IOPINS1);
+ u8 val = spi_rd8(hcd, MAX3421_REG_IOPINS1 + i);
val = ((val & 0xf0) |
(max3421_hcd->iopins[i] & 0x0f));
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index d206bd95c7bb..b8b90eec9107 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -1501,7 +1501,7 @@ static void create_debug_file(struct sl811 *sl811)
static void remove_debug_file(struct sl811 *sl811)
{
- debugfs_remove(debugfs_lookup("sl811h", usb_debug_root));
+ debugfs_lookup_and_remove("sl811h", usb_debug_root);
}
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index c22b51af83fc..7cdc2fa7c28f 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -536,8 +536,8 @@ static void release_uhci(struct uhci_hcd *uhci)
uhci->is_initialized = 0;
spin_unlock_irq(&uhci->lock);
- debugfs_remove(debugfs_lookup(uhci_to_hcd(uhci)->self.bus_name,
- uhci_debugfs_root));
+ debugfs_lookup_and_remove(uhci_to_hcd(uhci)->self.bus_name,
+ uhci_debugfs_root);
for (i = 0; i < UHCI_NUM_SKELQH; i++)
uhci_free_qh(uhci, uhci->skelqh[i]);
@@ -700,7 +700,7 @@ err_alloc_frame_cpu:
uhci->frame, uhci->frame_dma_handle);
err_alloc_frame:
- debugfs_remove(debugfs_lookup(hcd->self.bus_name, uhci_debugfs_root));
+ debugfs_lookup_and_remove(hcd->self.bus_name, uhci_debugfs_root);
return retval;
}
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
index dc832ddf7033..0bc7fe11f749 100644
--- a/drivers/usb/host/xhci-debugfs.c
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -692,7 +692,7 @@ void xhci_debugfs_init(struct xhci_hcd *xhci)
"command-ring",
xhci->debugfs_root);
- xhci_debugfs_create_ring_dir(xhci, &xhci->event_ring,
+ xhci_debugfs_create_ring_dir(xhci, &xhci->interrupter->event_ring,
"event-ring",
xhci->debugfs_root);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 94c94db3faf6..0054d02239e2 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -578,13 +578,16 @@ void xhci_ring_device(struct xhci_hcd *xhci, int slot_id)
return;
}
-static void xhci_disable_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
- u16 wIndex, __le32 __iomem *addr, u32 port_status)
+static void xhci_disable_port(struct xhci_hcd *xhci, struct xhci_port *port)
{
+ struct usb_hcd *hcd;
+ u32 portsc;
+
+ hcd = port->rhub->hcd;
+
/* Don't allow the USB core to disable SuperSpeed ports. */
if (hcd->speed >= HCD_USB3) {
- xhci_dbg(xhci, "Ignoring request to disable "
- "SuperSpeed port.\n");
+ xhci_dbg(xhci, "Ignoring request to disable SuperSpeed port.\n");
return;
}
@@ -594,11 +597,15 @@ static void xhci_disable_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
return;
}
+ portsc = readl(port->addr);
+ portsc = xhci_port_state_to_neutral(portsc);
+
/* Write 1 to disable the port */
- writel(port_status | PORT_PE, addr);
- port_status = readl(addr);
+ writel(portsc | PORT_PE, port->addr);
+
+ portsc = readl(port->addr);
xhci_dbg(xhci, "disable port %d-%d, portsc: 0x%x\n",
- hcd->self.busnum, wIndex + 1, port_status);
+ hcd->self.busnum, port->hcd_portnum + 1, portsc);
}
static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
@@ -666,20 +673,18 @@ struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd)
* It will release and re-aquire the lock while calling ACPI
* method.
*/
-static void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
- u16 index, bool on, unsigned long *flags)
+static void xhci_set_port_power(struct xhci_hcd *xhci, struct xhci_port *port,
+ bool on, unsigned long *flags)
__must_hold(&xhci->lock)
{
- struct xhci_hub *rhub;
- struct xhci_port *port;
+ struct usb_hcd *hcd;
u32 temp;
- rhub = xhci_get_rhub(hcd);
- port = rhub->ports[index];
+ hcd = port->rhub->hcd;
temp = readl(port->addr);
xhci_dbg(xhci, "set port power %d-%d %s, portsc: 0x%x\n",
- hcd->self.busnum, index + 1, on ? "ON" : "OFF", temp);
+ hcd->self.busnum, port->hcd_portnum + 1, on ? "ON" : "OFF", temp);
temp = xhci_port_state_to_neutral(temp);
@@ -694,10 +699,10 @@ static void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
spin_unlock_irqrestore(&xhci->lock, *flags);
temp = usb_acpi_power_manageable(hcd->self.root_hub,
- index);
+ port->hcd_portnum);
if (temp)
usb_acpi_set_power_state(hcd->self.root_hub,
- index, on);
+ port->hcd_portnum, on);
spin_lock_irqsave(&xhci->lock, *flags);
}
@@ -721,7 +726,6 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci,
u16 test_mode, u16 wIndex, unsigned long *flags)
__must_hold(&xhci->lock)
{
- struct usb_hcd *usb3_hcd = xhci_get_usb3_hcd(xhci);
int i, retval;
/* Disable all Device Slots */
@@ -742,10 +746,10 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci,
xhci_dbg(xhci, "Disable all port (PP = 0)\n");
/* Power off USB3 ports*/
for (i = 0; i < xhci->usb3_rhub.num_ports; i++)
- xhci_set_port_power(xhci, usb3_hcd, i, false, flags);
+ xhci_set_port_power(xhci, xhci->usb3_rhub.ports[i], false, flags);
/* Power off USB2 ports*/
for (i = 0; i < xhci->usb2_rhub.num_ports; i++)
- xhci_set_port_power(xhci, xhci->main_hcd, i, false, flags);
+ xhci_set_port_power(xhci, xhci->usb2_rhub.ports[i], false, flags);
/* Stop the controller */
xhci_dbg(xhci, "Stop controller\n");
retval = xhci_halt(xhci);
@@ -920,7 +924,7 @@ static void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status,
}
static int xhci_handle_usb2_port_link_resume(struct xhci_port *port,
- u32 *status, u32 portsc,
+ u32 portsc,
unsigned long *flags)
{
struct xhci_bus_state *bus_state;
@@ -935,11 +939,10 @@ static int xhci_handle_usb2_port_link_resume(struct xhci_port *port,
wIndex = port->hcd_portnum;
if ((portsc & PORT_RESET) || !(portsc & PORT_PE)) {
- *status = 0xffffffff;
return -EINVAL;
}
/* did port event handler already start resume timing? */
- if (!bus_state->resume_done[wIndex]) {
+ if (!port->resume_timestamp) {
/* If not, maybe we are in a host initated resume? */
if (test_bit(wIndex, &bus_state->resuming_ports)) {
/* Host initated resume doesn't time the resume
@@ -956,28 +959,29 @@ static int xhci_handle_usb2_port_link_resume(struct xhci_port *port,
msecs_to_jiffies(USB_RESUME_TIMEOUT);
set_bit(wIndex, &bus_state->resuming_ports);
- bus_state->resume_done[wIndex] = timeout;
+ port->resume_timestamp = timeout;
mod_timer(&hcd->rh_timer, timeout);
usb_hcd_start_port_resume(&hcd->self, wIndex);
}
/* Has resume been signalled for USB_RESUME_TIME yet? */
- } else if (time_after_eq(jiffies, bus_state->resume_done[wIndex])) {
+ } else if (time_after_eq(jiffies, port->resume_timestamp)) {
int time_left;
xhci_dbg(xhci, "resume USB2 port %d-%d\n",
hcd->self.busnum, wIndex + 1);
- bus_state->resume_done[wIndex] = 0;
+ port->resume_timestamp = 0;
clear_bit(wIndex, &bus_state->resuming_ports);
- set_bit(wIndex, &bus_state->rexit_ports);
+ reinit_completion(&port->rexit_done);
+ port->rexit_active = true;
xhci_test_and_clear_bit(xhci, port, PORT_PLC);
xhci_set_link_state(xhci, port, XDEV_U0);
spin_unlock_irqrestore(&xhci->lock, *flags);
time_left = wait_for_completion_timeout(
- &bus_state->rexit_done[wIndex],
+ &port->rexit_done,
msecs_to_jiffies(XHCI_MAX_REXIT_TIMEOUT_MS));
spin_lock_irqsave(&xhci->lock, *flags);
@@ -986,7 +990,6 @@ static int xhci_handle_usb2_port_link_resume(struct xhci_port *port,
wIndex + 1);
if (!slot_id) {
xhci_dbg(xhci, "slot_id is zero\n");
- *status = 0xffffffff;
return -ENODEV;
}
xhci_ring_device(xhci, slot_id);
@@ -995,22 +998,19 @@ static int xhci_handle_usb2_port_link_resume(struct xhci_port *port,
xhci_warn(xhci, "Port resume timed out, port %d-%d: 0x%x\n",
hcd->self.busnum, wIndex + 1, port_status);
- *status |= USB_PORT_STAT_SUSPEND;
- clear_bit(wIndex, &bus_state->rexit_ports);
+ /*
+ * keep rexit_active set if U0 transition failed so we
+ * know to report PORT_STAT_SUSPEND status back to
+ * usbcore. It will be cleared later once the port is
+ * out of RESUME/U3 state
+ */
}
usb_hcd_end_port_resume(&hcd->self, wIndex);
bus_state->port_c_suspend |= 1 << wIndex;
bus_state->suspended_ports &= ~(1 << wIndex);
- } else {
- /*
- * The resume has been signaling for less than
- * USB_RESUME_TIME. Report the port status as SUSPEND,
- * let the usbcore check port status again and clear
- * resume signaling later.
- */
- *status |= USB_PORT_STAT_SUSPEND;
}
+
return 0;
}
@@ -1087,7 +1087,7 @@ static void xhci_get_usb2_port_status(struct xhci_port *port, u32 *status,
struct xhci_bus_state *bus_state;
u32 link_state;
u32 portnum;
- int ret;
+ int err;
bus_state = &port->rhub->bus_state;
link_state = portsc & PORT_PLS_MASK;
@@ -1103,22 +1103,34 @@ static void xhci_get_usb2_port_status(struct xhci_port *port, u32 *status,
if (link_state == XDEV_U2)
*status |= USB_PORT_STAT_L1;
if (link_state == XDEV_U0) {
- if (bus_state->resume_done[portnum])
- usb_hcd_end_port_resume(&port->rhub->hcd->self,
- portnum);
- bus_state->resume_done[portnum] = 0;
- clear_bit(portnum, &bus_state->resuming_ports);
if (bus_state->suspended_ports & (1 << portnum)) {
bus_state->suspended_ports &= ~(1 << portnum);
bus_state->port_c_suspend |= 1 << portnum;
}
}
if (link_state == XDEV_RESUME) {
- ret = xhci_handle_usb2_port_link_resume(port, status,
- portsc, flags);
- if (ret)
- return;
+ err = xhci_handle_usb2_port_link_resume(port, portsc,
+ flags);
+ if (err < 0)
+ *status = 0xffffffff;
+ else if (port->resume_timestamp || port->rexit_active)
+ *status |= USB_PORT_STAT_SUSPEND;
+ }
+ }
+
+ /*
+ * Clear usb2 resume signalling variables if port is no longer suspended
+ * or resuming. Port either resumed to U0/U1/U2, disconnected, or in a
+ * error state. Resume related variables should be cleared in all those cases.
+ */
+ if (link_state != XDEV_U3 && link_state != XDEV_RESUME) {
+ if (port->resume_timestamp ||
+ test_bit(portnum, &bus_state->resuming_ports)) {
+ port->resume_timestamp = 0;
+ clear_bit(portnum, &bus_state->resuming_ports);
+ usb_hcd_end_port_resume(&port->rhub->hcd->self, portnum);
}
+ port->rexit_active = 0;
}
}
@@ -1174,18 +1186,6 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
else
xhci_get_usb2_port_status(port, &status, raw_port_status,
flags);
- /*
- * Clear stale usb2 resume signalling variables in case port changed
- * state during resume signalling. For example on error
- */
- if ((bus_state->resume_done[wIndex] ||
- test_bit(wIndex, &bus_state->resuming_ports)) &&
- (raw_port_status & PORT_PLS_MASK) != XDEV_U3 &&
- (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME) {
- bus_state->resume_done[wIndex] = 0;
- clear_bit(wIndex, &bus_state->resuming_ports);
- usb_hcd_end_port_resume(&hcd->self, wIndex);
- }
if (bus_state->port_c_suspend & (1 << wIndex))
status |= USB_PORT_STAT_C_SUSPEND << 16;
@@ -1209,11 +1209,14 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 test_mode = 0;
struct xhci_hub *rhub;
struct xhci_port **ports;
+ struct xhci_port *port;
+ int portnum1;
rhub = xhci_get_rhub(hcd);
ports = rhub->ports;
max_ports = rhub->num_ports;
bus_state = &rhub->bus_state;
+ portnum1 = wIndex & 0xff;
spin_lock_irqsave(&xhci->lock, flags);
switch (typeReq) {
@@ -1247,10 +1250,12 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
spin_unlock_irqrestore(&xhci->lock, flags);
return retval;
case GetPortStatus:
- if (!wIndex || wIndex > max_ports)
+ if (!portnum1 || portnum1 > max_ports)
goto error;
+
wIndex--;
- temp = readl(ports[wIndex]->addr);
+ port = ports[portnum1 - 1];
+ temp = readl(port->addr);
if (temp == ~(u32)0) {
xhci_hc_died(xhci);
retval = -ENODEV;
@@ -1263,7 +1268,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
goto error;
xhci_dbg(xhci, "Get port status %d-%d read: 0x%x, return 0x%x",
- hcd->self.busnum, wIndex + 1, temp, status);
+ hcd->self.busnum, portnum1, temp, status);
put_unaligned(cpu_to_le32(status), (__le32 *) buf);
/* if USB 3.1 extended port status return additional 4 bytes */
@@ -1275,7 +1280,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
retval = -EINVAL;
break;
}
- port_li = readl(ports[wIndex]->addr + PORTLI);
+ port_li = readl(port->addr + PORTLI);
status = xhci_get_ext_port_status(temp, port_li);
put_unaligned_le32(status, &buf[4]);
}
@@ -1289,11 +1294,14 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
test_mode = (wIndex & 0xff00) >> 8;
/* The MSB of wIndex is the U1/U2 timeout */
timeout = (wIndex & 0xff00) >> 8;
+
wIndex &= 0xff;
- if (!wIndex || wIndex > max_ports)
+ if (!portnum1 || portnum1 > max_ports)
goto error;
+
+ port = ports[portnum1 - 1];
wIndex--;
- temp = readl(ports[wIndex]->addr);
+ temp = readl(port->addr);
if (temp == ~(u32)0) {
xhci_hc_died(xhci);
retval = -ENODEV;
@@ -1303,11 +1311,10 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
/* FIXME: What new port features do we need to support? */
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
- temp = readl(ports[wIndex]->addr);
+ temp = readl(port->addr);
if ((temp & PORT_PLS_MASK) != XDEV_U0) {
/* Resume the port to U0 first */
- xhci_set_link_state(xhci, ports[wIndex],
- XDEV_U0);
+ xhci_set_link_state(xhci, port, XDEV_U0);
spin_unlock_irqrestore(&xhci->lock, flags);
msleep(10);
spin_lock_irqsave(&xhci->lock, flags);
@@ -1316,16 +1323,16 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
* a port unless the port reports that it is in the
* enabled (PED = ‘1’,PLS < ‘3’) state.
*/
- temp = readl(ports[wIndex]->addr);
+ temp = readl(port->addr);
if ((temp & PORT_PE) == 0 || (temp & PORT_RESET)
|| (temp & PORT_PLS_MASK) >= XDEV_U3) {
xhci_warn(xhci, "USB core suspending port %d-%d not in U0/U1/U2\n",
- hcd->self.busnum, wIndex + 1);
+ hcd->self.busnum, portnum1);
goto error;
}
slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- wIndex + 1);
+ portnum1);
if (!slot_id) {
xhci_warn(xhci, "slot_id is zero\n");
goto error;
@@ -1335,21 +1342,21 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_stop_device(xhci, slot_id, 1);
spin_lock_irqsave(&xhci->lock, flags);
- xhci_set_link_state(xhci, ports[wIndex], XDEV_U3);
+ xhci_set_link_state(xhci, port, XDEV_U3);
spin_unlock_irqrestore(&xhci->lock, flags);
msleep(10); /* wait device to enter */
spin_lock_irqsave(&xhci->lock, flags);
- temp = readl(ports[wIndex]->addr);
+ temp = readl(port->addr);
bus_state->suspended_ports |= 1 << wIndex;
break;
case USB_PORT_FEAT_LINK_STATE:
- temp = readl(ports[wIndex]->addr);
+ temp = readl(port->addr);
/* Disable port */
if (link_state == USB_SS_PORT_LS_SS_DISABLED) {
xhci_dbg(xhci, "Disable port %d-%d\n",
- hcd->self.busnum, wIndex + 1);
+ hcd->self.busnum, portnum1);
temp = xhci_port_state_to_neutral(temp);
/*
* Clear all change bits, so that we get a new
@@ -1358,18 +1365,17 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
temp |= PORT_CSC | PORT_PEC | PORT_WRC |
PORT_OCC | PORT_RC | PORT_PLC |
PORT_CEC;
- writel(temp | PORT_PE, ports[wIndex]->addr);
- temp = readl(ports[wIndex]->addr);
+ writel(temp | PORT_PE, port->addr);
+ temp = readl(port->addr);
break;
}
/* Put link in RxDetect (enable port) */
if (link_state == USB_SS_PORT_LS_RX_DETECT) {
xhci_dbg(xhci, "Enable port %d-%d\n",
- hcd->self.busnum, wIndex + 1);
- xhci_set_link_state(xhci, ports[wIndex],
- link_state);
- temp = readl(ports[wIndex]->addr);
+ hcd->self.busnum, portnum1);
+ xhci_set_link_state(xhci, port, link_state);
+ temp = readl(port->addr);
break;
}
@@ -1399,11 +1405,10 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
}
xhci_dbg(xhci, "Enable compliance mode transition for port %d-%d\n",
- hcd->self.busnum, wIndex + 1);
- xhci_set_link_state(xhci, ports[wIndex],
- link_state);
+ hcd->self.busnum, portnum1);
+ xhci_set_link_state(xhci, port, link_state);
- temp = readl(ports[wIndex]->addr);
+ temp = readl(port->addr);
break;
}
/* Port must be enabled */
@@ -1414,8 +1419,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
/* Can't set port link state above '3' (U3) */
if (link_state > USB_SS_PORT_LS_U3) {
xhci_warn(xhci, "Cannot set port %d-%d link state %d\n",
- hcd->self.busnum, wIndex + 1,
- link_state);
+ hcd->self.busnum, portnum1, link_state);
goto error;
}
@@ -1437,30 +1441,29 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
pls == XDEV_RESUME ||
pls == XDEV_RECOVERY) {
wait_u0 = true;
- reinit_completion(&bus_state->u3exit_done[wIndex]);
+ reinit_completion(&port->u3exit_done);
}
if (pls <= XDEV_U3) /* U1, U2, U3 */
- xhci_set_link_state(xhci, ports[wIndex],
- USB_SS_PORT_LS_U0);
+ xhci_set_link_state(xhci, port, USB_SS_PORT_LS_U0);
if (!wait_u0) {
if (pls > XDEV_U3)
goto error;
break;
}
spin_unlock_irqrestore(&xhci->lock, flags);
- if (!wait_for_completion_timeout(&bus_state->u3exit_done[wIndex],
+ if (!wait_for_completion_timeout(&port->u3exit_done,
msecs_to_jiffies(500)))
xhci_dbg(xhci, "missing U0 port change event for port %d-%d\n",
- hcd->self.busnum, wIndex + 1);
+ hcd->self.busnum, portnum1);
spin_lock_irqsave(&xhci->lock, flags);
- temp = readl(ports[wIndex]->addr);
+ temp = readl(port->addr);
break;
}
if (link_state == USB_SS_PORT_LS_U3) {
int retries = 16;
slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- wIndex + 1);
+ portnum1);
if (slot_id) {
/* unlock to execute stop endpoint
* commands */
@@ -1469,16 +1472,16 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_stop_device(xhci, slot_id, 1);
spin_lock_irqsave(&xhci->lock, flags);
}
- xhci_set_link_state(xhci, ports[wIndex], USB_SS_PORT_LS_U3);
+ xhci_set_link_state(xhci, port, USB_SS_PORT_LS_U3);
spin_unlock_irqrestore(&xhci->lock, flags);
while (retries--) {
usleep_range(4000, 8000);
- temp = readl(ports[wIndex]->addr);
+ temp = readl(port->addr);
if ((temp & PORT_PLS_MASK) == XDEV_U3)
break;
}
spin_lock_irqsave(&xhci->lock, flags);
- temp = readl(ports[wIndex]->addr);
+ temp = readl(port->addr);
bus_state->suspended_ports |= 1 << wIndex;
}
break;
@@ -1489,43 +1492,42 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
* However, hub_wq will ignore the roothub events until
* the roothub is registered.
*/
- xhci_set_port_power(xhci, hcd, wIndex, true, &flags);
+ xhci_set_port_power(xhci, port, true, &flags);
break;
case USB_PORT_FEAT_RESET:
temp = (temp | PORT_RESET);
- writel(temp, ports[wIndex]->addr);
+ writel(temp, port->addr);
- temp = readl(ports[wIndex]->addr);
+ temp = readl(port->addr);
xhci_dbg(xhci, "set port reset, actual port %d-%d status = 0x%x\n",
- hcd->self.busnum, wIndex + 1, temp);
+ hcd->self.busnum, portnum1, temp);
break;
case USB_PORT_FEAT_REMOTE_WAKE_MASK:
- xhci_set_remote_wake_mask(xhci, ports[wIndex],
- wake_mask);
- temp = readl(ports[wIndex]->addr);
+ xhci_set_remote_wake_mask(xhci, port, wake_mask);
+ temp = readl(port->addr);
xhci_dbg(xhci, "set port remote wake mask, actual port %d-%d status = 0x%x\n",
- hcd->self.busnum, wIndex + 1, temp);
+ hcd->self.busnum, portnum1, temp);
break;
case USB_PORT_FEAT_BH_PORT_RESET:
temp |= PORT_WR;
- writel(temp, ports[wIndex]->addr);
- temp = readl(ports[wIndex]->addr);
+ writel(temp, port->addr);
+ temp = readl(port->addr);
break;
case USB_PORT_FEAT_U1_TIMEOUT:
if (hcd->speed < HCD_USB3)
goto error;
- temp = readl(ports[wIndex]->addr + PORTPMSC);
+ temp = readl(port->addr + PORTPMSC);
temp &= ~PORT_U1_TIMEOUT_MASK;
temp |= PORT_U1_TIMEOUT(timeout);
- writel(temp, ports[wIndex]->addr + PORTPMSC);
+ writel(temp, port->addr + PORTPMSC);
break;
case USB_PORT_FEAT_U2_TIMEOUT:
if (hcd->speed < HCD_USB3)
goto error;
- temp = readl(ports[wIndex]->addr + PORTPMSC);
+ temp = readl(port->addr + PORTPMSC);
temp &= ~PORT_U2_TIMEOUT_MASK;
temp |= PORT_U2_TIMEOUT(timeout);
- writel(temp, ports[wIndex]->addr + PORTPMSC);
+ writel(temp, port->addr + PORTPMSC);
break;
case USB_PORT_FEAT_TEST:
/* 4.19.6 Port Test Modes (USB2 Test Mode) */
@@ -1541,13 +1543,16 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
goto error;
}
/* unblock any posted writes */
- temp = readl(ports[wIndex]->addr);
+ temp = readl(port->addr);
break;
case ClearPortFeature:
- if (!wIndex || wIndex > max_ports)
+ if (!portnum1 || portnum1 > max_ports)
goto error;
+
+ port = ports[portnum1 - 1];
+
wIndex--;
- temp = readl(ports[wIndex]->addr);
+ temp = readl(port->addr);
if (temp == ~(u32)0) {
xhci_hc_died(xhci);
retval = -ENODEV;
@@ -1557,7 +1562,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
temp = xhci_port_state_to_neutral(temp);
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
- temp = readl(ports[wIndex]->addr);
+ temp = readl(port->addr);
xhci_dbg(xhci, "clear USB_PORT_FEAT_SUSPEND\n");
xhci_dbg(xhci, "PORTSC %04x\n", temp);
if (temp & PORT_RESET)
@@ -1568,20 +1573,18 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
set_bit(wIndex, &bus_state->resuming_ports);
usb_hcd_start_port_resume(&hcd->self, wIndex);
- xhci_set_link_state(xhci, ports[wIndex],
- XDEV_RESUME);
+ xhci_set_link_state(xhci, port, XDEV_RESUME);
spin_unlock_irqrestore(&xhci->lock, flags);
msleep(USB_RESUME_TIMEOUT);
spin_lock_irqsave(&xhci->lock, flags);
- xhci_set_link_state(xhci, ports[wIndex],
- XDEV_U0);
+ xhci_set_link_state(xhci, port, XDEV_U0);
clear_bit(wIndex, &bus_state->resuming_ports);
usb_hcd_end_port_resume(&hcd->self, wIndex);
}
bus_state->port_c_suspend |= 1 << wIndex;
slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- wIndex + 1);
+ portnum1);
if (!slot_id) {
xhci_dbg(xhci, "slot_id is zero\n");
goto error;
@@ -1599,14 +1602,13 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_C_PORT_LINK_STATE:
case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
xhci_clear_port_change_bit(xhci, wValue, wIndex,
- ports[wIndex]->addr, temp);
+ port->addr, temp);
break;
case USB_PORT_FEAT_ENABLE:
- xhci_disable_port(hcd, xhci, wIndex,
- ports[wIndex]->addr, temp);
+ xhci_disable_port(xhci, port);
break;
case USB_PORT_FEAT_POWER:
- xhci_set_port_power(xhci, hcd, wIndex, false, &flags);
+ xhci_set_port_power(xhci, port, false, &flags);
break;
case USB_PORT_FEAT_TEST:
retval = xhci_exit_test_mode(xhci);
@@ -1623,6 +1625,7 @@ error:
spin_unlock_irqrestore(&xhci->lock, flags);
return retval;
}
+EXPORT_SYMBOL_GPL(xhci_hub_control);
/*
* Returns 0 if the status hasn't changed, or the number of bytes in buf.
@@ -1687,8 +1690,8 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
if ((temp & mask) != 0 ||
(bus_state->port_c_suspend & 1 << i) ||
- (bus_state->resume_done[i] && time_after_eq(
- jiffies, bus_state->resume_done[i]))) {
+ (ports[i]->resume_timestamp && time_after_eq(
+ jiffies, ports[i]->resume_timestamp))) {
buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
status = 1;
}
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 81ca2bc1f0be..d0a9467aa5fc 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1819,17 +1819,43 @@ int xhci_alloc_erst(struct xhci_hcd *xhci,
return 0;
}
-void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
+static void
+xhci_free_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
- size_t size;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+ size_t erst_size;
+ u64 tmp64;
+ u32 tmp;
- size = sizeof(struct xhci_erst_entry) * (erst->num_entries);
- if (erst->entries)
- dma_free_coherent(dev, size,
- erst->entries,
- erst->erst_dma_addr);
- erst->entries = NULL;
+ if (!ir)
+ return;
+
+ erst_size = sizeof(struct xhci_erst_entry) * (ir->erst.num_entries);
+ if (ir->erst.entries)
+ dma_free_coherent(dev, erst_size,
+ ir->erst.entries,
+ ir->erst.erst_dma_addr);
+ ir->erst.entries = NULL;
+
+ /*
+ * Clean out interrupter registers except ERSTBA. Clearing either the
+ * low or high 32 bits of ERSTBA immediately causes the controller to
+ * dereference the partially cleared 64-bit address, causing an IOMMU error.
+ */
+ tmp = readl(&ir->ir_set->erst_size);
+ tmp &= ERST_SIZE_MASK;
+ writel(tmp, &ir->ir_set->erst_size);
+
+ tmp64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
+ tmp64 &= (u64) ERST_PTR_MASK;
+ xhci_write_64(xhci, tmp64, &ir->ir_set->erst_dequeue);
+
+	/* free interrupter event ring */
+ if (ir->event_ring)
+ xhci_ring_free(xhci, ir->event_ring);
+ ir->event_ring = NULL;
+
+ kfree(ir);
}
void xhci_mem_cleanup(struct xhci_hcd *xhci)
@@ -1839,12 +1865,9 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
cancel_delayed_work_sync(&xhci->cmd_timer);
- xhci_free_erst(xhci, &xhci->erst);
-
- if (xhci->event_ring)
- xhci_ring_free(xhci, xhci->event_ring);
- xhci->event_ring = NULL;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");
+ xhci_free_interrupter(xhci, xhci->interrupter);
+ xhci->interrupter = NULL;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed primary event ring");
if (xhci->cmd_ring)
xhci_ring_free(xhci, xhci->cmd_ring);
@@ -1929,176 +1952,18 @@ no_bw:
xhci->usb3_rhub.bus_state.bus_suspended = 0;
}
-static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
- struct xhci_segment *input_seg,
- union xhci_trb *start_trb,
- union xhci_trb *end_trb,
- dma_addr_t input_dma,
- struct xhci_segment *result_seg,
- char *test_name, int test_number)
-{
- unsigned long long start_dma;
- unsigned long long end_dma;
- struct xhci_segment *seg;
-
- start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
- end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);
-
- seg = trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma, false);
- if (seg != result_seg) {
- xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
- test_name, test_number);
- xhci_warn(xhci, "Tested TRB math w/ seg %p and "
- "input DMA 0x%llx\n",
- input_seg,
- (unsigned long long) input_dma);
- xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
- "ending TRB %p (0x%llx DMA)\n",
- start_trb, start_dma,
- end_trb, end_dma);
- xhci_warn(xhci, "Expected seg %p, got seg %p\n",
- result_seg, seg);
- trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma,
- true);
- return -1;
- }
- return 0;
-}
-
-/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
-static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
-{
- struct {
- dma_addr_t input_dma;
- struct xhci_segment *result_seg;
- } simple_test_vector [] = {
- /* A zeroed DMA field should fail */
- { 0, NULL },
- /* One TRB before the ring start should fail */
- { xhci->event_ring->first_seg->dma - 16, NULL },
- /* One byte before the ring start should fail */
- { xhci->event_ring->first_seg->dma - 1, NULL },
- /* Starting TRB should succeed */
- { xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
- /* Ending TRB should succeed */
- { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
- xhci->event_ring->first_seg },
- /* One byte after the ring end should fail */
- { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
- /* One TRB after the ring end should fail */
- { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
- /* An address of all ones should fail */
- { (dma_addr_t) (~0), NULL },
- };
- struct {
- struct xhci_segment *input_seg;
- union xhci_trb *start_trb;
- union xhci_trb *end_trb;
- dma_addr_t input_dma;
- struct xhci_segment *result_seg;
- } complex_test_vector [] = {
- /* Test feeding a valid DMA address from a different ring */
- { .input_seg = xhci->event_ring->first_seg,
- .start_trb = xhci->event_ring->first_seg->trbs,
- .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
- .input_dma = xhci->cmd_ring->first_seg->dma,
- .result_seg = NULL,
- },
- /* Test feeding a valid end TRB from a different ring */
- { .input_seg = xhci->event_ring->first_seg,
- .start_trb = xhci->event_ring->first_seg->trbs,
- .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
- .input_dma = xhci->cmd_ring->first_seg->dma,
- .result_seg = NULL,
- },
- /* Test feeding a valid start and end TRB from a different ring */
- { .input_seg = xhci->event_ring->first_seg,
- .start_trb = xhci->cmd_ring->first_seg->trbs,
- .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
- .input_dma = xhci->cmd_ring->first_seg->dma,
- .result_seg = NULL,
- },
- /* TRB in this ring, but after this TD */
- { .input_seg = xhci->event_ring->first_seg,
- .start_trb = &xhci->event_ring->first_seg->trbs[0],
- .end_trb = &xhci->event_ring->first_seg->trbs[3],
- .input_dma = xhci->event_ring->first_seg->dma + 4*16,
- .result_seg = NULL,
- },
- /* TRB in this ring, but before this TD */
- { .input_seg = xhci->event_ring->first_seg,
- .start_trb = &xhci->event_ring->first_seg->trbs[3],
- .end_trb = &xhci->event_ring->first_seg->trbs[6],
- .input_dma = xhci->event_ring->first_seg->dma + 2*16,
- .result_seg = NULL,
- },
- /* TRB in this ring, but after this wrapped TD */
- { .input_seg = xhci->event_ring->first_seg,
- .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
- .end_trb = &xhci->event_ring->first_seg->trbs[1],
- .input_dma = xhci->event_ring->first_seg->dma + 2*16,
- .result_seg = NULL,
- },
- /* TRB in this ring, but before this wrapped TD */
- { .input_seg = xhci->event_ring->first_seg,
- .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
- .end_trb = &xhci->event_ring->first_seg->trbs[1],
- .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
- .result_seg = NULL,
- },
- /* TRB not in this ring, and we have a wrapped TD */
- { .input_seg = xhci->event_ring->first_seg,
- .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
- .end_trb = &xhci->event_ring->first_seg->trbs[1],
- .input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
- .result_seg = NULL,
- },
- };
-
- unsigned int num_tests;
- int i, ret;
-
- num_tests = ARRAY_SIZE(simple_test_vector);
- for (i = 0; i < num_tests; i++) {
- ret = xhci_test_trb_in_td(xhci,
- xhci->event_ring->first_seg,
- xhci->event_ring->first_seg->trbs,
- &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
- simple_test_vector[i].input_dma,
- simple_test_vector[i].result_seg,
- "Simple", i);
- if (ret < 0)
- return ret;
- }
-
- num_tests = ARRAY_SIZE(complex_test_vector);
- for (i = 0; i < num_tests; i++) {
- ret = xhci_test_trb_in_td(xhci,
- complex_test_vector[i].input_seg,
- complex_test_vector[i].start_trb,
- complex_test_vector[i].end_trb,
- complex_test_vector[i].input_dma,
- complex_test_vector[i].result_seg,
- "Complex", i);
- if (ret < 0)
- return ret;
- }
- xhci_dbg(xhci, "TRB math tests passed.\n");
- return 0;
-}
-
-static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
+static void xhci_set_hc_event_deq(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
u64 temp;
dma_addr_t deq;
- deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
- xhci->event_ring->dequeue);
+ deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
+ ir->event_ring->dequeue);
if (!deq)
xhci_warn(xhci, "WARN something wrong with SW event ring "
"dequeue ptr.\n");
/* Update HC event ring dequeue pointer */
- temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+ temp = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
temp &= ERST_PTR_MASK;
/* Don't clear the EHB bit (which is RW1C) because
* there might be more events to service.
@@ -2108,7 +1973,7 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
"// Write event ring dequeue pointer, "
"preserving EHB bit");
xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
- &xhci->ir_set->erst_dequeue);
+ &ir->ir_set->erst_dequeue);
}
static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
@@ -2289,6 +2154,9 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
xhci->hw_ports[i].addr = &xhci->op_regs->port_status_base +
NUM_PORT_REGS * i;
xhci->hw_ports[i].hw_portnum = i;
+
+ init_completion(&xhci->hw_ports[i].rexit_done);
+ init_completion(&xhci->hw_ports[i].u3exit_done);
}
xhci->rh_bw = kcalloc_node(num_ports, sizeof(*xhci->rh_bw), flags,
@@ -2375,6 +2243,68 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
return 0;
}
+static struct xhci_interrupter *
+xhci_alloc_interrupter(struct xhci_hcd *xhci, unsigned int intr_num, gfp_t flags)
+{
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+ struct xhci_interrupter *ir;
+ u64 erst_base;
+ u32 erst_size;
+ int ret;
+
+ if (intr_num > xhci->max_interrupters) {
+ xhci_warn(xhci, "Can't allocate interrupter %d, max interrupters %d\n",
+ intr_num, xhci->max_interrupters);
+ return NULL;
+ }
+
+ if (xhci->interrupter) {
+ xhci_warn(xhci, "Can't allocate already set up interrupter %d\n", intr_num);
+ return NULL;
+ }
+
+ ir = kzalloc_node(sizeof(*ir), flags, dev_to_node(dev));
+ if (!ir)
+ return NULL;
+
+ ir->ir_set = &xhci->run_regs->ir_set[intr_num];
+ ir->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
+ 0, flags);
+ if (!ir->event_ring) {
+ xhci_warn(xhci, "Failed to allocate interrupter %d event ring\n", intr_num);
+ goto fail_ir;
+ }
+
+ ret = xhci_alloc_erst(xhci, ir->event_ring, &ir->erst, flags);
+ if (ret) {
+ xhci_warn(xhci, "Failed to allocate interrupter %d erst\n", intr_num);
+ goto fail_ev;
+	}
+ /* set ERST count with the number of entries in the segment table */
+ erst_size = readl(&ir->ir_set->erst_size);
+ erst_size &= ERST_SIZE_MASK;
+ erst_size |= ERST_NUM_SEGS;
+ writel(erst_size, &ir->ir_set->erst_size);
+
+ erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
+ erst_base &= ERST_PTR_MASK;
+ erst_base |= (ir->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
+ xhci_write_64(xhci, erst_base, &ir->ir_set->erst_base);
+
+ /* Set the event ring dequeue address of this interrupter */
+ xhci_set_hc_event_deq(xhci, ir);
+
+ return ir;
+
+fail_ev:
+ xhci_ring_free(xhci, ir->event_ring);
+fail_ir:
+ kfree(ir);
+
+ return NULL;
+}
+
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
dma_addr_t dma;
@@ -2382,7 +2312,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
unsigned int val, val2;
u64 val_64;
u32 page_size, temp;
- int i, ret;
+ int i;
INIT_LIST_HEAD(&xhci->cmd_list);
@@ -2495,48 +2425,13 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
" from cap regs base addr", val);
xhci->dba = (void __iomem *) xhci->cap_regs + val;
/* Set ir_set to interrupt register set 0 */
- xhci->ir_set = &xhci->run_regs->ir_set[0];
-
- /*
- * Event ring setup: Allocate a normal ring, but also setup
- * the event ring segment table (ERST). Section 4.9.3.
- */
- xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
- xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
- 0, flags);
- if (!xhci->event_ring)
- goto fail;
- if (xhci_check_trb_in_td_math(xhci) < 0)
- goto fail;
-
- ret = xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags);
- if (ret)
- goto fail;
-
- /* set ERST count with the number of entries in the segment table */
- val = readl(&xhci->ir_set->erst_size);
- val &= ERST_SIZE_MASK;
- val |= ERST_NUM_SEGS;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Write ERST size = %i to ir_set 0 (some bits preserved)",
- val);
- writel(val, &xhci->ir_set->erst_size);
+ /* allocate and set up primary interrupter with an event ring. */
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Set ERST entries to point to event ring.");
- /* set the segment table base address */
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Set ERST base address for ir_set 0 = 0x%llx",
- (unsigned long long)xhci->erst.erst_dma_addr);
- val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
- val_64 &= ERST_PTR_MASK;
- val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
- xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
-
- /* Set the event ring dequeue address */
- xhci_set_hc_event_deq(xhci);
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "Wrote ERST address to ir_set 0.");
+ "Allocating primary event ring");
+ xhci->interrupter = xhci_alloc_interrupter(xhci, 0, flags);
+ if (!xhci->interrupter)
+ goto fail;
xhci->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;
@@ -2547,13 +2442,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
*/
for (i = 0; i < MAX_HC_SLOTS; i++)
xhci->devs[i] = NULL;
- for (i = 0; i < USB_MAXCHILDREN; i++) {
- xhci->usb2_rhub.bus_state.resume_done[i] = 0;
- xhci->usb3_rhub.bus_state.resume_done[i] = 0;
- /* Only the USB 2.0 completions will ever be used. */
- init_completion(&xhci->usb2_rhub.bus_state.rexit_done[i]);
- init_completion(&xhci->usb3_rhub.bus_state.u3exit_done[i]);
- }
if (scratchpad_alloc(xhci, flags))
goto fail;
diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c
index 60651a50770f..87f1597a0e5a 100644
--- a/drivers/usb/host/xhci-mvebu.c
+++ b/drivers/usb/host/xhci-mvebu.c
@@ -32,7 +32,7 @@ static void xhci_mvebu_mbus_config(void __iomem *base,
	/* Program each DRAM CS in a separate window */
for (win = 0; win < dram->num_cs; win++) {
- const struct mbus_dram_window *cs = dram->cs + win;
+ const struct mbus_dram_window *cs = &dram->cs[win];
writel(((cs->size - 1) & 0xffff0000) | (cs->mbus_attr << 8) |
(dram->mbus_dram_target_id << 4) | 1,
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 5fb55bf19493..b9f9625467d6 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -19,11 +19,11 @@
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/usb/of.h>
+#include <linux/reset.h>
#include "xhci.h"
#include "xhci-plat.h"
#include "xhci-mvebu.h"
-#include "xhci-rcar.h"
static struct hc_driver __read_mostly xhci_plat_hc_driver;
@@ -114,14 +114,6 @@ static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = {
.init_quirk = xhci_mvebu_a3700_init_quirk,
};
-static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen2 = {
- SET_XHCI_PLAT_PRIV_FOR_RCAR(XHCI_RCAR_FIRMWARE_NAME_V1)
-};
-
-static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = {
- SET_XHCI_PLAT_PRIV_FOR_RCAR(XHCI_RCAR_FIRMWARE_NAME_V3)
-};
-
static const struct xhci_plat_priv xhci_plat_brcm = {
.quirks = XHCI_RESET_ON_RESUME | XHCI_SUSPEND_RESUME_CLKS,
};
@@ -141,27 +133,6 @@ static const struct of_device_id usb_xhci_of_match[] = {
.compatible = "marvell,armada3700-xhci",
.data = &xhci_plat_marvell_armada3700,
}, {
- .compatible = "renesas,xhci-r8a7790",
- .data = &xhci_plat_renesas_rcar_gen2,
- }, {
- .compatible = "renesas,xhci-r8a7791",
- .data = &xhci_plat_renesas_rcar_gen2,
- }, {
- .compatible = "renesas,xhci-r8a7793",
- .data = &xhci_plat_renesas_rcar_gen2,
- }, {
- .compatible = "renesas,xhci-r8a7795",
- .data = &xhci_plat_renesas_rcar_gen3,
- }, {
- .compatible = "renesas,xhci-r8a7796",
- .data = &xhci_plat_renesas_rcar_gen3,
- }, {
- .compatible = "renesas,rcar-gen2-xhci",
- .data = &xhci_plat_renesas_rcar_gen2,
- }, {
- .compatible = "renesas,rcar-gen3-xhci",
- .data = &xhci_plat_renesas_rcar_gen3,
- }, {
.compatible = "brcm,xhci-brcm-v2",
.data = &xhci_plat_brcm,
}, {
@@ -173,11 +144,10 @@ static const struct of_device_id usb_xhci_of_match[] = {
MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
#endif
-static int xhci_plat_probe(struct platform_device *pdev)
+int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const struct xhci_plat_priv *priv_match)
{
- const struct xhci_plat_priv *priv_match;
const struct hc_driver *driver;
- struct device *sysdev, *tmpdev;
+ struct device *tmpdev;
struct xhci_hcd *xhci;
struct resource *res;
struct usb_hcd *hcd, *usb3_hcd;
@@ -195,31 +165,10 @@ static int xhci_plat_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- /*
- * sysdev must point to a device that is known to the system firmware
- * or PCI hardware. We handle these three cases here:
- * 1. xhci_plat comes from firmware
- * 2. xhci_plat is child of a device from firmware (dwc3-plat)
- * 3. xhci_plat is grandchild of a pci device (dwc3-pci)
- */
- for (sysdev = &pdev->dev; sysdev; sysdev = sysdev->parent) {
- if (is_of_node(sysdev->fwnode) ||
- is_acpi_device_node(sysdev->fwnode))
- break;
-#ifdef CONFIG_PCI
- else if (sysdev->bus == &pci_bus_type)
- break;
-#endif
- }
-
if (!sysdev)
sysdev = &pdev->dev;
- if (WARN_ON(!sysdev->dma_mask))
- /* Platform did not initialize dma_mask */
- ret = dma_coerce_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
- else
- ret = dma_set_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
+ ret = dma_set_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
if (ret)
return ret;
@@ -257,25 +206,30 @@ static int xhci_plat_probe(struct platform_device *pdev)
goto put_hcd;
}
- ret = clk_prepare_enable(xhci->reg_clk);
- if (ret)
- goto put_hcd;
-
xhci->clk = devm_clk_get_optional(&pdev->dev, NULL);
if (IS_ERR(xhci->clk)) {
ret = PTR_ERR(xhci->clk);
- goto disable_reg_clk;
+ goto put_hcd;
}
+ xhci->reset = devm_reset_control_array_get_optional_shared(&pdev->dev);
+ if (IS_ERR(xhci->reset)) {
+ ret = PTR_ERR(xhci->reset);
+ goto put_hcd;
+ }
+
+ ret = reset_control_deassert(xhci->reset);
+ if (ret)
+ goto put_hcd;
+
+ ret = clk_prepare_enable(xhci->reg_clk);
+ if (ret)
+ goto err_reset;
+
ret = clk_prepare_enable(xhci->clk);
if (ret)
goto disable_reg_clk;
- if (pdev->dev.of_node)
- priv_match = of_device_get_match_data(&pdev->dev);
- else
- priv_match = dev_get_platdata(&pdev->dev);
-
if (priv_match) {
priv = hcd_to_xhci_priv(hcd);
/* Just copy data for now */
@@ -377,6 +331,9 @@ disable_clk:
disable_reg_clk:
clk_disable_unprepare(xhci->reg_clk);
+err_reset:
+ reset_control_assert(xhci->reset);
+
put_hcd:
usb_put_hcd(hcd);
@@ -386,8 +343,50 @@ disable_runtime:
return ret;
}
+EXPORT_SYMBOL_GPL(xhci_plat_probe);
+
+static int xhci_generic_plat_probe(struct platform_device *pdev)
+{
+ const struct xhci_plat_priv *priv_match;
+ struct device *sysdev;
+ int ret;
+
+ /*
+ * sysdev must point to a device that is known to the system firmware
+ * or PCI hardware. We handle these three cases here:
+ * 1. xhci_plat comes from firmware
+ * 2. xhci_plat is child of a device from firmware (dwc3-plat)
+ * 3. xhci_plat is grandchild of a pci device (dwc3-pci)
+ */
+ for (sysdev = &pdev->dev; sysdev; sysdev = sysdev->parent) {
+ if (is_of_node(sysdev->fwnode) ||
+ is_acpi_device_node(sysdev->fwnode))
+ break;
+#ifdef CONFIG_PCI
+ else if (sysdev->bus == &pci_bus_type)
+ break;
+#endif
+ }
+
+ if (!sysdev)
+ sysdev = &pdev->dev;
+
+ if (WARN_ON(!sysdev->dma_mask)) {
+ /* Platform did not initialize dma_mask */
+ ret = dma_coerce_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
+ }
+
+ if (pdev->dev.of_node)
+ priv_match = of_device_get_match_data(&pdev->dev);
+ else
+ priv_match = dev_get_platdata(&pdev->dev);
+
+ return xhci_plat_probe(pdev, sysdev, priv_match);
+}
-static int xhci_plat_remove(struct platform_device *dev)
+int xhci_plat_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -412,6 +411,7 @@ static int xhci_plat_remove(struct platform_device *dev)
clk_disable_unprepare(clk);
clk_disable_unprepare(reg_clk);
+ reset_control_assert(xhci->reset);
usb_put_hcd(hcd);
pm_runtime_disable(&dev->dev);
@@ -420,6 +420,7 @@ static int xhci_plat_remove(struct platform_device *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(xhci_plat_remove);
static int __maybe_unused xhci_plat_suspend(struct device *dev)
{
@@ -496,13 +497,14 @@ static int __maybe_unused xhci_plat_runtime_resume(struct device *dev)
return xhci_resume(xhci, 0);
}
-static const struct dev_pm_ops xhci_plat_pm_ops = {
+const struct dev_pm_ops xhci_plat_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(xhci_plat_suspend, xhci_plat_resume)
SET_RUNTIME_PM_OPS(xhci_plat_runtime_suspend,
xhci_plat_runtime_resume,
NULL)
};
+EXPORT_SYMBOL_GPL(xhci_plat_pm_ops);
#ifdef CONFIG_ACPI
static const struct acpi_device_id usb_xhci_acpi_match[] = {
@@ -513,8 +515,8 @@ static const struct acpi_device_id usb_xhci_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
#endif
-static struct platform_driver usb_xhci_driver = {
- .probe = xhci_plat_probe,
+static struct platform_driver usb_generic_xhci_driver = {
+ .probe = xhci_generic_plat_probe,
.remove = xhci_plat_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
@@ -529,13 +531,13 @@ MODULE_ALIAS("platform:xhci-hcd");
static int __init xhci_plat_init(void)
{
xhci_init_driver(&xhci_plat_hc_driver, &xhci_plat_overrides);
- return platform_driver_register(&usb_xhci_driver);
+ return platform_driver_register(&usb_generic_xhci_driver);
}
module_init(xhci_plat_init);
static void __exit xhci_plat_exit(void)
{
- platform_driver_unregister(&usb_xhci_driver);
+ platform_driver_unregister(&usb_generic_xhci_driver);
}
module_exit(xhci_plat_exit);
diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
index 1fb149d1fbce..83b5b5aa9f8e 100644
--- a/drivers/usb/host/xhci-plat.h
+++ b/drivers/usb/host/xhci-plat.h
@@ -21,4 +21,11 @@ struct xhci_plat_priv {
#define hcd_to_xhci_priv(h) ((struct xhci_plat_priv *)hcd_to_xhci(h)->priv)
#define xhci_to_priv(x) ((struct xhci_plat_priv *)(x)->priv)
+
+int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev,
+ const struct xhci_plat_priv *priv_match);
+
+int xhci_plat_remove(struct platform_device *dev);
+extern const struct dev_pm_ops xhci_plat_pm_ops;
+
#endif /* _XHCI_PLAT_H */
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index aef0258a7160..7f18509a1d39 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -10,12 +10,17 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/usb/phy.h>
#include <linux/sys_soc.h>
#include "xhci.h"
#include "xhci-plat.h"
-#include "xhci-rcar.h"
+#include "xhci-rzv2m.h"
+
+#define XHCI_RCAR_FIRMWARE_NAME_V1 "r8a779x_usb3_v1.dlmem"
+#define XHCI_RCAR_FIRMWARE_NAME_V2 "r8a779x_usb3_v2.dlmem"
+#define XHCI_RCAR_FIRMWARE_NAME_V3 "r8a779x_usb3_v3.dlmem"
/*
* - The V3 firmware is for almost all R-Car Gen3 (except r8a7795 ES1.x)
@@ -108,7 +113,7 @@ static int xhci_rcar_is_gen2(struct device *dev)
of_device_is_compatible(node, "renesas,rcar-gen2-xhci");
}
-void xhci_rcar_start(struct usb_hcd *hcd)
+static void xhci_rcar_start(struct usb_hcd *hcd)
{
u32 temp;
@@ -203,7 +208,7 @@ static bool xhci_rcar_wait_for_pll_active(struct usb_hcd *hcd)
}
/* This function needs to initialize a "phy" of usb before */
-int xhci_rcar_init_quirk(struct usb_hcd *hcd)
+static int xhci_rcar_init_quirk(struct usb_hcd *hcd)
{
/* If hcd->regs is NULL, we don't just call the following function */
if (!hcd->regs)
@@ -215,7 +220,7 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
return xhci_rcar_download_firmware(hcd);
}
-int xhci_rcar_resume_quirk(struct usb_hcd *hcd)
+static int xhci_rcar_resume_quirk(struct usb_hcd *hcd)
{
int ret;
@@ -225,3 +230,92 @@ int xhci_rcar_resume_quirk(struct usb_hcd *hcd)
return ret;
}
+
+/*
+ * On R-Car Gen2 and Gen3, the AC64 bit (bit 0) of HCCPARAMS1 is set
+ * to 1. However, these SoCs don't support 64-bit address memory
+ * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
+ * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
+ * xhci_gen_setup() by using the XHCI_NO_64BIT_SUPPORT quirk.
+ *
+ * Also, since the firmware/internal CPU controls USBSTS.STS_HALT and
+ * processing slows down when the roothub port enters U3, a long delay
+ * for the STS_HALT handshake is needed in xhci_suspend(), so the
+ * XHCI_SLOW_SUSPEND quirk is used.
+ */
+#define SET_XHCI_PLAT_PRIV_FOR_RCAR(firmware) \
+ .firmware_name = firmware, \
+ .quirks = XHCI_NO_64BIT_SUPPORT | XHCI_TRUST_TX_LENGTH | \
+ XHCI_SLOW_SUSPEND, \
+ .init_quirk = xhci_rcar_init_quirk, \
+ .plat_start = xhci_rcar_start, \
+ .resume_quirk = xhci_rcar_resume_quirk,
+
+static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen2 = {
+ SET_XHCI_PLAT_PRIV_FOR_RCAR(XHCI_RCAR_FIRMWARE_NAME_V1)
+};
+
+static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = {
+ SET_XHCI_PLAT_PRIV_FOR_RCAR(XHCI_RCAR_FIRMWARE_NAME_V3)
+};
+
+static const struct xhci_plat_priv xhci_plat_renesas_rzv2m = {
+ .quirks = XHCI_NO_64BIT_SUPPORT | XHCI_TRUST_TX_LENGTH |
+ XHCI_SLOW_SUSPEND,
+ .init_quirk = xhci_rzv2m_init_quirk,
+ .plat_start = xhci_rzv2m_start,
+};
+
+static const struct of_device_id usb_xhci_of_match[] = {
+ {
+ .compatible = "renesas,xhci-r8a7790",
+ .data = &xhci_plat_renesas_rcar_gen2,
+ }, {
+ .compatible = "renesas,xhci-r8a7791",
+ .data = &xhci_plat_renesas_rcar_gen2,
+ }, {
+ .compatible = "renesas,xhci-r8a7793",
+ .data = &xhci_plat_renesas_rcar_gen2,
+ }, {
+ .compatible = "renesas,xhci-r8a7795",
+ .data = &xhci_plat_renesas_rcar_gen3,
+ }, {
+ .compatible = "renesas,xhci-r8a7796",
+ .data = &xhci_plat_renesas_rcar_gen3,
+ }, {
+ .compatible = "renesas,rcar-gen2-xhci",
+ .data = &xhci_plat_renesas_rcar_gen2,
+ }, {
+ .compatible = "renesas,rcar-gen3-xhci",
+ .data = &xhci_plat_renesas_rcar_gen3,
+ }, {
+ .compatible = "renesas,rzv2m-xhci",
+ .data = &xhci_plat_renesas_rzv2m,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
+
+static int xhci_renesas_probe(struct platform_device *pdev)
+{
+ const struct xhci_plat_priv *priv_match;
+
+ priv_match = of_device_get_match_data(&pdev->dev);
+
+ return xhci_plat_probe(pdev, NULL, priv_match);
+}
+
+static struct platform_driver usb_xhci_renesas_driver = {
+ .probe = xhci_renesas_probe,
+ .remove = xhci_plat_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .name = "xhci-renesas-hcd",
+ .pm = &xhci_plat_pm_ops,
+ .of_match_table = of_match_ptr(usb_xhci_of_match),
+ },
+};
+module_platform_driver(usb_xhci_renesas_driver);
+
+MODULE_DESCRIPTION("xHCI Platform Host Controller Driver for Renesas R-Car and RZ");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/xhci-rcar.h b/drivers/usb/host/xhci-rcar.h
deleted file mode 100644
index 048ad3b8a6c7..000000000000
--- a/drivers/usb/host/xhci-rcar.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * drivers/usb/host/xhci-rcar.h
- *
- * Copyright (C) 2014 Renesas Electronics Corporation
- */
-
-#ifndef _XHCI_RCAR_H
-#define _XHCI_RCAR_H
-
-#define XHCI_RCAR_FIRMWARE_NAME_V1 "r8a779x_usb3_v1.dlmem"
-#define XHCI_RCAR_FIRMWARE_NAME_V2 "r8a779x_usb3_v2.dlmem"
-#define XHCI_RCAR_FIRMWARE_NAME_V3 "r8a779x_usb3_v3.dlmem"
-
-#if IS_ENABLED(CONFIG_USB_XHCI_RCAR)
-void xhci_rcar_start(struct usb_hcd *hcd);
-int xhci_rcar_init_quirk(struct usb_hcd *hcd);
-int xhci_rcar_resume_quirk(struct usb_hcd *hcd);
-#else
-static inline void xhci_rcar_start(struct usb_hcd *hcd)
-{
-}
-
-static inline int xhci_rcar_init_quirk(struct usb_hcd *hcd)
-{
- return 0;
-}
-
-static inline int xhci_rcar_resume_quirk(struct usb_hcd *hcd)
-{
- return 0;
-}
-#endif
-
-/*
- * On R-Car Gen2 and Gen3, the AC64 bit (bit 0) of HCCPARAMS1 is set
- * to 1. However, these SoCs don't support 64-bit address memory
- * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
- * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
- * xhci_gen_setup() by using the XHCI_NO_64BIT_SUPPORT quirk.
- *
- * And, since the firmware/internal CPU control the USBSTS.STS_HALT
- * and the process speed is down when the roothub port enters U3,
- * long delay for the handshake of STS_HALT is neeed in xhci_suspend()
- * by using the XHCI_SLOW_SUSPEND quirk.
- */
-#define SET_XHCI_PLAT_PRIV_FOR_RCAR(firmware) \
- .firmware_name = firmware, \
- .quirks = XHCI_NO_64BIT_SUPPORT | XHCI_TRUST_TX_LENGTH | \
- XHCI_SLOW_SUSPEND, \
- .init_quirk = xhci_rcar_init_quirk, \
- .plat_start = xhci_rcar_start, \
- .resume_quirk = xhci_rcar_resume_quirk,
-
-#endif /* _XHCI_RCAR_H */
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index f5b0e1ce22af..eb788c60c1c0 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1833,7 +1833,8 @@ static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci)
}
static void handle_port_status(struct xhci_hcd *xhci,
- union xhci_trb *event)
+ struct xhci_interrupter *ir,
+ union xhci_trb *event)
{
struct usb_hcd *hcd;
u32 port_id;
@@ -1856,7 +1857,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
if ((port_id <= 0) || (port_id > max_ports)) {
xhci_warn(xhci, "Port change event with invalid port ID %d\n",
port_id);
- inc_deq(xhci, xhci->event_ring);
+ inc_deq(xhci, ir->event_ring);
return;
}
@@ -1923,7 +1924,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
goto cleanup;
} else if (!test_bit(hcd_portnum, &bus_state->resuming_ports)) {
xhci_dbg(xhci, "resume HS port %d\n", port_id);
- bus_state->resume_done[hcd_portnum] = jiffies +
+ port->resume_timestamp = jiffies +
msecs_to_jiffies(USB_RESUME_TIMEOUT);
set_bit(hcd_portnum, &bus_state->resuming_ports);
/* Do the rest in GetPortStatus after resume time delay.
@@ -1932,7 +1933,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
*/
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
mod_timer(&hcd->rh_timer,
- bus_state->resume_done[hcd_portnum]);
+ port->resume_timestamp);
usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
bogus_port_status = true;
}
@@ -1944,7 +1945,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
(portsc & PORT_PLS_MASK) == XDEV_U1 ||
(portsc & PORT_PLS_MASK) == XDEV_U2)) {
xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
- complete(&bus_state->u3exit_done[hcd_portnum]);
+ complete(&port->u3exit_done);
/* We've just brought the device into U0/1/2 through either the
* Resume state after a device remote wakeup, or through the
* U3Exit state after a host-initiated resume. If it's a device
@@ -1969,10 +1970,9 @@ static void handle_port_status(struct xhci_hcd *xhci,
* RExit to a disconnect state). If so, let the driver know it's
* out of the RExit state.
*/
- if (!DEV_SUPERSPEED_ANY(portsc) && hcd->speed < HCD_USB3 &&
- test_and_clear_bit(hcd_portnum,
- &bus_state->rexit_ports)) {
- complete(&bus_state->rexit_done[hcd_portnum]);
+ if (hcd->speed < HCD_USB3 && port->rexit_active) {
+ complete(&port->rexit_done);
+ port->rexit_active = false;
bogus_port_status = true;
goto cleanup;
}
@@ -1986,7 +1986,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
cleanup:
/* Update event ring dequeue pointer before dropping the lock */
- inc_deq(xhci, xhci->event_ring);
+ inc_deq(xhci, ir->event_ring);
/* Don't make the USB core poll the roothub if we got a bad port status
* change event. Besides, at that point we can't tell which roothub
@@ -2519,7 +2519,8 @@ finish_td:
* At this point, the host controller is probably hosed and should be reset.
*/
static int handle_tx_event(struct xhci_hcd *xhci,
- struct xhci_transfer_event *event)
+ struct xhci_interrupter *ir,
+ struct xhci_transfer_event *event)
{
struct xhci_virt_ep *ep;
struct xhci_ring *ep_ring;
@@ -2531,7 +2532,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
union xhci_trb *ep_trb;
int status = -EINPROGRESS;
struct xhci_ep_ctx *ep_ctx;
- struct list_head *tmp;
u32 trb_comp_code;
int td_num = 0;
bool handling_skipped_tds = false;
@@ -2585,10 +2585,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
}
/* Count current td numbers if ep->skip is set */
- if (ep->skip) {
- list_for_each(tmp, &ep_ring->td_list)
- td_num++;
- }
+ if (ep->skip)
+ td_num += list_count_nodes(&ep_ring->td_list);
/* Look for common error cases */
switch (trb_comp_code) {
@@ -2871,7 +2869,7 @@ cleanup:
* processing missed tds.
*/
if (!handling_skipped_tds)
- inc_deq(xhci, xhci->event_ring);
+ inc_deq(xhci, ir->event_ring);
/*
* If ep->skip is set, it means there are missed tds on the
@@ -2886,8 +2884,8 @@ cleanup:
err_out:
xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
(unsigned long long) xhci_trb_virt_to_dma(
- xhci->event_ring->deq_seg,
- xhci->event_ring->dequeue),
+ ir->event_ring->deq_seg,
+ ir->event_ring->dequeue),
lower_32_bits(le64_to_cpu(event->buffer)),
upper_32_bits(le64_to_cpu(event->buffer)),
le32_to_cpu(event->transfer_len),
@@ -2901,7 +2899,7 @@ err_out:
* Returns >0 for "possibly more events to process" (caller should call again),
* otherwise 0 if done. In future, <0 returns should indicate error code.
*/
-static int xhci_handle_event(struct xhci_hcd *xhci)
+static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
union xhci_trb *event;
int update_ptrs = 1;
@@ -2909,18 +2907,18 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
int ret;
/* Event ring hasn't been allocated yet. */
- if (!xhci->event_ring || !xhci->event_ring->dequeue) {
- xhci_err(xhci, "ERROR event ring not ready\n");
+ if (!ir || !ir->event_ring || !ir->event_ring->dequeue) {
+ xhci_err(xhci, "ERROR interrupter not ready\n");
return -ENOMEM;
}
- event = xhci->event_ring->dequeue;
+ event = ir->event_ring->dequeue;
/* Does the HC or OS own the TRB? */
if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
- xhci->event_ring->cycle_state)
+ ir->event_ring->cycle_state)
return 0;
- trace_xhci_handle_event(xhci->event_ring, &event->generic);
+ trace_xhci_handle_event(ir->event_ring, &event->generic);
/*
* Barrier between reading the TRB_CYCLE (valid) flag above and any
@@ -2935,11 +2933,11 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
handle_cmd_completion(xhci, &event->event_cmd);
break;
case TRB_PORT_STATUS:
- handle_port_status(xhci, event);
+ handle_port_status(xhci, ir, event);
update_ptrs = 0;
break;
case TRB_TRANSFER:
- ret = handle_tx_event(xhci, &event->trans_event);
+ ret = handle_tx_event(xhci, ir, &event->trans_event);
if (ret >= 0)
update_ptrs = 0;
break;
@@ -2963,7 +2961,7 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
if (update_ptrs)
/* Update SW event ring dequeue pointer */
- inc_deq(xhci, xhci->event_ring);
+ inc_deq(xhci, ir->event_ring);
/* Are there more items on the event ring? Caller will call us again to
* check.
@@ -2977,16 +2975,17 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
* - To avoid "Event Ring Full Error" condition
*/
static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
- union xhci_trb *event_ring_deq)
+ struct xhci_interrupter *ir,
+ union xhci_trb *event_ring_deq)
{
u64 temp_64;
dma_addr_t deq;
- temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+ temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
/* If necessary, update the HW's version of the event ring deq ptr. */
- if (event_ring_deq != xhci->event_ring->dequeue) {
- deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
- xhci->event_ring->dequeue);
+ if (event_ring_deq != ir->event_ring->dequeue) {
+ deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
+ ir->event_ring->dequeue);
if (deq == 0)
xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
/*
@@ -3004,7 +3003,7 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
/* Clear the event handler busy flag (RW1C) */
temp_64 |= ERST_EHB;
- xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
+ xhci_write_64(xhci, temp_64, &ir->ir_set->erst_dequeue);
}
/*
@@ -3016,6 +3015,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
union xhci_trb *event_ring_deq;
+ struct xhci_interrupter *ir;
irqreturn_t ret = IRQ_NONE;
u64 temp_64;
u32 status;
@@ -3053,11 +3053,13 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
status |= STS_EINT;
writel(status, &xhci->op_regs->status);
+ /* This is the handler of the primary interrupter */
+ ir = xhci->interrupter;
if (!hcd->msi_enabled) {
u32 irq_pending;
- irq_pending = readl(&xhci->ir_set->irq_pending);
+ irq_pending = readl(&ir->ir_set->irq_pending);
irq_pending |= IMAN_IP;
- writel(irq_pending, &xhci->ir_set->irq_pending);
+ writel(irq_pending, &ir->ir_set->irq_pending);
}
if (xhci->xhc_state & XHCI_STATE_DYING ||
@@ -3067,22 +3069,22 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
/* Clear the event handler busy flag (RW1C);
* the event ring should be empty.
*/
- temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+ temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
xhci_write_64(xhci, temp_64 | ERST_EHB,
- &xhci->ir_set->erst_dequeue);
+ &ir->ir_set->erst_dequeue);
ret = IRQ_HANDLED;
goto out;
}
- event_ring_deq = xhci->event_ring->dequeue;
+ event_ring_deq = ir->event_ring->dequeue;
/* FIXME this should be a delayed service routine
* that clears the EHB.
*/
- while (xhci_handle_event(xhci) > 0) {
+ while (xhci_handle_event(xhci, ir) > 0) {
if (event_loop++ < TRBS_PER_SEGMENT / 2)
continue;
- xhci_update_erst_dequeue(xhci, event_ring_deq);
- event_ring_deq = xhci->event_ring->dequeue;
+ xhci_update_erst_dequeue(xhci, ir, event_ring_deq);
+ event_ring_deq = ir->event_ring->dequeue;
/* ring is half-full, force isoc trbs to interrupt more often */
if (xhci->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN)
@@ -3091,7 +3093,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
event_loop = 0;
}
- xhci_update_erst_dequeue(xhci, event_ring_deq);
+ xhci_update_erst_dequeue(xhci, ir, event_ring_deq);
ret = IRQ_HANDLED;
out:
diff --git a/drivers/usb/host/xhci-rzv2m.c b/drivers/usb/host/xhci-rzv2m.c
new file mode 100644
index 000000000000..ec65b24eafa8
--- /dev/null
+++ b/drivers/usb/host/xhci-rzv2m.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * xHCI host controller driver for RZ/V2M
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ */
+
+#include <linux/usb/rzv2m_usb3drd.h>
+#include "xhci-plat.h"
+#include "xhci-rzv2m.h"
+
+#define RZV2M_USB3_INTEN 0x1044 /* Interrupt Enable */
+
+#define RZV2M_USB3_INT_XHC_ENA BIT(0)
+#define RZV2M_USB3_INT_HSE_ENA BIT(2)
+#define RZV2M_USB3_INT_ENA_VAL (RZV2M_USB3_INT_XHC_ENA \
+ | RZV2M_USB3_INT_HSE_ENA)
+
+int xhci_rzv2m_init_quirk(struct usb_hcd *hcd)
+{
+ struct device *dev = hcd->self.controller;
+
+ rzv2m_usb3drd_reset(dev->parent, true);
+
+ return 0;
+}
+
+void xhci_rzv2m_start(struct usb_hcd *hcd)
+{
+ u32 int_en;
+
+ if (hcd->regs) {
+ /* Interrupt Enable */
+ int_en = readl(hcd->regs + RZV2M_USB3_INTEN);
+ int_en |= RZV2M_USB3_INT_ENA_VAL;
+ writel(int_en, hcd->regs + RZV2M_USB3_INTEN);
+ }
+}
diff --git a/drivers/usb/host/xhci-rzv2m.h b/drivers/usb/host/xhci-rzv2m.h
new file mode 100644
index 000000000000..12448b0e8d5b
--- /dev/null
+++ b/drivers/usb/host/xhci-rzv2m.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __XHCI_RZV2M_H
+#define __XHCI_RZV2M_H
+
+#if IS_ENABLED(CONFIG_USB_XHCI_RZV2M)
+void xhci_rzv2m_start(struct usb_hcd *hcd);
+int xhci_rzv2m_init_quirk(struct usb_hcd *hcd);
+#else
+static inline void xhci_rzv2m_start(struct usb_hcd *hcd) {}
+static inline int xhci_rzv2m_init_quirk(struct usb_hcd *hcd)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif /* __XHCI_RZV2M_H */
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index bdb776553826..1ff22f675930 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -44,6 +44,9 @@
#define XUSB_CFG_4 0x010
#define XUSB_BASE_ADDR_SHIFT 15
#define XUSB_BASE_ADDR_MASK 0x1ffff
+#define XUSB_CFG_7 0x01c
+#define XUSB_BASE2_ADDR_SHIFT 16
+#define XUSB_BASE2_ADDR_MASK 0xffff
#define XUSB_CFG_16 0x040
#define XUSB_CFG_24 0x060
#define XUSB_CFG_AXI_CFG 0x0f8
@@ -75,6 +78,20 @@
#define MBOX_SMI_INTR_FW_HANG BIT(1)
#define MBOX_SMI_INTR_EN BIT(3)
+/* BAR2 registers */
+#define XUSB_BAR2_ARU_MBOX_CMD 0x004
+#define XUSB_BAR2_ARU_MBOX_DATA_IN 0x008
+#define XUSB_BAR2_ARU_MBOX_DATA_OUT 0x00c
+#define XUSB_BAR2_ARU_MBOX_OWNER 0x010
+#define XUSB_BAR2_ARU_SMI_INTR 0x014
+#define XUSB_BAR2_ARU_SMI_ARU_FW_SCRATCH_DATA0 0x01c
+#define XUSB_BAR2_ARU_IFRDMA_CFG0 0x0e0
+#define XUSB_BAR2_ARU_IFRDMA_CFG1 0x0e4
+#define XUSB_BAR2_ARU_IFRDMA_STREAMID_FIELD 0x0e8
+#define XUSB_BAR2_ARU_C11_CSBRANGE 0x9c
+#define XUSB_BAR2_ARU_FW_SCRATCH 0x1000
+#define XUSB_BAR2_CSB_BASE_ADDR 0x2000
+
/* IPFS registers */
#define IPFS_XUSB_HOST_MSI_BAR_SZ_0 0x0c0
#define IPFS_XUSB_HOST_MSI_AXI_BAR_ST_0 0x0c4
@@ -111,6 +128,9 @@
#define IMFILLRNG1_TAG_HI_SHIFT 16
#define XUSB_FALC_IMFILLCTL 0x158
+/* CSB ARU registers */
+#define XUSB_CSB_ARU_SCRATCH0 0x100100
+
/* MP CSB registers */
#define XUSB_CSB_MP_ILOAD_ATTR 0x101a00
#define XUSB_CSB_MP_ILOAD_BASE_LO 0x101a04
@@ -131,6 +151,9 @@
#define IMEM_BLOCK_SIZE 256
+#define FW_IOCTL_TYPE_SHIFT 24
+#define FW_IOCTL_CFGTBL_READ 17
+
struct tegra_xusb_fw_header {
__le32 boot_loadaddr_in_imem;
__le32 boot_codedfi_offset;
@@ -175,6 +198,7 @@ struct tegra_xusb_mbox_regs {
u16 data_in;
u16 data_out;
u16 owner;
+ u16 smi_intr;
};
struct tegra_xusb_context_soc {
@@ -189,6 +213,14 @@ struct tegra_xusb_context_soc {
} fpci;
};
+struct tegra_xusb;
+struct tegra_xusb_soc_ops {
+ u32 (*mbox_reg_readl)(struct tegra_xusb *tegra, unsigned int offset);
+ void (*mbox_reg_writel)(struct tegra_xusb *tegra, u32 value, unsigned int offset);
+ u32 (*csb_reg_readl)(struct tegra_xusb *tegra, unsigned int offset);
+ void (*csb_reg_writel)(struct tegra_xusb *tegra, u32 value, unsigned int offset);
+};
+
struct tegra_xusb_soc {
const char *firmware;
const char * const *supply_names;
@@ -205,11 +237,14 @@ struct tegra_xusb_soc {
} ports;
struct tegra_xusb_mbox_regs mbox;
+ const struct tegra_xusb_soc_ops *ops;
bool scale_ss_clock;
bool has_ipfs;
bool lpm_support;
bool otg_reset_sspi;
+
+ bool has_bar2;
};
struct tegra_xusb_context {
@@ -230,6 +265,8 @@ struct tegra_xusb {
void __iomem *ipfs_base;
void __iomem *fpci_base;
+ void __iomem *bar2_base;
+ struct resource *bar2;
const struct tegra_xusb_soc *soc;
@@ -274,6 +311,7 @@ struct tegra_xusb {
bool suspended;
struct tegra_xusb_context context;
+ u8 lp0_utmi_pad_mask;
};
static struct hc_driver __read_mostly tegra_xhci_hc_driver;
@@ -300,8 +338,34 @@ static inline void ipfs_writel(struct tegra_xusb *tegra, u32 value,
writel(value, tegra->ipfs_base + offset);
}
+static inline u32 bar2_readl(struct tegra_xusb *tegra, unsigned int offset)
+{
+ return readl(tegra->bar2_base + offset);
+}
+
+static inline void bar2_writel(struct tegra_xusb *tegra, u32 value,
+ unsigned int offset)
+{
+ writel(value, tegra->bar2_base + offset);
+}
+
static u32 csb_readl(struct tegra_xusb *tegra, unsigned int offset)
{
+ const struct tegra_xusb_soc_ops *ops = tegra->soc->ops;
+
+ return ops->csb_reg_readl(tegra, offset);
+}
+
+static void csb_writel(struct tegra_xusb *tegra, u32 value,
+ unsigned int offset)
+{
+ const struct tegra_xusb_soc_ops *ops = tegra->soc->ops;
+
+ ops->csb_reg_writel(tegra, value, offset);
+}
+
+static u32 fpci_csb_readl(struct tegra_xusb *tegra, unsigned int offset)
+{
u32 page = CSB_PAGE_SELECT(offset);
u32 ofs = CSB_PAGE_OFFSET(offset);
@@ -310,8 +374,8 @@ static u32 csb_readl(struct tegra_xusb *tegra, unsigned int offset)
return fpci_readl(tegra, XUSB_CFG_CSB_BASE_ADDR + ofs);
}
-static void csb_writel(struct tegra_xusb *tegra, u32 value,
- unsigned int offset)
+static void fpci_csb_writel(struct tegra_xusb *tegra, u32 value,
+ unsigned int offset)
{
u32 page = CSB_PAGE_SELECT(offset);
u32 ofs = CSB_PAGE_OFFSET(offset);
@@ -320,6 +384,26 @@ static void csb_writel(struct tegra_xusb *tegra, u32 value,
fpci_writel(tegra, value, XUSB_CFG_CSB_BASE_ADDR + ofs);
}
+static u32 bar2_csb_readl(struct tegra_xusb *tegra, unsigned int offset)
+{
+ u32 page = CSB_PAGE_SELECT(offset);
+ u32 ofs = CSB_PAGE_OFFSET(offset);
+
+ bar2_writel(tegra, page, XUSB_BAR2_ARU_C11_CSBRANGE);
+
+ return bar2_readl(tegra, XUSB_BAR2_CSB_BASE_ADDR + ofs);
+}
+
+static void bar2_csb_writel(struct tegra_xusb *tegra, u32 value,
+ unsigned int offset)
+{
+ u32 page = CSB_PAGE_SELECT(offset);
+ u32 ofs = CSB_PAGE_OFFSET(offset);
+
+ bar2_writel(tegra, page, XUSB_BAR2_ARU_C11_CSBRANGE);
+ bar2_writel(tegra, value, XUSB_BAR2_CSB_BASE_ADDR + ofs);
+}
+
static int tegra_xusb_set_ss_clk(struct tegra_xusb *tegra,
unsigned long rate)
{
@@ -451,6 +535,7 @@ static bool tegra_xusb_mbox_cmd_requires_ack(enum tegra_xusb_mbox_cmd cmd)
static int tegra_xusb_mbox_send(struct tegra_xusb *tegra,
const struct tegra_xusb_mbox_msg *msg)
{
+ const struct tegra_xusb_soc_ops *ops = tegra->soc->ops;
bool wait_for_idle = false;
u32 value;
@@ -459,15 +544,15 @@ static int tegra_xusb_mbox_send(struct tegra_xusb *tegra,
* ACK/NAK messages.
*/
if (!(msg->cmd == MBOX_CMD_ACK || msg->cmd == MBOX_CMD_NAK)) {
- value = fpci_readl(tegra, tegra->soc->mbox.owner);
+ value = ops->mbox_reg_readl(tegra, tegra->soc->mbox.owner);
if (value != MBOX_OWNER_NONE) {
dev_err(tegra->dev, "mailbox is busy\n");
return -EBUSY;
}
- fpci_writel(tegra, MBOX_OWNER_SW, tegra->soc->mbox.owner);
+ ops->mbox_reg_writel(tegra, MBOX_OWNER_SW, tegra->soc->mbox.owner);
- value = fpci_readl(tegra, tegra->soc->mbox.owner);
+ value = ops->mbox_reg_readl(tegra, tegra->soc->mbox.owner);
if (value != MBOX_OWNER_SW) {
dev_err(tegra->dev, "failed to acquire mailbox\n");
return -EBUSY;
@@ -477,17 +562,17 @@ static int tegra_xusb_mbox_send(struct tegra_xusb *tegra,
}
value = tegra_xusb_mbox_pack(msg);
- fpci_writel(tegra, value, tegra->soc->mbox.data_in);
+ ops->mbox_reg_writel(tegra, value, tegra->soc->mbox.data_in);
- value = fpci_readl(tegra, tegra->soc->mbox.cmd);
+ value = ops->mbox_reg_readl(tegra, tegra->soc->mbox.cmd);
value |= MBOX_INT_EN | MBOX_DEST_FALC;
- fpci_writel(tegra, value, tegra->soc->mbox.cmd);
+ ops->mbox_reg_writel(tegra, value, tegra->soc->mbox.cmd);
if (wait_for_idle) {
unsigned long timeout = jiffies + msecs_to_jiffies(250);
while (time_before(jiffies, timeout)) {
- value = fpci_readl(tegra, tegra->soc->mbox.owner);
+ value = ops->mbox_reg_readl(tegra, tegra->soc->mbox.owner);
if (value == MBOX_OWNER_NONE)
break;
@@ -495,7 +580,7 @@ static int tegra_xusb_mbox_send(struct tegra_xusb *tegra,
}
if (time_after(jiffies, timeout))
- value = fpci_readl(tegra, tegra->soc->mbox.owner);
+ value = ops->mbox_reg_readl(tegra, tegra->soc->mbox.owner);
if (value != MBOX_OWNER_NONE)
return -ETIMEDOUT;
@@ -507,11 +592,12 @@ static int tegra_xusb_mbox_send(struct tegra_xusb *tegra,
static irqreturn_t tegra_xusb_mbox_irq(int irq, void *data)
{
struct tegra_xusb *tegra = data;
+ const struct tegra_xusb_soc_ops *ops = tegra->soc->ops;
u32 value;
/* clear mailbox interrupts */
- value = fpci_readl(tegra, XUSB_CFG_ARU_SMI_INTR);
- fpci_writel(tegra, value, XUSB_CFG_ARU_SMI_INTR);
+ value = ops->mbox_reg_readl(tegra, tegra->soc->mbox.smi_intr);
+ ops->mbox_reg_writel(tegra, value, tegra->soc->mbox.smi_intr);
if (value & MBOX_SMI_INTR_FW_HANG)
dev_err(tegra->dev, "controller firmware hang\n");
@@ -664,6 +750,7 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra,
static irqreturn_t tegra_xusb_mbox_thread(int irq, void *data)
{
struct tegra_xusb *tegra = data;
+ const struct tegra_xusb_soc_ops *ops = tegra->soc->ops;
struct tegra_xusb_mbox_msg msg;
u32 value;
@@ -672,16 +759,16 @@ static irqreturn_t tegra_xusb_mbox_thread(int irq, void *data)
if (pm_runtime_suspended(tegra->dev) || tegra->suspended)
goto out;
- value = fpci_readl(tegra, tegra->soc->mbox.data_out);
+ value = ops->mbox_reg_readl(tegra, tegra->soc->mbox.data_out);
tegra_xusb_mbox_unpack(&msg, value);
- value = fpci_readl(tegra, tegra->soc->mbox.cmd);
+ value = ops->mbox_reg_readl(tegra, tegra->soc->mbox.cmd);
value &= ~MBOX_DEST_SMI;
- fpci_writel(tegra, value, tegra->soc->mbox.cmd);
+ ops->mbox_reg_writel(tegra, value, tegra->soc->mbox.cmd);
/* clear mailbox owner if no ACK/NAK is required */
if (!tegra_xusb_mbox_cmd_requires_ack(msg.cmd))
- fpci_writel(tegra, MBOX_OWNER_NONE, tegra->soc->mbox.owner);
+ ops->mbox_reg_writel(tegra, MBOX_OWNER_NONE, tegra->soc->mbox.owner);
tegra_xusb_mbox_handle(tegra, &msg);
@@ -709,6 +796,15 @@ static void tegra_xusb_config(struct tegra_xusb *tegra)
value |= regs & (XUSB_BASE_ADDR_MASK << XUSB_BASE_ADDR_SHIFT);
fpci_writel(tegra, value, XUSB_CFG_4);
+ /* Program BAR2 space */
+ if (tegra->bar2) {
+ value = fpci_readl(tegra, XUSB_CFG_7);
+ value &= ~(XUSB_BASE2_ADDR_MASK << XUSB_BASE2_ADDR_SHIFT);
+ value |= tegra->bar2->start &
+ (XUSB_BASE2_ADDR_MASK << XUSB_BASE2_ADDR_SHIFT);
+ fpci_writel(tegra, value, XUSB_CFG_7);
+ }
+
usleep_range(100, 200);
/* Enable bus master */
@@ -881,21 +977,36 @@ static int tegra_xusb_request_firmware(struct tegra_xusb *tegra)
return 0;
}
-static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
+static int tegra_xusb_wait_for_falcon(struct tegra_xusb *tegra)
+{
+ struct xhci_cap_regs __iomem *cap_regs;
+ struct xhci_op_regs __iomem *op_regs;
+ int ret;
+ u32 value;
+
+ cap_regs = tegra->regs;
+ op_regs = tegra->regs + HC_LENGTH(readl(&cap_regs->hc_capbase));
+
+ ret = readl_poll_timeout(&op_regs->status, value, !(value & STS_CNR), 1000, 200000);
+
+ if (ret)
+ dev_err(tegra->dev, "XHCI Controller not ready. Falcon state: 0x%x\n",
+ csb_readl(tegra, XUSB_FALC_CPUCTL));
+
+ return ret;
+}
+
+static int tegra_xusb_load_firmware_rom(struct tegra_xusb *tegra)
{
unsigned int code_tag_blocks, code_size_blocks, code_blocks;
- struct xhci_cap_regs __iomem *cap = tegra->regs;
struct tegra_xusb_fw_header *header;
struct device *dev = tegra->dev;
- struct xhci_op_regs __iomem *op;
- unsigned long timeout;
time64_t timestamp;
u64 address;
u32 value;
int err;
header = (struct tegra_xusb_fw_header *)tegra->fw.virt;
- op = tegra->regs + HC_LENGTH(readl(&cap->hc_capbase));
if (csb_readl(tegra, XUSB_CSB_MP_ILOAD_BASE_LO) != 0) {
dev_info(dev, "Firmware already loaded, Falcon state %#x\n",
@@ -968,30 +1079,54 @@ static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
/* Boot Falcon CPU and wait for USBSTS_CNR to get cleared. */
csb_writel(tegra, CPUCTL_STARTCPU, XUSB_FALC_CPUCTL);
- timeout = jiffies + msecs_to_jiffies(200);
+ if (tegra_xusb_wait_for_falcon(tegra))
+ return -EIO;
- do {
- value = readl(&op->status);
- if ((value & STS_CNR) == 0)
- break;
+ timestamp = le32_to_cpu(header->fwimg_created_time);
+
+ dev_info(dev, "Firmware timestamp: %ptTs UTC\n", &timestamp);
+
+ return 0;
+}
+
+static u32 tegra_xusb_read_firmware_header(struct tegra_xusb *tegra, u32 offset)
+{
+ /*
+ * We only accept reads of the firmware config table.
+ * The offset must not exceed the firmware header structure.
+ */
+ if (offset >= sizeof(struct tegra_xusb_fw_header))
+ return 0;
- usleep_range(1000, 2000);
- } while (time_is_after_jiffies(timeout));
+ bar2_writel(tegra, (FW_IOCTL_CFGTBL_READ << FW_IOCTL_TYPE_SHIFT) | offset,
+ XUSB_BAR2_ARU_FW_SCRATCH);
+ return bar2_readl(tegra, XUSB_BAR2_ARU_SMI_ARU_FW_SCRATCH_DATA0);
+}
- value = readl(&op->status);
- if (value & STS_CNR) {
- value = csb_readl(tegra, XUSB_FALC_CPUCTL);
- dev_err(dev, "XHCI controller not read: %#010x\n", value);
+static int tegra_xusb_init_ifr_firmware(struct tegra_xusb *tegra)
+{
+ time64_t timestamp;
+
+ if (tegra_xusb_wait_for_falcon(tegra))
return -EIO;
- }
- timestamp = le32_to_cpu(header->fwimg_created_time);
+#define offsetof_32(X, Y) ((u8)(offsetof(X, Y) / sizeof(__le32)))
+ timestamp = tegra_xusb_read_firmware_header(tegra, offsetof_32(struct tegra_xusb_fw_header,
+ fwimg_created_time) << 2);
- dev_info(dev, "Firmware timestamp: %ptTs UTC\n", &timestamp);
+ dev_info(tegra->dev, "Firmware timestamp: %ptTs UTC\n", &timestamp);
return 0;
}
+static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
+{
+ if (!tegra->soc->firmware)
+ return tegra_xusb_init_ifr_firmware(tegra);
+ else
+ return tegra_xusb_load_firmware_rom(tegra);
+}
+
static void tegra_xusb_powerdomain_remove(struct device *dev,
struct tegra_xusb *tegra)
{
@@ -1435,6 +1570,10 @@ static int tegra_xusb_probe(struct platform_device *pdev)
tegra->ipfs_base = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(tegra->ipfs_base))
return PTR_ERR(tegra->ipfs_base);
+ } else if (tegra->soc->has_bar2) {
+ tegra->bar2_base = devm_platform_get_and_ioremap_resource(pdev, 2, &tegra->bar2);
+ if (IS_ERR(tegra->bar2_base))
+ return PTR_ERR(tegra->bar2_base);
}
tegra->xhci_irq = platform_get_irq(pdev, 0);
@@ -1651,10 +1790,13 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto disable_phy;
}
- err = tegra_xusb_request_firmware(tegra);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to request firmware: %d\n", err);
- goto disable_phy;
+ if (tegra->soc->firmware) {
+ err = tegra_xusb_request_firmware(tegra);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "failed to request firmware: %d\n", err);
+ goto disable_phy;
+ }
}
err = tegra_xusb_unpowergate_partitions(tegra);
@@ -1951,10 +2093,24 @@ static void tegra_xhci_disable_phy_wake(struct tegra_xusb *tegra)
struct tegra_xusb_padctl *padctl = tegra->padctl;
unsigned int i;
+ for (i = 0; i < tegra->num_usb_phys; i++) {
+ struct phy *phy = tegra_xusb_get_phy(tegra, "usb2", i);
+
+ if (!phy)
+ continue;
+
+ if (tegra_xusb_padctl_remote_wake_detected(padctl, phy))
+ tegra_phy_xusb_utmi_pad_power_on(phy);
+ }
+
for (i = 0; i < tegra->num_phys; i++) {
if (!tegra->phys[i])
continue;
+ if (tegra_xusb_padctl_remote_wake_detected(padctl, tegra->phys[i]))
+ dev_dbg(tegra->dev, "%pOF remote wake detected\n",
+ tegra->phys[i]->dev.of_node);
+
tegra_xusb_padctl_disable_phy_wake(padctl, tegra->phys[i]);
}
}
@@ -1972,6 +2128,28 @@ static void tegra_xhci_disable_phy_sleepwalk(struct tegra_xusb *tegra)
}
}
+static void tegra_xhci_program_utmi_power_lp0_exit(struct tegra_xusb *tegra)
+{
+ unsigned int i, index_to_usb2;
+ struct phy *phy;
+
+ for (i = 0; i < tegra->soc->num_types; i++) {
+ if (strcmp(tegra->soc->phy_types[i].name, "usb2") == 0)
+ index_to_usb2 = i;
+ }
+
+ for (i = 0; i < tegra->num_usb_phys; i++) {
+ if (!is_host_mode_phy(tegra, index_to_usb2, i))
+ continue;
+
+ phy = tegra_xusb_get_phy(tegra, "usb2", i);
+ if (tegra->lp0_utmi_pad_mask & BIT(i))
+ tegra_phy_xusb_utmi_pad_power_on(phy);
+ else
+ tegra_phy_xusb_utmi_pad_power_down(phy);
+ }
+}
+
static int tegra_xusb_enter_elpg(struct tegra_xusb *tegra, bool runtime)
{
struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
@@ -1980,6 +2158,7 @@ static int tegra_xusb_enter_elpg(struct tegra_xusb *tegra, bool runtime)
unsigned int i;
int err;
u32 usbcmd;
+ u32 portsc;
dev_dbg(dev, "entering ELPG\n");
@@ -1993,6 +2172,15 @@ static int tegra_xusb_enter_elpg(struct tegra_xusb *tegra, bool runtime)
goto out;
}
+ for (i = 0; i < tegra->num_usb_phys; i++) {
+ if (!xhci->usb2_rhub.ports[i])
+ continue;
+ portsc = readl(xhci->usb2_rhub.ports[i]->addr);
+ tegra->lp0_utmi_pad_mask &= ~BIT(i);
+ if (((portsc & PORT_PLS_MASK) == XDEV_U3) || ((portsc & DEV_SPEED_MASK) == XDEV_FS))
+ tegra->lp0_utmi_pad_mask |= BIT(i);
+ }
+
err = xhci_suspend(xhci, wakeup);
if (err < 0) {
dev_err(tegra->dev, "failed to suspend XHCI: %d\n", err);
@@ -2066,6 +2254,8 @@ static int tegra_xusb_exit_elpg(struct tegra_xusb *tegra, bool runtime)
phy_power_on(tegra->phys[i]);
}
+ if (tegra->suspended)
+ tegra_xhci_program_utmi_power_lp0_exit(tegra);
tegra_xusb_config(tegra);
tegra_xusb_restore_context(tegra);
@@ -2271,6 +2461,13 @@ static const struct tegra_xusb_context_soc tegra124_xusb_context = {
},
};
+static const struct tegra_xusb_soc_ops tegra124_ops = {
+ .mbox_reg_readl = &fpci_readl,
+ .mbox_reg_writel = &fpci_writel,
+ .csb_reg_readl = &fpci_csb_readl,
+ .csb_reg_writel = &fpci_csb_writel,
+};
+
static const struct tegra_xusb_soc tegra124_soc = {
.firmware = "nvidia/tegra124/xusb.bin",
.supply_names = tegra124_supply_names,
@@ -2286,11 +2483,13 @@ static const struct tegra_xusb_soc tegra124_soc = {
.scale_ss_clock = true,
.has_ipfs = true,
.otg_reset_sspi = false,
+ .ops = &tegra124_ops,
.mbox = {
.cmd = 0xe4,
.data_in = 0xe8,
.data_out = 0xec,
.owner = 0xf0,
+ .smi_intr = XUSB_CFG_ARU_SMI_INTR,
},
};
MODULE_FIRMWARE("nvidia/tegra124/xusb.bin");
@@ -2322,11 +2521,13 @@ static const struct tegra_xusb_soc tegra210_soc = {
.scale_ss_clock = false,
.has_ipfs = true,
.otg_reset_sspi = true,
+ .ops = &tegra124_ops,
.mbox = {
.cmd = 0xe4,
.data_in = 0xe8,
.data_out = 0xec,
.owner = 0xf0,
+ .smi_intr = XUSB_CFG_ARU_SMI_INTR,
},
};
MODULE_FIRMWARE("nvidia/tegra210/xusb.bin");
@@ -2363,11 +2564,13 @@ static const struct tegra_xusb_soc tegra186_soc = {
.scale_ss_clock = false,
.has_ipfs = false,
.otg_reset_sspi = false,
+ .ops = &tegra124_ops,
.mbox = {
.cmd = 0xe4,
.data_in = 0xe8,
.data_out = 0xec,
.owner = 0xf0,
+ .smi_intr = XUSB_CFG_ARU_SMI_INTR,
},
.lpm_support = true,
};
@@ -2394,21 +2597,56 @@ static const struct tegra_xusb_soc tegra194_soc = {
.scale_ss_clock = false,
.has_ipfs = false,
.otg_reset_sspi = false,
+ .ops = &tegra124_ops,
.mbox = {
.cmd = 0x68,
.data_in = 0x6c,
.data_out = 0x70,
.owner = 0x74,
+ .smi_intr = XUSB_CFG_ARU_SMI_INTR,
},
.lpm_support = true,
};
MODULE_FIRMWARE("nvidia/tegra194/xusb.bin");
+static const struct tegra_xusb_soc_ops tegra234_ops = {
+ .mbox_reg_readl = &bar2_readl,
+ .mbox_reg_writel = &bar2_writel,
+ .csb_reg_readl = &bar2_csb_readl,
+ .csb_reg_writel = &bar2_csb_writel,
+};
+
+static const struct tegra_xusb_soc tegra234_soc = {
+ .supply_names = tegra194_supply_names,
+ .num_supplies = ARRAY_SIZE(tegra194_supply_names),
+ .phy_types = tegra194_phy_types,
+ .num_types = ARRAY_SIZE(tegra194_phy_types),
+ .context = &tegra186_xusb_context,
+ .ports = {
+ .usb3 = { .offset = 0, .count = 4, },
+ .usb2 = { .offset = 4, .count = 4, },
+ },
+ .scale_ss_clock = false,
+ .has_ipfs = false,
+ .otg_reset_sspi = false,
+ .ops = &tegra234_ops,
+ .mbox = {
+ .cmd = XUSB_BAR2_ARU_MBOX_CMD,
+ .data_in = XUSB_BAR2_ARU_MBOX_DATA_IN,
+ .data_out = XUSB_BAR2_ARU_MBOX_DATA_OUT,
+ .owner = XUSB_BAR2_ARU_MBOX_OWNER,
+ .smi_intr = XUSB_BAR2_ARU_SMI_INTR,
+ },
+ .lpm_support = true,
+ .has_bar2 = true,
+};
+
static const struct of_device_id tegra_xusb_of_match[] = {
{ .compatible = "nvidia,tegra124-xusb", .data = &tegra124_soc },
{ .compatible = "nvidia,tegra210-xusb", .data = &tegra210_soc },
{ .compatible = "nvidia,tegra186-xusb", .data = &tegra186_soc },
{ .compatible = "nvidia,tegra194-xusb", .data = &tegra194_soc },
+ { .compatible = "nvidia,tegra234-xusb", .data = &tegra234_soc },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_xusb_of_match);
@@ -2437,8 +2675,84 @@ static int tegra_xhci_setup(struct usb_hcd *hcd)
return xhci_gen_setup(hcd, tegra_xhci_quirks);
}
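+/*
+ * Wrapper around xhci_hub_control() that powers the Tegra UTMI pads up or
+ * down around port suspend, resume, reset and test-mode transitions.
+ */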
+static int tegra_xhci_hub_control(struct usb_hcd *hcd, u16 type_req, u16 value, u16 index,
+ char *buf, u16 length)
+{
+ struct tegra_xusb *tegra = dev_get_drvdata(hcd->self.controller);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct xhci_hub *rhub;
+ struct xhci_bus_state *bus_state;
+ int port = (index & 0xff) - 1;
+ unsigned int i;
+ struct xhci_port **ports;
+ u32 portsc;
+ int ret;
+ struct phy *phy;
+
+ rhub = &xhci->usb2_rhub;
+ bus_state = &rhub->bus_state;
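+ /* Power the UTMI pad back on for any USB2 port currently driving resume signaling */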
+ if (bus_state->resuming_ports && hcd->speed == HCD_USB2) {
+ ports = rhub->ports;
+ i = rhub->num_ports;
+ while (i--) {
+ if (!test_bit(i, &bus_state->resuming_ports))
+ continue;
+ portsc = readl(ports[i]->addr);
+ if ((portsc & PORT_PLS_MASK) == XDEV_RESUME)
+ tegra_phy_xusb_utmi_pad_power_on(
+ tegra_xusb_get_phy(tegra, "usb2", (int) i));
+ }
+ }
+
+ if (hcd->speed == HCD_USB2) {
+ phy = tegra_xusb_get_phy(tegra, "usb2", port);
+ if ((type_req == ClearPortFeature) && (value == USB_PORT_FEAT_SUSPEND)) {
+ if (!index || index > rhub->num_ports)
+ return -EPIPE;
+ tegra_phy_xusb_utmi_pad_power_on(phy);
+ }
+ if ((type_req == SetPortFeature) && (value == USB_PORT_FEAT_RESET)) {
+ if (!index || index > rhub->num_ports)
+ return -EPIPE;
+ ports = rhub->ports;
+ portsc = readl(ports[port]->addr);
+ if (portsc & PORT_CONNECT)
+ tegra_phy_xusb_utmi_pad_power_on(phy);
+ }
+ }
+
+ ret = xhci_hub_control(hcd, type_req, value, index, buf, length);
+ if (ret < 0)
+ return ret;
+
+ if (hcd->speed == HCD_USB2) {
+ /* Reuse the phy looked up before the xhci_hub_control() call */
+ if ((type_req == SetPortFeature) && (value == USB_PORT_FEAT_SUSPEND))
+ /* We don't suspend the PAD while HNP role swap happens on the OTG port */
+ if (!((hcd->self.otg_port == (port + 1)) && hcd->self.b_hnp_enable))
+ tegra_phy_xusb_utmi_pad_power_down(phy);
+
+ if ((type_req == ClearPortFeature) && (value == USB_PORT_FEAT_C_CONNECTION)) {
+ ports = rhub->ports;
+ portsc = readl(ports[port]->addr);
+ if (!(portsc & PORT_CONNECT)) {
+ /*
+ * We don't suspend the PAD while HNP role swap happens on the
+ * OTG port
+ */
+ if (!((hcd->self.otg_port == (port + 1)) && hcd->self.b_hnp_enable))
+ tegra_phy_xusb_utmi_pad_power_down(phy);
+ }
+ }
+ if ((type_req == SetPortFeature) && (value == USB_PORT_FEAT_TEST))
+ tegra_phy_xusb_utmi_pad_power_on(phy);
+ }
+
+ return ret;
+}
+
static const struct xhci_driver_overrides tegra_xhci_overrides __initconst = {
.reset = tegra_xhci_setup,
+ .hub_control = tegra_xhci_hub_control,
};
static int __init tegra_xusb_init(void)
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 2b280beb0011..6183ce8574b1 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -292,6 +292,32 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
xhci_info(xhci, "Fault detected\n");
}
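+/* Helpers to set or clear the interrupt enable bit (IMAN) of an interrupter register set */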
+static int xhci_enable_interrupter(struct xhci_interrupter *ir)
+{
+ u32 iman;
+
+ if (!ir || !ir->ir_set)
+ return -EINVAL;
+
+ iman = readl(&ir->ir_set->irq_pending);
+ writel(ER_IRQ_ENABLE(iman), &ir->ir_set->irq_pending);
+
+ return 0;
+}
+
+static int xhci_disable_interrupter(struct xhci_interrupter *ir)
+{
+ u32 iman;
+
+ if (!ir || !ir->ir_set)
+ return -EINVAL;
+
+ iman = readl(&ir->ir_set->irq_pending);
+ writel(ER_IRQ_DISABLE(iman), &ir->ir_set->irq_pending);
+
+ return 0;
+}
+
#ifdef CONFIG_USB_PCI
/*
* Set up MSI
@@ -610,9 +636,9 @@ static int xhci_init(struct usb_hcd *hcd)
/*-------------------------------------------------------------------------*/
-
static int xhci_run_finished(struct xhci_hcd *xhci)
{
+ struct xhci_interrupter *ir = xhci->interrupter;
unsigned long flags;
u32 temp;
@@ -628,8 +654,7 @@ static int xhci_run_finished(struct xhci_hcd *xhci)
writel(temp, &xhci->op_regs->command);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable primary interrupter");
- temp = readl(&xhci->ir_set->irq_pending);
- writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
+ xhci_enable_interrupter(ir);
if (xhci_start(xhci)) {
xhci_halt(xhci);
@@ -665,7 +690,7 @@ int xhci_run(struct usb_hcd *hcd)
u64 temp_64;
int ret;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-
+ struct xhci_interrupter *ir = xhci->interrupter;
/* Start the xHCI host controller running only after the USB 2.0 roothub
* is setup.
*/
@@ -680,17 +705,17 @@ int xhci_run(struct usb_hcd *hcd)
if (ret)
return ret;
- temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+ temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
temp_64 &= ~ERST_PTR_MASK;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"ERST deq = 64'h%0lx", (long unsigned int) temp_64);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Set the interrupt modulation register");
- temp = readl(&xhci->ir_set->irq_control);
+ temp = readl(&ir->ir_set->irq_control);
temp &= ~ER_IRQ_INTERVAL_MASK;
temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
- writel(temp, &xhci->ir_set->irq_control);
+ writel(temp, &ir->ir_set->irq_control);
if (xhci->quirks & XHCI_NEC_HOST) {
struct xhci_command *command;
@@ -733,6 +758,7 @@ static void xhci_stop(struct usb_hcd *hcd)
{
u32 temp;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct xhci_interrupter *ir = xhci->interrupter;
mutex_lock(&xhci->mutex);
@@ -769,8 +795,7 @@ static void xhci_stop(struct usb_hcd *hcd)
"// Disabling event ring interrupts");
temp = readl(&xhci->op_regs->status);
writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
- temp = readl(&xhci->ir_set->irq_pending);
- writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
+ xhci_disable_interrupter(ir);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
xhci_mem_cleanup(xhci);
@@ -832,28 +857,36 @@ EXPORT_SYMBOL_GPL(xhci_shutdown);
#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
+ struct xhci_interrupter *ir = xhci->interrupter;
+
xhci->s3.command = readl(&xhci->op_regs->command);
xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
- xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
- xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
- xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
- xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
- xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
+
+ if (!ir)
+ return;
+
+ ir->s3_erst_size = readl(&ir->ir_set->erst_size);
+ ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
+ ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
+ ir->s3_irq_pending = readl(&ir->ir_set->irq_pending);
+ ir->s3_irq_control = readl(&ir->ir_set->irq_control);
}
static void xhci_restore_registers(struct xhci_hcd *xhci)
{
+ struct xhci_interrupter *ir = xhci->interrupter;
+
writel(xhci->s3.command, &xhci->op_regs->command);
writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
- writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
- xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
- xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
- writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
- writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
+ writel(ir->s3_erst_size, &ir->ir_set->erst_size);
+ xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base);
+ xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue);
+ writel(ir->s3_irq_pending, &ir->ir_set->irq_pending);
+ writel(ir->s3_irq_control, &ir->ir_set->irq_control);
}
static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
@@ -1218,8 +1251,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
xhci_dbg(xhci, "// Disabling event ring interrupts\n");
temp = readl(&xhci->op_regs->status);
writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
- temp = readl(&xhci->ir_set->irq_pending);
- writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
+ xhci_disable_interrupter(xhci->interrupter);
xhci_dbg(xhci, "cleaning up memory\n");
xhci_mem_cleanup(xhci);
@@ -5334,6 +5366,11 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
if (xhci->hci_version > 0x100)
xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
+ /* xhci-plat or xhci-pci might have set max_interrupters already */
+ if ((!xhci->max_interrupters) ||
+ xhci->max_interrupters > HCS_MAX_INTRS(xhci->hcs_params1))
+ xhci->max_interrupters = HCS_MAX_INTRS(xhci->hcs_params1);
+
xhci->quirks |= quirks;
get_quirks(dev, xhci);
@@ -5518,6 +5555,8 @@ void xhci_init_driver(struct hc_driver *drv,
drv->reset_bandwidth = over->reset_bandwidth;
if (over->update_hub_device)
drv->update_hub_device = over->update_hub_device;
+ if (over->hub_control)
+ drv->hub_control = over->hub_control;
}
}
EXPORT_SYMBOL_GPL(xhci_init_driver);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index dcee7f3207ad..786002bb35db 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -513,6 +513,9 @@ struct xhci_intr_reg {
/* Preserve bits 16:31 of erst_size */
#define ERST_SIZE_MASK (0xffff << 16)
+/* erst_base bitmasks */
+#define ERST_BASE_RSVDP (0x3f)
+
/* erst_dequeue bitmasks */
/* Dequeue ERST Segment Index (DESI) - Segment number (or alias)
* where the current dequeue pointer lies. This is an optional HW hint.
@@ -1684,11 +1687,6 @@ struct s3_save {
u32 dev_nt;
u64 dcbaa_ptr;
u32 config_reg;
- u32 irq_pending;
- u32 irq_control;
- u32 erst_size;
- u64 erst_base;
- u64 erst_dequeue;
};
/* Use for lpm */
@@ -1706,16 +1704,22 @@ struct xhci_bus_state {
u32 port_c_suspend;
u32 suspended_ports;
u32 port_remote_wakeup;
- unsigned long resume_done[USB_MAXCHILDREN];
/* which ports have started to resume */
unsigned long resuming_ports;
- /* Which ports are waiting on RExit to U0 transition. */
- unsigned long rexit_ports;
- struct completion rexit_done[USB_MAXCHILDREN];
- struct completion u3exit_done[USB_MAXCHILDREN];
};
-
+struct xhci_interrupter {
+ struct xhci_ring *event_ring;
+ struct xhci_erst erst;
+ struct xhci_intr_reg __iomem *ir_set;
+ unsigned int intr_num;
+ /* For interrupter registers save and restore over suspend/resume */
+ u32 s3_irq_pending;
+ u32 s3_irq_control;
+ u32 s3_erst_size;
+ u64 s3_erst_base;
+ u64 s3_erst_dequeue;
+};
/*
* It can take up to 20 ms to transition from RExit to U0 on the
* Intel Lynx Point LP xHCI host.
@@ -1736,6 +1740,10 @@ struct xhci_port {
struct xhci_hub *rhub;
struct xhci_port_cap *port_cap;
unsigned int lpm_incapable:1;
+ unsigned long resume_timestamp;
+ bool rexit_active;
+ struct completion rexit_done;
+ struct completion u3exit_done;
};
struct xhci_hub {
@@ -1758,8 +1766,6 @@ struct xhci_hcd {
struct xhci_op_regs __iomem *op_regs;
struct xhci_run_regs __iomem *run_regs;
struct xhci_doorbell_array __iomem *dba;
- /* Our HCD's current interrupter register set */
- struct xhci_intr_reg __iomem *ir_set;
/* Cached register copies of read-only HC data */
__u32 hcs_params1;
@@ -1774,7 +1780,7 @@ struct xhci_hcd {
u8 sbrn;
u16 hci_version;
u8 max_slots;
- u8 max_interrupters;
+ u16 max_interrupters;
u8 max_ports;
u8 isoc_threshold;
/* imod_interval in ns (I * 250ns) */
@@ -1794,6 +1800,7 @@ struct xhci_hcd {
struct reset_control *reset;
/* data structures */
struct xhci_device_context_array *dcbaa;
+ struct xhci_interrupter *interrupter;
struct xhci_ring *cmd_ring;
unsigned int cmd_ring_state;
#define CMD_RING_STATE_RUNNING (1 << 0)
@@ -1804,8 +1811,7 @@ struct xhci_hcd {
struct delayed_work cmd_timer;
struct completion cmd_ring_stop_completion;
struct xhci_command *current_cmd;
- struct xhci_ring *event_ring;
- struct xhci_erst erst;
+
/* Scratchpad */
struct xhci_scratchpad *scratchpad;
@@ -1946,6 +1952,8 @@ struct xhci_driver_overrides {
void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
int (*update_hub_device)(struct usb_hcd *hcd, struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags);
+ int (*hub_control)(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength);
};
#define XHCI_CFC_DELAY 10
diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
index 969c4c4f2ae9..5402e4b7267b 100644
--- a/drivers/usb/misc/onboard_usb_hub.c
+++ b/drivers/usb/misc/onboard_usb_hub.c
@@ -333,6 +333,7 @@ static struct platform_driver onboard_hub_driver = {
#define VENDOR_ID_MICROCHIP 0x0424
#define VENDOR_ID_REALTEK 0x0bda
#define VENDOR_ID_TI 0x0451
+#define VENDOR_ID_VIA 0x2109
/*
* Returns the onboard_hub platform device that is associated with the USB
@@ -407,6 +408,7 @@ static void onboard_hub_usbdev_disconnect(struct usb_device *udev)
static const struct usb_device_id onboard_hub_id_table[] = {
{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0608) }, /* Genesys Logic GL850G USB 2.0 */
+ { USB_DEVICE(VENDOR_ID_GENESYS, 0x0610) }, /* Genesys Logic GL852G USB 2.0 */
{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2514) }, /* USB2514B USB 2.0 */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x5411) }, /* RTS5411 USB 2.1 */
@@ -414,6 +416,8 @@ static const struct usb_device_id onboard_hub_id_table[] = {
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x5414) }, /* RTS5414 USB 2.1 */
{ USB_DEVICE(VENDOR_ID_TI, 0x8140) }, /* TI USB8041 3.0 */
{ USB_DEVICE(VENDOR_ID_TI, 0x8142) }, /* TI USB8041 2.0 */
+ { USB_DEVICE(VENDOR_ID_VIA, 0x0817) }, /* VIA VL817 3.1 */
+ { USB_DEVICE(VENDOR_ID_VIA, 0x2817) }, /* VIA VL817 2.0 */
{}
};
MODULE_DEVICE_TABLE(usb, onboard_hub_id_table);
diff --git a/drivers/usb/misc/onboard_usb_hub.h b/drivers/usb/misc/onboard_usb_hub.h
index 62129a6a1ba5..0a943a154649 100644
--- a/drivers/usb/misc/onboard_usb_hub.h
+++ b/drivers/usb/misc/onboard_usb_hub.h
@@ -26,15 +26,26 @@ static const struct onboard_hub_pdata genesys_gl850g_data = {
.reset_us = 3,
};
+static const struct onboard_hub_pdata genesys_gl852g_data = {
+ .reset_us = 50,
+};
+
+static const struct onboard_hub_pdata vialab_vl817_data = {
+ .reset_us = 10,
+};
+
static const struct of_device_id onboard_hub_match[] = {
{ .compatible = "usb424,2514", .data = &microchip_usb424_data, },
{ .compatible = "usb451,8140", .data = &ti_tusb8041_data, },
{ .compatible = "usb451,8142", .data = &ti_tusb8041_data, },
{ .compatible = "usb5e3,608", .data = &genesys_gl850g_data, },
+ { .compatible = "usb5e3,610", .data = &genesys_gl852g_data, },
{ .compatible = "usbbda,411", .data = &realtek_rts5411_data, },
{ .compatible = "usbbda,5411", .data = &realtek_rts5411_data, },
{ .compatible = "usbbda,414", .data = &realtek_rts5411_data, },
{ .compatible = "usbbda,5414", .data = &realtek_rts5411_data, },
+ { .compatible = "usb2109,817", .data = &vialab_vl817_data, },
+ { .compatible = "usb2109,2817", .data = &vialab_vl817_data, },
{}
};
diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c
index 80236e7b0895..c0264d5426bf 100644
--- a/drivers/usb/mtu3/mtu3_gadget.c
+++ b/drivers/usb/mtu3/mtu3_gadget.c
@@ -133,10 +133,9 @@ static int mtu3_ep_disable(struct mtu3_ep *mep)
{
struct mtu3 *mtu = mep->mtu;
- mtu3_qmu_stop(mep);
-
/* abort all pending requests */
nuke(mep, -ESHUTDOWN);
+ mtu3_qmu_stop(mep);
mtu3_deconfig_ep(mtu, mep);
mtu3_gpd_ring_free(mep);
diff --git a/drivers/usb/mtu3/mtu3_hw_regs.h b/drivers/usb/mtu3/mtu3_hw_regs.h
index 519a58301f45..ee30ae0a4b54 100644
--- a/drivers/usb/mtu3/mtu3_hw_regs.h
+++ b/drivers/usb/mtu3/mtu3_hw_regs.h
@@ -128,6 +128,7 @@
#define TX_FIFOEMPTY BIT(24)
#define TX_SENTSTALL BIT(22)
#define TX_SENDSTALL BIT(21)
+#define TX_FLUSHFIFO BIT(20)
#define TX_TXPKTRDY BIT(16)
#define TX_TXMAXPKTSZ_MSK GENMASK(10, 0)
#define TX_TXMAXPKTSZ(x) ((x) & TX_TXMAXPKTSZ_MSK)
diff --git a/drivers/usb/mtu3/mtu3_qmu.c b/drivers/usb/mtu3/mtu3_qmu.c
index 2ea3157ddb6e..a2fdab8b63b2 100644
--- a/drivers/usb/mtu3/mtu3_qmu.c
+++ b/drivers/usb/mtu3/mtu3_qmu.c
@@ -388,6 +388,9 @@ void mtu3_qmu_stop(struct mtu3_ep *mep)
}
mtu3_writel(mbase, qcsr, QMU_Q_STOP);
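+ /* Flush the TX FIFO before waiting for the queue to go inactive */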
+ if (mep->is_in)
+ mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_FLUSHFIFO);
+
ret = readl_poll_timeout_atomic(mbase + qcsr, value,
!(value & QMU_Q_ACTIVE), 1, 1000);
if (ret) {
@@ -395,6 +398,10 @@ void mtu3_qmu_stop(struct mtu3_ep *mep)
return;
}
+ /* flush fifo again to make sure the fifo is empty */
+ if (mep->is_in)
+ mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_FLUSHFIFO);
+
dev_dbg(mtu->dev, "%s's qmu stop now!\n", mep->name);
}
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index a4e55b0c52cf..d47e5c94587b 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -368,8 +368,10 @@ static int da8xx_musb_init(struct musb *musb)
/* Returns zero if e.g. not clocked */
rev = musb_readl(reg_base, DA8XX_USB_REVISION_REG);
- if (!rev)
+ if (!rev) {
+ ret = -ENODEV;
goto fail;
+ }
musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
if (IS_ERR_OR_NULL(musb->xceiv)) {
diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
index cad991380b0c..27b9bd258340 100644
--- a/drivers/usb/musb/mediatek.c
+++ b/drivers/usb/musb/mediatek.c
@@ -294,7 +294,8 @@ static int mtk_musb_init(struct musb *musb)
err_phy_power_on:
phy_exit(glue->phy);
err_phy_init:
- mtk_otg_switch_exit(glue);
+ if (musb->port_mode == MUSB_OTG)
+ mtk_otg_switch_exit(glue);
return ret;
}
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index 7f9a999cd5ff..9b622cd9b2bd 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/phy/phy-sun4i-usb.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
@@ -67,6 +68,13 @@
#define SUNXI_MUSB_FL_NO_CONFIGDATA 7
#define SUNXI_MUSB_FL_PHY_MODE_PEND 8
+struct sunxi_musb_cfg {
+ const struct musb_hdrc_config *hdrc_config;
+ bool has_sram;
+ bool has_reset;
+ bool no_configdata;
+};
+
/* Our read/write methods need access and do not get passed in a musb ref :| */
static struct musb *sunxi_musb;
@@ -621,11 +629,10 @@ static const struct musb_platform_ops sunxi_musb_ops = {
.post_root_reset_end = sunxi_musb_post_root_reset_end,
};
-/* Allwinner OTG supports up to 5 endpoints */
-#define SUNXI_MUSB_MAX_EP_NUM 6
#define SUNXI_MUSB_RAM_BITS 11
-static struct musb_fifo_cfg sunxi_musb_mode_cfg[] = {
+/* Allwinner OTG supports up to 5 endpoints */
+static struct musb_fifo_cfg sunxi_musb_mode_cfg_5eps[] = {
MUSB_EP_FIFO_SINGLE(1, FIFO_TX, 512),
MUSB_EP_FIFO_SINGLE(1, FIFO_RX, 512),
MUSB_EP_FIFO_SINGLE(2, FIFO_TX, 512),
@@ -639,9 +646,7 @@ static struct musb_fifo_cfg sunxi_musb_mode_cfg[] = {
};
/* H3/V3s OTG supports only 4 endpoints */
-#define SUNXI_MUSB_MAX_EP_NUM_H3 5
-
-static struct musb_fifo_cfg sunxi_musb_mode_cfg_h3[] = {
+static struct musb_fifo_cfg sunxi_musb_mode_cfg_4eps[] = {
MUSB_EP_FIFO_SINGLE(1, FIFO_TX, 512),
MUSB_EP_FIFO_SINGLE(1, FIFO_RX, 512),
MUSB_EP_FIFO_SINGLE(2, FIFO_TX, 512),
@@ -652,31 +657,33 @@ static struct musb_fifo_cfg sunxi_musb_mode_cfg_h3[] = {
MUSB_EP_FIFO_SINGLE(4, FIFO_RX, 512),
};
-static const struct musb_hdrc_config sunxi_musb_hdrc_config = {
- .fifo_cfg = sunxi_musb_mode_cfg,
- .fifo_cfg_size = ARRAY_SIZE(sunxi_musb_mode_cfg),
+static const struct musb_hdrc_config sunxi_musb_hdrc_config_5eps = {
+ .fifo_cfg = sunxi_musb_mode_cfg_5eps,
+ .fifo_cfg_size = ARRAY_SIZE(sunxi_musb_mode_cfg_5eps),
.multipoint = true,
.dyn_fifo = true,
- .num_eps = SUNXI_MUSB_MAX_EP_NUM,
+ /* Two FIFOs per endpoint, plus ep_0. */
+ .num_eps = (ARRAY_SIZE(sunxi_musb_mode_cfg_5eps) / 2) + 1,
.ram_bits = SUNXI_MUSB_RAM_BITS,
};
-static struct musb_hdrc_config sunxi_musb_hdrc_config_h3 = {
- .fifo_cfg = sunxi_musb_mode_cfg_h3,
- .fifo_cfg_size = ARRAY_SIZE(sunxi_musb_mode_cfg_h3),
+static const struct musb_hdrc_config sunxi_musb_hdrc_config_4eps = {
+ .fifo_cfg = sunxi_musb_mode_cfg_4eps,
+ .fifo_cfg_size = ARRAY_SIZE(sunxi_musb_mode_cfg_4eps),
.multipoint = true,
.dyn_fifo = true,
- .num_eps = SUNXI_MUSB_MAX_EP_NUM_H3,
+ /* Two FIFOs per endpoint, plus ep_0. */
+ .num_eps = (ARRAY_SIZE(sunxi_musb_mode_cfg_4eps) / 2) + 1,
.ram_bits = SUNXI_MUSB_RAM_BITS,
};
-
static int sunxi_musb_probe(struct platform_device *pdev)
{
struct musb_hdrc_platform_data pdata;
struct platform_device_info pinfo;
struct sunxi_glue *glue;
struct device_node *np = pdev->dev.of_node;
+ const struct sunxi_musb_cfg *cfg;
int ret;
if (!np) {
@@ -713,26 +720,25 @@ static int sunxi_musb_probe(struct platform_device *pdev)
return -EINVAL;
}
pdata.platform_ops = &sunxi_musb_ops;
- if (!of_device_is_compatible(np, "allwinner,sun8i-h3-musb"))
- pdata.config = &sunxi_musb_hdrc_config;
- else
- pdata.config = &sunxi_musb_hdrc_config_h3;
+
+ cfg = of_device_get_match_data(&pdev->dev);
+ if (!cfg)
+ return -EINVAL;
+
+ pdata.config = cfg->hdrc_config;
glue->dev = &pdev->dev;
INIT_WORK(&glue->work, sunxi_musb_work);
glue->host_nb.notifier_call = sunxi_musb_host_notifier;
- if (of_device_is_compatible(np, "allwinner,sun4i-a10-musb"))
+ if (cfg->has_sram)
set_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags);
- if (of_device_is_compatible(np, "allwinner,sun6i-a31-musb"))
+ if (cfg->has_reset)
set_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags);
- if (of_device_is_compatible(np, "allwinner,sun8i-a33-musb") ||
- of_device_is_compatible(np, "allwinner,sun8i-h3-musb")) {
- set_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags);
+ if (cfg->no_configdata)
set_bit(SUNXI_MUSB_FL_NO_CONFIGDATA, &glue->flags);
- }
glue->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(glue->clk)) {
@@ -810,11 +816,46 @@ static int sunxi_musb_remove(struct platform_device *pdev)
return 0;
}
+static const struct sunxi_musb_cfg sun4i_a10_musb_cfg = {
+ .hdrc_config = &sunxi_musb_hdrc_config_5eps,
+ .has_sram = true,
+};
+
+static const struct sunxi_musb_cfg sun6i_a31_musb_cfg = {
+ .hdrc_config = &sunxi_musb_hdrc_config_5eps,
+ .has_reset = true,
+};
+
+static const struct sunxi_musb_cfg sun8i_a33_musb_cfg = {
+ .hdrc_config = &sunxi_musb_hdrc_config_5eps,
+ .has_reset = true,
+ .no_configdata = true,
+};
+
+static const struct sunxi_musb_cfg sun8i_h3_musb_cfg = {
+ .hdrc_config = &sunxi_musb_hdrc_config_4eps,
+ .has_reset = true,
+ .no_configdata = true,
+};
+
+static const struct sunxi_musb_cfg suniv_f1c100s_musb_cfg = {
+ .hdrc_config = &sunxi_musb_hdrc_config_5eps,
+ .has_sram = true,
+ .has_reset = true,
+ .no_configdata = true,
+};
+
static const struct of_device_id sunxi_musb_match[] = {
- { .compatible = "allwinner,sun4i-a10-musb", },
- { .compatible = "allwinner,sun6i-a31-musb", },
- { .compatible = "allwinner,sun8i-a33-musb", },
- { .compatible = "allwinner,sun8i-h3-musb", },
+ { .compatible = "allwinner,sun4i-a10-musb",
+ .data = &sun4i_a10_musb_cfg, },
+ { .compatible = "allwinner,sun6i-a31-musb",
+ .data = &sun6i_a31_musb_cfg, },
+ { .compatible = "allwinner,sun8i-a33-musb",
+ .data = &sun8i_a33_musb_cfg, },
+ { .compatible = "allwinner,sun8i-h3-musb",
+ .data = &sun8i_h3_musb_cfg, },
+ { .compatible = "allwinner,suniv-f1c100s-musb",
+ .data = &suniv_f1c100s_musb_cfg, },
{}
};
MODULE_DEVICE_TABLE(of, sunxi_musb_match);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index ee5ac4ef7e16..e6d8d9b35ad0 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -402,6 +402,8 @@ static void option_instat_callback(struct urb *urb);
#define LONGCHEER_VENDOR_ID 0x1c9e
/* 4G Systems products */
+/* This one was sold as the VW and Skoda "Carstick LTE" */
+#define FOUR_G_SYSTEMS_PRODUCT_CARSTICK_LTE 0x7605
/* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick *
* It seems to contain a Qualcomm QSC6240/6290 chipset */
#define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
@@ -1976,6 +1978,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(2) },
{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_CARSTICK_LTE),
+ .driver_info = RSVD(0) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
.driver_info = NCTRL(0) | NCTRL(1) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 6012603f3630..97c66c0d91f4 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -939,7 +939,7 @@ static int ms_lib_process_bootblock(struct us_data *us, u16 PhyBlock, u8 *PageDa
struct ms_lib_type_extdat ExtraData;
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
- PageBuffer = kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL);
+ PageBuffer = kzalloc(MS_BYTES_PER_PAGE * 2, GFP_KERNEL);
if (PageBuffer == NULL)
return (u32)-1;
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index 50b24096eb7f..662cd043b50e 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -146,6 +146,7 @@ static int dp_altmode_status_update(struct dp_altmode *dp)
if (dp->hpd != hpd) {
drm_connector_oob_hotplug_event(dp->connector_fwnode);
dp->hpd = hpd;
+ sysfs_notify(&dp->alt->dev.kobj, "displayport", "hpd");
}
}
@@ -276,9 +277,11 @@ static int dp_altmode_vdm(struct typec_altmode *alt,
case CMDT_RSP_ACK:
switch (cmd) {
case CMD_ENTER_MODE:
+ typec_altmode_update_active(alt, true);
dp->state = DP_STATE_UPDATE;
break;
case CMD_EXIT_MODE:
+ typec_altmode_update_active(alt, false);
dp->data.status = 0;
dp->data.conf = 0;
break;
@@ -514,9 +517,18 @@ static ssize_t pin_assignment_show(struct device *dev,
}
static DEVICE_ATTR_RW(pin_assignment);
+static ssize_t hpd_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dp_altmode *dp = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", dp->hpd);
+}
+static DEVICE_ATTR_RO(hpd);
+
static struct attribute *dp_altmode_attrs[] = {
&dev_attr_configuration.attr,
&dev_attr_pin_assignment.attr,
+ &dev_attr_hpd.attr,
NULL
};
diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
index 31c2a3130cad..0c8d554240be 100644
--- a/drivers/usb/typec/bus.c
+++ b/drivers/usb/typec/bus.c
@@ -11,6 +11,22 @@
#include "bus.h"
#include "class.h"
#include "mux.h"
+#include "retimer.h"
+
+static inline int
+typec_altmode_set_retimer(struct altmode *alt, unsigned long conf, void *data)
+{
+ struct typec_retimer_state state;
+
+ if (!alt->retimer)
+ return 0;
+
+ state.alt = &alt->adev;
+ state.mode = conf;
+ state.data = data;
+
+ return typec_retimer_set(alt->retimer, &state);
+}
static inline int
typec_altmode_set_mux(struct altmode *alt, unsigned long conf, void *data)
@@ -27,6 +43,19 @@ typec_altmode_set_mux(struct altmode *alt, unsigned long conf, void *data)
return typec_mux_set(alt->mux, &state);
}
+/* Wrapper to set various Type-C port switches together. */
+static inline int
+typec_altmode_set_switches(struct altmode *alt, unsigned long conf, void *data)
+{
+ int ret;
+
+ ret = typec_altmode_set_retimer(alt, conf, data);
+ if (ret)
+ return ret;
+
+ return typec_altmode_set_mux(alt, conf, data);
+}
+
static int typec_altmode_set_state(struct typec_altmode *adev,
unsigned long conf, void *data)
{
@@ -35,7 +64,7 @@ static int typec_altmode_set_state(struct typec_altmode *adev,
port_altmode = is_port ? to_altmode(adev) : to_altmode(adev)->partner;
- return typec_altmode_set_mux(port_altmode, conf, data);
+ return typec_altmode_set_switches(port_altmode, conf, data);
}
/* -------------------------------------------------------------------------- */
@@ -73,7 +102,7 @@ int typec_altmode_notify(struct typec_altmode *adev,
is_port = is_typec_port(adev->dev.parent);
partner = altmode->partner;
- ret = typec_altmode_set_mux(is_port ? altmode : partner, conf, data);
+ ret = typec_altmode_set_switches(is_port ? altmode : partner, conf, data);
if (ret)
return ret;
diff --git a/drivers/usb/typec/bus.h b/drivers/usb/typec/bus.h
index 56dec268d4dd..c89168857417 100644
--- a/drivers/usb/typec/bus.h
+++ b/drivers/usb/typec/bus.h
@@ -7,11 +7,13 @@
struct bus_type;
struct typec_mux;
+struct typec_retimer;
struct altmode {
unsigned int id;
struct typec_altmode adev;
struct typec_mux *mux;
+ struct typec_retimer *retimer;
enum typec_port_data roles;
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 5897905cb4f0..ed3d070b1ca4 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -583,6 +583,7 @@ void typec_unregister_altmode(struct typec_altmode *adev)
{
if (IS_ERR_OR_NULL(adev))
return;
+ typec_retimer_put(to_altmode(adev)->retimer);
typec_mux_put(to_altmode(adev)->mux);
device_unregister(&adev->dev);
}
@@ -2108,16 +2109,26 @@ typec_port_register_altmode(struct typec_port *port,
{
struct typec_altmode *adev;
struct typec_mux *mux;
+ struct typec_retimer *retimer;
mux = typec_mux_get(&port->dev, desc);
if (IS_ERR(mux))
return ERR_CAST(mux);
+ retimer = typec_retimer_get(&port->dev);
+ if (IS_ERR(retimer)) {
+ typec_mux_put(mux);
+ return ERR_CAST(retimer);
+ }
+
adev = typec_register_altmode(&port->dev, desc);
- if (IS_ERR(adev))
+ if (IS_ERR(adev)) {
+ typec_retimer_put(retimer);
typec_mux_put(mux);
- else
+ } else {
to_altmode(adev)->mux = mux;
+ to_altmode(adev)->retimer = retimer;
+ }
return adev;
}
diff --git a/drivers/usb/typec/hd3ss3220.c b/drivers/usb/typec/hd3ss3220.c
index f128664cb130..746ef3a75b76 100644
--- a/drivers/usb/typec/hd3ss3220.c
+++ b/drivers/usb/typec/hd3ss3220.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/usb/typec.h>
#include <linux/delay.h>
+#include <linux/workqueue.h>
#define HD3SS3220_REG_CN_STAT_CTRL 0x09
#define HD3SS3220_REG_GEN_CTRL 0x0A
@@ -37,6 +38,9 @@ struct hd3ss3220 {
struct regmap *regmap;
struct usb_role_switch *role_sw;
struct typec_port *port;
+ struct delayed_work output_poll_work;
+ enum usb_role role_state;
+ bool poll;
};
static int hd3ss3220_set_source_pref(struct hd3ss3220 *hd3ss3220, int src_pref)
@@ -118,6 +122,22 @@ static void hd3ss3220_set_role(struct hd3ss3220 *hd3ss3220)
default:
break;
}
+
+ hd3ss3220->role_state = role_state;
+}
+
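+/* Polling fallback used when no interrupt line is available: re-check the attached state every second */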
+static void output_poll_execute(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct hd3ss3220 *hd3ss3220 = container_of(delayed_work,
+ struct hd3ss3220,
+ output_poll_work);
+ enum usb_role role_state = hd3ss3220_get_attached_state(hd3ss3220);
+
+ if (hd3ss3220->role_state != role_state)
+ hd3ss3220_set_role(hd3ss3220);
+
+ schedule_delayed_work(&hd3ss3220->output_poll_work, HZ);
}
static irqreturn_t hd3ss3220_irq(struct hd3ss3220 *hd3ss3220)
@@ -223,6 +243,9 @@ static int hd3ss3220_probe(struct i2c_client *client)
"hd3ss3220", &client->dev);
if (ret)
goto err_unreg_port;
+ } else {
+ INIT_DELAYED_WORK(&hd3ss3220->output_poll_work, output_poll_execute);
+ hd3ss3220->poll = true;
}
ret = i2c_smbus_read_byte_data(client, HD3SS3220_REG_DEV_REV);
@@ -231,6 +254,9 @@ static int hd3ss3220_probe(struct i2c_client *client)
fwnode_handle_put(connector);
+ if (hd3ss3220->poll)
+ schedule_delayed_work(&hd3ss3220->output_poll_work, HZ);
+
dev_info(&client->dev, "probed revision=0x%x\n", ret);
return 0;
@@ -248,6 +274,9 @@ static void hd3ss3220_remove(struct i2c_client *client)
{
struct hd3ss3220 *hd3ss3220 = i2c_get_clientdata(client);
+ if (hd3ss3220->poll)
+ cancel_delayed_work_sync(&hd3ss3220->output_poll_work);
+
typec_unregister_port(hd3ss3220->port);
usb_role_switch_put(hd3ss3220->role_sw);
}
diff --git a/drivers/usb/typec/mux/Kconfig b/drivers/usb/typec/mux/Kconfig
index 5eb2c17d72c1..c46fa4f9d3df 100644
--- a/drivers/usb/typec/mux/Kconfig
+++ b/drivers/usb/typec/mux/Kconfig
@@ -12,6 +12,12 @@ config TYPEC_MUX_FSA4480
common USB Type-C connector.
If compiled as a module, the module will be named fsa4480.
+config TYPEC_MUX_GPIO_SBU
+ tristate "Generic GPIO based SBU mux for USB Type-C applications"
+ help
+ Say Y or M if your system uses a GPIO based mux for managing the
+ connected state and the swapping of the SBU lines in a Type-C port.
+
config TYPEC_MUX_PI3USB30532
tristate "Pericom PI3USB30532 Type-C cross switch driver"
depends on I2C
diff --git a/drivers/usb/typec/mux/Makefile b/drivers/usb/typec/mux/Makefile
index e52a56c16bfb..dda67e19b58b 100644
--- a/drivers/usb/typec/mux/Makefile
+++ b/drivers/usb/typec/mux/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_TYPEC_MUX_FSA4480) += fsa4480.o
+obj-$(CONFIG_TYPEC_MUX_GPIO_SBU) += gpio-sbu-mux.o
obj-$(CONFIG_TYPEC_MUX_PI3USB30532) += pi3usb30532.o
obj-$(CONFIG_TYPEC_MUX_INTEL_PMC) += intel_pmc_mux.o
diff --git a/drivers/usb/typec/mux/gpio-sbu-mux.c b/drivers/usb/typec/mux/gpio-sbu-mux.c
new file mode 100644
index 000000000000..f62516dafe8f
--- /dev/null
+++ b/drivers/usb/typec/mux/gpio-sbu-mux.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Linaro Ltd.
+ */
+
+#include <linux/bits.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_mux.h>
+
+struct gpio_sbu_mux {
+ struct gpio_desc *enable_gpio;
+ struct gpio_desc *select_gpio;
+
+ struct typec_switch_dev *sw;
+ struct typec_mux_dev *mux;
+
+ struct mutex lock; /* protect enabled and swapped */
+ bool enabled;
+ bool swapped;
+};
+
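+/*
+ * Orientation switch callback: TYPEC_ORIENTATION_NONE disables the SBU path,
+ * NORMAL and REVERSE control whether the SBU lines are swapped via the select GPIO.
+ */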
+static int gpio_sbu_switch_set(struct typec_switch_dev *sw,
+ enum typec_orientation orientation)
+{
+ struct gpio_sbu_mux *sbu_mux = typec_switch_get_drvdata(sw);
+ bool enabled;
+ bool swapped;
+
+ mutex_lock(&sbu_mux->lock);
+
+ enabled = sbu_mux->enabled;
+ swapped = sbu_mux->swapped;
+
+ switch (orientation) {
+ case TYPEC_ORIENTATION_NONE:
+ enabled = false;
+ break;
+ case TYPEC_ORIENTATION_NORMAL:
+ swapped = false;
+ break;
+ case TYPEC_ORIENTATION_REVERSE:
+ swapped = true;
+ break;
+ }
+
+ if (enabled != sbu_mux->enabled)
+ gpiod_set_value(sbu_mux->enable_gpio, enabled);
+
+ if (swapped != sbu_mux->swapped)
+ gpiod_set_value(sbu_mux->select_gpio, swapped);
+
+ sbu_mux->enabled = enabled;
+ sbu_mux->swapped = swapped;
+
+ mutex_unlock(&sbu_mux->lock);
+
+ return 0;
+}
+
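+/* Mux callback: the SBU lines are only connected for the DisplayPort pin assignments (states C, D and E) */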
+static int gpio_sbu_mux_set(struct typec_mux_dev *mux,
+ struct typec_mux_state *state)
+{
+ struct gpio_sbu_mux *sbu_mux = typec_mux_get_drvdata(mux);
+
+ mutex_lock(&sbu_mux->lock);
+
+ switch (state->mode) {
+ case TYPEC_STATE_SAFE:
+ case TYPEC_STATE_USB:
+ sbu_mux->enabled = false;
+ break;
+ case TYPEC_DP_STATE_C:
+ case TYPEC_DP_STATE_D:
+ case TYPEC_DP_STATE_E:
+ sbu_mux->enabled = true;
+ break;
+ default:
+ break;
+ }
+
+ gpiod_set_value(sbu_mux->enable_gpio, sbu_mux->enabled);
+
+ mutex_unlock(&sbu_mux->lock);
+
+ return 0;
+}
+
+static int gpio_sbu_mux_probe(struct platform_device *pdev)
+{
+ struct typec_switch_desc sw_desc = { };
+ struct typec_mux_desc mux_desc = { };
+ struct device *dev = &pdev->dev;
+ struct gpio_sbu_mux *sbu_mux;
+
+ sbu_mux = devm_kzalloc(dev, sizeof(*sbu_mux), GFP_KERNEL);
+ if (!sbu_mux)
+ return -ENOMEM;
+
+ mutex_init(&sbu_mux->lock);
+
+ sbu_mux->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(sbu_mux->enable_gpio))
+ return dev_err_probe(dev, PTR_ERR(sbu_mux->enable_gpio),
+ "unable to acquire enable gpio\n");
+
+ sbu_mux->select_gpio = devm_gpiod_get(dev, "select", GPIOD_OUT_LOW);
+ if (IS_ERR(sbu_mux->select_gpio))
+ return dev_err_probe(dev, PTR_ERR(sbu_mux->select_gpio),
+ "unable to acquire select gpio\n");
+
+ sw_desc.drvdata = sbu_mux;
+ sw_desc.fwnode = dev_fwnode(dev);
+ sw_desc.set = gpio_sbu_switch_set;
+
+ sbu_mux->sw = typec_switch_register(dev, &sw_desc);
+ if (IS_ERR(sbu_mux->sw))
+ return dev_err_probe(dev, PTR_ERR(sbu_mux->sw),
+ "failed to register typec switch\n");
+
+ mux_desc.drvdata = sbu_mux;
+ mux_desc.fwnode = dev_fwnode(dev);
+ mux_desc.set = gpio_sbu_mux_set;
+
+ sbu_mux->mux = typec_mux_register(dev, &mux_desc);
+ if (IS_ERR(sbu_mux->mux)) {
+ typec_switch_unregister(sbu_mux->sw);
+ return dev_err_probe(dev, PTR_ERR(sbu_mux->mux),
+ "failed to register typec mux\n");
+ }
+
+ platform_set_drvdata(pdev, sbu_mux);
+
+ return 0;
+}
+
+static int gpio_sbu_mux_remove(struct platform_device *pdev)
+{
+ struct gpio_sbu_mux *sbu_mux = platform_get_drvdata(pdev);
+
+ gpiod_set_value(sbu_mux->enable_gpio, 0);
+
+ typec_mux_unregister(sbu_mux->mux);
+ typec_switch_unregister(sbu_mux->sw);
+
+ return 0;
+}
+
+static const struct of_device_id gpio_sbu_mux_match[] = {
+ { .compatible = "gpio-sbu-mux", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, gpio_sbu_mux_match);
+
+static struct platform_driver gpio_sbu_mux_driver = {
+ .probe = gpio_sbu_mux_probe,
+ .remove = gpio_sbu_mux_remove,
+ .driver = {
+ .name = "gpio_sbu_mux",
+ .of_match_table = gpio_sbu_mux_match,
+ },
+};
+module_platform_driver(gpio_sbu_mux_driver);
+
+MODULE_DESCRIPTION("GPIO based SBU mux driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index fdbf3694e21f..34e4188a40ff 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -602,20 +602,21 @@ static int pmc_usb_probe_iom(struct pmc_usb *pmc)
int ret;
for (dev_id = &iom_acpi_ids[0]; dev_id->id[0]; dev_id++) {
- if (acpi_dev_present(dev_id->id, NULL, -1)) {
- pmc->iom_port_status_offset = (u32)dev_id->driver_data;
- adev = acpi_dev_get_first_match_dev(dev_id->id, NULL, -1);
+ adev = acpi_dev_get_first_match_dev(dev_id->id, NULL, -1);
+ if (adev)
break;
- }
}
-
if (!adev)
return -ENODEV;
+ pmc->iom_port_status_offset = (u32)dev_id->driver_data;
+
INIT_LIST_HEAD(&resource_list);
ret = acpi_dev_get_memory_resources(adev, &resource_list);
- if (ret < 0)
+ if (ret < 0) {
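+ /* Drop the reference taken by acpi_dev_get_first_match_dev() */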
+ acpi_dev_put(adev);
return ret;
+ }
rentry = list_first_entry_or_null(&resource_list, struct resource_entry, node);
if (rentry)
diff --git a/drivers/usb/typec/pd.c b/drivers/usb/typec/pd.c
index dc72005d68db..59c537a5e600 100644
--- a/drivers/usb/typec/pd.c
+++ b/drivers/usb/typec/pd.c
@@ -49,6 +49,13 @@ usb_suspend_supported_show(struct device *dev, struct device_attribute *attr, ch
static DEVICE_ATTR_RO(usb_suspend_supported);
static ssize_t
+higher_capability_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%u\n", !!(to_pdo(dev)->pdo & PDO_FIXED_HIGHER_CAP));
+}
+static DEVICE_ATTR_RO(higher_capability);
+
+static ssize_t
unconstrained_power_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%u\n", !!(to_pdo(dev)->pdo & PDO_FIXED_EXTPOWER));
@@ -161,7 +168,7 @@ static struct device_type source_fixed_supply_type = {
static struct attribute *sink_fixed_supply_attrs[] = {
&dev_attr_dual_role_power.attr,
- &dev_attr_usb_suspend_supported.attr,
+ &dev_attr_higher_capability.attr,
&dev_attr_unconstrained_power.attr,
&dev_attr_usb_communication_capable.attr,
&dev_attr_dual_role_data.attr,
diff --git a/drivers/usb/typec/retimer.h b/drivers/usb/typec/retimer.h
index e34bd23323be..d6a5ef9881e1 100644
--- a/drivers/usb/typec/retimer.h
+++ b/drivers/usb/typec/retimer.h
@@ -12,7 +12,7 @@ struct typec_retimer {
#define to_typec_retimer(_dev_) container_of(_dev_, struct typec_retimer, dev)
-const struct device_type typec_retimer_dev_type;
+extern const struct device_type typec_retimer_dev_type;
#define is_typec_retimer(dev) ((dev)->type == &typec_retimer_dev_type)
diff --git a/drivers/usb/typec/tcpm/Makefile b/drivers/usb/typec/tcpm/Makefile
index 906d9dced8e7..08e57bb499cb 100644
--- a/drivers/usb/typec/tcpm/Makefile
+++ b/drivers/usb/typec/tcpm/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_TYPEC_RT1711H) += tcpci_rt1711h.o
obj-$(CONFIG_TYPEC_MT6360) += tcpci_mt6360.o
obj-$(CONFIG_TYPEC_TCPCI_MT6370) += tcpci_mt6370.o
obj-$(CONFIG_TYPEC_TCPCI_MAXIM) += tcpci_maxim.o
+tcpci_maxim-y += tcpci_maxim_core.o maxim_contaminant.o
diff --git a/drivers/usb/typec/tcpm/maxim_contaminant.c b/drivers/usb/typec/tcpm/maxim_contaminant.c
new file mode 100644
index 000000000000..f8504a90da26
--- /dev/null
+++ b/drivers/usb/typec/tcpm/maxim_contaminant.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2022 Google, Inc
+ *
+ * USB-C module to reduce wakeups due to contaminants.
+ */
+
+#include <linux/device.h>
+#include <linux/irqreturn.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/usb/tcpci.h>
+#include <linux/usb/tcpm.h>
+#include <linux/usb/typec.h>
+
+#include "tcpci_maxim.h"
+
+enum fladc_select {
+ CC1_SCALE1 = 1,
+ CC1_SCALE2,
+ CC2_SCALE1,
+ CC2_SCALE2,
+ SBU1,
+ SBU2,
+};
+
+#define FLADC_1uA_LSB_MV 25
+/* High range CC */
+#define FLADC_CC_HIGH_RANGE_LSB_MV 208
+/* Low range CC */
+#define FLADC_CC_LOW_RANGE_LSB_MV 126
+
+/* 1uA current source */
+#define FLADC_CC_SCALE1 1
+/* 5 uA current source */
+#define FLADC_CC_SCALE2 5
+
+#define FLADC_1uA_CC_OFFSET_MV 300
+#define FLADC_CC_HIGH_RANGE_OFFSET_MV 624
+#define FLADC_CC_LOW_RANGE_OFFSET_MV 378
+
+#define CONTAMINANT_THRESHOLD_SBU_K 1000
+#define CONTAMINANT_THRESHOLD_CC_K 1000
+
+#define READ1_SLEEP_MS 10
+#define READ2_SLEEP_MS 5
+
+#define STATUS_CHECK(reg, mask, val) (((reg) & (mask)) == (val))
+
+#define IS_CC_OPEN(cc_status) \
+ (STATUS_CHECK((cc_status), TCPC_CC_STATUS_CC1_MASK << TCPC_CC_STATUS_CC1_SHIFT, \
+ TCPC_CC_STATE_SRC_OPEN) && \
+ STATUS_CHECK((cc_status), TCPC_CC_STATUS_CC2_MASK << TCPC_CC_STATUS_CC2_SHIFT, \
+ TCPC_CC_STATE_SRC_OPEN))
+
+static int max_contaminant_adc_to_mv(struct max_tcpci_chip *chip, enum fladc_select channel,
+ bool ua_src, u8 fladc)
+{
+ /* SBU channels only have 1 scale with 1uA. */
+ if ((ua_src && (channel == CC1_SCALE2 || channel == CC2_SCALE2 || channel == SBU1 ||
+ channel == SBU2)))
+ /* Mean of range */
+ return FLADC_1uA_CC_OFFSET_MV + (fladc * FLADC_1uA_LSB_MV);
+ else if (!ua_src && (channel == CC1_SCALE1 || channel == CC2_SCALE1))
+ return FLADC_CC_HIGH_RANGE_OFFSET_MV + (fladc * FLADC_CC_HIGH_RANGE_LSB_MV);
+ else if (!ua_src && (channel == CC1_SCALE2 || channel == CC2_SCALE2))
+ return FLADC_CC_LOW_RANGE_OFFSET_MV + (fladc * FLADC_CC_LOW_RANGE_LSB_MV);
+
+ dev_err_once(chip->dev, "ADC ERROR: SCALE UNKNOWN\n");
+
+ return -EINVAL;
+}
+
+static int max_contaminant_read_adc_mv(struct max_tcpci_chip *chip, enum fladc_select channel,
+ int sleep_msec, bool raw, bool ua_src)
+{
+ struct regmap *regmap = chip->data.regmap;
+ u8 fladc;
+ int ret;
+
+ /* Channel & scale select */
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_ADC_CTRL1, ADCINSEL_MASK,
+ channel << ADC_CHANNEL_OFFSET);
+ if (ret < 0)
+ return ret;
+
+ /* Enable ADC */
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_ADC_CTRL1, ADCEN, ADCEN);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(sleep_msec * 1000, (sleep_msec + 1) * 1000);
+ ret = max_tcpci_read8(chip, TCPC_VENDOR_FLADC_STATUS, &fladc);
+ if (ret < 0)
+ return ret;
+
+ /* Disable ADC */
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_ADC_CTRL1, ADCEN, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_ADC_CTRL1, ADCINSEL_MASK, 0);
+ if (ret < 0)
+ return ret;
+
+ if (!raw)
+ return max_contaminant_adc_to_mv(chip, channel, ua_src, fladc);
+ else
+ return fladc;
+}
+
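+/*
+ * Measure the resistance to ground on a CC or SBU line: drive the 1uA current
+ * source and read the ADC; with a 1uA source the reading in mV maps directly
+ * to kOhm.
+ */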
+static int max_contaminant_read_resistance_kohm(struct max_tcpci_chip *chip,
+ enum fladc_select channel, int sleep_msec, bool raw)
+{
+ struct regmap *regmap = chip->data.regmap;
+ int mv;
+ int ret;
+
+ if (channel == CC1_SCALE1 || channel == CC2_SCALE1 || channel == CC1_SCALE2 ||
+ channel == CC2_SCALE2) {
+ /* Switch CC sensing into ultra-low-power mode */
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL_MASK,
+ ULTRA_LOW_POWER_MODE);
+ if (ret < 0)
+ return ret;
+
+ /* Enable 1uA current source */
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCRPCTRL_MASK, UA_1_SRC);
+ if (ret < 0)
+ return ret;
+
+ /* OVP disable */
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCOVPDIS, CCOVPDIS);
+ if (ret < 0)
+ return ret;
+
+ mv = max_contaminant_read_adc_mv(chip, channel, sleep_msec, raw, true);
+ if (mv < 0)
+ return mv;
+
+ /* OVP enable */
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCOVPDIS, 0);
+ if (ret < 0)
+ return ret;
+ /* The result is in kOhm because a 1uA current source is used. */
+ return mv;
+ }
+
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, SBUOVPDIS, SBUOVPDIS);
+ if (ret < 0)
+ return ret;
+
+ /* SBU switches auto configure when channel is selected. */
+ /* Enable 1uA current source */
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, SBURPCTRL, SBURPCTRL);
+ if (ret < 0)
+ return ret;
+
+ mv = max_contaminant_read_adc_mv(chip, channel, sleep_msec, raw, true);
+ if (mv < 0)
+ return mv;
+ /* Disable current source */
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, SBURPCTRL, 0);
+ if (ret < 0)
+ return ret;
+
+ /* OVP disable */
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, SBUOVPDIS, 0);
+ if (ret < 0)
+ return ret;
+
+ return mv;
+}
+
+static int max_contaminant_read_comparators(struct max_tcpci_chip *chip, u8 *vendor_cc_status2_cc1,
+ u8 *vendor_cc_status2_cc2)
+{
+ struct regmap *regmap = chip->data.regmap;
+ int ret;
+
+ /* Enable 80uA source */
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCRPCTRL_MASK, UA_80_SRC);
+ if (ret < 0)
+ return ret;
+
+ /* Enable comparators */
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL1, CCCOMPEN, CCCOMPEN);
+ if (ret < 0)
+ return ret;
+
+ /* Sleep to allow the comparators to settle */
+ usleep_range(5000, 6000);
+ ret = regmap_update_bits(regmap, TCPC_TCPC_CTRL, TCPC_TCPC_CTRL_ORIENTATION, PLUG_ORNT_CC1);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(5000, 6000);
+ ret = max_tcpci_read8(chip, VENDOR_CC_STATUS2, vendor_cc_status2_cc1);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(regmap, TCPC_TCPC_CTRL, TCPC_TCPC_CTRL_ORIENTATION, PLUG_ORNT_CC2);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(5000, 6000);
+ ret = max_tcpci_read8(chip, VENDOR_CC_STATUS2, vendor_cc_status2_cc2);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL1, CCCOMPEN, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCRPCTRL_MASK, 0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
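+/*
+ * Infer the port state from the CC/SBU resistances and the Rd comparators:
+ * a real sink is inferred from the CC comparator readings, while low
+ * resistance on both a CC and an SBU line points to a contaminant.
+ */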
+static int max_contaminant_detect_contaminant(struct max_tcpci_chip *chip)
+{
+ int cc1_k, cc2_k, sbu1_k, sbu2_k, ret;
+ u8 vendor_cc_status2_cc1 = 0xff, vendor_cc_status2_cc2 = 0xff;
+ u8 role_ctrl = 0, role_ctrl_backup = 0;
+ int inferred_state = NOT_DETECTED;
+
+ ret = max_tcpci_read8(chip, TCPC_ROLE_CTRL, &role_ctrl);
+ if (ret < 0)
+ return NOT_DETECTED;
+
+ role_ctrl_backup = role_ctrl;
+ role_ctrl = 0x0F;
+ ret = max_tcpci_write8(chip, TCPC_ROLE_CTRL, role_ctrl);
+ if (ret < 0)
+ return NOT_DETECTED;
+
+ cc1_k = max_contaminant_read_resistance_kohm(chip, CC1_SCALE2, READ1_SLEEP_MS, false);
+ if (cc1_k < 0)
+ goto exit;
+
+ cc2_k = max_contaminant_read_resistance_kohm(chip, CC2_SCALE2, READ2_SLEEP_MS, false);
+ if (cc2_k < 0)
+ goto exit;
+
+ sbu1_k = max_contaminant_read_resistance_kohm(chip, SBU1, READ1_SLEEP_MS, false);
+ if (sbu1_k < 0)
+ goto exit;
+
+ sbu2_k = max_contaminant_read_resistance_kohm(chip, SBU2, READ2_SLEEP_MS, false);
+ if (sbu2_k < 0)
+ goto exit;
+
+ ret = max_contaminant_read_comparators(chip, &vendor_cc_status2_cc1,
+ &vendor_cc_status2_cc2);
+
+ if (ret < 0)
+ goto exit;
+
+ if ((!(CC1_VUFP_RD0P5 & vendor_cc_status2_cc1) ||
+ !(CC2_VUFP_RD0P5 & vendor_cc_status2_cc2)) &&
+ !(CC1_VUFP_RD0P5 & vendor_cc_status2_cc1 && CC2_VUFP_RD0P5 & vendor_cc_status2_cc2))
+ inferred_state = SINK;
+ else if ((cc1_k < CONTAMINANT_THRESHOLD_CC_K || cc2_k < CONTAMINANT_THRESHOLD_CC_K) &&
+ (sbu1_k < CONTAMINANT_THRESHOLD_SBU_K || sbu2_k < CONTAMINANT_THRESHOLD_SBU_K))
+ inferred_state = DETECTED;
+
+ if (inferred_state == NOT_DETECTED)
+ max_tcpci_write8(chip, TCPC_ROLE_CTRL, role_ctrl_backup);
+ else
+ max_tcpci_write8(chip, TCPC_ROLE_CTRL, (TCPC_ROLE_CTRL_DRP | 0xA));
+
+ return inferred_state;
+exit:
+ max_tcpci_write8(chip, TCPC_ROLE_CTRL, role_ctrl_backup);
+ return NOT_DETECTED;
+}
+
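+/*
+ * Arm the TCPC's own dry detection: program the water-detection debounce,
+ * threshold and duty cycle, drop into ultra-low-power DRP toggling and
+ * restart Look4Connection so the port is re-evaluated periodically.
+ */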
+static int max_contaminant_enable_dry_detection(struct max_tcpci_chip *chip)
+{
+ struct regmap *regmap = chip->data.regmap;
+ u8 temp;
+ int ret;
+
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL3,
+ CCWTRDEB_MASK | CCWTRSEL_MASK | WTRCYCLE_MASK,
+ CCWTRDEB_1MS << CCWTRDEB_SHIFT |
+ CCWTRSEL_1V << CCWTRSEL_SHIFT |
+ WTRCYCLE_4_8_S << WTRCYCLE_SHIFT);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(regmap, TCPC_ROLE_CTRL, TCPC_ROLE_CTRL_DRP, TCPC_ROLE_CTRL_DRP);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL1, CCCONNDRY, CCCONNDRY);
+ if (ret < 0)
+ return ret;
+ ret = max_tcpci_read8(chip, TCPC_VENDOR_CC_CTRL1, &temp);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL_MASK,
+ ULTRA_LOW_POWER_MODE);
+ if (ret < 0)
+ return ret;
+ ret = max_tcpci_read8(chip, TCPC_VENDOR_CC_CTRL2, &temp);
+ if (ret < 0)
+ return ret;
+
+ /* Enable Look4Connection before sending the command */
+ ret = regmap_update_bits(regmap, TCPC_TCPC_CTRL, TCPC_TCPC_CTRL_EN_LK4CONN_ALRT,
+ TCPC_TCPC_CTRL_EN_LK4CONN_ALRT);
+ if (ret < 0)
+ return ret;
+
+ ret = max_tcpci_write8(chip, TCPC_COMMAND, TCPC_CMD_LOOK4CONNECTION);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
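+/*
+ * Called from the CC status handling paths; returns true while a contaminant
+ * is detected (dry detection is then armed), false when the port looks clean
+ * or a real sink is attached.
+ */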
+bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect_while_debounce)
+{
+ u8 cc_status, pwr_cntl;
+ int ret;
+
+ ret = max_tcpci_read8(chip, TCPC_CC_STATUS, &cc_status);
+ if (ret < 0)
+ return false;
+
+ ret = max_tcpci_read8(chip, TCPC_POWER_CTRL, &pwr_cntl);
+ if (ret < 0)
+ return false;
+
+ if (chip->contaminant_state == NOT_DETECTED || chip->contaminant_state == SINK) {
+ if (!disconnect_while_debounce)
+ msleep(100);
+
+ ret = max_tcpci_read8(chip, TCPC_CC_STATUS, &cc_status);
+ if (ret < 0)
+ return false;
+
+ if (IS_CC_OPEN(cc_status)) {
+ u8 role_ctrl, role_ctrl_backup;
+
+ ret = max_tcpci_read8(chip, TCPC_ROLE_CTRL, &role_ctrl);
+ if (ret < 0)
+ return false;
+
+ role_ctrl_backup = role_ctrl;
+ role_ctrl |= 0x0F;
+ role_ctrl &= ~(TCPC_ROLE_CTRL_DRP);
+ ret = max_tcpci_write8(chip, TCPC_ROLE_CTRL, role_ctrl);
+ if (ret < 0)
+ return false;
+
+ chip->contaminant_state = max_contaminant_detect_contaminant(chip);
+
+ ret = max_tcpci_write8(chip, TCPC_ROLE_CTRL, role_ctrl_backup);
+ if (ret < 0)
+ return false;
+
+ if (chip->contaminant_state == DETECTED) {
+ max_contaminant_enable_dry_detection(chip);
+ return true;
+ }
+ }
+ return false;
+ } else if (chip->contaminant_state == DETECTED) {
+ if (STATUS_CHECK(cc_status, TCPC_CC_STATUS_TOGGLING, 0)) {
+ chip->contaminant_state = max_contaminant_detect_contaminant(chip);
+ if (chip->contaminant_state == DETECTED) {
+ max_contaminant_enable_dry_detection(chip);
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+MODULE_DESCRIPTION("MAXIM TCPC CONTAMINANT Module");
+MODULE_AUTHOR("Badhri Jagan Sridharan <badhri@google.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index fe781a38dc82..8da23240afbe 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -33,6 +33,7 @@ struct tcpci {
struct tcpm_port *port;
struct regmap *regmap;
+ unsigned int alert_mask;
bool controls_vbus;
@@ -403,6 +404,14 @@ static void tcpci_frs_sourcing_vbus(struct tcpc_dev *dev)
tcpci->data->frs_sourcing_vbus(tcpci, tcpci->data);
}
+static void tcpci_check_contaminant(struct tcpc_dev *dev)
+{
+ struct tcpci *tcpci = tcpc_to_tcpci(dev);
+
+ if (tcpci->data->check_contaminant)
+ tcpci->data->check_contaminant(tcpci, tcpci->data);
+}
+
static int tcpci_set_bist_data(struct tcpc_dev *tcpc, bool enable)
{
struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
@@ -632,6 +641,9 @@ static int tcpci_init(struct tcpc_dev *tcpc)
if (ret < 0)
return ret;
}
+
+ tcpci->alert_mask = reg;
+
return tcpci_write16(tcpci, TCPC_ALERT_MASK, reg);
}
@@ -715,7 +727,7 @@ irqreturn_t tcpci_irq(struct tcpci *tcpci)
else if (status & TCPC_ALERT_TX_FAILED)
tcpm_pd_transmit_complete(tcpci->port, TCPC_TX_FAILED);
- return IRQ_HANDLED;
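+ /* With IRQF_SHARED the handler must only claim the interrupt if an alert we unmasked actually fired */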
+ return IRQ_RETVAL(status & tcpci->alert_mask);
}
EXPORT_SYMBOL_GPL(tcpci_irq);
@@ -778,6 +790,9 @@ struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data)
tcpci->tcpc.frs_sourcing_vbus = tcpci_frs_sourcing_vbus;
tcpci->tcpc.set_partner_usb_comm_capable = tcpci_set_partner_usb_comm_capable;
+ if (tcpci->data->check_contaminant)
+ tcpci->tcpc.check_contaminant = tcpci_check_contaminant;
+
if (tcpci->data->auto_discharge_disconnect) {
tcpci->tcpc.enable_auto_vbus_discharge = tcpci_enable_auto_vbus_discharge;
tcpci->tcpc.set_auto_vbus_discharge_threshold =
@@ -838,7 +853,7 @@ static int tcpci_probe(struct i2c_client *client)
err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
_tcpci_irq,
- IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+ IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_LOW,
dev_name(&client->dev), chip);
if (err < 0) {
tcpci_unregister_port(chip->tcpci);
diff --git a/drivers/usb/typec/tcpm/tcpci_maxim.h b/drivers/usb/typec/tcpm/tcpci_maxim.h
new file mode 100644
index 000000000000..2c1c4d161b0d
--- /dev/null
+++ b/drivers/usb/typec/tcpm/tcpci_maxim.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2022 Google, Inc
+ *
+ * MAXIM TCPC header file.
+ */
+#ifndef TCPCI_MAXIM_H_
+#define TCPCI_MAXIM_H_
+
+#define VENDOR_CC_STATUS2 0x85
+#define CC1_VUFP_RD0P5 BIT(1)
+#define CC2_VUFP_RD0P5 BIT(5)
+#define TCPC_VENDOR_FLADC_STATUS 0x89
+
+#define TCPC_VENDOR_CC_CTRL1 0x8c
+#define CCCONNDRY BIT(7)
+#define CCCOMPEN BIT(5)
+
+#define TCPC_VENDOR_CC_CTRL2 0x8d
+#define SBUOVPDIS BIT(7)
+#define CCOVPDIS BIT(6)
+#define SBURPCTRL BIT(5)
+#define CCLPMODESEL_MASK GENMASK(4, 3)
+#define ULTRA_LOW_POWER_MODE BIT(3)
+#define CCRPCTRL_MASK GENMASK(2, 0)
+#define UA_1_SRC 1
+#define UA_80_SRC 3
+
+#define TCPC_VENDOR_CC_CTRL3 0x8e
+#define CCWTRDEB_MASK GENMASK(7, 6)
+#define CCWTRDEB_SHIFT 6
+#define CCWTRDEB_1MS 1
+#define CCWTRSEL_MASK GENMASK(5, 3)
+#define CCWTRSEL_SHIFT 3
+#define CCWTRSEL_1V 0x4
+#define CCLADDERDIS BIT(2)
+#define WTRCYCLE_MASK BIT(0)
+#define WTRCYCLE_SHIFT 0
+#define WTRCYCLE_2_4_S 0
+#define WTRCYCLE_4_8_S 1
+
+#define TCPC_VENDOR_ADC_CTRL1 0x91
+#define ADCINSEL_MASK GENMASK(7, 5)
+#define ADC_CHANNEL_OFFSET 5
+#define ADCEN BIT(0)
+
+enum contamiant_state {
+ NOT_DETECTED,
+ DETECTED,
+ SINK,
+};
+
+/*
+ * @contaminant_state:
+ * Last contaminant detection state reported to TCPM, indicating whether
+ * the port potentially has a contaminant.
+ */
+struct max_tcpci_chip {
+ struct tcpci_data data;
+ struct tcpci *tcpci;
+ struct device *dev;
+ struct i2c_client *client;
+ struct tcpm_port *port;
+ enum contamiant_state contaminant_state;
+};
+
+static inline int max_tcpci_read16(struct max_tcpci_chip *chip, unsigned int reg, u16 *val)
+{
+ return regmap_raw_read(chip->data.regmap, reg, val, sizeof(u16));
+}
+
+static inline int max_tcpci_write16(struct max_tcpci_chip *chip, unsigned int reg, u16 val)
+{
+ return regmap_raw_write(chip->data.regmap, reg, &val, sizeof(u16));
+}
+
+static inline int max_tcpci_read8(struct max_tcpci_chip *chip, unsigned int reg, u8 *val)
+{
+ return regmap_raw_read(chip->data.regmap, reg, val, sizeof(u8));
+}
+
+static inline int max_tcpci_write8(struct max_tcpci_chip *chip, unsigned int reg, u8 val)
+{
+ return regmap_raw_write(chip->data.regmap, reg, &val, sizeof(u8));
+}
+
+bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect_while_debounce);
+
+#endif // TCPCI_MAXIM_H_
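
Not part of the patch: the new header exposes the 8/16-bit regmap accessors as inline helpers so that the core driver and the (separately added) contaminant-detection code can share them. Here is a short sketch of how such a helper is typically used against the vendor registers declared above, assuming tcpci_maxim.h has been included; max_cc1_has_rd0p5() is a hypothetical name, while the register and bit macros are the ones from the header.

/* Illustrative only: reading a vendor status bit through the shared helper. */
static int max_cc1_has_rd0p5(struct max_tcpci_chip *chip, bool *present)
{
        u8 status;
        int ret;

        ret = max_tcpci_read8(chip, VENDOR_CC_STATUS2, &status);
        if (ret < 0)
                return ret;

        *present = !!(status & CC1_VUFP_RD0P5);
        return 0;
}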
diff --git a/drivers/usb/typec/tcpm/tcpci_maxim.c b/drivers/usb/typec/tcpm/tcpci_maxim_core.c
index 83e140ffcc3e..f32cda2a5e3a 100644
--- a/drivers/usb/typec/tcpm/tcpci_maxim.c
+++ b/drivers/usb/typec/tcpm/tcpci_maxim_core.c
@@ -1,6 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2020, Google LLC
+ * Copyright (C) 2020 - 2022, Google LLC
*
* MAXIM TCPCI based TCPC driver
*/
@@ -15,6 +15,8 @@
#include <linux/usb/tcpm.h>
#include <linux/usb/typec.h>
+#include "tcpci_maxim.h"
+
#define PD_ACTIVITY_TIMEOUT_MS 10000
#define TCPC_VENDOR_ALERT 0x80
@@ -39,14 +41,6 @@
#define MAX_BUCK_BOOST_SOURCE 0xa
#define MAX_BUCK_BOOST_SINK 0x5
-struct max_tcpci_chip {
- struct tcpci_data data;
- struct tcpci *tcpci;
- struct device *dev;
- struct i2c_client *client;
- struct tcpm_port *port;
-};
-
static const struct regmap_range max_tcpci_tcpci_range[] = {
regmap_reg_range(0x00, 0x95)
};
@@ -68,26 +62,6 @@ static struct max_tcpci_chip *tdata_to_max_tcpci(struct tcpci_data *tdata)
return container_of(tdata, struct max_tcpci_chip, data);
}
-static int max_tcpci_read16(struct max_tcpci_chip *chip, unsigned int reg, u16 *val)
-{
- return regmap_raw_read(chip->data.regmap, reg, val, sizeof(u16));
-}
-
-static int max_tcpci_write16(struct max_tcpci_chip *chip, unsigned int reg, u16 val)
-{
- return regmap_raw_write(chip->data.regmap, reg, &val, sizeof(u16));
-}
-
-static int max_tcpci_read8(struct max_tcpci_chip *chip, unsigned int reg, u8 *val)
-{
- return regmap_raw_read(chip->data.regmap, reg, val, sizeof(u8));
-}
-
-static int max_tcpci_write8(struct max_tcpci_chip *chip, unsigned int reg, u8 val)
-{
- return regmap_raw_write(chip->data.regmap, reg, &val, sizeof(u8));
-}
-
static void max_tcpci_init_regs(struct max_tcpci_chip *chip)
{
u16 alert_mask = 0;
@@ -348,8 +322,14 @@ static irqreturn_t _max_tcpci_irq(struct max_tcpci_chip *chip, u16 status)
if (status & TCPC_ALERT_VBUS_DISCNCT)
tcpm_vbus_change(chip->port);
- if (status & TCPC_ALERT_CC_STATUS)
- tcpm_cc_change(chip->port);
+ if (status & TCPC_ALERT_CC_STATUS) {
+ if (chip->contaminant_state == DETECTED || tcpm_port_is_toggling(chip->port)) {
+ if (!max_contaminant_is_contaminant(chip, false))
+ tcpm_port_clean(chip->port);
+ } else {
+ tcpm_cc_change(chip->port);
+ }
+ }
if (status & TCPC_ALERT_POWER_STATUS)
process_power_status(chip);
@@ -438,6 +418,14 @@ static int tcpci_init(struct tcpci *tcpci, struct tcpci_data *data)
return -1;
}
+static void max_tcpci_check_contaminant(struct tcpci *tcpci, struct tcpci_data *tdata)
+{
+ struct max_tcpci_chip *chip = tdata_to_max_tcpci(tdata);
+
+ if (!max_contaminant_is_contaminant(chip, true))
+ tcpm_port_clean(chip->port);
+}
+
static int max_tcpci_probe(struct i2c_client *client)
{
int ret;
@@ -471,6 +459,7 @@ static int max_tcpci_probe(struct i2c_client *client)
chip->data.auto_discharge_disconnect = true;
chip->data.vbus_vsafe0v = true;
chip->data.set_partner_usb_comm_capable = max_tcpci_set_partner_usb_comm_capable;
+ chip->data.check_contaminant = max_tcpci_check_contaminant;
max_tcpci_init_regs(chip);
chip->tcpci = tcpci_register_port(chip->dev, &chip->data);
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 59b366b5c614..a0d943d78580 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -36,6 +36,7 @@
#define FOREACH_STATE(S) \
S(INVALID_STATE), \
S(TOGGLING), \
+ S(CHECK_CONTAMINANT), \
S(SRC_UNATTACHED), \
S(SRC_ATTACH_WAIT), \
S(SRC_ATTACHED), \
@@ -249,6 +250,7 @@ enum frs_typec_current {
#define TCPM_RESET_EVENT BIT(2)
#define TCPM_FRS_EVENT BIT(3)
#define TCPM_SOURCING_VBUS BIT(4)
+#define TCPM_PORT_CLEAN BIT(5)
#define LOG_BUFFER_ENTRIES 1024
#define LOG_BUFFER_ENTRY_SIZE 128
@@ -483,6 +485,13 @@ struct tcpm_port {
* SNK_READY for non-pd link.
*/
bool slow_charger_loop;
+
+ /*
+ * When true, the recent tcpm state machine transitions suggest potential
+ * contaminant in the connector pins, and the lower level driver is asked
+ * to check for it before toggling resumes.
+ */
+ bool potential_contaminant;
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
struct mutex logbuffer_lock; /* log buffer access lock */
@@ -647,7 +656,7 @@ static void tcpm_log(struct tcpm_port *port, const char *fmt, ...)
/* Do not log while disconnected and unattached */
if (tcpm_port_is_disconnected(port) &&
(port->state == SRC_UNATTACHED || port->state == SNK_UNATTACHED ||
- port->state == TOGGLING))
+ port->state == TOGGLING || port->state == CHECK_CONTAMINANT))
return;
va_start(args, fmt);
@@ -1693,14 +1702,11 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
}
break;
case CMD_ENTER_MODE:
- if (adev && pdev) {
- typec_altmode_update_active(pdev, true);
+ if (adev && pdev)
*adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
- }
return 0;
case CMD_EXIT_MODE:
if (adev && pdev) {
- typec_altmode_update_active(pdev, false);
/* Back to USB Operation */
*adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
return 0;
@@ -3904,15 +3910,28 @@ static void run_state_machine(struct tcpm_port *port)
unsigned int msecs;
enum tcpm_state upcoming_state;
+ if (port->tcpc->check_contaminant && port->state != CHECK_CONTAMINANT)
+ port->potential_contaminant = ((port->enter_state == SRC_ATTACH_WAIT &&
+ port->state == SRC_UNATTACHED) ||
+ (port->enter_state == SNK_ATTACH_WAIT &&
+ port->state == SNK_UNATTACHED));
+
port->enter_state = port->state;
switch (port->state) {
case TOGGLING:
break;
+ case CHECK_CONTAMINANT:
+ port->tcpc->check_contaminant(port->tcpc);
+ break;
/* SRC states */
case SRC_UNATTACHED:
if (!port->non_pd_role_swap)
tcpm_swap_complete(port, -ENOTCONN);
tcpm_src_detach(port);
+ if (port->potential_contaminant) {
+ tcpm_set_state(port, CHECK_CONTAMINANT, 0);
+ break;
+ }
if (tcpm_start_toggling(port, tcpm_rp_cc(port))) {
tcpm_set_state(port, TOGGLING, 0);
break;
@@ -4150,6 +4169,10 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_swap_complete(port, -ENOTCONN);
tcpm_pps_complete(port, -ENOTCONN);
tcpm_snk_detach(port);
+ if (port->potential_contaminant) {
+ tcpm_set_state(port, CHECK_CONTAMINANT, 0);
+ break;
+ }
if (tcpm_start_toggling(port, TYPEC_CC_RD)) {
tcpm_set_state(port, TOGGLING, 0);
break;
@@ -4925,6 +4948,9 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
else if (tcpm_port_is_sink(port))
tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
break;
+ case CHECK_CONTAMINANT:
+ /* Wait for Toggling to be resumed */
+ break;
case SRC_UNATTACHED:
case ACC_UNATTACHED:
if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
@@ -5424,6 +5450,15 @@ static void tcpm_pd_event_handler(struct kthread_work *work)
port->vbus_source = true;
_tcpm_pd_vbus_on(port);
}
+ if (events & TCPM_PORT_CLEAN) {
+ tcpm_log(port, "port clean");
+ if (port->state == CHECK_CONTAMINANT) {
+ if (tcpm_start_toggling(port, tcpm_rp_cc(port)))
+ tcpm_set_state(port, TOGGLING, 0);
+ else
+ tcpm_set_state(port, tcpm_default_state(port), 0);
+ }
+ }
spin_lock(&port->pd_event_lock);
}
@@ -5476,6 +5511,21 @@ void tcpm_sourcing_vbus(struct tcpm_port *port)
}
EXPORT_SYMBOL_GPL(tcpm_sourcing_vbus);
+void tcpm_port_clean(struct tcpm_port *port)
+{
+ spin_lock(&port->pd_event_lock);
+ port->pd_events |= TCPM_PORT_CLEAN;
+ spin_unlock(&port->pd_event_lock);
+ kthread_queue_work(port->wq, &port->event_work);
+}
+EXPORT_SYMBOL_GPL(tcpm_port_clean);
+
+bool tcpm_port_is_toggling(struct tcpm_port *port)
+{
+ return port->port_type == TYPEC_PORT_DRP && port->state == TOGGLING;
+}
+EXPORT_SYMBOL_GPL(tcpm_port_is_toggling);
+
static void tcpm_enable_frs_work(struct kthread_work *work)
{
struct tcpm_port *port = container_of(work, struct tcpm_port, enable_frs);
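
Not part of the patch: pulling the tcpm.c pieces together, an attach-wait followed by a disconnect marks the port as potentially contaminated, tcpm then parks in the new CHECK_CONTAMINANT state and invokes the low-level driver's check_contaminant() hook, and the driver reports back with tcpm_port_clean() once it judges the connector dry, which queues TCPM_PORT_CLEAN and restarts toggling. A hedged sketch of the driver side of that contract follows; my_tcpc and my_connector_is_wet() are hypothetical, while tcpm_port_clean() and the check_contaminant callback are the interfaces added in this series.

/* Illustrative TCPC-driver side of the CHECK_CONTAMINANT contract. */
#include <linux/container_of.h>
#include <linux/usb/tcpm.h>

struct my_tcpc {
        struct tcpc_dev tcpc;
        struct tcpm_port *port;
};

static bool my_connector_is_wet(struct my_tcpc *chip);  /* vendor-specific detection */

static void my_check_contaminant(struct tcpc_dev *dev)
{
        struct my_tcpc *chip = container_of(dev, struct my_tcpc, tcpc);

        if (!my_connector_is_wet(chip))
                tcpm_port_clean(chip->port);    /* lets tcpm resume toggling */
        /* otherwise stay parked; re-evaluate on the next CC alert */
}

In the Maxim driver this role is played by max_tcpci_check_contaminant(), which defers the actual measurement to max_contaminant_is_contaminant().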
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index 46a4d8b128f0..485b90c13078 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -95,6 +95,7 @@ struct tps6598x {
struct power_supply_desc psy_desc;
enum power_supply_usb_type usb_type;
+ int wakeup;
u16 pwr_status;
};
@@ -846,6 +847,12 @@ static int tps6598x_probe(struct i2c_client *client)
i2c_set_clientdata(client, tps);
fwnode_handle_put(fwnode);
+ tps->wakeup = device_property_read_bool(tps->dev, "wakeup-source");
+ if (tps->wakeup) {
+ device_init_wakeup(&client->dev, true);
+ enable_irq_wake(client->irq);
+ }
+
return 0;
err_disconnect:
@@ -870,6 +877,36 @@ static void tps6598x_remove(struct i2c_client *client)
usb_role_switch_put(tps->role_sw);
}
+static int __maybe_unused tps6598x_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct tps6598x *tps = i2c_get_clientdata(client);
+
+ if (tps->wakeup) {
+ disable_irq(client->irq);
+ enable_irq_wake(client->irq);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused tps6598x_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct tps6598x *tps = i2c_get_clientdata(client);
+
+ if (tps->wakeup) {
+ disable_irq_wake(client->irq);
+ enable_irq(client->irq);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops tps6598x_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tps6598x_suspend, tps6598x_resume)
+};
+
static const struct of_device_id tps6598x_of_match[] = {
{ .compatible = "ti,tps6598x", },
{ .compatible = "apple,cd321x", },
@@ -886,6 +923,7 @@ MODULE_DEVICE_TABLE(i2c, tps6598x_id);
static struct i2c_driver tps6598x_i2c_driver = {
.driver = {
.name = "tps6598x",
+ .pm = &tps6598x_pm_ops,
.of_match_table = tps6598x_of_match,
},
.probe_new = tps6598x_probe,
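
Not part of the patch: the tipd change is the common wakeup-source pattern for an interrupt-driven I2C client: probe enables wakeup when the firmware node carries "wakeup-source", suspend masks normal IRQ handling but leaves the line armed as a wake source, and resume reverses both steps. A generic sketch of that pattern follows, with a hypothetical my_chip driver; whether enable_irq_wake() also belongs in probe (as the tps6598x hunk does) or only in suspend is a per-driver choice.

/* Generic wakeup-source suspend/resume pattern (illustrative only). */
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/pm.h>

struct my_chip {
        bool wakeup;    /* set in probe from device_property_read_bool(dev, "wakeup-source") */
};

static int __maybe_unused my_suspend(struct device *dev)
{
        struct i2c_client *client = to_i2c_client(dev);
        struct my_chip *chip = i2c_get_clientdata(client);

        if (chip->wakeup) {
                disable_irq(client->irq);       /* stop normal handling */
                enable_irq_wake(client->irq);   /* keep the line as a wake source */
        }
        return 0;
}

static int __maybe_unused my_resume(struct device *dev)
{
        struct i2c_client *client = to_i2c_client(dev);
        struct my_chip *chip = i2c_get_clientdata(client);

        if (chip->wakeup) {
                disable_irq_wake(client->irq);
                enable_irq(client->irq);
        }
        return 0;
}

static const struct dev_pm_ops my_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(my_suspend, my_resume)
};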
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 1cf8947c6d66..f632350f6dcb 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -567,8 +567,9 @@ static void ucsi_unregister_altmodes(struct ucsi_connector *con, u8 recipient)
}
}
-static int ucsi_get_pdos(struct ucsi_connector *con, int is_partner,
- u32 *pdos, int offset, int num_pdos)
+static int ucsi_read_pdos(struct ucsi_connector *con,
+ enum typec_role role, int is_partner,
+ u32 *pdos, int offset, int num_pdos)
{
struct ucsi *ucsi = con->ucsi;
u64 command;
@@ -578,7 +579,7 @@ static int ucsi_get_pdos(struct ucsi_connector *con, int is_partner,
command |= UCSI_GET_PDOS_PARTNER_PDO(is_partner);
command |= UCSI_GET_PDOS_PDO_OFFSET(offset);
command |= UCSI_GET_PDOS_NUM_PDOS(num_pdos - 1);
- command |= UCSI_GET_PDOS_SRC_PDOS;
+ command |= is_source(role) ? UCSI_GET_PDOS_SRC_PDOS : 0;
ret = ucsi_send_command(ucsi, command, pdos + offset,
num_pdos * sizeof(u32));
if (ret < 0 && ret != -ETIMEDOUT)
@@ -587,30 +588,43 @@ static int ucsi_get_pdos(struct ucsi_connector *con, int is_partner,
return ret;
}
-static int ucsi_get_src_pdos(struct ucsi_connector *con)
+static int ucsi_get_pdos(struct ucsi_connector *con, enum typec_role role,
+ int is_partner, u32 *pdos)
{
+ u8 num_pdos;
int ret;
/* UCSI max payload means only getting at most 4 PDOs at a time */
- ret = ucsi_get_pdos(con, 1, con->src_pdos, 0, UCSI_MAX_PDOS);
+ ret = ucsi_read_pdos(con, role, is_partner, pdos, 0, UCSI_MAX_PDOS);
if (ret < 0)
return ret;
- con->num_pdos = ret / sizeof(u32); /* number of bytes to 32-bit PDOs */
- if (con->num_pdos < UCSI_MAX_PDOS)
- return 0;
+ num_pdos = ret / sizeof(u32); /* number of bytes to 32-bit PDOs */
+ if (num_pdos < UCSI_MAX_PDOS)
+ return num_pdos;
/* get the remaining PDOs, if any */
- ret = ucsi_get_pdos(con, 1, con->src_pdos, UCSI_MAX_PDOS,
- PDO_MAX_OBJECTS - UCSI_MAX_PDOS);
+ ret = ucsi_read_pdos(con, role, is_partner, pdos, UCSI_MAX_PDOS,
+ PDO_MAX_OBJECTS - UCSI_MAX_PDOS);
if (ret < 0)
return ret;
- con->num_pdos += ret / sizeof(u32);
+ return ret / sizeof(u32) + num_pdos;
+}
+
+static int ucsi_get_src_pdos(struct ucsi_connector *con)
+{
+ int ret;
+
+ ret = ucsi_get_pdos(con, TYPEC_SOURCE, 1, con->src_pdos);
+ if (ret < 0)
+ return ret;
+
+ con->num_pdos = ret;
ucsi_port_psy_changed(con);
- return 0;
+ return ret;
}
static int ucsi_check_altmodes(struct ucsi_connector *con)
@@ -635,6 +649,72 @@ static int ucsi_check_altmodes(struct ucsi_connector *con)
return ret;
}
+static int ucsi_register_partner_pdos(struct ucsi_connector *con)
+{
+ struct usb_power_delivery_desc desc = { con->ucsi->cap.pd_version };
+ struct usb_power_delivery_capabilities_desc caps;
+ struct usb_power_delivery_capabilities *cap;
+ int ret;
+
+ if (con->partner_pd)
+ return 0;
+
+ con->partner_pd = usb_power_delivery_register(NULL, &desc);
+ if (IS_ERR(con->partner_pd))
+ return PTR_ERR(con->partner_pd);
+
+ ret = ucsi_get_pdos(con, TYPEC_SOURCE, 1, caps.pdo);
+ if (ret > 0) {
+ if (ret < PDO_MAX_OBJECTS)
+ caps.pdo[ret] = 0;
+
+ caps.role = TYPEC_SOURCE;
+ cap = usb_power_delivery_register_capabilities(con->partner_pd, &caps);
+ if (IS_ERR(cap))
+ return PTR_ERR(cap);
+
+ con->partner_source_caps = cap;
+
+ ret = typec_partner_set_usb_power_delivery(con->partner, con->partner_pd);
+ if (ret) {
+ usb_power_delivery_unregister_capabilities(con->partner_source_caps);
+ return ret;
+ }
+ }
+
+ ret = ucsi_get_pdos(con, TYPEC_SINK, 1, caps.pdo);
+ if (ret > 0) {
+ if (ret < PDO_MAX_OBJECTS)
+ caps.pdo[ret] = 0;
+
+ caps.role = TYPEC_SINK;
+
+ cap = usb_power_delivery_register_capabilities(con->partner_pd, &caps);
+ if (IS_ERR(cap))
+ return PTR_ERR(cap);
+
+ con->partner_sink_caps = cap;
+
+ ret = typec_partner_set_usb_power_delivery(con->partner, con->partner_pd);
+ if (ret) {
+ usb_power_delivery_unregister_capabilities(con->partner_sink_caps);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void ucsi_unregister_partner_pdos(struct ucsi_connector *con)
+{
+ usb_power_delivery_unregister_capabilities(con->partner_sink_caps);
+ con->partner_sink_caps = NULL;
+ usb_power_delivery_unregister_capabilities(con->partner_source_caps);
+ con->partner_source_caps = NULL;
+ usb_power_delivery_unregister(con->partner_pd);
+ con->partner_pd = NULL;
+}
+
static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
{
switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) {
@@ -643,6 +723,7 @@ static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_PD);
ucsi_partner_task(con, ucsi_get_src_pdos, 30, 0);
ucsi_partner_task(con, ucsi_check_altmodes, 30, 0);
+ ucsi_partner_task(con, ucsi_register_partner_pdos, 1, HZ);
break;
case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
con->rdo = 0;
@@ -701,6 +782,7 @@ static void ucsi_unregister_partner(struct ucsi_connector *con)
if (!con->partner)
return;
+ ucsi_unregister_partner_pdos(con);
ucsi_unregister_altmodes(con, UCSI_RECIPIENT_SOP);
typec_unregister_partner(con->partner);
con->partner = NULL;
@@ -805,6 +887,10 @@ static void ucsi_handle_connector_change(struct work_struct *work)
if (con->status.flags & UCSI_CONSTAT_CONNECTED) {
ucsi_register_partner(con);
ucsi_partner_task(con, ucsi_check_connection, 1, HZ);
+
+ if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) ==
+ UCSI_CONSTAT_PWR_OPMODE_PD)
+ ucsi_partner_task(con, ucsi_register_partner_pdos, 1, HZ);
} else {
ucsi_unregister_partner(con);
}
@@ -1041,6 +1127,9 @@ static struct fwnode_handle *ucsi_find_fwnode(struct ucsi_connector *con)
static int ucsi_register_port(struct ucsi *ucsi, int index)
{
+ struct usb_power_delivery_desc desc = { ucsi->cap.pd_version };
+ struct usb_power_delivery_capabilities_desc pd_caps;
+ struct usb_power_delivery_capabilities *pd_cap;
struct ucsi_connector *con = &ucsi->connector[index];
struct typec_capability *cap = &con->typec_cap;
enum typec_accessory *accessory = cap->accessory;
@@ -1120,6 +1209,41 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
goto out;
}
+ con->pd = usb_power_delivery_register(ucsi->dev, &desc);
+
+ ret = ucsi_get_pdos(con, TYPEC_SOURCE, 0, pd_caps.pdo);
+ if (ret > 0) {
+ if (ret < PDO_MAX_OBJECTS)
+ pd_caps.pdo[ret] = 0;
+
+ pd_caps.role = TYPEC_SOURCE;
+ pd_cap = usb_power_delivery_register_capabilities(con->pd, &pd_caps);
+ if (IS_ERR(pd_cap)) {
+ ret = PTR_ERR(pd_cap);
+ goto out;
+ }
+
+ con->port_source_caps = pd_cap;
+ typec_port_set_usb_power_delivery(con->port, con->pd);
+ }
+
+ memset(&pd_caps, 0, sizeof(pd_caps));
+ ret = ucsi_get_pdos(con, TYPEC_SINK, 0, pd_caps.pdo);
+ if (ret > 0) {
+ if (ret < PDO_MAX_OBJECTS)
+ pd_caps.pdo[ret] = 0;
+
+ pd_caps.role = TYPEC_SINK;
+ pd_cap = usb_power_delivery_register_capabilities(con->pd, &pd_caps);
+ if (IS_ERR(pd_cap)) {
+ ret = PTR_ERR(pd_cap);
+ goto out;
+ }
+
+ con->port_sink_caps = pd_cap;
+ typec_port_set_usb_power_delivery(con->port, con->pd);
+ }
+
/* Alternate modes */
ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_CON);
if (ret) {
@@ -1158,8 +1282,8 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
if (con->status.flags & UCSI_CONSTAT_CONNECTED) {
typec_set_pwr_role(con->port,
!!(con->status.flags & UCSI_CONSTAT_PWR_DIR));
- ucsi_pwr_opmode_change(con);
ucsi_register_partner(con);
+ ucsi_pwr_opmode_change(con);
ucsi_port_psy_changed(con);
}
@@ -1265,6 +1389,13 @@ err_unregister:
ucsi_unregister_port_psy(con);
if (con->wq)
destroy_workqueue(con->wq);
+
+ usb_power_delivery_unregister_capabilities(con->port_sink_caps);
+ con->port_sink_caps = NULL;
+ usb_power_delivery_unregister_capabilities(con->port_source_caps);
+ con->port_source_caps = NULL;
+ usb_power_delivery_unregister(con->pd);
+ con->pd = NULL;
typec_unregister_port(con->port);
con->port = NULL;
}
@@ -1447,6 +1578,13 @@ void ucsi_unregister(struct ucsi *ucsi)
mutex_unlock(&ucsi->connector[i].lock);
destroy_workqueue(ucsi->connector[i].wq);
}
+
+ usb_power_delivery_unregister_capabilities(ucsi->connector[i].port_sink_caps);
+ ucsi->connector[i].port_sink_caps = NULL;
+ usb_power_delivery_unregister_capabilities(ucsi->connector[i].port_source_caps);
+ ucsi->connector[i].port_source_caps = NULL;
+ usb_power_delivery_unregister(ucsi->connector[i].pd);
+ ucsi->connector[i].pd = NULL;
typec_unregister_port(ucsi->connector[i].port);
}
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 60ce9fb6e745..c09af859f573 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -340,6 +340,14 @@ struct ucsi_connector {
u32 src_pdos[PDO_MAX_OBJECTS];
int num_pdos;
+ /* USB PD objects */
+ struct usb_power_delivery *pd;
+ struct usb_power_delivery_capabilities *port_source_caps;
+ struct usb_power_delivery_capabilities *port_sink_caps;
+ struct usb_power_delivery *partner_pd;
+ struct usb_power_delivery_capabilities *partner_source_caps;
+ struct usb_power_delivery_capabilities *partner_sink_caps;
+
struct usb_role_switch *usb_role_sw;
};
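
Not part of the patch: the new ucsi_connector fields hold the USB PD objects that ucsi.c now builds from GET_PDOS results, both for the port itself and for the attached partner. For each role the sequence is: register a usb_power_delivery parent, copy the fetched PDOs into a capabilities descriptor (leaving trailing slots zero when fewer than PDO_MAX_OBJECTS came back), register the capabilities under that parent, and attach the result to the typec port or partner. Below is a condensed sketch of the port-side sequence; register_source_caps() and its parameters are hypothetical, while the usb_power_delivery_* and typec_port_set_usb_power_delivery() calls are the ones used in the hunks above.

/* Condensed, illustrative version of the port-side PD registration above. */
#include <linux/err.h>
#include <linux/string.h>
#include <linux/usb/pd.h>
#include <linux/usb/typec.h>

/* Assumes 0 < n <= PDO_MAX_OBJECTS PDOs were already read via GET_PDOS. */
static int register_source_caps(struct typec_port *port, struct device *dev,
                                u16 pd_revision, const u32 *pdos, int n)
{
        struct usb_power_delivery_desc desc = { pd_revision };
        struct usb_power_delivery_capabilities_desc caps = { };
        struct usb_power_delivery_capabilities *cap;
        struct usb_power_delivery *pd;

        pd = usb_power_delivery_register(dev, &desc);
        if (IS_ERR(pd))
                return PTR_ERR(pd);

        memcpy(caps.pdo, pdos, n * sizeof(u32)); /* trailing zeros terminate the list */
        caps.role = TYPEC_SOURCE;

        cap = usb_power_delivery_register_capabilities(pd, &caps);
        if (IS_ERR(cap)) {
                usb_power_delivery_unregister(pd);
                return PTR_ERR(cap);
        }

        return typec_port_set_usb_power_delivery(port, pd);
}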
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index 46441f1477f2..e0ed465bd518 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -643,7 +643,7 @@ static int ccg_request_irq(struct ucsi_ccg *uc)
{
unsigned long flags = IRQF_ONESHOT;
- if (!has_acpi_companion(uc->dev))
+ if (!dev_fwnode(uc->dev))
flags |= IRQF_TRIGGER_HIGH;
return request_threaded_irq(uc->irq, NULL, ccg_irq_handler, flags, dev_name(uc->dev), uc);
@@ -1342,6 +1342,7 @@ static int ucsi_ccg_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ucsi_ccg *uc;
+ const char *fw_name;
int status;
uc = devm_kzalloc(dev, sizeof(*uc), GFP_KERNEL);
@@ -1357,9 +1358,15 @@ static int ucsi_ccg_probe(struct i2c_client *client)
INIT_WORK(&uc->pm_work, ccg_pm_workaround_work);
/* Only fail FW flashing when FW build information is not provided */
- status = device_property_read_u16(dev, "ccgx,firmware-build",
- &uc->fw_build);
- if (status)
+ status = device_property_read_string(dev, "firmware-name", &fw_name);
+ if (!status) {
+ if (!strcmp(fw_name, "nvidia,jetson-agx-xavier"))
+ uc->fw_build = CCG_FW_BUILD_NVIDIA_TEGRA;
+ else if (!strcmp(fw_name, "nvidia,gpu"))
+ uc->fw_build = CCG_FW_BUILD_NVIDIA;
+ }
+
+ if (!uc->fw_build)
dev_err(uc->dev, "failed to get FW build information\n");
/* reset ccg device and initialize ucsi */
@@ -1426,6 +1433,12 @@ static void ucsi_ccg_remove(struct i2c_client *client)
free_irq(uc->irq, uc);
}
+static const struct of_device_id ucsi_ccg_of_match_table[] = {
+ { .compatible = "cypress,cypd4226", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ucsi_ccg_of_match_table);
+
static const struct i2c_device_id ucsi_ccg_device_id[] = {
{"ccgx-ucsi", 0},
{}
@@ -1480,6 +1493,7 @@ static struct i2c_driver ucsi_ccg_driver = {
.pm = &ucsi_ccg_pm,
.dev_groups = ucsi_ccg_groups,
.acpi_match_table = amd_i2c_ucsi_match,
+ .of_match_table = ucsi_ccg_of_match_table,
},
.probe_new = ucsi_ccg_probe,
.remove = ucsi_ccg_remove,