Diffstat (limited to 'drivers')
-rw-r--r--  drivers/base/power/clock_ops.c           |  4
-rw-r--r--  drivers/base/power/main.c                | 28
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c         | 46
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c         |  4
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c        |  2
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c         |  5
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7322.c  | 25
-rw-r--r--  drivers/infiniband/hw/qib/qib_intr.c     |  6
-rw-r--r--  drivers/pci/pci-driver.c                 |  4
-rw-r--r--  drivers/pci/pci.c                        |  2
-rw-r--r--  drivers/pci/probe.c                      |  2
-rw-r--r--  drivers/tty/serial/mrst_max3110.c        |  5
-rw-r--r--  drivers/usb/core/driver.c                |  6
13 files changed, 100 insertions(+), 39 deletions(-)
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index eaa8a854af03..ad367c4139b1 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -387,7 +387,7 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
clknb = container_of(nb, struct pm_clk_notifier_block, nb);
switch (action) {
- case BUS_NOTIFY_ADD_DEVICE:
+ case BUS_NOTIFY_BIND_DRIVER:
if (clknb->con_ids[0]) {
for (con_id = clknb->con_ids; *con_id; con_id++)
enable_clock(dev, *con_id);
@@ -395,7 +395,7 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
enable_clock(dev, NULL);
}
break;
- case BUS_NOTIFY_DEL_DEVICE:
+ case BUS_NOTIFY_UNBOUND_DRIVER:
if (clknb->con_ids[0]) {
for (con_id = clknb->con_ids; *con_id; con_id++)
disable_clock(dev, *con_id);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index aa6320207745..06f09bf89cb2 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -57,7 +57,8 @@ static int async_error;
*/
void device_pm_init(struct device *dev)
{
- dev->power.in_suspend = false;
+ dev->power.is_prepared = false;
+ dev->power.is_suspended = false;
init_completion(&dev->power.completion);
complete_all(&dev->power.completion);
dev->power.wakeup = NULL;
@@ -91,7 +92,7 @@ void device_pm_add(struct device *dev)
pr_debug("PM: Adding info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
mutex_lock(&dpm_list_mtx);
- if (dev->parent && dev->parent->power.in_suspend)
+ if (dev->parent && dev->parent->power.is_prepared)
dev_warn(dev, "parent %s should not be sleeping\n",
dev_name(dev->parent));
list_add_tail(&dev->power.entry, &dpm_list);
@@ -511,7 +512,14 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
dpm_wait(dev->parent, async);
device_lock(dev);
- dev->power.in_suspend = false;
+ /*
+ * This is a fib. But we'll allow new children to be added below
+ * a resumed device, even if the device hasn't been completed yet.
+ */
+ dev->power.is_prepared = false;
+
+ if (!dev->power.is_suspended)
+ goto Unlock;
if (dev->pwr_domain) {
pm_dev_dbg(dev, state, "power domain ");
@@ -548,6 +556,9 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
}
End:
+ dev->power.is_suspended = false;
+
+ Unlock:
device_unlock(dev);
complete_all(&dev->power.completion);
@@ -670,7 +681,7 @@ void dpm_complete(pm_message_t state)
struct device *dev = to_device(dpm_prepared_list.prev);
get_device(dev);
- dev->power.in_suspend = false;
+ dev->power.is_prepared = false;
list_move(&dev->power.entry, &list);
mutex_unlock(&dpm_list_mtx);
@@ -835,11 +846,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
device_lock(dev);
if (async_error)
- goto End;
+ goto Unlock;
if (pm_wakeup_pending()) {
async_error = -EBUSY;
- goto End;
+ goto Unlock;
}
if (dev->pwr_domain) {
@@ -877,6 +888,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
}
End:
+ dev->power.is_suspended = !error;
+
+ Unlock:
device_unlock(dev);
complete_all(&dev->power.completion);
@@ -1042,7 +1056,7 @@ int dpm_prepare(pm_message_t state)
put_device(dev);
break;
}
- dev->power.in_suspend = true;
+ dev->power.is_prepared = true;
if (!list_empty(&dev->power.entry))
list_move_tail(&dev->power.entry, &dpm_prepared_list);
put_device(dev);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index f660cd04ec2f..31fb44085c9b 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1463,9 +1463,9 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_qp_attributes attrs;
int disconnect = 1;
int release = 0;
- int abort = 0;
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int tid = GET_TID(hdr);
+ int ret;
ep = lookup_tid(t, tid);
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
@@ -1501,10 +1501,12 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
start_ep_timer(ep);
__state_set(&ep->com, CLOSING);
attrs.next_state = C4IW_QP_STATE_CLOSING;
- abort = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+ ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
- peer_close_upcall(ep);
- disconnect = 1;
+ if (ret != -ECONNRESET) {
+ peer_close_upcall(ep);
+ disconnect = 1;
+ }
break;
case ABORTING:
disconnect = 0;
@@ -2109,15 +2111,16 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
break;
}
- mutex_unlock(&ep->com.mutex);
if (close) {
- if (abrupt)
- ret = abort_connection(ep, NULL, gfp);
- else
+ if (abrupt) {
+ close_complete_upcall(ep);
+ ret = send_abort(ep, NULL, gfp);
+ } else
ret = send_halfclose(ep, gfp);
if (ret)
fatal = 1;
}
+ mutex_unlock(&ep->com.mutex);
if (fatal)
release_ep_resources(ep);
return ret;
@@ -2301,6 +2304,31 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
+static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct cpl_abort_req_rss *req = cplhdr(skb);
+ struct c4iw_ep *ep;
+ struct tid_info *t = dev->rdev.lldi.tids;
+ unsigned int tid = GET_TID(req);
+
+ ep = lookup_tid(t, tid);
+ if (is_neg_adv_abort(req->status)) {
+ PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
+ ep->hwtid);
+ kfree_skb(skb);
+ return 0;
+ }
+ PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
+ ep->com.state);
+
+ /*
+ * Wake up any threads in rdma_init() or rdma_fini().
+ */
+ c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ sched(dev, skb);
+ return 0;
+}
+
/*
* Most upcalls from the T4 Core go to sched() to
* schedule the processing on a work queue.
@@ -2317,7 +2345,7 @@ c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
[CPL_PASS_ESTABLISH] = sched,
[CPL_PEER_CLOSE] = sched,
[CPL_CLOSE_CON_RPL] = sched,
- [CPL_ABORT_REQ_RSS] = sched,
+ [CPL_ABORT_REQ_RSS] = peer_abort_intr,
[CPL_RDMA_TERMINATE] = sched,
[CPL_FW4_ACK] = sched,
[CPL_SET_TCB_RPL] = set_tcb_rpl,
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 8d8f8add6fcd..1720dc790d13 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -801,6 +801,10 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
if (ucontext) {
memsize = roundup(memsize, PAGE_SIZE);
hwentries = memsize / sizeof *chp->cq.queue;
+ while (hwentries > T4_MAX_IQ_SIZE) {
+ memsize -= PAGE_SIZE;
+ hwentries = memsize / sizeof *chp->cq.queue;
+ }
}
chp->cq.size = hwentries;
chp->cq.memsize = memsize;
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 273ffe49525a..0347eed4a167 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -625,7 +625,7 @@ pbl_done:
mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
mhp->attr.va_fbo = virt;
mhp->attr.page_size = shift - 12;
- mhp->attr.len = (u32) length;
+ mhp->attr.len = length;
err = register_mem(rhp, php, mhp, shift);
if (err)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 3b773b05a898..a41578e48c7b 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1207,11 +1207,8 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
c4iw_get_ep(&qhp->ep->com);
}
ret = rdma_fini(rhp, qhp, ep);
- if (ret) {
- if (internal)
- c4iw_get_ep(&qhp->ep->com);
+ if (ret)
goto err;
- }
break;
case C4IW_QP_STATE_TERMINATE:
set_state(qhp, C4IW_QP_STATE_TERMINATE);
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 9f53e68a096a..8ec5237031a0 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -469,6 +469,8 @@ static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
#define IB_7322_LT_STATE_RECOVERIDLE 0x0f
#define IB_7322_LT_STATE_CFGENH 0x10
#define IB_7322_LT_STATE_CFGTEST 0x11
+#define IB_7322_LT_STATE_CFGWAITRMTTEST 0x12
+#define IB_7322_LT_STATE_CFGWAITENH 0x13
/* link state machine states from IBC */
#define IB_7322_L_STATE_DOWN 0x0
@@ -498,8 +500,10 @@ static const u8 qib_7322_physportstate[0x20] = {
IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
[IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
[IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x13] = IB_PHYSPORTSTATE_CFG_WAIT_ENH,
+ [IB_7322_LT_STATE_CFGWAITRMTTEST] =
+ IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_7322_LT_STATE_CFGWAITENH] =
+ IB_PHYSPORTSTATE_CFG_WAIT_ENH,
[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
@@ -1692,7 +1696,9 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
break;
}
- if (ibclt == IB_7322_LT_STATE_CFGTEST &&
+ if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
+ ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
+ ibclt == IB_7322_LT_STATE_LINKUP) &&
(ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
force_h1(ppd);
ppd->cpspec->qdr_reforce = 1;
@@ -7301,12 +7307,17 @@ static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
{
u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
- printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS %s\n",
- ppd->dd->unit, ppd->port, (enable ? "on" : "off"));
- if (enable)
+ u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
+
+ if (enable && !state) {
+ printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS on\n",
+ ppd->dd->unit, ppd->port);
data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
- else
+ } else if (!enable && state) {
+ printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS off\n",
+ ppd->dd->unit, ppd->port);
data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
+ }
qib_write_kreg_port(ppd, krp_serdesctrl, data);
}
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
index a693c56ec8a6..6ae57d23004a 100644
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -96,8 +96,12 @@ void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
* states, or if it transitions from any of the up (INIT or better)
* states into any of the down states (except link recovery), then
* call the chip-specific code to take appropriate actions.
+ *
+ * ppd->lflags could be 0 if this is the first time the interrupt
+ * handlers has been called but the link is already up.
*/
- if (lstate >= IB_PORT_INIT && (ppd->lflags & QIBL_LINKDOWN) &&
+ if (lstate >= IB_PORT_INIT &&
+ (!ppd->lflags || (ppd->lflags & QIBL_LINKDOWN)) &&
ltstate == IB_PHYSPORTSTATE_LINKUP) {
/* transitioned to UP */
if (dd->f_ib_updown(ppd, 1, ibcs))
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 135df164a4c1..46767c53917a 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -624,7 +624,7 @@ static int pci_pm_prepare(struct device *dev)
* system from the sleep state, we'll have to prevent it from signaling
* wake-up.
*/
- pm_runtime_resume(dev);
+ pm_runtime_get_sync(dev);
if (drv && drv->pm && drv->pm->prepare)
error = drv->pm->prepare(dev);
@@ -638,6 +638,8 @@ static void pci_pm_complete(struct device *dev)
if (drv && drv->pm && drv->pm->complete)
drv->pm->complete(dev);
+
+ pm_runtime_put_sync(dev);
}
#else /* !CONFIG_PM_SLEEP */
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 5f10c23dff94..2c5b9b991279 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3284,7 +3284,7 @@ static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
* @dev: the PCI device
* @decode: true = enable decoding, false = disable decoding
* @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
- * @change_bridge_flags: traverse ancestors and change bridges
+ * @flags: traverse ancestors and change bridges
* CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
*/
int pci_set_vga_state(struct pci_dev *dev, bool decode,
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 48849ffdd672..bafb3c3d4a89 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -168,7 +168,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN;
if (type == pci_bar_io) {
l &= PCI_BASE_ADDRESS_IO_MASK;
- mask = PCI_BASE_ADDRESS_IO_MASK & IO_SPACE_LIMIT;
+ mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
} else {
l &= PCI_BASE_ADDRESS_MEM_MASK;
mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
index 1bd28450ca40..a764bf99743b 100644
--- a/drivers/tty/serial/mrst_max3110.c
+++ b/drivers/tty/serial/mrst_max3110.c
@@ -421,7 +421,6 @@ static int max3110_main_thread(void *_max)
int ret = 0;
struct circ_buf *xmit = &max->con_xmit;
- init_waitqueue_head(wq);
pr_info(PR_FMT "start main thread\n");
do {
@@ -823,7 +822,7 @@ static int __devinit serial_m3110_probe(struct spi_device *spi)
res = RC_TAG;
ret = max3110_write_then_read(max, (u8 *)&res, (u8 *)&res, 2, 0);
if (ret < 0 || res == 0 || res == 0xffff) {
- printk(KERN_ERR "MAX3111 deemed not present (conf reg %04x)",
+ dev_dbg(&spi->dev, "MAX3111 deemed not present (conf reg %04x)",
res);
ret = -ENODEV;
goto err_get_page;
@@ -838,6 +837,8 @@ static int __devinit serial_m3110_probe(struct spi_device *spi)
max->con_xmit.head = 0;
max->con_xmit.tail = 0;
+ init_waitqueue_head(&max->wq);
+
max->main_thread = kthread_run(max3110_main_thread,
max, "max3110_main");
if (IS_ERR(max->main_thread)) {
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index e35a17687c05..aa3cc465a601 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -375,7 +375,7 @@ static int usb_unbind_interface(struct device *dev)
* Just re-enable it without affecting the endpoint toggles.
*/
usb_enable_interface(udev, intf, false);
- } else if (!error && !intf->dev.power.in_suspend) {
+ } else if (!error && !intf->dev.power.is_prepared) {
r = usb_set_interface(udev, intf->altsetting[0].
desc.bInterfaceNumber, 0);
if (r < 0)
@@ -960,7 +960,7 @@ void usb_rebind_intf(struct usb_interface *intf)
}
/* Try to rebind the interface */
- if (!intf->dev.power.in_suspend) {
+ if (!intf->dev.power.is_prepared) {
intf->needs_binding = 0;
rc = device_attach(&intf->dev);
if (rc < 0)
@@ -1107,7 +1107,7 @@ static int usb_resume_interface(struct usb_device *udev,
if (intf->condition == USB_INTERFACE_UNBOUND) {
/* Carry out a deferred switch to altsetting 0 */
- if (intf->needs_altsetting0 && !intf->dev.power.in_suspend) {
+ if (intf->needs_altsetting0 && !intf->dev.power.is_prepared) {
usb_set_interface(udev, intf->altsetting[0].
desc.bInterfaceNumber, 0);
intf->needs_altsetting0 = 0;