Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/arch_topology.c        89
-rw-r--r--  drivers/base/dd.c                    3
-rw-r--r--  drivers/base/power/runtime.c       111
-rw-r--r--  drivers/base/power/wakeup_stats.c    4
4 files changed, 157 insertions(+), 50 deletions(-)
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index de8587cc119e..c1179edc0f3b 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -21,17 +21,94 @@
#include <linux/sched.h>
#include <linux/smp.h>
+static DEFINE_PER_CPU(struct scale_freq_data *, sft_data);
+static struct cpumask scale_freq_counters_mask;
+static bool scale_freq_invariant;
+
+static bool supports_scale_freq_counters(const struct cpumask *cpus)
+{
+ return cpumask_subset(cpus, &scale_freq_counters_mask);
+}
+
bool topology_scale_freq_invariant(void)
{
return cpufreq_supports_freq_invariance() ||
- arch_freq_counters_available(cpu_online_mask);
+ supports_scale_freq_counters(cpu_online_mask);
}
-__weak bool arch_freq_counters_available(const struct cpumask *cpus)
+static void update_scale_freq_invariant(bool status)
{
- return false;
+ if (scale_freq_invariant == status)
+ return;
+
+ /*
+ * Task scheduler behavior depends on frequency invariance support,
+ * either cpufreq or counter driven. If the support status changes as
+ * a result of counter initialisation and use, retrigger the build of
+ * scheduling domains to ensure the information is propagated properly.
+ */
+ if (topology_scale_freq_invariant() == status) {
+ scale_freq_invariant = status;
+ rebuild_sched_domains_energy();
+ }
}
-DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
+
+void topology_set_scale_freq_source(struct scale_freq_data *data,
+ const struct cpumask *cpus)
+{
+ struct scale_freq_data *sfd;
+ int cpu;
+
+ /*
+ * Avoid calling rebuild_sched_domains() unnecessarily if FIE is
+ * supported by cpufreq.
+ */
+ if (cpumask_empty(&scale_freq_counters_mask))
+ scale_freq_invariant = topology_scale_freq_invariant();
+
+ for_each_cpu(cpu, cpus) {
+ sfd = per_cpu(sft_data, cpu);
+
+ /* Use ARCH provided counters whenever possible */
+ if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) {
+ per_cpu(sft_data, cpu) = data;
+ cpumask_set_cpu(cpu, &scale_freq_counters_mask);
+ }
+ }
+
+ update_scale_freq_invariant(true);
+}
+EXPORT_SYMBOL_GPL(topology_set_scale_freq_source);
+
+void topology_clear_scale_freq_source(enum scale_freq_source source,
+ const struct cpumask *cpus)
+{
+ struct scale_freq_data *sfd;
+ int cpu;
+
+ for_each_cpu(cpu, cpus) {
+ sfd = per_cpu(sft_data, cpu);
+
+ if (sfd && sfd->source == source) {
+ per_cpu(sft_data, cpu) = NULL;
+ cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
+ }
+ }
+
+ update_scale_freq_invariant(false);
+}
+EXPORT_SYMBOL_GPL(topology_clear_scale_freq_source);
+
+void topology_scale_freq_tick(void)
+{
+ struct scale_freq_data *sfd = *this_cpu_ptr(&sft_data);
+
+ if (sfd)
+ sfd->set_freq_scale();
+}
+
+DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
+EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);
void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
unsigned long max_freq)
@@ -47,13 +124,13 @@ void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
* want to update the scale factor with information from CPUFREQ.
* Instead the scale factor will be updated from arch_scale_freq_tick.
*/
- if (arch_freq_counters_available(cpus))
+ if (supports_scale_freq_counters(cpus))
return;
scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
for_each_cpu(i, cpus)
- per_cpu(freq_scale, i) = scale;
+ per_cpu(arch_freq_scale, i) = scale;
}
DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
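
A minimal, hypothetical sketch of how a counter driver might plug into the interface added above. All example_* names are made up, SCALE_FREQ_SOURCE_CPUFREQ is assumed to be one of the enum scale_freq_source values alongside SCALE_FREQ_SOURCE_ARCH, and the scale computation is a placeholder for a real counter read:

#include <linux/arch_topology.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched/topology.h>

/* Called from topology_scale_freq_tick() on the local CPU. */
static void example_set_freq_scale(void)
{
	/*
	 * A real driver would derive the delivered/maximum frequency ratio
	 * from per-CPU hardware counters; the constant is a placeholder.
	 */
	this_cpu_write(arch_freq_scale, SCHED_CAPACITY_SCALE);
}

static struct scale_freq_data example_sfd = {
	.source		= SCALE_FREQ_SOURCE_CPUFREQ,	/* assumed enum value */
	.set_freq_scale	= example_set_freq_scale,
};

static int __init example_counters_init(void)
{
	/* Claim all online CPUs; ARCH-provided counters, if present, win. */
	topology_set_scale_freq_source(&example_sfd, cpu_online_mask);
	return 0;
}
module_init(example_counters_init);

static void __exit example_counters_exit(void)
{
	topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPUFREQ,
					 cpu_online_mask);
}
module_exit(example_counters_exit);

MODULE_LICENSE("GPL");

Once registered, topology_set_freq_scale() becomes a no-op for these CPUs and the per-CPU arch_freq_scale value is owned by the tick callback instead.
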
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 9179825ff646..e2cf3b29123e 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -97,6 +97,9 @@ static void deferred_probe_work_func(struct work_struct *work)
get_device(dev);
+ kfree(dev->p->deferred_probe_reason);
+ dev->p->deferred_probe_reason = NULL;
+
/*
* Drop the mutex while probing each device; the probe path may
* manipulate the deferred list
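
The string freed above is normally populated when a probe routine defers with dev_err_probe(), which records the message as the deferral reason (visible in /sys/kernel/debug/devices_deferred). A hypothetical consumer, with example_probe and the clock lookup purely illustrative:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk = devm_clk_get(&pdev->dev, NULL);

	/*
	 * If the clock provider is not ready yet, this returns -EPROBE_DEFER
	 * and stores "failed to get clock" as the deferred probe reason,
	 * which the hunk above now clears before every reprobe attempt.
	 */
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get clock\n");

	return 0;
}
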
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 18b82427d0cb..fe1dad68aee4 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -305,7 +305,7 @@ static int rpm_get_suppliers(struct device *dev)
return 0;
}
-static void rpm_put_suppliers(struct device *dev)
+static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
{
struct device_link *link;
@@ -313,10 +313,30 @@ static void rpm_put_suppliers(struct device *dev)
device_links_read_lock_held()) {
while (refcount_dec_not_one(&link->rpm_active))
- pm_runtime_put(link->supplier);
+ pm_runtime_put_noidle(link->supplier);
+
+ if (try_to_suspend)
+ pm_request_idle(link->supplier);
}
}
+static void rpm_put_suppliers(struct device *dev)
+{
+ __rpm_put_suppliers(dev, true);
+}
+
+static void rpm_suspend_suppliers(struct device *dev)
+{
+ struct device_link *link;
+ int idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+ device_links_read_lock_held())
+ pm_request_idle(link->supplier);
+
+ device_links_read_unlock(idx);
+}
+
/**
* __rpm_callback - Run a given runtime PM callback for a given device.
* @cb: Runtime PM callback to run.
@@ -325,27 +345,29 @@ static void rpm_put_suppliers(struct device *dev)
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
- bool use_links = dev->power.links_count > 0;
- bool get = false;
int retval, idx;
- bool put;
+ bool use_links = dev->power.links_count > 0;
if (dev->power.irq_safe) {
spin_unlock(&dev->power.lock);
- } else if (!use_links) {
- spin_unlock_irq(&dev->power.lock);
} else {
- get = dev->power.runtime_status == RPM_RESUMING;
-
spin_unlock_irq(&dev->power.lock);
- /* Resume suppliers if necessary. */
- if (get) {
+ /*
+ * Resume suppliers if necessary.
+ *
+ * The device's runtime PM status cannot change until this
+ * routine returns, so it is safe to read the status outside of
+ * the lock.
+ */
+ if (use_links && dev->power.runtime_status == RPM_RESUMING) {
idx = device_links_read_lock();
retval = rpm_get_suppliers(dev);
- if (retval)
+ if (retval) {
+ rpm_put_suppliers(dev);
goto fail;
+ }
device_links_read_unlock(idx);
}
@@ -355,36 +377,24 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
if (dev->power.irq_safe) {
spin_lock(&dev->power.lock);
- return retval;
- }
-
- spin_lock_irq(&dev->power.lock);
-
- if (!use_links)
- return retval;
-
- /*
- * If the device is suspending and the callback has returned success,
- * drop the usage counters of the suppliers that have been reference
- * counted on its resume.
- *
- * Do that if the resume fails too.
- */
- put = dev->power.runtime_status == RPM_SUSPENDING && !retval;
- if (put)
- __update_runtime_status(dev, RPM_SUSPENDED);
- else
- put = get && retval;
-
- if (put) {
- spin_unlock_irq(&dev->power.lock);
+ } else {
+ /*
+ * If the device is suspending and the callback has returned
+ * success, drop the usage counters of the suppliers that have
+ * been reference counted on its resume.
+ *
+ * Do that if resume fails too.
+ */
+ if (use_links
+ && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
+ || (dev->power.runtime_status == RPM_RESUMING && retval))) {
+ idx = device_links_read_lock();
- idx = device_links_read_lock();
+ __rpm_put_suppliers(dev, false);
fail:
- rpm_put_suppliers(dev);
-
- device_links_read_unlock(idx);
+ device_links_read_unlock(idx);
+ }
spin_lock_irq(&dev->power.lock);
}
@@ -654,8 +664,11 @@ static int rpm_suspend(struct device *dev, int rpmflags)
goto out;
}
+ if (dev->power.irq_safe)
+ goto out;
+
/* Maybe the parent is now able to suspend. */
- if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
+ if (parent && !parent->power.ignore_children) {
spin_unlock(&dev->power.lock);
spin_lock(&parent->power.lock);
@@ -664,6 +677,14 @@ static int rpm_suspend(struct device *dev, int rpmflags)
spin_lock(&dev->power.lock);
}
+ /* Maybe the suppliers are now able to suspend. */
+ if (dev->power.links_count > 0) {
+ spin_unlock_irq(&dev->power.lock);
+
+ rpm_suspend_suppliers(dev);
+
+ spin_lock_irq(&dev->power.lock);
+ }
out:
trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
@@ -1669,8 +1690,8 @@ void pm_runtime_get_suppliers(struct device *dev)
device_links_read_lock_held())
if (link->flags & DL_FLAG_PM_RUNTIME) {
link->supplier_preactivated = true;
- refcount_inc(&link->rpm_active);
pm_runtime_get_sync(link->supplier);
+ refcount_inc(&link->rpm_active);
}
device_links_read_unlock(idx);
@@ -1683,6 +1704,8 @@ void pm_runtime_get_suppliers(struct device *dev)
void pm_runtime_put_suppliers(struct device *dev)
{
struct device_link *link;
+ unsigned long flags;
+ bool put;
int idx;
idx = device_links_read_lock();
@@ -1691,7 +1714,11 @@ void pm_runtime_put_suppliers(struct device *dev)
device_links_read_lock_held())
if (link->supplier_preactivated) {
link->supplier_preactivated = false;
- if (refcount_dec_not_one(&link->rpm_active))
+ spin_lock_irqsave(&dev->power.lock, flags);
+ put = pm_runtime_status_suspended(dev) &&
+ refcount_dec_not_one(&link->rpm_active);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+ if (put)
pm_runtime_put(link->supplier);
}
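
For context, a hedged sketch of how the supplier/consumer accounting exercised by the hunks above is set up from a driver's point of view; example_bind and its error code are illustrative, and the link is assumed to be managed by runtime PM via DL_FLAG_PM_RUNTIME:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int example_bind(struct device *consumer, struct device *supplier)
{
	struct device_link *link;

	/*
	 * Runtime-PM managed link: each runtime resume of the consumer
	 * resumes the supplier first and takes an rpm_active reference on
	 * the link (DL_FLAG_RPM_ACTIVE starts with one such reference).
	 */
	link = device_link_add(consumer, supplier,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
	if (!link)
		return -ENODEV;

	/* Resuming the consumer pulls the supplier up via the link. */
	pm_runtime_get_sync(consumer);

	/*
	 * Dropping the reference lets the consumer suspend again (provided
	 * nothing else holds it); __rpm_put_suppliers() then releases the
	 * rpm_active references and rpm_suspend_suppliers() asks the
	 * supplier to go idle as well.
	 */
	pm_runtime_put_sync(consumer);

	return 0;
}
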
diff --git a/drivers/base/power/wakeup_stats.c b/drivers/base/power/wakeup_stats.c
index d638259b829a..924fac493c4f 100644
--- a/drivers/base/power/wakeup_stats.c
+++ b/drivers/base/power/wakeup_stats.c
@@ -137,7 +137,7 @@ static struct device *wakeup_source_device_create(struct device *parent,
struct wakeup_source *ws)
{
struct device *dev = NULL;
- int retval = -ENODEV;
+ int retval;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
@@ -154,7 +154,7 @@ static struct device *wakeup_source_device_create(struct device *parent,
dev_set_drvdata(dev, ws);
device_set_pm_not_required(dev);
- retval = kobject_set_name(&dev->kobj, "wakeup%d", ws->id);
+ retval = dev_set_name(dev, "wakeup%d", ws->id);
if (retval)
goto error;