30 files changed, 282 insertions, 111 deletions
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index 03991eeff643..3006f4324808 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -54,6 +54,9 @@ static int psci_acpi_cpu_init_idle(unsigned int cpu)
 	struct acpi_lpi_state *lpi;
 	struct acpi_processor *pr = per_cpu(processors, cpu);
 
+	if (unlikely(!pr || !pr->flags.has_lpi))
+		return -EINVAL;
+
 	/*
 	 * If the PSCI cpu_suspend function hook has not been initialized
 	 * idle states must not be enabled, so bail out
@@ -61,9 +64,6 @@ static int psci_acpi_cpu_init_idle(unsigned int cpu)
 	if (!psci_ops.cpu_suspend)
 		return -EOPNOTSUPP;
 
-	if (unlikely(!pr || !pr->flags.has_lpi))
-		return -EINVAL;
-
 	count = pr->power.count - 1;
 	if (count <= 0)
 		return -ENODEV;
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 1e97f944b47d..3b7f4cdbf2e0 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -15,6 +15,7 @@
 #include <asm/desc.h>
 #include <asm/cacheflush.h>
 #include <asm/realmode.h>
+#include <asm/hypervisor.h>
 #include <linux/ftrace.h>
 #include "../../realmode/rm/wakeup.h"
 
@@ -140,9 +141,9 @@ static int __init acpi_sleep_setup(char *str)
 			acpi_realmode_flags |= 4;
 #ifdef CONFIG_HIBERNATION
 		if (strncmp(str, "s4_hwsig", 8) == 0)
-			acpi_check_s4_hw_signature(1);
+			acpi_check_s4_hw_signature = 1;
 		if (strncmp(str, "s4_nohwsig", 10) == 0)
-			acpi_check_s4_hw_signature(0);
+			acpi_check_s4_hw_signature = 0;
 #endif
 		if (strncmp(str, "nonvs", 5) == 0)
 			acpi_nvs_nosave();
@@ -160,3 +161,21 @@ static int __init acpi_sleep_setup(char *str)
 }
 
 __setup("acpi_sleep=", acpi_sleep_setup);
+
+#if defined(CONFIG_HIBERNATION) && defined(CONFIG_HYPERVISOR_GUEST)
+static int __init init_s4_sigcheck(void)
+{
+	/*
+	 * If running on a hypervisor, honour the ACPI specification
+	 * by default and trigger a clean reboot when the hardware
+	 * signature in FACS is changed after hibernation.
+	 */
+	if (acpi_check_s4_hw_signature == -1 &&
+	    !hypervisor_is_type(X86_HYPER_NATIVE))
+		acpi_check_s4_hw_signature = 1;
+
+	return 0;
+}
+/* This must happen before acpi_init() which is a subsys initcall */
+arch_initcall(init_s4_sigcheck);
+#endif
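For orientation (not part of the patch): acpi_check_s4_hw_signature is a tri-state knob, and the hibernation resume path consumes it roughly as sketched below. The helper names are hypothetical; only the -1/0/1 semantics come from the hunks above and from the drivers/acpi/sleep.c hunks further down.

/* Illustrative sketch of the tri-state; helper names are hypothetical. */
static void s4_hw_signature_policy(unsigned long saved, unsigned long now)
{
        switch (acpi_check_s4_hw_signature) {
        case 0:         /* acpi_sleep=s4_nohwsig: ignore FACS entirely */
                break;
        case 1:         /* acpi_sleep=s4_hwsig, or the new hypervisor default */
                if (saved != now)
                        reboot_cleanly();       /* hypothetical helper */
                break;
        default:        /* -1, bare-metal default: warn but keep resuming */
                if (saved != now)
                        pr_warn("S4 hardware signature changed\n");
                break;
        }
}

The arch_initcall() ordering note matters because initcall levels run in sequence (arch_initcall is level 3, subsys_initcall is level 4), so the hypervisor default is locked in before acpi_init() reads the variable.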
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f8e9fa82cb9b..32b20efff5f8 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -1080,6 +1080,11 @@ static int flatten_lpi_states(struct acpi_processor *pr,
 	return 0;
 }
 
+int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
+{
+	return -EOPNOTSUPP;
+}
+
 static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
 {
 	int ret, i;
@@ -1088,6 +1093,11 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
 	struct acpi_device *d = NULL;
 	struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;
 
+	/* make sure our architecture has support */
+	ret = acpi_processor_ffh_lpi_probe(pr->id);
+	if (ret == -EOPNOTSUPP)
+		return ret;
+
 	if (!osc_pc_lpi_support_confirmed)
 		return -EOPNOTSUPP;
 
@@ -1139,11 +1149,6 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
 	return 0;
 }
 
-int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
-{
-	return -ENODEV;
-}
-
 int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
 {
 	return -ENODEV;
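The relocated acpi_processor_ffh_lpi_probe() stub above is the linker weak-symbol idiom: generic code supplies a __weak default, and an architecture that implements FFH low-power idle overrides it simply by defining a strong symbol of the same name (arm64 does so in arch/arm64/kernel/cpuidle.c, first diff above). A minimal sketch of the idiom with hypothetical names:

/* lib/generic.c: weak fallback, used only if nobody overrides it */
int __weak arch_lpi_probe(unsigned int cpu)
{
        return -EOPNOTSUPP;     /* "this architecture has no FFH LPI" */
}

/* arch/foo/lpi.c: a strong definition silently wins over the weak one */
int arch_lpi_probe(unsigned int cpu)
{
        return 0;
}

Moving the probe to the top of acpi_processor_get_lpi_info() means a -EOPNOTSUPP from the stub now short-circuits the whole _LPI parse instead of failing after the work was already done.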
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index d4fbea91ab6b..74e54fd44b8e 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -869,12 +869,7 @@ static inline void acpi_sleep_syscore_init(void) {}
 #ifdef CONFIG_HIBERNATION
 static unsigned long s4_hardware_signature;
 static struct acpi_table_facs *facs;
-static int sigcheck = -1; /* Default behaviour is just to warn */
-
-void __init acpi_check_s4_hw_signature(int check)
-{
-	sigcheck = check;
-}
+int acpi_check_s4_hw_signature = -1; /* Default behaviour is just to warn */
 
 static int acpi_hibernation_begin(pm_message_t stage)
 {
@@ -999,7 +994,7 @@ static void acpi_sleep_hibernate_setup(void)
 	hibernation_set_ops(old_suspend_ordering ?
 			&acpi_hibernation_ops_old : &acpi_hibernation_ops);
 	sleep_states[ACPI_STATE_S4] = 1;
-	if (!sigcheck)
+	if (!acpi_check_s4_hw_signature)
 		return;
 
 	acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
@@ -1011,7 +1006,7 @@ static void acpi_sleep_hibernate_setup(void)
 		 */
 		s4_hardware_signature = facs->hardware_signature;
 
-		if (sigcheck > 0) {
+		if (acpi_check_s4_hw_signature > 0) {
 			/*
 			 * If we're actually obeying the ACPI specification
 			 * then the signature is written out as part of the
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 5db704f02e71..1ee878d126fd 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -636,6 +636,18 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
 	    atomic_read(&genpd->sd_count) > 0)
 		return -EBUSY;
 
+	/*
+	 * The children must be in their deepest (powered-off) states to allow
+	 * the parent to be powered off. Note that, there's no need for
+	 * additional locking, as powering on a child, requires the parent's
+	 * lock to be acquired first.
+	 */
+	list_for_each_entry(link, &genpd->parent_links, parent_node) {
+		struct generic_pm_domain *child = link->child;
+		if (child->state_idx < child->state_count - 1)
+			return -EBUSY;
+	}
+
 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
 		enum pm_qos_flags_status stat;
 
@@ -1073,6 +1085,13 @@ static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
 	    || atomic_read(&genpd->sd_count) > 0)
 		return;
 
+	/* Check that the children are in their deepest (powered-off) state. */
+	list_for_each_entry(link, &genpd->parent_links, parent_node) {
+		struct generic_pm_domain *child = link->child;
+		if (child->state_idx < child->state_count - 1)
+			return;
+	}
+
 	/* Choose the deepest state when suspending */
 	genpd->state_idx = genpd->state_count - 1;
 	if (_genpd_power_off(genpd, false))
@@ -2058,9 +2077,9 @@ static int genpd_remove(struct generic_pm_domain *genpd)
 		kfree(link);
 	}
 
-	genpd_debug_remove(genpd);
 	list_del(&genpd->gpd_list_node);
 	genpd_unlock(genpd);
+	genpd_debug_remove(genpd);
 	cancel_work_sync(&genpd->power_off_work);
 	if (genpd_is_cpu_domain(genpd))
 		free_cpumask_var(genpd->cpus);
@@ -2248,12 +2267,8 @@ int of_genpd_add_provider_simple(struct device_node *np,
 	/* Parse genpd OPP table */
 	if (genpd->set_performance_state) {
 		ret = dev_pm_opp_of_add_table(&genpd->dev);
-		if (ret) {
-			if (ret != -EPROBE_DEFER)
-				dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
-					ret);
-			return ret;
-		}
+		if (ret)
+			return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");
 
 		/*
 		 * Save table for faster processing while setting performance
@@ -2312,9 +2327,8 @@ int of_genpd_add_provider_onecell(struct device_node *np,
 		if (genpd->set_performance_state) {
 			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
 			if (ret) {
-				if (ret != -EPROBE_DEFER)
-					dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
-						i, ret);
+				dev_err_probe(&genpd->dev, ret,
+					      "Failed to add OPP table for index %d\n", i);
 				goto error;
 			}
 
@@ -2672,12 +2686,8 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
 	ret = genpd_add_device(pd, dev, base_dev);
 	mutex_unlock(&gpd_list_lock);
 
-	if (ret < 0) {
-		if (ret != -EPROBE_DEFER)
-			dev_err(dev, "failed to add to PM domain %s: %d",
-				pd->name, ret);
-		return ret;
-	}
+	if (ret < 0)
+		return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);
 
 	dev->pm_domain->detach = genpd_dev_pm_detach;
 	dev->pm_domain->sync = genpd_dev_pm_sync;
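The three conversions above lean on dev_err_probe()'s contract: it returns the error it is handed, logs it with dev_err() for real failures, and for -EPROBE_DEFER stays quiet (dev_dbg()) while recording the deferral reason for /sys/kernel/debug/devices_deferred — which is exactly the open-coded "if (ret != -EPROBE_DEFER)" dance being deleted. Typical usage in a probe path, sketched with a hypothetical driver:

#include <linux/clk.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)      /* hypothetical */
{
        struct clk *clk;

        clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk))
                /* Quiet on -EPROBE_DEFER, dev_err() otherwise. */
                return dev_err_probe(&pdev->dev, PTR_ERR(clk),
                                     "failed to get clock\n");
        return 0;
}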
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 04ea92cbd9cf..c50139207794 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -485,7 +485,7 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,
 	trace_device_pm_callback_start(dev, info, state.event);
 	error = cb(dev);
 	trace_device_pm_callback_end(dev, error);
-	suspend_report_result(cb, error);
+	suspend_report_result(dev, cb, error);
 
 	initcall_debug_report(dev, calltime, cb, error);
 
@@ -1568,7 +1568,7 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
 	trace_device_pm_callback_start(dev, info, state.event);
 	error = cb(dev, state);
 	trace_device_pm_callback_end(dev, error);
-	suspend_report_result(cb, error);
+	suspend_report_result(dev, cb, error);
 
 	initcall_debug_report(dev, calltime, cb, error);
 
@@ -1855,7 +1855,7 @@ unlock:
 	device_unlock(dev);
 
 	if (ret < 0) {
-		suspend_report_result(callback, ret);
+		suspend_report_result(dev, callback, ret);
 		pm_runtime_put(dev);
 		return ret;
 	}
@@ -1960,10 +1960,10 @@ int dpm_suspend_start(pm_message_t state)
 }
 EXPORT_SYMBOL_GPL(dpm_suspend_start);
 
-void __suspend_report_result(const char *function, void *fn, int ret)
+void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
 {
 	if (ret)
-		pr_err("%s(): %pS returns %d\n", function, fn, ret);
+		dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret);
 }
 EXPORT_SYMBOL_GPL(__suspend_report_result);
 
@@ -2018,7 +2018,9 @@ static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
 
 void device_pm_check_callbacks(struct device *dev)
 {
-	spin_lock_irq(&dev->power.lock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
 	dev->power.no_pm_callbacks =
 		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
 		 !dev->bus->suspend && !dev->bus->resume)) &&
@@ -2027,7 +2029,7 @@ void device_pm_check_callbacks(struct device *dev)
 		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
 		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
 		 !dev->driver->suspend && !dev->driver->resume));
-	spin_unlock_irq(&dev->power.lock);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
 }
 
 bool dev_pm_skip_suspend(struct device *dev)
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 2f3cce17219b..d4059e6ffeae 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1476,11 +1476,16 @@ EXPORT_SYMBOL_GPL(pm_runtime_enable);
 
 static void pm_runtime_disable_action(void *data)
 {
+	pm_runtime_dont_use_autosuspend(data);
 	pm_runtime_disable(data);
 }
 
 /**
  * devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
+ *
+ * NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for
+ * you at driver exit time if needed.
+ *
  * @dev: Device to handle.
  */
 int devm_pm_runtime_enable(struct device *dev)
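With pm_runtime_dont_use_autosuspend() folded into the devres action, a driver can pair pm_runtime_use_autosuspend() with devm_pm_runtime_enable() and rely on detach-time cleanup. A sketch of the intended probe-side usage (driver name hypothetical):

#include <linux/pm_runtime.h>

static int foo_probe(struct device *dev)        /* hypothetical */
{
        pm_runtime_set_autosuspend_delay(dev, 100);     /* 100 ms */
        pm_runtime_use_autosuspend(dev);

        /*
         * On unbind the devres action now calls
         * pm_runtime_dont_use_autosuspend() before pm_runtime_disable(),
         * so no manual .remove() counterpart is needed.
         */
        return devm_pm_runtime_enable(dev);
}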
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 0004db4a9d3b..d487a6bac630 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -289,7 +289,7 @@ EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq);
  *
  * Enables wakeirq conditionally. We need to enable wake-up interrupt
  * lazily on the first rpm_suspend(). This is needed as the consumer device
- * starts in RPM_SUSPENDED state, and the the first pm_runtime_get() would
+ * starts in RPM_SUSPENDED state, and the first pm_runtime_get() would
 * otherwise try to disable already disabled wakeirq. The wake-up interrupt
 * starts disabled with IRQ_NOAUTOEN set.
 *
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 8666590201c9..a57d469676ca 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -587,7 +587,7 @@ static bool wakeup_source_not_registered(struct wakeup_source *ws)
 * @ws: Wakeup source to handle.
 *
 * Update the @ws' statistics and, if @ws has just been activated, notify the PM
- * core of the event by incrementing the counter of of wakeup events being
+ * core of the event by incrementing the counter of the wakeup events being
 * processed.
 */
static void wakeup_source_activate(struct wakeup_source *ws)
@@ -733,7 +733,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
 
 	/*
 	 * Increment the counter of registered wakeup events and decrement the
-	 * couter of wakeup events in progress simultaneously.
+	 * counter of wakeup events in progress simultaneously.
 	 */
 	cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
 	trace_wakeup_source_deactivate(ws->name, cec);
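The atomic_add_return(MAX_IN_PROGRESS, ...) in the last hunk is a split-counter trick. In wakeup.c, combined_event_count keeps the number of in-progress wakeup events in the low half of a single atomic_t and the number of registered (completed) events in the high half; the constants below are quoted from that file, while the two helpers are illustrative stand-ins for wakeup_source_activate()/wakeup_source_deactivate():

#include <linux/atomic.h>

#define IN_PROGRESS_BITS        (sizeof(int) * 4)       /* low half of an int */
#define MAX_IN_PROGRESS         ((1 << IN_PROGRESS_BITS) - 1)

static atomic_t combined_event_count = ATOMIC_INIT(0);

static void event_activate(void)
{
        atomic_inc(&combined_event_count);      /* +1 in-progress (low half) */
}

static void event_deactivate(void)
{
        /*
         * Adding (1 << IN_PROGRESS_BITS) - 1 carries +1 into the high half
         * while borrowing 1 from the low half: one event is registered and
         * one in-progress event retired in a single atomic operation.
         */
        atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
}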
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 08515f7e515f..b6bd0ff35323 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -146,7 +146,7 @@ static unsigned int cs_dbs_update(struct cpufreq_policy *policy)
 
 /************************** sysfs interface ************************/
 
-static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
+static ssize_t sampling_down_factor_store(struct gov_attr_set *attr_set,
 					  const char *buf, size_t count)
 {
 	struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -161,7 +161,7 @@ static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
 	return count;
 }
 
-static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
+static ssize_t up_threshold_store(struct gov_attr_set *attr_set,
 				  const char *buf, size_t count)
 {
 	struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -177,7 +177,7 @@ static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
 	return count;
 }
 
-static ssize_t store_down_threshold(struct gov_attr_set *attr_set,
+static ssize_t down_threshold_store(struct gov_attr_set *attr_set,
 				    const char *buf, size_t count)
 {
 	struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -195,7 +195,7 @@ static ssize_t store_down_threshold(struct gov_attr_set *attr_set,
 	return count;
 }
 
-static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
+static ssize_t ignore_nice_load_store(struct gov_attr_set *attr_set,
 				      const char *buf, size_t count)
 {
 	struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -220,7 +220,7 @@ static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
 	return count;
 }
 
-static ssize_t store_freq_step(struct gov_attr_set *attr_set, const char *buf,
+static ssize_t freq_step_store(struct gov_attr_set *attr_set, const char *buf,
 			       size_t count)
 {
 	struct dbs_data *dbs_data = to_dbs_data(attr_set);
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 63f7c219062b..0d42cf8b88d8 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -27,7 +27,7 @@ static DEFINE_MUTEX(gov_dbs_data_mutex);
 
 /* Common sysfs tunables */
 /*
- * store_sampling_rate - update sampling rate effective immediately if needed.
+ * sampling_rate_store - update sampling rate effective immediately if needed.
 *
 * If new rate is smaller than the old, simply updating
 * dbs.sampling_rate might not be appropriate. For example, if the
@@ -41,7 +41,7 @@ static DEFINE_MUTEX(gov_dbs_data_mutex);
 * This must be called with dbs_data->mutex held, otherwise traversing
 * policy_dbs_list isn't safe.
 */
-ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
+ssize_t sampling_rate_store(struct gov_attr_set *attr_set, const char *buf,
 			    size_t count)
 {
 	struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -80,7 +80,7 @@ ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
 	return count;
 }
 
-EXPORT_SYMBOL_GPL(store_sampling_rate);
+EXPORT_SYMBOL_GPL(sampling_rate_store);
 
 /**
 * gov_update_cpu_data - Update CPU load data.
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index bab8e6140377..a5a0bc3cc23e 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -51,7 +51,7 @@ static inline struct dbs_data *to_dbs_data(struct gov_attr_set *attr_set)
 }
 
 #define gov_show_one(_gov, file_name)					\
-static ssize_t show_##file_name						\
+static ssize_t file_name##_show						\
 (struct gov_attr_set *attr_set, char *buf)				\
 {									\
 	struct dbs_data *dbs_data = to_dbs_data(attr_set);		\
@@ -60,7 +60,7 @@ static ssize_t show_##file_name						\
 }
 
 #define gov_show_one_common(file_name)					\
-static ssize_t show_##file_name						\
+static ssize_t file_name##_show						\
 (struct gov_attr_set *attr_set, char *buf)				\
 {									\
 	struct dbs_data *dbs_data = to_dbs_data(attr_set);		\
@@ -68,12 +68,10 @@ static ssize_t show_##file_name						\
 }
 
 #define gov_attr_ro(_name)						\
-static struct governor_attr _name =					\
-__ATTR(_name, 0444, show_##_name, NULL)
+static struct governor_attr _name = __ATTR_RO(_name)
 
 #define gov_attr_rw(_name)						\
-static struct governor_attr _name =					\
-__ATTR(_name, 0644, show_##_name, store_##_name)
+static struct governor_attr _name = __ATTR_RW(_name)
 
 /* Common to all CPUs of a policy */
 struct policy_dbs_info {
@@ -176,7 +174,7 @@ void od_register_powersave_bias_handler(unsigned int (*f)
 		(struct cpufreq_policy *, unsigned int, unsigned int),
 		unsigned int powersave_bias);
 void od_unregister_powersave_bias_handler(void);
-ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
+ssize_t sampling_rate_store(struct gov_attr_set *attr_set, const char *buf,
 			    size_t count);
 void gov_update_cpu_data(struct dbs_data *dbs_data);
 #endif /* _CPUFREQ_GOVERNOR_H */
diff --git a/drivers/cpufreq/cpufreq_governor_attr_set.c b/drivers/cpufreq/cpufreq_governor_attr_set.c
index a6f365b9cc1a..771770ea0ed0 100644
--- a/drivers/cpufreq/cpufreq_governor_attr_set.c
+++ b/drivers/cpufreq/cpufreq_governor_attr_set.c
@@ -8,11 +8,6 @@
 
 #include "cpufreq_governor.h"
 
-static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj)
-{
-	return container_of(kobj, struct gov_attr_set, kobj);
-}
-
 static inline struct governor_attr *to_gov_attr(struct attribute *attr)
 {
 	return container_of(attr, struct governor_attr, attr);
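All of the store_foo() → foo_store() and show_foo() → foo_show() renames in these governor files exist to satisfy __ATTR_RO()/__ATTR_RW() from <linux/sysfs.h>, which token-paste the attribute name to locate its accessors — roughly:

/* Paraphrased from <linux/sysfs.h>; see the header for the exact text. */
#define __ATTR_RO(_name) {                                              \
        .attr   = { .name = __stringify(_name), .mode = 0444 },        \
        .show   = _name##_show,                                         \
}

#define __ATTR_RW(_name) __ATTR(_name, 0644, _name##_show, _name##_store)

So after this series, gov_attr_rw(up_threshold) resolves to up_threshold_show()/up_threshold_store(), which is why every accessor had to be renamed in the same patch as the macro change.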
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 6a41ea4729b8..e8fbf970ff07 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -202,7 +202,7 @@ static unsigned int od_dbs_update(struct cpufreq_policy *policy)
 /************************** sysfs interface ************************/
 static struct dbs_governor od_dbs_gov;
 
-static ssize_t store_io_is_busy(struct gov_attr_set *attr_set, const char *buf,
+static ssize_t io_is_busy_store(struct gov_attr_set *attr_set, const char *buf,
 				size_t count)
 {
 	struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -220,7 +220,7 @@ static ssize_t store_io_is_busy(struct gov_attr_set *attr_set, const char *buf,
 	return count;
 }
 
-static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
+static ssize_t up_threshold_store(struct gov_attr_set *attr_set,
 				  const char *buf, size_t count)
 {
 	struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -237,7 +237,7 @@ static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
 	return count;
 }
 
-static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
+static ssize_t sampling_down_factor_store(struct gov_attr_set *attr_set,
 					  const char *buf, size_t count)
 {
 	struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -265,7 +265,7 @@ static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
 	return count;
 }
 
-static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
+static ssize_t ignore_nice_load_store(struct gov_attr_set *attr_set,
 				      const char *buf, size_t count)
 {
 	struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -290,7 +290,7 @@ static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
 	return count;
 }
 
-static ssize_t store_powersave_bias(struct gov_attr_set *attr_set,
+static ssize_t powersave_bias_store(struct gov_attr_set *attr_set,
 				    const char *buf, size_t count)
 {
 	struct dbs_data *dbs_data = to_dbs_data(attr_set);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index bc7f7e6759bd..846bb3a78788 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1692,6 +1692,37 @@ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
 	}
 }
 
+static void intel_pstate_update_epp_defaults(struct cpudata *cpudata)
+{
+	cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
+
+	/*
+	 * If this CPU gen doesn't call for change in balance_perf
+	 * EPP return.
+	 */
+	if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE)
+		return;
+
+	/*
+	 * If powerup EPP is something other than chipset default 0x80 and
+	 * - is more performance oriented than 0x80 (default balance_perf EPP)
+	 * - But less performance oriented than performance EPP
+	 * then use this as new balance_perf EPP.
+	 */
+	if (cpudata->epp_default < HWP_EPP_BALANCE_PERFORMANCE &&
+	    cpudata->epp_default > HWP_EPP_PERFORMANCE) {
+		epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = cpudata->epp_default;
+		return;
+	}
+
+	/*
+	 * Use hard coded value per gen to update the balance_perf
+	 * and default EPP.
+	 */
+	cpudata->epp_default = epp_values[EPP_INDEX_BALANCE_PERFORMANCE];
+	intel_pstate_set_epp(cpudata, cpudata->epp_default);
+}
+
 static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 {
 	/* First disable HWP notification interrupt till we activate again */
@@ -1705,12 +1736,7 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 	if (cpudata->epp_default >= 0)
 		return;
 
-	if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE) {
-		cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
-	} else {
-		cpudata->epp_default = epp_values[EPP_INDEX_BALANCE_PERFORMANCE];
-		intel_pstate_set_epp(cpudata, cpudata->epp_default);
-	}
+	intel_pstate_update_epp_defaults(cpudata);
 }
 
 static int atom_get_min_pstate(void)
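A note on the comparisons in intel_pstate_update_epp_defaults(): HWP energy-performance preference is a single byte where numerically lower means more performance-biased, so HWP_EPP_PERFORMANCE (0x00) < HWP_EPP_BALANCE_PERFORMANCE (0x80) < HWP_EPP_POWERSAVE (0xff). The "more performance oriented than 0x80 but less than performance" test therefore reduces to a simple open interval, sketched here with the values spelled out (helper name hypothetical):

#define EPP_PERFORMANCE         0x00    /* strongest performance bias */
#define EPP_BALANCE_PERF        0x80    /* chipset default */
#define EPP_POWERSAVE           0xff    /* strongest energy-saving bias */

/* True if a firmware power-up EPP may become the new balance_perf value. */
static bool epp_usable_as_balance_perf(int epp)
{
        return epp > EPP_PERFORMANCE && epp < EPP_BALANCE_PERF;
}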
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index c538a153ee82..3e000e1a75c6 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -668,9 +668,9 @@ static acpi_status longhaul_walk_callback(acpi_handle obj_handle,
 					  u32 nesting_level,
 					  void *context, void **return_value)
 {
-	struct acpi_device *d;
+	struct acpi_device *d = acpi_fetch_acpi_dev(obj_handle);
 
-	if (acpi_bus_get_device(obj_handle, &d))
+	if (!d)
 		return 0;
 
 	*return_value = acpi_driver_data(d);
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 12ab4014af71..d289036beff2 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1172,14 +1172,14 @@ static int powernowk8_init(void)
 	unsigned int i, supported_cpus = 0;
 	int ret;
 
+	if (!x86_match_cpu(powernow_k8_ids))
+		return -ENODEV;
+
 	if (boot_cpu_has(X86_FEATURE_HW_PSTATE)) {
 		__request_acpi_cpufreq();
 		return -ENODEV;
 	}
 
-	if (!x86_match_cpu(powernow_k8_ids))
-		return -ENODEV;
-
 	cpus_read_lock();
 	for_each_online_cpu(i) {
 		smp_call_function_single(i, check_supported_cpu, &ret, 1);
diff --git a/drivers/cpuidle/cpuidle-haltpoll.c b/drivers/cpuidle/cpuidle-haltpoll.c
index fcc53215bac8..3a39a7f48b77 100644
--- a/drivers/cpuidle/cpuidle-haltpoll.c
+++ b/drivers/cpuidle/cpuidle-haltpoll.c
@@ -108,11 +108,11 @@ static int __init haltpoll_init(void)
 	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
 		return -ENODEV;
 
-	cpuidle_poll_state_init(drv);
-
 	if (!kvm_para_available() || !haltpoll_want())
 		return -ENODEV;
 
+	cpuidle_poll_state_init(drv);
+
 	ret = cpuidle_register_driver(drv);
 	if (ret < 0)
 		return ret;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 0b66e25c0e2d..b7640cfe0020 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -64,6 +64,7 @@ static struct cpuidle_driver intel_idle_driver = {
 /* intel_idle.max_cstate=0 disables driver */
 static int max_cstate = CPUIDLE_STATE_MAX - 1;
 static unsigned int disabled_states_mask;
+static unsigned int preferred_states_mask;
 
 static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
 
@@ -121,9 +122,6 @@ static unsigned int mwait_substates __initdata;
 * If the local APIC timer is not known to be reliable in the target idle state,
 * enable one-shot tick broadcasting for the target CPU before executing MWAIT.
 *
- * Optionally call leave_mm() for the target CPU upfront to avoid wakeups due to
- * flushing user TLBs.
- *
 * Must be called under local_irq_disable().
 */
static __cpuidle int intel_idle(struct cpuidle_device *dev,
@@ -761,6 +759,46 @@ static struct cpuidle_state icx_cstates[] __initdata = {
 		.enter = NULL }
 };
 
+/*
+ * On Sapphire Rapids Xeon C1 has to be disabled if C1E is enabled, and vice
+ * versa. On SPR C1E is enabled only if "C1E promotion" bit is set in
+ * MSR_IA32_POWER_CTL. But in this case there effectively no C1, because C1
+ * requests are promoted to C1E. If the "C1E promotion" bit is cleared, then
+ * both C1 and C1E requests end up with C1, so there is effectively no C1E.
+ *
+ * By default we enable C1 and disable C1E by marking it with
+ * 'CPUIDLE_FLAG_UNUSABLE'.
+ */
+static struct cpuidle_state spr_cstates[] __initdata = {
+	{
+		.name = "C1",
+		.desc = "MWAIT 0x00",
+		.flags = MWAIT2flg(0x00),
+		.exit_latency = 1,
+		.target_residency = 1,
+		.enter = &intel_idle,
+		.enter_s2idle = intel_idle_s2idle, },
+	{
+		.name = "C1E",
+		.desc = "MWAIT 0x01",
+		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE |
+			 CPUIDLE_FLAG_UNUSABLE,
+		.exit_latency = 2,
+		.target_residency = 4,
+		.enter = &intel_idle,
+		.enter_s2idle = intel_idle_s2idle, },
+	{
+		.name = "C6",
+		.desc = "MWAIT 0x20",
+		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 290,
+		.target_residency = 800,
+		.enter = &intel_idle,
+		.enter_s2idle = intel_idle_s2idle, },
+	{
+		.enter = NULL }
+};
+
 static struct cpuidle_state atom_cstates[] __initdata = {
 	{
 		.name = "C1E",
@@ -1104,6 +1142,12 @@ static const struct idle_cpu idle_cpu_icx __initconst = {
 	.use_acpi = true,
 };
 
+static const struct idle_cpu idle_cpu_spr __initconst = {
+	.state_table = spr_cstates,
+	.disable_promotion_to_c1e = true,
+	.use_acpi = true,
+};
+
 static const struct idle_cpu idle_cpu_avn __initconst = {
 	.state_table = avn_cstates,
 	.disable_promotion_to_c1e = true,
@@ -1166,6 +1210,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
 	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X,		&idle_cpu_skx),
 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,		&idle_cpu_icx),
 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,		&idle_cpu_icx),
+	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,	&idle_cpu_spr),
 	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL,	&idle_cpu_knl),
 	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM,	&idle_cpu_knl),
 	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT,	&idle_cpu_bxt),
@@ -1353,6 +1398,8 @@ static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { }
 static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; }
 #endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */
 
+static void c1e_promotion_enable(void);
+
 /**
 * ivt_idle_state_table_update - Tune the idle states table for Ivy Town.
 *
@@ -1523,6 +1570,41 @@ static void __init skx_idle_state_table_update(void)
 	}
 }
 
+/**
+ * spr_idle_state_table_update - Adjust Sapphire Rapids idle states table.
+ */
+static void __init spr_idle_state_table_update(void)
+{
+	unsigned long long msr;
+
+	/* Check if user prefers C1E over C1. */
+	if (preferred_states_mask & BIT(2)) {
+		if (preferred_states_mask & BIT(1))
+			/* Both can't be enabled, stick to the defaults. */
+			return;
+
+		spr_cstates[0].flags |= CPUIDLE_FLAG_UNUSABLE;
+		spr_cstates[1].flags &= ~CPUIDLE_FLAG_UNUSABLE;
+
+		/* Enable C1E using the "C1E promotion" bit. */
+		c1e_promotion_enable();
+		disable_promotion_to_c1e = false;
+	}
+
+	/*
+	 * By default, the C6 state assumes the worst-case scenario of package
+	 * C6. However, if PC6 is disabled, we update the numbers to match
+	 * core C6.
+	 */
+	rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr);
+
+	/* Limit value 2 and above allow for PC6. */
+	if ((msr & 0x7) < 2) {
+		spr_cstates[2].exit_latency = 190;
+		spr_cstates[2].target_residency = 600;
+	}
+}
+
 static bool __init intel_idle_verify_cstate(unsigned int mwait_hint)
 {
 	unsigned int mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint) + 1;
@@ -1557,6 +1639,9 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
 	case INTEL_FAM6_SKYLAKE_X:
 		skx_idle_state_table_update();
 		break;
+	case INTEL_FAM6_SAPPHIRERAPIDS_X:
+		spr_idle_state_table_update();
+		break;
 	}
 
 	for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
@@ -1629,6 +1714,15 @@ static void auto_demotion_disable(void)
 	wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
 }
 
+static void c1e_promotion_enable(void)
+{
+	unsigned long long msr_bits;
+
+	rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
+	msr_bits |= 0x2;
+	wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
+}
+
 static void c1e_promotion_disable(void)
 {
 	unsigned long long msr_bits;
@@ -1798,3 +1892,14 @@ module_param(max_cstate, int, 0444);
 */
 module_param_named(states_off, disabled_states_mask, uint, 0444);
 MODULE_PARM_DESC(states_off, "Mask of disabled idle states");
+/*
+ * Some platforms come with mutually exclusive C-states, so that if one is
+ * enabled, the other C-states must not be used. Example: C1 and C1E on
+ * Sapphire Rapids platform. This parameter allows for selecting the
+ * preferred C-states among the groups of mutually exclusive C-states - the
+ * selected C-states will be registered, the other C-states from the mutually
+ * exclusive group won't be registered. If the platform has no mutually
+ * exclusive C-states, this parameter has no effect.
+ */
+module_param_named(preferred_cstates, preferred_states_mask, uint, 0444);
+MODULE_PARM_DESC(preferred_cstates, "Mask of preferred idle states");
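c1e_promotion_enable() added above and the pre-existing c1e_promotion_disable() are a read-modify-write pair over the same bit: bit 1 of MSR_IA32_POWER_CTL is the "C1E promotion" enable. Condensed into a single illustrative helper (not how the driver actually structures it):

static void c1e_promotion_set(bool enable)      /* illustrative merge */
{
        unsigned long long msr_bits;

        rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
        if (enable)
                msr_bits |= 0x2;        /* C1 requests get promoted to C1E */
        else
                msr_bits &= ~0x2;       /* C1 stays C1 */
        wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
}

Given the BIT(1)/BIT(2) checks in spr_idle_state_table_update(), booting with intel_idle.preferred_cstates=4 (bit 2 set, bit 1 clear) is what selects C1E instead of C1 on Sapphire Rapids.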
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 588588cfda48..415f7664b010 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -596,7 +596,7 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
 		int error;
 
 		error = drv->suspend(pci_dev, state);
-		suspend_report_result(drv->suspend, error);
+		suspend_report_result(dev, drv->suspend, error);
 		if (error)
 			return error;
 
@@ -775,7 +775,7 @@ static int pci_pm_suspend(struct device *dev)
 		int error;
 
 		error = pm->suspend(dev);
-		suspend_report_result(pm->suspend, error);
+		suspend_report_result(dev, pm->suspend, error);
 		if (error)
 			return error;
 
@@ -821,7 +821,7 @@ static int pci_pm_suspend_noirq(struct device *dev)
 		int error;
 
 		error = pm->suspend_noirq(dev);
-		suspend_report_result(pm->suspend_noirq, error);
+		suspend_report_result(dev, pm->suspend_noirq, error);
 		if (error)
 			return error;
 
@@ -1010,7 +1010,7 @@ static int pci_pm_freeze(struct device *dev)
 		int error;
 
 		error = pm->freeze(dev);
-		suspend_report_result(pm->freeze, error);
+		suspend_report_result(dev, pm->freeze, error);
 		if (error)
 			return error;
 	}
@@ -1030,7 +1030,7 @@ static int pci_pm_freeze_noirq(struct device *dev)
 		int error;
 
 		error = pm->freeze_noirq(dev);
-		suspend_report_result(pm->freeze_noirq, error);
+		suspend_report_result(dev, pm->freeze_noirq, error);
 		if (error)
 			return error;
 	}
@@ -1116,7 +1116,7 @@ static int pci_pm_poweroff(struct device *dev)
 		int error;
 
 		error = pm->poweroff(dev);
-		suspend_report_result(pm->poweroff, error);
+		suspend_report_result(dev, pm->poweroff, error);
 		if (error)
 			return error;
 	}
@@ -1154,7 +1154,7 @@ static int pci_pm_poweroff_noirq(struct device *dev)
 		int error;
 
 		error = pm->poweroff_noirq(dev);
-		suspend_report_result(pm->poweroff_noirq, error);
+		suspend_report_result(dev, pm->poweroff_noirq, error);
 		if (error)
 			return error;
 	}
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index cc6757dfa3f1..c02e7bf643a6 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -171,7 +171,7 @@ static int __pnp_bus_suspend(struct device *dev, pm_message_t state)
 
 	if (pnp_drv->driver.pm && pnp_drv->driver.pm->suspend) {
 		error = pnp_drv->driver.pm->suspend(dev);
-		suspend_report_result(pnp_drv->driver.pm->suspend, error);
+		suspend_report_result(dev, pnp_drv->driver.pm->suspend, error);
 		if (error)
 			return error;
 	}
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index d630cccd2e6e..dd44e37a454a 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -446,7 +446,7 @@ static int suspend_common(struct device *dev, bool do_wakeup)
 		    HCD_WAKEUP_PENDING(hcd->shared_hcd))
 			return -EBUSY;
 		retval = hcd->driver->pci_suspend(hcd, do_wakeup);
-		suspend_report_result(hcd->driver->pci_suspend, retval);
+		suspend_report_result(dev, hcd->driver->pci_suspend, retval);
 
 		/* Check again in case wakeup raced with pci_suspend */
 		if ((retval == 0 && do_wakeup && HCD_WAKEUP_PENDING(hcd)) ||
@@ -556,7 +556,7 @@ static int hcd_pci_suspend_noirq(struct device *dev)
 			dev_dbg(dev, "--> PCI %s\n",
 					pci_power_name(pci_dev->current_state));
 	} else {
-		suspend_report_result(pci_prepare_to_sleep, retval);
+		suspend_report_result(dev, pci_prepare_to_sleep, retval);
 		return retval;
 	}
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 6274758648e3..766dbcb82df1 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -526,7 +526,7 @@ acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
 int acpi_resources_are_enforced(void);
 
 #ifdef CONFIG_HIBERNATION
-void __init acpi_check_s4_hw_signature(int check);
+extern int acpi_check_s4_hw_signature;
 #endif
 
 #ifdef CONFIG_PM_SLEEP
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 3522a272b74d..35c7d6db4139 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -661,6 +661,11 @@ struct gov_attr_set {
 /* sysfs ops for cpufreq governors */
 extern const struct sysfs_ops governor_sysfs_ops;
 
+static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj)
+{
+	return container_of(kobj, struct gov_attr_set, kobj);
+}
+
 void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node);
 void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node);
 unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node);
diff --git a/include/linux/pm.h b/include/linux/pm.h
index f7d2be686359..e65b3ab28377 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -770,11 +770,11 @@ extern int dpm_suspend_late(pm_message_t state);
 extern int dpm_suspend(pm_message_t state);
 extern int dpm_prepare(pm_message_t state);
 
-extern void __suspend_report_result(const char *function, void *fn, int ret);
+extern void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret);
 
-#define suspend_report_result(fn, ret)					\
+#define suspend_report_result(dev, fn, ret)				\
 	do {								\
-		__suspend_report_result(__func__, fn, ret);		\
+		__suspend_report_result(__func__, dev, fn, ret);	\
 	} while (0)
 
 extern int device_pm_wait_for_dev(struct device *sub, struct device *dev);
@@ -814,7 +814,7 @@ static inline int dpm_suspend_start(pm_message_t state)
 	return 0;
 }
 
-#define suspend_report_result(fn, ret) do {} while (0)
+#define suspend_report_result(dev, fn, ret) do {} while (0)
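After the pm.h change, __suspend_report_result() reports through dev_err() instead of a bare pr_err(), so every caller now passes the device and the log line identifies whose callback failed. A hypothetical bus-level caller and the sort of output it yields:

static int foo_bus_suspend(struct device *dev, pm_message_t state)
{
        int error = foo_do_suspend(dev, state); /* hypothetical helper */

        /*
         * On failure this now prints something like
         *   foo 0000:00:01.0: foo_bus_suspend(): foo_do_suspend+0x0/0x40 returns -16
         * instead of an un-attributed pr_err() line.
         */
        suspend_report_result(dev, foo_do_suspend, error);
        return error;
}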
 
 static inline int device_pm_wait_for_dev(struct device *a, struct device *b)
 {
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 9f09601c465a..2bff6a10095d 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -567,6 +567,10 @@ static inline void pm_runtime_disable(struct device *dev)
 * Allow the runtime PM autosuspend mechanism to be used for @dev whenever
 * requested (or "autosuspend" will be handled as direct runtime-suspend for
 * it).
+ *
+ * NOTE: It's important to undo this with pm_runtime_dont_use_autosuspend()
+ * at driver exit time unless your driver initially enabled pm_runtime
+ * with devm_pm_runtime_enable() (which handles it for you).
 */
static inline void pm_runtime_use_autosuspend(struct device *dev)
{
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index e6af502c2fd7..0ac805b753e5 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -689,8 +689,10 @@ static int load_image_and_restore(void)
 
 	lock_device_hotplug();
 	error = create_basic_memory_bitmaps();
-	if (error)
+	if (error) {
+		swsusp_close(FMODE_READ | FMODE_EXCL);
 		goto Unlock;
+	}
 
 	error = swsusp_read(&flags);
 	swsusp_close(FMODE_READ | FMODE_EXCL);
@@ -1328,7 +1330,7 @@ static int __init resumedelay_setup(char *str)
 	int rc = kstrtouint(str, 0, &resume_delay);
 
 	if (rc)
-		return rc;
+		pr_warn("resumedelay: bad option string '%s'\n", str);
 	return 1;
 }
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
index d20526c5be15..b663a97f5867 100644
--- a/kernel/power/suspend_test.c
+++ b/kernel/power/suspend_test.c
@@ -157,22 +157,22 @@ static int __init setup_test_suspend(char *value)
 		value++;
 	suspend_type = strsep(&value, ",");
 	if (!suspend_type)
-		return 0;
+		return 1;
 
 	repeat = strsep(&value, ",");
 	if (repeat) {
 		if (kstrtou32(repeat, 0, &test_repeat_count_max))
-			return 0;
+			return 1;
 	}
 
 	for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
 		if (!strcmp(pm_labels[i], suspend_type)) {
 			test_state_label = pm_labels[i];
-			return 0;
+			return 1;
 		}
 
 	printk(warn_bad_state, suspend_type);
-	return 0;
+	return 1;
 }
 
 __setup("test_suspend", setup_test_suspend);
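The return-value changes in resumedelay_setup() and setup_test_suspend() both follow the __setup() contract: a handler returns 1 to say "this option was mine" (even if the value was malformed, after warning about it), while returning 0 or an errno makes the kernel treat the option as unrecognized and pass it along to init. A minimal handler observing the contract, with a hypothetical parameter name:

#include <linux/init.h>
#include <linux/kernel.h>

static unsigned int foo_delay;  /* hypothetical tunable */

static int __init foo_delay_setup(char *str)
{
        if (kstrtouint(str, 0, &foo_delay))
                pr_warn("foo_delay: bad option string '%s'\n", str);

        return 1;       /* always consume "foo_delay=", even when malformed */
}
__setup("foo_delay=", foo_delay_setup);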
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index ad10359030a4..c51f5507b34f 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -89,7 +89,7 @@ struct swap_map_page_list {
 	struct swap_map_page_list *next;
 };
 
-/**
+/*
 * The swap_map_handle structure is used for handling swap in
 * a file-alike way
 */
@@ -117,7 +117,7 @@ struct swsusp_header {
 
 static struct swsusp_header *swsusp_header;
 
-/**
+/*
 * The following functions are used for tracing the allocated
 * swap pages, so that they can be freed in case of an error.
 */
@@ -171,7 +171,7 @@ static int swsusp_extents_insert(unsigned long swap_offset)
 	return 0;
 }
 
-/**
+/*
 * alloc_swapdev_block - allocate a swap page and register that it has
 * been allocated, so that it can be freed in case of an error.
 */
@@ -190,7 +190,7 @@ sector_t alloc_swapdev_block(int swap)
 	return 0;
 }
 
-/**
+/*
 * free_all_swap_pages - free swap pages allocated for saving image data.
 * It also frees the extents used to register which swap entries had been
 * allocated.
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 26778884d9ab..cffcd08f4ec8 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -539,7 +539,7 @@ ATTRIBUTE_GROUPS(sugov);
 
 static void sugov_tunables_free(struct kobject *kobj)
 {
-	struct gov_attr_set *attr_set = container_of(kobj, struct gov_attr_set, kobj);
+	struct gov_attr_set *attr_set = to_gov_attr_set(kobj);
 
 	kfree(to_sugov_tunables(attr_set));
 }
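Moving to_gov_attr_set() into include/linux/cpufreq.h lets schedutil reuse the shared downcast instead of open-coding container_of(). The pattern composes in two hops — kobject to gov_attr_set, then gov_attr_set to the governor's own tunables — as sketched here with a hypothetical tunables struct mirroring sugov's:

struct foo_tunables {                   /* hypothetical governor tunables */
        struct gov_attr_set attr_set;   /* the generic set must be embedded */
        unsigned int rate_limit_us;
};

static struct foo_tunables *to_foo_tunables(struct gov_attr_set *attr_set)
{
        return container_of(attr_set, struct foo_tunables, attr_set);
}

static void foo_tunables_free(struct kobject *kobj)
{
        /* kobject -> gov_attr_set -> foo_tunables: two container_of() hops */
        kfree(to_foo_tunables(to_gov_attr_set(kobj)));
}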