path: root/drivers/cpufreq/cpufreq_stats_pass.c
/*
 * PASS (Power Aware System Service) - Collect per-cluster CPU statistics
 *
 *  Copyright (C) 2013-2015 Samsung Electronics Co., Ltd.
 *    Chanwoo Choi <cw00.choi@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
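
/*
 * Overview: a deferrable delayed work samples the number of runnable
 * tasks on each CPU cluster every SAMPLING_RATE milliseconds and folds
 * the sample into a time-weighted average.  PASS consumers read (and
 * reset) that average through get_avg_nr_runnings().
 */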

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/ktime.h>
#include <linux/math64.h>

#define MAX_CLUSTER	2
#define CLUSTER_0	0
#define CLUSTER_1	1
#define CLUSTER_0_FIRST_CPU	0	/* CPU0 */
#define CLUSTER_1_FIRST_CPU	4	/* CPU4 */

#define SAMPLING_RATE	10	/* sampling period, in ms */

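/**
 * struct runqueue_data - per-cluster runnable-task statistics
 * @avg_nr_runnings:	time-weighted average of runnable tasks, scaled by 100
 * @last_time:		timestamp of the previous sample, in ms
 * @total_time:		length of the current averaging window, in ms
 */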
struct runqueue_data {
	unsigned int avg_nr_runnings;
	int64_t last_time;
	int64_t total_time;
};

static spinlock_t lock;
static struct delayed_work gwork;
static struct runqueue_data rq_data[MAX_CLUSTER];

static cpumask_var_t cluster0_cpus;
static cpumask_var_t cluster1_cpus;

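/**
 * get_avg_nr_runnings() - read and reset a cluster's runnable-task average
 * @cpu: any CPU belonging to the cluster of interest
 *
 * Returns the time-weighted average number of runnable tasks on the
 * cluster containing @cpu, scaled by 100, and restarts the averaging
 * window.  Returns 0 for an out-of-range CPU number.
 */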
int get_avg_nr_runnings(unsigned int cpu)
{
	unsigned int avg_nr_runnings;
	unsigned long flags = 0;
	int cluster_id;

	if (cpu >= NR_CPUS)
		return 0;

	if (cpu < CLUSTER_1_FIRST_CPU)
		cluster_id = CLUSTER_0;
	else
		cluster_id = CLUSTER_1;

	spin_lock_irqsave(&lock, flags);

	avg_nr_runnings = rq_data[cluster_id].avg_nr_runnings;
	rq_data[cluster_id].avg_nr_runnings = 0;

	spin_unlock_irqrestore(&lock, flags);

	return avg_nr_runnings;
}
EXPORT_SYMBOL(get_avg_nr_runnings);
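
/*
 * Illustrative use only: a cpufreq governor could drain the per-cluster
 * average from its own sampling path, e.g.
 *
 *	unsigned int avg = get_avg_nr_runnings(policy->cpu);
 *
 * where `policy' is the governor's struct cpufreq_policy.  Note that the
 * call is destructive: it clears the average and restarts the window.
 */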

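/*
 * Fold the current sample into the running average:
 *
 *	avg' = (sample * dt + avg * T) / (T + dt)
 *
 * where dt is the time since the previous sample and T is the time
 * already accumulated in the current window.
 */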
static void calculate_nr_running(struct runqueue_data *rq, cpumask_var_t cpus)
{
	int64_t time_diff = 0;
	int64_t avg_nr_runnings = 0;
	int64_t curr_time = ktime_to_ms(ktime_get());

	if (!rq->last_time)
		rq->last_time = curr_time;
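	/* a consumer has read and cleared the average: restart the window */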
	if (!rq->avg_nr_runnings)
		rq->total_time = 0;

	smp_rmb();

	/* current number of runnable tasks on the cluster, scaled by 100 */
	avg_nr_runnings = nr_running_cpumask(cpus) * 100;
	time_diff = curr_time - rq->last_time;

	if (time_diff && rq->total_time != 0) {
		avg_nr_runnings = (avg_nr_runnings * time_diff) +
			((int64_t)rq->avg_nr_runnings * rq->total_time);
		/* div64_s64(): do_div() would truncate a 64-bit divisor */
		avg_nr_runnings = div64_s64(avg_nr_runnings,
					    rq->total_time + time_diff);
	}

	rq->avg_nr_runnings = avg_nr_runnings;
	rq->last_time = curr_time;
	rq->total_time += time_diff;
}

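/*
 * Periodic sampler: updates both clusters under the lock and re-arms
 * itself every SAMPLING_RATE ms.  The work is deferrable, so it does
 * not wake an otherwise idle system just to take a sample.
 */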
static void avg_nr_runnings_work(struct work_struct *work)
{
	unsigned long flags = 0;

	spin_lock_irqsave(&lock, flags);
	calculate_nr_running(&rq_data[CLUSTER_0], cluster0_cpus);
	if (NR_CPUS > CLUSTER_1_FIRST_CPU)
		calculate_nr_running(&rq_data[CLUSTER_1], cluster1_cpus);
	spin_unlock_irqrestore(&lock, flags);

	schedule_delayed_work(&gwork, msecs_to_jiffies(SAMPLING_RATE));
}

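/*
 * The cluster topology is hard-coded: CPUs below CLUSTER_1_FIRST_CPU form
 * cluster 0, the rest form cluster 1 (e.g. a 4+4 big.LITTLE system).
 */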
static int __init pass_stats_init(void)
{
	int i;

	for (i = 0; i < MAX_CLUSTER; i++) {
		rq_data[i].avg_nr_runnings = 0;
		rq_data[i].last_time = 0;
		rq_data[i].total_time = 0;
	}

	/* alloc_cpumask_var() can fail when CONFIG_CPUMASK_OFFSTACK=y */
	if (!alloc_cpumask_var(&cluster0_cpus, GFP_KERNEL))
		return -ENOMEM;
	if (NR_CPUS > CLUSTER_1_FIRST_CPU &&
	    !alloc_cpumask_var(&cluster1_cpus, GFP_KERNEL)) {
		free_cpumask_var(cluster0_cpus);
		return -ENOMEM;
	}

	for (i = 0; i < NR_CPUS; i++) {
		if (i < CLUSTER_1_FIRST_CPU)
			cpumask_set_cpu(i, cluster0_cpus);
		else
			cpumask_set_cpu(i, cluster1_cpus);
	}

	spin_lock_init(&lock);
	INIT_DEFERRABLE_WORK(&gwork, avg_nr_runnings_work);
	schedule_delayed_work(&gwork, msecs_to_jiffies(SAMPLING_RATE));

	return 0;
}

static void __exit pass_stats_exit(void)
{
	cancel_delayed_work_sync(&gwork);

	free_cpumask_var(cluster0_cpus);
	if (NR_CPUS > CLUSTER_1_FIRST_CPU)
		free_cpumask_var(cluster1_cpus);
}
module_init(pass_stats_init);
module_exit(pass_stats_exit);

MODULE_DESCRIPTION("PASS (Power Aware System Service) CPU statistics collector");
MODULE_LICENSE("GPL v2");