/*
 * cpuidle.h - a generic framework for CPU idle power management
 *
 * (C) 2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Shaohua Li <shaohua.li@intel.com>
 *          Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#ifndef _LINUX_CPUIDLE_H
#define _LINUX_CPUIDLE_H

#include <linux/percpu.h>
#include <linux/list.h>
#include <linux/hrtimer.h>

/* Upper bound on idle states a single driver may declare. */
#define CPUIDLE_STATE_MAX	10
#define CPUIDLE_NAME_LEN	16
#define CPUIDLE_DESC_LEN	32

struct module;

struct cpuidle_device;
struct cpuidle_driver;


/****************************
 * CPUIDLE DEVICE INTERFACE *
 ****************************/

/* Per-device book-keeping for one idle state. */
struct cpuidle_state_usage {
	unsigned long long	disable;	/* per-CPU disable count */
	unsigned long long	usage;		/* number of times entered */
	unsigned long long	time; /* in US */
};

38
39
struct cpuidle_state {
	char		name[CPUIDLE_NAME_LEN];
40
	char		desc[CPUIDLE_DESC_LEN];
41
42
43

	unsigned int	flags;
	unsigned int	exit_latency; /* in US */
44
	int		power_usage; /* in mW */
45
	unsigned int	target_residency; /* in US */
46
	bool		disabled; /* disabled on all CPUs */
47
48

	int (*enter)	(struct cpuidle_device *dev,
49
			struct cpuidle_driver *drv,
50
			int index);
51
52

	int (*enter_dead) (struct cpuidle_device *dev, int index);
53
54
55
56
57
58
59
60
61

	/*
	 * CPUs execute ->enter_freeze with the local tick or entire timekeeping
	 * suspended, so it must not re-enable interrupts at any point (even
	 * temporarily) or attempt to change states of clock event devices.
	 */
	void (*enter_freeze) (struct cpuidle_device *dev,
			      struct cpuidle_driver *drv,
			      int index);
62
63
64
};

/* Idle State Flags */
#define CPUIDLE_FLAG_COUPLED	(0x02) /* state applies to multiple cpus */
#define CPUIDLE_FLAG_TIMER_STOP (0x04)  /* timer is stopped on this state */

/* Upper 16 bits of the flags word are reserved for driver-private use. */
#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)

70
struct cpuidle_device_kobj;
71
72
struct cpuidle_state_kobj;
struct cpuidle_driver_kobj;
73

74
struct cpuidle_device {
75
	unsigned int		registered:1;
76
	unsigned int		enabled:1;
77
	unsigned int		use_deepest_state:1;
78
79
80
	unsigned int		cpu;

	int			last_residency;
81
	struct cpuidle_state_usage	states_usage[CPUIDLE_STATE_MAX];
82
	struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
83
	struct cpuidle_driver_kobj *kobj_driver;
84
	struct cpuidle_device_kobj *kobj_dev;
85
	struct list_head 	device_list;
86
87
88
89
90

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
	cpumask_t		coupled_cpus;
	struct cpuidle_coupled	*coupled;
#endif
91
92
93
};

DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
94
DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110

/**
 * cpuidle_get_last_residency - retrieves the last state's residency time
 * @dev: the target CPU
 *
 * Return: time spent in the most recently exited idle state; presumably in
 * microseconds, matching the US units used elsewhere in this header — confirm
 * against the code that updates @dev->last_residency.
 */
static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
{
	return dev->last_residency;
}


/****************************
 * CPUIDLE DRIVER INTERFACE *
 ****************************/

struct cpuidle_driver {
111
	const char		*name;
112
	struct module 		*owner;
113
	int                     refcnt;
114

115
116
        /* used by the cpuidle framework to setup the broadcast timer */
	unsigned int            bctimer:1;
117
	/* states array must be ordered in decreasing power consumption */
118
119
120
	struct cpuidle_state	states[CPUIDLE_STATE_MAX];
	int			state_count;
	int			safe_state_index;
121
122

	/* the driver handles the cpus in cpumask */
123
	struct cpumask		*cpumask;
124
125
126
};

#ifdef CONFIG_CPU_IDLE
127
extern void disable_cpuidle(void);
128
129
extern bool cpuidle_not_available(struct cpuidle_driver *drv,
				  struct cpuidle_device *dev);
130
131
132
133
134
135
136

extern int cpuidle_select(struct cpuidle_driver *drv,
			  struct cpuidle_device *dev);
extern int cpuidle_enter(struct cpuidle_driver *drv,
			 struct cpuidle_device *dev, int index);
extern void cpuidle_reflect(struct cpuidle_device *dev, int index);

137
extern int cpuidle_register_driver(struct cpuidle_driver *drv);
138
139
140
extern struct cpuidle_driver *cpuidle_get_driver(void);
extern struct cpuidle_driver *cpuidle_driver_ref(void);
extern void cpuidle_driver_unref(void);
141
142
143
extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
extern int cpuidle_register_device(struct cpuidle_device *dev);
extern void cpuidle_unregister_device(struct cpuidle_device *dev);
144
145
146
extern int cpuidle_register(struct cpuidle_driver *drv,
			    const struct cpumask *const coupled_cpus);
extern void cpuidle_unregister(struct cpuidle_driver *drv);
147
148
extern void cpuidle_pause_and_lock(void);
extern void cpuidle_resume_and_unlock(void);
149
150
extern void cpuidle_pause(void);
extern void cpuidle_resume(void);
151
152
extern int cpuidle_enable_device(struct cpuidle_device *dev);
extern void cpuidle_disable_device(struct cpuidle_device *dev);
153
154
extern int cpuidle_play_dead(void);

155
extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
156
157
static inline struct cpuidle_device *cpuidle_get_device(void)
{return __this_cpu_read(cpuidle_devices); }
158
#else
159
static inline void disable_cpuidle(void) { }
160
161
162
static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
					 struct cpuidle_device *dev)
{return true; }
163
164
165
166
167
168
169
static inline int cpuidle_select(struct cpuidle_driver *drv,
				 struct cpuidle_device *dev)
{return -ENODEV; }
static inline int cpuidle_enter(struct cpuidle_driver *drv,
				struct cpuidle_device *dev, int index)
{return -ENODEV; }
static inline void cpuidle_reflect(struct cpuidle_device *dev, int index) { }
170
static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
171
{return -ENODEV; }
172
static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; }
173
174
static inline struct cpuidle_driver *cpuidle_driver_ref(void) {return NULL; }
static inline void cpuidle_driver_unref(void) {}
175
176
static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { }
static inline int cpuidle_register_device(struct cpuidle_device *dev)
177
{return -ENODEV; }
178
static inline void cpuidle_unregister_device(struct cpuidle_device *dev) { }
179
180
181
182
static inline int cpuidle_register(struct cpuidle_driver *drv,
				   const struct cpumask *const coupled_cpus)
{return -ENODEV; }
static inline void cpuidle_unregister(struct cpuidle_driver *drv) { }
183
184
static inline void cpuidle_pause_and_lock(void) { }
static inline void cpuidle_resume_and_unlock(void) { }
185
186
static inline void cpuidle_pause(void) { }
static inline void cpuidle_resume(void) { }
187
static inline int cpuidle_enable_device(struct cpuidle_device *dev)
188
{return -ENODEV; }
189
static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
190
static inline int cpuidle_play_dead(void) {return -ENODEV; }
191
192
static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
	struct cpuidle_device *dev) {return NULL; }
193
static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
194
195
#endif

196
#ifdef CONFIG_CPU_IDLE
197
198
199
200
extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
				      struct cpuidle_device *dev);
extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
				struct cpuidle_device *dev);
201
extern void cpuidle_use_deepest_state(bool enable);
202
#else
203
204
205
206
207
208
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
					     struct cpuidle_device *dev)
{return -ENODEV; }
static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
				       struct cpuidle_device *dev)
{return -ENODEV; }
209
210
211
static inline void cpuidle_use_deepest_state(bool enable)
{
}
212
213
#endif

214
215
/* kernel/sched/idle.c */
extern void sched_idle_set_state(struct cpuidle_state *idle_state);
216
extern void default_idle_call(void);
217

218
219
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
220
221
222
223
#else
static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
{
}
224
225
#endif

226
227
228
229
230
231
232
233
234
/******************************
 * CPUIDLE GOVERNOR INTERFACE *
 ******************************/

struct cpuidle_governor {
	char			name[CPUIDLE_NAME_LEN];
	struct list_head 	governor_list;
	unsigned int		rating;

235
236
237
238
	int  (*enable)		(struct cpuidle_driver *drv,
					struct cpuidle_device *dev);
	void (*disable)		(struct cpuidle_driver *drv,
					struct cpuidle_device *dev);
239

240
241
	int  (*select)		(struct cpuidle_driver *drv,
					struct cpuidle_device *dev);
242
	void (*reflect)		(struct cpuidle_device *dev, int index);
243
244
245
246
247
248
249
250
251
};

#ifdef CONFIG_CPU_IDLE
extern int cpuidle_register_governor(struct cpuidle_governor *gov);
#else
/* Without CONFIG_CPU_IDLE, governor registration is a successful no-op. */
static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
{return 0;}
#endif

/*
 * Index of the first real idle state: when the arch provides cpu_relax-style
 * polling, state 0 is reserved for it and driver states start at 1.
 */
#ifdef CONFIG_ARCH_HAS_CPU_RELAX
#define CPUIDLE_DRIVER_STATE_START	1
#else
#define CPUIDLE_DRIVER_STATE_START	0
#endif

/*
 * Enter idle state @idx via @low_level_idle_enter, bracketing it with
 * cpu_pm_enter()/cpu_pm_exit().  Index 0 just does cpu_do_idle() and
 * returns from the *enclosing* function (note the bare "return idx").
 * Evaluates to @idx on success, -1 on failure.
 */
#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx)	\
({								\
	int __ret;						\
								\
	if (!idx) {						\
		cpu_do_idle();					\
		return idx;					\
	}							\
								\
	__ret = cpu_pm_enter();					\
	if (!__ret) {						\
		__ret = low_level_idle_enter(idx);		\
		cpu_pm_exit();					\
	}							\
								\
	__ret ? -1 : idx;					\
})

#endif /* _LINUX_CPUIDLE_H */