xref: /linux/kernel/power/power.h (revision 6ed7ffddcf61f668114edb676417e5fb33773b59)
#include <linux/suspend.h>
#include <linux/suspend_ioctls.h>
#include <linux/utsname.h>
#include <linux/freezer.h>

struct swsusp_info {
	struct new_utsname	uts;
	u32			version_code;
	unsigned long		num_physpages;
	int			cpus;
	unsigned long		image_pages;
	unsigned long		pages;
	unsigned long		size;
} __attribute__((aligned(PAGE_SIZE)));

#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
extern void __init hibernate_reserved_size_init(void);
extern void __init hibernate_image_size_init(void);

#ifdef CONFIG_ARCH_HIBERNATION_HEADER
/* Maximum size of architecture specific data in a hibernation header */
#define MAX_ARCH_HEADER_SIZE	(sizeof(struct new_utsname) + 4)

extern int arch_hibernation_header_save(void *addr, unsigned int max_size);
extern int arch_hibernation_header_restore(void *addr);

static inline int init_header_complete(struct swsusp_info *info)
{
	return arch_hibernation_header_save(info, MAX_ARCH_HEADER_SIZE);
}

static inline char *check_image_kernel(struct swsusp_info *info)
{
	return arch_hibernation_header_restore(info) ?
			"architecture specific data" : NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
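/*
 * A minimal sketch (not taken from any real architecture) of the pair of
 * hooks an arch selecting CONFIG_ARCH_HIBERNATION_HEADER provides for the
 * two inline wrappers above; struct arch_hdr and its field are hypothetical
 * names used only for illustration:
 *
 *	struct arch_hdr {			// hypothetical
 *		unsigned long	resume_data;	// hypothetical
 *	};
 *
 *	int arch_hibernation_header_save(void *addr, unsigned int max_size)
 *	{
 *		struct arch_hdr *hdr = addr;
 *
 *		if (max_size < sizeof(*hdr))
 *			return -EOVERFLOW;
 *		hdr->resume_data = 0;	// record whatever resume will need
 *		return 0;
 *	}
 *
 *	int arch_hibernation_header_restore(void *addr)
 *	{
 *		// read the fields back; return 0, or -EINVAL on mismatch
 *		return 0;
 *	}
 */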

/*
 * Keep some memory free so that I/O operations can succeed without paging
 * [Might this be more than 4 MB?]
 */
#define PAGES_FOR_IO	((4096 * 1024) >> PAGE_SHIFT)

/*
 * Keep 1 MB of memory free so that device drivers can allocate some pages in
 * their .suspend() routines without breaking the suspend to disk.
 */
#define SPARE_PAGES	((1024 * 1024) >> PAGE_SHIFT)
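/*
 * For reference, on a configuration with 4 KiB pages (PAGE_SHIFT == 12)
 * the two reservations above work out to:
 *
 *	PAGES_FOR_IO = (4096 * 1024) >> 12 = 1024 pages (4 MB)
 *	SPARE_PAGES  = (1024 * 1024) >> 12 =  256 pages (1 MB)
 *
 * With 64 KiB pages (PAGE_SHIFT == 16) the same macros yield 64 and 16
 * pages respectively; the reserved amount in bytes stays the same.
 */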

/* kernel/power/hibernate.c */
extern bool freezer_test_done;

extern int hibernation_snapshot(int platform_mode);
extern int hibernation_restore(int platform_mode);
extern int hibernation_platform_enter(void);

#else /* !CONFIG_HIBERNATION */

static inline void hibernate_reserved_size_init(void) {}
static inline void hibernate_image_size_init(void) {}
#endif /* !CONFIG_HIBERNATION */

extern int pfn_is_nosave(unsigned long);

#define power_attr(_name) \
static struct kobj_attribute _name##_attr = {	\
	.attr	= {				\
		.name = __stringify(_name),	\
		.mode = 0644,			\
	},					\
	.show	= _name##_show,			\
	.store	= _name##_store,		\
}
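/*
 * Hedged usage sketch: power_attr(foo) expects foo_show() and foo_store()
 * to exist with the usual struct kobj_attribute prototypes and defines a
 * 0644 attribute named "foo".  "foo" and foo_value are illustrative names,
 * not real users of the macro:
 *
 *	static int foo_value;		// hypothetical backing value
 *
 *	static ssize_t foo_show(struct kobject *kobj,
 *				struct kobj_attribute *attr, char *buf)
 *	{
 *		return sprintf(buf, "%d\n", foo_value);
 *	}
 *
 *	static ssize_t foo_store(struct kobject *kobj,
 *				 struct kobj_attribute *attr,
 *				 const char *buf, size_t n)
 *	{
 *		return kstrtoint(buf, 10, &foo_value) ? : n;
 *	}
 *
 *	power_attr(foo);	// produces foo_attr for sysfs registration
 */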

/* Preferred image size in bytes (default 500 MB) */
extern unsigned long image_size;
/* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */
extern unsigned long reserved_size;
extern int in_suspend;
extern dev_t swsusp_resume_device;
extern sector_t swsusp_resume_block;

extern asmlinkage int swsusp_arch_suspend(void);
extern asmlinkage int swsusp_arch_resume(void);

extern int create_basic_memory_bitmaps(void);
extern void free_basic_memory_bitmaps(void);
extern int hibernate_preallocate_memory(void);

/**
 *	Auxiliary structure used for reading the snapshot image data and
 *	metadata from and writing them to the list of page backup entries
 *	(PBEs), which is the main data structure of swsusp.
 *
 *	Using struct snapshot_handle we can transfer the image, including its
 *	metadata, as a continuous sequence of bytes with the help of
 *	snapshot_read_next() and snapshot_write_next().
 *
 *	The code that writes the image to storage or transfers it to
 *	user space is required to use snapshot_read_next() for this
 *	purpose and it should not make any assumptions regarding the internal
 *	structure of the image.  Similarly, the code that reads the image from
 *	storage or transfers it from user space is required to use
 *	snapshot_write_next().
 *
 *	This may allow us to change the internal structure of the image
 *	in the future with considerably less effort.
 */

struct snapshot_handle {
	unsigned int	cur;	/* number of the block of PAGE_SIZE bytes the
				 * next operation will refer to (i.e. current)
				 */
	void		*buffer;	/* address of the block to read from
					 * or write to
					 */
	int		sync_read;	/* Set to one to notify the caller of
					 * snapshot_write_next() that it may
					 * need to call wait_on_bio_chain()
					 */
};

/*
 * This macro returns the address from/to which the caller of
 * snapshot_read_next()/snapshot_write_next() is allowed to
 * read/write data after the function returns.
 */
#define data_of(handle)	((handle).buffer)
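/*
 * Illustrative read loop only (the in-tree consumers live in
 * kernel/power/swap.c and user.c): snapshot_read_next() is expected to
 * return the number of bytes available at data_of(handle), 0 once the
 * whole image has been handed out, or a negative error code.
 *
 *	struct snapshot_handle handle;
 *	int nbytes;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((nbytes = snapshot_read_next(&handle)) > 0) {
 *		// nbytes of image data are now readable at data_of(handle);
 *		// copy them to storage or to a user buffer
 *	}
 *	if (nbytes < 0)
 *		pr_err("error %d while reading the image\n", nbytes);
 */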

extern unsigned int snapshot_additional_pages(struct zone *zone);
extern unsigned long snapshot_get_image_size(void);
extern int snapshot_read_next(struct snapshot_handle *handle);
extern int snapshot_write_next(struct snapshot_handle *handle);
extern void snapshot_write_finalize(struct snapshot_handle *handle);
extern int snapshot_image_loaded(struct snapshot_handle *handle);

/* If unset, the snapshot device cannot be opened. */
extern atomic_t snapshot_device_available;

extern sector_t alloc_swapdev_block(int swap);
extern void free_all_swap_pages(int swap);
extern int swsusp_swap_in_use(void);

/*
 * Flags that can be passed from the hibernating kernel to the "boot" kernel
 * in the image header.
 */
#define SF_PLATFORM_MODE	1
#define SF_NOCOMPRESS_MODE	2
#define SF_CRC32_MODE		4
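/*
 * A hedged sketch of how the "boot" kernel might act on these bits once
 * swsusp_read() has filled in the flags word; the branches are illustrative
 * (the in-tree handling is split between swap.c and hibernate.c):
 *
 *	unsigned int flags;
 *
 *	if (!swsusp_read(&flags)) {
 *		bool platform = flags & SF_PLATFORM_MODE;
 *		bool compressed = !(flags & SF_NOCOMPRESS_MODE);
 *
 *		// pick the matching restore path based on the two bits
 *	}
 */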

/* kernel/power/hibernate.c */
extern int swsusp_check(void);
extern void swsusp_free(void);
extern int swsusp_read(unsigned int *flags_p);
extern int swsusp_write(unsigned int flags);
extern void swsusp_close(fmode_t);
#ifdef CONFIG_SUSPEND
extern int swsusp_unmark(void);
#endif

/* kernel/power/block_io.c */
extern struct block_device *hib_resume_bdev;

extern int hib_bio_read_page(pgoff_t page_off, void *addr,
		struct bio **bio_chain);
extern int hib_bio_write_page(pgoff_t page_off, void *addr,
		struct bio **bio_chain);
extern int hib_wait_on_bio_chain(struct bio **bio_chain);
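/*
 * Sketch of a plausible calling pattern (illustrative only): a non-NULL
 * bio_chain makes the request asynchronous and links the bio into the
 * chain, which is then waited on in one go, while a NULL bio_chain makes
 * the call synchronous.
 *
 *	struct bio *bio_chain = NULL;
 *	int error;
 *
 *	// offset (page offset) and page assumed to be set up by the caller
 *	error = hib_bio_write_page(offset, page_address(page), &bio_chain);
 *	// queue further pages the same way, then wait for all of them
 *	if (!error)
 *		error = hib_wait_on_bio_chain(&bio_chain);
 */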

struct timeval;
/* kernel/power/swsusp.c */
extern void swsusp_show_speed(struct timeval *, struct timeval *,
				unsigned int, char *);

#ifdef CONFIG_SUSPEND
/* kernel/power/suspend.c */
extern const char *const pm_states[];

extern bool valid_state(suspend_state_t state);
extern int suspend_devices_and_enter(suspend_state_t state);
#else /* !CONFIG_SUSPEND */
static inline int suspend_devices_and_enter(suspend_state_t state)
{
	return -ENOSYS;
}
static inline bool valid_state(suspend_state_t state) { return false; }
#endif /* !CONFIG_SUSPEND */

#ifdef CONFIG_PM_TEST_SUSPEND
/* kernel/power/suspend_test.c */
extern void suspend_test_start(void);
extern void suspend_test_finish(const char *label);
#else /* !CONFIG_PM_TEST_SUSPEND */
static inline void suspend_test_start(void) {}
static inline void suspend_test_finish(const char *label) {}
#endif /* !CONFIG_PM_TEST_SUSPEND */

#ifdef CONFIG_PM_SLEEP
/* kernel/power/main.c */
extern int pm_notifier_call_chain(unsigned long val);
#endif

#ifdef CONFIG_HIGHMEM
int restore_highmem(void);
#else
static inline unsigned int count_highmem_pages(void) { return 0; }
static inline int restore_highmem(void) { return 0; }
#endif

/*
 * Suspend test levels
 */
enum {
	/* keep first */
	TEST_NONE,
	TEST_CORE,
	TEST_CPUS,
	TEST_PLATFORM,
	TEST_DEVICES,
	TEST_FREEZER,
	/* keep last */
	__TEST_AFTER_LAST
};

#define TEST_FIRST	TEST_NONE
#define TEST_MAX	(__TEST_AFTER_LAST - 1)

extern int pm_test_level;
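/*
 * Illustrative gate only (the in-tree check lives in kernel/power/suspend.c):
 * a level is considered hit when pm_test_level equals it, at which point the
 * suspend sequence pauses briefly and unwinds instead of going deeper.
 * hypothetical_suspend_test() is not a real function name.
 *
 *	static bool hypothetical_suspend_test(int level)
 *	{
 *		if (pm_test_level == level) {
 *			pr_info("suspend debug: Waiting for 5 seconds.\n");
 *			mdelay(5000);
 *			return true;	// caller should back out here
 *		}
 *		return false;
 *	}
 */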

#ifdef CONFIG_SUSPEND_FREEZER
static inline int suspend_freeze_processes(void)
{
	int error;

	error = freeze_processes();
	/*
	 * freeze_processes() automatically thaws every task if freezing
	 * fails. So we need not do anything extra upon error.
	 */
	if (error)
		return error;

	error = freeze_kernel_threads();
	/*
	 * freeze_kernel_threads() thaws only kernel threads upon freezing
	 * failure. So we have to thaw the userspace tasks ourselves.
	 */
	if (error)
		thaw_processes();

	return error;
}

static inline void suspend_thaw_processes(void)
{
	thaw_processes();
}
#else
static inline int suspend_freeze_processes(void)
{
	return 0;
}

static inline void suspend_thaw_processes(void)
{
}
#endif
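/*
 * The two helpers above are meant to be used as a matched pair around the
 * actual transition; a hedged sketch of the expected ordering (error
 * handling trimmed, surrounding steps omitted):
 *
 *	error = suspend_freeze_processes();
 *	if (!error) {
 *		error = suspend_devices_and_enter(state);
 *		suspend_thaw_processes();
 *	}
 */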

#ifdef CONFIG_PM_AUTOSLEEP

/* kernel/power/autosleep.c */
extern int pm_autosleep_init(void);
extern int pm_autosleep_lock(void);
extern void pm_autosleep_unlock(void);
extern suspend_state_t pm_autosleep_state(void);
extern int pm_autosleep_set_state(suspend_state_t state);

#else /* !CONFIG_PM_AUTOSLEEP */

static inline int pm_autosleep_init(void) { return 0; }
static inline int pm_autosleep_lock(void) { return 0; }
static inline void pm_autosleep_unlock(void) {}
static inline suspend_state_t pm_autosleep_state(void) { return PM_SUSPEND_ON; }

#endif /* !CONFIG_PM_AUTOSLEEP */
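/*
 * Hedged usage note: pm_autosleep_lock()/pm_autosleep_unlock() bracket code
 * that must not race with an autosleep transition, e.g. a sysfs handler
 * changing the sleep state (sketch only, error paths trimmed):
 *
 *	error = pm_autosleep_lock();
 *	if (error)
 *		return error;
 *	if (pm_autosleep_state() > PM_SUSPEND_ON) {
 *		error = -EBUSY;
 *		goto out;
 *	}
 *	// perform the state change
 *  out:
 *	pm_autosleep_unlock();
 */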

#ifdef CONFIG_PM_WAKELOCKS

/* kernel/power/wakelock.c */
extern ssize_t pm_show_wakelocks(char *buf, bool show_active);
extern int pm_wake_lock(const char *buf);
extern int pm_wake_unlock(const char *buf);
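/*
 * Hedged usage sketch: both calls take the user-supplied string from the
 * wake_lock/wake_unlock sysfs files, i.e. a lock name optionally followed
 * by a timeout for pm_wake_lock(); pm_show_wakelocks() formats either the
 * active or the inactive locks into buf.  "mylock" is an arbitrary name:
 *
 *	error = pm_wake_lock("mylock");		// acquire (or create) "mylock"
 *	if (!error)
 *		error = pm_wake_unlock("mylock");	// release it again
 */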

#endif /* CONFIG_PM_WAKELOCKS */
297