Commit 9ffc6694 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tag 'gcc-plugins-v4.9-rc1' of git://

Pull gcc plugins update from Kees Cook:
 "This adds a new gcc plugin named "latent_entropy". It is designed to
  extract as much uncertainty as possible from a running system at boot
  time, hoping to capitalize on any possible variation in
  CPU operation (due to runtime data differences, hardware differences,
  SMP ordering, thermal timing variation, cache behavior, etc).

  At the very least, this plugin is a much more comprehensive example
  for how to manipulate kernel code using the gcc plugin internals"

* tag 'gcc-plugins-v4.9-rc1' of git://
  latent_entropy: Mark functions with __latent_entropy
  gcc-plugins: Add latent_entropy plugin
parents 133d970e 0766f788
......@@ -383,6 +383,24 @@ config GCC_PLUGIN_SANCOV
gcc-4.5 on). It is based on the commit "Add fuzzing coverage support"
by Dmitry Vyukov <dvyukov@google.com>.
bool "Generate some entropy during boot and runtime"
depends on GCC_PLUGINS
By saying Y here the kernel will instrument some kernel code to
extract some entropy from both original and artificially created
program state. This will help especially embedded systems where
there is little 'natural' source of entropy normally. The cost
is some slowdown of the boot process (about 0.5%) and fork and
irq processing.
Note that entropy extracted this way is not cryptographically secure!
This plugin was ported from grsecurity/PaX. More information at:
......@@ -14,6 +14,11 @@ CFLAGS_prom_init.o += -fPIC
CFLAGS_btext.o += -fPIC
# Do not trace early boot code
CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
......@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
* Softirq action handler - move entries to local list and loop over them
* while passing them to the queue registered handler.
static void blk_done_softirq(struct softirq_action *h)
static __latent_entropy void blk_done_softirq(struct softirq_action *h)
struct list_head *cpu_list, local_list;
......@@ -479,8 +479,8 @@ static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
static void push_to_pool(struct work_struct *work);
static __u32 input_pool_data[INPUT_POOL_WORDS];
static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
static struct entropy_store input_pool = {
.poolinfo = &poolinfo_table[0],
......@@ -2824,6 +2824,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
return new_ns;
struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
struct user_namespace *user_ns, struct fs_struct *new_fs)
......@@ -188,6 +188,13 @@
#endif /* GCC_VERSION >= 40300 */
#if GCC_VERSION >= 40500
#ifndef __CHECKER__
#define __latent_entropy __attribute__((latent_entropy))
* Mark a position in code as unreachable. This can be used to
* suppress control flow warnings after asm blocks that transfer
......@@ -429,6 +429,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
# define __attribute_const__ /* unimplemented */
#ifndef __latent_entropy
# define __latent_entropy
* Tell gcc if a function is cold. The compiler will assume any path
* directly leading to the call is unlikely.
......@@ -105,7 +105,7 @@ struct files_struct *get_files_struct(struct task_struct *);
void put_files_struct(struct files_struct *fs);
void reset_files_struct(struct files_struct *);
int unshare_files(struct files_struct **);
struct files_struct *dup_fd(struct files_struct *, int *);
struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
void do_close_on_exec(struct files_struct *);
int iterate_fd(struct files_struct *, unsigned,
int (*)(const void *, struct file *, unsigned),
......@@ -437,7 +437,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
/* drivers/char/random.c */
extern void add_disk_randomness(struct gendisk *disk);
extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
extern void rand_initialize_disk(struct gendisk *disk);
static inline sector_t get_start_sect(struct block_device *bdev)
......@@ -39,7 +39,7 @@
/* These are for everybody (although not all archs will actually
discard it in modules) */
#define __init __section(.init.text) __cold notrace
#define __init __section(.init.text) __cold notrace __latent_entropy
#define __initdata __section(
#define __initconst __section(.init.rodata)
#define __exitdata __section(
......@@ -75,7 +75,8 @@
#define __exit __section(.exit.text) __exitused __cold notrace
/* Used for MEMORY_HOTPLUG */
#define __meminit __section(.meminit.text) __cold notrace
#define __meminit __section(.meminit.text) __cold notrace \
#define __meminitdata __section(
#define __meminitconst __section(.meminit.rodata)
#define __memexit __section(.memexit.text) __exitused __cold notrace
......@@ -18,9 +18,20 @@ struct random_ready_callback {
extern void add_device_randomness(const void *, unsigned int);
#if defined(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) && !defined(__CHECKER__)
static inline void add_latent_entropy(void)
add_device_randomness((const void *)&latent_entropy,
static inline void add_latent_entropy(void) {}
extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value);
extern void add_interrupt_randomness(int irq, int irq_flags);
unsigned int value) __latent_entropy;
extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
extern void get_random_bytes(void *buf, int nbytes);
extern int add_random_ready_callback(struct random_ready_callback *rdy);
......@@ -789,6 +789,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
return ret;
......@@ -547,7 +547,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
static __latent_entropy int dup_mmap(struct mm_struct *mm,
struct mm_struct *oldmm)
struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
struct rb_node **rb_link, *rb_parent;
......@@ -1441,7 +1442,8 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
* parts of the process environment (as per the clone
* flags). The actual kick-off is left to the caller.
static struct task_struct *copy_process(unsigned long clone_flags,
static __latent_entropy struct task_struct *copy_process(
unsigned long clone_flags,
unsigned long stack_start,
unsigned long stack_size,
int __user *child_tidptr,
......@@ -1926,6 +1928,7 @@ long _do_fork(unsigned long clone_flags,
p = copy_process(clone_flags, stack_start, stack_size,
child_tidptr, NULL, trace, tls, NUMA_NO_NODE);
* Do this prior waking up the new thread - the thread pointer
* might get invalid after that point, if the thread exits quickly.
......@@ -170,7 +170,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
static void rcu_process_callbacks(struct softirq_action *unused)
static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
......@@ -3013,7 +3013,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
* Do RCU core processing for the current CPU.
static void rcu_process_callbacks(struct softirq_action *unused)
static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
struct rcu_state *rsp;
......@@ -8522,7 +8522,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
* run_rebalance_domains is triggered when needed from the scheduler tick.
* Also triggered for nohz idle balancing (with nohz_balancing_kick set).
static void run_rebalance_domains(struct softirq_action *h)
static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
struct rq *this_rq = this_rq();
enum cpu_idle_type idle = this_rq->idle_balance ?
......@@ -496,7 +496,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
static void tasklet_action(struct softirq_action *a)
static __latent_entropy void tasklet_action(struct softirq_action *a)
struct tasklet_struct *list;
......@@ -532,7 +532,7 @@ static void tasklet_action(struct softirq_action *a)
static void tasklet_hi_action(struct softirq_action *a)
static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
struct tasklet_struct *list;
......@@ -1633,7 +1633,7 @@ static inline void __run_timers(struct timer_base *base)
* This function runs timers and the timer-tq in bottom half context.
static void run_timer_softirq(struct softirq_action *h)
static __latent_entropy void run_timer_softirq(struct softirq_action *h)
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
......@@ -74,7 +74,7 @@ void irq_poll_complete(struct irq_poll *iop)
static void irq_poll_softirq(struct softirq_action *h)
static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
int rearm = 0, budget = irq_poll_budget;
......@@ -47,7 +47,7 @@ static inline void prandom_state_selftest(void)
static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
* prandom_u32_state - seeded pseudo-random number generator.
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment