Merge branch 'linus' into tracing/ftrace
@@ -1037,8 +1037,8 @@ int current_cpuset_is_being_rebound(void)
 
 static int update_relax_domain_level(struct cpuset *cs, s64 val)
 {
-        if ((int)val < 0)
-                val = -1;
+        if (val < -1 || val >= SD_LV_MAX)
+                return -EINVAL;
 
         if (val != cs->relax_domain_level) {
                 cs->relax_domain_level = val;
@@ -1890,6 +1890,12 @@ static void common_cpu_mem_hotplug_unplug(void)
         top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
         scan_for_empty_cpusets(&top_cpuset);
 
+        /*
+         * Scheduler destroys domains on hotplug events.
+         * Rebuild them based on the current settings.
+         */
+        rebuild_sched_domains();
+
         cgroup_unlock();
 }
@@ -217,8 +217,6 @@ long rcu_batches_completed(void)
 }
 EXPORT_SYMBOL_GPL(rcu_batches_completed);
 
-EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
-
 void __rcu_read_lock(void)
 {
         int idx;
@@ -1146,6 +1146,7 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
         return HRTIMER_NORESTART;
 }
 
+#ifdef CONFIG_SMP
 static void hotplug_hrtick_disable(int cpu)
 {
         struct rq *rq = cpu_rq(cpu);
@@ -1201,6 +1202,7 @@ static void init_hrtick(void)
 {
         hotcpu_notifier(hotplug_hrtick, 0);
 }
+#endif /* CONFIG_SMP */
 
 static void init_rq_hrtick(struct rq *rq)
 {
@@ -6928,7 +6930,12 @@ static int default_relax_domain_level = -1;
 
 static int __init setup_relax_domain_level(char *str)
 {
-        default_relax_domain_level = simple_strtoul(str, NULL, 0);
+        unsigned long val;
+
+        val = simple_strtoul(str, NULL, 0);
+        if (val < SD_LV_MAX)
+                default_relax_domain_level = val;
+
         return 1;
 }
 __setup("relax_domain_level=", setup_relax_domain_level);
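The new setup routine parses into a local variable and only commits after a bounds check, so a bad "relax_domain_level=" argument leaves the previous default untouched. Below is a minimal userspace sketch of the same parse-validate-commit pattern; the names MAX_LEVEL, default_level and parse_level_opt are invented for illustration, and unlike the kernel code it also rejects trailing garbage.

#include <stdio.h>
#include <stdlib.h>

#define MAX_LEVEL 6                     /* stand-in for SD_LV_MAX */

static int default_level = -1;          /* -1 means "no request" */

static int parse_level_opt(const char *str)
{
        char *end;
        unsigned long val = strtoul(str, &end, 0);

        /* reject trailing garbage and out-of-range values */
        if (*end != '\0' || val >= MAX_LEVEL)
                return -1;

        default_level = (int)val;
        return 0;
}

int main(void)
{
        parse_level_opt("3");
        printf("default_level = %d\n", default_level);  /* 3 */
        parse_level_opt("99");                          /* rejected */
        printf("default_level = %d\n", default_level);  /* still 3 */
        return 0;
}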
@@ -7286,6 +7293,18 @@ void __attribute__((weak)) arch_update_cpu_topology(void)
 {
 }
 
+/*
+ * Free current domain masks.
+ * Called after all cpus are attached to NULL domain.
+ */
+static void free_sched_domains(void)
+{
+        ndoms_cur = 0;
+        if (doms_cur != &fallback_doms)
+                kfree(doms_cur);
+        doms_cur = &fallback_doms;
+}
+
 /*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
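free_sched_domains() frees the current domain array only when it is a real allocation and then points doms_cur back at the static fallback, so later readers always see something valid. A small userspace sketch of the same "free unless it is the static fallback" idea, with invented names (fallback, current_set, ncur):

#include <stdio.h>
#include <stdlib.h>

static int fallback[4];                 /* static storage, never freed */
static int *current_set = fallback;     /* points at fallback or the heap */
static int ncur;                        /* number of valid entries */

static void free_current_set(void)
{
        ncur = 0;
        if (current_set != fallback)    /* only heap allocations are freed */
                free(current_set);
        current_set = fallback;         /* always leave a usable default */
}

static int resize_current_set(int n)
{
        int *p = calloc(n, sizeof(*p));

        if (!p)
                return -1;
        free_current_set();             /* drop the old set first */
        current_set = p;
        ncur = n;
        return 0;
}

int main(void)
{
        resize_current_set(8);          /* heap-backed set */
        free_current_set();             /* back to the static fallback */
        printf("ncur = %d, using fallback = %d\n",
               ncur, current_set == fallback);
        return 0;
}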
@@ -7433,6 +7452,7 @@ int arch_reinit_sched_domains(void)
         get_online_cpus();
         mutex_lock(&sched_domains_mutex);
         detach_destroy_domains(&cpu_online_map);
+        free_sched_domains();
         err = arch_init_sched_domains(&cpu_online_map);
         mutex_unlock(&sched_domains_mutex);
         put_online_cpus();
@@ -7518,6 +7538,7 @@ static int update_sched_domains(struct notifier_block *nfb,
         case CPU_DOWN_PREPARE:
         case CPU_DOWN_PREPARE_FROZEN:
                 detach_destroy_domains(&cpu_online_map);
+                free_sched_domains();
                 return NOTIFY_OK;
 
         case CPU_UP_CANCELED:
@@ -7536,8 +7557,16 @@ static int update_sched_domains(struct notifier_block *nfb,
                 return NOTIFY_DONE;
         }
 
+#ifndef CONFIG_CPUSETS
+        /*
+         * Create default domain partitioning if cpusets are disabled.
+         * Otherwise we let cpusets rebuild the domains based on the
+         * current setup.
+         */
+
         /* The hotplug lock is already held by cpu_up/cpu_down */
         arch_init_sched_domains(&cpu_online_map);
+#endif
 
         return NOTIFY_OK;
 }
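The #ifndef CONFIG_CPUSETS block turns the default rebuild into a fallback: when cpusets are compiled in, the notifier skips it and leaves the rebuild to cpusets. A hedged sketch of that compile-time hand-off, with an invented HAVE_POLICY_ENGINE macro standing in for the real config option:

#include <stdio.h>

static void default_rebuild(void)
{
        puts("rebuilding with the built-in default partitioning");
}

static void handle_hotplug_event(void)
{
#ifndef HAVE_POLICY_ENGINE
        /* No policy engine built in: do the default rebuild here. */
        default_rebuild();
#else
        /* The policy engine sees the same event and rebuilds on its own. */
        puts("leaving the rebuild to the policy engine");
#endif
}

int main(void)
{
        handle_hotplug_event();
        return 0;
}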
@@ -7677,7 +7706,6 @@ static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
         else
                 rt_se->rt_rq = parent->my_q;
 
-        rt_se->rt_rq = &rq->rt;
         rt_se->my_q = rt_rq;
         rt_se->parent = parent;
         INIT_LIST_HEAD(&rt_se->run_list);
@@ -8399,7 +8427,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
 #ifdef CONFIG_CGROUP_SCHED
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
 {
-        struct task_group *tgi, *parent = tg->parent;
+        struct task_group *tgi, *parent = tg ? tg->parent : NULL;
         unsigned long total = 0;
 
         if (!parent) {
@@ -449,13 +449,19 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 #endif
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 {
         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
         struct rt_prio_array *array = &rt_rq->active;
         struct rt_rq *group_rq = group_rt_rq(rt_se);
 
-        if (group_rq && rt_rq_throttled(group_rq))
+        /*
+         * Don't enqueue the group if its throttled, or when empty.
+         * The latter is a consequence of the former when a child group
+         * get throttled and the current group doesn't have any other
+         * active members.
+         */
+        if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
                 return;
 
         list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
@@ -464,7 +470,7 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
         inc_rt_tasks(rt_se, rt_rq);
 }
 
-static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
 {
         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
         struct rt_prio_array *array = &rt_rq->active;
@@ -480,11 +486,10 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
          * Because the prio of an upper entry depends on the lower
          * entries, we must remove entries top - down.
          */
-static void dequeue_rt_stack(struct task_struct *p)
+static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 {
-        struct sched_rt_entity *rt_se, *back = NULL;
+        struct sched_rt_entity *back = NULL;
 
-        rt_se = &p->rt;
         for_each_sched_rt_entity(rt_se) {
                 rt_se->back = back;
                 back = rt_se;
@@ -492,7 +497,26 @@ static void dequeue_rt_stack(struct task_struct *p)
 
         for (rt_se = back; rt_se; rt_se = rt_se->back) {
                 if (on_rt_rq(rt_se))
-                        dequeue_rt_entity(rt_se);
+                        __dequeue_rt_entity(rt_se);
         }
 }
 
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+{
+        dequeue_rt_stack(rt_se);
+        for_each_sched_rt_entity(rt_se)
+                __enqueue_rt_entity(rt_se);
+}
+
+static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+{
+        dequeue_rt_stack(rt_se);
+
+        for_each_sched_rt_entity(rt_se) {
+                struct rt_rq *rt_rq = group_rt_rq(rt_se);
+
+                if (rt_rq && rt_rq->rt_nr_running)
+                        __enqueue_rt_entity(rt_se);
+        }
+}
+
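After this change the entity-level enqueue always tears the whole stack down first (top-down, via the back pointers recorded while walking up the parent chain) and then re-adds entries bottom-up, while the entity-level dequeue re-adds only group entries that still have runnable members. A self-contained sketch of that two-pass walk, using a simplified entity type rather than the kernel's sched_rt_entity and rt_rq:

#include <stdio.h>
#include <stddef.h>

struct entity {
        const char *name;
        struct entity *parent;  /* toward the top of the hierarchy */
        struct entity *back;    /* scratch link filled by dequeue_stack() */
        int queued;
};

static void __enqueue(struct entity *e)
{
        e->queued = 1;
        printf("enqueue %s\n", e->name);
}

static void __dequeue(struct entity *e)
{
        e->queued = 0;
        printf("dequeue %s\n", e->name);
}

/* Remove the whole chain top-down; upper entries may depend on lower ones,
 * so the lower entries must still be linked when removal starts. */
static void dequeue_stack(struct entity *e)
{
        struct entity *back = NULL;

        for (; e; e = e->parent) {
                e->back = back;
                back = e;
        }
        for (e = back; e; e = e->back) {
                if (e->queued)
                        __dequeue(e);
        }
}

/* Re-insert bottom-up so each parent sees its child already queued. */
static void enqueue_entity(struct entity *e)
{
        dequeue_stack(e);
        for (; e; e = e->parent)
                __enqueue(e);
}

int main(void)
{
        struct entity top = { "top", NULL, NULL, 0 };
        struct entity group = { "group", &top, NULL, 0 };
        struct entity task = { "task", &group, NULL, 0 };

        enqueue_entity(&task); /* nothing to dequeue; enqueues task, group, top */
        enqueue_entity(&task); /* dequeues top, group, task, then re-enqueues */
        return 0;
}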
@@ -506,32 +530,15 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
         if (wakeup)
                 rt_se->timeout = 0;
 
-        dequeue_rt_stack(p);
-
-        /*
-         * enqueue everybody, bottom - up.
-         */
-        for_each_sched_rt_entity(rt_se)
-                enqueue_rt_entity(rt_se);
+        enqueue_rt_entity(rt_se);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 {
         struct sched_rt_entity *rt_se = &p->rt;
-        struct rt_rq *rt_rq;
 
         update_curr_rt(rq);
-
-        dequeue_rt_stack(p);
-
-        /*
-         * re-enqueue all non-empty rt_rq entities.
-         */
-        for_each_sched_rt_entity(rt_se) {
-                rt_rq = group_rt_rq(rt_se);
-                if (rt_rq && rt_rq->rt_nr_running)
-                        enqueue_rt_entity(rt_se);
-        }
+        dequeue_rt_entity(rt_se);
 }
 
 /*
@@ -542,8 +549,10 @@ static
 void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
 {
         struct rt_prio_array *array = &rt_rq->active;
+        struct list_head *queue = array->queue + rt_se_prio(rt_se);
 
-        list_move_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
+        if (on_rt_rq(rt_se))
+                list_move_tail(&rt_se->run_list, queue);
 }
 
 static void requeue_task_rt(struct rq *rq, struct task_struct *p)
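requeue_rt_entity() now moves the entity only when it is actually on a run queue; moving an unlinked entry would corrupt the list. The sketch below shows the same guard on a minimal circular doubly linked list; list_init, on_list and move_tail_if_queued are simplified stand-ins for the kernel's list helpers, not their real implementations.

#include <stdio.h>

struct node {
        struct node *next, *prev;
};

static void list_init(struct node *n)
{
        n->next = n->prev = n;          /* self-linked == not on any list */
}

static void list_add_tail(struct node *n, struct node *head)
{
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
}

static void list_del_init(struct node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        list_init(n);
}

static int on_list(const struct node *n)
{
        return n->next != n;
}

/* Move to the tail only when the entry is actually queued. */
static void move_tail_if_queued(struct node *n, struct node *head)
{
        if (!on_list(n))
                return;
        list_del_init(n);
        list_add_tail(n, head);
}

int main(void)
{
        struct node head, a, b;

        list_init(&head);
        list_init(&a);
        list_init(&b);
        list_add_tail(&a, &head);
        move_tail_if_queued(&b, &head); /* b is not queued: no-op */
        move_tail_if_queued(&a, &head); /* a is queued: moved to the tail */
        printf("a on list: %d, b on list: %d\n", on_list(&a), on_list(&b));
        return 0;
}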
@@ -198,6 +198,9 @@ static inline void sched_info_queued(struct task_struct *t)
 /*
  * Called when a process ceases being the active-running process, either
  * voluntarily or involuntarily. Now we can calculate how long we ran.
+ * Also, if the process is still in the TASK_RUNNING state, call
+ * sched_info_queued() to mark that it has now again started waiting on
+ * the runqueue.
  */
 static inline void sched_info_depart(struct task_struct *t)
 {
@@ -206,6 +209,9 @@ static inline void sched_info_depart(struct task_struct *t)
 
         t->sched_info.cpu_time += delta;
         rq_sched_info_depart(task_rq(t), delta);
+
+        if (t->state == TASK_RUNNING)
+                sched_info_queued(t);
 }
 
 /*
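The added lines close an accounting gap: a task that is preempted stays runnable, so its next wait interval should start at the moment it leaves the CPU. A userspace sketch of that depart/queued bookkeeping, with simplified types and CLOCK_MONOTONIC standing in for the scheduler clock:

#include <stdio.h>
#include <time.h>

enum task_state { RUNNING, SLEEPING };

struct task_info {
        enum task_state state;
        unsigned long long cpu_time;    /* total time spent on the CPU */
        unsigned long long last_ran;    /* when it last got on the CPU */
        unsigned long long last_queued; /* when it last started waiting */
};

static unsigned long long now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static void info_queued(struct task_info *t)
{
        t->last_queued = now_ns();
}

/* Called when the task leaves the CPU, voluntarily or not. */
static void info_depart(struct task_info *t)
{
        unsigned long long delta = now_ns() - t->last_ran;

        t->cpu_time += delta;

        /* Preempted but still runnable: it is waiting again from now on. */
        if (t->state == RUNNING)
                info_queued(t);
}

int main(void)
{
        struct task_info t = { RUNNING, 0, now_ns(), 0 };

        info_depart(&t);        /* accumulates cpu_time, re-marks as queued */
        printf("cpu_time=%llu queued_at=%llu\n", t.cpu_time, t.last_queued);
        return 0;
}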
@@ -49,12 +49,17 @@ static unsigned long get_timestamp(int this_cpu)
         return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */
 }
 
-void touch_softlockup_watchdog(void)
+static void __touch_softlockup_watchdog(void)
 {
         int this_cpu = raw_smp_processor_id();
 
         __raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
 }
+
+void touch_softlockup_watchdog(void)
+{
+        __raw_get_cpu_var(touch_timestamp) = 0;
+}
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
 void touch_all_softlockup_watchdogs(void)
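The exported touch_softlockup_watchdog() no longer reads the clock; it stores a 0 sentinel, and the next softlockup_tick() (or the internal call sites switched to __touch_softlockup_watchdog() below) records a real timestamp. A single-variable userspace sketch of that split, with invented names and time(NULL) in place of the per-CPU timestamp machinery:

#include <stdio.h>
#include <time.h>

static unsigned long touch_timestamp;  /* 0 means "freshly touched" */

static unsigned long get_timestamp(void)
{
        return (unsigned long)time(NULL);
}

static void __touch_watchdog(void)
{
        touch_timestamp = get_timestamp();
}

/* Cheap, callable from any context: defer reading the clock to the tick. */
static void touch_watchdog(void)
{
        touch_timestamp = 0;
}

static void watchdog_tick(unsigned long threshold)
{
        unsigned long now;

        if (touch_timestamp == 0) {     /* sentinel: re-arm and skip checks */
                __touch_watchdog();
                return;
        }
        now = get_timestamp();
        if (now > touch_timestamp + threshold)
                printf("BUG: soft lockup - %lus without a touch\n",
                       now - touch_timestamp);
}

int main(void)
{
        touch_watchdog();
        watchdog_tick(60);      /* first tick after a touch just re-arms */
        watchdog_tick(60);      /* later ticks compare against the threshold */
        return 0;
}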
@@ -80,7 +85,7 @@ void softlockup_tick(void)
         unsigned long now;
 
         if (touch_timestamp == 0) {
-                touch_softlockup_watchdog();
+                __touch_softlockup_watchdog();
                 return;
         }
@@ -95,7 +100,7 @@ void softlockup_tick(void)
 
         /* do not print during early bootup: */
         if (unlikely(system_state != SYSTEM_RUNNING)) {
-                touch_softlockup_watchdog();
+                __touch_softlockup_watchdog();
                 return;
         }
@@ -214,7 +219,7 @@ static int watchdog(void *__bind_cpu)
         sched_setscheduler(current, SCHED_FIFO, &param);
 
         /* initialize timestamp */
-        touch_softlockup_watchdog();
+        __touch_softlockup_watchdog();
 
         set_current_state(TASK_INTERRUPTIBLE);
         /*
@@ -223,7 +228,7 @@ static int watchdog(void *__bind_cpu)
          * debug-printout triggers in softlockup_tick().
          */
         while (!kthread_should_stop()) {
-                touch_softlockup_watchdog();
+                __touch_softlockup_watchdog();
                 schedule();
 
                 if (kthread_should_stop())