linux57-tkg: Update BMQ to v5.7-r0 - http://cchalpha.blogspot.com/2020/06/bmq-v57-r0-release.html
The psi.c compilation error fix was added to the patchset: 53c690361e
I have also added compilation fixes for arch_set_thermal_pressure and sched_thermal_decay_shift that I found while working with the 5.7 RCs. Alfred will likely end up with similar solutions to mine when he fixes these on his end, so the change should be pretty transparent.
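For reference, both symbols live in mainline 5.7 scheduler files that BMQ replaces (arch_set_thermal_pressure in kernel/sched/core.c, sched_thermal_decay_shift in kernel/sched/fair.c), so the BMQ sources have to carry their own definitions or the build fails. Below is a minimal sketch of the kind of fix involved, mirroring the mainline 5.7 definitions — illustrative only, not necessarily the exact code shipped in this commit (the bmq_sched.h hunks further down show the analogous #ifndef fallback for arch_scale_freq_tick):

/*
 * Illustrative sketch based on mainline 5.7 kernel/sched/core.c and
 * kernel/sched/fair.c; the actual fix in this commit may differ.
 */

/* Per-CPU thermal pressure, normally defined in kernel/sched/core.c. */
DEFINE_PER_CPU(unsigned long, thermal_pressure);

/*
 * Called from drivers/base/arch_topology.c when thermal throttling
 * changes usable CPU capacity; with core.c replaced, something else
 * must provide this symbol.
 */
void arch_set_thermal_pressure(struct cpumask *cpus,
			       unsigned long th_pressure)
{
	int cpu;

	for_each_cpu(cpu, cpus)
		WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
}

/*
 * Decay shift consumed by rq_clock_thermal(); mainline defines it in
 * fair.c together with its boot parameter.
 */
int sched_thermal_decay_shift;

static int __init setup_sched_thermal_decay_shift(char *str)
{
	int _shift = 0;

	if (kstrtoint(str, 0, &_shift))
		pr_warn("Unable to set scheduler thermal pressure decay shift parameter\n");

	sched_thermal_decay_shift = clamp(_shift, 0, 10);
	return 1;
}
__setup("sched_thermal_decay_shift=", setup_sched_thermal_decay_shift);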
This commit is contained in:
parent 6317a54ef1
commit 22292cb5e0
@@ -85,7 +85,7 @@ pkgname=("${pkgbase}" "${pkgbase}-headers")
_basekernel=5.7
_sub=0
pkgver="${_basekernel}"."${_sub}"
pkgrel=1
pkgrel=2
pkgdesc='Linux-tkg'
arch=('x86_64') # no i686 in here
url="http://www.kernel.org/"
@@ -135,7 +135,7 @@ sha256sums=('de8163bb62f822d84f7a3983574ec460060bf013a78ff79cd7c979ff1ec1d7e0'
'cd225e86d72eaf6c31ef3d7b20df397f4cc44ddd04389850691292cdf292b204'
'9fad4a40449e09522899955762c8928ae17f4cdaa16e01239fd12592e9d58177'
'965a517a283f265a012545fbb5cc9e516efc9f6166d2aa1baf7293a32a1086b7'
'70c729750e115bc50a61c3ebed20fa7c81867003064470f592084aae8718fe80'
'af3317abac1a4aa03bcbf4717a60975e989618f95caf1ccf20e15d6b3b84a191'
'49262ce4a8089fa70275aad742fc914baa28d9c384f710c9a62f64796d13e104')

export KBUILD_BUILD_HOST=archlinux
@@ -1,8 +1,8 @@
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index c07815d230bc..468c45b89114 100644
index 7bc83f3d9bdf..e549b4d53746 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -436,6 +436,11 @@
@@ -438,6 +438,11 @@
embedded devices based on command line input.
See Documentation/block/cmdline-partition.rst
@@ -15,12 +15,12 @@ index c07815d230bc..468c45b89114 100644
Values larger than 10 seconds (10000) are changed to
no delay (0).
diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
index def074807cee..e4bc9350f192 100644
index 0d427fd10941..e0e112c68fa5 100644
--- a/Documentation/admin-guide/sysctl/kernel.rst
+++ b/Documentation/admin-guide/sysctl/kernel.rst
@@ -1173,3 +1174,13 @@ is 10 seconds.
@@ -1230,3 +1230,13 @@ is 10 seconds.
The softlockup threshold is (2 * watchdog_thresh). Setting this
The softlockup threshold is (``2 * watchdog_thresh``). Setting this
tunable to zero will disable lockup detection altogether.
+
+yield_type:
@@ -165,7 +165,7 @@ index f18d5067cd0f..fe489fc01c73 100644
* Frequency of the spu scheduler tick. By default we do one SPU scheduler
* tick for every 10 CPU scheduler ticks.
diff --git a/fs/proc/base.c b/fs/proc/base.c
index c7c64272b0fa..3994241745ae 100644
index eb2255e95f62..62b8cedbccb6 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -479,7 +479,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
@@ -191,10 +191,10 @@ index 8874f681b056..59eb72bf7d5f 100644
[RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \
}
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 04278493bf15..7b5838418378 100644
index 4418f5cb8324..dc8799c314c9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -649,13 +649,18 @@ struct task_struct {
@@ -652,13 +652,18 @@ struct task_struct {
unsigned int flags;
unsigned int ptrace;
@@ -214,7 +214,7 @@ index 04278493bf15..7b5838418378 100644
unsigned int wakee_flips;
unsigned long wakee_flip_decay_ts;
struct task_struct *last_wakee;
@@ -669,6 +674,7 @@ struct task_struct {
@@ -672,6 +677,7 @@ struct task_struct {
*/
int recent_used_cpu;
int wake_cpu;
@@ -222,7 +222,7 @@ index 04278493bf15..7b5838418378 100644
#endif
int on_rq;
@@ -677,13 +683,23 @@ struct task_struct {
@@ -680,13 +686,23 @@ struct task_struct {
int normal_prio;
unsigned int rt_priority;
@@ -247,7 +247,7 @@ index 04278493bf15..7b5838418378 100644
#ifdef CONFIG_UCLAMP_TASK
/* Clamp values requested for a scheduling entity */
@@ -1298,6 +1314,15 @@ struct task_struct {
@@ -1306,6 +1322,15 @@ struct task_struct {
*/
};
@@ -336,10 +336,10 @@ index e5af028c08b4..6387c8ea9832 100644
}
diff --git a/init/Kconfig b/init/Kconfig
index 4f717bfdbfe2..ce4fb27057ee 100644
index 74a5ac65644f..850f730faef5 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -698,9 +698,20 @@ config GENERIC_SCHED_CLOCK
@@ -689,9 +689,20 @@ config GENERIC_SCHED_CLOCK
menu "Scheduler features"
@@ -360,7 +360,7 @@ index 4f717bfdbfe2..ce4fb27057ee 100644
help
This feature enables the scheduler to track the clamped utilization
of each CPU based on RUNNABLE tasks scheduled on that CPU.
@@ -786,6 +797,7 @@ config NUMA_BALANCING
@@ -777,6 +788,7 @@ config NUMA_BALANCING
depends on ARCH_SUPPORTS_NUMA_BALANCING
depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
depends on SMP && NUMA && MIGRATION
@@ -368,7 +368,7 @@ index 4f717bfdbfe2..ce4fb27057ee 100644
help
This option adds support for automatic NUMA aware memory/task placement.
The mechanism is quite primitive and is based on migrating memory when
@@ -887,7 +899,7 @@ menuconfig CGROUP_SCHED
@@ -878,7 +890,7 @@ menuconfig CGROUP_SCHED
bandwidth allocation to such task groups. It uses cgroups to group
tasks.
@@ -377,7 +377,7 @@ index 4f717bfdbfe2..ce4fb27057ee 100644
config FAIR_GROUP_SCHED
bool "Group scheduling for SCHED_OTHER"
depends on CGROUP_SCHED
@@ -1142,6 +1154,7 @@ config CHECKPOINT_RESTORE
@@ -1134,6 +1146,7 @@ config CHECKPOINT_RESTORE
config SCHED_AUTOGROUP
bool "Automatic process group scheduling"
@@ -386,10 +386,10 @@ index 4f717bfdbfe2..ce4fb27057ee 100644
select CGROUP_SCHED
select FAIR_GROUP_SCHED
diff --git a/init/init_task.c b/init/init_task.c
index 9e5cbe5eab7b..c293de91d90f 100644
index bd403ed3e418..530a8cfc2c43 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -66,9 +66,15 @@ struct task_struct init_task
@@ -67,9 +67,15 @@ struct task_struct init_task
.stack = init_stack,
.usage = REFCOUNT_INIT(2),
.flags = PF_KTHREAD,
@@ -405,7 +405,7 @@ index 9e5cbe5eab7b..c293de91d90f 100644
.policy = SCHED_NORMAL,
.cpus_ptr = &init_task.cpus_mask,
.cpus_mask = CPU_MASK_ALL,
@@ -78,6 +84,12 @@ struct task_struct init_task
@@ -79,6 +85,12 @@ struct task_struct init_task
.restart_block = {
.fn = do_no_restart_syscall,
},
@@ -418,7 +418,7 @@ index 9e5cbe5eab7b..c293de91d90f 100644
.se = {
.group_node = LIST_HEAD_INIT(init_task.se.group_node),
},
@@ -85,6 +97,7 @@ struct task_struct init_task
@@ -86,6 +98,7 @@ struct task_struct init_task
.run_list = LIST_HEAD_INIT(init_task.rt.run_list),
.time_slice = RR_TIMESLICE,
},
@@ -427,10 +427,10 @@ index 9e5cbe5eab7b..c293de91d90f 100644
#ifdef CONFIG_SMP
.pushable_tasks = PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO),
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 58f5073acff7..9301e25986d3 100644
index 729d3a5c772e..88a05ddd5527 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -632,7 +632,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
@@ -636,7 +636,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
return ret;
}
@@ -439,7 +439,7 @@ index 58f5073acff7..9301e25986d3 100644
/*
* Helper routine for generate_sched_domains().
* Do cpusets a, b have overlapping effective cpus_allowed masks?
@@ -1005,7 +1005,7 @@ static void rebuild_sched_domains_locked(void)
@@ -1009,7 +1009,7 @@ static void rebuild_sched_domains_locked(void)
/* Have scheduler rebuild the domains */
partition_and_rebuild_sched_domains(ndoms, doms, attr);
}
@@ -462,10 +462,10 @@ index 27725754ac99..769d773c7182 100644
d->cpu_count += t1;
diff --git a/kernel/exit.c b/kernel/exit.c
index 0b81b26a872a..0832436d88a0 100644
index ce2a75bc0ade..f0f864bc1ab9 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -131,7 +131,7 @@ static void __exit_signal(struct task_struct *tsk)
@@ -122,7 +122,7 @@ static void __exit_signal(struct task_struct *tsk)
sig->curr_target = next_thread(tsk);
}
@@ -474,7 +474,7 @@ index 0b81b26a872a..0832436d88a0 100644
sizeof(unsigned long long));
/*
@@ -152,7 +152,7 @@ static void __exit_signal(struct task_struct *tsk)
@@ -143,7 +143,7 @@ static void __exit_signal(struct task_struct *tsk)
sig->inblock += task_io_get_inblock(tsk);
sig->oublock += task_io_get_oublock(tsk);
task_io_accounting_add(&sig->ioac, &tsk->ioac);
@@ -500,7 +500,7 @@ index f6310f848f34..3ad290e9fed8 100644
"%s: %s:%d is running\n", __func__, task->comm,
task->pid);
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 851bbb10819d..019fdab7e329 100644
index c9f090d64f00..063d15a1ab8b 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -229,7 +229,7 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
@@ -563,10 +563,10 @@ index 21fb5a5662b5..ac31239aa51a 100644
obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
diff --git a/kernel/sched/bmq.c b/kernel/sched/bmq.c
new file mode 100644
index 000000000000..10560f7720e2
index 000000000000..e4a8da074702
--- /dev/null
+++ b/kernel/sched/bmq.c
@@ -0,0 +1,6026 @@
@@ -0,0 +1,6024 @@
+/*
+ * kernel/sched/bmq.c
+ *
@@ -639,7 +639,7 @@ index 000000000000..10560f7720e2
+
+static inline void print_scheduler_version(void)
+{
+ printk(KERN_INFO "bmq: BMQ CPU Scheduler 5.6-r4 by Alfred Chen.\n");
+ printk(KERN_INFO "bmq: BMQ CPU Scheduler 5.7-r0 by Alfred Chen.\n");
+}
+
+/**
@@ -1497,7 +1497,6 @@ index 000000000000..10560f7720e2
+
+ raw_spin_lock(&rq->lock);
+ __hrtick_restart(rq);
+ rq->hrtick_csd_pending = 0;
+ raw_spin_unlock(&rq->lock);
+}
+
@@ -1521,12 +1520,10 @@ index 000000000000..10560f7720e2
+
+ hrtimer_set_expires(timer, time);
+
+ if (rq == this_rq()) {
+ if (rq == this_rq())
+ __hrtick_restart(rq);
+ } else if (!rq->hrtick_csd_pending) {
+ else
+ smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
+ rq->hrtick_csd_pending = 1;
+ }
+}
+
+#else
@@ -1550,8 +1547,6 @@ index 000000000000..10560f7720e2
+static void hrtick_rq_init(struct rq *rq)
+{
+#ifdef CONFIG_SMP
+ rq->hrtick_csd_pending = 0;
+
+ rq->hrtick_csd.flags = 0;
+ rq->hrtick_csd.func = __hrtick_start;
+ rq->hrtick_csd.info = rq;
@@ -3399,6 +3394,7 @@ index 000000000000..10560f7720e2
+ int cpu __maybe_unused = smp_processor_id();
+ struct rq *rq = cpu_rq(cpu);
+
+ arch_scale_freq_tick();
+ sched_clock_tick();
+
+ raw_spin_lock(&rq->lock);
@@ -3569,7 +3565,6 @@ index 000000000000..10560f7720e2
+ if (cpu_is_offline(cpu))
+ goto out_unlock;
+
+ curr = rq->curr;
+ update_rq_clock(rq);
+ if (!is_idle_task(curr)) {
+ /*
@@ -4060,6 +4055,8 @@ index 000000000000..10560f7720e2
+ */
+ ++*switch_count;
+
+ psi_sched_switch(prev, next, !task_on_rq_queued(prev));
+
+ trace_sched_switch(preempt, prev, next);
+
+ /* Also unlocks the rq: */
@@ -4098,7 +4095,8 @@ index 000000000000..10560f7720e2
+ * it wants to wake up a task to maintain concurrency.
+ * As this function is called inside the schedule() context,
+ * we disable preemption to avoid it calling schedule() again
+ * in the possible wakeup of a kworker.
+ * in the possible wakeup of a kworker and because wq_worker_sleeping()
+ * requires it.
+ */
+ if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
+ preempt_disable();
@@ -6632,10 +6630,10 @@ index 000000000000..375a1a805d86
+{}
diff --git a/kernel/sched/bmq_sched.h b/kernel/sched/bmq_sched.h
new file mode 100644
index 000000000000..fca42b270620
index 000000000000..59b3c43c7d9f
--- /dev/null
+++ b/kernel/sched/bmq_sched.h
@@ -0,0 +1,537 @@
@@ -0,0 +1,547 @@
+#ifndef BMQ_SCHED_H
+#define BMQ_SCHED_H
+
@@ -6779,7 +6777,6 @@ index 000000000000..fca42b270620
+
+#ifdef CONFIG_SCHED_HRTICK
+#ifdef CONFIG_SMP
+ int hrtick_csd_pending;
+ call_single_data_t hrtick_csd;
+#endif
+ struct hrtimer hrtick_timer;
@@ -6867,6 +6864,13 @@ index 000000000000..fca42b270620
+
+#endif /* CONFIG_SMP */
+
+#ifndef arch_scale_freq_tick
+static __always_inline
+void arch_scale_freq_tick(void)
+{
+}
+#endif
+
+#ifndef arch_scale_freq_capacity
+static __always_inline
+unsigned long arch_scale_freq_capacity(int cpu)
@@ -7172,6 +7176,10 @@ index 000000000000..fca42b270620
+ return nr_cpu_ids;
+}
+#endif
+
+void swake_up_all_locked(struct swait_queue_head *q);
+void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
+
+#endif /* BMQ_SCHED_H */
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 7fbaee24c824..af350d0afa56 100644
@@ -7237,7 +7245,7 @@ index 7fbaee24c824..af350d0afa56 100644
+#endif
#endif
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index cff3e656566d..3f6feaaff3c0 100644
index ff9435dee1df..0ee9967d2d74 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -122,7 +122,7 @@ void account_user_time(struct task_struct *p, u64 cputime)
@@ -7303,10 +7311,10 @@ index b743bf38f08f..5b19fde0c0ca 100644
};
+#endif
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index bd006b79b360..a3bf3c7301d5 100644
index b647d04d9c8b..78fcac8198ab 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -250,6 +250,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load, unsigned long runna
@@ -250,6 +250,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
}
@@ -7314,16 +7322,16 @@ index bd006b79b360..a3bf3c7301d5 100644
/*
* sched_entity:
*
@@ -366,6 +367,7 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
@@ -367,6 +368,7 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
return 0;
}
+#endif
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
#ifdef CONFIG_SCHED_THERMAL_PRESSURE
/*
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
index afff644da065..4da52afaeff8 100644
index eb034d9f024d..48cddd35444d 100644
--- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h
@@ -1,11 +1,13 @@
@@ -7338,9 +7346,9 @@ index afff644da065..4da52afaeff8 100644
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
+#endif
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
int update_irq_load_avg(struct rq *rq, u64 running);
@@ -17,6 +19,7 @@ update_irq_load_avg(struct rq *rq, u64 running)
#ifdef CONFIG_SCHED_THERMAL_PRESSURE
int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
@@ -37,6 +39,7 @@ update_irq_load_avg(struct rq *rq, u64 running)
}
#endif
@@ -7348,7 +7356,7 @@ index afff644da065..4da52afaeff8 100644
/*
* When a task is dequeued, its estimated utilization should not be update if
* its util_avg has not been updated at least once.
@@ -137,9 +140,11 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
@@ -157,9 +160,11 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
return rq_clock_pelt(rq_of(cfs_rq));
}
#endif
@@ -7360,16 +7368,16 @@ index afff644da065..4da52afaeff8 100644
static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
@@ -157,6 +162,7 @@ update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
@@ -177,6 +182,7 @@ update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
return 0;
}
+#endif
static inline int
update_irq_load_avg(struct rq *rq, u64 running)
update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 9ea647835fd6..f38d1343e2bf 100644
index db3a57675ccf..f9002d310a06 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2,6 +2,10 @@
@@ -7383,19 +7391,16 @@ index 9ea647835fd6..f38d1343e2bf 100644
#include <linux/sched.h>
#include <linux/sched/autogroup.h>
@@ -2492,6 +2496,12 @@ static inline bool is_per_cpu_kthread(struct task_struct *p)
return true;
}
#endif
@@ -2546,3 +2550,9 @@ static inline bool is_per_cpu_kthread(struct task_struct *p)
void swake_up_all_locked(struct swait_queue_head *q);
void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
+
+static inline int task_running_nice(struct task_struct *p)
+{
+ return (task_nice(p) > 0);
+}
+#endif /* !CONFIG_SCHED_BMQ */
void swake_up_all_locked(struct swait_queue_head *q);
void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
index 750fb3c67eed..0cc040a28d3f 100644
--- a/kernel/sched/stats.c
@@ -7428,7 +7433,7 @@ index 750fb3c67eed..0cc040a28d3f 100644
}
return 0;
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index dfb64c08a407..16f6a8c84cfc 100644
index 8344757bba6e..a613249f2375 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -4,6 +4,7 @@
DEFINE_MUTEX(sched_domains_mutex);
/* Protected by sched_domains_mutex: */
@@ -1182,8 +1183,10 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
@@ -1190,8 +1191,10 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
*/
static int default_relax_domain_level = -1;
@@ -7439,7 +7444,7 @@ index dfb64c08a407..16f6a8c84cfc 100644
static int __init setup_relax_domain_level(char *str)
{
if (kstrtoint(str, 0, &default_relax_domain_level))
@@ -1425,6 +1428,7 @@ sd_init(struct sched_domain_topology_level *tl,
@@ -1424,6 +1427,7 @@ sd_init(struct sched_domain_topology_level *tl,
return sd;
}
@@ -7450,7 +7455,7 @@ index dfb64c08a407..16f6a8c84cfc 100644
/*
* Topology list, bottom-up.
@@ -1454,6 +1458,7 @@ void set_sched_topology(struct sched_domain_topology_level *tl)
@@ -1453,6 +1457,7 @@ void set_sched_topology(struct sched_domain_topology_level *tl)
sched_domain_topology = tl;
}
@@ -7458,7 +7463,7 @@ index dfb64c08a407..16f6a8c84cfc 100644
#ifdef CONFIG_NUMA
static const struct cpumask *sd_numa_mask(int cpu)
@@ -2328,3 +2333,17 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
@@ -2327,3 +2332,17 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
mutex_unlock(&sched_domains_mutex);
}
@@ -7485,7 +7490,7 @@ index dfb64c08a407..16f6a8c84cfc 100644
+#endif /* CONFIG_NUMA */
+#endif
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ad5b88a53c5a..c5f1c1995541 100644
index 8a176d8727a3..5754e28ce21a 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -132,6 +132,10 @@ static unsigned long one_ul = 1;
#ifdef CONFIG_PRINTK
static int ten_thousand = 10000;
#endif
@@ -300,7 +304,7 @@ static struct ctl_table sysctl_base_table[] = {
@@ -288,7 +292,7 @@ static struct ctl_table sysctl_base_table[] = {
{ }
};
@@ -7508,7 +7513,7 @@ index ad5b88a53c5a..c5f1c1995541 100644
static int min_sched_granularity_ns = 100000; /* 100 usecs */
static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
static int min_wakeup_granularity_ns; /* 0 usecs */
@@ -317,6 +321,7 @@ static int max_extfrag_threshold = 1000;
@@ -305,6 +309,7 @@ static int max_extfrag_threshold = 1000;
#endif
static struct ctl_table kern_table[] = {
@@ -7516,7 +7521,7 @@ index ad5b88a53c5a..c5f1c1995541 100644
{
.procname = "sched_child_runs_first",
.data = &sysctl_sched_child_runs_first,
@@ -498,6 +503,7 @@ static struct ctl_table kern_table[] = {
@@ -486,6 +491,7 @@ static struct ctl_table kern_table[] = {
.extra2 = SYSCTL_ONE,
},
#endif
@@ -7524,7 +7529,7 @@ index ad5b88a53c5a..c5f1c1995541 100644
#ifdef CONFIG_PROVE_LOCKING
{
.procname = "prove_locking",
@@ -1061,6 +1067,17 @@ static struct ctl_table kern_table[] = {
@@ -1049,6 +1055,17 @@ static struct ctl_table kern_table[] = {
.proc_handler = proc_dointvec,
},
#endif
@@ -7543,10 +7548,10 @@ index ad5b88a53c5a..c5f1c1995541 100644
{
.procname = "spin_retry",
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 8ff6da77a01f..61df797efb04 100644
index 2fd3b3fa68bf..8c417f3ea628 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -226,7 +226,7 @@ static void task_sample_cputime(struct task_struct *p, u64 *samples)
@@ -236,7 +236,7 @@ static void task_sample_cputime(struct task_struct *p, u64 *samples)
u64 stime, utime;
task_cputime(p, &utime, &stime);
@@ -7555,7 +7560,7 @@ index 8ff6da77a01f..61df797efb04 100644
}
static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
@@ -796,6 +796,7 @@ static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
@@ -806,6 +806,7 @@ static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
}
}
@@ -7563,7 +7568,7 @@ index 8ff6da77a01f..61df797efb04 100644
static inline void check_dl_overrun(struct task_struct *tsk)
{
if (tsk->dl.dl_overrun) {
@@ -803,6 +804,7 @@ static inline void check_dl_overrun(struct task_struct *tsk)
@@ -813,6 +814,7 @@ static inline void check_dl_overrun(struct task_struct *tsk)
__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
}
}
@@ -7571,7 +7576,7 @@ index 8ff6da77a01f..61df797efb04 100644
static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
{
@@ -830,8 +832,10 @@ static void check_thread_timers(struct task_struct *tsk,
@@ -840,8 +842,10 @@ static void check_thread_timers(struct task_struct *tsk,
u64 samples[CPUCLOCK_MAX];
unsigned long soft;
@@ -7582,7 +7587,7 @@ index 8ff6da77a01f..61df797efb04 100644
if (expiry_cache_is_inactive(pct))
return;
@@ -845,7 +849,7 @@ static void check_thread_timers(struct task_struct *tsk,
@@ -855,7 +859,7 @@ static void check_thread_timers(struct task_struct *tsk,
soft = task_rlimit(tsk, RLIMIT_RTTIME);
if (soft != RLIM_INFINITY) {
/* Task RT timeout is accounted in jiffies. RTTIME is usec */
@@ -7591,7 +7596,7 @@ index 8ff6da77a01f..61df797efb04 100644
unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
/* At the hard limit, send SIGKILL. No further action. */
@@ -1099,8 +1103,10 @@ static inline bool fastpath_timer_check(struct task_struct *tsk)
@@ -1091,8 +1095,10 @@ static inline bool fastpath_timer_check(struct task_struct *tsk)
return true;
}