From f7f49141a5dbe9c99d78196b58c44307fb2e6be3 Mon Sep 17 00:00:00 2001
From: Tk-Glitch <ti3nou@gmail.com>
Date: Wed, 4 Jul 2018 04:30:08 +0200
Subject: glitched

diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
index 87f1fc9..b3be470 100755
--- a/scripts/mkcompile_h
+++ b/scripts/mkcompile_h
@@ -50,8 +50,8 @@ else
fi

UTS_VERSION="#$VERSION"
-CONFIG_FLAGS=""
-if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
+CONFIG_FLAGS="TKG"
+if [ -n "$SMP" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS SMP"; fi
if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP"

diff --git a/fs/dcache.c b/fs/dcache.c
index 2acfc69878f5..3f1131431e06 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -69,7 +69,7 @@
* If no ancestor relationship:
* arbitrary, since it's serialized on rename_lock
*/
-int sysctl_vfs_cache_pressure __read_mostly = 100;
+int sysctl_vfs_cache_pressure __read_mostly = 50;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
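
The dcache tweak above maps straight to the vm.vfs_cache_pressure sysctl, so the new default is easy to confirm at runtime, or to try on an unpatched kernel without rebuilding. A minimal check; the output shown assumes this patch is applied:

    $ sysctl vm.vfs_cache_pressure
    vm.vfs_cache_pressure = 50
    $ sudo sysctl -w vm.vfs_cache_pressure=50   # equivalent runtime setting on a stock kernel

Lower values make the kernel prefer keeping dentry/inode caches over reclaiming them, which helps workloads that repeatedly touch many small files.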

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 211890edf37e..37121563407d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -41,7 +41,7 @@ const_debug unsigned int sysctl_sched_features =
* Number of tasks to iterate in a single balance run.
* Limited because this is done with IRQs disabled.
*/
-const_debug unsigned int sysctl_sched_nr_migrate = 32;
+const_debug unsigned int sysctl_sched_nr_migrate = 128;

/*
* period over which we average the RT time consumption, measured
@@ -61,9 +61,9 @@ __read_mostly int scheduler_running;

/*
* part of the period that we allow rt tasks to run in us.
- * default: 0.95s
+ * XanMod default: 0.98s
*/
-int sysctl_sched_rt_runtime = 950000;
+int sysctl_sched_rt_runtime = 980000;

/*
* __task_rq_lock - lock the rq @p resides on.
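
Both values are visible at runtime: sched_rt_runtime_us is a standard sysctl, and sched_nr_migrate appears under /proc/sys/kernel/ on kernels built with CONFIG_SCHED_DEBUG. A quick sketch, outputs assuming this patch:

    $ cat /proc/sys/kernel/sched_rt_runtime_us
    980000
    $ cat /proc/sys/kernel/sched_nr_migrate
    128

Raising the RT runtime to 0.98 s of each 1 s sched_rt_period_us leaves non-realtime tasks as little as 2% guaranteed CPU while RT tasks saturate a core — the responsiveness-over-throughput trade these patches generally make.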

diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index 71f39410691b..288f9679e883 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -54,7 +54,7 @@ scm_version()
# If only the short version is requested, don't bother
# running further git commands
if $short; then
- echo "+"
+ # echo "+"
return
fi
# If we are past a tagged commit (like

From f85ed068b4d0e6c31edce8574a95757a60e58b87 Mon Sep 17 00:00:00 2001
From: Etienne Juvigny <Ti3noU@gmail.com>
Date: Mon, 3 Sep 2018 17:36:25 +0200
Subject: Zenify & stuff

diff --git a/init/Kconfig b/init/Kconfig
index b4daad2bac23..c1e59dc04209 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1244,7 +1244,6 @@ config CC_OPTIMIZE_FOR_PERFORMANCE

config CC_OPTIMIZE_FOR_PERFORMANCE_O3
bool "Optimize more for performance (-O3)"
- depends on ARC
help
Choosing this option will pass "-O3" to your compiler to optimize
the kernel yet more for performance.

diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 4f32c4062fb6..c0bf039e1b40 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -721,6 +721,7 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
struct sockaddr _sockaddr;
struct sockaddr_in _sockaddr_in;
struct sockaddr_in6 _sockaddr_in6;
+ struct sockaddr_ib _sockaddr_ib;
} sgid_addr, dgid_addr;
int ret;

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 79226ca8f80f..2a30060e7e1d 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -47,7 +47,11 @@ struct blk_queue_stats;
struct blk_stat_callback;

#define BLKDEV_MIN_RQ 4
+#ifdef CONFIG_ZENIFY
+#define BLKDEV_MAX_RQ 512
+#else
#define BLKDEV_MAX_RQ 128 /* Default maximum */
+#endif

/* Must be consistent with blk_mq_poll_stats_bkt() */
#define BLK_MQ_POLL_STATS_BKTS 16
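
BLKDEV_MAX_RQ only seeds the default queue depth; the effective value is per device and driver, and is exposed as nr_requests in sysfs. A sketch for checking or raising it by hand (sda is a placeholder device; 512 assumes a queue that took the CONFIG_ZENIFY default):

    $ cat /sys/block/sda/queue/nr_requests
    512
    $ echo 512 | sudo tee /sys/block/sda/queue/nr_requests   # manual equivalent on a stock kernel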

diff --git a/init/Kconfig b/init/Kconfig
index 041f3a022122..5ed70eb1ad3a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -45,6 +45,38 @@ config THREAD_INFO_IN_TASK

menu "General setup"

+config ZENIFY
+ bool "A selection of patches from Zen/Liquorix kernel and additional tweaks for a better gaming experience"
+ default y
+ help
+ Tunes the kernel for responsiveness at the cost of throughput and power usage.
+
+ --- Virtual Memory Subsystem ---------------------------
+
+ Mem dirty before bg writeback..: 10 % -> 20 %
+ Mem dirty before sync writeback: 20 % -> 50 %
+
+ --- Block Layer ----------------------------------------
+
+ Queue depth...............: 128 -> 512
+ Default MQ scheduler......: mq-deadline -> bfq
+
+ --- CFS CPU Scheduler ----------------------------------
+
+ Scheduling latency.............: 6 -> 3 ms
+ Minimal granularity............: 0.75 -> 0.3 ms
+ Wakeup granularity.............: 1 -> 0.5 ms
+ CPU migration cost.............: 0.5 -> 0.25 ms
+ Bandwidth slice size...........: 5 -> 3 ms
+ Ondemand fine upscaling limit..: 95 % -> 85 %
+
+ --- MuQSS CPU Scheduler --------------------------------
+
+ Scheduling interval............: 6 -> 3 ms
+ ISO task max realtime use......: 70 % -> 25 %
+ Ondemand coarse upscaling limit: 80 % -> 45 %
+ Ondemand fine upscaling limit..: 95 % -> 45 %
+
config BROKEN
bool

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2f0a0be4d344..bada807c7e59 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -37,8 +37,13 @@
*
* (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
*/
+#ifdef CONFIG_ZENIFY
+unsigned int sysctl_sched_latency = 3000000ULL;
+static unsigned int normalized_sysctl_sched_latency = 3000000ULL;
+#else
unsigned int sysctl_sched_latency = 6000000ULL;
static unsigned int normalized_sysctl_sched_latency = 6000000ULL;
+#endif

/*
* The initial- and re-scaling of tunables is configurable
@@ -58,13 +63,22 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_L
*
* (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
*/
+#ifdef CONFIG_ZENIFY
+unsigned int sysctl_sched_min_granularity = 300000ULL;
+static unsigned int normalized_sysctl_sched_min_granularity = 300000ULL;
+#else
unsigned int sysctl_sched_min_granularity = 750000ULL;
static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
+#endif

/*
* This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
*/
+#ifdef CONFIG_ZENIFY
+static unsigned int sched_nr_latency = 10;
+#else
static unsigned int sched_nr_latency = 8;
+#endif

/*
* After fork, child runs first. If set to 0 (default) then
@@ -81,10 +95,17 @@ unsigned int sysctl_sched_child_runs_first __read_mostly;
*
* (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
*/
+#ifdef CONFIG_ZENIFY
+unsigned int sysctl_sched_wakeup_granularity = 500000UL;
+static unsigned int normalized_sysctl_sched_wakeup_granularity = 500000UL;
+
+const_debug unsigned int sysctl_sched_migration_cost = 50000UL;
+#else
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
+#endif

#ifdef CONFIG_SMP
/*
@@ -107,8 +128,12 @@ int __weak arch_asym_cpu_priority(int cpu)
*
* (default: 5 msec, units: microseconds)
*/
+#ifdef CONFIG_ZENIFY
+unsigned int sysctl_sched_cfs_bandwidth_slice = 3000UL;
+#else
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif
+#endif

/*
* The margin used when comparing utilization with CPU capacity:
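
With CONFIG_SCHED_DEBUG these CFS knobs surface under /proc/sys/kernel/, but note the comments above: the base constants are multiplied by 1 + ilog(ncpus) under the default logarithmic scaling. A sketch for a hypothetical 4-core machine with the patch applied (factor 1 + log2(4) = 3):

    $ cat /proc/sys/kernel/sched_latency_ns
    9000000
    $ cat /proc/sys/kernel/sched_min_granularity_ns
    900000

The scaling factor applies equally to patched and stock constants, so halving the base latency still halves the effective target latency.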

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 337c6afb3345..9315e358f292 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -71,7 +71,11 @@ static long ratelimit_pages = 32;
/*
* Start background writeback (via writeback threads) at this percentage
*/
+#ifdef CONFIG_ZENIFY
+int dirty_background_ratio = 20;
+#else
int dirty_background_ratio = 10;
+#endif

/*
* dirty_background_bytes starts at 0 (disabled) so that it is a function of
@@ -88,7 +92,11 @@ int vm_highmem_is_dirtyable;
/*
* The generator of dirty data starts writeback at this percentage
*/
+#ifdef CONFIG_ZENIFY
+int vm_dirty_ratio = 50;
+#else
int vm_dirty_ratio = 20;
+#endif

/*
* vm_dirty_bytes starts at 0 (disabled) so that it is a function of
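
Both thresholds are ordinary sysctls, so the patched defaults can be verified, or replicated on a stock kernel, at runtime. Output below assumes CONFIG_ZENIFY=y:

    $ sysctl vm.dirty_background_ratio vm.dirty_ratio
    vm.dirty_background_ratio = 20
    vm.dirty_ratio = 50

Allowing up to half of memory to hold dirty pages before synchronous writeback favors bursty desktop I/O; machines that cannot afford to lose much buffered data should stick with smaller values.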

diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 80dad301361d..42b7fa7d01f8 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -702,6 +702,9 @@ choice
config DEFAULT_VEGAS
bool "Vegas" if TCP_CONG_VEGAS=y

+ config DEFAULT_YEAH
+ bool "YeAH" if TCP_CONG_YEAH=y
+
config DEFAULT_VENO
bool "Veno" if TCP_CONG_VENO=y

@@ -735,6 +738,7 @@ config DEFAULT_TCP_CONG
default "htcp" if DEFAULT_HTCP
default "hybla" if DEFAULT_HYBLA
default "vegas" if DEFAULT_VEGAS
+ default "yeah" if DEFAULT_YEAH
default "westwood" if DEFAULT_WESTWOOD
default "veno" if DEFAULT_VENO
default "reno" if DEFAULT_RENO

From: Nick Desaulniers <ndesaulniers@google.com>
Date: Mon, 24 Dec 2018 13:37:41 +0200
Subject: include/linux/compiler*.h: define asm_volatile_goto

asm_volatile_goto should also be defined for other compilers that
support asm goto.

Fixes commit 815f0dd ("include/linux/compiler*.h: make compiler-*.h
mutually exclusive").

Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
Signed-off-by: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>

diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index ba814f1..e77eeb0 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -188,6 +188,10 @@ struct ftrace_likely_data {
#define asm_volatile_goto(x...) asm goto(x)
#endif

+#ifndef asm_volatile_goto
+#define asm_volatile_goto(x...) asm goto(x)
+#endif
+
/* Are two types/vars the same type (ignoring qualifiers)? */
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
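
Whether a toolchain accepts asm goto can be probed from the shell, in the same spirit as the kernel's scripts/gcc-goto.sh build-time check. A minimal sketch (cc stands in for whichever compiler builds the kernel):

    $ echo 'int main(void) { asm goto("" : : : : out); out: return 0; }' \
          | cc -x c - -c -o /dev/null && echo "asm goto supported"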

From: Andy Lavr <andy.lavr@gmail.com>
Date: Mon, 24 Dec 2018 14:57:47 +0200
Subject: avl: Use [defer+madvise] as default khugepaged defrag strategy

For some reason, the default strategy to respond to THP fault fallbacks
is still just madvise, meaning stall if the program wants transparent
hugepages, but don't trigger a background reclaim / compaction if THP
begins to fail allocations. This creates a snowball effect where we
still use the THP code paths, but we almost always fail once a system
has been active and busy for a while.

The option "defer" was created for interactive systems where THP can
still improve performance. If we have to fall back to a regular page due
to an allocation failure or anything else, we will trigger a background
reclaim and compaction so future THP attempts succeed and previous
attempts eventually have their smaller pages combined without stalling
running applications.

We still want madvise to stall applications that explicitly want THP,
so defer+madvise _does_ make a ton of sense. Make it the default for
interactive systems, especially if the kernel maintainer left
transparent hugepages on "always".

Reasoning and details in the original patch:
https://lwn.net/Articles/711248/

Signed-off-by: Andy Lavr <andy.lavr@gmail.com>

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e84a10b..21d62b7 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -53,7 +53,11 @@ unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
+#ifdef CONFIG_AVL_INTERACTIVE
+ (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG)|
+#else
(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
+#endif
(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
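
The chosen strategy is visible in sysfs (the bracketed entry is active), and any THP-enabled kernel can switch to defer+madvise by hand, so the patched default is easy to verify or replicate:

    $ cat /sys/kernel/mm/transparent_hugepage/defrag
    always defer [defer+madvise] madvise never
    $ echo defer+madvise | sudo tee /sys/kernel/mm/transparent_hugepage/defrag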

diff --git a/net/sched/Kconfig b/net/sched/Kconfig
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -429,6 +429,9 @@
Select the queueing discipline that will be used by default
for all network devices.

+ config DEFAULT_CAKE
+ bool "Common Applications Kept Enhanced" if NET_SCH_CAKE
+
config DEFAULT_FQ
bool "Fair Queue" if NET_SCH_FQ

@@ -448,6 +451,7 @@
config DEFAULT_NET_SCH
string
default "pfifo_fast" if DEFAULT_PFIFO_FAST
+ default "cake" if DEFAULT_CAKE
default "fq" if DEFAULT_FQ
default "fq_codel" if DEFAULT_FQ_CODEL
default "sfq" if DEFAULT_SFQ

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a29043ea9..3fb219747 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -263,7 +263,7 @@ compound_page_dtor * const compound_page_dtors[] = {
#else
int watermark_boost_factor __read_mostly = 15000;
#endif
-int watermark_scale_factor = 10;
+int watermark_scale_factor = 200;

static unsigned long nr_kernel_pages __initdata;
static unsigned long nr_all_pages __initdata;
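
watermark_scale_factor is expressed in fractions of 10,000, so this change widens the gap between the min/low/high zone watermarks from 0.1 % to 2 % of each zone, waking kswapd earlier and giving it more headroom before allocations fall into direct reclaim. Runtime check (value assumes the patch):

    $ sysctl vm.watermark_scale_factor
    vm.watermark_scale_factor = 200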

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 80bb6408f..6c8b55cd1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -146,8 +146,7 @@ extern int mmap_rnd_compat_bits __read_mostly;
* not a hard limit any more. Although some userspace tools can be surprised by
* that.
*/
-#define MAPCOUNT_ELF_CORE_MARGIN (5)
-#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
+#define DEFAULT_MAX_MAP_COUNT (524288)

extern int sysctl_max_map_count;
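
The stock formula works out to USHRT_MAX - 5 = 65530 mappings per process, a ceiling that mmap-heavy games and allocators can exhaust. Because the limit is a plain sysctl, the patched value can also be adopted without a rebuild:

    $ sysctl vm.max_map_count
    vm.max_map_count = 524288
    $ sudo sysctl -w vm.max_map_count=524288   # stock-kernel equivalent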

From adb1f9df27f08e6488bcd80b1607987c6114a77a Mon Sep 17 00:00:00 2001
From: Alexandre Frade <admfrade@gmail.com>
Date: Mon, 25 Nov 2019 15:13:06 -0300
Subject: [PATCH] elevator: set default scheduler to bfq for blk-mq

Signed-off-by: Alexandre Frade <admfrade@gmail.com>
---
block/elevator.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/block/elevator.c b/block/elevator.c
index 076ba7308e65..81f89095aa77 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -623,15 +623,15 @@ static inline bool elv_support_iosched(struct request_queue *q)
}

/*
- * For single queue devices, default to using mq-deadline. If we have multiple
- * queues or mq-deadline is not available, default to "none".
+ * For single queue devices, default to using bfq. If we have multiple
+ * queues or bfq is not available, default to "none".
*/
static struct elevator_type *elevator_get_default(struct request_queue *q)
{
if (q->nr_hw_queues != 1)
return NULL;

- return elevator_get(q, "mq-deadline", false);
+ return elevator_get(q, "bfq", false);
}

/*
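
Since elevator_get() is called with try_loading=false here, BFQ must already be registered (CONFIG_IOSCHED_BFQ built in, or the module loaded before the device probes) or the queue falls back to "none". The active scheduler shows up bracketed in sysfs; sda is a placeholder:

    $ cat /sys/block/sda/queue/scheduler
    [bfq] mq-deadline kyber none
    $ echo bfq | sudo tee /sys/block/sda/queue/scheduler   # per-device switch on any kernel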

From c3ec05777c46e19a8a26d0fc4ca0c0db8a19de97 Mon Sep 17 00:00:00 2001
From: Alexandre Frade <admfrade@gmail.com>
Date: Fri, 10 May 2019 16:45:59 -0300
Subject: [PATCH] block: set rq_affinity = 2 for full multithreading I/O
requests

Signed-off-by: Alexandre Frade <admfrade@gmail.com>
---
include/linux/blkdev.h | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f3ea78b0c91c..4dbacc6b073b 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -621,7 +621,8 @@ struct request_queue {
#define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */

#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
- (1 << QUEUE_FLAG_SAME_COMP))
+ (1 << QUEUE_FLAG_SAME_COMP) | \
+ (1 << QUEUE_FLAG_SAME_FORCE))

void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
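
QUEUE_FLAG_SAME_COMP alone corresponds to rq_affinity = 1 (complete on a CPU in the submitter's group); adding QUEUE_FLAG_SAME_FORCE makes the default rq_affinity = 2, forcing completion onto the exact CPU that issued the request. The same behavior is one sysfs write away on a stock kernel; sda is a placeholder:

    $ cat /sys/block/sda/queue/rq_affinity
    2
    $ echo 2 | sudo tee /sys/block/sda/queue/rq_affinity   # stock-kernel equivalent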

From 8171d33d0b84a953649863538fdbe4c26c035e4f Mon Sep 17 00:00:00 2001
From: Alexandre Frade <admfrade@gmail.com>
Date: Fri, 10 May 2019 14:32:50 -0300
Subject: [PATCH] mm: set 2 megabytes for address_space-level file read-ahead
pages size

Signed-off-by: Alexandre Frade <admfrade@gmail.com>
---
include/linux/mm.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index a2adf95b3f9c..e804d9f7583a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2416,7 +2416,7 @@ int __must_check write_one_page(struct page *page);
void task_dirty_inc(struct task_struct *tsk);

/* readahead.c */
-#define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE)
+#define VM_READAHEAD_PAGES (SZ_2M / PAGE_SIZE)

int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
pgoff_t offset, unsigned long nr_to_read);
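
VM_READAHEAD_PAGES seeds each device's default readahead window, surfaced as read_ahead_kb: 128 KB stock versus 2048 KB with this patch (assuming 4 KiB pages). It is also tunable per device without a rebuild; sda is a placeholder:

    $ cat /sys/block/sda/queue/read_ahead_kb
    2048
    $ sudo blockdev --setra 4096 /dev/sda   # same 2 MiB window, in 512-byte sectors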

From de7119e3db9fdb4c704355854a02a7e9fad931d4 Mon Sep 17 00:00:00 2001
From: Steven Barrett <steven@liquorix.net>
Date: Wed, 15 Jan 2020 20:43:56 -0600
Subject: [PATCH] ZEN: intel-pstate: Implement "enable" parameter

If intel-pstate is compiled into the kernel, it will preempt the loading
of acpi-cpufreq so you can take advantage of hardware p-states without
any friction.

However, intel-pstate is not completely superior to cpufreq's ondemand
for one reason: there's no concept of an up_threshold property.

In ondemand, up_threshold essentially reduces the maximum utilization to
compare against, allowing you to hit max frequencies and turbo boost
from a much lower core utilization.

With intel-pstate, you have the concept of minimum and maximum
performance, but no tunable that lets you say "maximum frequency means
50% core utilization". For just this oversight, there are reasons you
may want ondemand.

Let's support setting "enable" in the kernel boot parameters. This lets
kernel maintainers include "intel_pstate=disable" in the static boot
parameters, while letting users of the kernel override this selection.
---
Documentation/admin-guide/kernel-parameters.txt | 3 +++
drivers/cpufreq/intel_pstate.c | 2 ++
2 files changed, 5 insertions(+)

diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index ade4e6ec23e03..0b613370d28d8 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1765,6 +1765,9 @@
disable
Do not enable intel_pstate as the default
scaling driver for the supported processors
+ enable
+ Enable intel_pstate in case "disable" was passed
+ previously in the kernel boot parameters
passive
Use intel_pstate as a scaling driver, but configure it
to work with generic cpufreq governors (instead of
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index d2fa3e9ccd97c..bd10cb02fc0ff 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2826,6 +2826,8 @@ static int __init intel_pstate_setup(char *str)
pr_info("HWP disabled\n");
no_hwp = 1;
}
+ if (!strcmp(str, "enable"))
+ no_load = 0;
if (!strcmp(str, "force"))
force_load = 1;
if (!strcmp(str, "hwp_only"))
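
In use, the new keyword is simply appended to the kernel command line, where a later "enable" overrides an earlier "disable". A sketch with GRUB as the bootloader (paths and existing options are illustrative):

    $ grep GRUB_CMDLINE_LINUX_DEFAULT /etc/default/grub
    GRUB_CMDLINE_LINUX_DEFAULT="quiet intel_pstate=disable intel_pstate=enable"
    $ sudo update-grub && sudo reboot
    $ cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_driver
    intel_pstate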