@@ -69,6 +69,7 @@
 
 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
 static DEFINE_MUTEX(pcp_batch_high_lock);
+#define MIN_PERCPU_PAGELIST_FRACTION	(8)
 
 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
 DEFINE_PER_CPU(int, numa_node);
@@ -4145,7 +4146,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
 	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
 #endif
 
-static int __meminit zone_batchsize(struct zone *zone)
+static int zone_batchsize(struct zone *zone)
 {
 #ifdef CONFIG_MMU
 	int batch;
@@ -4261,8 +4262,8 @@ static void pageset_set_high(struct per_cpu_pageset *p,
 	pageset_update(&p->pcp, high, batch);
 }
 
-static void __meminit pageset_set_high_and_batch(struct zone *zone,
-		struct per_cpu_pageset *pcp)
+static void pageset_set_high_and_batch(struct zone *zone,
+		struct per_cpu_pageset *pcp)
 {
 	if (percpu_pagelist_fraction)
 		pageset_set_high(pcp,
@@ -5881,23 +5882,38 @@ int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
 	void __user *buffer, size_t *length, loff_t *ppos)
 {
 	struct zone *zone;
-	unsigned int cpu;
+	int old_percpu_pagelist_fraction;
 	int ret;
 
+	mutex_lock(&pcp_batch_high_lock);
+	old_percpu_pagelist_fraction = percpu_pagelist_fraction;
+
 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
-	if (!write || (ret < 0))
-		return ret;
+	if (!write || ret < 0)
+		goto out;
+
+	/* Sanity checking to avoid pcp imbalance */
+	if (percpu_pagelist_fraction &&
+	    percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
+		percpu_pagelist_fraction = old_percpu_pagelist_fraction;
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* No change? */
+	if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
+		goto out;
 
-	mutex_lock(&pcp_batch_high_lock);
 	for_each_populated_zone(zone) {
-		unsigned long high;
-		high = zone->managed_pages / percpu_pagelist_fraction;
+		unsigned int cpu;
+
 		for_each_possible_cpu(cpu)
-			pageset_set_high(per_cpu_ptr(zone->pageset, cpu),
-					 high);
+			pageset_set_high_and_batch(zone,
+				per_cpu_ptr(zone->pageset, cpu));
 	}
+out:
 	mutex_unlock(&pcp_batch_high_lock);
-	return 0;
+	return ret;
 }
 
 int hashdist = HASHDIST_DEFAULT;
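The effect of the new MIN_PERCPU_PAGELIST_FRACTION check can be exercised from user space through /proc/sys/vm/percpu_pagelist_fraction. The snippet below is a minimal sketch, not part of this commit: it assumes the stock sysctl path and simply attempts a few writes, expecting a non-zero value below 8 to be rejected with EINVAL and a write of 0 to fall back to the default zone_batchsize()-based ->high/->batch sizing.

/*
 * Hypothetical user-space sketch (not part of this patch): pokes
 * /proc/sys/vm/percpu_pagelist_fraction to observe the new behaviour.
 * Run as root; values below MIN_PERCPU_PAGELIST_FRACTION (8) but
 * non-zero are expected to fail with -EINVAL after this change.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static int write_fraction(const char *val)
{
	FILE *f = fopen("/proc/sys/vm/percpu_pagelist_fraction", "w");
	int err = 0;

	if (!f)
		return -errno;
	if (fputs(val, f) == EOF)
		err = -errno;
	/* The kernel's verdict typically surfaces when the write is flushed. */
	if (fclose(f) == EOF && !err)
		err = -errno;
	return err;
}

int main(void)
{
	int err;

	err = write_fraction("4");	/* below the new minimum: expect -EINVAL */
	printf("write 4   -> %d (%s)\n", err, err ? strerror(-err) : "ok");

	err = write_fraction("100");	/* valid: ->high becomes managed_pages / 100 */
	printf("write 100 -> %d (%s)\n", err, err ? strerror(-err) : "ok");

	err = write_fraction("0");	/* restores the default batch/high sizing */
	printf("write 0   -> %d (%s)\n", err, err ? strerror(-err) : "ok");
	return 0;
}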