/*
* split_page takes a non-compound higher-order page, and splits it into
* n (1<<order) sub-pages: page[0..n]
* Each sub-page must be freed individually.
*
* Note: this is probably too low level an operation for use in drivers.
* Please consult with lkml before using this in your driver.
*/
void split_page(struct page *page, unsigned int order)
{
int i;
VM_BUG_ON(PageCompound(page));
VM_BUG_ON(!page_count(page));
for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
}
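
/*
 * Illustrative sketch only (not part of this file's API): one way a caller
 * could use split_page() to hand out the pieces of an order-2 allocation
 * individually. The helper name demo_split_and_free() is hypothetical.
 */
static void demo_split_and_free(void)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);	/* 1 << 2 = 4 pages */

	if (!page)
		return;
	split_page(page, 2);		/* page[0..3] become independent pages */
	__free_page(page + 3);		/* each sub-page is freed on its own */
	__free_page(page + 2);
	__free_page(page + 1);
	__free_page(page);
}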
/*
* Really, prep_compound_page() should be called from __rmqueue_bulk(). But
* we cheat by calling it from here, in the order > 0 path. Saves a branch
* or two.
*/
static struct page *buffered_rmqueue(struct zone *preferred_zone,
			struct zone *zone, int order, gfp_t gfp_flags)
{
	unsigned long flags;
	struct page *page;
	int cold = !!(gfp_flags & __GFP_COLD);
	int cpu;
	int migratetype = allocflags_to_migratetype(gfp_flags);

again:
	cpu = get_cpu();
	if (likely(order == 0)) {
		struct per_cpu_pages *pcp;

		pcp = &zone_pcp(zone, cpu)->pcp;
		local_irq_save(flags);
		if (!pcp->count) {
			pcp->count = rmqueue_bulk(zone, 0,
					pcp->batch, &pcp->list, migratetype);
			if (unlikely(!pcp->count))
				goto failed;
		}

		/* Find a page of the appropriate migrate type */
		if (cold) {
			list_for_each_entry_reverse(page, &pcp->list, lru)
				if (page_private(page) == migratetype)
					break;
		} else {
			list_for_each_entry(page, &pcp->list, lru)
				if (page_private(page) == migratetype)
					break;
		}

		/* Allocate more to the pcp list if necessary */
		if (unlikely(&page->lru == &pcp->list)) {
			pcp->count += rmqueue_bulk(zone, 0,
					pcp->batch, &pcp->list, migratetype);
			page = list_entry(pcp->list.next, struct page, lru);
		}

		list_del(&page->lru);
		pcp->count--;
	} else {
		spin_lock_irqsave(&zone->lock, flags);
		page = __rmqueue(zone, order, migratetype);
		spin_unlock(&zone->lock);
		if (!page)
			goto failed;
	}

	__count_zone_vm_events(PGALLOC, zone, 1 << order);
	zone_statistics(preferred_zone, zone);
	local_irq_restore(flags);
	put_cpu();

	if (prep_new_page(page, order, gfp_flags))
		goto again;
	return page;

failed:
	local_irq_restore(flags);
	put_cpu();
	return NULL;
}
#define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */
#define ALLOC_WMARK_MIN 0x02 /* use pages_min watermark */
#define ALLOC_WMARK_LOW 0x04 /* use pages_low watermark */
#define ALLOC_WMARK_HIGH 0x08 /* use pages_high watermark */
#define ALLOC_HARDER 0x10 /* try to alloc harder */
#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET 0x40 /* check for correct cpuset */
#ifdef CONFIG_FAIL_PAGE_ALLOC
static struct fail_page_alloc_attr {
struct fault_attr attr;
u32 ignore_gfp_highmem;
u32 ignore_gfp_wait;
u32 min_order;
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
struct dentry *ignore_gfp_highmem_file;
struct dentry *ignore_gfp_wait_file;
struct dentry *min_order_file;
#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
} fail_page_alloc = {
.attr = FAULT_ATTR_INITIALIZER,
.ignore_gfp_wait = 1,
.ignore_gfp_highmem = 1,
.min_order = 1,
};
static int __init setup_fail_page_alloc(char *str)
{
return setup_fault_attr(&fail_page_alloc.attr, str);
}
__setup("fail_page_alloc=", setup_fail_page_alloc);
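/*
 * Example (assumed format, following the generic fault_attr parser):
 * booting with "fail_page_alloc=1,10,0,-1" would ask setup_fault_attr()
 * for interval=1, probability=10 (percent), space=0 and times=-1
 * (no limit), i.e. roughly one in ten eligible allocations fails.
 */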
static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
if (order < fail_page_alloc.min_order)
return 0;
if (gfp_mask & __GFP_NOFAIL)
return 0;
if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
return 0;
if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
return 0;
return should_fail(&fail_page_alloc.attr, 1 << order);
}
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
static int __init fail_page_alloc_debugfs(void)
{
mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
struct dentry *dir;
int err;
err = init_fault_attr_dentries(&fail_page_alloc.attr,
"fail_page_alloc");
if (err)
return err;
dir = fail_page_alloc.attr.dentries.dir;
fail_page_alloc.ignore_gfp_wait_file =
debugfs_create_bool("ignore-gfp-wait", mode, dir,
&fail_page_alloc.ignore_gfp_wait);
fail_page_alloc.ignore_gfp_highmem_file =
debugfs_create_bool("ignore-gfp-highmem", mode, dir,
&fail_page_alloc.ignore_gfp_highmem);
fail_page_alloc.min_order_file =
debugfs_create_u32("min-order", mode, dir,
&fail_page_alloc.min_order);
if (!fail_page_alloc.ignore_gfp_wait_file ||
!fail_page_alloc.ignore_gfp_highmem_file ||
!fail_page_alloc.min_order_file) {
err = -ENOMEM;
debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
debugfs_remove(fail_page_alloc.min_order_file);
cleanup_fault_attr_dentries(&fail_page_alloc.attr);
}
return err;
}
late_initcall(fail_page_alloc_debugfs);
#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
#else /* CONFIG_FAIL_PAGE_ALLOC */
static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
return 0;
}
#endif /* CONFIG_FAIL_PAGE_ALLOC */
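/*
 * Example (paths assume debugfs mounted at /sys/kernel/debug): the knobs
 * created by fail_page_alloc_debugfs() can be tuned at run time, e.g.
 *
 *	echo 0 > /sys/kernel/debug/fail_page_alloc/ignore-gfp-wait
 *	echo 2 > /sys/kernel/debug/fail_page_alloc/min-order
 *
 * which makes __GFP_WAIT allocations eligible for injection and restricts
 * injected failures to order >= 2 requests.
 */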
/*
* Return 1 if free pages are above 'mark'. This takes into account the order
* of the allocation.
*/
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	/* free_pages may go negative - that's OK */
	long min = mark;
	long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
	int o;

	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;

	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
return 0;
for (o = 0; o < order; o++) {
/* At the next order, this order's pages become unavailable */
free_pages -= z->free_area[o].nr_free << o;
/* Require fewer higher order pages to be free */
min >>= 1;
if (free_pages <= min)
return 0;
}
return 1;
}
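/*
 * Worked example (illustrative numbers, not taken from a real zone):
 * mark = 128, order = 2, lowmem_reserve[classzone_idx] = 0, no ALLOC_HIGH
 * or ALLOC_HARDER, 200 free pages of which 40 are order-0 pages and 30 are
 * order-1 blocks:
 *
 *	free_pages = 200 - (1 << 2) + 1 = 197;  197 > 128, so continue
 *	o = 0: free_pages = 197 - (40 << 0) = 157, min = 64;  157 > 64
 *	o = 1: free_pages = 157 - (30 << 1) =  97, min = 32;   97 > 32
 *
 * so the check returns 1: enough memory remains at order >= 2 even after
 * discounting the blocks that are too small to satisfy the request.
 */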
#ifdef CONFIG_NUMA
/*
* zlc_setup - Setup for "zonelist cache". Uses cached zone data to
* skip over zones that are not allowed by the cpuset, or that have
* been recently (in last second) found to be nearly full. See further
* comments in mmzone.h. Reduces cache footprint of zonelist scans
* that have to skip over a lot of full or unallowed zones.
*
* If the zonelist cache is present in the passed in zonelist, then
* returns a pointer to the allowed node mask (either the current
* tasks mems_allowed, or node_states[N_HIGH_MEMORY].)
*
* If the zonelist cache is not available for this zonelist, does
* nothing and returns NULL.
*
* If the fullzones BITMAP in the zonelist cache is stale (more than
* a second since last zap'd) then we zap it out (clear its bits.)
*
* We hold off even calling zlc_setup, until after we've checked the
* first zone in the zonelist, on the theory that most allocations will
* be satisfied from that first zone, so best to examine that zone as
* quickly as we can.
*/
static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
struct zonelist_cache *zlc; /* cached zonelist speedup info */
nodemask_t *allowednodes; /* zonelist_cache approximation */
zlc = zonelist->zlcache_ptr;
if (!zlc)
return NULL;
if (time_after(jiffies, zlc->last_full_zap + HZ)) {
bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
zlc->last_full_zap = jiffies;
}
allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
&cpuset_current_mems_allowed :
&node_states[N_HIGH_MEMORY];
return allowednodes;
}
/*
* Given 'z' scanning a zonelist, run a couple of quick checks to see
* if it is worth looking at further for free memory:
* 1) Check that the zone isn't thought to be full (doesn't have its
* bit set in the zonelist_cache fullzones BITMAP).
* 2) Check that the zones node (obtained from the zonelist_cache
* z_to_n[] mapping) is allowed in the passed in allowednodes mask.
* Return true (non-zero) if zone is worth looking at further, or
* else return false (zero) if it is not.
*
* This check -ignores- the distinction between various watermarks,
* such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ... If a zone is
* found to be full for any variation of these watermarks, it will
* be considered full for up to one second by all requests, unless
* we are so low on memory on all allowed nodes that we are forced
* into the second scan of the zonelist.
*
* In the second scan we ignore this zonelist cache and exactly
* apply the watermarks to all zones, even it is slower to do so.
* We are low on memory in the second scan, and should leave no stone
* unturned looking for a free page.
*/
static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
nodemask_t *allowednodes)
{
struct zonelist_cache *zlc; /* cached zonelist speedup info */
int i; /* index of *z in zonelist zones */
int n; /* node that zone *z is on */
zlc = zonelist->zlcache_ptr;
if (!zlc)
return 1;
i = z - zonelist->_zonerefs;
n = zlc->z_to_n[i];
/* This zone is worth trying if it is allowed but not full */
return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
}
/*
* Given 'z' scanning a zonelist, set the corresponding bit in
* zlc->fullzones, so that subsequent attempts to allocate a page
* from that zone don't waste time re-examining it.
*/
static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
struct zonelist_cache *zlc; /* cached zonelist speedup info */
int i; /* index of *z in zonelist zones */
zlc = zonelist->zlcache_ptr;
if (!zlc)
return;
i = z - zonelist->_zonerefs;
set_bit(i, zlc->fullzones);
}
#else /* CONFIG_NUMA */
static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
return NULL;
}
static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
nodemask_t *allowednodes)
{
return 1;
}
static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
}
#endif /* CONFIG_NUMA */
/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
static struct page *
get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
		struct zonelist *zonelist, int high_zoneidx, int alloc_flags)
{
	struct zoneref *z;
	struct page *page = NULL;
	int classzone_idx;
	struct zone *zone, *preferred_zone;
	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
	int zlc_active = 0;		/* set if using zonelist_cache */
	int did_zlc_setup = 0;		/* just call zlc_setup() one time */

	(void)first_zones_zonelist(zonelist, high_zoneidx, nodemask,
							&preferred_zone);
	if (!preferred_zone)
		return NULL;

	classzone_idx = zone_idx(preferred_zone);

zonelist_scan:
	/*
	 * Scan zonelist, looking for a zone with enough free.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						high_zoneidx, nodemask) {
		if (NUMA_BUILD && zlc_active &&
			!zlc_zone_worth_trying(zonelist, z, allowednodes))
				continue;
		if ((alloc_flags & ALLOC_CPUSET) &&
			!cpuset_zone_allowed_softwall(zone, gfp_mask))
				goto try_next_zone;

		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
			unsigned long mark;
			if (alloc_flags & ALLOC_WMARK_MIN)
				mark = zone->pages_min;
			else if (alloc_flags & ALLOC_WMARK_LOW)
				mark = zone->pages_low;
			else
				mark = zone->pages_high;
			if (!zone_watermark_ok(zone, order, mark,
				    classzone_idx, alloc_flags)) {
				if (!zone_reclaim_mode ||
				    !zone_reclaim(zone, gfp_mask, order))
					goto this_zone_full;
			}
		}

		page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask);
		if (page)
			break;
this_zone_full:
		if (NUMA_BUILD)
			zlc_mark_zone_full(zonelist, z);
try_next_zone:
		if (NUMA_BUILD && !did_zlc_setup) {
			/* we do zlc_setup after the first zone is tried */
			allowednodes = zlc_setup(zonelist, alloc_flags);
			zlc_active = 1;
			did_zlc_setup = 1;
		}
	}

	if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
		/* Disable zlc cache for second zonelist scan */
		zlc_active = 0;
		goto zonelist_scan;
	}
	return page;
}
/*
* This is the 'heart' of the zoned buddy allocator.
*/
struct page *
__alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
			struct zonelist *zonelist, nodemask_t *nodemask)
{
	const gfp_t wait = gfp_mask & __GFP_WAIT;
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
struct zoneref *z;
struct zone *zone;
struct page *page;
struct reclaim_state reclaim_state;
struct task_struct *p = current;
	int do_retry;
	int alloc_flags;
unsigned long did_some_progress;
unsigned long pages_reclaimed = 0;
if (should_fail_alloc_page(gfp_mask, order))
return NULL;
restart:
	z = zonelist->_zonerefs;  /* the list of zones suitable for gfp_mask */

	if (unlikely(!z->zone)) {
		/*
		 * Happens if we have an empty zonelist as a result of
		 * GFP_THISNODE being used on a memoryless node
		 */
		return NULL;
	}

	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
			zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET);
	if (page)
		goto got_pg;
/*
* GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
* __GFP_NOWARN set) should not cause reclaim since the subsystem
* (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
* using a larger set of nodes after it has established that the
* allowed per node queues are empty and that nodes are
* over allocated.
*/
if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
goto nopage;
for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
wakeup_kswapd(zone, order);
	/*
	 * OK, we're below the kswapd watermark and have kicked background
	 * reclaim. Now things get more complex, so set up alloc_flags according
	 * to how we want to proceed.
	 *
	 * The caller may dip into page reserves a bit more if the caller
	 * cannot run direct reclaim, or if the caller has realtime scheduling
	 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
	 */
	alloc_flags = ALLOC_WMARK_MIN;
	if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
		alloc_flags |= ALLOC_HARDER;
if (gfp_mask & __GFP_HIGH)
alloc_flags |= ALLOC_HIGH;
if (wait)
alloc_flags |= ALLOC_CPUSET;
/*
* Go through the zonelist again. Let __GFP_HIGH and allocations
* coming from realtime tasks go deeper into reserves.
*
* This is the last chance, in general, before the goto nopage.
* Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
						high_zoneidx, alloc_flags);
	if (page)
		goto got_pg;

	/* This allocation should allow future memory freeing. */
rebalance:
if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
&& !in_interrupt()) {
		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
nofail_alloc:
			/* go through the zonelist yet again, ignoring mins */
			page = get_page_from_freelist(gfp_mask, nodemask, order,
				zonelist, high_zoneidx, ALLOC_NO_WATERMARKS);
			if (page)
				goto got_pg;
			if (gfp_mask & __GFP_NOFAIL) {
				congestion_wait(WRITE, HZ/50);
				goto nofail_alloc;
			}
		}
		goto nopage;
	}
/* Atomic allocations - we can't balance anything */
if (!wait)
goto nopage;
cond_resched();
/* We now go into synchronous reclaim */
p->flags |= PF_MEMALLOC;
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
p->reclaim_state = NULL;
p->flags &= ~PF_MEMALLOC;
	cond_resched();

	if (likely(did_some_progress)) {
		page = get_page_from_freelist(gfp_mask, nodemask, order,
					zonelist, high_zoneidx, alloc_flags);
		if (page)
			goto got_pg;
	} else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
if (!try_set_zone_oom(zonelist, gfp_mask)) {
schedule_timeout_uninterruptible(1);
goto restart;
}
/*
* Go through the zonelist yet one more time, keep
* very high watermark here, this is only to catch
* a parallel oom killing, we must fail if we're still
* under heavy pressure.
*/
page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
order, zonelist, high_zoneidx,
				ALLOC_WMARK_HIGH|ALLOC_CPUSET);
		if (page) {
			clear_zonelist_oom(zonelist, gfp_mask);
			goto got_pg;
		}

		/* The OOM killer will not help higher order allocs so fail */
		if (order > PAGE_ALLOC_COSTLY_ORDER) {
			clear_zonelist_oom(zonelist, gfp_mask);
			goto nopage;
		}

		out_of_memory(zonelist, gfp_mask, order);
		clear_zonelist_oom(zonelist, gfp_mask);
		goto restart;
	}
/*
* Don't let big-order allocations loop unless the caller explicitly
* requests that. Wait for some write requests to complete then retry.
*
* In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
* means __GFP_NOFAIL, but that may not be true in other
* implementations.
*
* For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
* specified, then we retry until we no longer reclaim any pages
* (above), or we've reclaimed an order of pages at least as
* large as the allocation's order. In both cases, if the
	 * allocation still fails, we stop retrying.
	 */
	pages_reclaimed += did_some_progress;
	do_retry = 0;
	if (!(gfp_mask & __GFP_NORETRY)) {
		if (order <= PAGE_ALLOC_COSTLY_ORDER) {
			do_retry = 1;
		} else {
if (gfp_mask & __GFP_REPEAT &&
pages_reclaimed < (1 << order))
do_retry = 1;
}
if (gfp_mask & __GFP_NOFAIL)
do_retry = 1;
}
if (do_retry) {
congestion_wait(WRITE, HZ/50);
goto rebalance;
}
nopage:
if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
printk(KERN_WARNING "%s: page allocation failure."
" order:%d, mode:0x%x\n",
p->comm, order, gfp_mask);
		dump_stack();
		show_mem();
	}
got_pg:
	return page;
}
EXPORT_SYMBOL(__alloc_pages_internal);
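
/*
 * Illustrative caller sketch (the helper my_driver_alloc_buffer() is
 * hypothetical): the retry policy above means a high-order request with
 * __GFP_NORETRY fails quickly instead of looping at 'rebalance', so a
 * caller that has a fallback (here vmalloc()) can opt out of retrying.
 */
static void *my_driver_alloc_buffer(size_t bytes)
{
	unsigned int order = get_order(bytes);
	struct page *page;

	page = alloc_pages(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN, order);
	if (page)
		return page_address(page);
	return vmalloc(bytes);		/* fallback when the buddy path gives up */
}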
unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
struct page * page;
page = alloc_pages(gfp_mask, order);
if (!page)
return 0;
return (unsigned long) page_address(page);
}
EXPORT_SYMBOL(__get_free_pages);
unsigned long get_zeroed_page(gfp_t gfp_mask)
{
struct page * page;
/*
* get_zeroed_page() returns a 32-bit address, which cannot represent
* a highmem page
	 */
	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);

	page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
if (page)
return (unsigned long) page_address(page);
return 0;
}
EXPORT_SYMBOL(get_zeroed_page);
void __pagevec_free(struct pagevec *pvec)
{
int i = pagevec_count(pvec);
while (--i >= 0)
free_hot_cold_page(pvec->pages[i], pvec->cold);
}
void __free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page)) {
		if (order == 0)
free_hot_page(page);
else
__free_pages_ok(page, order);
}
}
EXPORT_SYMBOL(__free_pages);
void free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		VM_BUG_ON(!virt_addr_valid((void *)addr));
		__free_pages(virt_to_page((void *)addr), order);
}
}
EXPORT_SYMBOL(free_pages);
/**
* alloc_pages_exact - allocate an exact number physically-contiguous pages.
* @size: the number of bytes to allocate
* @gfp_mask: GFP flags for the allocation
*
* This function is similar to alloc_pages(), except that it allocates the
* minimum number of pages to satisfy the request. alloc_pages() can only
* allocate memory in power-of-two pages.
*
* This function is also limited by MAX_ORDER.
*
* Memory allocated by this function must be released by free_pages_exact().
*/
void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
{
unsigned int order = get_order(size);
unsigned long addr;
addr = __get_free_pages(gfp_mask, order);
if (addr) {
unsigned long alloc_end = addr + (PAGE_SIZE << order);
unsigned long used = addr + PAGE_ALIGN(size);
split_page(virt_to_page(addr), order);
while (used < alloc_end) {
free_page(used);
used += PAGE_SIZE;
}
}
return (void *)addr;
}
EXPORT_SYMBOL(alloc_pages_exact);
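
/*
 * Usage sketch (the helper demo_exact_alloc() is hypothetical): a
 * 5 * PAGE_SIZE request costs 5 pages here instead of the 8 that a plain
 * order-3 alloc_pages() would pin, and must be returned through
 * free_pages_exact() with the same size.
 */
static int demo_exact_alloc(void)
{
	size_t size = 5 * PAGE_SIZE;
	void *buf = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);

	if (!buf)
		return -ENOMEM;
	/* ... use the buffer ... */
	free_pages_exact(buf, size);
	return 0;
}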
/**
* free_pages_exact - release memory allocated via alloc_pages_exact()
* @virt: the value returned by alloc_pages_exact.
* @size: size of allocation, same value as passed to alloc_pages_exact().
*
* Release the memory allocated by a previous call to alloc_pages_exact.
*/
void free_pages_exact(void *virt, size_t size)
{
unsigned long addr = (unsigned long)virt;
unsigned long end = addr + PAGE_ALIGN(size);
while (addr < end) {
free_page(addr);
addr += PAGE_SIZE;
}
}
EXPORT_SYMBOL(free_pages_exact);
static unsigned int nr_free_zone_pages(int offset)
{
	struct zoneref *z;
	struct zone *zone;
	unsigned int sum = 0;

	/* Just pick one node, since fallback list is circular */
	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
for_each_zone_zonelist(zone, z, zonelist, offset) {
unsigned long size = zone->present_pages;
unsigned long high = zone->pages_high;
if (size > high)
sum += size - high;
}
return sum;
}
/*
* Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
*/
unsigned int nr_free_buffer_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_USER));
}
EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
/*
* Amount of free RAM allocatable within all zones
*/
unsigned int nr_free_pagecache_pages(void)
{
return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
}

static inline void show_node(struct zone *zone)
{
	if (NUMA_BUILD)
		printk("Node %d ", zone_to_nid(zone));
}
void si_meminfo(struct sysinfo *val)
{
val->totalram = totalram_pages;
val->sharedram = 0;
val->freeram = global_page_state(NR_FREE_PAGES);
val->bufferram = nr_blockdev_pages();
val->totalhigh = totalhigh_pages;
val->freehigh = nr_free_highpages();
val->mem_unit = PAGE_SIZE;
}
EXPORT_SYMBOL(si_meminfo);
#ifdef CONFIG_NUMA
void si_meminfo_node(struct sysinfo *val, int nid)
{
pg_data_t *pgdat = NODE_DATA(nid);
val->totalram = pgdat->node_present_pages;
val->freeram = node_page_state(nid, NR_FREE_PAGES);
#ifdef CONFIG_HIGHMEM
val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
NR_FREE_PAGES);
#else
val->totalhigh = 0;
val->freehigh = 0;
#endif
val->mem_unit = PAGE_SIZE;
}
#endif
#define K(x) ((x) << (PAGE_SHIFT-10))
/*
* Show free area list (used inside shift_scroll-lock stuff)
* We also calculate the percentage fragmentation. We do this by counting the
* memory on each free list with the exception of the first item on the list.
*/
void show_free_areas(void)
{
	int cpu;
	struct zone *zone;

	for_each_zone(zone) {
		if (!populated_zone(zone))
			continue;

		show_node(zone);
		printk("%s per-cpu:\n", zone->name);

		for_each_online_cpu(cpu) {
			struct per_cpu_pageset *pageset;

			pageset = zone_pcp(zone, cpu);

			printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
			       cpu, pageset->pcp.high,
			       pageset->pcp.batch, pageset->pcp.count);
		}
	}

printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu\n"
" free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
global_page_state(NR_ACTIVE),
global_page_state(NR_INACTIVE),
global_page_state(NR_FILE_DIRTY),
		global_page_state(NR_WRITEBACK),
		global_page_state(NR_UNSTABLE_NFS),
		global_page_state(NR_FREE_PAGES),
		global_page_state(NR_SLAB_RECLAIMABLE) +
global_page_state(NR_SLAB_UNRECLAIMABLE),
global_page_state(NR_FILE_MAPPED),
global_page_state(NR_PAGETABLE),
global_page_state(NR_BOUNCE));
	for_each_zone(zone) {
		int i;

		if (!populated_zone(zone))
continue;
show_node(zone);
printk("%s"
" free:%lukB"
" min:%lukB"
" low:%lukB"
" high:%lukB"
" active:%lukB"
" inactive:%lukB"
" present:%lukB"
" pages_scanned:%lu"
" all_unreclaimable? %s"
"\n",
zone->name,
K(zone_page_state(zone, NR_FREE_PAGES)),
K(zone->pages_min),
K(zone->pages_low),
K(zone->pages_high),
K(zone_page_state(zone, NR_ACTIVE)),
			K(zone_page_state(zone, NR_INACTIVE)),
			K(zone->present_pages),
			zone->pages_scanned,
			(zone_is_all_unreclaimable(zone) ? "yes" : "no")
);
printk("lowmem_reserve[]:");
for (i = 0; i < MAX_NR_ZONES; i++)
printk(" %lu", zone->lowmem_reserve[i]);
printk("\n");
}
for_each_zone(zone) {
unsigned long nr[MAX_ORDER], flags, order, total = 0;
if (!populated_zone(zone))
continue;
show_node(zone);
printk("%s: ", zone->name);
spin_lock_irqsave(&zone->lock, flags);
for (order = 0; order < MAX_ORDER; order++) {
nr[order] = zone->free_area[order].nr_free;
			total += nr[order] << order;
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++)
			printk("%lu*%lukB ", nr[order], K(1UL) << order);
		printk("= %lukB\n", K(total));
	}

	printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));

	show_swap_cache_info();
}
static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
{
zoneref->zone = zone;
zoneref->zone_idx = zone_idx(zone);
}
/*
 * Add all populated zones of a node to the zonelist.
 */
static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
				int nr_zones, enum zone_type zone_type)
{
	struct zone *zone;

	BUG_ON(zone_type >= MAX_NR_ZONES);
	zone_type++;

	do {
		zone_type--;
		zone = pgdat->node_zones + zone_type;
		if (populated_zone(zone)) {
			zoneref_set_zone(zone,
				&zonelist->_zonerefs[nr_zones++]);
			check_highest_zone(zone_type);
		}

	} while (zone_type);
	return nr_zones;
}
/*
* zonelist_order:
* 0 = automatic detection of better ordering.
* 1 = order by ([node] distance, -zonetype)
* 2 = order by (-zonetype, [node] distance)
*
* If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
* the same zonelist. So only NUMA can configure this param.
*/
#define ZONELIST_ORDER_DEFAULT 0
#define ZONELIST_ORDER_NODE 1
#define ZONELIST_ORDER_ZONE 2
/* zonelist order in the kernel.
* set_zonelist_order() will set this to NODE or ZONE.
*/
static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
/* The value user specified ....changed by config */
static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
/* string for sysctl */
#define NUMA_ZONELIST_ORDER_LEN 16
char numa_zonelist_order[16] = "default";
/*
* interface for configure zonelist ordering.
* command line option "numa_zonelist_order"
* = "[dD]efault - default, automatic configuration.
* = "[nN]ode - order by node locality, then by zone within node
* = "[zZ]one - order by zone, then by locality within zone
*/
static int __parse_numa_zonelist_order(char *s)