The previous posts analyzed how the memory management framework is constructed, but some of the details were not fully covered; this post mainly serves as a supplement.
The figure below shows the hierarchy of the Linux physical memory management framework that we have already seen.
Let us now focus on what each member of these management structures does.
【file:/include/linux/mmzone.h】
typedef struct pglist_data {
    struct zone node_zones[MAX_NR_ZONES];
    struct zonelist node_zonelists[MAX_ZONELISTS];
    int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
    struct page *node_mem_map;
#ifdef CONFIG_MEMCG
    struct page_cgroup *node_page_cgroup;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
    struct bootmem_data *bdata;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
    /*
     * Must be held any time you expect node_start_pfn, node_present_pages
     * or node_spanned_pages stay constant. Holding this will also
     * guarantee that any pfn_valid() stays that way.
     *
     * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
     * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG.
     *
     * Nests above zone->lock and zone->span_seqlock
     */
    spinlock_t node_size_lock;
#endif
    unsigned long node_start_pfn;
    unsigned long node_present_pages; /* total number of physical pages */
    unsigned long node_spanned_pages; /* total size of physical page
                                         range, including holes */
    int node_id;
    nodemask_t reclaim_nodes;   /* Nodes allowed to reclaim from */
    wait_queue_head_t kswapd_wait;
    wait_queue_head_t pfmemalloc_wait;
    struct task_struct *kswapd; /* Protected by lock_memory_hotplug() */
    int kswapd_max_order;
    enum zone_type classzone_idx;
#ifdef CONFIG_NUMA_BALANCING
    /* Lock serializing the migrate rate limiting window */
    spinlock_t numabalancing_migrate_lock;
    /* Rate limiting time interval */
    unsigned long numabalancing_migrate_next_window;
    /* Number of pages migrated during the rate limiting time interval */
    unsigned long numabalancing_migrate_nr_pages;
#endif
} pg_data_t;
- struct zone node_zones[MAX_NR_ZONES];
——the zones that belong to this pg_data_t (node);
- struct zonelist node_zonelists[MAX_ZONELISTS];
——the fallback lists of zones: when an allocation cannot be satisfied from the preferred zone, the allocator walks a zonelist to try the remaining zones (and, on NUMA systems, other nodes) in order of preference;
- struct page *node_mem_map;
——points to an array of page structures in which each element describes one physical page frame of this node, so the whole array covers all physical pages of the node (flat memory model only);
- struct page_cgroup *node_page_cgroup;
——used to manage the node's page_cgroup descriptors; page_cgroup used to be a member of the page management structure and has since been moved out, and all of the node's page_cgroup structures are allocated at initialization time;
- struct bootmem_data *bdata;
——points to this node's bootmem_node_data, which can be looked up in System.map; it stores the state of the boot-time bootmem allocator. When the kernel uses the memblock algorithm instead (CONFIG_NO_BOOTMEM), this member does not exist;
- unsigned long node_start_pfn;
——the frame number of the first physical page managed by this pg_data_t;
- unsigned long node_present_pages;
——the total number of physical pages actually present in the node, excluding memory holes;
- unsigned long node_spanned_pages;
——the difference between the largest and smallest page frame numbers, i.e. the total size of the physical page range including memory holes;
- int node_id;
——the index of this pg_data_t; on non-NUMA architectures it is 0;
- nodemask_t reclaim_nodes;
——records the set of memory management nodes that this node is allowed to reclaim from;
- wait_queue_head_t kswapd_wait;
——kswapd is the page-out daemon thread; it blocks on this wait queue and is woken with wake_up_interruptible() when reclaim conditions are met;
- wait_queue_head_t pfmemalloc_wait;
——used to throttle processes performing direct reclaim;
- struct task_struct *kswapd;
——task pointer of the kswapd daemon thread;
- int kswapd_max_order;
——records the highest allocation order for which kswapd was last woken, so that kswapd keeps reclaiming until allocations of up to that order can succeed;
- enum zone_type classzone_idx;
——the companion of kswapd_max_order: the highest zone index that kswapd should balance when it runs;
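To tie these members together, here is a minimal sketch of how they can be inspected at runtime. It is a hypothetical kernel module written against this era of kernel (the module name pgdat_dump and the message format are mine, not kernel code); NODE_DATA() and for_each_online_node() are the standard accessors:

#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/mmzone.h>

/* Walk every online node and dump the pg_data_t size fields above. */
static int __init pgdat_dump_init(void)
{
    int nid;

    for_each_online_node(nid) {
        pg_data_t *pgdat = NODE_DATA(nid);  /* the node's pg_data_t */

        pr_info("node %d: start_pfn=%lu spanned=%lu present=%lu nr_zones=%d\n",
                pgdat->node_id, pgdat->node_start_pfn,
                pgdat->node_spanned_pages, pgdat->node_present_pages,
                pgdat->nr_zones);
    }
    return 0;
}

static void __exit pgdat_dump_exit(void)
{
}

module_init(pgdat_dump_init);
module_exit(pgdat_dump_exit);
MODULE_LICENSE("GPL");

On a non-NUMA build the loop runs exactly once and NODE_DATA(0) resolves to the single static contig_page_data.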
【file:/include/linux/mmzone.h】
struct zone {
    /* Fields commonly accessed by the page allocator */
    /* zone watermarks, access with *_wmark_pages(zone) macros */
    unsigned long watermark[NR_WMARK];
    /*
     * When free pages are below this point, additional steps are taken
     * when reading the number of free pages to avoid per-cpu counter
     * drift allowing watermarks to be breached
     */
    unsigned long percpu_drift_mark;
    /*
     * We don't know if the memory that we're going to allocate will be freeable
     * or/and it will be released eventually, so to avoid totally wasting several
     * GB of ram we must reserve some of the lower zone memory (otherwise we risk
     * to run OOM on the lower zones despite there's tons of freeable ram
     * on the higher zones). This array is recalculated at runtime if the
     * sysctl_lowmem_reserve_ratio sysctl changes.
     */
    unsigned long lowmem_reserve[MAX_NR_ZONES];
    /*
     * This is a per-zone reserve of pages that should not be
     * considered dirtyable memory.
     */
    unsigned long dirty_balance_reserve;
#ifdef CONFIG_NUMA
    int node;
    /*
     * zone reclaim becomes active if more unmapped pages exist.
     */
    unsigned long min_unmapped_pages;
    unsigned long min_slab_pages;
#endif
    struct per_cpu_pageset __percpu *pageset;
    /*
     * free areas of different sizes
     */
    spinlock_t lock;
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
    /* Set to true when the PG_migrate_skip bits should be cleared */
    bool compact_blockskip_flush;
    /* pfns where compaction scanners should start */
    unsigned long compact_cached_free_pfn;
    unsigned long compact_cached_migrate_pfn;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
    /* see spanned/present_pages for more description */
    seqlock_t span_seqlock;
#endif
    struct free_area free_area[MAX_ORDER];
#ifndef CONFIG_SPARSEMEM
    /*
     * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
     * In SPARSEMEM, this map is stored in struct mem_section
     */
    unsigned long *pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
#ifdef CONFIG_COMPACTION
    /*
     * On compaction failure, 1<<compact_defer_shift compactions
     * are skipped before trying again. The number attempted since
     * last failure is tracked with compact_considered.
     */
    unsigned int compact_considered;
    unsigned int compact_defer_shift;
    int compact_order_failed;
#endif
    ZONE_PADDING(_pad1_)
    /* Fields commonly accessed by the page reclaim scanner */
    spinlock_t lru_lock;
    struct lruvec lruvec;
    unsigned long pages_scanned;    /* since last reclaim */
    unsigned long flags;            /* zone flags, see below */
    /* Zone statistics */
    atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
    /*
     * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
     * this zone's LRU. Maintained by the pageout code.
     */
    unsigned int inactive_ratio;
    ZONE_PADDING(_pad2_)
    /* Rarely used or read-mostly fields */
    /*
     * wait_table -- the array holding the hash table
     * wait_table_hash_nr_entries -- the size of the hash table array
     * wait_table_bits -- wait_table_size == (1 << wait_table_bits)
     *
     * The purpose of all these is to keep track of the people
     * waiting for a page to become available and make them
     * runnable again when possible. The trouble is that this
     * consumes a lot of space, especially when so few things
     * wait on pages at a given time. So instead of using
     * per-page waitqueues, we use a waitqueue hash table.
     *
     * The bucket discipline is to sleep on the same queue when
     * colliding and wake all in that wait queue when removing.
     * When something wakes, it must check to be sure its page is
     * truly available, a la thundering herd. The cost of a
     * collision is great, but given the expected load of the
     * table, they should be so rare as to be outweighed by the
     * benefits from the saved space.
     *
     * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
     * primary users of these fields, and in mm/page_alloc.c
     * free_area_init_core() performs the initialization of them.
     */
    wait_queue_head_t *wait_table;
    unsigned long wait_table_hash_nr_entries;
    unsigned long wait_table_bits;
    /*
     * Discontig memory support fields.
     */
    struct pglist_data *zone_pgdat;
    /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
    unsigned long zone_start_pfn;
    /*
     * spanned_pages is the total pages spanned by the zone, including
     * holes, which is calculated as:
     *     spanned_pages = zone_end_pfn - zone_start_pfn;
     *
     * present_pages is physical pages existing within the zone, which
     * is calculated as:
     *     present_pages = spanned_pages - absent_pages(pages in holes);
     *
     * managed_pages is present pages managed by the buddy system, which
     * is calculated as (reserved_pages includes pages allocated by the
     * bootmem allocator):
     *     managed_pages = present_pages - reserved_pages;
     *
     * So present_pages may be used by memory hotplug or memory power
     * management logic to figure out unmanaged pages by checking
     * (present_pages - managed_pages). And managed_pages should be used
     * by page allocator and vm scanner to calculate all kinds of watermarks
     * and thresholds.
     *
     * Locking rules:
     *
     * zone_start_pfn and spanned_pages are protected by span_seqlock.
     * It is a seqlock because it has to be read outside of zone->lock,
     * and it is done in the main allocator path. But, it is written
     * quite infrequently.
     *
     * The span_seq lock is declared along with zone->lock because it is
     * frequently read in proximity to zone->lock. It's good to
     * give them a chance of being in the same cacheline.
     *
     * Write access to present_pages at runtime should be protected by
     * lock_memory_hotplug()/unlock_memory_hotplug(). Any reader who can't
     * tolerant drift of present_pages should hold memory hotplug lock to
     * get a stable value.
     *
     * Read access to managed_pages should be safe because it's unsigned
     * long. Write access to zone->managed_pages and totalram_pages are
     * protected by managed_page_count_lock at runtime. Idealy only
     * adjust_managed_page_count() should be used instead of directly
     * touching zone->managed_pages and totalram_pages.
     */
    unsigned long spanned_pages;
    unsigned long present_pages;
    unsigned long managed_pages;
    /*
     * Number of MIGRATE_RESEVE page block. To maintain for just
     * optimization. Protected by zone->lock.
     */
    int nr_migrate_reserve_block;
    /*
     * rarely used fields:
     */
    const char *name;
} ____cacheline_internodealigned_in_smp;
- unsigned long watermark[NR_WMARK];
——holds the three watermarks WMARK_MIN, WMARK_LOW and WMARK_HIGH; as the names suggest, min is the smallest, low sits in the middle and high is the largest. When the number of free pages drops below WMARK_LOW during allocation, the allocator wakes the kswapd daemon to reclaim physical pages in the background; when free pages drop below WMARK_MIN, the allocating task itself falls back to synchronous (direct) reclaim; once kswapd has pushed the number of free pages back above WMARK_HIGH, it goes to sleep again;
- unsigned long percpu_drift_mark;
——when free pages fall below this mark, additional steps are taken when reading the free-page count, so that per-CPU counter drift cannot silently breach the watermarks above;
- unsigned long lowmem_reserve[MAX_NR_ZONES];
——the number of physical pages this zone must keep in reserve, per allocation class, so that memory remains available for emergency allocations that cannot fall back to other zones;
- unsigned long dirty_balance_reserve;
——an approximation of the free pages that the allocator will not hand out, i.e. pages that should not be counted as dirtyable memory;
- struct per_cpu_pageset __percpu *pageset;
——per-CPU page sets whose pcp member implements the hot/cold page lists;
- spinlock_t lock;
——a spinlock protecting this zone (in particular the buddy free lists) against concurrent access;
- struct free_area free_area[MAX_ORDER];
——the heart of the buddy memory management algorithm: one free_area of free blocks per allocation order;
- unsigned long *pageblock_flags;
——per-pageblock flags used by the buddy system's anti-fragmentation (page migration type) machinery;
- spinlock_t lru_lock;
——protects the lruvec data;
- struct lruvec lruvec;
——contains the lists used for LRU page management plus a reclaim_stat member that records page-reclaim state;
- unsigned long pages_scanned;
——the number of page descriptors scanned since the last reclaim of this zone;
- unsigned long flags;
——flags describing the current state of this memory zone;
- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
——the per-zone statistics counters;
- unsigned int inactive_ratio;
——the target ratio of active to inactive anonymous pages on this zone's LRU, maintained by the pageout code;
- wait_queue_head_t *wait_table;
——the hash table of wait queues for processes waiting on pages in this zone;
- unsigned long wait_table_hash_nr_entries;
——the number of entries in the wait_table hash table;
- unsigned long wait_table_bits;
——the table size expressed as a power of two: wait_table_size == (1 << wait_table_bits);
- struct pglist_data *zone_pgdat;
——points back to the pg_data_t node this zone belongs to;
- unsigned long zone_start_pfn;
——the lowest physical page frame number of this zone;
- unsigned long spanned_pages;
——the total number of pages spanned by the zone including memory holes, which is simply the difference between the zone's last and first page frame numbers;
- unsigned long present_pages;
——the number of physical pages actually present in the zone once memory holes are subtracted;
- unsigned long managed_pages;
——the number of physical pages managed by the buddy allocator, i.e. present_pages minus the pages reserved during early boot (for example by the bootmem/memblock allocator);
- int nr_migrate_reserve_block;
——the number of MIGRATE_RESERVE pageblocks, maintained purely as an optimization and protected by zone->lock;
- const char *name;
——the zone's name, e.g. "DMA", "Normal" or "HighMem";
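The watermark behaviour described above can be observed directly. The sketch below is again a hypothetical module (zone_dump, same caveats as before); it uses the kernel's *_wmark_pages() accessor macros for the watermark[] array and zone_page_state() for the free-page counter:

#include <linux/module.h>
#include <linux/mmzone.h>
#include <linux/vmstat.h>

/* Walk every zone that actually contains pages and dump its watermarks. */
static int __init zone_dump_init(void)
{
    struct zone *zone;

    for_each_populated_zone(zone) {
        unsigned long free = zone_page_state(zone, NR_FREE_PAGES);

        /* free < low wakes kswapd; free < min forces direct reclaim;
         * kswapd sleeps again once free climbs above high. */
        pr_info("%-8s free=%lu min=%lu low=%lu high=%lu managed=%lu\n",
                zone->name, free,
                min_wmark_pages(zone), low_wmark_pages(zone),
                high_wmark_pages(zone), zone->managed_pages);
    }
    return 0;
}

static void __exit zone_dump_exit(void)
{
}

module_init(zone_dump_init);
module_exit(zone_dump_exit);
MODULE_LICENSE("GPL");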
【file:/include/linux/mmzone.h】
/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * The objects in struct page are organized in double word blocks in
 * order to allows us to use atomic double word operations on portions
 * of struct page. That is currently only used by slub but the arrangement
 * allows the use of atomic double word operations on the flags/mapping
 * and lru list pointers also.
 */
struct page {
    /* First double word block */
    unsigned long flags;        /* Atomic flags, some possibly
                                 * updated asynchronously */
    union {
        struct address_space *mapping;  /* If low bit clear, points to
                                         * inode address_space, or NULL.
                                         * If page mapped as anonymous
                                         * memory, low bit is set, and
                                         * it points to anon_vma object:
                                         * see PAGE_MAPPING_ANON below.
                                         */
        void *s_mem;            /* slab first object */
    };
    /* Second double word */
    struct {
        union {
            pgoff_t index;      /* Our offset within mapping. */
            void *freelist;     /* sl[aou]b first free object */
            bool pfmemalloc;    /* If set by the page allocator,
                                 * ALLOC_NO_WATERMARKS was set
                                 * and the low watermark was not
                                 * met implying that the system
                                 * is under some pressure. The
                                 * caller should try ensure
                                 * this page is only used to
                                 * free other pages.
                                 */
        };
        union {
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
            /* Used for cmpxchg_double in slub */
            unsigned long counters;
#else
            /*
             * Keep _count separate from slub cmpxchg_double data.
             * As the rest of the double word is protected by
             * slab_lock but _count is not.
             */
            unsigned counters;
#endif
            struct {
                union {
                    /*
                     * Count of ptes mapped in
                     * mms, to show when page is
                     * mapped & limit reverse map
                     * searches.
                     *
                     * Used also for tail pages
                     * refcounting instead of
                     * _count. Tail pages cannot
                     * be mapped and keeping the
                     * tail page _count zero at
                     * all times guarantees
                     * get_page_unless_zero() will
                     * never succeed on tail
                     * pages.
                     */
                    atomic_t _mapcount;
                    struct { /* SLUB */
                        unsigned inuse:16;
                        unsigned objects:15;
                        unsigned frozen:1;
                    };
                    int units;  /* SLOB */
                };
                atomic_t _count;    /* Usage count, see below. */
            };
            unsigned int active;    /* SLAB */
        };
    };
    /* Third double word block */
    union {
        struct list_head lru;   /* Pageout list, eg. active_list
                                 * protected by zone->lru_lock !
                                 */
        struct {                /* slub per cpu partial pages */
            struct page *next;  /* Next partial slab */
#ifdef CONFIG_64BIT
            int pages;          /* Nr of partial slabs left */
            int pobjects;       /* Approximate # of objects */
#else
            short int pages;
            short int pobjects;
#endif
        };
        struct list_head list;  /* slobs list of pages */
        struct slab *slab_page; /* slab fields */
        struct rcu_head rcu_head;   /* Used by SLAB
                                     * when destroying via RCU
                                     */
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
        pgtable_t pmd_huge_pte; /* protected by page->ptl */
#endif
    };
    /* Remainder is not double word aligned */
    union {
        unsigned long private;  /* Mapping-private opaque data:
                                 * usually used for buffer_heads
                                 * if PagePrivate set; used for
                                 * swp_entry_t if PageSwapCache;
                                 * indicates order in the buddy
                                 * system if PG_buddy is set.
                                 */
#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
        spinlock_t *ptl;
#else
        spinlock_t ptl;
#endif
#endif
        struct kmem_cache *slab_cache;  /* SL[AU]B: Pointer to slab */
        struct page *first_page;    /* Compound tail pages */
    };
    /*
     * On machines where all RAM is mapped into kernel address space,
     * we can simply calculate the virtual address. On machines with
     * highmem some memory is mapped into kernel virtual memory
     * dynamically, so we need a place to store that address.
     * Note that this field could be 16 bits on x86 ... ;)
     *
     * Architectures with slow multiplication can define
     * WANT_PAGE_VIRTUAL in asm/page.h
     */
#if defined(WANT_PAGE_VIRTUAL)
    void *virtual;              /* Kernel virtual address (NULL if
                                   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
    unsigned long debug_flags;  /* Use atomic bitops on this */
#endif
#ifdef CONFIG_KMEMCHECK
    /*
     * kmemcheck wants to track the status of each byte in a page; this
     * is a pointer to such a status block. NULL if not tracked.
     */
    void *shadow;
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
    int _last_cpupid;
#endif
}
(The structure consists largely of unions, which let different subsystems reuse the same space for their own data; for now only the most common members are noted.)
- unsigned long flags;
——atomic flags recording the type and state of the page frame;
- struct address_space *mapping;
——distinguishes file-backed from anonymous page frames: when the low bit is clear it points to the inode's address_space (or is NULL), and when the page is mapped as anonymous memory the low bit is set and it points to an anon_vma object;
- atomic_t _mapcount;
——records how many page table entries in the system point to this page;
- atomic_t _count;
——the current reference count of the page;
- struct list_head lru;
——while the page frame is allocated, this member links it into one of the lists of the zone's lruvec for LRU management; while the frame is free, the buddy algorithm uses it instead;
- unsigned long private;
——a pointer to "private" data; depending on what the page is used for, it is interpreted in different ways, typically to associate the page with a data buffer (buffer_heads when PagePrivate is set, a swp_entry_t when PageSwapCache, the block order when PG_buddy is set);
- void *virtual;
——for pages in the highmem region, i.e. pages that cannot be permanently mapped, this member stores the page's kernel virtual address (NULL when not kmapped);
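As a final sketch (a hypothetical module page_peek, same caveats as the earlier examples), the _count and _mapcount semantics can be seen on a freshly allocated page: its reference count is 1, and since no page table entry maps it, page_mapcount() reports 0 (_mapcount is kept biased at -1):

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>

/* Grab one page from the buddy allocator and inspect its descriptor. */
static int __init page_peek_init(void)
{
    struct page *page = alloc_page(GFP_KERNEL);

    if (!page)
        return -ENOMEM;

    /* page_count() reads _count; page_mapcount() reads _mapcount + 1,
     * so an unmapped page reports a map count of 0. */
    pr_info("pfn=%lu flags=%#lx count=%d mapcount=%d\n",
            page_to_pfn(page), page->flags,
            page_count(page), page_mapcount(page));

    __free_page(page);
    return 0;
}

static void __exit page_peek_exit(void)
{
}

module_init(page_peek_init);
module_exit(page_peek_exit);
MODULE_LICENSE("GPL");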