Understanding the Common Memory Allocation Methods in the Linux Kernel



1. malloc

malloc() is implemented in the C library. The library maintains its own cache: when enough cached memory is available, malloc() allocates straight from that cache; only when the cache is exhausted does it go through the brk system call and ask the kernel to extend the heap with a VMA.
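
To see this split from user space, here is a minimal sketch (assuming glibc on Linux; sizes are illustrative). It prints the program break before and after a small allocation, which is normally served from the heap grown via brk:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
    void *before = sbrk(0);              /* current program break (top of the heap) */

    char *p = malloc(64 * 1024);         /* below the mmap threshold: usually served from the heap (brk) */
    if (!p)
        return 1;
    p[0] = 'a';                          /* touch the memory so a page fault actually maps a physical page */

    void *after = sbrk(0);               /* may be unchanged if glibc already had enough cached heap */
    printf("program break before: %p, after: %p\n", before, after);

    free(p);                             /* returned to the glibc cache, not necessarily to the kernel */
    return 0;
}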

[Figure: flow of the malloc() implementation]

1.1 The __do_sys_brk() function

Through the architecture-specific system call glue, malloc() eventually reaches the brk system call, defined with the SYSCALL_DEFINE1 macro, which expands into the __do_sys_brk() function:

SYSCALL_DEFINE1(brk, unsigned long, brk)
{
unsigned long retval;
unsigned long newbrk, oldbrk, origbrk;
struct mm_struct *mm = current->mm;
struct vm_area_struct *next;
unsigned long min_brk;
bool populate;
bool downgraded = false;
LIST_HEAD(uf);

if (down_write_killable(&mm->mmap_sem)) ///acquire the mmap_sem read-write semaphore for writing
return -EINTR;

origbrk = mm->brk; ///brk records the current break (end) of the heap area

#ifdef CONFIG_COMPAT_BRK
/*
* CONFIG_COMPAT_BRK can still be overridden by setting
* randomize_va_space to 2, which will still cause mm->start_brk
* to be arbitrarily shifted
*/
if (current->brk_randomized)
min_brk = mm->start_brk;
else
min_brk = mm->end_data;
#else
min_brk = mm->start_brk;
#endif
if (brk < min_brk)
goto out;

/*
* Check against rlimit here. If this check is done later after the test
* of oldbrk with newbrk then it can escape the test and let the data
* segment grow beyond its set limit the in case where the limit is
* not page aligned -Ram Gupta
*/
if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
mm->end_data, mm->start_data))
goto out;

newbrk = PAGE_ALIGN(brk);
oldbrk = PAGE_ALIGN(mm->brk);
if (oldbrk == newbrk) {
mm->brk = brk;
goto success;
}

/*
* Always allow shrinking brk.
* __do_munmap() may downgrade mmap_sem to read.
*/
if (brk <= mm->brk) { ///the request shrinks the heap (releases space)
int ret;

/*
* mm->brk must to be protected by write mmap_sem so update it
* before downgrading mmap_sem. When __do_munmap() fails,
* mm->brk will be restored from origbrk.
*/
mm->brk = brk;
ret = __do_munmap(mm, newbrk, oldbrk-newbrk, &uf, true);
if (ret < 0) {
mm->brk = origbrk;
goto out;
} else if (ret == 1) {
downgraded = true;
}
goto success;
}

/* Check against existing mmap mappings. */
next = find_vma(mm, oldbrk);
if (next && newbrk + PAGE_SIZE > vm_start_gap(next)) ///the new range would overlap the next mapping, bail out
goto out;

/* Ok, looks good - let it rip. */
if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0) ///no overlap: allocate a new vma (or extend an existing one)
goto out;
mm->brk = brk; ///update the brk address

success:
populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
if (downgraded)
up_read(&mm->mmap_sem);
else
up_write(&mm->mmap_sem);
userfaultfd_unmap_complete(mm, &uf);
if (populate) ///if mlockall() locked the address space, mm_populate() allocates physical pages immediately
mm_populate(oldbrk, newbrk - oldbrk);
return brk;

out:
retval = origbrk;
up_write(&mm->mmap_sem);
return retval;
}

To summarise what __do_sys_brk() does:

  • (1) Starting from the old brk boundary, it checks the neighbouring VMAs: if the extended range can be merged into an existing VMA it is reused, and if it would collide with the next mapping the request fails;

  • (2) If no merge is possible, a new VMA is allocated;

  • (3) If the application has called mlockall(), the whole virtual address space is locked so pages cannot be swapped out, and physical memory is allocated immediately; otherwise physical pages are allocated lazily, when a page fault is triggered on first access;

1.2 The do_brk_flags() function

What the function does:

  • (1) Finds a usable linear address range;

  • (2) Looks up the most suitable insertion point in the red-black tree;

  • (3) Checks whether the range can be merged into an existing VMA; if not, creates a new VMA;

  • (4) Inserts the new VMA into the mmap list and the red-black tree.

/*
* this is really a simplified "do_mmap". it only handles
* anonymous maps. eventually we may be able to do some
* brk-specific accounting here.
*/
static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
struct rb_node **rb_link, *rb_parent;
pgoff_t pgoff = addr >> PAGE_SHIFT;
int error;
unsigned long mapped_addr;

/* Until we need other flags, refuse anything except VM_EXEC. */
if ((flags & (~VM_EXEC)) != 0)
return -EINVAL;
flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; ///default attributes: readable and writable

mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); ///returns the start address of an unused, unmapped linear address range
if (IS_ERR_VALUE(mapped_addr))
return mapped_addr;

error = mlock_future_check(mm, mm->def_flags, len);
if (error)
return error;

/* Clear old maps, set up prev, rb_link, rb_parent, and uf */
if (munmap_vma_range(mm, addr, len, &prev, &rb_link, &rb_parent, uf)) ///clear old mappings and find the red-black tree insertion point
return -ENOMEM;

/* Check against address space limits *after* clearing old maps... */
if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
return -ENOMEM;

if (mm->map_count > sysctl_max_map_count)
return -ENOMEM;

if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
return -ENOMEM;

/* Can we just expand an old private anonymous mapping? */ ///check whether addr can be merged into a nearby vma; if not, a new vma must be created
vma = vma_merge(mm, prev, addr, addr + len, flags,
NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX);
if (vma)
goto out;

/*
* create a vma struct for an anonymous mapping
*/
vma = vm_area_alloc(mm);
if (!vma) {
vm_unacct_memory(len >> PAGE_SHIFT);
return -ENOMEM;
}

vma_set_anonymous(vma);
vma->vm_start = addr;
vma->vm_end = addr + len;
vma->vm_pgoff = pgoff;
vma->vm_flags = flags;
vma->vm_page_prot = vm_get_page_prot(flags);
vma_link(mm, vma, prev, rb_link, rb_parent); ///add the new vma to the mmap list and the red-black tree
out:
perf_event_mmap(vma);
mm->total_vm += len >> PAGE_SHIFT;
mm->data_vm += len >> PAGE_SHIFT;
if (flags & VM_LOCKED)
mm->locked_vm += (len >> PAGE_SHIFT);
vma->vm_flags |= VM_SOFTDIRTY;
return 0;
}

The mm_populate() function

The call chain is:

mm_populate()
->__mm_populate()
->populate_vma_page_range()
->__get_user_pages()

When the VM_LOCKED flag is set, physical pages are allocated immediately and mapped into the VMA;

Otherwise nothing is done here; only when the VMA is later accessed does a page fault allocate the physical pages and establish the mapping.
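
The effect of VM_LOCKED can be reproduced from user space with mlockall(); a minimal sketch, assuming the process has the privilege (or RLIMIT_MEMLOCK headroom) to lock 1 MiB:

#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    /* Lock current and future mappings: pages behind later allocations are
     * populated immediately (mm_populate) instead of on first fault. */
    if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0)
        return 1;

    char *buf = malloc(1 << 20);    /* 1 MiB, already backed by physical pages */
    if (!buf)
        return 1;
    memset(buf, 0, 1 << 20);        /* no demand faults expected here */

    munlockall();
    free(buf);
    return 0;
}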

1.3 The __get_user_pages() function

static long __get_user_pages(struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas, int *locked)
{
long ret = 0, i = 0;
struct vm_area_struct *vma = NULL;
struct follow_page_context ctx = { NULL };

if (!nr_pages)
return 0;

start = untagged_addr(start);

VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));

/*
* If FOLL_FORCE is set then do not force a full fault as the hinting
* fault information is unrelated to the reference behaviour of a task
* using the address space
*/
if (!(gup_flags & FOLL_FORCE))
gup_flags |= FOLL_NUMA;

do { ///handle one page at a time
struct page *page;
unsigned int foll_flags = gup_flags;
unsigned int page_increm;

/* first iteration or cross vma bound */
if (!vma || start >= vma->vm_end) {
vma = find_extend_vma(mm, start); ///check whether an existing vma can be extended to cover start
if (!vma && in_gate_area(mm, start)) {
ret = get_gate_page(mm, start & PAGE_MASK,
gup_flags, &vma,
pages ? &pages[i] : NULL);
if (ret)
goto out;
ctx.page_mask = 0;
goto next_page;
}

if (!vma) {
ret = -EFAULT;
goto out;
}
ret = check_vma_flags(vma, gup_flags);
if (ret)
goto out;

if (is_vm_hugetlb_page(vma)) { ///hugetlb (huge page) support
i = follow_hugetlb_page(mm, vma, pages, vmas,
&start, &nr_pages, i,
gup_flags, locked);
if (locked && *locked == 0) {
/*
* We've got a VM_FAULT_RETRY
* and we've lost mmap_lock.
* We must stop here.
*/
BUG_ON(gup_flags & FOLL_NOWAIT);
BUG_ON(ret != 0);
goto out;
}
continue;
}
}
retry:
/*
* If we have a pending SIGKILL, don't keep faulting pages and
* potentially allocating memory.
*/
if (fatal_signal_pending(current)) { ///bail out immediately if the current process has a pending SIGKILL
ret = -EINTR;
goto out;
}
cond_resched(); //give the scheduler a chance to run; commonly used in the kernel to keep latency low

page = follow_page_mask(vma, start, foll_flags, &ctx); ///check whether the virtual page already has physical memory; returns the page of an existing mapping
if (!page) {
ret = faultin_page(vma, start, &foll_flags, locked); ///no mapping yet: fault the page in to create the virtual-to-physical mapping
switch (ret) {
case 0:
goto retry;
case -EBUSY:
ret = 0;
fallthrough;
case -EFAULT:
case -ENOMEM:
case -EHWPOISON:
goto out;
case -ENOENT:
goto next_page;
}
BUG();
} else if (PTR_ERR(page) == -EEXIST) {
/*
* Proper page table entry exists, but no corresponding
* struct page.
*/
goto next_page;
} else if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto out;
}
if (pages) {
pages[i] = page;
flush_anon_page(vma, page, start); ///after the physical page is in place, flush the caches
flush_dcache_page(page);
ctx.page_mask = 0;
}
next_page:
if (vmas) {
vmas[i] = vma;
ctx.page_mask = 0;
}
page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
if (page_increm > nr_pages)
page_increm = nr_pages;
i += page_increm;
start += page_increm * PAGE_SIZE;
nr_pages -= page_increm;
} while (nr_pages);
out:
if (ctx.pgmap)
put_dev_pagemap(ctx.pgmap);
return i ? i : ret;
}

follow_page_mask() returns the struct page of an already-mapped page; it eventually calls follow_page_pte(), implemented as follows:

1.4 The follow_page_pte() function

static struct page *follow_page_pte(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd, unsigned int flags,
struct dev_pagemap **pgmap)
{
struct mm_struct *mm = vma->vm_mm;
struct page *page;
spinlock_t *ptl;
pte_t *ptep, pte;
int ret;

/* FOLL_GET and FOLL_PIN are mutually exclusive. */
if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
(FOLL_PIN | FOLL_GET)))
return ERR_PTR(-EINVAL);
retry:
if (unlikely(pmd_bad(*pmd)))
return no_page_table(vma, flags);

ptep = pte_offset_map_lock(mm, pmd, address, &ptl); ///get the pte and take the page-table lock
pte = *ptep;
if (!pte_present(pte)) { ///the page is not present in memory; handle the special cases below
swp_entry_t entry;
/*
* KSM's break_ksm() relies upon recognizing a ksm page
* even while it is being migrated, so for that case we
* need migration_entry_wait().
*/
if (likely(!(flags & FOLL_MIGRATION)))
goto no_page;
if (pte_none(pte))
goto no_page;
entry = pte_to_swp_entry(pte);
if (!is_migration_entry(entry))
goto no_page;
pte_unmap_unlock(ptep, ptl);
migration_entry_wait(mm, pmd, address); ///wait for page migration to finish, then retry
goto retry;
}
if ((flags & FOLL_NUMA) && pte_protnone(pte))
goto no_page;
if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
pte_unmap_unlock(ptep, ptl);
return NULL;
}

page = vm_normal_page(vma, address, pte); ///return the physical page for this pte (only normal pages; special pages are not managed by the memory manager)
if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) { ///handle device-mapped (devmap) pages
/*
* Only return device mapping pages in the FOLL_GET or FOLL_PIN
* case since they are only valid while holding the pgmap
* reference.
*/
*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
if (*pgmap)
page = pte_page(pte);
else
goto no_page;
} else if (unlikely(!page)) { ///vm_normal_page() did not return a valid page
if (flags & FOLL_DUMP) {
/* Avoid special (like zero) pages in core dumps */
page = ERR_PTR(-EFAULT);
goto out;
}

if (is_zero_pfn(pte_pfn(pte))) { ///the shared zero page; not treated as an error
page = pte_page(pte);
} else {
ret = follow_pfn_pte(vma, address, ptep, flags);
page = ERR_PTR(ret);
goto out;
}
}

/* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
if (unlikely(!try_grab_page(page, flags))) {
page = ERR_PTR(-ENOMEM);
goto out;
}
/*
* We need to make the page accessible if and only if we are going
* to access its content (the FOLL_PIN case). Please see
* Documentation/core-api/pin_user_pages.rst for details.
*/
if (flags & FOLL_PIN) {
ret = arch_make_page_accessible(page);
if (ret) {
unpin_user_page(page);
page = ERR_PTR(ret);
goto out;
}
}
if (flags & FOLL_TOUCH) { ///FOLL_TOUCH: mark the page accessed (and dirty for writes)
if ((flags & FOLL_WRITE) &&
!pte_dirty(pte) && !PageDirty(page))
set_page_dirty(page);
/*
* pte_mkyoung() would be more correct here, but atomic care
* is needed to avoid losing the dirty bit: it is easier to use
* mark_page_accessed().
*/
mark_page_accessed(page);
}
if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
/* Do not mlock pte-mapped THP */
if (PageTransCompound(page))
goto out;

/*
* The preliminary mapping check is mainly to avoid the
* pointless overhead of lock_page on the ZERO_PAGE
* which might bounce very badly if there is contention.
*
* If the page is already locked, we don't need to
* handle it now - vmscan will handle it later if and
* when it attempts to reclaim the page.
*/
if (page->mapping && trylock_page(page)) {
lru_add_drain(); /* push cached pages to LRU */
/*
* Because we lock page here, and migration is
* blocked by the pte's page reference, and we
* know the page is still mapped, we don't even
* need to check for file-cache page truncation.
*/
mlock_vma_page(page);
unlock_page(page);
}
}
out:
pte_unmap_unlock(ptep, ptl);
return page;
no_page:
pte_unmap_unlock(ptep, ptl);
if (!pte_none(pte))
return NULL;
return no_page_table(vma, flags);
}
  • (1) malloc() allocates from the C library cache, so an allocation or free does not necessarily reach the kernel immediately;

  • (2) The physical memory behind a malloc() allocation is provided either up front, when mlockall() is in effect and the pages are populated immediately, or lazily, when a page fault is triggered on first access;

  • (3) For the virtual memory returned by malloc(), three cases are worth distinguishing:

a. malloc() followed by a read: the kernel takes a page fault and do_anonymous_page() maps the shared zero page, with a read-only PTE;

b. malloc() followed by a read and then a write: the first fault maps the zero page, the second fault performs copy-on-write;

c. malloc() followed directly by a write: the anonymous page fault calls alloc_zeroed_user_highpage_movable() to allocate a fresh page and installs a writable PTE;
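
The three cases can be annotated in a tiny user-space sketch (a simplification: it assumes the pages returned by malloc() are freshly mapped and untouched, which is not guaranteed for small allocations served from the existing heap):

#include <stdlib.h>

int main(void)
{
    char *a = malloc(4096);
    char *b = malloc(4096);
    if (!a || !b)
        return 1;

    /* Cases a/b: the first read faults in the shared zero page with a
     * read-only PTE; the following write faults again and copy-on-write
     * replaces it with a private writable page. */
    char t = a[0];
    a[0] = t + 1;

    /* Case c: writing first goes straight through do_anonymous_page(),
     * which allocates a fresh zeroed page and installs a writable PTE. */
    b[0] = 1;

    free(a);
    free(b);
    return 0;
}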

2. kmalloc

Requests for small blocks of memory (less than a page) inside the kernel are normally served through kmalloc(), the interface exported by the slab allocator (although it can allocate anywhere from 32 bytes up to 131072 bytes). From the kernel memory allocation point of view, kmalloc() is a useful complement to get_free_page(s)(), offering a much finer allocation granularity.

kmalloc() is the kernel-space counterpart of the familiar user-space malloc().

kmalloc() allocates a physically contiguous block of memory and, like malloc(), does not clear the previous contents. When memory is plentiful it is very fast. Its prototype is:

static inline void *kmalloc(size_t size, gfp_t flags);  /* returns a kernel virtual address */

size: the amount of memory to allocate. Raw page allocations in Linux come in whole pages (typically 4 KB), which would be extremely wasteful when only a few bytes are needed. kmalloc() therefore works differently: the kernel sets up a series of caches of different object sizes (32 B, 64 B, 128 B, ..., 128 KB) in advance, and a request is served from the smallest cache that is at least as large as the requested size. In other words, kmalloc() allocations are at least 32 bytes and at most 128 KB (the exact limits depend on the architecture and configuration); anything larger needs a different allocator, for example vmalloc().

flags: controls the allocator's behaviour. The most common value is GFP_KERNEL, which means that if there is not enough free memory the calling process may sleep while the kernel reclaims memory (for example by swapping data out to disk), and is woken up and given the memory once the allocation can be satisfied.

Because a GFP_KERNEL allocation can sleep waiting for pages, it can block, so it must not be used in interrupt context or while holding a spinlock. In non-process contexts such as interrupt handlers, tasklets and kernel timers, where blocking is not allowed, drivers should use GFP_ATOMIC instead: a GFP_ATOMIC allocation never waits, and simply fails if no free pages are available.
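
As a hedged illustration of the two flags, here is a minimal module-style sketch (demo_init/demo_exit are made-up names; the GFP_ATOMIC call is shown in process context purely to contrast the flags):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

static void *sleepy_buf;
static void *atomic_buf;

static int __init demo_init(void)
{
    /* Process context: the allocator may sleep while reclaiming memory. */
    sleepy_buf = kmalloc(4096, GFP_KERNEL);
    if (!sleepy_buf)
        return -ENOMEM;

    /* What an interrupt handler or tasklet would use: never sleeps,
     * fails immediately when no free memory is available. */
    atomic_buf = kmalloc(256, GFP_ATOMIC);
    if (!atomic_buf) {
        kfree(sleepy_buf);
        return -ENOMEM;
    }
    return 0;
}

static void __exit demo_exit(void)
{
    kfree(atomic_buf);
    kfree(sleepy_buf);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");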

The kmalloc() function

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size)) {
#ifndef CONFIG_SLOB
unsigned int index;
#endif
if (size > KMALLOC_MAX_CACHE_SIZE)
return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
index = kmalloc_index(size); ///find which kmalloc slab cache to use

if (!index)
return ZERO_SIZE_PTR;

return kmem_cache_alloc_trace( ///allocate from the slab cache
kmalloc_caches[kmalloc_type(flags)][index],
flags, size);
#endif
}
return __kmalloc(size, flags);
}

The kmem_cache_alloc_trace() allocation function

void *
kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
void *ret;

ret = slab_alloc(cachep, flags, size, _RET_IP_); ///allocate an object from the slab cache

ret = kasan_kmalloc(cachep, ret, size, flags);
trace_kmalloc(_RET_IP_, ret,
size, cachep->size, flags);
return ret;
}

As the code shows, kmalloc() is built on top of the slab allocator, so the memory it returns is always physically contiguous.

3. vmalloc

vmalloc() is generally used to allocate larger sequential buffers that exist only in software (they have no direct hardware meaning). When no sufficiently large physically contiguous region is available, vmalloc() can still be used, because it returns memory that is contiguous in virtual address space but not necessarily in physical memory. Since new page table entries have to be set up, its overhead is much higher than kmalloc() or the __get_free_pages() functions described later. vmalloc() must not be used in atomic context, because its internal implementation uses kmalloc() with the GFP_KERNEL flag. Its prototypes are:

void *vmalloc(unsigned long size);
void vfree(const void *addr);

One example user of vmalloc() is the create_module() system call, which uses vmalloc() to obtain the memory needed by the module being created.

Memory allocation is a demanding task, and the return value should always be checked. In driver code the allocated buffer is often filled with copy_from_user(). The following example uses vmalloc():

static int xxx(...)
{
...
cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
if(!cpuid_entries)
goto out;
if(copy_from_user(cpuid_entries, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry)))
goto out_free;
for(i=0; i<cpuid->nent; i++){
vcpuid->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
...
vcpuid->arch.cpuid_entries[i].index = 0;
}
...
out_free:
vfree(cpuid_entries);
out:
return r;
}

The core work is done by __vmalloc_node_range(), which calls __vmalloc_area_node() to allocate and map the pages:

static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
pgprot_t prot, unsigned int page_shift,
int node)
{
const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
unsigned long addr = (unsigned long)area->addr;
unsigned long size = get_vm_area_size(area); ///size of the vm_struct area in bytes (used to work out how many pages it needs)
unsigned long array_size;
unsigned int nr_small_pages = size >> PAGE_SHIFT;
unsigned int page_order;
struct page **pages;
unsigned int i;

array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
gfp_mask |= __GFP_NOWARN;
if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
gfp_mask |= __GFP_HIGHMEM;

/* Please note that the recursion is strictly bounded. */
if (array_size > PAGE_SIZE) {
pages = __vmalloc_node(array_size, 1, nested_gfp, node,
area->caller);
} else {
pages = kmalloc_node(array_size, nested_gfp, node);
}

if (!pages) {
free_vm_area(area);
warn_alloc(gfp_mask, NULL,
"vmalloc size %lu allocation failure: "
"page array size %lu allocation failed",
nr_small_pages * PAGE_SIZE, array_size);
return NULL;
}

area->pages = pages; ///save the array of struct page pointers for the allocated pages
area->nr_pages = nr_small_pages;
set_vm_area_page_order(area, page_shift - PAGE_SHIFT);

page_order = vm_area_page_order(area);

/*
* Careful, we allocate and map page_order pages, but tracking is done
* per PAGE_SIZE page so as to keep the vm_struct APIs independent of
* the physical/mapped size.
*/
for (i = 0; i < area->nr_pages; i += 1U << page_order) {
struct page *page;
int p;

/* Compound pages required for remap_vmalloc_page */
page = alloc_pages_node(node, gfp_mask | __GFP_COMP, page_order); ///allocate the physical pages
if (unlikely(!page)) {
/* Successfully allocated i pages, free them in __vfree() */
area->nr_pages = i;
atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
warn_alloc(gfp_mask, NULL,
"vmalloc size %lu allocation failure: "
"page order %u allocation failed",
area->nr_pages * PAGE_SIZE, page_order);
goto fail;
}

for (p = 0; p < (1U << page_order); p++)
area->pages[i + p] = page + p;

if (gfpflags_allow_blocking(gfp_mask))
cond_resched();
}
atomic_long_add(area->nr_pages, &nr_vmalloc_pages);

if (vmap_pages_range(addr, addr + size, prot, pages, page_shift) < 0) { ///map the physical pages into the vmalloc area
warn_alloc(gfp_mask, NULL,
"vmalloc size %lu allocation failure: "
"failed to map pages",
area->nr_pages * PAGE_SIZE);
goto fail;
}

return area->addr;

fail:
__vfree(area->addr);
return NULL;
}

So vmalloc() reserves a VMA in the vmalloc region on demand, allocates physical pages and maps them into it. Because it allocates whole physical pages (at least one 4 KB page), vmalloc() is best suited to larger allocations, and the physical memory backing them need not be contiguous.
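
A self-contained sketch of the basic pattern (demo_vmalloc is a made-up name): allocate a virtually contiguous 1 MiB buffer whose backing pages need not be physically contiguous, then release it.

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

static int demo_vmalloc(void)
{
    /* 1 MiB, virtually contiguous; the backing pages may be scattered. */
    char *buf = vmalloc(1 << 20);
    if (!buf)
        return -ENOMEM;

    memset(buf, 0, 1 << 20);    /* accesses go through the vmalloc page tables */

    vfree(buf);
    return 0;
}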

4. mmap

mmap() is typically used by user programs to allocate memory, read and write large files, map shared libraries, and share memory between processes.

[Figure: flow of the mmap() implementation]

Depending on whether the mapping is backed by a file and whether it is shared, mmap() mappings fall into four categories:

1. Private anonymous mapping

With fd = -1 and flags = MAP_ANONYMOUS | MAP_PRIVATE, mmap() creates a private anonymous mapping.

glibc uses this when allocating large blocks: for requests larger than MMAP_THRESHOLD (128 KB by default), glibc uses mmap() instead of brk.

2. Shared anonymous mapping

fd = -1 and flags = MAP_ANONYMOUS | MAP_SHARED;

Commonly used for parent-child process communication through a shared memory region.

do_mmap_pgoff() -> mmap_region() eventually calls shmem_zero_setup(), which opens the /dev/zero device file;

Alternatively, opening /dev/zero directly and creating the mapping from that file descriptor also ends up in the shmem module and produces a shared anonymous mapping.

3. Private file mapping

flags = MAP_PRIVATE;

The typical use case is loading dynamic shared libraries.

4. Shared file mapping

flags = MAP_SHARED; there are two main use cases:

(1) Reading and writing files: the kernel's writeback mechanism synchronises the in-memory data to disk;

(2) Inter-process communication: several independent processes map the same file and see each other's updates, which implements multi-process communication (see the sketch below).
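
A user-space sketch of two of the four cases above, a private anonymous mapping and a shared file mapping (the path /tmp/mmap_demo is purely illustrative):

#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    size_t len = 4096;

    /* Private anonymous mapping: what glibc uses for large malloc() requests. */
    char *anon = mmap(NULL, len, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (anon == MAP_FAILED)
        return 1;
    anon[0] = 1;

    /* Shared file mapping: visible to every process mapping the same file,
     * and written back to disk by the kernel. */
    int fd = open("/tmp/mmap_demo", O_RDWR | O_CREAT, 0644);
    if (fd < 0 || ftruncate(fd, len) != 0)
        return 1;
    char *shared = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (shared == MAP_FAILED)
        return 1;
    memcpy(shared, "hello", 6);

    munmap(shared, len);
    munmap(anon, len);
    close(fd);
    return 0;
}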

The core function is:

unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
struct list_head *uf)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev, *merge;
int error;
struct rb_node **rb_link, *rb_parent;
unsigned long charged = 0;

/* Check against address space limit. */
if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
unsigned long nr_pages;

/*
* MAP_FIXED may remove pages of mappings that intersects with
* requested mapping. Account for the pages it would unmap.
*/
nr_pages = count_vma_pages_range(mm, addr, addr + len);

if (!may_expand_vm(mm, vm_flags,
(len >> PAGE_SHIFT) - nr_pages))
return -ENOMEM;
}

/* Clear old maps, set up prev, rb_link, rb_parent, and uf */
if (munmap_vma_range(mm, addr, len, &prev, &rb_link, &rb_parent, uf))
return -ENOMEM;
/*
* Private writable mapping: check memory availability
*/
if (accountable_mapping(file, vm_flags)) {
charged = len >> PAGE_SHIFT;
if (security_vm_enough_memory_mm(mm, charged))
return -ENOMEM;
vm_flags |= VM_ACCOUNT;
}

/*
* Can we just expand an old mapping?
*/
vma = vma_merge(mm, prev, addr, addr + len, vm_flags, ///try to merge into an existing vma
NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX);
if (vma)
goto out;

/*
* Determine the object being mapped and call the appropriate
* specific mapper. the address has already been validated, but
* not unmapped, but the maps are removed from the list.
*/
vma = vm_area_alloc(mm); ///allocate a new vma
if (!vma) {
error = -ENOMEM;
goto unacct_error;
}

vma->vm_start = addr;
vma->vm_end = addr + len;
vma->vm_flags = vm_flags;
vma->vm_page_prot = vm_get_page_prot(vm_flags);
vma->vm_pgoff = pgoff;

if (file) { ///file-backed mapping
if (vm_flags & VM_DENYWRITE) {
error = deny_write_access(file);
if (error)
goto free_vma;
}
if (vm_flags & VM_SHARED) {
error = mapping_map_writable(file->f_mapping);
if (error)
goto allow_write_and_free_vma;
}

/* ->mmap() can change vma->vm_file, but must guarantee that
* vma_link() below can deny write-access if VM_DENYWRITE is set
* and map writably if VM_SHARED is set. This usually means the
* new file must not have been exposed to user-space, yet.
*/
vma->vm_file = get_file(file);
error = call_mmap(file, vma);
if (error)
goto unmap_and_free_vma;

/* Can addr have changed??
*
* Answer: Yes, several device drivers can do it in their
* f_op->mmap method. -DaveM
* Bug: If addr is changed, prev, rb_link, rb_parent should
* be updated for vma_link()
*/
WARN_ON_ONCE(addr != vma->vm_start);

addr = vma->vm_start;

/* If vm_flags changed after call_mmap(), we should try merge vma again
* as we may succeed this time.
*/
if (unlikely(vm_flags != vma->vm_flags && prev)) {
merge = vma_merge(mm, prev, vma->vm_start, vma->vm_end, vma->vm_flags,
NULL, vma->vm_file, vma->vm_pgoff, NULL, NULL_VM_UFFD_CTX);
if (merge) {
/* ->mmap() can change vma->vm_file and fput the original file. So
* fput the vma->vm_file here or we would add an extra fput for file
* and cause general protection fault ultimately.
*/
fput(vma->vm_file);
vm_area_free(vma);
vma = merge;
/* Update vm_flags to pick up the change. */
vm_flags = vma->vm_flags;
goto unmap_writable;
}
}

vm_flags = vma->vm_flags;
} else if (vm_flags & VM_SHARED) { ///shared mapping
error = shmem_zero_setup(vma); ///shared anonymous mapping
if (error)
goto free_vma;
} else {
vma_set_anonymous(vma); ///private anonymous mapping
}

/* Allow architectures to sanity-check the vm_flags */
if (!arch_validate_flags(vma->vm_flags)) {
error = -EINVAL;
if (file)
goto unmap_and_free_vma;
else
goto free_vma;
}

vma_link(mm, vma, prev, rb_link, rb_parent); ///insert the vma into the mm's list and red-black tree
/* Once vma denies write, undo our temporary denial count */
if (file) {
unmap_writable:
if (vm_flags & VM_SHARED)
mapping_unmap_writable(file->f_mapping);
if (vm_flags & VM_DENYWRITE)
allow_write_access(file);
}
file = vma->vm_file;
out:
perf_event_mmap(vma);

vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
if (vm_flags & VM_LOCKED) {
if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
is_vm_hugetlb_page(vma) ||
vma == get_gate_vma(current->mm))
vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
else
mm->locked_vm += (len >> PAGE_SHIFT);
}

if (file)
uprobe_mmap(vma);

/*
* New (or expanded) vma always get soft dirty status.
* Otherwise user-space soft-dirty page tracker won't
* be able to distinguish situation when vma area unmapped,
* then new mapped in-place (which must be aimed as
* a completely new data area).
*/
vma->vm_flags |= VM_SOFTDIRTY;

vma_set_page_prot(vma);

return addr;

unmap_and_free_vma:
fput(vma->vm_file);
vma->vm_file = NULL;

/* Undo any partial mapping done by a device driver. */
unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
charged = 0;
if (vm_flags & VM_SHARED)
mapping_unmap_writable(file->f_mapping);
allow_write_and_free_vma:
if (vm_flags & VM_DENYWRITE)
allow_write_access(file);
free_vma:
vm_area_free(vma);
unacct_error:
if (charged)
vm_unacct_memory(charged);
return error;
}

Unless told otherwise, the malloc() and mmap() paths above only set up virtual address space by default; they do not establish the mapping from virtual addresses to physical memory.

When an unmapped virtual address is accessed, a page fault occurs; the Linux page fault handler allocates a physical page and establishes the virtual-to-physical mapping.

Two additional questions:

1. Why doesn't mmap() fail when the same address is requested repeatedly?

find_vma_links() walks all of the process's VMAs; when the region to be mapped overlaps an existing VMA, the old mapping is torn down first and the region is mapped again, so the second request does not fail.

2. Why can playback stutter when mmap() is used to open several files, for example when playing video?

mmap() only creates the VMA; it does not actually allocate physical pages or read the file. When the player really reads the data it triggers page faults repeatedly, each time reading from disk into the page cache, so effective disk read performance is poor.

madvise(addr, len, advice) with MADV_WILLNEED and MADV_SEQUENTIAL asks the kernel to read the file contents ahead of time and to optimise for sequential access;

However, the kernel's default readahead already achieves much of this, and according to the author madvise() is not a good fit for streaming media, being better suited to other access patterns;

An effective way to improve streaming I/O performance is to enlarge the kernel's default readahead window; the default is 128 KB and it can be changed with the "blockdev --setra" command.
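
A sketch of those hints (map_with_readahead is a hypothetical helper; note that madvise() advice values are not flags, so they are passed in separate calls):

#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

/* Map a media file read-only and ask the kernel to read it ahead.
 * For streaming workloads, enlarging the block device readahead window,
 * e.g. "blockdev --setra 4096 /dev/sda", is usually the bigger win. */
static void *map_with_readahead(const char *path, size_t *len_out)
{
    int fd = open(path, O_RDONLY);
    if (fd < 0)
        return NULL;

    struct stat st;
    if (fstat(fd, &st) != 0 || st.st_size == 0) {
        close(fd);
        return NULL;
    }

    void *p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
    close(fd);                                   /* the mapping keeps the file referenced */
    if (p == MAP_FAILED)
        return NULL;

    madvise(p, st.st_size, MADV_SEQUENTIAL);     /* declare sequential access */
    madvise(p, st.st_size, MADV_WILLNEED);       /* start readahead of the range now */

    *len_out = st.st_size;
    return p;
}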


5. Page allocation functions


Depending on how many pages they return, these functions can be divided into single-page and multi-page variants.

5.1 The alloc_page() and alloc_pages() functions

These are defined in the header include/linux/gfp.h. The pages they allocate can back either kernel or user space; the call returns the page descriptor (struct page) of the first allocated page rather than its virtual address. The prototypes are:

#define alloc_page(gfp_mask)  alloc_pages(gfp_mask, 0)
#define alloc_pages(gfp_mask, order) alloc_pages_node(numa_node_id(), gfp_mask, order) /* allocate 2^order contiguous pages */
static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
{
if (unlikely(order >= MAX_ORDER))
return NULL;
if (nid < 0)
nid = numa_node_id();
return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
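
A hedged in-kernel sketch of this API (demo_alloc_pages is a made-up function name): allocate an order-2 block (4 contiguous pages), use its kernel virtual address, then free it.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>

static int demo_alloc_pages(void)
{
    /* 2^2 = 4 physically contiguous pages. */
    struct page *page = alloc_pages(GFP_KERNEL, 2);
    if (!page)
        return -ENOMEM;

    /* alloc_pages() hands back the struct page descriptor;
     * page_address() gives the kernel virtual address (lowmem only). */
    void *vaddr = page_address(page);
    memset(vaddr, 0, 4 * PAGE_SIZE);

    __free_pages(page, 2);          /* free with the same order */
    return 0;
}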

5.2 The __get_free_pages() family

These functions underlie the kmalloc() implementation (the slab allocator obtains its pages from the page allocator) and return the virtual address of one or more pages. The family includes get_zeroed_page(), __get_free_page() and __get_free_pages(). Their allocation flags have exactly the same values and meaning as for kmalloc(); the most common are GFP_KERNEL and GFP_ATOMIC.

/* Allocate multiple pages and return the virtual address of the first one.
2^order pages are allocated and they are not zeroed. The maximum allowed
order is 10 (1024 pages) or 11 (2048 pages), depending on the hardware
platform. */
unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
struct page *page;
page = alloc_pages(gfp_mask, order);
if(!page)
return 0;
return (unsigned long)page_address(page);
}

#define __get_free_page(gfp_mask) __get_free_pages(gfp_mask, 0)

/* returns the address of a new page and zeroes the page */
unsigned long get_zeroed_page(unsigned int flags);

Memory obtained with the __get_free_pages() family of functions/macros should be released with free_page(addr) or free_pages(addr, order):

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)

void free_pages(unsigned long addr, unsigned int order)
{
if(addr != 0){
VM_BUG_ON(!virt_addr_valid((void*)addr));
__free_pages(virt_to_page((void *)addr), order);
}
}

void __free_pages(struct page *page, unsigned int order)
{
if(put_page_testzero(page)){
if(order == 0)
free_hot_page(page);
else
__free_pages_ok(page, order);
}
}

free_pages() completes the release by calling __free_pages().
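
A short usage sketch of this family (demo_get_free_pages is a made-up name):

#include <linux/errno.h>
#include <linux/gfp.h>

static int demo_get_free_pages(void)
{
    /* 2^2 = 4 pages, returned directly as a kernel virtual address. */
    unsigned long addr = __get_free_pages(GFP_KERNEL, 2);
    if (!addr)
        return -ENOMEM;

    /* A single page, already zero-filled. */
    unsigned long zero = get_zeroed_page(GFP_KERNEL);
    if (!zero) {
        free_pages(addr, 2);
        return -ENOMEM;
    }

    free_page(zero);                /* order-0 convenience wrapper */
    free_pages(addr, 2);            /* must match the order used at allocation */
    return 0;
}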

6. Slab caches

When a driver repeatedly allocates and frees memory blocks of the same size (for example inode or task_struct objects), a memory-pool style technique is recommended: successive uses of an object land in the same memory (or the same kind of memory) and the basic data structure is preserved, which greatly improves efficiency. In Linux, the slab allocator provides this kind of memory-pool management; the memory regions it uses are called lookaside caches.

The slab API is declared in linux/slab.h. Before using a lookaside cache, a kmem_cache structure must be created.

6.1 Creating a slab cache

This function creates a slab cache (a lookaside cache) that can hold an arbitrary number of objects, all of the same size. Its prototype (in the older kernel API shown here) is:

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
size_t align, unsigned long flags,
void (*ctor)(void *, struct kmem_cache *, unsigned long),
void (*dtor)(void *, struct kmem_cache *, unsigned long));

Where:

  • name: the name of the cache being created;

  • size: the size in bytes of each object in the cache;

  • align: the alignment required for objects in the cache (usually 0);

  • flags: a bitmask controlling how allocation is performed, including SLAB_NO_REAP (do not shrink this cache automatically even under memory pressure), SLAB_HWCACHE_ALIGN (align each object to a hardware cache line), SLAB_CACHE_DMA (allocate objects from DMA-able memory), and so on;

  • ctor: an optional constructor (initialisation function) for the objects;

  • dtor: an optional destructor (cleanup function) for the objects.

6.2 Allocating from a slab cache

Once the lookaside cache has been created, kmem_cache_alloc() allocates an object from it:

void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags);

cachep points to the cache to allocate from; flags has the same meaning as the flags passed to kmalloc(), usually GFP_KERNEL.

6.3 Freeing a slab object

This function releases an object back to its cache:

void kmem_cache_free(struct kmem_cache *cachep, void *objp);

6.4 Destroying a slab cache

The counterpart of kmem_cache_create() destroys the cache and releases the lookaside memory:

int kmem_cache_destroy(struct kmem_cache *cachep);

It can only destroy the cache after every object allocated from it has been freed.

6.5 Slab cache usage example

Create a lookaside cache for thread structures (struct thread_info). Linux creates and destroys threads very frequently; using __get_free_page() for them would waste a lot of memory and be inefficient, so during kernel initialisation a lookaside cache named thread_info is created:

/* Create the slab cache */
static struct kmem_cache *thread_info_cache;
thread_info_cache = kmem_cache_create("thread_info", sizeof(struct thread_info), \
0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL, NULL);

/* Allocate an object from the slab cache */
struct thread_info *ti;
ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);

/* Use the object */
...
/* Free the object and destroy the cache */
kmem_cache_free(thread_info_cache, ti);
kmem_cache_destroy(thread_info_cache);

7. Memory pools

The Linux kernel also supports memory pools (mempools), another classic lookaside-cache technique for allocating large numbers of small objects.

7.1 Creating a memory pool

mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, \
mempool_free_t *free_fn, void *pool_data);

mempool_create() creates a memory pool. The min_nr parameter is the number of objects to preallocate; alloc_fn and free_fn point to the standard object allocation and free functions used by the pool, whose prototypes are:

typedef void *(mempool_alloc_t)(int gfp_mask, void *pool_data); 
typedef void (mempool_free_t)(void *element, void *pool_data);

pool_data is a pointer handed to the allocation and free functions; gfp_mask is the allocation mask. The allocation function will only sleep if the __GFP_WAIT flag is set.

7.2 Allocating and freeing objects

Objects are allocated from and returned to the pool with:

void *mempool_alloc(mempool_t *pool, int gfp_mask); 
void mempool_free(void *element, mempool_t *pool);

mempool_alloc() allocates an object; if the underlying allocator cannot provide memory, the pool's preallocated reserve is used instead.

7.3 Destroying a memory pool

void mempool_destroy(mempool_t *pool);

A pool created with mempool_create() must be released with mempool_destroy().
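
A sketch tying the slab and mempool APIs together, using the kernel's stock mempool_alloc_slab/mempool_free_slab helpers (struct io_req and the demo_* names are illustrative; the five-argument kmem_cache_create() used here is the current form, without the dtor shown in the older prototype above):

#include <linux/errno.h>
#include <linux/mempool.h>
#include <linux/slab.h>

struct io_req {                     /* illustrative object type */
    int id;
    char payload[120];
};

static struct kmem_cache *req_cache;
static mempool_t *req_pool;

static int demo_mempool_init(void)
{
    req_cache = kmem_cache_create("demo_io_req", sizeof(struct io_req),
                                  0, SLAB_HWCACHE_ALIGN, NULL);
    if (!req_cache)
        return -ENOMEM;

    /* Keep at least 16 objects in reserve so allocations can still
     * succeed under memory pressure. */
    req_pool = mempool_create(16, mempool_alloc_slab, mempool_free_slab,
                              req_cache);
    if (!req_pool) {
        kmem_cache_destroy(req_cache);
        return -ENOMEM;
    }

    struct io_req *req = mempool_alloc(req_pool, GFP_KERNEL);
    if (req) {
        req->id = 1;
        mempool_free(req, req_pool);
    }
    return 0;
}

static void demo_mempool_exit(void)
{
    mempool_destroy(req_pool);
    kmem_cache_destroy(req_cache);
}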

 

Original source: the WeChat public account 深度Linux.
