Process Address Management

1. Copying the memory space on process fork

When a process forks, dup_mm() allocates a new mm_struct, copies the parent's into it, sets up a fresh page-table root through mm_init() (which ends up in mm_alloc_pgd()), and then duplicates the parent's virtual memory areas with dup_mmap().

kernel/msm-4.19/kernel/fork.c

/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
static struct mm_struct *dup_mm(struct task_struct *tsk)
{
    struct mm_struct *mm, *oldmm = current->mm;
    int err;

    mm = allocate_mm();
    if (!mm)
        goto fail_nomem;

    memcpy(mm, oldmm, sizeof(*mm));
    if (!mm_init(mm, tsk, mm->user_ns)) // mm_init() eventually allocates the page table (PGD) via mm_alloc_pgd()
        goto fail_nomem;

    err = dup_mmap(mm, oldmm); // copy the parent's virtual memory areas (VMAs)
    if (err)
        goto free_pt;

    mm->hiwater_rss = get_mm_rss(mm);
    mm->hiwater_vm = mm->total_vm;

    if (mm->binfmt && !try_module_get(mm->binfmt->module))
        goto free_pt;

    return mm;

free_pt:
    /* don't put binfmt in mmput, we haven't got module yet */
    mm->binfmt = NULL;
    mm_init_owner(mm, NULL);
    mmput(mm);

fail_nomem:
    return NULL;
}
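
The effect can be seen from user space: after fork() the child runs in its own copy of the parent's address space (populated lazily, copy-on-write), so a write in one process is not visible in the other. A minimal, hypothetical demo program (not part of the kernel source above):

#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
    int value = 1;                  /* lives in the parent's address space */
    pid_t pid = fork();             /* kernel duplicates the mm via dup_mm() */

    if (pid < 0) {
        perror("fork");
        return EXIT_FAILURE;
    }
    if (pid == 0) {                 /* child: writes go to its own copy of the page */
        value = 2;
        printf("child  sees value = %d\n", value);
        return EXIT_SUCCESS;
    }
    waitpid(pid, NULL, 0);          /* parent: unchanged despite the child's write */
    printf("parent sees value = %d\n", value);
    return EXIT_SUCCESS;
}

The child prints value = 2 while the parent still prints value = 1, because the child's write landed in its own copy of the page.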
2. Mapping physical addresses into the process address space

Mapping ION memory into user space

ion_heap_map_user() walks the buffer's scatter-gather table and maps each physically contiguous chunk of pages into the caller's VMA with remap_pfn_range().

kernel/msm-4.19/drivers/staging/android/ion/ion_heap.c

int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
              struct vm_area_struct *vma)
{
...
    for_each_sg(table->sgl, sg, table->nents, i) {
        struct page *page = sg_page(sg);
        unsigned long remainder = vma->vm_end - addr;
        unsigned long len = sg->length;

        if (offset >= sg->length) {
            offset -= sg->length;
            continue;
        } else if (offset) {
            page += offset / PAGE_SIZE;
            len = sg->length - offset;
            offset = 0;
        }
        len = min(len, remainder);
        ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
                      vma->vm_page_prot);
        if (ret)
            return ret;
        addr += len;
        if (addr >= vma->vm_end)
            return 0;
    }
    return 0;
}

kernel/msm-4.19/mm/memory.c

/**
 * remap_pfn_range - remap kernel memory to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @pfn: physical address of kernel memory
 * @size: size of map area
 * @prot: page protection flags for this mapping
 *
 *  Note: this is only safe if the mm semaphore is held when called.
 */
int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
            unsigned long pfn, unsigned long size, pgprot_t prot)
{
...
    pfn -= addr >> PAGE_SHIFT;
    pgd = pgd_offset(mm, addr);
    flush_cache_range(vma, addr, end);
    do {
        next = pgd_addr_end(addr, end);
        err = remap_p4d_range(mm, pgd, addr, next,
                pfn + (addr >> PAGE_SHIFT), prot);
        if (err)
            break;
    } while (pgd++, addr = next, addr != end);
...
}
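
For comparison, here is a minimal, hypothetical character-driver sketch (names such as pfn_demo and demo_buf are made up) whose mmap handler uses remap_pfn_range() the same way the ION path does, mapping one driver-owned page into whatever VMA the caller supplies:

#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/io.h>

/* One physically contiguous page owned by the driver. */
static unsigned long demo_buf;

static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
    unsigned long size = vma->vm_end - vma->vm_start;
    unsigned long pfn = virt_to_phys((void *)demo_buf) >> PAGE_SHIFT;

    if (size > PAGE_SIZE)
        return -EINVAL;

    /* Map the whole range in one call; the page tables are populated
     * here, so no page faults are taken on later accesses. */
    return remap_pfn_range(vma, vma->vm_start, pfn, size,
                   vma->vm_page_prot);
}

static const struct file_operations demo_fops = {
    .owner = THIS_MODULE,
    .mmap  = demo_mmap,
};

static struct miscdevice demo_dev = {
    .minor = MISC_DYNAMIC_MINOR,
    .name  = "pfn_demo",
    .fops  = &demo_fops,
};

static int __init demo_init(void)
{
    demo_buf = __get_free_page(GFP_KERNEL);
    if (!demo_buf)
        return -ENOMEM;
    return misc_register(&demo_dev);
}

static void __exit demo_exit(void)
{
    misc_deregister(&demo_dev);
    free_page(demo_buf);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

User space would open /dev/pfn_demo and mmap() one page from it; after that, loads and stores hit demo_buf directly, with no further page faults, because the page tables were filled in inside demo_mmap().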

Mapping kgsl memory into user space

For cached (writeback/writethrough) buffers, kgsl_mmap() pre-populates the VMA by inserting the allocation's pages one at a time with vm_insert_page().

kernel/msm-4.19/drivers/gpu/msm/kgsl.c

static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
{
...
    if (cache == KGSL_CACHEMODE_WRITEBACK
        || cache == KGSL_CACHEMODE_WRITETHROUGH) {
        int i;
        unsigned long addr = vma->vm_start;
        struct kgsl_memdesc *m = &entry->memdesc;

        for (i = 0; i < m->page_count; i++) {
            struct page *page = m->pages[i];

            vm_insert_page(vma, addr, page);
            addr += PAGE_SIZE;
        }
        m->mapsize = m->size;
        entry->priv->gpumem_mapped += m->mapsize;
    }

    vma->vm_file = file;

    entry->memdesc.useraddr = vma->vm_start;

}

kernel/msm-4.19/mm/memory.c

int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
            struct page *page)
{
    if (addr < vma->vm_start || addr >= vma->vm_end)
        return -EFAULT;
    if (!page_count(page))
        return -EINVAL;
    if (!(vma->vm_flags & VM_MIXEDMAP)) {
        BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
        BUG_ON(vma->vm_flags & VM_PFNMAP);
        vma->vm_flags |= VM_MIXEDMAP;
    }
    return insert_page(vma, addr, page, vma->vm_page_prot);
}
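
vm_insert_page() is the struct-page-based counterpart of remap_pfn_range(): it inserts a single page per call, flips the VMA to VM_MIXEDMAP, and relies on the page's reference count to keep the memory alive, which suits allocations like kgsl's that already track a pages[] array. A hypothetical handler in the same spirit (identifiers are invented; page allocation and device registration, as in the previous sketch, are omitted):

#include <linux/fs.h>
#include <linux/mm.h>

#define DEMO_NPAGES 4

/* Assumed to be filled with alloc_page(GFP_KERNEL) at init time (not shown);
 * the handler would be wired up as the .mmap callback of a file_operations. */
static struct page *demo_pages[DEMO_NPAGES];

static int demo_mmap_pages(struct file *file, struct vm_area_struct *vma)
{
    unsigned long addr = vma->vm_start;
    int i, ret;

    if (vma->vm_end - vma->vm_start > DEMO_NPAGES * PAGE_SIZE)
        return -EINVAL;

    /* Insert the backing pages one by one, as kgsl_mmap() does for
     * cached buffers; vm_insert_page() sets VM_MIXEDMAP on first use. */
    for (i = 0; i < DEMO_NPAGES && addr < vma->vm_end; i++) {
        ret = vm_insert_page(vma, addr, demo_pages[i]);
        if (ret)
            return ret;
        addr += PAGE_SIZE;
    }
    return 0;
}

Mapping page by page costs one call per page instead of a single remap of a contiguous range, but it lets the driver hand out pages it already owns and account the mapped size afterwards, much as kgsl_mmap() updates gpumem_mapped.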