[Kernel Source Study Notes] The slab allocator (5): freeing slab objects

6. Freeing slab objects

Freeing a slab object is done mainly by calling the kmem_cache_free() function.

void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	unsigned long flags;
	cachep = cache_from_obj(cachep, objp);
	if (!cachep)
		return;

	local_irq_save(flags);
	debug_check_no_locks_freed(objp, cachep->object_size);
	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
		debug_check_no_obj_freed(objp, cachep->object_size);
	__cache_free(cachep, objp, _RET_IP_);
	local_irq_restore(flags);

	trace_kmem_cache_free(_RET_IP_, objp);
}
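For context, here is a minimal usage sketch (my own example, not from the kernel tree; "foo_cache" and struct foo are made up, and module plumbing and most error handling are trimmed) of the API whose free path this section dissects:

#include <linux/errno.h>
#include <linux/slab.h>

struct foo {
	int a;
};

static struct kmem_cache *foo_cachep;

static int foo_demo(void)
{
	struct foo *f;

	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
				       0, SLAB_HWCACHE_ALIGN, NULL);
	if (!foo_cachep)
		return -ENOMEM;

	f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
	if (f)
		kmem_cache_free(foo_cachep, f);	/* the path analyzed below */

	kmem_cache_destroy(foo_cachep);
	return 0;
}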

Step one: find the kmem_cache this object actually belongs to.

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value. but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * would be a constant.
	 */
	if (!memcg_kmem_enabled() &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	WARN_ON_ONCE(1);
	return s;
}

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);

	return compound_head(page);
}

#define virt_to_page(addr)	pfn_to_page(virt_to_pfn(addr))

In this function, the main job is to find the page that holds the object's virtual address and then take slab_cache from that page.
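To make this lookup concrete, here is a small user-space model (my own sketch, not kernel code): with a linear kernel mapping, virt_to_page() is pure arithmetic, and compound_head() is modeled after the kernel's encoding, where a tail page stores (head | 1) in page->compound_head.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct page_model {
	uintptr_t compound_head;	/* (head | 1) on tail pages, else 0 */
	void *slab_cache;		/* would be set on a slab's head page */
};

static struct page_model mem_map[16];	/* toy flat mem_map */
static uintptr_t page_offset;		/* base of the "linear mapping" */

static struct page_model *virt_to_page_model(const void *x)
{
	return &mem_map[((uintptr_t)x - page_offset) >> PAGE_SHIFT];
}

static struct page_model *compound_head_model(struct page_model *page)
{
	if (page->compound_head & 1)
		return (struct page_model *)(page->compound_head - 1);
	return page;
}

int main(void)
{
	static char backing[16 << PAGE_SHIFT];	/* fake linear mapping */
	page_offset = (uintptr_t)backing;

	/* pretend pages 4..7 back one slab: tails 5..7 point at head 4 */
	for (int i = 5; i <= 7; i++)
		mem_map[i].compound_head = (uintptr_t)&mem_map[4] | 1;

	void *obj = backing + (6 << PAGE_SHIFT) + 128;	/* inside page 6 */
	struct page_model *head = compound_head_model(virt_to_page_model(obj));
	printf("head page index: %ld\n", (long)(head - mem_map));	/* 4 */
	return 0;
}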

Back in kmem_cache_free(): local interrupts are disabled first, and then __cache_free() is called to release the object.

static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
					 unsigned long caller)
{
	/* Put the object into the quarantine, don't touch it for now. */
	if (kasan_slab_free(cachep, objp, _RET_IP_))
		return;

	___cache_free(cachep, objp, caller);
}

void ___cache_free(struct kmem_cache *cachep, void *objp,
		unsigned long caller)
{
	struct array_cache *ac = cpu_cache_get(cachep);

	check_irq_off();
	kmemleak_free_recursive(objp, cachep->flags);
	objp = cache_free_debugcheck(cachep, objp, caller);

	/*
	 * Skip calling cache_free_alien() when the platform is not numa.
	 * This will avoid cache misses that happen while accessing slabp (which
	 * is per page memory reference) to get nodeid. Instead use a global
	 * variable to skip the call, which is mostly likely to be present in
	 * the cache.
	 */
	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
		return;

	if (ac->avail < ac->limit) {
		STATS_INC_FREEHIT(cachep);
	} else {
		STATS_INC_FREEMISS(cachep);
		cache_flusharray(cachep, ac);
	}

	if (sk_memalloc_socks()) {
		struct page *page = virt_to_head_page(objp);

		if (unlikely(PageSlabPfmemalloc(page))) {
			cache_free_pfmemalloc(cachep, page, objp);
			return;
		}
	}

	ac->entry[ac->avail++] = objp;
}

This function first fetches the per-CPU object cache (array_cache) with cpu_cache_get() and checks that interrupts are off. If the per-CPU cache still has room (avail < limit), the free counts as a hit; once avail has reached limit, cache_flusharray() is called to flush free objects back and make room. Either way the object pointer ends up appended to ac->entry.
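Before looking at cache_flusharray() itself, here is a user-space model of that fast path (my own sketch; array_cache is reduced to the fields used here, and flush_array() is a stub standing in for cache_flusharray(), including the final memmove() that slides the surviving pointers to the front of entry):

#include <stdio.h>
#include <string.h>

#define AC_LIMIT 4

struct array_cache_model {
	unsigned int avail;		/* objects currently in the array */
	unsigned int limit;		/* capacity before a flush is forced */
	unsigned int batchcount;	/* how many to hand back per flush */
	void *entry[AC_LIMIT];
};

static void flush_array(struct array_cache_model *ac)
{
	/* stand-in for cache_flusharray(): give batchcount objects back
	 * to the node, then slide the survivors to the front, like the
	 * memmove() at the end of the real function */
	unsigned int bc = ac->batchcount;

	printf("FREEMISS: flushing %u objects to the node\n", bc);
	ac->avail -= bc;
	memmove(ac->entry, &ac->entry[bc], sizeof(void *) * ac->avail);
}

static void cache_free_model(struct array_cache_model *ac, void *objp)
{
	if (ac->avail >= ac->limit)	/* no room: flush first */
		flush_array(ac);
	ac->entry[ac->avail++] = objp;	/* FREEHIT: push onto the array */
}

int main(void)
{
	struct array_cache_model ac = { .limit = AC_LIMIT, .batchcount = 2 };
	int objs[6];

	for (int i = 0; i < 6; i++)
		cache_free_model(&ac, &objs[i]);
	printf("avail=%u\n", ac.avail);	/* 4: one flush happened */
	return 0;
}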

static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
{
	int batchcount;
	struct kmem_cache_node *n;
	int node = numa_mem_id();
	LIST_HEAD(list);

	batchcount = ac->batchcount;

	check_irq_off();
	n = get_node(cachep, node);
	spin_lock(&n->list_lock);
	if (n->shared) {
		struct array_cache *shared_array = n->shared;
		int max = shared_array->limit - shared_array->avail;
		if (max) {
			if (batchcount > max)
				batchcount = max;
			memcpy(&(shared_array->entry[shared_array->avail]),
			       ac->entry, sizeof(void *) * batchcount);
			shared_array->avail += batchcount;
			goto free_done;
		}
	}

	free_block(cachep, ac->entry, batchcount, node, &list);
free_done:
#if STATS
	{
		int i = 0;
		struct page *page;

		list_for_each_entry(page, &n->slabs_free, lru) {
			BUG_ON(page->active);

			i++;
		}
		STATS_SET_FREEABLE(cachep, i);
	}
#endif
	spin_unlock(&n->list_lock);
	slabs_destroy(cachep, &list);
	ac->avail -= batchcount;
	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
}

Here, cache_flusharray() first fetches the kmem_cache_node for the current NUMA node from the node array and takes its list_lock.

If the node has a shared object cache, max is the shared cache's limit minus its avail. batchcount free objects (or max of them, if max < batchcount) are copied from the head of the per-CPU entry array into the shared cache's entry array, shared_array->avail grows by the same amount, and execution jumps straight to the free_done label.
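A compact sketch of just that transfer (my own user-space model; ac_model and transfer_to_shared are hypothetical names): at most limit - avail pointers move into the shared array in one memcpy, and a non-zero transfer lets the real function jump to free_done without calling free_block().

#include <stdio.h>
#include <string.h>

struct ac_model {
	unsigned int avail, limit;
	void **entry;
};

static unsigned int transfer_to_shared(struct ac_model *shared,
				       struct ac_model *ac,
				       unsigned int batchcount)
{
	unsigned int max = shared->limit - shared->avail;	/* free room */

	if (!max)
		return 0;		/* shared array full: fall back to free_block() */
	if (batchcount > max)
		batchcount = max;	/* clamp, as cache_flusharray() does */
	memcpy(&shared->entry[shared->avail], ac->entry,
	       sizeof(void *) * batchcount);
	shared->avail += batchcount;
	return batchcount;
}

int main(void)
{
	void *cpu_slots[8], *shared_slots[8];
	int objs[4];

	for (int i = 0; i < 4; i++)
		cpu_slots[i] = &objs[i];

	struct ac_model ac     = { .avail = 4, .limit = 8, .entry = cpu_slots };
	struct ac_model shared = { .avail = 6, .limit = 8, .entry = shared_slots };

	/* batchcount is 4, but only 2 shared slots are free: 2 move */
	printf("moved %u objects\n", transfer_to_shared(&shared, &ac, 4));
	return 0;
}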

If there is no shared object cache, or it is already full, free_block() is called instead.

static void free_block(struct kmem_cache *cachep, void **objpp,
			int nr_objects, int node, struct list_head *list)
{
	int i;
	struct kmem_cache_node *n = get_node(cachep, node);
	struct page *page;

	n->free_objects += nr_objects;

	for (i = 0; i < nr_objects; i++) {
		void *objp;
		struct page *page;

		objp = objpp[i];

		page = virt_to_head_page(objp);
		list_del(&page->lru);
		check_spinlock_acquired_node(cachep, node);
		slab_put_obj(cachep, page, objp);
		STATS_DEC_ACTIVE(cachep);

		/* fixup slab chains */
		if (page->active == 0) {
			list_add(&page->lru, &n->slabs_free);
			n->free_slabs++;
		} else {
			/* Unconditionally move a slab to the end of the
			 * partial list on free - maximum time for the
			 * other objects to be freed, too.
			 */
			list_add_tail(&page->lru, &n->slabs_partial);
		}
	}

	while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
		n->free_objects -= cachep->num;

		page = list_last_entry(&n->slabs_free, struct page, lru);
		list_move(&page->lru, list);
		n->free_slabs--;
		n->total_slabs--;
	}
}

free_block() first adds nr_objects to the node's free_objects count, then walks the nr_objects pointers one by one: it takes the i-th object from the passed-in entry array, finds the page that object lives in, unlinks the page from its current list with list_del(&page->lru), and calls slab_put_obj().

static void slab_put_obj(struct kmem_cache *cachep,
			struct page *page, void *objp)
{
	unsigned int objnr = obj_to_index(cachep, page, objp);
#if DEBUG
	unsigned int i;

	/* Verify double free bug */
	for (i = page->active; i < cachep->num; i++) {
		if (get_free_obj(page, i) == objnr) {
			pr_err("slab: double free detected in cache '%s', objp %px\n",
			       cachep->name, objp);
			BUG();
		}
	}
#endif
	page->active--;
	if (!page->freelist)
		page->freelist = objp + obj_offset(cachep);

	set_free_obj(page, page->active, objnr);
}

static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	u32 offset = (obj - page->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}

This function first computes the object's index within the slab: obj_to_index() divides the offset from page->s_mem by the object size, using a precomputed reciprocal (reciprocal_buffer_size) instead of a hardware divide. page->active is then decremented, and if page->freelist is NULL it is pointed back at this object's storage (the case where the index array lives inside a free object). Finally, set_free_obj() writes the object's index into slot page->active of the freelist array.
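The freelist bookkeeping is easy to model in user space. In this sketch (my own; the names are hypothetical), entries [active, num) of the index array record which objects are free, and freeing pushes an index exactly the way slab_put_obj()/set_free_obj() do:

#include <stdio.h>

#define NUM 4	/* objects per slab */

struct slab_model {
	unsigned int active;		/* objects currently in use */
	unsigned char freelist[NUM];	/* indices of free objects */
};

static void put_obj(struct slab_model *s, unsigned int objnr)
{
	s->active--;
	s->freelist[s->active] = objnr;		/* set_free_obj() */
}

static unsigned int get_obj(struct slab_model *s)
{
	return s->freelist[s->active++];	/* get_free_obj() on alloc */
}

int main(void)
{
	/* a fully used slab: all NUM objects handed out */
	struct slab_model s = { .active = NUM };

	put_obj(&s, 2);		/* free object 2 */
	put_obj(&s, 0);		/* free object 0 */
	printf("next alloc gets object %u\n", get_obj(&s));	/* 0: LIFO */
	return 0;
}

Because each freed index lands in the new page->active slot, the most recently freed object is the first one reallocated, which keeps cache-hot memory in circulation.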

Back in free_block(): if page->active has dropped to 0, the page goes onto the node's slabs_free list and free_slabs is incremented; otherwise the page is added to the tail of slabs_partial.

Then, while the node's free_objects count exceeds free_limit and slabs_free is not empty, free_objects is reduced by cachep->num (a whole slab's worth of objects) and the page at the tail of slabs_free is moved onto the local list, collecting the surplus empty slabs for destruction.

We then return to the free_done label in cache_flusharray(); after dropping the list_lock, the collected list is handed to slabs_destroy():

static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
{
	struct page *page, *n;

	list_for_each_entry_safe(page, n, list, lru) {
		list_del(&page->lru);
		slab_destroy(cachep, page);
	}
}

static void slab_destroy(struct kmem_cache *cachep, struct page *page)
{
	void *freelist;

	freelist = page->freelist;
	slab_destroy_debugcheck(cachep, page);
	if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU))
		call_rcu(&page->rcu_head, kmem_rcu_free);
	else
		kmem_freepages(cachep, page);

	/*
	 * From now on, we don't use freelist
	 * although actual page can be freed in rcu context
	 */
	if (OFF_SLAB(cachep))
		kmem_cache_free(cachep->freelist_cache, freelist);
}

slabs_destroy() walks every page on the list, unlinks it, and calls slab_destroy() on it. slab_destroy() saves page->freelist, then either defers the actual freeing through RCU (for SLAB_TYPESAFE_BY_RCU caches) or releases the pages right away with kmem_freepages(); if the cache keeps its freelist off-slab, the freelist object itself is freed back to freelist_cache.

static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
{
	int order = cachep->gfporder;

	BUG_ON(!PageSlab(page));
	__ClearPageSlabPfmemalloc(page);
	__ClearPageSlab(page);
	page_mapcount_reset(page);
	page->mapping = NULL;

	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += 1 << order;
	uncharge_slab_page(page, order, cachep);
	__free_pages(page, order);
}

This releases all 2^gfporder contiguous pages backing the slab; with gfporder = 2 and 4 KiB pages, for example, that is 1 << 2 = 4 pages, or 16 KiB. The slab-related page state is cleared first, and if the current task is performing memory reclaim, the freed pages are credited to its reclaim_state.

Finally, back at the end of cache_flusharray(), the per-CPU cache's avail is reduced by batchcount, and the remaining object pointers are shifted to the front of the entry array with memmove().
