/*
 * Grow (by one slab) the number of slabs in a cache.  Called by
 * kmem_cache_alloc() when there are no free objects left in the cache.
 *
 * @cachep: the cache to grow.
 * @flags:  gfp allocation flags from the caller.
 * @nodeid: NUMA node to grow on (indexes cachep->nodelists).
 * @objp:   optional pre-allocated page memory; if NULL, pages are
 *          allocated here via kmem_getpages().
 *
 * Returns 1 on success, 0 on allocation failure.
 */
static int cache_grow(struct kmem_cache *cachep,
		gfp_t flags, int nodeid, void *objp)
{
	struct slab *slabp;
	size_t offset;
	gfp_t local_flags;
	struct kmem_list3 *l3;

	/*
	 * Be lazy and only check for valid flags here,  keeping it out of the
	 * critical path in kmem_cache_alloc().
	 */
	BUG_ON(flags & ~(GFP_DMA | __GFP_ZERO | GFP_LEVEL_MASK));

	local_flags = (flags & GFP_LEVEL_MASK);
	/* Take the l3 list lock to change the colour_next on this node */
	check_irq_off();
	
	/* Per-node slab lists for the node we are growing on. */
	l3 = cachep->nodelists[nodeid];
	/* Lock the per-node lists while we consume a colour value. */
	spin_lock(&l3->list_lock);

	/* Get colour for the slab, and cal the next value. */
	offset = l3->colour_next;
	l3->colour_next++;
	/* Wrap back to colour 0 once every colour has been handed out. */
	if (l3->colour_next >= cachep->colour)
		l3->colour_next = 0;
	/* Colour consumed; the rest of the grow runs without the list lock. */
	spin_unlock(&l3->list_lock);

	/* Convert the colour index into a byte offset within the slab. */
	offset *= cachep->colour_off;

	/* Page allocation may sleep, so irqs must be on for __GFP_WAIT. */
	if (local_flags & __GFP_WAIT)
		local_irq_enable();

	/*
	 * The test for missing atomic flag is performed here, rather than
	 * the more obvious place, simply to reduce the critical path length
	 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
	 * will eventually be caught here (where it matters).
	 */
	kmem_flagcheck(cachep, flags);

	/*
	 * Get memory for the objects: allocate physical pages from 'nodeid'
	 * unless the caller already passed pages in via @objp.
	 * Interface to system's page allocator. No need to hold the cache-lock.
	 * If we requested dmaable memory, we will get it. Even if we
	 * did not request dmaable memory, we might get it, but that
	 * would be relatively rare and ignorable.
	 */
	if (!objp)
		objp = kmem_getpages(cachep, local_flags, nodeid);
	if (!objp)
		goto failed;

	/* Get slab management. */
	slabp = alloc_slabmgmt(cachep, objp, offset,
			local_flags & ~GFP_THISNODE, nodeid);
	/*
	 * alloc_slabmgmt() places the struct slab descriptor either
	 * off-slab (allocated from cachep->slabp_cache) or on-slab at the
	 * colour offset within @objp, and initialises inuse, colouroff,
	 * s_mem (start of the object area) and nodeid.
	 */
	if (!slabp)
		goto opps1;

	slabp->nodeid = nodeid;
	/*
	 * NOTE(review): presumably links the underlying pages back to this
	 * cache/slab so free paths can locate them — confirm in
	 * slab_map_pages().
	 */
	slab_map_pages(cachep, slabp, objp);

	/* Construct (initialise) every object in the new slab. */
	cache_init_objs(cachep, slabp);

	/* Restore the irq state we had on entry before retaking the lock. */
	if (local_flags & __GFP_WAIT)
		local_irq_disable();
	check_irq_off();
	spin_lock(&l3->list_lock);

	/* Make slab active. */
	list_add_tail(&slabp->list, &(l3->slabs_free));
	STATS_INC_GROWN(cachep);
	/* All cachep->num objects in the new slab start out free. */
	l3->free_objects += cachep->num;
	spin_unlock(&l3->list_lock);
	return 1;
opps1:
	/* Slab descriptor allocation failed: give the pages back. */
	kmem_freepages(cachep, objp);
failed:
	/* Rebalance irq state disabled on entry (see local_irq_enable above). */
	if (local_flags & __GFP_WAIT)
		local_irq_disable();
	return 0;
}

