/*
 * JFFS3 -- Journalling Flash File System, Version 3.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * JFFS2 Id: nodelist.c,v 1.90 2004/12/08 17:59:20 dwmw2  Exp
 * $Id: nodelist.c,v 3.5 2005/01/05 16:19:00 dedekind Exp $
 *
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mtd/mtd.h>
#include <linux/rbtree.h>
#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include "nodelist.h"

/*
 * Insert 'new' into the dirent list '*list', which is kept sorted by name
 * hash. If an entry with the same name already exists, the one with the
 * lower version is obsoleted and freed, so the list never holds duplicates.
 */
void jffs3_add_fd_to_list(struct jffs3_sb_info *c, struct jffs3_full_dirent *new, struct jffs3_full_dirent **list)
{
	struct jffs3_full_dirent **prev = list;
	/* BUGFIX: the first %p slot is 'c' but the original passed 'new' twice */
	DBG_BI(1, "c %p, new %p list %p, *list %p\n", c, new, list, *list);

	while ((*prev) && (*prev)->nhash <= new->nhash) {
		if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) {
			/* Duplicate name: keep the higher-version dirent, free the other */
			if (new->version < (*prev)->version) {
				DBG_BI(1, "Eep! Marking new dirent node obsolete\n");
				DBG_BI(1, "New dirent is \"%s\"->ino #%u. Old is \"%s\"->ino #%u\n",
						new->name, new->ino, (*prev)->name, (*prev)->ino);
				jffs3_mark_node_obsolete(c, new->raw);
				jffs3_free_full_dirent(new);
			} else {
				DBG_BI(1, "Marking old dirent node (ino #%u) obsolete\n", (*prev)->ino);
				new->next = (*prev)->next;
				jffs3_mark_node_obsolete(c, ((*prev)->raw));
				jffs3_free_full_dirent(*prev);
				*prev = new;
			}
			goto out;
		}
		prev = &((*prev)->next);
	}
	/* No duplicate found: splice 'new' in at the sorted position */
	new->next = *prev;
	*prev = new;

out:
	if (DEBUG2) {
		DBG_BI(2, "Direntries list dump:\n");
		jffs3_dbg_dump_dirents_list(*list);
	}
}

/* Insert a new tmp_dnode_info into the singly-linked list, keeping the
   list sorted by increasing version number. */
static void jffs3_add_tn_to_list(struct jffs3_tmp_dnode_info *tn, struct jffs3_tmp_dnode_info **list)
{
	struct jffs3_tmp_dnode_info **link;

	/* Walk forward until we find the first entry whose version is
	   not smaller than ours, then splice in before it. */
	for (link = list; *link && (*link)->version < tn->version; link = &(*link)->next)
		;

	tn->next = *link;
	*link = tn;
}

/* Free an entire tmp_dnode_info list, including each entry's full_dnode. */
static void jffs3_free_tmp_dnode_info_list(struct jffs3_tmp_dnode_info *tn)
{
	while (tn) {
		struct jffs3_tmp_dnode_info *victim = tn;

		tn = tn->next;
		jffs3_free_full_dnode(victim->fn);
		jffs3_free_tmp_dnode_info(victim);
	}
}

/* Free every full_dirent on the list headed by 'fd'. */
static void jffs3_free_full_dirent_list(struct jffs3_full_dirent *fd)
{
	while (fd) {
		struct jffs3_full_dirent *victim = fd;

		fd = fd->next;
		jffs3_free_full_dirent(victim);
	}
}

/* Returns first valid (non-obsolete) node at or after 'ref' on the
   per-inode next_in_ino chain. May return 'ref' itself; returns NULL
   when no valid node remains.
   NOTE(review): the loop also stops when ref->next_in_ino is NULL —
   presumably the chain's final link points back at the inode_cache
   rather than being NULL, so this does not skip the last real node;
   confirm against the raw_node_ref chain termination in nodelist.h. */
static struct jffs3_raw_node_ref *jffs3_first_valid_node(struct jffs3_raw_node_ref *ref)
{
	while (ref && ref->next_in_ino) {
		if (!ref_obsolete(ref))
			return ref;
		DBG_RI(1, "Node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref));
		ref = ref->next_in_ino;
	}
	return NULL;
}

/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated
   with this ino, returning the former in order of version.

   On success, *tnp receives the version-sorted tmp_dnode_info list and
   *fdp the dirent list; *highest_version, *latest_mctime and *mctime_ver
   are updated as a side effect. On error, both partial lists are freed
   and a negative errno is returned. */

int jffs3_get_inode_nodes(struct jffs3_sb_info *c, struct jffs3_inode_info *f,
			  struct jffs3_tmp_dnode_info **tnp, struct jffs3_full_dirent **fdp,
			  uint32_t *highest_version, uint32_t *latest_mctime,
			  uint32_t *mctime_ver)
{
	struct jffs3_raw_node_ref *ref, *valid_ref;
	struct jffs3_tmp_dnode_info *tn, *ret_tn = NULL;
	struct jffs3_full_dirent *fd, *ret_fd = NULL;
	union jffs3_node_union node;
	size_t retlen;
	int err;

	*mctime_ver = 0;

	DBG_RI(1, "Read ino #%u, nlink %d, state %d\n",
		f->inocache->ino, f->inocache->nlink,
		f->inocache->state);

	spin_lock(&c->erase_completion_lock);

	valid_ref = jffs3_first_valid_node(f->inocache->nodes);

	if (!valid_ref && f->inocache->ino != 1)
		WARNING_MSG("Eep. No valid nodes for ino #%u\n", f->inocache->ino);

	while (valid_ref) {
		/* We can hold a pointer to a non-obsolete node without the spinlock,
		   but _obsolete_ nodes may disappear at any time, if the block
		   they're in gets erased. So if we mark 'ref' obsolete while we're
		   not holding the lock, it can go away immediately. For that reason,
		   we find the next valid node first, before processing 'ref'.
		*/
		ref = valid_ref;
		valid_ref = jffs3_first_valid_node(ref->next_in_ino);
		spin_unlock(&c->erase_completion_lock);

		cond_resched();

		/* FIXME: point() */
		err = jffs3_flash_read(c, (ref_offset(ref)),
				       min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node)),
				       &retlen, (void *)&node);
		if (err) {
			WARNING_MSG("error %d reading node at 0x%08x in get_inode_nodes()\n",
						err, ref_offset(ref));
			goto free_out;
		}

		/* Check we've managed to read at least the common node header */
		if (retlen < min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node.u))) {
			WARNING_MSG("short read in get_inode_nodes()\n");
			err = -EIO;
			goto free_out;
		}

		switch (je16_to_cpu(node.u.nodetype)) {
		case JFFS3_NODETYPE_DIRENT:
			DBG_RI(1, "Node at %08x (%d) is a dirent node\n", ref_offset(ref), ref_flags(ref));
			if (ref_flags(ref) == REF_UNCHECKED) {
				ERROR_MSG("Dirent node at 0x%08x never got checked? How?\n", ref_offset(ref));
				BUG();
			}
			if (retlen < sizeof(node.d)) {
				WARNING_MSG("short read in get_inode_nodes()\n");
				err = -EIO;
				goto free_out;
			}
			/* sanity check: name length must agree with total node length */
			if (PAD((node.d.nsize + sizeof (node.d))) != PAD(je32_to_cpu (node.d.totlen))) {
				WARNING_MSG("Illegal nsize in node at 0x%08x: nsize 0x%02x, totlen %04x\n",
						ref_offset(ref), node.d.nsize, je32_to_cpu(node.d.totlen));
				jffs3_mark_node_obsolete(c, ref);
				spin_lock(&c->erase_completion_lock);
				continue;
			}
			if (je32_to_cpu(node.d.version) > *highest_version)
				*highest_version = je32_to_cpu(node.d.version);
			if (ref_obsolete(ref)) {
				/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
				ERROR_MSG("Dirent node at 0x%08x became obsolete while we weren't looking\n",
				       ref_offset(ref));
				BUG();
			}

			fd = jffs3_alloc_full_dirent(node.d.nsize+1);
			if (!fd) {
				err = -ENOMEM;
				goto free_out;
			}
			fd->raw = ref;
			fd->version = je32_to_cpu(node.d.version);
			fd->ino = je32_to_cpu(node.d.ino);
			fd->type = node.d.type;

			/* Pick out the mctime of the latest dirent */
			if(fd->version > *mctime_ver) {
				*mctime_ver = fd->version;
				*latest_mctime = je32_to_cpu(node.d.mctime);
			}

			/* memcpy as much of the name as possible from the raw
			   dirent we've already read from the flash
			*/
			if (retlen > sizeof(struct jffs3_raw_dirent))
				memcpy(&fd->name[0], &node.d.name[0], min_t(uint32_t, node.d.nsize, (retlen-sizeof(struct jffs3_raw_dirent))));

			/* Do we need to copy any more of the name directly
			   from the flash?
			*/
			if (node.d.nsize + sizeof(struct jffs3_raw_dirent) > retlen) {
				/* FIXME: point() */
				int already = retlen - sizeof(struct jffs3_raw_dirent);

				err = jffs3_flash_read(c, (ref_offset(ref)) + retlen,
						   node.d.nsize - already, &retlen, &fd->name[already]);
				if (!err && retlen != node.d.nsize - already)
					err = -EIO;

				if (err) {
					WARNING_MSG("Read remainder of name in jffs3_get_inode_nodes(): error %d\n", err);
					jffs3_free_full_dirent(fd);
					goto free_out;
				}
			}
			fd->nhash = full_name_hash(fd->name, node.d.nsize);
			fd->next = NULL;
			fd->name[node.d.nsize] = '\0';
			/* Wheee. We now have a complete jffs3_full_dirent structure, with
			   the name in it and everything. Link it into the list
			*/
			DBG_RI(1, "Adding fd \"%s\", ino #%u\n", fd->name, fd->ino);
			jffs3_add_fd_to_list(c, fd, &ret_fd);
			break;

		case JFFS3_NODETYPE_INODE:
			DBG_RI(1, "Node at %08x (%d) is a data node\n", ref_offset(ref), ref_flags(ref));
			if (retlen < sizeof(node.i)) {
				WARNING_MSG("read too short for dnode\n");
				err = -EIO;
				goto free_out;
			}
			if (je32_to_cpu(node.i.version) > *highest_version)
				*highest_version = je32_to_cpu(node.i.version);
			DBG_RI(1, "version %d, highest_version now %d\n",
					je32_to_cpu(node.i.version), *highest_version);

			if (ref_obsolete(ref)) {
				/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
				ERROR_MSG("Inode node at 0x%08x became obsolete while we weren't looking\n",
				       ref_offset(ref));
				BUG();
			}

			/* If we've never checked the CRCs on this node, check them now. */
			if (ref_flags(ref) == REF_UNCHECKED) {
				uint32_t crc, len;
				struct jffs3_eraseblock *jeb;

				crc = crc32(0, &node, sizeof(node.i)-8);
				if (crc != je32_to_cpu(node.i.node_crc)) {
					WARNING_MSG("CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
					       ref_offset(ref), je32_to_cpu(node.i.node_crc), crc);
					jffs3_mark_node_obsolete(c, ref);
					spin_lock(&c->erase_completion_lock);
					continue;
				}

				/* sanity checks */
				if ( je32_to_cpu(node.i.offset) > je32_to_cpu(node.i.isize) ||
					PAD(je32_to_cpu(node.i.csize) + sizeof (node.i))
								!= PAD(je32_to_cpu(node.i.totlen))) {
					WARNING_MSG("Inode corrupted at 0x%08x, totlen %d, #ino  %d, "
						"version %d, isize %d, csize %d, dsize %d \n",
						ref_offset(ref),  je32_to_cpu(node.i.totlen),  je32_to_cpu(node.i.ino),
						je32_to_cpu(node.i.version),  je32_to_cpu(node.i.isize),
						je32_to_cpu(node.i.csize), je32_to_cpu(node.i.dsize));
					jffs3_mark_node_obsolete(c, ref);
					spin_lock(&c->erase_completion_lock);
					continue;
				}

				if (node.i.compr != JFFS3_COMPR_ZERO && je32_to_cpu(node.i.csize)) {
					unsigned char *buf=NULL;
					uint32_t pointed = 0;
#ifndef __ECOS
					if (c->mtd->point) {
						err = c->mtd->point (c->mtd, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize),
								     &retlen, &buf);
						if (!err && retlen < je32_to_cpu(node.i.csize)) {
							DBG_RI(1, "MTD point returned len too short: 0x%zx\n", retlen);
							c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize));
						} else if (err){
							DBG_RI(1, "MTD point failed %d\n", err);
						} else
							pointed = 1; /* succefully pointed to device */
					}
#endif
					if(!pointed){
						buf = kmalloc(je32_to_cpu(node.i.csize), GFP_KERNEL);
						if (!buf) {
							/* BUGFIX: was a bare 'return -ENOMEM' which
							   leaked the partially-built ret_tn/ret_fd lists */
							err = -ENOMEM;
							goto free_out;
						}

						err = jffs3_flash_read(c, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize),
								       &retlen, buf);
						if (!err && retlen != je32_to_cpu(node.i.csize))
							err = -EIO;
						if (err) {
							kfree(buf);
							/* BUGFIX: was a bare 'return err' which
							   leaked the partially-built ret_tn/ret_fd lists */
							goto free_out;
						}
					}
					crc = crc32(0, buf, je32_to_cpu(node.i.csize));
					if(!pointed)
						kfree(buf);
#ifndef __ECOS
					else
						c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize));
#endif

					if (crc != je32_to_cpu(node.i.data_crc)) {
						WARNING_MSG("Data CRC failed on node at 0x%08x: "
							"Read 0x%08x, calculated 0x%08x\n",
							ref_offset(ref), je32_to_cpu(node.i.data_crc), crc);
						jffs3_mark_node_obsolete(c, ref);
						spin_lock(&c->erase_completion_lock);
						continue;
					}

				}

				/* Mark the node as having been checked and fix the accounting accordingly */
				spin_lock(&c->erase_completion_lock);
				jeb = &c->blocks[ref->flash_offset / c->sector_size];
				len = ref_totlen(c, jeb, ref);

				jeb->used_size += len;
				jeb->unchecked_size -= len;
				c->used_size += len;
				c->unchecked_size -= len;

				/* If node covers at least a whole page, or if it starts at the
				   beginning of a page and runs to the end of the file, or if
				   it's a hole node, mark it REF_PRISTINE, else REF_NORMAL.

				   If it's actually overlapped, it'll get made NORMAL (or OBSOLETE)
				   when the overlapping node(s) get added to the tree anyway.
				*/
				if ((je32_to_cpu(node.i.dsize) >= PAGE_CACHE_SIZE) ||
				    ( ((je32_to_cpu(node.i.offset)&(PAGE_CACHE_SIZE-1))==0) &&
				      (je32_to_cpu(node.i.dsize)+je32_to_cpu(node.i.offset) ==  je32_to_cpu(node.i.isize)))) {
					DBG_RI(1, "Marking node at 0x%08x REF_PRISTINE\n", ref_offset(ref));
					ref->flash_offset = ref_offset(ref) | REF_PRISTINE;
				} else {
					DBG_RI(1, "Marking node at 0x%08x REF_NORMAL\n", ref_offset(ref));
					ref->flash_offset = ref_offset(ref) | REF_NORMAL;
				}
				spin_unlock(&c->erase_completion_lock);
			}

			tn = jffs3_alloc_tmp_dnode_info();
			if (!tn) {
				DBG_RI(1, "alloc tn failed\n");
				err = -ENOMEM;
				goto free_out;
			}

			tn->fn = jffs3_alloc_full_dnode();
			if (!tn->fn) {
				DBG_RI(1, "alloc fn failed\n");
				err = -ENOMEM;
				jffs3_free_tmp_dnode_info(tn);
				goto free_out;
			}
			tn->version = je32_to_cpu(node.i.version);
			tn->fn->ofs = je32_to_cpu(node.i.offset);
			/* There was a bug where we wrote hole nodes out with
			   csize/dsize swapped. Deal with it */
			if (node.i.compr == JFFS3_COMPR_ZERO && !je32_to_cpu(node.i.dsize) && je32_to_cpu(node.i.csize))
				tn->fn->size = je32_to_cpu(node.i.csize);
			else // normal case...
				tn->fn->size = je32_to_cpu(node.i.dsize);
			tn->fn->raw = ref;
			DBG_RI(1, "dnode @%08x: ver %u, offset %04x, dsize %04x\n",
				  ref_offset(ref), je32_to_cpu(node.i.version),
				  je32_to_cpu(node.i.offset), je32_to_cpu(node.i.dsize));
			jffs3_add_tn_to_list(tn, &ret_tn);
			break;

		default:
			if (ref_flags(ref) == REF_UNCHECKED) {
				struct jffs3_eraseblock *jeb;
				uint32_t len;

				WARNING_MSG("Eep. Unknown node type %04x at %08x was marked REF_UNCHECKED\n",
				       je16_to_cpu(node.u.nodetype), ref_offset(ref));

				/* Mark the node as having been checked and fix the accounting accordingly */
				spin_lock(&c->erase_completion_lock);
				jeb = &c->blocks[ref->flash_offset / c->sector_size];
				len = ref_totlen(c, jeb, ref);

				jeb->used_size += len;
				jeb->unchecked_size -= len;
				c->used_size += len;
				c->unchecked_size -= len;

				mark_ref_normal(ref);
				spin_unlock(&c->erase_completion_lock);
			}
			node.u.nodetype = cpu_to_je16(JFFS3_NODE_ACCURATE | je16_to_cpu(node.u.nodetype));
			if (crc32(0, &node, sizeof(struct jffs3_unknown_node)-4) != je32_to_cpu(node.u.hdr_crc)) {
				/* Hmmm. This should have been caught at scan time. */
				WARNING_MSG("Node header CRC failed at %08x. But it must have been OK earlier.\n",
				       ref_offset(ref));
				WARNING_MSG("Node was: { %04x, %04x, %08x, %08x }\n",
				       je16_to_cpu(node.u.magic), je16_to_cpu(node.u.nodetype), je32_to_cpu(node.u.totlen),
				       je32_to_cpu(node.u.hdr_crc));
				jffs3_mark_node_obsolete(c, ref);
			} else switch(je16_to_cpu(node.u.nodetype) & JFFS3_COMPAT_MASK) {
			case JFFS3_FEATURE_INCOMPAT:
				ERROR_MSG("Unknown INCOMPAT nodetype %04X at %08x\n",
					je16_to_cpu(node.u.nodetype), ref_offset(ref));
				/* EEP */
				BUG();
				break;
			case JFFS3_FEATURE_ROCOMPAT:
				ERROR_MSG("Unknown ROCOMPAT nodetype %04X at %08x\n",
					je16_to_cpu(node.u.nodetype), ref_offset(ref));
				if (!(c->flags & JFFS3_SB_FLAG_RO))
					BUG();
				break;
			case JFFS3_FEATURE_RWCOMPAT_COPY:
				DBG_RI(1, "Unknown RWCOMPAT_COPY nodetype %04X at %08x\n",
					je16_to_cpu(node.u.nodetype), ref_offset(ref));
				break;
			case JFFS3_FEATURE_RWCOMPAT_DELETE:
				DBG_RI(1, "Unknown RWCOMPAT_DELETE nodetype %04X at %08x\n",
					je16_to_cpu(node.u.nodetype), ref_offset(ref));
				jffs3_mark_node_obsolete(c, ref);
				break;
			}

		}
		spin_lock(&c->erase_completion_lock);

	}
	spin_unlock(&c->erase_completion_lock);
	*tnp = ret_tn;
	*fdp = ret_fd;

	return 0;

free_out:
	/* Error path: release everything collected so far. Lock is not held here. */
	jffs3_free_tmp_dnode_info_list(ret_tn);
	jffs3_free_full_dirent_list(ret_fd);
	return err;
}

/* Set the inocache state under the inocache lock and wake anyone
   sleeping on the inocache wait queue for a state change. */
void jffs3_set_inocache_state(struct jffs3_sb_info *c, struct jffs3_inode_cache *ic, int state)
{
	spin_lock(&c->inocache_lock);

	ic->state = state;
	wake_up(&c->inocache_wq);

	spin_unlock(&c->inocache_lock);
}

/* During mount, this needs no locking. During normal operation, its
   callers want to do other stuff while still holding the inocache_lock.
   Rather than introducing special case get_ino_cache functions or
   callbacks, we just let the caller do the locking itself. */

struct jffs3_inode_cache *jffs3_get_ino_cache(struct jffs3_sb_info *c, uint32_t ino)
{
	struct jffs3_inode_cache *cursor;

	DBG(1, "Searching jffs3_inode_cache object for ino %u\n", ino);

	/* Hash buckets are kept sorted by ino, so stop at the first
	   entry that is not smaller than the one we want. */
	for (cursor = c->inocache_list[ino % INOCACHE_HASHSIZE];
	     cursor && cursor->ino < ino;
	     cursor = cursor->next)
		;

	if (cursor && cursor->ino != ino)
		cursor = NULL;

	if (DEBUG1) {
		if (cursor)
			DBG(1, "Found %p\n", cursor);
		else
			DBG(1, "Not found\n");
	}

	return cursor;
}

/* Link a new inode cache entry into its hash bucket, keeping the
   bucket's list sorted by increasing ino. */
void jffs3_add_ino_cache (struct jffs3_sb_info *c, struct jffs3_inode_cache *new)
{
	struct jffs3_inode_cache **link;

	DBG(2, "Add jffs3_inode_cache object (%p) for ino #%u\n", new, new->ino);
	if (SANITY)
		BUG_ON(new->ino == 0);

	spin_lock(&c->inocache_lock);

	/* Find the sorted insertion point within the hash bucket */
	for (link = &c->inocache_list[new->ino % INOCACHE_HASHSIZE];
	     *link && (*link)->ino < new->ino;
	     link = &(*link)->next)
		;

	new->next = *link;
	*link = new;

	spin_unlock(&c->inocache_lock);
}

/* Unlink an inode cache entry from its hash bucket. The entry itself
   is not freed here. A no-op if 'old' is not found in the bucket. */
void jffs3_del_ino_cache(struct jffs3_sb_info *c, struct jffs3_inode_cache *old)
{
	struct jffs3_inode_cache **link;

	DBG(2, "Del %p (ino #%u)\n", old, old->ino);

	spin_lock(&c->inocache_lock);

	/* Bucket is sorted by ino, so we can stop once we pass old->ino */
	for (link = &c->inocache_list[old->ino % INOCACHE_HASHSIZE];
	     *link && (*link)->ino < old->ino;
	     link = &(*link)->next)
		;

	if (*link == old)
		*link = old->next;

	spin_unlock(&c->inocache_lock);
}

/* Free every inode cache entry in every hash bucket and empty the table. */
void jffs3_free_ino_caches(struct jffs3_sb_info *c)
{
	int bucket;

	for (bucket = 0; bucket < INOCACHE_HASHSIZE; bucket++) {
		struct jffs3_inode_cache *cur = c->inocache_list[bucket];

		while (cur) {
			struct jffs3_inode_cache *victim = cur;

			cur = cur->next;
			DBG(2, "Freeing ino #%u at %p\n", victim->ino, victim);
			jffs3_free_inode_cache(victim);
		}
		c->inocache_list[bucket] = NULL;
	}
}

/* Free every raw_node_ref on every eraseblock's physical chain and
   reset each block's first/last node pointers. */
void jffs3_free_raw_node_refs(struct jffs3_sb_info *c)
{
	int blk;

	for (blk = 0; blk < c->nr_blocks; blk++) {
		struct jffs3_raw_node_ref *cur = c->blocks[blk].first_node;

		while (cur) {
			struct jffs3_raw_node_ref *victim = cur;

			cur = cur->next_phys;
			jffs3_free_raw_node_ref(victim);
		}
		c->blocks[blk].first_node = c->blocks[blk].last_node = NULL;
	}
}

/* Look up the fragment covering 'offset' in the fragtree. If no fragment
   covers it exactly, return the closest fragment that starts below
   'offset' (or NULL for an empty tree / no smaller fragment). */
struct jffs3_node_frag *jffs3_lookup_node_frag(struct rb_root *fragtree, uint32_t offset)
{
	struct rb_node *node;
	struct jffs3_node_frag *best_below = NULL;

	DBG_BI(2, "fragtree %p, ofset %d\n", fragtree, offset);

	/* The common case in lookup is an exact covering fragment, so
	   descend looking for that first, remembering the closest
	   smaller fragment seen on the way down. */
	for (node = fragtree->rb_node; node; ) {
		struct jffs3_node_frag *frag = rb_entry(node, struct jffs3_node_frag, rb);

		DBG_BI(2, "Considering frag %d-%d (%p). left %p, right %p\n",
			  frag->ofs, frag->ofs+frag->size, frag, frag->rb.rb_left, frag->rb.rb_right);

		if (frag->ofs + frag->size <= offset) {
			DBG_BI(2, "Going right from frag %d-%d, before the region we care about\n",
				  frag->ofs, frag->ofs+frag->size);
			/* Remember the closest smaller match on the way down */
			if (!best_below || frag->ofs > best_below->ofs)
				best_below = frag;
			node = frag->rb.rb_right;
		} else if (frag->ofs > offset) {
			DBG_BI(2, "Going left from frag %d-%d, after the region we care about\n",
				  frag->ofs, frag->ofs+frag->size);
			node = frag->rb.rb_left;
		} else {
			DBG_BI(2, "Returning frag %d,%d, matched\n",
				  frag->ofs, frag->ofs+frag->size);
			return frag;
		}
	}

	/* Exact match not found; fall back to the closest smaller fragment */
	if (best_below)
		DBG_BI(2, "No match. Returning frag %d,%d, closest previous\n",
			  best_below->ofs, best_below->ofs+best_below->size);
	else
		DBG_BI(2, "Returning NULL, empty fragtree\n");

	return best_below;
}

/* Pass 'c' argument to indicate that nodes should be marked obsolete as
   they're killed. */
/* Tears down an entire fragtree, freeing every frag and (when the frag
   was a node's last reference) its full_dnode. Uses an iterative
   post-order walk: descend to a leaf, free it, detach it from its
   parent, then continue from the parent. */
void jffs3_kill_fragtree(struct rb_root *root, struct jffs3_sb_info *c)
{
	struct jffs3_node_frag *frag;
	struct jffs3_node_frag *parent;

	if (!root->rb_node)
		return;

	frag = (rb_entry(root->rb_node, struct jffs3_node_frag, rb));

	while(frag) {
		/* Descend until 'frag' has no children left */
		if (frag->rb.rb_left) {
			DBG_BI(2, "Going left from frag (%p) %d-%d\n",
				  frag, frag->ofs, frag->ofs+frag->size);
			frag = frag_left(frag);
			continue;
		}
		if (frag->rb.rb_right) {
			DBG_BI(2, "Going right from frag (%p) %d-%d\n",
				  frag, frag->ofs, frag->ofs+frag->size);
			frag = frag_right(frag);
			continue;
		}

		DBG_BI(2, "frag at 0x%x-0x%x: node %p, frags %d--\n",
			  frag->ofs, frag->ofs+frag->size, frag->node,
			  frag->node?frag->node->frags:0);

		/* frag->node is NULL for hole frags; otherwise drop one
		   reference and free the node when it hits zero */
		if (frag->node && !(--frag->node->frags)) {
			/* Not a hole, and it's the final remaining frag
			   of this node. Free the node */
			if (c)
				jffs3_mark_node_obsolete(c, frag->node->raw);

			jffs3_free_full_dnode(frag->node);
		}
		/* Detach the leaf from its parent so the walk never
		   revisits it, then resume from the parent */
		parent = frag_parent(frag);
		if (parent) {
			if (frag_left(parent) == frag)
				parent->rb.rb_left = NULL;
			else
				parent->rb.rb_right = NULL;
		}

		jffs3_free_node_frag(frag);
		frag = parent;

		/* May be a large tree; give the scheduler a chance */
		cond_resched();
	}
}

/* Link 'newfrag' into the rbtree below 'base', descending by frag
   offset. The caller is expected to follow up with rb_insert_color()
   to rebalance. Duplicate offsets are a bug (each frag must own a
   distinct range).
   NOTE(review): 'link' initially points at the local 'parent' variable,
   which holds &base->rb (non-NULL), so the loop body always runs at
   least once; after the loop 'base' is the rb_entry of the final
   parent, making &base->rb the correct parent argument to
   rb_link_node(). Subtle but deliberate — do not "simplify". */
void jffs3_fragtree_insert(struct jffs3_node_frag *newfrag, struct jffs3_node_frag *base)
{
	struct rb_node *parent = &base->rb;
	struct rb_node **link = &parent;

	DBG_BI(2, "newfrag %p frag range %d-%d, base %p)\n", newfrag,
		  newfrag->ofs, newfrag->ofs+newfrag->size, base);

	while (*link) {
		parent = *link;
		base = rb_entry(parent, struct jffs3_node_frag, rb);

		DBG_BI(2, "considering frag at 0x%x\n", base->ofs);
		if (newfrag->ofs > base->ofs)
			link = &base->rb.rb_right;
		else if (newfrag->ofs < base->ofs)
			link = &base->rb.rb_left;
		else {
			ERROR_MSG("Duplicate frag at %08x (%p,%p)\n", newfrag->ofs, newfrag, base);
			BUG();
		}
	}

	rb_link_node(&newfrag->rb, &base->rb, link);
}
