package org.bookshare.document.navigation.chapters;

import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.benetech.beans.Filter;
import org.benetech.beans.FilterCallback;
import org.benetech.beans.RelativeText;
import org.benetech.collections.IdTree;
import org.benetech.collections.ListMap;
import org.benetech.collections.Tree;
import org.benetech.collections.TreeFilter;
import org.benetech.collections.Tree.ListDirection;
import org.benetech.event.EventListener;
import org.benetech.ref.XML;
import org.benetech.util.NumberUtils;
import org.benetech.util.XMLParseUtils;
import org.benetech.util.XMLTreeUtils;
import org.jdom.Comment;
import org.jdom.Content;
import org.jdom.Element;

/**
 * Given an XML file in DAISY DTBook format, find chapter divisions based on a number of strategies:
 * 1) Valid table of contents (this always overrides other alternatives).
 * 2) Depth structure of the XML tree; is there a magnitude jump that indicates chapter division?
 * 3) The word "chapter" appearing at intervals, in the same language.
 * 4) Headings of a given strength.
 * 5) Level elements of a given strength.
 * If a table of contents is not found, strategies 2 through 5 are evaluated and compared to try to deduce the
 * most accurate choice. It's possible that no strategies work.
 * @author Reuben Firmin
 */
public final class ChapterFinder {

	// NoCheck -- MAGIC
	/** Magnitude of jump from one depth to another that is considered "significant". */
	private static final double SIGNIFICANCE_MAGNITUDE = 8.5;
	/** Minimum number of elements per chapter. */
	private static final int MIN_ELEMENTS_PER_CHAPTER = 90;
	/** Minimum number of pages per chapter. */
	private static final int MIN_PAGES_PER_CHAPTER = 7;
	/** Minimum number of chapter divisions. */
	private static final int MIN_CHAPTERS = 5;
	/** Percentage of the tree's elements examined when hunting for an in-body TOC. */
	private static final int TOC_PERCENTAGE = 15;
	// CheckOn -- END MAGIC

	/** Stateless filter instances, shared across invocations. */
	private static final ModifyingHeadingsFilter MODIFYING_HEADINGS_FILTER = new ModifyingHeadingsFilter();
	private static final ChapterContentsFilter CHAPTER_NAME_FILTER = new ChapterContentsFilter();
	// NOTE(review): COMMENTS_IDENT_FILTER appears unused within this class -- confirm external use or remove
	private static final CommentsIdentFilter COMMENTS_IDENT_FILTER = new CommentsIdentFilter();

	/** Translations of the word "chapter"; final so the shared array reference cannot be reassigned. */
	private static final String[] chapterLanguages = new String[] {
		"chapter", "hoofdstuk", "chapitre", "kapitel", "κεφάλαιο", "capitolo", "capítulo",
	};

	/**
	 * Filter to find headings from a tree. Tags trees that pass with {@link #TAG} = "1|2|3..."
	 * @author Reuben Firmin
	 * XXX this can be reimplemented to use the callback mechanism
	 */
	private static class ModifyingHeadingsFilter<T extends Tree<Element, String, String>> implements Filter<T, Object>
	{
		/** Meta-content key under which the heading level ("1".."6") is stored on accepted trees. */
		public static final String TAG = "headingLevel";
		/** Matches exactly one DTBook heading name, h1 through h6; the digit is captured.
		 * (Previously "h([1-6])+", which also accepted bogus names like "h12" and captured
		 * only the final digit.) */
		private static final Pattern HEADINGS_PATTERN = Pattern.compile("h([1-6])");

		/** Constructor. */
		public ModifyingHeadingsFilter() { }

		/**
		 * {@inheritDoc}
		 * Accepts a single tree whose element name is h1..h6, tagging it with {@link #TAG}.
		 */
		public boolean accept(final T... trees) {
			if (trees.length == 1) {
				final T tree = trees[0];
				final Matcher matcher = HEADINGS_PATTERN.matcher(tree.getContent().getName());

				if (matcher.matches()) {
					// record the heading strength so callers can group headings by level
					tree.addMetaContent(TAG, matcher.group(1));
					return true;
				}
			}
			return false;
		}

		/**
		 * {@inheritDoc}
		 */
		public boolean acceptWithCallback(final FilterCallback<Object> callback, final T... trees)
		{
			// callback is unused; delegate to the stateless accept
			return accept(trees);
		}
	}

	/**
	 * Filters elements with contents containing a word and a number. Tags trees that pass with
	 * {@link #TEXT_TAG} = e.g. "chapter", {@link #NUMBER_TAG} = e.g. 3.
	 * @author Reuben Firmin
	 * XXX reimpl w/ callback
	 */
	private static class ChapterContentsFilter<T extends Tree<Element, String, String>> implements Filter<T, Object> {

		public static final String TEXT_TAG = "textTag";
		public static final String NUMBER_TAG = "numberTag";

		/** Word followed by a number, e.g. "Chapter 3". */
		private static final Pattern BEFORE_PATTERN = Pattern.compile("([a-zA-Z\\.\\,-]+)[ ]*([0-9\\.]+)");
		/** Number followed by a word, e.g. "3. Introduction". */
		private static final Pattern AFTER_PATTERN = Pattern.compile("([0-9\\.]+)[ ]*([a-zA-Z\\.\\,-]+)");

		/** Constructor. */
		public ChapterContentsFilter() { }

		/**
		 * {@inheritDoc}
		 * Accepts a single tree whose element text looks like "word number" or "number word",
		 * tagging it with {@link #TEXT_TAG} and {@link #NUMBER_TAG}.
		 */
		public boolean accept(final T... trees) {
			if (trees.length != 1) {
				return false;
			}
			final T candidate = trees[0];
			final String text = candidate.getContent().getText();

			final Matcher wordFirst = BEFORE_PATTERN.matcher(text);
			if (wordFirst.matches()) {
				candidate.addMetaContent(TEXT_TAG, wordFirst.group(1));
				candidate.addMetaContent(NUMBER_TAG, wordFirst.group(2));
				return true;
			}
			final Matcher numberFirst = AFTER_PATTERN.matcher(text);
			if (numberFirst.matches()) {
				candidate.addMetaContent(NUMBER_TAG, numberFirst.group(1));
				candidate.addMetaContent(TEXT_TAG, numberFirst.group(2));
				return true;
			}
			return false;
		}

		/**
		 * {@inheritDoc}
		 */
		public boolean acceptWithCallback(final FilterCallback<Object> callback, final T... trees) {
			// callback is unused; delegate to the stateless accept
			return accept(trees);
		}
	}

	/**
	 * Filter that accepts trees whose content is the Bookshare chapter-marker XML comment.
	 * Matches the literal text "BookshareChapterIdent", consistent with
	 * {@link ChapterFinder#getChaptersByBookshareComments}.
	 * @author Reuben Firmin
	 */
	private static class CommentsIdentFilter<T extends Tree<Element, String, String>> implements Filter<T, Object> {

		/** Constructor. */
		public CommentsIdentFilter() { }

		/**
		 * {@inheritDoc}
		 */
		public boolean accept(final T... trees) {
			if (trees.length == 1) {
				final T tree = trees[0];
				// NOTE(review): the tree is declared Tree<Element, ...>, so getContent() should never
				// actually be a Comment unless raw-typed trees are in play -- confirm against callers
				final Content con = tree.getContent();
				if (con instanceof Comment) {
					// literal comparison; the previous Pattern.matches() compiled a regex on every
					// call to perform an equivalent whole-string match of a literal
					return "BookshareChapterIdent".equals(((Comment) con).getText());
				}
			}
			return false;
		}

		/**
		 * {@inheritDoc}
		 */
		public boolean acceptWithCallback(final FilterCallback<Object> callback, final T... trees) {
			// callback is unused; delegate to the stateless accept
			return accept(trees);
		}
	}


	/** Maximum number of chapter divisions. Derived in {@link #getChapters} from page/element counts. */
	private int maxChapters;

	/**
	 * Default constructor.
	 */
	public ChapterFinder() {
	}

	/**
	 * Attempt to determine the chapter divisions within this XML book. Uses a series of rules to interpret the XML
	 * structure.
	 * @param frontMatter The start of the document. May not be null
	 * @param bodyMatter The main part of the document. May not be null
	 * @param totalPages The total number of pages in the book; used to cap the allowed chapter count
	 * @param listener The event listener
	 * @return Null if no conclusions can be accurately drawn. Tree with body matter as root, chapters as children.
	 */
	public Chapters getChapters(final IdTree<Element, String, String> frontMatter,
			final IdTree<Element, String, String> bodyMatter, final int totalPages, final EventListener listener)
	{
		listener.message("Document contains : " + bodyMatter.getSize() + " nodes");
		// cap chapters by whichever constraint (pages or elements) allows more divisions
		if (totalPages / MIN_PAGES_PER_CHAPTER > bodyMatter.getSize() / MIN_ELEMENTS_PER_CHAPTER) {
			maxChapters = totalPages / MIN_PAGES_PER_CHAPTER;
		} else {
			maxChapters = bodyMatter.getSize() / MIN_ELEMENTS_PER_CHAPTER;
		}
		listener.message("Maximum allowed chapters : " + maxChapters);

		// first try to detect the table of contents in the frontmatter or bodymatter. if that works, go with it;
		// otherwise, run several strategies, and let them duke it out between them to see which is best.
		Chapters toc = getChaptersByTOC(frontMatter, bodyMatter, false, listener);
		if (toc == null || toc.size() < MIN_CHAPTERS) {
			listener.message("Looking in bodymatter for TOC");
			toc = getChaptersByTOC(bodyMatter, bodyMatter, true, listener);
		}
		Chapters bestChapterList;
		if (toc != null && toc.size() >= MIN_CHAPTERS) {
			bestChapterList = toc;
			listener.message("Found valid table of contents");

		} else {
			// next preference: explicit Bookshare chapter-marker comments in the XML
			final Chapters chptsByComments = getChaptersByBookshareComments(bodyMatter, listener);
			if (chptsByComments != null && chptsByComments.size() >= MIN_CHAPTERS) {
				bestChapterList = chptsByComments;
				listener.message("Found Bookshare Inserted Comments");
			} else {
				// no authoritative source; run heuristic strategies and pick the best candidate
				final ChapterSets candidates = new ChapterSets(MIN_CHAPTERS, maxChapters, bodyMatter);
				candidates.addCandidate(getChaptersByDepthMagnitudes(bodyMatter, listener), listener);
				candidates.addCandidate(getChaptersByHeadings(bodyMatter, listener), listener);
				candidates.addCandidate(getChaptersByName(bodyMatter, listener), listener);
				// this strategy "cheats" and uses the average size so far to base its decision on
				candidates.addCandidate(getChaptersByLevel(bodyMatter, listener, candidates.avgSize()), listener);
				/* TODO add more strategies here */
				bestChapterList = candidates.findBest(listener);
			}
		}
		return bestChapterList;
	}

	/**
	 * Try to determine where the chapter breaks occur by looking at the size increases between depths in the tree.
	 * Relies on nesting of elements being consistent.
	 * @param docTree The tree to find the chapters in
	 * @param listener The event listener
	 * @return null if no good call can be made.
	 */
	private Chapters getChaptersByDepthMagnitudes(final IdTree<Element, String, String> docTree,
			final EventListener listener)
	{
		listener.message("Attempting to find chapters by depth magnitude changes");
		final int[] depthSizes = docTree.getDepthChart();
		int targetDepth = -1;
		for (int depth = 0; depth < depthSizes.length; depth++) {
			listener.message(depthSizes[depth] + " elements at depth " + depth);
			if (depth + 1 >= depthSizes.length) {
				// no deeper level to compare against
				continue;
			}
			final int nextSize = depthSizes[depth + 1];
			// the next depth must hold a plausible chapter count, and must represent a
			// significant jump in magnitude from the current depth
			final boolean plausibleCount = nextSize < maxChapters && nextSize > MIN_CHAPTERS;
			final boolean significantJump = depthSizes[depth] * SIGNIFICANCE_MAGNITUDE < nextSize;
			if (plausibleCount && significantJump) {
				targetDepth = depth + 1;
				break;
			}
		}

		if (targetDepth > 0) {
			listener.message("Found " + depthSizes[targetDepth] + " chapters at depth: " + targetDepth);
			return new Chapters(docTree,
					(List<IdTree<Element, String, String>>) docTree.getChildrenAtDepth(targetDepth), "depth analysis");
		}
		return null;
	}

	/**
	 * Try to divide the tree based on chapter headings, by finding a reasonable number of heading elements.
	 * @param docTree the tree being parsed, i.e. the document
	 * @param listener user feedback
	 * @return Null if none can be reliably found
	 */
	private Chapters getChaptersByHeadings(final IdTree<Element, String, String> docTree, final EventListener listener)
	{
		listener.message("Attempting to find chapters by heading divisions");
		final List<IdTree<Element, String, String>> headings =
			(List<IdTree<Element, String, String>>) docTree.toList(MODIFYING_HEADINGS_FILTER);
		listener.message("Found : " + headings.size() + " headings in the document");
		// find the appropriate number of highest level headings
		int headingLevel = -1;
		final List<IdTree<Element, String, String>> chapterHeadings = new LinkedList<IdTree<Element, String, String>>();
		final List<IdTree<Element, String, String>> confHeadings = new LinkedList<IdTree<Element, String, String>>();
		for (int i = 1; i <= 6; i++) {
			final String level = String.valueOf(i);
			for (final IdTree<Element, String, String> heading : headings) {
				// reversed equals guards against a missing meta tag (the filter normally sets it);
				// static TAG accessed via the class rather than the shared instance
				if (level.equals(heading.getFirstMetaContent(ModifyingHeadingsFilter.TAG))) {
					chapterHeadings.add(heading);
				}
			}
			if (chapterHeadings.size() > MIN_CHAPTERS && chapterHeadings.size() < maxChapters
					&& chapterHeadings.size() > confHeadings.size()) {
				headingLevel = i;
				confHeadings.clear();
				confHeadings.addAll(chapterHeadings);
				// continue so that we get the lowest heading level that works here
			}
			chapterHeadings.clear();
		}
		if (headingLevel > 0) {
			listener.message("Found: " + confHeadings.size() + " headings at level: " + headingLevel);
			return new Chapters(docTree, confHeadings, "heading structure");
		}

		return null;
	}

	/**
	 * Tries to find chapter headings by looking for nodes whose contents look like "chapter 1". Copes with major
	 * western languages. Matching is case-insensitive, so "Chapter", "CHAPTER" and "chapter" all qualify.
	 * @param docTree the tree being parsed, i.e. the document
	 * @param listener user feedback
	 * @return Null if chapters couldn't be located in this method
	 */
	private Chapters getChaptersByName(final IdTree<Element, String, String> docTree, final EventListener listener) {

		final List<IdTree<Element, String, String>> flattened =
			(List<IdTree<Element, String, String>>) docTree.toList(CHAPTER_NAME_FILTER);
		listener.message("Found: "  + flattened.size() + " possible chapter headings by word choice");
		final List<IdTree<Element, String, String>> realHeadings = new LinkedList<IdTree<Element, String, String>>();
		String chapterTitleInLang = null;
		for (IdTree<Element, String, String> possibleHeading : flattened) {
			final String titleName = (String) possibleHeading.getFirstMetaContent(ChapterContentsFilter.TEXT_TAG);
			// if we haven't yet determined what language we're working in
			if (chapterTitleInLang == null) {
				for (String chapterTitle : chapterLanguages) {
					// case-insensitive: headings are frequently title- or upper-cased
					if (chapterTitle.equalsIgnoreCase(titleName)) {
						chapterTitleInLang = chapterTitle;
						break;
					}
				}
			}
			// only accept headings that use the same chapter word as the rest of the book
			if (chapterTitleInLang != null && chapterTitleInLang.equalsIgnoreCase(titleName)) {
				realHeadings.add(possibleHeading);
			}
		}
		listener.message("Found: " + realHeadings.size() + " chapter headings");
		if (realHeadings.size() < MIN_CHAPTERS) {
			return null;
		}

		return new Chapters(docTree, realHeadings, "word recognition");
	}

	/**
	 * Chapters by Bookshare-inserted comments.
	 * Looks for elements containing the XML comment "BookshareChapterIdent", which Bookshare tooling
	 * inserts to mark chapter starts.
	 * @param docTree the tree being parsed, i.e. the document
	 * @param listener user feedback
	 * @return Null if chapters couldn't be located in this method
	 */
	private Chapters getChaptersByBookshareComments(final IdTree<Element, String, String> docTree,
			final EventListener listener)
	{
		final List<IdTree<Element, String, String>> flattened =
			(List<IdTree<Element, String, String>>) docTree.toList(ListDirection.DEPTH);

		final List<IdTree<Element, String, String>> realHeadings = new LinkedList<IdTree<Element, String, String>>();
		for (IdTree<Element, String, String> possibleHeading : flattened) {
			final Content[] content = (Content[]) possibleHeading.getContent().getContent().toArray(new Content[0]);
			for (Content con : content) {
				if (con instanceof Comment && ((Comment) con).getText().equals("BookshareChapterIdent")) {
					realHeadings.add(new IdTree<Element, String, String>(possibleHeading));
					// one chapter per element, even if the marker comment is duplicated
					break;
				}
			}
		}
		listener.message("Found: "  + realHeadings.size() + " Chapter headings by Inserted Bookshare Comments");
		if (realHeadings.size() < MIN_CHAPTERS) {
			return null;
		}

		return new Chapters(docTree, realHeadings, "Bookshare Comments");
	}




	/**
	 * Chapters by table of contents: resolve TOC links against named anchors in the body.
	 * @param treeWithLinks The part of the book with the links; either the bodymatter or frontmatter
	 * @param treeWithBreaks The part of the book with the breaks
	 * @param examineFirstNPercentOnly Whether to look at only the first {@link #TOC_PERCENTAGE} percent
	 * 	of the treeWithLinks
	 * @param listener User feedback
	 * @return Null if chapters couldn't be located in this method
	 */
	private Chapters getChaptersByTOC(final IdTree<Element, String, String> treeWithLinks,
			final IdTree<Element, String, String> treeWithBreaks, final boolean examineFirstNPercentOnly,
			final EventListener listener)
	{
		// maps link target (href without '#') to the link element; insertion order preserved
		final Map<String, Element> tocHrefMap = new LinkedHashMap<String, Element>();
		final TreeFilter linkFilter = new TreeFilter(XML.Find.LINK_FILTER);

		if (examineFirstNPercentOnly) {
			final List<IdTree<Element, String, String>> flattened =
				(List<IdTree<Element, String, String>>) treeWithLinks.toList(ListDirection.DEPTH);

			final Map<Integer, IdTree<Element, String, String>> toc =
				new HashMap<Integer, IdTree<Element, String, String>>();
			// multiply before dividing; the old (size / 100) * pct truncated to 0 for trees under 100 nodes
			final int nPercent = treeWithLinks.getSize() * TOC_PERCENTAGE / 100;

			// get all the links in the first N percent of the book
			final List<Integer> indices = new LinkedList<Integer>();
			for (int i = 0; i < nPercent && i < flattened.size(); i++) {
				if (linkFilter.accept(flattened.get(i))) {
					toc.put(i, flattened.get(i));
					indices.add(i);
				}
			}
			if (indices.size() == 0) {
				listener.message("No links in the first " + TOC_PERCENTAGE + "% of the book; couldn't find TOC");
				return null;
			}

			// now find the first big chunk of links; this will trim off other links in the first part of the book
			// that aren't part of the table of contents
			final int[] indicesA = new int[indices.size()];
			for (int i = 0; i < indices.size(); i++) {
				indicesA[i] = indices.get(i);
			}
			final int[] tocIndices = NumberUtils.getFirstGroup(indicesA);
			listener.message(indicesA.length + " links in first " + TOC_PERCENTAGE + "%; "
					+ tocIndices.length + " toc links");
			for (int tocIndex : tocIndices) {
				final String href = toc.get(tocIndex).getContent().getAttributeValue("href");
				if (href != null) {
					tocHrefMap.put(href.replace("#", ""), toc.get(tocIndex).getContent());
				}
			}
		} else {
			final List<IdTree<Element, String, String>> tocLinks =
				(List<IdTree<Element, String, String>>) treeWithLinks.toList(linkFilter);
			for (IdTree<Element, String, String> tocLink : tocLinks) {
				final String href = tocLink.getContent().getAttributeValue("href");
				// null-check the value itself; the old getAttribute("href") lookup repeated the work
				if (href != null) {
					tocHrefMap.put(href.replace("#", ""), tocLink.getContent());
				}
			}
		}

		listener.message("Found " + tocHrefMap.size() + " TOC links");
		if (tocHrefMap.size() < MIN_CHAPTERS) {
			return null;
		}

		// now resolve the links to find the chapter breaks
		final List<IdTree<Element, String, String>> namedAnchors =
			(List<IdTree<Element, String, String>>) treeWithBreaks.toList(new TreeFilter(XML.Find.ANCHOR_NAMED_FILTER));
		final Map<String, IdTree<Element, String, String>> nameToAnchorMap =
			new HashMap<String, IdTree<Element, String, String>>();
		for (IdTree<Element, String, String> anchor : namedAnchors) {
			nameToAnchorMap.put(anchor.getContent().getAttributeValue("name"), anchor);
		}
		final List<Chapter> tocChapterBreaks = new LinkedList<Chapter>();
		for (Map.Entry<String, Element> hrefElement : tocHrefMap.entrySet()) {
			final IdTree<Element, String, String> a = nameToAnchorMap.get(hrefElement.getKey());
			if (a != null) {
				final Chapter chapter = new Chapter(a);
				// get the text of the link, not the point of navigation
				RelativeText text = XMLParseUtils.getNearestText(hrefElement.getValue(), treeWithBreaks, true);
				if (text == null) {
					// fall back to text near the anchor itself
					text = XMLParseUtils.getNearestText(a.getContent(), treeWithBreaks, true);
				}
				if (text != null) {
					chapter.setText(text.getText());
				}
				tocChapterBreaks.add(chapter);
			} else {
				listener.message("No matching element with name or id: " + hrefElement.getKey());
			}
		}

		return new Chapters(tocChapterBreaks, "table of contents");
	}

	/**
	 * Get chapters by level #.
	 * @param docTree The document tree
	 * @param listener Event listener
	 * @param avgSizeOfOthers Average size of other candidates
	 * @return Null if none are applicable
	 */
	private Chapters getChaptersByLevel(final IdTree<Element, String, String> docTree, final EventListener listener,
			final double avgSizeOfOthers)
	{
		final List<IdTree<Element, String, String>> levels =
			(List<IdTree<Element, String, String>>) docTree.toList(new TreeFilter(XML.Find.LEVEL_FILTER));
		// bucket the level elements by their numeric suffix, e.g. "level2" -> 2
		final ListMap<Integer, IdTree<Element, String, String>> levelMap =
			new ListMap<Integer, IdTree<Element, String, String>>();
		for (IdTree<Element, String, String> level : levels) {
			final String levelName = level.getContent().getName();
			// getNumericValue yields -1 (an unused key) for a non-digit suffix instead of throwing
			levelMap.put(Character.getNumericValue(levelName.charAt(levelName.length() - 1)), level);
		}

		int bestLevel = 0;
		double bestDistanceFromAvg = 0;
		// only scan the first three levels. it's really unlikely that chapters would be below level3.
		// (was "i < 3", which skipped level3 in contradiction of this comment)
		for (int i = 1; i <= 3; i++) {
			if (levelMap.get(i) != null) {
				listener.message(levelMap.get(i).size() + " levels at " + i);
				if (levelMap.get(i).size() >= MIN_CHAPTERS && levelMap.get(i).size() < maxChapters) {
					// prefer the level whose element count is closest to the other candidates' average
					final double levelDistance = Math.abs((double) levelMap.get(i).size() - avgSizeOfOthers);
					if (bestLevel < 1 || levelDistance < bestDistanceFromAvg) {
						bestLevel = i;
						bestDistanceFromAvg = levelDistance;
					}
				}
			}
		}
		if (bestLevel > 0) {
			listener.message("Selecting level depth " + bestLevel + ", with " + levelMap.get(bestLevel).size()
					+ " elements, which is " + bestDistanceFromAvg + " away from the average candidate size so far");
			return new Chapters(docTree, levelMap.get(bestLevel), "level structure");
		} else {
			return null;
		}
	}

}
