package org.essilab.analyzer.util;

import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URI;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileChannel.MapMode;
import java.util.*;
import java.util.Map.Entry;
import org.essilab.analyzer.Configuration.Config;
import org.essilab.analyzer.analyzer.AnalyzedTerm;
import org.essilab.analyzer.analyzer.Files;
import org.essilab.analyzer.analyzer.Term;
import org.essilab.analyzer.analyzer.db.Parser;

/**
 * This class is used to analyse a text and extract the main keywords.
 * 
 * @author Mohamed-Amine Chouchene, Ahcène Idinarere, Vincent Nock, Alain
 *         Janin-Manificat
 */
public class Analyzer {

	/** Terms discovered while expanding relations; filled by {@link #loadLinks(ArrayList)}. */
	public ArrayList<AnalyzedTerm> linkedAnalyzedTerms;

	// Maximum recursion depth when following term relations (property "depth").
	int depth;

	// Word label -> occurrence count in the reference corpus
	// (loaded from words_frequency_FR.ini).
	HashMap<String, Integer> wordsFrequency;

	/**
	 * Constructor, init the research depth from the property "depth" in
	 * config.ini and loads the reference words-frequency table.
	 * 
	 * @see Config#setFile(java.lang.String)
	 */
	public Analyzer() {

		Config.setFile("config.ini");

		linkedAnalyzedTerms = new ArrayList<>();
		depth = Integer.valueOf(Config.get("depth"));

		try {
			loadWordsFrequency();
		} catch (IOException e) {
			// Best effort: wordsFrequency is initialized before any I/O in
			// loadWordsFrequency(), so applyWordsFrequency() stays safe even
			// when the frequency file is missing.
			e.printStackTrace();
		}
	}

	/**
	 * TODO : get the lexical field — not implemented yet.
	 * 
	 * @param text
	 *            the text to analyse
	 * @return always {@code null} for now
	 */
	public String getLexicalFields(String text) {

		return null;
	}

	/**
	 * Parse a text in order to delete the words considered as "useless".
	 * 
	 * @param sampleText
	 *            String - the text to analyse.
	 * @return String - the given text without the useless tokens.
	 * @throws IOException
	 *             if the stop-words file cannot be read
	 * @see Analyzer#getUselessTokens()
	 */
	public String cleanUselessTokens(String sampleText) throws IOException {

		// Strip punctuation (except '-') and collapse runs of whitespace.
		sampleText = sampleText.replaceAll("[\\p{Punct}&&[^-]]+", " ").replaceAll(
				"\\s+", " ");

		// HashSet gives O(1) lookups instead of scanning a list per word.
		Set<String> stopWords = new HashSet<>(getUselessTokens());

		StringBuilder clean = new StringBuilder();
		int index = 0;

		while (index < sampleText.length()) {

			// A token ends at the next space or apostrophe, whichever comes first.
			int spaceIdx = sampleText.indexOf(' ', index);
			int aposIdx = sampleText.indexOf('’', index);

			int nextIndex;
			if (spaceIdx == -1 && aposIdx == -1) {
				// Last token runs to the very end of the text (fixes the former
				// off-by-one that silently dropped the final character).
				nextIndex = sampleText.length();
			} else if (spaceIdx == -1) {
				nextIndex = aposIdx;
			} else if (aposIdx == -1) {
				nextIndex = spaceIdx;
			} else {
				nextIndex = Math.min(spaceIdx, aposIdx);
			}

			String word = sampleText.substring(index, nextIndex);
			if (!stopWords.contains(word.toLowerCase())) {
				clean.append(word);
				if (nextIndex < sampleText.length()) {
					// Keep the delimiter that followed the kept word.
					clean.append(sampleText.charAt(nextIndex));
				}
			}
			index = nextIndex + 1;
		}

		return clean.toString();
	}

	/**
	 * Extract the list of useless words from config file.
	 * 
	 * @return the list of useless words, lower-cased, one per line of
	 *         useless_words.ini
	 * @throws IOException
	 *             if the file cannot be read
	 */
	private ArrayList<String> getUselessTokens() throws IOException {

		ArrayList<String> uselessTokens = new ArrayList<>();

		// try-with-resources: the reader was previously never closed (leak).
		try (BufferedReader in = new BufferedReader(new FileReader(
				"useless_words.ini"))) {
			String s;
			while ((s = in.readLine()) != null) {
				uselessTokens.add(s.toLowerCase());
			}
		}

		return uselessTokens;
	}

	/**
	 * Load the words-frequency table from words_frequency_FR.ini. Each line is
	 * expected to be "&lt;count&gt;TAB&lt;word&gt;TAB...": malformed lines are
	 * skipped instead of raising StringIndexOutOfBoundsException.
	 * 
	 * @throws IOException
	 *             if the file cannot be read
	 */
	public void loadWordsFrequency() throws IOException {

		// Initialized before any I/O so the map is never left null.
		wordsFrequency = new HashMap<>();

		// Plain buffered read: the previous memory-map + full byte copy of the
		// file bought nothing and leaked its reader.
		try (BufferedReader in = new BufferedReader(new FileReader(
				"words_frequency_FR.ini"))) {

			for (String line = in.readLine(); line != null; line = in.readLine()) {

				int iEndNumber = line.indexOf('\t');
				int iBeginWord = iEndNumber + 1;
				int iEndWord = line.indexOf('\t', iBeginWord);

				if (iEndNumber < 0 || iEndWord < 0) {
					// Not "<count>\t<word>\t..." — ignore the line.
					continue;
				}

				String word = line.substring(iBeginWord, iEndWord);
				Integer number = Integer.valueOf(line.substring(0, iEndNumber));

				wordsFrequency.put(word, number);
			}
		}
	}

	/**
	 * Create a list of AnalyzedTerm from the given map of words (key : word
	 * label -> value : occurrence number). The map is emptied as a side effect.
	 * 
	 * @param wordsList
	 *            Map&lt;String, Integer&gt; - the list of words ordered by
	 *            decreasing occurrence number
	 * @return ArrayList&lt;AnalyzedTerm&gt; - the list of AnalyzedTerm created
	 *         from the given Map.
	 */
	public ArrayList<AnalyzedTerm> createTermsList(
			Map<String, Integer> wordsList) {

		ArrayList<AnalyzedTerm> termsList = new ArrayList<>();

		Files files = new Files();
		Parser p = new Parser(false);

		Iterator<Entry<String, Integer>> it = wordsList.entrySet().iterator();
		while (it.hasNext()) {
			Entry<String, Integer> pairs = it.next();
			String fileName = files.getFilePathFromTitle(pairs.getKey());
			Term t = p.parser(new File(fileName));
			// Keep only words that resolve to a real, titled term.
			if (t != null && t.getTitle() != null) {
				AnalyzedTerm at = new AnalyzedTerm(t);
				at.setWeight(1);
				at.setOccurences(pairs.getValue());
				// A root term is its own ancestor with a neutral weight of 1.
				at.getAncestors().put(at, (double) 1);
				termsList.add(at);
			}
			it.remove(); // avoids a ConcurrentModificationException
		}

		return termsList;
	}

	/**
	 * Read the content of a file.
	 * 
	 * @param strFile
	 *            String - path to the file to read.
	 * @return String - the content of the file (decoded with the platform
	 *         default charset), or "ERROR loading file ..." on failure.
	 */
	public String readFromFile(String strFile) {
		File file = new File(strFile);
		URI uri = file.toURI();
		byte[] bytes = null;
		try {
			bytes = java.nio.file.Files.readAllBytes(java.nio.file.Paths
					.get(uri));
		} catch (IOException e) {
			e.printStackTrace();
			return "ERROR loading file " + strFile;
		}

		return new String(bytes);
	}

	/**
	 * Load the weight associated to each kind of relation for every given term.
	 * Resets {@link #linkedAnalyzedTerms} before processing.
	 * 
	 * @param analysedTerms
	 *            ArrayList&lt;AnalyzedTerm&gt; - the root terms to expand
	 */
	public void loadLinks(ArrayList<AnalyzedTerm> analysedTerms) {

		// Relation weight factors live in a dedicated config file.
		Config.setFile("weight.conf");

		linkedAnalyzedTerms.clear();

		for (AnalyzedTerm analyzedTerm : analysedTerms) {
			linkedAnalyzedTerms.add(analyzedTerm.clone());
			loadLinks(analyzedTerm, analyzedTerm, 1);
		}
	}

	/**
	 * Search for an already processed AnalyzedTerm. If a AnalyzedTerm is found,
	 * apply weights on root Terms If not, create one with the word passed in
	 * parameter
	 * 
	 * @param root
	 *            - The root Term of the processed Term
	 * @param predecessor
	 *            - The Term from the one this word derives
	 * @param title
	 *            - The word to process
	 * @param weight
	 *            - The weight of the word to process
	 * @return the matching or newly created AnalyzedTerm, or {@code null} if
	 *         the word could not be resolved to a titled term
	 */
	private AnalyzedTerm processAnalyzedTerm(AnalyzedTerm root,
			AnalyzedTerm predecessor, String title, double weight) {

		AnalyzedTerm analyzedTerm = null;

		// Look for an already processed term with the same title
		// (case-insensitive).
		for (AnalyzedTerm at : linkedAnalyzedTerms) {
			if (at.getTerm().getTitle() != null
					&& at.getTerm().getTitle().toLowerCase()
							.equals(title.toLowerCase())) {
				analyzedTerm = at;
				break;
			}
		}

		if (analyzedTerm != null) {

			if (!analyzedTerm.getAncestors().containsKey(root)) {
				// New ancestor: reinforce every pre-existing ancestor pair.
				for (Entry<AnalyzedTerm, Double> e : analyzedTerm.getAncestors()
						.entrySet()) {
					if (!root.equals(e.getKey())) {
						double weightToAdd = weight * e.getValue();

						root.addWeight(weightToAdd);
						e.getKey().addWeight(weightToAdd);
					}
				}
				analyzedTerm.getAncestors().put(root, weight);
			} else {
				// Known ancestor: keep only the strongest link.
				double currentWeight = analyzedTerm.getAncestors().get(root);
				if (currentWeight < weight) {
					analyzedTerm.getAncestors().put(root, weight);
				}
			}

		} else {
			Files files = new Files();
			Parser p = new Parser(false);
			Term term = p.parser(new File(files.getFilePathFromTitle(title)));
			// Guard against parser failures: createTermsList shows parser()
			// can return null, which previously caused a NullPointerException.
			if (term != null && term.getTitle() != null) {
				analyzedTerm = new AnalyzedTerm(term);
				analyzedTerm.setWeight(predecessor.getWeight() * weight);
				analyzedTerm.getAncestors().put(root, weight);
				linkedAnalyzedTerms.add(analyzedTerm);
			}
		}

		if (analyzedTerm != null) {
			analyzedTerm.setWeight(weight);
		}

		return analyzedTerm;
	}

	/**
	 * Load recursively all relations from a given Term
	 * 
	 * @param root
	 *            - The root Term of the analyzed Term
	 * @param predecessor
	 *            - The Term from the one this Term derives
	 * @param level
	 *            - the current depth of the analyzed branch
	 */
	public void loadLinks(AnalyzedTerm root, AnalyzedTerm predecessor, int level) {

		if (level > depth || predecessor == null) {
			return;
		}

		Term term = predecessor.getTerm();

		// Semantic relations: each hop multiplies the predecessor's weight by
		// the factor configured for the relation kind and goes one level deeper.
		expandRelation(root, predecessor, term.getSynonymous(), "syn", level + 1);
		expandRelation(root, predecessor, term.getRelated(), "apr", level + 1);
		// NOTE(review): "drv" (derived terms, Config key "drv") expansion was
		// already disabled in the original code and is intentionally left out.
		expandRelation(root, predecessor, term.getHyperonyms(), "hyper", level + 1);
		expandRelation(root, predecessor, term.getHyponyms(), "hypo", level + 1);
		expandRelation(root, predecessor, term.getVocabulary(), "voc", level + 1);
		expandRelation(root, predecessor, term.getAntonym(), "ant", level + 1);
		expandRelation(root, predecessor, term.getAbbreviations(), "abrév", level + 1);
		expandRelation(root, predecessor, term.getMeronymes(), "méro", level + 1);
		expandRelation(root, predecessor, term.getHolonyms(), "holo", level + 1);
		expandRelation(root, predecessor, term.getVariantOrthoraphic(), "var-ortho", level + 1);
		expandRelation(root, predecessor, term.getVariants(), "var", level + 1);
		expandRelation(root, predecessor, term.getAlmostSynonymous(), "q-syn", level + 1);
		expandRelation(root, predecessor, term.getTroponyms(), "tropo", level + 1);
		expandRelation(root, predecessor, term.getTypographicalVariants(), "var-typo", level + 1);
		expandRelation(root, predecessor, term.getError(), "erreur", level + 1);
		expandRelation(root, predecessor, term.getDimintives(), "dimin", level + 1);

		// Inflected forms: same word, so the weight is inherited unchanged
		// (null key) and the recursion depth is NOT increased.
		expandRelation(root, predecessor, term.getfAdj(), null, level);
		expandRelation(root, predecessor, term.getfLAdv(), null, level);
		expandRelation(root, predecessor, term.getfLNoun(), null, level);
		expandRelation(root, predecessor, term.getfLVerb(), null, level);
		expandRelation(root, predecessor, term.getfNoun(), null, level);
		expandRelation(root, predecessor, term.getfVerb(), null, level);
	}

	/**
	 * Expand one relation list of {@code predecessor}. Factored out of
	 * {@code loadLinks}, which repeated this loop once per relation kind.
	 * 
	 * @param root
	 *            - The root Term of the analyzed branch
	 * @param predecessor
	 *            - The Term whose relation list is expanded
	 * @param words
	 *            - labels of the related words (assumes the Term getters
	 *            return collections — TODO confirm against Term)
	 * @param weightKey
	 *            - config key of the relation's weight factor, or {@code null}
	 *            to inherit the predecessor's weight as-is
	 * @param nextLevel
	 *            - depth passed to the recursive {@code loadLinks} call
	 */
	private void expandRelation(AnalyzedTerm root, AnalyzedTerm predecessor,
			Iterable<String> words, String weightKey, int nextLevel) {

		for (String word : words) {
			// Recomputed on every iteration on purpose: processAnalyzedTerm
			// may change the predecessor's weight through addWeight().
			double weight = (weightKey == null)
					? predecessor.getWeight()
					: predecessor.getWeight() * Double.valueOf(Config.get(weightKey));
			loadLinks(root,
					processAnalyzedTerm(root, predecessor, word, weight),
					nextLevel);
		}
	}

	/**
	 * Sort the given list of rootTerms by decreasing weight value, in place.
	 * 
	 * @param rootTerms
	 *            ArrayList&lt;AnalyzedTerm&gt; - list of AnalyzedTerm
	 */
	public void sortTermsByWeight(ArrayList<AnalyzedTerm> rootTerms) {

		Collections.sort(rootTerms, new Comparator<AnalyzedTerm>() {

			@Override
			public int compare(AnalyzedTerm o1, AnalyzedTerm o2) {
				// Reversed arguments => descending order; Double.compare
				// avoids boxing two Doubles per comparison.
				return Double.compare(o2.getWeight(), o1.getWeight());
			}
		});

	}

	/**
	 * Multiplies the weight of AnalyzedTerms by a factor of their occurrences
	 * (property "occurrences_factor" in config.ini).
	 * 
	 * @param rootTerms
	 *            ArrayList&lt;AnalyzedTerm&gt; - list of AnalyzedTerm
	 */
	public void applyOccurrences(ArrayList<AnalyzedTerm> rootTerms) {

		Config.setFile("config.ini");
		float occurrencesFactor = Float.parseFloat(Config
				.get("occurrences_factor"));

		for (AnalyzedTerm at : rootTerms) {
			at.setWeight(at.getWeight()
					* (at.getOccurences() * occurrencesFactor));
		}

	}

	/**
	 * Scale each term's weight by its corpus frequency. Terms absent from the
	 * frequency table are left untouched.
	 * 
	 * @param rootTerms
	 *            ArrayList&lt;AnalyzedTerm&gt; - list of AnalyzedTerm
	 */
	public void applyWordsFrequency(ArrayList<AnalyzedTerm> rootTerms) {
		for (AnalyzedTerm at : rootTerms) {

			Integer frequency = wordsFrequency.get(at.getTerm().getTitle());
			if (frequency != null) {
				// Floating-point division: the previous integer division
				// (frequency / 20000) truncated every frequency below 20000
				// to 0 and zeroed the term's weight.
				at.setWeight(at.getWeight() * (frequency / 20000.0));
			}
		}
	}
}
