package com.fitc.tokeniser;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.ListIterator;
import java.util.Locale;
import java.util.Map;
import java.util.Scanner;
import java.util.Stack;

import org.ardverk.collection.PatriciaTrie;
import org.ardverk.collection.StringKeyAnalyzer;

import com.fitc.treestructure.Node;
import com.fitc.treestructure.StringTree;


/**
 * Splits an unspaced (or partially spaced) letter sequence into known
 * dictionary words, discarding stop words, using a Patricia trie for prefix
 * lookups and a {@link StringTree} to track candidate segmentations.
 */
public class Tokeniser {
	// Shared across all Tokeniser instances; lazily initialised by process().
	static PatriciaTrie<String, String> wordTrie;
	static HashSet<String> stopWords;

	static final int MINIMUM_WORD_LENGTH = 2;
	static final int MAXIMUM_WORD_LENGTH = 20;
	// Tokens longer than this are skipped entirely: the recursive extraction
	// otherwise gets stuck on really long letter runs.
	static final int MAXIMUM_TOKEN_LENGTH = 40;

	private String _sequenceToTokenise;
	StringTree tree;

	/**
	 * @param sequence raw text to be tokenised into dictionary words.
	 */
	public Tokeniser(String sequence) {
		_sequenceToTokenise = sequence;
	}

	/**
	 * Runs word recognition over the sequence supplied at construction.
	 *
	 * @return a sorted array of the unique dictionary words found, with stop
	 *         words removed; empty if the dictionaries could not be loaded.
	 */
	public String[] process() {
		HashSet<String> results = new HashSet<String>();

		// If user has not yet created dictionary for class, do it.
		try {
			if (wordTrie == null) Tokeniser.setDictionaries();
		} catch (FileNotFoundException e) {
			e.printStackTrace();
			// Without a dictionary nothing can be recognised; bail out rather
			// than hit a NullPointerException further down.
			return new String[0];
		}

		// Split sequence by spaces.
		List<String> tokens = splitBySpaces(_sequenceToTokenise);

		// Remove non alphanumeric and blanks.
		tokens = scrubOutNonLetters(tokens);

		// Run word recognition on each token.
		for (String token : tokens) {
			tree = new StringTree();
			Node root = tree.createNode(Node.ROOT);
			tree.setLetterSequence(token);
			// Only process if length limited. Otherwise gets stuck on really
			// long ones.
			if (token.length() <= MAXIMUM_TOKEN_LENGTH) {
				extract(token, root);
				for (String str : tree.getBestGuess().toStringArray()) {
					// HashSet.add already ignores repeats; just drop stop
					// words - 'and' 'that' etc...
					if (!isStopWord(str)) {
						results.add(str);
					}
				}
			}
		}

		String[] a = results.toArray(new String[0]);
		Arrays.sort(a);
		return a;
	}

	/**
	 * Strips every non-letter character from each token and drops tokens that
	 * end up shorter than {@link #MINIMUM_WORD_LENGTH}. Mutates and returns
	 * the supplied list.
	 */
	private List<String> scrubOutNonLetters(List<String> tokens) {
		ListIterator<String> iterator = tokens.listIterator();
		while (iterator.hasNext()) {
			String str = iterator.next().replaceAll("[^A-Za-z]", "");
			// A too-short result also covers the empty string.
			if (str.length() < MINIMUM_WORD_LENGTH) {
				iterator.remove();
			} else {
				iterator.set(str);
			}
		}
		return tokens;
	}

	/**
	 * Builds the shared word trie from every .txt file in the dictionaries
	 * directory, then loads the stop-word set.
	 *
	 * @throws FileNotFoundException if a listed file vanishes before reading.
	 */
	public static void setDictionaries() throws FileNotFoundException {
		wordTrie = new PatriciaTrie<String, String>(
					new StringKeyAnalyzer());

		for (File file : getDictionaries()) {
			// try-with-resources closes the Scanner even if reading fails.
			try (Scanner input = new Scanner(file)) {
				while (input.hasNextLine()) {
					String word = input.nextLine().trim().toLowerCase();
					// Keep only sensibly-sized words; put() overwrites an
					// identical existing entry harmlessly, so no containsKey
					// check is needed.
					if (word.length() >= MINIMUM_WORD_LENGTH
							&& word.length() <= MAXIMUM_WORD_LENGTH) {
						wordTrie.put(word, word);
					}
				}
			}
		}

		setStopWords();
	}

	/**
	 * Loads the shared stop-word set from every .txt file in the stop-word
	 * directory.
	 *
	 * @throws FileNotFoundException if a listed file vanishes before reading.
	 */
	private static void setStopWords() throws FileNotFoundException {
		stopWords = new HashSet<String>();

		for (File file : getStopWords()) {
			try (Scanner input = new Scanner(file)) {
				while (input.hasNextLine()) {
					// HashSet.add is a no-op on duplicates.
					stopWords.add(input.nextLine().trim().toLowerCase());
				}
			}
		}
	}

	/**
	 * @return true if s is a known stop word. Safe to call before the
	 *         dictionaries have been loaded (returns false then).
	 */
	public boolean isStopWord(String s) {
		return stopWords != null && stopWords.contains(s);
	}

	// Splits s on runs of whitespace; empty/blank fragments are cleaned up
	// later by scrubOutNonLetters.
	private static List<String> splitBySpaces(String s) {
		List<String> tokens = new ArrayList<String>();
		for (String str : s.split("\\s+")) {
			tokens.add(str.trim());
		}
		return tokens;
	}

	/**
	 * Recursively grows the candidate tree: for the remainder of word past
	 * this parent's letter depth, every dictionary word that prefixes it
	 * becomes a child node and is expanded in turn.
	 */
	private void extract(String word, Node parent) {
		if (word.length() - parent.getLetterDepth() >= MINIMUM_WORD_LENGTH) {
			Map<String, String> map = wordTrie.getPrefixedBy(word,
					parent.getLetterDepth(), MINIMUM_WORD_LENGTH);

			Iterator<String> iterator = map.keySet().iterator();
			while (iterator.hasNext()) {
				String currentWord = iterator.next();
				if (word.contains(currentWord)) {
					Node n = tree.createNode(currentWord, parent);
					extract(word, n);
				}
			}
		}
	}

	// Writes one word per line to words.txt in UTF-8. Currently unused
	// debugging helper.
	private static void outputTofile(ArrayList<String> wordList)
			throws IOException {
		final Charset ENCODING = StandardCharsets.UTF_8;

		Path path = Paths.get("words.txt");
		try (BufferedWriter writer = Files.newBufferedWriter(path, ENCODING)) {
			for (String line : wordList) {
				writer.write(line);
				writer.newLine();
			}
		}
	}

	/**
	 * @return all .txt files (any case) directly inside the dictionaries
	 *         directory; empty if the directory is missing.
	 */
	public static List<File> getDictionaries() {
		return listTextFiles("dictionaries");
	}

	/**
	 * @return all .txt files (any case) directly inside the stop-word
	 *         directory; empty if the directory is missing.
	 */
	public static List<File> getStopWords() {
		return listTextFiles("dictionaries/stop");
	}

	// Lists the .txt files directly inside path. Returns an empty list rather
	// than throwing a NullPointerException when the directory does not exist
	// or cannot be read (File.listFiles() returns null in those cases).
	private static List<File> listTextFiles(String path) {
		ArrayList<File> fileList = new ArrayList<File>();
		File[] listOfFiles = new File(path).listFiles();
		if (listOfFiles == null) {
			return fileList;
		}
		for (File f : listOfFiles) {
			// Locale.ROOT keeps the extension match locale-independent.
			if (f.isFile()
					&& f.getName().toLowerCase(Locale.ROOT).endsWith(".txt")) {
				fileList.add(f);
			}
		}
		return fileList;
	}

}
