package moteur;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Set;
import java.util.Stack;

import utils.Index;
import utils.DirParser;
import utils.PosInfo;

import tools.FrenchStemmer;
import tools.FrenchTokenizer;
import tools.Normalizer;

/**
 * Main class for the search engine: builds two inverted indices (stemmed and
 * raw-token) over a corpus, persists/reloads them, and turns user queries into
 * {@link Search} objects (boolean-expression or proximity search).
 * @author haonan
 */
public class Moteur 
{
	private Set<String>	allFiles;					// names of every indexed document
	
	private boolean boolSearch = false;				// true => boolean search, false => proximity search
	
	private Normalizer stemmer,tokenizer;			// two normalizers: stemming vs. plain tokenizing
	private Index tokenizer_index;					// index over raw tokens
	private Index stemmer_index;					// index over stemmed tokens
	
	// Query words prefixed with '+': looked up verbatim in the tokenizer index.
	private ArrayList<String> nonNormalizationWords = new ArrayList<String>();
	private String FILE_ID_MAP = "file_id_map.txt";
	
	/**
	 * Creates an engine with empty indices and loads the French stop-word list
	 * from the classpath resource "/frenchST" for both normalizers.
	 */
	public Moteur()
	{
		stemmer_index = new Index();
		tokenizer_index = new Index();
		allFiles = new HashSet<String>();
		try {
			// NOTE(review): getResource() returns null when "/frenchST" is missing,
			// which would surface as an NPE here, not as the IOException we catch.
			String stopWordFile = this.getClass().getResource("/frenchST").getPath();
			stemmer = new FrenchStemmer(stopWordFile);
			tokenizer = new FrenchTokenizer(stopWordFile);
		} catch (IOException e) {
			e.printStackTrace();
		}
	}
	
	/**
	 * Builds both indices from every file found under srcDir.
	 * Word positions are 1-based.
	 * @param srcDir root directory of the corpus
	 * @throws IOException if a corpus file cannot be read
	 */
	public void constructIndex(String srcDir) throws IOException
	{
		ArrayList<File> files = DirParser.parseDir(srcDir);			//Get all the files
		this.stemmer_index.setNbDoc(files.size());
		this.tokenizer_index.setNbDoc(files.size());
		
		for(File f: files)
		{
			ArrayList<String> stemmedWords = stemmer.normalize(f);	//All words, stop words included.
			ArrayList<String> rawWords = tokenizer.normalize(f);
			String filename = f.getName();
			
			allFiles.add(filename);
			
			for(int i = 0; i < stemmedWords.size(); ++i)
			{
				stemmer_index.insert(stemmedWords.get(i), filename, i + 1);	//Positions start at 1.
			}
			for(int i = 0; i < rawWords.size(); ++i)
			{
				tokenizer_index.insert(rawWords.get(i), filename, i + 1);
			}
		}
	}
	
	/**
	 * Reads the file/id map from disk. Each line is "filename:id" with one
	 * unique id per file; returns a map from id to filename. Every filename
	 * read is also registered in allFiles.
	 * @throws IOException if the map file cannot be read
	 */
	private HashMap<String, String> readFileIdMap(String filename) throws IOException
	{
		HashMap<String, String> idFileMap = new HashMap<String, String>();
		BufferedReader br = new BufferedReader(
				new InputStreamReader(new FileInputStream(filename), "UTF-8"));
		try
		{
			String line;
			while((line = br.readLine()) != null)
			{
				String[] kv = line.split(":");
				idFileMap.put(kv[1], kv[0]);		// id -> filename
				this.allFiles.add(kv[0]);			// register the filename
			}
		}
		finally
		{
			br.close();								// FIX: close even on a malformed line
		}
		return idFileMap;
	}
	
	/**
	 * Rebuilds both indices from the on-disk index files written by
	 * {@link #printIndex(String)}: "stemmer_"+indexfile and
	 * "tokenizer_"+indexfile. Each entry is a word line followed by a
	 * tab-separated line of fileId:frequency pairs.
	 * @throws IOException if an index file cannot be read
	 */
	public void constructIndexFromFile(String indexfile) throws IOException
	{
		HashMap<String, String> idFileMap = this.readFileIdMap(this.getFILE_ID_MAP());
		// FIX: set the document count once, not on every single posting insert.
		int nbDoc = idFileMap.keySet().size();
		this.stemmer_index.setNbDoc(nbDoc);
		this.tokenizer_index.setNbDoc(nbDoc);
		
		String[] indexFiles = {"stemmer_" + indexfile, "tokenizer_" + indexfile};
		for(String indexFile : indexFiles)
		{
			Index target = indexFile.startsWith("stemmer") ? this.stemmer_index
														   : this.tokenizer_index;
			BufferedReader br = new BufferedReader(
					new InputStreamReader(new FileInputStream(indexFile), "UTF-8"));
			try
			{
				String word;
				while((word = br.readLine()) != null)
				{
					if(word.trim().length() == 0)
					{
						continue;
					}
					String map = br.readLine();				// "id:freq\tid:freq..."
					for(String kv : map.split("\\t"))
					{
						if(kv.length() == 0)
						{
							continue;
						}
						String[] idFreq = kv.split(":");
						String docName = idFileMap.get(idFreq[0]);	// id -> filename
						int freq = Integer.parseInt(idFreq[1]);
						target.insert(word, docName, new PosInfo(freq));
					}
				}
			}
			finally
			{
				br.close();									// FIX: close even on parse errors
			}
		}
	}
	
	
	/**
	 * Writes the filename/id map, one "filename:id" line per file (UTF-8).
	 * The id is simply the file's position in the given list; postings on
	 * disk store the id, not the name, to keep the index small.
	 * @throws IOException if the map file cannot be written
	 */
	private void printFileIdMap(String filename, ArrayList<String> files ) throws IOException
	{
		Writer os = new OutputStreamWriter(new FileOutputStream(filename), "UTF-8");
		try
		{
			int id = 0;
			for(String file : files)
			{
				os.write(file + ":" + id + "\n");
				id++;
			}
		}
		finally
		{
			os.close();										// FIX: close even if a write fails
		}
	}
	
	/**
	 * Persists the file/id map and both indices to disk. The index files are
	 * named "stemmer_"+filename and "tokenizer_"+filename.
	 * I/O failures are reported on stderr (best effort).
	 */
	public void printIndex(String filename)
	{
		ArrayList<String> files = new ArrayList<String>(this.allFiles);
		try{
			this.printFileIdMap(this.getFILE_ID_MAP(), files);
			stemmer_index.printToFile("stemmer_" + filename, files);
			tokenizer_index.printToFile("tokenizer_" + filename, files);
		}catch (IOException e) {
			e.printStackTrace();
		}
	}
	
	/**
	 * Converts the boolean query from infix to postfix (shunting-yard), e.g.
	 * ( A | B ) & ( C | D )  =>  A B | C D | &.
	 * '&' binds tighter than '|'; '~' is passed through in reading order and
	 * resolved during evaluation.
	 */
	private ArrayList<String> toPostfix(ArrayList<String> inputs)
	{
		ArrayList<String> postfix = new ArrayList<String>();
		Stack<String> stack = new Stack<String>();
		
		for(String str : inputs)
		{
			if(str.equals("(") || str.equals("|"))
			{
				stack.push(str);
			}
			else if(str.equals("&"))
			{
				// FIX: the original test (peek=="|" && peek=="&") was always false,
				// so '&' never popped anything. Pop pending '&' (same precedence,
				// left-associative); '|' binds looser and must stay on the stack.
				while(!stack.isEmpty() && stack.peek().equals("&"))
				{
					postfix.add(stack.pop());
				}
				stack.push(str);
			}
			else if(str.equals(")"))
			{
				while(!stack.isEmpty() && !stack.peek().equals("("))
				{
					postfix.add(stack.pop());
				}
				stack.pop();							// discard the matching "("
			}
			else
			{
				postfix.add(str);						// operand (or '~')
			}
		}
		while(!stack.isEmpty())
		{
			postfix.add(stack.pop());
		}
		return postfix;
	}
	
	/** Tokenizer index for '+'-marked (non-normalized) words, stemmer index otherwise. */
	private Index chooseIndex(String word)
	{
		return nonNormalizationWords.contains(word) ? tokenizer_index : stemmer_index;
	}
	
	/**
	 * Builds the {@link Search} for the parsed query words.
	 * In boolean mode the words form an expression over '&', '|', '~', '(' and
	 * ')' that is converted to postfix and evaluated into a tree of
	 * And/Or/Not/Basic searches; in normal mode a proximity search is returned.
	 * @return the search object, or null for an illegal boolean expression
	 */
	public Search setupSearch(ArrayList<String> inputs)
	{
		//Normal search model
		if(!boolSearch)
		{
			if(this.nonNormalizationWords.size() == 0)
			{
				return new NearSearch(inputs, stemmer_index);
			}
			return new NearSearchDoubleIndex(this.tokenizer_index, this.stemmer_index,
					inputs, this.nonNormalizationWords);
		}
		
		//Boolean search model
		if(!InputErrorChecker.checkBoolExp(inputs))
		{
			System.out.println("Ilegal boolean expression!");
			return null;
		}
		
		ArrayList<String> postfix = toPostfix(inputs);
		
		//Evaluate the postfix expression into a Search tree.
		Stack<Search> searchStack = new Stack<Search>();
		for(int i = 0; i < postfix.size(); ++i)
		{
			String str = postfix.get(i);
			if(str.equals("&"))
			{
				searchStack.push(new AndSearch(searchStack.pop(), searchStack.pop()));
			}
			else if(str.equals("|"))
			{
				searchStack.push(new OrSearch(searchStack.pop(), searchStack.pop()));
			}
			else if(str.equals("~"))
			{
				// '~' negates the word that follows it in the postfix list.
				// FIX: the index used to be chosen by looking up "~" itself, so a
				// negated '+'-word was always (wrongly) sent to the stemmer index.
				String word = postfix.get(i + 1);
				searchStack.push(new NotSearch(
						new BasicSearch(postfix.get(++i), chooseIndex(word)), allFiles));
			}
			else
			{
				searchStack.push(new BasicSearch(str, chooseIndex(str)));
			}
		}
		
		return searchStack.pop();
	}
	
	/**
	 * Splits the raw user input on spaces and fills 'out' with search terms.
	 * A token starting with '+' is kept verbatim (and remembered in
	 * nonNormalizationWords); any other token is stemmed first.
	 * @return true on success, false when a '+' token is malformed
	 * @throws IOException if the stemmer fails
	 */
	public boolean parseInput(String input,ArrayList<String> out) throws IOException
	{
		nonNormalizationWords.clear();
		String[] tokens = input.trim().split(" ");
		
		for(int i = 0; i < tokens.length; ++i)
		{
			if(tokens[i].equals("\t") || tokens[i].equals(""))
			{
				continue;
			}
			
			if(tokens[i].charAt(0) == '+')
			{
				tokens[i] = tokens[i].substring(1);
				// a lone '+' or a second '+' inside the word is rejected
				if(tokens[i].contains("+") || tokens[i].length() == 0)
				{
					System.out.println("Input ilegal!['+']");
					return false;
				}
				out.add(tokens[i]);
				nonNormalizationWords.add(tokens[i]);
			}
			else
			{
				ArrayList<String> normalized = stemmer.normalize(tokens[i]);
				// FIX: a token normalized away entirely (e.g. a stop word) used to
				// throw IndexOutOfBoundsException on get(0); now it is skipped.
				if(!normalized.isEmpty())
				{
					out.add(normalized.get(0));
				}
			}
		}
		return true;
	}
	
	/** Switches between boolean search (true) and normal/proximity search (false). */
	public void setBoolSearch(boolean bool)
	{
		boolSearch = bool;
	}
	
	public boolean isBoolSearch()
	{
		return boolSearch;
	}

	/** Path of the filename/id map file. */
	public String getFILE_ID_MAP() {
		return FILE_ID_MAP;
	}

	public void setFILE_ID_MAP(String fILE_ID_MAP) {
		FILE_ID_MAP = fILE_ID_MAP;
	}
	
}
