package com.sjtu.vfact.textual.model;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import com.sjtu.vfact.textual.util.Generator;
import com.sjtu.vfact.textual.util.TextUtil;

public class FACTAModel {

	/** Documents loaded from the input file, in file order (ids start at 1). Never reassigned. */
	private final List<Document> documents;
	/** Inverted index: single term -> ids of the documents containing it. Never reassigned. */
	private final Map<String, Set<Integer>> singleTermMap;
	/** Raw range tokens (trimmed) parsed by the last convertRanges call. */
	private String[] ssaRanges;
	/** One summary dictionary per support range, filled by runSSA(List, String). */
	private List<Map<TermSet, Set<Document>>> ssaDictList;

	private MasterCollection collection;

	public FACTAModel(){
		this.documents = new ArrayList<Document>();
		this.singleTermMap = new HashMap<String, Set<Integer>>();
		this.collection = new MasterCollection();
	}

	public List<Document> getDocuments(){
		return this.documents;
	}

	public Map<String, Set<Integer>> getSingleTermMap(){
		return singleTermMap;
	}

	public MasterCollection getMasterCollection(){
		return collection;
	}

	public void setMasterCollection(MasterCollection collection){
		this.collection = collection;
	}

	public String[] getSSARanges(){
		return ssaRanges;
	}

	public List<Map<TermSet, Set<Document>>> getSSADictList(){
		return ssaDictList;
	}

	public Map<TermSet, Set<Document>> getSSADict(int index){
		return ssaDictList.get(index);
	}

	/**
	 * Append a document to the in-memory collection.
	 * @param doc the document to add
	 */
	private void addDocument(Document doc){
		documents.add(doc);
	}

	/**
	 * Load documents from the given text file, one document per line.
	 * Ids are assigned sequentially starting from 1, and the single-term
	 * inverted index is updated for each document as it is read.
	 *
	 * @param file the text file to read
	 * @return the total number of documents loaded so far
	 */
	public int loadDocument(File file){
		BufferedReader in = null;
		try {
			in = new BufferedReader(new FileReader(file));
			String line;
			int id = 1;
			while((line = in.readLine()) != null){
				Document doc = new Document(id, line);
				addDocument(doc);

				// generate single-term table with its corresponding docIds
				Generator.fillSingleTermMap(singleTermMap, doc);

				id++;
			}

		} catch (IOException e) {
			// FileNotFoundException is an IOException, so one catch covers both;
			// best-effort: report and return whatever was loaded so far
			e.printStackTrace();
		} finally{
			if(in != null){
				try {
					in.close();
				} catch (IOException e) {
					e.printStackTrace();
				}
			}
		}

		return getDocumentsSize();
	}

	/**
	 * Get the total amount of documents loaded.
	 * @return number of documents currently in the model
	 */
	public int getDocumentsSize(){
		return documents.size();
	}

	/**
	 * Calculate coverage of the master collection against the terms whose
	 * support is at least {@code minSupport}.
	 *
	 * @param minSupport minimum support threshold (inclusive)
	 * @return the computed coverage
	 */
	public double calculateCoverage(int minSupport){
		collection.calculateCoverage(getFrequentSingleTerms(minSupport));
		return collection.getCoverage();
	}

	/**
	 * Calculate specificity of the master collection against the terms whose
	 * support is at least {@code minSupport}.
	 *
	 * @param minSupport minimum support threshold (inclusive)
	 * @return the computed specificity
	 */
	public double calculateSpecificity(int minSupport){
		collection.calculateSpecificity(getFrequentSingleTerms(minSupport));
		return collection.getSpecificity();
	}

	/**
	 * Calculate density of the master collection.
	 * @return the computed density
	 */
	public double calculateDensity(){
		collection.calculateDensity();
		return collection.getDensity();
	}

	/**
	 * Get the terms whose support is greater than or equal to minSupport.
	 *
	 * @param minSupport minimum support threshold (inclusive)
	 * @return frequent terms, in the inverted index's iteration order
	 */
	private Set<String> getFrequentSingleTerms(int minSupport){
		Set<String> terms = new LinkedHashSet<String>();
		for(Map.Entry<String, Set<Integer>> entry : singleTermMap.entrySet()){
			if(entry.getValue().size() >= minSupport){
				terms.add(entry.getKey());
			}
		}

		return terms;
	}

	/**
	 * Convert ranges given in String format (e.g. "[1-5];[6-10]") to a list of
	 * inclusive (min, max) pairs. The trimmed raw tokens are also recorded in
	 * {@link #getSSARanges()}.
	 *
	 * @param ranges semicolon-separated ranges of the form "[min-max]"
	 * @return list of parsed (min, max) pairs, one per token
	 */
	public List<Pair<Integer, Integer>> convertRanges(String ranges){
		List<Pair<Integer, Integer>> rangeList = new ArrayList<Pair<Integer,Integer>>();
		ssaRanges = ranges.trim().split(";");
		for(int k = 0; k < ssaRanges.length; k++){
			// store the trimmed token back — the original trim() result was discarded
			ssaRanges[k] = ssaRanges[k].trim();
			String[] r = ssaRanges[k].split("-");
			int min = Integer.parseInt(r[0].replace("[", "").trim());
			int max = Integer.parseInt(r[1].replace("]", "").trim());
			rangeList.add(new Pair<Integer, Integer>(min, max));
		}

		return rangeList;
	}

	/**
	 * Run SSA once per support range encoded in {@code ranges}, collect each
	 * resulting summary dictionary into {@link #getSSADictList()}, and write a
	 * textual clustering report to "result.txt".
	 *
	 * @param documents the documents to summarize
	 * @param ranges    semicolon-separated support ranges, e.g. "[1-5];[6-10]"
	 */
	public void runSSA(List<Document> documents, String ranges){
		//convert ranges given in String format to list of pairs format
		List<Pair<Integer, Integer>> rangeList = convertRanges(ranges);

		ssaDictList = new ArrayList<Map<TermSet,Set<Document>>>();
		BufferedWriter writer = null;

		try {
			writer = new BufferedWriter(new FileWriter("result.txt"));

			for(int p = 0; p < rangeList.size(); p++){
				// start with every document index unsummarized; runSSA removes covered ones
				Set<Integer> noSummaryIndexs = new LinkedHashSet<Integer>();
				for(int i = 0; i < documents.size(); i++){
					noSummaryIndexs.add(i);
				}

				int min = rangeList.get(p).getFirst();
				int max = rangeList.get(p).getSecond();

				// build the report with a StringBuilder instead of repeated String +=
				StringBuilder s = new StringBuilder();
				s.append("Clustering range : [").append(min).append(", ").append(max).append("]\n");

				Map<TermSet, Set<Document>> map = runSSA(documents, min, max, noSummaryIndexs);
				ssaDictList.add(map);

				int i = 1;
				s.append("#").append(i).append(" -> ");
				for(Map.Entry<TermSet, Set<Document>> entry : map.entrySet()){
					s.append(entry.getKey()).append(" : ");
				}

				s.append("\n");

				// uncovered documents are listed individually after the summaries
				for(int id : noSummaryIndexs){
					i++;
					Document doc = documents.get(id);
					s.append("#").append(i).append(" -> ").append(doc.toString()).append("\n");
				}

				s.append("\n");
				writer.write(s.toString());

			}

		} catch (IOException e) {
			e.printStackTrace();
		} finally{
			if(writer != null){
				try {
					writer.close();
				} catch (IOException e) {
					e.printStackTrace();
				}
			}
		}

	}

	/**
	 * Run the SSA algorithm: for every pair of documents, intersect their term
	 * sets; when the intersection is non-empty and its support falls within
	 * [min, max], record it as a summary covering both documents. Indices of
	 * documents that get covered are removed from {@code noSummaryIndexs}.
	 *
	 * @param documents       the documents to summarize
	 * @param min             minimum support (inclusive)
	 * @param max             maximum support (inclusive)
	 * @param noSummaryIndexs in/out set of document indices not yet covered
	 * @return map of summary term sets to the documents they cover
	 */
	public Map<TermSet, Set<Document>> runSSA(List<Document> documents, int min, int max, Set<Integer> noSummaryIndexs){

		int collectionSize = documents.size();

		Map<TermSet, Set<Document>> sumDict = new HashMap<TermSet, Set<Document>>();
		for(int i = 0; i < collectionSize - 1; i++){
			Document doc_i = documents.get(i);
			TermSet ts_i = new TermSet(doc_i.getTerms());

			for(int j = i + 1; j < collectionSize; j++){
				Document doc_j = documents.get(j);
				TermSet ts_j = new TermSet(doc_j.getTerms());

				TermSet summaryTS = TextUtil.intersect(ts_i, ts_j);
				int sumTSSupport = 0;
				if(!summaryTS.isEmpty()){
					sumTSSupport = calculateTermsetSupport(summaryTS);
				}
				if(TextUtil.inRangeOf(sumTSSupport, min, max)){
					// create the bucket on first sight, then share the add/remove logic
					Set<Document> set = sumDict.get(summaryTS);
					if(set == null){
						set = new LinkedHashSet<Document>();
						sumDict.put(summaryTS, set);
					}
					set.add(doc_i);
					set.add(doc_j);
					noSummaryIndexs.remove(i);
					noSummaryIndexs.remove(j);
				}
			}
		}

		return sumDict;
	}

	/**
	 * Calculate support of a given termset, i.e. the number of documents that
	 * contain every term in it.
	 *
	 * @param termset the termset to evaluate
	 * @return the termset's support (0 if any term is unknown)
	 */
	public int calculateTermsetSupport(TermSet termset){
		return getDocIDsFromTermset(termset).size();
	}

	/**
	 * Get the doc ids of all documents that contain every term of the given
	 * termset (intersection of the per-term posting sets).
	 *
	 * @param termset the termset to look up
	 * @return the intersected doc-id set; empty if the termset is empty or
	 *         any term is absent from the index (the original code NPE'd here)
	 */
	public Set<Integer> getDocIDsFromTermset(TermSet termset){
		Set<Integer> intersectSet = null;
		for(String term : termset.getTerms()){
			Set<Integer> set = singleTermMap.get(term);
			if(set == null){
				// unknown term: no document can contain the whole termset
				return new LinkedHashSet<Integer>();
			}
			if(intersectSet == null){
				// copy so the index's internal set is never exposed to callers
				intersectSet = new LinkedHashSet<Integer>(set);
			}else{
				intersectSet = TextUtil.<Integer>intersect(intersectSet, set);
			}
		}

		return (intersectSet == null) ? new LinkedHashSet<Integer>() : intersectSet;
	}

	/**
	 * Get the termsets of the master collection at the given row indices.
	 *
	 * @param selectedRows row indices into the master collection
	 * @return the corresponding termsets, in the given order
	 */
	public List<TermSet> getTermsetFromCollection(int[] selectedRows){
		List<TermSet> list = new ArrayList<TermSet>();
		for(int index : selectedRows){
			list.add(collection.getTermset(index));
		}
		return list;
	}

	/**
	 * Run the Quick Summarizer (one of the After Mining Tasks): intersect all
	 * given termsets into a single common termset.
	 *
	 * @param termsets non-empty list of termsets to intersect
	 * @return the intersection of all termsets
	 * @throws IndexOutOfBoundsException if {@code termsets} is empty
	 */
	public TermSet runQuickSummarizer(List<TermSet> termsets){
		TermSet intersectTermset = termsets.get(0);
		for(TermSet ts : termsets){
			intersectTermset = intersectTermset.intersect(ts);
		}
		return intersectTermset;
	}

	/**
	 * Get all terms with at least the given support that are not covered by
	 * the master collection.
	 *
	 * @param support minimum support threshold (inclusive)
	 * @return map of uncovered term -> ids of documents containing it
	 */
	public Map<String, Set<Integer>> getUncoveredTerms(int support){
		Map<String, Set<Integer>> map = new HashMap<String, Set<Integer>>();
		for(Map.Entry<String, Set<Integer>> entry : singleTermMap.entrySet()){
			String term = entry.getKey();
			Set<Integer> docIDs = entry.getValue();

			if(docIDs.size() >= support && !(collection.getTerms().contains(term))){
				map.put(term, docIDs);
			}
		}

		return map;
	}

}
