package clustering;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.*;

import preprocess.Text_Clean;
/**
 * Builds per-cluster n-gram profiles from clustered article IDs and their
 * title/abstract ("tiab") text, then annotates each n-gram with its
 * cross-cluster document frequency and a tf-idf score.
 *
 * Typical usage: call {@link #ReadClustersFromFile(String)} and
 * {@link #ReadArticleTiab_fromFile(String)} first, then
 * {@link #BuildClusters_Global_Profile(int)}.
 */
public class Cluster_Global_Profile {
	// Each inner list holds the article IDs belonging to one cluster
	// (the leading cluster-label column is stripped while reading).
	private ArrayList<ArrayList<String>> clusters;
	// Maps article ID -> title/abstract text.
	private HashMap<String,String> article_tiab;

	/**
	 * Builds one n-gram profile list per cluster and fills in global statistics.
	 *
	 * @param n n-gram length (number of consecutive non-stopword tokens)
	 * @return one list of {@code Ngram_Profile} per cluster, in cluster order
	 */
	public ArrayList<ArrayList<Ngram_Profile>> BuildClusters_Global_Profile(int n){
		// Global count: in how many clusters does each n-gram appear?
		// (Each cluster contributes at most 1 per n-gram because
		// GetNgramsfromCluster de-duplicates within a cluster.)
		HashMap<String,Integer> ngrams_freq = new HashMap<String,Integer>();
		ArrayList<ArrayList<Ngram_Profile>> ngram_profiles = new ArrayList<ArrayList<Ngram_Profile>>();
		Text_Clean cleanser = new Text_Clean();
		for(ArrayList<String> members: clusters){
			List<Ngram_Profile> cluster_ngrams = GetNgramsfromCluster(members, article_tiab, n, cleanser);
			ngram_profiles.add((ArrayList<Ngram_Profile>) cluster_ngrams);
			for(Ngram_Profile ngram: cluster_ngrams){
				// Single-lookup increment (was remove+put, doing 3 hash ops).
				Integer count = ngrams_freq.get(ngram.getNgram());
				ngrams_freq.put(ngram.getNgram(), count == null ? 1 : count.intValue() + 1);
			}
		}
		// Second pass: push the global doc-freq into every profile and let it
		// compute its tf-idf from the total number of clusters.
		// NOTE(review): setTf_idf presumably derives idf from clusters.size()
		// inside Ngram_Profile — confirm against that class.
		for(ArrayList<Ngram_Profile> ngrams: ngram_profiles){
			for(Ngram_Profile ngram: ngrams){
				ngram.setDoc_freq(ngrams_freq.get(ngram.getNgram()).intValue());
				ngram.setTf_idf(clusters.size());
			}
		}
		return ngram_profiles;
	}

	/**
	 * Reads cluster membership from a tab-separated file: one cluster per line,
	 * first column is the cluster label (discarded), remaining columns are
	 * article IDs. Blank lines are skipped.
	 *
	 * @param filename path of the cluster file (read as UTF-8)
	 * @return the clusters read (also stored in this object); empty on I/O error
	 */
	public ArrayList<ArrayList<String>> ReadClustersFromFile(String filename){
		clusters = new ArrayList<ArrayList<String>>();
		BufferedReader br = null;
		try{
			// Explicit UTF-8 instead of the platform default charset.
			br = new BufferedReader(new InputStreamReader(new FileInputStream(new File(filename)), "UTF-8"));
			String line;
			while((line = br.readLine()) != null){
				line = line.trim();
				if(line.length() == 0)
					continue;
				String[] fields = line.split("\t");
				ArrayList<String> cluster = new ArrayList<String>(Arrays.asList(fields));
				cluster.remove(0); // drop the cluster-label column
				clusters.add(cluster);
			}
		}catch(IOException e){
			e.printStackTrace();
		}finally{
			// Close in finally so the reader is not leaked on an I/O error
			// (original closed only on the success path).
			if(br != null){
				try{ br.close(); }catch(IOException e){ e.printStackTrace(); }
			}
		}
		return clusters;
	}

	/**
	 * Reads the article-ID -> title/abstract map from a tab-separated file:
	 * each non-blank line is "id&lt;TAB&gt;text". Blank lines are skipped.
	 *
	 * @param filename path of the article text file (read as UTF-8)
	 */
	public void ReadArticleTiab_fromFile(String filename){
		article_tiab = new HashMap<String,String>();
		BufferedReader br = null;
		try{
			br = new BufferedReader(new InputStreamReader(new FileInputStream(new File(filename)), "UTF-8"));
			String line;
			while((line = br.readLine()) != null){
				line = line.trim();
				if(line.length() == 0)
					continue;
				// Split on the FIRST tab only: the text itself may contain tabs.
				int index = line.indexOf("\t");
				article_tiab.put(line.substring(0, index), line.substring(index + 1));
			}
		}catch(IOException e){
			e.printStackTrace();
		}finally{
			if(br != null){
				try{ br.close(); }catch(IOException e){ e.printStackTrace(); }
			}
		}
	}

	/**
	 * Collects the distinct n-grams over all member articles of one cluster.
	 * The term frequency of a profile counts how often its n-gram is produced
	 * across the cluster's articles (a new Ngram_Profile presumably starts at
	 * term-freq 1 — confirm in Ngram_Profile's constructor).
	 *
	 * @param members      article IDs belonging to the cluster
	 * @param article_tiab article ID -> text map; IDs without text are skipped
	 * @param n            n-gram length
	 * @param cleanser     shared text cleanser / stopword checker
	 * @return distinct n-gram profiles of the cluster (never null)
	 */
	public List<Ngram_Profile> GetNgramsfromCluster(ArrayList<String> members, HashMap<String,String> article_tiab, int n, Text_Clean cleanser){
		List<Ngram_Profile> profiles = new ArrayList<Ngram_Profile>();
		// n-gram text -> index of its profile in `profiles`
		HashMap<String,Integer> visited_ngrams = new HashMap<String,Integer>();
		for(String member: members){
			// article_tiab.get(member) may be null for unknown IDs;
			// GetNgramsfromArticle now tolerates that and returns an empty list
			// (the original returned null here and crashed in this loop).
			List<Ngram_Profile> article_ngrams = GetNgramsfromArticle(article_tiab.get(member), n, cleanser);
			for(Ngram_Profile ngram: article_ngrams){
				Integer index = visited_ngrams.get(ngram.getNgram());
				if(index != null){
					profiles.get(index.intValue()).Increase_Termfreq();
				}else{
					visited_ngrams.put(ngram.getNgram(), Integer.valueOf(profiles.size()));
					profiles.add(new Ngram_Profile(ngram.getNgram()));
				}
			}
		}
		return profiles;
	}

	/**
	 * Extracts the distinct n-grams of a single article's text.
	 *
	 * @param tiab     article title/abstract text; may be null or empty
	 * @param n        n-gram length
	 * @param cleanser shared text cleanser / stopword checker
	 * @return distinct n-gram profiles (never null; empty for empty input)
	 */
	public List<Ngram_Profile> GetNgramsfromArticle(String tiab, int n, Text_Clean cleanser){
		List<String> sentences = GetSentenceChunk(tiab, cleanser);
		return GetNgramsfromSentences(sentences, n, cleanser);
	}

	/**
	 * Slides a window of length n over each sentence's tokens and keeps every
	 * window that contains no stopword. Each distinct n-gram is emitted once;
	 * repeats within the sentence list are ignored (per-article de-dup).
	 *
	 * @param sentences pre-cleaned sentences; may be null or empty
	 * @param n         n-gram length
	 * @param cleanser  stopword checker
	 * @return distinct n-gram profiles (never null — was null for empty input,
	 *         which made callers NPE)
	 */
	public List<Ngram_Profile> GetNgramsfromSentences(List<String> sentences, int n, Text_Clean cleanser){
		List<Ngram_Profile> ngrams = new ArrayList<Ngram_Profile>();
		if(sentences == null || sentences.size() == 0)
			return ngrams;
		HashMap<String,Integer> visited_ngrams = new HashMap<String,Integer>();
		for(String sentence: sentences){
			String[] tokens = sentence.split(" ");
			for(int i = 0; i <= tokens.length - n; i++){
				if(cleanser.Is_StopWord(tokens[i]))
					continue;
				// StringBuilder: no need for StringBuffer's synchronization here.
				StringBuilder sb = new StringBuilder(tokens[i]);
				int j = i + 1;
				for(; j < i + n; j++){
					// Abort the window as soon as a stopword appears inside it.
					if(cleanser.Is_StopWord(tokens[j]))
						break;
					sb.append(' ').append(tokens[j]);
				}
				if(j == i + n && !visited_ngrams.containsKey(sb.toString())){
					visited_ngrams.put(sb.toString(), Integer.valueOf(ngrams.size()));
					ngrams.add(new Ngram_Profile(sb.toString()));
				}
			}
		}
		return ngrams;
	}

	/**
	 * Lowercases the text and splits it into sentence-like chunks on
	 * punctuation/bracket delimiters, cleaning each chunk via the cleanser.
	 *
	 * @param line     raw text; may be null or empty
	 * @param cleanser sentence cleanser
	 * @return cleaned chunks (never null — was null for empty input)
	 */
	public List<String> GetSentenceChunk(String line, Text_Clean cleanser){
		List<String> segments = new ArrayList<String>();
		if(line == null || line.length() == 0)
			return segments;
		line = line.toLowerCase();
		StringTokenizer st = new StringTokenizer(line, "\t[],.:;?!()");
		while(st.hasMoreTokens()){
			segments.add(cleanser.cleanSentence(st.nextToken()));
		}
		return segments;
	}
}
