import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.Map.Entry;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

import libsvm.*;

/**
 * Text+speech emotion detection (variant m2).
 *
 * Pipeline: read TAB-separated test sentences, build TF-IDF feature lines,
 * classify them with a pre-trained libsvm model on a thread pool, then fuse the
 * text model's class-0 probability with an external speech model's labels into
 * a final per-file label.
 *
 * NOTE(review): all state lives in public static maps, so the class is neither
 * thread-safe nor reusable across data sets; kept as-is to preserve the
 * external interface.
 */
public class EmoDetect_m2 {

	/** Sentence text per 0-based input line number (token before the first TAB). */
	public static HashMap<Integer,String> test = new HashMap<Integer,String>();
	/** File/segment id per 0-based input line number (token after the first TAB). */
	public static HashMap<Integer,String> indexFile = new HashMap<Integer,String>();
	/** IDF value of every known word. */
	public static HashMap<String, Double> wordsIDF = new HashMap<String, Double>();
	/** Feature index of every known word (libsvm column number). */
	public static HashMap<String, Integer> wordsIndex = new HashMap<String, Integer>();
	/** Sparse libsvm feature line ("index:tfidf ...") per sentence. */
	public static HashMap<Integer,String> svmtest = new HashMap<Integer,String>();
	/** Raw SVM prediction output, one entry per sentence, in input order. */
	public static List<String> result = new ArrayList<String>();
	/** file id -> class-0 probability from the text model. */
	public static HashMap<String, Double> txt_result = new HashMap<String, Double>();
	/** file id -> label from the speech model. */
	public static HashMap<String, String> speech_result = new HashMap<String, String>();
	/** file id -> final fused label. */
	public static HashMap<String, String> txt_speech_result = new HashMap<String, String>();


	/*********************** Text preprocessing / feature extraction *********************/

	/**
	 * Reads the test data file: each line is "sentence&lt;TAB&gt;fileId".
	 * Fills {@link #test} and {@link #indexFile}, keyed by 0-based line number.
	 *
	 * @param path UTF-8 encoded test data file
	 * @throws Exception on I/O failure or a line without a TAB (ArrayIndexOutOfBounds)
	 */
	public void readFile(String path) throws Exception {
		// try-with-resources: the original leaked the reader when an exception
		// was thrown mid-read; StandardCharsets.UTF_8 replaces the "utf-8" string.
		try (BufferedReader reader = new BufferedReader(
				new InputStreamReader(new FileInputStream(path), StandardCharsets.UTF_8))) {
			String line;
			int index = 0;
			while ((line = reader.readLine()) != null) {
				String[] tokens = line.split("\t");
				test.put(index, tokens[0]);
				indexFile.put(index, tokens[1]);
				index++;
			}
		}
	}

	/**
	 * Loads the IDF table: each line is "index word idf", space separated.
	 * Fills {@link #wordsIDF} (word -&gt; idf) and {@link #wordsIndex} (word -&gt; feature index).
	 *
	 * @param path UTF-8 encoded IDF file
	 * @throws Exception on I/O failure or malformed lines
	 */
	public void getIDF(String path) throws Exception {
		try (BufferedReader reader = new BufferedReader(
				new InputStreamReader(new FileInputStream(path), StandardCharsets.UTF_8))) {
			String line;
			while ((line = reader.readLine()) != null) {
				String[] tokens = line.split(" ");
				wordsIDF.put(tokens[1], Double.parseDouble(tokens[2]));
				wordsIndex.put(tokens[1], Integer.valueOf(tokens[0]));
			}
		}
	}

	/**
	 * Converts every sentence in {@link #test} into a sparse libsvm feature line
	 * "index:tfidf index:tfidf ..." with indices in ascending order (libsvm
	 * requires sorted feature indices) and stores it in {@link #svmtest}.
	 *
	 * NOTE(review): TF here is term count divided by the number of DISTINCT
	 * words in the sentence, not the total token count — unusual, but kept
	 * as-is because the model was trained with the same convention.
	 */
	public void getTFIDF() throws Exception {
		for (Map.Entry<Integer, String> sentence : test.entrySet()) {
			int index = sentence.getKey();
			// Split once; the original re-split the whole sentence on every
			// loop iteration (accidental O(n^2) per sentence).
			String[] words = sentence.getValue().split(" ");
			HashMap<String, Integer> wordsinSen = new HashMap<String, Integer>(); // word -> count in this sentence
			for (String word : words) {
				Integer count = wordsinSen.get(word);
				wordsinSen.put(word, count == null ? 1 : count + 1);
			}
			HashMap<Integer, Double> tfIdf = new HashMap<Integer, Double>(); // feature index -> tf*idf
			for (Map.Entry<String, Integer> entry : wordsinSen.entrySet()) {
				Double idf = wordsIDF.get(entry.getKey());
				if (idf != null) { // words missing from the IDF table are dropped
					double tf = (double) entry.getValue() / wordsinSen.size();
					tfIdf.put(wordsIndex.get(entry.getKey()), tf * idf);
				}
			}
			List<Integer> keys = new ArrayList<Integer>(tfIdf.keySet());
			Collections.sort(keys);
			StringBuilder feaWord = new StringBuilder(); // StringBuilder instead of += in a loop
			for (Integer key : keys) {
				feaWord.append(key).append(':').append(tfIdf.get(key)).append(' ');
			}
			svmtest.put(index, feaWord.toString());
		}
	}

	/**
	 * Full text-side pipeline: load test data, load the IDF table, build features.
	 *
	 * @param testdata path of the TAB-separated test file
	 * @param idf      path of the IDF table
	 */
	public void proTest(String testdata, String idf) throws Exception {
		readFile(testdata);
		getIDF(idf);
		getTFIDF();
		System.out.println("测试Feature提取结束！");
	}

	/****************************** SVM prediction **************************/

	/**
	 * Predicts the label of one libsvm feature line.
	 *
	 * @param line                sparse feature line "index:value index:value ..."
	 * @param model               loaded libsvm model
	 * @param predict_probability 1 to request per-class probabilities
	 * @return "label p0 p1 ... " (trailing space) when probabilities are
	 *         requested and the model type supports them, otherwise "label\n"
	 */
	public String predict(String line, svm_model model, int predict_probability) throws IOException {
		int svm_type = svm.svm_get_svm_type(model);
		int nr_class = svm.svm_get_nr_class(model);
		double[] prob_estimates = null;

		if (predict_probability == 1) {
			int[] labels = new int[nr_class];
			svm.svm_get_labels(model, labels);
			prob_estimates = new double[nr_class];
		}

		// Tokens alternate index, value; ':' is treated as a separator too.
		StringTokenizer st = new StringTokenizer(line, " \t\n\r\f:");
		int m = st.countTokens() / 2;
		svm_node[] x = new svm_node[m];
		for (int j = 0; j < m; j++) {
			x[j] = new svm_node();
			x[j].index = Integer.parseInt(st.nextToken());
			x[j].value = Double.parseDouble(st.nextToken()); // was Double.valueOf(...).doubleValue()
		}

		StringBuilder output = new StringBuilder();
		double v;
		// Probability estimates are only defined for C-SVC / nu-SVC models.
		if (predict_probability == 1 && (svm_type == svm_parameter.C_SVC || svm_type == svm_parameter.NU_SVC)) {
			v = svm.svm_predict_probability(model, x, prob_estimates);
			output.append(v).append(' ');
			for (int j = 0; j < nr_class; j++) {
				output.append(prob_estimates[j]).append(' ');
			}
		} else {
			v = svm.svm_predict(model, x);
			output.append(v).append('\n');
		}
		return output.toString();
	}

	/**
	 * Runs SVM prediction over the given (index, featureLine) entries on a fixed
	 * thread pool, then reassembles the per-entry outputs in ascending index order.
	 *
	 * @param list       entries to classify
	 * @param treadcount number of worker threads (sic: "thread")
	 * @return prediction strings ordered by entry index
	 */
	public List<String> multipredict(List<Map.Entry<Integer, String>> list, Integer treadcount) throws Exception {
		List<String> resultLs = new ArrayList<String>();
		if (list.isEmpty()) {
			// The original divided by zero here: len==0 forced threadCounts to
			// list.size()==0 and then recomputed len = 0/0.
			return resultLs;
		}

		final svm_model model = svm.svm_load_model("model");
		HashMap<Integer, String> resultTemp = new HashMap<Integer, String>();

		int threadCounts = treadcount;
		int len = list.size() / threadCounts; // nominal sublist size per worker
		if (len == 0) {
			// Fewer entries than requested threads: one entry per worker.
			threadCounts = list.size();
			len = 1;
		}

		ExecutorService exec = Executors.newFixedThreadPool(threadCounts);
		try {
			List<Callable<HashMap<Integer, String>>> callList =
					new ArrayList<Callable<HashMap<Integer, String>>>();
			for (int i = 0; i < threadCounts; i++) {
				// Evenly partition the list; the last worker takes the remainder.
				final List<Entry<Integer, String>> subList;
				if (i == threadCounts - 1) {
					subList = list.subList(i * len, list.size());
				} else {
					subList = list.subList(i * len, Math.min(len * (i + 1), list.size()));
				}
				callList.add(new Callable<HashMap<Integer, String>>() {
					public HashMap<Integer, String> call() throws Exception {
						HashMap<Integer, String> alistMap = new HashMap<Integer, String>();
						for (Entry<Integer, String> entry : subList) {
							// Predict each feature line with probabilities on.
							alistMap.put(entry.getKey(), predict(entry.getValue(), model, 1));
						}
						return alistMap;
					}
				});
			}
			// Block until every worker finishes, then merge all partial maps.
			for (Future<HashMap<Integer, String>> future : exec.invokeAll(callList)) {
				resultTemp.putAll(future.get());
			}
		} finally {
			// The original only shut down on success, leaking the pool (and
			// keeping the JVM alive) whenever a task threw.
			exec.shutdown();
		}

		// Restore input order, since the sublists complete in arbitrary order.
		List<Integer> keys = new ArrayList<Integer>(resultTemp.keySet());
		Collections.sort(keys);
		for (Integer key : keys) {
			resultLs.add(resultTemp.get(key));
		}
		return resultLs;
	}

	/**
	 * Classifies every feature line in {@link #svmtest} and stores the ordered
	 * outputs in {@link #result}.
	 *
	 * @param treadcount number of worker threads (sic: "thread")
	 */
	public void Predict(Integer treadcount) throws Exception {
		List<Map.Entry<Integer, String>> svmtestList =
				new ArrayList<Map.Entry<Integer, String>>(svmtest.entrySet());
		result = multipredict(svmtestList, treadcount);
	}

	/************ Fuse text and speech results; score each segment *************/

	/**
	 * Extracts the class-0 probability (second space-separated token of each
	 * prediction line) and stores it per file id in {@link #txt_result}.
	 */
	public void getTxtResult() throws Exception {
		for (int i = 0; i < result.size(); i++) {
			String val = result.get(i).split(" ")[1];
			txt_result.put(indexFile.get(i), Double.valueOf(val));
		}
	}

	/**
	 * Loads the speech model's per-file labels. Each TAB-separated line carries
	 * the plp file name in column 0 and the label in column 4; the ".plp"
	 * suffix is stripped so the key matches the text side's file ids.
	 *
	 * @param speechresult UTF-8 encoded speech result file
	 */
	public void getSpeechResult(String speechresult) throws IOException {
		try (BufferedReader reader = new BufferedReader(
				new InputStreamReader(new FileInputStream(speechresult), StandardCharsets.UTF_8))) {
			String line;
			while ((line = reader.readLine()) != null) {
				String[] tokens = line.trim().split("\t");
				String filename = tokens[0].replace(".plp", "");
				speech_result.put(filename, tokens[4]);
			}
		}
	}

	/**
	 * Fuses both models into {@link #txt_speech_result} and prints each decision.
	 * The text model overrides speech only when it is very confident (class-0
	 * probability &gt; 0.9) AND the utterance is longer than 5 characters;
	 * otherwise the speech model's label wins.
	 */
	public void combineResult() {
		for (int i = 0; i < test.size(); i++) {
			String fileId = indexFile.get(i);
			Double angryScore = txt_result.get(fileId);
			// Null guard: the original auto-unboxed and threw NPE for any file
			// id without a text score; such entries now fall back to speech.
			if (angryScore != null && angryScore > 0.9 && test.get(i).length() > 5) {
				System.out.println(fileId + "\t" + "ANGRY");
				txt_speech_result.put(fileId, "ANGRY");
			} else {
				System.out.println(fileId + "\t" + speech_result.get(fileId));
				txt_speech_result.put(fileId, speech_result.get(fileId));
			}
		}
	}

	/**
	 * Full fusion pipeline: text scores, speech labels, then combination.
	 *
	 * @param speechresult path of the speech model's result file
	 */
	public void getsegmentResult(String speechresult) throws Exception {
		getTxtResult();
		getSpeechResult(speechresult);
		combineResult();
	}


	/******************************* Main ****************************/

	/** Runs the whole pipeline on the hard-coded file names and times it. */
	public static void main(String[] args) {
		try {
			long starttime = System.currentTimeMillis();

			EmoDetect_m2 emo2 = new EmoDetect_m2();
			emo2.proTest("test.dy.data", "IDF_w_com_2");
			emo2.Predict(1);
			emo2.getsegmentResult("log_dy.txt");

			long endtime = System.currentTimeMillis();
			System.err.println(endtime - starttime); // elapsed ms

		} catch (Exception e) {
			e.printStackTrace();
		}
	}
}
