package Server;
import java.io.File;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

/**
 * Extracts TF-IDF features from segmented work-order text.
 *
 * <p>Input lines are expected in the form {@code "id++word1,word2,...++time"}
 * (fields separated by "++", terms separated by ","). The extractor writes
 * three files into the output directory: {@code idf.txt} (global IDF table),
 * {@code keyword.txt} (per-order keyword rankings) and {@code features.txt}
 * (the feature matrix).
 */
public class FeatureExtractor {
	static Segmentor segmentor = new Segmentor();
	// A term must occur in at least (totalDocuments * minrate) work orders,
	// otherwise its IDF is zeroed out as noise.
	static final double minrate = 0.01;

	// Shared descending-by-value comparator for TF-IDF / IDF rankings.
	// Float.compare is NaN-safe and avoids the subtraction idiom the two
	// previous anonymous comparators duplicated.
	private static final Comparator<Map.Entry<String, Float>> DESCENDING_BY_VALUE =
			new Comparator<Map.Entry<String, Float>>() {
				public int compare(Map.Entry<String, Float> e1, Map.Entry<String, Float> e2) {
					return Float.compare(e2.getValue(), e1.getValue());
				}
			};

	/**
	 * Runs the full feature-extraction pipeline: segmentation, TF-IDF keyword
	 * ranking, feature-matrix generation.
	 *
	 * @param sourcedatapath path of the raw work-order file
	 * @param outpath        directory receiving keyword.txt, idf.txt, features.txt
	 * @return path of the generated feature-matrix file (features.txt)
	 */
	String AdvanceFeature(String sourcedatapath, String outpath) {
		// Word segmentation of every input line.
		List<String> seglist = segmentor.listSegment(IOHandler.readAsList(new File(sourcedatapath)));
		// Compute TF-IDF rankings and persist the global IDF table.
		String keywordpath = outpath + "/keyword.txt";
		String idfpath = outpath + "/idf.txt";
		rankKeyWordByTfIdf(seglist, keywordpath, idfpath);
		// Build the feature matrix from the persisted rankings.
		String feaurespath = outpath + "/features.txt";
		return getMergedClusterFeaturesByTFIDF(idfpath, keywordpath, feaurespath, seglist);
	}

	/**
	 * For each work order computes TF-IDF per term, sorts terms by descending
	 * score and writes "term=score " pairs (score > 0.01 only) to the keyword
	 * file. Every order's keyword line is preceded by a "#&lt;id&gt;" marker line.
	 *
	 * @param seglist     segmented work orders ("id++word1,word2,...++time")
	 * @param keywordpath output file for per-order keyword rankings
	 * @param idfpath     output file for the global IDF table
	 */
	private void rankKeyWordByTfIdf(List<String> seglist, String keywordpath, String idfpath) {
		Map<String, Float> term_idf = getGlobalIDF(idfpath, seglist); // global IDF table

		List<String> result = new ArrayList<String>();
		for (String s : seglist) {
			String[] fields = s.split("\\+\\+");
			result.add("#" + fields[0]); // "#<id>" marker line

			// Term frequency within this single work order.
			Map<String, Float> term_tf = new HashMap<String, Float>();
			for (String word : fields[1].split(",")) {
				if (word.isEmpty()) continue;
				word = word.replace(" ", "");
				Float tf = term_tf.get(word);
				term_tf.put(word, tf == null ? 1.0f : tf + 1);
			}

			// TF * IDF per term. Guard the IDF lookup: a missing entry would
			// otherwise throw a NullPointerException on auto-unboxing.
			Map<String, Float> term_tfidf = new HashMap<String, Float>();
			for (Map.Entry<String, Float> e : term_tf.entrySet()) {
				Float idf = term_idf.get(e.getKey());
				term_tfidf.put(e.getKey(), e.getValue() * (idf == null ? 0.0f : idf));
			}

			// Sort by TF-IDF, highest first.
			List<Map.Entry<String, Float>> keyWordEntries =
					new ArrayList<Map.Entry<String, Float>>(term_tfidf.entrySet());
			Collections.sort(keyWordEntries, DESCENDING_BY_VALUE);

			StringBuilder content = new StringBuilder();
			for (Map.Entry<String, Float> entry : keyWordEntries) {
				if (entry.getValue() > 0.01f) {
					content.append(entry.getKey()).append('=')
							.append(String.format("%.2f", entry.getValue())).append(' ');
				}
			}
			result.add(content.toString());
		}
		IOHandler.serialize2File(result, new File(keywordpath));
	}

	/**
	 * Builds the feature matrix: one row per work order, one column per term
	 * of the global IDF file. A cell holds the order's formatted TF-IDF score
	 * for that term, or "0" when the term is absent. "#&lt;id&gt;" marker lines
	 * are copied through unchanged.
	 *
	 * @param idfpath     global IDF file ("term=idf" lines, defines column order)
	 * @param keywordpath per-order keyword file written by rankKeyWordByTfIdf
	 * @param feaurespath output file for the matrix
	 * @param seglist     unused; kept for signature compatibility
	 * @return the path of the written feature file (feaurespath)
	 */
	private String getMergedClusterFeaturesByTFIDF(String idfpath, String keywordpath,
			String feaurespath, List<String> seglist) {
		List<String> keywordlist = IOHandler.readAsList(new File(keywordpath));
		List<String> idflist = IOHandler.readAsList(new File(idfpath));
		List<String> resultlist = new ArrayList<>();
		for (String s : keywordlist) {
			if (s.startsWith("#")) {
				resultlist.add(s);
				continue;
			}
			// Parse the "term=score" pairs of this row.
			Map<String, String> segmentmap = new HashMap<String, String>();
			for (String word : s.split(" ")) {
				// BUGFIX: this used to "break" on an empty token, silently
				// dropping every remaining pair on the line; skip it instead.
				if (word.isEmpty()) continue;
				String[] kv = word.split("=");
				if (kv.length < 2) continue; // malformed pair: ignore
				segmentmap.put(kv[0], kv[1]);
			}
			// Emit one comma-terminated value per global IDF term.
			StringBuilder row = new StringBuilder();
			for (String keyword : idflist) {
				String word = keyword.split("=")[0];
				String value = segmentmap.get(word);
				row.append(value == null ? "0" : value).append(',');
			}
			resultlist.add(row.toString());
		}
		IOHandler.serialize2File(resultlist, new File(feaurespath));
		return feaurespath;
	}

	/**
	 * Computes the global IDF table, treating each work order as one document.
	 * Terms occurring in fewer than (documents * minrate) orders get an IDF
	 * of 0. The table is also written to {@code globalIdfPath} as "term=idf "
	 * lines sorted by descending IDF (values <= 0.01 omitted from the file,
	 * but kept in the returned map).
	 *
	 * @param globalIdfPath output file for the IDF report
	 * @param seglist       segmented work orders ("id++word1,word2,...++time")
	 * @return the complete term-to-IDF map (including zeroed rare terms)
	 */
	private static Map<String, Float> getGlobalIDF(String globalIdfPath, List<String> seglist) {
		// Document frequency: in how many work orders each term appears.
		Map<String, Float> IDFMap = new HashMap<String, Float>();
		for (String s : seglist) {
			Set<String> seenInDoc = new HashSet<String>(); // counted once per order
			String[] fields = s.split("\\+\\+");
			for (String word : fields[1].split(",")) {
				if (word.isEmpty()) continue;
				word = word.replace(" ", "");
				// Set.add returns true only on the first occurrence, replacing
				// the previous HashMap<String,Boolean> bookkeeping.
				if (seenInDoc.add(word)) {
					Float df = IDFMap.get(word);
					IDFMap.put(word, df == null ? 1.0f : df + 1);
				}
			}
		}

		float counts = seglist.size(); // total number of work orders
		int minnum = (int) (counts * minrate); // loop-invariant minimum document frequency
		for (Map.Entry<String, Float> e : IDFMap.entrySet()) {
			float value = e.getValue();
			if (value < minnum) {
				e.setValue(0.0f); // too rare: neutralize the term
			} else {
				e.setValue((float) Math.log10((counts + 0.1) / value));
			}
		}

		// Sort by descending IDF for the on-disk report.
		List<Map.Entry<String, Float>> entries =
				new ArrayList<Map.Entry<String, Float>>(IDFMap.entrySet());
		Collections.sort(entries, DESCENDING_BY_VALUE);

		List<String> result = new ArrayList<String>();
		for (Map.Entry<String, Float> entry : entries) {
			if (entry.getValue() > 0.01f) {
				// Trailing space preserved: loadGloablIdf tolerates it and the
				// file format is consumed elsewhere.
				result.add(entry.getKey() + "=" + String.format("%.2f", entry.getValue()) + " ");
			}
		}
		IOHandler.serialize2File(result, new File(globalIdfPath));

		return IDFMap;
	}

	/**
	 * Loads a persisted global IDF table ("term=idf" per line).
	 * NOTE(review): the method name keeps its historical typo ("Gloabl")
	 * because external callers may reference it.
	 *
	 * @param globalIdfPath path of the file written by getGlobalIDF
	 * @return term-to-IDF map
	 */
	static Map<String, Float> loadGloablIdf(String globalIdfPath) {
		Map<String, Float> gloablIdf = new HashMap<String, Float>();
		for (String line : IOHandler.readAsList(new File(globalIdfPath))) {
			String[] tmp = line.split("=");
			if (tmp.length < 2) continue; // skip blank/malformed lines instead of crashing
			gloablIdf.put(tmp[0], Float.parseFloat(tmp[1]));
		}
		return gloablIdf;
	}

	/**
	 * Loads per-class TF-IDF maps from a keyword file. Expected layout: a
	 * header line of three whitespace-separated tokens starting with '#'
	 * (the second token is the class id), followed by one "term=score ..."
	 * line whose values apply to that class.
	 *
	 * @param keywordOutTXT path of the keyword file
	 * @return class id -&gt; (term -&gt; tfidf) map
	 */
	static Map<Integer, Map<String, Float>> loadTFIDF(String keywordOutTXT) {
		Map<Integer, Map<String, Float>> class_tfidfMap =
				new HashMap<Integer, Map<String, Float>>();
		List<String> dataList = IOHandler.readAsList(new File(keywordOutTXT));
		int num = 0;
		for (String line : dataList) {
			if (line.startsWith("#")) {
				String[] tmp = line.split("\\s|\t");
				if (tmp.length != 3) {
					System.out.println("loadTFIDF error\n");
					continue;
				}
				num = Integer.parseInt(tmp[1]);
			} else {
				Map<String, Float> tfidfMap = new HashMap<String, Float>();
				for (String seg : line.split(" ")) {
					// BUGFIX: a class with no keywords produces a blank line;
					// "".split(" ") yields [""] and the old code threw
					// ArrayIndexOutOfBoundsException on seg.split("=")[1].
					if (seg.isEmpty()) continue;
					String[] kv = seg.split("=");
					if (kv.length < 2) continue; // malformed pair: ignore
					tfidfMap.put(kv[0], Float.parseFloat(kv[1]));
				}
				class_tfidfMap.put(num, tfidfMap);
			}
		}
		return class_tfidfMap;
	}

	/**
	 * End-to-end demo run against hard-coded local paths: segments the raw
	 * file, persists the segmentation (reused by main()), ranks keywords and
	 * writes the feature matrix. Intended for manual use only.
	 */
	public void getFeature() {
		String sourcedatapath = "D:/workspace/HotEventMonitoring/2017年1月-2017年12月嘉兴的单子信息/save.txt";
		String outpath = "D:/workspace/HotEventMonitoring/2017年1月-2017年12月嘉兴的单子信息";
		// Word segmentation, persisted so later runs can skip it.
		String segfile = "D:/workspace/HotEventMonitoring/2017年1月-2017年12月嘉兴的单子信息/seg.txt";
		List<String> seglist = segmentor.listSegment(IOHandler.readAsList(new File(sourcedatapath)));
		IOHandler.serialize2File(seglist, new File(segfile));
		// Keyword ranking + feature matrix, reusing this instance instead of
		// allocating a throwaway FeatureExtractor as before.
		String keywordpath = outpath + "/keyword.txt";
		String idfpath = outpath + "/idf.txt";
		rankKeyWordByTfIdf(seglist, keywordpath, idfpath);
		System.out.println("特征值计算完成");
		getMergedClusterFeaturesByTFIDF(idfpath, keywordpath, outpath + "/features.txt", seglist);
	}

	/**
	 * Manual entry point: reuses the segmentation file persisted by
	 * getFeature() instead of re-segmenting the raw input.
	 */
	public static void main(String[] args) {
		FeatureExtractor featureExtractor = new FeatureExtractor();
		String outpath = "D:/workspace/HotEventMonitoring/2017年1月-2017年12月嘉兴的单子信息";
		String feaurespath = outpath + "/features.txt";
		// Load the previously persisted segmentation.
		List<String> seglist = IOHandler.readAsList(
				new File("D:/workspace/HotEventMonitoring/2017年1月-2017年12月嘉兴的单子信息/seg.txt"));
		// Compute TF-IDF rankings.
		String keywordpath = outpath + "/keyword.txt";
		String idfpath = outpath + "/idf.txt";
		featureExtractor.rankKeyWordByTfIdf(seglist, keywordpath, idfpath);
		System.out.println("特征值计算完成");
		// Generate the feature matrix.
		featureExtractor.getMergedClusterFeaturesByTFIDF(idfpath, keywordpath, feaurespath, seglist);
	}
}
