package com.ansj.vec;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

import org.apache.commons.math3.optim.nonlinear.vector.Weight;
import org.deeplearning4j.text.sentenceiterator.labelaware.LabelAwareListSentenceIterator;

/**
 * 
 * @author liyao 2017-04-20 1.将词转化成向量 2.将文档转化成向量
 */
public class DocToVec {
	// Word map: <line number, <word position, word>>
	static Map<Integer, Map<Integer, String>> wordsMap = new HashMap<Integer, Map<Integer, String>>();
	// tf-idf map: <line number, <word position (same index as wordsMap), tf-idf value>>
	static Map<Integer, Map<Integer, String>> tfidfMap = new HashMap<Integer, Map<Integer, String>>();
	// Class labels, one per input line, filled by readTFIDF()
	static List<String> labelList = new ArrayList<String>();
	// Parent directory of the resource files
	static String sourcePath = "/home/liyao/tools/TBD/word2vecFile";

	// Dimensionality of the word vectors produced by the model.
	// NOTE(review): must match the trained word2vec model's layer size — confirm.
	private static final int VECTOR_SIZE = 200;

	// static String sourcePath ="/home/liyao/tools/TBD/backup/0421/files";
	public static void main(String[] args) throws IOException {
		// Step 0 (one-off): train the model, converting the training set's
		// words into vectors. Kept commented out as in the original workflow.
		// try {
		// wordtoVec();
		// } catch (IOException e) {
		// e.printStackTrace();
		// }

		// Read the word file and the tf-idf file, storing the data in maps.
		String inputPath = sourcePath + "/words.txt";
		String outputPath = sourcePath + "/normalizedtf.txt";
		File wordsFile = new File(inputPath);
		File idfFile = new File(outputPath);

		// Model path
		String modelFile = sourcePath + "/word2vecmodel/javaSkip1";

		// Output file for the training-data document vectors
		String trainFile = sourcePath + "/NewWeightTrain.txt";
		readWords(wordsFile, wordsMap);
		readTFIDF(idfFile, tfidfMap);
		// Load the trained model and convert each document into a vector:
		// Di (vector of document i) = sum over words w of (vec(w) * tfidf(w)).
		Word2VEC vec = new Word2VEC();
		vec.loadJavaModel(modelFile);
		// String str = "徐州";
		// System.out.println(Arrays.toString(vec.getWordVector(str)));
		docToVec(wordsMap, tfidfMap, vec, trainFile);
	}

	/**
	 * Trains word vectors from the document collection and saves the model.
	 *
	 * @throws IOException if the input file cannot be read or the model
	 *                     cannot be written
	 */
	public static void wordtoVec() throws IOException {
		Learn learn = new Learn();
		learn.learnFile(new File(sourcePath + "/words.txt"));
		learn.saveModel(new File(sourcePath + "/javaSkip1"));
	}

	/**
	 * Accumulates a word vector into the running document vector, element-wise.
	 * Mutates {@code countDoc} in place.
	 *
	 * @param countDoc running sum for the document (modified in place)
	 * @param wordVec  the (already weighted) word vector to add; must be at
	 *                 least as long as {@code countDoc}
	 */
	public static void docCount(float[] countDoc, float[] wordVec) {
		for (int i = 0; i < countDoc.length; i++) {
			countDoc[i] += wordVec[i];
		}
	}

	/**
	 * Reads the word file and stores its data as <line number, <word index, word>>.
	 * Each line has the format "label\tword word word ...".
	 *
	 * FIX(review): the reader now decodes as UTF-8 explicitly; the original
	 * used the platform default charset, which garbles the Chinese text on
	 * non-UTF-8 systems and is inconsistent with the UTF-8 output writer.
	 *
	 * @param wordsFile the input file, one document per line
	 * @param wordsMap  destination map, keyed by 0-based line number
	 * @throws IOException if the file cannot be read
	 */
	public static void readWords(File wordsFile,
			Map<Integer, Map<Integer, String>> wordsMap)
			throws FileNotFoundException, IOException {
		try (BufferedReader br = new BufferedReader(
				new InputStreamReader(
						new FileInputStream(wordsFile),
						StandardCharsets.UTF_8))) {
			String temp;
			int lineNum = 0;
			while ((temp = br.readLine()) != null) {
				// Line format: "label\tword word word ..."
				String[] split = temp.split("\t")[1].split(" ");
				Map<Integer, String> word = new HashMap<Integer, String>();
				for (int i = 0; i < split.length; i++) {
					word.put(i, split[i]);
				}
				wordsMap.put(lineNum, word);
				lineNum++;
			}
		}
	}

	/**
	 * Reads the tf-idf file and stores its data as
	 * <line number, <word index, tf-idf value>>. Also records each line's
	 * class label into {@link #labelList}.
	 * Each line has the format "label\ttfidf tfidf ...".
	 *
	 * FIX(review): splits each line on '\t' once instead of twice, and decodes
	 * as UTF-8 explicitly (see {@link #readWords}).
	 *
	 * @param tfidfFile the input file, one document per line
	 * @param tfidfMap  destination map, keyed by 0-based line number
	 * @throws IOException if the file cannot be read
	 */
	public static void readTFIDF(File tfidfFile,
			Map<Integer, Map<Integer, String>> tfidfMap)
			throws FileNotFoundException, IOException {
		try (BufferedReader br = new BufferedReader(
				new InputStreamReader(
						new FileInputStream(tfidfFile),
						StandardCharsets.UTF_8))) {
			String temp;
			int lineNum = 0;
			while ((temp = br.readLine()) != null) {
				// Line format: "label\ttfidf tfidf ..."
				String[] parts = temp.split("\t");
				labelList.add(parts[0]); // store the class label
				String[] split = parts[1].split(" ");
				Map<Integer, String> word = new HashMap<Integer, String>();
				for (int i = 0; i < split.length; i++) {
					word.put(i, split[i]);
				}
				tfidfMap.put(lineNum, word);
				lineNum++;
			}
		}
	}

	/**
	 * Scales a word vector in place by its tf-idf weight: vec = vec * tfidf.
	 *
	 * @param wordVec the word vector (modified in place)
	 * @param weight  the tf-idf weight
	 */
	public static void vecToWeight(float[] wordVec, float weight) {
		for (int i = 0; i < wordVec.length; i++) {
			wordVec[i] = wordVec[i] * weight;
		}
	}

	/**
	 * Converts each document to a fixed-length vector and writes the vectors
	 * to a file in libSVM-like format: "label 1:v1 2:v2 ... 200:v200".
	 *
	 * FIX(review): the writer is now managed by try-with-resources — the
	 * original leaked the writer (never closed/flushed) if an exception was
	 * thrown mid-loop. Also hoists the duplicate {@code getWordVector} lookup
	 * and uses StringBuilder instead of StringBuffer (no shared mutation here).
	 *
	 * @param wordsMap      <line number, <word index, word>>
	 * @param tfidfMap      <line number, <word index, tf-idf value>> aligned
	 *                      index-for-index with wordsMap
	 * @param vec           the loaded word2vec model
	 * @param trainFilePath output file path (written as UTF-8)
	 * @throws IOException if the output file cannot be written
	 */
	public static void docToVec(
			Map<Integer, Map<Integer, String>> wordsMap,
			Map<Integer, Map<Integer, String>> tfidfMap, Word2VEC vec,
			String trainFilePath) throws IOException {
		try (BufferedWriter writer = new BufferedWriter(
				new OutputStreamWriter(
						new FileOutputStream(trainFilePath),
						StandardCharsets.UTF_8))) {
			for (Entry<Integer, Map<Integer, String>> lines : wordsMap
					.entrySet()) {
				int lineNum = lines.getKey();
				float[] countDoc = new float[VECTOR_SIZE];
				Map<Integer, String> words = lines.getValue();
				for (Map.Entry<Integer, String> entry : words
						.entrySet()) {
					int wordIndex = entry.getKey(); // word position in the line
					String word = entry.getValue().trim();
					float[] modelVec = vec.getWordVector(word); // null if OOV
					if (modelVec != null) {
						// clone(): deep copy so weighting does not mutate the
						// vector stored inside the model
						float[] wordVec = modelVec.clone();
						// tf-idf value of this word is used as its weight
						float weight = Float.parseFloat(
								tfidfMap.get(lineNum).get(wordIndex));
						vecToWeight(wordVec, weight); // weight the word vector
						docCount(countDoc, wordVec); // accumulate into the doc vector
					}
				}
				// Write the fixed-length document vector as one line.
				StringBuilder line = new StringBuilder();
				line.append(labelList.get(lineNum) + " ");
				for (int index = 0; index < countDoc.length; index++) {
					line.append(
							(index + 1) + ":" + countDoc[index] + " ");
				}
				writer.write(line.toString());
				writer.newLine();
			}
		}
	}

}
