package com.yanhui.utils.file.word;

import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme;

import java.io.*;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

//import org.hfutec.preprocess.WordFiltering;
//import org.hfutec.preprocess.wordseg.NLPIR;


public class ReadFiles {

	/**
	 * Lists the regular files directly inside the given directory.
	 *
	 * @param filepath path of a directory
	 * @return absolute paths of the regular files found; empty when the path
	 *         is not a directory or cannot be listed
	 */
	public List<String> readFile(String filepath){
		File dir = new File(filepath);
		List<String> fileLists = new ArrayList<String>();
		if (!dir.isDirectory()) {
			System.out.println("输入的参数应该为[文件夹名]");
			return fileLists;
		}
		// listFiles() replaces the original filepath + "\\" + name concatenation,
		// which only worked with Windows path separators; it may return null on
		// an I/O error, which the original list()-based code did not guard against.
		File[] entries = dir.listFiles();
		if (entries != null) {
			for (File entry : entries) {
				if (entry.isFile()) {
					fileLists.add(entry.getAbsolutePath());
				}
			}
		}
		return fileLists;
	}

	/**
	 * Reads every file under the given directory into one map:
	 * absolute file path → full file content.
	 *
	 * @param filePath path of a directory
	 * @return map of (filename, content) for each regular file in the directory
	 */
	public Map<String,String> readFileAllContent(String filePath){
		Map<String,String> doc_content = new HashMap<String, String>();
		for (String filename : readFile(filePath)) {
			doc_content.put(filename, readContent(filename));
		}
		return doc_content;
	}

	/**
	 * Reads a whole file into a single string, decoding it as GBK.
	 * Line terminators are dropped (lines are concatenated directly).
	 *
	 * @param file path of the file to read
	 * @return the concatenated file content; empty string on I/O error
	 */
	public String readContent(String file){
		StringBuilder result = new StringBuilder();
		// try-with-resources: the original leaked the reader when readLine threw.
		try (BufferedReader br = new BufferedReader(
				new InputStreamReader(new FileInputStream(file), "gbk"))) {
			String line;
			// The original loop read a line BEFORE appending, silently dropping
			// the first line of every file and appending a literal "null" at EOF
			// (result += null). This loop keeps every line and stops cleanly.
			while ((line = br.readLine()) != null) {
				result.append(line);
			}
		} catch (IOException e) {
			e.printStackTrace();
		}
		return result.toString();
	}

	/**
	 * Segments each document's content with the IK analyzer (smart mode).
	 * Returns a map of docname → space-separated segmented words.
	 *
	 * @param doc_words map of (docname, raw content)
	 * @return map of (docname, segmented content)
	 * @throws FileNotFoundException never thrown here; kept for caller compatibility
	 * @throws IOException if the segmenter fails while tokenizing
	 */
	public Map<String,String> cutWordtoMap2(Map<String, String> doc_words) throws FileNotFoundException, IOException{
		Map<String,String> segmented = new HashMap<String, String>();

		for (Map.Entry<String, String> entry : doc_words.entrySet()) {
			String key = entry.getKey();
			// true = use smart (coarse-grained) segmentation mode
			IKSegmenter ik = new IKSegmenter(new StringReader(entry.getValue()), true);

			// The original put(key, lex.getLexemeText()) inside the loop
			// overwrote the map value on every token, so only the LAST lexeme
			// of each document survived. Join all lexemes with spaces instead,
			// matching the documented (docname, wordsContent) contract.
			StringBuilder words = new StringBuilder();
			Lexeme lex;
			while ((lex = ik.next()) != null) {
				if (words.length() > 0) {
					words.append(' ');
				}
				words.append(lex.getLexemeText());
			}
			segmented.put(key, words.toString());
		}

		return segmented;
	}

}

