package com.qq.BFMRSE.extractWord;

import java.io.File;
import java.io.IOException;
import java.nio.file.FileSystems;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.BytesRef;

import com.qq.BFMRSE.entity.tfWordInfo;
import com.qq.BFMRSE.serialize.objectSerializeUtil;
import com.qq.BFMRSE.util.BFConstants;

/**
 * Static cache of per-word IDF values ({@link #idfMap}) and per-file TF/IDF
 * keyword lists ({@link #tfWordInfos}), backed by two serialized files.
 * State is restored lazily from disk and persisted via {@link #ser2Info()}.
 *
 * NOTE(review): class/field names keep their original (non-conventional)
 * casing because external callers reference them.
 */
public class wordTFAndIDFInfo {
	/** word -> inverse document frequency, shared across the application. */
	public  static Map<String, Double> idfMap;
	/** file path -> representative keywords (with tf/idf) extracted for that file. */
	public  static Map<String, List<tfWordInfo>> tfWordInfos;
	/** Location of the serialized idfMap. */
	public static String idfMapFile=BFConstants.WORDINFOFILE+File.separator+BFConstants.IDFMAPSERNAME;
	/** Location of the serialized tfWordInfos map. */
	public static String tfWordInfoFile=BFConstants.WORDINFOFILE+File.separator+BFConstants.TFWORDINFOS;

	// On first use of the class, restore both maps from their serialized
	// files when those files exist; otherwise they stay null until an
	// update creates them.
	static{
		idfMap = loadIdfMap();
		tfWordInfos = loadTfWordInfos();
	}

	/**
	 * Deserializes the idf map from {@link #idfMapFile}, or returns null when
	 * the file is missing or unreadable (best-effort: errors are logged, not thrown).
	 */
	@SuppressWarnings("unchecked")
	private static Map<String, Double> loadIdfMap(){
		File idfMapSerFile = new File(idfMapFile);
		if(idfMapSerFile.exists()){
			try {
				return (Map<String, Double>) objectSerializeUtil.deserialize(idfMapFile);
			} catch (IOException e) {
				// Best-effort load: a corrupt/unreadable cache file must not
				// prevent the application from starting.
				e.printStackTrace();
			}
		}
		return null;
	}

	/**
	 * Deserializes the tf map from {@link #tfWordInfoFile}, or returns null when
	 * the file is missing or unreadable (best-effort: errors are logged, not thrown).
	 */
	@SuppressWarnings("unchecked")
	private static Map<String, List<tfWordInfo>> loadTfWordInfos(){
		File tfMapSerFile = new File(tfWordInfoFile);
		if(tfMapSerFile.exists()){
			try {
				return (Map<String, List<tfWordInfo>>) objectSerializeUtil.deserialize(tfWordInfoFile);
			} catch (IOException e) {
				// Best-effort load; see loadIdfMap().
				e.printStackTrace();
			}
		}
		return null;
	}

	/**
	 * Returns the idf map, deserializing it from disk on first access.
	 * May return null when no serialized file exists yet.
	 */
	public static Map<String, Double> getIdfMap(){
		if(idfMap==null){
			idfMap = loadIdfMap();
		}
		return idfMap;
	}

	/**
	 * Returns the per-file tf map, deserializing it from disk on first access.
	 * May return null when no serialized file exists yet.
	 */
	public static Map<String, List<tfWordInfo>> getTfWordInfos(){
		if(tfWordInfos==null){
			tfWordInfos = loadTfWordInfos();
		}
		return tfWordInfos;
	}

	/** Serializes both maps to their backing files (errors logged, not thrown). */
	public static void ser2Info(){
		try {
			objectSerializeUtil.serialize(idfMap, idfMapFile);
			objectSerializeUtil.serialize(tfWordInfos, tfWordInfoFile);
		} catch (IOException e) {
			// Persisting the cache is best-effort; the in-memory maps remain valid.
			e.printStackTrace();
		}
	}

	/**
	 * Returns every distinct keyword currently known to the idf map.
	 * Returns an empty list when the map is unavailable.
	 */
	public static List<String> getAllDistinctWord(){
		// Lazy-load for consistency with getAllFileName().
		Map<String, Double> map = getIdfMap();
		if(map==null){
			return new ArrayList<>();
		}
		return new ArrayList<>(map.keySet());
	}

	/**
	 * Returns a map of keyword -> idf for the keywords extracted from the
	 * given file. Returns an empty map when the file is unknown or the
	 * tf cache is unavailable (previously this threw a NullPointerException).
	 *
	 * @param fileName file path as stored in {@link #tfWordInfos}
	 */
	public static Map<String, Double> getWordAndIdfsByFileName(String fileName){
		Map<String, Double> resMap = new HashMap<String, Double>();
		Map<String, List<tfWordInfo>> all = getTfWordInfos();
		List<tfWordInfo> list = (all == null) ? null : all.get(fileName);
		if(list != null){
			for(tfWordInfo info : list){
				resMap.put(info.getWord(), info.getIdf());
			}
		}
		return resMap;
	}

	/**
	 * Returns the names of all files that have extracted keywords.
	 * Returns an empty list when the tf cache is unavailable
	 * (previously this threw a NullPointerException on a fresh install).
	 */
	public static List<String> getAllFileName(){
		Map<String, List<tfWordInfo>> all = getTfWordInfos();
		if(all==null){
			return new ArrayList<>();
		}
		return new ArrayList<>(all.keySet());
	}

	/** Returns true when keywords have already been extracted for the given file. */
	public static boolean isFileExist(String fileName){
		// O(1) key lookup instead of materializing the key set into a list.
		Map<String, List<tfWordInfo>> all = getTfWordInfos();
		return all != null && all.containsKey(fileName);
	}

	/**
	 * Returns the idf recorded for the given word, or 0.0 when the word is
	 * unknown (previously an unboxing NullPointerException was thrown).
	 */
	public static double getIDFForWord(String word){
		Map<String, Double> map = getIdfMap();
		Double idf = (map == null) ? null : map.get(word);
		return (idf == null) ? 0.0 : idf;
	}

	/**
	 * Re-extracts the keywords of one file from the Lucene index, merges them
	 * into the static maps, and persists the result.
	 *
	 * @param index    path of the Lucene index directory
	 * @param fileName value of the "path" field identifying the document
	 */
	public static void update(String index,String fileName){
		try {
			System.out.println("test keyword size="+(idfMap==null ? 0 : idfMap.size()));
			updateTFAndIDF(index,fileName,BFConstants.A);
			System.out.println("test keyword size(after updating)="+(idfMap==null ? 0 : idfMap.size()));
			ser2Info();
		} catch (IOException e) {
			// Keep the previous cache contents when the index cannot be read.
			e.printStackTrace();
		}
	}

	/**
	 * Extracts the {@code a} most representative keywords of {@code fileName}
	 * from the term vectors of the Lucene index and merges them into
	 * {@link #idfMap} and {@link #tfWordInfos}.
	 *
	 * @param index    path of the Lucene index directory
	 * @param fileName value of the "path" field identifying the document
	 * @param a        number of keywords to keep per file
	 * @throws IOException if the index cannot be read
	 */
	public static void updateTFAndIDF(String index,String fileName,int a) throws IOException
	{
		// Allow updates on a fresh install where no serialized caches exist yet
		// (previously this path threw a NullPointerException).
		if(getIdfMap()==null){
			idfMap = new HashMap<String, Double>();
		}
		if(getTfWordInfos()==null){
			tfWordInfos = new HashMap<String, List<tfWordInfo>>();
		}
		Path indexPath=FileSystems.getDefault().getPath(index);
		IndexReader indexReader=DirectoryReader.open(FSDirectory.open(indexPath));
		try {
			int docNum=indexReader.numDocs();
			for(int docId=0;docId<docNum;docId++)
			{
				if(!fileName.equals(indexReader.document(docId).get("path"))){
					continue;
				}
				// "body" may have been indexed without term vectors.
				Terms terms=indexReader.getTermVector(docId, "body");
				if(terms!=null)
				{
					TermsEnum termsEnum=terms.iterator(null);
					BytesRef term;
					List<tfWordInfo> candidates=new LinkedList<tfWordInfo>();
					while((term=termsEnum.next())!=null)
					{
						String word=term.utf8ToString();
						// Terms containing digits are never candidate keywords,
						// so skip them before walking the postings.
						if(SelectHelper.isContainDigit(word)){
							continue;
						}
						DocsEnum docsEnum=termsEnum.docs(null, null);
						while(docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS)
						{
							tfWordInfo info=new tfWordInfo();
							info.setWord(word);
							info.setTf(docsEnum.freq());
							// Unsmoothed idf: total docs / docs containing the term.
							info.setIdf((double)docNum/termsEnum.docFreq());
							candidates.add(info);
						}
					}
					// Keep only the `a` most representative keywords.
					List<tfWordInfo> resultList=SelectHelper.getPouWordInfos(candidates, a);
					for(tfWordInfo info : resultList){
						// Do not overwrite idf values already recorded globally.
						if(!idfMap.containsKey(info.getWord())){
							idfMap.put(info.getWord(), info.getIdf());
						}
					}
					tfWordInfos.put(fileName, resultList);
				}
				// Only the first document matching fileName is processed.
				break;
			}
		} finally {
			// Always release the reader, even when index access throws.
			indexReader.close();
		}
	}

}
