package com.qq.BFMRSE.extractWord;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.ObjectInputStream.GetField;
import java.io.Serializable;
import java.nio.file.FileSystems;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.BytesRef;

import com.qq.BFMRSE.entity.PostStopAndLowCaseAnalyzer;
import com.qq.BFMRSE.entity.tfWordInfo;
import com.qq.BFMRSE.util.BFConstants;

/**
 * Extracts representative keywords from a folder of text documents via a
 * Lucene index: builds the index, computes an IDF dictionary over it, and
 * selects the top-{@code a} terms per document by TF/IDF.
 *
 * <p>NOTE(review): not thread-safe; the transient progress counters
 * ({@code docNum}/{@code dealedNum}) are plain {@code Integer}s updated
 * without synchronization — confirm callers only poll them for rough
 * progress display.
 */
public class SelectWord implements Serializable{
	private static final long serialVersionUID = 1L;
	private int n;// number of extracted keywords (updated by getMostPopuWord)
	private String index;// location of the Lucene index directory
	private String filePath;// location of the document folder the index is built from
	private transient Integer docNum;// total documents to process (progress)
	private transient Integer dealedNum;// documents processed so far; transient: excluded from serialization
	private String skPath;// NOTE(review): purpose of skPath is not visible in this file — stored and exposed only
	public String getSkPath(){
		return skPath;
	}
	public void setSkPath(String skPath){
		this.skPath=skPath;
	}
	public Integer getDocNum() {
		return docNum;
	}
	public void setDocNum(Integer docNum) {
		this.docNum = docNum;
	}
	public Integer getDealedNum() {
		return dealedNum;
	}
	public void setDealedNum(Integer dealedNum) {
		this.dealedNum = dealedNum;
	}
	public int getN() {
		return n;
	}
	public void setN(int n) {
		this.n = n;
	}
	public String getIndex() {
		return index;
	}
	public void setIndex(String index) {
		this.index = index;
	}
	public String getFilePath() {
		return filePath;
	}
	public void setFilePath(String filePath) {
		this.filePath = filePath;
	}
	/**
	 * @param n        initial keyword count (recomputed by getMostPopuWord)
	 * @param index    Lucene index directory path
	 * @param filePath folder containing the source .txt documents
	 * @param skPath   auxiliary path (see field note)
	 */
	public SelectWord(int n,String index,String filePath,String skPath){
		this.n=n;
		this.index=index;
		this.filePath=filePath;
		this.skPath=skPath;
		docNum=Integer.valueOf(0);// valueOf over deprecated new Integer(...)
		dealedNum=Integer.valueOf(0);
		File files=new File(filePath);
		// listFiles() returns null when filePath is not a readable directory;
		// guard against the NPE the original code had and leave docNum at 0.
		File[] children=files.listFiles();
		if(children!=null){
			docNum=children.length;
		}
	}
	public SelectWord(String index,String filePath,String skPath){
		this(0,index,filePath,skPath);
	}
	public SelectWord(){}
	/**
	 * Builds the IDF dictionary for every term of the "body" field.
	 *
	 * <p>NOTE(review): IDF here is the raw ratio numDocs/docFreq, not the
	 * usual log(N/df) — kept as-is because downstream scoring depends on it.
	 *
	 * @return map from term text to its IDF value
	 * @throws IOException if the index cannot be opened or read
	 */
	public Map<String, Double> getAllWordIDF() throws IOException
	{
		Map<String, Double> idfMap=new HashMap<String, Double>();
		Path indexPath=FileSystems.getDefault().getPath(index);
		// try-with-resources: the original leaked the IndexReader (never closed).
		try (IndexReader indexReader=DirectoryReader.open(FSDirectory.open(indexPath))) {
			int docuSum=indexReader.numDocs();
			for(LeafReaderContext lfc:indexReader.leaves())
			{
				Terms terms=lfc.reader().terms("body");
				if(terms==null)
				{
					continue;// this segment has no "body" terms
				}
				TermsEnum termsEnum=terms.iterator(null);
				BytesRef thisTerm;
				while((thisTerm=termsEnum.next())!=null)
				{
					double idf=(double)docuSum/termsEnum.docFreq();
					idfMap.put(thisTerm.utf8ToString(), idf);
				}
			}
		}
		return idfMap;
	}
	/**
	 * Selects the {@code a} most representative keywords of each indexed file
	 * (by TF and the IDF dictionary in {@code wordTFAndIDFInfo.idfMap}),
	 * then replaces that shared dictionary with the reduced one and updates
	 * {@code n} to the distinct-keyword count plus a dummy margin.
	 *
	 * @param a number of keywords to keep per file
	 * @return map from file name to its selected keyword infos
	 * @throws IOException if the index cannot be opened or read
	 */
	public Map<String, List<tfWordInfo>> getMostPopuWord(int a) throws IOException
	{
		Map<String, List<tfWordInfo>> tfFileWordInfos = new HashMap<>();
		Map<String, Double> newIdfMap = new HashMap<>();
		Path indexPath=FileSystems.getDefault().getPath(index);
		// try-with-resources: the original only closed the reader on the
		// success path, leaking it when an exception escaped the loop.
		try (IndexReader indexReader=DirectoryReader.open(FSDirectory.open(indexPath))) {
			docNum=indexReader.numDocs();
			dealedNum=0;// reset progress counters
			for(int docId=0;docId<docNum;docId++)
			{
				String docName=SelectHelper.getReFileName(indexReader.document(docId).get("path"));
				Terms terms=indexReader.getTermVector(docId, "body");
				if(terms==null)
				{
					continue;// document was indexed without a term vector
				}
				TermsEnum termsEnum=terms.iterator(null);
				BytesRef thisTerm;
				List<tfWordInfo> temList=new LinkedList<tfWordInfo>();
				while((thisTerm=termsEnum.next())!=null)
				{
					String word=thisTerm.utf8ToString();
					if(SelectHelper.isContainDigit(word))
					{
						continue;// terms containing digits are never kept
					}
					DocsEnum docsEnum=termsEnum.docs(null, null);
					while(docsEnum.nextDoc()!=DocIdSetIterator.NO_MORE_DOCS)
					{
						tfWordInfo info=new tfWordInfo();
						info.setWord(word);
						info.setTf(docsEnum.freq());
						info.setIdf(wordTFAndIDFInfo.idfMap.get(word));
						temList.add(info);
					}
				}
				List<tfWordInfo> resultList=SelectHelper.getPouWordInfos(temList, a);
				for(tfWordInfo info:resultList){
					// containsKey instead of keySet().contains (same semantics, direct lookup)
					if(!newIdfMap.containsKey(info.getWord())){
						newIdfMap.put(info.getWord(), info.getIdf());
					}
				}
				tfFileWordInfos.put(docName, resultList);
				dealedNum++;
			}
		}
		n=newIdfMap.size()+BFConstants.DUMMUYKWYWORDCOUNT;
		wordTFAndIDFInfo.idfMap=newIdfMap;// swap in the reduced IDF dictionary
		return tfFileWordInfos;
	}
	/**
	 * Lists the (reduced) file names of every document in the Lucene index.
	 * Best-effort: on I/O failure the error is logged and whatever was
	 * collected so far is returned.
	 *
	 * @return file names stored in the index's "path" field
	 */
	public List<String> getAllFileName()
	{
		List<String> resultList=new ArrayList<String>();
		Path indexPath=FileSystems.getDefault().getPath(index);
		try (IndexReader indexReader=DirectoryReader.open(FSDirectory.open(indexPath))) {
			int numDoc=indexReader.numDocs();
			for(int docId=0;docId<numDoc;docId++)
			{
				String docName=indexReader.document(docId).get("path");
				resultList.add(SelectHelper.getReFileName(docName));
			}
		} catch (IOException e) {
			// Preserve the original best-effort contract: report and continue.
			e.printStackTrace();
		}
		return resultList;
	}
	/**
	 * (Re)builds the Lucene index from every .txt file directly under
	 * {@code filePath}, storing a "path" field and a "body" field with
	 * positions/offsets term vectors. Existing index contents are replaced
	 * (OpenMode.CREATE). Progress is exposed via docNum/dealedNum.
	 */
	public void buildLuceneIndex()
	{
		File testFile=new File(filePath);
		Path indexPath=FileSystems.getDefault().getPath(index);
		File[] textFiles=testFile.listFiles();
		if(textFiles==null)
		{
			return;// filePath is not a readable directory; nothing to index
		}
		// try-with-resources replaces the original double close of iWriter
		// (explicit close followed by another close in finally) and also
		// closes the Directory, which the original never did.
		try (Directory directory=FSDirectory.open(indexPath);
				IndexWriter iWriter=new IndexWriter(directory,
						new IndexWriterConfig(new PostStopAndLowCaseAnalyzer())
								.setOpenMode(OpenMode.CREATE))) {
			docNum=textFiles.length;
			dealedNum=0;// reset progress counters
			for(File textFile:textFiles)
			{
				if(textFile.isFile()&&textFile.getName().endsWith(".txt"))
				{
					String body=SelectHelper.getText(textFile.getCanonicalPath());
					Document document=new Document();
					document.add(new Field("path", SelectHelper.getReFileName(textFile.getPath()),
							Field.Store.YES, Field.Index.ANALYZED));
					document.add(new Field("body", body, Field.Store.YES, Field.Index.ANALYZED,
							Field.TermVector.WITH_POSITIONS_OFFSETS));
					iWriter.addDocument(document);
				}
				dealedNum++;// counts every directory entry, indexed or not (original behavior)
			}
		} catch (IOException e) {
			e.printStackTrace();
		}
	}
	/**
	 * Debug helper: simulates progress by incrementing dealedNum 100 times
	 * on a background thread, 100 ms apart.
	 *
	 * <p>NOTE(review): dealedNum++ on an Integer is neither atomic nor
	 * guaranteed visible to other threads — acceptable for a demo only.
	 */
	public void test(){
		new Thread(new Runnable() {
			@Override
			public void run() {
				for(int i=0;i<100;i++)
				{
					try {
						Thread.sleep(100);
						dealedNum++;
					} catch (InterruptedException e) {
						// Restore the interrupt flag so callers can observe it.
						Thread.currentThread().interrupt();
						e.printStackTrace();
					}
				}
			}
		}).start();
	}

}
