package com.zcm.utils;

import java.io.File;
import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.MultiFieldQueryParser;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.wltea.analyzer.IKSegmentation;
import org.wltea.analyzer.Lexeme;

import com.zcm.vo.LuceneVO;

public class LuceneUtil {
	
	private static String LucenePath = null;
	
	static {
		LucenePath = ReadPropertity.getProperty("lucenePath");
	}
	
	/**
	 * 测试DEMO
	 * @param args
	 * @throws Exception
	 */
	public static void main(String[] args) throws Exception{
		String str = LuceneUtil.searchString("白金账号");
		System.out.println(str);
	}
	
	
	/**
	 * 分词
	 * @param word
	 * @return
	 * @throws IOException
	 */
	public static List<String> tokenWord(String word) throws IOException{
		List<String> tokenArr = new ArrayList<String>();
		StringReader reader = new StringReader(word);
		/**当为true时，分词器进行最大词长切分**/
		IKSegmentation ik = new IKSegmentation(reader, true);
		Lexeme lexeme = null;
		while ((lexeme = ik.next()) != null){
			tokenArr.add(lexeme.getLexemeText());
		}
		return tokenArr;
	}
	
	/**
	 * 创建索引（单个）
	 * @param list
	 * @throws Exception
	 */
	public static void create(LuceneVO search) throws Exception {  
        /**这里放索引文件的位置**/
        File indexDir = new File(LucenePath);   
        Analyzer luceneAnalyzer = new StandardAnalyzer();   
        IndexWriter indexWriter = new IndexWriter(indexDir, luceneAnalyzer,true); 
        /**增加document到索引去   **/
        Document doc = new Document();   
        Field FieldId = new Field("aid", String.valueOf(search.getAid()),Field.Store.YES, Field.Index.NO);   
        Field FieldTitle = new Field("title", search.getTitle(), Field.Store.YES,Field.Index.TOKENIZED,Field.TermVector.WITH_POSITIONS_OFFSETS);   
        Field FieldRemark = new Field("remark", search.getRemark(), Field.Store.YES,Field.Index.TOKENIZED,Field.TermVector.WITH_POSITIONS_OFFSETS); 
        doc.add(FieldId);   
        doc.add(FieldTitle);  
        doc.add(FieldRemark);   
        indexWriter.addDocument(doc);   
        /**optimize()方法是对索引进行优化 **/  
        indexWriter.optimize();   
        indexWriter.close();   
    }  
	
	/**
	 * 更新索引（单个）
	 * @param list
	 * @throws Exception
	 */
	public static void update(LuceneVO search) throws Exception {  
        /**这里放索引文件的位置**/
        File indexDir = new File(LucenePath);   
        Analyzer luceneAnalyzer = new StandardAnalyzer();   
        IndexWriter indexWriter = new IndexWriter(indexDir, luceneAnalyzer,true); 
        
        /**增加document到索引去   **/
        Document doc = new Document();   
        Field FieldId = new Field("aid", String.valueOf(search.getAid()),Field.Store.YES, Field.Index.NO);   
        Field FieldTitle = new Field("title", search.getTitle(), Field.Store.YES,Field.Index.TOKENIZED,Field.TermVector.WITH_POSITIONS_OFFSETS);   
        Field FieldRemark = new Field("remark", search.getRemark(), Field.Store.YES,Field.Index.TOKENIZED,Field.TermVector.WITH_POSITIONS_OFFSETS); 
        doc.add(FieldId);   
        doc.add(FieldTitle);  
        doc.add(FieldRemark);   
        Term term = new Term("aid",String.valueOf(search.getAid())); 
        indexWriter.updateDocument(term, doc);
        /**optimize()方法是对索引进行优化 **/  
        indexWriter.optimize();   
        indexWriter.close();   
    } 
	
	/**
	 * 创建索引（单个）
	 * @param list
	 * @throws Exception
	 */
	public static void delete(LuceneVO search) throws Exception {  
        /**这里放索引文件的位置**/
        File indexDir = new File(LucenePath);   
        Analyzer luceneAnalyzer = new StandardAnalyzer();   
        IndexWriter indexWriter = new IndexWriter(indexDir, luceneAnalyzer,true); 
        Term term = new Term("aid",String.valueOf(search.getAid())); 
        indexWriter.deleteDocuments(term);
        /**optimize()方法是对索引进行优化 **/  
        indexWriter.optimize();   
        indexWriter.close();   
    }  
	
	
	/**
	 * 创建索引（多个）
	 * @param list
	 * @throws Exception
	 */
	public static void create(List<?> list) throws Exception {  
        /**这里放索引文件的位置**/
        File indexDir = new File(LucenePath);   
        Analyzer luceneAnalyzer = new StandardAnalyzer();   
        IndexWriter indexWriter = new IndexWriter(indexDir, luceneAnalyzer,true); 
        /**增加document到索引去   **/
        for (int i=0; i<list.size();i++){   
        	    LuceneVO search = (LuceneVO)list.get(i);
                Document doc = new Document();   
                Field FieldId = new Field("aid", String.valueOf(search.getAid()),Field.Store.YES, Field.Index.NO);   
                Field FieldTitle = new Field("title", search.getTitle(), Field.Store.YES,Field.Index.TOKENIZED,Field.TermVector.WITH_POSITIONS_OFFSETS);   
                Field FieldRemark = new Field("remark", search.getRemark(), Field.Store.YES,Field.Index.TOKENIZED,Field.TermVector.WITH_POSITIONS_OFFSETS); 
                doc.add(FieldId);   
                doc.add(FieldTitle);  
                doc.add(FieldRemark);   
                indexWriter.addDocument(doc);   
        }   
        /**optimize()方法是对索引进行优化 **/  
        indexWriter.optimize();   
        indexWriter.close();   
    }  
	
	/**
	 * 创建索引（多个）
	 * @param list
	 * @throws Exception
	 */
	public static void update(List<?> list) throws Exception {  
        /**这里放索引文件的位置**/
        File indexDir = new File(LucenePath);   
        Analyzer luceneAnalyzer = new StandardAnalyzer();   
        IndexWriter indexWriter = new IndexWriter(indexDir, luceneAnalyzer,true); 
        /**增加document到索引去   **/
        for (int i=0; i<list.size();i++){   
        	    LuceneVO search = (LuceneVO)list.get(i);
                Document doc = new Document();   
                Field FieldId = new Field("aid", String.valueOf(search.getAid()),Field.Store.YES, Field.Index.NO);   
                Field FieldTitle = new Field("title", search.getTitle(), Field.Store.YES,Field.Index.TOKENIZED,Field.TermVector.WITH_POSITIONS_OFFSETS);   
                Field FieldRemark = new Field("remark", search.getRemark(), Field.Store.YES,Field.Index.TOKENIZED,Field.TermVector.WITH_POSITIONS_OFFSETS); 
                doc.add(FieldId);   
                doc.add(FieldTitle);  
                doc.add(FieldRemark);   
                Term term = new Term("aid",String.valueOf(search.getAid())); 
                indexWriter.updateDocument(term, doc);
        }   
        /**optimize()方法是对索引进行优化 **/  
        indexWriter.optimize();   
        indexWriter.close();   
    }  
	
	/**
	 * 创建索引（多个）
	 * @param list
	 * @throws Exception
	 */
	public static void delete(List<?> list) throws Exception {  
        /**这里放索引文件的位置**/
        File indexDir = new File(LucenePath);   
        Analyzer luceneAnalyzer = new StandardAnalyzer();   
        IndexWriter indexWriter = new IndexWriter(indexDir, luceneAnalyzer,true); 
        /**删除索引   **/
        for (int i=0; i<list.size();i++){   
        	    LuceneVO search = (LuceneVO)list.get(i);
                Term term = new Term("aid",String.valueOf(search.getAid())); 
                indexWriter.deleteDocuments(term);
        }   
        /**optimize()方法是对索引进行优化 **/  
        indexWriter.optimize();   
        indexWriter.close();   
    }  
	
	/**
	 * 检索数据
	 * @param word
	 * @return
	 */
	public static List<LuceneVO> search(String word) {
		List<LuceneVO> list = new ArrayList<LuceneVO>();
		Hits hits = null;
		try {
			IndexSearcher searcher = new IndexSearcher(LucenePath);
		    String[] queries = {word,word};
		    String[] fields = {"title", "remark"};
		    BooleanClause.Occur[] flags  = {BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD};
		    Query query = MultiFieldQueryParser.parse(queries, fields, flags, new StandardAnalyzer());
			if (searcher != null) {
				/**hits结果**/
				hits = searcher.search(query);
				LuceneVO search = null;
				for (int i = 0; i < hits.length(); i++) {
					Document doc = hits.doc(i);
					search = new LuceneVO();
					search.setAid(Integer.parseInt(doc.get("aid")));
					search.setRemark(doc.get("remark"));
					search.setTitle(doc.get("title"));
					list.add(search);
				}
			}
		} catch (Exception ex) {
			ex.printStackTrace();
		}
		return list;
	}
	
	/**
	 * 检索文章ID
	 * @param word
	 * @return
	 */
	public static String searchString(String word) {
		String search = "";
		StringBuffer aids = new StringBuffer();
		Hits hits = null;
		try {
			IndexSearcher searcher = new IndexSearcher(LucenePath);
		    String[] queries = {word,word};
		    String[] fields = {"title", "remark"};
		    BooleanClause.Occur[] flags  = {BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD };
		    Query query = MultiFieldQueryParser.parse(queries, fields, flags, new StandardAnalyzer());
			/**hits结果**/
			hits = searcher.search(query);
			for (int i = 0; i < hits.length(); i++) {
				Document doc = hits.doc(i);
				aids.append(Integer.parseInt(doc.get("aid")));
				aids.append(",");
			}
			if(aids.length()>0){
				search = aids.substring(0, aids.length()-1);
			}
		} catch (Exception ex) {
			ex.printStackTrace();
		}
		return search;
	}

}
