import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cjk.CJKAnalyzer;
import org.apache.lucene.analysis.cn.ChineseAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.Version;
import org.wltea.analyzer.lucene.IKAnalyzer;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;

/**
 * Lucene index management utility: builds, updates, and queries in-memory
 * indexes segmented with IKAnalyzer and ChineseAnalyzer.
 *
 * Created by fengli on 2018/12/24 .
 */
public class IndexManagerUtils {
    /** In-memory index segmented with IKAnalyzer (dictionary-based word segmentation). */
    private static Directory ikDirectory;
    /** In-memory index segmented with ChineseAnalyzer (single-character segmentation). */
    private static Directory chDirectory;

    /** Maximum number of hits returned by every query except {@link #queryAll()}; defaults to 10. */
    private static int querySize = 10;

    /**
     * Sets the maximum number of hits returned by all queries (except {@link #queryAll()}).
     *
     * @param querySize new hit limit
     */
    public static void setQuerySize(int querySize) {
        IndexManagerUtils.querySize = querySize;
    }

    /**
     * Builds the two in-memory indexes from the given tag map: one segmented with
     * IKAnalyzer, one with ChineseAnalyzer. Any previously indexed content is discarded.
     *
     * @param marks tag map, id -&gt; label
     * @throws IOException if writing either index fails
     */
    public static void createIndex(Map<String, String> marks) throws IOException {
        ikDirectory = createIndex(marks, true, new IKAnalyzer());
        chDirectory = createIndex(marks, true, new ChineseAnalyzer());
    }

    /**
     * Adds new documents to both indexes.
     *
     * @param someMarks new entries, id -&gt; label
     * @throws IOException if writing either index fails
     */
    public static void addDocument(Map<String, String> someMarks) throws IOException {
        addAll(getIndexWriter(0), someMarks);
        addAll(getIndexWriter(1), someMarks);
    }

    /** Adds every entry of {@code marks} through {@code writer}; always closes the writer. */
    private static void addAll(IndexWriter writer, Map<String, String> marks) throws IOException {
        try {
            for (Map.Entry<String, String> entry : marks.entrySet()) {
                writer.addDocument(buildDocument(entry.getKey(), entry.getValue()));
            }
        } finally {
            writer.close();
        }
    }

    /** Creates a document carrying the two stored fields "id" and "label". */
    private static Document buildDocument(String id, String label) {
        Document document = new Document();
        document.add(new Field("label", label, TextField.TYPE_STORED));
        document.add(new Field("id", id, TextField.TYPE_STORED));
        return document;
    }

    /**
     * Removes every document from both indexes.
     *
     * @throws IOException if either index cannot be written
     */
    public static void deleteIndex() throws IOException {
        IndexWriter writer = getIndexWriter(0);
        try {
            writer.deleteAll();
        } finally {
            writer.close();
        }
        writer = getIndexWriter(1);
        try {
            writer.deleteAll();
        } finally {
            writer.close();
        }
    }

    /**
     * Updates (replaces) the documents with the given ids in both indexes.
     *
     * @param someMarks entries to replace, id -&gt; new label
     * @throws IOException if writing either index fails
     */
    public static void updateIndex(Map<String, String> someMarks) throws IOException {
        updateAll(getIndexWriter(0), someMarks);
        updateAll(getIndexWriter(1), someMarks);
    }

    /** Replaces each id's document through {@code writer}; always closes the writer. */
    private static void updateAll(IndexWriter writer, Map<String, String> marks) throws IOException {
        try {
            for (Map.Entry<String, String> entry : marks.entrySet()) {
                writer.updateDocument(new Term("id", entry.getKey()),
                        buildDocument(entry.getKey(), entry.getValue()));
            }
        } finally {
            writer.close();
        }
    }

    /**
     * Returns every live (non-deleted) document of the IK index as id -&gt; label.
     *
     * @return map of all indexed entries; empty if the index is empty
     * @throws IOException if the index cannot be read
     */
    public static Map<String, String> queryAll() throws IOException {
        Map<String, String> map = new HashMap<String, String>();
        DirectoryReader ireader = DirectoryReader.open(ikDirectory);
        try {
            IndexSearcher searcher = new IndexSearcher(ireader);
            // Deleted documents leave gaps in the doc-id space, so iterating
            // 0..numDocs()-1 is wrong once anything has been deleted: live docs
            // past numDocs() would be skipped and deleted slots visited. Walk the
            // full maxDoc() range and skip ids that are no longer live.
            Bits liveDocs = MultiFields.getLiveDocs(ireader);
            int maxDoc = ireader.maxDoc();
            for (int i = 0; i < maxDoc; ++i) {
                if (liveDocs != null && !liveDocs.get(i)) {
                    continue; // deleted slot
                }
                Document document = searcher.doc(i);
                map.put(document.getField("id").stringValue(),
                        document.getField("label").stringValue());
            }
        } finally {
            ireader.close(); // reader was previously leaked
        }
        return map;
    }

    /**
     * Builds an index in memory using CJKAnalyzer (bigram segmentation).
     * Note: unlike {@link #createIndex(Map, boolean, Analyzer)}, this keeps any
     * pre-existing documents (it does not call deleteAll) — preserved behavior.
     *
     * @param marks tag map, id -&gt; label
     * @return the populated in-memory directory
     * @throws IOException if writing fails
     * @deprecated use {@link #createIndex(Map, boolean, Analyzer)} instead
     */
    @Deprecated
    public static RAMDirectory createIndextIntoRAM(Map<String, String> marks) throws IOException {
        RAMDirectory ramIndex = new RAMDirectory();
        Analyzer analyzer = new CJKAnalyzer(Version.LUCENE_4_10_3); // bigram segmentation
        IndexWriter indexWriter =
                new IndexWriter(ramIndex, new IndexWriterConfig(Version.LUCENE_4_10_3, analyzer));
        try {
            for (Map.Entry<String, String> entry : marks.entrySet()) {
                indexWriter.addDocument(buildDocument(entry.getKey(), entry.getValue()));
            }
        } finally {
            indexWriter.close();
        }
        return ramIndex;
    }

    /**
     * Builds an index on disk (under &lt;working dir&gt;/luceneIndex) using CJKAnalyzer.
     * Note: keeps any pre-existing documents (no deleteAll) — preserved behavior.
     *
     * @param marks tag map, id -&gt; label
     * @return absolute path of the index directory
     * @throws IOException if writing fails
     * @deprecated use {@link #createIndex(Map, boolean, Analyzer)} instead
     */
    @Deprecated
    public static String createIndextIntoFSD(Map<String, String> marks) throws IOException {
        // Path separator assumes the deployment platform accepts '/'.
        File indexDir = new File(System.getProperty("user.dir") + "/luceneIndex");
        Analyzer analyzer = new CJKAnalyzer(Version.LUCENE_4_10_3); // bigram segmentation
        IndexWriter indexWriter = new IndexWriter(FSDirectory.open(indexDir),
                new IndexWriterConfig(Version.LUCENE_4_10_3, analyzer));
        try {
            for (Map.Entry<String, String> entry : marks.entrySet()) {
                indexWriter.addDocument(buildDocument(entry.getKey(), entry.getValue()));
            }
        } finally {
            indexWriter.close();
        }
        return indexDir.getAbsolutePath();
    }

    /**
     * Builds an index either in memory or on disk using IKAnalyzer.
     *
     * @param marks   tag map, id -&gt; label
     * @param intoRAM true to build the index in memory, false to build it on disk
     * @return the populated index directory
     * @throws IOException if writing fails
     */
    public static Directory createIndex(Map<String, String> marks, boolean intoRAM) throws IOException {
        // Previously a verbatim copy of the three-argument overload; delegate instead.
        return createIndex(marks, intoRAM, new IKAnalyzer());
    }

    /**
     * Builds an index either in memory or on disk using the supplied analyzer.
     * Any pre-existing documents in the target directory are removed first.
     *
     * @param marks    tag map, id -&gt; label
     * @param intoRAM  true to build the index in memory, false to build it on disk
     * @param analyzer segmentation analyzer (e.g. ChineseAnalyzer for single characters, IKAnalyzer for words)
     * @return the populated index directory
     * @throws IOException if writing fails
     */
    public static Directory createIndex(Map<String, String> marks, boolean intoRAM, Analyzer analyzer)
            throws IOException {
        Directory directory;
        if (intoRAM) {
            directory = new RAMDirectory();
        } else {
            // On disk under <working dir>/luceneIndex; separator assumes '/' works on the platform.
            File indexDir = new File(System.getProperty("user.dir") + "/luceneIndex");
            directory = FSDirectory.open(indexDir);
        }
        IndexWriter indexWriter =
                new IndexWriter(directory, new IndexWriterConfig(Version.LUCENE_4_10_3, analyzer));
        try {
            indexWriter.deleteAll(); // start from an empty index
            for (Map.Entry<String, String> entry : marks.entrySet()) {
                indexWriter.addDocument(buildDocument(entry.getKey(), entry.getValue()));
            }
        } finally {
            indexWriter.close();
        }
        return directory;
    }

    /**
     * OR-combines the given terms into a boolean query against the "label" field
     * and returns up to {@code querySize} hits from the given index.
     *
     * @param conditions query terms
     * @param directory  index to search (ikDirectory or chDirectory)
     * @return matching entries, id -&gt; label
     * @throws IOException if the index cannot be read
     */
    public static Map<String, String> searchByConditions(ArrayList<String> conditions, Directory directory)
            throws IOException {
        IndexSearcher searcher = getIndexSearcher(directory);
        try {
            BooleanQuery booleanQuery = new BooleanQuery();
            for (String condition : conditions) {
                booleanQuery.add(new TermQuery(new Term("label", condition)), BooleanClause.Occur.SHOULD);
            }
            ScoreDoc[] scoreDocs = searcher.search(booleanQuery, null, querySize).scoreDocs;
            return getResultMapByScoreDoc(searcher, scoreDocs);
        } finally {
            searcher.getIndexReader().close(); // reader was previously leaked
        }
    }

    /**
     * Opens a searcher over the given directory. The caller owns the underlying
     * reader and must close it via {@code searcher.getIndexReader().close()}.
     *
     * @param directory ikDirectory or chDirectory
     * @return a searcher over a freshly opened reader
     * @throws IOException if the index cannot be opened
     */
    private static IndexSearcher getIndexSearcher(Directory directory) throws IOException {
        DirectoryReader ireader = DirectoryReader.open(directory);
        return new IndexSearcher(ireader);
    }

    /**
     * OR-combined search over an in-memory index.
     *
     * @param conditions   query terms
     * @param ramDirectory index in memory
     * @return matching entries, id -&gt; label
     * @throws IOException if the index cannot be read
     * @deprecated use {@link #searchByConditions(ArrayList, Directory)} instead
     */
    @Deprecated
    public static Map<String, String> searchByConditions(ArrayList<String> conditions, RAMDirectory ramDirectory)
            throws IOException {
        // Previously a verbatim copy of the Directory overload; delegate instead.
        return searchByConditions(conditions, (Directory) ramDirectory);
    }

    /**
     * Single-term search against the "label" field of an in-memory index.
     * Keeps its historical fixed limit of 100 hits (not {@code querySize}).
     *
     * @param searchStr    the term to look up
     * @param ramDirectory index in memory
     * @return matching entries, id -&gt; label
     * @throws IOException if the index cannot be read
     * @deprecated use {@link #searchByStr(String, Directory)} instead
     */
    @Deprecated
    public static Map<String, String> serachByStr(String searchStr, RAMDirectory ramDirectory) throws IOException {
        return searchByTerm(searchStr, ramDirectory, 100);
    }

    /**
     * Single-term search against the "label" field, limited to {@code querySize} hits.
     *
     * @param searchStr the term to look up
     * @param directory index to search
     * @return matching entries, id -&gt; label
     * @throws IOException if the index cannot be read
     */
    public static Map<String, String> searchByStr(String searchStr, Directory directory) throws IOException {
        return searchByTerm(searchStr, directory, querySize);
    }

    /** Shared term-query implementation with an explicit hit limit; closes the reader. */
    private static Map<String, String> searchByTerm(String searchStr, Directory directory, int limit)
            throws IOException {
        IndexSearcher searcher = getIndexSearcher(directory);
        try {
            Query query = new TermQuery(new Term("label", searchStr));
            ScoreDoc[] scoreDocs = searcher.search(query, limit).scoreDocs;
            return getResultMapByScoreDoc(searcher, scoreDocs);
        } finally {
            searcher.getIndexReader().close(); // reader was previously leaked
        }
    }

    /**
     * Converts search hits into an id -&gt; label map.
     *
     * @param searcher  searcher the hits came from
     * @param scoreDocs hits to resolve
     * @return resolved entries, id -&gt; label
     * @throws IOException if a stored document cannot be loaded
     */
    private static Map<String, String> getResultMapByScoreDoc(IndexSearcher searcher, ScoreDoc[] scoreDocs)
            throws IOException {
        Map<String, String> result = new HashMap<String, String>();
        for (ScoreDoc scoreDoc : scoreDocs) {
            Document hit = searcher.doc(scoreDoc.doc);
            result.put(hit.getField("id").stringValue(), hit.getField("label").stringValue());
        }
        return result;
    }

    /**
     * Segments the input with IKAnalyzer.
     *
     * @param input text to segment
     * @return the tokens, in order
     * @deprecated use {@link #getQueryArray(String, Analyzer)} instead
     */
    @Deprecated
    public static ArrayList<String> getQueryArray(String input) {
        // Previously a verbatim copy of the two-argument overload; delegate instead.
        return getQueryArray(input, new IKAnalyzer());
    }

    /**
     * Segments the input with the supplied analyzer. IO errors during tokenization
     * are logged and result in a (possibly partial) token list — preserved behavior.
     *
     * @param input    text to segment
     * @param analyzer analyzer to use (ChineseAnalyzer for single characters, IKAnalyzer for words)
     * @return the tokens, in order
     */
    public static ArrayList<String> getQueryArray(String input, Analyzer analyzer) {
        ArrayList<String> result = new ArrayList<String>();
        TokenStream stream = null;
        try {
            stream = analyzer.tokenStream("myfield", input);
            stream.reset();
            CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class);
            while (stream.incrementToken()) {
                result.add(termAtt.toString());
            }
            stream.end();
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            // Guard against NPE: tokenStream() may throw before `stream` is assigned.
            if (stream != null) {
                try {
                    stream.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }
        return result;
    }

    /**
     * Best-effort lookup: first searches the IK (word) index; if that yields
     * nothing, falls back to the single-character index.
     *
     * @param input query text
     * @return matching entries, id -&gt; label
     * @throws Exception if ikDirectory (or, on fallback, chDirectory) has not been initialized
     */
    public static Map<String, String> findSomthing(String input) throws Exception {
        if (ikDirectory == null) {
            throw new Exception("ikDirectory为空！");
        }
        // Word-segmented query against the IK index first.
        ArrayList<String> conditions = getQueryArray(input, new IKAnalyzer());
        Map<String, String> result = searchByConditions(conditions, ikDirectory);
        // No hits: retry with single-character segmentation against the CH index.
        if (result == null || result.size() == 0) {
            conditions = getQueryArray(input, new ChineseAnalyzer());
            if (chDirectory == null) {
                throw new Exception("chDirectory为空！");
            }
            result = searchByConditions(conditions, chDirectory);
        }
        return result;
    }

    /**
     * Deletes one entry by tag id from both indexes.
     *
     * @param id tag id to delete
     * @throws IOException if either index cannot be written
     */
    public static void delete(String id) throws IOException {
        delete(new String[]{id});
    }

    /**
     * Returns a writer for one of the two indexes. option == 0 targets ikDirectory
     * (IKAnalyzer); any other value targets chDirectory (ChineseAnalyzer).
     *
     * @param option index selector
     * @return an open writer the caller must close
     * @throws IOException if the writer cannot be opened
     */
    private static IndexWriter getIndexWriter(int option) throws IOException {
        if (option == 0) {
            Analyzer analyzer = new IKAnalyzer();
            return new IndexWriter(ikDirectory, new IndexWriterConfig(Version.LUCENE_4_10_3, analyzer));
        } else {
            Analyzer analyzer = new ChineseAnalyzer();
            return new IndexWriter(chDirectory, new IndexWriterConfig(Version.LUCENE_4_10_3, analyzer));
        }
    }

    /**
     * Deletes a batch of entries by tag id from both indexes.
     *
     * @param ids tag ids to delete
     * @throws IOException if either index cannot be written
     */
    public static void delete(String[] ids) throws IOException {
        // Build the term set once; it is reused for both indexes.
        Term[] terms = new Term[ids.length];
        for (int i = 0; i < ids.length; ++i) {
            terms[i] = new Term("id", ids[i]);
        }
        IndexWriter writer = getIndexWriter(0);
        try {
            writer.deleteDocuments(terms);
        } finally {
            writer.close();
        }
        writer = getIndexWriter(1);
        try {
            writer.deleteDocuments(terms);
        } finally {
            writer.close();
        }
    }

}
