package com.fqg.weblucene.lucene.utils;

import cn.hutool.core.bean.BeanUtil;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.*;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

import java.io.IOException;
import java.nio.file.Paths;
import java.text.SimpleDateFormat;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.*;


public class LuceneUtils {
private final static String indexDbPath=ConfigUtils.getLuceneDb();
private  static List<String> indexList=null;
private final  static SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
static {

}
   private static String formatDateStr(){
       Date date = new Date();

       return sdf.format(date);

   }
    public static void createIndexDB(String logFile,String content) throws Exception {
        IndexWriter indexWriter =null;

      try {
          //创建Document对象【导入的是Lucene包下的Document对象】
          Document document = new Document();

          //将JavaBean对象所有的属性值，均放到Document对象中去，属性名可以和JavaBean相同或不同


          /**
           * 向Document对象加入一个字段
           * 参数一：字段的关键字
           * 参数二：字符的值
           * 参数三：是否要存储到原始记录表中
           *      YES表示是
           *      NO表示否
           * 参数四：是否需要将存储的数据拆分到词汇表中
           *      ANALYZED表示拆分
           *      NOT_ANALYZED表示不拆分
           *
           * */
          document.add(new StringField("logFile", logFile, Field.Store.YES));
          document.add(new StringField("content", content, Field.Store.YES));
          document.add(new StringField("createDate", formatDateStr(), Field.Store.YES));

          //创建IndexWriter对象
          //目录指定为E:/createIndexDB
          ;
          Directory directory = FSDirectory.open(Paths.get(indexDbPath));

          //使用标准的分词算法对原始记录表进行拆分
          Analyzer analyzer = new StandardAnalyzer();

          //LIMITED默认是1W个

          /**
           * IndexWriter将我们的document对象写到硬盘中
           *
           * 参数一：Directory d,写到硬盘中的目录路径是什么
           * 参数二：Analyzer a, 以何种算法来对document中的原始记录表数据进行拆分成词汇表
           * 参数三：MaxFieldLength mfl 最多将文本拆分出多少个词汇
           *
           * */
          IndexWriterConfig conf = new IndexWriterConfig(analyzer);
           indexWriter = new IndexWriter(directory, conf);

          //将Document对象通过IndexWriter对象写入索引库中
          indexWriter.addDocument(document);

          //关闭IndexWriter对象
          System.out.println(" to lucene");
      }catch (Exception e){
          e.printStackTrace();
      }finally {
          indexWriter.close();
      }

    }
    private static Query getQuery(String indexId,String keyWordExpression ) throws ParseException {
        Analyzer analyzer = new StandardAnalyzer( );
        QueryParser queryParser = new QueryParser(  indexId, analyzer);
        queryParser.setAllowLeadingWildcard(true);
        //创建Query对象来封装关键字
        Query query = queryParser.parse("*"+keyWordExpression+"*");
        return query;
    }


    public static List<HashMap<String,String>> searchByCondition(Map<String,String> searchKeys) throws Exception {
        List<HashMap<String,String>> allSearchResults=new ArrayList<>();

        Directory directory = FSDirectory.open(Paths.get(indexDbPath));

        ReaderManager manager=new ReaderManager(directory);

        IndexSearcher indexSearcher = new IndexSearcher(manager.acquire());

        //
        //  1．MUST和MUST：取得连个查询子句的交集。
        //  2．MUST和MUST_NOT：表示查询结果中不能包含MUST_NOT所对应得查询子句的检索结果。
        // 3．SHOULD与MUST_NOT：连用时，功能同MUST和MUST_NOT。
        // 4．SHOULD与MUST连用时，结果为MUST子句的检索结果,但是SHOULD可影响排序。
        // 5．SHOULD与SHOULD：表示“或”关系，最终检索结果为所有检索子句的并集。
        // 6．MUST_NOT和MUST_NOT：无意义，检索无结果。
        BooleanQuery.Builder  builder=new BooleanQuery.Builder();
        for(String str:searchKeys.keySet()) {
            builder.add(getQuery(str, searchKeys.get(str)), BooleanClause.Occur.MUST);
        }

        TopDocs topDocs = indexSearcher.search(builder.build(), 100);
        //获取符合条件的编号
        System.out.println(topDocs.totalHits);
        for (int i = 0; i < topDocs.scoreDocs.length; i++) {

            ScoreDoc scoreDoc = topDocs.scoreDocs[i];
            int no = scoreDoc.doc;
            //用indexSearcher对象去索引库中查询编号对应的Document对象
            Document document = indexSearcher.doc(no);

            //将Document对象中的所有属性取出，再封装回JavaBean对象中去
            for(String index:getAllIndex()){
                HashMap<String,String> value=new HashMap<>();
                value.put(index,document.get(index));
                allSearchResults.add(value);

            }


        }
              return allSearchResults;
    }

    public static List<String> getAllIndex() throws IOException {
                     if(indexList!=null){
                               return indexList;
                     }

                  List<String> allIndex=new ArrayList<>();
        Directory directory = FSDirectory.open(Paths.get(indexDbPath));

        ReaderManager manager=new ReaderManager(directory);

        //  this.reader = ReaderManager.getInstance().getIndexReader(dir);
        //创建IndexSearcher对象
        IndexSearcher searcher = new IndexSearcher(manager.acquire());
        IndexReader reader = searcher.getIndexReader();//得到搜索器的索引阅读器

        int maxid = reader.maxDoc();//得到索引的索引词表
        for(int i=0;i<maxid;i++){
            Document doc= reader.document(i);
            StringBuilder stringBuilder=new StringBuilder();
            for(IndexableField index: doc.getFields()){
               // stringBuilder.append(index.name());
                //stringBuilder.append(index.stringValue());
                if(!allIndex.contains(index.name())) {
                    allIndex.add(index.name());
                }
            }
           // System.out.println( stringBuilder.toString());
        }
        indexList=allIndex;
        return allIndex;
    }
    public static List<HashMap<String,String>> getAllIndexValues() throws IOException {
        List<HashMap<String,String>> allIndexVa=new ArrayList<>();

        Directory directory = FSDirectory.open(Paths.get(indexDbPath));

        ReaderManager manager=new ReaderManager(directory);

        //  this.reader = ReaderManager.getInstance().getIndexReader(dir);
        //创建IndexSearcher对象
        IndexSearcher searcher = new IndexSearcher(manager.acquire());
        IndexReader reader = searcher.getIndexReader();//得到搜索器的索引阅读器

        int maxid = reader.maxDoc();//得到索引的索引词表
        for(int i=0;i<maxid;i++){
            Document doc= reader.document(i);
            StringBuilder stringBuilder=new StringBuilder();
            for(IndexableField index: doc.getFields()){
                // stringBuilder.append(index.name());
                //stringBuilder.append(index.stringValue());
                HashMap<String,String> value=new HashMap<>();
                value.put(index.name(),index.stringValue());
                allIndexVa.add(value);
            }
            // System.out.println( stringBuilder.toString());
        }
        return allIndexVa;
    }
    public static Page<HashMap<String,String>>  searchByConditionPage(Map<String,String> searchKeys,int currentPage,int pageSize) throws Exception {
        List<HashMap<String,String>> allSearchResults=new ArrayList<>();

        Directory directory = FSDirectory.open(Paths.get(indexDbPath));

        ReaderManager manager=new ReaderManager(directory);

        IndexSearcher indexSearcher = new IndexSearcher(manager.acquire());
        BooleanQuery.Builder  builder=new BooleanQuery.Builder();
        for(String str:searchKeys.keySet()) {
            if(searchKeys.get(str)==null||searchKeys.get(str).trim().length()<1){
                continue;
            }
            builder.add(getQuery(str, searchKeys.get(str)), BooleanClause.Occur.MUST);
        }
        Page<Document> page = new Page<Document>(currentPage,pageSize);
        pageQuery(indexSearcher, builder.build(), page);
        Page<HashMap<String,String>> pageResult = new Page<HashMap<String,String>>(currentPage,pageSize);

        for(Document document : page.getItems()) {
            //将Document对象中的所有属性取出，再封装回JavaBean对象中去
            HashMap<String,String> value=new HashMap<>();
            for(String index:getAllIndex()){
                value.put(index,document.get(index));
            }
            allSearchResults.add(value);
        }
        BeanUtil.copyProperties(page,pageResult);
        pageResult.setItems(allSearchResults);
        return pageResult;
    }
    private static ScoreDoc[] searchAllRecord(IndexSearcher searcher,Query query) throws IOException {
        TopDocs topDocs = searcher.search(query, Integer.MAX_VALUE);
        if(topDocs == null || topDocs.scoreDocs == null || topDocs.scoreDocs.length == 0) {
            return null;
        }
        ScoreDoc[] docs = topDocs.scoreDocs;
        return docs;
    }
    private static void pageQuery(IndexSearcher searcher,Query query,Page<Document> page) throws IOException {
        ScoreDoc[] totalRecord = searchAllRecord(searcher,query);
        //设置总记录数
        page.setTotalRecord(totalRecord==null?0:totalRecord.length);
        int lastPageRecord=   (page.getCurrentPage()-1)*page.getPageSize()-1;
        ScoreDoc lastScoreDoc=null;
        if(lastPageRecord>=0){
            lastScoreDoc=totalRecord[lastPageRecord];
        }
        TopDocs topDocs = searcher.searchAfter(lastScoreDoc,query, page.getPageSize());
        List<Document> docList = new ArrayList<Document>();
        ScoreDoc[] docs = topDocs.scoreDocs;
        for (ScoreDoc scoreDoc : docs) {
            int docID = scoreDoc.doc;
            Document document = searcher.doc(docID);
            docList.add(document);
        }
        page.setItems(docList);
        searcher.getIndexReader().close();
    }

    public static void main(String[] args) throws Exception {
		// TODO Auto-generated method stub
       //for( int i=0;i<35;i++) {
         //   LuceneUtils.createIndexDB(i+".txt", "锄禾日当午"+i);
        //}


      Map<String,String> searchKeys=new HashMap<>();
        searchKeys.put("content","锄禾");
        searchKeys.put("logFile","txt");
    Page<HashMap<String,String>> mapPage= LuceneUtils.searchByConditionPage(searchKeys,5,10);
                       mapPage.getItems().iterator().forEachRemaining(p->{
                           try {
                               for(String  str:LuceneUtils.getAllIndex()){
                                   System.out.println(str+"->"+p.get(str));
                              }
                               System.out.println("---------------------------------");


                           } catch (IOException e) {
                               e.printStackTrace();
                           }



                       });
      //  for(HashMap<String,String>  str:LuceneUtils.getAllIndexValues()){
        //    for(String v:str.keySet()){
          //      System.out.println(v+"->"+str.get(v));

            //}
        //}
      //  luceneService.printAllIndex();
		//lucen.findIndexDB();
	}

}
