package group.rober.jeki.service;

import group.rober.runtime.holder.ApplicationContextHolder;
import group.rober.runtime.kit.IOKit;
import group.rober.runtime.kit.StringKit;
import group.rober.runtime.lang.RoberException;
import group.rober.search.entity.SearchResult;
import group.rober.search.service.SearchService;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.*;
import org.apache.lucene.queryparser.classic.MultiFieldQueryParser;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.*;
import org.apache.lucene.search.highlight.*;
import org.apache.lucene.store.Directory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

@Service
public class SearchServiceImpl implements SearchService {

    // Analyzer shared by indexing, query parsing and highlighting.
    @Autowired
    protected Analyzer analyzer;

    // Index storage backing the reader opened in searchDocument().
    @Autowired
    protected Directory directory;

    public Analyzer getAnalyzer() {
        return analyzer;
    }

    public void setAnalyzer(Analyzer analyzer) {
        this.analyzer = analyzer;
    }

    public Directory getDirectory() {
        return directory;
    }

    public void setDirectory(Directory directory) {
        this.directory = directory;
    }

    /**
     * Looks up the {@link IndexWriter} bean from the application context.
     *
     * <p>NOTE(review): every write method below closes the returned writer when it
     * is done. That is only safe if this bean is prototype-scoped (a fresh writer
     * per call); if it is a singleton, the second write operation will fail on a
     * closed writer — confirm the bean definition.
     */
    public IndexWriter getIndexWriter() {
        return ApplicationContextHolder.getBean(IndexWriter.class);
    }

    /**
     * Adds a single document to the index and commits the change.
     *
     * @param document the document to index
     * @throws IOException if the index cannot be written
     */
    public void addDocument(Document document) throws IOException {
        IndexWriter writer = getIndexWriter();
        try {
            writer.addDocument(document);
            writer.commit();
        } finally {
            // Release the writer (and its index lock) even when add/commit throws.
            IOKit.close(writer);
        }
    }

    /**
     * Adds a batch of documents to the index in one operation and commits.
     *
     * @param documents the documents to index
     * @throws IOException if the index cannot be written
     */
    public void addDocuments(Iterable<? extends Iterable<? extends IndexableField>> documents) throws IOException {
        IndexWriter writer = getIndexWriter();
        try {
            writer.addDocuments(documents);
            writer.commit();
        } finally {
            IOKit.close(writer);
        }
    }

    /**
     * Deletes every document matching any of the given terms and commits.
     *
     * @param terms terms identifying the documents to delete
     * @throws IOException if the index cannot be written
     */
    public void deleteDocument(Term... terms) throws IOException {
        IndexWriter writer = getIndexWriter();
        try {
            writer.deleteDocuments(terms);
            writer.commit();
        } finally {
            IOKit.close(writer);
        }
    }

    /**
     * Deletes all documents from the index and commits.
     *
     * @throws IOException if the index cannot be written
     */
    public void deleteAllDocument() throws IOException {
        IndexWriter writer = getIndexWriter();
        try {
            writer.deleteAll();
            // Commit explicitly, consistent with the other write operations.
            writer.commit();
//          writer.forceMergeDeletes(); // deleted docs stay in a recoverable state until merged
        } finally {
            IOKit.close(writer);
        }
    }

    /**
     * TODO(review): unimplemented — the body was empty in the original source.
     */
    public void deleteDocument(Iterable<? extends Iterable<? extends IndexableField>> documents) {
        // Intentionally empty (original left this unimplemented).
    }

    /**
     * Replaces the document(s) matching {@code term} with the given fields and commits.
     *
     * @param term      term identifying the document(s) to replace
     * @param documents the replacement fields
     * @throws IOException if the index cannot be written
     */
    public void updateDocument(Term term, Iterable<? extends IndexableField> documents) throws IOException {
        IndexWriter writer = getIndexWriter();
        try {
            writer.updateDocument(term, documents);
            writer.commit();
        } finally {
            IOKit.close(writer);
        }
    }

    /**
     * TODO(review): unimplemented — the body was empty in the original source.
     */
    public void updateDocument(Iterable<? extends Iterable<? extends IndexableField>> documents) {
        // Intentionally empty (original left this unimplemented).
    }

    /**
     * Runs a paged, sorted multi-field search and attaches highlighted copies of
     * the {@code title}/{@code summary}/{@code content} fields to each hit.
     *
     * @param fields  index fields to search
     * @param keyWord query string, parsed with the service analyzer
     * @param sort    sort order for the results
     * @param index   zero-based page index
     * @param size    page size
     * @return the page of matching documents plus the total hit count
     * @throws ParseException if {@code keyWord} is not a valid query
     * @throws IOException    if the index cannot be read
     */
    public SearchResult searchDocument(String[] fields, String keyWord, Sort sort, int index, int size) throws ParseException, IOException {
        // Parse the keyword against all requested fields.
        QueryParser queryParser = new MultiFieldQueryParser(fields, analyzer);
        Query query = queryParser.parse(keyWord);

        IndexReader reader = DirectoryReader.open(directory);
        try {
            IndexSearcher searcher = new IndexSearcher(reader);
            // Collect enough hits to cover pages 0..index; the page itself is
            // sliced below via collector.topDocs(offset, size).
            TopFieldCollector collector = TopFieldCollector.create(sort, index * size + size, false, false, false);
            searcher.search(query, collector);

            // Highlighting: wrap matched terms in a red <span>.
            SimpleHTMLFormatter formatter = new SimpleHTMLFormatter("<span style=\"color:#F00\">", "</span>");
            QueryScorer scorer = new QueryScorer(query);
            Highlighter highlight = new Highlighter(formatter, scorer);
            highlight.setTextFragmenter(new SimpleSpanFragmenter(scorer));

            ScoreDoc[] hits = collector.topDocs(index * size, size).scoreDocs;

            List<Document> dataList = new ArrayList<Document>();
            for (ScoreDoc hit : hits) {
                Document d = searcher.doc(hit.doc);
                applyHighlightText(highlight, d, "title", "hlTitle");
                applyHighlightText(highlight, d, "summary", "hlSummary");
                applyHighlightText(highlight, d, "content", "hlContent");
                dataList.add(d);
            }

            SearchResult result = new SearchResult();
            result.setDataList(dataList);
            // Report the total number of matches, not just this page's length
            // (the original used hits.length, which capped the count at `size`).
            result.setHitsCount(collector.getTotalHits());
            return result;
        } finally {
            // Close the reader even when parsing the hits or highlighting fails.
            IOKit.close(reader);
        }
    }

    /**
     * Adds a highlighted copy of {@code field} to {@code document} under
     * {@code hlField}; falls back to the raw value when nothing matched.
     * No-op when the source field is blank.
     */
    private void applyHighlightText(Highlighter highlight, Document document, String field, String hlField) {
        String value = document.get(field);
        if (StringKit.isBlank(value)) return;
        TokenStream tokenStream = analyzer.tokenStream(field, new StringReader(value));
        try {
            String hlText = highlight.getBestFragment(tokenStream, value);
            if (StringKit.isBlank(hlText)) {
                hlText = value;
            }
            document.add(new TextField(hlField, hlText, Field.Store.NO));
        } catch (IOException | InvalidTokenOffsetsException e) {
            throw new RoberException("设置高亮出错", e);
        } finally {
            // TokenStreams must be closed; the original leaked one per field.
            IOKit.close(tokenStream);
        }
    }
}
