package com.article.lucene;

import com.article.model.ArticleInfo;
import com.article.utils.DateUtil;
import com.article.utils.HtmlUtil;
import com.article.utils.StringUtil;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.*;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.*;
import org.apache.lucene.search.highlight.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

import java.io.StringReader;
import java.nio.file.Paths;
import java.sql.Timestamp;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.LinkedList;
import java.util.List;

/**
 * 索引类
 *
 * @author Administrator
 */
/**
 * Maintains and queries the Lucene full-text index for articles.
 *
 * <p>Each article is stored as one Lucene document with four fields:
 * {@code id} (exact-match key), {@code title}, {@code content} (HTML stripped),
 * and {@code gmtCreate} (formatted as {@code yyyy-MM-dd HH:mm:ss}).
 * Titles and content are tokenized with {@link SmartChineseAnalyzer}.
 *
 * <p>All index resources (directory, reader, writer, analyzer) are opened per
 * operation and released via try-with-resources, so no locks or file handles
 * leak when an operation fails.
 *
 * @author Administrator
 */
public class ArticleIndex {

    /** Filesystem location of the Lucene index directory. */
    private static final String INDEX_PATH = "//home//article//luceneart";

    /** Date pattern used for the stored {@code gmtCreate} field. */
    private static final String DATE_PATTERN = "yyyy-MM-dd HH:mm:ss";

    /** Maximum number of hits fetched per search. */
    private static final int MAX_HITS = 100;

    /** Length of the plain-text preview returned when no content fragment is highlighted. */
    private static final int CONTENT_PREVIEW_LENGTH = 200;

    /**
     * Ad-hoc smoke test: runs a search against the on-disk index.
     */
    public static void main(String[] args) throws Exception {
        ArticleIndex questionIndex = new ArticleIndex();
        questionIndex.searchArticle("今天我们");
    }

    /**
     * Adds one article to the index.
     *
     * @param articleInfo the article to index; its HTML content is stripped to plain text
     * @throws Exception if the index cannot be opened or written
     */
    public void addIndex(ArticleInfo articleInfo) throws Exception {
        // try-with-resources guarantees the writer (and its directory lock)
        // is released even if addDocument fails; close() commits the change.
        try (Directory dir = FSDirectory.open(Paths.get(INDEX_PATH));
             SmartChineseAnalyzer analyzer = new SmartChineseAnalyzer();
             IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(analyzer))) {
            writer.addDocument(toDocument(articleInfo));
        }
    }

    /**
     * Updates (or inserts) the indexed document whose {@code id} field matches
     * the article's id.
     *
     * @param articleInfo the article whose index entry should be replaced
     * @throws Exception if the index cannot be opened or written
     */
    public void updateIndex(ArticleInfo articleInfo) throws Exception {
        try (Directory dir = FSDirectory.open(Paths.get(INDEX_PATH));
             SmartChineseAnalyzer analyzer = new SmartChineseAnalyzer();
             IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(analyzer))) {
            writer.updateDocument(new Term("id", String.valueOf(articleInfo.getId())),
                    toDocument(articleInfo));
        }
    }

    /**
     * Deletes the indexed document with the given id.
     *
     * @param id value of the {@code id} field to delete
     * @throws Exception if the index cannot be opened or written
     */
    public void deleteIndex(String id) throws Exception {
        try (Directory dir = FSDirectory.open(Paths.get(INDEX_PATH));
             SmartChineseAnalyzer analyzer = new SmartChineseAnalyzer();
             IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(analyzer))) {
            writer.deleteDocuments(new Term("id", id));
            // Physically purge deleted documents instead of leaving tombstones.
            writer.forceMergeDeletes();
            // close() (via try-with-resources) commits the pending deletions.
        }
    }

    /**
     * Searches title and content for the given keywords and returns matching
     * articles with the matched terms wrapped in highlight markup.
     *
     * <p>Title and content are queried separately and combined with OR
     * ({@code SHOULD}) semantics. If no content fragment matches, a plain
     * preview of at most {@value #CONTENT_PREVIEW_LENGTH} characters is
     * returned instead.
     *
     * @param q query keywords (parsed by {@link QueryParser})
     * @return matched articles, at most {@value #MAX_HITS}
     * @throws Exception if the index cannot be read or the query cannot be parsed
     */
    public List<ArticleInfo> searchArticle(String q) throws Exception {
        List<ArticleInfo> articleInfos = new LinkedList<ArticleInfo>();
        // SimpleDateFormat is not thread-safe, so create one per call rather
        // than sharing an instance field across concurrent searches.
        SimpleDateFormat dateFormat = new SimpleDateFormat(DATE_PATTERN);
        try (Directory dir = FSDirectory.open(Paths.get(INDEX_PATH));
             IndexReader reader = DirectoryReader.open(dir);
             SmartChineseAnalyzer analyzer = new SmartChineseAnalyzer()) {
            IndexSearcher is = new IndexSearcher(reader);

            Query titleQuery = new QueryParser("title", analyzer).parse(q);
            Query contentQuery = new QueryParser("content", analyzer).parse(q);

            // OR the two field queries: a hit in either title or content matches.
            BooleanQuery.Builder booleanQuery = new BooleanQuery.Builder();
            booleanQuery.add(titleQuery, BooleanClause.Occur.SHOULD);
            booleanQuery.add(contentQuery, BooleanClause.Occur.SHOULD);

            TopDocs hits = is.search(booleanQuery.build(), MAX_HITS);

            // One highlighter per field so fragments are scored against the
            // query that actually targets that field.
            Highlighter titleHighlighter = buildHighlighter(titleQuery);
            Highlighter contentHighlighter = buildHighlighter(contentQuery);

            for (ScoreDoc scoreDoc : hits.scoreDocs) {
                Document doc = is.doc(scoreDoc.doc);
                ArticleInfo articleInfo = new ArticleInfo();
                articleInfo.setId(Long.parseLong(doc.get("id")));
                articleInfo.setGmtCreate(dateFormat.parse(doc.get("gmtCreate")));

                String title = doc.get("title");
                if (title != null) {
                    String hTitle = titleHighlighter.getBestFragment(
                            analyzer.tokenStream("title", new StringReader(title)), title);
                    // Fall back to the raw title when nothing was highlighted.
                    articleInfo.setTitle(StringUtil.isEmpty(hTitle) ? title : hTitle);
                }

                String content = doc.get("content");
                if (content != null) {
                    String hContent = contentHighlighter.getBestFragment(
                            analyzer.tokenStream("content", new StringReader(content)), content);
                    if (StringUtil.isEmpty(hContent)) {
                        // No highlighted fragment: return a bounded plain preview.
                        articleInfo.setContent(content.length() <= CONTENT_PREVIEW_LENGTH
                                ? content
                                : content.substring(0, CONTENT_PREVIEW_LENGTH));
                    } else {
                        articleInfo.setContent(hContent);
                    }
                }
                articleInfos.add(articleInfo);
            }
        }
        return articleInfos;
    }

    /**
     * Builds the Lucene document representing one article.
     *
     * @param articleInfo source article
     * @return document with {@code id}, {@code title}, {@code content}, {@code gmtCreate}
     */
    private Document toDocument(ArticleInfo articleInfo) {
        Document doc = new Document();
        // StringField indexes the id verbatim so it works as an exact-match
        // key for updateDocument/deleteDocuments.
        doc.add(new StringField("id", String.valueOf(articleInfo.getId()), Field.Store.YES));
        doc.add(new TextField("title", articleInfo.getTitle(), Field.Store.YES));
        // Strip HTML tags so markup is not indexed as searchable text.
        doc.add(new TextField("content", HtmlUtil.getTextFromTHML(articleInfo.getContent()), Field.Store.YES));
        doc.add(new StringField("gmtCreate",
                DateUtil.formatDate(articleInfo.getGmtCreate(), DATE_PATTERN), Field.Store.YES));
        return doc;
    }

    /**
     * Creates a highlighter that wraps query matches in red bold markup.
     *
     * @param query the query whose terms should be highlighted
     * @return configured highlighter
     */
    private Highlighter buildHighlighter(Query query) {
        QueryScorer scorer = new QueryScorer(query);
        SimpleHTMLFormatter formatter =
                new SimpleHTMLFormatter("<b><font color='red'>", "</font></b>");
        Highlighter highlighter = new Highlighter(formatter, scorer);
        highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer));
        return highlighter;
    }
}
