package com.faxsun.web.controller;

import java.io.IOException;
import java.net.URLEncoder;
import java.util.Collections;
import java.util.List;

import javax.servlet.http.HttpServletRequest;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleFragmenter;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.util.Version;
import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.hibernate.search.FullTextSession;
import org.hibernate.search.Search;
import org.hibernate.search.query.dsl.BooleanJunction;
import org.hibernate.search.query.dsl.QueryBuilder;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Controller;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.ui.ModelMap;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.thymeleaf.util.StringUtils;

import com.faxsun.core.persist.entity.FaxsunAnalyzer;
import com.faxsun.core.persist.entity.PostPO;
import com.faxsun.core.persist.entity.UserPO;

@Controller
@RequestMapping(value = "/search")
@Transactional(rollbackFor = { Exception.class })
public class HeadSearchController {

    /** Base path of the Hibernate Search indexes (configured; not read directly here). */
    @Value("${hibernate.search.indexs}")
    private String indexBasePath = null;

    /** Config flag ("true"/"false"): rebuild the full-text index on the first search request. */
    @Value("${hibernate.search.init}")
    private String indexInit = null;

    /** Whether the mass indexer still needs to run; access guarded by initIndexing(). */
    private boolean initIndex = false;

    /** Number of posts shown per result page. */
    private static final int PAGINATION_SIZE = 10;

    /** Maximum characters of the highlighted summary fragment. */
    private static final int HIGHLIGHT_SUMMARY_SIZE = 230;

    @Autowired
    private SessionFactory sessionFactory;

    /**
     * Handles POST /search/paging: loads the page number given by the "page"
     * request parameter for the current keyword.
     *
     * @throws NumberFormatException if the "page" parameter is missing or not an integer
     */
    @RequestMapping(value = "/paging", method = RequestMethod.POST)
    public String paging(ModelMap model, HttpServletRequest request)
            throws NumberFormatException, InterruptedException, IOException, InvalidTokenOffsetsException {
        String targetPage = request.getParameter("page");
        loadPage(model, request, Integer.parseInt(targetPage));
        return FrontViewConstants.VIEW_SEARCH_LIST;
    }

    /**
     * Handles GET /search: runs the search for the "keyword" request parameter
     * and shows the first result page.
     */
    @RequestMapping(method = RequestMethod.GET)
    public String view(ModelMap model, HttpServletRequest request)
            throws NumberFormatException, InterruptedException, IOException, InvalidTokenOffsetsException {
        loadPage(model, request, 1);
        return FrontViewConstants.VIEW_SEARCH_RESULT;
    }

    /**
     * Runs the full-text search for the "keyword" request parameter and fills the
     * model with one page of highlighted results plus pagination attributes.
     *
     * <p>NOTE: the former method-level {@code @Transactional} was removed — Spring's
     * proxy-based AOP ignores annotations on private methods; the class-level
     * annotation already governs the public entry points.
     *
     * @param targetPageId 1-based page requested by the client; clamped to the
     *                     valid range before querying
     */
    @SuppressWarnings("unchecked")
    private void loadPage(ModelMap model, HttpServletRequest request, int targetPageId)
            throws InterruptedException, IOException, InvalidTokenOffsetsException {

        String keyword = request.getParameter("keyword");
        model.addAttribute("keyword", keyword);

        List<PostPO> result = Collections.emptyList();

        if (keyword != null && !keyword.isEmpty()) {

            Session session = sessionFactory.openSession();
            try {
                initIndexing(session);

                FullTextSession fullTextSession = org.hibernate.search.Search.getFullTextSession(session);

                QueryBuilder qb = fullTextSession.getSearchFactory().buildQueryBuilder()
                        .forEntity(PostPO.class).get();

                // Use the project analyzer (jcseg) rather than the default tokenizer so
                // query-time tokenization matches index-time tokenization.
                Analyzer analyzer = getAnalyzer();

                // One Lucene query per searchable field, built from the raw keyword.
                Query titleQuery = getQueryFromKeyword(qb, keyword, "title", new QueryParser("title", analyzer));
                Query summaryQuery = getQueryFromKeyword(qb, keyword, "summary", new QueryParser("summary", analyzer));
                Query tagsQuery = getQueryFromKeyword(qb, keyword, "tags", new QueryParser("tags", analyzer));
                Query contentQuery = getQueryFromKeyword(qb, keyword, "content", new QueryParser("content", analyzer));

                // Combine the per-field queries: a hit in ANY field qualifies (OR semantics).
                BooleanQuery bQuery = new BooleanQuery();
                bQuery.add(titleQuery, Occur.SHOULD);
                bQuery.add(summaryQuery, Occur.SHOULD);
                bQuery.add(tagsQuery, Occur.SHOULD);
                bQuery.add(contentQuery, Occur.SHOULD);

                // Wrap the Lucene query in a Hibernate Search full-text query.
                org.hibernate.search.FullTextQuery ftQuery =
                        fullTextSession.createFullTextQuery(bQuery, PostPO.class);

                int totalNewsSize = ftQuery.getResultSize();

                int curPageId = addPaginationAttributes(model, request, keyword, targetPageId, totalNewsSize);

                // Fetch exactly the requested page. BUG FIX: offset is derived from the
                // CLAMPED page id, not the raw request value, so an out-of-range page
                // parameter can no longer produce an overshooting (or empty) window.
                ftQuery.setFirstResult((curPageId - 1) * PAGINATION_SIZE);
                ftQuery.setMaxResults(PAGINATION_SIZE);
                result = ftQuery.list();

                // Decorate each hit with highlighted title/tags/summary fragments.
                highlightResult(analyzer, result, titleQuery, tagsQuery, summaryQuery);

                // Present the page ordered by id descending.
                Collections.sort(result, PostPO.Comparators.DFLT_ID);
            } finally {
                // Always release the session, even when the search throws (leak fix).
                session.close();
            }
        }

        model.addAttribute("posts", result);
    }

    /**
     * Computes the clamped 1-based current page and adds the pagination model
     * attributes (maxPage/curPage/prevPage/nextPage) plus the paging URL.
     *
     * @return the clamped current page id, always {@code >= 1}
     */
    private int addPaginationAttributes(ModelMap model, HttpServletRequest request,
            String keyword, int targetPageId, int totalNewsSize) throws IOException {

        int maxPageId = (totalNewsSize / PAGINATION_SIZE) + (totalNewsSize % PAGINATION_SIZE == 0 ? 0 : 1);
        if (maxPageId < 1) {
            // Keep page ids 1-based even when there are no results at all.
            maxPageId = 1;
        }
        int curPageId = Math.min(Math.max(targetPageId, 1), maxPageId);

        model.addAttribute("maxPage", maxPageId);
        model.addAttribute("curPage", curPageId);
        model.addAttribute("prevPage", Math.max(curPageId - 1, 1));
        model.addAttribute("nextPage", Math.min(curPageId + 1, maxPageId));

        // URL the pagination widget posts back to; keyword must be URL-encoded.
        String url = request.getContextPath() + "/search/paging?keyword=" + URLEncoder.encode(keyword, "UTF-8");
        model.addAttribute("url", url);

        return curPageId;
    }

    /**
     * Runs the Hibernate Search mass indexer exactly once, on the first search
     * request, when the {@code hibernate.search.init} property is "true".
     *
     * <p>BUG FIX: the config flag is now parsed only once. Previously the flag was
     * re-parsed on every call (because {@code initIndex} is reset to false after
     * indexing), which made the mass indexer rebuild the whole index on EVERY
     * search request when the property was set to "true".
     */
    private synchronized void initIndexing(Session session) throws InterruptedException {

        if (indexInit != null) {
            initIndex = Boolean.parseBoolean(indexInit);
            // Consume the flag so it is never parsed (and acted on) again.
            indexInit = null;
        }

        if (initIndex) {
            FullTextSession fullTextSession = Search.getFullTextSession(session);
            fullTextSession.createIndexer().startAndWait();
            initIndex = false;
        }
    }

    /** @return the project analyzer used for both index- and query-time tokenization */
    private Analyzer getAnalyzer() throws IOException {
        return new FaxsunAnalyzer(Version.LUCENE_4_10_2);
    }

    /**
     * Builds a query for one field: every whitespace-separated term of the
     * keyword must match as a phrase (AND within the field).
     *
     * @param qp parser bound to the same analyzer used at index time, so query
     *           tokens line up with indexed tokens
     */
    private Query getQueryFromKeyword(QueryBuilder qb, String keyword, String field, QueryParser qp) {

        @SuppressWarnings("rawtypes")
        BooleanJunction<BooleanJunction> bool = qb.bool();

        for (String term : keyword.split("\\s+")) {
            Query q = qp.createPhraseQuery(field, term);
            // createPhraseQuery returns null when the analyzer drops the whole
            // term (e.g. stop words / punctuation) — skip it instead of NPE'ing.
            if (q != null) {
                bool.must(q);
            }
        }

        return bool.createQuery();
    }

    /**
     * Sets the highlighted (or fallback) title/tags/summary on each result post.
     * The best-scoring fragment per field is wrapped in the search-highlight tag.
     */
    private void highlightResult(Analyzer analyzer, List<PostPO> result,
            org.apache.lucene.search.Query titleQuery,
            org.apache.lucene.search.Query tagsQuery,
            org.apache.lucene.search.Query summaryQuery) throws IOException, InvalidTokenOffsetsException {

        SimpleHTMLFormatter formatter = new SimpleHTMLFormatter("<font class=\"search-highlight\" >", "</font>");

        // Fragmenters bound the number of characters shown per field.
        SimpleFragmenter ftTitle = new SimpleFragmenter(255);
        SimpleFragmenter ftTags = new SimpleFragmenter(255);
        SimpleFragmenter ftSummary = new SimpleFragmenter(HIGHLIGHT_SUMMARY_SIZE);

        // One highlighter per field; QueryScorer picks the best-scoring fragment.
        Highlighter hlTitle = new Highlighter(formatter, new QueryScorer(titleQuery));
        Highlighter hlTags = new Highlighter(formatter, new QueryScorer(tagsQuery));
        Highlighter hlSummary = new Highlighter(formatter, new QueryScorer(summaryQuery));

        hlTitle.setTextFragmenter(ftTitle);
        hlTags.setTextFragmenter(ftTags);
        hlSummary.setTextFragmenter(ftSummary);

        for (PostPO post : result) {
            // Touch lazy associations while the session is still open so the view
            // can render author name/avatar without a LazyInitializationException.
            UserPO author = post.getAuthor();
            @SuppressWarnings("unused")
            String name = author.getName();
            @SuppressWarnings("unused")
            String avatar = author.getAvatar();

            // Best-scoring fragment per field (analyzer, field name, field content);
            // null means the field contained no match to highlight.
            String title = hlTitle.getBestFragment(analyzer, "title", post.getTitle());
            String tags = hlTags.getBestFragment(analyzer, "tags", post.getTags());
            String summary = hlSummary.getBestFragment(analyzer, "summary", post.getSummary());

            // Fall back to the raw field value when nothing was highlighted.
            post.setSearchTitle(title != null ? title : post.getTitle());

            if (tags != null) {
                post.setSearchTags(tags);
            } else {
                post.setSearchTags(StringUtils.isEmpty(post.getTags()) ? "资讯/测评" : post.getTags());
            }

            post.setSearchSummary(summary != null ? summary : post.getSummary());
        }
    }

}
