from querier.esquerier import ElasticSearchQuerier
import querier.weibo.utils as utils
from utils import utils as uc

# Minimum classifier confidence for a category to count.
# NOTE(review): not referenced anywhere in this chunk — presumably used elsewhere; confirm.
CATEGORY_CUTOFF = 0.5
# Upper bound on query keywords: passed to process_query_term and used to cap
# the keyword string handed to the ES multi_match query.
MAX_KEYWORDS = 50

# ES "minimum_should_match" expression (combination format).
# NOTE(review): not referenced in this chunk — verify it is still consumed by callers.
MINIMUM_SHOULD_MATCH = '5<85% 10<9'


class WeiboPostSearchMXQuerier(ElasticSearchQuerier):
    """Search Weibo posts by free-form copy text, festivals, hot events and filters.

    Builds a bool query (keyword multi_match + hot-event phrase matches +
    festival/category filters) and converts the ES response into a plain
    ``{'total', 'keywords', 'posts'}`` dict.
    """

    def __init__(self, es, index, doc_type, nlp_service):
        """
        :param es: Elasticsearch client, passed through to the base querier.
        :param index: target index name.
        :param doc_type: target document type.
        :param nlp_service: NLP service used to segment the query text into
            keywords (see ``utils.process_query_term``).
        """
        super(WeiboPostSearchMXQuerier, self).__init__(es, index, doc_type)
        self.nlp_service = nlp_service

    def _build_query(self, args):
        """Given copy text, festivals, hot events and category filters, build
        the ES query for matching posts.

        :param args: request dict; recognised keys: ``term``, ``festivals``,
            ``hot_events``, ``filters``, ``order_by``, ``from``, ``size``,
            ``highlight``.
        :return: ``(query, {}, extra)`` where ``extra`` carries the extracted
            keywords, the sort order and the weighted expanded keywords.
        """
        # Copy text; normalise None to ''.
        term = args.get('term') or ''

        # Festivals / solar terms: expand each into its keyword list, flattened.
        festivals = args.get('festivals') or []
        festival_keywords = [kw
                             for festival in festivals
                             for kw in utils.get_festival_keywords(festival.strip())]

        # Hot events are phrase-matched against both title and keywords downstream.
        hot_events = [event.strip() for event in (args.get('hot_events') or [])]

        # Category (and other) filters; normalise None to {}.
        filters = args.get('filters', {}) or {}
        order = args.get('order_by', utils.ORDER_OVERALL)
        from_ = args.get('from', 0)
        size_ = args.get('size', 10)
        highlight = args.get('highlight', False)

        keyword_type = 'keywordcloud'

        # Segment the query text: ``keywords`` drive the multi_match,
        # ``ex_keywords``/``weights`` are the weighted expansion returned to
        # the caller for display. The normalised term itself is unused here.
        _, keywords, ex_keywords, weights = utils.process_query_term(
            term, self.nlp_service, keyword_type, allowed_num_words=MAX_KEYWORDS)

        ex_kw = utils.get_kv_json(ex_keywords, weights)

        query = self._gen_query(term.strip(), ' '.join(keywords[0:MAX_KEYWORDS]), hot_events,
                                festival_keywords, filters, order, from_, size_, highlight)

        return query, {}, {'keywords': keywords, 'order': order, 'ex_keywords': ex_kw}

    def _build_result(self, es_result, param):
        """Convert the raw ES response into ``{'total', 'keywords', 'posts'}``.

        :param es_result: raw Elasticsearch response dict.
        :param param: the extra dict produced by ``_build_query``.
        """
        total = es_result['hits']['total']
        posts = [extract_result(hit) for hit in es_result['hits']['hits']]
        return {
            'total': total,
            'keywords': param['keywords'],
            'posts': posts,
        }

    @staticmethod
    def _gen_query(term, query_keywords, hot_events, festival_keywords, filters, order,
                   from_, size_, highlight):
        """Assemble the ES bool query dict.

        :param term: stripped original copy text.
        :param query_keywords: whitespace-joined keywords for multi_match.
        :param hot_events: phrases that MUST appear (slop 2) in the post text.
        :param festival_keywords: flattened festival keyword list for filtering.
        :param filters: category/other filters applied as a filter clause.
        :param order: sort order key, applied via ``utils.get_post_sort``.
        :param from_: pagination offset.
        :param size_: page size.
        :param highlight: whether to request highlighted fragments.
        """
        must_clause = []
        filter_clause = []
        should_clause = []

        if filters:
            # Mutates filter_clause in place.
            utils.get_post_filters(filter_clause, filters)

        if query_keywords.strip():
            # Keywords are pre-segmented, so use the whitespace analyzer.
            must_clause.append({
                'multi_match': {
                    'analyzer': 'whitespace',
                    'query': query_keywords,
                    'fields': ['keywords', 'seg_weibo_text', 'seg_r_weibo_text'],
                }
            })

        if festival_keywords:
            filter_clause = utils.filter_festivals(filter_clause, festival_keywords)

        # Each hot event must appear (near-)verbatim in the post text.
        for he in hot_events:
            must_clause.append({
                'match_phrase': {
                    'weibo_text': {
                        'query': he.strip(),
                        'slop': 2,
                        'boost': 10,
                    },
                }
            })

        # Short copy text (1-5 chars) additionally boosts near-exact title matches.
        if 0 < len(term) <= 5:
            should_clause.append({
                'match_phrase': {
                    'title': {
                        'query': term,
                        'slop': 1,
                        'boost': 3,
                    },
                },
            })

        query = {
            'query': {
                'bool': {
                    'filter': filter_clause,
                }
            },
            'from': from_,
            'size': size_,
        }

        # Only attach must/should when non-empty to keep the query minimal.
        if must_clause:
            query['query']['bool']['must'] = must_clause
        if should_clause:
            query['query']['bool']['should'] = should_clause

        query = utils.get_post_sort(query, order, term, filters)

        if highlight:
            query['highlight'] = {
                "pre_tags": ["<span class='keyword'>"],
                "post_tags": ["</span>"],
                "fields": {"keywords": {}, "weibo_text": {}, "r_weibo_text": {}}
            }

        return query


def extract_result(hit):
    """Turn one ES hit into a post dict, merging highlight fragments when present.

    When the hit carries a ``highlight`` section:
    - ``keywords`` becomes the highlighted keywords followed by up to 10 of the
      remaining original keywords;
    - ``summary`` becomes highlighted (or raw) weibo_text + '//' + highlighted
      (or raw) r_weibo_text.

    :param hit: a single entry of ``es_result['hits']['hits']``.
    :return: the post dict produced by ``utils.extract_post_from_source``,
        augmented with ``category`` and, when highlighted, ``keywords``/``summary``.
    """
    source_ = hit['_source']
    res = utils.extract_post_from_source(source_)
    res['category'] = uc.category_smzdm_2_decode(source_['category'])

    highlight = hit.get('highlight')
    if highlight:
        h_keywords = highlight.get('keywords')
        if h_keywords:
            # Strip the highlight markup to find which keywords were matched,
            # then append up to 10 of the unmatched originals. Concatenate
            # rather than += so the caller's hit dict is not mutated.
            plain = [s.replace("<span class='keyword'>", '').replace("</span>", '')
                     for s in h_keywords]
            extra = [k for k in res['keywords'] if k not in plain][0:10]
            res['keywords'] = h_keywords + extra

        # Prefer the first highlighted fragment of each text field; fall back
        # to the raw source field.
        h_weibo_text = highlight.get('weibo_text')
        h_r_weibo_text = highlight.get('r_weibo_text')
        main_text = h_weibo_text[0] if h_weibo_text else source_.get('weibo_text', '')
        repost_text = h_r_weibo_text[0] if h_r_weibo_text else source_.get('r_weibo_text', '')
        res['summary'] = main_text + '//' + repost_text

    return res
