import html
import logging

from querier.esquerier import ElasticSearchQuerier

import utils.utils as utils
from utils.wechat_article_dedup import article_deduplicate
from utils.simhash import get_b64_hash

DAYS = 15
MINIMUM_SHOULD_MATCH = '5<85% 10<8'
MAX_CHARACTER = 10
CATEGORY_CUTOFF = 0.7
MAX_KEYWORDS = 100


class SearchParagraphQuerier(ElasticSearchQuerier):
    """Searches an Elasticsearch index of article paragraphs.

    Builds a bool query from a free-text term, NLP-extracted keywords and
    structured filters, then flattens ES hits into plain result dicts,
    optionally deduplicating near-identical paragraphs by simhash.
    """

    def __init__(self, es, index, doc_type, nlp_service=None):
        """
        :param es: Elasticsearch client, passed through to the base querier.
        :param index: name of the index to search.
        :param doc_type: document type within the index.
        :param nlp_service: optional NLP service used by
            ``utils.process_query_term`` to extract keywords from the term.
        """
        super(SearchParagraphQuerier, self).__init__(es, index, doc_type)
        self.nlp_service = nlp_service

    def _build_query(self, args):
        """Turn request ``args`` into an ES query body plus result params.

        :param args: dict with optional keys ``term``, ``filters``,
            ``order_by``, ``from``, ``size``, ``highlight``, ``deduplicate``.
        :return: ``(query_body, extra, params)`` where ``params`` is later
            consumed by :meth:`_build_result`.
        """
        term = args.get('term') or ''
        filters = args.get('filters') or {}
        order = args.get('order_by', utils.ORDER_OVERALL)
        from_ = args.get('from', 0)
        size_ = args.get('size', 10)
        highlight = args.get('highlight', False)
        deduplicate = args.get('deduplicate', False)

        # Process the query text; only the extracted keywords are used here.
        _, keywords, _, _ = utils.process_query_term(
            term, self.nlp_service, 'keywords', allowed_num_words=MAX_KEYWORDS)

        query = self._gen_query(' '.join(keywords), term, filters, order,
                                from_, size_, highlight)

        # BUG FIX: 'deduplicate' was hard-coded to True, silently ignoring
        # the caller-supplied flag read above.
        return query, {}, {'order': order, 'keywords': keywords,
                           'deduplicate': deduplicate, 'highlight': highlight}

    def _build_result(self, es_result, param):
        """Convert a raw ES response into the API result dict.

        :param es_result: ES search response body.
        :param param: the params dict produced by :meth:`_build_query`.
        :return: dict with ``total``, ``articles`` and ``keywords``.
        """
        deduplicate = param['deduplicate']
        total = es_result['hits']['total']
        keywords = param['keywords']
        articles = [self.extract_result(hit, keywords, param['highlight'])
                    for hit in es_result['hits']['hits']]

        if deduplicate:
            try:
                articles = article_deduplicate(articles, key='simhash_text')
            except Exception:
                # Best-effort: keep the un-deduplicated list, but log the
                # failure instead of swallowing it silently.
                logging.getLogger(__name__).warning(
                    'article deduplication failed', exc_info=True)

        return {
            'total': total,
            'articles': articles,
            'keywords': keywords,
        }

    def _gen_query(self, query_keywords, term, filters, order, from_, size_, highlight):
        """Assemble the ES bool query body.

        :param query_keywords: whitespace-joined extracted keywords.
        :param term: raw user query string.
        :param filters: structured filter dict (ranges, category, source).
        :param order: requested ordering (currently unused in the body).
        :param from_: pagination offset.
        :param size_: page size.
        :param highlight: whether to request ES-side highlighting.
        :return: complete query body dict ready to send to ES.
        """
        must_clause = []
        should_clause = []
        filter_clause = []
        if filters:
            # Numeric/date range filters.
            for range_key in ('text_len', 'publish_timestamp', 'read_num',
                              'like_num', 'like_read_ratio'):
                filter_clause = self._add_filter_range_clause(
                    filter_clause, filters, range_key)

            filter_clause = self._add_filter_clause(filter_clause, filters, 'from')

            if filters.get('category'):
                # Encode human-readable categories and require a minimum
                # per-document category confidence.
                filters['category'] = [utils.category_smzdm_2_encode(c)
                                       for c in filters['category']]
                filters['category_weight'] = [CATEGORY_CUTOFF]
                filter_clause = self._add_filter_clause(
                    filter_clause, filters, 'category', 'should')
                filter_clause = self._add_filter_range_clause(
                    filter_clause, filters, 'category_weight')

        if term:
            term = term.strip()
            # Was `len(term) <= 9`: equivalent to `< MAX_CHARACTER` (10),
            # now tied to the constant instead of a duplicated magic number.
            if len(term) < MAX_CHARACTER:
                # Short query: boost near-exact phrase matches.
                should_clause.append({
                    'match_phrase': {
                        'text': {
                            'query': term[0:MAX_CHARACTER],
                            'slop': 2,
                            'boost': 30,
                        },
                    },
                })
            else:
                # Longer query: relaxed match with a minimum-should-match
                # percentage instead of a strict phrase.
                should_clause.append({
                    'match': {
                        'text': {
                            'query': term,
                            'boost': 30,
                            'minimum_should_match': MINIMUM_SHOULD_MATCH,
                        },
                    },
                })

        if query_keywords.strip():
            # Keywords are pre-tokenised, hence the whitespace analyzer.
            must_clause.append({
                'multi_match': {
                    'analyzer': 'whitespace',
                    'query': query_keywords,
                    'fields': ['keywords'],
                }
            })

        query = {
            'query': {'bool': {'filter': filter_clause}},
            'from': from_,
            'size': size_,
        }

        if must_clause:
            query['query']['bool']['must'] = must_clause

        if should_clause:
            query['query']['bool']['should'] = should_clause

        if highlight:
            query['highlight'] = {
                'pre_tags': ["<span class='keyword'>"],
                'post_tags': ["</span>"],
                'fields': {'keywords': {}, 'text': {}},
            }

        return query

    @staticmethod
    def _add_filter_match(must_clause, filters, key, cond='must'):
        """Append fuzzy match clauses for ``filters[key]`` to ``must_clause``.

        BUG FIX: this staticmethod previously declared ``self`` as its first
        parameter, shifting every argument by one position on any call.

        :param must_clause: clause list to extend in place.
        :param filters: filter dict; ``filters[key]`` may be a list or a
            space-separated string of values.
        :param key: field name to match on.
        :param cond: bool occurrence type ('must' or 'should').
        :return: the (mutated) ``must_clause`` list.
        """
        if filters.get(key):
            clause = []
            must_clause.append({'bool': {cond: clause}})
            values = filters[key]
            if isinstance(values, str):
                values = values.split(' ')
            for fk in values:
                clause.append({'match': {
                    key: {'query': fk,
                          'minimum_should_match': '20<100% 20<20'}}})
        return must_clause

    @staticmethod
    def _add_filter_clause(filter_clause, filters, key, cond='must'):
        """Append exact-term clauses for ``filters[key]`` to ``filter_clause``.

        :param filter_clause: clause list to extend in place.
        :param filters: filter dict; ``filters[key]`` is an iterable of values.
        :param key: field name to filter on.
        :param cond: bool occurrence type ('must' or 'should').
        :return: the (mutated) ``filter_clause`` list.
        """
        if filters.get(key):
            clause = [{'term': {key: fk}} for fk in filters[key]]
            filter_clause.append({'bool': {cond: clause}})
        return filter_clause

    @staticmethod
    def _add_filter_range_clause(filter_clause, filters, key):
        """Append a ``[min, max]`` range filter for ``key`` if present.

        ``filters[key]`` is expected to be ``[min]`` or ``[min, max]``;
        either bound may be None or the string 'null' to leave it open.

        :return: the (mutated) ``filter_clause`` list.
        """
        if filters.get(key):
            clause = []
            filter_clause.append({'bool': {'must': clause}})
            fk = filters[key]
            if isinstance(fk, list) and len(fk) >= 1:
                min_fk = fk[0]
                max_fk = fk[1] if len(fk) >= 2 else None
                if min_fk is not None and min_fk != 'null':
                    clause.append({'range': {key: {'gte': min_fk}}})
                if max_fk is not None and max_fk != 'null':
                    clause.append({'range': {key: {'lte': max_fk}}})
        return filter_clause

    @staticmethod
    def extract_result(hit, keyword_params, is_highlight):
        """Flatten one ES hit into an article-paragraph dict.

        :param hit: a single entry from ``es_result['hits']['hits']``.
        :param keyword_params: keywords used for local text highlighting.
        :param is_highlight: whether to produce highlighted text.
        :return: dict of paragraph fields, including a ``simhash_text``
            fingerprint used later for deduplication.
        """
        source_ = dict(hit['_source'])
        url = source_.get('url', '')
        highlight = hit.get('highlight', {})
        h_text = source_.get('text')
        from_ = source_.get('from')

        if is_highlight:
            h_text = utils.highlight_text(h_text, keyword_params)
            # Fall back to the ES-side highlight when local highlighting
            # changed nothing, or unconditionally for source '9'
            # (reason unclear from here — presumably that source's text is
            # not locally highlightable; TODO confirm).
            if h_text == source_.get('text', '') or str(from_) == '9':
                es_fragments = highlight.get('text')
                h_text = es_fragments[0] if es_fragments else source_.get('text', '')

        return {
            'biz_code': source_.get('biz_code'),
            'article_id': source_.get('article_id'),
            'paragraph_id': source_.get('paragraph_id'),
            'title': html.unescape(source_.get('title', '')),
            'simhash_text': get_b64_hash(source_.get('keywords', []), 64),
            'url': url,
            'article_para_id': source_.get('article_para_id'),
            'publish_timestamp': source_.get('publish_timestamp'),
            'from': source_.get('from'),
            'text': h_text,
            'prev_pic': source_.get('prev_pic', ''),
            'post_pic': source_.get('post_pic', ''),
            'keywords': source_.get('keywords', [])
        }

