import datetime
import html
import logging
import math
import operator
import re

import utils.utils as utils
from querier.esquerier import ElasticSearchQuerier
from utils.media_biz import media_biz_dict
from utils.wechat_article_dedup import article_deduplicate

# Sort-order identifiers accepted by the search API (see _gen_query).
READ = 'read_num'
LIKE = 'like_num'
RATIO = 'like_read_ratio'
RELATIVE = 'relative'

# NOTE(review): DAYS is not referenced anywhere in this file — confirm it is
# used elsewhere before removing.
DAYS = 15
# Elasticsearch combined minimum_should_match spec: up to 5 query terms all
# must match; 6-10 terms require 85%; more than 10 require 9 matches.
MINIMUM_SHOULD_MATCH = '5<85% 10<9'
MAX_CHARACTER = 30      # query terms are truncated to this many characters
CATEGORY_CUTOFF = 0.6   # minimum category_weight when filtering by category
MAX_KEYWORDS = 10       # cap on keywords extracted from the query term


class WechatArticleSearchMiniQuerier(ElasticSearchQuerier):
    """Elasticsearch querier for the WeChat-article "mini" search endpoint.

    Turns a free-text query term plus structured filters into an ES bool
    query, supports several sort orders, and flattens the raw hits into
    plain article dicts (optionally de-duplicating near-identical ones).
    """

    def __init__(self, es, index, doc_type, nlp_service=None):
        """
        :param es: Elasticsearch client passed through to the base querier.
        :param index: name of the index to search.
        :param doc_type: document type within the index.
        :param nlp_service: optional NLP service used by
            utils.process_query_term to segment the query into keywords.
        """
        super(WechatArticleSearchMiniQuerier, self).__init__(es, index, doc_type)
        self.nlp_service = nlp_service

    def _build_query(self, args):
        """Translate request ``args`` into an ES request body.

        :param args: dict with optional keys ``term``, ``filters``,
            ``order_by``, ``from``, ``size``, ``highlight``,
            ``filter_keywords`` and ``deduplicate``.
        :return: ``(query_body, {}, params)`` where ``params`` carries the
            sort order and dedup flag that ``_build_result`` needs.
        """
        term = args.get('term') or ''
        # Shallow-copy so the caller's filter dict is never mutated in
        # place (several keys are rewritten below and inside _gen_query).
        filters = dict(args.get('filters') or {})
        order = args.get('order_by', utils.ORDER_OVERALL)
        from_ = args.get('from', 0)
        size_ = args.get('size', 10)
        highlight = args.get('highlight', False)
        filter_keywords = args.get('filter_keywords') or []

        # Enforce a minimum article length of 20; rebuild the list rather
        # than editing it in place so the caller's list stays untouched.
        text_len = list(filters.get('text_len') or [])
        if not text_len:
            text_len = [20]
        elif text_len[0] < 20:
            text_len[0] = 20
        filters['text_len'] = text_len

        # Restrict to articles with from == 1; _gen_query re-sets this as
        # a one-element list before building the term filter.
        filters['from'] = 1

        deduplicate = args.get('deduplicate', False)

        # Only the segmented keywords are used here; the rewritten term,
        # excluded keywords and weights are intentionally discarded.
        _, keywords, _, _ = utils.process_query_term(
            term, self.nlp_service, 'keywords', allowed_num_words=MAX_KEYWORDS)

        query = self._gen_query(term, ' '.join(keywords), filter_keywords,
                                filters, order, from_, size_, highlight)

        return query, {}, {'order': order, 'deduplicate': deduplicate}

    def _build_result(self, es_result, param):
        """Convert a raw ES response into ``{'total': ..., 'articles': [...]}``.

        :param es_result: raw Elasticsearch response dict.
        :param param: the params dict produced by ``_build_query``.
        """
        order = param['order']
        deduplicate = param['deduplicate']
        # NOTE(review): on ES >= 7 hits.total is a dict ({'value': ...});
        # this assumes the pre-7 integer form — confirm against the cluster.
        total = es_result['hits']['total']
        articles = [self.extract_result(hit, order)
                    for hit in es_result['hits']['hits']]

        if deduplicate:
            # Dedup is best-effort: a failure must not break the search
            # response, but it should no longer be swallowed silently.
            try:
                articles = article_deduplicate(articles, remove=True)
            except Exception:
                logging.getLogger(__name__).warning(
                    'article deduplication failed; returning raw hits',
                    exc_info=True)

        return {
            'total': total,
            'articles': articles
        }

    def _gen_query(self, term, query_keywords, filter_words, filters, order, from_, size_, highlight):
        """Assemble the full ES request body.

        :param term: raw user query (may be empty/whitespace).
        :param query_keywords: whitespace-joined segmented keywords.
        :param filter_words: list of keyword groups; within a group at
            least one word must appear in the title, groups are ANDed.
        :param filters: structured filters; the 'from', 'biz_code',
            'category' and 'category_weight' entries are (re)written here.
        :param order: 'relative' | 'read_num' | 'like_num' |
            'like_read_ratio' | 'overall'; anything else sorts newest first.
        :param from_: pagination offset.
        :param size_: page size.
        :param highlight: if true, wrap title matches in a styled <span>.
        :return: ES request body dict.
        """
        must_clause = []
        should_clause = []
        filter_clause = []
        if filters:
            filter_clause = self._add_filter_clause(filter_clause, filters, 'has_copyright', 'should')
            filter_clause = self._add_filter_range_clause(filter_clause, filters, 'publish_timestamp')
            filter_clause = self._add_filter_range_clause(filter_clause, filters, 'read_num')
            filter_clause = self._add_filter_range_clause(filter_clause, filters, 'like_num')
            filter_clause = self._add_filter_range_clause(filter_clause, filters, 'like_read_ratio')
            filter_clause = self._add_filter_range_clause(filter_clause, filters, 'image_num')
            filter_clause = self._add_filter_range_clause(filter_clause, filters, 'video_num')
            filter_clause = self._add_filter_range_clause(filter_clause, filters, 'text_len')

            # Map a media-category name onto the biz codes it covers.
            # Unknown names are now ignored (no biz filter applied)
            # instead of raising KeyError.
            cm = filters.get('category_media')
            if cm:
                biz_codes = media_biz_dict.get(cm[0])
                if biz_codes:
                    filters['biz_code'] = biz_codes

            filter_clause = self._add_filter_match_biz(filter_clause, filters, 'biz_code')

            filters['from'] = [1]
            filter_clause = self._add_filter_clause(filter_clause, filters, 'from')
            if filters.get('category'):
                # Categories arrive as SMZDM names; encode them and only
                # keep articles whose category weight clears the cutoff.
                filters['category'] = [utils.category_smzdm_2_encode(c) for c in filters['category']]
                filters['category_weight'] = [CATEGORY_CUTOFF]
                filter_clause = self._add_filter_clause(filter_clause, filters, 'category', 'should')
                filter_clause = self._add_filter_range_clause(filter_clause, filters, 'category_weight')

        if query_keywords.strip():
            # Segmented keywords query the pre-segmented title field; the
            # whitespace analyzer keeps the segmentation as-is.
            should_clause.append(
                {
                    'multi_match': {
                        'analyzer': 'whitespace',
                        'query': query_keywords,
                        'fields': ['title_seg^3'],
                    }
                }
            )

        for words in filter_words:
            filter_clause = self.filter_keywords(filter_clause, words)

        if term:
            term = term.strip()
            if len(term) <= 5:
                # Short queries: require the (near-)exact phrase in the title.
                should_clause.append(
                    {
                        'match_phrase': {
                            "title": {
                                'query': term[0:MAX_CHARACTER],
                                'slop': 2,
                                'boost': 3,
                            },
                        },
                    }
                )
            else:
                # Longer queries: token match with a graded
                # minimum_should_match requirement.
                should_clause.append(
                    {
                        'match': {
                            "title": {
                                'query': term[0:MAX_CHARACTER],
                                'boost': 30,
                                'minimum_should_match': MINIMUM_SHOULD_MATCH
                            },
                        }
                    }
                )

        query = {"query": {
            "bool": {
                "filter": filter_clause,
            }
        }, 'from': from_, 'size': size_}

        if must_clause:
            query['query']['bool']['must'] = must_clause

        if should_clause:
            query['query']['bool']['should'] = should_clause
            query['query']['bool']['minimum_should_match'] = 1

        if order == 'relative':
            # Blend text relevance with recency and popularity.
            query['sort'] = [
                {
                    '_script': {
                        "type": "number",
                        "script": {
                            "lang": "painless",
                            # TODO: use a stepwise function; use publish_date
                            # when switching to all-article search.
                            "inline": "Math.log(doc.publish_timestamp.value) * _score * Math.log(doc.read_num.value + 1000.0)"
                        },
                        "order": "desc",
                    },
                },
                '_score'
            ]

            # Without a query term there is no relevance signal: newest first.
            if not term.strip():
                query['sort'] = [{'publish_timestamp': 'desc'}]

            if filters.get('category'):
                query['sort'] = [{'category_weight': 'desc'}] + query['sort']

        elif order == 'read_num':
            query['sort'] = [
                {'read_num': 'desc'}
            ]
        elif order == 'like_num':
            query['sort'] = [
                {'like_num': 'desc'}
            ]
        elif order == 'like_read_ratio':
            query['sort'] = [
                {'like_read_ratio': 'desc'}
            ]
        elif order == 'overall':
            # Recency weighted by how confidently the article is categorised.
            query['sort'] = [
                {
                    '_script': {
                        "type": "number",
                        "script": {
                            "lang": "painless",
                            "inline": "doc.publish_timestamp.value * Math.log(doc.category_weight.value + 1.0)"
                        },
                        "order": "desc",
                    },
                },
                '_score'
            ]
        else:
            query['sort'] = [{'publish_timestamp': 'desc'}]

        # Keep computing _score even when sorting by a field/script.
        query['track_scores'] = True
        if highlight:
            query['highlight'] = {
                "pre_tags": ["<span class='keyword'>"],
                "post_tags": ["</span>"],
                "fields": {"title": {}}
            }

        return query

    @staticmethod
    def filter_keywords(must_clause, keywords):
        """Append an OR-group of title phrase matches to ``must_clause``.

        At least one word from ``keywords`` must appear (slop 1) in the
        title; successive calls AND their groups together.  Returns the
        (mutated) ``must_clause`` for chaining.
        """
        clause = [
            {
                'match_phrase': {
                    "title": {
                        'query': fk,
                        'slop': 1,
                    },
                }
            }
            for fk in keywords
        ]
        must_clause.append({
            'bool': {'should': clause, 'minimum_should_match': 1}
        })
        return must_clause

    def _add_filter_match(self, must_clause, filters, key, cond='must'):
        """Append per-value ``match`` clauses for ``key``.

        String values are split on spaces; each value becomes one match
        clause under ``cond`` inside a single bool clause.  Not referenced
        by any live code in this module (kept for compatibility).
        """
        if filters.get(key):
            values = filters[key]
            if isinstance(values, str):
                values = values.split(' ')
            clause = [
                {'match': {key: {'query': fk, 'minimum_should_match': '20<100% 20<20'}}}
                for fk in values
            ]
            must_clause.append({
                'bool': {cond: clause}
            })
        return must_clause

    def _add_filter_match_biz(self, must_clause, filters, key):
        """Append a ``terms`` filter on ``key`` (biz codes).

        Accepts either a list of codes or a space-separated string.
        Returns the (mutated) ``must_clause`` for chaining.
        """
        if filters.get(key):
            values = filters[key]
            if isinstance(values, str):
                values = values.split(' ')
            must_clause.append({
                'bool': {'must': [{'terms': {key: values}}]}
            })
        return must_clause

    def _add_filter_clause(self, filter_clause, filters, key, cond='must'):
        """Append one ``term`` clause per value of ``filters[key]``.

        :param cond: 'must' (AND) or 'should' (OR) within the bool clause.
        Does nothing when the key is absent or falsy.
        """
        if filters.get(key):
            terms = [{'term': {key: fk}} for fk in filters[key]]
            filter_clause.append({
                'bool': {
                    cond: terms
                }
            })
        return filter_clause

    def _add_filter_range_clause(self, filter_clause, filters, key):
        """Append a numeric range filter built from ``filters[key]``.

        ``filters[key]`` is expected to be ``[min]`` or ``[min, max]``;
        either bound may be None or the string 'null' to mean unbounded.
        Non-list values are tolerated and yield an empty bool clause.
        """
        if filters.get(key):
            clause = []
            filter_clause.append({
                'bool': {
                    'must': clause
                }
            })
            bounds = filters[key]
            if isinstance(bounds, list) and bounds:
                lower = bounds[0]
                upper = bounds[1] if len(bounds) >= 2 else None
                if lower is not None and lower != 'null':
                    clause.append({'range': {key: {"gte": lower}}})
                if upper is not None and upper != 'null':
                    clause.append({'range': {key: {"lte": upper}}})
        return filter_clause

    def extract_result(self, hit, order):
        """Flatten one ES hit into the public article dict.

        Prefers the highlighted title when present and keeps at most the
        first ten keywords, de-duplicated with first-seen order preserved.

        :param hit: a single entry from ``hits.hits`` in the ES response.
        :param order: sort order of the query (currently unused here).
        """
        source_ = hit['_source']
        url = source_['url']
        likes = source_['like_num']
        reads = source_['read_num']
        keywords = source_['keywords']

        highlight = hit.get('highlight')

        # Use the highlighted title when the hit carries one.
        h_title = source_['title']
        if highlight:
            h_title = highlight.get('title')
            h_title = h_title[0] if h_title else source_['title']

        # De-duplicate the first ten keywords, preserving first-seen order.
        first_seen = {}
        for idx, kw in enumerate(keywords[0:10]):
            first_seen.setdefault(kw, idx)
        h_keywords = [kw for kw, _ in sorted(first_seen.items(), key=operator.itemgetter(1))]

        return {
            'id': source_['id'],
            'biz_code': source_['biz_code'],
            'biz_name': source_['biz_name'],
            'title': utils.clean_text(h_title),
            'title_seg': source_.get('title_seg', []),
            'title_simhash': source_.get('title_simhash'),
            'text_simhash': source_.get('text_simhash'),
            'url': url,
            'qrcode': utils.get_qrcode(url),
            'msg_cdn_url': source_['msg_cdn_url'],
            'keywords': h_keywords,
            # Counts of 100000 or more are clamped to the sentinel 100001.
            'read_num': reads if reads < 100000 else 100001,
            'like_num': likes if likes < 100000 else 100001,
            'has_copyright': source_.get('has_copyright'),
            'publish_timestamp': source_['publish_timestamp'],
            'crawler_timestamp': source_['crawler_timestamp'],
            'category_weight': source_.get('category_weight'),
            'category': utils.category_smzdm_2_decode(source_.get('category', -1)),
            'image_num': source_.get('image_num'),
            'video_num': source_.get('video_num'),
            'text_len': source_.get('text_len'),
            'abstract': source_.get('abstract'),
            'brands': []  # brand extraction disabled; key kept for API shape
        }

