from querier.esquerier import ElasticSearchQuerier
import utils
import math

# Field names accepted via the request's 'by_field' / 'by' arguments.
CATEGORY = 'category'
# Actual ES field that stores the top-level category value.
CATEGORY_0 = 'category_0'
CONTENT_TYPE = 'content_type'

# Maps stored content_type codes to human-readable labels
# ("1" -> text, "2" -> image, "3" -> video).
CONTENT_TYPE_CODECS = {
    "1": "文本",
    "2": "图片",
    "3": "视频"
}
# Fallback label ("unknown") for codes missing from CONTENT_TYPE_CODECS.
NOT_EXIST = "未知"


class WeiboContentAnalyzeQuerier(ElasticSearchQuerier):
    """Aggregate one user's Weibo posts into per-bucket statistics.

    Posts are bucketed by category (stored in the 'category_0' ES field)
    or by content type; each bucket reports document count and ratio plus
    sum/avg/max of likes, retweets, comments and engagement.
    """

    # Metric names shared by the ES aggregation and the result rows.
    _METRICS = ('likes', 'retweets', 'comments', 'engagement')

    def __init__(self, es, index, doc_type):
        super(WeiboContentAnalyzeQuerier, self).__init__(es, index, doc_type)

    def _build_query(self, args):
        """Validate request args and build the aggregation query.

        Args:
            args: mapping with required 'user_id' and optional 'from',
                'to' (publish-timestamp range bounds) and 'by_field'.

        Returns:
            (query, {}, params) where params echoes the normalized
            arguments for _build_result.

        Raises:
            ValueError: if 'user_id' is missing.
        """
        user_id = args.get('user_id', None)
        from_date = args.get('from', None)
        to_date = args.get('to', None)
        by_field = args.get('by_field', None)
        if user_id is None:
            raise ValueError('"user_id" is needed.')
        if not by_field:
            by_field = CATEGORY

        # The top-level category lives in the 'category_0' ES field.
        if by_field == CATEGORY:
            by_field = CATEGORY_0

        query = self._gen_query(user_id, from_date, to_date, by_field)
        return query, {}, {'user_id': user_id, 'from': from_date, 'to': to_date, 'by_field': by_field}

    def _build_result(self, es_result, param):
        """Convert the raw ES response into a list of per-bucket rows.

        Each row carries the decoded bucket label ('category') plus
        'doc_counts', 'doc_ratios' and '<stat>_<metric>' entries for
        stat in sum/avg/max and every metric in _METRICS.
        """
        data = extract_result(es_result['aggregations'], param['by_field'])
        total = sum(data['doc_counts'])
        # float() keeps the ratio exact under Python 2 integer division;
        # the guard avoids ZeroDivisionError when every bucket is empty.
        ratios = [(d / float(total)) if total else 0.0
                  for d in data['doc_counts']]
        categories = decode(param['by_field'], data['dates'])

        ret = []
        for i, category in enumerate(categories):
            row = {
                'category': category,
                'doc_counts': data['doc_counts'][i],
                'doc_ratios': ratios[i],
            }
            for metric in self._METRICS:
                for stat in ('sum', 'avg', 'max'):
                    key = '%s_%s' % (stat, metric)
                    row[key] = data[key][i]
            ret.append(row)

        return ret

    @staticmethod
    def _gen_query(user_id, from_date, to_date, by_field):
        """Return the ES request body.

        Filters by user and publish-timestamp range, then runs a terms
        aggregation on *by_field* (buckets ordered by total engagement,
        descending) with sum/max sub-aggregations per metric. avg_*
        values are not requested here; extract_result derives them as
        sum / doc_count.
        """
        sub_aggs = {}
        # 'engagement' stats read the precomputed 'sum_engagement' field.
        for metric, es_field in (('likes', 'likes'),
                                 ('retweets', 'retweets'),
                                 ('comments', 'comments'),
                                 ('engagement', 'sum_engagement')):
            sub_aggs['sum_%s' % metric] = {"sum": {"field": es_field, "missing": 0}}
            sub_aggs['max_%s' % metric] = {"max": {"field": es_field, "missing": 0}}

        return {
            "query": {
                "bool": {
                    "filter": [
                        {'term': {'user_id': user_id}},
                        {"range": {"publish_timestamp": {"from": from_date, "to": to_date}}}
                    ]
                }
            },
            "aggs": {
                "by_field": {
                    "terms": {
                        "field": by_field,
                        "order": {"sum_engagement": "desc"}
                    },
                    "aggs": sub_aggs
                }
            },

            # Only the aggregations are needed, not the matching documents.
            "size": 0
        }


def extract_result(agg, by_field):
    """Flatten the 'by_field' terms aggregation into parallel lists.

    Args:
        agg: the 'aggregations' section of an ES response; must contain a
            'by_field' terms aggregation whose buckets carry sum_*/max_*
            sub-aggregation values for likes/retweets/comments/engagement.
        by_field: unused; kept for backward compatibility with callers.

    Returns:
        dict of parallel lists keyed 'doc_counts', 'dates' (the raw bucket
        keys) and '<stat>_<metric>' for stat in sum/avg/max.  avg_* is
        derived as sum / doc_count; avg_* and max_* are forced to 0 for
        empty buckets.
    """
    metrics = ('likes', 'retweets', 'comments', 'engagement')
    out = {'doc_counts': [], 'dates': []}
    for metric in metrics:
        for stat in ('sum', 'avg', 'max'):
            out['%s_%s' % (stat, metric)] = []

    for bucket in agg['by_field']['buckets']:
        count = bucket['doc_count']
        out['doc_counts'].append(count)
        out['dates'].append(bucket['key'])
        for metric in metrics:
            total = bucket['sum_%s' % metric]['value']
            out['sum_%s' % metric].append(total)
            out['avg_%s' % metric].append(total / count if count else 0)
            out['max_%s' % metric].append(
                bucket['max_%s' % metric]['value'] if count else 0)

    return out


def decode(by_field, data):
    """Translate raw bucket keys into display labels.

    Category keys are passed through unchanged; any other field's keys
    are mapped through CONTENT_TYPE_CODECS, falling back to NOT_EXIST
    for unrecognized codes.
    """
    if by_field != CATEGORY:
        return [CONTENT_TYPE_CODECS.get(key, NOT_EXIST) for key in data]
    return data


def decode_one(by_field, x):
    """Translate a single bucket key into a display label.

    Single-value counterpart of decode(): category keys pass through,
    other keys are looked up in CONTENT_TYPE_CODECS with NOT_EXIST as
    the fallback.
    """
    needs_mapping = by_field != CATEGORY
    return CONTENT_TYPE_CODECS.get(x, NOT_EXIST) if needs_mapping else x


class WeiboContentAnalyzeAllQuerier(ElasticSearchQuerier):
    """Run the content analysis for several 'by' fields in one request.

    Delegates each field to a WeiboContentAnalyzeQuerier and returns a
    dict mapping each requested field name to its per-bucket result list.
    """

    def __init__(self, es, index, doc_type):
        super(WeiboContentAnalyzeAllQuerier, self).__init__(es, index, doc_type)
        self.analyzeQuerier = WeiboContentAnalyzeQuerier(es, index, doc_type)

    def search(self, args):
        """Query every requested 'by' field for one user.

        Args:
            args: mapping with required 'user_id', optional 'from'/'to'
                range bounds and an optional 'by' list of field names
                (defaults to category and content type).

        Returns:
            dict keyed by field name with each sub-querier's results.

        Raises:
            ValueError: if 'user_id' is missing.
        """
        by_fields = args.get('by')
        user_id = args.get('user_id', None)
        from_date = args.get('from', None)
        to_date = args.get('to', None)

        if user_id is None:
            raise ValueError('"user_id" is needed.')

        if by_fields is None:
            by_fields = [CATEGORY, CONTENT_TYPE]

        res = {}
        for field in by_fields:
            sub_args = {"user_id": user_id, "by_field": field,
                        "from": from_date, "to": to_date}
            # NOTE: the original keyed this with a tautological
            # "'category' if field == 'category' else field"; the result
            # key is simply the requested field name.
            res[field] = self.analyzeQuerier.search(sub_args)

        return res

    def _build_query(self, args):
        # Unused: search() is overridden to fan out to the sub-querier.
        pass

    def _build_result(self, es_result, param):
        # Unused: search() is overridden to fan out to the sub-querier.
        pass