# -*- coding: utf-8 -*-
# content_analyze.py
# Created by Hardy on 19th, Feb
# Copyright 2017 杭州网川教育有限公司. All rights reserved.

from querier.esquerier import ElasticSearchQuerier

# Grouping-field names: 'category' is the user-facing name accepted in request
# args; 'category_0' is the actual top-level category field stored in the
# index (WechatContentAnalyzeQuerier._build_query maps one to the other).
CATEGORY = 'category'
CATEGORY_0 = 'category_0'

# Alternative grouping field: the article's media type.
CONTENT_TYPE = 'content_type'

# Maps raw content_type values to Chinese display labels
# ("text", "image", "video" respectively).
CONTENT_TYPE_CODECS = {
    "text": "文本",
    "image": "图片",
    "video": "视频"
}
# Fallback label ("unknown") for content types missing from CONTENT_TYPE_CODECS.
NOT_EXIST = "未知"


class WechatContentAnalyzeQuerier(ElasticSearchQuerier):
    """Per-group WeChat article statistics (doc counts, reads, likes).

    Articles matching a ``biz_code`` within a publish-time window are
    bucketed by ``by_field`` (category by default) through an Elasticsearch
    terms aggregation; each bucket's read/like totals are folded into one
    summary row per group.
    """

    def __init__(self, es, index, doc_type):
        super(WechatContentAnalyzeQuerier, self).__init__(es, index, doc_type)

    def _build_query(self, args):
        """Build the ES request from the query args.

        args keys: 'biz_code' (required), 'from'/'to' (optional bounds for
        ``publish_timestamp``), 'by_field' (optional grouping field name).

        Returns a ``(query, kwargs, params)`` triple for the base class.
        Raises ValueError when 'biz_code' is missing.
        """
        biz_code = args.get('biz_code', None)
        from_date = args.get('from', None)
        to_date = args.get('to', None)
        by_field = args.get('by_field', None)
        if biz_code is None:
            raise ValueError('"biz_code" is needed.')
        if not by_field:
            by_field = CATEGORY
        # The public name 'category' maps to the stored field 'category_0'.
        if by_field == CATEGORY:
            by_field = CATEGORY_0
        query = self._gen_query(biz_code, from_date, to_date, by_field)
        return query, {}, {'biz_code': biz_code, 'from': from_date, 'to': to_date, 'by_field': by_field}

    def _build_result(self, es_result, param):
        """Convert the raw ES aggregation response into a list of dicts,
        one per bucket, each carrying count/ratio/read/like statistics."""
        agg = es_result['aggregations']
        data = extract_result(agg)
        total = sum(data['doc_counts'])
        # float() keeps the ratios correct under Python 2 integer division
        # (this module uses Python-2 style super()); guard total == 0 so an
        # all-zero bucket set cannot raise ZeroDivisionError.
        doc_ratios = [float(d) / total if total else 0 for d in data['doc_counts']]
        like_rates = [0 if reads == 0 else float(likes) / reads
                      for likes, reads in zip(data['sum_likes'], data['sum_reads'])]
        # 'dates' actually holds the bucket keys (group labels) — see
        # extract_result; decode turns them into display labels.
        categories = decode(param['by_field'], data['dates'])
        ret = []
        for i in range(len(categories)):
            ret.append({
                'category': categories[i],
                'doc_counts': data['doc_counts'][i],
                'doc_ratios': doc_ratios[i],
                'sum_reads': data['sum_reads'][i],
                'avg_reads': data['avg_reads'][i],
                'max_reads': data['max_reads'][i],
                'sum_likes': data['sum_likes'][i],
                'like_rates': like_rates[i]
            })
        return ret

    @staticmethod
    def _gen_query(biz_code, from_date, to_date, by_field):
        """Return the ES request body: filter by biz_code and publish-time
        window, then a terms aggregation on ``by_field`` ordered by total
        reads, with sum/max read and sum like sub-aggregations (missing
        values counted as 0). ``size: 0`` suppresses the hit list."""
        query = {
            "query": {
                "bool": {
                    "filter": [
                        {'term': {'biz_code': biz_code}},
                        {
                            "range": {
                                "publish_timestamp": {
                                    "from": from_date,
                                    "to": to_date
                                }
                            }
                        }
                    ]
                }
            },
            "aggs": {
                "by_field": {
                    "terms": {
                        "field": by_field,
                        "order": {"sum_read_num": "desc"}
                    },
                    "aggs": {
                        "sum_read_num": {
                            "sum": {
                                "field": "read_num", "missing": 0
                            }
                        },
                        "max_read_num": {
                            "max": {
                                "field": "read_num", "missing": 0
                            }
                        },
                        "sum_like_num": {
                            "sum": {
                                "field": "like_num", "missing": 0
                            }
                        }
                    }
                }
            },
            "size": 0
        }
        return query


def extract_result(agg):
    """Flatten the 'by_field' terms-aggregation buckets into parallel lists.

    Returns a dict of equal-length lists: 'doc_counts', 'sum_reads',
    'avg_reads' (0 for empty buckets), 'max_reads' (0 for empty buckets),
    'sum_likes', and 'dates' (the raw bucket keys, i.e. group labels).
    """
    buckets = agg['by_field']['buckets']
    doc_counts = [b['doc_count'] for b in buckets]
    sum_reads = [b['sum_read_num']['value'] for b in buckets]
    avg_reads = [0 if count == 0 else reads / count
                 for count, reads in zip(doc_counts, sum_reads)]
    max_reads = [0 if b['doc_count'] == 0 else b['max_read_num']['value']
                 for b in buckets]
    sum_likes = [b['sum_like_num']['value'] for b in buckets]
    dates = [b['key'] for b in buckets]

    return {
        'doc_counts': doc_counts,
        'sum_reads': sum_reads,
        'avg_reads': avg_reads,
        'max_reads': max_reads,
        'sum_likes': sum_likes,
        'dates': dates
    }


def decode(by_field, data):
    """Translate a list of raw bucket keys into display labels.

    Category keys are already human-readable and pass through unchanged;
    any other grouping field is assumed to be content_type and is mapped
    via CONTENT_TYPE_CODECS, falling back to NOT_EXIST.
    """
    if by_field in (CATEGORY, CATEGORY_0):
        return data
    return [CONTENT_TYPE_CODECS.get(key, NOT_EXIST) for key in data]


def decode_one(by_field, x):
    """Translate a single raw bucket key into its display label.

    Mirrors decode(): category keys pass through unchanged, anything else
    is looked up in CONTENT_TYPE_CODECS with NOT_EXIST as fallback.

    Fix: also accept CATEGORY_0 as a pass-through field. _build_query
    rewrites CATEGORY to CATEGORY_0 before storing it in params, so the
    original CATEGORY-only check would wrongly send category keys through
    the content-type codec (yielding NOT_EXIST), inconsistent with decode().
    """
    if by_field == CATEGORY or by_field == CATEGORY_0:
        ret = x
    else:
        ret = CONTENT_TYPE_CODECS.get(x, NOT_EXIST)
    return ret
