# -*- coding: utf-8 -*-
# time_slot_analyze.py
# Created by Hardy on 26th, Jan
# Copyright 2017 杭州网川教育有限公司. All rights reserved.

from querier.esquerier import ElasticSearchQuerier

# Display labels for day-of-week bucket keys 1-7 (Monday-first, as produced
# by the painless getDayOfWeek() script below); label index = key - 1.
WEEKDAYS = ["周一", "周二", "周三", "周四", "周五", "周六", "周日"]

# Supported values for the 'time_slot_type' request argument.
WEEKDAY = 'weekday'
HOUR = 'hour'


class WechatTimeSlotAnalyzeQuerier(ElasticSearchQuerier):
    """Elasticsearch querier that aggregates WeChat article metrics by the
    publish time slot: either day-of-week (bucket keys 1-7) or hour-of-day
    (bucket keys 0-23), chosen via the request's 'time_slot_type' argument.
    """

    def __init__(self, es, index, doc_type):
        super(WechatTimeSlotAnalyzeQuerier, self).__init__(es, index, doc_type)
        # self.nlp_service = nlp_service

    def _build_query(self, args):
        """Validate request args and build the ES request body.

        Returns a 3-tuple consumed by the base querier: the query body,
        extra query options (none here), and the normalized params that are
        passed through to ``_build_result``.

        Raises:
            ValueError: if 'biz_code' is missing from ``args``.
        """
        biz_code = args.get('biz_code', None)
        from_date = args.get('from', None)
        to_date = args.get('to', None)
        time_slot_type = args.get('time_slot_type', None)
        # Default to day-of-week bucketing when the caller did not choose.
        if not time_slot_type:
            time_slot_type = WEEKDAY

        if biz_code is None:
            raise ValueError('"biz_code" is needed.')

        query = self._gen_query(biz_code, from_date, to_date, time_slot_type)
        return query, {}, {'biz_code': biz_code, 'from': from_date, 'to': to_date, 'time_slot_type': time_slot_type}

    def _build_result(self, es_result, param):
        """Shape the raw ES aggregation response into the API payload.

        'times' holds one display label per slot (weekday name or
        zero-padded hour, see ``decode``); every list under 'values' is
        positionally aligned with 'times'.
        """
        # total = es_result['hits']['total']
        agg = es_result['aggregations']
        data = extract_result(agg, param['time_slot_type'])
        return {
            "times": decode(param['time_slot_type'], data['dates']),
            'biz_code': param['biz_code'],
            'from': param['from'],
            'to': param['to'],
            'time_slot_type': param['time_slot_type'],
            "values": {
                'doc_counts': data['doc_counts'],
                'sum_reads': data['sum_reads'],
                'head_reads': data['head_reads'],
                'avg_reads': data['avg_reads'],
                'max_reads': data['max_reads'],
                'sum_likes': data['sum_likes']
            }

        }

    @staticmethod
    def _gen_query(biz_code, from_date, to_date, time_slot_type):
        """Build the ES request body.

        Filters documents by ``biz_code`` and a ``publish_timestamp`` range,
        then buckets them with a script histogram keyed on day-of-week or
        hour-of-day, with per-bucket read/like metric sub-aggregations.
        """
        query = {
            "query": {
                "bool": {
                    "filter": [
                        {'term': {'biz_code': biz_code}},
                        {
                            "range": {
                                "publish_timestamp": {
                                    "from": from_date,
                                    "to": to_date
                                }
                            }
                        }
                    ]
                }
            },
            "aggs": {
                "time_hist": {
                    "histogram": {
                        # Bucket key is day-of-week 1-7 (Monday-first, the
                        # mapping decode() relies on) or hour-of-day 0-23.
                        "script": {
                            'lang': "painless",
                            'inline': "doc['publish_timestamp'].date.getDayOfWeek()" if time_slot_type == WEEKDAY else
                            "doc['publish_timestamp'].date.getHourOfDay()",
                        },
                        "interval": 1,
                        "order": {"_key": "asc"}
                    },
                    "aggs": {
                        "sum_read_num": {
                            "sum": {
                                "field": "read_num", "missing": 0
                            }
                        },
                        "max_read_num": {
                            "max": {
                                "field": "read_num", "missing": 0
                            }
                        },
                        "sum_like_num": {
                            "sum": {
                                "field": "like_num", "missing": 0
                            }
                        },
                        # NOTE(review): idx == "1" presumably marks the head
                        # (first) article of a push — confirm with the mapping.
                        "sum_head_read_num": {
                            "sum": {
                                "script": {
                                    'lang': 'painless',
                                    'inline': "if (doc.idx.value == \"1\") {return doc.read_num.value} else {return 0}"
                                }
                            }
                        },
                        "sum_head_count": {
                            "sum": {
                                "script": {
                                    'lang': 'painless',
                                    'inline': "if (doc.idx.value == \"1\") {return 1} else {return 0}"
                                }
                            }
                        }

                    }
                }
            },

            # Aggregation-only query: do not return any hit documents.
            "size": 0
        }

        return query


def extract_result(agg, slot_type):
    """Flatten the 'time_hist' aggregation into per-metric lists.

    Every slot (hour 0-23 or weekday 1-7) gets an entry in each metric
    list, defaulting to 0 for slots ES returned no bucket for; the lists
    are positionally aligned with the returned 'dates' list.
    """
    slots = list(range(24)) if slot_type == 'hour' else list(range(1, 8))
    metric_names = ('doc_counts', 'sum_reads', 'head_reads',
                    'avg_reads', 'max_reads', 'sum_likes')
    # One {slot: value} table per metric, pre-filled with zeros.
    tables = {name: {s: 0 for s in slots} for name in metric_names}

    for bucket in agg['time_hist']['buckets']:
        key = bucket['key']
        count = bucket['doc_count']
        reads = bucket['sum_read_num']['value']
        tables['doc_counts'][key] = count
        tables['sum_reads'][key] = reads
        tables['head_reads'][key] = bucket['sum_head_read_num']['value']
        tables['avg_reads'][key] = reads / count if count else 0
        tables['max_reads'][key] = bucket['max_read_num']['value'] if count else 0
        tables['sum_likes'][key] = bucket['sum_like_num']['value']

    result = {name: [tables[name][s] for s in slots] for name in metric_names}
    result['dates'] = slots
    return result


def decode(time_slot_type, data):
    """Convert numeric slot keys into display labels.

    For WEEKDAY slots, keys 1-7 map to the Chinese weekday names in
    ``WEEKDAYS``; any out-of-range key becomes ''. For hour slots, each
    key is zero-padded to two digits ('00'..'23').
    """
    if time_slot_type == WEEKDAY:
        # Guard BOTH bounds: the previous upper-bound-only check let
        # i <= 0 wrap via Python negative indexing (e.g. 0 -> WEEKDAYS[-1]
        # == '周日') instead of yielding the '' placeholder.
        return [WEEKDAYS[i - 1] if 1 <= i <= len(WEEKDAYS) else ''
                for i in data]
    # Hour-of-day: pad to two digits, e.g. 7 -> '07'.
    return [str(i).zfill(2) for i in data]
