# -*- coding: utf-8 -*-
# trend_analyze.py
# Created by Hardy on 26th, Jan
# Copyright 2017 杭州网川教育有限公司. All rights reserved.

from querier.esquerier import ElasticSearchQuerier


class WechatTrendAnalyzeQuerier(ElasticSearchQuerier):
    """Query per-day trend metrics (reads/likes) for one WeChat account.

    Builds an Elasticsearch date-histogram aggregation over the articles
    of the account identified by ``biz_code`` and flattens the response
    into parallel daily time series.
    """

    def __init__(self, es, index, doc_type):
        super(WechatTrendAnalyzeQuerier, self).__init__(es, index, doc_type)

    def _build_query(self, args):
        """Validate request args and build the aggregation query.

        :param args: mapping with required ``'biz_code'`` and optional
            ``'from'`` / ``'to'`` publish-timestamp bounds (``None``
            leaves that side of the range open).
        :return: ``(query, kwargs, params)`` — the ES query body, an
            empty kwargs dict, and the echo params consumed later by
            ``_build_result``.
        :raises ValueError: if ``'biz_code'`` is missing.
        """
        biz_code = args.get('biz_code', None)
        from_date = args.get('from', None)
        to_date = args.get('to', None)
        if biz_code is None:
            raise ValueError('"biz_code" is needed.')

        query = self._gen_query(biz_code, from_date, to_date)
        return query, {}, {'biz_code': biz_code, 'from': from_date, 'to': to_date}

    def _build_result(self, es_result, param):
        """Shape the raw ES response into the API payload.

        :param es_result: raw Elasticsearch response dict; only its
            ``'aggregations'`` section is used (the hit total was
            previously read into an unused local and has been dropped).
        :param param: the echo params produced by ``_build_query``.
        :return: dict with the request echo (``biz_code``/``from``/``to``)
            plus the per-day series produced by ``extract_result``.
        """
        result = {
            'biz_code': param['biz_code'],
            'from': param['from'],
            'to': param['to'],
        }
        # extract_result yields exactly the series keys the payload needs:
        # doc_counts, sum_reads, head_reads, avg_reads, max_reads,
        # sum_likes, avg_likes, dates.  Merging avoids re-listing them.
        result.update(extract_result(es_result['aggregations']))
        return result

    @staticmethod
    def _gen_query(biz_code, from_date, to_date):
        """Build the ES query body: filtered match + 1-day date histogram.

        Per-bucket sub-aggregations: sum/max of ``read_num``, sum of
        ``like_num``, and two Painless scripts that restrict sums to the
        head article of each push (``idx == "1"``).

        :return: the complete query body dict (``size: 0`` — aggregations
            only, no hits).
        """
        query = {
            "query": {
                "bool": {
                    "filter": [
                        {'term': {'biz_code': biz_code}},
                        {
                            "range": {
                                "publish_timestamp": {
                                    "from": from_date,
                                    "to": to_date
                                }
                            }
                        }
                    ]
                }
            },
            "aggs": {
                "time_hist": {
                    "date_histogram": {
                        "field": "publish_timestamp",
                        "interval": "1d"
                    },
                    "aggs": {
                        "sum_read_num": {
                            "sum": {
                                "field": "read_num", "missing": 0
                            }
                        },
                        "max_read_num": {
                            "max": {
                                "field": "read_num", "missing": 0
                            }
                        },
                        "sum_like_num": {
                            "sum": {
                                "field": "like_num", "missing": 0
                            }
                        },
                        # NOTE(review): 'inline' is the pre-5.6 script key;
                        # newer ES expects 'source'.  Left as-is — confirm
                        # the cluster version before changing.
                        "sum_head_read_num": {
                            "sum": {
                                "script": {
                                    'lang': 'painless',
                                    'inline': "if (doc.idx.value == \"1\") {return doc.read_num.value} else {return 0}"
                                }
                            }
                        },
                        "sum_head_count": {
                            "sum": {
                                "script": {
                                    'lang': 'painless',
                                    'inline': "if (doc.idx.value == \"1\") {return 1} else {return 0}"
                                }
                            }
                        }
                    }
                }
            },
            "size": 0
        }

        return query


def extract_result(agg):
    """Flatten the ``time_hist`` date-histogram aggregation into series.

    :param agg: the ``aggregations`` section of an Elasticsearch
        response, containing a ``time_hist`` bucket list.
    :return: dict of parallel lists (one entry per daily bucket):
        doc counts, read/like sums, head-article reads, averages, the
        per-bucket read maximum, and the bucket date strings.
    """
    series = {
        'doc_counts': [],
        'sum_reads': [],
        'head_reads': [],
        'avg_reads': [],
        'max_reads': [],
        'sum_likes': [],
        'avg_likes': [],
        'dates': []
    }

    for bucket in agg['time_hist']['buckets']:
        count = bucket['doc_count']
        read_sum = bucket['sum_read_num']['value']
        like_sum = bucket['sum_like_num']['value']

        series['doc_counts'].append(count)
        series['sum_reads'].append(read_sum)
        series['head_reads'].append(bucket['sum_head_read_num']['value'])
        # Empty buckets contribute 0 for averages and the read maximum.
        series['avg_reads'].append(read_sum / count if count != 0 else 0)
        series['max_reads'].append(bucket['max_read_num']['value'] if count != 0 else 0)
        series['sum_likes'].append(like_sum)
        series['avg_likes'].append(like_sum / count if count != 0 else 0)
        series['dates'].append(bucket['key_as_string'])

    return series
