# -*- coding: UTF-8 -*-

import re

import pandas as pd
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search, A, Q

from common.utils_base import DtUtil

__all__ = ['Elastic']


class Elastic(object):
    """Thin helper around an Elasticsearch cluster for dashboard queries.

    Every query helper restricts hits to a ``[start_datetime, end_datetime]``
    window (converted to CST epoch milliseconds by ``DtUtil.collection``) and
    to a fixed application via a ``fields.app.keyword`` term filter.
    """

    def __init__(self, server=None, scheme="http", port=9200):
        # BUGFIX: default was a mutable `[]` shared across calls; use None
        # and substitute a fresh list so behavior is otherwise unchanged.
        self.server = [] if server is None else server
        self.port = port
        self.scheme = scheme
        self.es = self.es_init()

    def es_init(self):
        """Create and memoize the Elasticsearch client.

        Returns:
            The ``Elasticsearch`` client on success, ``False`` on a
            connection/initialization error, and ``None`` when ``server``
            is not a list (preserving the original contract).
        """
        try:
            if isinstance(self.server, list):
                self.es = Elasticsearch(self.server, port=self.port, scheme=self.scheme)
                return self.es
        except Exception as e:
            print(e)
            return False

    def _window_filter(self, start_datetime, end_datetime, app):
        """Build the shared @timestamp range + app term filter."""
        return Q("range", **{'@timestamp': {
            "from": int(DtUtil.collection(start_datetime, dtype='cst_ts_ms')),
            "to": int(DtUtil.collection(end_datetime, dtype='cst_ts_ms'))}}) & \
            Q("term", fields__app__keyword=app)

    def product_code(self, field, index, start_datetime, end_datetime):
        """Terms-aggregate ``field`` within the time window.

        Returns:
            list[dict]: records shaped ``{'name': ..., 'value': ...}``;
            empty list when the aggregation produced no buckets.
        """
        s = Search(using=self.es, index=index)
        s = s.filter(self._window_filter(start_datetime, end_datetime, 'jrocket2_openresty'))
        s.aggs.bucket(f'{field}_terms', A('terms', field=f'{field}.keyword'))
        response = s.execute()

        # BUGFIX: `data` was only assigned inside the hasattr branch, so a
        # missing aggregation raised NameError; default to empty instead.
        data = []
        if hasattr(response.aggs, f'{field}_terms'):
            # https://stackoverflow.com/questions/59632290/elasticsearch-aggregation-to-pandas-dataframe
            # Convert elasticsearch_dsl AttrDict buckets to plain dicts.
            data = [i._d_ for i in getattr(response.aggs, f'{field}_terms').buckets]
        if not data:
            return []

        df = pd.DataFrame(data)
        # Map the literal key '-' (hits without a product code) to 'APP'.
        df['key'] = df['key'].replace('-', 'APP')
        df.rename(columns={'key': 'name', 'doc_count': 'value'}, inplace=True)

        return df.to_dict(orient='records')

    def req_time(self, field, index, start_datetime, end_datetime):
        """Range-aggregate ``field`` (request time, seconds) into 1s buckets.

        Buckets: [1,2), [2,3), ... [6,7), then an open-ended [7, +inf).

        Returns:
            dict[str, list]: column-oriented dict with 'category' labels
            like ``"1s-12.3%"`` and raw 'value' doc counts.
        """
        s = Search(using=self.es, index=index)
        s = s.filter(self._window_filter(start_datetime, end_datetime, 'jrocket2_openresty'))
        max_bucket = 7  # renamed from `max` to avoid shadowing the builtin
        ranges = [{"from": i, "to": i + 1} for i in range(1, max_bucket)]
        # BUGFIX: the open-ended bucket started at max+1 (i.e. 8), leaving
        # requests in [7, 8) uncounted; start it at max_bucket instead.
        ranges.append({"from": max_bucket})
        s.aggs.bucket(f'{field}_terms', A('range', field=field, ranges=ranges))
        response = s.execute()

        data = []  # guard against NameError when the aggregation is missing
        if hasattr(response.aggs, f'{field}_terms'):
            data = [i._d_ for i in getattr(response.aggs, f'{field}_terms').buckets]
        if not data:
            return {}

        df = pd.DataFrame(data)
        df.drop(['from', 'to'], axis=1, inplace=True)
        # Label each bucket "Ns-P%" where P is its share of total hits.
        df['key'] = df.apply(lambda x: (
            f"{x['key'].split('-')[0].replace('.0','s')}-{round(float(x['doc_count']) / response.hits.total * 100, 1)}%"),
                             axis=1)
        df.rename(columns={'key': 'category', 'doc_count': 'value'}, inplace=True)

        return df.to_dict(orient='list')

    def map_applied(self, index, start_datetime, end_datetime):
        """Geo-locate users who received the applied-notification SMS.

        First extracts masked mobile numbers from SMS log messages, then
        looks up access-log hits (with geoip data) for those numbers.

        Returns:
            dict with 'df_city_location' (city -> [lon, lat]) and
            'df_city_value' ([{name, value}] wrapped per-record), or
            ``False`` when no mobiles matched or the lookup failed.
        """
        s = Search(using=self.es, index=index.get('sms_index_name'))
        f = self._window_filter(start_datetime, end_datetime, 'message-sms')
        q = Q("match_phrase", message='applied_notification') & Q("match", message='tags') & Q("match_phrase",
                                                                                              message='mobile')
        s = s.filter(f).query(q).extra(from_=0, size=10000)
        response = s.execute()

        # Match masked user mobile numbers, e.g. mobile='12345678***'.
        # (An unmasked variant would be r"mobile='(\d{11})'".)
        pattern = re.compile(r"mobile=\'(\d{8}\*\*\*)\'")
        mobiles = []
        for hit in response.hits:
            try:
                res_find_mobile = pattern.findall(hit.message)
            except AttributeError:
                # Hit has no `message` field; skip it.
                continue
            if res_find_mobile:
                mobiles.append(res_find_mobile[0])

        if not mobiles:
            return False

        search_key = " OR ".join(mobiles)
        s = Search(using=self.es, index=index.get('access_index_name'))
        # BUGFIX: the second exists-clause duplicated `geoip.latitude`;
        # it should require `geoip.longitude` as well.
        f = self._window_filter(start_datetime, end_datetime, 'jrocket2_openresty') & \
            Q('exists', field='geoip.latitude') & Q('exists', field='geoip.longitude') & \
            Q('exists', field='user_mobile')
        q = Q('query_string', query=search_key)
        s = s.filter(f).query(q).extra(from_=0, size=10000)
        try:
            response = s.execute()
            # BUGFIX: the original tested `hit.user_mobile not in location`
            # against a list of geoip dicts, so the membership test was
            # always True and duplicates were never filtered. Track seen
            # mobiles in a separate set.
            seen_mobiles = set()
            location = []
            for hit in response.hits:
                if hit.user_mobile not in seen_mobiles:
                    seen_mobiles.add(hit.user_mobile)
                    location.append(hit.geoip.to_dict())
        except Exception as e:
            print(e)
            return False

        df = pd.DataFrame(location)
        df = df[['city_name', 'latitude', 'longitude']].dropna(axis=0, how='any')
        df = df.groupby(['city_name', 'longitude', 'latitude']).size().reset_index(name='counts')
        # https://zhuanlan.zhihu.com/p/41202576
        # Force pandas to create a real copy (not a view) before mutating.
        df_city_location = df[['city_name', 'longitude', 'latitude']].copy()
        # Append Shenzhen coordinates as an extra row.
        df_city_location.loc[int(df_city_location.shape[0]) + 1] = ['Shenzhen', 114.1333, 22.5333]
        # Drop duplicate city coordinates, keeping the first occurrence.
        df_city_location.drop_duplicates(subset=['city_name'], keep='first', inplace=True)
        df_city_value = df.rename(columns={'city_name': 'name', 'counts': 'value'}, inplace=False)
        df_city_value.drop(['longitude', 'latitude'], axis=1, inplace=True)

        return {'df_city_location': df_city_location.set_index('city_name').T.to_dict(orient='list'),
                'df_city_value': [[i] for i in df_city_value.to_dict(orient='records')]}
