from elasticsearch import Elasticsearch
from typing import Dict, Optional
from elasticsearch_dsl import Search, Q, A
from app.monitor_logs.utils import collection
from common.utils import cst_to_utc
from app.monitor_logs.utils import format_string_len

__all__ = ['abnormal_service_agg_query', 'abnormal_service_count_query', 'abnormal_service_log_query']


def abnormal_service_agg_query(es_client: Elasticsearch, index: str, cst_begin_time: str, cst_end_time: str,
                               log_level: str = "ERROR", data_size=50) -> Dict:
    """Aggregate log counts per service for one log level within a CST time window.

    :param es_client: connected Elasticsearch client
    :param index: index (or index pattern) to query
    :param cst_begin_time: window start, CST, converted to UTC via cst_to_utc
    :param cst_end_time: window end, CST, converted to UTC via cst_to_utc
    :param log_level: level matched against the ``level`` field (default "ERROR")
    :param data_size: number of raw hits to fetch alongside the aggregation
    :return: mapping of service name -> doc count,
             e.g. {'message-sms': 794, 'user-center': 2953, 'iam': 1533}
    """
    time_q = Q({'range': {'@timestamp': {"from": cst_to_utc(cst_begin_time), "to": cst_to_utc(cst_end_time)}}})
    level_q = Q({'match': {'level': {"query": f'{log_level}'}}})
    search = Search(using=es_client, index=index).filter(time_q & level_q)
    # Terms aggregation on the service-name keyword field; size=300 caps distinct services.
    search.aggs.bucket('apps_bucket', A('terms', field='fields.app.keyword', size=300))
    search = search.extra(from_=0, size=data_size)
    response = search.execute()
    return {bucket['key']: bucket['doc_count'] for bucket in response.aggs.apps_bucket}


def abnormal_service_query(es_client: Elasticsearch, index: str, service: str, cst_begin_time: str, cst_end_time: str,
                           log_level: str, exclude_keywords=None, data_size=5):
    """Search one service's logs at a given level, optionally excluding messages.

    :param es_client: connected Elasticsearch client
    :param index: index (or index pattern) to query
    :param service: exact service name (term on ``fields.app.keyword``)
    :param cst_begin_time: window start, CST, converted to UTC via cst_to_utc
    :param cst_end_time: window end, CST, converted to UTC via cst_to_utc
    :param log_level: level matched against the ``level`` field
    :param exclude_keywords: message phrases to exclude (match_phrase, any match excludes)
    :param data_size: number of hits to return
    :return: elasticsearch_dsl Response with exact totals (track_total_hits=True)
    """
    if exclude_keywords is None:
        exclude_keywords = []
    f = Q({'range': {'@timestamp': {"from": cst_to_utc(cst_begin_time), "to": cst_to_utc(cst_end_time)}}}) & \
        Q('term', **{'fields.app.keyword': service}) & \
        Q({'match': {'level': {"query": log_level}}})
    s = Search(using=es_client, index=index).extra(track_total_hits=True).filter(f)
    if exclude_keywords:
        # Only build the exclusion query when there is something to exclude;
        # the original comprehension re-tested `if exclude_keywords` per keyword, redundantly.
        e = Q('bool', should=[Q('match_phrase', message=keyword) for keyword in exclude_keywords],
              minimum_should_match=1)
        s = s.exclude(e)
    s = s.extra(from_=0, size=data_size)
    return s.execute()


def abnormal_service_count_query(es_client: Elasticsearch, index: str, service: str, cst_begin_time: str, cst_end_time: str,
                                 log_level: str, exclude_keywords: Optional[list] = None) -> int:
    """Count a service's log hits at a given level within a CST time window.

    :param exclude_keywords: message phrases to exclude; defaults to None (no exclusion).
        The original default was the *type* ``list``, which is truthy and not iterable
        as keywords — it made the default call raise TypeError downstream.
    :return: exact hit count (track_total_hits is set by abnormal_service_query)
    """
    res = abnormal_service_query(es_client, index, service, cst_begin_time, cst_end_time, log_level, exclude_keywords)
    # res.hits.total looks like {'value': 794, 'relation': 'eq'} (ES 7+ response shape,
    # per the original sample comment); return the int as the declared return type promises.
    return res.hits.total.value


def abnormal_service_log_query(es_client: Elasticsearch, index: str, service: str, cst_begin_time: str, cst_end_time: str,
                               log_level: str, exclude_keywords: Optional[list] = None) -> list:
    """Fetch a service's log messages at a given level, truncated for display.

    :param es_client: connected Elasticsearch client
    :param index: index (or index pattern) to query
    :param service: exact service name
    :param cst_begin_time: window start, CST
    :param cst_end_time: window end, CST
    :param log_level: level matched against the ``level`` field
    :param exclude_keywords: message phrases to exclude; defaults to None (no exclusion).
        The original default was the *type* ``list``, which made the default call
        raise TypeError downstream.
    :return: truncated abnormal log lines, e.g.
        [
            '[INFO ] [ifast-cms] 2022-07-27 15:59:53.447 [http-nio-8080-exec-1] [] [] [] com.welab.filter.AuthorizeFilter - ...',
            '[INFO ] [ifast-cms] 2022-07-27 15:59:53.447 [http-nio-8080-exec-1] [] [] [] com.welab.web.controller.AppController - ...'
        ]
    """
    res = abnormal_service_query(es_client, index, service, cst_begin_time, cst_end_time, log_level, exclude_keywords)
    return [format_string_len(hit.message) for hit in res]


def abnormal_nginx_agg_query(es_client: Elasticsearch, index: str, cst_begin_time: str, cst_end_time: str, data_size=100) -> Dict:
    """Aggregate nginx log counts per service within a CST time window.

    :param es_client: connected Elasticsearch client
    :param index: index (or index pattern) to query
    :param cst_begin_time: window start, CST, converted to UTC via cst_to_utc
    :param cst_end_time: window end, CST, converted to UTC via cst_to_utc
    :param data_size: number of raw hits to fetch alongside the aggregation
    :return: mapping of bucket key -> doc count
    """
    f = Q({'range': {'@timestamp': {"from": cst_to_utc(cst_begin_time), "to": cst_to_utc(cst_end_time)}}})
    s = Search(using=es_client, index=index)
    s = s.filter(f)
    a = A('terms', field='fields.app.keyword', size=300)
    s.aggs.bucket('response_bucket', a)
    s = s.extra(from_=0, size=data_size)
    res = s.execute()
    # Bug fix: the bucket is registered as 'response_bucket', but the original read
    # res.aggs.apps_bucket, which does not exist on this response and fails at runtime.
    service = {bucket['key']: bucket['doc_count'] for bucket in res.aggs.response_bucket}
    return service


def abnormal_service_date_histogram(es_client: Elasticsearch, index: str, service: str, cst_begin_time: str, cst_end_time: str,
                                    interval_min: int = 5):
    """Bucket one service's ERROR counts over time with a date histogram.

    :param es_client: connected Elasticsearch client
    :param index: index (or index pattern) to query
    :param service: exact service name (term on ``fields.app.keyword``)
    :param cst_begin_time: window start, CST, converted to UTC via cst_to_utc
    :param cst_end_time: window end, CST, converted to UTC via cst_to_utc
    :param interval_min: histogram bucket width, in minutes
    :return: list of {"cst_datetime": str, "doc_count": int} per non-empty bucket
    """
    s = Search(using=es_client, index=index)
    # Range filter on @timestamp (start/end converted from CST to UTC),
    # combined with an exact-match filter on the service name.
    f = Q({'range': {'@timestamp': {"from": cst_to_utc(cst_begin_time), "to": cst_to_utc(cst_end_time)}}}) & \
        Q('term', **{'fields.app.keyword': service})
    s = s.filter(f)
    # Date-histogram aggregation on @timestamp with interval_min-minute buckets;
    # nested inside each bucket, a filter sub-aggregation counting docs with level == "ERROR".
    # NOTE(review): `interval` is the legacy parameter name (pre-ES 7.x
    # fixed_interval/calendar_interval split) — confirm against the cluster version.
    s.aggs.bucket("time_buckets", "date_histogram", field="@timestamp", interval=f"{interval_min}m") \
        .bucket("count", "filter", term={"level.keyword": "ERROR"})

    # Execute the search and fetch the response.
    response = s.execute()

    # Walk every time bucket in the histogram.
    res = []
    for time_bucket in response.aggregations.time_buckets.buckets:
        # Each bucket carries its timestamp key and document counts, e.g.
        # {'key_as_string': '2023-03-27T05:50:00.000Z', 'key': 1679896200000, 'doc_count': 162, 'count': {'doc_count': 31}}
        if time_bucket.key_as_string:
            # Convert the bucket's UTC key back to a CST datetime string via the project helper.
            cst_datetime = collection(time_bucket.key_as_string, cst=False, dtype='cst_str')
            # doc_count from the nested "count" filter agg, i.e. ERROR-level docs only.
            doc_count = time_bucket.count.doc_count
            res.append({"cst_datetime": cst_datetime, "doc_count": doc_count})
    return res
    # Sample return value:
    # [{'cst_datetime': '2023-03-27 14:20:00', 'doc_count': 27},{'cst_datetime': '2023-03-27 14:25:00', 'doc_count': 61},{'cst_datetime': '2023-03-27 14:30:00', 'doc_count': 61}]


if __name__ == '__main__':
    # Ad-hoc smoke test: aggregate nginx response codes for the last 15 minutes,
    # then print a 5-minute ERROR histogram for one service.
    import datetime
    from datetime import timedelta
    from pprint import pprint

    from common.elastic import get_es

    es = get_es()
    # Removed the unused `client = Elasticsearch(hosts=['192.168.31.11'])` —
    # dead code with a hard-coded IP; all queries below go through `es`.

    now = datetime.datetime.now()
    cst_begin_time = (now - timedelta(minutes=15)).strftime("%Y-%m-%d %H:%M:%S")
    cst_end_time = now.strftime("%Y-%m-%d %H:%M:%S")
    f = Q({'range': {'@timestamp': {"from": cst_to_utc(cst_begin_time), "to": cst_to_utc(cst_end_time)}}})
    s = Search(using=es, index="logstash-openresty_access*")
    s = s.filter(f)
    a = A('terms', field='response', size=100000000)
    s.aggs.bucket('apps_bucket', a)
    s = s.extra(from_=0, size=10000)
    result = s.execute()
    pprint({bucket['key']: bucket['doc_count'] for bucket in result.aggs.apps_bucket})

    pprint(abnormal_service_date_histogram(es_client=es, index="logstash-applog*", service="welab-skyscanner-matsu",
                                           cst_begin_time=cst_begin_time, cst_end_time=cst_end_time, interval_min=5))