import json
from typing import Union, List
from datetime import datetime, date
from dateutil.relativedelta import relativedelta
from dateutil.parser import parse, ParserError
from app.monitor_logs.models import MonitorLogService, MonitorLogGeneralPolicy

import requests
from sqlalchemy.orm import Session

from common.utils import to_b64encode


class Opensearch(object):
    """Minimal OpenSearch HTTP client used by the log-alarm pipeline.

    Provides cluster-status helpers plus SQL-plugin queries that aggregate
    error-log counts per application. Every request uses Basic auth and the
    shared timeout configured at construction time.
    """

    def __init__(self, server: str = '127.0.0.1', scheme: str = "http", port: int = 9200, username: str = None,
                 password: str = None, timeout=5):
        """Store the base URL, Basic-Auth headers and the per-request timeout."""
        self.url = f"{scheme}://{server}:{port}"
        self.headers = {
            'Content-Type': 'application/json',
            'Authorization': f'Basic {to_b64encode(f"{username}:{password}")}'
        }
        self.timeout = timeout

    def cluster_info(self):
        """GET the root endpoint; return parsed JSON, or the raw body text on a non-200 status."""
        response = requests.request("GET", self.url, headers=self.headers, timeout=self.timeout)
        if response.status_code != 200:
            return response.text
        return response.json()

    def cluster_health(self):
        """GET /_cluster/health; return parsed JSON, or the raw body text on a non-200 status."""
        response = requests.request("GET", f"{self.url}/_cluster/health", headers=self.headers, timeout=self.timeout)
        if response.status_code != 200:
            return response.text
        return response.json()

    @classmethod
    def cst_to_utc(cls, cst_datetime: Union[datetime, date, str]) -> str:
        """Convert a CST (UTC+8) timestamp to a UTC string for OpenSearch.

        Accepts datetime/date/str as the annotation promises (previously a
        datetime/date input crashed because dateutil's parse() only takes
        strings). Fix: the millisecond separator changed from ',' to '.' —
        OpenSearch's Java-time date parsing rejects the comma form (the
        working example in this file's original draft also used '.000').
        """
        if not isinstance(cst_datetime, datetime):
            # str(...) covers both plain strings and datetime.date inputs.
            cst_datetime = parse(str(cst_datetime))
        return (cst_datetime - relativedelta(hours=8)).strftime("%Y-%m-%dT%H:%M:%S.000Z")

    @classmethod
    def time_compare(cls, begin_time: Union[datetime, str], end_time: Union[datetime, str]) -> bool:
        """Return True iff now() falls strictly between begin_time and end_time.

        Strings are parsed with dateutil; datetime objects are compared as-is
        (previously datetimes were fed to parse(), hit TypeError and always
        yielded False). None or unparseable inputs yield False.
        """
        try:
            if isinstance(begin_time, str):
                begin_time = parse(begin_time)
            if isinstance(end_time, str):
                end_time = parse(end_time)
            return begin_time < datetime.now() < end_time
        except (TypeError, ParserError):
            return False

    # Coarse first pass: find apps whose error count exceeds their alarm
    # threshold, excluding apps currently inside a maintenance window.
    def get_alarm_apps(self, db: Session, index: str, cst_begin_time: str, cst_end_time: str, log_level: str = "ERROR"):
        """Return the list of app names that should alarm for the given window.

        Runs a SQL-plugin aggregation of per-app `log_level` hit counts
        between the two CST timestamps, then filters each app against its
        monitor policy. On a non-200 reply the raw body text is returned
        (kept for backward compatibility with existing callers).

        NOTE(review): `index` and `log_level` are interpolated straight into
        the SQL text — callers must not pass untrusted values here.
        """
        utc_begin_time = self.cst_to_utc(cst_begin_time)
        utc_end_time = self.cst_to_utc(cst_end_time)
        payload = json.dumps({
            "query": f"""
                        SELECT fields.app.keyword FROM {index}
                        WHERE @timestamp BETWEEN '{utc_begin_time}' AND '{utc_end_time}' AND MATCH_QUERY(level, '{log_level}')
                        GROUP BY fields.app.keyword
                     """
        })
        # Fix: timeout was missing here although the GET helpers pass it.
        res = requests.request("POST", f"{self.url}/_plugins/_sql?format=json", headers=self.headers, data=payload,
                               timeout=self.timeout)
        if res.status_code != 200:
            return res.text

        alarm_apps = []
        # Guarded .get(..., {}) chain: a response without aggregations no
        # longer raises AttributeError.
        buckets = res.json().get('aggregations', {}).get('fields.app.keyword', {}).get('buckets', [])
        for app in buckets:
            app_name = app.get("key")
            policy = MonitorLogService.get_policy_by_app_name(db, app_name)
            # Fix: the old code dereferenced `policy` before its truthiness
            # check, so an app with no policy crashed with AttributeError.
            if policy:
                if policy.get("is_maintain"):
                    continue
                if self.time_compare(policy.get("maintain_begin_time"), policy.get("maintain_end_time")):
                    continue
                threshold = policy.get("alarm_threshold")
            else:
                threshold = 2  # default threshold for apps without a policy
            if app.get("doc_count") > threshold:
                alarm_apps.append(app_name)
        return alarm_apps

    # Per-service error-log pass: filter errors, alarm above threshold,
    # truncate message length, return the formatted log entries.
    def get_alarm_messages(self, apps: List, index: str, cst_begin_time: str, cst_end_time: str, log_level: str):
        """WIP: fetch the per-app aggregation for the window.

        Returns parsed JSON, or the raw body text on a non-200 reply
        (previously the response was fetched and silently discarded).

        TODO(review): the remaining steps sketched in the original
        commented-out elasticsearch-dsl draft are still unimplemented:
        per-service `exclude_keywords` filtering, `alarm_threshold`
        comparison, message-length formatting and compare_message() grouping.
        `apps` is accepted for that future filtering but is not used yet.
        """
        utc_begin_time = self.cst_to_utc(cst_begin_time)
        utc_end_time = self.cst_to_utc(cst_end_time)
        payload = json.dumps({
            "query": f"""
                        SELECT fields.app.keyword FROM {index}
                        WHERE @timestamp BETWEEN '{utc_begin_time}' AND '{utc_end_time}' AND MATCH_QUERY(level, '{log_level}')
                        GROUP BY fields.app.keyword
                     """
        })
        # Fix: timeout was missing here as well.
        response = requests.request("POST", f"{self.url}/_plugins/_sql?format=json", headers=self.headers,
                                    data=payload, timeout=self.timeout)
        if response.status_code != 200:
            return response.text
        return response.json()

if __name__ == '__main__':
    # Ad-hoc smoke test against a dev cluster.
    # NOTE(review): hardcoded host/credentials — move to config or env vars
    # before this leaves a developer machine.
    opensearch = Opensearch(server='106.52.172.23', port=9201, username='admin', password='admin')
    from common.database import SessionLocal

    # Fix: the original rebound the name `SessionLocal` to the session
    # instance, shadowing the factory; also the session was never closed.
    db_session = SessionLocal()
    try:
        app_list = opensearch.get_alarm_apps(db_session, "logstash-java-*", '2022-01-29 14:00:00',
                                             '2022-01-29 16:00:00', 'ERROR')
        print(app_list)
    finally:
        db_session.close()
