from __future__ import absolute_import

import logging
import traceback
from datetime import datetime, timezone, timedelta

from django.core.cache import cache
from django_elasticsearch_dsl.test.testcases import is_es_online
from elasticsearch_dsl.connections import connections
from huaweicloudsdkces.v2 import ListAlarmHistoriesRequest

from common.obs_tools import obs_client
from common.tools import ces_client

from ploto.celery import app as celery_app

from .apps import TraceConfig
from .middleware import (generate_alarm_cache, get_template, is_trigger,
                         save_alarm_record)

logger = logging.getLogger(__name__)

# Loaded once at import time; this template drives all three monitor tasks
# below (alarm rules, cache configuration, and the alarm record reference).
template_alarm = get_template(TraceConfig.template_alarm)


@celery_app.task
def monitor_event_alarm():
    """Scan cached event-alarm state and record any alarm that triggers.

    Iterates every cache key matching ``alarm_*_event``; for each cached
    rule state (dict entries only), looks up the matching rule in the alarm
    template and lets ``is_trigger`` decide whether to record the alarm.
    """
    alarm_keys = cache.keys("alarm_*_event")
    event_alarm_rules = template_alarm.get("alarm_rules")["EventAlarm"]
    for key in alarm_keys:
        cache_alarm = cache.get(key)
        for rule_name, cache_rule in cache_alarm.items():
            # Cached entries may also hold scalar bookkeeping values; only
            # dict entries describe a rule's state.
            if not isinstance(cache_rule, dict):
                continue
            # A cache entry can outlive its template rule (e.g. after a
            # template update); skip it instead of raising KeyError.
            rule = event_alarm_rules.get(rule_name)
            if rule is None:
                logger.warning("No EventAlarm rule in template for cached entry: %s", rule_name)
                continue
            trigger = is_trigger(template_alarm, cache_alarm, rule, rule_name, "EventAlarm")
            if trigger:
                logger.warning("The alarm: %s has been recorded", rule_name)
    return


@celery_app.task
def monitor_resource_alarm():
    """Poll Huawei Cloud CES for resource alarm histories and record them.

    Builds a [from, to] window ending "now" in Beijing time (UTC+8) whose
    length comes from the template's ``monitor_cycle`` (minutes), queries
    CES alarm histories per enabled rule, optionally filters by the rule's
    alarm-name whitelist, and persists every remaining history entry.
    """
    resource_alarm_rules = template_alarm.get("alarm_rules")["ResourceAlarm"]
    # BUG FIX: the original used datetime.today().replace(tzinfo=UTC+8),
    # which stamps the *naive local* time as Beijing time and is wrong on
    # any host not running in UTC+8. datetime.now(tz) converts correctly.
    tz_beijing = timezone(timedelta(hours=8))
    now = datetime.now(tz_beijing)
    window_start = now - timedelta(minutes=resource_alarm_rules["monitor_cycle"])
    # "%Z" renders as "UTC+08:00"; stripping the "UTC" prefix yields the
    # "+08:00" offset form that the CES API expects.
    dt_to = now.strftime("%Y-%m-%dT%H:%M:%S%Z").replace("UTC", "")
    dt_from = window_start.strftime("%Y-%m-%dT%H:%M:%S%Z").replace("UTC", "")
    for rule_name, rule in resource_alarm_rules.items():
        # Skip scalar template values (e.g. monitor_cycle) mixed in with rules.
        if not isinstance(rule, dict):
            continue
        if not rule["enable"]:
            continue
        req = ListAlarmHistoriesRequest(
            namespace=rule["namespace"],
            _from=dt_from,
            to=dt_to,
            limit=100,
        )
        histories = ces_client.list_alarm_histories(req).alarm_histories
        if not histories:
            continue
        # BUG FIX: rule.get("name") may be None when the key is absent; the
        # original then crashed on len(None). Missing/empty means "keep all".
        names = rule.get("name") or []
        if names:
            histories = [h for h in histories if h.name in names]
        for alarm_history in histories:
            save_alarm_record(
                template_alarm.get("alarm"),
                alarm_type="ResourceAlarm",
                alarm_name=alarm_history.name,
                alarm_source="ces",
                state=1,
                level=alarm_history.level,
                last_operate_time=str(datetime.today()),
                details=alarm_history.to_dict(),
            )
    return


@celery_app.task
def monitor_network_alarm():
    """Probe network connectivity targets and record alarms for offline ones.

    For each enabled NetworkAlarm rule, dispatches to the module-level probe
    ``is_<rule_name>_online`` (e.g. ``is_db_network_online``), accumulates
    failure details in the alarm cache, and lets ``is_trigger`` decide
    whether to record an alarm. The updated cache is written back at the end.
    """
    network_alarm_rules = template_alarm.get("alarm_rules")["NetworkAlarm"]
    alarm_cache_config = template_alarm.get("alarm_cache_conf")

    cache_name = alarm_cache_config.get("cache_name").format("remote", "network")
    # Fetch once instead of the original's double cache.get round-trip.
    cache_content = cache.get(cache_name)
    if not cache_content:
        cache_content = generate_alarm_cache(template_alarm, "network", "NetworkAlarm")
    for rule_name, rule in network_alarm_rules.items():
        if not isinstance(rule, dict):
            continue
        if not rule["enable"]:
            continue
        if rule_name not in cache_content:
            init_time = alarm_cache_config.get("init_time")
            # BUG FIX: also initialize "details" — the original set only
            # "last_alarm_time", so the append below raised KeyError for a
            # freshly added rule.
            cache_content[rule_name] = {"last_alarm_time": init_time, "details": []}
        is_online, detail = globals()[f"is_{rule_name}_online"]()
        if is_online:
            continue
        # setdefault guards pre-existing cache entries that may also lack
        # the "details" list.
        cache_content[rule_name].setdefault("details", []).append(detail)
        trigger = is_trigger(template_alarm, cache_content, rule, rule_name, alarm_type="NetworkAlarm")
        if trigger:
            logger.warning("The alarm: %s has been recorded", rule_name)
    cache.set(cache_name, cache_content, timeout=alarm_cache_config.get("expiration"))
    return


def is_db_network_online():
    """Check whether the MySQL connection is healthy.

    Currently a stub: always reports online with an empty detail string.
    Returns a ``(is_online, detail)`` tuple like the other probes.
    """
    return True, ""


def is_obs_network_online():
    """Check whether the OBS (object storage) connection is healthy.

    Issues a lightweight ``listBucketAlias`` call; a 403 response or any
    exception (treated as a timeout) is reported as offline together with a
    human-readable detail string. Returns a ``(is_online, detail)`` tuple.
    """
    server = obs_client.server
    try:
        response = obs_client.listBucketAlias()
    except Exception as exc:
        # Any failure to reach the server is treated as a connection timeout.
        lost_time = str(datetime.today())
        detail = f"lost_time: {lost_time}, Try connect to OBS server: {server} timeout."
        logger.error("OBS server connect failed: %s, %s", repr(exc), traceback.format_exc())
        return False, detail
    if response["status"] == 403:
        # Reached the server but the request was rejected.
        detail = f"connect to OBS server: {server} failed, reason: {response['reason']}"
        logger.error("obs server %s connect failed: %s", server, detail)
        return False, detail
    return True, ""


def is_redis_network_online():
    """Check whether the Redis connection is healthy.

    Currently a stub: always reports online with an empty detail string.
    Returns a ``(is_online, detail)`` tuple like the other probes.
    """
    return True, ""


def is_es_network_online():
    """Check whether the Elasticsearch connection is healthy.

    Pings the cluster via ``is_es_online``; on failure, reads the configured
    hosts from the "default" connection alias to build a detail message.
    Returns a ``(is_online, detail)`` tuple like the other probes.
    """
    alias = "default"
    if is_es_online():
        return True, ""
    # NOTE(review): _kwargs is a private attribute of the connections
    # registry — presumably the configured connection kwargs; verify against
    # the elasticsearch_dsl version in use.
    host = connections._kwargs[alias].get("hosts")
    lost_time = str(datetime.today())
    detail = f"lost_time: {lost_time}, Try connect to ElasticSearch server: {host} failed."
    logger.error("ElasticSearch connect failed: %s", detail)
    return False, detail
