import datetime
import json
import time
from elasticsearch import Elasticsearch
from typing import Dict, Optional
from sqlalchemy.orm import Session
from elasticsearch_dsl import Search, Q, A
from elasticsearch_dsl.aggs import Avg, Filters
import prometheus_client
from prometheus_client import Counter, Gauge
from prometheus_client.core import CollectorRegistry

from core.response import resp_500
from core.logger import logger
from app.monitor_logs.models.monitor_log_http_record import MonitorLogHttpRecord
from app.monitor_logs.models.monitor_log_http_url import MonitorLogHttpUrl
from common.utils import CommonTimeUtils, cst_to_utc

class MonitorLogHttpRecordHandler():
    """  MonitorLogHttpRecord Handler
    """
    def __init__(self):
        pass


    def get_http_record_search(self, db: Session, page: int, per_page: int, field: str, value: str):
        """
        根据表字段模糊匹配，获取http的url记录信息
        """
        try:
            field_info = {field: value}
            order_info = {"update_at": "desc"}
            result = MonitorLogHttpRecord.filter_by_field_search(db, page, per_page, field_info, order_info)
            if not result:
                logger.warning(f"get http url record search, result is null => {field}:{value}")
                return False, None

        except Exception as e:
            message = f"get http url record search by {field}:{value} error"
            logger.exception(f"{message} => {e}")
            return resp_500(message)

        return True, result



    def get_http_record_match(self, db: Session, page: int, per_page: int, field: str, value: str):
        """
        根据表字段完全匹配，获取http的url记录信息
        """
        try:
            result = MonitorLogHttpRecord.filter_by_field_match(db, page, per_page, {field: value})
            if not result:
                logger.warning(f"get http url record match, result is null => {field}:{value}")
                return False, None

        except Exception as e:
            message = f"get http url record match by {field}:{value} error"
            logger.exception(f"{message} => {e}")
            return resp_500(message)

        return True, result




    def delete_http_record(self, db: Session, data: dict):
        """
        删除http的url记录信息
        """
        try:
            apprecord_object = MonitorLogHttpRecord.get_object_by_uuid(db, data['uuid'])
            if not apprecord_object:
                # 如果不存在， 则异常返回
                logger.error(f"delete http url record failed, uuid:{data['uuid']} not exist")
                return False

            # 删除告警记录
            MonitorLogHttpRecord.delete_object_by_uuid(db, data['uuid'])
        except Exception as e:
            message = f"delete http url record uuid:{data['uuid']}  error"
            logger.exception(f"{message} => {e}")
            return resp_500(message)

        return True



    def get_http_record_state(self, db: Session):
        """
        获取http接口状态记录是否正常更新
        """
        try:
            end_time = datetime.datetime.now()
            begin_time = end_time - datetime.timedelta(minutes=6)
            http_records = MonitorLogHttpRecord.get_object_info_for_time(db, begin_time, end_time)
            if not http_records:
                logger.error("get http url records for time failed")
                return False
        except Exception as e:
            message = f"get http url records for time error => {e}"
            logger.exception(f"{message} => {e}")
            return False

        return True
    
    
    
    def get_es_agg_for_http_code(self, es_client, utc_begin_time, utc_end_time, url_info) -> Dict:
        """
        从es数据库获取接口的状态码信息
        """
        try:
            url_record = {
                "domain": url_info["domain"],
                "url": url_info["url"],
                "method": url_info["method"],
                "provider": url_info["provider"],
                "production": url_info["production"],
                "total": 0,
                "comment": url_info["comment"]
            }
            query = Q({'range': {'@timestamp': {"from": utc_begin_time, "to": utc_end_time}}}) & \
                    Q('match_phrase', domain_name=url_info["domain"]) & \
                    Q('match_phrase_prefix', request=url_info["url"]) & \
                    Q('match', verb=url_info["method"])
            filters_agg = Filters(
                filters={
                    '2xx': Q('range', upstream_status={'gte': 200, 'lt': 300}),
                    '3xx': Q('range', upstream_status={'gte': 300, 'lt': 400}),
                    '4xx': Q('range', upstream_status={'gte': 400, 'lt': 500}),
                    '5xx': Q('range', upstream_status={'gte': 500, 'lte': 599}),
                },
                aggs={
                    'avg_req_time': Avg(field='req_time'),
                    'avg_resp_time': Avg(field='resp_time')
                }
            )
            s = Search(using=es_client, index="logstash-openresty_access*")
            s = s.query(query)
            s.aggs.bucket('status_groups', filters_agg)
            s = s.extra(size=0)
            result = s.execute()
            if result == False:
                logger.error(f"elasticsearch execute {utc_begin_time}~{utc_end_time} {json.dumps(url_record)} failed")
                return False
        
            for bucket, agg_info in result.aggregations.status_groups.buckets.to_dict().items():  
                url_record.update({
                    f"status{bucket}": agg_info["doc_count"],
                    f"status{bucket}_req_avg": round(agg_info["avg_req_time"]["value"], 2) if agg_info["avg_req_time"]["value"] else 0,
                    f"status{bucket}_resp_avg": round(agg_info["avg_resp_time"]["value"], 2) if agg_info["avg_resp_time"]["value"] else 0
                })
                url_record["total"] += agg_info["doc_count"]
            logger.info(f"elasticsearch execute {utc_begin_time}~{utc_end_time} {json.dumps(url_record)} => success")
        except Exception as e:
            logger.error(f"elasticsearch execute {utc_begin_time}~{utc_end_time} {json.dumps(url_record)} => error => {e}")
            return False
            
        return url_record
    
    
    
    def task_update_from_es_handle(self, es_client: Elasticsearch, db: Session, interval_min: int):
        """
        从es数据库获取接口的状态码信息保存到mysql数据库的任务处理
        """
        # 获取搜索时间段
        end_time = datetime.datetime.now()
        begin_time = end_time - datetime.timedelta(minutes=interval_min)
        utc_begin = cst_to_utc(begin_time.strftime("%Y-%m-%d %H:%M:%S"))
        utc_end = cst_to_utc(end_time.strftime("%Y-%m-%d %H:%M:%S"))
        
        # 获取所有的接口信息
        url_infos = MonitorLogHttpUrl.get_object_info_for_used(db)
        for url_info in url_infos:
            # 从es数据库获取接口的状态码信息
            url_record = self.get_es_agg_for_http_code(es_client, utc_begin, utc_end, url_info)
            if not url_record:
                logger.error(f"get es agg for http_code failed")
                return False
            
            url_record["begin_time"] = begin_time
            url_record["end_time"] = end_time
            
            # 结果保存到mysql数据库
            result = MonitorLogHttpRecord.add(db, url_record)
            if not result:
                logger.error(f"add http record to database failed")
                return False
            
        logger.info("all url infos handle success")
        return True
            
            
        
    def get_http_all_metrics(self, db: Session):
        """
        将http的url状态信息返回到prometheus
        """
        # 配置两个指标
        registry = CollectorRegistry(auto_describe=False)
        welab_http_code = Gauge("welab_http_code", "url http code number",
                             ["domain", "url", "method", "provider", "http_code"], registry=registry)
        
        welab_http_time = Gauge("welab_http_time", "url http avg time",
                             ["domain", "url", "method", "provider", "http_time"], registry=registry)
        
        # 获取最近的http接口状态信息
        is_update = []
        end_time = datetime.datetime.now()
        begin_time = end_time - datetime.timedelta(minutes=6)
        http_records = MonitorLogHttpRecord.get_object_info_for_time(db, begin_time, end_time)
        if not http_records:
            logger.error("get http url records for time failed")
            return False
        
        for http_record in http_records:
            # 避免重复的接口信息
            http_record_id = http_record["domain"]+http_record["url"]+http_record["method"]+http_record["provider"]
            if http_record_id in is_update:
                continue
            
            # 配置welab_http_code指标
            for http_code in ["total", "status2xx", "status3xx", "status4xx", "status5xx"]:
                welab_http_code.labels(domain=http_record["domain"],
                                    url=http_record["url"],
                                    method=http_record["method"],
                                    provider=http_record["provider"],
                                    http_code=http_code).set(http_record[http_code])
                
            # 配置welab_http_time指标
            for http_time in ["status2xx_req_avg", "status2xx_resp_avg",
                              "status3xx_req_avg", "status3xx_resp_avg",
                              "status4xx_req_avg", "status4xx_resp_avg",
                              "status5xx_req_avg", "status5xx_resp_avg"]:
                welab_http_time.labels(domain=http_record["domain"],
                                    url=http_record["url"],
                                    method=http_record["method"],
                                    provider=http_record["provider"],
                                    http_time=http_time).set(http_record[http_time])
            is_update.append(http_record_id)
                
        return prometheus_client.generate_latest(registry)
    
    
def main():
    from common.database import SessionLocal
    from common.elastic import get_task_es
    session = SessionLocal()
    es_client = get_task_es()
    interval_min = 5


    monitor_log_http_record_handler = MonitorLogHttpRecordHandler()
    result = monitor_log_http_record_handler.task_update_from_es_handle(es_client, session, interval_min)
    es_client.close()
    print(result)



if __name__ == "__main__":
    main()

            

