import json

from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionTimeout
from datetime import datetime, timedelta
from typing import Union
import time
import pandas as pd
import tqdm, os


"""
问题
这个 LogConnection 的数据是全局保存的, 没有做到每个用户一个单独的数据
逻辑需要简化
"""


# Default Elasticsearch connection settings plus the whitelist of pods whose
# logs are kept by LogConnection.filter_by_pods() / LogConnection.format().
# NOTE(review): the cluster URL and credentials are hard-coded here — consider
# loading them from environment variables or a secrets store.
LOG_SETTINGS = {
    "url": "https://1.92.152.201:9200",
    "username": "elastic",
    "password": "elastic",
    # Pods of interest; hits whose app label is not in this list are dropped.
    "pods": [
        "cartservice",
        "checkoutservice",
        "currencyservice",
        "emailservice",
        "frontend",
        "paymentservice",
        "productcatalogservice",
        "recommendationservice",
        "redis",
        "shippingservice",
    ],
}


class LogConnection:
    """Wrapper around an Elasticsearch cluster that stores Kubernetes pod
    logs in ``logstash-*`` indices.

    Provides time-range queries, key/value and full-text search, export to a
    :class:`pandas.DataFrame`, and per-day log counts.

    NOTE(review): the parameters of the last :meth:`query` call are cached on
    the instance and reused by :meth:`data_search`/:meth:`full_text_search`,
    so a single instance must not be shared between concurrent users.
    """

    def __init__(self, config):
        """Open the Elasticsearch connection described by *config*.

        Keys read from *config*: ``url``, ``username``, ``password`` and
        ``pods`` (the whitelist applied by :meth:`filter_by_pods` and
        :meth:`format`).
        """
        self.connection = Elasticsearch(
            [config["url"]],
            basic_auth=(config["username"], config["password"]),
            # NOTE(review): TLS certificate verification is disabled —
            # confirm this is acceptable for the target deployment.
            verify_certs=False,
            request_timeout=120,
            max_retries=5,
            retry_on_timeout=True,
            connections_per_node=10,
        )
        # State of the most recent query(); keeps subsequent searches
        # consistent and is reused by data_search()/full_text_search().
        self.start_time = None
        self.end_time = None
        self.node = None
        self.pod = None
        self.indices = None
        self.config = config
        self.keep_time = "1m"  # scroll-context keep-alive
        self.query_size = 7500  # page size for scroll queries

    @staticmethod
    def _to_epoch(value: Union[int, datetime, str]) -> int:
        """Normalize *value* (epoch int, numeric string, or datetime) to
        integer epoch seconds.

        Fix: ``datetime`` inputs are advertised by the public type hints of
        :meth:`query` but previously crashed in ``datetime.fromtimestamp``.
        """
        if isinstance(value, str):
            return int(value)
        if isinstance(value, datetime):
            return int(value.timestamp())
        return value

    def scroll_query(self, query, indices):
        """Run *query* against each index in *indices* via the scroll API
        and return the concatenated list of raw hits.

        A ``ConnectionTimeout`` is logged and the affected index is skipped;
        hits collected so far are still returned.
        """
        data = []
        for index in indices:
            scroll_id = None
            try:
                page = self.connection.search(
                    index=index, body=query, scroll=self.keep_time
                )
                data.extend(page["hits"]["hits"])
                scroll_id = page["_scroll_id"]

                while True:
                    page = self.connection.scroll(
                        scroll_id=scroll_id, scroll=self.keep_time
                    )
                    hits_len = len(page["hits"]["hits"])
                    data.extend(page["hits"]["hits"])
                    # A short page means the result set is exhausted.
                    if hits_len < self.query_size:
                        break
                    scroll_id = page["_scroll_id"]
            except ConnectionTimeout as e:
                print("Connection Timeout:", e)
            finally:
                # Fix: release the server-side scroll context instead of
                # leaving it open until keep_time expires.
                if scroll_id is not None:
                    try:
                        self.connection.clear_scroll(scroll_id=scroll_id)
                    except Exception:
                        pass  # best effort; the context expires on its own

        return data

    def query(
        self,
        node: str,
        pod: str,
        start_time: Union[int, datetime, str],
        end_time: Union[int, datetime, str],
    ):
        """Fetch all logs in ``[start_time, end_time]`` (epoch seconds, a
        numeric string, or a datetime) and cache the query parameters on the
        instance for later :meth:`data_search`/:meth:`full_text_search` calls.

        Returns the raw hits filtered down to the configured pods.
        """
        # Fix: accept datetime inputs as the type hints promise.
        start_time = self._to_epoch(start_time)
        end_time = self._to_epoch(end_time)

        # Pick the logstash-* index patterns that cover the time span.
        indices = self.choose_index_template(start_time, end_time)
        self.indices = indices

        start_time = datetime.fromtimestamp(start_time)
        end_time = datetime.fromtimestamp(end_time)

        self.node = node
        self.pod = pod
        self.start_time = start_time
        self.end_time = end_time

        # Elasticsearch query: everything in the time range, oldest first.
        query = {
            "query": {
                "bool": {
                    "must": [
                        {"range": {"@timestamp": {"gte": start_time, "lte": end_time}}}
                    ]
                }
            },
            "sort": {"@timestamp": {"order": "asc"}},
            "size": self.query_size,
        }

        return self.filter_by_pods(self.scroll_query(query, indices))

    def data_search(self, key: str, value: str):
        """Wildcard-search ``*value*`` in field *key* within the time range
        and indices recorded by the previous :meth:`query` call.

        Returns the pod-filtered raw hits.
        """
        query = {
            "query": {
                "bool": {
                    "must": [
                        {
                            "range": {
                                "@timestamp": {
                                    "gte": self.start_time,
                                    "lte": self.end_time,
                                }
                            }
                        },
                        {"wildcard": {key: {"value": "*" + value + "*"}}},
                    ]
                }
            },
            "sort": {"@timestamp": {"order": "asc"}},
            "size": self.query_size,
        }
        return self.filter_by_pods(self.scroll_query(query, self.indices))

    def full_text_search(self, value: str):
        """Fuzzy full-text search for *value* over the "message",
        "kubernetes.*", "event.*" and "agent.*" fields, within the time range
        and indices recorded by the previous :meth:`query` call.

        Returns the pod-filtered raw hits.
        """
        query = {
            "query": {
                "bool": {
                    "must": [
                        {
                            "range": {
                                "@timestamp": {
                                    "gte": self.start_time,
                                    "lte": self.end_time,
                                }
                            }
                        },
                        {
                            "multi_match": {
                                "query": value,
                                "fields": [
                                    "message",
                                    "kubernetes.*",
                                    "event.*",
                                    "agent.*",
                                ],
                                "fuzziness": "AUTO",
                            }
                        },
                    ]
                }
            },
            "sort": {"@timestamp": {"order": "asc"}},
            "size": self.query_size,
        }
        return self.filter_by_pods(self.scroll_query(query, self.indices))

    def export_logs(self, start_time=None, end_time=None):
        """Export every log between *start_time* and *end_time* (epoch
        seconds) as a :class:`pandas.DataFrame` via :meth:`format`.

        Raises:
            ValueError: if either bound is omitted. (The ``None`` defaults
                exist only for signature compatibility — both values are
                required; previously a missing bound crashed with an obscure
                ``TypeError`` inside ``datetime.fromtimestamp``.)
        """
        if start_time is None or end_time is None:
            raise ValueError("export_logs() requires both start_time and end_time")

        # Pick the logstash-* index patterns that cover the time span.
        indices = self.choose_index_template(start_time, end_time)

        start_time = datetime.fromtimestamp(start_time)
        end_time = datetime.fromtimestamp(end_time)
        query = {
            "size": self.query_size,
            "query": {
                "bool": {
                    "must": [
                        {"range": {"@timestamp": {"gte": start_time, "lte": end_time}}}
                    ]
                }
            },
            # _doc order: fastest sort for a full export.
            "sort": ["_doc"],
        }

        return self.format(self.scroll_query(query, indices))

    def get_log_number_by_day(self):
        """Count logs per calendar day over the last 15 days.

        Returns a list of ``{"date": "YYYY-MM-DD", "log_count": int}`` dicts.

        Raises:
            Exception: wraps a ``ConnectionTimeout`` from the cluster.
        """
        data = []
        try:
            indices = self.connection.indices.get(index="logstash-*")
            logs_per_day = {}  # log count keyed by "%Y-%m-%d"

            for index in indices:
                response = self.connection.count(index=index)
                # Index names are assumed to look like
                # "logstash-YYYY.MM.DD.HH" — TODO confirm against the cluster.
                index_date_str = index.split("-")[-1]
                index_date = datetime.strptime(index_date_str, "%Y.%m.%d.%H")

                # Only keep indices from the past 15 days.
                if index_date >= datetime.now() - timedelta(days=15):
                    day_key = index_date.strftime("%Y-%m-%d")

                    if day_key not in logs_per_day:
                        logs_per_day[day_key] = 0
                    logs_per_day[day_key] += response["count"]

            # Flatten into the output shape.
            for date_str, log_count in logs_per_day.items():
                data.append({"date": date_str, "log_count": log_count})
        except ConnectionTimeout as e:
            print("Connection Timeout:", e)
            raise Exception(repr(e))
        return data

    def message_extract(self, json_str):
        """Extract a "severity:...,message:..." (or "level:...") summary from
        the JSON log payload *json_str*.

        Returns ``None`` (after printing the error) when the payload carries
        neither a "severity" nor a "level" key, or when the values are not
        strings.
        """
        message = None
        data = json.loads(json_str)
        try:
            if "severity" in data.keys():
                message = "".join(
                    ["severity:", data["severity"], ",", "message:", data["message"]]
                )
            elif "level" in data.keys():
                message = "".join(
                    ["level:", data["level"], ",", "message:", data["message"]]
                )
            else:
                # Fix: report the offending payload; previously this
                # interpolated `message`, which is always None here.
                raise Exception(f"Not found 'severity' or 'level' in {json_str}")
        except Exception as e:
            print(str(e))
        return message

    def format(self, logs):
        """Convert raw hits into a DataFrame with columns
        log_id / timestamp / date / cmdb_id / message.

        Hits from pods outside the configured whitelist, or with a malformed
        source document, are silently skipped.
        """
        log_id_list = []
        ts_list = []
        date_list = []
        pod_list = []
        ms_list = []
        for log in logs:
            try:
                cmdb_id = log["_source"]["kubernetes"]["labels"]["app"]
                if cmdb_id not in self.config["pods"]:
                    continue
                timestamp = log["_source"]["@timestamp"]
                timestamp = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%fZ")
                timestamp = timestamp.timestamp()
                format_ts = log["_source"]["@timestamp"]
                message = self.message_extract(log["_source"]["message"])
            except Exception:
                continue  # malformed hit — skip it
            log_id_list.append(log["_id"])
            pod_list.append(cmdb_id)
            date_list.append(format_ts)
            ts_list.append(timestamp)
            ms_list.append(message)
        dt = pd.DataFrame(
            {
                "log_id": log_id_list,
                "timestamp": ts_list,
                "date": date_list,
                "cmdb_id": pod_list,
                "message": ms_list,
            }
        )
        return dt

    def filter_by_pods(self, logs):
        """Return only the hits whose Kubernetes "app" label is in the
        configured pod whitelist; hits missing the label are dropped."""
        filtered_log = []
        for log in logs:
            try:
                cmdb_id = log["_source"]["kubernetes"]["labels"]["app"]
                if cmdb_id not in self.config["pods"]:
                    continue
            except Exception:
                continue  # no app label — drop the hit
            filtered_log.append(log)
        return filtered_log

    def choose_index_template(self, start_time, end_time):
        """Return the set of daily ``logstash-YYYY.MM.DD*`` index patterns
        that both exist in the cluster and overlap ``[start_time, end_time]``
        (epoch seconds)."""
        indices = self.connection.indices.get(index="logstash-*")
        indices_template = set()
        for indice in tqdm.tqdm(indices):
            # Drop the trailing hour component of "logstash-YYYY.MM.DD.HH"
            # to get one daily pattern per index family.
            date_str = ".".join(indice.split("-")[1].split(".")[:-1])
            indices_template.add("logstash-" + date_str + "*")

        start_datetime = datetime.fromtimestamp(start_time)
        end_datetime = datetime.fromtimestamp(end_time)

        dates_in_range = set()
        current_datetime = start_datetime

        while current_datetime <= end_datetime:
            dates_in_range.add(
                "logstash-" + current_datetime.strftime("%Y.%m.%d") + "*"
            )
            current_datetime += timedelta(days=1)

        # Also include the end date explicitly (covers the degenerate case
        # where the loop above never runs).
        dates_in_range.add("logstash-" + end_datetime.strftime("%Y.%m.%d") + "*")

        selected_patterns = indices_template.intersection(dates_in_range)

        return selected_patterns