from elasticsearch import Elasticsearch
from typing import Union
from datetime import datetime
from datetime import datetime
from elasticsearch.exceptions import ConnectionTimeout
from concurrent.futures import ThreadPoolExecutor
import pandas as pd
from datetime import datetime, timedelta
from datetime import datetime
import pytz

"""
问题
这个 TraceConnection 的数据是全局保存的，没有做到每个用户一个单独的数据
逻辑需要简化
"""

# Module-level defaults for scrolled queries.
# NOTE(review): these appear unused in this file — TraceConnection defines its
# own query_size / keep_time / thread_num in __init__; confirm external use
# before removing.
query_size = 2500
scroll_time = "15s"
thread_nums = 1

# Elasticsearch connection settings plus the list of service pods covered by
# the traces.
# NOTE(review): "pods" is not read anywhere in this file — presumably consumed
# by callers; verify before changing.
TRACE_SETTINGS = {
    "url": "https://1.92.152.201:9200",
    "username": "elastic",
    "password": "elastic",
    "pods": [
        "cartservice",
        "checkoutservice",
        "currencyservice",
        "emailservice",
        "frontend",
        "paymentservice",
        "productcatalogservice",
        "recommendationservice",
        "redis",
        "shippingservice",
    ],
}


def deal_template(
    grouped_trace: dict, name_conditions: list, all_need_datas: dict, method: str
):
    """
    Shared post-processing for queried trace data (callers still do their own
    further processing afterwards).

    :param grouped_trace (dict): trace_id -> list of span documents (each a
        hit's ``_source``).
    :param name_conditions (list): substrings; spans whose name contains any
        of them are skipped (e.g. health checks).
    :param all_need_datas (dict): wanted data keys mapped to empty lists
        (see `key.txt` for the key meanings; new keys added over time).
    :param method: how the per-trace sublists are merged for each key —
        "append" keeps one sublist per trace, anything else flattens with
        extend.

    Results accumulate in-place into ``all_need_datas``, e.g.
    {"need_key1": [1, 2, 3], "need_key2": [4, 5, 6]}.
    """
    _missing = object()  # sentinel: unhandled key -> append nothing

    def _extract(key, span, event):
        """Pull the value for *key* out of a single span document."""
        if key == "timestamp":
            return span["timestamp"]["us"]
        if key == "duration":
            return span[event]["duration"]["us"]
        if key == "status_code":
            return span.get("http", {}).get("response", {}).get("status_code", 0)
        if key == "parent_span":
            return span.get("parent", {}).get("id", "")
        if key == "cmdb_id":
            # Normalize a known misspelling of the service name.
            if span["service"]["name"] == "recommendationservie":
                span["service"]["name"] = "recommendationservice"
            return span["service"]["name"]
        if key == "span_id":
            return span[event]["id"]
        if key == "type":
            return span[event]["type"]
        if key == "operation_name":
            return span[event]["name"]
        return _missing

    for trace_id, span_list in grouped_trace.items():
        span_list = sorted(span_list, key=lambda s: s["timestamp"]["us"])
        collected = {key: [] for key in all_need_datas}
        for span in span_list:
            try:
                # "processor.event" tells whether this is a span or a transaction.
                event = span["processor"]["event"]
                span_name = span[event]["name"]
                if any(cond in span_name for cond in name_conditions):
                    continue
                for key, bucket in collected.items():
                    if key == "trace_id":
                        bucket.append(trace_id)
                        continue
                    value = _extract(key, span, event)
                    if value is not _missing:
                        bucket.append(value)
            except Exception:
                # Best-effort: dump the offending document and keep going.
                print(span)
        for key, bucket in collected.items():
            if method == "append":
                all_need_datas[key].append(bucket)
            else:
                all_need_datas[key].extend(bucket)


def get_grouped_trace(traces: list):
    """
    Group raw query hits by trace id.

    :param traces: raw hit documents from Elasticsearch, each carrying a
        "_source" payload.
    :return grouped_trace: dict mapping trace_id -> list of the hits'
        "_source" payloads.
    """
    grouped_trace = {}
    for trace in traces:
        try:
            trace_id = trace["_source"]["trace"]["id"]
        except (KeyError, TypeError):
            # Best-effort grouping: skip documents without a trace id
            # (previously a broad `except Exception` that hid other bugs).
            continue
        grouped_trace.setdefault(trace_id, []).append(trace["_source"])
    return grouped_trace


def calculate_duration(trace: list):
    """Return the wall-clock span of a trace in microseconds.

    Duration = (latest span end) - (earliest span start), where a span's end
    is its timestamp plus its reported duration.

    :param trace: list of span documents sharing one trace id.
    :return: duration in microseconds; 0 for an empty trace.
    """
    if not trace:
        return 0

    start_time = trace[0]["timestamp"]["us"]
    end_time = 0

    for item in trace:
        try:
            begin = item["timestamp"]["us"]
            finish = begin + item[item["processor"]["event"]]["duration"]["us"]
        except (KeyError, TypeError):
            # Malformed span: log it and keep the best-effort bounds.
            # (Previously a bare `except:` that also swallowed
            # KeyboardInterrupt/SystemExit.)
            print(item)
            continue
        start_time = min(start_time, begin)
        end_time = max(end_time, finish)

    return end_time - start_time


def choose_index_template(indices, start_time, end_time):
    """Select the daily index patterns overlapping [start_time, end_time].

    :param indices: iterable of index names like
        ".ds-traces-apm-default-2024.01.15-000001".
    :param start_time: window start, epoch seconds.
    :param end_time: window end, epoch seconds.
    :return: set of wildcard patterns, one per matching day.
    """
    prefix = ".ds-traces-apm-default-"
    selected_patterns = set()

    for name in indices:
        # "2024.01.15-000001" -> "2024-01-15"
        day_str = name[len(prefix):].split("-")[0].replace(".", "-")
        try:
            day_start = datetime.strptime(day_str, "%Y-%m-%d")
        except ValueError:
            # Index name did not parse as a dated stream; skip it.
            print("index choose error")
            continue
        day_lo = day_start.timestamp()
        day_hi = (day_start + timedelta(days=1)).timestamp()

        # Keep any day whose [day_lo, day_hi] interval overlaps the window.
        if start_time <= day_hi and day_lo <= end_time:
            selected_patterns.add(f"{prefix}{day_str.replace('-', '.')}-*")

    return selected_patterns


def timezone_adjust(local_datetime: datetime):
    """Convert *local_datetime* to UTC and format it as an ISO-8601 string.

    :param local_datetime: datetime to convert; naive values are interpreted
        as local time by ``astimezone``.
    :return: string like "2024-01-01T12:00:00.000000Z".
    """
    # Stdlib timezone.utc replaces the former third-party pytz.utc —
    # behavior is identical for plain UTC conversion.
    from datetime import timezone

    utc_time = local_datetime.astimezone(timezone.utc)
    return utc_time.strftime("%Y-%m-%dT%H:%M:%S.%fZ")


def get_query_command(start_time, end_time, sort, query_size):
    """Build the shared Elasticsearch query body.

    :param start_time: lower bound for @timestamp (inclusive).
    :param end_time: upper bound for @timestamp (inclusive).
    :param sort: sort specification passed through verbatim.
    :param query_size: page size for the query.
    :return: query body dict (time-range filter + sort + size).
    """
    time_filter = {"range": {"@timestamp": {"gte": start_time, "lte": end_time}}}
    return {
        "query": {"bool": {"must": [time_filter]}},
        "sort": sort,
        "size": query_size,
    }


class TraceConnection:
    """Elasticsearch-backed accessor for APM trace data.

    NOTE(review): ``self.indices`` is set by ``parse_time`` and read by
    ``multi_threads_query``, so a single instance is not safe for concurrent
    queries over different time windows (matches the module-level known issue).
    """

    def __init__(self, config):
        """
        :param config: dict with "url", "username" and "password" keys
            (see TRACE_SETTINGS).
        """
        self.connection = Elasticsearch(
            [config["url"]],
            basic_auth=(config["username"], config["password"]),
            verify_certs=False,
            request_timeout=120,
            max_retries=5,
            retry_on_timeout=True,
            connections_per_node=10,
        )
        self.indices = None  # index patterns selected by parse_time()
        self.query_size = 7500  # page size per scroll request
        self.keep_time = "1m"  # scroll-context keep-alive
        self.thread_num = 1  # workers in multi_threads_query

    # Normalize query times into one format
    def parse_time(
        self, start_time: Union[int, str, datetime], end_time: Union[int, str, datetime]
    ):
        """Normalize the query window and select the matching daily indices.

        Accepts epoch seconds (int), numeric strings, or datetime objects.
        A start_time of -1 means "the 60 seconds before end_time".

        :return: (start_time, end_time) as UTC ISO-8601 strings.
        """
        # BUG FIX: convert end_time to epoch seconds *before* the -1
        # shortcut, so `end_time - 60` is always numeric. The old order
        # raised TypeError when end_time was a datetime and start_time == -1.
        if isinstance(end_time, str):
            end_time = int(end_time)
        elif isinstance(end_time, datetime):
            end_time = end_time.timestamp()

        if start_time == -1:
            start_time = end_time - 60
        elif isinstance(start_time, str):
            start_time = int(start_time)
        elif isinstance(start_time, datetime):
            start_time = start_time.timestamp()

        # Restrict subsequent searches to the daily indices overlapping
        # the window.
        indices = self.connection.indices.get(index=".ds-traces-apm-default-*")
        self.indices = choose_index_template(indices, start_time, end_time)

        start_time = timezone_adjust(datetime.fromtimestamp(start_time))
        end_time = timezone_adjust(datetime.fromtimestamp(end_time))

        return start_time, end_time

    # Shared query executor
    def query_template(self, index, query: dict):
        """Run *query* against *index*, following the scroll cursor until all
        pages are fetched.

        :param index: index pattern to search.
        :param query: Elasticsearch query body.
        :return data (list): raw hit documents.
        """
        data = []
        scroll_id = None
        try:
            page = self.connection.search(
                index=index, body=query, scroll=self.keep_time
            )

            data.extend(page["hits"]["hits"])
            scroll_id = page["_scroll_id"]

            while True:
                page = self.connection.scroll(
                    scroll_id=scroll_id, scroll=self.keep_time
                )
                data.extend(page["hits"]["hits"])
                # A short page means the cursor is exhausted.
                if len(page["hits"]["hits"]) < self.query_size:
                    break
                scroll_id = page["_scroll_id"]

        except ConnectionTimeout as e:
            print("Connection Timeout:", e)
        finally:
            # Free the server-side scroll context instead of leaking it
            # until keep_time expires.
            if scroll_id is not None:
                try:
                    self.connection.clear_scroll(scroll_id=scroll_id)
                except Exception:
                    pass
        return data

    def multi_threads_query(self, query):
        """Run *query* against every selected index in a thread pool and
        concatenate the results.

        NOTE(review): relies on self.indices having been populated by an
        earlier parse_time call (trace_byId skips that step — confirm).

        :return: combined list of raw hits from all indices.
        """
        datas = []  # accumulates results from every worker
        print(f"indices: {self.indices}")
        with ThreadPoolExecutor(max_workers=self.thread_num) as executor:
            futures = []
            for index in self.indices:
                print(f"cur_indice: {index}, all_indices: {self.indices}")
                futures.append(executor.submit(self.query_template, index, query))

            for future in futures:
                datas.extend(future.result())

        return datas

    # Fetch topology data
    def trace_topology(self, end_time: Union[int, datetime, str]):
        """Return the service topology for the minute ending at *end_time*."""
        query = get_query_command(
            *self.parse_time(-1, end_time),
            {"@timestamp": {"order": "asc"}},
            self.query_size,
        )

        data = self.multi_threads_query(query)

        return self.topology_data(data)

    # Fetch trace data
    def query(
        self,
        start_time: Union[int, datetime, str, None],
        end_time: Union[int, datetime, str, None],
    ):
        """Return per-trace summaries for the [start_time, end_time] window."""
        query = get_query_command(
            *self.parse_time(start_time, end_time),
            {"@timestamp": {"order": "asc"}},
            self.query_size,
        )

        data = self.multi_threads_query(query)

        return self.trace_data(data)

    # Fetch every span of one trace id
    def trace_byId(self, trace_id: str):
        """Return all spans of *trace_id* as a list of flat dicts."""
        query = {
            "query": {"bool": {"must": [{"term": {"trace.id": trace_id}}]}},
            "size": self.query_size,
        }

        data = self.multi_threads_query(query)

        return self.traceId_data(data, trace_id)

    # Export trace data as a DataFrame
    def trace_extract(self, start_time=None, end_time=None):
        """Export all spans in the window as a pandas DataFrame.

        NOTE(review): the None defaults will not survive parse_time's
        arithmetic — callers appear expected to pass explicit times; confirm.
        """
        query = get_query_command(
            *self.parse_time(start_time, end_time), ["_doc"], self.query_size
        )
        print("over1")
        print("=" * 50)
        data = self.multi_threads_query(query)
        print("over2")
        print("=" * 50)

        return self.trace_processing(data)

    # Flatten raw hits into a DataFrame
    def trace_processing(self, traces):
        """Flatten raw hits into a pandas DataFrame, one row per span."""
        # Group hits by trace id first.
        grouped_trace = get_grouped_trace(traces)
        # Columns wanted in the frame.
        all_need_datas = {
            "timestamp": [],
            "cmdb_id": [],
            "span_id": [],
            "trace_id": [],
            "duration": [],
            "type": [],
            "status_code": [],
            "operation_name": [],
            "parent_span": [],
        }
        # "extend" flattens every trace into one row list per column.
        deal_template(
            grouped_trace, ["health", "POST unknown route"], all_need_datas, "extend"
        )
        print("over3")

        return pd.DataFrame(all_need_datas)

    # Build the service topology
    def topology_data(self, traces):
        """Build deduplicated service-call edges from raw trace hits.

        :return: list of {"from": service, "to": service} dicts.
        """
        grouped_trace = get_grouped_trace(traces)
        # "append" keeps one cmdb_id list per trace, so consecutive entries
        # within a trace form call edges.
        all_need_datas = {"cmdb_id": []}
        deal_template(grouped_trace, ["health"], all_need_datas, "append")

        edges = set()
        for arr in all_need_datas["cmdb_id"]:
            for src, dst in zip(arr, arr[1:]):
                if src == dst:
                    continue
                # BUG FIX: the old code called tuple() on the edge dict,
                # which collapses every edge to the key tuple ("from", "to")
                # and made the final dict() conversion raise ValueError.
                edges.add((src, dst))

        return [{"from": src, "to": dst} for src, dst in edges]

    # Summarize each trace
    def trace_data(self, traces):
        """Summarize each trace: id, duration, entry operation, start time,
        and overall status ("Error" if any span reports a non-200 status)."""
        grouped_trace = get_grouped_trace(traces)

        result = []
        # Sort each trace's spans by timestamp, then summarize.
        for trace_id, trace_list in grouped_trace.items():
            trace_list = sorted(trace_list, key=lambda el: el["timestamp"]["us"])
            is_health = False
            status = "Success"
            for span in trace_list:
                try:
                    # "processor.event" tells whether this is a span or a
                    # transaction.
                    processor_name = span["processor"]["event"]
                    if (
                        "health" in span[processor_name]["name"]
                        or "POST unknown route" == span[processor_name]["name"]
                    ):
                        is_health = True
                        break
                    # Any non-200, non-missing status marks the trace Error.
                    status_code = (
                        span.get("http", {}).get("response", {}).get("status_code", 0)
                    )
                    if status != "Error" and status_code != 200 and status_code != 0:
                        status = "Error"
                except Exception:
                    print("error")

            if is_health:
                continue

            try:
                # The earliest span names the trace's entry operation.
                first_span = trace_list[0]
                operation_name = first_span[first_span["processor"]["event"]]["name"]

                if operation_name != "POST unknown route":
                    result.append(
                        {
                            "trace_id": trace_id,
                            "duration": calculate_duration(trace_list),
                            "operation_name": operation_name,
                            "timestamp": first_span["timestamp"]["us"],
                            "status": status,
                        }
                    )
            except Exception as e:
                print(e, trace_list)
                continue
        return result

    # Spans of a single trace id
    def traceId_data(self, traces, trace_id):
        """Convert the raw hits of one trace into a list of per-span dicts."""
        traces = [item["_source"] for item in traces]
        grouped_trace = {trace_id: traces}

        all_need_datas = {
            "timestamp": [],
            "cmdb_id": [],
            "span_id": [],
            "duration": [],
            "type": [],
            "status_code": [],
            "operation_name": [],
            "parent_span": [],
        }
        deal_template(
            grouped_trace, ["health", "POST unknown route"], all_need_datas, "extend"
        )

        # Columns-to-rows: {"a": [1, 2], "b": [4, 5]}
        #   -> [{"a": 1, "b": 4}, {"a": 2, "b": 5}]
        result = []
        for i in range(len(all_need_datas["timestamp"])):
            result.append({key: value[i] for key, value in all_need_datas.items()})

        return result
