import asyncio
import functools
import hashlib
import json
import logging
import os
import re
import subprocess
import time
from datetime import datetime, timedelta
from typing import Any, Callable

import yaml

# Path of the YAML configuration file, relative to the project root
# (resolved via get_path in _get_config_by_keys below).
CONFIG_PATH = "config/config.yaml"


def setup_logger() -> logging.Logger:
    """Create and return the module-wide logger.

    Logs at INFO level to both the console and ``logs/app.log``
    (UTF-8, append mode). Any handlers already attached are removed
    first, so calling this repeatedly never duplicates output.
    """
    # Make sure the log directory exists before attaching a file handler.
    log_directory = "logs"
    os.makedirs(log_directory, exist_ok=True)

    configured = logging.getLogger(__name__)
    configured.setLevel(logging.INFO)

    # Drop stale handlers so re-running setup never double-logs.
    if configured.hasHandlers():
        configured.handlers.clear()

    fmt = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    )

    stream_handler = logging.StreamHandler()
    file_handler = logging.FileHandler(
        os.path.join(log_directory, "app.log"), encoding="utf-8", mode="a"
    )

    # Both handlers share the same level and format.
    for handler in (stream_handler, file_handler):
        handler.setLevel(logging.INFO)
        handler.setFormatter(fmt)
        configured.addHandler(handler)

    return configured


# Shared module-level logger used by the helpers in this file.
logger = setup_logger()


def _log_timing(func: Callable, run_time: float, kwargs: dict) -> None:
    """Emit one timing log line for *func* (shared by both wrappers in timer)."""
    # Bucket the elapsed wall-clock time into a coarse cost label.
    if run_time > 3.0:
        cost = "HIGH"
    elif run_time > 1.0:
        cost = "MEDIUM"
    else:
        cost = "LOW"

    # pack_message calls additionally record the response keyword they got.
    additional = ""
    if func.__name__ == "pack_message":
        response = kwargs.get("response", "")
        if response:
            additional = f"[response={response}]"

    msg = (
        f"===> timer:[{cost}] # "
        f"[{func.__module__}] # "
        f"[{func.__name__}] # "
        f"{additional} # "
        f"{run_time:.4f} secs <==="
    )
    # Resolved lazily; this is the same instance as the module-level
    # `logger` (both come from logging.getLogger(__name__)).
    logging.getLogger(__name__).info(msg)


def timer(func: Callable) -> Callable:
    """Decorator that logs the wall-clock runtime of *func*.

    Works transparently for both sync and async callables. The log line
    is emitted only when the call returns normally; exceptions propagate
    unlogged (matching the original behavior). The previous version
    duplicated ~40 lines between the two wrappers; the shared logging
    logic now lives in _log_timing.
    """
    if asyncio.iscoroutinefunction(func):

        @functools.wraps(func)
        async def async_wrapper(*args: Any, **kwargs: Any) -> Any:
            start_time = time.perf_counter()
            result = await func(*args, **kwargs)
            _log_timing(func, time.perf_counter() - start_time, kwargs)
            return result

        return async_wrapper

    @functools.wraps(func)
    def sync_wrapper(*args: Any, **kwargs: Any) -> Any:
        start_time = time.perf_counter()
        result = func(*args, **kwargs)
        _log_timing(func, time.perf_counter() - start_time, kwargs)
        return result

    return sync_wrapper


def read_txt(file_path: str) -> list:
    """Read a .txt file and return its non-empty lines as a list.

    Each line is stripped of surrounding whitespace; blank lines are
    skipped. On a read error the problem is logged and the lines read
    so far (possibly an empty list) are returned.

    Args:
        file_path: Path of the .txt file to read.

    Returns:
        list[str]: One entry per non-empty line; empty on failure.
    """
    datas: list = []
    try:
        # utf-8 so Chinese (and other non-ASCII) content decodes correctly.
        with open(file_path, "r", encoding="utf-8") as file:
            for line in file:
                cleaned_line = line.strip()
                if cleaned_line:
                    datas.append(cleaned_line)
    # Errors now go through the module logger instead of bare print(),
    # consistent with the rest of this file; message text unchanged.
    except FileNotFoundError:
        logger.error(f"错误：找不到文件 '{file_path}'。请检查文件路径是否正确。")
    except PermissionError:
        logger.error(f"错误：没有权限读取文件 '{file_path}'。")
    except Exception as e:
        logger.error(f"读取文件 '{file_path}' 时发生未知错误：{e}")

    return datas


def parse_json_from_text(text):
    """Extract every parseable JSON object/array embedded in *text*.

    Candidates are gathered from, in order:
      1. complete fenced blocks: ```json {...} ``` or ```json [...] ```
      2. fenced blocks with no closing ``` (possibly truncated output)
      3. as a fallback, bare {...} / [...] spans anywhere in the text
    Light repairs are attempted (one missing closing brace/bracket,
    stray surrounding quotes); candidates that still fail to parse are
    skipped silently.

    Args:
        text: Free-form (e.g. LLM) output that may embed JSON.

    Returns:
        list: Decoded Python objects, in the order found.
    """
    results = []
    # Normalize common escaping artifacts: \_ -> _ and {{ }} -> { }.
    text = text.replace(r"\_", "_")
    text = text.replace(r"{{", "{").replace(r"}}", "}")
    matches = []

    # 1. Complete fenced ```json { ... } ``` blocks.
    full_match_pattern = r"```(?:json|JSON|)\s*(\{(?:.|\n)*?\})\s*```"
    matches += re.findall(full_match_pattern, text, re.DOTALL)
    # ...and fenced ```json [ ... ] ``` arrays.
    full_array_pattern = r"```(?:json|JSON|)\s*(\[(?:.|\n)*?\])\s*```"
    matches += re.findall(full_array_pattern, text, re.DOTALL)

    # 2. Fence opened but never closed (content may be cut off),
    #    e.g. ```json {"name": "zhang
    incomplete_pattern = r"```(?:json|JSON|)\s*([^{`\n][\s\S]*?)(?:```|$)"
    for match in re.finditer(incomplete_pattern, text, re.DOTALL | re.IGNORECASE):
        content = match.group(1).strip()
        if content:
            matches.append(content)

    # 3. Nothing fenced found: scan the raw text for { ... } and [ ... ]
    #    spans (each pattern tolerates one level of nesting).
    if not matches:
        obj_match = re.search(r"\{[^{}]*\{[^{}]*\}[^{}]*\}|\{[^{}]*\}", text, re.DOTALL)
        if obj_match:
            matches.append(obj_match.group())
        # BUG FIX: this pattern was garbled ("$$" residue instead of the
        # escaped brackets) and could never match a bare JSON array; it
        # now mirrors the object pattern above.
        arr_match = re.search(
            r"\[[^\[\]]*\[[^\[\]]*\][^\[\]]*\]|\[[^\[\]]*\]", text, re.DOTALL
        )
        if arr_match:
            matches.append(arr_match.group())

    # 4. Try to parse every candidate, repairing what we can.
    for match in matches:
        match = match.strip()
        if not match:
            continue
        if len(match) < 2:
            continue
        # Append the single missing closer if exactly one is unbalanced.
        if match.startswith("{") and not match.endswith("}"):
            if match.count("{") == match.count("}") + 1:
                match += "}"
        elif match.startswith("[") and not match.endswith("]"):
            if match.count("[") == match.count("]") + 1:
                match += "]"

        try:
            data = json.loads(match)
            results.append(data)
        except json.JSONDecodeError:
            # Last resort: shed surrounding quotes/whitespace and retry.
            try:
                cleaned = match.strip(' \n\r\t"')
                data = json.loads(cleaned)
                results.append(data)
            except Exception:
                continue
    return results


def extract_fields_as_int_list(text):
    """Parse JSON embedded in *text* and return its "fields" entry as ints.

    Accepts either a comma-separated string (e.g. "2, 7") or a
    list/tuple of numbers / numeric strings; non-numeric pieces are
    dropped.

    Returns:
        List[int]: Parsed integers, e.g. [2, 7]. Empty list on failure.
    """
    try:
        candidates = parse_json_from_text(text)

        # Use the first dict that carries a "fields" key.
        for candidate in candidates:
            if not isinstance(candidate, dict) or "fields" not in candidate:
                continue
            raw = candidate["fields"]
            if isinstance(raw, str):
                # e.g. "2, 7" -> [2, 7]
                pieces = [p.strip() for p in raw.split(",")]
                return [int(p) for p in pieces if p.isdigit()]
            if isinstance(raw, (list, tuple)):
                # e.g. ["14", "17"] or [14, 17]
                return [int(v) for v in raw if str(v).strip().isdigit()]

        logger.warning("No valid 'fields' found in parsed JSON.")
        return []

    except Exception as e:
        logger.error(f"Failed to extract fields from text: {e}")
        return []


def hash_string(text: str) -> str:
    """Return the hex MD5 digest of *text* (always 32 characters).

    NOTE(review): MD5 is fine for cache/dedup keys but must not be used
    for anything security-sensitive.
    """
    digest = hashlib.md5(text.encode("utf-8")).hexdigest()
    # An MD5 hexdigest is exactly 32 chars, so this slice is a no-op
    # kept only to make the intended key length explicit.
    return digest[:32]


def get_project_home() -> str:
    """Return the project root directory as an absolute, normalized path.

    The root is assumed to be one level above the directory containing
    this file.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.normpath(os.path.join(here, ".."))


def get_path(rel_path):
    """Resolve *rel_path* against the project root directory."""
    home = get_project_home()
    return os.path.join(home, rel_path)


def read_yaml(file_path: str):
    """Load a YAML file, returning {} when the file is empty.

    Raises:
        FileNotFoundError: if *file_path* does not exist.
        ValueError: if the file content is not valid YAML.
    """
    try:
        with open(file_path, "r", encoding="utf-8") as handle:
            loaded = yaml.safe_load(handle)
    except FileNotFoundError:
        raise FileNotFoundError(f"配置文件 {file_path} 未找到，请检查路径是否正确")
    except yaml.YAMLError as e:
        raise ValueError(f"配置文件格式错误: {e}")
    return loaded or {}


def get_config(key: str, sub_key: str = None):
    """Look up a value from the YAML config file.

    Args:
        key: Dot-separated path into the config, e.g. "llm.model".
        sub_key: Optional extra level to descend below *key*.

    Returns:
        The configured value at the requested path.

    Raises:
        AssertionError: when *sub_key* is not present under *key*.
    """
    if sub_key is None:
        return _get_config_by_keys(key)

    obj = _get_config_by_keys(key)
    # Explicit raise instead of `assert` so the check survives `python -O`;
    # same exception type and message as before.
    if sub_key not in obj:
        raise AssertionError(f"sub_key {sub_key} 不存在于 {key}")
    return obj[sub_key]


def _get_config_by_keys(key):
    """Walk the loaded YAML config along the dot-separated *key* path.

    Args:
        key: Dot-separated path, e.g. "llm.model".

    Returns:
        The value found at the end of the path.

    Raises:
        AssertionError: when any segment of the path is missing.
    """
    obj = read_yaml(get_path(CONFIG_PATH))
    for k in key.split("."):
        # Explicit raise instead of `assert` so the check survives
        # `python -O`; same exception type and message as before.
        if k not in obj:
            raise AssertionError(f"key {k} 没有找到, 请在这个目录下检查:{CONFIG_PATH}")
        obj = obj.get(k)
    return obj


def get_git_commit_hash() -> str:
    """Return the current HEAD commit hash of the project repo.

    Runs `git rev-parse HEAD` inside the project root. Any failure
    (git missing, not a repo, ...) is logged and "" is returned.
    """
    try:
        completed = subprocess.run(
            ["git", "rev-parse", "HEAD"],
            capture_output=True,
            text=True,
            check=True,
            cwd=get_project_home(),
        )
    except subprocess.CalledProcessError as e:
        logger.error(f"Git命令执行失败: {e}")
        return ""
    except FileNotFoundError:
        logger.error("未找到git命令，请确保已安装git")
        return ""
    except Exception as e:
        logger.error(f"获取git哈希值时发生未知错误: {e}")
        return ""
    # Drop the trailing newline emitted by git.
    return completed.stdout.strip()


def get_extra_key() -> str:
    """Return the git commit hash as an extra key, or "" when unavailable."""
    # `or ""` keeps the empty-string fallback when no hash can be obtained.
    return get_git_commit_hash() or ""


def get_datetime_earlier(days) -> str:
    """Return the local datetime *days* days ago as "YYYY-MM-DD HH:MM:SS.mmm"."""
    earlier = datetime.now() - timedelta(days=days)
    # %f yields microseconds; drop the last three digits to keep milliseconds.
    return earlier.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]


def get_datetime_now() -> str:
    """Return the current local datetime as "YYYY-MM-DD HH:MM:SS.mmm"."""
    # %f yields microseconds; drop the last three digits to keep milliseconds.
    return datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
