import argparse
import os
import sys
from dotenv import load_dotenv
from functools import lru_cache

from pydantic import BaseModel
from pydantic_settings import BaseSettings
import typing as t

from server.module_chat.chat.core.agent.agent import Agent
from server.module_chat.chat.core.agent.tool.tool import Tool, ToolApi, ToolFunction


class AppSettings(BaseSettings):
    """
    Application configuration.

    Defaults below are overridable via environment variables / the loaded
    .env file (standard pydantic BaseSettings behavior).
    """

    app_env: str = 'dev'  # runtime environment name (dev/prod/...)
    app_name: str = 'Act-Chat'
    app_root_path: str = '/dev-api'  # root path prefix for the API
    app_host: str = '0.0.0.0'
    app_port: int = 9527
    app_version: str = '1.0.0'
    app_reload: bool = True  # auto-reload on code change (dev convenience)
    app_ip_location_query: bool = True  # presumably enables IP geolocation lookups — confirm with callers
    app_same_time_login: bool = True  # presumably allows concurrent logins per account — confirm with auth code


class JwtSettings(BaseSettings):
    """
    JWT configuration.
    """

    # SECURITY NOTE(review): a real-looking secret is committed as the default;
    # it should be supplied only via the environment/.env file and this
    # committed value rotated.
    jwt_secret_key: str = 'b01c66dc2c58dc6a0aabfe2144256be36226de378bf87f72c0c795dda67f4d55'
    jwt_algorithm: str = 'HS256'
    jwt_expire_minutes: int = 1440  # token lifetime: 24 hours
    jwt_redis_expire_minutes: int = 30  # presumably the redis-side session TTL — confirm with login code


class DataBaseSettings(BaseSettings):
    """
    Database configuration: primary DB, secondary (replica) DB and the
    NebulaGraph graph database.

    SECURITY NOTE(review): real-looking credentials are committed as defaults
    below; they should come exclusively from the environment/.env file and the
    committed values rotated.
    """

    db_type: t.Literal['mysql', 'postgresql'] = 'mysql'
    db_host: str = '127.0.0.1'
    db_port: int = 3306
    db_username: str = 'root'
    db_password: str = 'mysqlroot'
    db_database: str = 'ruoyi-fastapi'
    db_echo: bool = True  # presumably SQLAlchemy's SQL echo flag — verbose; confirm intended for prod
    db_max_overflow: int = 10
    db_pool_size: int = 50
    db_pool_recycle: int = 3600  # recycle pooled connections after 1 hour
    db_pool_timeout: int = 30
    # Secondary (replica) database configuration
    secondary_db_type: t.Literal['mysql', 'postgresql'] = 'mysql'
    secondary_db_host: str = '10.128.8.189'
    secondary_db_port: int = 3306
    secondary_db_username: str = 'root'
    secondary_db_password: str = 'Rzxmaxc@123'
    secondary_db_database: str = 'dsit-cu-show'
    secondary_db_echo: bool = True
    secondary_db_max_overflow: int = 10
    secondary_db_pool_size: int = 50
    secondary_db_pool_recycle: int = 3600
    secondary_db_pool_timeout: int = 30
    # Graph database (NebulaGraph) configuration
    NEBULA_ADDRESS: str = "10.128.14.28:9669"  # "host:port" — duplicates NEBULA_HOST/NEBULA_PORT; keep in sync
    NEBULA_HOST: str = "10.128.14.28"
    NEBULA_PORT: int = 9669
    NEBULA_USER: str = "root"
    NEBULA_PASSWORD: str = "nebula"
    NEBULA_SPACE_NAME: str = "flfj"


class RedisSettings(BaseSettings):
    """
    Redis configuration.
    """

    redis_host: str = '127.0.0.1'
    redis_port: int = 6379
    redis_username: str = ''  # empty string = no ACL username
    redis_password: str = ''  # empty string = no authentication
    redis_database: int = 2  # logical database index


class UploadSettings:
    """
    Upload configuration: allowed file extensions and the upload/download
    directories, which are created on instantiation if missing.
    """

    UPLOAD_PREFIX = '/profile'  # URL prefix mapped onto the upload directory
    UPLOAD_PATH = 'static/upload_path'  # relative to the process working directory
    UPLOAD_MACHINE = 'A'  # presumably a machine tag embedded in generated file names — confirm with upload code
    DEFAULT_ALLOWED_EXTENSION = [
        # images
        'bmp',
        'gif',
        'jpg',
        'jpeg',
        'png',
        # word / excel / powerpoint
        'doc',
        'docx',
        'xls',
        'xlsx',
        'ppt',
        'pptx',
        'html',
        'htm',
        'txt',
        # archives
        'rar',
        'zip',
        'gz',
        'bz2',
        # video formats
        'mp4',
        'avi',
        'rmvb',
        # pdf
        'pdf',
    ]
    DOWNLOAD_PATH = 'static/download_path'

    def __init__(self):
        # exist_ok=True replaces the original exists()-then-makedirs() pattern,
        # which was racy (TOCTOU) under concurrent startup: another process
        # creating the directory between the check and the call would raise.
        os.makedirs(self.UPLOAD_PATH, exist_ok=True)
        os.makedirs(self.DOWNLOAD_PATH, exist_ok=True)


class CachePathConfig:
    """
    Cache directory configuration.
    """

    # os.getcwd() already returns an absolute path, so the original
    # os.path.abspath() wrapper was a no-op and has been dropped.
    PATH = os.path.join(os.getcwd(), 'caches')
    PATHSTR = 'caches'  # the cache directory's relative name


class PlatformConfig(BaseModel):
    """Configuration for one model-serving platform (Xinference, Ollama, ...)."""
    # NOTE(review): the bare string literals under each field are attribute
    # docstrings; by default pydantic ignores them at runtime (they only inform
    # readers/tooling), so they are left untouched here.

    platform_name: str = "xinference"
    """平台名称"""

    platform_type: t.Literal["xinference", "ollama", "oneapi", "fastchat", "openai", "custom openai"] = "xinference"
    """平台类型"""

    api_base_url: str = "http://127.0.0.1:9997/v1"
    """openai api url"""

    api_key: str = "EMPTY"
    """api key if available"""

    api_proxy: str = ""
    """API 代理"""

    api_concurrencies: int = 5
    """该平台单模型最大并发数"""

    auto_detect_model: bool = False
    """是否自动获取平台可用模型列表。设为 True 时下方不同模型类型可自动检测"""

    llm_models: t.Union[t.Literal["auto"], t.List[str]] = [
        "glm4-chat",
        "qwen1.5-chat",
        "qwen2-instruct",
        "gpt-3.5-turbo",
        "gpt-4o",
    ]
    """该平台支持的大语言模型列表，auto_detect_model 设为 True 时自动检测"""

    embed_models: t.Union[t.Literal["auto"], t.List[str]] = [
        "bge-large-zh-v1.5",
    ]
    """该平台支持的嵌入模型列表，auto_detect_model 设为 True 时自动检测"""

    text2image_models: t.Union[t.Literal["auto"], t.List[str]] = []
    """该平台支持的图像生成模型列表，auto_detect_model 设为 True 时自动检测"""

    image2text_models: t.Union[t.Literal["auto"], t.List[str]] = []
    """该平台支持的多模态模型列表，auto_detect_model 设为 True 时自动检测"""

    rerank_models: t.Union[t.Literal["auto"], t.List[str]] = []
    """该平台支持的重排模型列表，auto_detect_model 设为 True 时自动检测"""

    speech2text_models: t.Union[t.Literal["auto"], t.List[str]] = []
    """该平台支持的 STT 模型列表，auto_detect_model 设为 True 时自动检测"""

    text2speech_models: t.Union[t.Literal["auto"], t.List[str]] = []
    """该平台支持的 TTS 模型列表，auto_detect_model 设为 True 时自动检测"""


class ModelSettings(BaseSettings):
    """Model settings: default model choices, generation parameters and the
    list of configured model-serving platforms."""
    # SECURITY NOTE(review): API keys are committed as defaults in this class
    # (DEFAULT_API_KEY, and platform api_key values inside MODEL_PLATFORMS);
    # they should be supplied via the environment and the committed values
    # rotated.
    DEFAULT_API_BASE_URL: str = "http://10.12.130.188:9997/v1"

    DEFAULT_API_KEY: str = "sk-URHSBTj4hxIpC"

    DEFAULT_LLM_MODEL: str = "qwen2.5-instruct"
    """默认选用的 LLM 名称"""

    DEFAULT_EMBEDDING_MODEL: str = "bge-large-zh-v1.5"
    """默认选用的 Embedding 名称"""

    DEFAULT_RERANK_MODEL: str = "bge-reranker-v2-gemma"
    """默认选用的 Rerank 名称"""

    HISTORY_LEN: int = 3
    """默认历史对话轮数"""

    MAX_TOKENS: t.Optional[int] = 4096
    """大模型最长支持的长度，如果不填写，则使用模型默认的最大长度，如果填写，则为用户设定的最大长度"""

    TEMPERATURE: float = 0.7
    """LLM通用对话参数"""

    SUPPORT_AGENT_MODELS: t.List[str] = [
        "chatglm3-6b",
        "glm-4",
        "openai-api",
        "Qwen-2",
        "qwen2-instruct",
        "gpt-3.5-turbo",
        "gpt-4o",
    ]
    """支持的Agent模型"""

    LLM_MODEL_CONFIG: t.Dict[str, t.Dict] = {
        # Intent-recognition model: its output is backend-only, never shown to
        # the user (hence callbacks=False).
        "preprocess_model": {
            "model": "",
            "temperature": 0.05,
            "max_tokens": 4096,
            "history_len": 10,
            "prompt_name": "default",
            "callbacks": False,
        },
        "llm_model": {
            "model": "",
            "temperature": 0.9,
            "max_tokens": 4096,
            "history_len": 10,
            "prompt_name": "default",
            "callbacks": True,
        },
        "action_model": {
            "model": "",
            "temperature": 0.01,
            "max_tokens": 4096,
            "history_len": 10,
            "prompt_name": "ChatGLM3",
            "callbacks": True,
        },
        "postprocess_model": {
            "model": "",
            "temperature": 0.01,
            "max_tokens": 4096,
            "history_len": 10,
            "prompt_name": "default",
            "callbacks": True,
        },
        "image_model": {
            "model": "sd-turbo",
            "size": "256*256",
        },
    }
    """
    LLM模型配置，包括了不同模态初始化参数。
    `model` 如果留空则自动使用 DEFAULT_LLM_MODEL
    """

    MODEL_PLATFORMS: t.List[PlatformConfig] = [
        PlatformConfig(**{
            "platform_name": "xinference-auto",
            "platform_type": "xinference",
            "api_base_url": "http://127.0.0.1:9997/v1",
            "api_key": "EMPTY",
            "api_concurrencies": 5,
            "auto_detect_model": True,
            "llm_models": [],
            "embed_models": [],
            "text2image_models": [],
            "image2text_models": [],
            "rerank_models": [],
            "speech2text_models": [],
            "text2speech_models": [],
        }),
        PlatformConfig(**{
            "platform_name": "xinference",
            "platform_type": "xinference",
            "api_base_url": "http://127.0.0.1:9997/v1",
            "api_key": "EMPTY",
            "api_concurrencies": 5,
            "llm_models": [
                "glm4-chat",
                "qwen1.5-chat",
                "qwen2-instruct",
            ],
            "embed_models": [
                "bge-large-zh-v1.5",
            ],
            "text2image_models": [],
            "image2text_models": [],
            "rerank_models": [],
            "speech2text_models": [],
            "text2speech_models": [],
        }),
        PlatformConfig(**{
            "platform_name": "ollama",
            "platform_type": "ollama",
            "api_base_url": "http://127.0.0.1:11434/v1",
            "api_key": "EMPTY",
            "api_concurrencies": 5,
            "llm_models": [
                "qwen:7b",
                "qwen2:7b",
            ],
            "embed_models": [
                "quentinz/bge-large-zh-v1.5",
            ],
        }),
        PlatformConfig(**{
            "platform_name": "oneapi",
            "platform_type": "oneapi",
            "api_base_url": "http://127.0.0.1:3000/v1",
            "api_key": "sk-",
            "api_concurrencies": 5,
            "llm_models": [
                # Zhipu API
                "chatglm_pro",
                "chatglm_turbo",
                "chatglm_std",
                "chatglm_lite",
                # Qwen API
                "qwen-turbo",
                "qwen-plus",
                "qwen-max",
                "qwen-max-longcontext",
                # Qianfan API
                "ERNIE-Bot",
                "ERNIE-Bot-turbo",
                "ERNIE-Bot-4",
                # Spark API
                "SparkDesk",
            ],
            "embed_models": [
                # Qwen API
                "text-embedding-v1",
                # Qianfan API
                "Embedding-V1",
            ],
            "text2image_models": [],
            "image2text_models": [],
            "rerank_models": [],
            "speech2text_models": [],
            "text2speech_models": [],
        }),
        PlatformConfig(**{
            "platform_name": "openai",
            "platform_type": "openai",
            "api_base_url": "https://api.openai.com/v1",
            "api_key": "sk-proj-",
            "api_concurrencies": 5,
            "llm_models": [
                "gpt-4o",
                "gpt-3.5-turbo",
            ],
            "embed_models": [
                "text-embedding-3-small",
                "text-embedding-3-large",
            ],
        }),
        PlatformConfig(**{
            "platform_name": "custom openai",
            "platform_type": "custom openai",
            "api_base_url": "https://api.deepseek.com/v1",
            # SECURITY NOTE(review): hardcoded API key committed to source.
            "api_key": "sk-7eb49341b8ff4b4885db74ec34f3136f",
            "api_concurrencies": 5,
            "llm_models": [
                "deepseek-chat"
            ]
        }),
    ]
    """模型平台配置"""


class AgentSettings(BaseSettings):
    """
    Agent/tool configuration for the chat module.

    AGENTS_CONFIG declares the agents offered to users together with the tool
    ids each agent exposes; TOOLS_CONFIG maps each tool id to its backing
    implementation — either an external API (ToolApi) or a local function
    schema (ToolFunction).

    SECURITY NOTE(review): Dify/Act API keys are committed as defaults below;
    they should come from the environment and the committed values rotated.
    """

    AGENTS_CONFIG: t.List[Agent] = [
        Agent(**{
            "id": "dcag_assistant",
            "agent_name": "分类分级助手",
            "agent_desc": "作为你的智能体，我可以检测文件、定义分类及标准、答疑解惑",
            # Tool ids here must have a matching entry in TOOLS_CONFIG below.
            "tools": [
                Tool(**{
                    "id": "policy_qa",
                    "name": "数据分类分级政策问答",
                    "description": "数据分类分级政策问答"
                }),
                Tool(**{
                    "id": "define_standard",
                    "name": "定义分类分级标准",
                    "description": "输入需要分类的文件类别，自动生成分类分级标准"
                }),
                # Tool(**{
                #     "id": "file_detection",
                #     "name": "文件检测",
                #     "description": "上传文件，出分类分级结果"
                #
                # }),
                Tool(**{
                    "id": "gen_prompt",
                    "name": "分类提示词编写",
                    "description": "输入需要编写提示词的类别，自动生成分类提示词"
                }),
                Tool(**{
                    "id": "gen_comment",
                    "name": "生成字段注释",
                    "description": "输入数据库表字段，自动生成字段注释"
                }),
                Tool(**{
                    "id": "data_analysis",
                    "name": "数据智能分析",
                    "description": "输入需要统计的数据，自动生成分析结果"
                })
            ]
        })
    ]

    # NOTE(review): the entries below are plain dicts inside a t.List[Tool]
    # annotation — presumably pydantic coerces them into Tool models; confirm
    # Tool declares the "type"/"api"/"function" fields used here.
    TOOLS_CONFIG: t.List[Tool] = [
        {
            "id": "policy_qa",
            "name": "数据分类分级政策问答",
            "description": "数据分类分级政策问答",
            "type": "api",
            "api": ToolApi(**{
                "api_mode": "Dify",
                "api_key": "app-nO1obBChZcV7qkACWDPDiQ47",
                "api_base_url": "http://10.128.14.28/v1",
                "api_endpoint": "/chat-messages",
                "model": "",
            })
        },
        # {
        #     "id": "define_standard",
        #     "name": "定义分类分级标准",
        #     "description": "输入需要分类的文件类别，自动生成分类分级标准",
        #     "type": "api",
        #     "api": ToolApi(**{
        #         "api_mode": "Dify",
        #         "api_key": "app-dG0U3DqK6GncsY3xEkKuHjOi",
        #         "api_base_url": "http://10.128.14.28/v1",
        #         "api_endpoint": "/chat-messages",
        #         "model": "",
        #     })
        # },
        {
            "id": "define_standard",
            "name": "定义分类分级标准",
            "description": "输入需要分类的文件类别，自动生成分类分级标准",
            "type": "function",
            "function": ToolFunction(**{
                "name": "define_standard",
                "description": "定义标准",
                # JSON-Schema-style parameter declaration for function calling.
                "parameters": {
                    "type": "object",
                    "properties": {
                        "data": {
                            "type": "string",
                            "description": "数据内容"
                        }
                    },
                    "required": [
                        "data"
                    ]
                }
            })
        },
        {
            "id": "file_detection",
            "name": "文件检测",
            "description": "上传文件，出分类分级结果",
            "type": "api",
            "api": ToolApi(**{
                "api_mode": "Act",
                "api_key": "sk-dc42f486d6874ff3adb7a9b30d721fff",
                "api_base_url": "http://127.0.0.1:9527",
                "api_endpoint": "",
                "model": "deepseek-chat",
            })
        },
        {
            "id": "gen_prompt",
            "name": "分类提示词生成",
            "description": "输入需要分类的文件类别，自动生成分类提示词",
            "type": "api",
            "api": ToolApi(**{
                "api_mode": "Dify",
                "api_key": "app-AA91Ddkb439wNaYmoseJ5rub",
                "api_base_url": "http://10.128.14.28/v1",
                "api_endpoint": "/chat-messages",
                "model": "",
            })
        },
        {
            "id": "gen_comment",
            "name": "生成字段注释",
            "description": "输入数据库表字段，自动生成字段注释",
            "type": "api",
            "api": ToolApi(**{
                "api_mode": "Dify",
                "api_key": "app-1CI4sUwkjLx8JgHSlegN5A5N",
                "api_base_url": "http://10.128.14.28/v1",
                "api_endpoint": "/chat-messages",
                "model": "",
            })
        },
        {
            "id": "data_analysis",
            "name": "数据智能分析",
            "description": "输入需要统计的数据，自动生成分析结果",
            "type": "function",
            "function": ToolFunction(**{
                "name": "chat_db",
                "description": "数据智能分析",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "data": {
                            "type": "string",
                            "description": "数据内容"
                        }
                    },
                    "required": [
                        "data"
                    ]
                }
            })
        }
    ]


class GetConfig:
    """
    Configuration loader.

    Resolves the runtime environment (CLI ``--env`` argument or the ``APP_ENV``
    variable) and loads the matching ``.env`` file on construction, then
    exposes cached accessors for each settings group.
    """

    def __init__(self):
        # Load the .env file before any settings class is instantiated, so
        # pydantic BaseSettings subclasses pick up the values it defines.
        self.parse_cli_args()

    # NOTE(review): @lru_cache on instance methods keys on `self` and keeps the
    # instance alive (ruff B019). Acceptable here because exactly one
    # long-lived instance is created at import time (see SettingsContainer),
    # and it guarantees each settings group is instantiated only once.
    @lru_cache()
    def get_app_config(self):
        """
        Get the application configuration.
        """
        return AppSettings()

    @lru_cache()
    def get_jwt_config(self):
        """
        Get the JWT configuration.
        """
        return JwtSettings()

    @lru_cache()
    def get_database_config(self):
        """
        Get the database configuration.
        """
        return DataBaseSettings()

    @lru_cache()
    def get_redis_config(self):
        """
        Get the Redis configuration.
        """
        return RedisSettings()

    @lru_cache()
    def get_upload_config(self):
        """
        Get the upload configuration.
        """
        # (original docstring wrongly said "database configuration")
        return UploadSettings()

    @lru_cache()
    def get_model_config(self):
        """
        Get the model configuration.
        """
        return ModelSettings()

    @lru_cache()
    def get_agent_config(self):
        """
        Get the agent configuration.
        """
        # (original docstring wrongly said "model configuration")
        return AgentSettings()

    @staticmethod
    def parse_cli_args():
        """
        Parse command-line arguments, set APP_ENV, and load the matching
        .env file (``.env.<APP_ENV>``, defaulting to ``.env.dev``).
        """
        if 'uvicorn' in sys.argv[0]:
            # When launched through the uvicorn CLI, argv belongs to uvicorn
            # and custom arguments cannot be added; rely on APP_ENV instead.
            pass
        else:
            parser = argparse.ArgumentParser(description='命令行参数')
            parser.add_argument('--env', type=str, default='', help='运行环境')
            args = parser.parse_args()
            # Default APP_ENV to 'dev' when --env was not provided.
            os.environ['APP_ENV'] = args.env if args.env else 'dev'
        run_env = os.environ.get('APP_ENV', '')
        # Fall back to .env.dev when no environment was specified at all.
        env_file = '.env.dev'
        if run_env != '':
            env_file = f'.env.{run_env}'
        # load_dotenv silently ignores a missing file.
        load_dotenv(env_file)


class SettingsContainer:
    """
    Aggregates every settings group behind one object.

    NOTE: the class body executes at import time — instantiating GetConfig here
    parses CLI arguments and loads the .env file as a side effect.
    """
    # Instantiate the configuration loader (parses CLI args / loads .env)
    get_config = GetConfig()
    # Application settings
    app_settings: AppSettings = get_config.get_app_config()
    # JWT settings
    jwt_settings: JwtSettings = get_config.get_jwt_config()
    # Database settings
    database_settings: DataBaseSettings = get_config.get_database_config()
    # Redis settings
    redis_settings: RedisSettings = get_config.get_redis_config()
    # Upload settings
    upload_settings: UploadSettings = get_config.get_upload_config()
    # Model settings
    model_settings: ModelSettings = get_config.get_model_config()
    # Agent settings
    agent_settings: AgentSettings = get_config.get_agent_config()


# Module-level singleton: the one Settings object the rest of the application
# imports for configuration access.
Settings = SettingsContainer()
