import json
import os
import random
import re
import requests
import base64
import os
import time
from pathlib import Path

class MetadataHelper():
    """Helpers for building merged MCP server metadata lists.

    Combines server entries from several upstream sources (the "office"
    list, the cursor.directory export, and ModelScope page dumps),
    optionally fetches README files from GitHub, and writes merged JSON
    artifacts that live next to this module.
    """

    # Maps a server launch command to the implementation language it implies.
    _COMMAND_LANGUAGE = {
        **dict.fromkeys(
            ["python", "python3", "python2", "uv", "uv2", "uv3",
             "pip", "pip3", "pip2", "uvx", "uvx3", "uvx2"],
            "python"),
        **dict.fromkeys(
            ["node", "nodejs", "npm", "yarn", "pnpm", "bun", "bunx", "npx"],
            "nodejs"),
        **dict.fromkeys(
            ["bash", "sh", "zsh", "fish", "powershell", "pwsh"],
            "shell"),
        **dict.fromkeys(["go", "golang"], "go"),
        **dict.fromkeys(["rust", "cargo"], "rust"),
    }

    def __init__(self) -> None:
        # All input/output JSON artifacts live next to this module.
        self.current_dir = os.path.dirname(os.path.abspath(__file__))
        self.office_list_path = os.path.join(self.current_dir, "map-list-office.json")
        self.cursor_list_path = os.path.join(self.current_dir, "mcp-list-cursor-directory.json")
        self.merged_list_path_v1 = os.path.join(self.current_dir, "mcp-list-v1.json")
        self.merged_list_path_v2 = os.path.join(self.current_dir, "mcp-list-v2.json")
        self.origin_cloudscope_json = os.path.join(self.current_dir, "modelscope", "mcp-list-modelscope-origin-dict.json")
        self.modelscope_json = os.path.join(self.current_dir, "mcp-list-modelscope.json")

    def load_json(self, filepath):
        """Load a JSON file; return the parsed data, or None on any error."""
        if not os.path.exists(filepath):
            print(f"错误：文件未找到 {filepath}")
            return None
        try:
            with open(filepath, 'r', encoding='utf-8') as f:
                return json.load(f)
        except json.JSONDecodeError as e:
            print(f"错误：解码 JSON 文件时出错 {filepath}: {e}")
            return None
        except Exception as e:
            print(f"错误：读取文件时出错 {filepath}: {e}")
            return None

    def write_json(self, filepath, data):
        """Write *data* to *filepath* as pretty-printed UTF-8 JSON."""
        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=2)

    def extract_github_owner(self, url):
        """Return the owner segment of a GitHub repo URL, or None."""
        if not isinstance(url, str):
            return None
        match = re.match(r"https?://github\.com/([^/]+)/?", url)
        return match.group(1) if match else None

    def extract_id_from_github_url(self, url):
        """Derive a slug id ("owner-repo") from a GitHub repository URL.

        Returns None for non-string input; raises ValueError when the URL
        does not look like a GitHub owner/repo URL.
        """
        if not isinstance(url, str):
            return None
        match = re.match(r"https?://github\.com/([^/]+)/([^/]+)/?", url)
        if not match:
            raise ValueError(f"无法从 {url} 中提取 id")
        slug = f"{match.group(1)}-{match.group(2)}"
        # Normalize characters that are awkward in ids and file paths.
        return slug.replace("@", "").replace(".", "-").replace(" ", "-")

    def merge_office_and_cursor_list(self):
        """Merge the office and cursor-directory lists into the v1 file.

        Office entries are processed first because their metadata is richer;
        cursor entries are only added when their GitHub-derived id is not
        already present. The result is written back to ``mcp-list-v1.json``.
        """
        office_data = self.load_json(self.office_list_path)
        cursor_data = self.load_json(self.cursor_list_path)
        # Start from the existing merged file so reruns are incremental.
        # BUGFIX: fall back to an empty dict when the v1 file does not exist
        # yet (load_json returns None), instead of crashing on the first run.
        merged_servers = self.load_json(self.merged_list_path_v1) or {}

        if office_data is None or cursor_data is None:
            print("无法加载一个或多个输入文件。正在中止。")
            return

        # office_data is shaped {"servers": [...]}.
        office_servers = office_data.get("servers", []) if isinstance(office_data, dict) else []
        # cursor_data is a plain list.
        cursor_servers = cursor_data if isinstance(cursor_data, list) else []

        print(f"处理来自 {self.office_list_path} 的 {len(office_servers)} 个 Server ...")
        # Office list first: it carries the richer metadata.
        for server in office_servers:
            github_url = server.get("github")
            if not github_url or not isinstance(github_url, str) or not github_url.startswith("http"):
                print(f"警告：跳过 office 列表中的无效或缺失 github URL 的条目：{server.get('id') or server.get('name')}")
                continue

            server_id = self.extract_id_from_github_url(github_url)
            if server_id in merged_servers:
                print(f"警告：跳过已存在的条目：{server_id}")
                continue

            merged_servers[server_id] = {
                "id": server_id,
                "name": server.get("name"),
                "provider": server.get("provider"),
                # Prefer an English description if the office file ever adds one.
                "description": server.get("description_en") or server.get("description"),
                # The office "description" field is usually Chinese.
                "description_cn": server.get("description"),
                "category": server.get("category"),
                "category_name": server.get("category_name"),
                "tags": server.get("tags", []),
                "github": github_url
            }
            # Normalize a missing English description to an empty string.
            if not merged_servers[server_id]["description"]:
                merged_servers[server_id]["description"] = ""

        print(f"处理来自 {self.cursor_list_path} 的 {len(cursor_servers)} 个 Server ...")
        # Cursor list second: only ids not seen yet are added.
        added_from_cursor = 0
        skipped_duplicates = 0
        for server in cursor_servers:
            github_url = server.get("link")  # the cursor list uses a "link" field
            if not github_url or not isinstance(github_url, str) or not github_url.startswith("http"):
                print(f"警告：跳过 cursor 列表中的无效或缺失 link URL 的条目：{server.get('id') or server.get('name')}")
                continue

            if not re.match(r"https?://github\.com/([^/]+)/([^/]+)/?", github_url):
                print(f"警告：跳过非 github 的 link URL 的条目：{server.get('id') or server.get('name')}")
                continue

            provider = self.extract_github_owner(github_url)
            server_id = self.extract_id_from_github_url(github_url)
            if server_id in merged_servers:
                skipped_duplicates += 1
                continue

            merged_servers[server_id] = {
                "id": server_id,
                "name": server.get("name"),
                "provider": provider,  # GitHub owner extracted from the URL
                "description": server.get("description"),  # usually English
                "description_cn": "",  # the cursor list has no Chinese text
                "category": "uncategorized",  # default
                "category_name": "未分类",  # default
                "tags": [],  # default
                "github": github_url
            }
            added_from_cursor += 1

        print(f"合并完成。总计独立 Server ：{len(merged_servers.keys())}")
        print(f"  来自 office 列表：{len(office_servers)}")
        print(f"  来自 cursor 列表（新增）：{added_from_cursor}")
        print(f"  来自 cursor 列表（重复跳过）：{skipped_duplicates}")

        # Persist the merged result.
        try:
            self.write_json(self.merged_list_path_v1, merged_servers)
            print(f"成功将合并后的列表写入 {self.merged_list_path_v1}")
        except Exception as e:
            print(f"错误：写入文件时出错 {self.merged_list_path_v1}: {e}")

    def fetch_and_save_readme_from_github(self):
        """Fetch each server's GitHub README and cache it locally.

        Reads the merged v1 list, downloads the README of every server that
        does not already have one (via the GitHub API), saves it under
        ``docs/servers/<server_id>/README.md``, and stores base64-encoded
        README content back into the merged JSON file.

        Raises:
            ValueError: when the GITHUB_TOKEN environment variable is unset.
        """
        servers = self.load_json(self.merged_list_path_v1)
        if servers is None:
            # Nothing to do without the merged list.
            print(f"错误：文件未找到 {self.merged_list_path_v1}")
            return
        print("开始获取GitHub仓库README文件...")

        # Base directory for the on-disk README cache.
        base_dir = Path("docs/servers")
        base_dir.mkdir(parents=True, exist_ok=True)

        headers = {
            "Accept": "application/vnd.github.v3+json"
        }

        # A token is mandatory: unauthenticated requests would exhaust the
        # GitHub rate limit almost immediately for lists of this size.
        github_token = os.environ.get("GITHUB_TOKEN")
        if not github_token:
            raise ValueError("GITHUB_TOKEN 未设置")
        headers["Authorization"] = f"token {github_token}"

        success_count = 0
        error_count = 0

        for server_id, server in servers.items():
            github_url = server.get("github")
            # Per-server cache directory.
            server_dir = base_dir / server_id
            server_dir.mkdir(exist_ok=True)
            readme_path = server_dir / "README.md"

            if not server_id or not github_url:
                print(f"警告：跳过缺少ID或GitHub URL的 Server ")
                continue

            # Migrate the legacy "readme" payload (raw GitHub API response)
            # to the flat "readme_content" field.
            if server.get("readme"):
                server["readme_content"] = server["readme"]["content"]
                del server["readme"]
                print(f"警告：跳过已存在的README: {server_id}")
                continue

            # A README already saved on disk: store its base64 form in the JSON.
            if os.path.exists(readme_path):
                with open(readme_path, "r", encoding="utf-8") as f:
                    content = f.read()
                content = base64.b64encode(content.encode("utf-8")).decode("utf-8")
                server["readme_content"] = content
                print(f"警告：跳过已存在的README: {server_id}")
                continue

            # BUGFIX: the original condition was inverted
            # (`if not server.get("readme_content")`), which skipped every
            # server still needing a fetch and made the download below
            # unreachable. Skip only servers that already have content.
            if server.get("readme_content"):
                print(f"警告：跳过已存在的README: {server_id}")
                continue

            try:
                # URL format: https://github.com/owner/repo
                parts = github_url.strip("/").split("/")
                if len(parts) < 5:
                    print(f"警告：无法解析GitHub URL: {github_url}")
                    error_count += 1
                    continue

                owner = parts[-2]
                repo = parts[-1]

                # GitHub "get repository README" endpoint.
                api_url = f"https://api.github.com/repos/{owner}/{repo}/readme"

                print(f"获取 {server_id} 的README: {api_url}")

                response = requests.get(api_url, headers=headers)

                if response.status_code == 200:
                    data = response.json()
                    # The API returns README content base64-encoded.
                    content = base64.b64decode(data["content"]).decode("utf-8")

                    with open(readme_path, "w", encoding="utf-8") as f:
                        f.write(content)

                    print(f"成功保存 {server_id} 的README")
                    success_count += 1
                    # Random extra pause after a successful download.
                    time.sleep(random.randint(1, 3))
                else:
                    print(f"错误：获取 {server_id} 的README失败，状态码: {response.status_code}")
                    print(f"响应: {response.text}")
                    error_count += 1

                # Base delay between requests for the GitHub API rate limit.
                time.sleep(1)

            except Exception as e:
                print(f"错误：处理 {server_id} 时出现异常: {e}")
                error_count += 1

        print(f"README获取完成。成功: {success_count}, 失败: {error_count}")
        # Persist the readme_content additions made above.
        self.write_json(self.merged_list_path_v1, servers)

    def remove_404_servers(self):
        """Strip the cached ``readme_content`` field from every server.

        NOTE(review): despite the name, no entries are removed — this only
        drops the bulky base64 README payloads from the merged v1 file.
        """
        servers = self.load_json(self.merged_list_path_v1)
        if servers is None:
            # BUGFIX: guard against a missing v1 file instead of crashing.
            return
        for server_id, server in servers.items():
            if server.get("readme_content"):
                print(f"删除 {server_id} 的README")
                del server["readme_content"]
        self.write_json(self.merged_list_path_v1, servers)

    def parse_modelscope_file_to_dict_by_url(self, filepath):
        """Index the servers of a ModelScope page dump by their source URL.

        The dump is a list of pages, each shaped
        ``{"Data": {"McpServer": {"McpServers": [...]}}}``; servers are keyed
        by their ``FromSiteUrl`` (later pages overwrite duplicates).
        """
        modelscope_data = self.load_json(filepath)
        servers_by_url = {}
        for page in modelscope_data:
            for server in page["Data"]["McpServer"]["McpServers"]:
                servers_by_url[server["FromSiteUrl"]] = server
        return servers_by_url

    def parse_modelscope_dict_to_array(self):
        """Attach category labels to ModelScope servers and write them as a list.

        Category files named ``[<category>]-[<category_cn>].json`` live in
        ``modelscope/分类``; every server found in a category file gets that
        label appended (deduplicated) before the combined list is written to
        ``mcp-list-modelscope.json``.
        """
        data = self.parse_modelscope_file_to_dict_by_url(self.origin_cloudscope_json)

        category_dir = os.path.join(self.current_dir, "modelscope", "分类")
        for filename in os.listdir(category_dir):
            filepath = os.path.join(category_dir, filename)
            ext = os.path.splitext(filename)[1]
            if not os.path.isfile(filepath) or ext != ".json":
                continue
            match = re.match(r"^\[(.*)\]-\[(.*)\]\.json$", filename)
            if not match:
                continue
            category = match.group(1)
            category_cn = match.group(2)

            category_data = self.parse_modelscope_file_to_dict_by_url(filepath)
            for url, server in category_data.items():
                if url in data:
                    # Merge the new label into any existing ones, deduplicated.
                    existing = data[url].get("Category") or []
                    data[url]["Category"] = list(set(existing + [category]))
                    existing_cn = data[url].get("CategoryCn") or []
                    data[url]["CategoryCn"] = list(set(existing_cn + [category_cn]))

        self.write_json(self.modelscope_json, list(data.values()))

    def summary_modelscope_array(self):
        """Print the set of source sites present in the ModelScope list."""
        modelscope_array = self.load_json(self.modelscope_json)
        from_list = {server["FromSite"] for server in modelscope_array}
        print(f"来自的列表: {from_list}")

    def get_language_from_config(self, config_str):
        """Infer the implementation language from a serialized server config.

        ``config_str`` is a JSON array of config objects; the first
        ``mcpServers`` entry whose ``command`` is recognized decides the
        language. Returns None when nothing matches.
        """
        config = json.loads(config_str)
        for item in config:
            for server in (item.get("mcpServers") or {}).values():
                language = self._COMMAND_LANGUAGE.get(server.get("command"))
                if language:
                    return language
        return None

    def merge_modelscope_into_merged_list(self):
        """Normalize the ModelScope list into the v2 merged-list schema.

        Writes one entry per ``FromSiteUrl`` to ``mcp-list-v2.json`` and
        tags servers with more than 500 views as "热门" (hot).
        """
        # Loaded for the (currently disabled) enrichment pass kept below.
        office_list = self.load_json(self.office_list_path)
        cursor_list = self.load_json(self.cursor_list_path)
        modelscope_array = self.load_json(self.modelscope_json)
        items_dict = {}

        hot_count = 0
        for server in modelscope_array:
            item_id = f"{server['FromSitePath']}-{server['Name']}-mcp".lower()
            url = server['FromSiteUrl']
            # Servers viewed more than 500 times get the "热门" tag.
            is_hot = server["ViewCount"] > 500
            tags = list(set(server['Tags'] + ["热门"])) if is_hot else server['Tags']
            if is_hot:
                hot_count += 1

            items_dict[url] = {
                "id": item_id,
                "name": server['Name'],
                "name_cn": server['ChineseName'],
                "icon": server['FromSiteIcon'],
                "provider": server['FromSitePath'],
                "description": server['Abstract'],
                "description_cn": server['AbstractCN'],
                "category": server.get('Category', None),
                "category_cn": server.get('CategoryCn', None),
                "is_official": False,
                "is_internal": False,
                "auth_method": "",
                "dev_language": self.get_language_from_config(server['ServerConfig']),
                "tags": tags,
                "from_site": server['FromSite'],
                "from_site_url": server['FromSiteUrl'],
                "server_config": server['ServerConfig'],
                "env_schema": server['EnvSchema'],
                "tools": server['Tools'],
                "readme": server['Readme'],
                "readme_cn": server['ReadmeCN'],
                # NOTE: "Verifed" is the upstream key's actual (misspelled) name.
                "verified": server['Verifed'],
            }

        # TODO(review): disabled enrichment from the office list, kept for reference.
        # for item in office_list["servers"]:
        #     url = item.get('github', None)
        #     if url and url in items_dict:
        #         if item.get("is_official"):
        #             items_dict[url]["is_official"] = True
        #         if item.get("language"):
        #             items_dict[url]["dev_language"] = item["language"][0]
        #         if item["category"]:
        #             items_dict[url]["category"] = list(set(items_dict[url]["category"] + [item["category"]])) if items_dict[url].get("category") else [item["category"]]
        #         if item["category_name"]:
        #             items_dict[url]["category_cn"] = list(set(items_dict[url]["category_cn"] + [item["category_name"]])) if items_dict[url].get("category_cn") else [item["category_name"]]

        self.write_json(self.merged_list_path_v2, list(items_dict.values()))
        print(f"热门Server数量: {hot_count}")

if __name__ == "__main__":
    # Script entry point: run only the currently-active pipeline stage.
    meta = MetadataHelper()
    # Earlier pipeline stages (disabled; re-enable one at a time as needed):
    # meta.merge_office_and_cursor_list()
    # meta.fetch_and_save_readme_from_github()
    # meta.remove_404_servers()
    # meta.parse_modelscope_dict_to_array()
    # meta.summary_modelscope_array()
    meta.merge_modelscope_into_merged_list()