import requests
import json
import time
import os
from decrypt_spider import DecryptSpider

# Maps a category slug (the suffix of https://wmqxz.net/category/<slug>)
# to the numeric categoryId string the listing API expects.
CATEGORY_IDS = {
    "jvid": "1737018067636858880",
    "xiuren": "1711283599160651776",
    "weimimao": "1686327847767257088",
    "fuliji": "1706995405599289344",
    "mote": "1793271534551576576",
    "sentian": "1715215116599963648",
    "hanfu": "1791110818802446336",
}


def _parse_vmq_response(response, silent=False):
    """Decode a 200-OK API response body into a parsed JSON dict.

    The API may return its JSON payload encrypted: a Base64 string wrapped
    in literal double quotes. Such payloads are decrypted via DecryptSpider
    before parsing; plain bodies are parsed directly as JSON.

    Args:
        response: a requests.Response object with status code 200.
        silent: suppress log output.

    Returns:
        The parsed dict on success, or None on any decode/parse failure.
    """
    response_text = response.text.strip()

    # A body wrapped in double quotes signals an encrypted Base64 payload.
    # Require at least 2 chars so a lone '"' cannot satisfy both the
    # startswith and endswith checks (which would strip it to "").
    if len(response_text) >= 2 and response_text.startswith('"') and response_text.endswith('"'):
        clean_response = response_text[1:-1]

        # Decryption is purely local, so the spider never needs the proxy.
        spider = DecryptSpider(use_proxy=False)

        if not spider.is_base64(clean_response):
            if not silent:
                print("⚠️ 响应不是Base64格式")
            return None

        if not silent:
            print("🔓 检测到加密响应，正在解密...")
        decrypted = spider.decrypt_response(clean_response)
        if not decrypted:
            if not silent:
                print("❌ 解密失败")
            return None

        try:
            result = json.loads(decrypted)
        except json.JSONDecodeError:
            if not silent:
                print("❌ 解密结果不是有效JSON")
            return None
        if not silent:
            print("✅ 解密成功")
        return result

    # Unquoted body: assume plain (unencrypted) JSON.
    try:
        return response.json()
    except json.JSONDecodeError:
        if not silent:
            print("❌ 响应不是有效JSON")
        return None


def get_vmq_data(page_num=1, page_size=20, category_suffix=None, use_proxy=True, silent=False):
    """Fetch one page of media listings from the VMQ API.

    Args:
        page_num: 1-based page number to request.
        page_size: number of items per page.
        category_suffix: required category slug, e.g. 'jvid', 'weimimao',
            'fuliji' (see CATEGORY_IDS for the full set).
        use_proxy: route the request through the local Clash proxy
            at 127.0.0.1:7890.
        silent: suppress log output.

    Returns:
        The parsed response dict on success, or None on any failure
        (HTTP error, network error, undecodable body).

    Raises:
        ValueError: if category_suffix is missing or not a known category.
    """
    if not category_suffix:
        raise ValueError("category_suffix 参数是必需的，请提供分类后缀")

    # Resolve the slug to the numeric categoryId the API expects.
    category_id = CATEGORY_IDS.get(category_suffix)
    if not category_id:
        available_categories = ", ".join(CATEGORY_IDS.keys())
        raise ValueError(f"不支持的分类 '{category_suffix}'。可用分类: {available_categories}")

    # Local Clash proxy endpoint.
    proxies = {
        'http': 'http://127.0.0.1:7890',
        'https': 'http://127.0.0.1:7890'
    } if use_proxy else None

    # Full browser-like request headers (verified working set).
    headers = {
        'Accept-Language': 'zh-CN,zh;q=0.9,ja;q=0.8',
        'Connection': 'keep-alive',
        'Origin': 'https://wmqxz.net',
        'Referer': f'https://wmqxz.net/category/{category_suffix}',
        'Sec-Fetch-Dest': 'empty',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Site': 'same-origin',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36',
        'accept': 'application/json',
        'content-type': 'application/json;charset=utf-8',
        'language': 'en_US',
        'macct': 'sf06',  # merchant-account header; required by the API
        'os': '2',
        'sec-ch-ua': '"Not)A;Brand";v="8", "Chromium";v="138", "Google Chrome";v="138"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'ver': '1.0'
    }

    # Full session cookies (verified working set). NOTE(review): token and
    # userStore are session credentials and will expire over time.
    cookies = {
        '__51uvsct__KisFAjMmWonEmmaa': '1',
        '__51vcke__KisFAjMmWonEmmaa': '124af7b9-a489-54e1-9e9f-2aad28de65e5',
        '__51vuft__KisFAjMmWonEmmaa': '1747203033573',
        '__51vcke__KpRFHkppObUqVbFN': 'af7f4762-a1c2-542c-b776-2e8c1f96f3bb',
        '__51vuft__KpRFHkppObUqVbFN': '1753076527969',
        'deviceId': '5Bnzbb6FGhh3eXhdNbbjGaHQ',
        'userStore': '%7B%22info%22%3A%7B%22userId%22%3A%221947474922350616576%22%2C%22merchantAcct%22%3A%22sf555%22%2C%22masterAcct%22%3A%22sf555_m%22%2C%22agentAcct%22%3A%22sf555_m_no_agent%22%2C%22userAcct%22%3A%226CJB9M69%22%2C%22acctType%22%3A3%2C%22referCode%22%3Anull%2C%22shareCode%22%3A%226CJB9M69%22%2C%22isPartner%22%3A0%2C%22phoneNumber%22%3Anull%2C%22background%22%3Anull%2C%22headUrl%22%3A%22%2Fadmin%2Fuser%2Fhead%2Fb1f9ce42dc8341c587a9f9ff5c28e887%22%2C%22nickName%22%3A%226CJB9M69%22%2C%22signature%22%3Anull%2C%22loginType%22%3Anull%2C%22coinBalance%22%3A0%2C%22balance%22%3A0%2C%22exp%22%3A0%2C%22expLevel%22%3A0%2C%22iconFree%22%3A1%2C%22vipBegin%22%3Anull%2C%22vipEnd%22%3Anull%2C%22vipFlag%22%3Afalse%2C%22vipTitle%22%3Anull%2C%22vipPackageId%22%3Anull%2C%22userStatus%22%3A0%2C%22followers%22%3Anull%2C%22followed%22%3Anull%2C%22lastLoginDate%22%3A%222025-07-22%22%2C%22currentLoginDate%22%3A%222025-07-22%22%2C%22city%22%3A%22%E9%A6%99%E6%B8%AF%22%2C%22gender%22%3A0%2C%22videoFreeBegin%22%3Anull%2C%22videoFreeEnd%22%3Anull%2C%22actorFreeBegin%22%3Anull%2C%22actorFreeEnd%22%3Anull%2C%22expand%22%3Anull%7D%2C%22searchList%22%3A%5B%5D%2C%22scrollLeft%22%3A0%2C%22dialogTime%22%3A1753149166786%2C%22dialog4Time%22%3A0%2C%22dialog9Time%22%3A1753149166786%2C%22dialog16Time%22%3A0%2C%22agentCode%22%3A%22TDV6MC4J%22%2C%22inviteCode%22%3A%22%22%7D',
        'token': 'c10b205604144861a63ae042369462b9.XcUp61ZDpRMflkP274FoxYmcz8D2kaaoqyiL1dzNmPYUd4aEwsXUZc%2BY5hTxYN7mfBY9ol05xRT3zvt4vradox5F4IOs748CbYjpA%2FhxX%2BmT40LZqxObs6f%2FHCAGvGKXS2DOqCYKkXc0zw4wXiO%2B9506ab0iJKQl.00b4680784e9fffacbbc6c8a8eab820c',
        '__51uvsct__KpRFHkppObUqVbFN': '6',
        '__vtins__KpRFHkppObUqVbFN': '%7B%22sid%22%3A%20%2229142d1b-dcf9-545c-83d1-aa54883438b8%22%2C%20%22vd%22%3A%204%2C%20%22stt%22%3A%201231681%2C%20%22dr%22%3A%20379461%2C%20%22expires%22%3A%201753157389220%2C%20%22ct%22%3A%201753155589220%7D'
    }

    url = "https://wmqxz.net/member/media/stationGroup/listMediaBySearchType"
    data = {
        "categoryId": category_id,
        "orderType": "SORT_PUBLISH_WEIGHT",
        "mediaType": 3,
        "pageNo": page_num,
        "pageSize": page_size
    }

    try:
        if not silent:
            print(f"🚀 正在获取第 {page_num} 页数据 (分类: {category_suffix})...")
            if use_proxy:
                print("🌐 使用代理: http://127.0.0.1:7890")

        response = requests.post(url, json=data, headers=headers, cookies=cookies, proxies=proxies, timeout=30)

        if not silent:
            print(f"📥 响应状态码: {response.status_code}")

        if response.status_code == 200:
            return _parse_vmq_response(response, silent)

        if not silent:
            print(f"❌ 请求失败: {response.status_code}")
            print(f"响应内容: {response.text[:500]}...")
        return None

    except Exception as e:
        # Network/proxy failures and any unexpected decoding error land here.
        if not silent:
            print(f"❌ 请求出错: {str(e)}")
        return None


def extract_posts_from_response(data, silent=False):
    """Pull flat media-post records out of a decoded API response.

    Args:
        data: parsed response dict as returned by get_vmq_data.
        silent: suppress log output.

    Returns:
        List of post dicts (id, title, url, author, counters, tags, ...);
        empty on missing/invalid data or an API-level error code.
    """
    posts = []

    if not data:
        return posts

    try:
        # Guard: the response must be a dict at the top level.
        if not isinstance(data, dict):
            if not silent:
                print("⚠️ 响应不是字典格式")
            return posts

        # Guard: the API signals success with code == 0.
        code = data.get('code', -1)
        msg = data.get('msg', 'N/A')
        if code != 0:
            if not silent:
                print(f"❌ API返回错误: code={code}, msg={msg}")
            return posts
        if not silent:
            print(f"✅ API响应成功: {msg}")

        # Guard: the payload under 'data' must also be a dict.
        payload = data.get('data', {})
        if not isinstance(payload, dict):
            if not silent:
                print("⚠️ data字段不是字典格式")
            return posts

        # Guard: the actual entries live under 'dataList'.
        if 'dataList' not in payload:
            if not silent:
                print("⚠️ 无法找到dataList字段")
                print(f"data字段包含: {list(payload.keys())}")
            return posts

        entries = payload['dataList']
        if not silent:
            total = payload.get('total', 0)
            current_page = payload.get('current', 'N/A')
            print(f"📊 第{current_page}页，共{total}个项目，本页{len(entries)}个")

        # Flatten each entry (media item + nested article/author info).
        for entry in entries:
            if not isinstance(entry, dict):
                continue

            article = entry.get('article', {})
            author = entry.get('userInfoPo', {})
            media_id = entry.get('mediaId', '')

            record = {
                'id': media_id,
                'title': article.get('articleTitle', ''),
                'url': f"https://wmqxz.net/archives/{media_id}",
                'create_time': article.get('createTime', ''),
                'view_count': entry.get('viewCount', 0),
                'favor_count': entry.get('favorCount', 0),
                'comment_count': entry.get('commentCount', 0),
                'author': author.get('nickName', ''),
                'author_id': author.get('userAcct', ''),
                'category_id': entry.get('categoryId', ''),
                'media_type': entry.get('mediaType', 0),
                'pay_type': entry.get('payType', 0),
                'recommend': entry.get('recommend', 0),
                'cover_img': article.get('articleCoverImg', ''),
                'content_path': article.get('content', ''),
                'cover_width': article.get('coverWidth', 0),
                'cover_height': article.get('coverHeight', 0),
                'tag_list': [tag.get('tagName', '') for tag in entry.get('tagList', [])],
            }

            # Only keep entries with both a title and a media id.
            if record['title'] and record['id']:
                posts.append(record)

        if not silent:
            print(f"✅ 提取到 {len(posts)} 个媒体项目")
        return posts

    except Exception as exc:
        # Best-effort: return whatever was extracted before the failure.
        if not silent:
            print(f"❌ 提取媒体信息时出错: {str(exc)}")
            import traceback
            traceback.print_exc()
        return posts


def batch_crawl_vmq(start_page=1, end_page=5, category_suffix=None, use_proxy=True, delay=2):
    """Crawl a range of VMQ listing pages and persist the results.

    For each page the raw JSON response is dumped to
    ``vmq_page_<page>_raw.json``; all extracted posts are written to a
    combined report ``vmq_<category>_<start>_<end>.txt``.

    Args:
        start_page: first page number (inclusive).
        end_page: last page number (inclusive).
        category_suffix: required category slug, e.g. 'jvid', 'weimimao'.
        use_proxy: forwarded to get_vmq_data.
        delay: seconds to sleep between page requests.

    Returns:
        List of all extracted post dicts across the crawled pages.

    Raises:
        ValueError: if category_suffix is not provided.
    """
    if not category_suffix:
        raise ValueError("category_suffix 参数是必需的，请提供分类后缀")

    collected = []

    print(f"🎯 开始批量爬取 VMQ 媒体数据")
    print(f"📂 分类: {category_suffix}")
    print(f"📄 页面范围: {start_page} - {end_page}")
    print(f"🌐 使用代理: {'是' if use_proxy else '否'}")
    print("=" * 50)

    for page_no in range(start_page, end_page + 1):
        payload = get_vmq_data(page_no, category_suffix=category_suffix, use_proxy=use_proxy)

        if payload:
            # Keep the raw response on disk for debugging / re-parsing.
            with open(f"vmq_page_{page_no}_raw.json", 'w', encoding='utf-8') as raw_file:
                json.dump(payload, raw_file, ensure_ascii=False, indent=2)

            page_posts = extract_posts_from_response(payload)
            collected.extend(page_posts)
            print(f"📊 第 {page_no} 页: {len(page_posts)} 个媒体项目")
        else:
            print(f"❌ 第 {page_no} 页获取失败")

        # Throttle between requests; skip the wait after the final page.
        if page_no < end_page:
            print(f"⏳ 等待 {delay} 秒...")
            time.sleep(delay)

    print("=" * 50)
    print(f"🎉 爬取完成！总共获取 {len(collected)} 个媒体项目")

    if collected:
        output_file = f"vmq_{category_suffix}_{start_page}_{end_page}.txt"
        with open(output_file, 'w', encoding='utf-8') as report:
            report.write(f"VMQ 批量爬取结果 - {category_suffix} (第{start_page}-{end_page}页)\n")
            report.write("=" * 80 + "\n\n")

            for index, entry in enumerate(collected, 1):
                report.write(
                    f"{index}. {entry['title']}\n"
                    f"   ID: {entry['id']}\n"
                    f"   URL: {entry['url']}\n"
                    f"   作者: {entry['author']} ({entry['author_id']})\n"
                    f"   统计: 观看{entry['view_count']} | 收藏{entry['favor_count']} | 评论{entry['comment_count']}\n"
                    f"   标签: {', '.join(entry['tag_list'])}\n"
                    f"   时间: {entry['create_time']}\n"
                    f"   封面: {entry['cover_img']}\n"
                    f"   内容路径: {entry['content_path']}\n\n"
                )

        print(f"📁 结果已保存到: {output_file}")

        # Show a short preview of the first few results.
        print("\n前5个媒体项目预览:")
        for index, entry in enumerate(collected[:5], 1):
            print(f"{index}. {entry['title'][:60]}...")
            print(f"   作者: {entry['author']} | 观看: {entry['view_count']}")

    return collected


def get_user_input():
    """Interactively prompt for a category slug and a page number.

    Re-prompts until a known category and a positive integer page number
    (default 2) are entered.

    Returns:
        Tuple of (category_suffix, page_num).
    """
    print("🎯 VMQ 单页爬取工具")
    print("=" * 50)

    # List the selectable categories up front.
    category_names = list(CATEGORY_IDS.keys())
    print("📂 可用分类:")
    for idx, name in enumerate(category_names, 1):
        print(f"   {idx}. {name}")
    print()

    # Loop until a non-empty, known category slug is entered.
    category_suffix = None
    while category_suffix is None:
        print("请输入分类后缀:")
        entered = input("分类后缀: ").strip().lower()

        if not entered:
            print("❌ 分类后缀不能为空，请重新输入！")
            print()
        elif entered not in CATEGORY_IDS:
            available = ", ".join(category_names)
            print(f"❌ 不支持的分类 '{entered}'")
            print(f"   可用分类: {available}")
            print()
        else:
            category_suffix = entered
            print(f"✅ 选择的分类: {category_suffix}")
            print(f"🔗 目标URL: https://wmqxz.net/category/{category_suffix}")

    # Loop until a valid page number is entered (empty input -> default 2).
    while True:
        raw_page = input("请输入要爬取的页码 (默认2): ").strip()
        try:
            page_num = int(raw_page) if raw_page else 2
        except ValueError:
            print("❌ 页码格式无效，请输入数字！")
            continue

        if page_num < 1:
            print("❌ 页码必须大于0，请重新输入！")
            continue

        print(f"✅ 选择的页码: {page_num}")
        break

    return category_suffix, page_num

if __name__ == "__main__":
    # Interactive single-page crawl entry point.
    category_suffix, page_num = get_user_input()
    print()

    print(f"🚀 开始爬取 {category_suffix} 分类第 {page_num} 页...")
    data = get_vmq_data(page_num=page_num, category_suffix=category_suffix, use_proxy=True)

    if data:
        print("✅ 数据获取成功！")

        # Persist the raw API response for debugging / re-parsing.
        filename = f"vmq_{category_suffix}_page_{page_num}_raw.json"
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=2)
        # BUGFIX: previously printed the literal text "(unknown)" instead of
        # the actual output file name built above.
        print(f"📁 原始数据已保存: {filename}")

        # Flatten the response into post records.
        posts = extract_posts_from_response(data)

        if posts:
            print(f"📊 成功提取到 {len(posts)} 个媒体项目")

            # Write a human-readable report of the extracted posts.
            output_file = f"vmq_{category_suffix}_page_{page_num}.txt"
            with open(output_file, 'w', encoding='utf-8') as f:
                f.write(f"VMQ 单页爬取结果 - {category_suffix} (第{page_num}页)\n")
                f.write("=" * 80 + "\n\n")

                for i, post in enumerate(posts, 1):
                    f.write(f"{i}. {post['title']}\n")
                    f.write(f"   ID: {post['id']}\n")
                    f.write(f"   URL: {post['url']}\n")
                    f.write(f"   作者: {post['author']} ({post['author_id']})\n")
                    f.write(f"   统计: 观看{post['view_count']} | 收藏{post['favor_count']} | 评论{post['comment_count']}\n")
                    f.write(f"   标签: {', '.join(post['tag_list'])}\n")
                    f.write(f"   时间: {post['create_time']}\n")
                    f.write(f"   封面: {post['cover_img']}\n")
                    f.write(f"   内容路径: {post['content_path']}\n\n")

            print(f"📁 结果已保存: {output_file}")

            # Short preview of the first few extracted posts.
            print("\n📋 前5个媒体项目预览:")
            for i, post in enumerate(posts[:5], 1):
                print(f"{i}. {post['title'][:60]}...")
                print(f"   作者: {post['author']} | 观看: {post['view_count']}")

        else:
            print("❌ 未能提取到媒体项目")
            print("💡 可能的原因:")
            print("  1. 该分类在此页码没有数据")
            print("  2. 数据结构发生变化")
    else:
        print("❌ 数据获取失败")
        print("💡 可能的问题:")
        print("  1. Cookie/Token 已过期，需要更新")
        print("  2. 代理连接问题")
        print("  3. 网站API变化")
        print("  4. 分类不存在或页码超出范围")