import requests
from bs4 import BeautifulSoup
import time
import re
from urllib.parse import urljoin

import db


def get_headers():
    """Return the HTTP headers sent with every request to mcmod.cn.

    Uses a mobile Chrome/Edge User-Agent plus an explicit Host header,
    which the site expects on each request.
    """
    headers = {}
    headers['User-Agent'] = (
        'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Mobile Safari/537.36 Edg/139.0.0.0'
    )
    headers['Host'] = 'www.mcmod.cn'
    return headers


def scrape_mod_list(url):
    """Scrape one mod list page and return parallel (names, urls) lists.

    Parameters:
        url: Absolute URL of a mcmod.cn list page.

    Returns:
        tuple[list[str], list[str]]: mod display names and absolute
        detail-page URLs, index-aligned. Both lists are empty when the
        request fails or no mod links are found on the page.
    """
    print(f'--- 开始抓取列表页: {url} ---')

    try:
        response = requests.get(url, headers=get_headers(), timeout=10)
        response.raise_for_status()
    except requests.exceptions.RequestException as e:
        print(f"错误：请求失败。原因: {e}")
        return [], []

    soup = BeautifulSoup(response.text, 'lxml')
    link_tags = soup.select('.name a')

    if not link_tags:
        print("提示：在页面上没有找到任何模组链接。")
        return [], []

    print(f'\n--- 共找到 {len(link_tags)} 个模组 ---\n')

    # BUG FIX: build name/url pairs together so the two returned lists
    # can never desynchronize (previously an <a> without an href kept
    # its name but dropped its URL, which made scrape_mod_details abort
    # on the length mismatch).  Also resolve hrefs with urljoin instead
    # of naive string concatenation, so relative, root-relative and
    # protocol-relative links are all handled correctly — consistent
    # with scrape_mod_details, which already uses urljoin.
    pairs = [
        (tag.get_text(strip=True), urljoin(url, tag.get('href')))
        for tag in link_tags
        if tag.get('href')
    ]
    mod_names = [name for name, _ in pairs]
    mod_urls = [detail_url for _, detail_url in pairs]

    print("【模组名称列表】")
    print(mod_names)
    print('\n' + '=' * 30 + '\n')
    print("【模组详情链接列表】")
    print(mod_urls)
    print('\n--- 列表页抓取完成 ---')

    return mod_names, mod_urls


def scrape_mod_details(mod_names: list[str], mod_urls: list[str]) -> None:
    """Visit every mod detail page, extract metadata, and persist it via `db`.

    For each (name, url) pair this scrapes the category tags, view
    counts, red/black vote tallies and cover-image URL, inserts the
    record into three database tables (main list, type table, popularity
    table), and finally prints a human-readable summary of everything
    collected. Nothing is returned; all results go to the DB and stdout.

    Parameters:
        mod_names: Mod display names, index-aligned with ``mod_urls``.
        mod_urls: Absolute detail-page URLs, index-aligned with ``mod_names``.
    """
    # Guard clauses: nothing to scrape, or the parallel lists are desynced.
    if not mod_urls or not mod_names:
        print("没有可用的名称或URL，跳过详情抓取。")
        return

    if len(mod_names) != len(mod_urls):
        print("[严重错误] 模组名称列表和URL列表长度不匹配，无法继续！")
        return

    # Make sure the destination tables exist before any inserts.
    print("\n[数据库] 正在检查数据表...")
    db.create_table()
    print("[数据库] 数据表准备就绪。")

    print('\n' + '=' * 50 + '\n')
    print("--- 开始进入详情页，抓取模组分类、浏览量、红黑票和封面图URL ---")

    # Accumulates per-mod results keyed by mod_id for the final summary.
    all_mod_info: dict = {}

    for i, url in enumerate(mod_urls):
        mod_name = mod_names[i]

        print(f"\n[{i + 1}/{len(mod_urls)}] 正在抓取: {mod_name} | {url}")

        try:
            response = requests.get(url, headers=get_headers(), timeout=10)
            response.raise_for_status()
        except requests.exceptions.RequestException as e:
            # Best-effort: skip this mod and continue with the next one.
            print(f"  -> 请求此页面失败: {e}")
            continue

        detail_soup = BeautifulSoup(response.text, 'lxml')

        # Category tags, e.g. the genre/type labels shown on the page.
        categories = []
        category_tags = detail_soup.select('div.common-class-category > ul > li > a.normal')
        for a_tag in category_tags:
            category_name = a_tag.get_text(strip=True)
            categories.append(category_name)

        # View counts: the visible text is an abbreviated figure; the
        # exact number appears to live in the parent tag's `title`
        # attribute (per current page markup — may be None if absent).
        views_info = {}
        view_count_p_tag = detail_soup.select_one('div.span > p.n')
        if view_count_p_tag:
            formatted_views = view_count_p_tag.get_text(strip=True)
            exact_views = view_count_p_tag.parent.get('title')
            views_info = {'formatted': formatted_views, 'exact': exact_views}
        else:
            views_info = {'formatted': '无', 'exact': '0'}

        # Red (upvote) / black (downvote) tallies; defaults are all-zero
        # so missing markup still yields a complete record.
        votes_info = {'red': {'count': '0', 'percentage': '0'}, 'black': {'count': '0', 'percentage': '0'}}
        text_block = detail_soup.select_one('div.text-block')
        if text_block:
            vote_spans = text_block.find_all('span')
            for span in vote_spans:
                text = span.get_text(strip=True)
                # Matches strings like "红票123(95%)" / "黑票4(5%)".
                match = re.search(r'(红票|黑票)(\d+)\s*\((\d+)%\)', text)
                if match:
                    vote_type, count, percentage = match.groups()
                    if vote_type == '红票':
                        votes_info['red']['count'] = count
                        votes_info['red']['percentage'] = percentage
                    elif vote_type == '黑票':
                        votes_info['black']['count'] = count
                        votes_info['black']['percentage'] = percentage

        # Cover image: resolve a possibly relative/protocol-relative src
        # against the detail-page URL.
        cover_image_url = '无'
        cover_img_tag = detail_soup.select_one('div.class-cover-image img')
        if cover_img_tag and cover_img_tag.has_attr('src'):
            img_src = cover_img_tag['src']
            full_img_url = urljoin(url, img_src)
            cover_image_url = full_img_url

        # Mod ID is derived from the URL's last path segment, e.g.
        # ".../class/1234.html" -> "1234".
        mod_id = url.split('/')[-1].replace('.html', '')

        # Record shape expected by the db.insert_* helpers (keys are the
        # table column names — do not rename).
        mod_data_to_insert = {
            'modID': mod_id,
            'mod名称': mod_name,
            'mod标签': ', '.join(categories) if categories else '无',
            'mod浏览量': views_info['exact'],
            'mod封面': cover_image_url,
            '红票': votes_info['red']['count'],
            '黑票': votes_info['black']['count']
        }

        print(f"  -> 正在将数据存入数据库...")

        # 1. Insert into the main table ("mod列表"); the two secondary
        #    inserts below only run if this one succeeds.
        success_main = db.insert_mod_info(mod_data_to_insert)
        if success_main:
            print(f"  -> [成功] 模组 '{mod_name}' 数据已存入 'mod列表'。")

            # 2. Insert into the type table ("mod类型表").
            success_type = db.insert_mod_type(mod_data_to_insert)
            if success_type:
                print(f"  -> [成功] 模组 '{mod_name}' 类型数据已存入 'mod类型表'。")
            else:
                print(f"  -> [失败] 模组 '{mod_name}' 类型数据存入 'mod类型表' 失败。")

            # 3. Insert into the popularity table ("mod热度表").
            success_popularity = db.insert_mod_popularity(mod_data_to_insert)
            if success_popularity:
                print(f"  -> [成功] 模组 '{mod_name}' 热度数据已存入 'mod热度表'。")
            else:
                print(f"  -> [失败] 模组 '{mod_name}' 热度数据存入 'mod热度表' 失败。")
        else:
            print(f"  -> [失败] 模组 '{mod_name}' 数据存入 'mod列表' 表失败，跳过其他表。")

        # Keep an in-memory copy for the end-of-run summary printout.
        all_mod_info[mod_id] = {
            'mod_name': mod_name,
            'categories': categories,
            'views': views_info,
            'votes': votes_info,
            'cover_image_url': cover_image_url
        }
        print(f"     分类: {categories if categories else '无'}")
        print(f"     浏览量: {views_info['formatted']} (精确值: {views_info['exact']})")
        print(f"     红票: {votes_info['red']['count']} ({votes_info['red']['percentage']}%)")
        print(f"     黑票: {votes_info['black']['count']} ({votes_info['black']['percentage']}%)")
        print(f"     封面图URL: {cover_image_url}")

        # Throttle between detail pages to be polite to the server.
        time.sleep(1.5)

    print('\n--- 所有模组详情抓取并存储完成！ ---')
    print('\n' + '=' * 50 + '\n')
    print("【汇总：所有模组及其详情】")
    for mod_id, info in all_mod_info.items():
        print(f"模组ID: {mod_id}")
        print(f"  模组名称: {info['mod_name']}")
        if info['categories']:
            print(f"  分类标签: {', '.join(info['categories'])}")
        else:
            print("  分类标签: 无")
        print(f"  总浏览量: {info['views']['formatted']} (精确值: {info['views']['exact']})")
        print(f"  红票: {info['votes']['red']['count']} ({info['votes']['red']['percentage']}%)")
        print(f"  黑票: {info['votes']['black']['count']} ({info['votes']['black']['percentage']}%)")
        print(f"  封面图URL: {info['cover_image_url']}")
        print("-" * 20)

if __name__ == '__main__':
    base_url = 'https://www.mcmod.cn/modlist.html'
    pages_to_scrape = 15

    all_mod_names = []
    all_mod_detail_urls = []

    # BUG FIX: the loop previously started at page 11 (a debugging
    # leftover), which silently skipped pages 1-10 and made the
    # dedicated page-1 branch below unreachable.
    for page_num in range(1, pages_to_scrape + 1):
        if page_num == 1:
            # Page 1 is served at the bare list URL, without ?page=.
            target_url = base_url
        else:
            target_url = f"{base_url}?&page={page_num}"

        print(f"\n>>> 正在抓取第 {page_num} 页: {target_url}")
        mod_names, mod_detail_urls = scrape_mod_list(target_url)
        all_mod_names.extend(mod_names)
        all_mod_detail_urls.extend(mod_detail_urls)

        # Throttle between list pages (skip the wait after the last one).
        if page_num < pages_to_scrape:
            print(">>> 等待 2 秒后继续下一页...")
            time.sleep(2)

    print(f"\n>>> 所有页面抓取完成，共获取到 {len(all_mod_detail_urls)} 个模组信息。")

    if all_mod_detail_urls:
        scrape_mod_details(all_mod_names, all_mod_detail_urls)

        print("\n" + "=" * 50)
        print("--- 从数据库中读取所有已存储的模组信息以供验证 ---")
        # Query once and reuse the result (previously the ImportError
        # fallback re-queried the database a second time).  The pandas
        # preview is optional: without pandas we fall back to printing
        # each row directly.
        all_mods_from_db = db.fetch_all_mods()
        if all_mods_from_db:
            try:
                import pandas as pd
            except ImportError:
                for mod in all_mods_from_db:
                    print(mod)
            else:
                df = pd.DataFrame(all_mods_from_db)
                print("\n--- 数据库 'mod列表' 内容预览 (使用Pandas展示) ---")
                print(df.to_string())
        else:
            print("\n[数据库] 没有从数据库 'mod列表' 中查询到任何数据。")