import io
import random
import time
from urllib.parse import urljoin

import pandas as pd
import requests
import yaml
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.edge.service import Service
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait

# Base URL prefix used to absolutize hrefs scraped from table cells.
origin_href = "https://terraria.wiki.gg/zh/wiki"
# Fetch and parse a page.
def analyze_url(url):
    """Fetch *url* and return the parsed BeautifulSoup document.

    Prints the HTTP status code for quick debugging. A timeout is passed
    so a stalled connection cannot hang the whole crawl (the other
    fetchers in this file already use timeouts).
    """
    response = requests.get(url, timeout=15)
    print(f'状态：{response.status_code}')
    # Parse with the lxml backend (the fastest parser bs4 supports).
    soup = BeautifulSoup(response.text, 'lxml')
    return soup

# Persist scraped records to an Excel workbook.
def write_to_file(results, file_title, path):
    """Dump *results* (a list of row dicts) to ``{path}{file_title}.xlsx``.

    Prints a warning and writes nothing when *results* is empty.
    """
    if not results:
        print("⚠️ 未找到有效链接数据")
        return
    pd.DataFrame(results).to_excel(f"{path}{file_title}.xlsx", index=False)
    print(f"数据已写入Excel，共 {len(results)} 条记录")

# Collect the URLs of every ID listing page linked from the main page.
def get_id_url(main_url):
    """Scrape *main_url* for links to the whitelisted ID pages.

    Returns a list of ``{"标题": ..., "链接": ...}`` dicts and also writes
    them to ``data/url/<page title>.xlsx``.
    """
    results = []
    # Only these listing pages are of interest.
    wanted_titles = ["物品 ID", "NPC ID", "前缀 ID"]

    # Fetch and parse the page.
    soup = analyze_url(main_url)

    h1 = soup.find('h1').get_text(strip=True)
    # All titled anchors inside <main>.
    anchors = soup.find('main').find_all('a', title=True)

    for tag_a in anchors:
        text = tag_a.get_text(strip=True)
        if ("ID" in text) and (h1 not in text) and (text in wanted_titles):
            link = "https://terraria.wiki.gg" + tag_a['href']
            results.append({"标题": tag_a["title"], "链接": link})

    # Persist the discovered links for later pipeline stages.
    write_to_file(results, h1, 'data/url/')
    return results


def get_id(url):
    """Scrape the ID tables from *url* (full version: column alignment,
    link extraction, sparse-column filtering).

    *url* may also be a ``{"链接": ...}`` dict as produced by
    ``get_id_url``. Returns the table rows as a list of dicts and writes
    them to ``data/id/<page title>.xlsx``.
    """
    # 1. Validate / normalise the URL argument.
    if isinstance(url, dict):
        url = url.get('链接', '')
    if not url or not isinstance(url, str) or not url.startswith('http'):
        print(f"❌ 无效URL: {url}")
        return []

    # 2. Fetch and parse the page.
    soup = analyze_url(url)
    if not soup:
        return []

    # 3. Handle asynchronously loaded content via Selenium.
    # NOTE(review): 'aber' is not a standard HTML tag — confirm it matches
    # the wiki's markup for deferred content.
    abers = soup.find_all('aber')
    if abers and any(aber.get("title") == "此内容存于外部。点击后，会有稍许延迟以等待其下载。" for aber in abers):
        try:
            # The Selenium classes are already imported at module level;
            # the duplicate in-function imports were removed.
            service = Service(executable_path=r"driver\msedgedriver.exe")
            driver = webdriver.Edge(service=service)
            driver.get(url)

            load_link = WebDriverWait(driver, 10).until(
                EC.element_to_be_clickable((By.CSS_SELECTOR, "a.ajax-load-link"))
            )
            load_link.click()

            WebDriverWait(driver, 10).until(
                EC.presence_of_element_located((By.CSS_SELECTOR, "div.loaded-content"))
            )
            soup = BeautifulSoup(driver.page_source, 'html.parser')
        except Exception as e:
            print(f"⚠️ 异步加载失败: {e}")

    # 4. Process every table on the page.
    tables = soup.find_all('table')
    if not tables:
        print("⚠️ 未找到数据表格")
        return []

    h1 = soup.find('h1').get_text(strip=True) if soup.find('h1') else "未知标题"
    results = []

    for table_idx, table in enumerate(tables):
        rows = table.find_all('tr')
        if not rows:
            continue

        # 4.1 Detect the column count from the header row.
        header_cols = rows[0].find_all(['td', 'th'])
        n_columns = len(header_cols)
        print(f"📊 表格{table_idx + 1}检测到列数: {n_columns}")

        # 4.2 Normalise every row to exactly n_columns cells.
        table_data = []
        second_col_links = []

        for row in rows:
            cols = row.find_all(['td', 'th'])

            # Pad short rows with None, truncate long ones.
            if len(cols) < n_columns:
                cols += [None] * (n_columns - len(cols))
            elif len(cols) > n_columns:
                cols = cols[:n_columns]

            # Extract cell text, expanding colspan cells with blanks.
            row_text = []
            for col in cols:
                if not col:
                    row_text.append("")
                    continue

                colspan = int(col.get('colspan', 1))
                cell_text = col.get_text(strip=True)
                row_text.append(cell_text)
                if colspan > 1:
                    row_text.extend([""] * (colspan - 1))

            table_data.append(row_text[:n_columns])  # never exceed n_columns

            # 4.3 Extract the link (if any) from the second column.
            link_info = {'href': '', 'text': ''}
            if len(cols) >= 2 and cols[1]:
                a_tag = cols[1].find('a')
                if a_tag:
                    # NOTE(review): if hrefs are root-relative this
                    # double-prefixes /zh/wiki — verify against the site.
                    link_info['href'] = origin_href + a_tag.get('href', '')
                    # Fix: the link text was never captured before, leaving
                    # the 来源标题 column permanently empty.
                    link_info['text'] = a_tag.get_text(strip=True)
            second_col_links.append(link_info)

        # 4.4 Build the DataFrame, falling back to generated column names.
        try:
            # Use the header row as column names.
            headers = [col.get_text(strip=True) for col in header_cols][:n_columns]
            df = pd.DataFrame(table_data[1:], columns=headers)
        except ValueError as e:
            print(f"列数异常: {e}")
            # Generate column names dynamically (col_0, col_1, ...).
            df = pd.DataFrame(
                [row + [''] * (n_columns - len(row)) for row in table_data[1:]],
                columns=[f"col_{i}" for i in range(n_columns)]
            )

        # 4.5 Attach the extracted link data.
        if not df.empty:
            df['来源'] = [link['href'] for link in second_col_links[1:len(df) + 1]]
            df['来源标题'] = [link['text'] for link in second_col_links[1:len(df) + 1]]

        # 4.6 Drop sparse columns (fewer than 10 truthy values).
        if not df.empty:
            # Fix: record the column count BEFORE filtering; the old log
            # evaluated len(df.columns) after df was already filtered.
            original_col_count = len(df.columns)
            non_null_counts = df.apply(lambda col: col.astype(bool).sum())
            valid_cols = non_null_counts[non_null_counts >= 10].index
            df = df[valid_cols]
            print(f"🔄 列过滤: 保留{len(valid_cols)}列（原{original_col_count}列）")

        results.extend(df.to_dict(orient='records'))

    # 5. Persist and return.
    print(f"✅ 成功获取 {len(results)} 条数据")
    write_to_file(results, h1, "data/id/")
    return results


def scrape_boss_data(base_url):
    """Scrape the boss table at *base_url* plus each boss's detail page.

    For every boss linked from the first table, the detail page is fetched
    and the first <p> following the 召唤方式/出现方式, BOSS特性 and
    击杀攻略/击杀技巧 headings is captured. Results are written to
    ``data/final_data/boss.xlsx``.
    """
    all_data = []

    try:
        main_res = requests.get(base_url, timeout=15)
        main_res.raise_for_status()
        main_soup = BeautifulSoup(main_res.text, 'html.parser')

        # 1. Locate the boss table.
        table = main_soup.find('table')
        if not table:
            print("❌ 未找到表格")
            return

        # Collect (name, absolute URL) for every linked boss.
        boss_links = []
        for a_tag in table.find_all('a'):
            if a_tag.get('href'):
                boss_name = a_tag.get_text(strip=True)
                # Fix: hrefs in the table are typically relative wiki paths;
                # resolve them against base_url so the detail request below
                # does not fail (urljoin is a no-op for absolute URLs).
                boss_url = urljoin(base_url, a_tag['href'])
                boss_links.append((boss_name, boss_url))
                print(f"✅ 发现Boss: {boss_name} | 链接: {boss_url}")

        # 2. Visit each boss detail page.
        for boss_name, boss_url in boss_links:
            time.sleep(1.5)  # throttle to avoid hammering the wiki
            try:
                detail_res = requests.get(boss_url, timeout=15)
                detail_res.raise_for_status()
                detail_soup = BeautifulSoup(detail_res.text, 'html.parser')

                tactics_data = {
                    "召唤方式": "",
                    "BOSS特性": "",
                    "击杀攻略": ""
                }

                # 3. Pull the paragraph immediately following each heading.
                for h3 in detail_soup.find_all('h3'):
                    h3_text = h3.get_text(strip=True)

                    # "召唤方式" and "出现方式" feed the same field.
                    if h3_text in ["召唤方式", "出现方式"]:
                        next_p = h3.find_next_sibling('p')
                        if next_p:
                            tactics_data["召唤方式"] = next_p.get_text(strip=True)

                    elif h3_text == "BOSS特性":
                        next_p = h3.find_next_sibling('p')
                        if next_p:
                            tactics_data["BOSS特性"] = next_p.get_text(strip=True)

                    # "击杀攻略" and "击杀技巧" feed the same field.
                    elif h3_text in ["击杀攻略", "击杀技巧"]:
                        next_p = h3.find_next_sibling('p')
                        if next_p:
                            tactics_data["击杀攻略"] = next_p.get_text(strip=True)

                all_data.append({
                    "Boss": boss_name,
                    "链接": boss_url,
                    **tactics_data
                })
                print(f"📊 已提取: {boss_name} | 召唤方式: {tactics_data['召唤方式'][:20]}...")

            except Exception as e:
                print(f"⚠️ 处理 {boss_name} 失败: {str(e)}")

        # 4. Persist to Excel.
        if all_data:
            write_to_file(all_data, "boss", "data/final_data/")
            print(f"\n🎉 成功保存 {len(all_data)} 条数据到 boss.xlsx")
        else:
            print("❌ 未爬取到有效数据")

    except Exception as e:
        print(f"💥 全局错误: {str(e)}")

# Collect the links shown on the database overview page.
def get_database_url(url):
    """Extract every first-column link from the first table at *url*.

    Blacklisted entries are skipped. Returns a list of
    ``{'标题': ..., '链接': ...}`` dicts and writes them to
    ``data/url/database.xlsx``.
    """
    try:
        response = requests.get(url)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')

        # Only the first table on the page is relevant.
        table = soup.find('table')
        if not table:
            return []

        results = []
        # These entries are internal templates, not data pages.
        blacklist = ["Equipinfo", "Imageinfo"]
        for row in table.find_all('tr'):
            # First cell of the row (either td or th).
            first_col = row.find(['td', 'th'])
            if not first_col:
                continue

            for a_tag in first_col.find_all('a', href=True):
                text = a_tag.get_text(strip=True)
                # Resolve relative hrefs against the page URL.
                absolute_url = urljoin(url, a_tag['href'])
                if text not in blacklist:
                    results.append({'标题': text, '链接': absolute_url})

        write_to_file(results, "database", "data/url/")
        return results

    except Exception as e:
        print(f"⚠️ 提取失败: {str(e)}")
        return []

def scrape_terraria_cargo_data(initial_url, title):
    """Follow the "Special:Cargo查询" pagination from *initial_url* and
    collect every row of the wikitable across all result pages.

    Rows are returned as a list of dicts and written to
    ``data/database/<title>.xlsx``.
    """
    base_url = "https://terraria.wiki.gg"
    all_data = []

    try:
        # 1. Resolve the pagination entry point from the initial page.
        print(f"🌐 解析初始页面: {initial_url}")
        response = requests.get(initial_url, timeout=15)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')

        # Locate the "view more results" link.
        more_link = soup.select_one('a[title="Special:Cargo查询"]')
        if not more_link:
            print("⚠️ 未找到分页入口链接")
            return []

        # Build the first paginated URL.
        current_url = base_url + more_link['href']
        print(f"🔗 分页入口: {current_url}")

        # 2. Walk every paginated result page.
        page_count = 0
        while current_url:
            page_count += 1
            print(f"📖 抓取第 {page_count} 页: {current_url}")
            response = requests.get(current_url, timeout=15)
            # Fix: paginated requests previously skipped status checking,
            # silently parsing error pages.
            response.raise_for_status()
            soup = BeautifulSoup(response.text, 'html.parser')

            # Locate the target data table.
            table = soup.find('table', class_='wikitable')
            if not table:
                print("⚠️ 本页未找到数据表格")
                break

            # The header row supplies the column names.
            headers = []
            header_row = table.find('tr')
            if header_row:
                header_cells = header_row.find_all(['th', 'td'])
                headers = [cell.get_text(strip=True) for cell in header_cells]

            # Data rows (skip the header).
            page_data = []
            for row in table.find_all('tr')[1:]:
                cell_data = [cell.get_text(strip=True)
                             for cell in row.find_all(['td', 'th'])]

                if headers and len(headers) == len(cell_data):
                    page_data.append(dict(zip(headers, cell_data)))
                elif cell_data:
                    # Column-count mismatch: fall back to generated names.
                    page_data.append(
                        {f"col_{i}": val for i, val in enumerate(cell_data)})

            all_data.extend(page_data)
            print(f"✅ 本页获取 {len(page_data)} 行，累计 {len(all_data)} 行")

            # 3. Find the "next 100" link, if any.
            next_link = None
            for link in soup.find_all('a'):
                if link.get_text(strip=True) == '下100条':
                    next_link = link
                    break

            # Advance to the next page or stop.
            if next_link and 'href' in next_link.attrs:
                current_url = base_url + next_link['href']
                time.sleep(0.5)  # basic politeness delay
            else:
                print(f"🏁 已到达末页，共 {page_count} 页")
                current_url = None

    except Exception as e:
        print(f"⚠️ 处理失败: {str(e)}")

    write_to_file(all_data, title, "data/database/")
    return all_data


def scrape_npc_table(base_url):
    """Scrape the NPC table at *base_url* and each NPC's detail page.

    Parameters:
        base_url: URL of the page containing the NPC table.

    Results are written to ``data/final_data/城镇npc.xlsx``.
    """
    try:
        # 1. Fetch the listing page and gather every anchor in its tables.
        print("🔍 开始爬取NPC表格数据...")
        response = requests.get(base_url)
        response.raise_for_status()
        listing_soup = BeautifulSoup(response.text, 'html.parser')

        npc_links = []
        for table in listing_soup.find_all('table'):
            for anchor in table.find_all('a'):
                target = anchor.get('href')
                if not target:
                    continue
                name = anchor.get_text(strip=True)
                # Resolve relative hrefs against the listing page URL.
                absolute = urljoin(base_url, target)
                npc_links.append({"npc": name, "链接": absolute})
                print(f"✅ 发现NPC: {name} | 链接: {absolute}")

        if not npc_links:
            print("❌ 未找到表格中的NPC链接")
            return None

        # Section-heading -> output-field mapping; several headings feed
        # the same field because the wiki pages are inconsistent.
        heading_to_field = {
            "NPC介绍": "NPC介绍",
            "介绍NPC介绍": "NPC介绍",
            "入住条件": "入住条件",
            "入驻条件": "入住条件",
            "NPC作用": "NPC作用",
        }

        # 2. Visit every NPC detail page.
        all_data = []
        for npc in npc_links:
            time.sleep(1)  # throttle requests
            try:
                print(f"📡 正在爬取: {npc['npc']}")
                detail_res = requests.get(npc['链接'], timeout=15)
                detail_res.raise_for_status()
                detail_soup = BeautifulSoup(detail_res.text, 'html.parser')

                npc_data = {"NPC介绍": "", "入住条件": "", "NPC作用": ""}

                # 3. Capture the first <p> after each recognised heading.
                for h3 in detail_soup.find_all('h3'):
                    field = heading_to_field.get(h3.get_text(strip=True))
                    if field is None:
                        continue
                    paragraph = h3.find_next('p')
                    if paragraph:
                        npc_data[field] = paragraph.get_text(strip=True)

                all_data.append({
                    "npc": npc['npc'],
                    "链接": npc['链接'],
                    **npc_data
                })
                print(f"📊 已提取: {npc['npc']} | 介绍: {npc_data['NPC介绍'][:30]}...")

            except Exception as e:
                print(f"⚠️ 处理 {npc['npc']} 失败: {str(e)}")

        # 4. Persist to Excel.
        write_to_file(all_data, "城镇npc", "data/final_data/")
        print(f"\n🎉 成功保存 {len(all_data)} 条NPC数据")

    except Exception as e:
        print(f"💥 全局错误: {str(e)}")


def scrape_events_data(base_url):
    """Scrape the event table at *base_url* and each event's detail page.

    Parameters:
        base_url: URL of the page containing the event table.

    Results are written to ``data/final_data/事件.xlsx``.
    """
    collected = []

    try:
        # 1. Fetch the listing page and gather every anchor in its tables.
        print("🔍 开始爬取事件表格数据...")
        response = requests.get(base_url, timeout=15)
        response.raise_for_status()
        listing_soup = BeautifulSoup(response.text, 'html.parser')

        event_links = []
        for table in listing_soup.find_all('table'):
            for anchor in table.find_all('a'):
                target = anchor.get('href')
                if not target:
                    continue
                label = anchor.get_text(strip=True)
                # Resolve relative hrefs against the listing page URL.
                absolute = urljoin(base_url, target)
                event_links.append({"事件": label, "链接": absolute})
                print(f"✅ 发现事件: {label} | 链接: {absolute}")

        if not event_links:
            print("❌ 未找到表格中的事件链接")
            return None

        # Only these headings are extracted from the detail pages.
        wanted_headings = ("触发条件", "事件介绍")

        # 2. Visit every event detail page.
        for event in event_links:
            time.sleep(1)  # throttle requests
            try:
                print(f"📡 正在爬取: {event['事件']}")
                detail_res = requests.get(event['链接'], timeout=20)
                detail_res.raise_for_status()
                detail_soup = BeautifulSoup(detail_res.text, 'html.parser')

                detail_data = {"触发条件": "", "事件介绍": ""}

                # 3. Capture the first <p> after each recognised heading.
                for h3 in detail_soup.find_all('h3'):
                    heading = h3.get_text(strip=True)
                    if heading not in wanted_headings:
                        continue
                    paragraph = h3.find_next('p')
                    if paragraph:
                        detail_data[heading] = paragraph.get_text(strip=True)

                collected.append({
                    "事件": event['事件'],
                    "链接": event['链接'],
                    **detail_data
                })
                print(f"📊 已提取: {event['事件']} | 触发条件: {detail_data['触发条件'][:20]}...")

            except Exception as e:
                print(f"⚠️ 处理 {event['事件']} 失败: {str(e)}")

        # 4. Persist to Excel.
        write_to_file(collected, "事件", "data/final_data/")

    except Exception as e:
        print(f"💥 全局错误: {str(e)}")
        return None


if __name__ == '__main__':
    # Load the list of crawl targets. safe_load is sufficient for a plain
    # config file and avoids constructing arbitrary Python objects.
    with open('sources.yaml', 'r', encoding='utf-8') as file:
        data = yaml.safe_load(file)

    # Other pipeline stages exist but are currently disabled:
    #   url_id       -> get_id_url / get_id
    #   database_url -> get_database_url / scrape_terraria_cargo_data
    #   boss_url     -> scrape_boss_data
    #   npc_url      -> scrape_npc_table
    # Only invasion/event pages are processed at the moment.
    for config in data['sources']:
        if config['type'] == "invasion_url":
            scrape_events_data(config['url'])