# Simple input GUI for configuring and launching the web crawler
import asyncio
import json
import os
from tkinter import Tk, Label, Entry, Button, messagebox, ttk
from .crawler import WebCrawler
from .ai_processor import process_with_ai


class CrawlerApp:
    """Tkinter front-end that collects crawl parameters and runs the crawler.

    The window exposes: target URL, start/end page numbers, the custom
    fields the AI should extract, an AI model picker, and an output file
    name. Results from all pages are concatenated into one ``.txt`` file.
    """

    def __init__(self):
        """Build the main window and lay out all input widgets."""
        self.root = Tk()
        self.root.title("网页爬虫工具")
        self.root.geometry("500x400")  # sized to fit all input rows

        # Target website URL
        Label(self.root, text="目标网站:").grid(row=0, column=0, padx=10, pady=10)
        self.url_entry = Entry(self.root, width=30)
        self.url_entry.grid(row=0, column=1, padx=10, pady=10)

        # First page to crawl (1-based)
        Label(self.root, text="起始页:").grid(row=1, column=0, padx=10, pady=10)
        self.start_page_entry = Entry(self.root, width=30)
        self.start_page_entry.grid(row=1, column=1, padx=10, pady=10)

        # Last page to crawl (inclusive)
        Label(self.root, text="结束页:").grid(row=2, column=0, padx=10, pady=10)
        self.end_page_entry = Entry(self.root, width=30)
        self.end_page_entry.grid(row=2, column=1, padx=10, pady=10)

        # Comma/space separated custom fields for the AI to extract
        Label(self.root, text="自定义字段:").grid(row=3, column=0, padx=10, pady=10)
        self.fields_entry = Entry(self.root, width=30)
        self.fields_entry.grid(row=3, column=1, padx=10, pady=10)

        # AI model selection dropdown
        Label(self.root, text="选择模型:").grid(row=4, column=0, padx=10, pady=10)
        self.model_combobox = ttk.Combobox(self.root, width=27)
        self.model_combobox.grid(row=4, column=1, padx=10, pady=10)
        self._load_models()

        # Output file name (extension ".txt" is appended automatically)
        Label(self.root, text="爬取数据文件名:").grid(row=5, column=0, padx=10, pady=10)
        self.filename_entry = Entry(self.root, width=30)
        self.filename_entry.grid(row=5, column=1, padx=10, pady=10)

        # Start button — kicks off validation and the crawl
        Button(self.root, text="开始爬取", command=self.start_crawler).grid(row=6, column=0, columnspan=2, pady=20)

    def _load_models(self):
        """Populate the model dropdown and select the first entry by default."""
        models = [
            "Qwen/Qwen2.5-72B-Instruct",
            "Qwen/Qwen2.5-Coder-32B-Instruct",
            "deepseek-ai/DeepSeek-V3",
            "deepseek-ai/DeepSeek-R1",
            "Qwen/QwQ-32B-Preview",
            "Qwen/QwQ-32B",
            "internlm/internlm2_5-7b-chat",
            "Qwen/Qwen2-7B-Instruct",
            "THUDM/glm-4-9b-chat",
            "THUDM/chatglm3-6b"
        ]
        self.model_combobox['values'] = models
        self.model_combobox.current(0)  # default to the first model

    def start_crawler(self):
        """Validate the form inputs and launch the crawl.

        Shows a warning dialog and returns early on any invalid input.
        NOTE(review): ``asyncio.run`` here blocks the Tk event loop until
        the crawl finishes — the window freezes during a long crawl.
        """
        url = self.url_entry.get()
        start_page = self.start_page_entry.get()
        end_page = self.end_page_entry.get()
        fields = self.fields_entry.get()
        model = self.model_combobox.get()
        filename = self.filename_entry.get() or "file"  # default name when left blank

        if not url:
            messagebox.showwarning("输入错误", "请填写目标网站")
            return

        try:
            start_page = int(start_page)
            end_page = int(end_page)
            # Pages are 1-based and the range must be non-empty.
            if start_page < 1 or end_page < start_page:
                raise ValueError("页数范围无效")
        except ValueError:
            messagebox.showwarning("输入错误", "起始页和结束页必须为整数，且结束页 >= 起始页")
            return

        if not fields:
            messagebox.showwarning("输入错误", "请填写自定义字段")
            return

        if not model:
            messagebox.showwarning("输入错误", "请选择模型")
            return

        asyncio.run(self._run_crawler(url, start_page, end_page, fields, model, filename))

    async def _run_crawler(self, url: str, start_page: int, end_page: int, fields: str, model: str, filename: str):
        """Crawl the requested pages, run AI extraction, and save the results.

        :param url: target website URL
        :param start_page: first page to crawl (1-based)
        :param end_page: last page to crawl (inclusive)
        :param fields: custom fields the AI should extract
        :param model: name of the selected AI model
        :param filename: output file name chosen by the user (no extension)
        """
        crawler = WebCrawler(headless=False)

        # Output directory is "File" under the parent of the current
        # working directory.
        parent_dir = os.path.dirname(os.getcwd())
        output_dir = os.path.join(parent_dir, "File")

        # Create the output directory if it does not exist yet.
        os.makedirs(output_dir, exist_ok=True)

        try:
            html_contents = await crawler.run_playwright(url, start_page, end_page)
            print(f"共爬取 {start_page} 到 {end_page} 页的 HTML 内容。")

            all_processed_data = []
            for i, html_content in enumerate(html_contents):
                print(f"\n正在处理第 {start_page + i} 页内容...")
                processed_data = process_with_ai(html_content, fields, model)
                if processed_data:
                    print(f"第 {start_page + i} 页的爬取结果:", processed_data)
                    all_processed_data.extend(processed_data)
                else:
                    print(f"第 {start_page + i} 页未解析出有效数据")

            # Save the combined results of all pages into a single txt file.
            if all_processed_data:
                # Bug fix: previously wrote a hard-coded literal file name and
                # silently ignored the user-supplied `filename`.
                output_file = os.path.join(output_dir, f"{filename}.txt")
                with open(output_file, "w", encoding="utf-8") as f:
                    for data in all_processed_data:
                        # Strings are written as-is; anything else is stringified.
                        if isinstance(data, str):
                            f.write(data)
                        else:
                            f.write(str(data))

                print(f"所有页面的爬取结果已保存到 {output_file}")

            else:
                print("未解析出有效数据")
        except Exception as e:
            # Best-effort boundary: report and keep the GUI alive.
            print(f"处理 {url} 时出错:", e)
