import requests
from bs4 import BeautifulSoup
import tkinter as tk
from tkinter import messagebox, ttk
import json
from typing import List, Dict
import time
from datetime import datetime
import schedule
import threading
from tkinter import StringVar


class NewsScraper:
    """Tkinter GUI tool that scrapes news articles from soccerodd.com and
    freesupertips.com into JSON files, either on demand via per-site buttons
    or through a user-configured daily schedule.
    """

    def __init__(self, root):
        """Build the application window on the given Tk root.

        Args:
            root: the tk.Tk root window hosting the UI.
        """
        self.root = root
        self.root.title("新闻爬取工具")
        self.root.geometry('600x500')
        self.scheduler_running = False   # polling loop keeps running while True
        self.schedule_thread = None      # daemon thread executing run_scheduler()
        self.is_scheduled_run = False    # True during scheduled runs: suppress blocking dialogs
        self.setup_ui()

    def setup_ui(self):
        """Create all widgets: count entry, schedule controls, progress area, tabs."""
        # Main container frame
        main_frame = ttk.Frame(self.root)
        main_frame.pack(fill=tk.BOTH, expand=True, padx=20, pady=10)

        # Input area: how many articles to fetch
        input_frame = ttk.LabelFrame(main_frame, text="配置")
        input_frame.pack(fill=tk.X, padx=5, pady=5)

        ttk.Label(input_frame, text="请输入要爬取的文章数量（留空爬取所有）：").pack(pady=5)
        self.entry = ttk.Entry(input_frame)
        self.entry.pack(pady=5)

        # Scheduled-task configuration
        schedule_frame = ttk.LabelFrame(main_frame, text="定时任务配置")
        schedule_frame.pack(fill=tk.X, padx=5, pady=5)

        # Time-of-day selection (hour/minute spinboxes)
        time_frame = ttk.Frame(schedule_frame)
        time_frame.pack(fill=tk.X, padx=5, pady=5)

        self.hour_var = StringVar(value="00")
        self.minute_var = StringVar(value="00")

        ttk.Label(time_frame, text="执行时间：").pack(side=tk.LEFT, padx=5)

        # Hour spinbox (0-23)
        hour_spinbox = ttk.Spinbox(
            time_frame,
            from_=0,
            to=23,
            width=5,
            format="%02.0f",
            textvariable=self.hour_var
        )
        hour_spinbox.pack(side=tk.LEFT, padx=2)

        ttk.Label(time_frame, text=":").pack(side=tk.LEFT)

        # Minute spinbox (0-59)
        minute_spinbox = ttk.Spinbox(
            time_frame,
            from_=0,
            to=59,
            width=5,
            format="%02.0f",
            textvariable=self.minute_var
        )
        minute_spinbox.pack(side=tk.LEFT, padx=2)

        # Start/stop toggle for the scheduler
        self.schedule_button = ttk.Button(
            schedule_frame,
            text="启动定时任务",
            command=self.toggle_scheduler
        )
        self.schedule_button.pack(pady=5)

        # Scheduler status line
        self.schedule_status = ttk.Label(
            schedule_frame,
            text="定时任务未启动"
        )
        self.schedule_status.pack(pady=5)

        # Progress bar + status label
        self.progress_frame = ttk.LabelFrame(main_frame, text="进度")
        self.progress_frame.pack(fill=tk.X, padx=5, pady=5)

        self.progress_var = tk.DoubleVar()
        self.progress_bar = ttk.Progressbar(
            self.progress_frame,
            variable=self.progress_var,
            maximum=100
        )
        self.progress_bar.pack(fill=tk.X, padx=5, pady=5)

        self.status_label = ttk.Label(self.progress_frame, text="就绪")
        self.status_label.pack(pady=5)

        # One notebook tab per target site
        self.notebook = ttk.Notebook(main_frame)
        self.notebook.pack(fill=tk.BOTH, expand=True, pady=10)

        self.setup_tab("SoccerOdd News", self.scrape_soccerodd)
        self.setup_tab("FreeSuperTips News", self.scrape_freesupertips)

    def toggle_scheduler(self):
        """Start the scheduler if it is stopped, stop it otherwise."""
        if not self.scheduler_running:
            self.start_scheduler()
        else:
            self.stop_scheduler()

    def start_scheduler(self):
        """Register a daily scraping job at the configured time and start polling."""
        if self.scheduler_running:
            return

        # BUG FIX: schedule.every().day.at() only accepts zero-padded "HH:MM";
        # the spinbox can hold unpadded or non-numeric user-typed text such as
        # "5", which previously produced "5:00" and crashed inside schedule.
        try:
            hour = int(self.hour_var.get())
            minute = int(self.minute_var.get())
        except ValueError:
            messagebox.showerror("错误", "请输入一个有效的数字！")
            return
        schedule_time = f"{hour:02d}:{minute:02d}"

        # Drop any previously registered jobs before adding the new one
        schedule.clear()
        schedule.every().day.at(schedule_time).do(self.run_scheduled_tasks)

        # Start the polling thread (daemon so it dies with the app)
        self.scheduler_running = True
        self.schedule_thread = threading.Thread(target=self.run_scheduler, daemon=True)
        self.schedule_thread.start()

        # Reflect the new state in the UI
        self.schedule_button.config(text="停止定时任务")
        self.schedule_status.config(
            text=f"定时任务已启动，将在每天 {schedule_time} 执行"
        )

        messagebox.showinfo(
            "定时任务",
            f"定时任务已启动，将在每天 {schedule_time} 执行爬取任务"
        )

    def stop_scheduler(self):
        """Stop the polling loop and clear all scheduled jobs."""
        if not self.scheduler_running:
            return

        # The run_scheduler() loop observes this flag and exits within ~1s
        self.scheduler_running = False
        schedule.clear()

        # Reflect the new state in the UI
        self.schedule_button.config(text="启动定时任务")
        self.schedule_status.config(text="定时任务未启动")

        messagebox.showinfo("定时任务", "定时任务已停止")

    def run_scheduler(self):
        """Polling loop executed on the scheduler thread."""
        while self.scheduler_running:
            schedule.run_pending()
            time.sleep(1)

    def run_scheduled_tasks(self):
        """Run both site scrapers as a scheduled job and log the outcome.

        NOTE(review): this executes on the scheduler thread, yet the scrapers
        update tkinter widgets; Tk is not thread-safe, so UI updates should
        ideally be marshalled through root.after() — confirm before relying
        on this in production.
        """
        try:
            self.is_scheduled_run = True  # scheduled mode: suppress dialogs

            # Scrape both sites back to back
            self.scrape_soccerodd()
            self.scrape_freesupertips()

            # Record a success entry
            self.log_schedule_execution()

        except Exception as e:
            error_msg = f"定时任务执行出错: {str(e)}"
            print(error_msg)
            self.log_schedule_execution(error=error_msg)
        finally:
            self.is_scheduled_run = False  # back to manual mode

    def log_schedule_execution(self, error=None):
        """Append one entry to schedule_log.json.

        Args:
            error: error description string, or None for a successful run.
        """
        log_entry = {
            "timestamp": datetime.now().isoformat(),
            "status": "error" if error else "success",
            "error": error
        }

        try:
            # Load the existing log; start fresh when the file is missing or
            # corrupt (a truncated log previously crashed this method).
            try:
                with open('schedule_log.json', 'r', encoding='utf-8') as f:
                    logs = json.load(f)
            except (FileNotFoundError, json.JSONDecodeError):
                logs = []

            logs.append(log_entry)

            with open('schedule_log.json', 'w', encoding='utf-8') as f:
                json.dump(logs, f, ensure_ascii=False, indent=2)

        except Exception as e:
            print(f"写入日志出错: {str(e)}")

    def setup_tab(self, name: str, command):
        """Add a notebook tab holding a label and a "start scraping" button.

        Args:
            name: tab title.
            command: zero-argument callable invoked by the button.
        """
        tab = ttk.Frame(self.notebook)
        self.notebook.add(tab, text=name)
        ttk.Label(tab, text=f"{name}爬取").pack(pady=10)
        ttk.Button(tab, text="开始爬取", command=command).pack(pady=20)

    def update_progress(self, current: int, total: int, status: str):
        """Update the progress bar and status label, forcing a redraw."""
        # Guard against division by zero when there is nothing to process
        progress = (current / total) * 100 if total > 0 else 0
        self.progress_var.set(progress)
        self.status_label.config(text=status)
        self.root.update()

    def get_article_count(self) -> int:
        """Read the requested article count from the entry widget.

        Returns:
            -1 when the entry is blank (meaning "scrape everything"),
            0 on invalid input (an error is reported),
            otherwise the positive count entered by the user.
        """
        num_articles_input = self.entry.get()
        if not num_articles_input:
            return -1  # blank entry: scrape all articles

        try:
            num_articles = int(num_articles_input)
        except ValueError:
            # No blocking dialogs from the scheduler thread
            if not self.is_scheduled_run:
                messagebox.showerror("错误", "请输入一个有效的数字！")
            else:
                print("请输入一个有效的数字！")
            return 0
        if num_articles < 1:
            if not self.is_scheduled_run:
                messagebox.showerror("错误", "请输入一个大于0的数字！")
            else:
                print("请输入一个大于0的数字！")
            return 0
        return num_articles

    def save_articles(self, articles: List[Dict], filename: str):
        """Write the articles plus metadata (timestamp, count) to a JSON file.

        Args:
            articles: list of article dicts to persist.
            filename: destination JSON path.
        """
        data = {
            "metadata": {
                "timestamp": datetime.now().isoformat(),
                "total_articles": len(articles)
            },
            "articles": articles
        }

        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=2)

    def _scrape_site(self, index_url: str, link_selector: str, base_url: str,
                     parse_article, output_file: str):
        """Shared scraping pipeline used by both site-specific methods.

        Fetches the listing page, collects article links, scrapes each
        article via parse_article, and saves the results as JSON.

        Args:
            index_url: listing page to collect article links from.
            link_selector: CSS selector matching the article anchors.
            base_url: prefix prepended to relative article links.
            parse_article: callable(BeautifulSoup) -> dict of article fields.
            output_file: JSON filename for the results.
        """
        try:
            response = requests.get(index_url, timeout=10)
            response.raise_for_status()  # fail loudly instead of parsing an error page
            soup = BeautifulSoup(response.text, 'html.parser')
            # .get() skips anchors without an href instead of raising KeyError
            post_links = [a.get('href') for a in soup.select(link_selector) if a.get('href')]

            num_articles = self.get_article_count()
            if num_articles == 0:
                return  # invalid input, already reported

            if num_articles == -1:
                num_articles = len(post_links)  # blank entry: take everything
            else:
                num_articles = min(num_articles, len(post_links))

            articles = []
            for i in range(num_articles):
                link = post_links[i]
                post_url = link if link.startswith('http') else f'{base_url}{link}'

                self.update_progress(i, num_articles, f"正在爬取: {post_url}")

                try:
                    post_response = requests.get(post_url, timeout=10)
                    post_response.raise_for_status()
                    post_soup = BeautifulSoup(post_response.text, 'html.parser')

                    article = parse_article(post_soup)
                    article["url"] = post_url
                    article["timestamp"] = datetime.now().isoformat()
                    articles.append(article)

                    time.sleep(0.5)  # throttle so we do not hammer the site
                except Exception as e:
                    # Skip articles that fail to download or parse
                    print(f"Error processing {post_url}: {str(e)}")
                    continue

            self.save_articles(articles, output_file)
            self.update_progress(100, 100, "完成！")

            # Only pop a completion dialog for manual runs
            if not self.is_scheduled_run:
                messagebox.showinfo("完成", f'数据已保存到 {output_file}，爬取了 {len(articles)} 篇文章。')

        except Exception as e:
            error_msg = f"爬取过程中发生错误: {str(e)}"
            print(error_msg)
            if not self.is_scheduled_run:
                messagebox.showerror("错误", error_msg)
            self.status_label.config(text="发生错误")

    def scrape_soccerodd(self):
        """Scrape soccerodd.com news into soccerodd_articles.json."""
        def parse(post_soup):
            # select_one(...) may return None for unexpected markup; the
            # resulting AttributeError is caught per-article by _scrape_site.
            return {
                "title": post_soup.select_one('.page-title').text.strip(),
                "description": post_soup.select_one('.post-description p').text.strip(),
            }

        self._scrape_site(
            'https://www.soccerodd.com/news',
            '.postList a',
            'https://www.soccerodd.com',
            parse,
            'soccerodd_articles.json'
        )

    def scrape_freesupertips(self):
        """Scrape freesupertips.com news into freesupertips_articles.json."""
        def parse(post_soup):
            paragraphs = post_soup.select('.Wysiwyg p')
            return {
                "title": post_soup.select_one('h1').text.strip(),
                "content": '\n'.join(p.text.strip() for p in paragraphs),
            }

        self._scrape_site(
            'https://www.freesupertips.com/news/',
            '.Grid a',
            'https://www.freesupertips.com',
            parse,
            'freesupertips_articles.json'
        )


def main():
    """Entry point: build the Tk window, mount the scraper app, run the event loop."""
    window = tk.Tk()
    NewsScraper(window)
    window.mainloop()


# Standard script guard: launch the GUI only when run directly, not on import.
if __name__ == "__main__":
    main()
