import csv
import sqlite3
import threading
import tkinter as tk
from collections import Counter
from tkinter import messagebox
from tkinter import scrolledtext
from tkinter import ttk

import matplotlib.pyplot as plt
import requests
from bs4 import BeautifulSoup
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.font_manager import FontProperties


# 设置 matplotlib 全局字体
plt.rcParams['font.sans-serif'] = ['SimHei']  # 设置字体为 SimHei
plt.rcParams['axes.unicode_minus'] = False  # 解决坐标轴负号显示问题

# 网站配置，定义不同网站的结构
SITES_CONFIG = {
    "人民网": {
        "url": "http://www.people.com.cn/GB/59476/index.html",
        "news_container": {"tag": "td", "class": "indexfont13"},
        "news_item": {"tag": "div"},
        "title": {"tag": "a", "attr": "get_text"},
        "link": {"tag": "a", "attr": "href"},
        "date": {"method": "custom", "split_char": "[", "replace_char": "]"},
        "today_news_container": {"tag": "td", "class": "p6"},
        "today_news_item": {"tag": "li"}
    },
    # 可以在这里添加更多网站的配置
}

DEFAULT_SITE_KEY = "人民网"

# Step 1: Web Request with Proxy Support
def fetch_page(url, proxies=None):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
    response = requests.get(url, headers=headers, proxies=proxies, timeout=10)
    response.raise_for_status()  # Ensure we notice bad responses
    response.encoding = 'GB2312'  # 指定页面编码
    return response.text

# Step 2: Parse HTML based on site configuration
def parse_html(html, config):
    soup = BeautifulSoup(html, 'html.parser')
    data_list = []

    news_containers = soup.find_all(config["news_container"]["tag"], class_=config["news_container"]["class"])
    for news_container in news_containers:
        for item in news_container.find_all(config["news_item"]["tag"]):
            title = item.find(config["title"]["tag"]).get_text(strip=True)
            link = item.find(config["link"]["tag"])[config["link"]["attr"]]
            date_text = item.get_text(strip=True)
            date = date_text.split(config["date"]["split_char"])[-1].replace(config["date"]["replace_char"], "")
            data_list.append({'title': title, 'link': link, 'date': date})

    today_news_container = soup.find(config["today_news_container"]["tag"], class_=config["today_news_container"]["class"])
    if today_news_container:
        for item in today_news_container.find_all(config["today_news_item"]["tag"]):
            title = item.find(config["title"]["tag"]).get_text(strip=True)
            link = item.find(config["link"]["tag"])[config["link"]["attr"]]
            sibling_text = item.next_sibling.strip()
            date_text = sibling_text.split(config["date"]["split_char"])[-1].replace(config["date"]["replace_char"], "")
            data_list.append({'title': title, 'link': link, 'date': date_text})

    return data_list

# Step 3: Save to SQLite
def save_to_db(data_list, db_name='data.db'):
    conn = sqlite3.connect(db_name)
    cursor = conn.cursor()
    cursor.execute('''
        CREATE TABLE IF NOT EXISTS Data (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            title TEXT,
            link TEXT,
            date TEXT
        )
    ''')
    for data in data_list:
        cursor.execute('''
            INSERT INTO Data (title, link, date) VALUES (?, ?, ?)
        ''', (data['title'], data['link'], data['date']))
    conn.commit()
    conn.close()

# Step 4: Save to CSV
def save_to_csv(data_list, filename='data.csv'):
    with open(filename, mode='w', newline='', encoding='utf-8') as file:
        writer = csv.writer(file)
        writer.writerow(['标题', '链接', '日期'])
        for data in data_list:
            writer.writerow([data['title'], data['link'], data['date']])

# Step 5: Data Analysis (Simple Example)
def analyze_data(data_list):
    date_count = {}
    for data in data_list:
        date = data['date']
        if date in date_count:
            date_count[date] += 1
        else:
            date_count[date] = 1
    return date_count

# Step 6: Run the full pipeline (invoked from the GUI's worker thread)
def main(site_key, url, proxies=None):
    try:
        html = fetch_page(url, proxies)
        config = SITES_CONFIG[site_key]
        data_list = parse_html(html, config)
        save_to_db(data_list)
        save_to_csv(data_list)
        analysis = analyze_data(data_list)
        return data_list, analysis
    except requests.RequestException as e:
        messagebox.showerror("错误", f"获取页面时发生错误：{e}")
    except Exception as e:
        messagebox.showerror("错误", f"发生意外错误：{e}")

# Tkinter GUI
class CrawlerApp:
    def __init__(self, root):
        self.root = root
        self.root.title("人民网爬虫")
        self.root.geometry("1600x700")

        self.main_frame = tk.Frame(root, padx=5, pady=5)
        self.main_frame.pack(fill=tk.BOTH, expand=True)

        self.input_frame = tk.Frame(self.main_frame, padx=5, pady=5)
        self.input_frame.pack(fill=tk.X)

        self.url_label = tk.Label(self.input_frame, text="请输入URL：", font=("Arial", 10))
        self.url_label.grid(row=0, column=0, sticky=tk.W, pady=2)

        self.url_entry = tk.Entry(self.input_frame, width=40, font=("Arial", 10), fg="grey")
        self.url_entry.grid(row=0, column=1, pady=2)
        self.url_entry.insert(0, SITES_CONFIG[DEFAULT_SITE_KEY]["url"])

        self.url_entry.bind("<FocusIn>", self.on_focus_in)
        self.url_entry.bind("<FocusOut>", self.on_focus_out)

        self.proxy_label = tk.Label(self.input_frame, text="代理服务器（可选）：", font=("Arial", 10))
        self.proxy_label.grid(row=1, column=0, sticky=tk.W, pady=2)

        self.proxy_entry = tk.Entry(self.input_frame, width=40, font=("Arial", 10))
        self.proxy_entry.grid(row=1, column=1, pady=2)

        self.site_label = tk.Label(self.input_frame, text="选择网站：", font=("Arial", 10))
        self.site_label.grid(row=2, column=0, sticky=tk.W, pady=2)

        self.site_var = tk.StringVar(value=DEFAULT_SITE_KEY)
        self.site_menu = ttk.Combobox(self.input_frame, textvariable=self.site_var, values=list(SITES_CONFIG.keys()), font=("Arial", 10))
        self.site_menu.grid(row=2, column=1, pady=2)

        self.crawl_button = tk.Button(self.input_frame, text="开始爬取", command=self.start_crawl, font=("Arial", 10))
        self.crawl_button.grid(row=3, column=1, pady=2)

        self.progress_label = tk.Label(self.input_frame, text="进度：", font=("Arial", 10))
        self.progress_label.grid(row=4, column=0, sticky=tk.W, pady=2)

        self.progress = ttk.Progressbar(self.input_frame, orient="horizontal", length=300, mode="determinate")
        self.progress.grid(row=4, column=1, pady=2)

        self.output_frame = tk.Frame(self.main_frame, padx=5, pady=5)
        self.output_frame.pack(fill=tk.BOTH, expand=True)

        self.tree = ttk.Treeview(self.output_frame, columns=("标题", "链接", "日期"), show='headings', selectmode='browse')
        self.tree.heading("标题", text="标题")
        self.tree.heading("链接", text="链接")
        self.tree.heading("日期", text="日期")
        self.tree.column("标题", stretch=tk.YES)
        self.tree.column("链接", stretch=tk.YES)
        self.tree.column("日期", stretch=tk.YES)
        self.tree.pack(fill=tk.BOTH, expand=True, side=tk.TOP)

        self.scrollbar = ttk.Scrollbar(self.output_frame, orient="vertical", command=self.tree.yview)
        self.tree.configure(yscroll=self.scrollbar.set)
        self.scrollbar.pack(side=tk.RIGHT, fill=tk.Y)

        self.analysis_frame = tk.Frame(self.output_frame, padx=5, pady=5)
        self.analysis_frame.pack(fill=tk.BOTH, expand=True)

        self.analysis_label = tk.Label(self.analysis_frame, text="数据分析结果：", font=("Arial", 10))
        self.analysis_label.pack(side=tk.TOP, anchor=tk.W, pady=2)

        self.analysis_text = scrolledtext.ScrolledText(self.analysis_frame, width=30, height=10, font=("Arial", 10))
        self.analysis_text.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)

        self.figure = plt.Figure(figsize=(8, 6), dpi=100)
        self.canvas = FigureCanvasTkAgg(self.figure, master=self.analysis_frame)
        self.canvas.get_tk_widget().pack(side=tk.RIGHT, fill=tk.BOTH, expand=True)

        # 设置中文字体
        self.font_properties = FontProperties(fname="C:/Windows/Fonts/simhei.ttf")  # 请确认该路径是否正确

    def on_focus_in(self, event):
        if self.url_entry.get() == SITES_CONFIG[DEFAULT_SITE_KEY]["url"]:
            self.url_entry.delete(0, tk.END)
            self.url_entry.config(fg="black")

    def on_focus_out(self, event):
        if self.url_entry.get() == "":
            self.url_entry.insert(0, SITES_CONFIG[DEFAULT_SITE_KEY]["url"])
            self.url_entry.config(fg="grey")

    def start_crawl(self):
        url = self.url_entry.get()
        site_key = self.site_var.get()
        if url == SITES_CONFIG[DEFAULT_SITE_KEY]["url"]:
            url = SITES_CONFIG[site_key]["url"]
        proxy = self.proxy_entry.get()
        proxies = None
        if proxy:
            proxies = {"http": proxy, "https": proxy}
        self.progress["value"] = 0
        self.progress["maximum"] = 100
        threading.Thread(target=self.crawl, args=(site_key, url, proxies)).start()

    def crawl(self, site_key, url, proxies):
        try:
            self.update_progress(20)
            data_list, analysis = main(site_key, url, proxies)
            self.update_progress(60)
            if data_list:
                for item in self.tree.get_children():
                    self.tree.delete(item)
                for data in data_list:
                    self.tree.insert("", tk.END, values=(data['title'], data['link'], data['date']))
                self.update_progress(90)
                self.analysis_text.delete('1.0', tk.END)
                for date, count in analysis.items():
                    self.analysis_text.insert(tk.END, f"{date}: {count} 条新闻\n")
                self.update_progress(100)
                self.plot_analysis(analysis)
                messagebox.showinfo("成功", "数据爬取并保存成功！")
        except Exception as e:
            messagebox.showerror("错误", f"发生意外错误：{e}")

    def update_progress(self, value):
        self.progress["value"] = value
        self.root.update_idletasks()

    def plot_analysis(self, analysis):
        dates = list(analysis.keys())
        counts = list(analysis.values())

        # 将日期转换为数字表示
        date_numbers = list(range(1, len(dates) + 1))

        self.figure.clear()
        ax = self.figure.add_subplot(111)
        ax.bar(date_numbers, counts, color='skyblue')
        ax.set_xlabel("日期", fontproperties=self.font_properties)
        ax.set_ylabel("新闻数量", fontproperties=self.font_properties)
        ax.set_title("每日新闻数量分析", fontproperties=self.font_properties)

        # 设置横轴标签
        ax.set_xticks(date_numbers)
        ax.set_xticklabels(date_numbers)

        self.canvas.draw()


if __name__ == "__main__":
    root = tk.Tk()
    app = CrawlerApp(root)
    root.mainloop()