import tkinter as tk
from tkinter import messagebox
from tkinter import filedialog
import pandas as pd
import requests
from fake_useragent import UserAgent
from bs4 import BeautifulSoup


def scrape_apple_community():
    """Scrape the Apple (Chinese) community topic listing and export it.

    Fetches the browse page, saves the raw HTML to ``apple_community.html``
    (kept as a debugging artifact), extracts each topic row's URL, title and
    author, writes the result to ``data.xlsx`` in the current working
    directory, and pops an info dialog on success.

    Raises:
        requests.RequestException: on network failure / timeout / HTTP error
            (surfaced to the user by the scrape_and_save error dialog).
    """
    url = "https://discussionschinese.apple.com/browse"
    headers = {"User-Agent": UserAgent().random}
    # timeout so a stalled connection cannot hang the GUI forever;
    # raise_for_status so we don't silently parse an error page.
    response = requests.get(url=url, headers=headers, timeout=30)
    response.raise_for_status()
    content = response.content

    # Keep the raw HTML on disk for inspection, but parse the in-memory
    # bytes directly instead of re-reading the file we just wrote.
    with open('apple_community.html', 'wb') as f:
        f.write(content)
    soup = BeautifulSoup(content, 'html.parser')

    # Collect rows into a list and build the DataFrame once — repeated
    # pd.concat inside the loop is quadratic.
    records = []
    for row in soup.find_all('tr', class_='topics-table-row'):
        title_link = row.find('a', class_='topic-title-link')
        author_link = row.find('a', class_='post-author-profile')
        if title_link is None or author_link is None:
            # Defensive: skip malformed rows rather than crash on None.
            continue
        records.append({
            'URL路径': "https://discussionschinese.apple.com" + title_link['href'],
            '帖子标题': title_link.text,
            '作者名字': author_link.text.strip(),
        })

    df = pd.DataFrame(records, columns=['URL路径', '帖子标题', '作者名字'])
    df.to_excel('data.xlsx', index=False)
    messagebox.showinfo("完成", "数据已保存到data.xlsx文件！")


def scrape_and_save():
    """Button callback: run the scraper, surfacing any failure in a dialog.

    This is the GUI's top-level error boundary, so the broad
    ``except Exception`` is deliberate — any error during scraping is
    shown to the user instead of crashing the window.
    """
    try:
        scrape_apple_community()
    except Exception as exc:
        messagebox.showerror("错误", str(exc))


def browse_directory():
    """Button callback: let the user pick a directory and echo the choice.

    ``askdirectory()`` returns an empty string when the dialog is
    cancelled; in that case no confirmation dialog is shown (the original
    code displayed a message with an empty path).

    NOTE(review): the chosen directory is not passed to the scraper —
    data.xlsx is always written to the current working directory. Confirm
    whether this button is meant to affect the save location.
    """
    directory = filedialog.askdirectory()
    if directory:
        messagebox.showinfo("选择目录", "选择的目录是：" + directory)


# --- GUI setup -------------------------------------------------------------
# Build the main window with two action buttons, then hand control to Tk.
root = tk.Tk()
root.title("数据抓取面板")

# One button runs the scraper, the other opens the directory picker.
tk.Button(root, text="抓取并保存数据", command=scrape_and_save).pack(pady=10)
tk.Button(root, text="选择保存目录", command=browse_directory).pack(pady=10)

# Blocks until the window is closed.
root.mainloop()
