import tkinter as tk
from tkinter import scrolledtext, messagebox
import requests
from bs4 import BeautifulSoup
import re


def scrape_single_news():
    """Scrape the single URL typed into the entry box and show the results.

    Clears the output area first; warns the user if the entry is empty.
    """
    output_text.delete(1.0, tk.END)
    target = url_entry.get().strip()
    if target:
        scrape_news_from_url(target)
    else:
        messagebox.showwarning("输入错误", "请输入网址")


def scrape_all_news():
    """Scrape every URL in the built-in list and append results to the output box.

    The output area is cleared once up front (the original code cleared it
    twice, which was redundant). Errors on individual URLs are handled inside
    scrape_news_from_url, so one bad URL does not stop the batch.
    """
    output_text.delete(1.0, tk.END)
    urls = [
        "https://www.3618med.com/info/ccompanynews.html",
        "https://www.3618med.com/info/c6093.html",
        "https://www.3618med.com/info/c6095.html",
        "https://www.3618med.com/info/c6096.html",
        "https://www.3618med.com/info/c6097.html",
        "https://www.3618med.com/info/c6098.html",
        "https://www.3618med.com/info/c6099.html",
        "https://www.3618med.com/info/c6101.html",
        "https://www.3618med.com/info/c7938.html",
        "https://www.3618med.com/info/c9864.html",
        # More URLs can be added here.
    ]
    for url in urls:
        scrape_news_from_url(url)
        # Process pending Tk events so the GUI stays responsive between
        # sequential blocking HTTP requests.
        root.update()

def scrape_news_from_url(url):
    """Fetch one listing page and append its news titles/descriptions to the output box.

    Network and HTTP errors are reported in the output box rather than raised,
    so a failing URL never aborts a batch run started by scrape_all_news.

    Parameters:
        url: the listing-page URL to fetch.
    """
    try:
        # timeout added: without it requests waits indefinitely and the GUI
        # would hang forever on an unresponsive host.
        response = requests.get(url, timeout=10)
        response.raise_for_status()
    except requests.exceptions.RequestException:
        output_text.insert(tk.END, f"请求失败: {url},该网址尚未爬取！！！\n")
        return

    # Force UTF-8 before reading .text: the site serves Chinese content and
    # may not declare a charset in its headers.
    response.encoding = 'utf-8'
    soup = BeautifulSoup(response.text, 'html.parser')

    # Each news entry on these listing pages is wrapped in a <dd> element.
    news_items = soup.find_all('dd')

    if not news_items:
        output_text.insert(tk.END, f"没有找到任何新闻项目，可能需要检查选择器或页面结构。\nURL: {url}\n\n")
        return

    for index, news in enumerate(news_items, start=1):
        # Title is usually <h2><a>…</a></h2>; fall back to a bare <a> tag.
        h2 = news.find('h2')
        title_tag = h2.find('a') if h2 else news.find('a')

        if not title_tag:
            output_text.insert(tk.END, f"新闻项目格式异常，无法提取标题。\nURL: {url}\n\n")
            continue

        title = title_tag.get_text().strip()
        description_tag = news.find('p')
        if description_tag:
            # Strip punctuation/symbols; keep word characters and whitespace.
            description = re.sub(r'[^\w\s]', '', description_tag.get_text().strip())
        else:
            description = "无描述信息"
        output_text.insert(tk.END, f"{index}. {title}\n")
        output_text.insert(tk.END, f"{description}\n\n")


# --- GUI construction -------------------------------------------------------
root = tk.Tk()
root.title("医疗新闻抓取工具")

# URL input row: a caption above a single-line entry field.
url_label = tk.Label(root, text="输入网址:")
url_label.pack(pady=5)

url_entry = tk.Entry(root, width=50)
url_entry.pack(pady=5)

# Action buttons: scrape the URL typed above, or the whole built-in list.
btn_single = tk.Button(root, text="抓取单个网址新闻", command=scrape_single_news)
btn_single.pack(pady=10)

btn_all = tk.Button(root, text="抓取全部网址新闻", command=scrape_all_news)
btn_all.pack(pady=10)

# Scrollable read-out area for the scraped titles and descriptions.
output_text = scrolledtext.ScrolledText(root, width=80, height=20, wrap=tk.WORD)
output_text.pack(pady=5)

# Hand control to the Tk event loop.
root.mainloop()
