import tkinter as tk
import requests
# 使用 BeautifulSoup 第四版，用于解析html文档
from bs4 import BeautifulSoup

# Base URL of the site to crawl. A local static server (e.g. Live Server or
# Apache) serving the HTML files works too.
domain = "http://127.0.0.1:5500/1/4/"
# domain = 'https://www.sina.com'
# Relative paths (under `domain`) of the HTML pages to crawl.
urls = ['data/test.html', 'data/test1.html', 'data/test2.html', 'data/test3.html', 'data/test4.html', 'data/index.html']
def send():
    """Fetch every page in `urls` and show matching metadata in the results box.

    Reads the search term from the `words` entry widget, downloads each page,
    and passes its <title>, meta-keywords and meta-description text to `ok()`,
    which appends matches to the `show` text widget. Also prints every <a>
    link found, for discovering further pages to crawl.
    """
    try:
        key_words = words.get()
        print(key_words)
        for url in urls:
            print(url)
            # timeout so an unreachable server cannot hang the Tk event loop
            response = requests.get(domain + url, timeout=10)
            if response.status_code == 200:
                soup = BeautifulSoup(response.text, 'html.parser')
                # Page title — a page may lack <title>, in which case
                # soup.title is None (and .string can be None too).
                if soup.title and soup.title.string:
                    ok(soup.title.string, key_words, url)
                # Meta keywords — find() returns None when the tag is absent,
                # so guard before subscripting ['content'].
                kw_tag = soup.find('meta', attrs={"name": "keywords"})
                if kw_tag and kw_tag.get('content'):
                    ok(kw_tag['content'], key_words, url)
                # Meta description — same guard as above.
                desc_tag = soup.find('meta', attrs={"name": "description"})
                if desc_tag and desc_tag.get('content'):
                    ok(desc_tag['content'], key_words, url)
                # Print every <a> tag's href and text, useful for finding
                # more pages to crawl.
                for link in soup.find_all('a'):
                    print(f"超链接: {link.get('href')}, 文本: {link.text}")
            else:
                print(f"状态码: {response.status_code}")
    except requests.RequestException as e:
        print(f"错误: {e}")

# Does the text contain the keyword? If so, record it in the results box.
def ok(s, k, url):
    """Append `s` (tagged with its source `url`) to the `show` text widget
    when it contains the keyword `k`.

    `s` may be None or empty (e.g. `soup.title.string` of a page without a
    <title>), so guard before the `in` test to avoid a TypeError.
    """
    if s and k in s:
        show.insert(tk.END, s + "(" + url + ")\n")

# Main application window.
root = tk.Tk()
root.title("搜索引擎")

# Search bar: keyword entry plus the button that triggers send().
search_bar = tk.Frame(root)
search_bar.pack()
words = tk.Entry(search_bar)
words.insert(0, "人工智能")
words.pack(side="left")
search_button = tk.Button(search_bar, text="搜索", command=send)
search_button.pack(side="left")

# Results area: matching entries are appended here.
show = tk.Text(root, width=50, height=14)
show.pack(fill=tk.BOTH, expand=True)

root.mainloop()