# BSD3.0
# cili@163.com


import re
import urllib.request
import urllib.parse
import csv
import indexer

# Module version marker.
def ver():
    """Return the module version number (currently 1)."""
    # Removed dead commented-out code ("#return 0").
    return 1


def write_csv(title, url):
    """Append one (title, url) row to the crawled-pages index CSV."""
    with open('web/pages.csv', 'a', newline='', encoding='utf-8') as csvfile:
        csv.writer(csvfile).writerow([title, url])

# Extract the page title from raw HTML.
def get_title(html_content):
    """Return the text of the first <title>...</title> tag, or None.

    Returns None when html_content is falsy (e.g. get_page() returned
    None) or no title tag is present. The bare 'except:' in the original
    is replaced with explicit checks — same result, nothing swallowed.
    """
    if not html_content:
        return None
    title_match = re.search(r'<title>(.*?)</title>', html_content)
    return title_match.group(1) if title_match else None

# Extract up to 9 absolute hyperlinks from a page.
def get_links(url, html_content):
    """Return at most 9 absolute links found in *html_content*.

    Relative hrefs are resolved against *url*. javascript: links,
    stylesheets, and URLs containing '?' or '#' are dropped. HTML-page
    links (.html/.htm/.shtml) sort first; ties are broken alphabetically
    so the result is deterministic — the original relied on set iteration
    order, which varies per run under hash randomization.
    """
    link_pattern = re.compile(r'<a[^>]*?href=[\'"]([^>\'"\s]*?)[\'"][^>]*?>')
    absolute_links = set()
    for link in link_pattern.findall(html_content):
        absolute = urllib.parse.urljoin(url, link)
        if (absolute.startswith("javascript:")
                or absolute.endswith(".css")
                or '?' in absolute
                or '#' in absolute):
            continue
        absolute_links.add(absolute)
    # 'not endswith' makes HTML pages key False, so they sort first.
    sorted_links = sorted(
        absolute_links,
        key=lambda x: (not x.endswith(('.html', '.htm', '.shtml')), x),
    )
    return sorted_links[:9]

def save_page(url, content):
    """Save a crawled page to web/<title>.html and record it in the index.

    Returns True on success; False when the page has no title, the title
    was already saved (present in indexer.pages_name), or any error
    occurred while writing.
    """
    try:
        title = get_title(content)
        if not title:
            return False
        if title in indexer.pages_name:  # de-duplicate by title
            return False
        # NOTE(review): the title is used verbatim as a filename; titles
        # containing '/' or other illegal characters will make open()
        # raise, and the page is silently skipped via the except below.
        with open(f'web/{title}.html', 'w', encoding='utf-8') as f:
            f.write(content)
        write_csv(title, url)
        print(title, url)
        indexer.pages_name.append(title)
        indexer.pages_url.append(url)
        return True
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed; best-effort False return is preserved.
        return False

def crawl(url):
    """Fetch *url*, save it, then fetch and save each linked page.

    Errors are reported to stdout rather than raised.
    """
    try:
        content = get_page(url)
        if content is None:
            return  # download failed or undecodable; nothing to save
        # BUG FIX: the original called save_page(title, url), but the
        # signature is save_page(url, content) — the seed page was never
        # saved (get_title(url) on a bare URL string returns None).
        save_page(url, content)

        for link in get_links(url, content):
            linked_content = get_page(link)
            if linked_content is not None:
                save_page(link, linked_content)

    except Exception as e:
        print(f"Error crawling {url}: {e}")

# Browser-like User-Agent sent with every request so servers are less
# likely to reject the crawler outright; used by get_page() below.
headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.190 Safari/537.36'
    }
def get_page(url):
    """Download *url* and return its body decoded as utf-8 or gbk.

    Returns None on a non-200 status or when neither encoding can decode
    the body.
    """
    req = urllib.request.Request(url, headers=headers)
    with urllib.request.urlopen(req) as response:
        if response.status == 200:
            # BUG FIX: read the body exactly once. The original called
            # response.read() inside the loop, so after a failed utf-8
            # attempt the stream was exhausted and the gbk retry decoded
            # b'' — returning '' instead of the real page.
            raw = response.read()
            for encoding in ('utf-8', 'gbk'):
                try:
                    return raw.decode(encoding)
                except UnicodeDecodeError:
                    continue
    return None

if __name__ == "__main__":
    # Minimal Tk GUI for manually testing get_page() against one URL.
    import tkinter as tk
    from tkinter import ttk

    root = tk.Tk()
    root.title("网络爬虫测试")

    def click_button():
        # Fetch the entered URL (defaulting the scheme to https) and show
        # the raw HTML in the text box below.
        url = spider_text.get().strip()
        if not url.startswith('http') : url = f'https://{url}'
        content = get_page(url)
        content_text.delete(1.0, "end")  # clear the multi-line text box
        content_text.insert("end", content)  # display the fetched content

    # URL entry row with a pre-filled example address.
    spider_label = ttk.Label(root, text="需要自动获取的网址：")
    spider_label.grid(row=0, column=0, sticky="w", padx=5, pady=5)
    spider_text = ttk.Entry(root, width=50)
    spider_text.insert(0, 'https://www.moe.gov.cn')
    spider_text.grid(row=1, column=0, sticky="w", padx=5, pady=5)
    spider_button = ttk.Button(root, text="启动网络爬虫", command=click_button)
    spider_button.grid(row=1, column=2, padx=10, sticky="w", pady=5)
    # Output area for the downloaded HTML.
    content_label = ttk.Label(root, text="获取的内容：")
    content_label.grid(row=2, column=0, sticky="w", padx=5, pady=5)
    content_text = tk.Text(root, wrap="word", height=10)
    content_text.grid(row=3, column=0, padx=5, pady=5, columnspan=3)

    # Center the window on the screen; update_idletasks() forces geometry
    # computation so winfo_width/height return real values.
    root.update_idletasks()
    window_width = root.winfo_width()
    window_height = root.winfo_height()
    screen_width = root.winfo_screenwidth()
    screen_height = root.winfo_screenheight()
    x = int((screen_width - window_width) / 2)
    y = int((screen_height - window_height) / 2)
    root.geometry(f"+{x}+{y}")

    root.mainloop()
