from urllib.request import urlopen, Request
from bs4 import BeautifulSoup
import re

def getText(url):
    """Fetch one poem-list page from m.gsw6.com and parse it into poem dicts.

    Args:
        url: URL of a list page (e.g. https://m.gsw6.com/shici/list_1.html).

    Returns:
        A list of dicts with keys '标题' (title), '作者' (author),
        '朝代' (dynasty) and '文章' (list of verse lines), or None when
        the request fails or the server does not answer with HTTP 200.
    """
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36 Edg/139.0.0.0'
        }
        req = Request(url, headers=headers)
        # Context manager closes the connection; the original leaked the
        # response object on every call.
        with urlopen(req) as html:
            if html.getcode() != 200:
                return None
            # Parse the HTML while the response is still open.
            bs = BeautifulSoup(html, 'html.parser')

        results = []
        # Each poem lives in a div.cont that contains both a <b> and a <p>.
        for cont_div in bs.find_all('div', {'class': 'cont'}):
            if not (cont_div.find('b') and cont_div.find('p')):
                continue

            title_tag = cont_div.find('a', {'style': 'font-size:20px;line-height:24px; height:24px;'})
            source_tag = cont_div.find('p', {'class': 'source'})
            # Guard: a missing p.source previously raised AttributeError,
            # which the broad except turned into None — discarding every
            # poem already parsed from this page.
            if source_tag is None:
                continue
            # Author links point at /shiren/, dynasty links at /shici/.
            author_tag = source_tag.find('a', {'href': re.compile('^(https://m.gsw6.com/shiren/).')})
            dynasty_tag = source_tag.find('a', {'href': re.compile('^(https://m.gsw6.com/shici/).')})
            if not (title_tag and author_tag and dynasty_tag):
                continue

            # Verse lines sit in a hidden div (style="display:none;").
            verses = []
            contson_div = cont_div.find('div', {'style': 'display:none;'})
            if contson_div:
                for p_tag in contson_div.find_all('p'):
                    text = p_tag.get_text(strip=True)
                    if text:
                        verses.append(text)

            results.append({
                '标题': title_tag.get_text(strip=True),
                '作者': author_tag.get_text(strip=True),
                '朝代': dynasty_tag.get_text(strip=True),
                '文章': verses,
            })

        return results

    except Exception as e:
        # Best-effort scraper: report the failure and signal it with None.
        print(f"获取或解析网页时出错: {e}")
        return None


def start():
    """Scrape successive list pages and append every poem to a.txt.

    Writes one poem per section (title, author + dynasty, verse lines,
    separator). Stops when a page fails to fetch or yields no poems,
    i.e. the end of the list has been reached.
    """
    with open('a.txt', 'w', encoding='UTF-8') as file:
        page = 1
        times = 1
        file.write("=====================================" + "\n")
        while True:
            url = f"https://m.gsw6.com/shici/list_{page}.html"
            poems = getText(url)
            # getText returns None on failure; the original then crashed
            # with TypeError, and an empty page made the loop spin forever.
            # Treat either case as the end of the list.
            if not poems:
                break
            for poem in poems:
                file.write(poem['标题'] + '\n')
                file.write(poem['作者'] + poem['朝代'] + '\n')
                for line in poem['文章']:
                    file.write(line + '\n')
                file.write("=====================================" + "\n")
                print(f"成功爬取第{times}首")
                times += 1
            # Persist progress after each page in case of a later failure.
            file.flush()
            page += 1


if __name__ == "__main__":
    start()