from pathlib import Path

import pandas as pd
import requests  # HTTP client used for scraping
from bs4 import BeautifulSoup
# Define get_data(): scrape the poem data from gushiwen.cn.
def get_data():
    """Scrape the Tang-poetry index on gushiwen.cn and save the poems to CSV.

    Steps:
      1. Request the index page with browser-like headers.
      2. Collect each poem's relative link together with its category name.
      3. Request every poem page and extract title, author, dynasty, body.
      4. Write all rows to ./data/data.csv (directory created if missing).

    Side effects: network requests, console progress output, one CSV file.
    """
    # Browser-like request headers so the site serves the normal page.
    cookie = ''
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36 Edg/134.0.0.0',
        # browser identification string
        'Referer': 'https://so.gushiwen.cn/',  # referring page
        'Cookie': cookie  # session cookie (empty by default)
    }
    # Index page listing all Tang poems grouped by category.
    url = "https://so.gushiwen.cn/gushi/tangshi.aspx"

    # Fetch and parse the index page (timeout so a stalled request can't hang forever).
    response = requests.get(url, headers=header, timeout=30)
    html = response.content.decode('utf-8')
    soup = BeautifulSoup(html, 'html.parser')

    # Each 'typecont' div holds one category; collect every poem link
    # together with its category name (parallel lists).
    url_list = []   # relative links to individual poem pages
    type_list = []  # category name for each link, same order as url_list
    for typecont in soup.find_all('div', class_='typecont'):
        # Renamed from `type` so the builtin is not shadowed.
        poem_type = typecont.find('div', class_='bookMl').find('strong').contents[0]
        for span in typecont.find_all('span'):
            anchor = span.find('a')
            if anchor is None or not anchor.get('href'):
                continue  # span without a usable link — skip instead of crashing
            url_list.append(anchor['href'])
            type_list.append(poem_type)

    # Visit each poem page and extract its fields. Rows are accumulated in a
    # plain list and turned into a DataFrame once at the end — appending to a
    # DataFrame row by row is quadratic.
    rows = []
    for link, poem_type in zip(url_list, type_list):
        link = 'https://so.gushiwen.cn' + link
        response = requests.get(link, headers=header, timeout=30)
        html = response.content.decode("utf-8")
        soup = BeautifulSoup(html, 'html.parser')

        sons = soup.find('div', class_='sons')
        if sons is None:
            print("找不到这个标签")  # poem container missing; skip this page
            continue
        title = sons.find('h1').contents[0]
        try:
            source = sons.find('p', class_='source')
            # Author name lives in the alt text of the first source link's image.
            name = source.find_all('a')[0].find('img')['alt']
            # Dynasty is the text of the second source link; kept inside the
            # try so a page without it is skipped rather than crashing the run.
            dynasty = source.find_all('a')[1].contents[0]
        except (AttributeError, IndexError, KeyError, TypeError):
            # Layout variant (no author image / second link): best-effort skip,
            # matching the original behavior of continuing to the next poem.
            continue
        # Poem body: concatenate child nodes, dropping <br/> tags and newlines.
        text = ''
        for con in sons.find('div', class_='contson').contents:
            if str(con) != '<br/>':
                text += str(con).replace('\n', '')
        print('已经爬取：' + title)
        rows.append([poem_type, title, name, dynasty, text])

    # Build the DataFrame once and persist; create the output directory first
    # so to_csv does not fail when ./data does not exist yet.
    son_data = pd.DataFrame(rows, columns=['类型', '标题', '作者', '朝代', '正文'])
    Path('./data').mkdir(parents=True, exist_ok=True)
    son_data.to_csv('./data/data.csv')

# Run the scraper only when executed as a script, not on import.
if __name__ == "__main__":
    get_data()