import requests
from bs4 import BeautifulSoup
import pandas as pd

'''Scrape the novel catalog from https://sewen.cc/ (fixes typo: 爬去 → 爬取).'''


def get_sewen_data(url, timeout=10):
    """Fetch a page from sewen.cc and return its decoded HTML text.

    Args:
        url: Absolute URL of the page to download.
        timeout: Seconds to wait for the server before giving up.
            Without this, ``requests.get`` can block forever on a
            stalled connection.

    Returns:
        The response body as a str, decoded as GBK (the site serves
        GBK-encoded pages, so the encoding is forced here).

    Raises:
        requests.exceptions.RequestException: On connection errors or
            when the timeout elapses.
    """
    # Desktop browser UA so the site serves the normal page layout.
    headers = {
        'User-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.82'
    }
    response = requests.get(url, headers=headers, timeout=timeout)
    response.encoding = 'gbk'
    return response.text


def parse_sewen_data(html_text):
    """Extract book entries from a sewen.cc catalog page.

    Each entry is expected to be a ``<div class="item">`` containing a
    ``<dt>`` (title), an ``<a href=...>`` (link) and a ``<dd>`` (intro).
    Items missing any of these are skipped instead of crashing: the
    original code raised AttributeError/KeyError on malformed markup.

    Args:
        html_text: Raw HTML of one catalog page.

    Returns:
        A list of dicts with keys '标题' (title), 'URL 地址' (link)
        and '简介' (introduction).
    """
    soup = BeautifulSoup(html_text, 'html.parser')
    data = []
    for item in soup.find_all('div', class_='item'):
        title_tag = item.find('dt')
        link_tag = item.find('a')
        intro_tag = item.find('dd')
        # Guard against partial/malformed items (missing tag or href).
        if title_tag is None or intro_tag is None:
            continue
        if link_tag is None or not link_tag.has_attr('href'):
            continue
        data.append({
            '标题': title_tag.text.strip(),
            'URL 地址': link_tag['href'],
            '简介': intro_tag.text.strip()
        })
    return data


def scrape_sewen_data():
    """Walk every catalog page of sewen.cc and export the results.

    Starting at page 1, keeps fetching ``/shuku/<page>`` and collecting
    entries until a page no longer contains a "下一页" (next page) link,
    then writes everything to ``output.xlsx``.
    """
    base_url = 'https://sewen.cc/shuku/{}'
    collected = []
    page = 1

    while True:
        page_html = get_sewen_data(base_url.format(page))
        collected += parse_sewen_data(page_html)

        # Stop once the pagination no longer offers a "next page" link.
        pagination = BeautifulSoup(page_html, 'html.parser')
        if pagination.find('a', string='下一页') is None:
            break
        page += 1

    frame = pd.DataFrame(collected, columns=['标题', 'URL 地址', '简介'])
    frame.to_excel('output.xlsx', index=False)


# Entry point: run the full scrape when executed as a script.
if __name__ == '__main__':
    scrape_sewen_data()
