'''
Author: xubing
Date: 2024-01-19 21:54:43
LastEditors: xubing
LastEditTime: 2024-01-23 15:11:34
Description: file content
'''
import json
import random
import sys
import time

import pandas as pd
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm


# from utils import current_date, current_time, logger
def get_next_suffix(soup):
    """Return the href of the '下一章' (next chapter) link, or None if absent.

    Args:
        soup: BeautifulSoup document of the current chapter page.

    Returns:
        The relative href string of the next-chapter link, or None when the
        link is missing or carries no href attribute.
    """
    link = soup.find('a', string='下一章')
    # Guard clause: bail out when the link is missing or has no href.
    if link is None or 'href' not in link.attrs:
        print('Could not find the "Next" button or it does not have an href attribute.')
        return None
    return link['href']
def write_to_file(title, content, suffix):
    """Save one chapter to fiction/<suffix>-<title>.txt.

    Args:
        title: Chapter title; written as the first line and used in the filename.
        content: Chapter body text.
        suffix: Numeric page id (string) that keeps filenames unique/ordered.
    """
    import os
    print(title)
    # Create the output directory on first use so the script does not crash
    # with FileNotFoundError when fiction/ is missing.
    os.makedirs('fiction', exist_ok=True)
    # Explicit utf-8: the scraped text is Chinese and the platform default
    # encoding (e.g. cp1252 on Windows) may fail to encode it.
    with open('fiction/%s-%s.txt' % (suffix, title), 'w', encoding='utf-8') as f:
        f.write(title + '\n\n')
        f.write(content)
def crawler(start_suffix):
    """Crawl chapters sequentially starting from *start_suffix*.

    Fetches each chapter page, saves its title and body via write_to_file,
    then follows the '下一章' (next chapter) link until none remains.

    Args:
        start_suffix: Relative page name of the first chapter,
            e.g. '44449043.html'.
    """
    # Pretend to be a desktop browser so the site serves normal pages.
    headers = {
        "User-Agent":
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36"
    }
    prefix_url = 'https://www.doupocangqiong.org/shuku/81807/'

    suffix = start_suffix
    while suffix:
        # Random 1-5 s pause between requests to look less like a bot.
        time.sleep(random.randint(1, 5))
        # Build the full chapter URL.
        full_url = prefix_url + suffix
        response = requests.get(full_url, headers=headers)
        # The site serves GBK-encoded pages.
        response.encoding = 'gbk'
        if response.status_code != 200:
            # BUG FIX: the original never updated `suffix` on a non-200
            # response, so it looped forever re-requesting the same URL.
            # Stop instead of hammering the server.
            print('Request failed with status %s for %s' % (response.status_code, full_url))
            break
        soup = BeautifulSoup(response.text, "html.parser")
        # Extract title and body text, trimming surrounding whitespace.
        title = soup.find("h1", class_="title").text.strip()
        content = soup.find("div", class_="content").text.strip()
        # Collapse blank lines / indentation artifacts from the page markup.
        content = content.replace("\n\n", "\n")
        content = content.replace("\n\r\n\xa0\xa0\xa0\xa0", "\n\n")
        # Name the file by the numeric page id (suffix without ".html").
        write_to_file(title, content, str(suffix.split('.')[0]))
        suffix = get_next_suffix(soup)
    print("爬取完毕!")

if __name__ == "__main__":
    # Entry point: start crawling from this chapter page id.
    crawler('44449043.html')

