import random
import re
import sys
import time

import requests
from bs4 import BeautifulSoup


def get_novel_chapters(novel_directory_url):
    """Fetch the novel's directory page and return its metadata and chapter list.

    Args:
        novel_directory_url: URL of the novel's table-of-contents page.

    Returns:
        (novel_name, novel_author, chapters) where chapters is a list of
        [absolute_chapter_url, chapter_title] pairs in page order.

    Raises:
        requests.RequestException: on network failure / timeout / HTTP error.
    """
    chapters = []

    # Timeout so a stalled connection cannot hang the whole run;
    # raise_for_status turns an HTTP error page into an explicit exception
    # instead of a confusing AttributeError on find('h1') below.
    r = requests.get(novel_directory_url, timeout=30)
    r.raise_for_status()
    r.encoding = 'utf-8'
    soup = BeautifulSoup(r.text, 'html.parser')

    novel_name = soup.find('h1').text  # novel title
    novel_author = re.findall(r'/author/(.*?)/', r.text)[0]  # author slug from an /author/<name>/ link

    for link in soup.find('div', id='readerlist').find_all('li'):
        anchor = link.find('a')  # hoisted: was looked up three times per <li>
        # Only /so/... links are real chapter pages; skip everything else.
        if anchor['href'].startswith('/so'):
            chapters.append(['http://www.liudatxt.org' + anchor['href'], anchor['title']])
    return novel_name, novel_author, chapters


def get_chapter_content(chapter_directory_url):
    """Fetch a single chapter page and return its text formatted for the output file.

    Args:
        chapter_directory_url: URL of one chapter page.

    Returns:
        The chapter body as indented paragraphs, ending with a blank line.

    Raises:
        requests.RequestException: on network failure / timeout / HTTP error
            (lets the caller's retry loop kick in instead of saving an error page).
    """
    r = requests.get(chapter_directory_url, timeout=30)
    r.raise_for_status()
    r.encoding = 'utf-8'
    soup = BeautifulSoup(r.text, 'html.parser')

    content_div = soup.find('div', id='content')  # hoisted: was queried twice
    # Drop <i> tags (site-injected junk/watermarks) before extracting text;
    # iterating an empty result list is already a no-op, no guard needed.
    for i_tag in content_div.find_all('i'):
        i_tag.decompose()

    # The site indents paragraphs with four spaces; turn each indent into
    # a newline + indent so paragraphs are separated in the text file.
    return '    ' + content_div.text.strip().replace('    ', '\n    ') + '\n\n'


# Novel table-of-contents URL to scrape.
directory_url = 'http://www.liudatxt.org/so/3014/'

# Fetch the novel's name, author and chapter list.
novel_name, novel_author, novel_directory = get_novel_chapters(directory_url)

print(f'即将开始爬取小说 《{novel_name}》 作者：{novel_author}')

confirm = input('是否开始爬取(y/Y):')
if confirm not in ('y', 'Y'):
    sys.exit()

# Append mode ('a') so an interrupted run can be resumed without losing
# already-downloaded chapters; 'with' guarantees the file is closed even
# if the loop dies mid-run (the original leaked the handle on error).
with open(f'novel/{novel_name} - {novel_author}.txt', 'a', encoding='utf-8') as f:
    for chapter_url, chapter_title in novel_directory:
        content = None
        # Back-off base in seconds: grows by 20 per failure; the run aborts
        # after 5 retries (when it reaches 120), matching the message below.
        time_factor = 20

        while content is None:
            try:
                print(f'开始抓取 {chapter_title}')
                content = get_chapter_content(chapter_url)
            except Exception:
                # was a bare `except:` which also swallowed KeyboardInterrupt
                if time_factor >= 120:
                    print('重试次数已达5次，程序即将退出')
                    sys.exit()
                sleep_time = round(random.uniform(time_factor, time_factor + 10), 0)
                print(f'{chapter_title} 爬取失败，将在{sleep_time}秒后重试')
                time.sleep(sleep_time)
                time_factor += 20

        f.write(chapter_title + '\n\n')
        f.write(content)

print('爬取完成')
