import requests
from bs4 import BeautifulSoup
import bs4
from tqdm import tqdm
import time

def get_one_page(url):
    """Fetch *url* and return its decoded HTML text, or None on failure.

    The response encoding is replaced by the body-sniffed guess
    (``apparent_encoding``) because these sites often mislabel the
    charset in their headers.
    """
    try:
        response = requests.get(url, timeout=30)
        response.raise_for_status()
        response.encoding = response.apparent_encoding
        return response.text
    except requests.RequestException:
        # Narrow catch: only network/HTTP errors are expected here.  A bare
        # `except:` would also swallow KeyboardInterrupt and real bugs.
        print("产生异常")
        return None

def parse_all_pages(html, urls, chapter_name):
    """Parse the table-of-contents page *html*.

    Appends each chapter's href to *urls* and its title text to
    *chapter_name* (both lists are mutated in place).
    """
    toc = BeautifulSoup(html, 'html.parser').find('div', id='list')
    for link in toc.find_all('a'):
        urls.append(link.get('href'))
        chapter_name.append(link.text)

def parse_one_chapter(content):
    """Extract the chapter body text from a chapter page.

    Strips the four-NBSP run the site uses as paragraph indentation.
    """
    body = BeautifulSoup(content, 'html.parser').find_all('div', id='content')[0]
    return body.text.replace('\xa0' * 4, '')

def write_to_file(chapter_name, texts):
    """Append one chapter (title, body, separator rule) to the output file."""
    parts = (
        chapter_name + '\n\n',
        texts,
        '\n\n',
        '________________________________________________ ',
        '\n\n',
    )
    with open('《寒门状元》.txt', 'a', encoding='utf-8') as f:
        f.writelines(parts)

def main():
    """Download every chapter of the novel and append it to the output file."""
    urls = []
    chapter_name = []
    all_url = 'http://www.biquge.com.tw/16_16289/'
    html = get_one_page(all_url)
    if html is None:
        # TOC fetch failed; parse_all_pages would crash on None input.
        return
    parse_all_pages(html, urls, chapter_name)

    print('《寒门状元》开始下载。。。。。')
    for name, href in tqdm(zip(chapter_name, urls), total=len(urls)):
        content = get_one_page('http://www.biquge.com.tw' + href)
        if content is None:
            # Skip a chapter whose fetch failed instead of crashing the run.
            continue
        write_to_file(name, parse_one_chapter(content))
        time.sleep(1)  # throttle: be polite to the server
    print('《寒门状元》下载完成')


# Script entry point: run only when executed directly, not on import.
# (Indentation normalized from a tab to 4 spaces for PEP 8 consistency.)
if __name__ == '__main__':
    main()