# -*- coding:UTF-8 -*-
import urllib

from Tools.scripts.treesync import raw_input
from bs4 import BeautifulSoup
import urllib.request
import ssl

# Globally disable SSL certificate verification for every HTTPS request made
# by this process. NOTE(review): this is a security tradeoff — presumably the
# target site's certificate fails validation; confirm before keeping this.
ssl._create_default_https_context = ssl._create_unverified_context  # 全局取消ssl验证


class downloader(object):
    """Downloader for novels hosted on biqudu.com.

    Fetches chapter pages, extracts the readable text from the page's
    ``id="content"`` element, and appends each chapter to a single text
    file whose base name is ``self.nums`` (set by the caller).
    """

    def __init__(self):
        self.name = ''  # novel title (currently unset by this class)
        self.nums = 0   # output-file base name; caller assigns the novel name here

    def get_html(self, url):
        """Fetch *url* and return the response body decoded as UTF-8.

        Undecodable byte sequences are dropped (``'ignore'``) because the
        site occasionally serves malformed UTF-8.
        """
        # 'with' guarantees the connection is closed even if read()/decode()
        # raises (the original leaked the handle on error).
        with urllib.request.urlopen(url) as response:
            return response.read().decode('utf-8', 'ignore')

    def get_contain(self, html_code):
        """Extract '<title> + chapter text' from a chapter page's HTML.

        Returns the page title concatenated with the cleaned chapter body.
        """
        soup_texts = BeautifulSoup(html_code, 'lxml')
        title = soup_texts.title.string
        # Isolate the chapter body, then re-parse with <br/> turned into
        # real newlines so .text keeps the paragraph breaks.
        contain = soup_texts.find_all(id="content")
        soup_text = BeautifulSoup(str(contain), 'lxml')
        soup_text = str(soup_text).replace('<br/>', '\n')
        soup_text = BeautifulSoup(str(soup_text), 'lxml')
        box = soup_text.div.text
        # 将\xa0无法解码的字符删除 (strip characters that cannot be encoded)
        contain = soup_text.div.text.replace('<br>', '\n')
        contain = contain.replace('\ue822', '')  # site-specific junk glyph
        # Strip the site's inline anti-scraping JS calls when present.
        if box.find("readx();") != -1:
            contain = contain.replace("readx();", '')
            contain = contain.replace("chaptererror();", '')
        return str(title) + str(contain)

    def get_all_url(self, url):
        """Return the list of <dd> tags holding the chapter links on the
        novel's table-of-contents page at *url*."""
        html = self.get_html(url)
        main_tree = BeautifulSoup(html, 'lxml')
        target_tree = main_tree.find_all('div', id='list')
        lists = BeautifulSoup(str(target_tree), 'lxml')
        return lists.find_all('dd')

    def write_section(self, contain):
        """Append *contain* to '<self.nums>.txt' (newlines stripped from
        the file name, since it comes straight from user input)."""
        file_name = (str(self.nums) + '.txt').replace('\n', '')
        # 'with' closes the file even if write() raises (the original
        # leaked the handle on error).
        with open(file_name, 'a', encoding='utf-8') as fp:
            fp.write(contain)


if __name__ == "__main__":
    # NOTE(review): Python 3's builtin input() replaces Python 2's
    # raw_input(); the 'from Tools.scripts.treesync import raw_input' at the
    # top of the file was a bogus IDE auto-import and should be removed.
    dl = downloader()
    dl.nums = input('小说名字:')  # used as the output file's base name
    target_url = input('目标目录url(https://www.biqudu.com/0_633/):')
    ed = input('删除章节【0~】：')
    chapters = dl.get_all_url(target_url)
    # Drop the leading entries the user asked to skip (ads / latest-chapter
    # duplicates at the top of the table of contents).
    del chapters[0:int(ed)]
    total = len(chapters)  # hoisted out of the loop
    for index, child in enumerate(chapters, start=1):
        child = BeautifulSoup(str(child), 'lxml')
        download_url = "https://www.biqudu.com" + child.a.get('href')
        print(download_url)
        html = dl.get_html(download_url)
        contain = dl.get_contain(html)
        dl.write_section(contain)
        print('已下载:%.3f%%' % float(100 * index / total))
