# -*- coding:UTF-8 -*-
import urllib

from Tools.scripts.treesync import raw_input
from bs4 import BeautifulSoup
import urllib.request
import ssl

ssl._create_default_https_context = ssl._create_unverified_context  # 全局取消ssl验证


class pageCode(object):
    """Scraper for a web-novel site: downloads pages, extracts chapter text,
    collects the chapter-link list, and appends extracted text to hello.txt.

    NOTE(review): relies on module-level ``BeautifulSoup`` (with the 'lxml'
    parser) and ``urllib.request``; the cleanup of "readx();" /
    "chaptererror();" strings is specific to the scraped site's markup.
    """

    def __init__(self):
        # name/nums are never read inside this class; kept for compatibility
        # with any external code that may set or inspect them.
        self.name = ''
        self.nums = 0

    def get_html(self, url):
        """Fetch *url* and return the response body decoded as UTF-8.

        Undecodable byte sequences are dropped (``errors='ignore'``).
        """
        # BUG FIX: the original closed the handle only on the success path;
        # the context manager guarantees the connection is closed even when
        # read()/decode() raises.
        with urllib.request.urlopen(url) as response:
            return response.read().decode('utf-8', 'ignore')

    def get_contain(self, html_code):
        """Extract the chapter title and body text from *html_code*.

        Returns the page ``<title>`` concatenated with the cleaned text of
        the element whose id is "content". ``<br/>`` tags are turned into
        newlines before the tags are stripped.
        """
        soup_texts = BeautifulSoup(html_code, 'lxml')
        title = soup_texts.title.string
        contain = soup_texts.find_all(id="content")
        # Re-parse the extracted fragment so <br/> can be rewritten as a
        # newline in the raw markup before .text strips all tags.
        soup_text = BeautifulSoup(str(contain), 'lxml')
        soup_text = str(soup_text).replace('<br/>', '\n')
        soup_text = BeautifulSoup(str(soup_text), 'lxml')
        box = soup_text.div.text
        # Remove characters that cannot be decoded (private-use glyphs).
        # NOTE(review): .text has already stripped tags, so this '<br>'
        # replace only matters if the text contains a literal "<br>" string.
        contain = soup_text.div.text.replace('<br>', '\n')
        contain = contain.replace('\ue822', '')

        # Site-specific artifacts: strip leftover inline JS calls when the
        # page embeds them in the chapter body.
        if box.find("readx();") != -1:
            contain = contain.replace("readx();", '')
            contain = contain.replace("chaptererror();", '')

        print(contain)
        return str(title) + str(contain)

    def get_all_url(self, url):
        """Return the list of ``<dd>`` elements inside the page's
        ``<div id="list">`` — the per-chapter link entries."""
        html = self.get_html(url)
        main_tree = BeautifulSoup(html, 'lxml')
        target_tree = main_tree.find_all('div', id='list')
        lists = BeautifulSoup(str(target_tree), 'lxml')
        lists_a = lists.find_all('dd')
        return lists_a

    def write_section(self, contain):
        """Append *contain* to hello.txt (UTF-8)."""
        # BUG FIX: the original open/close pair leaked the handle when
        # write() raised; `with` closes the file on every path.
        with open("hello.txt", 'a', encoding='utf-8') as fp:
            fp.write(contain)


if __name__ == "__main__":
    # CLI entry: prompt for a URL, download the page, append it to hello.txt.
    pc = pageCode()
    # BUG FIX: `raw_input` does not exist in Python 3 and was imported from
    # a broken path (Tools.scripts.treesync); the builtin input() is the
    # correct Python 3 equivalent.
    target_url = input('url:')
    code = pc.get_html(target_url)
    pc.write_section(code)
