# -*- coding: utf-8 -*-
import os
import re

import requests
from fake_useragent import UserAgent
from lxml import etree


def write(name, content):
    """Append *content* to ``./txt/<name>.txt`` (UTF-8).

    Args:
        name: Base file name without extension (callers pass it through
            ``format`` first to strip characters Windows forbids).
        content: Text chunk to append.
    """
    # Fix: the original raised FileNotFoundError unless ./txt already existed.
    os.makedirs('./txt', exist_ok=True)
    with open('./txt/{}.txt'.format(name), 'a+', encoding='utf-8') as f:
        f.write(content)


# Translation table deleting the characters Windows forbids in file names:
# \ / : * ? " < > |  (built once instead of per call).
_FORBIDDEN_CHARS = str.maketrans('', '', '\\/:*?"<>|')


def format(string):
    """Return *string* with filename-forbidden characters removed.

    NOTE(review): this shadows the ``format`` builtin; the name is kept
    because ``spider`` calls it — a single ``str.translate`` pass replaces
    the original chain of eight ``.replace()`` calls.

    Args:
        string: Any value; it is coerced with ``str()`` first.

    Returns:
        The sanitized string, safe to use as a Windows file name.
    """
    return str(string).translate(_FORBIDDEN_CHARS)


def spider(url):
    """Scrape every article linked from the list page at *url*.

    The list page embeds article links as ``"docurl":"..."`` fragments.
    For each article, the headline and body paragraphs are extracted via
    XPath and appended to a txt file named after the sanitized title.
    """
    listing = requests.get(url, headers={'User-Agent': UserAgent().random}, timeout=5).text
    for doc_url in re.findall('"docurl":"(.*?)"', listing):
        article = requests.get(url=doc_url, headers={'User-Agent': UserAgent().random}, timeout=5).text
        tree = etree.HTML(article)
        titles = tree.xpath('//div[@class="post_content_main"]/h1/text()')
        paragraphs = tree.xpath('//div[@class="post_text"]//p/text()')
        if not titles or not paragraphs:
            continue  # skip pages missing a headline or any body text
        title = format(titles[0])
        write(name=title, content='<title={}>'.format(title) + '\n<neirong=\n')
        for paragraph in paragraphs:
            if paragraph not in ('', '\n'):
                write(name=title, content='<p>{}</p>'.format(paragraph) + '\n')
        write(name=title, content='>')
        print('《{}》'.format(title), '   写入完毕......')


def run():
    """Discover every paginated list URL, then scrape each one.

    Page 1 is ``datalist_liuxue.js``; later pages append ``_NN`` with the
    page number zero-padded to two digits. Probing stops at the first
    non-200 response.
    """
    base_url = 'http://edu.163.com/special/0029881L/datalist_liuxue'
    url_list = [base_url + '.js']
    page = 1
    while True:
        page += 1
        suffix = str(page) if page > 9 else '0' + str(page)
        candidate = base_url + '_' + suffix + '.js'
        probe = requests.get(url=candidate, headers={'User-Agent': UserAgent().random}, timeout=5)
        if probe.status_code != 200:
            break
        url_list.append(candidate)
    for url in url_list:
        spider(url)


if __name__ == '__main__':
    # Script entry point: crawl all paginated listings and dump the articles.
    run()
