# -*- coding: utf-8 -*-
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
#作者：cacho_37967865
#博客：https://blog.csdn.net/sinat_37967865
#文件：get_story.py
#日期：2019-11-20
#备注：Python爬虫爬取特殊类型的小说
<a href="javascript:go_page_from_id(84348,96533,49412053)">
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

import requests
import math
import time
from pycacho.cachobase.deal_replace import replace_one_list
from pycacho.cachobase.file_deal import an_save_txt
from pycacho.cachobase.logger import Logger
from bs4 import BeautifulSoup
import random

import re

# Module-wide logger (project helper class).
logger = Logger("getWebTxt").get_log()

# Pool of User-Agent strings; random.choice below picks one per import.
agent = ["Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre"]

# Shared request headers used by every requests.get call in this module.
headers = {
    'User-Agent': random.choice(agent)
}

# When calling requests.get(url, verify=False) to skip SSL verification, the log
# fills with warnings; the four lines below configure retries and suppress those
# warnings.
# NOTE(review): this Session is configured but never used — every function below
# calls requests.get directly. Confirm whether requests should go through it.
session = requests.Session()
session.keep_alive = False
requests.adapters.DEFAULT_RETRIES = 5
requests.packages.urllib3.disable_warnings()


# 获取所有章节的chapterid、chaptername
# Fetch all chapters' chapterid / chaptername for one book.
def get_json_chapter(story_url, mybookid):
    """Download a book's chapter-index JSON and return its chapter list.

    Fetches ``<story_url>/files/<xid>/<bookid>/<bookid>.json`` (with a
    cache-busting ``cdnversions`` query value that changes every 5 minutes),
    writes the title line and intro to ``<title>.txt`` via ``an_save_txt``,
    and returns a list whose first element is the title line and whose
    remaining elements are ``[chapter_name, chapter_url]`` pairs.

    :param story_url: site root, e.g. ``https://www.wjcyhg.com``
    :param mybookid: numeric book id
    :return: ``[title_line, [name, url], [name, url], ...]``
    """
    chapter_info = []
    cache_window = 60 * 5  # cdnversions ticks forward every 5 minutes
    xid = math.floor(mybookid / 1000)  # books are sharded in groups of 1000
    mybookid = str(mybookid)
    url = ('/files/' + str(xid) + '/' + mybookid + '/' + mybookid + '.json?'
           + 'cdnversions=' + str(math.ceil(int(time.time()) / cache_window)))
    fur_url = story_url + url
    # verify=False: these sites present certificates requests cannot validate
    resp = requests.get(fur_url, headers=headers, timeout=20, verify=False)
    data = resp.json()  # parse the response JSON once, not once per field
    info = data['info']
    intro = info['intro']
    book_id = info['articleid']
    out_txt = info['articlename'] + ' 作者：' + info['author']
    chapter_info.append(out_txt)
    an_save_txt(out_txt + '.txt', out_txt)
    an_save_txt(out_txt + '.txt', intro)
    for item in data['list']:
        chapter_name = item['chaptername']
        chapter_url = get_chapter_url(book_id, item['chapterid'], chapter_name)
        chapter_info.append([chapter_name, chapter_url])
    return chapter_info


# 获取文章的url
# Build the URL of one chapter's HTML page.
def get_chapter_url(bookid, chapterid, chapter_name, base_url=None):
    """Return the absolute URL of a chapter's HTML page.

    :param bookid: numeric book id; pages are sharded under ``bookid // 1000``
    :param chapterid: numeric chapter id
    :param chapter_name: unused; kept for signature compatibility with callers
    :param base_url: site root; defaults to the module-level ``story_url``.
        NOTE(review): ``story_url`` is only bound inside the ``__main__``
        block, so calling this from an import without ``base_url`` raises
        NameError — confirm intended usage.
    :return: absolute chapter URL string
    """
    base = story_url if base_url is None else base_url
    xid = bookid // 1000  # identical to math.floor(bookid / 1000) for ints
    return (base + '/files/article/html555/' + str(xid) + '/' + str(bookid)
            + '/' + str(chapterid) + '.html')


def get_content(resume_marker=None):
    """Crawl the chapters of book 89798 starting at *resume_marker*.

    Downloads each chapter page, undoes the site's JS text obfuscation
    (replacement rules applied to the ``cctxt`` variable), and appends the
    chapter name and cleaned text to ``<title>.txt``.

    :param resume_marker: the ``[chapter_name, chapter_url]`` entry (as
        produced by ``get_json_chapter``) to resume from, inclusive;
        defaults to the original hard-coded chapter.
    :raises ValueError: if *resume_marker* is not in the chapter list
        (same failure as the original code's ``list.index`` call).
    """
    if resume_marker is None:
        # Original hard-coded resume point, kept as the default.
        resume_marker = ['第四百一十六章 拜见杨潇殿下',
                         'https://www.wjcyhg.com/files/article/html555/96/96533/46374495.html']
    chapter_info = get_json_chapter(story_url, 89798)
    out_txt = chapter_info[0]
    print(chapter_info)
    # Hoisted out of the loop: the original recomputed this O(n) lookup on
    # every single iteration.
    start = chapter_info.index(resume_marker)
    for info in chapter_info[start:]:
        resp = requests.get(info[1], headers=headers, timeout=20, verify=False)
        html = resp.text
        try:
            # Replacement rules embedded in the page's JS, e.g.
            # cctxt=cctxt.replace(/xxx/g,'yyy');
            rep_info = re.findall(r'cctxt=cctxt\.replace\(/(.*)\);',
                                  html.split('\';')[1].replace('\'', '').replace('/g', ''))
            # The chapter body is the string assigned to `var cctxt`.
            content = (html.split('var cctxt=\'')[1].split('\';')[0]
                       .replace('&nbsp;', '').replace('<br /><br />', '\n\n'))
            r_content = replace_one_list(content, rep_info)
            an_save_txt(out_txt + '.txt', info[0])
            an_save_txt(out_txt + '.txt', r_content)
        except Exception:  # was a bare except; keep best-effort but record it
            logger.exception('failed to parse chapter %s', info)
            print(info)

def temp(url):
    """Fetch *url* (SSL verification disabled) and print the raw HTML.

    Debug helper for inspecting a page's markup before writing parse code.
    """
    response = requests.get(url, headers=headers, timeout=20, verify=False)
    print(response.text)


if __name__ == '__main__':
    # NOTE(review): story_url is empty, so every URL built below is schemeless
    # and the requests calls will fail; presumably this should be set to one of
    # the site roots listed below — confirm before running.
    story_url = ''
    get_content()
    #temp('https://www.sztjgold.com/chapter.html?1#mybookid=48&bookid=1426&chapterid=1395293')
    # https://www.va-etong.com  全本小说网 (Quanben novel site)
    # https://www.sztjgold.com  唐金文学 (Tangjin Literature)
    # https://www.wjcyhg.com    成宇文学 (Chengyu Literature)