# -*- coding: utf-8 -*-
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
#作者：cacho_37967865
#博客：https://blog.csdn.net/sinat_37967865
#文件：get_Web_json.py
#日期：2023-02-26
#备注：Python爬虫爬取json格式小说
章节列表是json格式，章节详情通过BeautifulSoup处理
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

import requests
import random
import os
from bs4 import BeautifulSoup
from pycacho.cachobase.file_deal import an_save_txt,ann_save_txt
from pycacho.cachobase.logger import Logger


# Module-level logger for this scraper (project-local Logger wrapper).
logger = Logger("get_Web_json").get_log()

# Pool of User-Agent strings; one is picked at random at import time.
agent = ["Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre",
         'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36']

# Request headers shared by every HTTP call below. NOTE: the User-Agent is
# chosen once per process, not per request.
headers = {
    'User-Agent': random.choice(agent)
}

# When calling requests with verify=False to skip SSL verification, the log fills
# with urllib3 warnings; the four lines below disable keep-alive, raise the retry
# count, and silence those warnings.
# NOTE(review): `session` is configured here but the functions below call
# requests.get() directly, so the keep_alive=False setting has no effect on them.
session = requests.Session()
session.keep_alive = False
requests.adapters.DEFAULT_RETRIES = 5
requests.packages.urllib3.disable_warnings()


# 获取某个小说所有章节 (site-->网站地址，article_no-->文章名,total_no-->总章节数量，start_chapter-->开始章节)
def get_article(site, article_no, total_no, start_chapter):
    """Fetch a novel's chapter list (JSON) and return [name, url] pairs.

    site          -- site root URL
    article_no    -- novel id used in the chapterlist query
    total_no      -- page size (number of chapters per request)
    start_chapter -- chapter number to start collecting from; everything
                     before it in the listing is skipped
    """
    url = site + '/ajaxService?action=chapterlist&articleno=%s&index=0&size=%s&sort=1'% (article_no, total_no)
    response = requests.get(url, headers=headers)
    chapters = []
    collecting = False
    for item in response.json()['items']:
        if item['chapterno'] == start_chapter:
            collecting = True       # from here on, every chapter is kept
        if not collecting:
            continue                # still before the starting chapter -- skip
        name = item['chaptername'].replace('章：','章 ').replace('章、','章 ')
        chapters.append([name, item['url']])
    return chapters


# 具体章节内容
def get_chapter(out_txt, site, chapter_name, chapter_url):
    """Download one chapter page and append its title and body to out_txt.

    out_txt      -- path of the output text file
    site         -- site root URL
    chapter_name -- chapter title, written before the body
    chapter_url  -- chapter path, joined to `site`

    If the content node is missing (layout change or blocked request),
    logs an error and skips the chapter instead of crashing.
    """
    url = site + chapter_url
    resp = requests.get(url, headers=headers)
    resp.encoding = 'utf-8'
    # Turn paragraph tags into newline/tab before parsing so get_text()
    # keeps the paragraph layout ('\t' == tab key).
    html = resp.text.replace('</p><p>','\n\t').replace('<p>','\t')
    soup = BeautifulSoup(html,'html5lib')
    node = soup.find(id='nr1')
    if node is None:
        # Previously this raised AttributeError on .get_text(); fail loudly
        # but keep the run going for the remaining chapters.
        logger.error('chapter content node id="nr1" not found: %s', url)
        return
    content = node.get_text()
    print(chapter_name)
    ann_save_txt(out_txt, chapter_name)
    an_save_txt(out_txt, content)


if __name__ == '__main__':
    # Novel site root, e.g. detail page https://m.92dxs.cc/info/187/187642.html
    jad_url = 'https://m.92dxs.cc'
    story_id = ''                        # novel id -- placeholder, fill in before running
    start_chapter = 67543596             # first chapter number to download (int)

    # Raw string avoids the invalid '\P' escape of the original
    # 'F:\PythonProject\Python\\' literal (SyntaxWarning on Python 3.12+);
    # the resulting path is identical.
    out_txt = os.path.join(r'F:\PythonProject\Python', story_id + '.txt')
    if os.path.exists(out_txt):
        os.remove(out_txt)               # start from a clean output file
    article_info = get_article(jad_url, story_id, '100', start_chapter)  # '100' -> 100 chapters per page
    for chapter_name, chapter_url in article_info:
        get_chapter(out_txt, jad_url, chapter_name, chapter_url)
