"""
问题一：请求参数加密
解法：
    1、找到加密的请求参数 sign
    2、看源码中是否有 sign 的明文
    3、若没有sign明文则在 initiator（发起程序） 中找到生成 sign 的JS代码，进行JS逆向

接口参数分析
URL = https://c13.shuqireader.com/pcapi/chapter/contentfree/?bookId=8867235&chapterId=2049843&ut=1695276629&num=1&ver=1&aut=1695276648&sign=42ec06931613daef128b0df099b7e226
?bookId=8867235&chapterId=2049843&ut=1695276629&num=1&ver=1&aut=1695276648&sign=42ec06931613daef128b0df099b7e226
bookId=8867235&chapterId=2049843 这两个 bid 和 cid 可以在“小说章节采集.py”中获取
num=1&ver=1 这两个是固定的
aut=1695276648 是固定的
ut=1695276629  是时间戳
sign=42ec06931613daef128b0df099b7e226 这个签名是动态生成的，并且是加密的

问题二：ChapterContent字段是加密的
解法：
    1、找到加密ChapterContent的JS代码，进行逆向，得到明文
"""
import requests
import execjs
from novel_chapter_collection import ChapterCrawler
from parsel import Selector


class SingleChapterCrawler:
    """Crawl a single Shuqi chapter.

    Workflow per chapter:
      1. Fetch the chapter's HTML page and scrape the signed query-string
         suffix (``contUrlSuffix`` — carries the dynamic ``sign`` parameter).
      2. Request the content API with that suffix to get the encrypted
         ``ChapterContent``.
      3. Decrypt the content by calling ``_decodeCont`` in decodeContent.js
         (reverse-engineered from the site's JS).
    """

    # Seconds to wait before aborting a hung HTTP request.
    REQUEST_TIMEOUT = 10

    def __init__(self, chapter_url, chapter_url_list):
        # List of chapter HTML page URLs to scrape suffixes from.
        self.chapter_url_list = chapter_url_list
        # Base content-API endpoint; the signed suffix is appended per chapter.
        self.url = chapter_url
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
        }
        # Compiled JS context for decryption; populated in run().
        self.js_code = None

    def get_sign(self, i, url):
        """Scrape the i-th signed URL suffix from chapter page *url* and fetch it.

        :param i: index of this chapter within the scraped suffix list.
        :param url: chapter HTML page URL containing the ``.js-dataChapters`` data.
        """
        resp = requests.get(url=url, headers=self.headers, timeout=self.REQUEST_TIMEOUT)
        resp.raise_for_status()  # fail loudly on 4xx/5xx instead of parsing an error page
        selector = Selector(text=resp.text)
        result = selector.css('.js-dataChapters::text').re(r'"contUrlSuffix":"(.*?)",')
        # Guard against a layout change or missing suffix: the original code
        # raised IndexError here when fewer suffixes were found than expected.
        if i >= len(result):
            print(f'contUrlSuffix index {i} not found on {url} (got {len(result)} matches)')
            return
        self.get_data(result[i])

    def get_data(self, params):
        """Request the encrypted chapter content and hand it to parse_data.

        :param params: signed query-string suffix (includes bookId/chapterId/sign).
        """
        print(self.url + params)
        try:
            resp = requests.get(url=self.url + params, headers=self.headers,
                                timeout=self.REQUEST_TIMEOUT)
            resp.raise_for_status()
            # ValueError: body is not JSON; KeyError: field missing from response.
            chapter_content = resp.json()['ChapterContent']
        except (requests.RequestException, ValueError, KeyError) as e:
            print(f'failed to fetch chapter content: {e!r}')
            return
        self.parse_data(chapter_content)

    def parse_data(self, chapter_content):
        """Decrypt *chapter_content* via JS ``_decodeCont`` and print the text.

        The decrypted HTML uses ``<br/>`` for line breaks; strip them for
        plain-text output (double breaks first so none are left behind).
        """
        result = self.js_code.call('_decodeCont', chapter_content).replace('<br/><br/>', '').replace('<br/>', '')
        print(result)

    def run(self):
        """Compile the decryption JS once, then crawl every chapter URL."""
        with open('decodeContent.js', 'r', encoding='utf-8') as f:
            self.js_code = execjs.compile(f.read())

        for i, url in enumerate(self.chapter_url_list):
            self.get_sign(i, url)


if __name__ == '__main__':
    # Collect the chapter page URLs for book 8867235, then crawl each one
    # through the content API endpoint.
    chapter_pages = ChapterCrawler('https://ognv.shuqi.com/chapter?bid=8867235').run()
    api_endpoint = 'https://c13.shuqireader.com/pcapi/chapter/contentfree/'
    SingleChapterCrawler(api_endpoint, chapter_pages).run()
