import os

import requests
from collection_catalogue import CollectionCatalogue
import re
import execjs


class ChapterCrawler:
    """Download every chapter of a tadu.com book, strip ad paragraphs,
    and save each chapter as novels/<book_id>/<chapter_id>.txt."""

    def __init__(self, book_id=1011591):
        # book_id: numeric id of the book on tadu.com
        self.book_id = book_id
        self.chapter_id = ''        # id of the chapter currently being fetched
        self.chapter_url_list = []  # chapter page URLs collected from the catalogue
        self.url = ''               # content-API URL of the current chapter
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36'
        }
        self.js_code = None         # compiled JS context, set by execute_js()

    def get_chapter(self):
        """Fetch the current chapter's JSON and hand it to the parser.

        Failures are reported but do not abort the crawl, preserving the
        original best-effort behaviour — but without the bare ``except``
        that silently hid every error (including Ctrl-C).
        """
        try:
            # timeout keeps a stalled server from hanging the whole run.
            resp = requests.get(url=self.url, headers=self.headers, timeout=10)
            resp.raise_for_status()
            self.parse_chapter(resp.json())
        except (requests.RequestException, ValueError, KeyError) as e:
            # ValueError: body was not JSON; KeyError: unexpected JSON shape.
            print(f'chapter {self.chapter_id} failed: {e}')

    def execute_js(self):
        """Compile the local JS file that derives the ad "data-limit" token."""
        with open('encipher_data_limit.js', 'r', encoding='utf-8') as f:
            self.js_code = execjs.compile(f.read())

    def ad_free(self, content):
        """Remove <p> ad paragraphs whose data-limit attribute matches the
        token computed from the chapter id; return the cleaned HTML."""
        data_limit = self.js_code.call('encipher', self.chapter_id)
        content = re.sub(rf'<p data-limit="{data_limit}">(.*?)</p>', '', content)
        return content

    def parse_chapter(self, data):
        """Pull paragraph text out of the chapter JSON and save it to disk."""
        content = data['data']['content']
        content = self.ad_free(content)
        p_list = re.findall(r'<p data-limit=".*?">(.*?)</p>', content, re.S)
        self.save_text('\n'.join(p_list))

    def save_text(self, text):
        """Write the chapter text to novels/<book_id>/<chapter_id>.txt."""
        # makedirs(exist_ok=True) also creates the missing 'novels' parent,
        # which os.mkdir could not, and avoids the check-then-create race.
        os.makedirs(f'novels/{self.book_id}', exist_ok=True)
        with open(f'novels/{self.book_id}/{self.chapter_id}.txt', 'w', encoding='utf-8') as f:
            f.write(text)

    def run(self):
        """Collect the catalogue, then fetch and save every chapter in order."""
        catalogue_url = f'https://www.tadu.com/book/catalogue/{self.book_id}'
        self.chapter_url_list = CollectionCatalogue(catalogue_url).run()
        self.execute_js()
        for i, u in enumerate(self.chapter_url_list):
            self.chapter_id = u.split('/')[-2]
            self.url = f'https://www.tadu.com/getPartContentByCodeTable/{self.book_id}/{i+1}'
            self.get_chapter()


if __name__ == '__main__':
    # Ask for a numeric book id, then crawl every chapter of that book.
    raw = input('(例如：1011591)\nbook id = ')
    ChapterCrawler(int(raw)).run()
