"""
一、数据来源分析
    1、需求分析
        小说章节名称、小说内容
    2、接口分析
        搜索接口：https://fanqienovel.com/search/%E6%88%91%E4%BB%8E%E8%9C%80%E5%B1%B1%E6%9D%A5
        章节接口：https://fanqienovel.com/page/7272613779400035382
        单章小说接口：https://fanqienovel.com/reader/7272713318308807187

二、爬虫代码实现
    1、发送请求
    2、获取数据
    3、解析数据
    4、保存数据


"""
import time
from tqdm import tqdm
import requests
from parsel import Selector


class TomatoCrawler:
    """Crawler for fanqienovel.com.

    Pipeline (mirrors the module docstring):
      1. search_book()  -- query the search API for ``name``
      2. get_chapter()  -- fetch the chapter list and every chapter body
      3. save_novel()   -- write the accumulated text to ``<name>.txt``
    """

    def __init__(self, name):
        # Novel title: used both as the search keyword and the output file name.
        self.name = name
        self.search_url = ''
        self.chapter_url = ''
        self.novel_url = ''
        self.novel_content = ''
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36"
        }
        # Query-string parameters for the search API.  msToken / a_bogus are
        # anti-bot signatures captured from a browser session -- they are
        # likely to expire and may need to be refreshed manually.
        self.params = {
            'filter': '127, 127, 127, 127',
            'page_count': '10',
            'page_index': '0',
            'query_type': '0',
            'query_word': self.name,
            'msToken': '-uVUH11WGS2Pu6bA4TVVFmjqaejjQeAc0k33VGQWKVUGUZ-1M6lTxBYB-ZZiioBd559FGZDAtsxJRE0R6OT60-yQER_T3GYorpJ3Nl5orRjAGqHPSt8JPe77XULFlA==',
            'a_bogus': 'YXlxDOgUMsm1VEVl4wkz99smyRS0YW5kgZEz37C5tUqz',
        }
        self.search_book_data_list = []  # parsed search results (list of dicts)
        self.book_choosed = 0  # index of the selected search result; 0 = first book
        self.session = requests.Session()  # reuse one connection for all requests

    def search_book(self):
        """Query the search API and collect result metadata into
        ``search_book_data_list``.

        Raises:
            ValueError: if the API returns no matching books.
        """
        self.search_url = 'https://fanqienovel.com/api/author/search/search_book/v1'
        response = self.session.get(url=self.search_url, params=self.params, headers=self.headers)
        json_data = response.json()
        # FIX: fail loudly with a clear message instead of a KeyError/IndexError
        # later when the search comes back empty.
        books = (json_data.get('data') or {}).get('search_book_data_list') or []
        if not books:
            raise ValueError(f'No search results for {self.name!r}')
        for book in books:
            self.search_book_data_list.append({
                'book_name': book['book_name'],
                'book_id': book['book_id'],
                'category': book['category'],
                # The API returns a unix timestamp; render it human-readable.
                'last_chapter_time': time.ctime(int(book['last_chapter_time'])),
                'read_count': book['read_count'],
                'word_count': book['word_count'],
            })

    def get_chapter(self):
        """Fetch the chapter list of the selected book, then download every
        chapter and accumulate the text into ``novel_content``."""
        # FIX: honour ``book_choosed`` instead of hard-coding index 0 -- the
        # attribute exists precisely to select which search result to download.
        book_id = self.search_book_data_list[self.book_choosed]['book_id']
        self.chapter_url = f'https://fanqienovel.com/page/{book_id}'
        response = self.session.get(url=self.chapter_url, headers=self.headers)
        selector1 = Selector(text=response.text)
        chapter_list = selector1.css('.chapter-item a::attr(href)').getall()
        # href looks like ".../reader/<chapter_id>" -- keep only the id part.
        chapter_list = [x.split('/')[-1] for x in chapter_list]
        chapters = []
        for chapter_id in tqdm(chapter_list):
            self.novel_url = f'https://fanqienovel.com/reader/{chapter_id}'
            resp = self.session.get(url=self.novel_url, headers=self.headers)
            selector2 = Selector(text=resp.text)
            p_text = selector2.xpath('//div[contains(@class, "muye-reader-content")]/div/p/text()').getall()
            chapters.append('\n'.join(p_text))
        # FIX: join chapters with a newline so consecutive chapters do not run
        # together; a single join also avoids quadratic += concatenation.
        self.novel_content = '\n'.join(chapters)

    def save_novel(self):
        """Append the downloaded text to ``<name>.txt``.

        NOTE(review): mode 'a' means re-running the crawler appends a second
        copy of the book to the same file -- confirm whether 'w' (overwrite)
        was intended.
        """
        with open(self.name + '.txt', 'a', encoding='utf-8') as f:
            f.write(self.novel_content)

    def run(self):
        """Execute the full pipeline: search -> download -> save."""
        self.search_book()
        self.get_chapter()
        self.save_novel()


if __name__ == '__main__':
    # Entry point: crawl the novel by title and write it to "<title>.txt".
    novel_title = '我从蜀山来'
    TomatoCrawler(novel_title).run()
