import time
from urllib.parse import urlencode

import scrapy
import os
import shutil


class ZonghengSpider(scrapy.Spider):
    """Crawl zongheng.com: POST the store-search API for a filtered book
    list, fetch each book's chapter list, then download every chapter's
    text into ``static/<book name>/<page id>.<title>.txt``.
    """

    name = "zongheng"

    # Characters that are illegal in Windows file names (and '/' on all
    # platforms); stripped from scraped titles before they are used as
    # path components, otherwise a '/' in a title would create an
    # unintended sub-directory or make open()/makedirs() fail.
    _ILLEGAL_PATH_CHARS = '\\/:*?"<>|'

    @classmethod
    def _sanitize(cls, text: str) -> str:
        """Return *text* with path-hostile characters removed so it is
        safe to use as a single file or directory name component."""
        return ''.join(ch for ch in text if ch not in cls._ILLEGAL_PATH_CHARS)

    async def start(self):
        """Entry point (Scrapy >= 2.13 async ``start``): request page 1 of
        the store listing filtered to finished (serialStatus=1) books."""
        cookies = {
            'acw_tc': '2760820317624090089501059ed0444193a9b6961608ae0046bb66a5e9df60',
            'zhffr': 'www.baidu.com',
            'sajssdk_2015_cross_new_user': '1',
            'sensorsdata2015jssdkcross': '%7B%22distinct_id%22%3A%2219a57c352408e0-0d7fa3837fa78b8-26061851-1384801-19a57c352411854%22%2C%22%24device_id%22%3A%2219a57c352408e0-0d7fa3837fa78b8-26061851-1384801-19a57c352411854%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E8%87%AA%E7%84%B6%E6%90%9C%E7%B4%A2%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22https%3A%2F%2Fwww.baidu.com%2Flink%22%2C%22%24latest_referrer_host%22%3A%22www.baidu.com%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC%22%7D%7D',
            'ZHID': '8E4D0628617FE1A606BCCF162B095684',
        }

        headers = {
            'Accept': 'application/json, text/plain, */*',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'no-cache',
            'Connection': 'keep-alive',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Origin': 'https://www.zongheng.com',
            'Pragma': 'no-cache',
            'Referer': 'https://www.zongheng.com/books?worksTypes=0&subWorksTypes=0&totalWord=0&serialStatus=1&vip=9&bookType=0',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-origin',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/141.0.0.0 Safari/537.36',
            'sec-ch-ua': '"Google Chrome";v="141", "Not?A_Brand";v="8", "Chromium";v="141"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
        }

        # Store-search filter; mirrors the query string in the Referer above.
        data = {
            'worksTypes': '0',
            'bookType': '0',
            'subWorksTypes': '0',
            'totalWord': '0',
            'serialStatus': '1',
            'vip': '9',
            'categoryId': '0',
            'categoryPid': '0',
            'naodongFilter': '0',
            'pageNum': '1',
        }
        # FormRequest urlencodes `formdata` into the POST body for us.
        yield scrapy.FormRequest(
            url='https://www.zongheng.com/api2/catefine/storeSearch',
            method='POST',
            headers=headers,
            formdata=data,
            cookies=cookies,
        )

    async def parse(self, response):
        """Handle the store-search JSON: for each book, reset its output
        directory and request its chapter list from bookapi.zongheng.com."""
        for book in response.json()['result']['bookList']:
            book_name = self._sanitize(book['name'])
            book_id = book['bookId']

            # Recreate the per-book directory from scratch so stale chapter
            # files from a previous run cannot linger.
            book_dir = f'static/{book_name}'
            if os.path.exists(book_dir):
                shutil.rmtree(book_dir)
            os.makedirs(book_dir)

            cookies = {
                'zhffr': 'www.baidu.com',
                'sajssdk_2015_cross_new_user': '1',
                'sensorsdata2015jssdkcross': '%7B%22distinct_id%22%3A%2219a57c352408e0-0d7fa3837fa78b8-26061851-1384801-19a57c352411854%22%2C%22%24device_id%22%3A%2219a57c352408e0-0d7fa3837fa78b8-26061851-1384801-19a57c352411854%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E8%87%AA%E7%84%B6%E6%90%9C%E7%B4%A2%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22https%3A%2F%2Fwww.baidu.com%2Flink%22%2C%22%24latest_referrer_host%22%3A%22www.baidu.com%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC%22%7D%7D',
                'ZHID': '8E4D0628617FE1A606BCCF162B095684',
            }

            headers = {
                'accept': 'application/json, text/plain, */*',
                'accept-language': 'zh-CN,zh;q=0.9',
                'cache-control': 'no-cache',
                'content-type': 'application/x-www-form-urlencoded',
                'origin': 'https://www.zongheng.com',
                'pragma': 'no-cache',
                'priority': 'u=1, i',
                'referer': 'https://www.zongheng.com/',
                'sec-ch-ua': '"Google Chrome";v="141", "Not?A_Brand";v="8", "Chromium";v="141"',
                'sec-ch-ua-mobile': '?0',
                'sec-ch-ua-platform': '"Windows"',
                'sec-fetch-dest': 'empty',
                'sec-fetch-mode': 'cors',
                'sec-fetch-site': 'same-site',
                'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/141.0.0.0 Safari/537.36',
            }

            data = {
                'bookId': f'{book_id}',
            }
            # Plain Request with a hand-urlencoded body: the chapter-list API
            # lives on a different host, so FormRequest.from_response does not
            # apply here; the urlencoded form content-type is set above.
            yield scrapy.Request(
                url='https://bookapi.zongheng.com/api/chapter/getChapterList',
                method='POST',
                callback=self.parse2,
                body=urlencode(data),
                headers=headers,
                cookies=cookies,
                meta={'book_name': book_name},
            )
            # NOTE(review): debug limiter — only the first book of the listing
            # is crawled. Remove this `break` to process every book.
            break

    async def parse2(self, response):
        """Handle the chapter-list JSON: schedule a request per chapter.

        Chapters get a sequential ``page_id`` in list order so the saved
        files sort in reading order.
        """
        book_name = response.meta['book_name']
        page_id = 0
        for tome in response.json()['result']['chapterList']:
            # NOTE(review): keeps only volumes with tomeNo == 1 or >= 200 —
            # presumably the free volume plus special volumes; confirm intent.
            tome_no = tome['tome']['tomeNo']
            if tome_no >= 200 or tome_no == 1:
                for chapter in tome['chapterViewList']:
                    page_id += 1
                    book_id = chapter['bookId']
                    chapter_id = chapter['chapterId']
                    yield scrapy.Request(
                        f'https://read.zongheng.com/chapter/{book_id}/{chapter_id}.html',
                        callback=self.parse3,
                        meta={
                            'chapter_id': chapter_id,
                            'book_name': book_name,
                            'page_id': page_id,
                        },
                    )

    async def parse3(self, response):
        """Extract one chapter's title and paragraphs from the reader page
        and write them to ``static/<book_name>/<page_id>.<title>.txt``."""
        book_name = response.meta['book_name']
        chapter_id = response.meta['chapter_id']
        page_id = response.meta['page_id']

        title = response.xpath(
            f"//div[@id='chapter_{chapter_id}']/div[@class='title']"
            "/div[@class='title_txtbox']/text()"
        ).get()
        paragraphs = response.xpath(
            f"//div[@id='chapter_{chapter_id}']/div[@class='content']/p/text()"
        ).getall()

        # Fall back to the chapter id when the title node is missing so the
        # file name never becomes "<page>.None.txt"; strip path-hostile chars.
        title = self._sanitize(title) if title else str(chapter_id)

        content = '\n'.join(paragraphs)
        with open(f'static/{book_name}/{page_id}.{title}.txt', 'w', encoding='utf-8') as f:
            f.write(content)










