import json
from urllib.parse import urlencode

import scrapy
from lxml import etree


class XiaoshuoSpider(scrapy.Spider):
    """Crawl zongheng.com in three hops: store search -> chapter list -> chapter text.

    Flow:
      1. ``start``        POSTs a form-encoded filter to the store-search API.
      2. ``parse``        reads the JSON book list and POSTs for each book's chapters.
      3. ``parse_detail`` reads the chapter list and GETs each chapter page.
      4. ``parse_article`` extracts and prints the chapter paragraphs.

    NOTE(review): the hard-coded cookies/session tokens below will expire;
    results are printed rather than yielded as items — presumably still a
    work-in-progress spider.
    """

    name = "xiaoshuo"

    async def start(self):
        """Issue the initial form-encoded POST against the store-search endpoint."""
        # Search filter: everything left at 0 (= "all"), serialStatus=1, vip=9.
        # Both 'totalWord' and the misspelled 'totalWold' are sent on purpose —
        # the site's own query string uses 'totalWold' (see the Referer below).
        search_filter = {
            'worksTypes': 0,
            "bookType": 0,
            "subWorksTypes": 0,
            "totalWord": 0,
            "serialStatus": 1,
            "vip": 9,
            "totalWold": 0,
            "categoryId": 0,
            "categoryPid": 0,
            "naodongFilter": 0,
        }
        # Browser-like headers plus a captured session cookie; the API rejects
        # anonymous requests without them.
        search_headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/141.0.0.0 Safari/537.36',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Referer': 'https://www.zongheng.com/books?worksTypes=0&subWorksTypes=0&totalWold=0&serialStatus=1&vip=9&bookType=0',
            'cookie': 'acw_tc=ac11000117624089814982767eb265f1043f05a8df5a1d3e3036c3da088b6a; zhffr=www.baidu.com; sajssdk_2015_cross_new_user=1; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%2219a57c2eebf301-04424d38b3ef1c-26061851-1327104-19a57c2eec04ac%22%2C%22%24device_id%22%3A%2219a57c2eebf301-04424d38b3ef1c-26061851-1327104-19a57c2eec04ac%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E8%87%AA%E7%84%B6%E6%90%9C%E7%B4%A2%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22https%3A%2F%2Fwww.baidu.com%2Flink%22%2C%22%24latest_referrer_host%22%3A%22www.baidu.com%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC%22%7D%7D; ZHID=99F111E4E05100119681A6969CCB432F',
        }
        yield scrapy.Request(
            "https://www.zongheng.com/api2/catefine/storeSearch",
            body=urlencode(search_filter),
            headers=search_headers,
            method="POST",
            callback=self.parse,
        )

    async def parse(self, response):
        """Walk the book list in the search response; request each book's chapter list."""
        for book in response.json()['result']['bookList']:
            book_name = book['name']
            payload = {'bookId': book['bookId']}
            print(book_name, payload)
            # This endpoint takes a JSON body, unlike the form-encoded search above.
            chapter_headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/141.0.0.0 Safari/537.36',
                'Content-Type': 'application/json; charset=utf-8',
                'Referer': 'https://www.zongheng.com/',
                'cookie': 'zhffr=www.baidu.com; sajssdk_2015_cross_new_user=1; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%2219a57c2eebf301-04424d38b3ef1c-26061851-1327104-19a57c2eec04ac%22%2C%22%24device_id%22%3A%2219a57c2eebf301-04424d38b3ef1c-26061851-1327104-19a57c2eec04ac%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E8%87%AA%E7%84%B6%E6%90%9C%E7%B4%A2%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22https%3A%2F%2Fwww.baidu.com%2Flink%22%2C%22%24latest_referrer_host%22%3A%22www.baidu.com%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC%22%7D%7D; ZHID=99F111E4E05100119681A6969CCB432F',
            }
            yield scrapy.Request(
                url="https://bookapi.zongheng.com/api/chapter/getChapterList",
                headers=chapter_headers,
                body=json.dumps(payload),
                callback=self.parse_detail,
                cb_kwargs={'name': book_name},
                method="POST",
            )
            break  # dev throttle: only the first book for now

    async def parse_detail(self, response, name):
        """Walk one chapter group of the chapter list; request each chapter page.

        NOTE(review): only ``chapterList[1]`` is read — presumably index 0 is a
        special group (e.g. front matter); confirm against the API response.
        """
        chapters = response.json()['result']['chapterList'][1]['chapterViewList']
        for chapter in chapters:
            chapter_title = chapter['chapterName']
            page_url = f"https://read.zongheng.com/chapter/{chapter['bookId']}/{chapter['chapterId']}.html"
            print(page_url, chapter_title, chapter['createTime'])
            yield scrapy.Request(
                url=page_url,
                method="GET",
                callback=self.parse_article,
                cb_kwargs={'article_url': page_url, 'title': chapter_title},
            )
            break  # dev throttle: only the first chapter for now

    async def parse_article(self, response, article_url, title):
        """Extract the chapter body paragraphs and print them."""
        print(article_url, title)
        paragraphs = response.xpath("//div[@class='reader-main']//div[@class='content']//p/text()").getall()
        for paragraph in paragraphs:
            print(paragraph)