import json
import logging
import os.path
import re
import shutil
import time

import scrapy
import requests
from lxml import etree

# Log INFO and above to both ./spider.log (UTF-8) and the console.
_log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
_file_handler = logging.FileHandler('spider.log', encoding='utf-8')
_console_handler = logging.StreamHandler()
logging.basicConfig(
    level=logging.INFO,
    format=_log_format,
    handlers=[_file_handler, _console_handler],
)




# Browser-like headers for the store-search API POST.
# NOTE(review): hardcoded Chrome 142 UA / sec-ch-ua values — presumably copied
# from a live browser session; may need refreshing if the site fingerprints them.
headers = {
    "Accept": "application/json, text/plain, */*",
    "Accept-Language": "zh",
    "Connection": "keep-alive",
    "Content-Type": "application/x-www-form-urlencoded",
    "Origin": "https://www.zongheng.com",
    "Referer": "https://www.zongheng.com/books?worksTypes=0&bookType=0&subWorksTypes=0&totalWord=0&serialStatus=9&vip=9",
    "Sec-Fetch-Dest": "empty",
    "Sec-Fetch-Mode": "cors",
    "Sec-Fetch-Site": "same-origin",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/142.0.0.0 Safari/537.36",
    "sec-ch-ua": "\"Chromium\";v=\"142\", \"Google Chrome\";v=\"142\", \"Not_A Brand\";v=\"99\"",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"Windows\""
}
# Session cookies captured from a browser visit.
# NOTE(review): these look like short-lived session/anti-bot tokens (acw_tc,
# PassportCaptchaId, Hm_* timestamps) — likely to expire; confirm the spider
# still works without refreshing them.
cookies = {
    "zhffr": "www.google.com.hk",
    "sajssdk_2015_cross_new_user": "1",
    "ZHID": "1AC28BF78A009FD0FE7B4C3BD7260234",
    "zh_visitTime": "1762408999868",
    "v_user": "https%3A%2F%2Fwww.zongheng.com%2F%7Chttps%3A%2F%2Fhuayu.zongheng.com%2F%7C70693084",
    "Hm_lvt_c202865d524849216eea846069349eb9": "1762409000",
    "HMACCOUNT": "9A0E65D276AEF91B",
    "PassportCaptchaId": "ce34e861b2bf240fcfd3eb97af0d7bc8",
    "Hm_lpvt_c202865d524849216eea846069349eb9": "1762410575",
    "sensorsdata2015jssdkcross": "%7B%22distinct_id%22%3A%2219a57c2e145203-0fc733c6ead1398-26061b51-1638720-19a57c2e14612c2%22%2C%22%24device_id%22%3A%2219a57c2e145203-0fc733c6ead1398-26061b51-1638720-19a57c2e14612c2%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22%22%2C%22%24latest_referrer_host%22%3A%22%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%7D%7D",
    "acw_tc": "ac11000117624126755437434e002a97158165adbe6d6d3da5f03b7fff8bd5"
}
# Store-search endpoint (POST, form-encoded).
url = "https://www.zongheng.com/api2/catefine/storeSearch"
# Form-field template for the store search; "0"/"9" values mean "no filter"
# per the Referer URL's query string above. pageNum is overridden per request.
data = {
    "worksTypes": "0",
    "bookType": "0",
    "subWorksTypes": "0",
    "totalWord": "0",
    "serialStatus": "9",
    "vip": "9",
    "pageNum": "1",
    "pageSize": "20",
    "categoryId": "0",
    "categoryPid": "0",
    "order": "weekOrder",
    "naodongFilter": "0"
}

# Start each run from a clean output directory: remove any previous run's
# data, then (re)create ./zhongwen.
# Bug fix: the original if/else removed an existing directory WITHOUT
# recreating it, and only created it when it did not exist — so an existing
# tree was deleted but the fresh directory was never made.
if os.path.exists("./zhongwen"):
    shutil.rmtree('./zhongwen')
os.makedirs('./zhongwen')







class ZongHengZhongWenSpider(scrapy.Spider):
    """Crawl zongheng.com's store listing, then each book's chapters.

    Pipeline: store-search pages (1..99) -> chapter list per book ->
    chapter HTML page -> text saved to ./zhongwen/<book>/<chapter>.txt.
    """

    name = "zong"

    # Throttle politely via Scrapy instead of time.sleep() inside async
    # callbacks — a blocking sleep stalls the whole event loop, delaying
    # every in-flight request, not just this book's.
    custom_settings = {"DOWNLOAD_DELAY": 1}

    # Characters that are illegal in Windows (and awkward on Unix) file names.
    _UNSAFE_CHARS = re.compile(r'[\\/:*?"<>|]')

    def _safe_name(self, raw):
        """Return *raw* made safe for use as a file/directory name."""
        return self._UNSAFE_CHARS.sub("_", raw).strip() or "untitled"

    async def start(self):
        """Emit one POST per store-listing page.

        Bug fix: the original loop had a trailing ``break``, so despite
        ``range(1, 100)`` only page 1 was ever requested.
        """
        search_url = "https://www.zongheng.com/api2/catefine/storeSearch"
        for page in range(1, 100):
            # Reuse the module-level form template, overriding only pageNum.
            yield scrapy.FormRequest(
                url=search_url,
                formdata={**data, "pageNum": str(page)},
                headers=headers,
                cookies=cookies,
                callback=self.parse,
            )

    async def parse(self, response):
        """Parse one store-listing JSON page; request each book's chapter list."""
        book_json = json.loads(response.text)
        for book in book_json["result"]["bookList"]:
            book_name = book["name"]
            logging.info(f"开始爬取 {book_name}")
            # exist_ok avoids the check-then-create race of the original.
            os.makedirs(f"./zhongwen/{self._safe_name(book_name)}", exist_ok=True)
            yield scrapy.FormRequest(
                url="https://bookapi.zongheng.com/api/chapter/getChapterList",
                formdata={"bookId": str(book["bookId"])},
                cb_kwargs={"book": book},
                callback=self.parse_book,
            )

    async def parse_book(self, response, book):
        """Parse a chapter-list JSON response; request every chapter page."""
        try:
            # Keep the try body minimal: only the JSON decode/lookup can
            # legitimately fail here (malformed or error response).
            chapter_list = json.loads(response.text)["result"]["chapterList"]
        except (ValueError, KeyError, TypeError) as e:
            logging.error(f"{book['name']} 爬取失败 {e}")
            return
        for volume in chapter_list:
            for chapter_item in volume["chapterViewList"]:
                chapter_name = chapter_item["chapterName"]
                logging.info(f"开始爬取 {book['name']} {chapter_name}")
                yield scrapy.FormRequest(
                    url=f"https://read.zongheng.com/chapter/{book['bookId']}/{chapter_item['chapterId']}.html",
                    cb_kwargs={"chapter": chapter_item, "book_name": book["name"]},
                    callback=self.parse_chapter,
                )

    async def parse_chapter(self, response, chapter, book_name):
        """Extract the chapter body text from HTML and write it to disk."""
        chapter_name = chapter["chapterName"]
        try:
            tree = etree.HTML(response.text)
            paragraphs = tree.xpath("//div[@class='content']/p/text()")
            content = "\n".join(paragraphs)
            # Sanitize both path components: raw chapter titles may contain
            # characters (e.g. '/', '?') that break open()/the directory tree.
            path = (f"./zhongwen/{self._safe_name(book_name)}/"
                    f"{self._safe_name(chapter_name)}.txt")
            with open(path, 'w', encoding="utf-8") as f:
                f.write(content)
            logging.info(f"{book_name} {chapter_name} 爬取成功")
        except Exception as e:
            logging.error(f"{book_name} {chapter_name} 爬取失败 {e}")







