import scrapy
import re
from zongheng.items import ZonghengItem, ChapterItem


class NovelSpider(scrapy.Spider):
    """Crawl novels from zongheng.com in three stages:

    1. ``parse``         — book-store listing pages -> ``ZonghengItem`` per book
    2. ``parse_chapter`` — chapter-list API (JSON)  -> one request per chapter
    3. ``parse_content`` — chapter page             -> ``ChapterItem`` with text
    """

    # Spider name; used to launch the crawl (`scrapy crawl novel`).
    name = "novel"
    # Domains the spider is allowed to crawl.
    # allowed_domains = ["zongheng.com"]
    # Start requests: paginated book-store listing pages on zongheng.com.
    # BUGFIX: the original start_urls pointed at haodf.com hospital pages,
    # which contain none of the 'bookinfo' markup parsed below, so the spider
    # could never yield anything. NOTE(review): page range 1..10 is a
    # conservative default — TODO confirm the real number of store pages.
    start_urls = [
        f"https://book.zongheng.com/store/c0/c0/b0/u0/p{page}/v0/s1/t0/u0/i1/ALL.html"
        for page in range(1, 11)
    ]
    # Running count of chapters downloaded (Scrapy's reactor is
    # single-threaded, so a plain int attribute is sufficient).
    counter = 0

    def parse(self, response):
        """Parse one store listing page.

        Yields a ``ZonghengItem`` per book plus a POST request to the chapter
        API for that book's chapter list. Malformed entries are skipped
        instead of crashing the whole page (the original code raised on any
        entry missing a link, an author/genre pair, or a numeric book id).
        """
        for story in response.xpath("//div[@class='bookinfo']"):
            book_name = story.xpath(".//div[@class='bookname']/a/text()").get()

            # getall() returns a list; expect [author, genre].
            meta_texts = story.xpath(".//div[@class='bookilnk']/a/text()").getall()
            if len(meta_texts) < 2:
                continue  # malformed entry: cannot determine author/genre
            author, book_type = meta_texts[:2]

            story_link = story.xpath("../div[@class='bookimg']/a/@href").get()
            if not story_link:
                continue  # no detail link -> nothing useful to emit
            id_match = re.search(r"\d+", story_link)
            if id_match is None:
                continue  # link carries no numeric book id
            book_id = id_match.group()

            # Package the book-level data as an item (yielded as a generator).
            yield ZonghengItem(book_id=book_id, book_name=book_name, author=author,
                               book_type=book_type, story_link=story_link)

            # Ask the chapter API for this book's full chapter list.
            yield scrapy.Request(
                url="https://bookapi.zongheng.com/api/chapter/getChapterList",
                callback=self.parse_chapter,  # handler for this request
                method="POST",
                headers={"content-type": "application/x-www-form-urlencoded; charset=UTF-8"},
                body=f"bookId={book_id}",  # form-encoded POST payload
                meta={"story_name": book_name},  # context passed to the callback
            )

    def parse_chapter(self, response):
        """Parse the chapter-list API response (JSON).

        Yields one request per chapter, carrying book/chapter identifiers in
        ``meta`` so ``parse_content`` can build the final item.
        """
        story_name = response.meta["story_name"]
        for volume in response.json()["result"]["chapterList"]:
            for chapter in volume["chapterViewList"]:
                chapter_name = chapter["chapterName"]
                chapter_id = chapter["chapterId"]
                book_id = chapter["bookId"]
                chapter_url = f"https://read.zongheng.com/chapter/{book_id}/{chapter_id}.html"
                yield scrapy.Request(
                    url=chapter_url,
                    callback=self.parse_content,
                    # Context forwarded to parse_content via meta.
                    meta={
                        "story_name": story_name,
                        "chapter_name": chapter_name,
                        "book_id": book_id,
                        "chapter_id": chapter_id,
                    })

    def parse_content(self, response):
        """Parse a chapter page and yield a ``ChapterItem`` with its text."""
        story_name = response.meta["story_name"]
        chapter_name = response.meta["chapter_name"]
        book_id = response.meta["book_id"]
        chapter_id = response.meta["chapter_id"]
        self.counter += 1
        # Join paragraph texts into one newline-separated body.
        content = "\n".join(response.xpath("//div[@class='content']/p/text()").getall())
        print(self.counter, story_name, chapter_name)
        yield ChapterItem(chapter_id=chapter_id, chapter_name=chapter_name,
                          content=content, book_id=book_id)
