from typing import Iterable

import scrapy
import os
from scrapy import Request


class ZongbangSpider(scrapy.Spider):
    """Crawl the ranking page of fozhidaoxs.cc and save every book's
    chapters as plain-text files under ``./books/<book_title>/<chapter>.txt``.

    Pipeline: ``start_requests`` -> ``parse`` (ranking list) ->
    ``parse_chapter_list`` (one book's chapter index) ->
    ``parse_chapter_detail`` (one chapter's text, written to disk).
    """

    name = "zongbang"
    # allowed_domains = ["fozhidaoxs.cc"]
    # start_urls = ["https://fozhidaoxs.cc"]

    def start_requests(self):
        # Single entry point: the site-wide ranking page.
        url = "https://fozhidaoxs.cc/paihang.html"
        yield scrapy.Request(url)

    def parse(self, response):
        """Extract (title, href) for each book in the first ranking list
        and request that book's chapter-list page."""
        tlis = response.xpath('//ul[@class="tli"]')
        if not tlis:
            return
        for book in tlis[0].xpath('./li/a'):
            book_href = book.xpath('./@href').extract_first()
            book_title = book.xpath('./text()').extract_first()
            if not book_href:
                # Anchor without an href — nothing to follow.
                continue
            # urljoin handles relative and absolute hrefs alike.
            book_href = response.urljoin(book_href)
            # Use the spider logger (lazy %s args) instead of print().
            self.logger.info("获取书名以连接.... %s %s", book_title, book_href)
            yield scrapy.Request(
                book_href,
                callback=self.parse_chapter_list,
                cb_kwargs={"book_title": book_title},
            )

    def parse_chapter_list(self, response, book_title=None):
        """Extract chapter links from a book page and request each chapter.

        BUG FIX: ``parse`` scheduled requests with this callback, but the
        method was missing entirely, so the spider raised AttributeError on
        the first book-page response.
        NOTE(review): the XPath below follows the common "biquge" novel-site
        template (consistent with the ``div.showtxt`` chapter body used in
        ``parse_chapter_detail``) — verify it against the live site.
        """
        for chapter in response.xpath('//div[@id="list"]/dl/dd/a'):
            chapter_href = chapter.xpath('./@href').extract_first()
            capter_title = chapter.xpath('./text()').extract_first()
            if not chapter_href:
                continue
            yield scrapy.Request(
                response.urljoin(chapter_href),
                callback=self.parse_chapter_detail,
                cb_kwargs={
                    "book_title": book_title,
                    "capter_title": capter_title,
                },
            )

    def parse_chapter_detail(self, response, book_title=None, capter_title=None):
        """Write one chapter's text to ``./books/<book_title>/<capter_title>.txt``.

        BUG FIX: the original read ``book_title``/``capter_title`` from
        ``response.meta``, but upstream requests pass them via ``cb_kwargs``,
        which arrive as callback *keyword arguments*, not meta entries — so
        both lookups always returned None (and the old signature could not
        even accept the kwargs, raising TypeError).  The meta lookup is kept
        as a fallback for backward compatibility.
        """
        book_title = book_title or response.meta.get('book_title')
        capter_title = capter_title or response.meta.get('capter_title')

        lines = response.xpath('//div[@class="showtxt"]/text()').extract()
        # Strip carriage returns and non-breaking spaces injected by the
        # page template; join once instead of quadratic += concatenation.
        content = "".join(
            line.replace('\r', '').replace('\xa0', '') + '\n' for line in lines
        )

        # makedirs(exist_ok=True) creates the whole path in one call and
        # avoids the check-then-create race of os.path.exists + os.mkdir.
        book_path = os.path.join("./books", str(book_title))
        os.makedirs(book_path, exist_ok=True)
        capter_path = os.path.join(book_path, f"{capter_title}.txt")
        with open(capter_path, "w", encoding="utf8") as f:
            f.write(content)
