import scrapy
import os

class ZongbangSpider(scrapy.Spider):
	"""Crawl the ranking page of fozhidaoxs.cc and save each listed book's
	chapters as plain-text files under ``./books/<book>/<chapter>.txt``.

	Pipeline: ranking page -> per-book chapter list -> per-chapter detail page.
	"""

	name = "zongbang"

	# Characters that are invalid (or hazardous) in file/directory names on
	# common filesystems; stripped from titles before they become paths.
	_UNSAFE_CHARS = '\\/:*?"<>|'

	@staticmethod
	def _safe_name(title):
		"""Return *title* stripped of path-hostile characters.

		Falls back to ``"untitled"`` when the title is empty or ``None`` so a
		valid path component is always produced.
		"""
		cleaned = "".join(
			ch for ch in (title or "") if ch not in ZongbangSpider._UNSAFE_CHARS
		).strip()
		return cleaned or "untitled"

	def start_requests(self):
		"""Entry point: request the ranking page (default callback is ``parse``)."""
		url = "https://fozhidaoxs.cc/paihang.html"
		yield scrapy.Request(url)

	def parse(self, response):
		"""Parse the ranking list and schedule one chapter-list request per book.

		Only the first ``ul.tli`` element is used — presumably the main ranking
		list on the page (TODO confirm against the live markup).
		"""
		tlis = response.xpath('//ul[@class="tli"]')
		if not tlis:
			return
		for book in tlis[0].xpath('./li/a'):
			book_href = book.xpath('./@href').extract_first()
			book_title = book.xpath('./text()').extract_first()
			if not book_href:
				# Anchor without a target: requesting it would re-fetch the
				# ranking page itself, so skip it.
				continue
			book_href = response.urljoin(book_href)
			print("获取书名以及连接....", book_title, book_href)

			# Fetch the book's detail/chapter-list page.
			yield scrapy.Request(
				book_href,
				callback=self.parse_chapter_list,
				cb_kwargs={"book_title": book_title},
			)

	def parse_chapter_list(self, response, book_title):
		"""Parse a book's chapter list and schedule one request per chapter.

		The first 9 ``dd a`` entries are skipped — presumably "latest chapter"
		shortcuts rather than the real table of contents (TODO confirm).
		"""
		print("书籍目录", response, book_title)
		for capter in response.css('.listmain dd a')[9:]:
			capter_title = capter.css('::text').extract_first()
			capter_href = capter.css('::attr("href")').extract_first()
			if not capter_href:
				continue  # malformed entry: no link to follow
			capter_href = response.urljoin(capter_href)
			print("获取章节以及连接....", capter_title, capter_href)

			# Fetch the chapter body; titles travel via request meta.
			yield scrapy.Request(capter_href, callback=self.parse_chapter_detail, meta={
				"book_title": book_title,
				"capter_title": capter_title
			})

	def parse_chapter_detail(self, response):
		"""Extract a chapter's text and write it to ``./books/<book>/<chapter>.txt``."""
		print(response.meta, "111111-")
		book_title = response.meta.get('book_title')
		capter_title = response.meta.get('capter_title')

		# Strip carriage returns and non-breaking spaces; keep one line per
		# text node, joined with newlines (trailing newline preserved, as the
		# original accumulation loop produced).
		lines = response.xpath('//div[@class="showtxt"]/text()').extract()
		content = "".join(
			line.replace('\r', '').replace('\xa0', '') + '\n' for line in lines
		)

		# makedirs(exist_ok=True) creates the whole ./books/<book> tree in one
		# call and is race-free, unlike the exists()+mkdir() pattern.
		book_path = os.path.join("./books", self._safe_name(book_title))
		os.makedirs(book_path, exist_ok=True)
		capter_path = os.path.join(book_path, f"{self._safe_name(capter_title)}.txt")
		with open(capter_path, "w", encoding="utf8") as f:
			f.write(content)
