# import scrapy
# from bs4 import BeautifulSoup
# from scrapy_movie_099.items import ScrapyMovie099Item
# import os
# import re
# import json
# class MvSpider(scrapy.Spider):
#     # 构建JSON文件路径（在当前工作目录下）
#     json_file_path = 'movie.json'

#     name = "xs"
#     allowed_domains = ["www.qudvdu.com"]
#     # start_url_template = "https://www.qudvdu.com/book/453/453170_{}" # 被关
#     # start_url_template = "https://www.qudvdu.com/book/422/422616_{}" # 一品布衣
#     start_url_template = "https://www.qudvdu.com/book/435/435820_{}" # 反派：谁说我是来退婚的？
#     download_delay = 0.5  # 设置延迟为0.5秒

#     start_page = 1
#     end_page = 25


#     def start_requests(self):
#         # os.environ["HTTPS_PROXY"] = 'http://120.46.137.97:8080'
#         # 检查文件是否存在
#         if not os.path.exists(self.json_file_path):
#             # 如果文件不存在，则创建文件并写入空列表[]
#             with open(self.json_file_path, 'w') as file:
#                 json.dump([], file)
#             self.log(f'文件 {self.json_file_path} 不存在，已创建并写入空列表.')
#         else:
#             # 如果文件存在，清空内容并写入空列表[]
#             with open(self.json_file_path, 'w') as file:
#                 json.dump([], file)
#             self.log(f'文件 {self.json_file_path} 已存在，已清空内容并写入空列表.')

#         # 这里设置页数范围，例如从1到10页

#         for  page in range(self.start_page, self.end_page + 1):
#             start_url = self.start_url_template.format(page)
#             yield scrapy.Request(url=start_url, callback=self.parse)

#     def parse(self, response):
#         a_list = response.xpath('//div[@class="container"]//div[@class="section-box"][2]//a')
#             # 清理文件名中的非法字符
#         for  a in a_list:
#             # 确保a是Selector对象
#             if isinstance(a, scrapy.selector.unified.Selector):
#                 name = a.xpath('text()').extract_first()

#                 # 构建章节链接
#                 href = a.xpath('@href').extract_first()
#                 chapter_url = response.urljoin(href)


#                 # 读取已有JSON数据或创建一个新的空列表
#                 existing_data = []
#                 if os.path.exists(self.json_file_path) and os.path.getsize(self.json_file_path) > 0:
#                     with open(self.json_file_path, 'r', encoding='utf-8') as json_file:
#                         try:
#                             existing_data = json.load(json_file)
#                         except json.JSONDecodeError:
#                             pass

#                 # 添加当前章节信息到JSON数据中
#                 existing_data.append({'name': name, 'url': chapter_url})

#                 # 写入更新后的JSON数据
#                 with open(self.json_file_path, 'w', encoding='utf-8') as json_file:
#                     json.dump(existing_data, json_file, ensure_ascii=False, indent=4)


#     def parse_second(self, response):
#         print('parse_secondparse_secondparse_secondparse_secondparse_secondparse_secondparse_secondparse_secondparse_secondparse_second')
#         # 从meta中取回传递的item
#         # item = response.meta['item']
#         # 读取 JSON 文件
#         # with open('movie.json', 'r', encoding='utf-8') as file:
#         #     data = json.load(file)

#         # 打印数据
#         # for chapter in data:
#         #     print(chapter['name'])
#         #     print(chapter['url'])
#         #     print(chapter['plain_text'])
#         #     print('---')

#         pass
#         # 将结果存储到item中

#         # 最终返回item
#         # yield item


#     # def parse(self, response):
#     #     print(response.text)
#     #     # a_list = response.xpath('//div[@id="catalog"]/ul/li[@data-num > 82]//a/text()')
#     #     # link_list = response.xpath('//div[@id="catalog"]/ul/li[@data-num > 82]//a/@href')

#     #     a_list = response.xpath('//div[3]//div[2]//a[position() > 0]')
#     #     link_list = response.xpath('//div[3]//div[2]//a[position() > 0]/@href')

#     #     for i in range(len(a_list)):
#     #         name = a_list[i].extract()
#     #         url = link_list[i].extract()
#     #         print(name, url)
#     #         yield scrapy.Request(url=url, callback=self.parse_second, meta={'name': name})

#     # def parse_second(self, response):

#     #     # html_content = response.xpath('//div[@class="txtnav" and not(@id="txtright")]').extract_first()
#     #     # html_header = response.xpath('//div[@class="content"]//header/h1/text()').extract_first()

#     #     html_content = response.xpath('//article').extract_first()
#     #     html_header = response.xpath('//div[@class="reader-main"]/h1').extract_first()

#     #     print(html_content)

#     #     name = response.meta['name']
#     #     # print(html_content)
#     #     # 解析HTML
#     #     soup = BeautifulSoup(html_content, 'html.parser')

#     #     # 获取文本内容，并处理多余的空行
#     #     plain_text = soup.get_text("\n")  # 使用换行符分隔文本内容

#     #     # 清除多余的空行
#     #     lines = plain_text.splitlines()
#     #     cleaned_lines = [line.strip() for line in lines if line.strip()]
#     #     cleaned_text = "\n".join(cleaned_lines)

#     #     # 创建ScrapyMovie099Item对象
#     #     movie = ScrapyMovie099Item(plain_text=cleaned_text, name=name)

#     #     # 写入到文件
#     #     file_path = os.path.join(self.folder_name, f'{name}.txt')
#     #     with open(file_path, 'w', encoding='utf-8') as file:
#     #         file.write(cleaned_text)

#     #     # 返回movie对象
#     #     yield movie