import scrapy
from bs4 import BeautifulSoup
from scrapy_movie_099.items import ScrapyMovie099Item
import os
import re
import json

class MvSpider(scrapy.Spider):
    """Scrape the chapter index of a novel from xsbiquge.la.

    :meth:`parse` extracts every chapter link from the book's index page
    and appends ``{'name': ..., 'url': ...}`` records to a JSON file in
    the current working directory.
    """

    # Path of the JSON output file (relative to the current working directory).
    json_file_path = 'movie.json'

    name = "bqg"
    allowed_domains = ["www.xsbiquge.la"]
    start_url_template = "http://www.xsbiquge.la/book/34776"
    download_delay = 0.5  # polite per-request delay, in seconds

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 1,
        },
        # NOTE(review): 'HTTPS_PROXY' is not a recognized Scrapy setting;
        # HttpProxyMiddleware reads the http_proxy/https_proxy *environment
        # variables* or a per-request meta={'proxy': ...}. This entry is
        # therefore ignored; kept as-is for backward compatibility —
        # TODO: wire the proxy through request meta or the environment.
        'HTTPS_PROXY': 'https://37.19.220.178:8443',
    }

    def start_requests(self):
        """Issue the initial request for the book's chapter index page.

        The response is handled by :meth:`parse` (Scrapy's default callback).
        """
        headers = {
            # Spoof a desktop Chrome UA so the site serves the normal page.
            "User-Agent": (
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                "AppleWebKit/537.36 (KHTML, like Gecko) "
                "Chrome/107.0.0.0 Safari/537.36"
            ),
        }
        yield scrapy.Request(url=self.start_url_template, headers=headers)

    def parse(self, response):
        """Extract chapter names/URLs from the index page and persist them.

        Fix vs. the original: the JSON file was re-read and re-written once
        per chapter inside the loop (O(n^2) disk I/O); it is now read once,
        all chapters are accumulated in memory, and the file is written once.
        Anchors with missing text or href are skipped instead of crashing on
        ``None.split()``.
        """
        # Chapter anchors; the first five <dd> entries (presumably the
        # "latest chapters" box) are skipped — TODO confirm against the page.
        a_list = response.xpath('/html/body/div[4]/div[2]/dl/dd[position() > 5]/a')

        new_chapters = []
        for a in a_list:
            name = a.xpath('text()').extract_first()
            href = a.xpath('@href').extract_first()
            # Guard against anchors without text or href; extract_first()
            # returns None for those and the original crashed there.
            if not name or not href:
                continue
            # Collapse internal whitespace/newlines into single spaces.
            cleaned_name = ' '.join(name.split())
            new_chapters.append({'name': cleaned_name,
                                 'url': response.urljoin(href)})

        if not new_chapters:
            # Nothing found: leave any existing file untouched
            # (matches the original's behavior for an empty anchor list).
            return

        # Read the existing data once, or start with an empty list if the
        # file is missing, empty, or corrupt.
        existing_data = []
        if os.path.exists(self.json_file_path) and os.path.getsize(self.json_file_path) > 0:
            with open(self.json_file_path, 'r', encoding='utf-8') as json_file:
                try:
                    existing_data = json.load(json_file)
                except json.JSONDecodeError:
                    pass  # corrupt file: deliberately start over

        existing_data.extend(new_chapters)

        # Write the updated data back in a single pass.
        with open(self.json_file_path, 'w', encoding='utf-8') as json_file:
            json.dump(existing_data, json_file, ensure_ascii=False, indent=4)

    def parse_second(self, response):
        """Placeholder callback for chapter detail pages (not implemented).

        No active request currently uses this callback; it is kept so that
        existing references to ``self.parse_second`` remain valid.
        """
        pass