import scrapy
from bs4 import BeautifulSoup
from scrapy_movie_099.items import ScrapyMovie099Item
import os


class MvSpider(scrapy.Spider):
    """Crawl a novel's chapter index on 69shuba.com and save each chapter's
    cleaned plain text into a local folder, one ``<chapter name>.txt`` per
    chapter, while also yielding a ``ScrapyMovie099Item`` per chapter.
    """

    name = "xs"
    allowed_domains = ["www.69shuba.com"]
    start_urls = ["https://www.69shuba.com/book/1274/"]  # book: 武动乾坤
    download_delay = 0.5  # polite crawl delay: 0.5 seconds between requests

    # Characters illegal in Windows filenames ('/' also illegal on POSIX);
    # stripped from chapter titles before they are used as file names.
    _FORBIDDEN_FILENAME_CHARS = set('\\/:*?"<>|')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.folder_name = 'movie_texts'  # output folder for chapter text files
        # exist_ok avoids the race between an exists() check and makedirs()
        os.makedirs(self.folder_name, exist_ok=True)

    def start_requests(self):
        """Issue the initial request(s) with a desktop-browser User-Agent,
        because the site rejects Scrapy's default UA.
        """
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36",
        }
        # Iterate every start URL instead of hard-coding index 0.
        for url in self.start_urls:
            yield scrapy.Request(url=url, headers=headers)

    def parse(self, response):
        """Parse the book's table of contents and schedule one request per chapter."""
        names = response.xpath('//div[@id="catalog"]/ul/li[@data-num > 0]//a/text()').getall()
        links = response.xpath('//div[@id="catalog"]/ul/li[@data-num > 0]//a/@href').getall()

        # zip() pairs each chapter title with its link; urljoin() makes the
        # request robust to relative hrefs in the catalog.
        for chapter_name, href in zip(names, links):
            self.logger.debug("chapter %r -> %s", chapter_name, href)
            yield scrapy.Request(
                url=response.urljoin(href),
                callback=self.parse_second,
                meta={'name': chapter_name},
            )

    def parse_second(self, response):
        """Extract a chapter's body text, strip markup and blank lines,
        write it to ``<folder>/<sanitized name>.txt`` and yield the item.
        """
        html_content = response.xpath('//div[@class="txtnav" and not(@id="txtright")]').extract_first()
        name = response.meta['name']

        # Guard: the content div can be absent (layout change, anti-bot page);
        # passing None to BeautifulSoup would raise TypeError.
        if html_content is None:
            self.logger.warning("no chapter content for %r at %s", name, response.url)
            return

        # BeautifulSoup strips the remaining tags; "\n" keeps block boundaries.
        soup = BeautifulSoup(html_content, 'html.parser')
        plain_text = soup.get_text("\n")

        # Remove blank lines and surrounding whitespace on each line.
        cleaned_lines = [line.strip() for line in plain_text.splitlines() if line.strip()]
        cleaned_text = "\n".join(cleaned_lines)

        movie = ScrapyMovie099Item(plain_text=cleaned_text, name=name)

        # Sanitize the chapter title so it is a legal filename on all
        # platforms and cannot escape the output folder via '/' or '\'.
        safe_name = "".join(
            c for c in name if c not in self._FORBIDDEN_FILENAME_CHARS
        ).strip() or "untitled"
        file_path = os.path.join(self.folder_name, f'{safe_name}.txt')
        with open(file_path, 'w', encoding='utf-8') as file:
            file.write(cleaned_text)

        yield movie