# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import hashlib
import os
import re
from urllib.parse import urlparse

from scrapy.exceptions import CloseSpider


# class TutorialPipeline:
#     def process_item(self, item, spider):
#         return item


class HtmlSavePipeline:
    """Persist each crawled page's raw HTML to disk, mirroring the URL layout.

    Pages are written under ``save_dir/<domain>/<url-path>``. A URL whose
    path is empty or ends with ``/`` is stored as ``index.html`` inside the
    corresponding directory. Expects items carrying ``item['url']`` and
    ``item['html']``.
    """

    def __init__(self, save_dir='html_files'):
        # Root output directory; created eagerly so the first item does not
        # have to handle a missing directory.
        self.save_dir = save_dir
        os.makedirs(self.save_dir, exist_ok=True)

    @classmethod
    def from_crawler(cls, crawler):
        # Allow overriding the output directory via the HTML_SAVE_DIR setting.
        return cls(save_dir=crawler.settings.get('HTML_SAVE_DIR', 'html_files'))

    def process_item(self, item, spider):
        """Write ``item['html']`` to a file derived from ``item['url']``.

        Returns the item unchanged so downstream pipelines still see it.
        """
        url = item['url']
        html = item['html']

        # Map the URL onto a filesystem path: <save_dir>/<domain>/<path>.
        parsed = urlparse(url)
        domain = parsed.netloc
        path = parsed.path.lstrip('/')
        # An empty path ("/") or a trailing slash denotes a directory index;
        # without this, open() below would target a directory and fail.
        if not path or path.endswith('/'):
            path = path + 'index.html'

        # Create the per-domain directory and any intermediate directories
        # implied by the URL path in one call.
        filepath = os.path.join(self.save_dir, domain, path)
        os.makedirs(os.path.dirname(filepath), exist_ok=True)

        with open(filepath, 'w', encoding='utf-8') as f:
            f.write(html)
        return item


# pipelines.py

import os
from urllib.parse import urlparse
from scrapy.pipelines.files import FilesPipeline


class CustomFilesPipeline(FilesPipeline):
    """FilesPipeline variant that stores downloads under ``<domain>/<url-path>``
    instead of Scrapy's default SHA1-hash layout, keeping the on-disk tree
    readable and mirroring the site structure."""

    def file_path(self, request, response=None, info=None, *, item=None):
        """Return the storage path (relative to FILES_STORE) for *request*.

        e.g. ``http://example.com/css/style.css`` -> ``example.com/css/style.css``.
        """
        url = request.url
        parsed = urlparse(url)
        domain = parsed.netloc          # e.g. example.com
        path = parsed.path.lstrip('/')  # e.g. css/style.css
        # A bare-domain URL ("/") or a trailing slash yields no filename;
        # without a fallback the returned path would be just the domain,
        # colliding with the domain directory used by every other file
        # from the same host. Derive a stable name from the full URL so
        # distinct URLs (including query strings) get distinct files.
        if not path or path.endswith('/'):
            digest = hashlib.sha1(url.encode()).hexdigest()[:8]
            path = path + f'index_{digest}.html'
        return os.path.join(domain, path)
