import scrapy
import time
import requests
from scrapy_redis.spiders import RedisSpider
from minio import Minio
from w3lib.html import remove_tags
import re


# Translation tables and patterns are built once at import time instead of on
# every call — this helper runs once per article body.
_SUB_MAP = str.maketrans('0123456789+-=()', '₀₁₂₃₄₅₆₇₈₉₊₋₌₍₎')
_SUP_MAP = str.maketrans('0123456789+-=()', '⁰¹²³⁴⁵⁶⁷⁸⁹⁺⁻⁼⁽⁾')
_SUB_RE = re.compile(r'<sub>(.*?)</sub>', re.S)
_SUP_RE = re.compile(r'<sup>(.*?)</sup>', re.S)


def convert_supsub(text: str) -> str:
    """Replace ``<sub>``/``<sup>`` spans with Unicode sub/superscript chars.

    Markup nested inside the tag is stripped first (e.g. ``<sub><i>2</i></sub>``
    becomes ``₂``). Characters without a Unicode sub/superscript equivalent
    (anything outside digits, ``+-=()``) are kept unchanged.

    :param text: HTML fragment to transform.
    :return: the fragment with sub/sup tags collapsed to Unicode characters.
    """
    # Strip inner tags, then map each character through the translation table.
    text = _SUB_RE.sub(lambda m: remove_tags(m.group(1)).translate(_SUB_MAP), text)
    text = _SUP_RE.sub(lambda m: remove_tags(m.group(1)).translate(_SUP_MAP), text)
    return text

# MinIO configuration.
# NOTE(review): credentials are hardcoded in source — move to environment
# variables or a settings file before sharing/deploying this code.
MINIO_ENDPOINT = "192.168.3.118:9091"
ACCESS_KEY = "root"
SECRET_KEY = "Yhxd123456"
BUCKET_NAME = "dms"

# Initialize the MinIO client (plain HTTP — secure=False).
# This runs at import time; importing this module requires the endpoint
# to be reachable.
minio_client = Minio(
    MINIO_ENDPOINT,
    access_key=ACCESS_KEY,
    secret_key=SECRET_KEY,
    secure=False
)

# Ensure the target bucket exists (network call at import time).
if not minio_client.bucket_exists(BUCKET_NAME):
    minio_client.make_bucket(BUCKET_NAME)


class NatureRedisSpider(RedisSpider):
    """Distributed spider for nature.com article listings.

    Start URLs are popped from the Redis list ``nature:start_urls``
    (scrapy-redis). Listing pages are paginated; each article's detail page
    is parsed for metadata and full text, and the article PDF (when present)
    is streamed into MinIO under ``<domain>/<filename>``.
    """

    name = "toscrape-nature"

    # Redis key the scrapy-redis scheduler reads start URLs from.
    redis_key = "nature:start_urls"

    custom_settings = {
        'ROBOTSTXT_OBEY': False,
        'CONCURRENT_REQUESTS': 1,
        'DOWNLOAD_DELAY': 5,
        'USER_AGENT': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'RANDOMIZE_DOWNLOAD_DELAY': True,
        'LOG_ENABLED': True,
        'LOG_STDOUT': False,

        # scrapy-redis: shared dupefilter + scheduler so multiple workers
        # can cooperate; SCHEDULER_PERSIST keeps the queue across restarts.
        'DUPEFILTER_CLASS': "scrapy_redis.dupefilter.RFPDupeFilter",
        'SCHEDULER': "scrapy_redis.scheduler.Scheduler",
        'SCHEDULER_PERSIST': True,
        'SCHEDULER_QUEUE_CLASS': "scrapy_redis.queue.SpiderQueue",

        # Redis connection (password supplied via REDIS_PARAMS).
        'REDIS_HOST': '192.168.3.118',
        'REDIS_PORT': 6379,
        'REDIS_PARAMS': {
            'password': '123456',
        },

        'JOBDIR': 'crawls/nature-redis',
        'DUPEFILTER_DEBUG': True,
    }

    def parse(self, response):
        """Parse a listing page: follow every article link and the next page."""
        start_time = time.time()
        self.logger.info(f"=== 解析列表页: {response.url} ===")

        article_count = 0
        for article in response.css('article'):
            article_count += 1
            url = article.css('h3 > a::attr(href)').get()
            if url:
                detail_url = response.urljoin(url)
                yield response.follow(detail_url, callback=self.parse_detail)

        next_page_url = response.css('li[data-page="next"] a.c-pagination__link::attr(href)').get()
        if next_page_url:
            yield response.follow(next_page_url, callback=self.parse)
        else:
            self.logger.info("没有找到下一页")

        self.logger.info(f"列表页共 {article_count} 篇文章，用时 {time.time()-start_time:.2f}s")

    def parse_detail(self, response):
        """Parse an article detail page: yield metadata/full text, upload PDF."""
        self.logger.info(f"🎯 解析详情页: {response.url}")

        title = response.css('.c-article-title::text').get()
        authors = response.css('.c-article-author-list a::text').getall()
        publication_date = response.css('time::text').get()
        full_text = self._extract_full_text(response.css('.c-article-body').get())
        pdf_url = response.css('a[data-article-pdf="true"]::attr(href)').get()

        yield {
            'title': title.strip() if title else None,
            'authors': [a.strip() for a in authors],
            'publication_date': publication_date.strip() if publication_date else None,
            'url': response.url,
            'pdf_url': response.urljoin(pdf_url) if pdf_url else None,
            'full_text': full_text or None,
        }

        if pdf_url:
            self._upload_pdf(response, response.urljoin(pdf_url))

    @staticmethod
    def _extract_full_text(html):
        """Convert the article-body HTML fragment to plain text.

        :param html: raw HTML of ``.c-article-body``, or None when the page
            has no article body (previously this crashed with a TypeError).
        :return: stripped plain text, or None when no body was found.
        """
        if not html:
            return None
        # 1. Drop script/style/noscript elements together with their contents.
        clean_html = re.sub(r'<(script|style|noscript)[^>]*>.*?</\1>', '', html, flags=re.S)
        # 2. Convert <sub>/<sup> to Unicode before the tags are stripped away.
        clean_html = convert_supsub(clean_html)
        # 3. Replace block-level tags with newlines so paragraphs stay separated.
        block_tags = ['p', 'div', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'li', 'section', 'article']
        for tag in block_tags:
            clean_html = re.sub(fr'<{tag}[^>]*>', '\n', clean_html, flags=re.I)
            clean_html = re.sub(fr'</{tag}>', '\n', clean_html, flags=re.I)
        # 4. Strip every remaining tag, then collapse runs of newlines.
        full_text = remove_tags(clean_html)
        full_text = re.sub(r'\n+', '\n', full_text)
        return full_text.strip()

    def _upload_pdf(self, response, full_pdf_url):
        """Stream the article PDF into MinIO as ``<domain>/<filename>``.

        Best-effort: failures are logged, never raised, so the item yielded
        by ``parse_detail`` is unaffected.
        """
        try:
            # NOTE: requests is a blocking call inside a Scrapy callback; it
            # stalls the reactor but is tolerated at CONCURRENT_REQUESTS=1.
            with requests.get(full_pdf_url, stream=True) as r:
                r.raise_for_status()
                size = int(r.headers.get('Content-Length', 0))

                domain = response.url.split("/")[2]
                filename = full_pdf_url.split("/")[-1]
                # FIX: was the literal "(unknown)" — every PDF overwrote the
                # same object key. Use the actual filename.
                object_name = f"{domain}/{filename}"

                minio_client.put_object(
                    bucket_name=BUCKET_NAME,
                    object_name=object_name,
                    data=r.raw,
                    length=size if size > 0 else -1,
                    # MinIO requires part_size when length is unknown (-1);
                    # without it put_object raises ValueError.
                    part_size=10 * 1024 * 1024,
                )
            self.logger.info(f"✅ PDF 上传成功: {object_name}")
        except Exception as e:
            self.logger.error(f"❌ PDF 上传失败: {e}")

    def closed(self, reason):
        """Called by Scrapy when the spider shuts down; log the reason."""
        self.logger.info(f"🕷️ 爬虫关闭: {reason}")
