import scrapy
import pymssql
import re
import time
import urllib.parse
from w3lib.html import remove_tags
from minio import Minio
from io import BytesIO


# Translation tables and patterns are built once at import time instead of on
# every call (this runs once per <sub>/<sup> occurrence on every article page).
_SUB_MAP = str.maketrans('0123456789+-=()', '₀₁₂₃₄₅₆₇₈₉₊₋₌₍₎')
_SUP_MAP = str.maketrans('0123456789+-=()', '⁰¹²³⁴⁵⁶⁷⁸⁹⁺⁻⁼⁽⁾')
_SUB_RE = re.compile(r'<sub>(.*?)</sub>', flags=re.S)
_SUP_RE = re.compile(r'<sup>(.*?)</sup>', flags=re.S)


def convert_supsub(text: str) -> str:
    """Convert HTML ``<sub>``/``<sup>`` spans to Unicode sub/superscripts.

    Tags nested inside a span are stripped first (``remove_tags``), then the
    remaining characters are translated; characters with no Unicode
    sub/superscript equivalent (letters, etc.) are left unchanged.

    :param text: HTML fragment to process.
    :return: the fragment with sub/sup spans replaced by Unicode characters.
    """
    text = _SUB_RE.sub(lambda m: remove_tags(m.group(1)).translate(_SUB_MAP), text)
    text = _SUP_RE.sub(lambda m: remove_tags(m.group(1)).translate(_SUP_MAP), text)
    return text


# === MinIO initialization (runs at import time) ===
# NOTE(review): credentials are hardcoded in source and the endpoint is plain
# HTTP (secure=False) -- consider moving these to environment variables or
# Scrapy settings; verify this matches the deployment environment.
MINIO_ENDPOINT = "192.168.3.118:9091"
ACCESS_KEY = "root"
SECRET_KEY = "Yhxd123456"
BUCKET_NAME = "dms"

# Shared client used by NatureSpider.save_pdf for PDF uploads.
minio_client = Minio(
    MINIO_ENDPOINT,
    access_key=ACCESS_KEY,
    secret_key=SECRET_KEY,
    secure=False
)
# Ensure the target bucket exists before any upload is attempted.
if not minio_client.bucket_exists(BUCKET_NAME):
    minio_client.make_bucket(BUCKET_NAME)


class NatureSpider(scrapy.Spider):
    """Spider that searches nature.com for keywords stored in MSSQL.

    Flow:
        start_requests -> parse (search-result lists, paginated)
                       -> parse_detail (one article page -> one item dict)

    Deduplication against already-stored articles is done per detail URL via
    the ``crawler_data_source`` table; scheduler-level dedup/persistence is
    delegated to scrapy-redis (see ``custom_settings``).
    """

    name = "toscrape-nature-local"

    base_url = "https://www.nature.com"

    custom_settings = {
        'ROBOTSTXT_OBEY': False,
        'CONCURRENT_REQUESTS': 1,
        'DOWNLOAD_DELAY': 2,
        'USER_AGENT': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'RANDOMIZE_DOWNLOAD_DELAY': True,
        'COOKIES_ENABLED': True,  # enable cookies
        'REDIRECT_ENABLED': True,  # allow redirects

        # Redis-backed dedup & persistent scheduling (scrapy-redis)
        'DUPEFILTER_CLASS': "scrapy_redis.dupefilter.RFPDupeFilter",
        'SCHEDULER': "scrapy_redis.scheduler.Scheduler",
        'SCHEDULER_PERSIST': True,
        'REDIS_HOST': '192.168.3.118',
        'REDIS_PORT': 6379,
        'REDIS_PARAMS': {'password': '123456'},
        'DUPEFILTER_DEBUG': True,
        'LOG_LEVEL': 'INFO',
        # Local crash-resume state
        'JOBDIR': 'crawls/nature-local',
        "LOG_FILE": "logs/nature-local.log"
    }

    # NOTE(review): the connection is opened at class-definition (import)
    # time and shared by all callbacks.  Scrapy's single-threaded reactor
    # makes the sharing safe, but credentials are hardcoded -- consider
    # moving them to settings/env vars.  Closed in ``closed()`` below.
    conn = pymssql.connect(
            server='192.168.3.147',
            port=1433,
            user='sa',
            password='cde3CDE#',
            database='dms',
            charset='utf8'
        )

    cursor = conn.cursor()

    def start_requests(self):
        """Read keywords from MSSQL and yield one search request per keyword.

        Keywords are joined against the website entry for ``base_url``;
        a keyword applies when the site searches all departments
        (``search_scope = 0``) or the department matches the site's scope.
        """
        # Parameterized query: avoids str.format brace issues and SQL injection.
        self.cursor.execute("""
            SELECT DISTINCT 
                ckm.keyword,
                ckm.department,
                cwm.id AS website_id
            FROM crawler_keyword_manager ckm
            JOIN crawler_website_manager cwm
                ON cwm.website_url = %s
            WHERE cwm.search_scope = 0 OR ckm.department = cwm.search_scope;
        """, (self.base_url,))
        rows = self.cursor.fetchall()

        # Search-page URL template
        search_url_template = self.base_url + "/search?q={query}&order=relevance"

        # Warm-up request to the home page (optional; establishes cookies).
        yield scrapy.Request(self.base_url, callback=self.parse)

        for idx, (keyword, department, website_id) in enumerate(rows, start=1):
            if not keyword:
                continue

            keyword = keyword.strip()

            # URL-encode the keyword before templating it into the query.
            encoded = urllib.parse.quote_plus(keyword)
            full_url = search_url_template.format(query=encoded)

            self.logger.info(f"📘 [{idx}] 生成搜索URL: {full_url}, 部门: {department}")

            # Carry keyword/department/website_id through to detail pages.
            yield scrapy.Request(
                full_url,
                callback=self.parse,
                meta={
                    'keyword': keyword,
                    'department': department,
                    'website_id': website_id,
                    'idx': idx,
                },
                dont_filter=True
            )

    def parse(self, response):
        """Parse a search-result list page.

        Follows every article link to ``parse_detail`` (propagating the
        request meta) and follows the "next" pagination link recursively.
        """
        idx = response.meta.get('idx')

        start_time = time.time()
        self.logger.info(f"=== 解析列表页: {response.url} ,序列：{idx}===")

        article_count = 0
        for article in response.css('article'):
            article_count += 1
            url = article.css('h3 > a::attr(href)').get()
            if not url:
                continue
            # Copy meta so each detail request carries its own context dict.
            meta = response.meta.copy()
            detail_url = response.urljoin(url)
            yield response.follow(detail_url, callback=self.parse_detail,
                                  meta=meta, dont_filter=True)

        next_page_url = response.css('li[data-page="next"] a.c-pagination__link::attr(href)').get()
        if next_page_url:
            yield response.follow(next_page_url, callback=self.parse,
                                  meta=response.meta, dont_filter=True)
        else:
            self.logger.info("没有找到下一页")

        self.logger.info(f"列表页共 {article_count} 篇文章，用时 {time.time()-start_time:.2f}s")

    def _extract_full_text(self, response):
        """Return the article body of ``response`` as plain text, or ``''``.

        Strips script/style/noscript, converts sub/superscripts, turns block
        tags into newlines, removes all remaining tags, and collapses
        repeated newlines.  Returns ``''`` when ``.main-content`` is missing
        (typically a paywalled article) or cleanup fails.
        """
        html = response.css('.main-content').get()
        if html is None:
            self.logger.warning("未找到 .main-content 元素")
            return ''
        try:
            # 1. Drop script/style/noscript elements entirely (tag + content).
            clean_html = re.sub(r'<(script|style|noscript)[^>]*>.*?</\1>', '', html, flags=re.S)
            # 2. Convert <sub>/<sup> to Unicode (handles nested inner tags).
            clean_html = convert_supsub(clean_html)
            # 3. Replace block-level tags with newlines.  The lookahead keeps
            #    short names from over-matching (e.g. <p> vs <pre>, <li> vs
            #    <link>), which the original per-tag patterns did not.
            block_tags = ['p', 'div', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'li', 'section', 'article']
            for tag in block_tags:
                clean_html = re.sub(fr'</?{tag}(?=[\s/>])[^>]*>', '\n', clean_html, flags=re.I)
            # 4. Strip any remaining tags, then normalize whitespace.
            text = remove_tags(clean_html)
            text = re.sub(r'\n+', '\n', text)
            return text.strip()
        except Exception as e:
            self.logger.error(f"处理 HTML 内容时出错: {e}")
            return ''

    def parse_detail(self, response):
        """Parse one article detail page and yield a single item dict.

        Skips URLs already present in ``crawler_data_source``.  Articles
        whose body text cannot be extracted are flagged ``is_paid = 1``.
        """
        self.logger.info(f"🎯 解析详情页: {response.url}")

        # Dedup against already-stored articles (parameterized query).
        self.cursor.execute("""
            SELECT 1 
            FROM crawler_data_source AS cds 
            WHERE cds.source_url = %s
        """, (response.url,))
        if self.cursor.fetchone() is not None:
            return

        author = response.css('.c-article-header .c-article-author-list::text').get()
        title = response.css('.c-article-title::text').get()
        authors = response.css('.c-article-author-list a::text').getall()
        publication_date = response.css('.c-article-identifiers time::text').get()

        # Extracted body text; '' means missing/paywalled or cleanup failure.
        full_text = self._extract_full_text(response)

        pdf_url = response.css('a[data-article-pdf="true"]::attr(href)').get()

        keyword = response.meta.get('keyword')
        website_id = response.meta.get('website_id')
        department = response.meta.get('department')
        source_url = response.url

        summary = response.css('#Abs1-content *::text').getall()
        summary = ' '.join(t.strip() for t in summary if t.strip())

        # No extractable body text => treat the article as paid/inaccessible.
        is_paid = 0 if full_text else 1

        self.logger.info(f"关键字：{keyword}")

        yield {
            'keyword': keyword,
            'website': self.base_url,
            'website_id': website_id,
            'department': department,
            'title': title.strip() if title else None,
            'author': [a.strip() for a in authors] if authors else [],
            'publication_date': publication_date.strip() if publication_date else None,
            'url': response.url,
            'attachment_url': response.urljoin(pdf_url) if pdf_url else None,
            'full_text': full_text if full_text else None,
            'publish_date': publication_date,
            'is_paid': is_paid,
            'summary': summary,
            'corresponding_author': author,  # separate field name to avoid clashing with 'author'
            'source_url': source_url
        }

        # PDF download is intentionally disabled; re-enable to push PDFs to
        # MinIO via save_pdf.
        # if pdf_url:
        #     full_pdf_url = response.urljoin(pdf_url)
        #     domain = response.url.split("/")[2]
        #     filename = full_pdf_url.split("/")[-1]
        #     object_name = f"{domain}/(unknown)"
        #     yield scrapy.Request(
        #         full_pdf_url,
        #         callback=self.save_pdf,
        #         meta={'object_name': object_name},
        #         dont_filter=True
        #     )

    def save_pdf(self, response):
        """Upload a downloaded PDF body to MinIO under ``meta['object_name']``."""
        object_name = response.meta['object_name']
        data = BytesIO(response.body)
        # Use the configured bucket constant instead of a hardcoded "dms" so
        # the module-level BUCKET_NAME stays the single source of truth.
        minio_client.put_object(bucket_name=BUCKET_NAME, object_name=object_name,
                                data=data, length=len(response.body))
        self.logger.info(f"✅ PDF 上传成功: {object_name}")

    def closed(self, reason):
        """Scrapy shutdown hook: release the shared DB cursor/connection."""
        try:
            self.cursor.close()
            self.conn.close()
        except Exception as e:
            # Best-effort cleanup: never let teardown mask the close reason.
            self.logger.warning(f"关闭数据库连接失败: {e}")
        self.logger.info(f"🕷️ 爬虫关闭: {reason}")