import json
import scrapy
import re
from urllib.parse import urljoin
from minio import Minio
from io import BytesIO
from w3lib.html import remove_tags


# === MinIO client initialisation (runs at import time) ===
# NOTE(review): credentials and the endpoint are hard-coded in source —
# move them to environment variables or spider settings before sharing.
MINIO_ENDPOINT = "192.168.3.118:9091"
ACCESS_KEY = "root"
SECRET_KEY = "Yhxd123456"
BUCKET_NAME = "dms"

# Plain-HTTP connection (secure=False) to a LAN MinIO instance.
minio_client = Minio(
    MINIO_ENDPOINT,
    access_key=ACCESS_KEY,
    secret_key=SECRET_KEY,
    secure=False
)
# Ensure the target bucket exists; this performs network I/O as a module
# import side effect, so importing this file requires MinIO to be reachable.
if not minio_client.bucket_exists(BUCKET_NAME):
    minio_client.make_bucket(BUCKET_NAME)


class EastmoneyReportSpider(scrapy.Spider):
    """Crawl Eastmoney research reports for selected brokerages.

    Per organisation code the flow is:
      1. Page through the JSONP list API (reportapi.eastmoney.com/report/dg).
      2. For each report whose title matches ``target_keywords``, fetch the
         detail page and yield a metadata item (title, authors, full text,
         PDF link, ...).
      3. Download the report PDF and upload it to MinIO via ``save_pdf``.
    """

    name = "toscrape-eastmoney-reports"
    allowed_domains = ["data.eastmoney.com", "pdf.dfcfw.com"]

    start_urls = [
        "https://data.eastmoney.com/report/orgpublish.jshtml?orgcode=80000031",  # Soochow Securities
        # "https://data.eastmoney.com/report/orgpublish.jshtml?orgcode=80000007",  # Guosen Securities
        # "https://data.eastmoney.com/report/orgpublish.jshtml?orgcode=80042949",  # Huayuan Securities
    ]

    # Organisation codes to crawl; replace with the brokerages you need.
    start_orgs = ["80000031"]
    begin_time = "2023-10-16"
    end_time = "2025-10-16"

    # Title keyword whitelist: only reports whose title contains at least
    # one of these industry keywords are followed to the detail page.
    target_keywords = [
        "电网设备", "电力行业", "电子化学品", "电源设备", "电池", "电机",
        "风电设备", "非金属材料", "光伏设备", "化学制品", "化学原料",
        "环保行业", "能源金属", "汽车零部件", "汽车整车", "塑料制品",
        "消费电子", "小金属", "橡胶制品", "有色金属", "专用设备"
    ]

    custom_settings = {
        "ROBOTSTXT_OBEY": False,
        "CONCURRENT_REQUESTS": 1,
        "DOWNLOAD_DELAY": 3,
        "RANDOMIZE_DOWNLOAD_DELAY": True,
        "USER_AGENT": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
        # "LOG_LEVEL": "DEBUG",
        # Redis-backed request dedup + persistent scheduler (scrapy-redis).
        "DUPEFILTER_CLASS": "scrapy_redis.dupefilter.RFPDupeFilter",
        "SCHEDULER": "scrapy_redis.scheduler.Scheduler",
        "SCHEDULER_PERSIST": True,
        "REDIS_HOST": "192.168.3.118",
        "REDIS_PORT": 6379,
        "REDIS_PARAMS": {"password": "123456"},
        "JOBDIR": "crawls/eastmoney_reports",
        "LOG_FILE": "logs/crawler_item.log",
    }

    def _list_url(self, org, page_no):
        """Build the JSONP list-API URL for one organisation and page."""
        return (
            f"https://reportapi.eastmoney.com/report/dg?"
            f"cb=callback&pageNo={page_no}&pageSize=50&author=*&orgCode={org}"
            f"&beginTime={self.begin_time}&endTime={self.end_time}"
        )

    def start_requests(self):
        """Seed one page-1 list request per organisation code."""
        for org in self.start_orgs:
            yield scrapy.Request(
                self._list_url(org, 1),
                callback=self.parse,
                meta={"org": org, "pageNo": 1},
                dont_filter=True,
            )

    def parse(self, response):
        """Parse one JSONP list page: yield detail requests and the next page."""
        org = response.meta["org"]
        pageNo = response.meta["pageNo"]

        self.logger.info(f"=== 解析列表页: {response.url} ===")

        raw = response.text.strip()
        # Strip the JSONP wrapper: callback( ... )
        clean = re.sub(r'^callback\(|\)$', '', raw)

        try:
            data = json.loads(clean)
        except Exception as e:
            self.logger.error(f"JSON 解析失败: {e}")
            return

        # The payload under "data" is a list of report dicts.
        reports = data.get("data") or []
        if not reports:
            return

        for r in reports:
            # BUG FIX: the original loop `return`ed after yielding the first
            # report, dropping the rest of the page and making the
            # pagination code below unreachable.
            title = r.get("title") or ""
            if title and not any(kw in title for kw in self.target_keywords):
                # Apply the title keyword whitelist (previously declared on
                # the class but never used). Reports without a title field
                # are let through rather than silently dropped.
                continue
            info_code = r.get("infoCode")
            if not info_code:
                continue
            detail_url = (
                "https://data.eastmoney.com/report/zw_industry.jshtml"
                f"?infocode={info_code}"
            )
            yield scrapy.Request(detail_url, callback=self.parse_detail, dont_filter=True)

        # Pagination. NOTE(review): the original read data["data"]["pages"],
        # but "data" holds the report list, so that lookup would raise
        # AttributeError once reached; the report/dg API exposes the page
        # count as a top-level "TotalPage" field — confirm against a live
        # response.
        total_pages = data.get("TotalPage") or 1
        if pageNo < total_pages:
            next_page = pageNo + 1
            yield scrapy.Request(
                self._list_url(org, next_page),
                callback=self.parse,
                meta={"org": org, "pageNo": next_page},
            )

    def parse_detail(self, response):
        """Parse a report detail page: metadata, full text and PDF link."""
        self.logger.info(f"📄 正在解析详情页: {response.url}")

        # Report title.
        title = response.css("div.c-title h1::text").get(default="").strip()

        # Metadata block: source link, publish date, organisation, authors.
        info = response.css("div.c-infos")

        website = info.css("a::attr(href)").get(default="")
        if website.startswith("//"):
            # Protocol-relative URL -> absolute https URL.
            website = "https:" + website

        publish_date = info.css("#publish-date::text").get(default="").strip()
        org_name = info.css("span:nth-of-type(3)::text").get(default="").strip()

        authors_text = info.css("span:nth-of-type(4)::text").get(default="")
        authors = [a.strip() for a in authors_text.split(",") if a.strip()] if authors_text else []

        # Normalise the PDF link to an absolute URL.
        pdf_url = info.css("a.pdf-link::attr(href)").get()
        if pdf_url:
            if pdf_url.startswith("//"):
                pdf_url = "https:" + pdf_url
            elif not pdf_url.startswith("http"):
                pdf_url = urljoin(response.url, pdf_url)

        # Full report body: all descendant text of the content node.
        full_text = response.css("#ctx-content").xpath("string(.)").get()
        full_text = full_text.strip() if full_text else ""

        item = {
            "title": title,
            "website": website,
            "publication_date": publish_date,
            "organization": org_name,
            "authors": authors,
            "pdf_url": pdf_url,
            "full_text": full_text,
            "url": response.url,
        }

        yield item

        # Download the PDF and hand the bytes to save_pdf for upload.
        if pdf_url:
            yield scrapy.Request(
                pdf_url,
                callback=self.save_pdf,
                meta={"organization": org_name, "source_url": response.url},
                dont_filter=True,
            )

    def save_pdf(self, response):
        """Upload a downloaded PDF body to MinIO under eastmoney/<org>/."""
        org_name = response.meta.get("organization", "unknown") or "unknown"
        # Derive the object name from the URL path, ignoring any query
        # string; fall back to a fixed name for path-less URLs.
        pdf_name = response.url.split("/")[-1].split("?")[0] or "report.pdf"
        object_name = f"eastmoney/{org_name}/{pdf_name}"

        try:
            minio_client.put_object(
                bucket_name=BUCKET_NAME,
                object_name=object_name,
                data=BytesIO(response.body),
                length=len(response.body),
                content_type="application/pdf",
            )
            self.logger.info(f"✅ PDF 上传成功: {object_name}")
        except Exception as e:
            # Best-effort upload: log and keep crawling.
            self.logger.error(f"❌ PDF 上传失败 ({object_name}): {e}")
