import scrapy
import pymssql
import re
import time
import urllib.parse
import json
from w3lib.html import remove_tags
from minio import Minio
from io import BytesIO


# ========== MinIO initialization ==========
# NOTE(review): credentials are hard-coded here — consider moving them to
# environment variables / a settings file before sharing this code.
MINIO_ENDPOINT = "192.168.3.118:9091"
ACCESS_KEY = "root"
SECRET_KEY = "Yhxd123456"
BUCKET_NAME = "dms"

# Client for the internal MinIO object store; secure=False because the
# endpoint is plain HTTP on the LAN.
minio_client = Minio(
    MINIO_ENDPOINT,
    access_key=ACCESS_KEY,
    secret_key=SECRET_KEY,
    secure=False
)
# Runs at import time: ensures the target bucket exists before the spider
# starts (raises if MinIO is unreachable).
if not minio_client.bucket_exists(BUCKET_NAME):
    minio_client.make_bucket(BUCKET_NAME)


class CnkiSpider(scrapy.Spider):
    """Crawl CNKI journal search results for keywords stored in MSSQL.

    Workflow:
      1. ``start_requests`` reads (keyword, department, website_id) rows from
         the ``crawler_keyword_manager`` table and POSTs a subject search to
         the ``/kns8s/brief/grid`` endpoint.
      2. ``parse_list`` walks the result rows, follows each article link to
         ``parse_detail`` and paginates while a "next page" control exists.
      3. ``parse_detail`` yields one item dict per article (title, authors,
         summary, attachment URL, tag-stripped full text).

    Dedup/scheduling is delegated to scrapy-redis (see ``custom_settings``).
    """

    name = "toscrape-cnki-grid"
    base_url = "https://kns.cnki.net"
    grid_url = f"{base_url}/kns8s/brief/grid"

    custom_settings = {
        'ROBOTSTXT_OBEY': False,
        'CONCURRENT_REQUESTS': 1,
        'DOWNLOAD_DELAY': 2,
        'USER_AGENT': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'RANDOMIZE_DOWNLOAD_DELAY': True,
        'COOKIES_ENABLED': True,
        'REDIRECT_ENABLED': True,

        # Redis-backed dedup & persistent scheduling (scrapy-redis)
        'DUPEFILTER_CLASS': "scrapy_redis.dupefilter.RFPDupeFilter",
        'SCHEDULER': "scrapy_redis.scheduler.Scheduler",
        'SCHEDULER_PERSIST': True,
        'REDIS_HOST': '192.168.3.118',
        'REDIS_PORT': 6379,
        'REDIS_PARAMS': {'password': '123456'},
        'DUPEFILTER_DEBUG': True,

        # Logging and resumable crawling
        'LOG_LEVEL': 'INFO',
        'JOBDIR': 'crawls/cnki-grid',
        # "LOG_FILE": "logs/cnki-grid.log"
    }

    # Headers mimic an AJAX request from the CNKI search page.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
        'Accept': '*/*',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'X-Requested-With': 'XMLHttpRequest',
        'Origin': 'https://kns.cnki.net',
        'Referer': 'https://kns.cnki.net',
    }

    # Base form data for the grid endpoint; per-request copies get
    # "QueryJson" and "pageNum" filled in.
    formdata_template = {
        "boolSearch": "true",
        "pageNum": "1",
        "pageSize": "20",
        "dstyle": "listmode",
        "searchFrom": "资源范围：学术期刊",
        "language": "CHS",
        "uniplatform": "NZKPT",
        "CurPage": "1"
    }

    # NOTE(review): this connection is opened at class-definition (import)
    # time and shared by all instances; a failure here prevents the module
    # from importing at all. Consider moving it into __init__ / from_crawler.
    conn = pymssql.connect(
        server='192.168.3.147',
        port=1433,
        user='sa',
        password='cde3CDE#',
        database='dms',
        charset='utf8'
    )
    cursor = conn.cursor()

    @staticmethod
    def _build_query_json(keyword):
        """Return the CNKI QueryJson payload for a subject (SU) search."""
        return {
            "Platform": "NZKPT",
            "Resource": "JOURNAL",
            "Classid": "YSTT4HG0",
            "QNode": {
                "QGroup": [{
                    "Key": "Subject",
                    "Items": [{
                        "Field": "SU",
                        "Value": keyword,
                        "Operator": "TOPRANK"
                    }]
                }]
            },
            "SearchType": 2,
            "Rlang": "CHINESE"
        }

    def start_requests(self):
        """Read keywords from the database and issue the first-page POSTs."""
        self.cursor.execute("""
            SELECT DISTINCT 
                ckm.keyword,
                ckm.department,
                cwm.id AS website_id
            FROM crawler_keyword_manager ckm
            JOIN crawler_website_manager cwm
                ON cwm.website_url = %s
            WHERE cwm.search_scope = 0 OR ckm.department = cwm.search_scope;
        """, (self.base_url,))
        rows = self.cursor.fetchall()

        for idx, (keyword, department, website_id) in enumerate(rows, start=1):
            if not keyword:
                continue
            keyword = keyword.strip()
            self.logger.info(f"📘 [{idx}] 开始抓取关键字: {keyword}")

            # Serialize once; the raw JSON string is carried in meta so that
            # pagination can reuse it verbatim (see parse_list).
            query_json = json.dumps(self._build_query_json(keyword),
                                    ensure_ascii=False)

            formdata = self.formdata_template.copy()
            formdata["QueryJson"] = query_json
            formdata["pageNum"] = "1"

            yield scrapy.FormRequest(
                url=self.grid_url,
                formdata=formdata,
                headers=self.headers,
                callback=self.parse_list,
                meta={
                    "keyword": keyword,
                    "department": department,
                    "website_id": website_id,
                    "query_json": query_json,
                    "page": 1
                },
                dont_filter=True
            )

    def parse_list(self, response):
        """Parse one CNKI result page: follow article links, then paginate."""
        keyword = response.meta['keyword']
        page = response.meta['page']

        self.logger.info(f"📄 正在解析第 {page} 页，关键词: {keyword}")

        rows = response.css('tr')
        if not rows:
            # An empty grid usually means an anti-bot page, not zero results.
            self.logger.warning(f"⚠️ 没有解析到任何行（可能被拦截），URL: {response.url}")
            return

        for row in rows:
            url = row.css('.name a::attr(href)').get()
            if not url:
                continue

            yield response.follow(
                response.urljoin(url),
                headers=self.headers,
                callback=self.parse_detail,
                meta=response.meta,
                dont_filter=True
            )

        # Pagination: re-POST the same query with an incremented page number.
        has_next = response.xpath('//a[contains(@class,"next") or contains(text(),"下一页")]')
        if has_next:
            next_page = page + 1
            formdata = self.formdata_template.copy()
            formdata["pageNum"] = str(next_page)
            # Fix: previously the QueryJson was scraped back out of the
            # percent-encoded request body and re-submitted, so FormRequest
            # URL-encoded it a second time on every page after the first.
            # Reuse the raw JSON string carried in meta instead.
            formdata["QueryJson"] = response.meta["query_json"]
            # NOTE(review): "CurPage" stays at "1" from the template, as in
            # the original code — confirm whether the endpoint reads it.
            yield scrapy.FormRequest(
                url=self.grid_url,
                formdata=formdata,
                headers=self.headers,
                callback=self.parse_list,
                meta={**response.meta, "page": next_page},
                dont_filter=True
            )

    def parse_detail(self, response):
        """Parse an article detail page and yield one item dict."""
        keyword = response.meta.get("keyword")
        website_id = response.meta.get("website_id")
        department = response.meta.get("department")

        title = response.css('h1::text').get()
        authors = response.css('.author a::text').getall()
        # Fix: join ALL text nodes of the abstract — .get() returned only the
        # first node and truncated abstracts containing inline markup.
        summary = ''.join(response.css('#ChDivSummary::text').getall()).strip() or None
        pdf_url = response.css('a.icon-download::attr(href)').get()

        # Strip script/style/noscript bodies before removing tags so that
        # inline JS/CSS does not leak into the extracted text.
        full_text = ''
        html = response.text
        if html:
            clean_html = re.sub(r'<(script|style|noscript)[^>]*>.*?</\1>', '', html, flags=re.S)
            full_text = remove_tags(clean_html).strip()

        yield {
            'keyword': keyword,
            'website': self.base_url,
            'website_id': website_id,
            'department': department,
            'title': title.strip() if title else None,
            'author': authors,
            'summary': summary,
            'url': response.url,
            'attachment_url': response.urljoin(pdf_url) if pdf_url else None,
            'full_text': full_text,
            # Heuristic: no extractable text -> assume the article is paywalled.
            'is_paid': 0 if full_text else 1,
            'source_url': response.url
        }

    def closed(self, reason):
        """Spider shutdown hook: log the close reason."""
        self.logger.info(f"🕷️ 爬虫关闭: {reason}")
