import mysql.connector
import hashlib
import json
from scrapy.exceptions import DropItem


class DatabasePipeline:
    """Scrapy pipeline that deduplicates crawled pages via content hashing
    and performs incremental writes into MySQL.

    Per-item workflow: hash the raw page content with SHA-256, compare it
    against the hash stored for the same detail_url, drop the item when the
    content is unchanged, and otherwise upsert the structured record and
    archive the raw page.
    """

    def __init__(self, db_settings):
        # Connection parameters (host/user/password/database, ...) taken
        # verbatim from the crawler settings; see from_crawler().
        self.db_settings = db_settings
        self.conn = None
        self.cursor = None

    @classmethod
    def from_crawler(cls, crawler):
        """Alternate constructor: read the DB_SETTINGS dict from settings.py."""
        db_settings = crawler.settings.getdict('DB_SETTINGS')
        return cls(db_settings)

    def open_spider(self, spider):
        """Connect to MySQL when the spider starts.

        Connection errors are logged and re-raised: without a working
        connection every later process_item() call would otherwise fail with
        a confusing AttributeError on the still-None cursor, so fail fast
        instead of limping on.
        """
        try:
            self.conn = mysql.connector.connect(**self.db_settings)
            # dictionary=True makes fetchone() return column-name -> value
            # dicts, which process_item() relies on below.
            self.cursor = self.conn.cursor(dictionary=True)
            spider.log("数据库连接成功。")
        except mysql.connector.Error as err:
            spider.log(f"数据库连接失败: {err}")
            # Fail fast: the pipeline is useless without a DB connection.
            raise

    def close_spider(self, spider):
        """Release the cursor and the connection when the spider closes."""
        if self.cursor:
            self.cursor.close()
        if self.conn:
            self.conn.close()
            spider.log("数据库连接已关闭。")

    def process_item(self, item, spider):
        """Hash-compare the item against the DB and write it only if changed.

        Raises:
            DropItem: when the stored hash matches (content unchanged), so
                later pipeline stages skip this item.
        """
        # 1. Compute the new content hash. `or ''` also guards against an
        # item carrying an explicit raw_content=None, which the .get()
        # default alone would not catch.
        raw_content = (item.get('raw_content') or '').encode('utf-8')
        new_hash = hashlib.sha256(raw_content).hexdigest()
        item['content_hash'] = new_hash

        # 2. Look up the previously stored hash for this detail page.
        self.cursor.execute(
            "SELECT content_hash FROM crawler_universities WHERE detail_url = %s",
            (item['detail_url'],)
        )
        result = self.cursor.fetchone()
        old_hash = result['content_hash'] if result else None

        # 3. Unchanged content -> drop the item.
        if old_hash == new_hash:
            spider.log(f"数据未变更，跳过: {item['name']}")
            raise DropItem(f"数据未变更: {item['name']}")

        # 4. New or changed content -> persist it.
        spider.log(f"发现新数据或数据已更新，准备写入数据库: {item['name']}")
        self._write_to_db(item, spider)

        return item

    def _write_to_db(self, item, spider):
        """Upsert the item and archive its raw page in one transaction.

        Best-effort by design: on any MySQL error the transaction is rolled
        back and the error is logged, but no exception propagates, so the
        item still continues down the pipeline unchanged.
        """
        try:
            # Upsert into the universities table. NOTE(review): the
            # ON DUPLICATE KEY clause assumes a UNIQUE index on detail_url —
            # confirm against the schema.
            upsert_sql = """
                         INSERT INTO crawler_universities (name, location, administration, category, tags, detail_url,
                                                   content_hash)
                         VALUES (%s, %s, %s, %s, %s, %s, %s) ON DUPLICATE KEY
                         UPDATE
                             name =
                         VALUES (name), location =
                         VALUES (location), administration =
                         VALUES (administration), category =
                         VALUES (category), tags =
                         VALUES (tags), content_hash =
                         VALUES (content_hash); \
                         """
            self.cursor.execute(upsert_sql, (
                item.get('name'),
                item.get('location'),
                item.get('administration'),
                item.get('category'),
                # Serialize the tags list as a JSON string; ensure_ascii=False
                # keeps CJK characters readable in the column.
                json.dumps(item.get('tags'), ensure_ascii=False),
                item.get('detail_url'),
                item.get('content_hash')
            ))

            # Archive the raw page (append-only audit trail).
            insert_raw_sql = """
                             INSERT INTO crawler_raw_pages (source_url, raw_content, content_hash)
                             VALUES (%s, %s, %s); \
                             """
            self.cursor.execute(insert_raw_sql, (
                item.get('detail_url'),  # the detail page URL serves as source_url
                item.get('raw_content'),
                item.get('content_hash')
            ))

            # Commit both statements together so the structured row and its
            # raw archive stay consistent.
            self.conn.commit()
            spider.log(f"成功写入数据库: {item['name']}")

        except mysql.connector.Error as err:
            self.conn.rollback()
            spider.log(f"写入数据库时发生错误: {err}")
