import pymysql
from datetime import datetime
import hashlib
from urllib.parse import urlparse

class MySQLCrawlerDataPipeline:
    """Scrapy pipeline that persists crawled items into the MySQL
    table ``crawler_data_test``.

    Expected item shape::

        {
            'title': ...,
            'authors': [...],
            'summary': ...,
            'url': ...,
            'pdf_url': ...,
            'full_text': ...,
            'publication_date': ...
        }
    """

    def open_spider(self, spider):
        """Open the MySQL connection when the spider starts.

        NOTE(review): host/port/credentials are hard-coded; consider
        moving them to Scrapy settings or environment variables.
        """
        self.conn = pymysql.connect(
            host='192.168.3.118',
            port=3308,
            user='root',
            password='123456',
            database='dms',
            charset='utf8mb4'
        )
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        """Map one crawled item onto a ``crawler_data_test`` row and insert it.

        Returns the item unchanged so later pipeline stages still run.
        On insert failure the transaction is rolled back (keeping the
        connection usable for subsequent items) and the exception is
        re-raised.
        """
        url = item.get('url', '')
        # Derive the website column from the URL's network location.
        website = urlparse(url).netloc if url else ''

        source = 0
        source_key = url
        # Fall back to the title when there is no explicit summary.
        summary = item.get('summary') or item.get('title')
        provider = ','.join(item.get('authors', []))
        biz = '科研'  # business category value stored verbatim ("scientific research")
        import_time = item.get('publication_date') or datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        # kind: 1 when a PDF attachment exists, 0 for plain pages.
        kind = 1 if item.get('pdf_url') else 0
        content = item.get('full_text')
        file_type = 'pdf' if item.get('pdf_url') else None
        file_path = item.get('pdf_url')

        # Dedup hash over title + pdf_url; coerce through str()/'' so a
        # None value cannot break the encode step.
        title_str = str(item.get('title') or '')
        pdf_url_str = str(item.get('pdf_url') or '')
        md5_hash = hashlib.md5((title_str + pdf_url_str).encode('utf-8')).hexdigest()

        creator = 'spider'
        updater = 'spider'
        now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        deleted = 0
        tenant_id = 0

        # Parameterized insert — values are never interpolated into SQL.
        sql = """
        INSERT INTO crawler_data_test (
            website, source, source_key, summary, provider, biz, import_time, kind,
            content, file_type, file_path, md5_hash, creator, create_time, updater, update_time, deleted, tenant_id
        ) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
        """
        try:
            self.cursor.execute(sql, (
                website, source, source_key, summary, provider, biz, import_time, kind,
                content, file_type, file_path, md5_hash, creator, now, updater, now, deleted, tenant_id
            ))
            self.conn.commit()
        except Exception:
            # Leave no aborted transaction behind; let Scrapy see the error.
            self.conn.rollback()
            raise
        return item

    def close_spider(self, spider):
        """Release DB resources; tolerant of a failed ``open_spider``."""
        cursor = getattr(self, 'cursor', None)
        if cursor is not None:
            cursor.close()
        conn = getattr(self, 'conn', None)
        if conn is not None:
            conn.close()
