# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html

import pandas as pd
import csv
import pymysql

class FilePipeline(object):
    """Scrapy item pipeline that persists scraped items into MySQL.

    Each item is expected to be a mapping with the keys ``title``,
    ``publish_date``, ``source``, ``content``, ``type`` and ``url``.
    ``url`` acts as the de-duplication key: items whose url already
    exists in the ``gzstjj_tjfbyjd_content`` table are skipped.
    """

    def __init__(self):
        # Open the database connection once when the pipeline is created.
        # NOTE(review): credentials are hard-coded; consider loading them
        # from Scrapy settings (e.g. via from_crawler) for production use.
        self.connection = pymysql.connect(
            host='localhost',  # database host
            user='root',  # database user
            password='12345678',  # database password
            db='spider',  # database name
            cursorclass=pymysql.cursors.DictCursor  # rows returned as dicts
        )
        self.cursor = self.connection.cursor()

    def process_item(self, item, spider):
        """Insert *item* unless a row with the same url already exists.

        Returns the item unchanged so subsequent pipelines still run.
        NOTE(review): SELECT-then-INSERT is race-prone if several writers
        share the table; a UNIQUE index on ``url`` plus INSERT IGNORE
        would be atomic — confirm against the schema.
        """
        # De-duplicate on url.  BUGFIX: the parameter must be a one-element
        # tuple -- ``(item['url'])`` is just a parenthesised string, while
        # ``(item['url'],)`` is the sequence DB-API execute() expects.
        self.cursor.execute(
            "SELECT url FROM gzstjj_tjfbyjd_content WHERE url = %s",
            (item['url'],)
        )
        result = self.cursor.fetchone()

        if not result:
            # No existing record: insert the new row and commit.
            try:
                self.cursor.execute(
                    "INSERT INTO gzstjj_tjfbyjd_content (title, publish_date, source, content, type, url) VALUES (%s, %s, %s, %s, %s, %s)",
                    (item['title'], item['publish_date'], item['source'], item['content'], item['type'], item['url'])
                )
                self.connection.commit()
            except pymysql.MySQLError as error:
                # Roll back the failed transaction so the connection
                # remains usable for the next item.
                print(f"Error: {error}")
                self.connection.rollback()
        else:
            print(f"Record with URL {item['url']} already exists.")
        return item

    def close_spider(self, spider):
        """Release database resources when the spider finishes."""
        # BUGFIX: close the cursor as well, not only the connection.
        self.cursor.close()
        self.connection.close()

        
    




