# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html

import pymysql

class SinanewsspiderPipeline(object):
    """Scrapy item pipeline that persists Sina local-news items to MySQL.

    Each item is inserted into the ``SinaLocalNews`` table. The ``pubtime``
    field arrives as a Chinese-formatted date string (e.g. ``2018年01月02日``)
    and is normalized to ``YYYY-MM-DD`` inside the INSERT via SQL
    ``replace``/``left``/``trim``, exactly as the original query did.
    """

    def open_spider(self, spider):
        # Open the connection when the spider starts rather than at class
        # definition time: a class-level connect runs on import, is shared by
        # every instance, and is never closed.
        self.db = pymysql.connect(
            host='localhost',
            user='root',
            passwd='root',
            db='sinanews',
            charset='utf8',  # required for the Chinese titles/content stored here
        )
        self.cur = self.db.cursor()

    def close_spider(self, spider):
        # Release the cursor and the connection when the spider finishes.
        self.cur.close()
        self.db.close()

    def process_item(self, item, spider):
        """Insert one scraped news item and return it unchanged.

        Uses a parameterized query: the previous ``%``-interpolated SQL was
        vulnerable to SQL injection and broke on any field containing a
        single quote.
        """
        sql = (
            "INSERT INTO SinaLocalNews(title, content, imageUrl, Url, pubtime) "
            "VALUES (%s, %s, %s, %s, "
            "trim(replace(replace(replace(left(%s,16),'年','-'),'月','-'),'日',' ')))"
        )
        try:
            self.cur.execute(sql, (
                item['title'],
                item['content'],
                item['imageUrl'],
                item['Url'],
                item['pubtime'],
            ))
            self.db.commit()
        except pymysql.MySQLError:
            # Keep the connection in a consistent state on a failed insert,
            # then surface the error to Scrapy.
            self.db.rollback()
            raise
        return item