# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import pymysql
import logging
from statistics_report import settings
class StatisticsReportPipeline:
    """Scrapy pipeline that persists scraped report items into MySQL.

    Expects items carrying the keys ``file_name``, ``date``, ``link`` and
    ``content``.  Connection parameters are read from the project
    ``settings`` module (MYSQL_HOST, MYSQL_DBNAME, MYSQL_USER,
    MYSQL_PASSWD, MYSQL_PORT).
    """

    def __init__(self):
        # One shared connection/cursor for the lifetime of the spider;
        # released in close_spider().
        self.connect = pymysql.connect(
            host=settings.MYSQL_HOST,
            db=settings.MYSQL_DBNAME,
            user=settings.MYSQL_USER,
            passwd=settings.MYSQL_PASSWD,
            port=settings.MYSQL_PORT,
            charset='utf8mb4',
            use_unicode=True)
        self.cursor = self.connect.cursor()

    def process_item(self, item1, spider):
        """Insert one item into ``zhixian_gongbao``; commit on success.

        On failure (commonly a duplicate row) the transaction is rolled
        back and the item is returned unchanged so later pipelines still
        receive it.
        """
        print("开始存库=======================================")
        sql = """INSERT INTO zhixian_gongbao(title,dt_publish,cd_link,cd_content)
                 VALUES (%s,%s,%s,%s)"""
        try:
            self.cursor.execute(sql, (item1['file_name'],
                                      item1['date'],
                                      item1['link'],
                                      item1['content']
                                      ))
        except Exception as err:
            # Roll back so the shared connection is not left in a dirty
            # transaction state for subsequent items.
            self.connect.rollback()
            # BUG FIX: the original concatenated the Exception object to a
            # str (TypeError) and used the misspelled key 'file_ame'
            # (KeyError), so the error path itself crashed.
            print("错误信息为==> " + str(err) + "=========" + item1['file_name'])
            logging.error("错误信息为==> %s====%s=====%s",
                          err, item1['file_name'], item1['link'])
            return item1

        self.connect.commit()
        print("提交数据=======================")
        return item1

    def close_spider(self, spider):
        """Release the cursor and close the MySQL connection."""
        self.cursor.close()
        self.connect.close()
