# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import pymysql  # MySQL database driver
from caipiao.settings import MYSQL  # MySQL config dict from settings.py (must be defined there)


class CaipiaoPipeline:
    """Append each scraped lottery item as one CSV row to a local file.

    The file is opened once when the spider starts and closed when it
    finishes, so ``process_item`` only has to write.
    """

    # File handle; None until open_spider runs, reset to None on close so
    # close_spider is safe to call even if open_spider never ran or failed.
    f = None

    def open_spider(self, spider):
        # Open in append mode before the crawl starts so repeated runs
        # accumulate rows instead of overwriting.
        self.f = open("双色球数据.csv", mode="a", encoding="utf-8")

    def close_spider(self, spider):
        # Close promptly when the crawl ends; guard against a missing/failed open.
        if self.f:
            self.f.close()
            self.f = None

    def process_item(self, item, spider):
        # Row format: date, red balls joined with "_", first blue ball.
        blue_ball = item['blue_ball'][0]
        self.f.write(f"{item['date_num']},{'_'.join(item['red_ball'])},{blue_ball}\n")
        return item


class CaipiaoMySQLPipeline:
    """Insert each scraped lottery item into the MySQL table ``caipiao``.

    The connection is opened once when the spider starts (using the MYSQL
    config dict from settings.py) and closed when the spider finishes.
    """

    # Connection handle; None until open_spider runs, reset on close so
    # close_spider is safe even if the connection was never established.
    connection = None

    def open_spider(self, spider):
        # Connect before the crawl starts; all keys come from the MYSQL dict.
        self.connection = pymysql.connect(
            host=MYSQL['host'],
            port=MYSQL['port'],
            user=MYSQL['user'],
            password=MYSQL['password'],
            database=MYSQL['database'],  # name of the database to use
        )

    def close_spider(self, spider):
        # Release the connection promptly when the crawl ends.
        if self.connection:
            self.connection.close()
            self.connection = None

    def process_item(self, item, spider):
        # Persist one draw. Red balls are stored joined with "_", matching
        # the CSV pipeline's format.
        blue_ball = item['blue_ball']
        sql = "insert into caipiao(date_num, red_ball, blue_ball) values (%s, %s, %s)"
        try:
            # pymysql cursors are context managers: the cursor is always
            # closed, even when cursor() or execute() raises (the original
            # code hit an unbound `cursor` in its finally clause if
            # cursor() itself failed).
            with self.connection.cursor() as cursor:
                cursor.execute(sql, (item['date_num'], "_".join(item['red_ball']), blue_ball))
            self.connection.commit()
        except Exception:
            # Narrowed from a bare except: keep the best-effort rollback but
            # let KeyboardInterrupt/SystemExit propagate.
            self.connection.rollback()
        return item
