# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import csv
import os

import pymysql
from itemadapter import ItemAdapter


# Pipeline that appends scraped items to a CSV file
class GamePipeline_CSV:

    # Output path; a class attribute so subclasses/tests can override it.
    FILE_PATH = './data/game.csv'

    # Runs exactly once, when the spider starts.
    def open_spider(self, spider):
        """Open the output CSV file, creating the data directory if needed."""
        print("爬虫启动了...")

        # The original open() crashed with FileNotFoundError when ./data
        # did not exist — create the directory up front.
        os.makedirs(os.path.dirname(self.FILE_PATH) or ".", exist_ok=True)

        # newline="" lets the csv module control line endings itself,
        # as required by the csv docs.
        self.f = open(self.FILE_PATH, mode="a", encoding="utf-8", newline="")
        self.writer = csv.writer(self.f)

    # Runs once for every item the spider yields.
    def process_item(self, item, spider):
        """Append one item as a CSV row: name, category, date.

        Returns the item unchanged so downstream pipelines receive it.
        """
        # csv.writer quotes/escapes commas and newlines inside field
        # values — plain string concatenation (the old code) silently
        # produced corrupt rows for such values.
        self.writer.writerow([item['name'], item['category'], item['date']])

        # Hand the item to the next pipeline.
        return item

    # Runs exactly once, when the spider closes.
    def close_spider(self, spider):
        """Close the output file."""
        print("爬虫结束了...")
        self.f.close()

# Pipeline that inserts scraped items into a MySQL database
class GamePipeline_MySQL:

    def open_spider(self, spider):
        """Connect to the database when the spider starts."""
        self.conn = pymysql.connect(host="127.0.0.1",
                                    port=3306,
                                    user="root",
                                    password="zxydsg123",
                                    database="scrapy")

    def close_spider(self, spider):
        """Close the database connection when the spider finishes."""
        self.conn.close()

    # Runs once for every item the spider yields.
    def process_item(self, item, spider):
        """Insert one item into the `game` table; roll back on failure.

        Returns the item unchanged so downstream pipelines receive it.
        """
        try:
            # The context manager closes the cursor even when execute()
            # raises — the original code leaked one cursor per item.
            with self.conn.cursor() as cursor:
                # Parameterized query: pymysql escapes the values,
                # preventing SQL injection from scraped data.
                sql = "insert into game(name, category, date) values(%s, %s, %s)"
                cursor.execute(sql, (item['name'], item['category'], item['date']))

            # Commit the transaction.
            self.conn.commit()

        except Exception as e:
            # On any error: log it and roll back so the connection
            # remains usable for the next item (best-effort pipeline).
            print(e)
            self.conn.rollback()

        print("存储完毕...")

        # Must return the item, otherwise later pipelines get nothing.
        return item

