# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter

import pymysql


class ScrapysteamPipeline:
    """Item pipeline that persists scraped Steam games into MySQL.

    A connection to the ``steam`` database is opened when the pipeline is
    created; each item is inserted into the ``games`` table, and the
    connection is closed when the spider finishes.
    """

    def __init__(self):
        # NOTE(review): credentials are hard-coded; consider reading them
        # from Scrapy settings instead of embedding them in code.
        self.db = pymysql.connect(host='localhost',
                                  user='root',
                                  password='ff123456',
                                  port=3306,
                                  db='steam')
        self.cursor = self.db.cursor()
        # Sanity check: confirm the connection works before crawling.
        self.cursor.execute('select version()')
        data = self.cursor.fetchone()
        print('Database version:', data)

    def close_spider(self, spider):
        # Release the cursor and the connection when the spider closes.
        # (Previously the connection was never closed — a resource leak.)
        self.cursor.close()
        self.db.close()

    def process_item(self, item, spider):
        """Insert one scraped game into the ``games`` table.

        Values are bound through ``%s`` placeholders, so item data never
        reaches the SQL string itself. On failure the transaction is
        rolled back and the error printed.

        Returns the item unchanged so later pipelines still receive it
        (the original returned ``None``, which drops the item in Scrapy).
        """
        game = {
            'name': item['name'],
            'appId': item['appId'],
            'developers': item['developers'],
            'publishers': item['publishers'],
            'issueDate': item['issueDate'],
            'price': item['price'],
            'appTag': item['appTag'],
            'numOfComments': item['numOfComments'],
            'review': item['review'],
            'favorableRate': item['favorableRate']
        }
        table = 'games'
        keys = ','.join(game.keys())
        placeholders = ','.join(['%s'] * len(game))
        sql = 'INSERT INTO {table}({keys}) VALUES ({values})'.format(
            table=table, keys=keys, values=placeholders)
        try:
            self.cursor.execute(sql, tuple(game.values()))
            # Commit unconditionally after a successful execute; the old
            # code only committed when the reported rowcount was truthy.
            self.db.commit()
        except Exception as e:
            print(e)
            self.db.rollback()
        return item
