

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import MySQLdb


from m_y.m_y_scrapy.settings import MYSQL_IP, MYSQL_DB, MYSQL_PASSWD, MYSQL_USER, MYSQL_TABLE


class MYScrapyPipeline(object):
    """Item pipeline that appends every scraped item, one per line, to a
    plain-text file.

    The file handle is opened once when the pipeline object is created and
    closed when the spider finishes, so all items of a crawl end up in the
    same file.
    """

    def __init__(self):
        # Append mode so repeated crawls accumulate rather than overwrite.
        self.file = open('ret film info', 'a+', encoding='utf-8')

    def process_item(self, item, spider):
        """Write the item's repr as a single line and pass the item on
        unchanged for any later pipelines."""
        self.file.write(f"{item}\n")
        return item

    def close_spider(self, spider):
        """Release the output file when the spider shuts down."""
        self.file.close()



class DBScrapyPipeline(object):
    """Item pipeline intended to persist film items to MySQL.

    Currently the DB insert is not implemented; items are appended as CSV-ish
    lines to the local file ``db_film`` instead.  The MySQL connection is
    still opened on construction and closed when the spider finishes.
    """

    def __init__(self):
        # Connection settings come from the project's settings module.
        self.db = MySQLdb.connect(host=MYSQL_IP, user=MYSQL_USER, password=MYSQL_PASSWD, database=MYSQL_DB, port=3306)

    def process_item(self, item, spider):
        """Append one comma-separated line per item to ``db_film``.

        Returns the item so that any later pipelines in ITEM_PIPELINES
        still receive it (a Scrapy pipeline must return the item or raise
        DropItem — the original code returned None, silently dropping the
        item for downstream pipelines).
        """
        with open('db_film', 'a+', encoding='utf-8') as f:
            f.write('%s,%s,%s,%s,%s,%s,%s,%s' % (
                item.get('id'),
                item.get('cover'),
                item.get('url'),
                item.get('film_name'),
                item.get('rate'),
                item.get('daoyan'),
                item.get('zhuyan'),
                item.get('film_intrduce')
            ))
            f.write('\n')
        # TODO: real DB insert.  Use a parameterized query (never %-format
        # values into SQL — injection-prone and breaks on quotes), e.g.:
        #   cursor = self.db.cursor()
        #   cursor.execute(
        #       "INSERT INTO " + MYSQL_TABLE +
        #       " (id,cover,url,title,rate,daoyan,zhuyan,intrduce)"
        #       " VALUES (%s,%s,%s,%s,%s,%s,%s,%s)",
        #       (item.get('id'), item.get('cover'), item.get('url'),
        #        item.get('film_name'), item.get('rate'), item.get('daoyan'),
        #        item.get('zhuyan'), item.get('film_intrduce')))
        #   self.db.commit()
        return item

    def close_spider(self, spider):
        """Close the MySQL connection when the spider shuts down."""
        self.db.close()
