# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
import os

import scrapy
from mysql import connector
from scrapy.conf import settings
from scrapy.exceptions import DropItem
from scrapy.pipelines.images import ImagesPipeline

class Top250Pipeline(object):
    """Persist scraped movie items into the MySQL table ``t_Movie``.

    Connection parameters are read from the project settings
    (``HOST``, ``PORT``, ``USER_NAME``, ``PASSWORD``, ``DATABASE``).
    """

    def __init__(self):
        super(Top250Pipeline, self).__init__()
        # One connection for the pipeline's lifetime; closed in close_spider.
        self.conn = connector.connect(
            host=settings['HOST'],
            port=settings['PORT'],
            user=settings['USER_NAME'],
            password=settings['PASSWORD'],
            database=settings['DATABASE'],
        )

    def process_item(self, item, spider):
        """Insert one movie row and return the item unchanged.

        Rolls back on failure so the connection stays usable for the
        next item, and always closes the cursor (previously leaked).
        """
        save_data = [
            item['image'],
            item['local_path'],
            item['name'],
            item['description'],
            item['start'],
            item['comment'],
        ]
        cursor = self.conn.cursor()
        try:
            # Parameterized query: the driver escapes the values.
            cursor.execute(
                'insert into t_Movie(image, local_path, name, description, start, comment)'
                ' VALUES (%s,%s,%s,%s,%s,%s)',
                save_data,
            )
            self.conn.commit()
        except Exception:
            # Leave the connection in a clean state before re-raising.
            self.conn.rollback()
            raise
        finally:
            cursor.close()  # bug fix: cursor was never closed
        return item

    def close_spider(self, spider):
        # bug fix: a stray `pass` statement preceded this call.
        self.conn.close()


class Top250ImagePipeline(ImagesPipeline):
    """Download each movie's poster image and rename the stored file to
    ``<movie name>.<ext>`` under ``IMAGES_STORE``, recording the final
    path on the item as ``local_path``.
    """

    # Destination directory for downloaded images (from project settings).
    IMAGES_STORE = settings['IMAGES_STORE']

    def get_media_requests(self, item, info):
        """Schedule the download of this item's poster URL."""
        yield scrapy.Request(url=item['image'])

    def item_completed(self, results, item, info):
        """Rename the downloaded image after the movie and return the item.

        Raises:
            DropItem: when the download failed. Previously the code
                indexed ``[0]`` on the list of successful results and
                crashed with a bare IndexError on failure.
        """
        image_paths = [res['path'] for ok, res in results if ok]
        if not image_paths:
            raise DropItem('Image download failed for %s' % item.get('name'))
        image_path = image_paths[0]
        source_path = os.path.join(self.IMAGES_STORE, image_path)
        # Keep the original file extension, e.g. ".jpg".
        extension = image_path.split('.')[-1]
        save_path = os.path.join(self.IMAGES_STORE, item['name'] + '.' + extension)
        # os.replace overwrites atomically; os.rename would raise on
        # Windows if save_path already exists (e.g. on a re-crawl).
        os.replace(source_path, save_path)
        item['local_path'] = save_path
        return item