# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from openpyxl import Workbook
import os,requests

class MaoyanPipeline(object):
    """Scrapy item pipeline for Maoyan top-movie items.

    Appends each item as a row of an in-memory Excel workbook (saved once
    to ``maoyantop.xlsx`` when the spider closes) and downloads the movie's
    cover image to ``output/picture/<name>.jpg`` as a best-effort side task.
    """

    # Directory cover images are written to.
    IMG_DIR = 'output/picture'

    def __init__(self):
        self.wb = Workbook()
        self.ws = self.wb.active
        # Header row: movie name, release date, score, stars, cover URL.
        self.ws.append(['电影名字', '上映时间', '电影评分','电影主演','电影封面'])
        # Ensure the image directory exists up front so the per-item
        # download never fails on a missing path.
        os.makedirs(self.IMG_DIR, exist_ok=True)

    def process_item(self, item, spider):
        # Truncate long fields to the original 100-character limit so the
        # spreadsheet cells (and the image filename) stay manageable.
        name = item['filmnames'][0:100]
        line = [name,
                item['filmdate'][0],
                item['filmscore'],
                item['filmstar'][0:100],
                item['filmimg']]
        self.ws.append(line)
        print('电影名为：{}'.format(item['filmnames']))
        print('上映时间为：{}'.format(item['filmdate']))
        print('电影评分为：{}'.format(item['filmscore']))
        print('电影主演为：{}'.format(item['filmstar']))
        print('电影封面为：{}'.format(item['filmimg']))
        # Cover download is best-effort: a timeout, bad URL, or non-2xx
        # response must not abort the whole crawl.
        url = item['filmimg']
        try:
            response = requests.get(url, timeout=10)
            response.raise_for_status()
        except requests.RequestException as exc:
            print('{}封面下载失败：{}'.format(name, exc))
            return item
        with open(os.path.join(self.IMG_DIR, name + '.jpg'), mode='wb') as file:
            file.write(response.content)
        print('{}封面下载完成'.format(name))
        return item

    def close_spider(self, spider):
        # Save the workbook once at the end instead of rewriting the whole
        # file after every item (the original re-saved per item, O(n^2) I/O).
        self.wb.save('maoyantop.xlsx')

