# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import json
import scrapy
from scrapy.pipelines.images import ImagesPipeline
from itemadapter import ItemAdapter
import pymongo

class MoviePipeline:
  """Append each scraped movie item to ``movie.txt`` as one JSON object per line.

  Must be enabled via the ITEM_PIPELINES setting in settings.py.
  """

  def open_spider(self, spider):
    # Open the output file once when the spider starts.
    self.file = open('movie.txt', 'a', encoding='utf-8')
    # NOTE: this print previously sat at class-body level, so it ran at
    # import time; it belongs here, when the spider actually opens.
    print('open_spider')

  def close_spider(self, spider):
    # Release the file handle when the spider finishes.
    self.file.close()
    print('close_spider')

  def process_item(self, item, spider):
    # Scrapy Item objects are not JSON-serializable directly; convert
    # non-dict items to a plain dict first (ItemAdapter also covers
    # dataclass/attrs items).
    data = item if isinstance(item, dict) else ItemAdapter(item).asdict()
    # Write one JSON object per line (JSON Lines) so the file stays parseable.
    self.file.write(json.dumps(data, ensure_ascii=False) + '\n')
    return item  # must return the item so later pipelines receive it


class ByzwPipeline:
  """Append each scraped novel item to ``novel.txt`` as one JSON object per line.

  Must be enabled via the ITEM_PIPELINES setting in settings.py.
  """

  def open_spider(self, spider):
    # Open the output file once when the spider starts.
    self.file = open('novel.txt', 'a', encoding='utf-8')

  def close_spider(self, spider):
    # Release the file handle when the spider finishes.
    self.file.close()
    print('close_spider')

  def process_item(self, item, spider):
    # Scrapy Item objects are not JSON-serializable directly; convert
    # non-dict items to a plain dict first.
    data = item if isinstance(item, dict) else ItemAdapter(item).asdict()
    # Append a newline so each item is its own JSON Lines record.
    self.file.write(json.dumps(data, ensure_ascii=False) + '\n')
    return item  # pass the item on to any later pipelines


class ImagePipeline(ImagesPipeline):
  """Download the images in item['image_urls'], naming each saved file
  after item['image_name'] instead of the default URL-hash filename.
  """

  def get_media_requests(self, item, info):
    # Carry the desired file name along in request.meta so file_path()
    # can retrieve it when the download completes.
    adapter = ItemAdapter(item)
    for file_url in adapter['image_urls']:
      yield scrapy.Request(file_url, meta={'image_name': adapter['image_name']})

  # Override the stored file name.
  def file_path(self, request, response=None, info=None, *, item=None):
    # Scrapy >= 2.4 calls file_path() with an ``item`` keyword argument;
    # accepting it (with a default) keeps this override compatible with
    # both old and new Scrapy versions.
    # strip() guards against scraped names carrying stray whitespace.
    return request.meta['image_name'].strip() + '.jpg'
    # return 'files/' + os.path.basename(urlparse(request.url).path)


class Doubanipeline:
  """Store scraped items in MongoDB (database ``person``, collection ``movie``).

  Must be enabled via the ITEM_PIPELINES setting in settings.py.
  """

  def open_spider(self, spider):
    # NOTE(review): credentials are hard-coded; consider moving them
    # into settings.py and reading them via spider.settings.
    uri = 'mongodb://%s:%s@%s' % ('root', '123456', 'localhost:27017')
    self.client = pymongo.MongoClient(uri)

  def close_spider(self, spider):
    # Close the connection pool when the spider finishes; without this
    # the client leaks sockets on every run.
    self.client.close()

  def process_item(self, item, spider):
    print('-'*30 , item)
    # insert_one() requires a plain mapping; Scrapy Item objects are not
    # mappings, so convert them first.
    data = item if isinstance(item, dict) else ItemAdapter(item).asdict()
    # Insert a copy: insert_one() mutates its argument (adds ``_id``),
    # and we must not pollute the item handed to later pipelines.
    self.client.person.movie.insert_one(dict(data))
    return item

