# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
import scrapy
from scrapy.pipelines.files import FilesPipeline
from scrapy.pipelines.images import ImagesPipeline
from itemadapter import ItemAdapter
from scrapy.exceptions import DropItem
import json
import pymongo
import hashlib

# Custom files download pipeline (overrides the stored file path)
class CustomFilesPipeline(FilesPipeline):
	"""Files pipeline that stores each download under a per-item folder.

	The stored path is ``<project>/<url-basename>``, where ``project`` is
	read from the item and falls back to ``'full'`` (the FilesPipeline
	default directory) when the field is absent.
	"""

	def file_path(self, request, response=None, info=None, *, item=None):
		# Group files by the item's 'project' field, defaulting to 'full'.
		folder = dict(item).get('project', 'full')
		# Use the last segment of the download URL as the file name.
		basename = request.url.rsplit('/', 1)[-1]
		return f'{folder}/{basename}'


# Custom images download pipeline (overrides the stored image path)
class CustomImagesPipelin(ImagesPipeline):
	"""Images pipeline that files each image under its item's project folder."""

	# NOTE(review): the class name looks like a typo for "CustomImagesPipeline";
	# left unchanged because ITEM_PIPELINES settings may reference it by this name.

	def file_path(self, request, response=None, info=None, *, item=None):
		# Directory comes from the item's 'project' field ('full' by default);
		# the file name is the final segment of the image URL.
		project = dict(item).get('project', 'full')
		filename = request.url.rsplit('/', 1)[-1]
		return f'{project}/{filename}'


# Examples
# ************** Examples: begin ****************
class ItemPipelinesPipeline:
	"""No-op pipeline skeleton: passes every item through unchanged."""

	def process_item(self, item, spider):
		# Nothing to do — hand the item to the next pipeline stage as-is.
		return item


# Price validation: drop items that have no price
class PricePipeline:
	"""Validate the 'price' field and apply VAT where required.

	Items without a truthy price are dropped. Items flagged with
	'price_excludes_vat' get their price multiplied by ``vat_factor``.
	"""

	# Multiplier applied when the item's price excludes VAT.
	vat_factor = 1.15

	def process_item(self, item, spider):
		adapter = ItemAdapter(item)
		# A falsy price (missing, None, 0, empty string) drops the item.
		if not adapter.get('price'):
			raise DropItem(f"Missing price in {item}")
		if adapter.get('price_excludes_vat'):
			adapter['price'] = adapter['price'] * self.vat_factor
		return item


# Write items to a JSON Lines file (alternatively, use Feed exports)
class JsonWriterPipeline:
	"""Write every scraped item to ``items.jl`` as one JSON object per line.

	The file is opened when the spider starts and closed when it finishes.
	"""

	def open_spider(self, spider):
		# Fix: specify the encoding explicitly — a bare open() uses the
		# platform/locale default, which is not portable.
		self.file = open('items.jl', 'w', encoding='utf-8')

	def close_spider(self, spider):
		self.file.close()

	def process_item(self, item, spider):
		# Serialize the item as a dict and append it as a JSON Lines record.
		line = json.dumps(ItemAdapter(item).asdict()) + "\n"
		self.file.write(line)
		return item


# Write items to MongoDB
class MongoPipeline:
	"""Persist every scraped item into a MongoDB collection."""

	# All items are stored in this single collection.
	collection_name = 'scrapy_items'

	def __init__(self, mongo_uri, mongo_db):
		self.mongo_uri = mongo_uri
		self.mongo_db = mongo_db

	@classmethod
	def from_crawler(cls, crawler):
		# Build the pipeline from project settings; the database name
		# defaults to 'items' when MONGO_DATABASE is not configured.
		settings = crawler.settings
		return cls(
			mongo_uri=settings.get('MONGO_URI'),
			mongo_db=settings.get('MONGO_DATABASE', 'items'),
		)

	def open_spider(self, spider):
		# One client per spider run; released again in close_spider().
		self.client = pymongo.MongoClient(self.mongo_uri)
		self.db = self.client[self.mongo_db]

	def close_spider(self, spider):
		self.client.close()

	def process_item(self, item, spider):
		self.db[self.collection_name].insert_one(ItemAdapter(item).asdict())
		return item


import hashlib
from urllib.parse import quote


# Take a screenshot of each item
class ScreenshotPipeline:
	"""Render a screenshot of every item's URL via a local Splash service.

	On success the PNG is written to disk (named after the MD5 of the URL)
	and the filename is stored back on the item.
	"""

	SPLASH_URL = "http://localhost:8050/render.png?url={}"

	async def process_item(self, item, spider):
		adapter = ItemAdapter(item)
		# Ask the Splash instance to render the item's URL as a PNG.
		render_url = self.SPLASH_URL.format(quote(adapter["url"]))
		response = await spider.crawler.engine.download(
			scrapy.Request(render_url), spider
		)

		if response.status != 200:
			# Rendering failed — pass the item through untouched.
			return item

		# Derive the filename from a hash of the URL so repeats overwrite.
		digest = hashlib.md5(adapter["url"].encode("utf8")).hexdigest()
		filename = f"{digest}.png"
		with open(filename, "wb") as f:
			f.write(response.body)

		# Record where the screenshot was saved on the item itself.
		adapter["screenshot_filename"] = filename
		return item


# Duplicates filter
class DuplicatesPipeline:
	"""Drop any item whose 'id' field has already been seen during this run."""

	def __init__(self):
		# IDs of all items processed so far (in-memory, per-process).
		self.ids_seen = set()

	def process_item(self, item, spider):
		adapter = ItemAdapter(item)
		item_id = adapter['id']
		if item_id in self.ids_seen:
			raise DropItem(f"Duplicate item found: {item!r}")
		self.ids_seen.add(item_id)
		return item

# ************** Examples: end ****************
