# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import os
import re
import time

from scrapy.exporters import JsonLinesItemExporter


class GetProxyPipeline(object):
	"""Item pipeline that filters out slow proxies and appends the rest,
	one JSON object per line, to ``get_proxy_data.json`` in the project root.
	"""

	# Proxies with a response delay above this many seconds are discarded.
	MAX_SPEED_SECONDS = 2.0

	def __init__(self):
		# Output path: <project root>/get_proxy_data.json (one level above this file).
		path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'get_proxy_data.json')
		# Scrapy exporters write bytes, so open the file in binary mode.
		self.f = open(path, 'wb')
		self.exporter = JsonLinesItemExporter(self.f, ensure_ascii=False, encoding='utf-8')

	def process_item(self, item, spider):
		"""Export one JSON line per proxy row; skip rows slower than 2 seconds.

		``item`` carries parallel lists — one element per table row scraped
		from the page; all lists are assumed to have the same length, so the
		'ip' list is used to drive the loop.
		Returns the item unchanged so downstream pipelines still see it.
		"""
		ip_li = item['ip']
		port_li = item['port']
		category_li = item['category']
		level_li = item['level']
		# NOTE: 'veryfy_time' (sic) is the field name declared on the spider's
		# Item — do not "fix" the spelling here alone or lookups will break.
		verify_time_li = item['veryfy_time']
		speed_li = item['speed']

		for i in range(len(ip_li)):
			speed = speed_li[i]
			# BUG FIX: the old check `int(speed.split('.')[0]) > 2` truncated
			# the decimal part, so delays such as "2.5"–"2.9" slipped through
			# the documented "> 2 seconds" filter. Parse the leading numeric
			# portion (the value may carry a unit suffix, e.g. "2.5秒" —
			# TODO confirm against the spider's output) and compare as float.
			match = re.match(r'\d+(?:\.\d+)?', speed)
			if match is None or float(match.group()) > self.MAX_SPEED_SECONDS:
				# Too slow (or unparseable): skip this row, keep scanning.
				continue

			data = {
				'speed': speed,
				'ip': ip_li[i],
				'port': port_li[i],
				'level': level_li[i],
				'category': category_li[i],
				# Store the verification time as an integer Unix timestamp so
				# later freshness checks can compare raw seconds directly
				# (one day == 86400 s).
				'verify_time': int(time.mktime(time.strptime(verify_time_li[i], '%Y-%m-%d %H:%M:%S'))),
			}
			# Write this row to the JSON-lines file immediately.
			self.exporter.export_item(data)
		return item

	def close_spider(self, spider):
		# Release the output file handle when the spider finishes.
		self.f.close()
