# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html

# 去重管道
# 使用redis作为缓存器，对于数据进行存储，已达到去重操作
import redis
from scrapy.exceptions import DropItem
class DropDataPipeline(object):
    """Drop duplicate or invalid job items, using a Redis set as the seen-cache.

    A record is identified by the combination "<zwmc>_<gsmc>_<zwlb>"
    (job title _ company name _ job category).
    """

    # Name of the Redis set that holds every combination seen so far.
    REDIS_SET_KEY = 'zwmc_gsmc_zwlb'

    def __init__(self):
        # Lazily create one shared connection pool for all pipeline instances.
        if not hasattr(DropDataPipeline, 'pool'):
            DropDataPipeline.getRedisCoon()  # create the Redis connection pool
        self.conn = redis.StrictRedis(connection_pool=DropDataPipeline.pool)

    @staticmethod
    def getRedisCoon():
        """Create the class-level Redis connection pool.

        (Method name kept as-is for backward compatibility with any callers.)
        """
        DropDataPipeline.pool = redis.ConnectionPool(
            host='127.0.0.1', password='', port=6379, db=0)

    def process_item(self, item, spider):
        """Drop items with an empty title/company or an already-seen combination.

        Raises:
            DropItem: when the item is invalid or a duplicate.
        Returns:
            The unchanged item, for the next pipeline stage.
        """
        # `not x` covers both '' and None for the string fields.
        if not item['zwmc'] or not item['gsmc']:
            raise DropItem('Duplicate item found')
        # Build the dedup member once instead of repeating the concatenation.
        member = item['zwmc'] + '_' + item['gsmc'] + '_' + item['zwlb']
        if self.conn.sismember(self.REDIS_SET_KEY, member):
            raise DropItem('Duplicate item found')
        # Remember this combination, then pass the item down the pipeline.
        self.conn.sadd(self.REDIS_SET_KEY, member)
        return item

# Data-cleaning pipeline
class DataCleanPipeline(object):
    """Normalize scraped job items: parse the salary range and fill defaults."""

    def process_item(self, items, spider):
        """Split items['zwyx'] into min_zwyx / max_zwyx and clean text fields.

        Salary formats handled:
          * "8000-12000元" -> min '8000', max '12000'
          * "8K-12K"       -> min '8000', max '12000' ('K' means thousands)
          * anything else (negotiable / empty) -> 0 / 0
        """
        salary = items['zwyx']
        if salary.endswith('元'):
            parts = salary[:-1].split('-')
            zwyx_min = parts[0]
            # parts[-1] equals parts[0] when no '-' is present, so a single
            # value like "5000元" no longer raises IndexError.
            zwyx_max = parts[-1]
        elif salary.endswith('K'):
            parts = salary[:-1].replace('K', '').split('-')
            if len(parts) == 2:
                # BUG FIX: the original tested `len(...) > 2`, which a range
                # like "10K-20K" (exactly 2 parts) never matched, so ranged
                # salaries silently lost the '000' scaling and the upper bound.
                zwyx_min = parts[0] + '000'
                zwyx_max = parts[1] + '000'
            else:
                # Single value such as "15K"; kept without the '000' suffix to
                # match the original behaviour — TODO confirm this is intended.
                zwyx_min = zwyx_max = parts[0]
        else:
            # Unrecognized format (e.g. "面议" or empty string).
            zwyx_min = zwyx_max = 0
        items['min_zwyx'] = zwyx_min
        items['max_zwyx'] = zwyx_max

        items['zwmc'] = items['zwmc'].strip()
        items['gsmc'] = items['gsmc'].strip()
        if items['gsxz'] == '':
            items['gsxz'] = '其他'
        if items['gzjy'] == '':
            # BUG FIX: the original assigned to items['gsxz'] here, clobbering
            # the company-type field instead of defaulting work experience.
            items['gzjy'] = '不限'
        return items

# 数据存储管道
from zwspider.models import *
class MysqlSavePipeline(object):
    """Persist cleaned job items to MySQL via the project's DB_Util helper."""

    def open_spider(self, spider):
        # Called once when the spider starts: initialize the database.
        DB_Util.init_db()

    def process_item(self, item, spider):
        """Insert the item as a ZpModel row and pass it on.

        NOTE(review): assumes DB_Util.get_session() returns a SQLAlchemy-style
        session exposing rollback()/close() — confirm against zwspider.models.
        """
        session = DB_Util.get_session()
        try:
            session.add(ZpModel(**item))
            session.commit()
        except Exception:
            # BUG FIX: the original leaked the session and left a failed
            # transaction open on error; undo it and re-raise so the
            # failure stays visible to Scrapy.
            session.rollback()
            raise
        finally:
            session.close()
        return item

class ZwspiderPipeline(object):
    """Default Scrapy pipeline stub: forwards every item unchanged."""

    def process_item(self, item, spider):
        """Return *item* untouched so later pipeline stages receive it."""
        return item
