# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html

class ZpPipeline(object):
    """Default no-op pipeline: passes every item through unchanged."""

    def process_item(self, item, spider):
        # Nothing to clean or store here; hand the item to the next stage.
        return item
'''
1. Simple data cleaning
2. Filter out duplicate company/position records
3. Persist the data
'''
import re
import datetime
# Simple data cleaning
class DataCleanPipeline(object):
    """Normalize scraped job items in place.

    Cleanup steps:
      1. strip surrounding whitespace from every field,
      2. extract the numeric min/max monthly salary from ``zwyx``
         into ``min_yx``/``max_yx``,
      3. keep only the city part of the work location ``gzdd``,
      4. strip the trailing '人' unit from the headcount ``zprs``,
      5. normalize the publish date ``fbrq`` to an absolute date string.

    Returns the mutated item so later pipeline stages receive it.
    """

    # Compiled once at class level instead of per item (raw string avoids
    # the invalid-escape-sequence warning of '\d+').
    _NUM_RE = re.compile(r'\d+')

    def process_item(self, item, spider):
        # 1. Strip leading/trailing whitespace from every field value.
        for key in item:
            item[key] = item[key].strip()

        # 2. Monthly salary: pull out the numeric parts.
        yx = self._NUM_RE.findall(item['zwyx'])
        if len(yx) == 2:
            min_yx, max_yx = yx
        elif len(yx) == 1:
            # BUG FIX: original assigned the whole list `yx`, not the value.
            min_yx = max_yx = yx[0]
        else:
            # No number at all (e.g. '面议' / "negotiable").
            min_yx = max_yx = 0
        item['max_yx'] = max_yx
        item['min_yx'] = min_yx

        # 3. Work location: keep only the city (before the first '-').
        item['gzdd'] = item['gzdd'].split('-')[0]

        # 4. Headcount: drop the '人' (persons) unit.
        item['zprs'] = item['zprs'].strip('人')

        # 5. Publish date normalization.
        fbrq = item['fbrq']
        if fbrq in ('最近', '招聘中', '今日') or '小时前' in fbrq:
            # "recent" / "hiring" / "today" / "N hours ago" -> now.
            item['fbrq'] = str(datetime.datetime.now())
        elif fbrq == '昨天':
            # "yesterday" -> yesterday's date.
            item['fbrq'] = str(datetime.date.today() - datetime.timedelta(days=1))
        else:
            # Partial dates like '12-16' need a year prefix; a month greater
            # than the current one must belong to the previous year.
            today = datetime.date.today()
            if today.month < int(fbrq.split('-')[0]):
                item['fbrq'] = str(today.year - 1) + '-' + fbrq
            else:
                # BUG FIX: original used `datetime.datetime.year`, which is
                # the class attribute descriptor, not the current year.
                item['fbrq'] = str(today.year) + '-' + fbrq
        return item

# Import the exception used to discard an item from the pipeline
from scrapy.exceptions import DropItem

# De-duplication on (position name, company name)
# Drops items identical to ones already seen during this run
class DropItemPipeline(object):
    """Drop items whose (position name + company name) pair was seen before.

    Keeps an in-memory set of keys for the lifetime of the spider run and
    raises ``DropItem`` for any repeat, so duplicates never reach storage.
    """

    def __init__(self):
        # Set of "zwmc+gsmc" keys already passed through (set membership
        # gives O(1) duplicate detection).
        self.zwmc_gsmc_jl = set()

    def process_item(self, item, spider):
        key = item['zwmc'] + item['gsmc']
        if key in self.zwmc_gsmc_jl:
            raise DropItem('this item has been stored already,item is %s' % item)
        # First occurrence: remember it and pass the item along.
        self.zwmc_gsmc_jl.add(key)
        return item

# Data storage
# SQLite database backend
import sqlite3
class SaveItemPipeline(object):
    """Persist cleaned items into a local SQLite database (``zp.sqlit``).

    ``open_spider`` opens the connection and creates the ``zp_list`` table
    if needed; ``close_spider`` closes the connection; ``process_item``
    inserts one row per item and passes the item through.
    """

    def open_spider(self, spider):
        self.conn = sqlite3.connect('zp.sqlit')
        cu = self.conn.cursor()  # statement cursor
        create_table_sql = 'create table if not exists zp_list(zwmc varchar(100),gsmc varchar(100),flxx varchar(100),min_zwyx integer,max_zwyx integer,gzdd varchar(100),fbrq varchar(100),gzxz varchar(100),gzjy varchar(100),zdxl varchar(100),zprs integer,zwlb varchar(100),zwms varchar(100),url varchar(100),lb_big varchar(100));'
        cu.execute(create_table_sql)
        self.conn.commit()

    def close_spider(self, spider):
        self.conn.close()

    def process_item(self, item, spider):
        cu = self.conn.cursor()
        # BUG FIX: the original statement lacked the VALUES keyword (invalid
        # SQL, failed on every item) and spliced values in with %-formatting,
        # which breaks on quotes in the data and is open to SQL injection.
        # Parameterized ? placeholders fix both.
        insert_sql = 'insert into zp_list values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);'
        cu.execute(insert_sql, (
            item['zwmc'], item['gsmc'], item['flxx'], item['min_yx'],
            item['max_yx'], item['gzdd'], item['fbrq'], item['gzxz'],
            item['gzjy'], item['zdxl'], item['zprs'], item['zwlb'],
            item['zwms'], item['url'], item['lb_big'],
        ))
        self.conn.commit()
        return item


