# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import redis
import json
# Connection to Redis is created by the pipeline below.
#


class CodesspiderPipeline(object):
    """Scrapy item pipeline that pushes scraped blog items onto a Redis list.

    Each processed item is serialized to JSON and LPUSHed onto the
    ``list_blogs`` key so a separate consumer can drain the queue.
    """

    # Hard-coded connection settings; on the Redis server protected mode
    # must be disabled:  config set protected-mode "no"
    REDIS_SETTINGS = {
        'host': '192.168.222.130',
        'port': 6383,
        'db': 0,
    }

    # Item fields copied into the JSON payload.
    _FIELDS = ("title", "labels", "url", "content", "create_time")

    def open_spider(self, spider):
        """Create the Redis connection once per spider run (Scrapy hook).

        The original code opened a fresh connection for every item,
        which is wasteful; connecting here fixes that.
        """
        self._redis = redis.Redis(**self.REDIS_SETTINGS)

    def process_item(self, item, spider):
        """Serialize *item* and push it onto the ``list_blogs`` Redis list.

        Returns the item unchanged so downstream pipelines keep working
        (the original version forgot to return it, silently dropping the
        item for every later pipeline stage).
        """
        # Fallback in case open_spider was not invoked (e.g. direct calls).
        if getattr(self, "_redis", None) is None:
            self.open_spider(spider)
        blog = {field: item[field] for field in self._FIELDS}
        self._redis.lpush("list_blogs", json.dumps(blog))
        return item


class blog(object):
    """Plain value object holding one scraped blog entry.

    NOTE(review): the class name violates PEP 8 (should be ``Blog``); it is
    kept as-is because external callers may already reference it.

    Attributes:
        title: Blog post title.
        labels: Labels/tags associated with the post.
        url: Source URL of the post.
        content: Post body.
        create_time: Creation timestamp (as scraped; format not normalized here).
    """

    def __init__(self, title, labels, url, content, create_time):
        self.title = title
        self.labels = labels
        self.url = url
        self.content = content
        self.create_time = create_time

    def __repr__(self):
        # Debug-friendly representation; original class had none.
        return (
            f"{type(self).__name__}(title={self.title!r}, "
            f"labels={self.labels!r}, url={self.url!r}, "
            f"create_time={self.create_time!r})"
        )