# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import sys
sys.path.insert(0, 'include')
from SearchIndex import SearchIndex
import json
import codecs

class JsonHuxiuPipeline(object):
    """Persist scraped items to 'huxiu.json' as JSON Lines (one object per line)."""

    def __init__(self):
        # Text mode 'w'; codecs.open handles the UTF-8 encoding of the stream.
        self.file = codecs.open('huxiu.json', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        """Serialize *item* as one UTF-8 JSON line and append it to the file.

        ensure_ascii=False keeps non-ASCII (Chinese) text readable instead of
        emitting \\uXXXX escapes. Returns the item unchanged so later pipelines
        still receive it.
        """
        line = json.dumps(dict(item), ensure_ascii=False) + "\n"
        self.file.write(line)
        return item

    def close_spider(self, spider):
        """Scrapy pipeline hook, called when the spider finishes: flush and close."""
        self.file.close()

    # Backward-compatible alias: the old method name 'spider_closed' is never
    # called by Scrapy's pipeline machinery (that is a signal name used by
    # extensions), which meant the file was never explicitly closed. Keep the
    # alias in case external code invoked it directly.
    spider_closed = close_spider

class ESHuxiuPipeline(object):
    """Index scraped items into Elasticsearch (via SearchIndex) and log
    tab-separated (link, title) records to the file 'eshuxiu'."""

    def __init__(self):
        self.f_write = codecs.open('eshuxiu', 'w', encoding='utf-8')
        self.si = SearchIndex()
        self.si.SearchInit()

    def process_item(self, item, spider):
        """Log the item's link/title pair and add the item to the search index.

        Returns the item unchanged so later pipelines still receive it.
        """
        self.save_to_file(item['link'], item['title'])
        self.si.AddIndex(item)
        return item

    def save_to_file(self, url, utitle):
        """Append one 'url<TAB>title' record (newline-terminated) to the log."""
        self.f_write.write('%s\t%s\n' % (url, utitle))

    def close_spider(self, spider):
        """Scrapy pipeline hook: flush the log and finalize the index.

        Cleanup previously lived only in __del__, which Scrapy never guarantees
        to run promptly (or at all); this is the reliable shutdown path.
        """
        self._shutdown()

    def __del__(self):
        # Last-resort cleanup, guarded so it is safe after a failed __init__
        # (attributes may be missing) or after close_spider already ran.
        self._shutdown()

    def _shutdown(self):
        """Idempotent teardown: close the log file and finish the index once."""
        f = getattr(self, 'f_write', None)
        if f is not None and not f.closed:
            f.close()
        si = getattr(self, 'si', None)
        if si is not None:
            self.si = None  # prevent a second IndexDone() from __del__
            si.IndexDone()
