# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
import codecs  # kept for backward compatibility; encoding is now handled via built-in open(encoding='utf-8')
#from scrapy.linkextractors import LinkExtractor


class TutorialPipeline(object):
    """Scrapy item pipeline that stores scraped items in ``data.json``.

    Each item is serialized as one JSON object per line (JSON Lines
    format), so the file can be streamed back with ``json.loads`` per line.
    """

    def __init__(self):
        # A plain utf-8 text stream; ensure_ascii=False in process_item
        # relies on this so non-ASCII characters are written verbatim.
        # (The old codecs.open(..., mode='wb', buffering=0, encoding='utf-8')
        # mixed binary/unbuffered mode with a text encoding.)
        self.file = open('data.json', mode='w', encoding='utf-8')

    def process_item(self, item, spider):
        """Serialize *item* as one JSON line and append it to the file.

        :param item: the scraped item (any mapping accepted by ``dict()``)
        :param spider: the spider that produced the item (unused)
        :returns: the item unchanged, so later pipelines keep processing it
        """
        # ensure_ascii=False writes non-ASCII text (e.g. Chinese) readably
        # instead of \uXXXX escapes. The previous eval()/str() round-trip
        # was unsafe and produced invalid JSON (Python repr quoting, and a
        # NameError on JSON literals like true/false/null).
        line = json.dumps(dict(item), ensure_ascii=False) + "\n"
        self.file.write(line)
        return item

    def close_spider(self, spider):
        # Called automatically by Scrapy when the spider finishes;
        # flushes buffered lines and releases the file handle.
        self.file.close()