# -*- coding: utf-8 -*-
import json
import codecs
import requests
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html


class AnjukePipeline(object):
	"""Scrapy item pipeline that serializes Anjuke real-estate agent
	records to a UTF-8 JSON-lines file (one JSON object per agent).
	"""

	def __init__(self):
		# Raw string keeps the exact same path value as before while avoiding
		# the original's inconsistent backslash escaping.
		# NOTE(review): hard-coded output location — consider moving to settings.
		self.file = codecs.open(
			r"E:\jiaocheng\F\Project\anjuke\data\name_list.json",
			'wb', encoding='utf-8')

	def process_item(self, item, spider):
		"""Write one JSON line per agent contained in ``item``; return ``item``.

		``item``'s fields are parallel lists: index ``i`` addresses the i-th
		agent.  ``item['phone_num']`` appears to carry two entries per agent
		and only the second one (index ``2*i + 1``) is kept — TODO confirm
		against the spider's extraction logic.
		"""
		for i, name in enumerate(item['name']):
			record = {
				"name": name,                               # agent name
				"grade": item['grade'][i],                  # overall rating score
				"fang_yuan": item['fang_yuan'][i],          # listing-description score
				"fang_yuan_com": item['fang_yuan_com'][i],  # comparison for the above
				"fu_wu": item['fu_wu'][i],                  # service-attitude score
				"fu_wu_com": item['fu_wu_com'][i],          # comparison for the above
				"ping_jia": item['ping_jia'][i],            # credit-rating score
				"ping_jia_com": item['ping_jia_com'][i],    # comparison for the above
				# Second phone entry of the pair; strip embedded spaces.
				"phone_num": item['phone_num'][2 * i + 1].replace(' ', ''),
			}
			# ensure_ascii=False keeps Chinese text readable in the output file.
			self.file.write(json.dumps(record, ensure_ascii=False) + '\n')

		return item

	def close_spider(self, spider):
		"""Close the output file when the spider finishes."""
		self.file.close()