# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html

from scrapy import signals
from scrapy.xlib.pydispatch import dispatcher
import codecs
from collections import OrderedDict
import re
import json

class TerrytaoblogPipeline(object):
    """Default no-op pipeline: hands every item through unchanged."""

    def process_item(self, item, spider):
        # Nothing to transform here; pass the item to the next stage.
        return item

class terryBlogPipelineFile(object):
    """Pipeline that saves each crawled post to ./data/<title>.html and, when
    the spider closes, dumps the per-post metadata (depth/type keyed by a
    normalized label) to ./data/FileLevel.json.

    NOTE(review): ``dispatcher``/``signals`` come from the module-level
    imports; ``scrapy.xlib.pydispatch`` was removed in Scrapy 1.1 — newer
    Scrapy should use ``from pydispatch import dispatcher`` or the crawler
    signal API instead. Confirm the pinned Scrapy version before upgrading.
    """

    def __init__(self):
        # Flush the collected metadata to disk once the crawl finishes.
        dispatcher.connect(self.spider_closed, signals.spider_closed)
        self.itemsMeta = {}  # label -> {'depth': ..., 'type': ...}

    def process_item(self, item, spider):
        """Write item['content'] to ./data/<title>.html, record the item's
        depth/type under a normalized label, and return the item unchanged.
        """
        title = item['title']

        filename = self.filenameTrim(u'./data/' + title + u'.html')

        # Context manager instead of a per-item self.file attribute: the
        # handle is always closed, even if write() raises mid-item.
        with codecs.open(filename, 'w', encoding='utf-8') as out:
            out.write(item['content'])

        line = self.filenameTrim(title)
        print(line)

        # Strip the GBK en-dash byte pair, then keep only alphanumerics
        # (dropping underscores) to build a compact lookup key.
        line = line.replace('\xa8C', '')
        label = re.sub(r'\W', '', line).replace('_', '').lower()
        print(label)

        self.itemsMeta[label] = {
            'depth': item['depth'],
            'type': item['type'],
        }

        return item

    def filenameTrim(self, filename):
        """Encode the name with the Windows ANSI codepage and replace
        characters that are illegal or awkward in file names.

        NOTE(review): 'mbcs' is a Windows-only codec — this raises
        LookupError on other platforms; confirm the crawler only runs
        on Windows.
        """
        tempname = filename.encode('mbcs')
        # \xa1\xb0 / \xa1\xb1 are GBK curly double quotes; \xa1\xaf is a
        # GBK right single quote; '?' is illegal in Windows file names.
        return (tempname
                .replace('\xa1\xb0', '_')
                .replace('\xa1\xb1', '_')
                .replace('?', ' ')
                .replace('\xa1\xaf', '\''))

    def spider_closed(self, spider):
        """Signal handler: dump the accumulated metadata as one JSON line."""
        with codecs.open('./data/FileLevel.json', 'w', encoding='utf-8') as out:
            out.write(json.dumps(OrderedDict(self.itemsMeta),
                                 ensure_ascii=False, sort_keys=False) + "\n")
