# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo

from scrapy.conf import settings
from scrapy.exceptions import DropItem
from scrapy import log
import xlrd
from xlutils.copy import copy
import csv
# Pipeline module: persists scraped items after the spider yields them.
class CsdnPipeline(object):
    """Item pipeline that stores every scraped item in MongoDB.

    Connection parameters are read from the Scrapy settings:
    MONGODB_SERVER, MONGODB_PORT, MONGODB_DB, MONGODB_COLLECTION.
    """

    def __init__(self):
        # Open the client once per pipeline instance; keep only the
        # collection handle since that is all process_item needs.
        connection = pymongo.MongoClient(
            settings['MONGODB_SERVER'], settings['MONGODB_PORT'])
        db = connection[settings['MONGODB_DB']]
        self.collection = db[settings['MONGODB_COLLECTION']]

    def process_item(self, item, spider):
        """Validate and persist one scraped item.

        Args:
            item: the scraped item (dict-like; iterating yields field names).
            spider: the spider that produced the item.

        Returns:
            The item unchanged, so later pipelines can keep processing it.

        Raises:
            DropItem: if any field of the item has an empty/falsy value.
        """
        # Bug fix: the original tested `if not data` where `data` is the
        # *field name* (a non-empty string), so the emptiness check could
        # never trigger.  Check the field's value instead.  The old
        # `valid` flag was dead code — `raise` already aborts processing.
        for field in item:
            if not item[field]:
                raise DropItem("Missing data!")
        self.collection.insert(dict(item))  # persist the item
        # scrapy.log is deprecated; spider.logger is the supported API.
        spider.logger.debug("Article add to mongodb database!")
        return item