# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
#from rule.items import Rule1Item
from scrapy.pipelines.files import FilesPipeline
from scrapy import log
from twisted.enterprise import adbapi
from scrapy.exceptions import DropItem
from scrapy.http import Request
import time
#import MySQLdb
#import MySQLdb.cursors
from scrapy.conf import settings 

class RulePipeline(object):
    """Item pipeline that validates items and stores them in MongoDB.

    Connection parameters (host, port, database name and collection name)
    are read from the Scrapy settings at construction time.
    """

    def __init__(self):
        host = settings['MONGODB_HOST']
        port = settings['MONGODB_PORT']
        dbName = settings['MONGODB_DBNAME']
        client = pymongo.MongoClient(host=host, port=port)
        db = client[dbName]
        # Collection every processed item is inserted into.
        self.collection = db[settings['MONGODB_DOCNAME']]

    def process_item(self, item, spider):
        """Validate all fields, insert the item into MongoDB, return it.

        Raises DropItem when any field has an empty/falsy value.  The
        original code tested the field *name* (``if not data``), which is
        a non-empty string and therefore always truthy, so the check could
        never fire; it now tests the field's value.
        """
        for field in item:
            if not item[field]:
                raise DropItem("Missing {0}!".format(field))
        self.collection.insert(dict(item))
        log.msg("success added to MongoDB database!", level=log.DEBUG, spider=spider)
        return item
class RuleFilesPipeline(FilesPipeline):
    """Files pipeline that downloads item["file_urls"] and records the
    stored path on the item under "files_path".
    """

    def get_media_requests(self, item, info):
        """Yield one download request per URL in item["file_urls"].

        The original yielded ``scrapy.Request(url)``, but the module never
        imports ``scrapy`` itself — only ``Request`` (from scrapy.http) —
        so it raised NameError at runtime.
        """
        for url in item["file_urls"]:
            yield Request(url)

    def item_completed(self, results, item, info):
        """Record the path of the last successfully downloaded file.

        ``results`` is a list of ``(success, value)`` tuples; on failure
        ``value`` is a Failure object with no "path" key, so failed
        results must be skipped (the original indexed every value and
        crashed whenever a download failed).
        """
        for ok, value in results:
            if ok:
                # Last-write-wins, matching the original behavior.
                # NOTE(review): a list of all paths may be the real
                # intent here — confirm with downstream consumers.
                item["files_path"] = value["path"]
        return item
#class Rule2FilesPipeline(FilesPipeline):
#    FILES_URLS_FIELD = 'com_re_urls'
#    FILES_RESULT_FIELD = 'com_re'
#    def get_media_requests(self,item,info):
#        for url in item["com_re_urls"]:
#            yield scrapy.Request(url)
#    def item_completed(self,results,item,info):
#        file_paths = [x["path"] for ok,x in results if ok]
#        if not file_paths:
#            raise DropItem("Item contains no images")
