# -*- coding: utf-8 -*-
import   redisoper as rop
import scrapy
from scrapy.contrib.pipeline.images import ImagesPipeline
from scrapy.exceptions import DropItem
from scrapy.http import Request
import hashlib
import os
from scrapy.utils.python import to_bytes
import json
import requests
from items import FruitEncoder
from items import  FruitItem
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class ImageDownloadPipeline(object):
    """Download item['image_url'] to /work/scrapy/data/temp/<id>/<sha1>.jpg.

    Best-effort: the item is always returned (never dropped), even when
    the download fails.
    """

    def process_item(self, item, spider):
        """Fetch the item's image over HTTP and store it on disk.

        :param item: mapping with at least 'id' and 'image_url' keys
        :param spider: the spider that produced the item (unused)
        :returns: the item, unchanged
        """
        image_url = item['image_url']
        # Reject non-absolute URLs before touching the filesystem
        # (previously the directory was created even for bad URLs).
        if not image_url.startswith("http"):
            return item

        path = "/work/scrapy/data/temp/" + item['id']
        if not os.path.exists(path):
            os.makedirs(path)
        # sha1 requires bytes on Python 3; URLs are ASCII so utf-8 is safe.
        image_guid = hashlib.sha1(image_url.encode('utf-8')).hexdigest()
        file_path = "%s/%s.jpg" % (path, image_guid)

        try:
            requests.adapters.DEFAULT_RETRIES = 2
            response = requests.get(image_url, stream=True, timeout=3)
        except requests.RequestException:
            # Best-effort download: pass the item through on failure.
            # Fetching BEFORE opening the file avoids leaving a
            # zero-byte file behind (the original opened it first).
            return item
        with open(file_path, 'wb') as handle:
            for block in response.iter_content(1024):
                if not block:
                    break
                handle.write(block)
        return item

class BDImageDownloadPipeline(object):
    """Download item['image_url'] to /data/scrdata/bdimgs/<id>/<sha1>.jpg.

    Best-effort: the item is always returned (never dropped), even when
    the download fails.
    """

    def process_item(self, item, spider):
        """Fetch the item's image over HTTP and store it on disk.

        :param item: mapping with at least 'id' and 'image_url' keys
        :param spider: the spider that produced the item (unused)
        :returns: the item, unchanged
        """
        image_url = item['image_url']
        # Reject non-absolute URLs before touching the filesystem
        # (previously the directory was created even for bad URLs).
        if not image_url.startswith("http"):
            return item

        path = "/data/scrdata/bdimgs/" + item['id']
        if not os.path.exists(path):
            os.makedirs(path)
        # sha1 requires bytes on Python 3; URLs are ASCII so utf-8 is safe.
        image_guid = hashlib.sha1(image_url.encode('utf-8')).hexdigest()
        file_path = "%s/%s.jpg" % (path, image_guid)

        try:
            requests.adapters.DEFAULT_RETRIES = 2
            response = requests.get(image_url, stream=True, timeout=3)
        except requests.RequestException:
            # Best-effort download: pass the item through on failure.
            # Fetching BEFORE opening the file avoids leaving a
            # zero-byte file behind (the original opened it first).
            return item
        with open(file_path, 'wb') as handle:
            for block in response.iter_content(1024):
                if not block:
                    break
                handle.write(block)
        return item


class MyImagesPipeline(ImagesPipeline):
    """ImagesPipeline variant that groups downloaded images into a
    per-item directory named after item['id'] and records that
    directory on the item as 'image_path'."""

    def get_media_requests(self, item, info):
        """Yield one download request for the item's single image URL.

        The item id travels in request.meta so file_path() can place
        the file in the right per-item directory.
        """
        request = scrapy.Request(item['image_url'])
        request.meta['fruid'] = item['id']
        yield request

    def file_path(self, request, response=None, info=None):
        """Return the on-disk path for a downloaded image:
        /work/scrapy/data/fruitimgs/<item id>/<sha1 of url>.jpg
        """
        ## start of deprecation warning block (can be removed in the future)
        def _warn():
            from scrapy.exceptions import ScrapyDeprecationWarning
            import warnings
            warnings.warn('ImagesPipeline.image_key(url) and file_key(url) methods are deprecated, '
                          'please use file_path(request, response=None, info=None) instead',
                          category=ScrapyDeprecationWarning, stacklevel=1)

        # check if called from image_key or file_key with url as first argument
        if not isinstance(request, Request):
            _warn()
            url = request
        else:
            url = request.url

        # detect if file_key() or image_key() methods have been overridden
        if not hasattr(self.file_key, '_base'):
            _warn()
            return self.file_key(url)
        elif not hasattr(self.image_key, '_base'):
            _warn()
            return self.image_key(url)
        ## end of deprecation warning block

        image_guid = hashlib.sha1(to_bytes(url)).hexdigest()
        # change to request.url after deprecation
        fruit_id = request.meta['fruid']  # 'id' would shadow the builtin
        path = "/work/scrapy/data/fruitimgs/" + fruit_id
        if not os.path.exists(path):
            os.makedirs(path)
        return "%s/%s.jpg" % (path, image_guid)

    def item_completed(self, results, item, info):
        """Drop items whose image failed to download; otherwise record
        the per-item image directory on the item.

        :raises DropItem: when no image was successfully downloaded
        """
        image_paths = [x['path'] for ok, x in results if ok]
        if not image_paths:
            raise DropItem("Item contains no images")
        item['image_path'] = "/work/scrapy/data/fruitimgs/" + item['id']
        return item


class FruitPipeline(object):
    """Persist scraped fruit items to Redis.

    Each named item is stored as a "name,kal" string under "fruit_<seq>"
    (seq from the Redis "seq" auto-increment counter), and seq is pushed
    onto the "fruit_list" index list.
    """

    def convert_to_builtin_type(self, obj):
        """Return a shallow dict of obj's attributes (for json.dumps)."""
        return dict(obj.__dict__)

    def __init__(self):
        # Project-local Redis wrapper; connection details live in redisoper.
        self.r = rop.CRedis()

    def process_item(self, item, spider):
        """Write the item to Redis and return it.

        Items with an empty name are passed through without touching
        Redis. Raises KeyError if 'name' (or, for named items, 'kal')
        is missing, matching the original behavior.
        """
        if item["name"]:
            seq = self.r.incr("seq")  # global auto-increment item id
            key = "fruit_" + str(seq)
            value = item['name'] + "," + item['kal']
            self.r.set(key, value)
            self.r.lpush("fruit_list", seq)
        return item
