# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import codecs
import copy
import json
import logging
import os
import re
import time
import urllib
import urllib2

from twisted.enterprise import adbapi


class ScrapySinahealthPipeline(object):
    """Default no-op pipeline: passes every item through unchanged."""

    def process_item(self, item, spider):
        # Nothing to transform here; hand the item to the next pipeline stage.
        return item


class FlatFilePipeline(object):
    """Appends each item's content to a per-title text file on disk."""

    def __init__(self, output_dir='D:/03.Documents/bxjg'):
        # The default preserves the historical hard-coded location; pass a
        # different directory to redirect output (e.g. on non-Windows hosts).
        self.output_dir = output_dir

    def process_item(self, item, spider):
        # utf-8-sig writes a BOM so Windows editors auto-detect the encoding;
        # append mode lets multiple pages of one article accumulate in a file.
        path = '%s/%s.txt' % (self.output_dir, item['title'])
        with codecs.open(path, mode='ab+', encoding='utf-8-sig') as f:
            f.write(item['content'])
        return item


class JsonPipeline(object):
    """Appends each scraped item to sinahealth.json, one JSON object per line."""

    def __init__(self):
        # Append mode so repeated crawls accumulate rather than overwrite.
        self.file = codecs.open('sinahealth.json', mode='ab+', encoding='utf-8')

    def process_item(self, item, spider):
        # Deep-copy so later pipeline stages cannot mutate what we serialize.
        item_copy = copy.deepcopy(item)
        # ensure_ascii=False keeps non-ASCII text readable in the output file.
        # (The previous decode("unicode_escape") round-trip corrupted any data
        # that contained literal backslash sequences.)
        line = json.dumps(dict(item_copy), ensure_ascii=False) + "\n"
        self.file.write(line)
        return item

    def close_spider(self, spider):
        # Scrapy calls this when the spider finishes; fixes the handle leak.
        self.file.close()


class ImageLoadPipeline(object):
    """Downloads every image referenced by an item, stores it on disk, and
    rewrites the item's content so image sources point at the image server's
    virtual path instead of the remote origin."""

    @classmethod
    def from_crawler(cls, crawler):
        # Both paths come from project settings so deployments can relocate
        # the image store without code changes.
        image_store_base_path = crawler.settings['IMAGES_STORE_BASE_PATH']
        image_server_virtual_base_path = crawler.settings['IMAGES_SERVER_VIRTUAL_BASE_PATH']
        return cls(image_store_base_path, image_server_virtual_base_path)

    def __init__(self, image_store_base_path, image_server_virtual_base_path):
        self.image_store_base_path = image_store_base_path
        self.image_server_virtual_base_path = image_server_virtual_base_path

    def process_item(self, item, spider):
        customizer = spider.customizer
        image_store_path = '%s/%s' % (self.image_store_base_path, spider.name)  # physical storage path
        image_server_virtual_path = '%s/%s' % (self.image_server_virtual_base_path, spider.name)  # virtual (URL) path
        if not os.path.exists(image_store_path):
            os.makedirs(image_store_path)
        logging.info("image store path is %s..." % image_store_path)
        for index, image_url in enumerate(item['image_urls']):
            if self.is_absolute_url(image_url):
                img_abs_url = image_url
            else:
                host = self.get_host(item["url"])
                img_abs_url = self.update_to_abs(image_url, host)
            try:
                logging.info("downloading from %s" % img_abs_url)
                if spider.name == 'echinalife':
                    # echinalife serves PDF appendixes rather than images.
                    image_name = item["image_appendixes"][index] + '.pdf'
                else:
                    image_name = self.get_image_file_name(img_abs_url)
                image_server_virtual_path = self.remove_last_slash(image_server_virtual_path)
                new_image_full_path = "%s/%s" % (image_server_virtual_path, image_name)
                # Point the stored HTML at our local copy of the image.
                item['content'] = spider.replace_img_src(item['content'], image_url, new_image_full_path)
                image_store_path = self.remove_last_slash(image_store_path)
                self.download_page(img_abs_url, customizer, image_name, image_store_path)
            except urllib2.HTTPError as e:  # `as` form: valid on Py2.6+ and Py3
                logging.error("HTTPError: downloading image error...")
                logging.error(e)
                if str(e.code) in customizer.get_banned_status_code_list():
                    banned_sleep_time = customizer.get_banned_sleep()
                    logging.info("caught %s exception, sleeping %d seconds to avoid anti-crawler mechanism" % (e.code, banned_sleep_time))
                    time.sleep(banned_sleep_time)  # back off to avoid the anti-crawler ban
            except Exception as e2:
                # Best-effort: one failed image must not abort the whole item.
                logging.error("Unknown Error: downloading image %s error..." % img_abs_url)
                logging.error(e2)

        return item

    @staticmethod
    def get_image_file_name(img_abs_url):
        """Derive a flat, filesystem-safe file name from an absolute image URL.

        The scheme is stripped, URL separator characters are removed, the
        original extension is re-appended, and the result is capped at the
        last 64 characters to stay within filesystem name limits.
        """
        _, ext = os.path.splitext(img_abs_url)
        scheme_re = re.compile(r'^https?://', re.IGNORECASE)
        separator_re = re.compile(r'[/.?=:]')
        flattened = separator_re.sub('', scheme_re.sub('', img_abs_url))
        full = flattened + ext
        # Keep the tail so the more distinctive file-name part survives.
        return full[-64:] if len(full) > 64 else full

    @staticmethod
    def remove_last_slash(path):
        """Strip any trailing '/' characters from *path*."""
        return path.rstrip('/')

    @staticmethod
    def dump_to_disk(image, path, file_name):
        """Write raw *image* bytes to path/file_name (legacy helper)."""
        full_path = '%s/%s' % (path, file_name)
        with open(full_path, 'wb') as fp:
            fp.write(image)
            logging.info("downloaded to %s" % full_path)

    @staticmethod
    def download_page(url, customizer, image_name, image_store_path):
        """Fetch *url* with the customizer's headers and stream it to disk.

        Sleeps a customizer-provided delay first to stay under the site's
        anti-crawler radar.
        """
        logging.debug("entering download_page method...")
        headers = customizer.get_headers()
        logging.info("request headers is %s..." % headers)
        rand = customizer.get_download_delay()
        logging.info("sleeping %s s..." % rand)
        time.sleep(rand)
        request = urllib2.Request(url=url, headers=headers)
        response = urllib2.urlopen(request)
        block_sz = 8192
        try:
            # `with` guarantees the file handle is closed even on a read error
            # (the original leaked it on exception, and never closed response).
            with open(image_store_path + '/' + image_name, 'wb') as f:
                while True:
                    buf = response.read(block_sz)
                    if not buf:
                        break
                    f.write(buf)
        finally:
            response.close()

    @staticmethod
    def is_absolute_url(url):
        """Return True when *url* already carries an http(s) scheme."""
        return url.startswith(("http://", "https://"))

    @staticmethod
    def update_to_abs(img_relative_url, base_url):
        """Resolve a relative image URL against *base_url*.

        Protocol-relative URLs ('//host/...') get an explicit http scheme;
        '../' prefixes are collapsed to the site root.
        """
        if img_relative_url.startswith("//"):
            return "http:" + img_relative_url
        if img_relative_url.startswith("../"):
            img_relative_url = "/" + img_relative_url.replace('../', '')
        elif not img_relative_url.startswith("/"):
            # Fix: bare relative paths previously produced 'hostpath' with no
            # separator; ensure a slash between host and path.
            img_relative_url = "/" + img_relative_url
        return base_url + img_relative_url

    @staticmethod
    def get_host(url):
        """Return 'scheme://host' (no trailing slash) for *url* (Py2 urllib)."""
        protocol, rest = urllib.splittype(url)
        host, _ = urllib.splithost(rest)
        return ("%s://%s" % (protocol, host)).rstrip('/')

class SQLStorePipeline(object):
    """Asynchronously persists items into the t_sinahealth MySQL table via
    Twisted's adbapi connection pool."""

    def __init__(self):
        # NOTE(review): credentials are hard-coded; consider moving them to
        # Scrapy settings the way ImageLoadPipeline does.
        self.dbpool = adbapi.ConnectionPool('MySQLdb', db='scrapydb',
                                            user='root', passwd='root1234',
                                            charset='utf8', use_unicode=True)

    def process_item(self, item, spider):
        # Run the insert on adbapi's thread pool so the reactor isn't blocked.
        query = self.dbpool.runInteraction(self._conditional_insert, item)
        query.addErrback(self.handle_error)

        return item

    def _conditional_insert(self, tx, item):
        # Runs on a pool thread with its own transaction cursor (tx).
        if isinstance(item['issue_date'], time.struct_time):
            # Fix: format the item's own timestamp. The old code formatted
            # time.localtime() and silently replaced the real issue date
            # with the time of the insert.
            item['issue_date'] = time.strftime("%Y-%m-%d %X", item['issue_date'])
        tx.execute(
            "INSERT INTO t_sinahealth "
            "(title, content, spider, category, tag, url, referer, issue_date, source, createDtTm) "
            "values (%s, %s, %s, %s, %s, %s, %s, %s, %s, current_timestamp) ",
            (item['title'], item['content'], item['spider'], item['category'],
             item['tag'], item['url'], item['referer'], item['issue_date'],
             item['source'])
        )
        print("Item stored in db: %s" % item['title'])

    def handle_error(self, e):
        # Errback for failed inserts; just log to stdout.
        print("Item stored in db error: %s" % e)