# coding=utf-8
import logging
import sys

import os

import time
import urllib2

# Python 2 hack: re-expose sys.setdefaultencoding (removed by site.py at
# startup) and force the process-wide default encoding to UTF-8 so implicit
# str<->unicode conversions of scraped content do not raise UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding('utf-8')

# Root directory under which all downloaded images are stored (Windows path).
image_store_base_path = "d:/pics"


def download_image(img_abs_url, image_name, host, customizer):
    """Download one image and persist it under the configured store path.

    Errors are logged, never raised: when the HTTP status code is in the
    customizer's banned list (anti-crawler block, e.g. 502), the function
    sleeps for the customizer-configured back-off before returning; any
    other failure is logged and swallowed.

    :param img_abs_url: absolute URL of the image to fetch.
    :param image_name: file name to store the downloaded image under.
    :param host: unused; kept for backward compatibility with callers.
    :param customizer: site-specific config object providing
        ``get_image_store_relative_path()``, ``get_banned_status_code_list()``
        and ``get_banned_sleep()``.
    """
    image_store_relative_path = customizer.get_image_store_relative_path()

    # Physical directory the image file is written into.
    image_store_path = '%s/%s' % (image_store_base_path, image_store_relative_path)

    if not os.path.exists(image_store_path):
        os.makedirs(image_store_path)

    try:
        logging.info("downloading from %s" % img_abs_url)
        image = download_page(img_abs_url, customizer)
        dump_to_disk(image, image_store_path, image_name)
    except urllib2.HTTPError as e:
        logging.error("HTTPError: downloading image error...")
        logging.error(e)
        # A banned status code means the site's anti-crawler kicked in;
        # back off before the next download attempt.
        if str(e.code) in customizer.get_banned_status_code_list():
            logging.info("caught %s exception, sleeping 15 mins to avoid anti-crawler mechanism" % e.code)
            banned_sleep_time = customizer.get_banned_sleep()
            time.sleep(banned_sleep_time)  # sleep seconds to avoid anti-crawler mechanism.
    except Exception as e2:
        logging.error("Unknown Error: downloading image %s error..." % img_abs_url)
        logging.error(e2)

def dump_to_disk(image, path, file_name):
    """Write raw image bytes to ``<path>/<file_name>``.

    :param image: raw image content (byte string) to persist.
    :param path: target directory; a trailing ``/`` is tolerated.
    :param file_name: name of the file to create inside *path*.
    """
    target = '%s/%s' % (path.rstrip('/'), file_name)
    with open(target, 'wb') as out_file:
        out_file.write(image)
        logging.info("downloaded to %s" % target)


def download_page(url, customizer):
    """Fetch *url* and return the raw response body.

    Sends the customizer-provided request headers and sleeps for the
    customizer's download delay before issuing the request, as crude
    rate limiting against anti-crawler throttling.

    :param url: absolute URL to fetch.
    :param customizer: site-specific config object providing
        ``get_headers()`` and ``get_download_delay()``.
    :return: raw response body (byte string).
    :raises urllib2.HTTPError, urllib2.URLError: on HTTP / network failure.
    """
    logging.debug("entering download_page method...")

    headers = customizer.get_headers()
    logging.info("request headers is %s..." % headers)

    # Polite per-request delay to avoid tripping anti-crawler throttling.
    rand = customizer.get_download_delay()
    logging.info("sleeping %s s..." % rand)

    time.sleep(rand)

    request = urllib2.Request(url=url, headers=headers)
    response = urllib2.urlopen(request)
    try:
        return response.read()
    finally:
        # urllib2 responses are not context managers in Python 2;
        # close explicitly so the underlying socket is not leaked.
        response.close()