#!/usr/bin/env python
# -*- coding: utf-8 -*-

# import sys
import logging
import socket
import requests
import random
import urlparse
import time
from datetime import datetime
from utilities import MongoStore


SLEEP_TIME = 3
DEFAULT_DELAY = 1
DEFAULT_RETRIES = 1
DEFAULT_TIMEOUT = 30

# logging.basicConfig(stream=sys.stderr, level=logging.INFO)


class Downloader(object):
    """Throttled, cache-aware, proxy-rotating URL downloader.

    An instance is a callable: ``downloader(url)`` returns the page html.
    Results are optionally served from / stored into ``cache`` (any mapping
    keyed by url holding ``{'html': ..., 'code': ..., 'url': ...}``), and
    requests can be routed through a randomly chosen proxy from ``proxies``.
    Proxies that misbehave are removed from the pool via ``fuck_bad_px``.
    """

    # set a callback function to parse html here
    def __init__(self, delay=DEFAULT_DELAY, headers=None, proxies=None, num_retries=DEFAULT_RETRIES, timeout=DEFAULT_TIMEOUT, cache=None, email_notify=False, remove_proxy=True):
        # delay: minimum seconds between two hits on the same domain
        # headers: optional dict of http headers sent with every request
        # proxies: sequence of (scheme, proxy_url) pairs to pick from
        # num_retries: how many times a 5XX response is re-downloaded
        # timeout: socket / request timeout in seconds
        # cache: optional mapping used as url -> result cache
        # email_notify: kept for callers; not used inside this class
        # remove_proxy: drop a proxy from the pool when it misbehaves
        socket.setdefaulttimeout(timeout)
        self.throttle = Throttle(delay)
        self.headers = headers
        self.proxies = proxies
        self.num_retries = num_retries
        # keep the timeout so it can be passed to requests explicitly --
        # requests does NOT honour socket.setdefaulttimeout(), so without an
        # explicit timeout= a request could block indefinitely
        self.timeout = timeout
        self.cache = cache
        self.email_notify = email_notify
        self.remove_proxy = remove_proxy

    def __call__(self, url):
        '''inspect url cache, if has, then inspect web server response error
        if no cache or response error, execute downloads
        '''
        result = None
        if self.cache:
            try:
                result = self.cache[url]
            except KeyError:
                # url is not available in cache
                pass
            else:
                if self.num_retries > 0 and 500 <= result['code'] < 600:
                    # server error so ignore result from cache and re-download
                    result = None
        if result is None:
            # result was not loaded from cache so still need to download
            self.throttle.wait(url)
            # obtain an ip proxy from the proxy pool and convert the
            # (scheme, proxy_url) pair to the dictionary form requests expects
            proxy = dict([random.choice(self.proxies)]
                         ) if self.proxies else None
            logging.debug('Now IP Proxy is: %s', proxy)
            result = self.download(
                url, self.headers, proxy=proxy, num_retries=self.num_retries)
            if self.num_retries > 0 and 400 <= result['code'] < 500:
                # http request error
                # it cause of bad ip proxy in most cases
                logging.warning(
                    '[download.py-Downloader.download]>>Unexpected download -- [normal url]:{} -- [error url]:{} -- [error status code]:{}'.format(url, result['url'], result['code']))
                # guard against proxy=None: no pool means nothing to remove
                if self.remove_proxy and proxy:
                    fuck_bad_px(proxy)
            if self.remove_proxy and proxy and isforbidden(result['url']):
                # if current ip proxy is forbidden, delete it
                fuck_bad_px(proxy)
            if self.cache:
                # save result to cache
                self.cache[url] = result
        return result['html']

    def download(self, url, headers, proxy, num_retries, params=None, data=None, cookies=None):
        """Fetch ``url`` once with requests, retrying 5XX errors.

        Returns a dict ``{'html': ..., 'code': ..., 'url': ...}``; on a
        network error ``html``/``url`` are '' and ``code`` is None.
        """
        if not proxy:
            proxy = dict([random.choice(self.proxies)]
                         ) if self.proxies else None
            logging.warning(
                '[download.py-Downloader.download]-[PROXY]>>Acquire ip proxy again, proxy is: %s', proxy)
        try:
            # requests' proxy structure
            # proxies={
            # 'http': 'http://11.11.11.11.:80',
            # 'https': 'https://12/12/12/12:90'
            # }
            # pass the configured timeout explicitly (see __init__ note)
            response = requests.get(
                url, headers=headers or {}, proxies=proxy, params=params,
                data=data, cookies=cookies, timeout=self.timeout)
            html = response.content
            code = response.status_code
            url = response.url
        except requests.ConnectionError as er:
            html, url = '', ''
            code = None
            logging.error(
                '[download.py-Downloader.download]>>Connection error: %s', er)
            if self.remove_proxy and proxy:
                fuck_bad_px(proxy)
        except requests.TooManyRedirects as err:
            html, url = '', ''
            code = None
            logging.error(
                '[download.py-Downloader.download]>>Redirect error: %s', err)
            if self.remove_proxy and proxy:
                fuck_bad_px(proxy)
        except requests.Timeout as ert:
            html, url = '', ''
            code = None
            # log the timeout like the sibling handlers instead of silently
            # discarding it
            logging.error(
                '[download.py-Downloader.download]>>Timeout error: %s', ert)
            if self.remove_proxy and proxy:
                fuck_bad_px(proxy)
        except requests.RequestException as e:
            # BUG FIX: retry through self.download (self._get never existed)
            # and retry BEFORE blanking url, so the original url is refetched
            if hasattr(e, 'code') and num_retries > 0 and 500 <= e.code < 600:
                # retry 5XX HTTP errors
                return self.download(url, headers, proxy, num_retries - 1, params, data, cookies)
            html, url = '', ''
            code = e.code if hasattr(e, 'code') else None
        return {'html': html, 'code': code, 'url': url}


class Throttle(object):
    """Throttle downloading by sleeping between requests to same domain
    """

    def __init__(self, delay):
        # amount of delay (seconds) between downloads for each domain
        self.delay = delay
        # timestamp of when a domain was last accessed
        self.domains = {}

    def wait(self, url):
        """Delay if have accessed this domain recently
        """
        domain = urlparse.urlsplit(url).netloc
        last_accessed = self.domains.get(domain)
        if self.delay > 0 and last_accessed is not None:
            # BUG FIX: use total_seconds() -- timedelta.seconds ignores the
            # days component, so a gap of e.g. 1 day + 1s looked like 1s and
            # triggered a spurious sleep
            elapsed = (datetime.now() - last_accessed).total_seconds()
            sleep_secs = self.delay - elapsed
            if sleep_secs > 0:
                # logging.debug('[Throttle] sleep for %s seconds', sleep_secs)
                time.sleep(sleep_secs)
        self.domains[domain] = datetime.now()


def fuck_bad_px(proxy):
    '''remove bad ip proxy from mongodb

    proxy -- dict in requests form, e.g. {'http': 'http://1.2.3.4:80'};
    a falsy value (None / empty dict) is silently ignored.
    '''
    # BUG FIX: callers may pass None when no proxy pool is configured; the
    # old code crashed with AttributeError on proxy.values()
    if not proxy:
        return
    pxstore = MongoStore()
    # 'http://1.2.3.4:80' -> split(':')[1] == '//1.2.3.4' -> strip slashes
    # (list(...) keeps this working on both Python 2 and 3 dict views)
    ip = list(proxy.values())[0].split(':')[1].strip('//')
    pxstore.delete([('ip', ip)])
    pxstore.close()


# define the website's forbidden rule which you crawl
# you should design it for yourself
def isforbidden(redirect_url):
    '''validate that whether the current http request is forbidden
    if forbidden, the caller removes the offending proxy

    Returns True when the (possibly redirected) url path contains one of the
    site's known anti-spider page segments.
    '''
    parsed_url = urlparse.urlparse(redirect_url)
    # netloc = parsed_url.netloc
    segments = parsed_url.path.split('/')
    # BUG FIX: the old test `'forbidden' or 'fb.html' or ... in segments` was
    # always True (the first string literal is truthy), so EVERY download was
    # treated as forbidden and every proxy got deleted
    return any(token in segments for token in ('forbidden', 'fb.html', 'lagouhtml'))
