# -*- coding:utf-8 -*-
## author : cypro666
## note   : python3.4+
"""
Parallel crawlers(not vertical) using diffrent libraries:
    pycrul
    requests
    tornado
"""
import io
import sys
import time
import asyncio
import threading, queue
from collections import deque
from http.client import responses as retcodemean

import pycurl
import requests
from tornado import gen
from tornado import ioloop
from tornado.httputil import HTTPHeaders
from tornado.httpclient import HTTPRequest, AsyncHTTPClient
from tornado.curl_httpclient import CurlAsyncHTTPClient

from .iteralgos import ntake
from .debug import print_exception, time_meter

__all__ = ['AsyncCrawler', 'MtCrawler', 'CoCrawler']

REQUEST_TIMEOUT = 30   # seconds, shared by all three crawler backends

MAX_THREADS = 512      # hard upper bound on worker threads for Mt/Co crawlers

CRAWL_SCHED = 1024     # AsyncCrawler response queue capacity (plain int; parens removed)


# Pool of desktop user agents. NOTE: a missing trailing comma used to
# implicitly concatenate the first two strings, leaving only 3 entries and
# shifting USER_AGENTS[1]; the comma is restored here.
USER_AGENTS = [
    'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0)',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20100101 Firefox/29.0',
    'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:29.0) Gecko/20100101 Firefox/29.0',
    'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.114'
]

# "Key: value" header lines in the form pycurl's HTTPHEADER expects;
# CoFetcher later splits each line on ': ' to build a requests header dict.
CURL_HEAD = [
    'User-Agent: ' + USER_AGENTS[1],
    'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0',
    'Accept-Language: zh-cn,zh;q=0.8,en-us;q=0.5,en;q=0.3',
    'Accept-Encoding: gzip, deflate',
    'Connection: close'
]

# Default headers for tornado requests; note the User-Agent is supplied
# separately through TORNADO_OPTION['user_agent'], not as a header here.
TORNADO_HEAD = HTTPHeaders()
TORNADO_HEAD['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0'
TORNADO_HEAD['Accept-Language'] = 'zh-cn,zh;q=0.8,en-us;q=0.5,en;q=0.3'
TORNADO_HEAD['Accept-Encoding'] = 'gzip, deflate'
TORNADO_HEAD['Connection'] = 'close'    


# Default keyword arguments handed to tornado's HTTPRequest by
# AsyncFetcher.setopt() when no explicit options are given.
TORNADO_OPTION = dict(
    method='GET',                       # read-only crawling: GET only
    headers=TORNADO_HEAD,
    body=None,
    connect_timeout=REQUEST_TIMEOUT * 2,
    request_timeout=REQUEST_TIMEOUT,
    follow_redirects=True,
    max_redirects=3,
    user_agent=USER_AGENTS[1],
    use_gzip=True,
    auth_username=None,
    auth_password=None,
    auth_mode=None,
    if_modified_since=None,
    header_callback=None,
    streaming_callback=None,
    prepare_curl_callback=None,
    proxy_host=None,
    proxy_port=None,
    proxy_username=None,
    proxy_password=None,
    allow_nonstandard_methods=False,
)


# Module-wide side effect: force tornado's async HTTP client to the
# libcurl-backed implementation for every AsyncHTTPClient created afterwards.
AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")

class AsyncFetcher(CurlAsyncHTTPClient):
    """Tornado-based URL fetcher: a thin wrapper over CurlAsyncHTTPClient."""
    def __init__(self):
        """max_clients should be in 4~16"""
        super(AsyncFetcher, self).__init__()
        try:
            # tune every pooled curl handle that the client pre-created
            for handle in self._curls:
                handle.setopt(pycurl.NOSIGNAL, 1)
                handle.setopt(pycurl.TIMEOUT, REQUEST_TIMEOUT)
                handle.setopt(pycurl.CONNECTTIMEOUT, REQUEST_TIMEOUT)
                handle.setopt(pycurl.DNS_CACHE_TIMEOUT, 3600)  # very important
                handle.setopt(pycurl.DNS_USE_GLOBAL_CACHE, 1)  # deprecate???
        except pycurl.error:
            pass  # best effort: keep libcurl defaults if an option is unsupported

    def setopt(self, **kwargs):
        """Record the HTTPRequest options perform() will use; no kwargs -> defaults."""
        self.request_options = kwargs if kwargs else TORNADO_OPTION

    @gen.coroutine
    def perform(self, url, callback):
        """Fetch one url, hand the response to callback, return (url, code, reason)."""
        request = HTTPRequest(url, **self.request_options)
        response = yield gen.Task(self.fetch, request)
        callback(response)
        return (response.request.url, response.code, response.reason)



class AsyncCrawler(threading.Thread):
    """ crawler based on producer and consumer queue, driven by tornado's ioloop """
    def __init__(self, urls):
        """ `urls` should be a list or tuple of http(s) urls """
        threading.Thread.__init__(self)
        assert hasattr(urls, '__iter__')
        self.daemon = True
        self.__urls = urls      # urls to fetch; validated lazily inside run()
        self.__counter = 0      # number of responses handled so far
        self.__fetcher = AsyncFetcher()
        self.__ioloop = ioloop.IOLoop.instance()   # get global ioloop singleton instance
        self.__queue = queue.Queue(maxsize = CRAWL_SCHED)
        self.__futures = None   # set in run(); its len() is the total url count
        self.NoRespone = queue.Empty    # kept for backward compatibility (misspelled)
        self.NoResponse = queue.Empty   # preferred alias for the same exception

    def iolooping(self):
        """ is still running """
        return self.is_alive()

    def response_arrived(self):
        """ if queue is not empty return true """
        return not self.__queue.empty()

    def acquire_response(self):
        """ return new fetched response; raises self.NoResponse if none ready """
        item = self.__queue.get(block = False)
        self.__queue.task_done()
        return item

    def max_queue_size(self):
        """ return queue capacity """
        return CRAWL_SCHED

    def __monitor(self):
        """ print progress and stop the ioloop when every url has finished """
        self.__counter += 1
        if not (self.__counter & 127): # print info every 128 urls
            sys.stderr.write(str(self.__counter) + ' urls finished\n')
        if self.__counter >= len(self.__futures):
            self.__ioloop.stop()    # stop when all urls have been fetched

    def __callback(self, response):
        """ internal: enqueue the response, then update the progress counter """
        self.__queue.put(response, block = True)
        self.__monitor()

    def run(self):
        """ run fetching in a thread, then another thread can visit queue """
        if not self.__urls:
            return
        self.__fetcher.setopt(**TORNADO_OPTION) # global default request option
        self.__counter = 0  # this counter is very important

        def perform(url):
            if url[:7] != 'http://' and url[:8] != 'https://':
                raise ValueError('AsyncSpider : %s is invalid' % url)
            return self.__fetcher.perform(url, self.__callback)

        self.__futures = list(map(perform, self.__urls))
        # fixed typo in log message: was 'stared'
        sys.stderr.write('AsyncSpider started, num of effect urls : %d\n' % len(self.__futures))

        self.__ioloop.start() # run the async ioloop until __monitor() stops it

        sys.stderr.write('AsyncSpider finished!\n')



## global_curl_share = pycurl.CurlShare() # this obj should be in global
## global_curl_share.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_COOKIE)
## global_curl_share.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_DNS)

class MtFetcher(threading.Thread):
    """ fetch urls using pycurl, a wrapper of Curl """
    def __init__(self, urls, callback):
        """ `urls` should be a list or tuple; `callback` receives a
        (url, raw_header_bytes, body_bytes) tuple for every url, even on error """
        threading.Thread.__init__(self)
        assert callback and hasattr(urls, '__iter__')
        self.daemon = True
        self.__response_head = b''  # in py3, header response from pycurl is bytes object
        self.__curl = pycurl.Curl()
        # self.__curl.setopt(pycurl.SHARE, global_curl_share)
        self.__curl.setopt(pycurl.HEADERFUNCTION, self._store_response_head)
        self.__urls = urls
        self.__callback = callback

    def _store_response_head(self, res):
        """ append new http head lines (removed dead trailing `pass`) """
        self.__response_head += res

    def setopt(self,
               maxredirs = 3,       httpheader = CURL_HEAD,
               forbid_reuse = 0,    timeout = REQUEST_TIMEOUT,
               followlocation = 1,  connecttimeout = 60,
               fresh_connect = 1,   verbose = 1):
        """ set options for CURL, do Not change default options if not sure what's the meaning """
        setopt = self.__curl.setopt
        setopt(pycurl.VERBOSE, verbose)
        setopt(pycurl.FOLLOWLOCATION, followlocation)
        setopt(pycurl.CONNECTTIMEOUT, connecttimeout)
        setopt(pycurl.TIMEOUT, timeout)
        setopt(pycurl.FRESH_CONNECT, fresh_connect)
        setopt(pycurl.FORBID_REUSE, forbid_reuse)
        setopt(pycurl.HTTPHEADER, httpheader)
        setopt(pycurl.MAXREDIRS, maxredirs)
        setopt(pycurl.NOSIGNAL, 1)              # required for curl in threads
        setopt(pycurl.DNS_CACHE_TIMEOUT, 3600)  # cache DNS lookups for an hour
        setopt(pycurl.DNS_USE_GLOBAL_CACHE, 1)  # NOTE(review): deprecated in newer libcurl
        setopt(pycurl.ENCODING, 'gzip')
        setopt(pycurl.SSL_VERIFYHOST, 2)
        setopt(pycurl.COOKIEFILE, '/dev/null')  # enable cookie engine without persisting
        setopt(pycurl.HTTPGET, 1)

    def run(self):
        """ thread start: fetch each url in turn, best-effort """
        for url in self.__urls:
            try:
                bytesio = io.BytesIO()
                self.__response_head = b''
                self.__curl.setopt(pycurl.URL, url)
                self.__curl.setopt(pycurl.WRITEFUNCTION, bytesio.write)
                self.__curl.perform()
                if __debug__ and self.__curl.getinfo(pycurl.HTTP_CODE) >= 400:
                    sys.stderr.write('4xx:'+url[0:50]+'...\n')
            except Exception as e:
                sys.stderr.write(str(e)+'\n')
            # deliberately fires even after a failure, with whatever was collected
            result = (url, self.__response_head, bytesio.getvalue())
            self.__callback(result)

    def close(self):
        """ release the underlying curl handle """
        self.__curl.close()



class MtCrawler(threading.Thread):
    """ crawler based on producer and consumer queue """
    def __init__(self, urls, max_threads):
        """ max_threads should not be too large """
        assert hasattr(urls, '__iter__') and max_threads >= 1
        threading.Thread.__init__(self)
        for u in urls:
            if u[:7] != 'http://' and u[:8] != 'https://':
                raise ValueError('MtSpider : %s is invalid' % u)
        if max_threads > MAX_THREADS:
            max_threads = MAX_THREADS
        self.__urls = list(iter(urls))
        self.__result = None
        self.__counter = 0
        self.__sched = len(self.__urls) // max_threads
        if not self.__sched:
            self.__sched = 1
        self.__queue = queue.Queue(maxsize = len(self.__urls))
        self.NoRespone = queue.Empty
        self.daemon = True             
        threading.stack_size(1<<16) 

    def response_arrived(self):
        """ if queue is not empty return true """
        return not self.__queue.empty()
    
    def acquire_response(self):
        """ return new fetched response """
        item = self.__queue.get(block = False)
        self.__queue.task_done()
        return item
    
    def max_queue_size(self):
        """ return queue size """
        return len(self.__urls)
    
    def __callback(self, result):
        self.__counter += 1
        if not (self.__counter & 127): 
            sys.stderr.write(str(self.__counter) + ' urls finished\n')
        self.__queue.put(result, block = True)
    
    def run(self):
        """ thread start """
        begin = 0
        threads = []   
        while begin < len(self.__urls): 
            mtf = MtFetcher(ntake(self.__urls, begin, self.__sched), self.__callback)
            mtf.setopt(verbose = 0, 
                       followlocation = 0, 
                       connecttimeout = 60, 
                       timeout = REQUEST_TIMEOUT, 
                       fresh_connect = 1, 
                       forbid_reuse = 1, 
                       httpheader = CURL_HEAD, 
                       maxredirs = 5)
            mtf.start()
            threads.append(mtf)
            begin += self.__sched
        while True: 
            for i in range(len(threads)):
                if threads[i].is_alive():
                    time.sleep(0.1)
                    continue
                threads[i].join()
                threads[i].close() 
                del threads[i] 
                time.sleep(0.1)
                break           
            if not threads: 
                break
        sys.stderr.write('MtSpider finished!')



class CoFetcher(object):
    """ fetch urls using requests, a wrapper of requests """
    def __init__(self, urls, callback, sched, timeout = REQUEST_TIMEOUT):
        """ sched should be less than len(urls) """
        assert callback and hasattr(urls, '__iter__')
        self.__urls = urls
        self.__callback = callback
        self.__timeout = timeout
        self.__sched = sched
        # convert the pycurl-style "Key: value" lines into a dict for requests
        self.__header = {}
        for line in CURL_HEAD:
            (k, v) = line.split(': ')
            self.__header[k] = v

    def __fetch(self, url):
        """ fetch one url; always hands a (url, headers, text) tuple to callback """
        try:
            ret = requests.get(url, timeout = self.__timeout, headers = self.__header)
            # was str(ret), which stored "<Response [200]>"; store the status code
            ret.headers['code'] = str(ret.status_code)
            result = (url, ret.headers, ret.text)
        except Exception as e:
            # was sys.stderr.write(a, b, c, d): write() takes exactly one string,
            # so the old code raised TypeError instead of logging the error
            sys.stderr.write('CoSpider perform: %s from %s...\n' % (e, url[0:50]))
            result = (url, '', '')
        return self.__callback(result)

    def generate(self, iters):
        """ plain generator driven by next() in CoCrawler.__dispatch; the old
        @asyncio.coroutine decorator was unnecessary and is removed in py3.11+ """
        for url in iters:
            yield self.__fetch(url)

    def task_queue(self):
        """ return a deque of generators, one per slice of self.__sched urls """
        begin = 0
        taskq = deque()
        while begin < len(self.__urls):
            it = ntake(self.__urls, begin, self.__sched)
            taskq.append(self.generate(it))
            begin += self.__sched
        return taskq



class CoCrawler(threading.Thread):
    """ crawler based on multi-threads and dummy coroutines """
    def __init__(self, urls, qsched, callback):
        """ qsched is the step length for CoFetcher """
        threading.Thread.__init__(self)
        assert hasattr(urls, '__iter__')
        # materialize BEFORE validating so generator inputs are not exhausted
        # by the validation loop (the old code listed them afterwards)
        self.__urls = list(urls)
        for u in self.__urls:
            if u[:7] != 'http://' and u[:8] != 'https://':
                # was 'MtSpider : ...' — copy/paste from MtCrawler
                raise ValueError('CoSpider : %s is invalid' % u)
        self.daemon = True
        self.__callback = callback
        self.__qsched = qsched
        self.__tsched = MAX_THREADS  # max number of dispatcher threads

    def __dispatch(self, iterable):
        """ drive one CoFetcher: round-robin next() over its task generators """
        co = CoFetcher(tuple(iterable), self.__callback, self.__qsched)
        taskq = co.task_queue()
        while taskq:
            t = taskq.pop()
            try:
                next(t)
                taskq.appendleft(t)  # still alive, rotate back into the queue
            except StopIteration:
                pass                 # this generator is exhausted, drop it
            time.sleep(0.01)

    def run(self):
        """ thread start: shard urls over dispatcher threads when there are many """
        cotasks = []
        if len(self.__urls) > self.__tsched:
            begin = 0
            step = len(self.__urls) // self.__tsched
            while begin < len(self.__urls):
                thread = threading.Thread(target = self.__dispatch,
                                          args = (ntake(self.__urls, begin, step),))
                cotasks.append(thread)
                begin += step
            for t in cotasks:
                t.start()
            while any(cotasks):  # None-out joined threads until all are reaped
                for i in range(len(cotasks)):
                    if cotasks[i] and not cotasks[i].is_alive():
                        sys.stderr.write(cotasks[i].name + ' joined...\n')
                        cotasks[i].join()
                        cotasks[i] = None
                time.sleep(1.0)
        else:
            self.__dispatch(self.__urls)



def test():
    """Placeholder for a module self-test; intentionally does nothing yet."""
    pass



