#!/usr/bin/env python3
# -*- coding:utf-8 -*-
## author : cypro666
## date   : 2014.5.16
"""
Parallel web-crawler engine, supports 3 kind of spiders
Asynchronous : asynchronous crawler using tornado
Generator    : tasklet crawler using requests
MultiThread  : multi-threads crawler using pycurl and threads
"""
import io
import sys
import time
import asyncio
import threading, queue
from itertools import tee
from collections import deque
from http.client import responses as retcodemean
from enum import Enum

import pycurl
import requests
from tornado import gen
from tornado import ioloop
from tornado.httputil import HTTPHeaders
from tornado.httpclient import HTTPRequest, AsyncHTTPClient
from tornado.curl_httpclient import CurlAsyncHTTPClient

from .iteralgos import ntake
from .debug import print_exception, time_meter
from .configs import REQUEST_TIMEOUT, MAX_THREADS, CRAWL_SCHED

# Public API of this module.
# Bug fix: a missing comma after 'TORNADO_OPTION' used to concatenate it with
# 'USER_AGENTS' into one bogus entry ('TORNADO_OPTIONUSER_AGENTS').
__all__ = ['CURL_HEAD', 'TORNADO_HEAD', 'TORNADO_OPTION',
           'USER_AGENTS', 'ParallelCrawler', 'Engines']


# Browser User-Agent strings to impersonate.  Index 1 is the one referenced
# by CURL_HEAD and TORNADO_OPTION below.
# Bug fix: a missing comma after the MSIE entry used to concatenate the first
# two strings, so the list held 3 items and index 1 was shifted.
USER_AGENTS = [
    'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0)',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20100101 Firefox/29.0',
    'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:29.0) Gecko/20100101 Firefox/29.0',
    'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.114'
]

# Default request headers for the pycurl-based fetchers, as a list of
# "Key: value" strings (the format libcurl's HTTPHEADER expects).
# NOTE(review): '*/*;q=0' declares every non-listed content type
# unacceptable — possibly a typo for 'q=0.8'; confirm before changing,
# since it alters what servers may send back.
CURL_HEAD = [
    'User-Agent: ' + USER_AGENTS[1],
    'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0',
    'Accept-Language: zh-cn,zh;q=0.8,en-us;q=0.5,en;q=0.3',
    'Accept-Encoding: gzip, deflate',    # pycurl decompresses gzip responses automatically
    'Connection: close'                  # should not be keep-alive
]

# Default headers for the tornado-based fetcher (AsyncFetcher).
TORNADO_HEAD = HTTPHeaders()
TORNADO_HEAD['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0'
TORNADO_HEAD['Accept-Language'] = 'zh-cn,zh;q=0.8,en-us;q=0.5,en;q=0.3'
TORNADO_HEAD['Accept-Encoding'] = 'gzip, deflate'
TORNADO_HEAD['Connection'] = 'close'     # should not be keep-alive


# Default keyword arguments for tornado's HTTPRequest; consumed by
# AsyncFetcher.setopt()/perform().
# NOTE(review): 'use_gzip' is the tornado 3.x spelling; later tornado
# releases renamed it to 'decompress_response' — confirm against the
# pinned tornado version before upgrading.
TORNADO_OPTION = {
    'method'            : "GET",
    'headers'           : TORNADO_HEAD,
    'body'              : None,
    'connect_timeout'   : REQUEST_TIMEOUT * 2,
    'request_timeout'   : REQUEST_TIMEOUT,
    'follow_redirects'  : True,
    'max_redirects'     : 3,
    'user_agent'        : USER_AGENTS[1],
    'use_gzip'          : True,
    'auth_username'     : None, 
    'auth_password'     : None, 
    'auth_mode'         : None,
    'if_modified_since' : None, 
    'header_callback'   : None, 
    'streaming_callback': None,
    'prepare_curl_callback' : None,
    'proxy_host'        : None, 
    'proxy_port'        : None, 
    'proxy_username'    : None,
    'proxy_password'    : None, 
    'allow_nonstandard_methods' : False
}

## global_curl_share = pycurl.CurlShare() # this obj should be in global
## global_curl_share.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_COOKIE)
## global_curl_share.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_DNS)

# Make tornado use the libcurl-backed client implementation globally.
AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")

class Engines(Enum):
    """Engine type selected by callers of ParallelCrawler.

    Bug fix: trailing commas used to turn the first two member values into
    the tuples (0,) and (1,), inconsistent with MultiThread's plain int.
    Member identity (how the file compares engines) is unchanged.
    """
    Asynchronous = 0   # asynchronous crawler using tornado
    Generator = 1      # tasklet crawler using requests
    MultiThread = 2    # multi-threads crawler using pycurl and threads


class AsyncFetcher(CurlAsyncHTTPClient):
    """Inner class, used by AsyncSpider: a curl-backed tornado HTTP client
    with per-handle timeouts and a long-lived DNS cache."""
    def __init__(self, max_clients):
        # NOTE(review): `max_clients` is not forwarded here, but tornado's
        # Configurable.__new__ may already pass the call-site kwargs to
        # initialize() before __init__ runs — verify against the pinned
        # tornado version whether the pool size is actually applied.
        super(AsyncFetcher, self).__init__()
        try:
            # Tune every pre-created curl handle of the client's pool.
            for curl in self._curls:
                # curl.setopt(pycurl.SHARE, global_curl_share)
                curl.setopt(pycurl.TIMEOUT, REQUEST_TIMEOUT)
                curl.setopt(pycurl.CONNECTTIMEOUT, REQUEST_TIMEOUT)
                curl.setopt(pycurl.DNS_CACHE_TIMEOUT, 3600) #Very important
                curl.setopt(pycurl.DNS_USE_GLOBAL_CACHE, 1) #Very important?
        except pycurl.error:
            # Best effort: fall back to libcurl defaults if an option is
            # unsupported by the installed libcurl build.
            pass
        #self._fetch = super(_AsyncFetcher, self).fetch
        
    
    def setopt(self, **karg):
        """Set the per-request option dict; with no kwargs, fall back to
        the module-level TORNADO_OPTION defaults."""
        if not karg:
            self.request_options = TORNADO_OPTION
        else:
            self.request_options = karg
    
    
    @gen.coroutine
    def perform(self, url, callback):
        """Fetch `url` asynchronously, invoke callback(response), and
        return (effective url, status code, reason phrase)."""
        response = yield gen.Task(self.fetch, HTTPRequest(url, **self.request_options))
        callback(response)
        return (response.request.url, response.code, response.reason)



class AsyncSpider(threading.Thread):
    """Async spider using tornado and a producer-consumer queue.

    Runs the tornado ioloop on a background thread; every fetched response
    is pushed into an internal queue which the caller drains through
    acquire_response() until iolooping() turns False.
    """

    def __init__(self, urls, max_clients = 64):
        """urls: iterable of http(s) URLs; max_clients: number of concurrent
        connections, clamped into [8, 128]."""
        threading.Thread.__init__(self)
        assert hasattr(urls, '__iter__') and max_clients > 0
        self.daemon = True
        self.__iter, = tee(iter(urls), 1)  # prevent iterator invalidation
        # Bug fix: the clamp used to be stored into `self._max_clients`
        # (single underscore) and was silently ignored; clamp the value
        # that is actually passed to the fetcher.
        self.__max_clients = min(128, max(8, max_clients))
        self.__counter = 0    # number of finished connections
        self.__fetcher = AsyncFetcher(max_clients = self.__max_clients)
        self.__ioloop = ioloop.IOLoop.instance()   # global ioloop singleton
        self.__queue = queue.Queue(maxsize = CRAWL_SCHED)
        self.__futures = None
        self.NoRespone = queue.Empty  # exception type raised by acquire_response

    def iolooping(self):
        """True while the background ioloop thread is still running."""
        return self.is_alive()

    def response_arrived(self):
        """True if at least one response is waiting in the queue."""
        return not self.__queue.empty()

    def acquire_response(self):
        """Pop one response without blocking; raises self.NoRespone if empty."""
        item = self.__queue.get(block = False)
        self.__queue.task_done()
        return item

    def max_queue_size(self):
        return CRAWL_SCHED

    def __monitor(self):
        # Called once per finished url; stops the ioloop when all are done.
        self.__counter += 1
        if not (self.__counter & 127):  # progress note every 128 urls
            sys.stderr.write(str(self.__counter) + ' urls finished\n')
        if self.__counter >= len(self.__futures):
            self.__ioloop.stop()    # stop when all urls have been fetched

    def __callback(self, response):
        self.__queue.put(response, block = False)
        self.__monitor()

    def run(self):
        if not self.__iter:
            return
        self.__fetcher.setopt(**TORNADO_OPTION)  # global default request option
        self.__counter = 0  # this counter drives the ioloop shutdown

        def perform(url):
            if url[:7] != 'http://' and url[:8] != 'https://':
                raise ValueError('AsyncSpider : %s is invalid' % url)
            return self.__fetcher.perform(url, self.__callback)

        self.__futures = list(map(perform, self.__iter))
        sys.stderr.write('AsyncSpider started, num of effect urls : %d\n' % len(self.__futures))

        self.__ioloop.start()  # runs until __monitor stops the loop

        if __debug__:
            # Bug fix: the original concatenated tuples with str (TypeError)
            # and read result() even on failed futures, which re-raises the
            # stored exception; stringify, and only read result() when clean.
            for fut in self.__futures:
                if fut.exc_info():
                    sys.stderr.write(str(fut.exc_info()) + '\n')
                else:
                    sys.stderr.write(str(fut.result()) + '\n')

        sys.stderr.write('AsyncSpider finished!\n')



class MtFetcher(threading.Thread):
    """Inner class used by MtSpider, based on pycurl: one worker thread
    driving a single curl handle over its private slice of urls, reporting
    each result through the callback supplied by MtSpider."""
    
    def __init__(self, urls, callback):
        # urls     : iterable of URLs this worker is responsible for
        # callback : called with (url, raw header bytes, body bytes) per url
        threading.Thread.__init__(self)
        assert callback and hasattr(urls, '__iter__')
        self.daemon = True
        self.__response_head = b''  # in py3, header response from pycurl is bytes object
        self.__curl = pycurl.Curl()
        # self.__curl.setopt(pycurl.SHARE, global_curl_share)
        self.__curl.setopt(pycurl.HEADERFUNCTION, self._store_response_head)
        self.__iter = iter(urls)
        self.__callback = callback
        
    def _store_response_head(self, res):
        # libcurl delivers the status line and headers chunk by chunk;
        # accumulate them until the transfer completes.
        self.__response_head += res
        pass
    
    def setopt(self, 
               maxredirs = 3,       httpheader = CURL_HEAD, 
               forbid_reuse = 0,    timeout = REQUEST_TIMEOUT,
               followlocation = 1,  connecttimeout = 60, 
               fresh_connect = 1,   verbose = 1):
        """Configure the curl handle; call once before start()."""
        setopt = self.__curl.setopt
        setopt(pycurl.VERBOSE, verbose)
        setopt(pycurl.FOLLOWLOCATION, followlocation)
        setopt(pycurl.CONNECTTIMEOUT, connecttimeout)
        setopt(pycurl.TIMEOUT, timeout)
        setopt(pycurl.FRESH_CONNECT, fresh_connect)
        setopt(pycurl.FORBID_REUSE, forbid_reuse)
        setopt(pycurl.HTTPHEADER, httpheader)
        setopt(pycurl.MAXREDIRS, maxredirs)
        setopt(pycurl.NOSIGNAL, 1)             #Very important, for multi threads
        setopt(pycurl.DNS_CACHE_TIMEOUT, 3600) #Very important
        setopt(pycurl.DNS_USE_GLOBAL_CACHE, 1) #Very important
        setopt(pycurl.ENCODING, 'gzip')        #Very important: gzip,deflate
        setopt(pycurl.SSL_VERIFYHOST, 2)
        setopt(pycurl.COOKIEFILE, '/dev/null')
        setopt(pycurl.HTTPGET, 1)
    
    def run(self):
        # Fetch each url sequentially on this thread; on failure the result
        # still carries whatever head/body bytes arrived (best effort).
        for url in self.__iter:
            try:
                bytesio = io.BytesIO()
                self.__response_head = b''  # Remember to clear it every time! Note it's bytes!
                self.__curl.setopt(pycurl.URL, url)
                self.__curl.setopt(pycurl.WRITEFUNCTION, bytesio.write)
                self.__curl.perform()
                if __debug__ and self.__curl.getinfo(pycurl.HTTP_CODE) >= 400:
                    sys.stderr.write('4xx:'+url[0:50]+'...\n')
            except Exception as e:
                sys.stderr.write(str(e)+'\n')
            result = (url, self.__response_head, bytesio.getvalue())
            self.__callback(result)
    
    def close(self):
        """Release the curl handle; call after join()."""
        self.__curl.close()



class MtSpider(threading.Thread):
    """Multi-thread spider using MtFetcher workers and a producer-consumer
    queue; results are drained through acquire_response()."""

    def __init__(self, urls, max_threads, engine):
        """urls: iterable of http(s) URLs; max_threads is capped at
        MAX_THREADS; engine is retained for interface compatibility."""
        assert hasattr(urls, '__iter__') and max_threads >= 1
        threading.Thread.__init__(self)
        # Bug fix: materialize before validating — iterating a one-shot
        # generator here used to exhaust it, leaving the url list empty.
        self.__urls = list(urls)
        for u in self.__urls:
            if u[:7] != 'http://' and u[:8] != 'https://':
                raise ValueError('MtSpider : %s is invalid' % u)
        if max_threads > MAX_THREADS:
            max_threads = MAX_THREADS
        self.__result = None
        self.__counter = 0
        self.__engine = engine
        self.__sched = len(self.__urls) // max_threads  # urls per worker
        if not self.__sched:
            self.__sched = 1
        self.__queue = queue.Queue(maxsize = len(self.__urls))
        self.NoRespone = queue.Empty  # exception type raised by acquire_response
        self.daemon = True             # have to call join to wait
        threading.stack_size(1<<16)  # small per-thread stacks allow more sub threads

    def response_arrived(self):
        """True if at least one result is waiting in the queue."""
        return not self.__queue.empty()

    def acquire_response(self):
        """Pop one result without blocking; raises self.NoRespone if empty."""
        item = self.__queue.get(block = False)
        self.__queue.task_done()
        return item

    def max_queue_size(self):
        return len(self.__urls)

    def __callback(self, result):
        # Invoked from worker threads; queue.Queue is thread-safe.
        self.__counter += 1
        if not (self.__counter & 127):  # progress note every 128 urls
            sys.stderr.write(str(self.__counter) + ' urls finished\n')
        self.__queue.put(result, block = False)

    def run(self):
        begin = 0
        threads = []   # running MtFetcher workers
        while begin < len(self.__urls):
            mtf = MtFetcher(ntake(self.__urls, begin, self.__sched), self.__callback)
            mtf.setopt(verbose = 0,
                       followlocation = 0,
                       connecttimeout = 60,
                       timeout = REQUEST_TIMEOUT,
                       fresh_connect = 1,
                       forbid_reuse = 1,
                       httpheader = CURL_HEAD,
                       maxredirs = 5)
            mtf.start()
            threads.append(mtf)
            begin += self.__sched
        # Simpler than the original poll-and-delete loop: join() already
        # blocks until each worker finishes, then free its curl handle.
        for mtf in threads:
            mtf.join()
            mtf.close()
        # Bug fix: the final log line was missing its newline.
        sys.stderr.write('MtSpider finished!\n')



class CoFetcher(object):
    """Inner class used by CoSpider: blocking fetches via the requests
    module, wrapped in plain generators that CoSpider drives with next()."""

    def __init__(self, urls, callback, sched, timeout = REQUEST_TIMEOUT):
        """urls: sequence of URLs; callback(result) receives
        (url, headers, body text) per fetch; sched: urls per tasklet."""
        assert callback and hasattr(urls, '__iter__')
        self.__urls = urls
        self.__callback = callback
        self.__timeout = timeout
        self.__sched = sched
        # Convert the curl-style "Key: value" strings into a dict for requests.
        self.__header = {}
        for line in CURL_HEAD:
            [k, v] = line.split(': ')
            self.__header[k] = v

    def __fetch(self, url):
        try:
            ret = requests.get(url, timeout = self.__timeout, headers = self.__header)
            # str(ret) looks like '<Response [200]>'; downstream handlers
            # slice the status code back out of this pseudo-header.
            ret.headers['code'] = str(ret)
            result = (url, ret.headers, ret.text)
        except Exception as e:
            # Bug fix: sys.stderr.write takes a single string; the original
            # passed print-style arguments and raised TypeError, so the
            # error path itself crashed the tasklet.
            sys.stderr.write('CoSpider perform: %s from %s...\n' % (e, url[0:50]))
            result = (url, '', '')
        return self.__callback(result)

    def generator(self, urls):
        """Plain generator over one slice of urls, driven by next() in
        CoSpider. (The old @asyncio.coroutine decorator was unnecessary
        for next()-driven iteration and is deprecated/removed upstream.)"""
        for url in urls:
            yield self.__fetch(url)

    def task_queue(self):
        """Split self.__urls into a deque of generator tasklets of
        self.__sched urls each, for round-robin scheduling."""
        begin = 0
        taskq = deque()
        while begin < len(self.__urls):
            it = ntake(self.__urls, begin, self.__sched)
            taskq.append(self.generator(it))
            begin += self.__sched
        return taskq



class CoSpider(threading.Thread):
    """Tasklet spider using CoFetcher, for simple tasks: round-robins
    generator tasklets, fanning out over threads for large url sets."""

    def __init__(self, urls, qsched, callback):
        """urls: iterable of http(s) URLs; qsched: urls per tasklet;
        callback(result) is invoked for every fetched url."""
        threading.Thread.__init__(self)
        assert hasattr(urls, '__iter__')
        # Bug fix: materialize before validating — iterating a one-shot
        # generator here used to exhaust it, leaving the url list empty.
        self.__urls = list(urls)
        for u in self.__urls:
            if u[:7] != 'http://' and u[:8] != 'https://':
                # Bug fix: the message used to blame MtSpider.
                raise ValueError('CoSpider : %s is invalid' % u)
        self.daemon = True  # have to call join to wait
        self.__callback = callback
        self.__qsched = qsched
        self.__tsched = MAX_THREADS  # num threads at most

    def __dispatch(self, iterable):
        """Round-robin the tasklet generators until all are exhausted."""
        co = CoFetcher(tuple(iterable), self.__callback, self.__qsched)
        taskq = co.task_queue()
        while taskq:
            task = taskq.pop()
            try:
                next(task)
                taskq.appendleft(task)  # not finished yet: requeue
            except StopIteration:
                pass                    # this tasklet is done
            time.sleep(0.01)

    def run(self):
        workers = []
        if len(self.__urls) > self.__tsched:
            # Large url set: shard across up to __tsched dispatcher threads.
            begin = 0
            step = len(self.__urls) // self.__tsched
            while begin < len(self.__urls):
                thread = threading.Thread(target = self.__dispatch,
                                          args = (ntake(self.__urls, begin, step),))
                workers.append(thread)
                begin += step
            for t in workers:
                t.start()
            while any(workers):  # poll, joining finished workers
                for i in range(len(workers)):
                    if workers[i] and not workers[i].is_alive():
                        sys.stderr.write(workers[i].name + ' joined...\n')
                        workers[i].join()
                        workers[i] = None
                time.sleep(1.0)
        else:
            self.__dispatch(self.__urls)



class ParallelCrawler(object):
    """The parallel crawler interface.

    iterable      : urls to fetch
    engine        : a member of Engines selecting the spider implementation
    parallel      : concurrency hint (clients / threads / tasklet size)
    final_timeout : seconds to wait when joining the spider thread

    The count of handled responses is exposed as the `fetched` attribute.
    """

    def __init__(self, iterable, engine = Engines.Generator, parallel = 8, final_timeout = 3600):
        # (Removed a dead `global engines` statement: no such global exists.)
        assert iterable and parallel
        assert hasattr(iterable, '__iter__')
        assert isinstance(parallel, int)
        self.urls = list(iterable)
        self.parallel = parallel
        self.engine = engine
        # Bug fix: a `fetched()` method used to coexist with this attribute;
        # the attribute shadowed it, making the method dead and a call to it
        # a TypeError — the dead method has been removed.
        self.fetched = 0
        self.timeout = final_timeout

    def __drain(self, spider, handler, args, kargs):
        """Consume responses from a running spider thread until it finishes,
        then join it with the configured timeout."""
        while spider.is_alive() or spider.response_arrived():
            try:
                handler(spider.acquire_response(), *args, **kargs)
                self.fetched += 1
                time.sleep(0.01)
            except spider.NoRespone:
                if not spider.is_alive():
                    continue
                time.sleep(0.1)
        spider.join(self.timeout)

    def __call__(self, handler, *args, **kargs):
        """lookout that handler should NOT contain any blocking operation!!!"""
        if self.engine == Engines.Asynchronous:
            try:
                spider = AsyncSpider(self.urls, self.parallel)
                spider.start() # AsyncSpider is a Thread object, using start to run!
                self.__drain(spider, handler, args, kargs)
            except Exception:
                print_exception('ParallelCrawler')
        elif self.engine == Engines.MultiThread:
            try:
                spider = MtSpider(self.urls, self.parallel, self.engine)
                spider.start()
                self.__drain(spider, handler, args, kargs)
            except Exception:
                print_exception('ParallelCrawler')
        elif self.engine == Engines.Generator:
            def __handler(result):
                handler(result, *args, **kargs)
            try:
                # CoSpider invokes the handler itself, so no drain loop here.
                spider = CoSpider(self.urls, self.parallel, __handler)
                spider.start()
                spider.join(self.timeout)
            except Exception:
                print_exception('ParallelCrawler')
        else:
            raise ValueError('ParallelCrawler : wrong engine type!')

    @time_meter('ParallelCrawler')
    def go(self, handler, *args, **kargs):
        """when all data arrived, handler(*args, **kargs) will be called
           lookout that handler should NOT contain any blocking operation!!!"""
        return self.__call__(handler, *args, **kargs)



@time_meter(__name__)
def test(urls):
    """Smoke-test all three engines against `urls` (or a small default
    list), printing status code, reason, body length and url per response."""
    from xbc3.debug import run_stdio_flusher
    run_stdio_flusher() 
    if not urls:
        urls = ['http://www.163.com', 
                'http://www.csdn.net', 
                'http://www.baidu.com', 
                'http://www.ifeng.com',
                'https://github.com']
    
    ndone = 0                 # shared progress counter across handlers
    lock = threading.Lock()   # handlers may run on multiple threads

    def myhandler_async(response):
        # Async engine delivers tornado HTTPResponse objects.
        nonlocal ndone
        with lock:
            ndone += 1
            if not (ndone%1000):
                print('async spider', ndone, 'finished')
        try:
            code = response.code
            if __debug__: 
                print(code, retcodemean[code], end=' ')
                print(len(response.body), end=' ')
                print(response.request.url)
        except Exception as e:
            print(e)
   
    def myhandler_mt(result):
        # Mt engine delivers (url, raw header bytes, body bytes); the status
        # code is parsed out of the raw status line.
        nonlocal ndone
        with lock:
            ndone += 1
            if not (ndone%1000):
                print('mt spider', ndone, 'finished')
        try:
            url, head, body = result
            code = int(str(head.split(b' ', 2)[1], 'utf-8'))
            if __debug__: print(code, retcodemean[code], len(body), url)
        except Exception as e:
            print(e)

    def myhandler_co(result):
        # Gen engine delivers (url, headers dict, body text); headers['code']
        # holds "<Response [NNN]>", so slice [11:14] extracts the code.
        nonlocal ndone
        with lock:
            ndone += 1
            if not (ndone%1000):
                print('gen spider', ndone, 'finished')
        try:
            url, head, body = result
            code = int(head['code'][11:14])
            if __debug__: print(code, retcodemean[code], len(body), url)
        except Exception as e:
            print(e)
   
    print('test Async engine')    
    print('-----------------------------------------------------------------')
    ndone = 0
    crawler = ParallelCrawler(urls, Engines.Asynchronous, 8)
    crawler.go(myhandler_async)
    print('-----------------------------------------------------------------')
    time.sleep(1.0)
    print('\ntest Mt engine')
    print('-----------------------------------------------------------------')
    ndone = 0
    crawler = ParallelCrawler(urls, Engines.MultiThread, 8)
    crawler.go(myhandler_mt)
    print('-----------------------------------------------------------------')
    time.sleep(1.0)
    print('\ntest Gen engine')
    print('-----------------------------------------------------------------')
    ndone = 0
    crawler = ParallelCrawler(urls, Engines.Generator, 8)
    crawler.go(myhandler_co)
    print('-----------------------------------------------------------------')
    
    print('\nall spider finished')





