#!/usr/bin/env python
#
#       handler.py
#
#       Copyright 2009 Chris Glass <tribaal@gmail.com>
#
#       This program is free software: you can redistribute it and/or modify
#       it under the terms of the GNU General Public License as published by
#       the Free Software Foundation, either version 3 of the License, or
#       (at your option) any later version.
#
#       This program is distributed in the hope that it will be useful,
#       but WITHOUT ANY WARRANTY; without even the implied warranty of
#       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#       GNU General Public License for more details.
#
#       You should have received a copy of the GNU General Public License
#       along with this program.  If not, see <http://www.gnu.org/licenses/>.

#from properties import props
#from persist import database_helper
import persistance

from twisted.web.client import _parse

import httplib
import datetime
import re
import threading

class GlassCacheHandler:
    '''This is the main interface to the cache. It decides, per URL, whether
    to serve from the local database, query the peer caches, or fetch
    straight from the origin server.'''

    def __init__(self, config):
        '''Loads the persistence plugin named in the config and prepares the
        synchronization primitives shared with the fetcher threads.'''
        # Stats variables
        self.local_hit_count = 0
        self.local_miss_count = 0
        self.peer_hit_count = 0
        self.peer_miss_count = 0

        self.config = config
        self.dbh = persistance.load_plugin("plugins." +
            self.config['database_plugin'], self.config)
        self.filter = url_filter(self.config)

        # This will create the database if it doesn't exist yet.
        # Otherwise it does nothing.
        self.dbh.create_database()

        # Protects self.fastest_result, which the fetcher threads race to
        # fill in (first responder wins).
        self.result_lock = threading.Condition()
        self.fastest_result = None

    def handle_request(self, url, headers=None):
        '''This will handle cache intelligence. We have several cases:
        1) Request is "cacheable" and content is not in db -> to peers
        2) Request is "cacheable" and content is in db ->
            if fresh, return
            else to peers
        3) Request is explicitly "not cacheable" -> forward
        4) Request is not "cacheable" or "not cacheable" -> use default

        Returns a result dictionary (see _parse_response) or None.
        '''
        lookup_result = None

        # Get the date we last inserted the object in DB, or None.
        time_insert = self.dbh.get_insert_date_for_url(url)

        if time_insert and self.filter.is_fresh(url, time_insert):
            # No need to check if it should be cached - it is already!
            # It's still fresh - Re-serve!
            print("Local hit for %s!" % url)
            self.local_hit_count += 1

            lookup_result = self.dbh.retrieve_entry(url)

        else:
            # Check if it should be cached:
            if time_insert:  # It's already in DB - we should cache.
                should_cache = True
            else:  # Not already in DB, let's ask the filter.
                should_cache = self.filter.filter(url)

            if should_cache is False:
                # we shouldn't cache - simply forward.
                print("URL shouldn't be cached - forwarding %s" % url)
                lookup_result = self._forward(url)
            elif should_cache is None:
                # We should use default caching behavior, that is, not caching.
                lookup_result = self._forward(url)
            else:
                print("Local miss for %s!" % url)
                self.local_miss_count += 1
                # We should fetch and cache, starting with the peers :)
                # BUGFIX: propagate the incoming headers so _to_peers can
                # honour the glassvine-recursion / glassvine-visited headers.
                lookup_result = self._to_peers(url, headers)
                if not lookup_result:
                    # Peers don't have the item in cache... Let's use
                    # the normal route as a last resort.
                    print("Remote miss for %s! forwarding..." % url)
                    # BUGFIX: pass the headers dict, not the user-agent string.
                    lookup_result = self._forward(url, headers)
                if lookup_result:
                    self.dbh.insert_cache(url,
                        lookup_result['type'], lookup_result['date_fetched'],
                        lookup_result['content'], lookup_result['headers'])

        return lookup_result

    def _to_peers(self, url, headers=None):
        '''Sends a request to this cache's peers (and, in parallel, down the
        default route) and returns the first answer that comes back.'''
        recieved_peer_list = []
        recieved_recursion = '0'

        # Let's see if we have a timeout set in the config file.
        try:
            config_timeout = int(self.config['request_timeout'])
        except KeyError:
            config_timeout = None

        # Now check for a recursion level in the config file
        # (there should be one), but just in case.
        try:
            config_recursion = self.config['recursion_level']
        except KeyError:
            config_recursion = '1'

        # Here we compute the recursion level we will forward.
        try:
            # If we have a recursion header in the incoming request...
            if headers:
                recieved_recursion = headers['glassvine-recursion']
        except KeyError:
            recieved_recursion = None
        try:
            # If we have a list of visited peers in the incoming request...
            if headers:
                recieved_peer_list = headers['glassvine-visited'].split('-')
        except KeyError:
            recieved_peer_list = []  # Empty list instead of None!

        if not recieved_recursion:
            # No recieved recursion. We'll send the one in the config file.
            to_send_recursion = config_recursion
        elif int(recieved_recursion) > int(config_recursion):
            # BUGFIX: compare as integers, not strings. We got a very high
            # recursion level: clamp it to the local config value so
            # administrators can fine-tune their "neighbourhood".
            to_send_recursion = config_recursion
        else:
            # The basic case: we forward the recursion we recieved, minus one.
            # BUGFIX: decrement the *recieved* level, not the config one.
            to_send_recursion = int(recieved_recursion) - 1

        # Let's open an http connection through a proxy (our peers are proxies)
        try:
            peers = self.config['peers']  # it's a list
        except KeyError:
            peers = []

        queries = [default_route_fetcher(self, config_timeout, url, headers)]
        for peer in peers:
            # Never re-query a peer that already saw this request.
            if peer not in recieved_peer_list:
                queries.append(url_fetcher(self, peer, self.config['port'],
                                           config_timeout, url,
                                           to_send_recursion, peers, headers))

        self.result_lock.acquire()
        # BUGFIX: clear the previous request's answer, otherwise the wait
        # below would return a stale result immediately.
        self.fastest_result = None

        for runner in queries:
            runner.start()

        while self.fastest_result is None:
            self.result_lock.wait()
        fastest = self.fastest_result
        self.result_lock.release()

        return fastest

    def _forward(self, url, headers=None):
        '''Sends the request through this cache's normal route.
        This will retrieve the content and headers from the internet,
        not from the network of caches.'''
        _, host, port, path = _parse(url)

        try:
            config_timeout = int(self.config['request_timeout'])
        except KeyError:
            config_timeout = None

        conn = httplib.HTTPConnection(host, port, timeout=config_timeout)
        try:
            conn.request("GET", path)
            response = conn.getresponse()
            # _parse_response reads the body fully before we close.
            return self._parse_response(response)
        finally:
            # BUGFIX: don't leak the connection.
            conn.close()

    def _fetch(self, url):
        # Placeholder kept for interface compatibility; not implemented yet.
        pass

    def _parse_response(self, response):
        '''Returns a dictionnary of response elements: 'content', 'headers',
        'type' and 'date_fetched'.'''
        # quickly convert the headers list to a more handy dict
        headers = self._header_list_to_dict(response.getheaders())

        # Content type may be absent; default to an empty string.
        content_type = headers.get('content-type', '')

        return {
            'content': response.read(),
            'headers': headers,
            'type': content_type,
            'date_fetched': datetime.datetime.now(),
        }

    def _header_list_to_dict(self, mylist):
        '''Converts a list of (name, value) pairs to a dict; later
        duplicates overwrite earlier ones, as before.'''
        return dict(mylist)
        
class url_filter():
    '''This object takes care of figuring out if a URL is to be cached,
    forwarded or ignored.'''

    def __init__(self, config):
        self.config = config

        # Caches of compiled regexps, keyed by the rule text.
        self.no_cache_re = {}  # K = txt_regexp, V = compiled_regexp
        self.cache_re = {}  # K = txt_regexp, V = compiled_regexp

        self.cache_rules = self.config['cache_rules']
        self.no_cache_rules = self.config['no_cache_rules']

    def _should_not_cache(self, url):
        '''Do we have a matching rule to forward this URL?'''
        for k in self.no_cache_rules:
            # Compile the regexp once and keep it in the cache.
            if k not in self.no_cache_re:
                self.no_cache_re[k] = re.compile(k + '.*')
            if self.no_cache_re[k].match(url):
                return True
        # We iterated through all nocache rules, so nothing matches.
        return False

    def _should_cache(self, url):
        '''Returns the freshness delay (seconds) of the first cache rule
        matching this url, or False when no rule matches.'''
        for k, v in self.cache_rules.items():
            # Compile the regexp once and keep it in the cache.
            if k not in self.cache_re:
                self.cache_re[k] = re.compile(k)
            if self.cache_re[k].match(url):
                return int(v)
        # We iterated through all cache_rules, so nothing matches.
        return False

    def filter(self, url):
        '''number: should cache (freshness delay in seconds)
        False: should not cache
        None: default behavior'''
        if self._should_not_cache(url):
            return False
        should_cache = self._should_cache(url)
        # BUGFIX: bool is a subclass of int, so the old
        # isinstance(should_cache, int) test was also True for False and
        # unmatched URLs returned False ("never cache") instead of None
        # ("default behavior") as documented.
        if should_cache is not False:
            return should_cache
        return None

    def is_fresh(self, url, date):
        '''Is the entry inserted at `date` still within the freshness delay
        of its matching cache rule?'''
        max_freshness_secs = self._should_cache(url)
        delta = datetime.datetime.now() - date
        # BUGFIX: timedelta.seconds ignores the day component, so an entry
        # several days old could still look fresh.
        return delta.total_seconds() <= max_freshness_secs
        
class url_fetcher(threading.Thread):
    '''Thread that queries one peer cache (our peers are HTTP proxies) for
    a URL. The first fetcher to answer publishes its result on the handler;
    the others discard theirs.'''

    def __init__(self, handler, peer, port, timeout, url, to_send_recursion,
                 peer_list, headers=None):
        # Instance variables
        self.handler = handler
        self.peer = peer
        self.port = port
        self.timeout = timeout
        self.url = url
        self.to_send_recursion = to_send_recursion
        self.peer_list = peer_list
        # Kept for future use; not forwarded to the peer currently.
        self.headers = headers

        threading.Thread.__init__(self)

    def run(self):
        conn = httplib.HTTPConnection(self.peer, self.port,
                                      timeout=self.timeout)
        try:
            conn.putrequest("GET", self.url)
            # Adding the headers for our distributed algorithm
            conn.putheader('glassvine-recursion', str(self.to_send_recursion))
            conn.putheader('glassvine-visited', '-'.join(self.peer_list))
            conn.endheaders()
            response = conn.getresponse()
            # _parse_response reads the body fully before we close.
            result = self.handler._parse_response(response)
        finally:
            # BUGFIX: don't leak the connection.
            conn.close()

        self.handler.result_lock.acquire()
        try:
            # BUGFIX: only publish if we are first; slower threads used to
            # clobber the winner's already-consumed result.
            if self.handler.fastest_result is None:
                self.handler.fastest_result = result
                self.handler.result_lock.notify()
        finally:
            self.handler.result_lock.release()

class default_route_fetcher(threading.Thread):
    '''Thread that fetches the URL straight from the origin server (the
    "default route"), racing against the peer fetchers. The first fetcher
    to answer publishes its result on the handler.'''

    def __init__(self, handler, timeout, url, headers=None):
        # Instance variables
        self.handler = handler
        self.timeout = timeout
        self.url = url
        # Kept for future use; not forwarded to the origin currently.
        self.headers = headers

        threading.Thread.__init__(self)

    def run(self):
        _, host, port, path = _parse(self.url)
        conn = httplib.HTTPConnection(host, port, timeout=self.timeout)
        try:
            conn.putrequest("GET", path)
            conn.endheaders()
            response = conn.getresponse()
            # _parse_response reads the body fully before we close.
            result = self.handler._parse_response(response)
        finally:
            # BUGFIX: don't leak the connection.
            conn.close()

        self.handler.result_lock.acquire()
        try:
            # BUGFIX: only publish if we are first; slower threads used to
            # clobber the winner's already-consumed result.
            if self.handler.fastest_result is None:
                self.handler.fastest_result = result
                self.handler.result_lock.notify()
        finally:
            self.handler.result_lock.release()
