#!/usr/bin/env python3
# -*- coding:utf-8 -*-
## author : cypro666
## date   : 2014.6.24
"""
Implementation of LRU(last-recent-call) algorithm, simple and fast, thread-safe,
using as decorator, query wiki for more details about decorator.
"""
try:
    from _thread import RLock
except ImportError:
    class RLock:
        """Dummy reentrant lock for builds without threads.

        Mirrors the subset of the real RLock API used in this module:
        the context-manager protocol plus acquire()/release(), all no-ops.
        (The original fallback lacked acquire/release, so lrucache's
        lock.acquire()/lock.release() calls would have raised AttributeError.)
        """
        def __enter__(self): pass
        def __exit__(self, exctype, excinst, exctb): pass
        def acquire(self, blocking=True, timeout=-1): return True
        def release(self): pass
from functools import update_wrapper

__all__ = ['lrucache', 'simplelru']


class _HashedSeq(list):
    """A list whose hash is computed once and then reused.

    Used as a cache key so repeated dict lookups do not re-hash
    the underlying argument tuple on every access.
    """
    __slots__ = ('hashvalue',)

    def __init__(self, tup):
        # Hash eagerly, before storing the items, and remember it.
        self.hashvalue = hash(tup)
        self.extend(tup)

    def __hash__(self):
        return self.hashvalue


def __makekey(args,
              kwds,
              kwd_mark = (object(),),
              fasttypes = {int, str, frozenset, type(None)},
              sorted = sorted,
              tuple = tuple,
              type = type,
              len = len):
    """Build a hashable cache key from positional and keyword arguments.

    The trailing default arguments bind builtins at definition time —
    a deliberate speed hack so lookups inside the hot path are locals.
    Keyword items are separated from positionals by a unique marker object
    and sorted so that keyword order does not produce distinct keys.
    """
    key = args
    if kwds:
        key += kwd_mark
        for pair in sorted(kwds.items()):
            key += pair
    # A single argument of a known-cheap type is used directly as the key,
    # skipping the _HashedSeq wrapper entirely.
    if len(key) == 1 and type(key[0]) in fasttypes:
        return key[0]
    return _HashedSeq(key)



def lrucache(maxsize = 1024, mt = True):
    """Decorator implementing an LRU cache, multi-thread safe.

    Parameters:
        maxsize: maximum number of cached entries.  ``None`` or a value
                 <= 0 disables eviction (simple unbounded cache).
                 Positive sizes must be >= 128; maxsize should not be
                 too large.
        mt: when True the size-limited wrapper is serialized with an
            RLock so concurrent calls cannot corrupt the linked list.

    The decorated function gains two attributes:
        cache_info()  -> dict of hit/miss/maxsize/cache counters
        cache_clear() -> empty the cache and reset the counters
    """
    # Test `is None` first: `None <= 0` raises TypeError in Python 3, so the
    # original ordering made maxsize=None unusable despite being documented.
    assert maxsize is None or maxsize <= 0 or maxsize >= 128
    sentinel = object()             # unique object used to signal cache miss
    make_key = __makekey            # build a key from the function arguments
    lock = RLock()                  # because linkedlist updates aren't threadsafe
    PREV, NEXT, KEY, RESULT = 0, 1, 2, 3   # names for the link fields

    def decorating_function(user_function):
        cache = {}
        hit = miss = 0
        full = False
        cache_get = cache.get    # bound method to lookup a key or return None
        root = []                # root of the circular doubly linked list
        root[:] = [root, root, None, None]     # initialize by pointing to self

        if maxsize is None or maxsize <= 0:
            def wrapper(*args, **kwds):
                # Simple caching without ordering or size limit
                nonlocal hit, miss
                key = make_key(args, kwds)
                result = cache_get(key, sentinel)
                if result is not sentinel:
                    hit += 1
                    return result
                result = user_function(*args, **kwds)
                cache[key] = result
                miss += 1
                return result
        else:
            def wrapper(*args, **kwds):
                # Size limited caching that tracks accesses by recency
                nonlocal root, hit, miss, full
                key = make_key(args, kwds)
                if mt:
                    lock.acquire(blocking=True)
                # try/finally guarantees the lock is released even if
                # user_function raises; the original leaked the lock on
                # exception, deadlocking every later call.
                try:
                    link = cache_get(key)
                    if link is not None:
                        # Move the link to the front of the circular queue
                        link_prev, link_next, _key, result = link
                        link_prev[NEXT] = link_next
                        link_next[PREV] = link_prev
                        last = root[PREV]
                        last[NEXT] = root[PREV] = link
                        link[PREV] = last
                        link[NEXT] = root
                        hit += 1
                        return result
                    result = user_function(*args, **kwds)
                    if key in cache:
                        # A re-entrant call already cached this key while we
                        # were computing; adding a second link would corrupt
                        # the list, so keep the existing entry.
                        pass
                    elif full:
                        # Reuse the old root to store the new key and result,
                        # then make the oldest link the new (empty) root.
                        oldroot = root
                        oldroot[KEY] = key
                        oldroot[RESULT] = result
                        root = oldroot[NEXT]
                        oldkey = root[KEY]
                        root[KEY] = root[RESULT] = None
                        del cache[oldkey]
                        cache[key] = oldroot
                    else:
                        # Put result in a new link at the front of the queue.
                        last = root[PREV]
                        link = [last, root, key, result]
                        last[NEXT] = root[PREV] = cache[key] = link
                        full = (len(cache) >= maxsize)
                    miss += 1
                    return result
                finally:
                    if mt:
                        lock.release()

        def cache_info():
            # Unlocked snapshot of the counters; values are advisory.
            return {'hit' : hit, 'miss' : miss, 'maxsize' : maxsize, 'cache' : len(cache)}

        def cache_clear():
            # Forget all cached values and reset the statistics.
            nonlocal hit, miss, full
            cache.clear()
            root[:] = [root, root, None, None]
            hit = miss = 0
            full = False

        wrapper.cache_info = cache_info
        wrapper.cache_clear = cache_clear
        return update_wrapper(wrapper, user_function)

    return decorating_function


def simplelru(maxsize):
    """Decorator of LRU function, faster but not thread-safe, maxsize should not be too large"""
    assert maxsize <= 0 or maxsize >= 128

    def decorating_function(user_function):
        memo = {}
        hit = miss = 0
        lookup = memo.__getitem__   # bound method: fastest possible probe

        def wrapper(arg):
            # EAFP: try the memoized value first, compute on KeyError.
            nonlocal hit, miss
            try:
                value = lookup(arg)
            except KeyError:
                value = user_function(arg)
                memo[arg] = value
                miss += 1
                # Crude eviction: dump the whole table once it overflows.
                if 0 < maxsize < len(memo):
                    memo.clear()
            else:
                hit += 1
            return value

        def cache_info():
            # Snapshot of counters and current cache population.
            return {'hit' : hit, 'miss' : miss, 'maxsize' : maxsize, 'cache' : len(memo)}

        def cache_clear():
            # Forget everything and zero the statistics.
            nonlocal hit, miss
            memo.clear()
            hit = miss = 0

        wrapper.cache_info = cache_info
        wrapper.cache_clear = cache_clear
        return update_wrapper(wrapper, user_function)

    return decorating_function



from .debug import time_meter
@time_meter(__name__)
def test():
    """Benchmark raw calls vs simplelru vs lrucache vs functools.lru_cache.

    Each variant runs 100000 calls with random arguments in [0, 10000]
    and prints its cache statistics and elapsed wall-clock time.
    """
    from random import randint
    from functools import lru_cache
    from time import time
    import math

    def fun(n):
        # Synthetic CPU-bound workload; deterministic result per n.
        if n & 1:
            for i in range(100):
                n = (float(n) + i) / (1 + math.sin(i))
            return math.hypot(n*i, n*i)
        else:
            a = (n + 3.14) / (1 + math.cos(n))
            return math.hypot(n*a, n*a)

    t = time()
    for i in range(100000):
        fun(randint(0, 10000))
    print('raw : %.3f\n' % (time()-t))

    @simplelru(0)
    def fun1(n):
        return fun(n)
    t = time()
    for i in range(100000):
        fun1(randint(0, 10000))
    print(fun1.cache_info())
    print('simplelru : %.3f\n' % (time()-t))

    @lrucache(0, mt=False)
    def fun2(n):
        return fun(n)
    # Reset the timer: the original reused t, so this timing also
    # included the simplelru run above.
    t = time()
    for i in range(100000):
        fun2(randint(0, 10000))
    print(fun2.cache_info())
    print('lrucache : %.3f\n' % (time()-t))

    @lru_cache(None)
    def fun3(n):
        return fun(n)
    # Reset the timer for the stdlib baseline as well.
    t = time()
    for i in range(100000):
        fun3(randint(0, 10000))
    print(str(fun3.cache_info()).replace('CacheInfo(', '{').replace(')', '}'))
    print('lru_cache : %.3f\n' % (time()-t))



