from ctypes import cdll, c_int64
import torch
import functools
import threading
import os
import inspect
from typing import List, Tuple
from typing import get_origin, get_args

# Per-thread storage slot for the currently active tile graph
# (read/written through _VirtualAst.tile_graph below).
threadlocal = threading.local()


class _Array:
    def __getitem__(self, size):
        return (self.t * size)()

    def __init__(self, t):
        self.t = t

    def __call__(self, array):
        return (self.t * len(array))(*array)


class _ClibAst:
    """Static bridge to the native ``libast.so`` shipped next to this module.

    The shared library is loaded once at class-creation time; all access to
    its exported functions funnels through :meth:`emit`.
    """

    c_lib = cdll.LoadLibrary(os.path.join(os.path.dirname(__file__), 'libast.so'))

    @classmethod
    def create_context(cls):
        """Allocate a native AST context and return its opaque handle."""
        return cls.emit('CreateContext')

    @classmethod
    def destroy_context(cls, ctx):
        """Release a context previously returned by :meth:`create_context`."""
        cls.emit('DestroyContext', ctx)

    @classmethod
    def emit(cls, op, *args):
        """Look up the exported C symbol ``op`` and call it with ``args``."""
        try:
            c_func = getattr(cls.c_lib, op)
        except AttributeError:
            # Re-raise with a message that names the library for easier triage.
            raise AttributeError(f"C api {op} not found in libast") from None
        return c_func(*args)


class _VirtualAst:
    '''
    This class is a virtual ctx for manage threadlocal ast context
    '''

    def __getattr__(self, name):
        def snake_to_camel(name):
            return ''.join([word.capitalize() for word in name.split('_')])
        setattr(self, name, lambda *args: self.tile_graph.call(snake_to_camel(name), *args))
        return getattr(self, name)

    def swap_tile_graph(self, tile_graph):
        prior = self.tile_graph
        self.tile_graph = tile_graph
        return prior

    @property
    def tile_graph(self):
        return getattr(threadlocal, "ast_tile_graph", _NullTileGraph())

    @tile_graph.setter
    def tile_graph(self, value):
        setattr(threadlocal, "ast_tile_graph", value)


# Module-level dispatch proxy for emitting graph ops.
# NOTE(review): this name shadows the stdlib `ast` module inside this file.
ast = _VirtualAst()


class _TileGraph:
    """Context manager owning one native libast context.

    On entry it installs itself as the thread-local graph (so ``ast.<op>``
    calls route here) and creates a C context; on exit it restores the
    previously installed graph and destroys the context.
    """

    def __init__(self):
        self.prior = None  # graph displaced by __enter__, restored in __exit__
        self.ctx = None    # opaque native context handle; None when inactive

    def __enter__(self):
        # Install this graph first, then create the native context it wraps.
        self.prior = ast.swap_tile_graph(self)
        self.ctx = _ClibAst.create_context()
        return self

    def __exit__(self, *args):
        # Mirror __enter__ in reverse: restore the prior graph, then free
        # the native context and mark this graph inactive.
        ast.swap_tile_graph(self.prior)
        _ClibAst.destroy_context(self.ctx)
        self.ctx = None

    def get_tensor_dtype_and_dims(self, tensor: '_Tensor'):
        """Query the native side for ``tensor``'s dtype code and shape.

        Returns a ``(dtype_code, dims_list)`` tuple; raises RuntimeError if
        called while the graph is not active (no live context).
        """
        if self.ctx is None:
            raise RuntimeError("No tile graph in context")
        dtype = _ClibAst.emit('GetTensorDtype', self.ctx, tensor.handle)
        dim_num = _ClibAst.emit('GetTensorDimNum', self.ctx, tensor.handle)
        # Output buffer the C side fills with dim_num int64 extents.
        c_int64_array = _Array(c_int64)[dim_num]
        _ClibAst.emit('GetTensorDims', self.ctx, tensor.handle, dim_num, c_int64_array)
        return dtype, list(c_int64_array)

    def call(self, op, *c_args):
        """Emit native op ``op`` in this context and wrap its result handle
        in a new _Tensor. Raises RuntimeError when the graph is inactive."""
        if self.ctx is None:
            raise RuntimeError("No tile graph in context")
        return _Tensor(self, _ClibAst.emit(op, self.ctx, *c_args))

    def run(self, *args):
        # Stub: presumably meant to execute the built graph against the raw
        # torch data pointers passed by jit(). NOTE(review): jit() invokes
        # this AFTER the `with` block exits, i.e. after self.ctx has been
        # destroyed — confirm the intended lifecycle before implementing.
        pass


class _NullTileGraph(_TileGraph):
    """Sentinel graph used when no tile graph is active on the thread.

    Bug fix: the original only overrode ``emit``, which nothing ever calls —
    _VirtualAst dispatches through ``call``, so misuse previously surfaced as
    the misleading "No tile graph in context" from _TileGraph.call. Every
    entry point now fails with the intended message.
    """

    def call(self, op, *c_args):
        raise RuntimeError("No tile graph set")

    def get_tensor_dtype_and_dims(self, tensor):
        raise RuntimeError("No tile graph set")

    def emit(self, op, *args, **kwargs):
        # Retained for backward compatibility with any external callers.
        raise RuntimeError("No tile graph set")


def _c_wraps(v, typ):
    if typ in (List[int], Tuple[int]):
        return _Array(c_int64)(v)
    if typ is int:
        return c_int64(v)
    if typ is _Tensor:
        return v.handle
    raise ValueError(f"Unsupported type {typ} value {repr(v)} for c argument")


def type_constriants(f=None, **specify):
    """Decorator (sic: "constraints") that type-checks arguments, converts
    them to ctypes values via _c_wraps, calls the function, and type-checks
    the result.

    Supports both bare ``@type_constriants`` and parameterized
    ``@type_constriants(name=Type, ...)`` forms; ``specify`` entries override
    the expected type per argument name ('return' for the return value).
    Parameters without an annotation or override default to _Tensor.
    """
    def is_instance_of_type(value, typ) -> bool:
        # Plain classes use isinstance directly; typing generics such as
        # List[int] are unwrapped via get_origin/get_args.
        origin = get_origin(typ)
        if origin is None:
            return isinstance(value, typ)
        if origin in (list, tuple):
            # NOTE: every item is checked against the FIRST type argument only.
            return isinstance(value, (list, tuple)) and all(is_instance_of_type(item, get_args(typ)[0]) for item in value)
        return False

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Bind to the original signature so defaults participate and the
            # converted arguments keep declaration order for the positional
            # call below.
            bound = inspect.signature(func).bind(*args, **kwargs)
            bound.apply_defaults()
            c_args = []
            for name, value in bound.arguments.items():
                # Expected type: explicit override > annotation > _Tensor.
                expect_type = specify.get(name, func.__annotations__.get(name, _Tensor))
                if not is_instance_of_type(value, expect_type):
                    raise TypeError(
                        f"Argument {name} of {func.__name__} must be {expect_type}, but got {type(value).__name__}")
                c_args.append(_c_wraps(value, expect_type))
            result = func(*c_args)
            expect_type = specify.get('return', func.__annotations__.get('return', _Tensor))
            # A ``-> None`` annotation means "returns nothing": check NoneType.
            expect_type = type(None) if expect_type is None else expect_type
            if not is_instance_of_type(result, expect_type):
                raise TypeError(
                    f"Return value of {func.__name__} must be {expect_type}, but got {type(result).__name__}")
            return result
        return wrapper
    # Bare-decorator form: f is the function itself.
    if f is not None:
        return decorator(f)
    return decorator


class _Tensor:
    """Lightweight handle to a tensor node living in a native tile graph.

    Metadata (dtype code and shape) is fetched lazily from the owning graph
    on first access and cached. Arithmetic operators delegate to the
    module-level op wrappers.
    """

    def __init__(self, graph, handle):
        self.graph = graph    # owning _TileGraph used for metadata queries
        self.handle = handle  # opaque native node handle
        # Lazily populated caches; filled together by _fetch_meta().
        self._size = None
        self._dtype = None

    def _fetch_meta(self):
        # One native round-trip yields both dtype and dims.
        self._dtype, self._size = self.graph.get_tensor_dtype_and_dims(self)

    def size(self):
        """Return the cached shape, querying the graph on first use."""
        if self._size is None:
            self._fetch_meta()
        return self._size

    @property
    def shape(self):
        return self.size()

    @property
    def dtype(self):
        """Return the cached dtype code, querying the graph on first use."""
        if self._dtype is None:
            self._fetch_meta()
        return self._dtype

    def __add__(self, other):
        return add(self, other)

    def __sub__(self, other):
        return sub(self, other)

    def __mul__(self, other):
        return mul(self, other)

    def __truediv__(self, other):
        return div(self, other)

    def __repr__(self):
        return f"Tensor(id={self.handle}, dtype={self.dtype}, shape={self.shape})"


def load(t: torch.Tensor):
    """Register a torch tensor's shape with the active graph; returns its _Tensor.

    NOTE(review): the dtype code is hard-coded to 0 — presumably a default
    enum value on the native side; confirm the mapping from ``t.dtype``
    before relying on non-default dtypes.
    """
    dims = list(t.size())
    return create_tensor(0, len(dims), dims)


@type_constriants
def create_tensor(dtype: int, dim_num: int, dims: List[int]):
    """Emit a CreateTensor node: dtype is a native dtype code (enum defined
    by libast), dim_num the rank, dims the shape. Returns a _Tensor."""
    return ast.create_tensor(dtype, dim_num, dims)


@type_constriants
def store(output, value) -> None:
    """Emit an Assign writing `value` into `output`; returns nothing
    (the decorator enforces the None result)."""
    ast.assign(output, value)


@type_constriants
def broadcast(x, size: List[int]):
    """Emit a Broadcast of tensor x to shape `size`; returns a new _Tensor."""
    return ast.broadcast(x, size)


@type_constriants
def row_max_expand(x):
    """Emit a RowMaxExpand node for x; returns a new _Tensor
    (exact semantics defined by the native op)."""
    return ast.row_max_expand(x)


@type_constriants
def row_sum_expand(x):
    """Emit a RowSumExpand node for x; returns a new _Tensor
    (exact semantics defined by the native op)."""
    return ast.row_sum_expand(x)


@type_constriants
def add(x, y):
    """Emit an Add node for two graph tensors; returns a new _Tensor."""
    return ast.add(x, y)


@type_constriants
def sub(x, y):
    """Emit a Sub node for two graph tensors; returns a new _Tensor."""
    return ast.sub(x, y)


@type_constriants
def div(x, y):
    """Emit a Div node for two graph tensors; returns a new _Tensor."""
    return ast.div(x, y)


@type_constriants
def exp(x):
    """Emit an Exp node for x; returns a new _Tensor."""
    return ast.exp(x)


@type_constriants
def mul(x, y):
    """Emit a Mul node for two graph tensors; returns a new _Tensor."""
    return ast.mul(x, y)


def jit(fn):
    """Trace decorator: runs `fn` against graph tensors mirrored from the
    torch tensor arguments inside a fresh _TileGraph, then hands the raw
    data pointers to graph.run().

    Accepts positional torch.Tensor arguments only; raises TypeError on
    keyword arguments or non-tensor positionals. Returns None (fn's return
    value is discarded — the graph's side effects are the product).
    """
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        # Bug fix: explicit raises instead of `assert`, which is stripped
        # under `python -O` and would silently admit bad inputs.
        if kwargs:
            raise TypeError("kwargs not supported")
        if not all(isinstance(arg, torch.Tensor) for arg in args):
            raise TypeError("args must be torch.Tensor")
        with _TileGraph() as graph:
            ast_args = [load(arg) for arg in args]
            fn(*ast_args)
        # NOTE(review): run() executes after the `with` block has destroyed
        # the native context; harmless while run() is a stub — confirm the
        # intended lifecycle before implementing it.
        graph.run([arg.data_ptr() for arg in args])
    return wrapper
