from collections.abc import Iterable

import numpy as np


class Tensor:
    """A minimal reverse-mode autodiff tensor backed by a numpy array.

    Every operation returns a new Tensor whose ``_backward`` closure
    accumulates gradients into the operation's inputs; ``backward()`` fires
    those closures in reverse topological order, starting from a scalar
    (shape ``(1,)``) value.
    """

    # Global train/eval switch (affects dropout()).
    training = True
    # Make numpy return NotImplemented for `ndarray <op> Tensor` so Python
    # falls through to our reflected operators (__radd__, __rsub__, ...)
    # instead of numpy coercing the Tensor into an object array.
    __array_ufunc__ = None

    def __init__(self, data, parents=(), note='', trainable=True):
        """Wrap ``data`` (a numpy array).

        parents: Tensor or iterable of Tensors this value was computed
            from; only used to walk the graph during backward().
        note: human-readable op tag, for debugging.
        trainable: marks leaf parameters (intermediate results pass False).
        """
        self.data = data
        self.grad = np.zeros_like(self.data)
        if not isinstance(parents, Iterable):
            parents = [parents]
        self._parents = parents
        self._backward = lambda: None  # replaced by the op that creates us
        self._note = note
        self._trainable = trainable

    @staticmethod
    def randn(*shape, trainable=True):
        """Tensor of standard-normal samples with the given shape."""
        return Tensor(np.random.randn(*shape), trainable=trainable)

    @staticmethod
    def zeros(*shape, trainable=True):
        """Float32 tensor of zeros with the given shape."""
        return Tensor(np.zeros(shape, dtype=np.float32), trainable=trainable)

    @staticmethod
    def ones(*shape, trainable=True):
        """Float32 tensor of ones with the given shape."""
        return Tensor(np.ones(shape, dtype=np.float32), trainable=trainable)

    @staticmethod
    def uniform(*shape, trainable=True):
        """Uniform(-1, 1) tensor scaled by 1/sqrt(fan) (simple init scheme)."""
        return Tensor((np.random.uniform(-1., 1., size=shape)/np.sqrt(np.prod(shape))).astype(np.float32), trainable=trainable)

    @property
    def dtype(self):
        return self.data.dtype

    @property
    def shape(self):
        return self.data.shape

    @property
    def ndims(self):
        return len(self.shape)

    def topo_sort(self):
        """Return the graph rooted at self, parents before children."""
        def _dfs(node, visited, nodes):
            visited.add(node)
            for n in node._parents:
                if n not in visited:
                    _dfs(n, visited, nodes)
            nodes.append(node)
            return nodes
        return _dfs(self, set(), [])

    def backward(self):
        """Backprop from this scalar: seed grad with ones and run every
        node's _backward in reverse topological order."""
        assert self.shape == (1,)
        self.grad = np.ones_like(self.data)
        for n in reversed(self.topo_sort()):
            n._backward()

    def reshape(self, shape):
        """Differentiable reshape; gradient is reshaped back."""
        res = Tensor(self.data.reshape(shape), self,
                     f'reshape({shape})', False)

        def _bw():
            self.grad += res.grad.reshape(self.shape)
        res._backward = _bw
        return res

    def transpose(self, axes):
        """Differentiable transpose; gradient uses the inverse permutation."""
        res = Tensor(np.transpose(self.data, axes),
                     self, f'transpose({axes})', False)

        def _bw():
            # argsort(axes) is the permutation that undoes `axes`
            self.grad += np.transpose(res.grad, np.argsort(axes))
        res._backward = _bw
        return res

    def unbroadcast(self, grad):
        """Accumulate ``grad`` into self.grad, summing over axes where self
        was broadcast (size 1 vs. grad's larger size) so shapes match."""
        sum_axis = tuple([i for i in range(self.ndims) if self.shape[i]
                          == 1 and grad.shape[i] > 1]) if self.shape != (1,) else None
        self.grad += np.sum(grad, sum_axis, keepdims=True)

    def __add__(self, oth):
        """self + oth for scalar, ndarray (same ndims) or Tensor operands."""
        if isinstance(oth, (int, float)):
            res = Tensor(self.data+float(oth), self, f'+{float(oth)}', False)

            def _bw():
                self.grad += res.grad
            res._backward = _bw
            return res
        elif isinstance(oth, np.ndarray):
            assert self.ndims == len(oth.shape)
            res = Tensor(self.data+oth, self, f'+{oth.shape}', False)

            def _bw():
                self.unbroadcast(res.grad)
            res._backward = _bw
            return res
        elif isinstance(oth, Tensor):
            assert self.ndims == oth.ndims
            res = Tensor(self.data+oth.data, (self, oth), '+', False)

            def _bw():
                self.unbroadcast(res.grad)
                oth.unbroadcast(res.grad)
            res._backward = _bw
            return res
        else:
            raise Exception('Not impl')

    # addition is commutative, so the reflected form can share the impl
    __radd__ = __add__

    def __neg__(self):
        res = Tensor(-self.data, self, '-', False)

        def _bw():
            self.grad -= res.grad
        res._backward = _bw
        return res

    def __sub__(self, oth):
        return self+(-oth)

    def __rsub__(self, oth):
        # oth - self, with oth (scalar/ndarray) on the left.  NOTE: this
        # must NOT alias __sub__ — subtraction is not commutative.
        return (-self)+oth

    def __mul__(self, oth):
        """self * oth for scalar, ndarray (same ndims) or Tensor operands."""
        if isinstance(oth, (int, float)):
            res = Tensor(self.data*float(oth), self, f'*{float(oth)}', False)

            def _bw():
                self.grad += res.grad*float(oth)
            res._backward = _bw
            return res
        elif isinstance(oth, np.ndarray):
            assert self.ndims == len(oth.shape)
            res = Tensor(self.data*oth, self, f'*{oth.shape}', False)

            def _bw():
                self.unbroadcast(oth * res.grad)
            res._backward = _bw
            return res
        elif isinstance(oth, Tensor):
            assert self.ndims == oth.ndims
            res = Tensor(self.data * oth.data, (self, oth), '*', False)

            def _bw():
                self.unbroadcast(oth.data * res.grad)
                oth.unbroadcast(self.data * res.grad)
            res._backward = _bw
            return res
        else:
            raise Exception('Not impl')

    # multiplication is commutative
    __rmul__ = __mul__

    def __pow__(self, oth):
        """self ** oth; d/dx x^n = n*x^(n-1), d/dn x^n = x^n * ln(x)."""
        if isinstance(oth, (int, float)):
            res = Tensor(self.data**float(oth), self, f'^{float(oth)}', False)

            def _bw():
                self.grad += float(oth)*self.data**(float(oth)-1.)*res.grad
            res._backward = _bw
            return res
        elif isinstance(oth, np.ndarray):
            assert self.ndims == len(oth.shape)
            res = Tensor(self.data**oth, self, f'^{oth.shape}', False)

            def _bw():
                self.unbroadcast(oth*self.data**(oth-1.)*res.grad)
            res._backward = _bw
            return res
        elif isinstance(oth, Tensor):
            assert self.ndims == oth.ndims
            res = Tensor(self.data ** oth.data, (self, oth), '^', False)

            def _bw():
                self.unbroadcast(oth.data*self.data**(oth.data-1.)*res.grad)
                oth.unbroadcast(self.data**oth.data*np.log(self.data)*res.grad)
            res._backward = _bw
            return res
        else:
            raise Exception('Not impl')

    def __truediv__(self, oth):
        # a/b == a * b^-1; float exponent so integer ndarrays don't raise
        return self*oth**-1.0

    def __rtruediv__(self, oth):
        # oth / self
        return self**-1.0*oth

    def __matmul__(self, oth):
        """Matrix multiply self @ oth (ndarray or Tensor)."""
        if isinstance(oth, np.ndarray):
            res = Tensor(self.data@oth, self, f'@{oth.shape}', False)

            def _bw():
                # d(A@B)/dA = dres @ B^T  (oth IS the ndarray, not oth.data)
                self.grad += res.grad @ np.swapaxes(oth, -2, -1)
            res._backward = _bw
            return res
        elif isinstance(oth, Tensor):
            res = Tensor(self.data@oth.data, (self, oth), '@', False)

            def _bw():
                self.grad += res.grad @ np.swapaxes(oth.data, -2, -1)
                oth.grad += np.swapaxes(self.data, -2, -1) @ res.grad
            res._backward = _bw
            return res
        else:
            raise Exception('Not impl')

    def __rmatmul__(self, oth):
        # oth @ self with a plain ndarray on the left.  NOTE: must NOT alias
        # __matmul__ — matrix multiplication is not commutative.
        if isinstance(oth, np.ndarray):
            res = Tensor(oth@self.data, self, f'{oth.shape}@', False)

            def _bw():
                # d(A@B)/dB = A^T @ dres
                self.grad += np.swapaxes(oth, -2, -1) @ res.grad
            res._backward = _bw
            return res
        elif isinstance(oth, Tensor):
            return oth @ self
        else:
            raise Exception('Not impl')

    def slice(self, arg=None):
        """Differentiable slice; ``arg`` is one (start, stop) pair per axis.
        Out-of-range starts/stops zero-pad rather than clip."""
        def inner_slice(x, arg):
            padding = [(max(0, -p[0]), max(0, p[1]-x.shape[i]))
                       for i, p in enumerate(arg)]
            x = np.pad(x, padding)
            slicee = [(p[0] + padding[i][0], p[1] + padding[i][0])
                      for i, p in enumerate(arg)]
            return x[tuple([slice(lo, hi, None) for lo, hi in slicee])]
        res = Tensor(inner_slice(self.data, arg), self, 'slice', False)

        def _bw():
            # the inverse slice scatters res.grad back into self's shape
            narg = [(0-p[0], res.shape[i]+(self.shape[i]-p[1]))
                    for i, p in enumerate(arg)]
            self.grad += inner_slice(res.grad, narg)
        res._backward = _bw
        return res

    def __getitem__(self, val):
        """Basic slicing (step 1 only); missing trailing axes are kept whole."""
        arg = []
        for i, s in enumerate(val if isinstance(val, (list, tuple)) else ([] if val is None else [val])):
            arg.append((s.start if s.start is not None else 0,
                        (s.stop if s.stop >= 0 else self.shape[i]+s.stop) if s.stop is not None else self.shape[i]))
            assert s.step is None or s.step == 1
        return self.slice(arg=arg+[(0, self.shape[i]) for i in range(len(arg), len(self.shape))])

    def sum(self, axis=None):
        """Sum over ``axis`` (int, iterable, or None for all -> shape (1,))."""
        if axis is None:
            res = Tensor(np.array([self.data.sum()]), self, 'sum', False)
        else:
            res = Tensor(self.data.sum(axis), self, 'sum', False)

        def _bw():
            nonlocal axis
            if isinstance(axis, int):
                axis = [axis]
            if axis is not None:
                axis = [x if x >= 0 else self.ndims+x for x in axis]
            # reshape grad so it broadcasts back over the summed axes
            shape = tuple(1 if axis is None or i in axis else self.shape[i]
                          for i in range(self.ndims))
            self.grad += res.grad.reshape(shape)
        res._backward = _bw
        return res

    def max(self, axis=None):
        """Max over ``axis``; gradient is split evenly among tied maxima."""
        if isinstance(axis, int):
            axis = [axis]
        if axis is not None:
            axis = tuple(x if x >= 0 else self.ndims+x for x in axis)
        ret = np.amax(self.data, axis, keepdims=True)
        if axis is not None:
            res = Tensor(ret.reshape([self.shape[i] for i in range(
                self.ndims) if i not in axis]), self, 'max', False)
        else:
            res = Tensor(ret, self, 'max', False)

        def _bw():
            shape = [1 if axis is None or i in axis else self.shape[i]
                     for i in range(self.ndims)]
            tmp = (self.data == ret.reshape(shape))  # mask of argmax positions
            self.grad += tmp * \
                res.grad.reshape(shape)/tmp.sum(axis, keepdims=True)
        res._backward = _bw
        return res

    def exp(self):
        res = Tensor(np.exp(self.data), self, 'exp', False)

        def _bw():
            # d/dx e^x = e^x, already computed as res.data
            self.grad += res.data*res.grad
        res._backward = _bw
        return res

    def log(self):
        res = Tensor(np.log(self.data), self, 'log', False)

        def _bw():
            self.grad += res.grad/self.data
        res._backward = _bw
        return res

    def relu(self):
        res = Tensor(np.maximum(self.data, 0), self, 'relu', False)

        def _bw():
            self.grad += (res.data >= 0)*res.grad
        res._backward = _bw
        return res

    def softmax(self):
        """Softmax over the last axis (max-subtracted for stability)."""
        ns = list(self.shape)[:-1]+[1]
        m = self.max(-1).reshape(ns)
        e = (self - m).exp()
        ss = e.sum(-1).reshape(ns)
        return e/ss

    def logsoftmax(self):
        """Log-softmax over the last axis via the log-sum-exp trick."""
        ns = list(self.shape)[:-1]+[1]
        m = self.max(-1).reshape(ns)
        ss = m + (self-m).exp().sum(-1).reshape(ns).log()
        return self - ss

    def sigmoid(self):
        # e^x / (1 + e^x); NOTE(review): can overflow for large x — a
        # numerically-stable variant would branch on sign
        e = self.exp()
        return e/(1 + e)

    def tanh(self):
        # tanh(x) = 2*sigmoid(2x) - 1
        return 2.0 * ((2.0 * self).sigmoid()) - 1.0

    def leakyrelu(self, neg_slope=0.01):
        return self.relu() - (-neg_slope*self).relu()

    def mean(self, axis=None):
        # mean = sum * (output size / input size)
        res = self.sum(axis)
        return res * (np.prod(res.shape)/np.prod(self.shape))

    def conv2d(self, w, stride=1, groups=1):
        """Grouped 2D cross-correlation of self (N,C,H,W) with w
        (cout, cin, kH, kW); no padding, ``stride`` int or (sy, sx)."""
        if isinstance(stride, int):
            stride = (stride, stride)
        cout, cin, H, W = w.shape
        ys, xs = stride
        bs, cin_ = self.shape[0], self.shape[1]
        oy, ox = (self.shape[2]-(H-ys))//ys, (self.shape[3]-(W-xs))//xs
        assert cin*groups == cin_
        assert cout % groups == 0
        rcout = cout//groups
        gx = self.reshape((bs, groups, cin, self.shape[2], self.shape[3]))
        # zero-copy view of all (H, W) patches at every output position
        tx = np.lib.stride_tricks.as_strided(gx.data,
                                             shape=(bs, groups, cin,
                                                    oy, ox, H, W),
                                             strides=(
                                                 *gx.data.strides[0:3], gx.data.strides[3]*ys, gx.data.strides[4]*xs, *gx.data.strides[3:5]),
                                             writeable=False,
                                             )
        tw = w.reshape((groups, rcout, cin, H, W))
        ret = np.zeros((bs, groups, oy, ox, rcout), dtype=self.dtype)
        for g in range(groups):
            # ijYXyx,kjyx -> iYXk ->ikYX
            ret[:, g] += np.tensordot(tx[:, g],
                                      tw.data[g], ((1, 4, 5), (1, 2, 3)))
        res = Tensor(np.moveaxis(ret, 4, 2).reshape(
            bs, cout, oy, ox), (gx, tw), 'conv2d', False)

        def _bw():
            # gradients are written straight into self.grad / w.grad,
            # bypassing the intermediate reshape nodes gx and tw
            OY, OX = self.shape[2:4]
            ggg = res.grad.reshape(bs, groups, rcout, oy, ox)
            gdw = np.zeros((groups, rcout, cin, H, W), dtype=self.dtype)
            for g in range(groups):
                #'ikYX,ijYXyx -> kjyx'
                gdw[g] += np.tensordot(ggg[:, g], tx[:, g],
                                       ((0, 2, 3), (0, 2, 3)))

            gdx = np.zeros((bs, groups, cin, OY, OX), dtype=self.dtype)
            for k in range(oy*ox):
                Y, X = k//ox, k % ox
                iY, iX = Y*ys, X*xs
                #gdx[:,:,: , iY:iY+H, iX:iX+W] += np.einsum('igk,gkjyx->igjyx', ggg[:,:,:,Y,X], tw)
                for g in range(groups):
                    tg = np.dot(ggg[:, g, :, Y, X].reshape(
                        bs, -1), tw.data[g].reshape(rcout, -1))
                    gdx[:, g, :, iY:iY+H, iX:iX +
                        W] += tg.reshape((bs, cin, H, W))
            self.grad += gdx.reshape(bs, groups*cin, OY, OX)
            w.grad += gdw.reshape(groups*rcout, cin, H, W)
        res._backward = _bw
        return res

    def _pool2d(self, py, px):
        """Crop to a multiple of the pool size, then expose (py, px)
        windows as extra axes 3 and 5 for reduction."""
        xup = self[:, :, :self.shape[2]-self.shape[2] %
                   py, :self.shape[3]-self.shape[3] % px]
        return xup.reshape(shape=(xup.shape[0], xup.shape[1], xup.shape[2]//py, py, xup.shape[3]//px, px))

    def avg_pool2d(self, kernel_size=(2, 2)):
        return self._pool2d(*kernel_size).mean((3, 5))

    def max_pool2d(self, kernel_size=(2, 2)):
        return self._pool2d(*kernel_size).max((3, 5))

    def pad2d(self, pad):
        """Zero-pad H/W; pad = (left, right, top, bottom).  Relies on
        slice() treating negative starts / overlong stops as padding."""
        return self[:, :, -pad[2]:self.shape[2]+pad[3], -pad[0]:self.shape[3]+pad[1]]

    def dropout(self, p=0.2):
        """Inverted dropout: active only while Tensor.training is True."""
        if Tensor.training:
            _mask = np.asarray(np.random.binomial(
                1, 1.0-p, size=self.shape), dtype=self.dtype)
            # scale by 1/(1-p) so the expected activation is unchanged
            return self * Tensor(_mask, trainable=False) * (1/(1.0 - p))
        else:
            return self


if __name__ == '__main__':
    # quick smoke test: 2x2 ones-kernel conv over a 4x4 ones image,
    # followed by 2x2 max pooling
    image = Tensor.ones(1, 1, 4, 4)
    kernel = Tensor.ones(1, 1, 2, 2)
    conv_out = image.conv2d(kernel)
    pooled = conv_out.max_pool2d()
    print(conv_out.data)
    print(pooled.data)