import numpy as np
from queue import Queue

#from autograd.ops import Add, Sub, Mul
#import autograd.ops as ops
import autograd.ops as ops

class Tensor:
    """A minimal autograd-enabled wrapper around a numpy ndarray.

    Each tensor records the op that produced it in ``grad_fn``; these links
    form the backward graph.  :meth:`backward` walks that graph breadth-first,
    calling each op's ``backward`` and accumulating gradients into the
    ``grad`` of every tensor that requires them.
    """

    def __init__(self,
                 data,
                 requires_grad=False,
                 grad_fn=None,
                 is_leaf=True
    ):
        """Wrap *data* as a Tensor.

        Args:
            data: array-like value, or a Tensor whose underlying ndarray is
                shared (not copied).
            requires_grad: if True, a zero gradient buffer is allocated now.
            grad_fn: the op instance that produced this tensor (None = leaf).
            is_leaf: True for user-created tensors, False for op outputs.
        """
        # Share the underlying ndarray when given a Tensor; otherwise coerce.
        self._data = data.data if isinstance(data, Tensor) else np.array(data)
        self.requires_grad = requires_grad
        self._grad = None  # None until a gradient is set; then an ndarray
        self.grad_fn = grad_fn  # op this tensor depends on (backward entry)
        self.is_leaf = is_leaf

        if self.requires_grad:
            self.zero_grad()

    @property
    def grad(self):
        """Accumulated gradient as an ndarray, or None."""
        return self._grad

    @grad.setter
    def grad(self, grad):
        self._grad = grad

    @property
    def data(self):
        """The underlying numpy ndarray."""
        return self._data

    @data.setter
    def data(self, data):
        # Replacing the data invalidates any previously computed gradient.
        self._data = data
        self._grad = None

    @property
    def shape(self):
        """Shape tuple of the underlying array."""
        return self._data.shape

    def dim(self):
        """Number of dimensions of the underlying array."""
        return self._data.ndim

    @property
    def T(self) -> "Tensor":
        """Transpose as a new Tensor (tracked through the Transpose op)."""
        return self.transpose()

    # --- factory classmethods producing specific tensors -------------------

    @classmethod
    def ones(cls, shape, **kwargs) -> "Tensor":
        """Tensor filled with ones of the given shape."""
        return cls(np.ones(shape=shape), **kwargs)

    @classmethod
    def zeros(cls, shape, **kwargs) -> "Tensor":
        """Tensor filled with zeros of the given shape."""
        return cls(np.zeros(shape=shape), **kwargs)

    @classmethod
    def empty(cls, shape, **kwargs) -> "Tensor":
        """Uninitialized tensor of the given shape."""
        return cls(np.empty(shape=shape), **kwargs)

    @classmethod
    def randn(cls, shape, **kwargs) -> "Tensor":
        """Tensor of standard-normal samples with the given shape.

        FIX: ``np.random.randn`` takes dimensions positionally and has no
        ``size`` keyword, so the original call raised ``TypeError``;
        ``standard_normal`` accepts a shape tuple (or int) directly.
        """
        return cls(np.random.standard_normal(size=shape), **kwargs)

    @classmethod
    def uniform(cls, low=0.0, high=1.0, size=None, **kwargs) -> "Tensor":
        """Tensor of samples drawn uniformly from [low, high)."""
        return cls(np.random.uniform(low=low, high=high, size=size), **kwargs)

    def reshape(self, shape=None):
        return ops.Reshape()(self, shape=shape)

    def transpose(self, axes=None):
        return ops.Transpose()(self, axes=axes)

    def __getitem__(self, idxs):
        return ops.Getitem()(self, idx=idxs)

    def __setitem__(self, key, value):
        # In-place assignment writes straight into the ndarray; it is not
        # tracked by autograd (no op is recorded).
        if isinstance(value, Tensor):
            value = value.data
        self.data[key] = value
        return self

    def add__(self, other):
        # NOTE(review): apparent leftover from before the Add op existed —
        # the string grad_fn cannot drive backward().  Kept only for
        # interface compatibility; prefer ``__add__``.
        data = self.data + other.data
        requires_grad = self.requires_grad | other.requires_grad
        return Tensor(data, requires_grad=requires_grad, grad_fn="AddbackwardFn",is_leaf=False)

    def __add__(self, other):
        return ops.Add()(self, other)

    def __sub__(self, other):
        return ops.Sub()(self, other)

    def __mul__(self, other):
        return ops.Mul()(self, other)

    def __truediv__(self, other):
        return ops.TrueDiv()(self, other)

    def __matmul__(self, other):
        return ops.Matmul()(self, other)

    def __neg__(self):
        return ops.Neg()(self)

    def sum(self, axis=None):
        return ops.Sum()(self, axis=axis)

    def mean(self, axis=None):
        return ops.Mean()(self, axis=axis)

    def max(self, axis=None):
        return ops.Max()(self, axis=axis)

    def min(self, axis=None):
        return ops.Min()(self, axis=axis)

    def log(self):
        return ops.Log()(self)

    def exp(self):
        return ops.Exp()(self)

    def pow(self, c):
        return ops.Pow()(self, c=c)

    def __repr__(self):
        return f"Tensor({self.data}" \
               f"{', requires_grad=' + str(self.requires_grad) if self.requires_grad else ''}" \
               f"{', grad_fn=' + str(self.grad_fn.__class__) if self.grad_fn is not None else ''})"

    def __str__(self):
        # Same text as __repr__; deduplicated instead of copy-pasted.
        return self.__repr__()

    def backward(self, grad=None):
        """Backpropagate from this tensor through the recorded graph.

        Args:
            grad: ndarray gradient of some scalar w.r.t. this tensor;
                defaults to ones (this tensor is treated as the entry point
                of the backward graph).

        Raises:
            RuntimeError: if this tensor does not require grad.
        """
        if not self.requires_grad:
            # Was an assert; raise explicitly so the check survives `-O`.
            raise RuntimeError("called backward on tensor do not require grad")
        # Seed gradient: 1.0 when this tensor is the graph entry point,
        # otherwise the externally supplied ndarray.
        self.grad = np.ones([1]) if grad is None else grad

        # Breadth-first traversal over the ops each tensor depends on, so
        # backward() never recurses.
        fns = Queue()  # queue of pending ops
        if self.grad_fn is not None:  # leaf tensors have no graph to walk
            fns.put(self.grad_fn)
        while not fns.empty():
            fn = fns.get()
            inputs = fn.inputs  # the tensors this op consumed
            # Gradient of the op's output w.r.t. each of its inputs.
            grads = fn.backward(fn.outputs.grad)
            if not isinstance(grads, tuple):
                grads = (grads,)
            for x, x_grad in zip(inputs, grads):
                if x.requires_grad and x_grad is not None:
                    # Accumulate: a tensor may feed several downstream ops.
                    if x.grad is not None:
                        x.grad = x.grad + x_grad
                    else:
                        x.grad = x_grad

                # NOTE(review): an op reachable through several paths is
                # enqueued once per path and its backward re-runs each time
                # — confirm whether graphs here are always trees.
                if x.grad_fn is not None:
                    fns.put(x.grad_fn)

    def zero_grad(self):
        """Reset the gradient buffer to zeros matching this tensor's shape."""
        self.grad = np.zeros(self.shape)


