from engine import *
import torch;
def Example2():
    """Fully manual backprop through one tanh neuron: o = tanh(x1*w1 + x2*w2 + b).

    Every gradient is filled in by hand with the chain rule (no autograd calls).
    Returns the output ``Value`` node ``o``.
    """
    # inputs x1, x2
    x1 = Value(2.0, _label='x1')
    x2 = Value(0.0, _label='x2')

    # weights w1, w2
    w1 = Value(-3.0, _label='w1')
    w2 = Value(1.0, _label='w2')
    b = Value(6.8813735870195432, _label='b') # bias
    x1w1 = x1 * w1; x1w1._label = 'x1w1'
    x2w2 = x2 * w2; x2w2._label = 'x2w2'
    # label fixed: was the typo 'x1w1 + 2w2'
    x1w1x2w2 = x1w1 + x2w2; x1w1x2w2._label = 'x1w1 + x2w2'
    n = x1w1x2w2 + b; n._label = 'n'
    o = n.tanh(); o._label = 'o'
    o.grad = 1.0  # seed: do/do = 1
    # o = tanh(n)  =>  do/dn = 1 - tanh(n)^2, and tanh(n) == o.data
    n.grad = 1.0 - o.data**2
    # n = x1w1x2w2 + b: addition passes the gradient through unchanged,
    # so by the chain rule both operands receive n.grad * 1
    x1w1x2w2.grad = n.grad
    b.grad = n.grad
    # same for the inner addition x1w1x2w2 = x1w1 + x2w2
    x1w1.grad = x1w1x2w2.grad
    x2w2.grad = x1w1x2w2.grad
    # product rule: d(x*w)/dx = w, d(x*w)/dw = x
    x2.grad = w2.data * x2w2.grad
    w2.grad = x2.data * x2w2.grad

    x1.grad = w1.data * x1w1.grad
    w1.grad = x1.data * x1w1.grad
    return o

def Example3():
    """Same neuron as Example2, but gradients come from calling each node's
    autograd ``_backward`` by hand, in reverse topological order.

    Bug fix: the original version never called ``x2w2._backward()``, so
    ``x2.grad`` and ``w2.grad`` were never populated.
    Returns the output ``Value`` node ``o``.
    """
    # inputs x1, x2
    x1 = Value(2.0, _label='x1')
    x2 = Value(0.0, _label='x2')

    # weights w1, w2
    w1 = Value(-3.0, _label='w1')
    w2 = Value(1.0, _label='w2')
    b = Value(6.8813735870195432, _label='b') # bias
    x1w1 = x1 * w1; x1w1._label = 'x1w1'
    x2w2 = x2 * w2; x2w2._label = 'x2w2'
    x1w1x2w2 = x1w1 + x2w2; x1w1x2w2._label = 'x1w1 + x2w2'
    n = x1w1x2w2 + b; n._label = 'n'
    o = n.tanh(); o._label = 'o'
    o.grad = 1.0  # seed the output gradient before propagating
    o._backward()
    n._backward()
    x1w1x2w2._backward()
    x1w1._backward()
    x2w2._backward()  # was missing: without it x2/w2 got no gradient
    # leaves: presumably no-ops for Value leaves, kept for symmetry — confirm in engine
    x1._backward()
    x2._backward()
    return o

def Example4():
    """Backprop driven by an explicit topological sort.

    Builds the graph order with ``build_topo`` (from ``engine``) and calls
    ``_backward`` on every node from output to leaves, so no node can be
    skipped by hand. Returns the output ``Value`` node ``o``.
    """
    x1 = Value(2.0, _label='x1')
    x2 = Value(0.0, _label='x2')

    # weights w1, w2
    w1 = Value(-3.0, _label='w1')
    w2 = Value(1.0, _label='w2')
    b = Value(6.8813735870195432, _label='b') # bias
    x1w1 = x1 * w1; x1w1._label = 'x1w1'
    x2w2 = x2 * w2; x2w2._label = 'x2w2'
    # label fixed: was the typo 'x1w1 + 2w2'
    x1w1x2w2 = x1w1 + x2w2; x1w1x2w2._label = 'x1w1 + x2w2'
    n = x1w1x2w2 + b; n._label = 'n'
    o = n.tanh(); o._label = 'o'
    o.grad = 1.0  # seed output gradient
    topo = build_topo(o)
    # reverse topological order guarantees each node's grad is final
    # before it is pushed to its children
    for v in reversed(topo):
        v._backward()
    return o

def Example5():
    """Same neuron, using the fully automatic ``Value.backward()`` entry point.

    ``o.backward()`` is expected to seed o.grad and walk the graph itself.
    Returns the output ``Value`` node ``o``.
    """
    x1 = Value(2.0, _label='x1')
    x2 = Value(0.0, _label='x2')

    # weights w1, w2
    w1 = Value(-3.0, _label='w1')
    w2 = Value(1.0, _label='w2')
    b = Value(6.8813735870195432, _label='b') # bias
    x1w1 = x1 * w1; x1w1._label = 'x1w1'
    x2w2 = x2 * w2; x2w2._label = 'x2w2'
    # label fixed: was the typo 'x1w1 + 2w2'
    x1w1x2w2 = x1w1 + x2w2; x1w1x2w2._label = 'x1w1 + x2w2'
    n = x1w1x2w2 + b; n._label = 'n'
    o = n.tanh(); o._label = 'o'
    o.backward()
    return o

def Example6():
    """Same neuron with tanh expressed via exp: tanh(n) = (e^(2n) - 1) / (e^(2n) + 1).

    Exercises the engine's ``exp``, subtraction, and division ops instead of
    the fused ``tanh``. Returns the output ``Value`` node ``o``.
    """
    x1 = Value(2.0, _label='x1')
    x2 = Value(0.0, _label='x2')

    # weights w1, w2
    w1 = Value(-3.0, _label='w1')
    w2 = Value(1.0, _label='w2')
    b = Value(6.8813735870195432, _label='b') # bias
    x1w1 = x1 * w1; x1w1._label = 'x1w1'
    x2w2 = x2 * w2; x2w2._label = 'x2w2'
    # label fixed: was the typo 'x1w1 + 2w2'
    x1w1x2w2 = x1w1 + x2w2; x1w1x2w2._label = 'x1w1 + x2w2'
    n = x1w1x2w2 + b; n._label = 'n'
    e = (2 * n).exp(); e._label = 'e'
    o = (e - 1) / (e + 1); o._label = 'o'
    o.backward()
    return o

def Example7():
    x1 = torch.tensor([2.0]).double(); x1.requires_grad = True
    x2 = torch.tensor([0.0]).double(); x2.requires_grad = True
    w1 = torch.tensor([-3.0]).double(); w1.requires_grad = True
    w2 = torch.tensor([1.0]).double(); w2.requires_grad = True
    b = torch.tensor([6.8813735870195432]).double(); b.requires_grad = True
    n = x1 * w1 + x2 * w2 + b
    o = torch.tanh(n)
    print (o.data.item())
    o.backward()
    print (x1.grad.item())
    print (x2.grad.item())
    print (w1.grad.item())
    print (w2.grad.item())
    print (b.grad.item())
if __name__ == "__main__":
    # TODO: trace the bug in Examples 4, 5, 6 — backward not stable
    Example7()