# Import the required packages
import numpy as np
import math
import matplotlib.pyplot as plt
from random import randint

# Task 1: minimise g(x) by gradient descent with an adaptive learning rate

def g(x):
    """Quartic test function x**4 + 3x**3 - 7x**2 + 2x - 1 (Horner form)."""
    return ((x + 3) * x - 7) * x ** 2 + 2 * x - 1

# Plot g over a wide range to visualise its shape before optimising.
x = np.linspace(-100,100,2001)  # 2001 evenly spaced samples on [-100, 100]
y = g(x)
plt.plot(x,y)



# Parameters
LEARNING_RATE = 0.1  # base step size fed to the adaptive schedule
MAX_LEARNING_RATE = 1  # NOTE(review): only referenced from commented-out code below — currently unused
ITERATIONS = 1000  # number of gradient-descent steps
EPS = 1e-5  # finite-difference width and divide-by-zero guard

# Numerical gradient helper
def cal_grad(x, f):
    """Estimate f'(x) with a one-sided (forward) finite difference of width EPS."""
    here = f(x)
    ahead = f(x + EPS)
    return (ahead - here) / EPS

def adaptive_learningrate(learningrate, log_grad, eps=1e-5):
    """AdaGrad-style step size: the base rate shrinks as squared gradients accumulate.

    Args:
        learningrate: base learning rate.
        log_grad: history of all gradients observed so far.
        eps: small constant guarding against division by zero
            (defaults to the module's EPS value of 1e-5).

    Returns:
        learningrate / sqrt(sum(g**2 for g in log_grad) + eps).
    """
    # Bug fix: the original wrote `total_gard = pre**2` inside the loop,
    # overwriting the accumulator so only the LAST gradient counted.
    # Accumulate the full squared-gradient history instead.
    total_grad = sum(grad * grad for grad in log_grad)
    return learningrate / math.sqrt(total_grad + eps)

# Task-1 driver: run adaptive-learning-rate gradient descent on g from a
# random integer starting point in [-100, 100].
ext_point = float(randint(-100,100))
print("initial ext_point:{:.2f}".format(ext_point))
log = []       # visited x positions
log_y = []     # g evaluated at each visited position
log_grad = []  # gradient history consumed by adaptive_learningrate
for step in range(ITERATIONS):  # 'step', not 'iter' — avoid shadowing the builtin
    log.append(ext_point)
    log_y.append(g(ext_point))
    # Compute the gradient once per step; the original called cal_grad twice
    # with identical arguments (once for logging, once for the update).
    grad = cal_grad(ext_point, g)
    log_grad.append(grad)
    ext_point -= adaptive_learningrate(LEARNING_RATE, log_grad) * grad
print("ext_point is {:.2f}".format(ext_point))
plt.plot(log, log_y, '.')

# Zoom the axes in on the region containing the minima.
plt.xlim(-5, 5)
plt.ylim(-100, 100)
plt.show()



# Task 2: fit a straight line to noisy data by gradient descent on the mean-squared-error loss

# Build fake regression data: a line of slope 3 plus standard-normal noise.
def linearf(x):
    """Return 3*x perturbed by one standard-normal noise draw per sample."""
    noise = np.random.randn(x.shape[0])
    return 3 * x + noise
x = np.linspace(-3,3,11)  # 11 sample points on [-3, 3]
y = linearf(x)            # noisy targets scattered around the line y = 3x
plt.scatter(x,y)          # visualise the fake data


# Parameters
LEARNING_RATE = 0.01  # step size for both the w and b updates
ITERATIONS = 1000     # number of gradient-descent steps
EPS = 1e-5            # finite-difference width (re-bound; same value as task 1)
# init w,b: random integer starting guesses in [1, 5], stored as floats
w=float(randint(1,5))
b=float(randint(1,5))
print("initial w={:.4f}\ninit b={:.4f}".format(w,b))

def loss(w, b):
    """Mean-squared error of the line w*x + b against the module-level data (x, y).

    Args:
        w: slope of the candidate line.
        b: intercept of the candidate line.

    Returns:
        The mean of the squared residuals as a float.
    """
    residuals = y - w * x - b
    # Vectorised mean replaces the hand-written accumulation loop, and using
    # the actual sample count removes the hard-coded divisor 11 so the data
    # size can change without silently corrupting the loss.
    return float(np.mean(residuals ** 2))


def cal_grad_w(loss, w):
    """Forward-difference estimate of d(loss)/dw, holding the global b fixed."""
    base = loss(w, b)
    nudged = loss(w + EPS, b)
    return (nudged - base) / EPS

def cal_grad_b(loss, b):
    """Forward-difference estimate of d(loss)/db, holding the global w fixed."""
    base = loss(w, b)
    nudged = loss(w, b + EPS)
    return (nudged - base) / EPS

# Fit w and b jointly by gradient descent on the MSE loss.
# Update order matters: b's gradient is evaluated with the freshly updated w,
# matching the original statement order.
for step in range(ITERATIONS):  # 'step', not 'iter' — avoid shadowing the builtin
    w -= LEARNING_RATE * cal_grad_w(loss, w)
    b -= LEARNING_RATE * cal_grad_b(loss, b)

print("predict: y={:.4f}x{:+.4f}".format(w,b))
y_predict = w * x + b  # fitted line evaluated on the sample grid
plt.plot(x, y_predict, '--')

plt.show()
