# gradient_descent.py
# linear regression seen below
'''
I think this program is fairly efficient.
First, for most polynomials it finds ALL the extreme points (with |x| <= 1000) -- no more, no fewer --
without manually adjusting the initial extreme-point guess.
Second, even when the initial value is set to 150, where the gradient is large in magnitude,
the iterate still does not grow exponentially beyond the safe range, and the result is returned within 500 iterations.
'''
import numpy as np
from matplotlib import pyplot as plt

# Starting point of the gradient-descent search.
extp_init = 100
#extp_init = 150
#extp_init = 0

# Polynomial under study:
#   y = x^4 + 3x^3 - 7x^2 + 2x - 1
# i.e. y = sum(coeff[i] * x^i), where coeff[i] multiplies x^i.
order = 4
#order = 5
coeff = [-1, 2, -7, 3, 1]
#coeff=[-6, 2, 5, -3, -2, -5]

# Results accumulated by the search routines below.
extreme_max_points = []
extreme_min_points = []

def F(x, order=order, coeff=coeff):
    '''
    Evaluate the polynomial y = sum(coeff[i] * x^i) for i in 0..order.

    There is a small difference from the quadratic case: a quadratic is
    guaranteed to have exactly one extreme value, but we do not know in
    advance how many extreme points a higher-order polynomial has --
    only that there are at most (order - 1) of them.

    x     : scalar or numpy array at which to evaluate
    order : degree of the polynomial (defaults to the module-level `order`)
    coeff : coefficients; coeff[i] multiplies x^i (module-level default)
    '''
    # sum() over enumerate() replaces the manual accumulator and index
    # bookkeeping; slicing to order + 1 terms keeps extra trailing
    # coefficients from silently changing the degree.
    return sum(c * pow(x, i) for i, c in enumerate(coeff[:order + 1]))



def F_prime(x, fun=F):
    '''
    Numerical derivative of `fun` at x, using a one-sided (forward)
    difference quotient with a fixed small step.
    '''
    step = 1e-5
    return (fun(x + step) - fun(x)) / step



def peak_search_reverse(extP=0, fun=F, current_iter=0,  broad_iterations=order - 1):
    '''
    Search for extreme points to the LEFT of the starting point.

    First descends to a minimum, then climbs to the next maximum on its
    left, then moves further left for another minimum, and so on --
    alternating orientation on each recursion, up to `broad_iterations`
    (= order - 1) legs in total.

    extP             : starting x value for this leg of the search
    fun              : function whose extreme points are sought
    current_iter     : recursion depth (even -> minimum search, odd -> maximum)
    broad_iterations : maximum number of extreme points to look for

    Side effects: appends found points to the module-level
    extreme_min_points / extreme_max_points lists and plots the
    iteration trace with matplotlib.
    '''
    ITER = current_iter
    # Stop once the orientation has been alternated (order - 1) times.
    if ITER >= broad_iterations:
        return

    n_steps = 501  # inner gradient-descent iterations per extreme point
    log = [extP]
    ext_point = extP

    for step in range(n_steps):  # `step` (not `iter`) avoids shadowing the builtin
        log.append(ext_point)
        # The gradient is assumed large at first and to shrink toward 0,
        # so the rate starts tiny (otherwise the iterate would swing wider
        # and wider and diverge) and grows geometrically so later
        # iterations converge with fewer steps.
        learning_rate = 0.00001*pow(10, 3.5*step/n_steps)
        # '-=' finds minimum points, '+=' finds maximum points;
        # (-1)^ITER flips the orientation on each recursion level.
        ext_point -= pow(-1, ITER)*learning_rate*F_prime(ext_point, fun)
        # |ext_point| > 1000 is reckoned to be outside the possible range.
        if abs(ext_point) > 1000:
            return

    if ITER % 2 == 0:
        extreme_min_points.append(ext_point)
    else:
        extreme_max_points.append(ext_point)
    plt.plot(log)
    #plt.show()

    # Nudge off the peak/trough region so the next leg can escape it.
    ext_point -= 1e-2
    return peak_search_reverse(ext_point, fun, ITER + 1, broad_iterations)



def peak_search_forward(extP=0, fun=F,  current_iter=0,  broad_iterations=order - 1):
    '''
    Search for extreme points to the RIGHT of the starting point.

    Same alternating process as peak_search_reverse, but it climbs to a
    maximum first (even recursion depth -> maximum, odd -> minimum).

    extP             : starting x value for this leg of the search
    fun              : function whose extreme points are sought
    current_iter     : recursion depth (even -> maximum search, odd -> minimum)
    broad_iterations : maximum number of extreme points to look for

    Side effects: appends found points to the module-level
    extreme_min_points / extreme_max_points lists and plots the
    iteration trace with matplotlib.
    '''
    ITER = current_iter
    if ITER >= broad_iterations:
        return

    n_steps = 501  # inner gradient-descent iterations per extreme point
    log = [extP]
    ext_point = extP

    for step in range(n_steps):  # `step` (not `iter`) avoids shadowing the builtin
        log.append(ext_point)
        # Tiny rate first, grown geometrically -- same schedule as the
        # reverse search; see the comment there for the rationale.
        learning_rate = 0.00001*pow(10, 3.5*step/n_steps)
        # '+=' climbs to a maximum, '-=' descends to a minimum;
        # (-1)^ITER alternates the orientation on each recursion level.
        ext_point += pow(-1, ITER)*learning_rate*F_prime(ext_point, fun)
        # |ext_point| > 1000 is reckoned to be outside the possible range.
        if abs(ext_point) > 1000:
            return

    if ITER % 2 == 1:
        extreme_min_points.append(ext_point)
    else:
        extreme_max_points.append(ext_point)
    plt.plot(log)
    #plt.show()

    # Nudge off the peak/trough region so the next leg can escape it.
    ext_point += 1e-2
    return peak_search_forward(ext_point, fun, ITER + 1, broad_iterations)



# Sample the polynomial over [-100, 100] (kept for optional plotting).
x = np.linspace(-100,100,100001) 
y = F(x)
#plt.plot(y)

#extp_init = 0
# Starting from the initial point, iterate (order - 1) times to find every
# possible peak and trough with -1000 < x < 1000, on both the left and the
# right side of the starting point. In this program, 150 is still a valid
# starting value.
peak_search_forward(extp_init)
peak_search_reverse(extp_init)

print('extreme maximum point(s) is(are) :')
for found_max in extreme_max_points:
    print(found_max)
print('extreme minimum point(s) is(are) :')
for found_min in extreme_min_points:
    print(found_min)

plt.show()





#linear_regression.py
def linearf(x):
    '''
    Noisy linear target: 3*x plus standard-normal noise.
    The RNG is reseeded on every call so the noise is reproducible.
    '''
    np.random.seed(3)
    noise = np.random.randn(x.shape[0])
    return 3*x + noise

x = np.linspace(-3, 3, 20)
y = linearf(x)

def loss_para(w):
    '''
    Mean squared error of the line y_hat = w * x against the module-level
    sample (x, y).

    w : candidate slope
    Returns the average squared residual over the sample.
    '''
    residuals = w*x - y
    # np.mean generalizes the original hard-coded division by 20 to any
    # sample size, and replaces the manual accumulation loop.
    return np.mean(residuals**2)

# s = np.linspace(-10, 10, 1000)
# plt.plot(s, list(map(loss_para, s)))
# plt.show()

# Random starting slope for the descent over the loss surface.
init_point = np.random.randint(-100, 100)
# The loss is quadratic (convex) in w, so a single minimum search suffices.
peak_search_reverse(init_point, loss_para, 0, broad_iterations=1)
print('approximate w is :')
if extreme_min_points:
    print(extreme_min_points[-1])
else:
    # The search returns without appending anything when the iterate leaves
    # |x| <= 1000, so guard against an IndexError on an empty result list.
    print('search diverged: no minimum was recorded')
plt.show()





# Gradient descent and linear regression
'''
For gradient descent, starting from the initial point, we create two branches of iteration, one toward the positive
direction and one toward the negative direction. Each branch can iterate at most (order - 1) times -- where order is the
degree of the polynomial -- to find candidate extreme points, until it runs into an extremely large number (1000 is
chosen as the boundary). In addition, after each iteration process the function reverses its orientation, say from peak
search to trough search, because after a maximum point there is either nothing or a minimum point. That is to say, all
the solutions can be divided into maximum points and minimum points. Finally, the plot visualizes the searching process.

For linear regression, we simply reuse the function above.
'''