'''
@Time: 2020/10/13 20:32  
@Author: 程嘉明
@File: linear_regression_02.py
Software: PyCharm
target: 程序目标
'''

import pandas as pd
import numpy as np
import random
import sys
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['font.serif'] = ['SimHei']

# Load the raw train/test CSVs; gb18030 handles the Chinese column headers.
train_data = pd.read_csv('./data/train.csv', encoding='gb18030')
test_data = pd.read_csv('./data/test.csv', encoding='gb18030')

# Per-pollutant global extremes, populated by get_max_min_list() below.
max_list,min_list=list(),list()

def get_max_min_list(data=None):
    """Scan every day's table and collect the per-pollutant extremes.

    Each day group is an 18-row table (one row per measured item); the
    first two columns are the date stamp and item name and are skipped.

    :param data: raw training frame grouped by the '时间戳' column;
                 defaults to the module-level ``train_data`` (keeps the
                 original zero-argument call working)
    :return: (max_list, min_list), each a length-18 list of floats
    """
    if data is None:
        data = train_data
    # -inf start so all-negative columns are handled (the old 0 start
    # would silently report a max of 0 for them).
    maxima = [float('-inf')] * 18
    minima = [float('inf')] * 18
    for _, one_day in data.groupby('时间戳'):
        # non-inplace replace: avoids mutating a groupby slice
        one_day = one_day.replace('NR', 0)
        for row_index, (_, row) in enumerate(one_day.iterrows()):
            values = row.iloc[2:].astype(float)
            row_max = values.max()
            row_min = values.min()
            if row_max > maxima[row_index]:
                maxima[row_index] = row_max
            if row_min < minima[row_index]:
                minima[row_index] = row_min
    return maxima, minima

max_list,min_list=get_max_min_list()

def root_mean_square_not_contain_N(W):
    """Return sqrt(sum of squares) of *W*.

    A root-mean-square with the 1/N divisor already cancelled out
    (used as the Adagrad denominator over the gradient history).
    """
    arr = np.asarray(W)
    return np.sqrt((arr * arr).sum())

def split_train_data(train_data):
    """Split each day's table into sliding 10-hour windows.

    Hours [t, t+8] are features and hour t+9 is the label, so each
    window is an 18x10 frame. The first two columns of the raw table
    (date stamp, item name) are skipped.

    :param train_data: raw frame with a '时间戳' column grouping rows by day
    :return: list of 18x10 frames, one per sliding window
    """
    windows = []
    for _, one_day in train_data.groupby('时间戳'):
        # BUG FIX: the bound must be derived from the COLUMN count, not
        # len() (the ROW count).  The original only produced the right
        # range because an 18-row day happened to have 26 columns
        # (2 labels + 24 hours), making both bounds equal to 17.
        stop = one_day.shape[1] - 9  # last valid window start + 1
        for start in range(2, stop):
            windows.append(one_day.iloc[:, start:start + 10])
    return windows


def NR_to_number(train_data_list):
    """Convert the RAINFALL 'NR' marker into the number 0.

    Every frame is modified in place; a new list containing the same
    frame objects is returned.

    :param train_data_list: list of batch frames
    :return: new list with the same (now cleaned) frames
    """
    for frame in train_data_list:
        frame.replace('NR', 0, inplace=True)
    return list(train_data_list)

def data_scaling(train_data_list, maxima=None, minima=None):
    """Min-max normalize every 18x10 batch in place.

    Each cell becomes (x - min) / (max - min) using the per-row
    (per-pollutant) extremes.

    :param train_data_list: list of 18x10 batch frames
    :param maxima: per-row maxima; defaults to the module-level max_list
    :param minima: per-row minima; defaults to the module-level min_list
    :return: the same list, with every batch scaled
    """
    if maxima is None:
        maxima = max_list
    if minima is None:
        minima = min_list
    # BUG FIX: the original iterated train_data_list[0:1], so only the
    # FIRST batch was ever normalized (apparent debugging leftover).
    for batch in train_data_list:
        for row_index in range(0, 18):
            span = float(maxima[row_index]) - float(minima[row_index])
            if span == 0:
                # constant feature: leave values as-is rather than divide by zero
                continue
            for col_index in range(0, 10):
                shifted = float(batch.iloc[row_index, col_index]) - float(minima[row_index])
                batch.iloc[row_index, col_index] = shifted / span
    return train_data_list

def data_processing():
    """Run the full preprocessing pipeline on the global training data.

    :return: list of normalized 18x10 batches (9 feature hours + 1 label hour)
    """
    # slice every day into sliding 10-hour windows
    batches = split_train_data(train_data)
    # rainfall 'NR' marker -> numeric 0
    batches = NR_to_number(batches)
    # min-max normalization
    return data_scaling(batches)

def initialize_W_b():
    """Randomly initialize the model parameters.

    :return: (Wi0, Wj0, b0) — linear weights, quadratic weights
             (each shaped (1, 18*9), uniform in [0, 1)) and a scalar bias
    """
    shape = (1, 18 * 9)
    Wi0 = np.random.random(shape)
    Wj0 = np.random.random(shape)
    b0 = random.random()
    return Wi0, Wj0, b0


def loss_function(y_true,Wi,Wj,b,X,Wi_previous,Wj_previous):
    """One Adagrad-style gradient step for the quadratic model
    y = b + Wi.X + Wj.X^2.

    Loss function: L = (y_true - (b + ΣWi*Xi + ΣWj*Xi^2))^2
    Gradients:  ∂L/∂Wi = 2*(y_true - ŷ)*(-Xi)
                ∂L/∂Wj = 2*(y_true - ŷ)*(-Xi^2)
                ∂L/∂b  = 2*(y_true - ŷ)*(-1)
    Each step is divided by the root-sum-square of that weight's past
    gradients (Adagrad denominator).

    :param y_true: scalar label
    :param Wi: (1, 162) linear weights
    :param Wj: (1, 162) quadratic weights
    :param b: scalar bias
    :param X: (162, 1) feature column
    :param Wi_previous: per-weight gradient history lists (mutated: grown by one)
    :param Wj_previous: per-weight gradient history lists (mutated: grown by one)
    :return: new_Wi, new_Wj, new_b, Wi_previous, Wj_previous, Loss
    """
    n = len(Wi[0])
    new_Wi = np.empty((1, n))
    new_Wj = np.empty((1, n))
    X_square = X * X
    learning_rate = 0.000000000000000000000000001
    # The prediction (and hence the 2*(y_true - y_pred) factor) does not
    # depend on the loop index — hoist it instead of recomputing two
    # 162-element dot products on every one of the 324 iterations.
    y_pred = b + np.dot(Wi, X)[0][0] + np.dot(Wj, X_square)[0][0]
    residual2 = 2 * (y_true - y_pred)
    # update every Wi
    for index in range(0, n):
        gradient_wi = residual2 * (-X[index][0])
        sigma = root_mean_square_not_contain_N(Wi_previous[index])
        new_Wi[0][index] = Wi[0][index] - (learning_rate * gradient_wi) / sigma
        Wi_previous[index].append(gradient_wi)
    # update every Wj
    for index in range(0, len(Wj[0])):
        gradient_wj = residual2 * (-(X[index][0]) ** 2)
        sigma = root_mean_square_not_contain_N(Wj_previous[index])
        new_Wj[0][index] = Wj[0][index] - (learning_rate * gradient_wj) / sigma
        Wj_previous[index].append(gradient_wj)
    # bias update
    gradient_b = residual2 * (-1)
    new_b = b - learning_rate * gradient_b
    # BUG FIX: the post-update loss previously mixed the NEW weights with
    # the OLD bias; evaluate it consistently with new_b.
    Loss = (y_true - (new_b + np.dot(new_Wi, X)[0][0] + np.dot(new_Wj, X_square)[0][0])) ** 2
    return new_Wi,new_Wj,new_b,Wi_previous,Wj_previous,Loss


def linear_model(data,Wi,Wj,b,Wi_previous,Wj_previous):
    """Run one training step on a single 10-hour batch.

    :param data: 18x10 frame — first 9 columns are features, last column
                 holds the labels (row 9, presumably PM2.5 — per original code)
    :param Wi: (1, 162) linear weights
    :param Wj: (1, 162) quadratic weights
    :param b: scalar bias
    :param Wi_previous: Adagrad gradient histories for Wi
    :param Wj_previous: Adagrad gradient histories for Wj
    :return: updated Wi, Wj, b, the histories, and the post-update Loss
    """
    X = np.array(data.iloc[:, 0:-1]).astype(np.float64)
    X = X.reshape(18 * 9, 1)
    y = data.iloc[:, -1].reset_index(drop=True)
    try:
        y_true = float(y[9])
    except (ValueError, TypeError, KeyError):
        # BUG FIX: the old bare `except:` only printed and fell through,
        # leaving y_true undefined and crashing later with NameError.
        # Show the offending label column, then propagate the real error.
        print(y)
        raise
    y_model = b + np.dot(Wi, X) + np.dot(Wj, X * X)
    print('训练前误差为' + str(y_true - y_model[0][0]))
    Wi, Wj, b, Wi_previous, Wj_previous, Loss = loss_function(y_true, Wi, Wj, b, X, Wi_previous, Wj_previous)
    y_model = b + np.dot(Wi, X) + np.dot(Wj, X * X)
    print('训练后误差为' + str(y_true - y_model[0][0]))
    return Wi,Wj,b,Wi_previous,Wj_previous,Loss


def model_control():
    """Train the quadratic regression over every batch, tracking the
    lowest-loss weights, then plot the per-iteration loss curve."""
    losses = list()
    best_loss = float('inf')
    best_Wi = 0
    best_Wj = 0
    # parameter initialization
    Wi, Wj, b = initialize_W_b()
    # Adagrad histories seeded with 1 so the first denominator is non-zero
    Wi_previous = [[1] for _ in range(18 * 9)]
    Wj_previous = [[1] for _ in range(18 * 9)]
    batches = data_processing()
    for step, batch in enumerate(batches, start=1):
        print('------------第' + str(step) + "次训练------------")
        Wi, Wj, b, Wi_previous, Wj_previous, loss = linear_model(batch, Wi, Wj, b, Wi_previous, Wj_previous)
        print('----------------------------------------------')
        if loss < best_loss:
            best_Wi = Wi
            best_Wj = Wj
            best_loss = loss
        losses.append(loss)
    print('最小loss' + str(best_loss))
    print(best_Wi)
    print(best_Wj)
    plt.plot(list(range(len(losses))), losses, color='red', linewidth=2.0, linestyle='-')
    plt.title('3600次每一次迭代的Loss值')
    plt.ylabel('Loss值')
    plt.xlabel('次数')
    plt.show()

# Script entry point: run the full training-and-plot pipeline.
if __name__ == '__main__':
    model_control()

