'''
@Time: 2020/10/2 16:43  
@Author: 程嘉明
@File: linear_regression.py
Software: PyCharm
target: 预测239天的PM2.5值
'''

'''
train data:14/01/01到14/12/20 24小时各个小时18项指标的值
test data:连续9个小时的各项指标的数据
'''

import pandas as pd
import numpy as np
import random
import sys
import matplotlib
import matplotlib.pyplot as plt
# Register a CJK-capable font (SimHei) so matplotlib can render the Chinese
# plot title and axis labels used in model_control().
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['font.serif'] = ['SimHei']

# gb18030 encoding handles the Chinese column headers (e.g. '时间戳') in the CSVs.
# NOTE(review): loaded at import time — running this module requires ./data/ to exist.
train_data = pd.read_csv('./data/train.csv', encoding='gb18030')
test_data = pd.read_csv('./data/test.csv', encoding='gb18030')

'''
将train中的数据分为10个小时为一组
'''
def split_train_data(train_data):
    """Slice each day's measurement table into overlapping 10-hour windows.

    Each window is a frame of 10 consecutive hour-columns: the first 9 are
    the training features and the 10th is the label hour (the PM2.5 value is
    extracted from it downstream in linear_model).

    :param train_data: raw frame where column 0 is the date ('时间戳'),
        column 1 the indicator name, and columns 2.. the hourly readings.
    :return: list of 10-column window frames across all days.
    """
    window = 10
    new_train_data_list = list()
    for _, one_day in train_data.groupby('时间戳'):
        # Hour columns start at index 2.  Derive the last valid window start
        # from the actual COLUMN count; the previous code used the row count
        # (18 indicators), which only matched by coincidence for the standard
        # 26-column layout (2 meta + 24 hours).
        last_start = one_day.shape[1] - window
        for start in range(2, last_start + 1):
            new_train_data_list.append(one_day.iloc[:, start:start + window])
    return new_train_data_list

'''
将RAINFALL字符属性转化为数值属性
'''
def NR_to_number(train_data_list):
    """Map the textual RAINFALL marker 'NR' (no rain) to numeric 0.

    Uses the non-mutating ``DataFrame.replace``: the previous version called
    ``replace(..., inplace=True)`` on frames that are iloc column slices of
    the original DataFrame, which is chained assignment — pandas may emit
    SettingWithCopyWarning and the write can silently fail to propagate.
    Callers only consume the returned list, so this is safe.

    :param train_data_list: list of window frames possibly containing 'NR'.
    :return: new list of frames with every 'NR' replaced by 0.
    """
    return [frame.replace('NR', 0) for frame in train_data_list]



'''
数据预处理
'''
def data_processing():
    """Preprocess the module-level training frame.

    Pipeline: window the raw data into 10-hour batches (9 feature hours +
    1 label hour), then convert the 'NR' rainfall marker to numeric 0.

    :return: list of preprocessed 10-hour window frames.
    """
    return NR_to_number(split_train_data(train_data))



#3600个batch，作为一个训练的原始数据
# train_data_list=split_train_data(train_data)
# train_data_list=NR_to_number(train_data_list)



'''
初始化参数W0 b0
'''
def initialize_W_b():
    """Draw a random starting point for the model parameters.

    :return: tuple ``(W0, b0)`` — W0 is a (1, 162) array of uniform [0, 1)
        weights (18 indicators x 9 feature hours), b0 a scalar uniform
        [0, 1) bias.
    """
    shape = (1, 18 * 9)
    weights = np.random.random(shape)
    bias = random.random()
    return weights, bias

'''
Loss function
'''
def loss_function(y_true, W, b, X):
    """Perform one gradient-descent step on the ridge-regularised squared error.

    L        = (y_true - (b + W·X))^2 + λ Σ Wi^2
    ∂L/∂Wi   = 2 (y_true - (b + W·X)) (-Xi) + 2 λ Wi
    ∂L/∂b    = -2 (y_true - (b + W·X))

    :param y_true: scalar label (PM2.5 of the 10th hour).
    :param W: current weights, shape (1, 162).
    :param b: current scalar bias.
    :param X: feature column vector, shape (162, 1).
    :return: (new_W, new_b, Loss) — parameters after one simultaneous update
        and the regularised loss evaluated at the updated parameters.
    """
    lam = 0.1      # L2 regularisation strength (was λ)
    lr = 0.000001  # learning rate (was Π)

    # The prediction under the *current* parameters is loop-invariant; the
    # previous code recomputed np.dot(W, X) once per weight (162 times).
    residual = y_true - (b + np.dot(W, X)[0][0])

    # Vectorised simultaneous update of all weights and the bias.
    grad_W = 2.0 * residual * (-X.T) + 2.0 * lam * W   # shape (1, 162)
    new_W = W - lr * grad_W
    new_b = b - lr * (-2.0 * residual)

    # Report the loss at the *updated* parameters.  The previous code mixed
    # the new weights with the old bias and regularised the old weights.
    prediction = new_b + np.dot(new_W, X)[0][0]
    Loss = (y_true - prediction) ** 2 + lam * np.sum(new_W ** 2)
    return new_W, new_b, Loss

'''
linear_model
'''
def linear_model(data, W, b):
    """Run one training step of the linear model on a single 10-hour window.

    :param data: 18-row, 10-column frame; the first 9 columns are the feature
        hours, the last column is the label hour (row 9 — the PM2.5
        indicator — supplies y_true).
    :param W: current weights, shape (1, 162).
    :param b: current scalar bias.
    :return: (W, b, Loss) after one gradient-descent step.
    :raises ValueError: if the PM2.5 label cannot be parsed as a number.
    """
    X = np.array(data.iloc[:, 0:-1]).astype(np.float64)
    X = X.reshape(18 * 9, 1)
    y = data.iloc[:, -1].reset_index(drop=True)
    try:
        y_true = float(y[9])  # row 9 is the PM2.5 indicator
    except (ValueError, TypeError, KeyError):
        # The previous bare `except:` only printed y and fell through,
        # crashing later with an unbound y_true (NameError); fail fast with
        # context instead.
        raise ValueError('PM2.5 label is not numeric: ' + str(list(y)))
    y_model = b + np.dot(W, X)
    print('训练前误差为' + str(y_true - y_model[0][0]))
    W, b, Loss = loss_function(y_true, W, b, X)
    y_model = b + np.dot(W, X)
    print('训练后误差为' + str(y_true - y_model[0][0]))
    return W, b, Loss


'''
模型训练控制
'''
def model_control():
    """Train the model over every batch, report the best loss, plot the curve.

    Initialises random parameters, runs one gradient step per 10-hour batch,
    tracks the weights with the smallest loss seen, and finally plots the
    per-iteration loss values.
    """
    losses = list()
    best_loss = float('inf')
    best_W = 0
    # Random starting parameters, then the preprocessed batch list.
    W, b = initialize_W_b()
    batches = data_processing()
    for step, batch in enumerate(batches, start=1):
        print('------------第' + str(step) + "次训练------------")
        W, b, loss = linear_model(batch, W, b)
        print('----------------------------------------------')
        if loss < best_loss:
            best_W = W
            best_loss = loss
        losses.append(loss)
    print('最小loss' + str(best_loss))
    print(best_W)
    x_axis = list(range(len(losses)))
    plt.plot(x_axis, losses, color='red', linewidth=2.0, linestyle='-')
    plt.title('3600次每一次迭代的Loss值')
    plt.ylabel('Loss值')
    plt.xlabel('次数')
    plt.show()



if __name__ == '__main__':
    model_control()