'''
@Time: 2020/10/28 9:54  
@Author: 程嘉明
@File: binary_classification.py
Software: PyCharm
target: 程序目标
'''
import pandas as pd
import numpy as np
import math
from sklearn.preprocessing import OneHotEncoder
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['font.serif'] = ['SimHei']

# Raw training data; expects ./data/train.csv relative to the working directory.
# NOTE(review): loading at import time is a module-level side effect — confirm
# this script is only ever run directly, never imported.
original_data=pd.read_csv(r'./data/train.csv')


'''
Recode y into a boolean column: the sentinel string '-50000' becomes False,
every other value becomes True.
'''
def deal_trage_Y(train_data):
    # NOTE(review): assumes the raw 'y' column holds the literal string
    # '-50000' for the negative class — verify against the CSV.
    train_data['y_transform'] = train_data['y'] != '-50000'
    return train_data

'''
One-hot encode the categorical (object-dtype) columns; numeric columns pass
through unchanged.
'''
def deal_one_hot_coding(train_data):
    """One-hot encode the object-dtype columns of train_data.

    Numeric columns are kept as-is; the encoded columns (labelled with the
    default 0..k-1 RangeIndex) are appended after them. Row order and index
    are preserved.
    """
    # Columns that need encoding (categorical, stored as object dtype).
    object_attribute_trian_data = train_data.select_dtypes(include=[object])
    # Columns that are already numeric and need no encoding.
    int_attribute_trian_data_df = train_data.select_dtypes(exclude=[object])
    # NOTE(review): newer scikit-learn renamed `sparse=` to `sparse_output=`;
    # adjust if the dependency is upgraded.
    enc = OneHotEncoder(sparse=False)
    # fit_transform returns a dense ndarray, one row per input row.
    new_object_attribute_trian_data = enc.fit_transform(object_attribute_trian_data)
    # Bug fix: the row/column counts were hard-coded (54256 / 417), which
    # breaks on any other dataset. Derive them from the data instead; the
    # default RangeIndex column labels match the old range(n) labels.
    new_object_attribute_trian_data_df = pd.DataFrame(
        new_object_attribute_trian_data, index=train_data.index)
    # Concatenate numeric and encoded parts side by side (row-aligned).
    return pd.concat(
        [int_attribute_trian_data_df, new_object_attribute_trian_data_df],
        axis=1)

'''
Data preprocessing: recode the label, split X from Y, one-hot encode X.
'''
def data_pre_processing():
    # Recode y into the boolean y_transform column.
    labelled = deal_trage_Y(original_data)
    # Features are the first 29 columns; the target is column 30
    # (kept as a one-column DataFrame).
    features = labelled.iloc[:, :29]
    target = labelled.iloc[:, 30:31]
    # One-hot encode the categorical feature columns.
    features = deal_one_hot_coding(features)
    return features, target

'''
Initialise the parameters W0, b0.
'''
def initialize_W_b(n_features=424):
    """Randomly initialise the weight row-vector and the bias.

    :param n_features: number of model features. Defaults to 424, the
        width of the one-hot encoded training matrix, so existing callers
        are unaffected.
    :return: (W0, b0) — W0 of shape (1, n_features), b0 a scalar float.
    """
    # Uniform [0, 1) initialisation. The feature count was previously
    # hard-coded; it is now a backward-compatible parameter.
    W0 = np.random.random((1, n_features))
    b0 = np.random.random()
    return W0, b0

'''
Linear part of the model.
Returns z = W · X^T + b.
'''
def linear_model(X, W, b):
    # Affine transform of the input rows; X may be a DataFrame or ndarray.
    features = np.asarray(X)
    z = W @ features.T
    return z + b

'''
Sigmoid function (numerically stable).
'''
def sigmod(y_model):
    """Apply the logistic sigmoid to the first row of y_model.

    :param y_model: 2-D array-like of scores; only row 0 is used.
    :return: 1-D ndarray of probabilities, clamped away from 0 and 1.
    """
    z = np.asarray(y_model[0], dtype=float)
    # Bug fix: math.exp(-ele) raises OverflowError once a score drops
    # below ~-709. Compute the sigmoid piecewise so exp() only ever sees
    # non-positive arguments.
    out = np.empty_like(z)
    pos = z >= 0
    out[pos] = 1.0 / (1.0 + np.exp(-z[pos]))
    neg = ~pos
    exp_z = np.exp(z[neg])
    out[neg] = exp_z / (1.0 + exp_z)
    # Keep the original upper clamp (sigmoid saturation guard) and add the
    # missing lower clamp so get_Loss never evaluates log(0).
    out[out > 0.999999999] = 0.999999
    out[out < 1e-9] = 1e-9
    return out

'''
Per-sample loss: cross-entropy between the predicted probabilities and the
Bernoulli-distributed labels.
'''
def get_Loss(Y_model, Y):
    # Flatten the label frame into a 1-D sequence (first row of the transpose).
    labels = Y.T.values[0]
    # -log(p) for positive samples, -log(1 - p) for negative ones.
    return [
        -math.log(prob) if label else -math.log(1.0 - prob)
        for prob, label in zip(Y_model, labels)
    ]

'''
Update the parameters W, b with one gradient-descent step.
'''
def updata_W_b(loss_list,W,b,Y,y_model,X):
    """One SGD step for logistic regression with cross-entropy loss.

    :param loss_list: per-sample losses (unused by the gradient; kept for
        interface compatibility with existing callers).
    :param W: weight matrix of shape (1, n_features).
    :param b: scalar bias.
    :param Y: one-batch DataFrame of boolean labels.
    :param y_model: 1-D array of sigmoid outputs for the batch.
    :param X: one-batch DataFrame of features.
    :return: updated (W, b); b stays a scalar float.
    """
    learnning_rate = 0.75
    x = np.asarray(X, dtype=float)          # (batch, n_features)
    y_true = Y.T.values[0].astype(float)    # (batch,)
    # For the sigmoid/cross-entropy pair, dL/dz = prediction - label.
    error = np.asarray(y_model, dtype=float) - y_true
    # Bug fixes vs the original implementation:
    #  * the update direction was inverted (W -= lr*(label - pred)*x is
    #    gradient ASCENT — the loss grew instead of shrinking);
    #  * the bias update ignored the learning rate and assigned an ndarray
    #    to b instead of a scalar;
    #  * the per-feature Python loop always read X row 0 and hard-coded
    #    424 features — replaced by a vectorised, shape-derived update.
    W = W - learnning_rate * np.dot(error, x) / len(error)
    b = b - learnning_rate * float(error.mean())
    return W, b

'''
Model driver: per-sample SGD over the training set, then plot the loss curve.
'''
def model(train_X,train_Y,W0,b0):
    """Train the logistic-regression model one sample at a time.

    :param train_X: DataFrame of encoded features.
    :param train_Y: one-column DataFrame with the boolean target.
    :param W0: initial weights, shape (1, n_features).
    :param b0: initial scalar bias.
    """
    W_train = W0
    b_train = b0
    loss_value_list = list()
    # Bug fix: the sample count was hard-coded to 54256 — derive it from
    # the data. (The old comment claimed batch_size 8, but the loop step
    # is 1: this is plain per-sample SGD.)
    n_samples = len(train_X)
    for index in range(n_samples):
        # Current single-sample batch (kept 2-D via slicing).
        X = train_X.iloc[index:index + 1, :]
        Y_true = train_Y.iloc[index:index + 1, :]
        # Forward pass: linear part, then sigmoid.
        y_model = linear_model(X, W_train, b_train)
        y_model = sigmod(y_model)
        # Cross-entropy loss for this sample.
        loss_value = get_Loss(y_model, Y_true)
        loss_value_list.append(loss_value)
        print('第' + str(index) + "次loss值为：" + str(loss_value[0]))
        # Backward pass / parameter update.
        W_train, b_train = updata_W_b(loss_value, W_train, b_train, Y_true, y_model, X)
    x_index = [i for i in range(len(loss_value_list))]
    plt.plot(x_index, loss_value_list, color='red', linewidth=2.0, linestyle='-')
    # Title now reflects the real iteration count instead of a constant.
    plt.title(str(n_samples) + '次每一次迭代的Loss值')
    plt.ylabel('Loss值')
    plt.xlabel('次数')
    plt.show()





'''
Entry point: preprocess the data, initialise the parameters, train.
'''
def control():
    features, target = data_pre_processing()
    weights, bias = initialize_W_b()
    print(weights.shape)
    model(features, target, weights, bias)

# Run training only when executed as a script, not when imported.
if __name__ == '__main__':
    control()