from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import torch

def preprocess_data(data_path: str = 'chapter3_bike/data.csv') -> tuple[pd.DataFrame, pd.DataFrame, dict]:
    """Load the bike-sharing dataset and prepare it for training.

    1. One-hot encode the categorical fields.
    2. Standardize the numeric fields: (value - mean) / std.

    Parameters
    ----------
    data_path : str
        Path to the CSV file (defaults to the chapter's data file).

    Returns
    -------
    rides : pd.DataFrame
        Raw table with the one-hot columns appended.
    data : pd.DataFrame
        Cleaned feature table: categorical source columns and unused fields
        dropped, numeric fields standardized in place.
    quant_scaled_features : dict
        Per-field ``[mean, std]`` used for standardization, keyed by field
        name, so predictions can be un-scaled later.
    """
    rides = pd.read_csv(data_path)
    # Categorical fields to one-hot encode.
    dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
    for field in dummy_fields:
        dummies = pd.get_dummies(rides[field], prefix=field, drop_first=False)
        # Append the one-hot columns to the original table.
        rides = pd.concat([rides, dummies], axis=1)
    # Drop the original categorical columns plus fields the model does not use.
    del_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday', 'instant', 'dteday', 'atemp', 'workingday']
    data = rides.drop(del_fields, axis=1)

    # Numeric fields to standardize.
    quant_fields = ['cnt', 'temp', 'hum', 'windspeed']
    quant_scaled_features = {}    # keeps each field's mean and std for later un-scaling
    for field in quant_fields:
        mean, std = data[field].mean(), data[field].std()
        quant_scaled_features[field] = [mean, std]
        # .loc[:, field] assigns the scaled values across all rows of the column.
        data.loc[:, field] = (data[field] - mean) / std
    return rides, data, quant_scaled_features

def training(rides: pd.DataFrame, data: pd.DataFrame, quant_scaled_features: dict):
    """Train a small MLP on the preprocessed data, then plot and evaluate it.

    Parameters
    ----------
    rides : pd.DataFrame
        Raw table (with one-hot columns); used downstream for date labels.
    data : pd.DataFrame
        Preprocessed feature table from preprocess_data().
    quant_scaled_features : dict
        Per-field ``[mean, std]`` from preprocess_data(), used to un-scale
        the network's output during evaluation.
    """
    test_data = data[-21*24:]   # test set: the final 21 days (hourly rows)
    train_data = data[:-21*24]  # training set: everything before that

    # Target columns.
    target_fields = ['cnt', 'casual', 'registered']
    # Split train and test sets into feature columns and target columns.
    features, targets = train_data.drop(target_fields, axis=1), train_data[target_fields]
    test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
    # Convert to NumPy. astype(float) guards against the bool/object dtypes
    # pd.get_dummies can produce, which torch.FloatTensor cannot ingest.
    x = features.values.astype(float)
    y = targets['cnt'].values.astype(float)
    y = np.reshape(y, [len(y), 1])

    input_size = features.shape[1]  # one input unit per feature column
    hidden_size = 10    # hidden units
    output_size = 1     # single output: the (standardized) predicted count
    # Build the network: linear -> sigmoid -> linear.
    neu = torch.nn.Sequential(
        torch.nn.Linear(input_size, hidden_size),   # input -> hidden
        torch.nn.Sigmoid(),                         # hidden non-linearity
        torch.nn.Linear(hidden_size, output_size)   # hidden -> output
    )
    # Train with mini-batch SGD on MSE loss.
    losses = []
    batch_size = 128
    cost = torch.nn.MSELoss()
    optimizer = torch.optim.SGD(neu.parameters(), lr=0.01)
    for i in range(1000):
        batch_losses = []
        for start in range(0, len(x), batch_size):
            end = min(start + batch_size, len(x))
            xx = torch.FloatTensor(x[start:end])
            yy = torch.FloatTensor(y[start:end])
            predict = neu(xx)
            loss = cost(predict, yy)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # .item() instead of the deprecated loss.data.numpy().
            batch_losses.append(loss.item())

        if i % 100 == 0:
            losses.append(np.mean(batch_losses))
            print(i, np.mean(batch_losses))
    draw_losses(losses)
    testing(neu, rides, test_data, test_features, test_targets, quant_scaled_features)
    testing_subject(neu, rides, test_features, test_targets)
    plt.show()

# Dissect the network: inspect predictions and hidden units on a subset.
def testing_subject(
        neu: torch.nn.Sequential,
        rides: pd.DataFrame,
        test_features: pd.DataFrame,
        test_targets: pd.DataFrame,
        quant_scaled_features: dict = None,
) -> None:
    """Plot predictions vs. real counts for Dec 22-24, 2012, together with
    the hidden-unit activations computed by features().

    Parameters
    ----------
    neu : torch.nn.Sequential
        Trained network.
    rides : pd.DataFrame
        Raw table; its 'dteday' column selects the three dates.
    test_features, test_targets : pd.DataFrame
        Test split produced in training().
    quant_scaled_features : dict, optional
        Per-field ``[mean, std]`` scaling info. When omitted, falls back to
        the module-level global of the same name (the original behavior,
        which silently read the global created in the ``__main__`` block).
    """
    if quant_scaled_features is None:
        # Backward-compatible fallback for the existing call site, which does
        # not pass the dict and relied on the module-level global.
        quant_scaled_features = globals()['quant_scaled_features']
    # Boolean mask selecting the three days of interest.
    bool1 = rides['dteday'] == '2012-12-22'
    bool2 = rides['dteday'] == '2012-12-23'
    bool3 = rides['dteday'] == '2012-12-24'
    bools = [any(tup) for tup in zip(bool1, bool2, bool3)]
    subset = test_features.loc[rides[bools].index]
    subtargets = test_targets.loc[rides[bools].index]
    subtargets = subtargets['cnt']
    subtargets = subtargets.values.reshape([len(subtargets), 1])
    # Hidden-layer activations (features() also plots first-layer weights).
    results = features(subset.values, neu).data.numpy()
    predict = neu(torch.FloatTensor(subset.values)).data.numpy()
    # Un-standardize predictions and targets back to real counts.
    mean, std = quant_scaled_features['cnt']
    predict = predict * std + mean
    subtargets = subtargets * std + mean
    # Plot: every hidden unit faintly, min-max-normalized prediction vs.
    # real values, and one hidden unit highlighted.
    fig, ax = plt.subplots(figsize=(10, 7))
    ax.set_title('testing_subject')
    ax.plot(results[:, :], '.:', alpha=0.1)
    ax.plot((predict - min(predict)) / (max(predict) - min(predict)), 'bo-', label='Prediction')
    ax.plot((subtargets - min(predict)) / (max(predict) - min(predict)), 'ro-', label='Real')
    ax.plot(results[:, 5], '.:', alpha=1, label='Neuro 6')

    ax.set_xlim(right=len(predict))
    ax.legend()
    plt.ylabel('Normalized Values')

    # Date tick labels at noon (hour 12) of each plotted day.
    dates = pd.to_datetime(rides.loc[subset.index]['dteday'])
    dates = dates.apply(lambda d: d.strftime('%b %d'))
    ax.set_xticks(np.arange(len(dates))[12::24])
    _ = ax.set_xticklabels(dates[12::24], rotation=45)

def testing(
    neu: torch.nn.Sequential,
    rides: pd.DataFrame,
    test_data: pd.DataFrame,
    test_features: pd.DataFrame,
    test_targets: pd.DataFrame,
    quant_scaled_features: dict,
) -> None:
    """Plot predicted vs. actual hourly counts over the whole test window.

    Parameters
    ----------
    neu : torch.nn.Sequential
        Trained network.
    rides : pd.DataFrame
        Raw table; supplies the 'dteday' values for the x tick labels.
    test_data, test_features, test_targets : pd.DataFrame
        Test split produced in training().
    quant_scaled_features : dict
        Per-field ``[mean, std]`` used to un-scale the standardized counts.
    """
    targets = test_targets['cnt']
    targets = targets.values.reshape([len(targets), 1]).astype(float)
    # astype(float) guards against bool/object columns from pd.get_dummies.
    x = torch.FloatTensor(test_features.values.astype(float))

    # Forward pass; detach() because no gradients are needed for evaluation.
    predict = neu(x).detach().numpy()

    # Un-standardize back to real counts before plotting.
    mean, std = quant_scaled_features['cnt']
    fig, ax = plt.subplots(figsize=(10, 7))
    ax.set_title('testing')
    ax.plot(predict * std + mean, label='Prediction')
    ax.plot(targets * std + mean, label='Data')
    ax.legend()
    ax.set_xlabel('Date-Time')
    ax.set_ylabel('Counts')
    # Date tick labels at noon (hour 12) of each day in the test window.
    dates = pd.to_datetime(rides.loc[test_data.index]['dteday'])
    dates = dates.apply(lambda d: d.strftime('%b %d'))
    ax.set_xticks(np.arange(len(dates))[12::24])
    _ = ax.set_xticklabels(dates[12::24], rotation=45)

def features(x: np.ndarray, net: torch.nn.Sequential) -> torch.Tensor:
    """Compute the hidden-layer activations of *net* for inputs *x*,
    and plot one row of the first Linear layer's weight matrix.

    Parameters
    ----------
    x : np.ndarray
        Feature matrix, one sample per row (the call site passes
        ``DataFrame.values``); converted to a FloatTensor internally.
    net : torch.nn.Sequential
        Trained network whose first Linear layer ('0') is inspected.

    Returns
    -------
    torch.Tensor
        Sigmoid activations of the hidden layer, shape (len(x), hidden_size).
    """
    x = torch.from_numpy(x).type(torch.FloatTensor)
    dic = dict(net.named_parameters())  # all named parameters of the network
    weights = dic['0.weight']   # first layer's weights ('0' = first module)
    plt.subplots(figsize=(10, 7))
    plt.title('Weights')
    # Weights feeding hidden unit index 6.
    # NOTE(review): testing_subject highlights unit index 5 ('Neuro 6') —
    # confirm which unit is actually meant.
    plt.plot(weights.data.numpy()[6, :], 'o-')
    plt.xlabel('Input Neurons')
    plt.ylabel('Weight')
    biases = dic['0.bias']      # first layer's bias vector
    # Manual forward pass through the first layer: sigmoid(x @ W^T + b).
    h = torch.sigmoid(x.mm(weights.t()) + biases.expand([len(x), len(biases)]))
    return h

def draw_losses(losses: list):
    """Plot the recorded mean batch losses against the epoch number.

    Losses are sampled once every 100 epochs, hence the x-axis scaling.
    """
    fig, ax = plt.subplots(figsize=(10, 7))
    ax.set_title('Loss')
    epochs = np.arange(len(losses)) * 100
    ax.plot(epochs, losses)
    ax.set_xlabel('epoch')
    ax.set_ylabel('MSE')

if __name__ == '__main__':
    # Load and preprocess the data, then train the network and plot results.
    rides, data, quant_scaled_features = preprocess_data()
    training(rides, data, quant_scaled_features)