from math import sqrt
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import time

# Load the 20 "high" tables into the module-level names HTable0..HTable19.
# A loop over globals() replaces 20 near-identical read_excel lines; the
# driver code below looks these names up at module scope, so assigning via
# globals() preserves the original interface exactly.
# keep_default_na=False keeps empty cells as "" instead of NaN.
for _i in range(20):
    globals()[f"HTable{_i}"] = pd.read_excel(
        f"py_data/high/{_i:02d}.xlsx", keep_default_na=False
    )
# Load the 20 "medium" tables into the module-level names MTable0..MTable19
# (same pattern as the other groups: a loop instead of 20 pasted lines).
for _i in range(20):
    globals()[f"MTable{_i}"] = pd.read_excel(
        f"py_data/medium/{_i:02d}.xlsx", keep_default_na=False
    )

# Load the 20 "low" tables into the module-level names LTable0..LTable19.
# The original used "py_data//low/..." (doubled slash) for files 00-09 and
# "py_data/low/..." for 10-19; both resolve to the same path, normalized
# here to the single-slash form.
for _i in range(20):
    globals()[f"LTable{_i}"] = pd.read_excel(
        f"py_data/low/{_i:02d}.xlsx", keep_default_na=False
    )

# Load the 20 "mix" tables into the module-level names MixTable0..MixTable19.
for _i in range(20):
    globals()[f"MixTable{_i}"] = pd.read_excel(
        f"py_data/mix/{_i:02d}.xlsx", keep_default_na=False
    )

# Use a CJK-capable font (titles below contain Chinese text) and keep the
# minus sign rendering correctly. rcParams are consulted when figures are
# drawn, so setting them up front covers every figure created below.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# One 2x2-subplot figure per group of four tables, five figures per data
# class. The loop creates the same module-level names (Hfig0/Haxes0, ...,
# Mixfig4/Mixaxes4) that the driver code below references, replacing 20
# pasted plt.subplots lines (the originals even created Mfig1/Lfig1 before
# Mfig0/Lfig0 — nothing depends on creation order).
for _prefix in ("H", "M", "L", "Mix"):
    for _q in range(5):
        _fig, _axes = plt.subplots(2, 2, figsize=(13, 6))
        globals()[f"{_prefix}fig{_q}"] = _fig
        globals()[f"{_prefix}axes{_q}"] = _axes


def input_data(seq, ws):
    """Build (window, label) training pairs from a sequence.

    Each pair holds ``ws`` consecutive elements of *seq* and the
    single-element slice immediately following that window, so a model can
    be trained to predict the next value from the previous ``ws`` values.
    """
    last_start = len(seq) - ws
    return [(seq[k:k + ws], seq[k + ws:k + ws + 1]) for k in range(last_start)]


def CNN_deal(DF, ax, title, epoch, lr):
    """Train a small 1-D CNN on the last 500 'Delay' samples of *DF* and
    plot its 50-step forecast against the true series on *ax*.

    Args:
        DF: DataFrame with at least 'Time' and 'Delay' columns.
        ax: matplotlib Axes the true/predicted curves are drawn on.
        title: prefix for the subplot title and the RMSE report.
        epoch: number of training epochs.
        lr: Adam learning rate.
    """
    train_size = 500
    df = DF.iloc[-train_size:, :]
    y = df['Delay'].values.astype(float)

    # Hold out the last 50 points as the evaluation window; the network is
    # fitted on everything before it.
    test_size = 50
    train_set = y[:-test_size]
    test_set = y[-test_size:]

    # Scale the training series to [-1, 1]; the same scaler later inverts
    # the predictions back to real units.
    scaler = MinMaxScaler(feature_range=(-1, 1))
    train_norm = scaler.fit_transform(train_set.reshape(-1, 1))
    train_norm = torch.FloatTensor(train_norm).view(-1)

    # Sliding windows: each training sample is window_size consecutive
    # values; the label is the single value immediately after the window
    # (e.g. with values (1,2,3,4,5) and ws=4, predict (5) from (1,2,3,4)).
    window_size = 12
    train_data = input_data(train_norm, window_size)

    class CNNnetwork(nn.Module):
        """Conv1d(1->64, k=2) + ReLU + two dense layers -> one scalar."""

        def __init__(self):
            super().__init__()
            self.conv1d = nn.Conv1d(1, 64, kernel_size=2)
            self.relu = nn.ReLU(inplace=True)
            # Conv with kernel 2 over a length-12 window yields length 11.
            self.Linear1 = nn.Linear(64 * 11, 50)
            self.Linear2 = nn.Linear(50, 1)

        def forward(self, x):
            x = self.conv1d(x)
            x = self.relu(x)
            x = x.view(-1)
            x = self.Linear1(x)
            x = self.relu(x)
            x = self.Linear2(x)
            return x

    torch.manual_seed(101)  # reproducible weight init across calls
    model = CNNnetwork()
    # Mean-squared-error loss, optimized with Adam at the given rate.
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    epochs = epoch
    model.train()
    start_time = time.time()
    for epoch in range(epochs):
        for seq, y_train in train_data:
            # Zero accumulated gradients before each parameter update.
            optimizer.zero_grad()
            # Reshape to Conv1d's input layout:
            # (batch size, in_channels, series length).
            y_pred = model(seq.reshape(1, 1, -1))
            loss = criterion(y_pred, y_train)
            loss.backward()
            optimizer.step()
        print(f'Epoch: {epoch + 1:2} Loss: {loss.item():10.8f}')
    print(f'\nDuration: {time.time() - start_time:.0f} seconds')

    # Autoregressive forecast: seed with the last training window, then
    # feed each prediction back in, sliding one step at a time.
    future = 50
    preds = train_norm[-window_size:].tolist()
    model.eval()
    for i in range(future):
        seq = torch.FloatTensor(preds[-window_size:])
        with torch.no_grad():
            preds.append(model(seq.reshape(1, 1, -1)).item())

    # Undo the [-1, 1] scaling to recover real-unit predictions.
    true_predictions = scaler.inverse_transform(
        np.array(preds[window_size:]).reshape(-1, 1))

    # Forecast error against the held-out window. (Restores the original
    # commented-out RMSE line, which was broken: it indexed the ndarray
    # test_set like a DataFrame.)
    RMSE = sqrt(mean_squared_error(test_set, true_predictions))
    print(f'{title} RMSE: {RMSE:.4f}')

    # BUG FIX: the grid must be drawn on *ax*. plt.grid(True) acted on
    # pyplot's "current" axes — i.e. whichever figure was created last —
    # not on the subplot passed in.
    ax.grid(True)
    ax.plot(df["Time"], df['Delay'])
    # NOTE(review): the prediction x-axis is hard-coded to 2915..2940 in
    # 0.5 steps (50 points, matching `future`) — this assumes every table's
    # Time column ends at 2915 with 0.5 spacing; confirm against the data.
    x = np.arange(2915, 2940, 0.5)
    ax.plot(x, true_predictions)
    ax.legend(["y_true", "y_pred"])
    ax.set_title(title + " CNN_带宽分析")
    ax.set_xlabel("time")
    ax.set_ylabel("band_width")


# Process the 20 "high" tables: four per figure (2x2 grid), five figures.
# globals() lookups replace the original eval() calls on constructed source
# strings — same module-level names resolved, without executing built-up
# code text (eval on assembled strings is an avoidable hazard).
H = 0
for q in range(5):
    axes_grid = globals()[f"Haxes{q}"]  # hoisted: same grid for 4 cells
    for i in range(2):
        for j in range(2):
            CNN_deal(globals()[f"HTable{H}"], axes_grid[i, j],
                     f"HTable{H}", 200, 0.0001)
            print("次序", H)
            H = H + 1
    fig = globals()[f"Hfig{q}"]
    fig.tight_layout()
    # NOTE(review): suptitle is added after tight_layout, so the layout
    # pass does not reserve space for it — kept as in the original.
    fig.suptitle(f"Hfig{q}")
    fig.savefig(f"pic_result/CNN/high{q}.png")

# Process the 20 "medium" tables (note the higher learning rate, 0.0005,
# versus 0.0001 for the other groups). eval() replaced by globals() lookups.
M = 0
for q in range(5):
    axes_grid = globals()[f"Maxes{q}"]
    for i in range(2):
        for j in range(2):
            CNN_deal(globals()[f"MTable{M}"], axes_grid[i, j],
                     f"MTable{M}", 200, 0.0005)
            print("次序", M)
            M = M + 1
    fig = globals()[f"Mfig{q}"]
    fig.tight_layout()
    fig.suptitle(f"Mfig{q}")
    fig.savefig(f"pic_result/CNN/medium{q}.png")

# Process the 20 "low" tables. eval() replaced by globals() lookups.
L = 0
for q in range(5):
    axes_grid = globals()[f"Laxes{q}"]
    for i in range(2):
        for j in range(2):
            CNN_deal(globals()[f"LTable{L}"], axes_grid[i, j],
                     f"LTable{L}", 200, 0.0001)
            print("次序", L)
            L = L + 1
    fig = globals()[f"Lfig{q}"]
    fig.tight_layout()
    fig.suptitle(f"Lfig{q}")
    fig.savefig(f"pic_result/CNN/low{q}.png")

# Process the 20 "mix" tables. eval() replaced by globals() lookups.
Mix = 0
for q in range(5):
    axes_grid = globals()[f"Mixaxes{q}"]
    for i in range(2):
        for j in range(2):
            CNN_deal(globals()[f"MixTable{Mix}"], axes_grid[i, j],
                     f"MixTable{Mix}", 200, 0.0001)
            print("次序", Mix)
            Mix = Mix + 1
    fig = globals()[f"Mixfig{q}"]
    fig.tight_layout()
    fig.suptitle(f"Mixfig{q}")
    fig.savefig(f"pic_result/CNN/mix{q}.png")
