import numpy as np
import torch
from matplotlib import pyplot as plt
from torch import nn
import pandas as pd
from sklearn import preprocessing
import torch.utils.data
import torch.utils.data as Data

# 位置编码
# Sinusoidal positional encoding (Vaswani et al., "Attention Is All You Need").
class PositionalEncoding(nn.Module):
    """Add position information to a (seq_len, batch, d_model) input.

    A fixed table of shape (max_len, 1, d_model) is precomputed and stored as
    a buffer; sequences longer than ``max_len`` are unsupported. ``d_model``
    is assumed to be even (it is 250 in this file).
    """

    def __init__(self, d_model, max_len=5000):
        super(PositionalEncoding, self).__init__()
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # One frequency per sin/cos pair: 1 / 10000^(2i / d_model).
        # This fixes the original, which (a) mixed numpy arrays into torch
        # arithmetic and (b) gave the sin and cos of each dimension pair
        # *different* frequencies by slicing a length-d_model frequency
        # vector with [0::2] and [1::2].
        div_term = torch.exp(
            torch.arange(0, d_model, 2, dtype=torch.float)
            * (-torch.log(torch.tensor(10000.0)) / d_model)
        )
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        # Reshape to (max_len, 1, d_model) so it broadcasts over the batch dim.
        pe = pe.unsqueeze(0).transpose(0, 1)
        self.register_buffer('pe', pe)

    def forward(self, x):
        # x: (seq_len, batch, d_model). Broadcasting over dim 1 replaces the
        # explicit .repeat() of the original (same result, no copy).
        return x + self.pe[:x.size(0), :]


class TransAm(nn.Module):
    """Encoder-decoder Transformer for multivariate series forecasting.

    Inputs and outputs use the (seq_len, batch, series_dim) layout expected
    by torch's Transformer modules. ``forward`` returns only the last
    PREDICT_SIZE steps (PREDICT_SIZE is a module-level global defined in the
    surrounding script).
    """

    def __init__(self, series_dim, feature_size=250, num_encoder_layers=1, num_decoder_layers=1, dropout=0.1):
        super(TransAm, self).__init__()
        self.model_type = 'Transformer'
        # Project raw series values into the model dimension.
        self.input_embedding = nn.Linear(series_dim, feature_size)
        # Cached causal mask; rebuilt lazily whenever the source length changes.
        self.src_mask = None
        # Positional encoding layer defined above.
        self.pos_encoder = PositionalEncoding(feature_size)
        # Stock PyTorch encoder/decoder stacks.
        self.encoder_layer = nn.TransformerEncoderLayer(d_model=feature_size, nhead=10, dropout=dropout)
        self.encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=num_encoder_layers)
        self.decoder_layer = nn.TransformerDecoderLayer(d_model=feature_size, nhead=10, dropout=dropout)
        self.decoder = nn.TransformerDecoder(self.decoder_layer, num_layers=num_decoder_layers)
        # Map the model dimension back to the original number of series.
        self.linear = nn.Linear(feature_size, series_dim)

    def forward(self, src, tgt):
        """Encode ``src``, decode ``tgt`` against it, and return the forecast
        horizon (the final PREDICT_SIZE steps of the decoder output)."""
        seq_len = len(src)
        if self.src_mask is None or self.src_mask.size(0) != seq_len:
            # Build the causal mask on the same device as the input.
            self.src_mask = self._generate_square_subsequent_mask(seq_len).to(src.device)

        # Embed + positionally encode the source, then run the encoder.
        encoded_src = self.pos_encoder(self.input_embedding(src))
        memory = self.encoder(encoded_src, self.src_mask)

        # Same embedding/positional-encoding pipeline for the decoder input.
        # NOTE(review): no tgt_mask is supplied, so decoder positions can
        # attend to later decoder positions — confirm this is intentional.
        encoded_tgt = self.pos_encoder(self.input_embedding(tgt))
        decoded = self.decoder(encoded_tgt, memory)

        # Project back to series space and keep only the predicted part,
        # discarding the warm-up prefix.
        return self.linear(decoded)[-PREDICT_SIZE:, :, :]

    def _generate_square_subsequent_mask(self, sz):
        # Additive attention mask: 0.0 on/below the diagonal, -inf strictly
        # above it, i.e. position i may only attend to positions j <= i.
        return torch.full((sz, sz), float('-inf')).triu(diagonal=1)


# ---------------------------------------------------------------------------
# Hyper-parameters and data preparation
# ---------------------------------------------------------------------------
# Fall back to CPU so the script still runs on machines without a GPU
# (the original hard-coded "cuda" and crashed otherwise).
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
BATCH_SIZE = 32
LEARNING_RATE = 0.001
EPOCH = 20

# Load the first 10000 rows of the ETTh1 dataset and drop the date column.
data = pd.read_csv('data/ETTh1.csv').values[0:10000]
data = data[:, 1:]
plt.plot(data)
plt.show()

# Min-max normalisation so the 7 series are on a comparable scale.
# NOTE(review): the scaler is fit on the FULL series (train + test), which
# leaks test statistics into training — fit on the train split only if a
# strict evaluation is required.
min_max_scaler = preprocessing.MinMaxScaler()
data = min_max_scaler.fit_transform(data)

# Build sliding-window samples and labels:
#   sample: WINDOW_SIZE past steps
#   label : the last LABEL_SIZE steps of the window (decoder warm-up prefix)
#           followed by the next PREDICT_SIZE future steps.
WINDOW_SIZE = 96
LABEL_SIZE = 48
PREDICT_SIZE = 96
samples = []
labels = []
for i in range(len(data) - WINDOW_SIZE - PREDICT_SIZE):
    samples.append(data[i:i + WINDOW_SIZE])
    labels.append(data[i + WINDOW_SIZE - LABEL_SIZE:i + WINDOW_SIZE + PREDICT_SIZE])
samples = torch.tensor(np.array(samples), dtype=torch.float32)
labels = torch.tensor(np.array(labels), dtype=torch.float32)

# 4:1 train/test split. The boundary is computed on the number of WINDOWS —
# the original used len(data) (raw rows), which over-allocates the training
# split because there are fewer windows than rows.
# NOTE(review): adjacent windows overlap, so windows near the boundary share
# raw rows between the train and test splits.
train_test_boundary = int(len(samples) * 0.8)
train_samples = samples[:train_test_boundary].to(DEVICE)
train_labels = labels[:train_test_boundary].to(DEVICE)
test_samples = samples[train_test_boundary:].to(DEVICE)
test_labels = labels[train_test_boundary:].to(DEVICE)

# Wrap the training split in a shuffled DataLoader.
train_dataset = Data.TensorDataset(train_samples, train_labels)
train_loader = Data.DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True)

# Data is ready: instantiate the Transformer model (7 parallel series) and
# move it to the chosen device.
model = TransAm(series_dim=7).to(DEVICE)

# MSE loss paired with the Adam optimiser.
loss_function = nn.MSELoss()
updater = torch.optim.Adam(model.parameters(), LEARNING_RATE)


# Training loop
def train():
    """Train the global ``model`` for EPOCH epochs on ``train_loader``.

    Teacher forcing: the decoder input is the last LABEL_SIZE known steps of
    the label followed by zero placeholders for the PREDICT_SIZE steps to be
    predicted. The model state dict is saved after every epoch.
    """
    model.train()  # ensure dropout is active during training
    for epoch in range(EPOCH):
        current_epoch_loss = 0.0
        samples_this_epoch = 0
        for X, Y in train_loader:
            # (batch, seq, dim) -> (seq, batch, dim), the layout nn.Transformer expects.
            X, Y = X.permute(1, 0, 2), Y.permute(1, 0, 2)
            # Decoder input: LABEL_SIZE known steps + zeros for the horizon.
            decoder_input = torch.zeros_like(Y[-PREDICT_SIZE:, :, :]).float()
            decoder_input = torch.cat([Y[:LABEL_SIZE], decoder_input], dim=0).float().to(DEVICE)
            Y_pred = model(X, decoder_input)
            updater.zero_grad()
            loss = loss_function(Y_pred, Y[-PREDICT_SIZE:, :, :])
            # Weight by the ACTUAL batch size: the last batch of an epoch may
            # be smaller than BATCH_SIZE (the original always weighted by
            # BATCH_SIZE, biasing the reported epoch-average loss).
            batch_size = Y.size(1)
            current_epoch_loss += float(loss) * batch_size
            samples_this_epoch += batch_size
            loss.backward()
            updater.step()  # gradient-descent weight update
        print(f"*Current epoch:{epoch} training loss:{current_epoch_loss / samples_this_epoch}")
        torch.save(model.state_dict(), "model state/state")


# Load saved weights instead of retraining when MODEL_SAVED is True.
MODEL_SAVED = True
if MODEL_SAVED:
    # map_location lets a GPU-trained checkpoint load on the current device
    # (the original failed on CPU-only machines).
    model.load_state_dict(torch.load("model state/state", map_location=DEVICE))
else:
    train()

# ---------------------------------------------------------------------------
# Prediction on a single test window (batch size 1)
# ---------------------------------------------------------------------------
# Disable dropout for inference — the original predicted in train mode.
model.eval()
# Model input: one window of WINDOW_SIZE past steps.
test_input = test_samples[400].reshape(WINDOW_SIZE, 1, 7)
# Ground-truth future values (warm-up prefix + horizon) for comparison.
test_output = test_labels[400].reshape(PREDICT_SIZE + LABEL_SIZE, 1, 7)
decoder_input = torch.zeros_like(test_output[-PREDICT_SIZE:, :, :]).float()
decoder_input = torch.cat([test_input[-LABEL_SIZE:], decoder_input], dim=0).float().to(DEVICE)
with torch.no_grad():  # no gradients needed at inference time
    prediction = model(test_input, decoder_input)
print(prediction)

# Plot the first series: ground truth vs (warm-up prefix + prediction).
true_output = test_output.cpu().numpy().reshape(PREDICT_SIZE + LABEL_SIZE, 7)
predict_output = prediction.detach().cpu().numpy().reshape(PREDICT_SIZE, 7)
predict_output = np.concatenate((true_output[:LABEL_SIZE], predict_output), axis=0)
plt.plot(true_output[:, 0])
plt.plot(predict_output[:, 0])
plt.show()
