import re
import math
import torch
import numpy as np
import pandas as pd
from random import *
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as Data
from torch.utils.data import DataLoader, TensorDataset
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts

class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product attention with a residual connection
    and a post-LayerNorm (post-LN transformer style).

    Args:
        d_model: model/embedding width of the inputs and output.
        d_k: per-head query/key width.
        d_v: per-head value width.
        n_heads: number of attention heads.
        dropout: dropout probability applied to the attention weights.
    """

    def __init__(self, d_model, d_k, d_v, n_heads, dropout):
        super(MultiHeadAttention, self).__init__()
        self.n_heads = n_heads
        self.d_k = d_k
        self.d_v = d_v
        self.W_Q = nn.Linear(d_model, d_k * n_heads, bias=False)
        self.W_K = nn.Linear(d_model, d_k * n_heads, bias=False)
        self.W_V = nn.Linear(d_model, d_v * n_heads, bias=False)
        self.linear = nn.Linear(d_v * n_heads, d_model)
        self.norm = nn.LayerNorm(d_model)
        # BUG FIX: the `dropout` argument was previously accepted but never
        # used; apply it to the attention weights as in standard transformers.
        # (Dropout has no parameters, so checkpoints remain compatible.)
        self.attn_dropout = nn.Dropout(dropout)

    def forward(self, Q, K, V):
        """Attend over V using Q/K; returns LayerNorm(attention + Q residual).

        Q, K, V: [batch_size, t_len, d_model] -> returns [batch_size, t_len, d_model].
        """
        residual, batch_size = Q, Q.size(0)
        # BUG FIX: k_s and v_s were computed with self.W_Q, which left the
        # W_K and W_V projections completely unused.
        q_s = self.W_Q(Q).view(batch_size, -1, self.n_heads, self.d_k).transpose(1, 2)  # [B, H, T, d_k]
        k_s = self.W_K(K).view(batch_size, -1, self.n_heads, self.d_k).transpose(1, 2)  # [B, H, T, d_k]
        v_s = self.W_V(V).view(batch_size, -1, self.n_heads, self.d_v).transpose(1, 2)  # [B, H, T, d_v]

        # Scaled dot-product attention scores: [B, H, T, T]
        scores = torch.matmul(q_s, k_s.transpose(-1, -2)) / math.sqrt(self.d_k)
        attn = self.attn_dropout(torch.softmax(scores, dim=-1))
        context = torch.matmul(attn, v_s)  # [B, H, T, d_v]
        # Merge heads back: [B, T, H * d_v]
        context = context.transpose(1, 2).contiguous().view(batch_size, -1, self.n_heads * self.d_v)

        output = self.linear(context)
        return self.norm(output + residual)  # [batch_size, t_len, d_model]


class PoswiseFeedForwardNet(nn.Module):
    """Position-wise feed-forward block applied independently at each time
    step: d_model -> 4*d_model -> d_model with GELU and dropout between."""

    def __init__(self, d_model, dropout):
        super(PoswiseFeedForwardNet, self).__init__()
        self.fc1 = nn.Linear(d_model, d_model * 4)
        self.gelu = nn.GELU()
        self.dropout = nn.Dropout(dropout)
        self.fc2 = nn.Linear(d_model * 4, d_model)

    def forward(self, x):
        # [batch_size, t_len, d_model] -> [batch_size, t_len, d_model*4]
        #                              -> [batch_size, t_len, d_model]
        hidden = self.dropout(self.gelu(self.fc1(x)))
        return self.fc2(hidden)


class EncoderLayer(nn.Module):
    """One transformer encoder layer: multi-head self-attention followed by
    a position-wise feed-forward network."""

    def __init__(self, d_model, d_k, d_v, n_heads, dropout=0.1):
        super(EncoderLayer, self).__init__()
        self.enc_self_attn = MultiHeadAttention(d_model, d_k, d_v, n_heads, dropout)
        self.pos_ffn = PoswiseFeedForwardNet(d_model, dropout)

    def forward(self, input):
        # Self-attention: queries, keys and values all come from `input`.
        attended = self.enc_self_attn(input, input, input)
        return self.pos_ffn(attended)


def _init_weight(m):
    if isinstance(m, nn.Linear):
        nn.init.trunc_normal_(m.weight, std=0.1)
        if m.bias is not None:
            nn.init.zeros_(m.bias)


class BERT(nn.Module):
    """BERT-style transformer encoder for scalar time-series regression.

    A Time2Vec embedding (`t2v`) lifts each scalar time step to `d_model`
    features; a learned prediction token is prepended (CLS-style), learned
    position embeddings are added, the sequence is run through `n_layers`
    encoder layers, and the prediction token's final representation is
    projected to a single regression output.

    Notes:
        * `batch_size`, `c_in` and `n_layer` are kept for interface
          compatibility; `n_layer` is unused (`n_layers` controls depth).
        * `W_P` is an unused alternative input projection, retained so
          existing checkpoints keep loading.
    """

    def __init__(self, batch_size, t_len, c_in, n_layers=6, d_model=768, dropout=0.,
                 n_layer=6, n_heads=12, d_k=64, d_v=64, out_features=42, in_features=1):
        super(BERT, self).__init__()

        self.t_len = t_len
        self.c_in = c_in
        self.out_features = out_features
        self.in_features = in_features
        # Unused alternative input encoding (see class docstring).
        self.W_P = nn.Linear(1, d_model)

        # LayerNorm applied to the Time2Vec embedding.
        self.layer_norm = nn.LayerNorm(d_model)

        # Learnable prediction token, prepended like BERT's [CLS].
        self.pred_token = nn.Parameter(torch.zeros(1, 1, d_model))

        # Learned absolute position embeddings for positions 0..out_features
        # (out_features + 1 >= t_len + 1 must hold — TODO confirm for other t_len).
        self.W_pos = nn.Embedding(self.out_features + 1, d_model)

        # Residual dropout applied after adding position embeddings.
        self.dropout = nn.Dropout(dropout)

        # Encoder stack.
        self.layers = nn.ModuleList([EncoderLayer(d_model, d_k, d_v, n_heads, dropout) for _ in range(n_layers)])

        # Regression head: Linear -> GELU -> Linear(d_model, 1).
        self.linear = nn.Linear(d_model, d_model)
        self.active2 = nn.GELU()
        self.fc = nn.Linear(d_model, 1)

        nn.init.trunc_normal_(self.pred_token, std=0.02)
        self.apply(_init_weight)

        # Time2Vec parameters: one linear term (w0, b0) and d_model - 1
        # periodic (sine) terms (w, b). Created after `apply` so the RNG
        # stream matches the original initialization order.
        self.w0 = nn.parameter.Parameter(torch.randn(self.in_features, 1))
        self.b0 = nn.parameter.Parameter(torch.randn(1))
        self.w = nn.parameter.Parameter(torch.randn(self.in_features, d_model - 1))
        self.b = nn.parameter.Parameter(torch.randn(d_model - 1))
        self.f = torch.sin

    def t2v(self, tau, f, w, b, w0, b0):
        """Time2Vec embedding of a scalar series.

        tau: [batch_size, t_len] -> returns [batch_size, t_len, d_model]
        (d_model - 1 periodic features followed by 1 linear feature).
        """
        v1 = f(torch.matmul(tau.unsqueeze(-1), w) + b)   # periodic components
        v2 = torch.matmul(tau.unsqueeze(-1), w0) + b0    # linear component
        return torch.cat([v1, v2], -1)

    def forward(self, x):
        """x: [batch_size, t_len] scalar series -> [batch_size, 1] prediction."""
        x = self.t2v(x, self.f, self.w, self.b, self.w0, self.b0)  # [B, T, d_model]
        x = self.layer_norm(x)

        # Prepend the prediction token: [B, T, d_model] -> [B, T+1, d_model].
        pred_token = self.pred_token.expand(x.shape[0], -1, -1)
        x = torch.cat((pred_token, x), dim=1)

        # Position embeddings. BUG FIX: previously relied on a module-level
        # `device` global; derive the device from the input instead.
        positions = torch.arange(self.t_len + 1, device=x.device).unsqueeze(0)  # [1, T+1]
        u = self.W_pos(positions).expand(x.size(0), -1, -1)  # [B, T+1, d_model]
        x = self.dropout(x + u)

        # BUG FIX: the loop previously ran `output = layer(x)` without ever
        # updating x, so every layer saw the ORIGINAL input and only the
        # last layer's output was kept — the stack behaved as depth 1.
        for layer in self.layers:
            x = layer(x)  # [B, T+1, d_model]

        output = self.active2(self.linear(x))  # [B, T+1, d_model]
        x_pooled = output[:, 0]                # prediction-token slot: [B, d_model]
        logits_x = self.fc(x_pooled)           # [B, 1]

        return logits_x


# target_data = torch.randn(6, 1)
# timeseries_data = torch.randn(6, 30, 2)
# Run on GPU when available; the model and every batch are moved to `device`.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# WTI crude-oil price CSV; only the 'DCOILWTICO' price column is used.
data_path = r'C:\Machine Learning\BERT\cleaned_DCOILWTICO.csv'
df = pd.read_csv(data_path)

# 1-D numpy array of prices, windowed into sequences by create_seq below.
timeseries_data = df['DCOILWTICO'].values

def create_seq(data, seq_length, input_step=1, target_step=1):
    """Slice a 1-D series into (input window, target window) pairs.

    Windows of `seq_length` points start every `input_step` indices; each
    target is the `target_step` points immediately following its window.

    Returns:
        (inputs, targets): numpy arrays of shape
        [num_windows, seq_length] and [num_windows, target_step].
    """
    # Last valid start index (exclusive) so both window and target fit.
    stop = len(data) - seq_length - target_step + 1
    starts = range(0, stop, input_step)
    inputs = np.array([data[s:s + seq_length] for s in starts])
    targets = np.array([data[s + seq_length:s + seq_length + target_step] for s in starts])
    return inputs, targets


# ---- hyperparameters and data preparation ----
seq_length = 30     # input window length (time steps per sample)
input_step = 10     # stride between consecutive training windows
target_step = 1     # number of future points to predict
num_epochs = 10000
train_ratio = 0.8
batch_size = 64
input_data, target_data = create_seq(timeseries_data, seq_length, input_step, target_step)

# Chronological train/test split: the test set is strictly later in time.
train_size = int(len(input_data) * train_ratio)
train_input_seq = torch.tensor(input_data[:train_size], dtype=torch.float32)
train_target_seq = torch.tensor(target_data[:train_size], dtype=torch.float32)
test_input_seq = torch.tensor(input_data[train_size:], dtype=torch.float32)
test_target_seq = torch.tensor(target_data[train_size:], dtype=torch.float32)

dataset = TensorDataset(train_input_seq, train_target_seq)
# BUG FIX: the batch size was hard-coded as 64 here instead of `batch_size`.
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

test_dataset = TensorDataset(test_input_seq, test_target_seq)
# Evaluation gains nothing from shuffling; keep batch order deterministic.
test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

# t_len must match the input window length (was a hard-coded 30).
model = BERT(batch_size=batch_size, t_len=seq_length, c_in=1).to(device)
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=1e-5, weight_decay=1e-5)
scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=num_epochs, T_mult=1, eta_min=1e-6)



best_mae = 1000
for epoch in range(num_epochs):
    # ---- training ----
    model.train()
    running_loss = 0.0
    total_mae = 0.0
    for input_timeseries, target_timeseries in dataloader:
        # Move the batch to the training device once (was moved twice).
        input_timeseries = input_timeseries.to(device)
        target_timeseries = target_timeseries.to(device)
        optimizer.zero_grad()
        outputs = model(input_timeseries)
        loss = criterion(outputs, target_timeseries)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        total_mae += torch.mean(torch.abs(outputs - target_timeseries)).item()

    avg_loss = running_loss / len(dataloader)
    avg_mae = total_mae / len(dataloader)
    print(f'Epoch {epoch + 1}, Loss: {avg_loss}, MAE: {avg_mae}')

    # One cosine-annealing step per epoch.
    scheduler.step()

    # ---- evaluation ----
    model.eval()
    test_running_loss = 0.0
    test_total_mae = 0.0
    # BUG FIX: evaluation previously ran with autograd enabled, building
    # computation graphs that were never used (wasted time and memory).
    with torch.no_grad():
        for input_timeseries, test_target_timeseries in test_dataloader:
            input_timeseries = input_timeseries.to(device)
            test_target_timeseries = test_target_timeseries.to(device)
            test_outputs = model(input_timeseries)
            test_running_loss += criterion(test_outputs, test_target_timeseries).item()
            test_total_mae += torch.mean(torch.abs(test_outputs - test_target_timeseries)).item()
    avg_mae = test_total_mae / len(test_dataloader)
    avg_loss = test_running_loss / len(test_dataloader)
    print(f'Epoch {epoch + 1}, Test Loss: {avg_loss}, Test MAE: {avg_mae}')

    # Checkpoint the best model seen so far by test MAE.
    if avg_mae < best_mae:
        torch.save(model, 'best.pth')
        best_mae = avg_mae
    print(f'Best MAE: {best_mae}')