import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import random
import pickle
from torch.utils.data import DataLoader, Dataset
import torchxlstm
from datetime import datetime
from pytorch_tcn import TCN
from sklearn.metrics import precision_score
import os
import warnings
# Silence RuntimeWarnings (presumably the divide-by-zero inside
# minmax_normalize — TODO confirm) and generic UserWarnings so the
# per-epoch training log stays readable.
warnings.filterwarnings("ignore", category=RuntimeWarning)
warnings.filterwarnings("ignore", category=UserWarning)
# Allow duplicate OpenMP runtimes to coexist (common libiomp/MKL clash,
# typically on Windows); without this torch+numpy can abort at import.
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
# Everything below runs on CPU.
device = 'cpu'

class CustomDataset(Dataset):
    """Torch Dataset over paired feature/label arrays.

    Items are returned as float32 tensors: (features[i], labels[i]).
    """

    def __init__(self, df, label1):
        self.df = df          # feature array, indexed along axis 0
        self.label1 = label1  # label array, parallel to df

    def __len__(self):
        # One sample per row of the feature array.
        return self.df.shape[0]

    def __getitem__(self, index):
        features = self.df[index]
        target = self.label1[index]
        return torch.tensor(features).float(), torch.tensor(target).float()

# NOTE(review): an initial np.load of train_x/train_y plus a float cast
# stood here, but both arrays are reloaded from the same files below
# before being used, so the duplicate loads (and the discarded cast)
# have been removed as dead code.
def minmax_normalize(arr):
    """Min-max rescale `arr` to [0, 1] along its last axis.

    NaN-aware: min/max are computed with nanmin/nanmax, so NaN entries do
    not poison the per-row range (they remain NaN in the output).

    Parameters
    ----------
    arr : np.ndarray
        Array of shape (N, C, L); each (sample, channel) row is rescaled
        independently over the L positions.

    Returns
    -------
    np.ndarray
        Same shape as `arr`, with each row mapped to [0, 1]; constant
        rows come out as all zeros.
    """
    min_vals = np.nanmin(arr, axis=2, keepdims=True)
    max_vals = np.nanmax(arr, axis=2, keepdims=True)
    span = max_vals - min_vals
    # BUG FIX: a constant row has span == 0 and the naive formula divides
    # by zero, yielding inf/NaN (previously hidden by the suppressed
    # RuntimeWarning).  Substitute a span of 1 so such rows map to 0.
    safe_span = np.where(span == 0, 1.0, span)
    return (arr - min_vals) / safe_span
# Load raw training data: `a` holds per-sample feature matrices and `b`
# the integer class labels (0/1/2).  Given the CNN below, `a` is assumed
# to be shaped (N, 2, 180) — TODO confirm against the .npy files.
a = np.load('train_x.npy')
b = np.load('train_y.npy')

# Drop every sample whose features or label contain NaN.
nan_features = np.isnan(a).any(axis=(1, 2))
nan_labels = np.isnan(b)
nan_mask = nan_features | nan_labels
cleaned_a = a[~nan_mask]
b = b[~nan_mask]

# BUG FIX: the original normalized the raw, uncleaned `a` and then fed
# the *unnormalized* `cleaned_a` into training, so normalization never
# reached the model.  Normalize the cleaned array that is actually used.
cleaned_a = minmax_normalize(cleaned_a)

# Undersample to rebalance the classes: keep 1/10 of class 0 and 1/4 of
# classes 1 and 2, sampled uniformly at random.
count_0 = np.count_nonzero(b == 0) // 10
count_1 = np.count_nonzero(b == 1) // 4
count_2 = np.count_nonzero(b == 2) // 4
indices_0 = np.where(b == 0)[0]
indices_1 = np.where(b == 1)[0]
indices_2 = np.where(b == 2)[0]
index_0 = random.sample(indices_0.tolist(), count_0) if len(indices_0) >= count_0 else []
index_1 = random.sample(indices_1.tolist(), count_1) if len(indices_1) >= count_1 else []
index_2 = random.sample(indices_2.tolist(), count_2) if len(indices_2) >= count_2 else []

selected_indices = index_0 + index_1 + index_2
bb = b[selected_indices]
aa = cleaned_a[selected_indices]

# One-hot encode the labels.  BUG FIX: cast to int before indexing —
# labels loaded from .npy are typically float64, and float indices raise
# an IndexError under modern NumPy.
bbb = np.eye(3)[bb.astype(int)]

# Shared width of the intermediate feature dimension used by the
# CNN / TCN / xLSTM stack below.
num = 16

train_dataset = CustomDataset(aa, bbb)
train_dataloader = DataLoader(train_dataset, batch_size=200, shuffle=True)


class SimpleCNN(nn.Module):
    """1-D CNN front-end: Conv1d(2→16, k=3) → ReLU → MaxPool1d(3) → Linear.

    The 16*59 flatten assumes an input of shape (batch, 2, 180): the
    convolution leaves 178 positions and pooling with stride 3 leaves 59
    — TODO confirm the sequence length against the data files.
    Output shape is (batch, 1, num).
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv1d(in_channels=2, out_channels=16, kernel_size=3)
        # Non-overlapping pooling windows of width 3.
        self.pool = nn.MaxPool1d(kernel_size=3, stride=3)
        # Input dimension matches the flattened conv/pool output above.
        self.fc1 = nn.Linear(16 * 59, num)

    def forward(self, x):
        feats = self.pool(nn.functional.relu(self.conv1(x)))
        flat = feats.view(-1, 16 * 59)
        # Add a singleton sequence axis for the TCN/xLSTM stages downstream.
        return self.fc1(flat).view(-1, 1, num)


class GEM(nn.Module):
    """CNN → TCN → xLSTM stack producing 3-class logits.

    Parameters
    ----------
    hidden_size : int
        Number of xLSTM heads (each of size 4), default 30.
    """

    def __init__(self, hidden_size=30):
        super().__init__()
        self.cnn = SimpleCNN()
        self.tcn = TCN(num_inputs=num, num_channels=[num, num], input_shape='NLC')
        self.xlstm = torchxlstm.xLSTM(input_size=num, head_size=4, num_heads=hidden_size, layers='m')
        self.change1 = nn.Linear(in_features=num, out_features=3)

    def forward(self, stocks):
        """Return raw class logits of shape (batch, 3).

        BUG FIX: the original applied softmax here while the training
        script feeds this output to nn.CrossEntropyLoss, which applies
        log-softmax internally — a double softmax that squashes
        gradients.  Returning logits makes the loss correct; argmax
        predictions are unchanged because softmax is monotonic.
        """
        h_stock = self.cnn(stocks)
        h_stock = self.tcn(h_stock)
        h_stock, _ = self.xlstm(h_stock)
        h_stock = h_stock.reshape(-1, num)
        return self.change1(h_stock)



learning_rate = 0.001
mymodel = GEM().to(device)
optimizer = optim.AdamW(mymodel.parameters(), lr=learning_rate)
# CrossEntropyLoss accepts one-hot / probability targets (torch >= 1.10),
# which matches the one-hot labels produced above.
loss_function = torch.nn.CrossEntropyLoss()

num_epochs = 100
for epoch in range(num_epochs):
    print(f"Epoch {epoch + 1}/{num_epochs}")
    start_time = datetime.now()
    mymodel.train()
    running_loss = 0.0
    precision_sum = 0.0
    num_batches = 0
    for stocks, label1 in train_dataloader:
        stocks, label1 = stocks.to(device), label1.to(device)
        optimizer.zero_grad()
        out = mymodel(stocks)
        loss = loss_function(out, label1)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()

        # Move to CPU before .numpy() so the script also works when
        # `device` is a CUDA device; .detach() drops the autograd graph.
        true_cls = np.argmax(label1.detach().cpu().numpy(), axis=1)
        pred_cls = np.argmax(out.detach().cpu().numpy(), axis=1)
        precision_sum += precision_score(true_cls, pred_cls, average='macro')
        num_batches += 1

    end_time = datetime.now()
    # BUG FIX: the original counter started at 1 and was incremented per
    # batch, so both averages divided by num_batches + 1.  Also, the
    # metric printed as "Acc" is actually batch-averaged macro precision
    # — label it honestly.
    denom = max(num_batches, 1)
    print(f"Loss: {running_loss / denom}, Macro precision: {precision_sum / denom}, Time: {end_time - start_time}")