

import torch
import torch.nn as nn
import numpy as np
from transformer import MultiTaskTransformer, PositionalEncoding
import os

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def multi_task_test(model_path, data_path, seq_len=600, n_features=17):
    """Run the multi-task classifier on a single saved sample.

    Loads a fully pickled model checkpoint, feeds it one
    ``(1, seq_len, n_features)`` sample read from an ``.npy`` file, and
    reduces each task head's logits to predicted class indices.

    Args:
        model_path: Path to a ``.pth`` file containing the whole pickled
            model object (not just a ``state_dict``).
        data_path: Path to an ``.npy`` file holding ``seq_len * n_features``
            values for one sample.
        seq_len: Sequence length the sample is reshaped to (default 600).
        n_features: Feature dimension the sample is reshaped to (default 17).

    Returns:
        A list with one entry per task head; each entry is the list of
        predicted class indices produced by ``argmax`` over that head's
        logits.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # SECURITY: weights_only=False runs the full pickle machinery, which can
    # execute arbitrary code embedded in the checkpoint — only load model
    # files from trusted sources.
    model = torch.load(model_path, weights_only=False, map_location=device)
    model.to(device)
    model.eval()

    sample = np.load(data_path).reshape(1, seq_len, n_features)
    sample = torch.tensor(sample).float().to(device)

    # Inference only: no_grad skips autograd bookkeeping entirely.
    with torch.no_grad():
        output = model(sample)
    print(output)

    my_y = []
    for logits in output:
        # Index of the highest score along the class dimension.
        _, pred = torch.max(logits, 1)
        my_y.append(pred.cpu().numpy().tolist())
    print(my_y)
    return my_y

if __name__ == '__main__':
    # Smoke-test entry point: classify one bundled sample with the
    # trained multi-task checkpoint.
    multi_task_test(
        'E:\\github\\flask_pytorch_models\\classfy_model_end.pth',
        os.path.join('multi_task', 'multi_task_sample1.npy'),
    )