import os
import random
import torch
import pandas as pd
import numpy as np
from model import TransformerAutoEncoder
from torch.utils.data import TensorDataset, DataLoader
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import joblib

# One-shot debug counter: while truthy, the extraction loop below prints the
# shape and first row of a training batch, then decrements this to 0.
show_limit = 1


# Extract features using the trained model

# Set env
def seed_torch(seed=42):
    """Seed every RNG source (Python, NumPy, PyTorch CPU and CUDA) so runs are reproducible.

    Args:
        seed: integer seed applied to all random-number generators (default 42).
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # CUDA seeding is a safe no-op on CPU-only machines; seed all GPUs for multi-GPU setups.
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Trade speed for determinism in cuDNN kernel selection.
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


# Make the whole run reproducible before any data shuffling happens.
seed_torch()

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load data. Missing values in BOTH splits are imputed with the training-set
# medians, so no statistics leak from the test set.
train_df = pd.read_csv('dataset/cs-training.csv')
median = train_df.median()
print("median:", median)
train_df = train_df.fillna(median)
test_df = pd.read_csv('dataset/cs-test.csv').fillna(median)

print(train_df.columns)
# Drop the leading index column; after that, column 0 is the label and the
# remaining columns are the features.
train_arr = train_df.values[:, 1:]
test_arr = test_df.values[:, 1:]

y, X = train_arr[:, 0], train_arr[:, 1:]
y_test, X_test = test_arr[:, 0], test_arr[:, 1:]

# Scale features to [0, 1] with statistics fitted on the training set only.
scaler = MinMaxScaler().fit(X)
X = scaler.transform(X)
X_test = scaler.transform(X_test)

X_train, X_val, y_train, y_val = train_test_split(
    X, y, test_size=0.2, random_state=42, stratify=y)

# Wrap each split in a non-shuffling DataLoader: extracted features are later
# matched back to rows by position, so order must stay stable.
batch_size = 128


def _as_loader(features):
    # One batch of float32 inputs per iteration, in original row order.
    return DataLoader(TensorDataset(torch.FloatTensor(features)),
                      batch_size=batch_size, shuffle=False)


train_loader = _as_loader(X_train)
val_loader = _as_loader(X_val)
test_loader = _as_loader(X_test)

# Restore the trained autoencoder and switch it to inference mode.
m = TransformerAutoEncoder(num_inputs=10).to(device)
m.load_state_dict(torch.load('model.pt', map_location=device))
m.eval()

# Per-split feature buffers, filled batch-by-batch and concatenated below.
train_features = []
train_features_c = []
val_features = []
test_features = []

# Inference only: no_grad avoids building autograd graphs, so .detach() is
# unnecessary on the outputs.
with torch.no_grad():
    for batch in train_loader:
        x = batch[0].to(device)
        # The autoencoder returns two feature tensors per input batch.
        f, f_c = m(x)
        ans_f = f.cpu().numpy()
        ans_f_c = f_c.cpu().numpy()
        train_features.append(ans_f)
        train_features_c.append(ans_f_c)
        if show_limit:
            # One-time debug dump: batch shape, first input row, output shapes.
            print(f"ever batch input ：{len(batch[0])} {len(batch[0][0])}")
            for item in batch[0][0]:
                print('{:.10f}'.format(float(item)), end=",")
            print("\n")
            print(f"ever batch output ：{len(ans_f)} {len(ans_f[0])}")
            print(f"ever batch output ：{len(ans_f_c)} {len(ans_f_c[0])}")
            show_limit -= 1
    for batch in val_loader:
        x = batch[0].to(device)
        f, _ = m(x)
        val_features.append(f.cpu().numpy())
    for batch in test_loader:
        x = batch[0].to(device)
        f, _ = m(x)
        test_features.append(f.cpu().numpy())

train_features = np.concatenate(train_features)
train_features_c = np.concatenate(train_features_c)
val_features = np.concatenate(val_features)
test_features = np.concatenate(test_features)

print(train_features.shape, train_features_c.shape, val_features.shape, test_features.shape)

# Save features (joblib pickles; load these back with joblib.load, not np.load).
# BUG FIX: the second dump previously wrote `train_features` again, so
# train_features_c.npy contained the wrong array.
joblib.dump(train_features, 'train_features.npy')
joblib.dump(train_features_c, 'train_features_c.npy')
joblib.dump(val_features, 'val_features.npy')
joblib.dump(test_features, 'test_features.npy')
