import os
import random
import torch
import pandas as pd
import numpy as np
from model import TransformerAutoEncoder, TransformerEncoder
from torch.utils.data import TensorDataset, DataLoader
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from collections import Counter


class XShape(torch.nn.Module):
    """Shape-probing twin of the autoencoder model.

    Mirrors the layer layout of TransformerAutoEncoder and exposes helpers
    that print intermediate tensor shapes, so you can trace how ``x``
    transforms through the model.

    Args:
        num_inputs: number of input features per sample.
        hidden_size: width after the excite projection; must equal
            ``embed_dim * num_subspaces``.
        num_subspaces: number of ``embed_dim``-wide subspaces that
            ``hidden_size`` is split into.
        embed_dim: per-subspace embedding width fed to each TransformerEncoder.
        num_heads: attention heads per encoder layer.
        dropout: dropout probability for the encoder layers.
        feedforward_dim: width of the encoders' feed-forward sublayer.
    """

    def __init__(
        self,
        num_inputs,
        hidden_size=1024,
        num_subspaces=8,
        embed_dim=128,
        num_heads=8,
        dropout=0,
        feedforward_dim=512,
    ):
        # BUG FIX: the class previously did not inherit torch.nn.Module, so
        # super().__init__() was meaningless and the Linear/encoder submodules
        # were never registered with PyTorch.
        super().__init__()
        assert hidden_size == embed_dim * num_subspaces
        self.num_subspaces = num_subspaces
        self.num_heads = num_heads
        self.embed_dim = embed_dim

        self.excite = torch.nn.Linear(in_features=num_inputs, out_features=hidden_size)
        self.encoder_1 = TransformerEncoder(embed_dim, num_heads, dropout, feedforward_dim)
        self.encoder_2 = TransformerEncoder(embed_dim, num_heads, dropout, feedforward_dim)
        self.encoder_3 = TransformerEncoder(embed_dim, num_heads, dropout, feedforward_dim)

        self.reconstructor = torch.nn.Linear(in_features=hidden_size, out_features=num_inputs)
        # BUG FIX: removed `self.x = x` — `x` was undefined here and raised
        # NameError on every instantiation.

    def after_excite(self, x):
        """Apply the excite projection + ReLU to ``x``, print the resulting
        shape, and return the activated tensor.

        BUG FIX: the original ignored the ``x`` argument, read the never-set
        ``self.x`` instead, and returned None.
        """
        x = torch.nn.functional.relu(self.excite(x))
        print(x.shape)
        return x

# Explore how the shape of x changes as it moves through the model

# Set env
def seed_torch(seed=42):
    """Seed every RNG source (hash, Python, NumPy, PyTorch CPU/CUDA) and pin
    cuDNN to deterministic settings so runs are reproducible."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    for seeder in (
        random.seed,
        np.random.seed,
        torch.manual_seed,
        torch.cuda.manual_seed,
        torch.cuda.manual_seed_all,  # covers multi-GPU setups
    ):
        seeder(seed)
    cudnn = torch.backends.cudnn
    cudnn.benchmark = False
    cudnn.deterministic = True
    cudnn.enabled = True


# Seed all RNGs before any randomness-dependent work (split, shuffling, init).
seed_torch()

# Prefer GPU when available; everything below moves tensors via this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load data
train_df = pd.read_csv('dataset/cs-training.csv')
median = train_df.median()
print("median:\n", median)
print("\n")
train_df = train_df.fillna(median)
test_df = pd.read_csv('dataset/cs-test.csv')
test_df = test_df.fillna(median)

print(train_df.columns)
train = train_df.values[:, 1:]
test = test_df.values[:, 1:]

X, y = train[:, 1:], train[:, 0]
X_test, y_test = test[:, 1:], test[:, 0]

# Min-max scaler
scaler = MinMaxScaler().fit(X)
X = scaler.transform(X)
X_test = scaler.transform(X_test)

print(f'=====进行切分=====')
# Stratified 80/20 split so both sides keep the original class ratio.
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42, stratify=y)


def _report_split(name, labels):
    """Print the split size plus per-class counts and ratios, most frequent first."""
    print(f'{name} 总共: {len(labels)} 条数据')
    # Counter.most_common() replaces the original manual sorted(..., reverse=True).
    for value, count in Counter(labels).most_common():
        print(f'值：{value}: {count}条 {count / len(labels)}占比')


# The two near-identical reporting loops were factored into one helper;
# output is byte-for-byte identical to the original.
_report_split('验证集', y_val)
_report_split('训练集', y_train)
print(f'=====切分完成=====')

# Construct dataset and dataloader.
# Unsupervised/autoencoder-style setup: the datasets hold features only, no labels.
batch_size = 128
train_loader = DataLoader(TensorDataset(torch.FloatTensor(X_train)), batch_size=batch_size, shuffle=True)
val_loader = DataLoader(TensorDataset(torch.FloatTensor(X_val)), batch_size=batch_size, shuffle=False)

# Probe model used to print intermediate tensor shapes (XShape mirrors the real model's layers).
m = XShape(num_inputs=10)
# Train model — single epoch is enough for shape exploration.
epochs = 1
for e in range(epochs):
    train_loss = 0
    for batch in train_loader:
        # train step: TensorDataset yields a 1-tuple, so the features are batch[0].
        x = batch[0].to(device)
