import os
import random
import torch
import pandas as pd
import numpy as np
from model import TransformerAutoEncoder
from torch.utils.data import TensorDataset, DataLoader
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from collections import Counter
# Train the transformer autoencoder

# Set env
def seed_torch(seed=42):
    """Seed every RNG this script relies on (hashing, `random`, NumPy,
    PyTorch CPU and CUDA) and pin cuDNN to deterministic kernels so that
    repeated runs are reproducible.

    Args:
        seed: integer seed applied to all generators (default 42).
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # covers every device in multi-GPU setups
    # Deterministic cuDNN: keep it enabled but disable autotuning
    # (benchmark) and force reproducible algorithm selection.
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
# Seed all RNGs once, before any data shuffling or weight init happens.
seed_torch()

# Prefer GPU when available; the model and every batch are moved here.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load data
train_df = pd.read_csv('dataset/cs-training.csv')
# Per-column medians of the raw training CSV. The same train-derived
# medians impute missing values in BOTH train and test below, so no test
# statistics leak into preprocessing.
median = train_df.median()
print("median:\n", median)
print("\n")
train_df = train_df.fillna(median)
test_df = pd.read_csv('dataset/cs-test.csv')
test_df = test_df.fillna(median)

print(train_df.columns)
# Drop the first CSV column (row index). Remaining layout is assumed to be
# [target, feature_1 .. feature_10] — TODO confirm against the printed header.
train = train_df.values[:,1:]
test = test_df.values[:,1:]

X, y = train[:, 1:], train[:, 0]
X_test, y_test = test[:, 1:], test[:, 0]

# Min-max scaler
# Fit scaling parameters on training features only, then apply the same
# transform to the test features.
scaler = MinMaxScaler().fit(X)
X = scaler.transform(X)
X_test = scaler.transform(X_test)

print(f'=====进行切分=====')
# Stratified 80/20 split: class ratios are preserved in both partitions,
# and random_state pins the split for reproducibility.
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42, stratify=y)

def _print_label_stats(name, labels):
    """Print the size of a label array and each class's count and ratio,
    most frequent class first. (Replaces two copy-pasted report loops.)"""
    print(f'{name} 总共: {len(labels)} 条数据')
    by_freq = sorted(Counter(labels).items(), key=lambda kv: kv[1], reverse=True)
    for value, count in by_freq:
        print(f'值：{value}: {count}条 {count/len(labels)}占比')

_print_label_stats('验证集', y_val)
_print_label_stats('训练集', y_train)
print(f'=====切分完成=====')

# Construct dataset and dataloader.
# Only features are wrapped — the autoencoder trains unsupervised, so the
# labels never enter the loaders.
batch_size = 128
train_dataset = TensorDataset(torch.FloatTensor(X_train))
val_dataset = TensorDataset(torch.FloatTensor(X_val))
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)

# Construct model: 10 inputs matches the feature count left after dropping
# the index and target columns above.
m = TransformerAutoEncoder(num_inputs=10).to(device)
optimizer = torch.optim.Adam(m.parameters(), lr=1e-5)

# Train model: for each epoch, one optimisation pass over the training
# loader followed by one no-grad evaluation pass over the validation loader.
epochs = 10
for epoch in range(epochs):
    # --- optimisation pass ---
    m.train()
    running_train = 0.0
    for (features,) in train_loader:
        features = features.to(device)
        loss = m.loss(features)

        optimizer.zero_grad()  # clear stale gradients
        loss.backward()        # back-propagate reconstruction loss
        optimizer.step()       # apply the parameter update

        running_train += loss.item()

    # --- validation pass (no gradient tracking) ---
    m.eval()
    running_val = 0.0
    with torch.no_grad():
        for (features,) in val_loader:
            running_val += m.loss(features.to(device)).item()

    # Report the mean per-batch loss for both partitions.
    print('====> Epoch: {} train_loss: {:.3f}  val_loss: {:.3f}'.format(
          epoch, running_train / len(train_loader), running_val / len(val_loader)))

# Save model (final-epoch weights only; no best-checkpoint tracking).
torch.save(m.state_dict(), 'model.pt')
