import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import tqdm
from flowdataset import *
import os
from config import *


def pos_embedding(batch_size):
    """Return sinusoidal positional embeddings of shape (batch_size, feature_num, hidden_dim).

    The table is identical for every batch element, so it is computed once
    and broadcast with ``expand`` instead of being copied batch_size times
    in a Python loop (the embedding is only read, never written, by callers).

    NOTE(review): like the original, this assumes ``hidden_dim`` is even —
    the cos half-table must match the ``1::2`` slice width. TODO confirm.
    """
    # Angle matrix: pos / 10000^(2i / hidden_dim), shape (feature_num, hidden_dim // 2).
    positions = torch.arange(feature_num, dtype=torch.float32).reshape(-1, 1)
    inv_freq = torch.pow(10000, torch.arange(0, hidden_dim, 2, dtype=torch.float32) / hidden_dim)
    angles = positions / inv_freq

    table = torch.zeros((feature_num, hidden_dim))
    table[:, 0::2] = torch.sin(angles)  # even channels: sin
    table[:, 1::2] = torch.cos(angles)  # odd channels: cos

    # Zero-copy broadcast over the batch dimension.
    dP = table.unsqueeze(0).expand(batch_size, -1, -1)
    return dP.to(device)


class DF_Network(nn.Module):
    """GRU encoder/decoder classifier with an auxiliary reconstruction head.

    The encoder's final hidden state is fed to the decoder as a sequence;
    the classifier operates on the matching-style combination
    [h_enc, h_dec, h_enc * h_dec, |h_enc - h_dec|] of the two hidden states,
    while ``recon`` maps the decoder outputs back to the feature sequence.
    Configuration constants (vocab_size, hidden_dim, ...) come from config.
    """

    def __init__(self):
        super(DF_Network, self).__init__()
        # Token embedding for the discretized flow features.
        self.emb = nn.Embedding(vocab_size, hidden_dim)
        # Encoder / decoder GRUs. NOTE(review): bias is disabled here —
        # presumably deliberate; confirm against the original experiments.
        self.enc = nn.GRU(
            hidden_dim, hidden_dim, GRU_layer_num,
            bias=False, batch_first=True,
            dropout=dropout_rate, bidirectional=GRU_bidirection,
        )
        self.dec = nn.GRU(
            hidden_dim, hidden_dim, GRU_layer_num,
            bias=False, batch_first=True,
            dropout=dropout_rate, bidirectional=GRU_bidirection,
        )
        # Reconstruction head over the flattened decoder output sequence.
        self.recon = nn.Linear(
            (GRU_dirction_num * GRU_layer_num) * (hidden_dim * GRU_dirction_num),
            feature_num,
        )
        # Classifier over the 4-way combined hidden states.
        out_input_dim = 4 * GRU_dirction_num * GRU_layer_num * hidden_dim
        out_hidden_dim = out_input_dim // 2
        self.out = nn.Sequential(
            nn.ELU(),
            nn.Flatten(),
            nn.BatchNorm1d(out_input_dim),
            nn.Linear(out_input_dim, out_hidden_dim),
            nn.BatchNorm1d(out_hidden_dim),
            nn.ReLU(),
            nn.Linear(out_hidden_dim, num_class),
        )

    def forward(self, x):
        """Return (log-probabilities over classes, reconstructed features)."""
        batch = x.size(0)
        # Embed tokens and add the sinusoidal positional table.
        embedded = self.emb(x) + pos_embedding(batch)
        # Encode; the final hidden state becomes the decoder's input sequence.
        _, enc_hidden = self.enc(embedded)
        enc_hidden = enc_hidden.transpose(0, 1)   # -> (batch, layers*dirs, hidden)
        dec_seq, dec_hidden = self.dec(enc_hidden)
        dec_hidden = dec_hidden.transpose(0, 1)   # -> (batch, layers*dirs, hidden)
        # Auxiliary reconstruction from the decoder's outputs.
        x_recon = self.recon(dec_seq.reshape(batch, -1))
        # Matching-style feature combination of the two hidden states.
        product = enc_hidden * dec_hidden
        difference = torch.abs(enc_hidden - dec_hidden)
        combined = torch.cat(
            [enc_hidden, dec_hidden, product, difference], dim=1
        ).reshape(batch, -1)
        logits = self.out(combined)
        return torch.log_softmax(logits, dim=-1), x_recon


def train_model(device, model, optimizer, data_batch, label_batch):
    """Run one optimization step on a single batch.

    The objective is NLL classification loss on the model's log-probabilities
    plus ``alpha``-weighted MSE reconstruction loss (``alpha`` from config).

    Returns:
        (loss, correct_num): the combined loss tensor for this batch and the
        number of correctly classified samples (int).
    """
    model.zero_grad()
    data_batch = data_batch.to(device)
    label_batch = label_batch.to(device)

    # Model returns (log-probs over classes, reconstructed features).
    pre, x_pre = model(data_batch)
    loss = nn.NLLLoss()(pre, label_batch.view(-1).long())
    loss_x = nn.MSELoss()(data_batch.reshape(-1).float(), x_pre.reshape(-1))
    loss = loss + loss_x * alpha

    loss.backward()
    optimizer.step()

    correct_num = (pre.argmax(dim=1) == label_batch).sum().item()
    return loss, correct_num


def evaluate(device, model, test_dataloader):
    """Evaluate the model on a dataloader of (label, data, fname) batches.

    Computes the same combined objective as training (NLL + alpha * MSE,
    ``alpha`` from config) but under ``torch.no_grad()`` so no autograd
    graph is built for the loss terms.

    Returns:
        (accuracy, loss): fraction of correct predictions and total loss
        divided by the number of samples; (0.0, 0.0) for an empty loader.
    """
    correct = 0
    total = 0
    total_loss = 0.0
    model.eval()
    with torch.no_grad():
        for label_va_batch, data_va_batch, fnames in test_dataloader:
            data_va_batch = data_va_batch.to(device)
            label_va_batch = label_va_batch.to(device)
            pre, x_pre = model(data_va_batch)
            pred = torch.argmax(pre, dim=1)
            correct += torch.sum(pred == label_va_batch).item()
            loss = nn.NLLLoss()(pre, label_va_batch.view(-1).long())
            loss_x = nn.MSELoss()(data_va_batch.reshape(-1).float(), x_pre.reshape(-1))
            total_loss += (loss + loss_x * alpha).item()
            total += len(label_va_batch)
    if total == 0:
        # Empty dataloader: avoid ZeroDivisionError.
        return 0.0, 0.0
    return correct / total, total_loss / total


def main():
    """Entry point: optionally train DF_Network, then evaluate on the test set.

    Behavior is driven by config globals: ``resume_model`` (load an existing
    checkpoint), ``only_eval`` (skip training), ``num_epoch``, ``batch_size``,
    ``learning_rate`` and ``output_model_path``.
    """
    model = DF_Network()

    # Fresh runs get N(0, 0.02) initialization everywhere except norm-layer
    # affine parameters (matched by the "gamma"/"beta" naming convention).
    if not resume_model:
        for n, p in model.named_parameters():
            if "gamma" not in n and "beta" not in n:
                p.data.normal_(0, 0.02)

    model = model.to(device)

    if resume_model:
        if os.path.isfile(output_model_path):
            print("resuming model from:", output_model_path)
            # map_location lets a checkpoint saved on GPU load on a CPU-only host.
            model.load_state_dict(torch.load(output_model_path, map_location=device))
        else:
            print("[Caution]", output_model_path, "does not exist")

    print("building dataset...")

    if not only_eval:
        train_dataset = flowDataset('/home/spa/2023/robust/traffic/perturb/dataset_CW/cw_json/train.json', truncate_num=truncate_num, offset=offset)
        train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

    test_dataset = flowDataset('/home/spa/2023/robust/traffic/perturb/dataset_CW/cw_json/test.json', truncate_num=truncate_num, offset=offset)
    test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)

    optimizer = torch.optim.RMSprop(model.parameters(), lr=learning_rate)

    best_result = 0.0

    def save_if_best(acc):
        # Persist the weights whenever evaluation accuracy improves.
        nonlocal best_result
        if acc > best_result:
            best_result = acc
            torch.save(model.state_dict(), output_model_path)

    if not only_eval:
        print("Start training.")
        for epoch in range(1, num_epoch + 1):
            model.train()

            total_num = 0
            correct_num = 0
            total_loss = 0

            pbar = tqdm.tqdm(enumerate(train_dataloader), total=len(train_dataloader))
            pbar.set_description(f"epoch {epoch}")

            for i, (label_batch, data_batch, fnames) in pbar:
                # Skip the final (possibly short) batch — presumably to keep
                # BatchNorm1d from seeing a tiny batch; TODO confirm intent.
                if (i + 1) == len(train_dataloader):
                    break
                loss, correct = train_model(device, model, optimizer, data_batch, label_batch)
                total_loss += loss.item()
                correct_num += correct
                total_num += len(label_batch)

                pbar.set_postfix({
                    'r_acc': f"{correct_num / total_num:.3f}",
                    'r_loss': f"{total_loss / total_num:.3f}",
                })

            result, eval_loss = evaluate(device, model, test_dataloader)
            print("acc:", result, "loss:", eval_loss)
            save_if_best(result)

    print("start testing...")
    result, eval_loss = evaluate(device, model, test_dataloader)
    print("acc:", result, "loss:", eval_loss)
    save_if_best(result)


# Run training/evaluation only when executed as a script, not on import.
if __name__ == "__main__":
    main()