import csv
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import tqdm
from kdddata import KddData
from flowdataset import *
import os

# ---- Training hyperparameters -------------------------------------------
batch_size = 32
num_epoch = 100
learning_rate = 1e-3
print("learning rate:", learning_rate)
alpha = 0.01  # weight of the auxiliary sequence-reconstruction (MSE) loss

resume_model = True   # load an existing checkpoint before training
only_eval = False     # skip training, just evaluate

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# device = torch.device('cpu')
print("device:", device)

# ---- Model hyperparameters ----------------------------------------------
num_class = 95
truncate_num = 1000           # flows truncated/padded to this many packets
feature_num = truncate_num    # sequence length fed to the model
hidden_dim = 8
vocab_size = 300
GRU_layer_num = 2
GRU_dirction_num = 2
GRU_bidirection = GRU_dirction_num == 2
dropout_rate = 0.1

output_model_path = "fsnet_save/1000/fsnet.params"
# BUG FIX: os.mkdir raises FileNotFoundError when intermediate directories
# (e.g. "fsnet_save") do not exist; os.makedirs creates the whole chain and
# exist_ok=True avoids a race with concurrent runs.
if not os.path.isdir(os.path.dirname(output_model_path)):
    os.makedirs(os.path.dirname(output_model_path), exist_ok=True)
    print("making dir:", os.path.dirname(output_model_path))


# Sinusoidal positional encoding, precomputed once. The encoding is the same
# for every sample, so a single broadcast assignment replaces the original
# per-sample Python loop (identical values, O(1) instead of O(batch_size)).
dP = torch.zeros((batch_size, feature_num, hidden_dim))
dX = torch.arange(feature_num, dtype=torch.float32).reshape(-1, 1) / torch.pow(10000, torch.arange(0, hidden_dim, 2, dtype=torch.float32) / hidden_dim)
dP[:, :, 0::2] = torch.sin(dX)
dP[:, :, 1::2] = torch.cos(dX)
dP = dP.to(device)

class DF_Network(nn.Module):
    """FS-Net-style encoder-decoder classifier over packet-length sequences.

    The encoder GRU summarises the embedded sequence; its final hidden
    states seed a decoder GRU whose flattened outputs reconstruct the input
    sequence (auxiliary loss), while concatenated encoder/decoder hidden
    states feed the classification head.
    """

    def __init__(self):
        super(DF_Network, self).__init__()
        self.emb = nn.Embedding(vocab_size, hidden_dim)
        # NOTE(review): the original positional arguments set bias=False for
        # both GRUs — kept as-is, but confirm disabling bias was intended.
        self.enc = nn.GRU(hidden_dim, hidden_dim, GRU_layer_num,
                          bias=False, batch_first=True,
                          dropout=dropout_rate, bidirectional=GRU_bidirection)
        self.dec = nn.GRU(hidden_dim, hidden_dim, GRU_layer_num,
                          bias=False, batch_first=True,
                          dropout=dropout_rate, bidirectional=GRU_bidirection)
        # Reconstruction head: flattened decoder outputs -> original sequence.
        self.recon = nn.Linear((GRU_dirction_num * GRU_layer_num) * (hidden_dim * GRU_dirction_num), feature_num)
        out_input_dim = 4 * GRU_dirction_num * GRU_layer_num * hidden_dim
        out_hidden_dim = out_input_dim // 2
        self.out = nn.Sequential(
            nn.ELU(),
            nn.Flatten(),
            nn.BatchNorm1d(out_input_dim),
            nn.Linear(out_input_dim, out_hidden_dim),
            nn.BatchNorm1d(out_hidden_dim),
            nn.ReLU(),
            nn.Linear(out_hidden_dim, num_class),
        )

    def forward(self, x):
        """Return (class log-probabilities, reconstructed sequence)."""
        # BUG FIX: use the actual batch size instead of the global constant,
        # so a partial final batch no longer breaks the dP addition/reshapes.
        b = x.size(0)
        x = self.emb(x)
        x = x + dP[:b]  # add precomputed positional encoding
        enc_out, enc_hidden = self.enc(x)
        # (layers*dirs, B, H) -> (B, layers*dirs, H): the stacked hidden
        # states become a short "sequence" consumed by the decoder GRU.
        dec_input = enc_hidden.transpose(0, 1)
        dec_out, dec_hidden = self.dec(dec_input)
        dec_output = dec_hidden.transpose(0, 1)
        x_recon = self.recon(dec_out.reshape(b, -1))
        # Element-wise interaction features between encoder/decoder states.
        product = dec_input * dec_output
        absolute = torch.abs(dec_input - dec_output)
        final = torch.cat([dec_input, dec_output, product, absolute], dim=1).reshape(b, -1)
        output = self.out(final)
        return nn.LogSoftmax(dim=-1)(output), x_recon

def batch_loader(data, label, batch_size=None):
    """Yield (data, label) mini-batches sliced along the first dimension.

    Args:
        data: tensor of shape (N, ...), sliced along dim 0.
        label: tensor of shape (N, ...), sliced in lockstep with data.
        batch_size: batch size; defaults to the module-level ``batch_size``
            for backward compatibility with existing callers.

    Yields:
        (data_batch, label_batch) pairs; the final batch holds the
        remainder and may be smaller than ``batch_size``.
    """
    if batch_size is None:
        batch_size = globals()["batch_size"]
    instances_num = data.size()[0]
    for i in range(instances_num // batch_size):
        data_batch = data[i * batch_size : (i + 1) * batch_size, :]
        label_batch = label[i * batch_size : (i + 1) * batch_size]
        yield data_batch, label_batch
    # BUG FIX: the original condition (> full_batches*batch_size + 1)
    # silently dropped a trailing remainder of exactly one instance.
    if instances_num % batch_size:
        data_batch = data[instances_num // batch_size * batch_size :, :]
        label_batch = label[instances_num // batch_size * batch_size :]
        yield data_batch, label_batch

def train_model(device, model, optimizer, data_batch, label_batch, alpha=0.01):
    """Run one optimisation step; return (total loss tensor, #correct).

    The loss is NLL classification loss plus ``alpha`` times the auxiliary
    reconstruction MSE between the (float-cast) input sequence and the
    model's reconstruction. ``alpha`` was promoted from a module-level
    global to a keyword argument (default matches the original constant).
    """
    model.zero_grad()
    data_batch = data_batch.to(device)
    label_batch = label_batch.to(device)
    pre, x_pre = model(data_batch)
    loss = nn.NLLLoss()(pre, label_batch.view(-1).long())
    # Reconstruction loss: the decoder should reproduce the input sequence.
    loss_x = nn.MSELoss()(data_batch.reshape(-1).float(), x_pre.reshape(-1))
    loss = loss + loss_x * alpha
    loss.backward()
    optimizer.step()

    # NOTE(review): assumes label_batch is 1-D; a (B, 1) label tensor would
    # broadcast here and inflate the count — confirm against the dataset.
    correct_num = (pre.argmax(dim=1) == label_batch).sum().item()
    return loss, correct_num

def evaluate(device, model, test_dataloader, alpha=0.01):
    """Evaluate the model; return (accuracy, average loss per instance).

    Expects the loader to yield (label_batch, data_batch, fnames) tuples.
    The final batch is skipped because it may be partial (the training code
    assumes fixed-size batches). ``alpha`` matches train_model's weighting
    of the auxiliary reconstruction loss (keyword with the original global's
    default value).
    """
    correct = 0
    total = 0
    total_loss = 0
    model.eval()
    for i, (label_va_batch, data_va_batch, fnames) in enumerate(test_dataloader):
        # Skip the (possibly partial) final batch.
        if (i + 1) == len(test_dataloader):
            break
        data_va_batch = data_va_batch.to(device)
        label_va_batch = label_va_batch.to(device)
        with torch.no_grad():
            pre, x_pre = model(data_va_batch)
        pred = torch.argmax(pre, dim=1)
        correct += torch.sum(pred == label_va_batch).item()
        loss = nn.NLLLoss()(pre, label_va_batch.view(-1).long())
        loss_x = nn.MSELoss()(data_va_batch.reshape(-1).float(), x_pre.reshape(-1))
        total_loss += (loss + loss_x * alpha).item()
        total += len(label_va_batch)
    if total == 0:
        # Guard: a loader with <= 1 batch would otherwise divide by zero.
        return 0.0, 0.0
    return correct / total, total_loss / total

def main():
    """Train and/or evaluate DF_Network on the flow dataset.

    Behaviour is controlled by the module-level flags ``resume_model`` and
    ``only_eval``; the best checkpoint (by test accuracy) is written to
    ``output_model_path``.
    """
    model = DF_Network()

    if not resume_model:
        # Fresh start: re-initialise all weights with N(0, 0.02), except
        # BatchNorm affine parameters (gamma/beta).
        for n, p in list(model.named_parameters()):
            if "gamma" not in n and "beta" not in n:
                p.data.normal_(0, 0.02)

    model = model.to(device)

    if resume_model:
        if os.path.isfile(output_model_path):
            # BUG FIX: map_location=device lets a CUDA-saved checkpoint be
            # resumed on a CPU-only machine (and vice versa); without it,
            # torch.load fails when the saving device is unavailable.
            model.load_state_dict(torch.load(output_model_path, map_location=device))
            print("resumed model from:", output_model_path)
        else:
            print("[Caution]", output_model_path, "not exists")

    print("building dateset...")
    train_dataset = flowDataset('dataset_CW/train.fname', load_time=0, load_length=1, load_direction=1, no_neg=True, truncate_num=truncate_num)
    test_dataset = flowDataset('dataset_CW/test.fname', load_time=0, load_length=1, load_direction=1, no_neg=True, truncate_num=truncate_num)

    print("batch size:", batch_size)
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)

    optimizer = torch.optim.RMSprop(model.parameters(), lr=learning_rate)

    result, best_result = 0.0, 0.0

    if not only_eval:
        print("Start training.")
        for epoch in range(1, num_epoch + 1):
            model.train()

            total_num = 0
            correct_num = 0
            total_loss = 0

            pbar = tqdm.tqdm(enumerate(train_dataloader), total=len(train_dataloader))
            pbar.set_description(f"epoch {epoch}")
            # Loader yields (label, data, filenames); filenames are unused.
            for i, (label_batch, data_batch, fnames) in pbar:
                # Skip the (possibly partial) final batch.
                if (i + 1) == len(train_dataloader):
                    break
                loss, correct = train_model(device, model, optimizer, data_batch, label_batch)
                total_loss += loss.item()
                correct_num += correct
                total_num += len(label_batch)

                pbar.set_postfix({
                    'r_acc': f"{correct_num / total_num:.3f}",
                    'r_loss': f"{total_loss / total_num:.3f}",
                })

            result, eval_loss = evaluate(device, model, test_dataloader)
            print("acc:", result, "loss:", eval_loss)
            # Checkpoint only when test accuracy improves.
            if result > best_result:
                best_result = result
                torch.save(model.state_dict(), output_model_path)

    print("start testing...")
    result, eval_loss = evaluate(device, model, test_dataloader)
    print("acc:", result, "loss:", eval_loss)
    if result > best_result:
        best_result = result
        torch.save(model.state_dict(), output_model_path)

if __name__ == "__main__":
    # X = torch.randint(0, vocab_size, (batch_size, feature_num))
    
    # model = DF_Network()
    # for n, p in list(model.named_parameters()):
    #     if "gamma" not in n and "beta" not in n:
    #         p.data.normal_(0, 0.02)  
    # pre, x_pre = model(X)
    # print(pre.shape)
    # print(x_pre.shape)
    
    main()