import argparse
import os
import random
import time

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optimizer
from sklearn.metrics import accuracy_score
from torch.utils.data import DataLoader, Subset, dataloader, random_split

import sys
sys.path.append("core/data_loader")

from multimodal.model.t4sa.core.data_loader.load_data_t4sa import *
from core.model.UniModel import Model_Single_Text
from core.model.basic_multi_modal import Multi_Fusion_Model_Baseline
from core.model.image import * 
from core.model.vit_text import * 

def parse_args():
    """Build and parse the command-line arguments for training.

    Returns:
        argparse.Namespace with model-architecture and training options.
    """
    parser = argparse.ArgumentParser()
    add = parser.add_argument

    # --- Model architecture ---
    add('--layer', type=int, default=4)
    add('--hidden_size', type=int, default=1024)
    add('--dim', type=int, default=1024)
    add('--dropout_r', type=float, default=0.1)
    add('--multi_head', type=int, default=8)
    add('--ff_size', type=int, default=2048)
    add('--word_embed_size', type=int, default=300)
    add('--image_hw', type=int, default=256)
    add('--patch_hw', type=int, default=32)
    add('--lang_size', type=int, default=31)
    add('--num_classes', type=int, default=3)

    # --- Training / checkpointing ---
    add('--output', type=str, default='ckpt/')
    add('--name', type=str, default='exp0/')
    add('--batch_size', type=int, default=64)
    add('--max_epoch', type=int, default=99)
    add('--opt', type=str, default="Adam")
    add('--opt_params', type=str, default="{'betas': '(0.9, 0.98)', 'eps': '1e-9'}")
    add('--lr_base', type=float, default=0.0001)
    add('--lr_decay', type=float, default=0.5)
    add('--lr_decay_times', type=int, default=2)
    add('--warmup_epoch', type=float, default=0)
    add('--grad_norm_clip', type=float, default=-1)
    add('--eval_start', type=int, default=0)
    add('--early_stop', type=int, default=3)
    # Random default so repeated runs get distinct checkpoint file names.
    add('--seed', type=int, default=random.randint(0, 9999999))
    add('--sigma', type=float, default=1.0)
    add('--ans_size', type=float, default=3)
    add('--pred_func', type=str, default="amax")

    return parser.parse_args()


def set_seed(seed):
    """Seed every RNG used by this script for reproducibility.

    Seeds torch (CPU and all CUDA devices), numpy, and Python's `random`
    module, and switches cuDNN to deterministic mode.

    Args:
        seed: integer seed applied to all generators.
    """
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    # Fix: Python's `random` module is used in this file but was previously
    # left unseeded, so runs were not fully reproducible.
    random.seed(seed)
    # Disable cuDNN autotuning and force deterministic kernels.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True




def train_single(net, train_loader, eval_loader, args, device):
    """Train `net`, evaluating on `eval_loader` after every epoch.

    The best model (by eval accuracy) is checkpointed under
    `args.output/args.name`. On a non-improving epoch the best weights are
    reloaded and the LR is decayed, up to `args.lr_decay_times` times; after
    the decay budget is exhausted, `args.early_stop` consecutive
    non-improving epochs trigger early stopping, at which point the best
    checkpoint is renamed to include its accuracy.

    Args:
        net: model taking (x, y) and returning class logits.
        train_loader: DataLoader yielding (x, y, ans) batches.
        eval_loader: DataLoader for `evaluate_single`.
        args: parsed command-line namespace (see `parse_args`).
        device: torch device for tensors and the model.

    Returns:
        List of per-epoch eval accuracies.
    """
    # Make sure the checkpoint/log directory exists before opening the log.
    os.makedirs(args.output + "/" + args.name, exist_ok=True)
    logfile = open(
        args.output + "/" + args.name +
        '/log_run.txt',
        'w+'
    )
    logfile.write(str(args))

    loss_sum = 0
    best_eval_accuracy = 0.0
    early_stop = 0
    decay_count = 0

    # NOTE(review): args.opt / args.opt_params are currently ignored — the
    # optimizer is hard-coded to Adam with lr_base.
    optim = torch.optim.Adam(net.parameters(), lr=args.lr_base)

    # Summed (not averaged) cross-entropy; divided by batch size for display.
    loss_fn = torch.nn.CrossEntropyLoss(reduction="sum")
    eval_accuracies = []
    for epoch in range(0, args.max_epoch):

        time_start = time.time()

        for step, (
                x,
                y,
                ans
        ) in enumerate(train_loader):

            loss_tmp = 0
            optim.zero_grad()

            x = x.to(device)
            y = y.to(device)
            ans = ans.to(device)

            pred = net(x, y)
            loss = loss_fn(pred, ans)
            loss.backward()

            loss_sum += loss.cpu().data.numpy()
            loss_tmp += loss.cpu().data.numpy()

            print("\r[Epoch %2d][Step %4d/%4d] Loss: %.4f, Lr: %.2e, %4d m "
                  "remaining" % (
                      epoch + 1,
                      step,
                      int(len(train_loader.dataset) / args.batch_size),
                      loss_tmp / args.batch_size,
                      *[group['lr'] for group in optim.param_groups],
                      ((time.time() - time_start) / (step + 1)) * ((len(train_loader.dataset) / args.batch_size) - step) / 60,
                  ), end='          ')

            # Gradient norm clipping (disabled when grad_norm_clip <= 0).
            if args.grad_norm_clip > 0:
                nn.utils.clip_grad_norm_(
                    net.parameters(),
                    args.grad_norm_clip
                )

            optim.step()

        time_end = time.time()
        elapse_time = time_end - time_start
        print('Finished in {}s'.format(int(elapse_time)))
        epoch_finish = epoch + 1

        # Logging. Fix: `step + 1` is the number of batches this epoch;
        # the original divided by `step`, which raises ZeroDivisionError
        # when an epoch has a single batch.
        logfile.write(
            'Epoch: ' + str(epoch_finish) +
            ', Loss: ' + str(loss_sum / len(train_loader.dataset)) +
            ', Lr: ' + str([group['lr'] for group in optim.param_groups]) + '\n' +
            'Elapsed time: ' + str(int(elapse_time)) +
            ', Speed(s/batch): ' + str(elapse_time / (step + 1)) +
            '\n\n'
        )

        # Eval
        if epoch_finish >= args.eval_start:
            print('Evaluation...')
            accuracy = evaluate_single(net, eval_loader, args, device)
            print('Accuracy :' + str(accuracy))
            eval_accuracies.append(accuracy)
            if accuracy > best_eval_accuracy:
                # New best: checkpoint model + optimizer state.
                state = {
                    'state_dict': net.state_dict(),
                    'optimizer': optim.state_dict(),
                    'args': args,
                }
                torch.save(
                    state,
                    args.output + "/" + args.name +
                    '/best' + str(args.seed) + '.pkl'
                )
                best_eval_accuracy = accuracy
                early_stop = 0

            elif decay_count < args.lr_decay_times:
                # No improvement: reload the best weights and decay the LR.
                print('LR Decay...')
                decay_count += 1
                net.load_state_dict(torch.load(args.output + "/" + args.name +
                                               '/best' + str(args.seed) + '.pkl')['state_dict'])
                for group in optim.param_groups:
                    group['lr'] *= args.lr_decay

            else:
                # Decay budget exhausted: count towards early stopping.
                early_stop += 1
                if early_stop == args.early_stop:
                    logfile.write('Early stop reached' + '\n')
                    print('Early stop reached')
                    logfile.write('best_overall_acc :' + str(best_eval_accuracy) + '\n\n')
                    print('best_eval_acc :' + str(best_eval_accuracy) + '\n\n')
                    os.rename(args.output + "/" + args.name +
                              '/best' + str(args.seed) + '.pkl',
                              args.output + "/" + args.name +
                              '/best' + str(best_eval_accuracy) + "_" + str(args.seed) + '.pkl')
                    logfile.close()
                    return eval_accuracies

        loss_sum = 0

    # Fix: when max_epoch is reached without early stopping, close the log
    # and return the accuracies (the original leaked the file handle and
    # implicitly returned None on this path).
    logfile.close()
    return eval_accuracies


def evaluate_single(net, eval_loader, args, device):
    """Compute classification accuracy of `net` on `eval_loader`.

    Args:
        net: model taking (x, y) and returning class logits.
        eval_loader: iterable of (x, y, ans) batches.
        args: unused; kept for interface compatibility with callers.
        device: torch device the inputs are moved to.

    Returns:
        Accuracy in percent (argmax over logits vs. `ans`).
    """
    correct = []
    net.train(False)
    # Fix: run evaluation under no_grad — the original built autograd
    # graphs for every eval batch, wasting time and memory.
    with torch.no_grad():
        for step, (x, y, ans) in enumerate(eval_loader):
            x = x.to(device)
            y = y.to(device)
            pred = net(x, y).cpu().data.numpy()
            ans = ans.cpu().data.numpy()
            correct += list(np.argmax(pred, axis=1) == ans)

    net.train(True)
    return 100 * np.mean(np.array(correct))







def run(num_train):
    """Load the T4SA datasets, build the fusion model and launch training.

    Args:
        num_train: unused; kept for interface compatibility with callers.
    """
    args = parse_args()

    print("loading dataset ....")
    # Split 0 builds the vocabulary; splits 1/2 reuse its token mapping.
    train_dataset = T4SA_Vision_And_Text(0, None)
    val_dataset = T4SA_Vision_And_Text(1, train_dataset.token_to_ix)
    test_dataset = T4SA_Vision_And_Text(2, train_dataset.token_to_ix)

    print("train_data_len:", len(train_dataset))
    print("val_data_len:", len(val_dataset))

    train_data_iter = DataLoader(train_dataset, batch_size=args.batch_size,
                                 shuffle=True, num_workers=4)
    val_data_iter = DataLoader(val_dataset, batch_size=args.batch_size,
                               shuffle=False)

    # Fix: fall back to CPU when CUDA is unavailable instead of crashing
    # on the hard-coded 'cuda:0'.
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    net = Multi_Fusion_Model_Baseline(
        args, train_dataset.vocab_size, train_dataset.pretrained_emb
    ).to(device)

    train_single(net, train_data_iter, val_data_iter, args, device)


if __name__ == "__main__":
    run(1)


