import torch
import torch.nn as nn 
import torch.nn.functional as F 
import argparse, os, random
import torch.optim as optimizer 
from sklearn.metrics import accuracy_score
from torch.utils.data import DataLoader,Subset, dataloader,random_split
import numpy as np 
import time,datetime
import sys
sys.path.append("core/data_loader")

from optim import get_optim, adjust_lr
from core.data_loader.mosei_dataset_three import *
from core.model.decent_model_gate_cofeature_mosei_three_early import * 

def parse_args(argv=None):
    """Build and parse the command-line arguments for this script.

    Args:
        argv: optional list of argument strings. Defaults to None, in which
            case argparse reads sys.argv[1:] — identical to the previous
            behavior. Passing a list makes the function testable/reusable.

    Returns:
        argparse.Namespace with model, training, and dataset settings.
    """
    parser = argparse.ArgumentParser()
    # Model
    parser.add_argument('--layer', type=int, default=4)
    parser.add_argument('--hidden_size', type=int, default=1024)
    parser.add_argument('--dim', type=int, default=1024)

    parser.add_argument('--dropout_r', type=float, default=0.1)
    parser.add_argument('--multi_head', type=int, default=8)
    parser.add_argument('--ff_size', type=int, default=2048)
    parser.add_argument('--word_embed_size', type=int, default=300)

    parser.add_argument('--image_hw', type=int, default=256)
    parser.add_argument('--patch_hw', type=int, default=32)
    parser.add_argument('--lang_size', type=int, default=31)
    parser.add_argument('--num_classes', type=int, default=2)
    parser.add_argument('--img_len', type=int, default=65)
    parser.add_argument('--text_len', type=int, default=60)
    parser.add_argument('--audio_len', type=int, default=61)
    parser.add_argument('--video_len', type=int, default=60)

    # Training
    parser.add_argument('--output', type=str, default='ckpt/')
    parser.add_argument('--name', type=str, default='exp_cofeature/')
    parser.add_argument('--batch_size', type=int, default=256)
    parser.add_argument('--max_epoch', type=int, default=99)
    parser.add_argument('--opt', type=str, default="Adam")
    parser.add_argument('--opt_params', type=str, default="{'betas': '(0.9, 0.98)', 'eps': '1e-9'}")
    parser.add_argument('--lr_base', type=float, default=0.0001)
    parser.add_argument('--lr_decay', type=float, default=0.5)
    parser.add_argument('--lr_decay_times', type=int, default=3)
    parser.add_argument('--warmup_epoch', type=float, default=0)
    parser.add_argument('--grad_norm_clip', type=float, default=-1)
    parser.add_argument('--eval_start', type=int, default=0)
    parser.add_argument('--early_stop', type=int, default=3)
    # seed default is drawn once at parse time, matching the original behavior
    parser.add_argument('--seed', type=int, default=random.randint(0, 9999999))
    parser.add_argument('--sigma', type=float, default=1.0)
    # NOTE(review): ans_size is a count but declared type=float — confirm
    # downstream consumers before tightening to int
    parser.add_argument('--ans_size', type=float, default=3)
    parser.add_argument('--pred_func', type=str, default="amax")
    # Dataset and task
    parser.add_argument('--root_dir', type=str, default="/mnt/ssd/Datasets_/")
    args = parser.parse_args(argv)
    return args


def set_seed(seed):
    """Seed all RNGs used by this script for reproducible runs.

    Seeds torch (CPU and every CUDA device), numpy, and the stdlib `random`
    module, and forces cuDNN into deterministic mode.

    Args:
        seed: integer seed value.
    """
    random.seed(seed)  # FIX: stdlib RNG was previously left unseeded
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    # trade kernel-selection speed for determinism in cuDNN
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True





def top_pred(x, y, concat):
    """Row-wise pick of the most confident prediction among three sources.

    For each aligned row, keeps the row whose maximum logit is strictly
    larger than both alternatives; on any tie, the `concat` row wins
    (same tie-breaking as the original implementation).

    Args:
        x, y, concat: 2D array-likes of identical shape (rows are per-sample
            prediction vectors).

    Returns:
        np.ndarray with one selected row per sample.
    """
    selected = []
    for row_x, row_y, row_c in zip(x, y, concat):
        # hoist the maxima — each was previously recomputed per comparison
        peak_x, peak_y, peak_c = np.max(row_x), np.max(row_y), np.max(row_c)
        if peak_x > peak_y and peak_x > peak_c:
            selected.append(list(row_x))
        elif peak_y > peak_x and peak_y > peak_c:
            selected.append(list(row_y))
        else:
            selected.append(list(row_c))
    return np.array(selected)





def evaluate_single(net, eval_loader, args, device, select_modal):
    """Compute classification accuracy (%) of `net` over `eval_loader`.

    Args:
        net: model called as net(x, y, z, xyz_mask), returning per-class
            logits of shape (batch, num_classes).
        eval_loader: iterable yielding (x, y, z, ans, xyz_mask) batches.
        args: unused here; kept for signature compatibility with callers.
        device: torch.device batches are moved to.
        select_modal: unused legacy flag (0:img 1:text 3:concat); kept for
            backward compatibility with existing call sites.

    Returns:
        float: accuracy in percent over all evaluated samples.
    """
    correct = []
    net.train(False)  # eval mode (disables dropout / fixes batch-norm)
    with torch.no_grad():  # FIX: no autograd bookkeeping during evaluation
        for x, y, z, ans, xyz_mask in eval_loader:
            x = x.to(device)
            y = y.to(device)
            z = z.to(device)
            ans = ans.to(device)
            xyz_mask = xyz_mask.to(device)

            logits = net(x, y, z, xyz_mask).cpu().numpy()
            labels = ans.cpu().numpy()
            correct += list(np.argmax(logits, axis=1) == labels)

    net.train(True)  # restore training mode for the caller
    return 100 * np.mean(np.array(correct))




def evaluate_single_zero_padding(net, eval_loader, args, device, select_padding):
    """Accuracy (%) on `eval_loader` with selected modalities zeroed out.

    Args:
        net: model called as net(x, y, z, xyz_mask), returning logits.
        eval_loader: iterable yielding (x, y, z, ans, xyz_mask) batches.
        args: unused here; kept for signature compatibility with callers.
        device: torch.device batches are moved to.
        select_padding: 3-character string of '0'/'1'; a '1' at position
            0/1/2 replaces x/y/z respectively with zeros (e.g. "100" drops
            x, "011" drops y and z). Generalizes the original six
            hard-coded patterns to any combination.

    Returns:
        float: accuracy in percent over all evaluated samples.
    """
    correct = []
    net.train(False)  # eval mode (disables dropout / fixes batch-norm)
    with torch.no_grad():  # FIX: no autograd bookkeeping during evaluation
        for x, y, z, ans, xyz_mask in eval_loader:
            x = x.to(device)
            y = y.to(device)
            z = z.to(device)
            ans = ans.to(device)
            xyz_mask = xyz_mask.to(device)

            # zeros_like already allocates on the source tensor's device,
            # so the redundant .to(device) calls were dropped
            if select_padding[0] == "1":
                x = torch.zeros_like(x)
            if select_padding[1] == "1":
                y = torch.zeros_like(y)
            if select_padding[2] == "1":
                z = torch.zeros_like(z)

            logits = net(x, y, z, xyz_mask).cpu().numpy()
            labels = ans.cpu().numpy()
            correct += list(np.argmax(logits, axis=1) == labels)

    net.train(True)  # restore training mode for the caller
    return 100 * np.mean(np.array(correct))








def forward_hook(module, data_input, data_output):
    """Debug forward hook: print the per-sample lambda gate values.

    Intended for registration on a gate layer whose output has shape
    (batch, 2); prints the shape, which sample prefers the first gate,
    and both raw gate columns.
    """
    gate = data_output.cpu().data.numpy()  # expected shape: (batch, 2)
    print(gate.shape)

    # lambda controls how much the cofeature contributes to the module
    lam_x = gate[:, 0]
    lam_y = gate[:, 1]

    print(lam_x > lam_y)
    print(lam_x)
    print(lam_y)
    print("End.....................")


class Save_lambda:
    """Forward-hook callable that accumulates a layer's outputs.

    Register an instance with `module.register_forward_hook(instance)`;
    each forward pass appends the layer output (as a numpy array) to
    `self.output`, which `save()` then dumps to disk.
    """

    def __init__(self):
        # one numpy array per captured forward pass
        self.output = []

    def __call__(self, module, data_input, data_output):
        # move to CPU and detach into numpy so we don't hold GPU tensors
        self.output.append(data_output.cpu().data.numpy())

    def save(self, layer_num):
        # NOTE(review): hard-coded absolute path — confirm the directory
        # exists on the target machine before relying on this
        target = "/mnt/ssd/Datasets_/t4sa/data_pro/analysis/lambda{}.npy".format(layer_num)
        np.save(target, self.output)



def run(num_train):
    """Evaluate a trained early-fusion MOSEI model on the test split.

    Loads the ATV (audio/text/video) datasets, restores a fixed checkpoint,
    prints clean-input test accuracy, then prints accuracy with each of the
    six modality zero-padding patterns applied. The commented-out sections
    below are alternative experiment configurations kept for reference.

    Args:
        num_train: unused; kept for interface symmetry with run_score().
    """

   

    print("loading dataset ....")
    args = parse_args()



    # normal dataset: vocabulary is built on the train split and shared
    # with valid/test via token_to_ix
    train_dataset = Mosei_Dataset_ATV_Early_Fusion('train',None)  
    val_dataset =Mosei_Dataset_ATV_Early_Fusion('valid',train_dataset.token_to_ix)
    test_dataset =Mosei_Dataset_ATV_Early_Fusion('test',train_dataset.token_to_ix)

    # score dataset 
    # select_padding: 0:audio, 1:text 
    # select_class:   0:in_class, 1:out_class 
    # NOTE(review): hard-coded to GPU 1 here but GPU 0 in run_score — confirm
    device = torch.device('cuda:1')
    
    # select_padding = 1
    # select_class = 0
    # train_dataset = Mosei_Dataset_ATV_Score('train',None,select_padding,select_class)  
    # val_dataset =Mosei_Dataset_ATV_Score('valid',train_dataset.token_to_ix,select_padding,select_class)
    # test_dataset =Mosei_Dataset_ATV_Score('test',train_dataset.token_to_ix,select_padding,select_class)
    
 
    print("val_data_len:",val_dataset.__len__())
    print("test_data_len:",test_dataset.__len__()) 
    
    # evaluation only, so no shuffling
    val_data_iter = DataLoader(val_dataset,batch_size=args.batch_size,shuffle=False,num_workers=8)
    test_data_iter = DataLoader(test_dataset,batch_size=args.batch_size,shuffle=False,num_workers=8)
    
    
    

    # normal dataset
    # net = Multi_Fusion_Model_Decent_CoFeature_MOSEI(args, train_dataset.vocab_size, train_dataset.pretrained_emb).to(device)

    # score dataset
    net = Mosei_Early(args, train_dataset.vocab_size, train_dataset.pretrained_emb).to(device)

    # NOTE(review): fixed checkpoint path; must exist relative to the CWD
    net.load_state_dict(torch.load("ckpt/mosei_three_early/best80.33137359700694_8961716.pkl")['state_dict'])
   

    # clean-input accuracy on the test split
    acc = evaluate_single(net,test_data_iter,args,device,0)
    print(acc)

    # accuracy with each modality-dropping zero-padding pattern
    acc_list = []
    padding_list =["100","010","001","110","101","011"]
    for i in padding_list:
        acc = evaluate_single_zero_padding(net, test_data_iter, args,device,i)
        acc_list.append(acc)
    print("padding acc...")
    print(acc_list)    
   

    # acc average
    # padding_list =["100","010","001","110","101","011"]
    # all_acc = []
    # for i in padding_list:
    #     print(i)
    #     acc_list = []
    #     for i in range(10):
    #         seed = np.random.choice([i for i in range(1000)],1)
    #         np.random.seed(seed)
    #         acc = evaluate_single(net,test_data_iter,args,device,i)
    #         acc_list.append(round(acc,2))
    #     all_acc.append(acc_list)
    
    # for i in all_acc:
    #     print(i)
    # print(np.mean(i))
    # print(np.std(i))


    




    # print(net.enc_list[0].softgate)
    # print(net.enc_list[0].softgate.layer[3])
    # train_single(net, train_data_iter, val_data_iter, args,device,datasize)
    #sl_0 = Save_lambda()
    
    # net.enc_list[0].softgate.layer[3].register_forward_hook(forward_hook)
    # net.enc_list[1].softgate.layer[3].register_forward_hook(forward_hook)
    # net.enc_list[2].softgate.layer[3].register_forward_hook(forward_hook)
    # net.enc_list[3].softgate.layer[3].register_forward_hook(forward_hook)
    
    

    # for step, (
    #         x,
    #         y,
    #         y_mask,
    #         xy_mask,
    #         ans
    # ) in tqdm(enumerate(val_data_iter)):
    #     x = x.to(device)
    #     y = y.to(device)
    #     y_mask = y_mask.to(device)
    #     xy_mask = xy_mask.to(device)

    #     select_padding = 1
    #     if select_padding ==0:
    #         x = torch.ones_like(x).to(device) 
    #         #xy_mask[:,:,1:65] = True
    #     else:
    #         y = torch.zeros_like(y).to(device)



    #     x_pred,y_pred,concat_pred = net(x,y,y_mask,xy_mask)
    #     break 

    # sl_0.save(3)
        # x_pred = x_pred.cpu().data.numpy()
        # y_pred = y_pred.cpu().data.numpy()
        # concat_pred = concat_pred.cpu().data.numpy()

    
    # acc_img = evaluate_single(net, val_data_iter, args,device,0)
    # acc_text = evaluate_single(net, val_data_iter, args,device,1)
    # acc_concat = evaluate_single(net, val_data_iter, args,device,2)
    # print(acc_img,acc_text,acc_concat)


def run_score(num_train):
    """Evaluate the early-fusion MOSEI model on the 'score' dataset variants.

    For each zero-padding pattern, rebuilds the score datasets and the model,
    restores the fixed checkpoint, runs 10 evaluation trials under fresh
    numpy seeds, and finally prints each pattern's per-trial accuracies plus
    their mean and standard deviation.

    Args:
        num_train: unused; kept for interface symmetry with run().
    """
    print("loading dataset ....")
    args = parse_args()

    # score dataset
    # select_padding: which modalities are zeroed (e.g. "100")
    # select_class:   0:in_class, 1:out_class
    device = torch.device('cuda:0')

    all_acc = []
    padding_list = ["100", "010", "001", "110", "101", "011"]
    # BUG FIX: the inner trial loop previously reused the loop variable `i`,
    # clobbering the padding pattern and passing a trial index (0..9) as the
    # select_modal argument; variables are now distinct.
    for select_padding in padding_list:
        print(select_padding)
        select_class = 1

        # vocabulary is built on the train split and shared with valid/test
        train_dataset = Mosei_Dataset_ATV_Early_Fusion_Score('train', None, select_padding, select_class)
        val_dataset = Mosei_Dataset_ATV_Early_Fusion_Score('valid', train_dataset.token_to_ix, select_padding, select_class)
        test_dataset = Mosei_Dataset_ATV_Early_Fusion_Score('test', train_dataset.token_to_ix, select_padding, select_class)

        print("val_data_len:", len(val_dataset))
        print("test_data_len:", len(test_dataset))

        # evaluation only, so no shuffling
        val_data_iter = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=8)
        test_data_iter = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=8)

        net = Mosei_Early(args, train_dataset.vocab_size, train_dataset.pretrained_emb).to(device)
        # NOTE(review): fixed checkpoint path; must exist relative to the CWD
        net.load_state_dict(torch.load("ckpt/mosei_three_early/best80.33137359700694_8961716.pkl")['state_dict'])

        acc_list = []
        for _trial in range(10):
            # draw an int seed; np.random.seed previously received a 1-element array
            seed = int(np.random.choice(1000))
            np.random.seed(seed)
            # select_modal is unused by evaluate_single; pass the pattern for clarity
            acc = evaluate_single(net, test_data_iter, args, device, select_padding)
            acc_list.append(round(acc, 2))
        all_acc.append(acc_list)

    for acc_list in all_acc:
        print(acc_list)
        print(np.mean(acc_list))
        print(np.std(acc_list))
    

if __name__ == "__main__":
    # Entry point: standard evaluation. Swap the comment to run the
    # per-padding score-dataset sweep instead.
    #run_score(1)
    run(1)


