import torch
import torch.nn as nn
from opt import parse_opt
from util.dataloader import SimDataset
from torch.utils.data import DataLoader
from model.vggnet import vgg_pool7
from model.biLstm import RNNEncoder,PhraseAttention
from model.nmn import NMN
import numpy as np
import os.path as osp
from util import text_processing


def main():
    """Entry point: build the validation DataLoader and NMN model, then
    evaluate a saved checkpoint with `val()`.
    """
    opt = vars(parse_opt())
    # Fall back to CPU when CUDA is unavailable.
    device = torch.device("cuda:%d" % opt['gpuid'] if torch.cuda.is_available() else "cpu")

    val_set = SimDataset(opt)
    dataloaders = {
        'val': DataLoader(val_set, batch_size=opt['batch_size'], shuffle=True, num_workers=0)
    }

    # NOTE(review): 512 and 5 look like the visual-feature and spatial-feature
    # dimensions -- confirm against NMN's constructor signature.
    model = NMN(opt, 512, 5, opt['word_vec_size'])
    model.to(device)
    crit = nn.CrossEntropyLoss().to(device)

    # Evaluate the weights saved at this training iteration.
    checkpoint_iter = 2000
    checkpointpath_ = osp.join(opt['this_dir'], 'checkpoints', 'iter_%d.pth' % checkpoint_iter)
    val(opt, device, dataloaders['val'], model, crit, checkpointpath_)


def val(opt, device, dataloaders, model, loss_fn, checkpoint):
    """Evaluate `model` on batches from `dataloaders` until `opt['n_iter']`
    iterations have passed, printing running loss and final accuracy.

    Args:
        opt: option dict; uses 'n_iter' and 'batch_size'.
        device: torch.device to run inference on.
        dataloaders: the validation DataLoader (a single loader, despite the name).
        model: the NMN model; its weights are loaded from `checkpoint`.
        loss_fn: criterion called as loss_fn(score, label) -- CrossEntropyLoss in main().
        checkpoint: path to the .pth state-dict file to load.
    """
    # map_location='cpu' lets a GPU-trained checkpoint load on a CPU-only host.
    if torch.cuda.is_available():
        model.load_state_dict(torch.load(checkpoint))
    else:
        model.load_state_dict(torch.load(checkpoint, map_location='cpu'))
    model.eval()

    score_list = []   # per-batch argmax predictions
    label_list = []   # per-batch ground-truth labels
    loss_sum = 0
    iters = 0
    # BUG FIX: the original called `next(iter(dataloaders))` on every step,
    # which builds a brand-new (re-shuffled) iterator -- and re-spawns any
    # workers -- per batch. Build the iterator once and refresh it only when
    # the epoch is exhausted.
    data_iter = iter(dataloaders)
    with torch.no_grad():
        while True:
            iters += 1
            try:
                input_data = next(data_iter)
            except StopIteration:
                data_iter = iter(dataloaders)
                input_data = next(data_iter)
            # Observed shapes (from the original author's notes) -- confirm:
            #   imcrop  : (batch, n_candidates, 3, 224, 224)
            #   spatial : (batch, n_candidates, 5)
            #   label   : (batch, 1)
            imcrop = input_data['imcrop'].to(device)
            spatial = input_data['spatial'].to(device)
            text_seq = input_data['text_seq'].to(device)
            label = input_data['label'].to(device)

            score, attn_phrase = model(imcrop, spatial, text_seq)
            argmax = score.argmax(dim=1)
            # Positions of the 5 largest attention weights, counted from the
            # end of the phrase dimension.
            attn_topk_idx = attn_phrase.size(1) - attn_phrase.topk(k=5, dim=1).indices

            # (batch, 1) -> (batch,); CrossEntropyLoss expects a 1-D target.
            # Non-mutating squeeze replaces the original in-place squeeze_.
            label = label.squeeze(1)
            score_list.append(argmax.tolist())
            label_list.append(label.tolist())

            loss = loss_fn(score, label.long())
            loss_sum += loss.item()

            # The original `if iters % 1 == 0` was always true: log every step.
            print(iters, ':', loss_sum / iters)
            print(input_data['raw_sent'], attn_topk_idx.data)

            if opt['n_iter'] < iters:
                # Final summary: mean loss and accuracy over all seen batches.
                print('loss:', loss_sum / iters)
                a = np.array(score_list).reshape(1, -1)
                b = np.array(label_list).reshape(1, -1)
                print(a)
                print(b)
                print(np.sum(a == b) / (iters * opt['batch_size']))
                break



# Script entry point: run checkpoint evaluation when invoked directly.
if __name__ == '__main__':
    main()
