# -*- coding: utf-8 -*-
import os
import argparse
import numpy as np
import scipy.io as sio
import pickle
import random
import logging

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
from torch.autograd import Variable

from configs import Config
from model import get_model, get_encoder, get_decoder
from dataset import get_train_data, get_eval_data
from optim import get_opt_schedule, set_lr
from Loss import get_loss
from utils import set_log, set_seed
from NCE.NCEAverage import NCEAverage
from NCE.NCECriterion import NCESoftmaxLoss
from eval import evaluate, evaluate_mAP

def parse_args():
    """Parse command-line arguments for the SSVH training script.

    Returns:
        argparse.Namespace with:
            config (str): path to the config file to load.
            gpu_id (str): GPU device id, kept as a string so it can be
                interpolated directly into a torch device string
                (``f'cuda:{args.gpu_id}'``).
    """
    parser = argparse.ArgumentParser(description='ssvh')
    parser.add_argument('--config', default='configs/conmh_fcv.py', type=str,
                        help='config file path')
    # Fix: default was the int 0 while type=str, so args.gpu_id was an int
    # when the flag was omitted and a str when supplied. Use '0' so the
    # attribute has a consistent type either way.
    parser.add_argument('--gpu_id', default='0', type=str,
                        help='specify gpu device')

    args = parser.parse_args()
    return args


def main():
    """Run self-supervised video hashing training.

    Pipeline: parse CLI args + config, set up logging/seed/device, build
    the encoder/decoder models and the NCE contrastive machinery, then
    train for cfg.num_epochs epochs. Each epoch alternates decoder
    reconstruction updates (every cfg.d_every steps) with encoder
    reconstruction + contrastive updates (every cfg.e_every steps),
    evaluating periodically depending on the dataset.
    """
    args = parse_args()
    cfg = Config.fromfile(args.config)

    # Optional training visualizer.
    # NOTE(review): `vis` is referenced unconditionally below (the
    # evaluate_mAP call and the vis.plot calls); running with
    # cfg.is_vis=False will raise NameError there — confirm is_vis is
    # always enabled in practice, or guard those call sites.
    if cfg.is_vis:
        from utils.visualize import Visualizer
        vis = Visualizer(cfg.env)

    if not os.path.exists(cfg.file_path):
        os.makedirs(cfg.file_path)
    
    # set logging
    logger = set_log(cfg, 'log.txt')
    logger.info('Self Supervised Video Hashing Training: {}'.format(cfg.model_name))

    # set seed
    set_seed(cfg)
    logger.info('set seed: {}'.format(cfg.seed))

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # hyper parameter
    # os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    # Device selection: fall back to CPU when GPU is unavailable/disabled.
    if cfg.is_use_gpu and torch.cuda.is_available( ):
        device = torch.device(f'cuda:{args.gpu_id}')
        cfg.gpu_id = args.gpu_id
    else:
        device = torch.device('cpu')
    logger.info('used gpu: {}'.format(args.gpu_id))


    logger.info('PARAMETER ......')
    logger.info(cfg)

    logger.info('loading model ......') 
    # model = get_model(cfg).to(device)

    model_encoder = get_encoder(cfg).to(device)
    model_decoder = get_decoder(cfg).to(device)

    if torch.cuda.device_count( ) > 1 and cfg.is_use_parallel:
        model_decoder = nn.DataParallel(model_decoder)
        model_encoder = nn.DataParallel(model_encoder)
        # pass

    logger.info('loading train data ......')
    train_loader = get_train_data(cfg)
    eval_loader = get_eval_data(cfg)
    total_len = len(train_loader)

    epoch = 0

    # Separate optimizer/schedule wrappers for encoder and decoder.
    opt_encoder_schedule = get_opt_schedule(cfg, model_encoder)
    opt_decoder_schedule = get_opt_schedule(cfg, model_decoder)

    # NCE memory bank + softmax criterion for the contrastive objective.
    contrast = NCEAverage(cfg.nbits, cfg.train_num_sample, cfg.K, cfg.T, cfg.momentum, cfg.is_use_softmax, device)
    criterion_contrast = NCESoftmaxLoss()

    # NOTE(review): `model` and `opt_schedule` are not defined in this
    # function (the single-model creation is commented out above), so this
    # branch raises NameError whenever cfg.use_checkpoint is set —
    # presumably it should restore model_encoder/model_decoder and their
    # respective schedules. Confirm before relying on checkpoint resume.
    if cfg.use_checkpoint is not None:
        checkpoint = torch.load(cfg.use_checkpoint)
        model.load_state_dict(checkpoint['model_state_dict'])
        opt_schedule._optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        epoch = checkpoint['epoch'] + 1
        opt_schedule._schedule.last_epoch = checkpoint['epoch']
    

    while True:
        # Periodic evaluation at the start of selected epochs.
        if cfg.dataset == 'fcv':
           if epoch % 5 == 0 and epoch > 0:
            # if epoch % 1 == 0 and epoch > 0:
            #     evaluate(cfg, eval_loader, model_encoder, cfg.test_num_sample, logger)  # model training, added eval_loader parameter (me), 20241211
                evaluate_mAP(cfg, eval_loader, model_encoder, cfg.test_num_sample, logger, vis)
        elif cfg.dataset == 'activitynet':
            # NOTE(review): `model` is undefined here (its creation is
            # commented out above) — this branch would raise NameError;
            # likely should pass model_encoder.
            if epoch % 20 == 0:
                evaluate(cfg, model, cfg.test_num_sample, logger)
        elif cfg.dataset == 'yfcc':
            # NOTE(review): same undefined-`model` issue as the
            # activitynet branch above.
            if epoch == 40:
                evaluate(cfg, model, cfg.test_num_sample, logger)

        logger.info('begin training stage: [{}/{}]'.format(epoch+1, cfg.num_epochs))  
        model_encoder.train()

        for i, data in enumerate(train_loader, start=1):
            data = {key: value.to(device, non_blocking=True) for key, value in data.items( )}
            batch_size = data["visual_word"].size(0)
            # Decoder step: reconstruct the masked positions of the input.
            if i % cfg.d_every == 0:

                opt_decoder_schedule.zero_grad( )
                opt_encoder_schedule.zero_grad( )
                bool_masked_pos = data["mask"].flatten(1).to(torch.bool)
                # bool_masked_pos = torch.zeros(data['mask'].shape).to(torch.bool)
                hash_code = model_encoder.forward(data["visual_word"], bool_masked_pos)

                frame = model_decoder.forward(hash_code, bool_masked_pos)

                # frame_2, hash_code_2 = model.forward(data["visual_word"], bool_masked_pos_2)

                # hash_code = torch.mean(hash_code, 1)

                # Reconstruction targets are only the masked positions.
                labels = data["visual_word"][bool_masked_pos].reshape(batch_size, -1, cfg.feature_size)

                recon_loss = F.mse_loss(frame, labels)

                if i % 50 == 0:
                    logger.info('Epoch:[%d/%d] Step:[%d/%d] reconstruction_loss: %.2f ' \
                                % (epoch + 1, cfg.num_epochs, i, total_len, \
                                   recon_loss.data.cpu( ).numpy( ),))

                    vis.plot('deconder_recon_loss', recon_loss.data.cpu( ).numpy( ))

                # Both optimizers step here, so the encoder is also updated
                # by the pure-reconstruction gradient on decoder steps.
                recon_loss.backward( )
                opt_decoder_schedule._optimizer_step( )
                opt_encoder_schedule._optimizer_step( )

            # Encoder step: unmasked reconstruction + contrastive loss.
            if i % cfg.e_every == 0:
                opt_encoder_schedule.zero_grad()
                opt_decoder_schedule.zero_grad()
                # data = {key: value.to(device, non_blocking=True) for key, value in data.items( )}

                # bool_masked_pos = data["mask"].flatten(1).to(torch.bool)
                # All-False mask: no positions are masked for this step.
                bool_masked_pos = torch.zeros(data['mask'].shape).to(torch.bool)
                hash_code = model_encoder.forward(data["visual_word"], bool_masked_pos)
                frame = model_decoder.forward(hash_code, bool_masked_pos)

                # Mean-pool over dim 1, then L2-normalize each row for NCE.
                hash_code = torch.mean(hash_code, 1)
                hash_code_norm = torch.div(hash_code, hash_code.norm(dim=1,keepdim=True)) # (me), 20241215
                recon_loss = F.mse_loss(frame, data["visual_word"])
                contra_out = contrast(hash_code_norm, data['index'], epoch=epoch-cfg.warmup_epoch)
                contra_loss = criterion_contrast(contra_out)
                # Total loss: reconstruction + cfg.a-weighted contrastive term.
                loss =  recon_loss  + cfg.a * contra_loss

                if i % 50 == 0:

                    logger.info('Epoch:[%d/%d] Step:[%d/%d] reconstruction_loss: %.2f contra_loss: %.2f' \
                                % (epoch + 1, cfg.num_epochs, i, total_len, \
                                 recon_loss.data.cpu( ).numpy( ),contra_loss.data.cpu( ).numpy( ) ))
                    vis.plot('enconder_recon_loss', recon_loss.data.cpu( ).numpy( ))
                    vis.plot('enconder_contra_loss', contra_loss.data.cpu( ).numpy( ))
                # Only the encoder optimizer steps on this branch.
                loss.backward()
                # recon_loss.backward()
                opt_encoder_schedule._optimizer_step()
                # evaluate_mAP(cfg, eval_loader, model_encoder, cfg.test_num_sample, logger, vis)

        # Advance both learning-rate schedules once per epoch.
        opt_decoder_schedule._schedule_step()
        opt_encoder_schedule._schedule_step()
        # logger.info('now the encoder learning rate is: {}'.format(opt_encoder_schedule.lr()))
        # logger.info('now the decoder learning rate is: {}'.format(opt_decoder_schedule.lr( )))
        # if epoch == cfg.num_epochs - 6:
        #     save_file = cfg.file_path + '/{}_{}.pth'.format(cfg.dataset, cfg.nbits)
        #     torch.save({
        #         'model_state_dict': model.state_dict()
        #     }, save_file)
        #
        # save_file = cfg.file_path + '/model.pth'
        # torch.save({
        #     'epoch': epoch,
        #     'model_state_dict': model.state_dict(),
        #     'optimizer_state_dict': opt_schedule._optimizer.state_dict()
        # }, save_file)
        
        epoch += 1
        if epoch >= cfg.num_epochs:
            break

# Script entry point: run training only when executed directly.
if __name__ == '__main__':
    main()
