from asyncore import write
import os

from argparse import ArgumentParser
from pydoc import describe
import sys
import time
import torch
import torch.nn
from tqdm import tqdm

from utils import evaluate, get_dataset, FFDataset, setup_logger
from trainer import Trainer
import numpy as np
import random
import torch
# import logging

def get_writer(log_dir):
    """Build a TensorBoard ``SummaryWriter`` that logs under *log_dir*.

    The import is deliberately local so the script does not require
    tensorboard unless logging is actually used.
    """
    from torch.utils.tensorboard import SummaryWriter
    writer = SummaryWriter(log_dir)
    return writer

def parse_args():
    """Parse command-line options for F3-Net training and evaluation.

    Returns the populated ``argparse.Namespace``; all options have
    defaults, so the script runs with no arguments at all.
    """
    parser = ArgumentParser(description='F3 net')

    # Data / checkpoint locations.
    parser.add_argument('--dataset_path', default='dataset/',
                        help='train data path')
    parser.add_argument('--model_dir', default='weight_save/',
                        help='The path of save model ')
    parser.add_argument('--ckpt_dir', default='./data/F3Net')
    parser.add_argument('--ckpt_name', default='FAD4_bz128')
    parser.add_argument('--log_dir', default='./log',
                        help='the dir for log output')
    parser.add_argument('--resume', default='',
                        help='The path for load weight of trained F3Net model, for testing or ...')

    # Training hyper-parameters.
    parser.add_argument('--batch_size', default=12, type=int,
                        help='batch size')
    parser.add_argument('--gpu_ids', default='0,1', type=str,
                        help='The GPU ids for training')
    parser.add_argument('--max_epoch', default=5, type=int,
                        help='the epochs for training')
    parser.add_argument('--loss_freq', default=40, type=int,
                        help='The steps for print loss')
    parser.add_argument('--mode', default='Both',
                        help='mode')

    return parser.parse_args()
    

# config
# dataset_path = 'dataset/'
# model_dir = 'weight_save/'
# pretrained_path = 'pre_train_weight/xception-b5690688.pth'
# pretrained_path = f"{model_dir}/414484result.pth"
# batch_size = 12
# max_epoch = 5
# loss_freq = 40
# mode = 'Both' # ['Original', 'FAD', 'LFS', 'Both', 'Mix']
# ckpt_dir = './data/F3Net'
# ckpt_name = 'FAD4_bz128'


if __name__ == '__main__':
    args = parse_args()

    # Restrict visible devices BEFORE torch creates a CUDA context, then map
    # them to logical ids 0..N-1 as seen by this process.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_ids
    osenvs = len(args.gpu_ids.split(','))
    gpu_ids = [*range(osenvs)]

    writer = get_writer(args.log_dir)

    # Real-face training stream from the train split.
    dataset = FFDataset(dataset_root=os.path.join(args.dataset_path, 'train', 'real'), size=299, frame_num=300, augment=True)
    dataloader_real = torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=args.batch_size // 2,
        shuffle=True,
        num_workers=8)

    len_dataloader = len(dataloader_real)
    print("len_dataloader={}".format(len_dataloader))

    # "Fake" stream. NOTE(review): this concatenates the *valid* real set with
    # the datasets returned by get_dataset(name='valid') — confirm this mix is
    # the intended source of fake training samples.
    dataset_real = FFDataset(dataset_root=os.path.join(args.dataset_path, 'valid', 'real'), size=299, frame_num=50, augment=False)
    dataset_fake, total_len = get_dataset(name='valid', size=299, root=args.dataset_path, frame_num=50, augment=False)
    dataset_img = torch.utils.data.ConcatDataset([dataset_real, dataset_fake])
    dataloader_fake = torch.utils.data.DataLoader(
        dataset=dataset_img,
        batch_size=args.batch_size // 2,
        shuffle=True,
        num_workers=8
    )
    print("len_dataloader_fake={}".format(len(dataloader_fake)))

    # init checkpoint and logger
    ckpt_path = os.path.join(args.ckpt_dir, args.ckpt_name)
    logger = setup_logger(ckpt_path, 'result.log', 'logger')
    best_val = 0.                 # currently unused; kept for future best-model tracking
    ckpt_model_name = 'best.pkl'  # currently unused; kept for future best-model tracking

    # model.save() below writes into model_dir; make sure it exists.
    os.makedirs(args.model_dir, exist_ok=True)

    # Validate ~10 times per epoch. max(1, ...) guards short dataloaders:
    # the original `i % int(len_dataloader / 10)` raised ZeroDivisionError
    # whenever len_dataloader < 10.
    eval_every = max(1, len_dataloader // 10)

    # train
    model = Trainer(gpu_ids, args.mode, args.resume)
    model.total_steps = 0

    for epoch in range(args.max_epoch):
        fake_iter = iter(dataloader_fake)
        real_iter = iter(dataloader_real)

        logger.debug(f'No {epoch}')
        bar_process = tqdm(range(len_dataloader))
        for i in range(1, len_dataloader + 1):
            model.total_steps += 1

            try:
                # Python 3 iterators have no .next() method; the original
                # `real_iter.next()` raised AttributeError on the first step.
                data_real = next(real_iter)
                data_fake = next(fake_iter)
            except StopIteration:
                break

            # Skip the ragged final batch so real/fake halves stay balanced.
            if data_real.shape[0] != data_fake.shape[0]:
                continue

            bz = data_real.shape[0]

            data = torch.cat([data_real, data_fake], dim=0)
            # 0 = real, 1 = fake; equivalent to the original unsqueeze/cat/squeeze dance.
            label = torch.cat([torch.zeros(bz), torch.ones(bz)], dim=0)

            # manually shuffle so real and fake samples are interleaved in the batch
            idx = list(range(data.shape[0]))
            random.shuffle(idx)
            data = data[idx]
            label = label[idx]

            data = data.detach()
            label = label.detach()

            model.set_input(data, label)
            loss = model.optimize_weight()

            if model.total_steps % args.loss_freq == 0:
                logger.debug(f'loss: {loss} at step: {model.total_steps}')
            writer.add_scalar('loss', loss.item(), model.total_steps)
            bar_process.set_description(f'loss: {loss} at step: {model.total_steps}')

            # Periodic validation + checkpoint.
            if i % eval_every == 0:
                model.model.eval()
                auc, r_acc, f_acc = evaluate(model, args.dataset_path)
                logger.debug(f'(Val @ epoch {epoch}) auc: {auc}, r_acc: {r_acc}, f_acc:{f_acc}')
                step = epoch * len_dataloader + i
                writer.add_scalar('auc', auc, step)
                writer.add_scalar('r_acc', r_acc, step)
                writer.add_scalar('f_acc', f_acc, step)
                model.model.train()
                model.save(args.model_dir + str(epoch) + str(i) + "result.pth")
            bar_process.update(1)

        bar_process.close()

    # Final held-out evaluation.
    print("开始test.........")
    model.model.eval()
    auc, r_acc, f_acc = evaluate(model, args.dataset_path, mode='test')
    # args.max_epoch equals the original post-loop `epoch` counter value.
    logger.debug(f'(Test @ epoch {args.max_epoch}) auc: {auc}, r_acc: {r_acc}, f_acc:{f_acc}')
    writer.close()
