import importlib
import model
import torch.optim as optim
import time
import sys
import yaml
from easydict import EasyDict as edict
from torch.utils.tensorboard import SummaryWriter
from tool import *


def test_target_training(test_net, test_loader, device, flag):
    """Evaluate ``test_net`` on ``test_loader``; report angular/yaw/pitch errors.

    Args:
        test_net: gaze model; called as ``test_net(face_batch)`` and expected
            to return a (batch, 2) tensor of (yaw, pitch) predictions.
        test_loader: iterable yielding ``(data, label)`` pairs where
            ``data['face']`` is the input batch and ``label['gaze_yaw_pitch']``
            is a (batch, 2) ground-truth tensor — assumed in radians, since
            errors are converted with ``np.rad2deg`` (TODO confirm upstream).
        device: torch device the input batch is moved to.
        flag: dataset name used only in the printed/logged summary line.

    Returns:
        Mean per-batch angular error (via ``angular``/``gazeto3d`` from tool).

    Side effects:
        Writes the summary line to the module-level ``outfile`` log handle
        (opened in ``__main__``) and prints it to stdout.
    """
    test_net.eval()

    error_yaw = 0
    error_pitch = 0

    accs = 0
    c_count = 0

    for d_tar in test_loader:
        data_tar, label_tar = d_tar

        data_tar['face'] = data_tar['face'].to(device)
        predict = test_net(data_tar['face'])

        # Hoist the detach/CPU transfer out of the per-sample loop; the
        # original re-ran .detach().cpu() for every single element access.
        pred_cpu = predict.detach().cpu()
        gt_cpu = label_tar['gaze_yaw_pitch'].detach().cpu()
        batch = pred_cpu.shape[0]

        c_count += 1
        t_accs = 0
        # 'j' here: the original reused 'i' and shadowed the batch index.
        for j in range(batch):
            pre_gaze = np.asarray([pred_cpu[j, 0], pred_cpu[j, 1]])
            tar_gaze = np.asarray([gt_cpu[j, 0], gt_cpu[j, 1]])
            t_accs += angular(gazeto3d(pre_gaze), gazeto3d(tar_gaze))

        # Accumulate the per-batch mean; divided by c_count at the end.
        accs += t_accs / batch

        # Mean absolute yaw/pitch error in degrees for this batch.
        error_yaw += torch.sum(torch.abs(np.rad2deg(gt_cpu[:, 0]) - np.rad2deg(pred_cpu[:, 0]))) / batch

        error_pitch += torch.sum(torch.abs(np.rad2deg(gt_cpu[:, 1]) - np.rad2deg(pred_cpu[:, 1]))) / batch

    test_log = f"******************    {flag} Datasets Test   *****************\n" \
               f"******************  | angular error:{accs / c_count:.5f} | error_yaw:{error_yaw / c_count:.5f} | error_pitch:{error_pitch / c_count:.5f}"
    torch.cuda.empty_cache()
    outfile.write(test_log + "\n")
    sys.stdout.flush()
    outfile.flush()

    print(test_log)

    return accs / c_count


def setup_seed(seed):
    """Seed every RNG this script relies on for reproducible runs.

    Covers Python's hash seed, the ``random`` module, NumPy, and torch
    (CPU plus all CUDA devices).
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)


if __name__ == "__main__":
    torch.cuda.empty_cache()
    setup_seed(2025)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    writer = SummaryWriter(log_dir='./logs')

    # Context manager closes the config file promptly; the original passed a
    # bare open() into yaml.load and leaked the handle.
    with open(sys.argv[1]) as cfg_file:
        config = yaml.load(cfg_file, Loader=yaml.FullLoader)
    config = edict(config)
    readername = config.reader
    batch_size = config.params.batch_size

    reader = importlib.import_module(readername)

    print("Read data")
    # Expand each dataset's label directory into a sorted list of file paths.
    # (Deduplicates the three copy-pasted stanzas of the original; prints and
    # config mutations are identical. data_eth is intentionally not expanded.)
    for cfg_key, tag in (("data_360", "360"), ("data_mpii", "mpii"), ("data_diap", "diap")):
        data_cfg = config[cfg_key]
        if os.path.isdir(data_cfg.label_path):
            names = sorted(os.listdir(data_cfg.label_path))
            print(f"{tag}:{names}")
            data_cfg.label_path = [os.path.join(data_cfg.label_path, j) for j in names]

    dataset_360 = reader.txtload(config.data_360, batch_size, shuffle=True, num_workers=0)
    dataset_eth = reader.txtload(config.data_eth, batch_size, shuffle=True, num_workers=0)
    dataset_target_mpii = reader.txtload(config.data_mpii, batch_size, shuffle=True, num_workers=0)
    dataset_target_diap = reader.txtload(config.data_diap, batch_size, shuffle=True, num_workers=0)

    savepath = os.path.join(config.save.save_path, f"checkpoint/{config.data_360.name}")
    # Race-free replacement for the exists()-then-makedirs() pattern.
    os.makedirs(savepath, exist_ok=True)
    print("Model building")
    net = model.model()
    net.to(device)

    print("optimizer building")
    # Direct constructor call; getattr(nn, "L1Loss") was needless reflection.
    loss_l1 = nn.L1Loss().cuda()

    base_lr = config.params.lr
    decaysteps = config.params.decay_step
    gamma = config.params.gamma

    optimizer = optim.Adam(net.parameters(), lr=base_lr, betas=(0.9, 0.95))
    # NOTE: scheduler.step() is called once per *iteration* below, so
    # decay_step is measured in iterations, not epochs.
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=decaysteps, gamma=gamma)

    print("Training")
    length = len(dataset_eth)
    total = length * config.params.epoch
    cur = 0
    timebegin = time.time()
    count = 1

    with open(os.path.join(savepath, "train_log"), 'w') as outfile:
        for epoch in range(1, config.params.epoch + 1):
            # Evaluate on the target sets before every training epoch.
            # (The original guard `epoch % 1 == 0 or epoch == 1` was always true.)
            with torch.no_grad():
                print("test")
                error_mpii = test_target_training(net, dataset_target_mpii, device, 'Target_mpii')
                error_diap = test_target_training(net, dataset_target_diap, device, 'Target_diap')

            net.train()
            for i, d_sou in enumerate(dataset_eth):
                data, label = d_sou
                # Skip ragged final batches so every step sees a full batch.
                if data["face"].shape[0] != batch_size:
                    continue

                input_s = data['face'].to(device)

                label['gaze_yaw_pitch'] = label['gaze_yaw_pitch'].to(device)
                predict = net(input_s)

                count += 1

                loss = loss_l1(predict, label['gaze_yaw_pitch'])

                writer.add_scalar("Loss/gaze_loss", loss, count)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                scheduler.step()

                cur += 1

                if i % 1000 == 0:
                    lr_G = optimizer.state_dict()['param_groups'][0]['lr']
                    timeend = time.time()
                    # Linear ETA extrapolation from elapsed time, in hours.
                    resttime = (timeend - timebegin) / cur * (total - cur) / 3600

                    log = f"-----------[{epoch}/{config['params']['epoch']}]: [{i}/{length}]--------------\n" \
                          f"| Gaze Loss:{loss:.8f} | lr:{lr_G}  |  rest time:{resttime:.2f}h \n"

                    print(log)
                    outfile.write(log + "\n")
                    sys.stdout.flush()
                    outfile.flush()
            if epoch % config["save"]["step"] == 0:
                # Saves the whole module; a state_dict would be more portable,
                # but loading code elsewhere may expect the full object.
                torch.save(net, os.path.join(savepath, f"Iter_{epoch}.pt"))
    torch.cuda.empty_cache()
