#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2025/8/19
# @USER    : Shengji He
# @File    : train.py
# @Software: PyCharm
# @Version  : Python-
# @TASK:
import torch
import torch.nn.functional as F

from dataset import get_data_loader
from util import set_random_seed
from network import VQGAN, Discriminator, Transformer


def main():
    """Stage 1: train the VQGAN generator and its patch discriminator.

    Saves the best (lowest combined validation loss) weights of both
    networks and stops early after `patience` epochs without improvement.
    """
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    seed = 42

    train_dir = r'D:\heshengji\tools\dataset'
    pt_folder, pt_file4vqgan, pt_file4disc = './', 'vqgan.pt', 'disc.pt'
    lr = 5e-3
    patience = 80
    epoch_num = 1000
    batch_size = 128

    set_random_seed(seed)
    torch.backends.cudnn.deterministic = True

    train_loader, valid_loader = get_data_loader(train_dir, batch_size)

    vqgan = VQGAN().to(device)
    discriminator = Discriminator().to(device)

    opt_vq = torch.optim.Adam(vqgan.parameters(), lr=lr)
    opt_disc = torch.optim.Adam(discriminator.parameters(), lr=lr)

    best_score, wait, first = 0.0, 0, True
    for epoch in range(epoch_num):
        # BUG FIX: switch back to train mode every epoch — the eval() call at
        # the end of the previous epoch would otherwise persist, freezing
        # batch-norm / dropout behavior for all remaining training.
        vqgan.train()
        discriminator.train()
        for step, (pic, label) in enumerate(train_loader):
            pic = pic.to(device)
            decoded_pic, _, q_loss = vqgan(pic)
            disc_real = discriminator(pic)
            disc_fake = discriminator(decoded_pic)
            # Hinge loss for the discriminator: real pushed above +1,
            # reconstructions pushed below -1.
            d_loss_real = torch.mean(F.relu(1.0 - disc_real))
            d_loss_fake = torch.mean(F.relu(1.0 + disc_fake))
            # adopt_weight gates the adversarial term by global step so the
            # generator can first learn plain reconstruction (threshold=0 here
            # enables it immediately; raise to delay the discriminator).
            disc_factor = vqgan.adopt_weight(1.0, epoch * len(train_loader) + step, threshold=0)
            gan_loss = disc_factor * 0.5 * (d_loss_real + d_loss_fake)
            # Generator loss: reconstruction + codebook/commitment loss +
            # adversarial term weighted by the adaptive lambda.  For the
            # generator, the harder the fake is to reject, the better.
            recon_loss = vqgan.mse_loss(pic, decoded_pic)
            lam = vqgan.calculate_lambda(recon_loss, -d_loss_fake)
            vq_loss = recon_loss + q_loss + 1.0 * lam * -d_loss_fake
            # BUG FIX: back-propagate the generator loss first (retain_graph
            # keeps the shared graph alive), then the discriminator loss.  The
            # original order let vq_loss.backward() deposit gradients into the
            # discriminator AFTER opt_disc.zero_grad(), so opt_disc.step()
            # used contaminated gradients.
            opt_vq.zero_grad()
            vq_loss.backward(retain_graph=True)
            opt_disc.zero_grad()
            gan_loss.backward()
            opt_vq.step()
            opt_disc.step()
            if step % 100 == 0:
                print(f'epoch: {epoch + 1}, step: {step + 1}, vq_loss: {vq_loss}, gan_loss:{gan_loss}')
        vqgan.eval()
        discriminator.eval()
        # NOTE(review): validation is not wrapped in torch.no_grad() because
        # calculate_lambda presumably needs autograd — confirm against the
        # VQGAN implementation before adding it.
        val_loss = 0.0
        for step, (pic, label) in enumerate(valid_loader):
            pic = pic.to(device)
            decoded_pic, _, q_loss = vqgan(pic)
            disc_real = discriminator(pic)
            disc_fake = discriminator(decoded_pic)
            # BUG FIX: compute the hinge terms BEFORE using them — the
            # original built gan_loss from the previous batch's stale
            # d_loss_real / d_loss_fake values.
            d_loss_real = torch.mean(F.relu(1.0 - disc_real))
            d_loss_fake = torch.mean(F.relu(1.0 + disc_fake))
            gan_loss = 0.5 * (d_loss_real + d_loss_fake)
            recon_loss = vqgan.mse_loss(pic, decoded_pic)
            lam = vqgan.calculate_lambda(recon_loss, -d_loss_fake)
            vq_loss = recon_loss + q_loss + 1.0 * lam * -d_loss_fake
            val_loss += (vq_loss.item() + gan_loss.item())
        val_loss = val_loss / len(valid_loader)
        # Early stopping on the combined validation loss (min-delta 1e-4).
        if first or val_loss < best_score - 1e-4:
            first = False
            best_score = val_loss
            wait = 0
            torch.save(vqgan.state_dict(), f'{pt_folder}//{pt_file4vqgan}')
            torch.save(discriminator.state_dict(), f'{pt_folder}//{pt_file4disc}')
        elif wait >= patience:
            break
        else:
            wait += 1
        # BUG FIX: val_loss is already a per-batch mean; the original divided
        # by len(valid_loader) a second time when printing.
        print(f'epoch: {epoch + 1}, early stopping time/full time: {wait}/{patience}, valid loss: {val_loss}')


def main_decoder():
    """Stage 2: train the Transformer prior on top of a pre-trained VQGAN.

    Loads the VQGAN weights saved by ``main()``, fits the transformer with
    cross-entropy over the predicted codebook indices, keeps the best
    (lowest validation loss) weights, and stops early after `patience`
    epochs without improvement.
    """
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    seed = 42

    train_dir = r'D:\heshengji\tools\dataset'
    pt_folder, pt_file4vqgan, pt_file4tfm = './', 'vqgan.pt', 'tfm.pt'
    lr = 5e-3
    patience = 80
    epoch_num = 1000
    batch_size = 128

    set_random_seed(seed)
    torch.backends.cudnn.deterministic = True

    train_loader, valid_loader = get_data_loader(train_dir, batch_size)

    # Restore the stage-1 VQGAN so the transformer trains on its codebook.
    vqgan = VQGAN().to(device)
    vqgan.load_state_dict(torch.load(f'{pt_folder}//{pt_file4vqgan}', map_location=device, weights_only=False))

    tfm = Transformer(vqgan).to(device)
    opt_tf = torch.optim.Adam(tfm.parameters(), lr=lr)

    best_score, wait, first = 0.0, 0, True
    for epoch in range(epoch_num):
        # BUG FIX: restore train mode every epoch — the eval() call after the
        # previous epoch's validation would otherwise stick for the rest of
        # training.
        tfm.train()
        for step, (pic, label) in enumerate(train_loader):
            pic = pic.to(device)
            logits, targets = tfm(pic)
            # Flatten (batch, seq, vocab) -> (batch*seq, vocab) for cross-entropy.
            loss = F.cross_entropy(logits.reshape(-1, logits.size(-1)), targets.reshape(-1))
            opt_tf.zero_grad()
            loss.backward()
            opt_tf.step()
            if step % 100 == 0:
                print(f'epoch: {epoch + 1}, step: {step + 1}, loss: {loss}')
        tfm.eval()
        val_loss = 0.0
        # Validation needs no autograd graph — saves memory and time.
        with torch.no_grad():
            for step, (pic, label) in enumerate(valid_loader):
                pic = pic.to(device)
                logits, targets = tfm(pic)
                loss = F.cross_entropy(logits.reshape(-1, logits.size(-1)), targets.reshape(-1))
                val_loss += loss.item()
        val_loss = val_loss / len(valid_loader)
        # Early stopping on mean validation loss (min-delta 1e-4).
        if first or val_loss < best_score - 1e-4:
            first = False
            best_score = val_loss
            wait = 0
            torch.save(tfm.state_dict(), f'{pt_folder}//{pt_file4tfm}')
        elif wait >= patience:
            break
        else:
            wait += 1
        # BUG FIX: val_loss is already averaged over the loader; the original
        # divided by len(valid_loader) a second time when printing.
        print(f'epoch: {epoch + 1}, early stopping time/full time: {wait}/{patience}, valid loss: {val_loss}')


if __name__ == '__main__':
    # Stage 1 (train VQGAN + discriminator) — run first, then comment out:
    # main()
    # Stage 2: train the transformer prior on the saved VQGAN weights.
    main_decoder()
    print('done')
