import time
import os
import argparse
import random
import numpy as np

import torch
import torch.optim as optim
import torch.optim.lr_scheduler as LS
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.utils.data as data
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

# Pin this process to physical GPU 4; torch then sees it as cuda:0.
# Must be set before any CUDA context is created.
os.environ["CUDA_VISIBLE_DEVICES"] = "4"

def set_random_seeds(seed=42):
    """Seed every RNG in use (Python, NumPy, PyTorch, CUDA) for reproducibility.

    Args:
        seed: integer seed applied to all generators (default 42).
    """
    # CPU-side generators: Python stdlib, NumPy, and PyTorch.
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)

    # GPU-side generators and cuDNN behaviour, only when CUDA is present.
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        # Force deterministic cuDNN kernels and disable the auto-tuner,
        # trading some speed for run-to-run reproducibility.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

def worker_init_fn(worker_id):
    """Give each DataLoader worker its own deterministic seed.

    The seed is derived from a fixed base (30) plus the worker id, wrapped
    into the 32-bit range NumPy accepts.

    NOTE(review): because the base is constant, every epoch's workers reseed
    identically, so random augmentations repeat across epochs — confirm this
    is intended.
    """
    seed = (30 + worker_id) % (2 ** 32)
    for seeder in (np.random.seed, torch.manual_seed, random.seed):
        seeder(seed)

# Command-line interface for the training run.
parser = argparse.ArgumentParser()
parser.add_argument(
    '--batch-size', '-N', type=int, default=128, help='batch size')
parser.add_argument(
    '--model-name', '-m', type=str, default='default', help='model name')
parser.add_argument(
    '--max-epochs', '-e', type=int, default=100, help='max epochs')
# BUG FIX: the original used type=bool, which applies bool() to the raw
# argument string, so ANY non-empty value — including "--one-shot False" —
# parsed as True. A store_true flag keeps the False default and the
# args.one_shot boolean interface while making the flag behave correctly.
parser.add_argument(
    '--one-shot', '-os', action='store_true', help='one shot')
parser.add_argument('--lr', '-l', type=float, default=0.0001, help='learning rate')
parser.add_argument('--cuda', '-g', action='store_true', help='enables cuda')
parser.add_argument(
    '--iterations', '-it', type=int, default=16, help='unroll iterations')
# Help text fixed: it was copy-pasted from --iterations.
parser.add_argument('--checkpoint', type=int, help='epoch checkpoint to resume from')
args = parser.parse_args()

## load 32x32 patches from images
import dataset  # project-local module; ImageFolder reads an image list file

# Seed all RNGs before building datasets/loaders so shuffling and
# augmentation order are reproducible.
set_random_seeds(30)

# Text files listing image paths — presumably one path per line; confirm
# against dataset.ImageFolder.
train_set_path = "train_set.txt"
val_set_path = "val_set.txt"

# Training uses random 32x32 crops as augmentation; validation uses a
# deterministic center crop so its loss is comparable across epochs.
train_transform = transforms.Compose([
    transforms.RandomCrop((32, 32)),
    transforms.ToTensor(),
])

val_transform = transforms.Compose([
    transforms.CenterCrop((32, 32)),
    transforms.ToTensor(),
])

train_set = dataset.ImageFolder(train_set_path, transform=train_transform)
train_loader = data.DataLoader(dataset=train_set, batch_size=args.batch_size, shuffle=True, num_workers=4, worker_init_fn=worker_init_fn)

val_set = dataset.ImageFolder(val_set_path, transform=val_transform)
val_loader = data.DataLoader(dataset=val_set, batch_size=args.batch_size, shuffle=False, num_workers=4, worker_init_fn=worker_init_fn)

print('train images: {}; train batches: {}'.format(
    len(train_set), len(train_loader)))

print('val images: {}; val batches: {}'.format(
    len(val_set), len(val_loader)))

## load networks on GPU
import network  # project-local module defining EncoderCell/Binarizer/DecoderCell

# The three components of the recurrent compression codec, all moved to the
# GPU selected via CUDA_VISIBLE_DEVICES above.
encoder = network.EncoderCell().cuda()
binarizer = network.Binarizer().cuda()
decoder = network.DecoderCell().cuda()

# A single Adam optimizer covering all three sub-networks' parameters.
solver = optim.Adam(
    [
        {
            'params': encoder.parameters()
        },
        {
            'params': binarizer.parameters()
        },
        {
            'params': decoder.parameters()
        },
    ],
    lr=args.lr)


def resume(epoch=None):
    """Restore encoder/binarizer/decoder weights from the checkpoint/ folder.

    Args:
        epoch: epoch number of the checkpoint to load; None selects the
            iteration-tagged files ('*_iter_000.pth').

    NOTE(review): save() below writes '{name}_encoder.pth' style filenames,
    which this loader does not match — confirm which scheme is current.
    """
    if epoch is None:
        tag, idx = 'iter', 0
    else:
        tag, idx = 'epoch', epoch

    # Load each module's state dict from its matching checkpoint file.
    for prefix, module in (('encoder', encoder),
                           ('binarizer', binarizer),
                           ('decoder', decoder)):
        state = torch.load('checkpoint/{}_{}_{:03d}.pth'.format(prefix, tag, idx))
        module.load_state_dict(state)


def save(name):
    """Persist encoder/binarizer/decoder weights under checkpoint/.

    Args:
        name: run identifier used as the filename prefix
            ('checkpoint/{name}_encoder.pth', etc.).

    NOTE(review): resume() expects 'encoder_{iter|epoch}_NNN.pth' names,
    which do not match what this writes — confirm which scheme is current.
    """
    # makedirs(exist_ok=True) replaces the original exists()+mkdir pair,
    # removing the check-then-create race and handling nested paths.
    os.makedirs('checkpoint', exist_ok=True)

    torch.save(encoder.state_dict(), f'checkpoint/{name}_encoder.pth')
    torch.save(binarizer.state_dict(), f'checkpoint/{name}_binarizer.pth')
    torch.save(decoder.state_dict(), f'checkpoint/{name}_decoder.pth')


# resume()

# scheduler = LS.MultiStepLR(solver, milestones=[3, 10, 20, 50, 100], gamma=0.5)

last_epoch = 0
# if args.checkpoint:
#     resume(args.checkpoint)
#     last_epoch = args.checkpoint
#     scheduler.last_epoch = last_epoch - 1

# Build a run name encoding the hyper-parameters; the suffix distinguishes
# one-shot reconstruction from additive-residual training.
if args.one_shot:
    model_name = f'{args.model_name}_BS_{args.batch_size}_LR{args.lr}_IT{args.iterations}_ONESHOT'
else:
    model_name = f'{args.model_name}_BS_{args.batch_size}_LR{args.lr}_IT{args.iterations}_ADD'

# BUG FIX: os.mkdir raises FileNotFoundError when the parent 'tensorboard'
# directory does not exist; makedirs creates the whole path and exist_ok
# replaces the separate existence check.
log_dir = os.path.join('tensorboard', model_name)
os.makedirs(log_dir, exist_ok=True)
# NOTE(review): both writers point at the same log_dir, so train and val
# scalars land in one event directory — confirm this is intended.
train_writer = SummaryWriter(log_dir=log_dir)
val_writer = SummaryWriter(log_dir=log_dir)

best_loss = float('inf')
# NOTE(review): the three networks are never toggled with .train()/.eval()
# between the phases below — confirm they contain no mode-dependent layers
# (dropout / batchnorm).
for epoch in range(last_epoch + 1, args.max_epochs + 1):

    # ---- training phase ----
    # The progress bar is advanced manually via update(); the loop iterates
    # train_loader directly rather than the tqdm wrapper.
    train_pbar = tqdm(train_loader, desc=f"Train Epoch {epoch}/{args.max_epochs}", unit="batch", ncols=100)
    train_total_loss = 0
    # NOTE(review): the loop variable `data` shadows the module alias
    # `torch.utils.data as data`; harmless here because the alias is unused
    # after loader construction, but rename if the module is needed later.
    for batch, data in enumerate(train_loader):
        # init lstm state
        # Zero-initialised (hidden, cell) pairs for the 3 encoder and
        # 4 decoder conv-LSTM layers, one fresh set per batch. The spatial
        # sizes (8x8 down to 2x2, back up to 16x16) presumably follow from
        # the 32x32 input patches — TODO confirm against network.py.
        # NOTE(review): Variable() has been a no-op since PyTorch 0.4.
        encoder_h_1 = (Variable(torch.zeros(data.size(0), 256, 8, 8).cuda()),
                       Variable(torch.zeros(data.size(0), 256, 8, 8).cuda()))
        encoder_h_2 = (Variable(torch.zeros(data.size(0), 512, 4, 4).cuda()),
                       Variable(torch.zeros(data.size(0), 512, 4, 4).cuda()))
        encoder_h_3 = (Variable(torch.zeros(data.size(0), 512, 2, 2).cuda()),
                       Variable(torch.zeros(data.size(0), 512, 2, 2).cuda()))

        decoder_h_1 = (Variable(torch.zeros(data.size(0), 512, 2, 2).cuda()),
                       Variable(torch.zeros(data.size(0), 512, 2, 2).cuda()))
        decoder_h_2 = (Variable(torch.zeros(data.size(0), 512, 4, 4).cuda()),
                       Variable(torch.zeros(data.size(0), 512, 4, 4).cuda()))
        decoder_h_3 = (Variable(torch.zeros(data.size(0), 256, 8, 8).cuda()),
                       Variable(torch.zeros(data.size(0), 256, 8, 8).cuda()))
        decoder_h_4 = (Variable(torch.zeros(data.size(0), 128, 16, 16).cuda()),
                       Variable(torch.zeros(data.size(0), 128, 16, 16).cuda()))

        patches = Variable(data.cuda())

        solver.zero_grad()

        losses = []

        # Center ToTensor's [0,1] pixel values around zero -> [-0.5, 0.5].
        res = patches - 0.5

        # Unroll the recurrent codec for a fixed number of iterations.
        for _ in range(args.iterations):
            encoded, encoder_h_1, encoder_h_2, encoder_h_3 = encoder(
                res, encoder_h_1, encoder_h_2, encoder_h_3)

            codes = binarizer(encoded)

            output, decoder_h_1, decoder_h_2, decoder_h_3, decoder_h_4 = decoder(
                codes, decoder_h_1, decoder_h_2, decoder_h_3, decoder_h_4)

            if args.one_shot == False:
                # Additive mode: the next iteration encodes the remaining
                # residual, so reconstructions accumulate across iterations.
                res = res - output
                losses.append(res.abs().mean())
            else:
                # One-shot mode: every iteration predicts the full target;
                # `res` stays unchanged between iterations.
                it_loss = res - output
                losses.append(it_loss.abs().mean())

        # Mean L1 residual, averaged over the unroll iterations.
        loss = sum(losses) / args.iterations
        loss.backward()
        solver.step()

        train_pbar.set_postfix(loss=loss.item())
        train_pbar.update(1)
        train_total_loss += loss.item()


    # At the end of the epoch, compute and report the average training loss.
    train_pbar.close()
    train_avg_loss = train_total_loss / len(train_loader)
    # Log this epoch's training loss to TensorBoard.
    train_writer.add_scalar(f'{model_name}/Loss/train', train_avg_loss, epoch)
    print(f"Train Epoch {epoch}/{args.max_epochs}, Average Loss: {train_avg_loss:.4f}")

    # ---- validation phase ----
    # Same unrolled codec pass as training, under no_grad and without
    # backward/step.
    val_pbar = tqdm(val_loader, desc=f"Val Epoch {epoch}/{args.max_epochs}", unit="batch", ncols=100)
    val_total_loss = 0
    with torch.no_grad():
        for batch, data in enumerate(val_loader):
            # init lstm state
            encoder_h_1 = (Variable(torch.zeros(data.size(0), 256, 8, 8).cuda()),
                        Variable(torch.zeros(data.size(0), 256, 8, 8).cuda()))
            encoder_h_2 = (Variable(torch.zeros(data.size(0), 512, 4, 4).cuda()),
                        Variable(torch.zeros(data.size(0), 512, 4, 4).cuda()))
            encoder_h_3 = (Variable(torch.zeros(data.size(0), 512, 2, 2).cuda()),
                        Variable(torch.zeros(data.size(0), 512, 2, 2).cuda()))

            decoder_h_1 = (Variable(torch.zeros(data.size(0), 512, 2, 2).cuda()),
                        Variable(torch.zeros(data.size(0), 512, 2, 2).cuda()))
            decoder_h_2 = (Variable(torch.zeros(data.size(0), 512, 4, 4).cuda()),
                        Variable(torch.zeros(data.size(0), 512, 4, 4).cuda()))
            decoder_h_3 = (Variable(torch.zeros(data.size(0), 256, 8, 8).cuda()),
                        Variable(torch.zeros(data.size(0), 256, 8, 8).cuda()))
            decoder_h_4 = (Variable(torch.zeros(data.size(0), 128, 16, 16).cuda()),
                        Variable(torch.zeros(data.size(0), 128, 16, 16).cuda()))

            patches = Variable(data.cuda())

            losses = []

            res = patches - 0.5

            for _ in range(args.iterations):
                encoded, encoder_h_1, encoder_h_2, encoder_h_3 = encoder(
                    res, encoder_h_1, encoder_h_2, encoder_h_3)

                codes = binarizer(encoded)

                output, decoder_h_1, decoder_h_2, decoder_h_3, decoder_h_4 = decoder(
                    codes, decoder_h_1, decoder_h_2, decoder_h_3, decoder_h_4)

                if args.one_shot == False:
                    res = res - output
                    losses.append(res.abs().mean())
                else:
                    it_loss = res - output
                    losses.append(it_loss.abs().mean())

            loss = sum(losses) / args.iterations


            val_pbar.set_postfix(loss=loss.item())
            val_pbar.update(1)
            val_total_loss += loss.item()


    # At the end of the epoch, compute and report the average validation loss.
    val_pbar.close()
    val_avg_loss = val_total_loss / len(val_loader)
    # Log this epoch's validation loss to TensorBoard.
    val_writer.add_scalar(f'{model_name}/Loss/val', val_avg_loss, epoch)
    print(f"Val Epoch {epoch}/{args.max_epochs}, Average Loss: {val_avg_loss:.4f}")
    # Checkpoint whenever validation improves on the best loss so far.
    if val_avg_loss < best_loss:
        best_loss = val_avg_loss
        save(model_name)
    print(f"Val Epoch {epoch}/{args.max_epochs}, Average Loss: {val_avg_loss:.4f}, Best Loss: {best_loss:.4f}")

    # learning-rate schedule (currently disabled)
    # scheduler.step()

train_writer.close()
val_writer.close()
