# -*- coding: utf-8 -*-
from __future__ import print_function  # do not delete this line if you want to save your log file.
import os
import random
import numpy as np
from PIL import Image
import torch
from torch import nn
from torch.optim import Adam
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader

# Hardware: prefer GPU when available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)

# Hyperparameters.
LOAD_PATH = None              # checkpoint path to resume from, or None to train from scratch
MAX_EPOCH = 500
CHECKPOINT_INTERVAL = 10      # save a checkpoint every N epochs
TRAIN_BATCH_SIZE = 33
SMALL_SIZE = 112              # low-resolution input edge length
LARGE_SIZE = 224              # high-resolution target edge length
IMG_CHANNEL = 3
N_FEATURES1 = 64              # feature maps after the first SRCNN conv
N_FEATURES2 = 32              # feature maps after the 1x1 mapping conv
KERNEL_F1 = 9                 # kernel size of the first conv
KERNEL_F3 = 5                 # kernel size of the last conv
LR = 0.0002
BETA1 = 0.5
# Force odd kernel sizes so that (k - 1) // 2 padding preserves spatial size.
if KERNEL_F1 % 2 == 0:
    KERNEL_F1 += 1
if KERNEL_F3 % 2 == 0:
    KERNEL_F3 += 1

# Prepare the checkpoint save directory.
# os.makedirs (not os.mkdir) so the nested './cache' parent is created too;
# exist_ok=True removes the race between the existence check and the creation.
CHECKPOINT_DIR = r'./cache/checkpoints/'
os.makedirs(CHECKPOINT_DIR, exist_ok=True)


class MyDataset(Dataset):
    """Paired low-/high-resolution image dataset.

    Walks ``data_dir`` for ``small`` and ``big`` sub-directories and pairs
    their jpg/png images by sorted filename order.
    """

    def __init__(self, data_dir='.', pre_transform=None):
        """
        :param data_dir: root directory scanned for 'small'/'big' image folders
        :param pre_transform: optional transform applied to both images of a pair
        """
        print('\nDataset directory: {}\n'.format(os.path.abspath(data_dir)))
        self.pre_transform = pre_transform
        self.data_info_lst = self.get_data_info(data_dir)

    def __getitem__(self, index):
        """Return the (small, large) image pair at ``index`` as RGB images."""
        path_small_img, path_large_img = self.data_info_lst[index]
        small_img = Image.open(path_small_img).convert('RGB')
        large_img = Image.open(path_large_img).convert('RGB')

        if self.pre_transform is not None:
            small_img = self.pre_transform(small_img)
            large_img = self.pre_transform(large_img)

        return small_img, large_img

    def __len__(self):
        return len(self.data_info_lst)

    @staticmethod
    def get_data_info(data_dir):
        """Collect (small_path, large_path) pairs under ``data_dir``.

        Filenames are sorted before pairing: ``os.listdir`` returns entries in
        arbitrary order, so the previous unsorted zip could pair a small image
        with the wrong large image.
        """
        large_img_info_lst = []
        small_img_info_lst = []

        for root, sub_dirs, files in os.walk(data_dir, topdown=False):
            for sub_dir in sub_dirs:
                if sub_dir not in ('big', 'small'):
                    continue
                dir_path = os.path.join(root, sub_dir)
                # Sorted so 'small' and 'big' lists line up by filename.
                for img_name in sorted(os.listdir(dir_path)):
                    path_img = os.path.join(dir_path, img_name)
                    if path_img.endswith(('jpg', 'png')):  # keep only jpg/png image files
                        if sub_dir == 'big':
                            large_img_info_lst.append(path_img)
                        else:
                            small_img_info_lst.append(path_img)

        if len(small_img_info_lst) != len(large_img_info_lst):
            # zip silently truncates to the shorter list; make that visible.
            print('Warning: {} small vs {} large images; extras are dropped'.format(
                len(small_img_info_lst), len(large_img_info_lst)))
        data_info_lst = list(zip(small_img_info_lst, large_img_info_lst))
        print('Found {} image pairs'.format(len(data_info_lst)))
        return data_info_lst


# SRCNN super-resolution network
class SRCNN(nn.Module):
    """Three-stage SRCNN: feature extraction -> 1x1 mapping -> reconstruction.

    Every conv uses (k - 1) // 2 padding, so spatial size is preserved
    end to end.
    """

    def __init__(self):
        super(SRCNN, self).__init__()

        # Kept as a single nn.Sequential named `layers` (same module order as
        # before) so checkpoint state_dict keys remain stable.
        stages = [
            # Stage 1: patch extraction and representation.
            nn.Conv2d(IMG_CHANNEL, N_FEATURES1,
                      kernel_size=KERNEL_F1, padding=(KERNEL_F1 - 1) // 2),
            nn.ReLU(True),
            nn.BatchNorm2d(N_FEATURES1),
            # Stage 2: non-linear mapping via 1x1 convolution.
            nn.Conv2d(N_FEATURES1, N_FEATURES2, kernel_size=1, padding=0),
            nn.ReLU(True),
            nn.BatchNorm2d(N_FEATURES2),
            # Stage 3: reconstruction back to image channels.
            nn.Conv2d(N_FEATURES2, IMG_CHANNEL,
                      kernel_size=KERNEL_F3, padding=(KERNEL_F3 - 1) // 2),
            nn.ReLU(True),
        ]
        self.layers = nn.Sequential(*stages)

    def forward(self, x):
        """Map a (N, C, H, W) batch to a same-shaped restored batch."""
        return self.layers(x)


def train():
    """Run one training epoch over ``train_loader``; return the mean batch loss.

    Uses the module-level ``train_loader``, ``srcnn``, ``criterion``,
    ``optimizer`` and ``device``. Returning the mean loss is backward
    compatible (the original returned None, which the caller ignores) and
    lets the epoch loop report progress. Returns None for an empty loader.
    """
    total_loss = 0.0
    n_batches = 0
    for batch_in_y, batch_true_x in train_loader:
        batch_in_y = batch_in_y.to(device)      # low-res input (upscaled by the transform)
        batch_true_x = batch_true_x.to(device)  # high-res target
        optimizer.zero_grad()
        batch_out_y = srcnn(batch_in_y)
        loss = criterion(input=batch_out_y, target=batch_true_x)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
        n_batches += 1
    return total_loss / n_batches if n_batches else None


# Training image data: location, preprocessing, dataset and loader.
path_train = os.path.join(r'./cache/raw/')

# Both the small and the large image are resized to LARGE_SIZE and tensorised.
train_transforms = transforms.Compose(
    [transforms.Resize((LARGE_SIZE, LARGE_SIZE)), transforms.ToTensor()]
)

train_dataset = MyDataset(data_dir=path_train, pre_transform=train_transforms)
train_loader = DataLoader(dataset=train_dataset,
                          batch_size=TRAIN_BATCH_SIZE,
                          shuffle=True)

# Model.
srcnn = SRCNN().to(device)
# srcnn.apply(weights_init)

# Objective function and optimizer.
criterion = nn.MSELoss(reduction='mean').to(device)
optimizer = Adam(srcnn.parameters(), lr=LR, betas=(BETA1, 0.999))

# Restore model/optimizer state when resuming from a checkpoint.
if LOAD_PATH is not None:
    # map_location=device subsumes the old cuda-availability branch: it is a
    # no-op on a CUDA machine and remaps CUDA tensors on a CPU-only one.
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # checkpoints from trusted sources.
    checkpoint = torch.load(LOAD_PATH, map_location=device)
    srcnn.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    last_epoch = checkpoint['epoch']
else:
    last_epoch = 0
print(f'last epoch: {last_epoch}')

# Training loop: resume after `last_epoch`, checkpoint periodically.
print('start training loop...')
for epoch in range(last_epoch + 1, MAX_EPOCH + 1):
    train()
    print('epoch:', epoch)
    # Persist a checkpoint at the configured interval and at the final epoch.
    save_now = (epoch % CHECKPOINT_INTERVAL == 0) or (epoch == MAX_EPOCH)
    if save_now:
        checkpoint = {
            'model_state_dict': srcnn.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'epoch': epoch,
        }
        path_checkpoint = os.path.join(CHECKPOINT_DIR, 'SRCNN_{}.pkl'.format(epoch))
        torch.save(obj=checkpoint, f=path_checkpoint)
        print('checkpoint at epoch {} saved'.format(epoch))


# Huawei NAIE / ModelArts platform-specific step: copy the local checkpoint
# directory to the job's algorithm output path so the results persist after
# the training container exits.
# NOTE(review): these imports sit at the bottom of the file, not the top —
# presumably so the script runs up to this point without the platform SDK
# installed; confirm before moving them.
import moxing as mox
from naie.context import Context
mox.file.copy_parallel(CHECKPOINT_DIR, os.path.join(Context.get_output_path(level='algo'), 'checkpoints'))
