# -*- coding: utf-8 -*-
from __future__ import print_function  # do not delete this line if you want to save your log file.
import os
import random
import numpy as np
from PIL import Image
import torch
from torch import nn
from torch.optim import Adam
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt

# Hardware: prefer CUDA when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)

# Hyperparameters.
LOAD_PATH = r'./checkpoints/SRCNN_100.pkl'  # checkpoint to resume from (None = start fresh)
MAX_EPOCH = 500
CHECKPOINT_INTERVAL = 10
TRAIN_BATCH_SIZE, TEST_BATCH_SIZE = 33, 1
SMALL_SIZE = 112      # low-resolution input side length
LARGE_SIZE = 224      # high-resolution target side length
IMG_CHANNEL = 3       # RGB
N_FEATURES1 = 64      # feature maps after the first conv
N_FEATURES2 = 32      # feature maps after the 1x1 mapping conv
KERNEL_F1 = 9         # kernel size of the first conv
KERNEL_F3 = 5         # kernel size of the last conv
LR = 0.0002
BETA1 = 0.5
# Force odd kernel sizes so symmetric (k-1)//2 padding keeps the spatial
# resolution unchanged through the network.
if KERNEL_F1 % 2 == 0:
    KERNEL_F1 += 1
if KERNEL_F3 % 2 == 0:
    KERNEL_F3 += 1

# Checkpoint save directory.  os.makedirs creates intermediate directories
# as well (os.mkdir would fail when './cache' itself does not exist yet).
CHECKPOINT_DIR = r'./cache/checkpoints/'
os.makedirs(CHECKPOINT_DIR, exist_ok=True)


class MyTestDataset(Dataset):
    """Unlabeled image dataset for SR testing.

    Recursively collects every .jpg/.png file under ``data_dir`` and yields
    each image as RGB, optionally passed through ``pre_transform``.
    """

    def __init__(self, data_dir='.', pre_transform=None):
        print('\nDataset directory: {}\n'.format(os.path.abspath(data_dir)))
        self.pre_transform = pre_transform                 # applied in __getitem__
        self.data_info_lst = self.get_data_info(data_dir)  # list of image file paths

    def __getitem__(self, index):
        # (The original line mixed spaces and a tab here; re-indented with
        # spaces only to match the rest of the file.)
        path_small_img = self.data_info_lst[index]
        small_img = Image.open(path_small_img).convert('RGB')

        if self.pre_transform is not None:
            small_img = self.pre_transform(small_img)

        return small_img

    def __len__(self):
        return len(self.data_info_lst)

    @staticmethod
    def get_data_info(data_dir):
        """Walk ``data_dir`` and return the paths of all JPEG/PNG files."""
        small_img_info_lst = []

        for root, _sub_dirs, files in os.walk(data_dir, topdown=False):
            for file in files:
                path_img = os.path.join(root, file)
                # Match on the real '.jpg'/'.png' suffix (case-insensitive);
                # the original bare 'jpg'/'png' check also matched names
                # like 'xjpg' that merely end in those letters.
                if path_img.lower().endswith(('.jpg', '.png')):
                    small_img_info_lst.append(path_img)

        print(len(small_img_info_lst))
        return small_img_info_lst


# 神经网络SRCNN
class SRCNN(nn.Module):
    """SRCNN in three conv stages (extraction -> 1x1 mapping ->
    reconstruction), each followed by ReLU, with BatchNorm after the
    first two stages.  All paddings are (k-1)//2, so the output keeps
    the input's spatial size."""

    def __init__(self):
        super(SRCNN, self).__init__()

        # Stage 1: patch extraction / feature representation.
        extraction = [
            nn.Conv2d(IMG_CHANNEL, N_FEATURES1, KERNEL_F1, padding=(KERNEL_F1 - 1) // 2),
            nn.ReLU(True),
            nn.BatchNorm2d(N_FEATURES1),
        ]
        # Stage 2: 1x1 non-linear mapping between feature spaces.
        mapping = [
            nn.Conv2d(N_FEATURES1, N_FEATURES2, 1, padding=0),
            nn.ReLU(True),
            nn.BatchNorm2d(N_FEATURES2),
        ]
        # Stage 3: reconstruction back to image channels.
        reconstruction = [
            nn.Conv2d(N_FEATURES2, IMG_CHANNEL, KERNEL_F3, padding=(KERNEL_F3 - 1) // 2),
            nn.ReLU(True),
        ]
        # Same module order as before, so checkpoint state_dict keys match.
        self.layers = nn.Sequential(*extraction, *mapping, *reconstruction)

    def forward(self, x):
        return self.layers(x)


def visualize_and_save(batch_num, img_tensor_batch, io_mark):
    """Save every image of a batch of CHW float tensors as a JPEG.

    Files are written under the global ``path_test`` directory as
    'SR_{batch}_{index}_{io_mark}.jpg'; ``io_mark`` is 'input' or 'output'.
    Assumes tensor values are nominally in [0, 1].
    """
    for i, img_tensor in enumerate(img_tensor_batch):
        arr = img_tensor.detach().cpu().numpy().transpose((1, 2, 0))
        # Clamp to [0, 1] before the uint8 cast: the network's final ReLU
        # is unbounded above, and np.uint8(x * 255) silently wraps (modulo
        # 256) for x > 1, producing corrupted pixels.
        arr = np.uint8(np.clip(arr, 0.0, 1.0) * 255)
        img = Image.fromarray(arr)
        img.save(os.path.join(path_test, 'SR_{}_{}_{}.jpg'.format(batch_num, i, io_mark)))


def test():
    """Run one pass over ``test_loader``: save each input batch, push it
    through ``srcnn`` on ``device``, and save the super-resolved output."""
    # Put BatchNorm layers in inference mode: in train mode they would use
    # (and update) per-batch statistics, which is unstable at batch size 1.
    srcnn.eval()
    # Inference only — building the autograd graph would just waste memory.
    with torch.no_grad():
        for b, data in enumerate(test_loader):
            visualize_and_save(b, data, 'input')
            batch_in = data.to(device)
            batch_out = srcnn(batch_in)
            visualize_and_save(b, batch_out, 'output')


# Test image directory.
path_test = os.path.join(r'./cache/raw/test/')

# Preprocessing: resize every test image to the network's input size and
# convert it to a [0, 1] float tensor.
test_transforms = transforms.Compose([
    transforms.Resize((LARGE_SIZE, LARGE_SIZE)),
    transforms.ToTensor(),
])

# Test data.
test_dataset = MyTestDataset(data_dir=path_test,
                             pre_transform=test_transforms)

# shuffle=False keeps the saved file names ('SR_<batch>_<idx>_...') in a
# stable, reproducible order; shuffling brings no benefit at test time.
test_loader = DataLoader(dataset=test_dataset,
                         batch_size=TEST_BATCH_SIZE,
                         shuffle=False)

# Model.
srcnn = SRCNN().to(device)

# Resume from a checkpoint when one is configured.  map_location=device
# covers both the CUDA and the CPU case in a single call.
if LOAD_PATH is not None:
    checkpoint = torch.load(LOAD_PATH, map_location=device)
    srcnn.load_state_dict(checkpoint['model_state_dict'])
    load_epoch = checkpoint['epoch']
else:
    load_epoch = 0
print(f'load epoch: {load_epoch}')

# Run the test pass.
print('start testing...')
test()
print('test done.')

# import moxing as mox
# from naie.context import Context
# mox.file.copy_parallel(CHECKPOINT_DIR, os.path.join(Context.get_output_path(level='algo'), 'checkpoints'))
