# encoding: utf-8
import os

import numpy as np

import torch
from torch.utils import data
from model import Unet
from torchvision import transforms
from PIL import Image
from tqdm import tqdm
from utils.images import batch_tensor_to_img

# Shared preprocessing: resize to a fixed 600x600 and convert to a float
# tensor in [0, 1] so origin/label image pairs align pixel-for-pixel.
transform = transforms.Compose([
    transforms.Resize((600, 600)),
    transforms.ToTensor(),
])

# Checkpoint directory. exist_ok avoids the check-then-create race of the
# os.path.exists + os.mkdir pattern.
os.makedirs("./models", exist_ok=True)
# dataloader
class DataGen(data.Dataset):
    def __init__(self, a_path, b_path, transform):
        self.data = []
        self.label = []
        self.transform = transform
        self.load_data(a_path, b_path)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        # img_a = self.transform(self.data[index])
        # img_b = self.transform(self.label[index])
        # img_a = self.transform(Image.open(self.data[index]).convert("RGB"))
        # img_b = self.transform(Image.open(self.label[index]).convert("RGB"))
        # return img_a, img_b
        return self.data[index], self.label[index]

    def load_data(self, a_path, b_path):
        img_a = Image.open(a_path).convert("RGB")
        img_b = Image.open(b_path).convert("RGB")
        self.data.append(self.transform(img_a))
        self.label.append(self.transform(img_b))
        # self.data.append(a_path)
        # self.label.append(b_path)


class DataGen2(data.Dataset):
    """Directory dataset: pairs ``<root>/origin/<name>`` with ``<root>/label/<name>``.

    All pairs are loaded and transformed eagerly in the constructor; label
    files with no matching origin file are skipped silently.
    """

    def __init__(self, file_path: str):
        self.data = []    # transformed input tensors
        self.label = []   # transformed target tensors
        self.transform = transform  # module-level transform pipeline
        self.load_data(file_path)

    def __len__(self) -> int:
        return len(self.data)

    def __getitem__(self, index):
        return self.data[index], self.label[index]

    def load_data(self, file_path: str) -> None:
        """Scan the label directory and load every pair that has an origin.

        Images are converted to RGB (matching DataGen) so grayscale,
        palette and RGBA files all become 3-channel tensors compatible
        with Unet(n_channels=3).
        """
        labels = os.listdir(os.path.join(file_path, "label"))
        for file_name in labels:  # e.g. AP3361_0.jpg
            origin_path = os.path.join(file_path, "origin", file_name)
            if os.path.exists(origin_path):
                img_a = Image.open(origin_path).convert("RGB")
                img_b = Image.open(os.path.join(file_path, "label", file_name)).convert("RGB")
                self.data.append(self.transform(img_a))
                self.label.append(self.transform(img_b))

# --- training setup -------------------------------------------------------
# Demo run on a single image pair; use DataGen2("<root>") instead to train
# on a directory of origin/label pairs.
datasets = DataGen("./datasdemo/a.png", "./datasdemo/b.png", transform)
batch_size = 1
dataloader = data.DataLoader(datasets, batch_size=batch_size)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
epochs = 10
# Pixel-wise regression between the predicted and the target image.
criterion = torch.nn.MSELoss()

# 3-channel input -> 3-channel output (RGB image-to-image translation).
model = Unet(n_channels=3, n_classes=3)
model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

for epoch in range(epochs):
    model.train()
    loss_sum = 0.0  # running sum of per-batch losses for this epoch

    for img, label in tqdm(dataloader):
        img = img.to(device).float()
        label = label.to(device).float()
        out = model(img)
        loss = criterion(out, label)
        optimizer.zero_grad()
        loss.backward()   # back-propagate
        optimizer.step()  # update weights
        # .item() extracts the Python scalar directly, avoiding the
        # deprecated .data attribute and a needless numpy round-trip.
        loss_sum += loss.item()
        # Dump this batch's predictions as images for visual inspection.
        batch_tensor_to_img(epoch, out)

    print(f"epoch={epoch + 1}, loss={loss_sum}")
    # Persist a checkpoint every epoch; ./models is created at startup.
    torch.save(model.state_dict(), f"./models/model_epoch{epoch}_loss_{loss_sum}.pt")

