import numpy as np

class SegmentationMetric(object):
    """Accumulate a confusion matrix over (prediction, label) pairs and
    derive standard semantic-segmentation metrics from it.

    Confusion-matrix convention (see genConfusionMatrix): rows index the
    ground-truth class, columns index the predicted class.
    """

    def __init__(self, numClass):
        # numClass: number of valid classes; label values outside
        # [0, numClass) are ignored when building the matrix.
        self.numClass = numClass
        self.confusionMatrix = np.zeros((self.numClass,) * 2)

    def pixelAccuracy(self):
        """Overall pixel accuracy: PA = (TP + TN) / (TP + TN + FP + FN)."""
        acc = np.diag(self.confusionMatrix).sum() / self.confusionMatrix.sum()
        return acc

    def classPixelAccuracy(self):
        """Per-class accuracy over ground-truth rows: diag / row-sum.

        Returns an array like [0.90, 0.80, 0.96] (one entry per class).
        Classes absent from the ground truth yield NaN (0/0), which
        np.nanmean skips later.
        """
        classAcc = np.diag(self.confusionMatrix) / self.confusionMatrix.sum(axis=1)
        return classAcc

    def meanPixelAccuracy(self):
        """Mean of the per-class accuracies, ignoring NaN entries.

        e.g. np.nanmean([0.90, 0.80, 0.96, nan, nan]) = (0.90+0.80+0.96)/3.
        """
        classAcc = self.classPixelAccuracy()
        meanAcc = np.nanmean(classAcc)
        return meanAcc

    def meanIntersectionOverUnion(self):
        """mIoU: mean over classes of IoU = TP / (TP + FP + FN), skipping NaN."""
        # Diagonal = per-class intersection (TP).
        intersection = np.diag(self.confusionMatrix)
        # Row sum + column sum - diagonal = per-class union (TP + FP + FN).
        union = np.sum(self.confusionMatrix, axis=1) + np.sum(self.confusionMatrix, axis=0) - np.diag(
            self.confusionMatrix)
        IoU = intersection / union
        mIoU = np.nanmean(IoU)
        return mIoU

    def genConfusionMatrix(self, imgPredict, imgLabel):
        """Build a numClass x numClass confusion matrix for one sample.

        Same scheme as fast_hist() in FCN's score.py. Pixels whose label is
        outside [0, numClass) are excluded. Predictions are assumed to lie
        in [0, numClass) — TODO(review): confirm with callers; out-of-range
        predictions would corrupt the bincount.
        """
        # Accept lists / torch tensors and force a wide integer dtype so
        # numClass * label + pred cannot overflow narrow image dtypes
        # (e.g. uint8) and bincount receives integers.
        imgPredict = np.asarray(imgPredict)
        imgLabel = np.asarray(imgLabel)
        mask = (imgLabel >= 0) & (imgLabel < self.numClass)
        label = (self.numClass * imgLabel[mask].astype(np.int64)
                 + imgPredict[mask].astype(np.int64))
        count = np.bincount(label, minlength=self.numClass ** 2)
        confusionMatrix = count.reshape(self.numClass, self.numClass)
        return confusionMatrix

    def Frequency_Weighted_Intersection_over_Union(self):
        """FWIoU = sum over present classes of [(TP+FN)/total] * [TP/(TP+FP+FN)]."""
        freq = np.sum(self.confusionMatrix, axis=1) / np.sum(self.confusionMatrix)
        iu = np.diag(self.confusionMatrix) / (
                np.sum(self.confusionMatrix, axis=1) + np.sum(self.confusionMatrix, axis=0) -
                np.diag(self.confusionMatrix))
        # Only classes that actually occur (freq > 0) contribute.
        FWIoU = (freq[freq > 0] * iu[freq > 0]).sum()
        return FWIoU

    def addBatch(self, imgPredict, imgLabel):
        """Accumulate one (prediction, label) pair into the running matrix."""
        assert imgPredict.shape == imgLabel.shape
        self.confusionMatrix += self.genConfusionMatrix(imgPredict, imgLabel)

    def reset(self):
        """Zero the confusion matrix (call between epochs / phases)."""
        self.confusionMatrix = np.zeros((self.numClass, self.numClass))


import os
import numpy as np
import random
import torch
import cv2
import torchvision.transforms as transforms
from torch.utils.data.dataset import Dataset

# Raw pixel values used in the label images; the index of a value in this
# list is its contiguous class id (100 -> 0, ..., 800 -> 7).
matches = [100, 200, 300, 400, 500, 600, 700, 800]
images_path = './train/train/image'
labels_path = './train/train/label'
# Directory listings, used only to size the train/val split below.
img_name_list = os.listdir(images_path)
label_name_list = os.listdir(labels_path)
# First 90% of the samples go to training, the remainder to validation.
training_samples = int(len(img_name_list) * 0.9)

def get_img_label_paths(images_path, labels_path):
    """Pair every regular file under images_path with its label path.

    For a file <stem>.<ext> in images_path the pair is
    (images_path/<stem>.tif, labels_path/<stem>.png) — note the image side
    is always given a .tif suffix, whatever the original extension was.
    """
    pairs = []
    for entry in os.listdir(images_path):
        # Skip sub-directories and other non-file entries.
        if not os.path.isfile(os.path.join(images_path, entry)):
            continue
        stem = os.path.splitext(entry)[0]
        pairs.append((os.path.join(images_path, stem + ".tif"),
                      os.path.join(labels_path, stem + ".png")))
    return pairs


# Image preprocessing: HWC uint8 -> CHW float tensor in [0, 1], then
# normalisation with the ImageNet channel means / stds.
img_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])


class MaskToTensor(object):
    """Callable that converts a label mask (array-like) to a torch LongTensor."""

    def __call__(self, mask):
        # Materialise as an int32 ndarray first (handles lists / PIL images),
        # then widen to int64 as expected for CrossEntropyLoss targets.
        as_int32 = np.array(mask, dtype=np.int32)
        return torch.from_numpy(as_int32).long()


label_transform = MaskToTensor()


class RSDataset(Dataset):
    """Remote-sensing segmentation dataset over (image_path, label_path) pairs.

    Splits the given (pre-shuffled) pair list into train / validation parts
    using the module-level ``training_samples`` cutoff (first 90% train).
    """

    def __init__(self, img_label_pairs, img_transform, label_transform, train=True):
        # Keep only the slice belonging to the requested split.
        if train:
            self.img_label_path = img_label_pairs[:training_samples]
        else:
            self.img_label_path = img_label_pairs[training_samples:]

        self.img_transform = img_transform
        self.label_transform = label_transform

    def __getitem__(self, index):
        img_path, label_path = self.img_label_path[index]
        # IMREAD_UNCHANGED keeps the original channel count / bit depth.
        img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
        label = cv2.imread(label_path, cv2.IMREAD_UNCHANGED)

        # Remap raw label values (100..800) to contiguous class ids (0..7).
        # enumerate avoids the O(n) matches.index() lookup per class; the
        # replacement ids are all < 100, so no remapped pixel can collide
        # with a not-yet-processed raw value.
        for class_id, raw_value in enumerate(matches):
            label[label == raw_value] = class_id
        # No one-hot encoding needed: nn.CrossEntropyLoss takes the integer
        # class map directly and one-hots internally.

        return self.img_transform(img), self.label_transform(label)

    def __len__(self):
        return len(self.img_label_path)

# Build the full (image, label) path list once and shuffle it so the 90/10
# train/val split inside RSDataset is random.
img_label_pairs = get_img_label_paths(images_path, labels_path)
random.shuffle(img_label_pairs)

train_loader = torch.utils.data.DataLoader(
    RSDataset(img_label_pairs, img_transform, label_transform, train=True),
    batch_size=32, shuffle=True, num_workers=16, pin_memory=True
)

# Validation: smaller batches, fixed order.
val_loader = torch.utils.data.DataLoader(
    RSDataset(img_label_pairs, img_transform, label_transform, train=False),
    batch_size=4, shuffle=False, num_workers=16, pin_memory=True
)



import torch
import torch.nn as nn


class EncoderBlock_(nn.Module):
    """Conv3x3 -> BatchNorm -> ReLU, optional Dropout, then 2x2 max-pool.

    Each block halves the spatial resolution of its input.
    """

    def __init__(self, in_channels, out_channels, dropout=False):
        super(EncoderBlock_, self).__init__()
        conv_bn_relu = [
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        # Optional regularisation, then the down-sampling pool.
        tail = ([nn.Dropout()] if dropout else []) + [nn.MaxPool2d(kernel_size=2, stride=2)]
        self.encode = nn.Sequential(*(conv_bn_relu + tail))

    def forward(self, x):
        return self.encode(x)


class DecoderBlock_(nn.Module):
    """Conv3x3 -> BatchNorm -> ReLU, then nearest-neighbour 2x upsampling.

    Each block doubles the spatial resolution of its input.
    """

    def __init__(self, in_channels, out_channels):
        super(DecoderBlock_, self).__init__()
        ops = [
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.UpsamplingNearest2d(scale_factor=2),
        ]
        self.decode = nn.Sequential(*ops)

    def forward(self, x):
        return self.decode(x)


class unet(nn.Module):
    """Lightweight U-Net style encoder/decoder for semantic segmentation.

    The encoder halves the resolution four times; the decoder restores it
    with nearest-neighbour upsampling, concatenating encoder activations as
    skip connections. Output is raw per-class logits at input resolution.
    """

    def __init__(self, num_classes):
        super(unet, self).__init__()
        # Encoder stages (each halves H and W).
        self.enc1 = EncoderBlock_(3, 64)
        self.enc2 = EncoderBlock_(64, 128)
        self.enc3 = EncoderBlock_(128, 256)
        self.enc4 = EncoderBlock_(256, 256)

        # Decoder stages (each doubles H and W); the +N channel counts
        # account for the concatenated skip tensors.
        self.dec4 = DecoderBlock_(256, 512)
        self.dec3 = DecoderBlock_(512 + 256, 256)
        self.dec2 = DecoderBlock_(256 + 128, 128)
        self.dec1 = DecoderBlock_(128 + 64, 64)

        # Feature-refinement head before the classifier.
        self.dec0 = nn.Sequential(
            nn.Conv2d(64, 32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
        )

        self.final = nn.Conv2d(32, num_classes, kernel_size=3, padding=1)

    def forward(self, x):
        # Encoder path — keep each activation for the skip connections.
        skip1 = self.enc1(x)
        skip2 = self.enc2(skip1)
        skip3 = self.enc3(skip2)
        bottom = self.enc4(skip3)

        # Decoder path with channel-wise skip concatenation.
        up4 = self.dec4(bottom)
        up3 = self.dec3(torch.cat([skip3, up4], dim=1))
        up2 = self.dec2(torch.cat([skip2, up3], dim=1))
        up1 = self.dec1(torch.cat([skip1, up2], dim=1))

        # Return raw logits: nn.CrossEntropyLoss applies softmax internally
        # and expects integer (non-one-hot) labels.
        return self.final(self.dec0(up1))



import torch
import torch.nn as nn


class EncoderBlock_(nn.Module):
    """Conv3x3 -> BatchNorm -> ReLU, optional Dropout, then 2x2 max-pool.

    Halves the spatial resolution. (This definition duplicates the one
    earlier in the file — likely two concatenated copies of model.py.)
    """

    def __init__(self, in_channels, out_channels, dropout=False):
        super(EncoderBlock_, self).__init__()
        stages = [
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        if dropout:
            stages.append(nn.Dropout())
        # Down-sampling pool comes last.
        stages.append(nn.MaxPool2d(kernel_size=2, stride=2))
        self.encode = nn.Sequential(*stages)

    def forward(self, x):
        return self.encode(x)


class DecoderBlock_(nn.Module):
    """Conv3x3 -> BatchNorm -> ReLU, then nearest-neighbour 2x upsampling.

    Doubles the spatial resolution. (Duplicate of the definition earlier
    in the file.)
    """

    def __init__(self, in_channels, out_channels):
        super(DecoderBlock_, self).__init__()
        self.decode = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.UpsamplingNearest2d(scale_factor=2),
        )

    def forward(self, x):
        return self.decode(x)


class unet(nn.Module):
    """Lightweight U-Net style encoder/decoder producing per-class logits.

    (Duplicate of the definition earlier in the file.) The encoder halves
    the resolution four times; the decoder restores it via nearest-neighbour
    upsampling with skip concatenations.
    """

    def __init__(self, num_classes):
        super(unet, self).__init__()
        # Encoder stages (each halves H and W).
        self.enc1 = EncoderBlock_(3, 64)
        self.enc2 = EncoderBlock_(64, 128)
        self.enc3 = EncoderBlock_(128, 256)
        self.enc4 = EncoderBlock_(256, 256)

        # Decoder stages; +N channels account for concatenated skips.
        self.dec4 = DecoderBlock_(256, 512)
        self.dec3 = DecoderBlock_(512 + 256, 256)
        self.dec2 = DecoderBlock_(256 + 128, 128)
        self.dec1 = DecoderBlock_(128 + 64, 64)

        # Feature-refinement head before the classifier.
        self.dec0 = nn.Sequential(
            nn.Conv2d(64, 32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
        )

        self.final = nn.Conv2d(32, num_classes, kernel_size=3, padding=1)

    def forward(self, x):
        # Encoder path — keep activations for the skip connections.
        skip1 = self.enc1(x)
        skip2 = self.enc2(skip1)
        skip3 = self.enc3(skip2)
        bottom = self.enc4(skip3)

        # Decoder path with channel-wise skip concatenation.
        up4 = self.dec4(bottom)
        up3 = self.dec3(torch.cat([skip3, up4], dim=1))
        up2 = self.dec2(torch.cat([skip2, up3], dim=1))
        up1 = self.dec1(torch.cat([skip1, up2], dim=1))

        # Raw logits: nn.CrossEntropyLoss softmaxes internally and expects
        # integer (non-one-hot) labels.
        return self.final(self.dec0(up1))



import torch
import torch.nn as nn
from tqdm import tqdm
import os
# from dataloader import train_loader, val_loader
# from model import unet
# from utils.metrics import SegmentationMetric
# os.environ['CUDA_VISIBLE_DEVICES'] = '3'

# Eight target classes (raw label values 100..800 map to ids 0..7).
num_classes = 8
# Module-level metric instance shared by train() and validate().
metric = SegmentationMetric(num_classes)

def train(train_loader, model, criterion, optimizer):
    """Run one training epoch.

    Args:
        train_loader: DataLoader yielding (image, label) batches.
        model: segmentation network producing per-class logits.
        criterion: loss taking (logits, long labels), e.g. CrossEntropyLoss.
        optimizer: optimizer wrapping model.parameters().

    Returns:
        (mean per-sample loss, FWIoU over the whole epoch's confusion matrix).
    """
    train_loss_sum, n = 0.0, 0
    model.train()
    # The module-level `metric` is shared with validate(); reset it so the
    # reported FWIoU reflects this epoch only instead of accumulating
    # across epochs and phases.
    metric.reset()

    for param_group in optimizer.param_groups:
        print("the lr is ", param_group['lr'])

    print("start ")
    for input, target in tqdm(train_loader):
        input = input.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)

        output = model(input)
        loss = criterion(output, target.long())

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Fold every sample of the batch into the running confusion matrix.
        for i in range(output.shape[0]):
            pre = output[i, :, :, :].argmax(axis=0).cpu()
            label = target[i, :, :].cpu()
            metric.addBatch(pre, label)

        # criterion returns the batch-mean loss; weight by the batch size so
        # dividing by n below yields a true per-sample mean even when the
        # last batch is smaller.
        train_loss_sum += loss.item() * target.shape[0]
        n += target.shape[0]

    # Compute FWIoU once on the full epoch matrix (a per-sample running
    # average of cumulative FWIoU values would be statistically meaningless).
    return train_loss_sum / n, metric.Frequency_Weighted_Intersection_over_Union()

def adjust_lr(optimizer, epoch):
    """Step-decay schedule: lr = 0.01 * 0.1**(epoch // 8).

    Overwrites the learning rate of every parameter group in-place.
    """
    decay_steps = epoch // 8
    new_lr = 0.01 * (0.1 ** decay_steps)
    for group in optimizer.param_groups:
        group['lr'] = new_lr

def validate(val_loader, model, criterion):
    """Run one validation epoch without gradient tracking.

    Args:
        val_loader: DataLoader yielding (image, label) batches.
        model: segmentation network producing per-class logits.
        criterion: loss taking (logits, long labels).

    Returns:
        (mean per-sample loss, FWIoU over the whole epoch's confusion matrix).
    """
    val_loss_sum, n = 0.0, 0
    model.eval()
    # Clear the shared metric so training batches don't leak into val FWIoU.
    metric.reset()
    # Inference only: no_grad avoids building the autograd graph (memory/time).
    with torch.no_grad():
        for input, target in tqdm(val_loader):
            input = input.cuda()
            target = target.cuda()

            output = model(input)
            loss = criterion(output, target.long())

            # Fold every sample into the running confusion matrix.
            for i in range(output.shape[0]):
                pre = output[i, :, :, :].argmax(axis=0).cpu()
                label = target[i, :, :].cpu()
                metric.addBatch(pre, label)

            # Batch-mean loss weighted by batch size -> true per-sample mean.
            val_loss_sum += loss.item() * target.shape[0]
            n += target.shape[0]

    # Single FWIoU over the full epoch matrix.
    return val_loss_sum / n, metric.Frequency_Weighted_Intersection_over_Union()

from model1 import SCSEUnet
#model = unet(num_classes=num_classes).cuda()
# NOTE(review): SCSEUnet(3, num_classes) — presumably (in_channels, n_classes);
# confirm against model1's signature.
model = SCSEUnet(3,num_classes).cuda()
# CrossEntropyLoss expects raw logits and integer class labels.
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# NOTE(review): arbitrary large sentinel; float('inf') would be clearer.
best_loss = 2020.0
epochs = 30

for epoch in range(epochs):
    # Step-decay schedule: lr shrinks x0.1 every 8 epochs.
    adjust_lr(optimizer,epoch)
    train_loss, train_FWIoU = train(train_loader, model, criterion, optimizer)
    val_loss, val_FWIoU = validate(val_loader, model, criterion)
    # scheduler.step()

    print('Epoch %d: train loss %.4f, train FWIoU %.3f, val_loss %.4f, val_FWIoU %.3f'
          % (epoch, train_loss, train_FWIoU, val_loss, val_FWIoU))
    # Checkpoint the weights whenever validation loss improves.
    if val_loss < best_loss:
        best_loss = val_loss
        torch.save(model.state_dict(), 'baseline.pt')




