#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : baseline.py
# @Author: Richard Chiming Xu
# @Date  : 2023/6/13
# @Desc  :
import glob
import time

import cv2
import numpy as np
import torch

torch.manual_seed(0)
# cudnn benchmark mode trades exact run-to-run determinism for speed.
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True

import torchvision.models as models
import torch.nn as nn
from torch.utils.data.dataset import Dataset
import albumentations as A

# Collect data paths: training images live under per-class subfolders
# (data/train/<class>/<img>), test images directly under data/test.
train_path = glob.glob('data/train/*/*')
test_path = glob.glob('data/test/*')

# NOTE(review): numpy's RNG is not seeded, so this shuffle (and therefore
# the train/val split made in __main__) differs between runs — confirm
# that is intended.
np.random.shuffle(train_path)
np.random.shuffle(test_path)
print(train_path[:2])

# Global image cache keyed by path — presumably meant to avoid repeated
# disk reads, but nothing visible populates it; confirm still needed.
DATA_CACHE = {}


# Custom dataset: loads images from disk and derives labels from the path.
class XunFeiDataset(Dataset):
    """Image-classification dataset for the d1..d9 folder layout.

    The integer label is derived from the image's parent directory name:
    'd1'..'d9' map to 0..8; any other parent (e.g. test images under
    data/test) yields the dummy label -1.
    """

    # Class folder names in label order (list index == integer label).
    CLASSES = ['d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8', 'd9']

    def __init__(self, img_path, transform=None):
        """img_path: list of image file paths.
        transform: optional albumentations Compose applied per image."""
        self.img_path = img_path
        # The original if/else stored the same value on both branches;
        # a plain assignment is equivalent.
        self.transform = transform

    @staticmethod
    def _label_from_path(path):
        """Map a forward-slash path to its integer label, or -1 if the
        parent folder is not one of the known classes."""
        folder = path.split('/')[-2] if '/' in path else ''
        try:
            return XunFeiDataset.CLASSES.index(folder)
        except ValueError:
            return -1

    def __getitem__(self, index):
        # Normalize Windows separators so the label lookup below works.
        path = self.img_path[index].replace('\\', '/')
        img = cv2.imread(path)
        if img is None:
            # cv2.imread silently returns None on a bad/missing file;
            # fail loudly here instead of crashing inside the transform.
            raise FileNotFoundError('could not read image: ' + path)

        # Apply data augmentation when configured.
        if self.transform is not None:
            img = self.transform(image=img)['image']

        label = self._label_from_path(path)
        # HWC -> CHW, the layout PyTorch conv layers expect.
        img = img.transpose([2, 0, 1])
        return img, torch.from_numpy(np.array(label))

    def __len__(self):
        return len(self.img_path)


# Network definition
class XunFeiNet(nn.Module):
    """ResNet-18 backbone with a 9-way classification head."""

    def __init__(self):
        super(XunFeiNet, self).__init__()
        # NOTE(review): the positional `True` means pretrained=True; newer
        # torchvision versions deprecate this in favour of `weights=`.
        backbone = models.resnet18(True)
        # Replace the pooling and classifier head for our 9 classes.
        backbone.avgpool = nn.AdaptiveAvgPool2d(1)
        backbone.fc = nn.Linear(512, 9)
        self.resnet = backbone

    def forward(self, img):
        """Return raw class logits for a batch of images."""
        return self.resnet(img)


# Validation: measure top-1 accuracy over a labelled loader.
def validate(val_loader, model, criterion):
    """Evaluate *model* on *val_loader* and return top-1 accuracy.

    Args:
        val_loader: DataLoader yielding (image, label) batches.
        model: classification network; batches are moved to whatever
            device its parameters live on (cuda in this script's driver,
            but CPU also works now).
        criterion: kept for interface compatibility; the loss the original
            computed with it was discarded, so it is no longer evaluated.

    Returns:
        float: fraction of correctly classified samples in the dataset.
    """
    model.eval()
    # Infer the device from the model instead of hard-coding 'cuda' so the
    # function also runs on CPU-only machines (backward compatible).
    device = next(model.parameters()).device

    correct = 0
    with torch.no_grad():
        for image_data, target in val_loader:
            image_data = image_data.to(device)
            target = target.to(device)

            output = model(image_data)
            correct += (output.argmax(1) == target).sum().item()

    # Normalize by dataset size (not batch count) to get per-sample accuracy.
    return correct / len(val_loader.dataset)


# Inference: collect raw model outputs (logits) for every test batch.
def predict(test_loader, model, criterion):
    """Run *model* over *test_loader* and return the stacked predictions.

    Args:
        test_loader: DataLoader yielding (image, label) batches; the labels
            are ignored (test samples carry the dummy label -1).
        model: classification network; batches are moved to its device.
        criterion: unused, kept for interface compatibility with validate().

    Returns:
        np.ndarray of shape (num_samples, num_classes) with raw logits.
    """
    model.eval()
    # Infer the device from the model instead of hard-coding 'cuda'.
    device = next(model.parameters()).device

    test_pred = []
    with torch.no_grad():
        for image_data, _target in test_loader:
            image_data = image_data.to(device)

            # BUG FIX: the original called model(image_data.long()), which
            # truncated the normalized float images to integers and fed the
            # wrong dtype to the conv layers. Feed the float batch as-is.
            output = model(image_data)
            test_pred.append(output.cpu().numpy())

    return np.vstack(test_pred)


# Training: one full pass over the training loader.
def train(train_loader, model, criterion, optimizer):
    """Train *model* for one epoch and return the mean per-batch loss.

    Args:
        train_loader: DataLoader yielding (image, label) batches.
        model: classification network; batches are moved to its device.
        criterion: loss function (CrossEntropyLoss in this script).
        optimizer: optimizer stepping the model's parameters.

    Returns:
        float: total loss divided by the number of batches.
    """
    model.train()
    # Infer the device from the model instead of hard-coding 'cuda'.
    device = next(model.parameters()).device

    train_loss = 0.0
    # (Removed a leftover debug `print(2222)` from the original.)
    for i, (image_data, target) in enumerate(train_loader):
        image_data = image_data.to(device)
        target = target.to(device)

        # Forward pass; CrossEntropyLoss needs integer class targets.
        output = model(image_data)
        loss = criterion(output, target.long())

        # Backward pass and parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Periodic progress logging.
        if i % 20 == 0:
            print('Train loss', loss.item())

        train_loss += loss.item()

    return train_loss / len(train_loader)


if __name__ == '__main__':

    # Build the dataloaders. Training uses heavy augmentation; all but the
    # last 1000 shuffled paths go to training, the rest to validation.
    train_loader = torch.utils.data.DataLoader(
        XunFeiDataset(train_path[:-1000],
                      A.Compose([
                          A.RandomRotate90(),
                          A.Resize(256, 256),
                          A.RandomCrop(224, 224),
                          A.HorizontalFlip(p=0.5),
                          # NOTE(review): RandomContrast is deprecated in newer
                          # albumentations and overlaps with the
                          # RandomBrightnessContrast below — confirm both are wanted.
                          A.RandomContrast(p=0.5),
                          A.RandomBrightnessContrast(p=0.5),
                          A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
                      ])
                      ), batch_size=30, shuffle=True, num_workers=1, pin_memory=False
    )

    # Validation loader: last 1000 shuffled training paths, resize/crop only.
    val_loader = torch.utils.data.DataLoader(
        XunFeiDataset(train_path[-1000:],
                      A.Compose([
                          A.Resize(256, 256),
                          A.RandomCrop(224, 224),
                          # A.HorizontalFlip(p=0.5),
                          # A.RandomContrast(p=0.5),
                          A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
                      ])
                      ), batch_size=30, shuffle=False, num_workers=1, pin_memory=False
    )

    # NOTE(review): the test loader applies RANDOM augmentation (crop, flip,
    # contrast), which makes predictions non-deterministic — presumably
    # intended as test-time augmentation; confirm, otherwise mirror the
    # deterministic val_loader transforms here.
    test_loader = torch.utils.data.DataLoader(
        XunFeiDataset(test_path,
                      A.Compose([
                          A.Resize(256, 256),
                          A.RandomCrop(224, 224),
                          A.HorizontalFlip(p=0.5),
                          A.RandomContrast(p=0.5),
                          A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
                      ])
                      ), batch_size=30, shuffle=False, num_workers=1, pin_memory=False
    )
    # Build the model, loss and optimizer on the GPU.
    model = XunFeiNet()
    model = model.to('cuda')
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.AdamW(model.parameters(), 0.001)
    # Train for 3 epochs, reporting loss and accuracies after each.
    for i in range(3):
        print('-' * 15, i, '-' * 15)
        train_loss = train(train_loader, model, criterion, optimizer)
        val_acc = validate(val_loader, model, criterion)
        # NOTE(review): train accuracy is measured through the augmented
        # train_loader, so it is noisier than the underlying train accuracy.
        train_acc = validate(train_loader, model, criterion)

        print(train_loss, train_acc, val_acc)
