# Imports
import os
import time
from dataclasses import dataclass, field

import cv2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorboard as tb
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.models as models
from PIL import Image
from tensorboardX import SummaryWriter
from torch.optim import lr_scheduler
from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler
from torchvision import datasets, transforms, utils

# %matplotlib inline (Jupyter magic; inert in a plain script)
plt.style.use('ggplot')

# Configuration dataclasses
@dataclass
class SystemConfig:
    """Reproducibility settings: RNG seed and cuDNN behavior flags.

    Without type annotations @dataclass treated these as plain class
    attributes and generated zero fields; annotations make them real
    dataclass fields while keeping the same defaults and attribute access.
    """
    seed: int = 11
    cudnn_benchmark_enabled: bool = True
    cudnn_deterministic: bool = True
@dataclass
class TrainingConfig:
    """Hyper-parameters and bookkeeping for one training run.

    Annotations are required for @dataclass to generate fields; the
    mutable list defaults must use field(default_factory=...) because a
    shared list default is rejected by dataclasses.
    """
    device: str = 'cuda'
    model_save_best: bool = True      # keep only the best checkpoint
    batch_size: int = 64
    epochs_count: int = 50
    log_interval: int = 5             # batches between log lines
    test_interval: int = 1            # epochs between validation passes
    model_name: str = 'BaseVGG'
    num_workers: int = 10
    num_classes: int = 43
    data_augmentation: bool = False
    # Per-channel dataset statistics (precomputed once via get_mean_std()).
    mean: list = field(default_factory=lambda: [0.3447, 0.3131, 0.3243])
    std: list = field(default_factory=lambda: [0.1565, 0.1575, 0.1670])
@dataclass
class DataConfig:
    """Filesystem layout for the dataset and the run artifacts.

    Annotations are required for @dataclass to generate fields (the
    original had none, so the decorator was a no-op).
    """
    root_dir: str = '../DataSet3'
    train_dir: str = 'Train/'
    test_dir: str = 'Test/'
    train_csv: str = 'Train.csv'
    test_csv: str = 'Test.csv'
    # Destinations for model checkpoints and TensorBoard logs.
    model_dir: str = os.path.join('./../records', 'models/')
    log_dir: str = os.path.join('./../records', 'logs/')
@dataclass
class OptimizerConfig:
    """Adam optimizer and StepLR scheduler hyper-parameters.

    Annotations are required for @dataclass to generate fields.
    """
    init_learning_rate: float = 0.0001
    weight_decay: float = 0.0001       # L2 regularization strength
    scheduler_step_size: int = 6       # epochs between LR decays
    scheduler_gamma: float = 0.1       # multiplicative LR decay factor
def setup_system(system_config: "SystemConfig") -> None:
    """Seed PyTorch and configure cuDNN for reproducible runs.

    Args:
        system_config: carries the RNG seed and the cuDNN flags.
    """
    torch.manual_seed(system_config.seed)
    if torch.cuda.is_available():
        # Bug fix: the original assigned torch.backends.cudnn_benchmark_enabled,
        # which is not a real attribute (silently a no-op); the actual flag is
        # torch.backends.cudnn.benchmark. Also dropped an unused `device` local.
        torch.backends.cudnn.benchmark = system_config.cudnn_benchmark_enabled
        torch.backends.cudnn.deterministic = system_config.cudnn_deterministic
# Instantiate the configuration objects, seed the framework, and prepare
# the output directories for checkpoints and TensorBoard logs.
sc = SystemConfig()
tc = TrainingConfig()
dc = DataConfig()
oc = OptimizerConfig()
setup_system(sc)
# exist_ok=True replaces the racy exists()-then-makedirs() pattern.
for path in [dc.log_dir, dc.model_dir]:
    os.makedirs(path, exist_ok=True)
tb_writer = SummaryWriter(dc.log_dir)
# Load the train/test CSV index files and print a quick preview.
train_file = os.path.join(dc.root_dir, dc.train_csv)
test_file = os.path.join(dc.root_dir, dc.test_csv)

df_train = pd.read_csv(train_file)
print(df_train.head())

df_test = pd.read_csv(test_file)
print(df_test.head())

print(f'Train csv shape: {df_train.shape}, \nTest csv shape: {df_test.shape}')

# Collect one sample image per class while scanning the training index.
# iterrows() yields (row index, row Series) pairs.
class_list = []
imgs = []
for index, data in df_train.iterrows():
    class_id, file_name = data['ClassId'], data['Path']
    # First time we see a class, keep its image for the preview grid.
    if class_id not in class_list:
        class_list.append(class_id)
        imgs.append(mpimg.imread(os.path.join(dc.root_dir, file_name)))
    if index % 10000 == 0:
        print(f'Currently on row {index} of 39209')


# Render one sample image from each class in a grid and save to disk.
plt.figure(figsize=(20, 15))
plt.suptitle('Sample images from each class')
columns = 10
rows = int(len(imgs) / columns + 1)
for i, image in enumerate(imgs):
    plt.subplot(rows, columns, i + 1)
    plt.title(f'Class: {i+1}', color='black')
    plt.imshow(image)
# plt.show()
plt.savefig("../save/image1.png")
plt.close()

# Plot the number of samples per class and save the figure.
plt.figure(figsize=(15, 15))
# Bug fix: the original passed x.index.sort_values() (class ids in ascending
# order) with height=x (counts still in descending-frequency order), so bar
# positions and heights did not correspond. sort_index() aligns them.
# Also dropped `c = df_train['ClassId'].nunique()`, which was never used.
counts = df_train['ClassId'].value_counts().sort_index()

plt.bar(x=counts.index, height=counts, color='#0066ff')
plt.title('Distibution of occurences in each class', color='black')
plt.xlabel("Classes", color='black')
plt.ylabel("Occurences", color='black')
plt.tick_params(colors='black')
# plt.show()
plt.savefig("../save/image2.png")
plt.close()

# Custom training dataset
class GTSR43Dataset(Dataset):
    """GTSRB training dataset backed by a CSV index file.

    The CSV must contain a 'Path' column (image file relative to root_dir)
    and a 'ClassId' column (integer label) — the same columns the preview
    code reads by name.
    """

    def __init__(self, root_dir, train_file, transform=None):
        self.root_dir = root_dir
        self.train_file_path = train_file
        self.label_df = pd.read_csv(os.path.join(self.root_dir, self.train_file_path))
        self.transform = transform
        # Class ids in order of first appearance; class_to_index depends on it.
        self.classes = list(self.label_df['ClassId'].unique())

    def __getitem__(self, idx):
        """Return the (image, target-index) pair at row idx.

        Columns are selected by name instead of positional iloc indices
        (previously 7 for path and 6 for class), so a reordered CSV cannot
        silently swap path and label.
        """
        img = os.path.join(self.root_dir, self.label_df['Path'].iloc[idx])
        X = Image.open(img)
        y = self.class_to_index(self.label_df['ClassId'].iloc[idx])
        if self.transform:
            X = self.transform(X)
        return X, y

    def class_to_index(self, class_name):
        """Return the dataset-local index of a class id (ValueError if absent)."""
        return self.classes.index(class_name)

    def index_to_class(self, class_index):
        """Return the class id for a dataset-local index."""
        return self.classes[class_index]

    def get_class_count(self):
        """Return {class_id: occurrence count} over the whole CSV."""
        return dict(self.label_df.ClassId.value_counts())

    def __len__(self):
        """Return the number of samples (CSV rows)."""
        return len(self.label_df)

# Wrapper class for the train/validation dataset splits
class GTSR43Subset(GTSR43Dataset):
    """Wraps a random_split() subset and applies a per-split transform.

    NOTE(review): this inherits GTSR43Dataset but never calls
    super().__init__(), so only __getitem__/__len__ are safe to call on
    instances — confirm the inheritance is intentional.
    """

    def __init__(self, subset, transform=None):
        self.subset = subset
        self.transform = transform

    def __getitem__(self, idx):
        sample, target = self.subset[idx]
        if self.transform:
            sample = self.transform(sample)
        return sample, target

    def __len__(self):
        return len(self.subset)
# Custom test dataset
class GTSR43Testset(Dataset):
    """GTSRB test dataset: unlabeled images indexed by a CSV file.

    The CSV must contain a 'Path' column with image paths relative to
    root_dir; no label is returned.
    """

    def __init__(self, root_dir, test_file, transform=None):
        self.root_dir = root_dir
        self.test_file_path = test_file
        self.label_df = pd.read_csv(os.path.join(self.root_dir, self.test_file_path))
        self.transform = transform

    def __getitem__(self, idx):
        """Return the (optionally transformed) image at row idx."""
        # Select the path column by name rather than positional iloc index 7,
        # matching the training dataset and surviving CSV column reordering.
        img = os.path.join(self.root_dir, self.label_df['Path'].iloc[idx])
        image = Image.open(img)
        if self.transform:
            image = self.transform(image)
        return image

    def __len__(self):
        """Return the number of test samples."""
        return len(self.label_df)
# Smoke-test the custom dataset on a single sample.
ds = GTSR43Dataset(dc.root_dir, dc.train_csv)
X, y = ds[5]  # idiomatic indexing instead of calling ds.__getitem__(5) directly
print(f'Train image: {X}\nTarget class: {y}\nClass count: {ds.get_class_count()}')

# Data transforms and augmentation
def image_resize():
    """Resize-and-center-crop pipeline (40 -> 36) without tensor conversion."""
    return transforms.Compose([
        transforms.Resize(40),
        transforms.CenterCrop(36),
    ])
def image_preprocess():
    """Resize, center-crop (40 -> 36) and convert a PIL image to a tensor."""
    return transforms.Compose([
        transforms.Resize(40),
        transforms.CenterCrop(36),
        transforms.ToTensor(),
    ])
def common_transforms(mean, std):
    """Preprocessing shared by train and test: tensorize then normalize."""
    # Local renamed so it no longer shadows the function's own name.
    pipeline = transforms.Compose([
        image_preprocess(),
        transforms.Normalize(mean, std),
    ])
    return pipeline
def data_aug(mean, std):
    """Augmentation pipeline applied to the training split only.

    NOTE(review): RandomVerticalFlip/RandomHorizontalFlip can change the
    semantics of traffic-sign images (e.g. direction arrows) — confirm the
    flips are intended for this dataset.
    """
    return transforms.Compose([
        transforms.Resize(40),             # VGG: 36, ResNet18: 40
        transforms.CenterCrop(36),         # VGG: 32, ResNet18: 36
        transforms.RandomVerticalFlip(),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(),          # random brightness/contrast/saturation jitter
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
        transforms.RandomErasing(),
    ])

# Helper used once to extract per-channel image statistics; the resulting
# values are stored in TrainingConfig.mean / TrainingConfig.std.
def get_mean_std():
    """Compute per-channel mean and std over the training images.

    NOTE(review): this averages per-image standard deviations rather than
    computing a single global std — an approximation, kept as-is for parity
    with the constants already stored in TrainingConfig.
    """
    dataset = GTSR43Dataset(dc.root_dir,
                            dc.train_csv,
                            transform=image_preprocess())
    loader = DataLoader(dataset,
                        batch_size=10,
                        shuffle=False,
                        num_workers=4)

    mean = 0.0
    std = 0.0
    for images, _ in loader:
        # Flatten H*W so per-channel statistics reduce in one call;
        # the final batch may be smaller than batch_size.
        flat = images.view(images.size(0), images.size(1), -1)
        mean += flat.mean(2).sum(0)
        std += flat.std(2).sum(0)

    n = len(loader.dataset)
    return mean / n, std / n
# NOTE(review): this __main__ guard sits mid-file, so only the mean/std dump
# is guarded — all the training code below it still runs unconditionally on
# import. Confirm that is intentional before importing this module elsewhere.
if __name__ == '__main__':
    mean, std = get_mean_std()
    print(mean, std)

# Helper that loads the data and builds the train/validation loaders.
def get_data(root_dir, batch_size, num_workers=4, data_augmentation=False):
    """Load the training CSV and split it 80/20 into train/validation loaders.

    Args:
        root_dir: dataset root containing Train.csv and the image folders.
        batch_size: batch size for both loaders.
        num_workers: DataLoader worker process count.
        data_augmentation: when True the training split uses the augmented
            pipeline; validation always uses the plain preprocessing.

    Returns:
        (train_loader, val_loader) tuple of DataLoaders.
    """
    dataset = GTSR43Dataset(root_dir, dc.train_csv)

    if data_augmentation:
        X_transforms = data_aug(tc.mean, tc.std)
    else:
        X_transforms = common_transforms(tc.mean, tc.std)
    y_transforms = common_transforms(tc.mean, tc.std)

    # 80/20 train/validation split.
    X_size = int(0.8 * len(dataset))
    y_size = len(dataset) - X_size
    X_dataset, y_dataset = torch.utils.data.random_split(dataset, [X_size, y_size])

    X_subset = GTSR43Subset(X_dataset, X_transforms)
    y_subset = GTSR43Subset(y_dataset, y_transforms)

    # Bug fix: the training loader used shuffle=False, so every epoch saw the
    # samples in the same order; SGD expects re-shuffled batches each epoch.
    # Validation order is irrelevant and stays deterministic.
    X_loader = DataLoader(X_subset,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=num_workers)
    y_loader = DataLoader(y_subset,
                          batch_size=batch_size,
                          shuffle=False,
                          num_workers=num_workers)
    return X_loader, y_loader

# Training function
def train(tc: "TrainingConfig",
          model: nn.Module,
          optimizer: torch.optim.Optimizer,
          X_loader: torch.utils.data.DataLoader,
          epoch_idx: int):
    """Run one training epoch and return (mean loss, accuracy in percent).

    Args:
        tc: training configuration; only tc.device is read here.
        model: network being optimized (already moved to tc.device by caller).
        optimizer: optimizer stepping the model parameters.
        X_loader: training-set DataLoader yielding (inputs, targets).
        epoch_idx: epoch number (kept for interface stability; unused).

    Returns:
        (epoch_loss, epoch_acc): mean batch loss and accuracy (0-100).
    """
    model.train()

    batch_losses = []
    batch_accs = []

    for batch, (X, y) in enumerate(X_loader):
        if batch == 0:
            # Bug fix: the original queried torch.cuda.get_device_name(0)
            # unconditionally, which raises when running on CPU.
            dev_name = torch.cuda.get_device_name(0) if tc.device == 'cuda' else tc.device
            print(f'Device: {dev_name}, Data: {X.shape}, Target: {y.shape}')

        index_y = y.clone()  # CPU copy of targets for accuracy bookkeeping
        X = X.to(tc.device)
        y = y.to(tc.device)

        optimizer.zero_grad()
        output = model(X)
        loss = F.cross_entropy(output, y)
        loss.backward()
        optimizer.step()

        batch_losses.append(loss.item())
        # argmax of the logits equals argmax of softmax(logits); the explicit
        # softmax the original computed was redundant for prediction.
        pred = output.data.max(dim=1)[1]
        correct = pred.cpu().eq(index_y).sum()
        batch_accs.append(float(correct) / float(len(X)))

    # Python lists + one np.array call replace the per-batch np.append,
    # which copied the whole array on every iteration.
    epoch_loss = np.array(batch_losses).mean()
    epoch_acc = 100. * np.array(batch_accs).mean()

    print(f'Training   - loss: {epoch_loss:.4f}, accuracy: {epoch_acc:.2f}%')

    return epoch_loss, epoch_acc

# Validation function
def validate(tc: "TrainingConfig",
             model: nn.Module,
             y_loader: torch.utils.data.DataLoader):
    """Evaluate the model on the validation loader.

    Returns:
        (loss, accuracy): mean batch loss and accuracy as a 0-1 fraction.
        (The original annotation said -> float, but a tuple has always been
        returned and consumed by main().)
    """
    model.eval()

    loss = 0.0
    correct = 0
    # Bug fix: evaluation ran without torch.no_grad(), building autograd
    # graphs (and holding activation memory) for every validation batch.
    with torch.no_grad():
        for X, y in y_loader:
            index_y = y.clone()  # CPU copy of targets for accuracy bookkeeping

            X = X.to(tc.device)
            y = y.to(tc.device)

            output = model(X)
            loss += F.cross_entropy(output, y).item()
            # argmax of the logits equals argmax of the softmax probabilities.
            pred = output.data.max(dim=1)[1]
            correct += pred.cpu().eq(index_y).sum().item()

    loss = loss / len(y_loader)
    accuracy = 100. * correct / len(y_loader.dataset)

    print(f'Validation - loss: {loss:.4f}, accuracy: {accuracy:.2f}%, {correct}/{len(y_loader.dataset)}')

    return loss, accuracy / 100.0

"""下载和保存模型"""


def save_model(model, device, accuracy):
    """Save model.state_dict() to <model_dir>/<model_name>_.pt.

    Weights are serialized from CPU so the checkpoint loads on any machine;
    the model is moved back to CUDA afterwards when applicable.
    `accuracy` is accepted for interface compatibility but is not used.
    """
    if not os.path.exists(dc.model_dir):
        os.makedirs(dc.model_dir)

    checkpoint = os.path.join(dc.model_dir, tc.model_name) + '_.pt'

    on_cuda = device == 'cuda'
    if on_cuda:
        model.to('cpu')

    torch.save(model.state_dict(), checkpoint)

    if on_cuda:
        model.to('cuda')
    return


def load_model(model):
    """Load weights from <model_dir>/<model_name>_.pt into `model`, return it."""
    checkpoint = os.path.join(dc.model_dir, tc.model_name) + '_.pt'
    model.load_state_dict(torch.load(checkpoint))
    return model


# Main training driver
def main(model, optimizer, tb_writer, scheduler=None, data_augmentation=True):
    """Train `model` for tc.epochs_count epochs with periodic validation.

    Args:
        model: network to train (moved onto the detected device here).
        optimizer: optimizer for the model parameters.
        tb_writer: TensorBoard SummaryWriter receiving scalar metrics.
        scheduler: optional LR scheduler stepped once per epoch.
        data_augmentation: kept for interface compatibility; the flag that is
            actually applied is tc.data_augmentation.

    Returns:
        (model, train_losses, train_accs, val_losses, val_accs).
    """
    batch_size = tc.batch_size
    num_workers = tc.num_workers
    if torch.cuda.is_available():
        tc.device = "cuda"
    else:
        tc.device = "cpu"
        # Bug fix: the original assigned batch_size_to_set / num_workers_to_set
        # here and never used them; the reduced CPU settings now take effect.
        batch_size = 10
        num_workers = 2

    model.to(tc.device)

    X_loader, y_loader = get_data(root_dir=dc.root_dir,
                                  batch_size=batch_size,
                                  num_workers=num_workers,
                                  data_augmentation=tc.data_augmentation)

    best_loss = torch.tensor(np.inf)

    epoch_X_loss = np.array([])
    epoch_y_loss = np.array([])
    epoch_X_acc = np.array([])
    epoch_y_acc = np.array([])

    t_begin = time.time()

    for epoch in range(tc.epochs_count):
        print(f'\nEpoch: {epoch + 1}/{tc.epochs_count}')

        X_loss, X_acc = train(tc=tc,
                              model=model,
                              optimizer=optimizer,
                              X_loader=X_loader,
                              epoch_idx=epoch)

        epoch_X_loss = np.append(epoch_X_loss, [X_loss])
        epoch_X_acc = np.append(epoch_X_acc, [X_acc])

        # Timing diagnostics for TensorBoard and the console.
        elapsed_time = time.time() - t_begin
        speed_epoch = elapsed_time / (epoch + 1)
        speed_batch = speed_epoch / len(X_loader)
        eta = speed_epoch * tc.epochs_count - elapsed_time

        tb_writer.add_scalar('Loss/Train', X_loss, epoch)
        tb_writer.add_scalar('Accuracy/Train', X_acc, epoch)
        tb_writer.add_scalar('Time/elapsed_time', elapsed_time, epoch)
        tb_writer.add_scalar('Time/speed_epoch', speed_epoch, epoch)
        tb_writer.add_scalar('Time/speed_batch', speed_batch, epoch)
        tb_writer.add_scalar('Time/eta', eta, epoch)

        if epoch % tc.test_interval == 0:

            current_loss, current_acc = validate(tc, model, y_loader)

            epoch_y_loss = np.append(epoch_y_loss, [current_loss])
            epoch_y_acc = np.append(epoch_y_acc, [current_acc])

            # Checkpoint whenever validation loss improves.
            if current_loss < best_loss:
                best_loss = current_loss
                save_model(model, device=tc.device, accuracy=current_acc)
                print('Model Improved! Saved!')

            tb_writer.add_scalar('Loss/Validation', current_loss, epoch)
            tb_writer.add_scalar('Accuracy/Validation', current_acc, epoch)
            tb_writer.add_scalars('Loss/Train-val', {'Train': X_loss, 'validation': current_loss}, epoch)
            tb_writer.add_scalars('Accuracy/Train-val', {'Train': X_acc, 'validation': current_acc}, epoch)

        if scheduler is not None:
            scheduler.step()
            current_lr = scheduler.get_last_lr()[0]
        else:
            # Bug fix: the original called scheduler.get_last_lr() in the
            # print below even when scheduler was None, which crashed.
            current_lr = optimizer.param_groups[0]['lr']

        print(
            f'Time: {elapsed_time:.2f}s, {speed_epoch:.2f} s/epoch, {speed_batch:.2f} s/batch, Learning rate: {current_lr}')

    print(f'Total time: {time.time() - t_begin:.2f}, Best loss: {best_loss:.3f}')

    return model, epoch_X_loss, epoch_X_acc, epoch_y_loss, epoch_y_acc

# Alternative backbone (uncomment to train VGG instead):
# from VGG.model import Vgg16_net
# model = Vgg16_net()

from model import ResNet_18
model = ResNet_18()

# Only parameters with requires_grad=True participate in optimization.
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                             lr=oc.init_learning_rate,
                             weight_decay=oc.weight_decay)

scheduler = optim.lr_scheduler.StepLR(optimizer,
                                      step_size=oc.scheduler_step_size,
                                      gamma=oc.scheduler_gamma)

# Bug fix: a second SummaryWriter() was created here with no argument, which
# pointed at the default ./runs directory and split the logs across two
# locations; the writer created earlier at dc.log_dir is reused instead.

print(f'Device: {tc.device}\n\
Epochs: {tc.epochs_count}\n\
Batch size: {tc.batch_size}\n\
Data Augmentation: {tc.data_augmentation}\n\
Scheduler step size: {oc.scheduler_step_size}\n\
Scheduler gamma: {oc.scheduler_gamma}\n\
Learning rate: {oc.init_learning_rate}\n\
L2 weight decay: {oc.weight_decay}')

model, train_loss, train_acc, val_loss, val_acc = main(model,
                                                       optimizer,
                                                       tb_writer,
                                                       scheduler=scheduler,
                                                       data_augmentation=tc.data_augmentation)
