#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : baseline.py
# @Author: Richard Chiming Xu
# @Date  : 2023/6/13
# @Desc  :
import glob

from transformers import AutoFeatureExtractor, ResNetForImageClassification
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from datasets import load_dataset
import cv2
import pandas as pd
import numpy as np
import albumentations as A

class Config:
    """Central configuration for the baseline classifier (train + predict)."""

    # --- data locations (globbed once, at class-definition time) ---
    train_path = glob.glob('data/train/*/*')
    test_path = glob.glob('data/test/*')

    # --- run mode and compute device ---
    mode = 'train'      # 'train' or anything else for prediction
    device = 'cpu'      # overwritten at startup when CUDA is available

    # --- model checkpoint and output locations ---
    model = 'microsoft/resnet-50'
    model_save = 'result/model/'
    result_save = 'result/result.csv'

    # --- class names and id<->label mappings (mappings filled in at startup) ---
    label = ['d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8', 'd9']
    id_2_label = None
    label_2_id = None

    # Feature extractor instance, assigned after loading from the hub.
    extractor = None

    # --- optimisation hyper-parameters ---
    epochs = 5
    lr = 1e-4
    batch_size = 64
    test_batch_size = 32


class XunFeiDataset(Dataset):
    """Dataset that loads competition images and prepares model inputs.

    Each item is a dict with 'pixel_values' produced by the HF feature
    extractor and, for images whose parent directory is one of the known
    class names, a 'labels' tensor holding the class index.
    """

    # Known class names; a training image's parent directory is its label.
    LABELS = ['d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8', 'd9']

    def __init__(self, img_path, extractor, transform=None):
        """
        Args:
            img_path: list of image file paths.
            extractor: HF feature extractor producing 'pixel_values'.
            transform: optional albumentations transform, or None for no
                augmentation.
        """
        self.img_path = img_path
        self.extractor = extractor
        # The original if/else both assigned `transform` or None; collapsed.
        self.transform = transform

    def __getitem__(self, index):
        # Normalise Windows separators so the label directory can be split out.
        path = self.img_path[index].replace('\\', '/')
        img = cv2.imread(path)
        if img is None:
            # cv2.imread returns None on failure without raising; fail loudly
            # here instead of crashing later inside transform/extractor.
            raise FileNotFoundError(f'Could not read image: {path}')
        # Apply data augmentation when configured.
        if self.transform is not None:
            img = self.transform(image=img)['image']

        result = {
            'pixel_values': self.extractor(img, return_tensors="pt")['pixel_values'].squeeze()
        }
        # Training images live under a class-named directory; attach its index.
        class_dir = path.split('/')[-2]
        if class_dir in self.LABELS:
            label = self.LABELS.index(class_dir)
            result['labels'] = torch.tensor([label], dtype=torch.long)

        return result

    def __len__(self):
        return len(self.img_path)


def create_dataloader(config: Config, val_size: int = 1000):
    """Build dataloaders according to ``config.mode``.

    In 'train' mode the last ``val_size`` training images are held out for
    validation and a (train, val) loader pair is returned; in any other mode
    a single test loader is returned.

    Args:
        config: global configuration providing paths, extractor and batch sizes.
        val_size: number of trailing training samples held out for validation
            (default 1000, matching the previously hard-coded split).

    Returns:
        (train_dataloader, val_dataloader) in 'train' mode, else test_dataloader.
    """
    if config.mode == 'train':
        train_dataloader = DataLoader(
            XunFeiDataset(config.train_path[:-val_size], config.extractor),
            batch_size=config.batch_size, shuffle=True)
        val_dataloader = DataLoader(
            XunFeiDataset(config.train_path[-val_size:], config.extractor),
            batch_size=config.test_batch_size, shuffle=True)
        return train_dataloader, val_dataloader
    test_dataloader = DataLoader(
        XunFeiDataset(config.test_path, config.extractor),
        batch_size=config.test_batch_size, shuffle=False)
    return test_dataloader

def predict(config, test_dataloader):
    """Run inference with the fine-tuned checkpoint and return class indices.

    Loads the model saved under ``config.model_save``, pushes every batch of
    the test loader through it, and collects the argmax class index per image.
    """
    model = ResNetForImageClassification.from_pretrained(
        config.model_save,
        num_labels=len(config.label),
        id2label=config.id_2_label,
        label2id=config.label_2_id,
        ignore_mismatched_sizes=True,
    )
    model.to(config.device)
    model.eval()

    test_pred = []
    with torch.no_grad():
        for mini_batch in test_dataloader:
            pixel_values = mini_batch['pixel_values'].to(config.device)
            # Forward pass only; take the most likely class per sample.
            logits = model(pixel_values).logits
            test_pred.extend(logits.argmax(1).cpu().numpy())

    return test_pred

def val(config, model, val_dataloader):
    """Evaluate the model on the validation loader.

    Args:
        config: configuration providing the target device.
        model: model returning an output with ``.loss`` and ``.logits`` when
            called as ``model(pixel_values=..., labels=...)``.
        val_dataloader: iterable of batches with 'pixel_values' and 'labels'.

    Returns:
        Tuple of (mean loss per batch, mean accuracy per batch).
    """
    model.eval()
    val_loss, val_acc = 0., 0.
    with torch.no_grad():
        for mini_batch in val_dataloader:
            pixel_values = mini_batch['pixel_values'].to(config.device)
            # Flatten the (batch, 1) label tensor to (batch,) for the model.
            labels = mini_batch['labels'].view(-1).to(config.device)
            output = model(pixel_values=pixel_values, labels=labels)

            # Per-batch accuracy, averaged over batches in the return below.
            val_acc += (output.logits.argmax(1) == labels).sum().item() / len(pixel_values)
            val_loss += output.loss.item()
    # NOTE(review): averaging per-batch accuracy slightly biases the metric
    # when the last batch is smaller; kept for parity with original behaviour.
    return val_loss / len(val_dataloader), val_acc / len(val_dataloader)

def train(config, train_dataloader, val_dataloader):
    """Fine-tune a pretrained ResNet; report validation metrics each epoch."""
    # Load the pretrained backbone with a freshly-sized classification head.
    model = ResNetForImageClassification.from_pretrained(
        config.model,
        num_labels=len(config.label),
        id2label=config.id_2_label,
        label2id=config.label_2_id,
        ignore_mismatched_sizes=True,
    )
    model.to(config.device)

    optimizer = torch.optim.AdamW(model.parameters(), config.lr)
    # Cosine learning-rate decay over the full schedule.
    total_steps = len(train_dataloader) * config.epochs
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, total_steps)

    for epoch in range(config.epochs):
        model.train()
        for step, mini_batch in enumerate(train_dataloader):
            pixel_values = mini_batch['pixel_values'].to(config.device)
            # Flatten (batch, 1) labels to (batch,) as the model expects.
            labels = mini_batch['labels'].view(len(mini_batch['labels'])).to(config.device)

            loss = model(pixel_values=pixel_values, labels=labels).loss

            # Standard backprop step followed by the LR schedule tick.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            scheduler.step()

            if step % 20 == 0:
                print('train loss', loss.item())

        val_loss, val_acc = val(config, model, val_dataloader)
        print('-' * 50)
        print('epoch: {}, val_loss: {}, val_acc: {}'.format(epoch, val_loss, val_acc))
        print('-' * 50)

    return model




if __name__ == '__main__':
    config = Config()
    config.device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Build the id<->label mappings from the configured class names.
    config.id_2_label = {i: config.label[i] for i in range(len(config.label))}
    config.label_2_id = {v: k for k, v in config.id_2_label.items()}

    # NOTE(review): this hard-coded override forces prediction mode and makes
    # the 'train' branch below unreachable; set it to 'train' to fine-tune.
    config.mode = 'predict'

    # The feature extractor is needed by both branches; load it exactly once.
    feature_extractor = AutoFeatureExtractor.from_pretrained(config.model)
    config.extractor = feature_extractor

    if config.mode == 'train':
        # Build dataloaders, fine-tune, then persist the model.
        train_dataloader, val_dataloader = create_dataloader(config)
        model = train(config, train_dataloader, val_dataloader)
        model.save_pretrained(config.model_save)
    else:
        # Predict on the test set and write the submission CSV.
        test_dataloader = create_dataloader(config)
        test_pred = predict(config, test_dataloader)
        print(test_pred)
        result = [
            {
                # File name (uuid) of the test image, with Windows paths normalised.
                'uuid': path.replace('\\', '/').split('/')[-1],
                'label': config.id_2_label[pred],
            }
            for path, pred in zip(config.test_path, test_pred)
        ]
        pd.DataFrame(result).to_csv(config.result_save, index=False)



