from array import array
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import numpy as np
import model
import csv
import pickle
from PIL import Image
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
from sklearn.metrics import f1_score
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
from losses import BalancedSoftmax
from losses import CB_loss
from losses import FocalLoss

import warnings
warnings.filterwarnings("ignore")


# torchsummary is only needed for the CPU-side model summary printed in main();
# it is deliberately not imported on CUDA machines.
if not torch.cuda.is_available():
    from torchsummary import summary

# Single global device used by all training/eval code below.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Spatial size (H, W) the 48x48 FER faces are cropped to before the network.
shape = (44, 44)


class DataSetFactory:
    """Read ../dataset/fer2013B.csv and expose train/private/public DataSets.

    Each CSV row is: label, space-separated 48x48 pixel string, ..., usage tag
    ('Training' | 'Valid' | 'Test'). Training images get random crop + flip
    augmentation; both validation splits get a deterministic center crop.
    """

    def __init__(self):
        # (images, labels) buckets keyed by the CSV usage column.
        splits = {
            'Training': ([], []),
            'Valid': ([], []),
            'Test': ([], []),
        }

        with open('../dataset/fer2013B.csv', 'r') as csvin:
            reader = csv.reader(csvin)
            next(reader)  # skip the header row
            for row in reader:
                bucket = splits.get(row[-1])
                if bucket is None:
                    continue
                pixels = np.asarray(
                    [int(p) for p in row[1].split()]).reshape(48, 48).astype('uint8')
                bucket[0].append(Image.fromarray(pixels))
                bucket[1].append(int(row[0]))

        images, emotions = splits['Training']
        private_images, private_emotions = splits['Valid']
        public_images, public_emotions = splits['Test']

        print('Training size %d : PrivateTest size %d : PublicTest size %d' % (
            len(images), len(private_images), len(public_images)))

        train_transform = transforms.Compose([
            transforms.RandomCrop(shape[0]),
            transforms.RandomHorizontalFlip(),
            ToTensor(),
        ])
        val_transform = transforms.Compose([
            transforms.CenterCrop(shape[0]),
            ToTensor(),
        ])

        self.training = DataSet(transform=train_transform, images=images, emotions=emotions)
        self.private = DataSet(transform=val_transform, images=private_images, emotions=private_emotions)
        self.public = DataSet(transform=val_transform, images=public_images, emotions=public_emotions)


class DataSet(torch.utils.data.Dataset):
    """In-memory dataset over parallel lists of images and integer labels.

    The transform (if any) is applied lazily, per item, so augmentation is
    re-sampled on every access.
    """

    def __init__(self, transform=None, images=None, emotions=None):
        self.transform = transform  # optional callable applied to each image
        self.images = images        # list of PIL images (or any samples)
        self.emotions = emotions    # list of integer class labels

    def __getitem__(self, index):
        sample = self.images[index]
        if self.transform is not None:
            sample = self.transform(sample)
        return sample, self.emotions[index]

    def __len__(self):
        return len(self.images)


def _adjust_lr(optimizer, epoch, base_lr, decay_start, decay_every, decay_rate):
    """Apply step learning-rate decay once past *decay_start*; return the lr logged.

    Mirrors the original schedule exactly: the optimizer's lr is only
    overwritten after decay has started. Before that the optimizer keeps
    whatever lr it was constructed with — NOTE(review): the Adam optimizer
    below is built with its default lr (1e-3), not base_lr, so the printed
    value does not match its actual lr until decay starts; confirm intended.
    """
    if decay_start >= 0 and epoch > decay_start:
        frac = (epoch - decay_start) // decay_every
        current_lr = base_lr * (decay_rate ** frac)
        for group in optimizer.param_groups:
            group['lr'] = current_lr
    else:
        current_lr = base_lr
    return current_lr


def _train_epoch(network, loader, criterion, optimizer):
    """Run one training epoch.

    Returns (avg_loss, accuracy_percent, y_true, y_pred) where y_true/y_pred
    are flat Python lists over the whole epoch.
    """
    network.train()
    total = 0
    correct = 0
    loss_sum = 0.0
    batches = 0
    y_true, y_pred = [], []
    for x_batch, y_batch in loader:
        batches += 1
        optimizer.zero_grad()
        x_batch = x_batch.to(device)
        y_batch = y_batch.to(device)
        logits = network(x_batch)
        loss = criterion(logits, y_batch)
        loss.backward()
        optimizer.step()
        _, predicted = torch.max(logits.data, 1)
        loss_sum += loss.item()  # accumulate a float, not a graph-attached tensor
        total += y_batch.size(0)
        correct += predicted.eq(y_batch.data).sum().item()
        y_true.extend(y_batch.cpu().tolist())
        y_pred.extend(predicted.cpu().tolist())
    return loss_sum / batches, 100. * correct / total, y_true, y_pred


def _validate(network, loader, criterion):
    """Evaluate *network* on *loader* without gradients.

    Returns (total_loss, avg_loss, accuracy_percent, y_true, y_pred).
    """
    network.eval()
    total = 0
    correct = 0
    loss_sum = 0.0
    batches = 0
    y_true, y_pred = [], []
    with torch.no_grad():
        for x_batch, y_batch in loader:
            batches += 1
            x_batch = x_batch.to(device)
            y_batch = y_batch.to(device)
            logits = network(x_batch)
            # BUG FIX: the original computed the validation loss as
            # criterion(y_predicted, y_train) — i.e. against the last
            # *training* batch left in the enclosing scope. Score the
            # validation batch instead.
            loss = criterion(logits, y_batch)
            _, predicted = torch.max(logits.data, 1)
            loss_sum += loss.item()
            total += y_batch.size(0)
            correct += predicted.eq(y_batch.data).sum().item()
            y_true.extend(y_batch.cpu().tolist())
            y_pred.extend(predicted.cpu().tolist())
    return loss_sum, loss_sum / batches, 100. * correct / total, y_true, y_pred


def _run_experiment(network, optimizer, criterion, training_loader,
                    validation_loader, min_validation_loss, epochs, lr,
                    decay_start, decay_every, decay_rate):
    """Train *network* for *epochs* epochs, validating on both splits each epoch.

    Checkpoints the whole model (pickle) whenever a split's total validation
    loss improves, after epoch 10. Mutates *min_validation_loss* in place so
    consecutive experiments share the same best-so-far thresholds (as the
    original code did). Returns (per-epoch accuracy list, per-epoch avg-loss
    list) for plotting.
    """
    acc_list = []
    loss_list = []
    for epoch in range(epochs):
        current_lr = _adjust_lr(optimizer, epoch, lr, decay_start, decay_every, decay_rate)
        print('learning_rate: %s' % str(current_lr))

        avg_loss, accuracy, y_true, y_pred = _train_epoch(
            network, training_loader, criterion, optimizer)
        f1score = f1_score(y_true, y_pred, average='macro')
        print('Epoch [%d/%d] Training Loss: %.4f, Accuracy: %.4f, F1-Score: %.4f' % (
            epoch + 1, epochs, avg_loss, accuracy, f1score))
        acc_list.append(accuracy)
        loss_list.append(avg_loss)
        print(classification_report(y_pred, y_true))

        for name in ['private', 'public']:
            total_val_loss, avg_val_loss, val_acc, y_true_v, y_pred_v = _validate(
                network, validation_loader[name], criterion)
            f1score2 = f1_score(y_true_v, y_pred_v, average='macro')

            if total_val_loss <= min_validation_loss[name]:
                if epoch >= 10:
                    print('saving new model')
                    # NOTE(review): pickling the full model object; saving
                    # network.state_dict() is the safer, portable convention.
                    with open('../model/%s_model_%d_%d.pkl' % (name, epoch + 1, val_acc), 'wb') as f:
                        pickle.dump(network, f)
                min_validation_loss[name] = total_val_loss

            print('Epoch [%d/%d] %s validation Loss: %.4f, Accuracy: %.4f, F1-Score: %.4f' % (
                epoch + 1, epochs, name, avg_val_loss, val_acc, f1score2))
            print(classification_report(y_pred_v, y_true_v))
    return acc_list, loss_list


def main():
    """Train the same architecture with SGD and Adam, then plot both curves."""
    # hyper-parameters ----------------
    batch_size = 128
    lr = 0.01
    epochs = 3
    learning_rate_decay_start = 80
    learning_rate_decay_every = 5
    learning_rate_decay_rate = 0.9
    # ---------------------------------

    classes = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
    cls_num_list = [3995, 436, 4097, 7215, 4830, 3171, 4965]  # FER2013 train counts
    network = model.Model(num_classes=len(classes)).to(device)
    network2 = model.Model(num_classes=len(classes)).to(device)
    if not torch.cuda.is_available():
        summary(network, (1, shape[0], shape[1]))

    # Class-balanced (effective-number) weights; currently computed for
    # inspection only — the criterion below is plain cross-entropy.
    beta = 0.9999
    effective_num = 1.0 - np.power(beta, cls_num_list)
    print(effective_num)
    per_cls_weights = (1.0 - beta) / np.array(effective_num)
    per_cls_weights = per_cls_weights / np.sum(per_cls_weights) * len(cls_num_list)
    per_cls_weights = torch.FloatTensor(per_cls_weights).to(device)

    criterion = nn.CrossEntropyLoss()
    factory = DataSetFactory()

    training_loader = DataLoader(factory.training, batch_size=batch_size, shuffle=True, num_workers=0)
    validation_loader = {
        'private': DataLoader(factory.private, batch_size=batch_size, shuffle=True, num_workers=0),
        'public': DataLoader(factory.public, batch_size=batch_size, shuffle=True, num_workers=0),
    }

    # Shared across both experiments, matching the original behaviour.
    min_validation_loss = {
        'private': 10000,
        'public': 10000,
    }

    # --- experiment 1: SGD with momentum ---
    optimizer = torch.optim.SGD(network.parameters(), lr=lr, momentum=0.9, weight_decay=5e-3)
    acc_list_SGD, loss_list_SGD = _run_experiment(
        network, optimizer, criterion, training_loader, validation_loader,
        min_validation_loss, epochs, lr,
        learning_rate_decay_start, learning_rate_decay_every, learning_rate_decay_rate)

    # --- experiment 2: Adam (default lr, as in the original — see _adjust_lr) ---
    optimizer2 = torch.optim.Adam(network2.parameters(), betas=(0.9, 0.999), eps=1e-8,
                                  weight_decay=5e-3, amsgrad=False)
    acc_list_Adam, loss_list_Adam = _run_experiment(
        network2, optimizer2, criterion, training_loader, validation_loader,
        min_validation_loss, epochs, lr,
        learning_rate_decay_start, learning_rate_decay_every, learning_rate_decay_rate)

    # --- comparison plots ---
    x = range(0, epochs)
    acc_dict = {'SGD': acc_list_SGD, 'Adam': acc_list_Adam}
    loss_dict = {'SGD': loss_list_SGD, 'Adam': loss_list_Adam}
    colors = {"SGD": "r", "Adam": "g"}

    for key in acc_dict.keys():
        plt.plot(x, acc_dict[key], color=colors[key], label=key)
    plt.xlabel("epoch")
    plt.ylabel("accuracy")
    plt.title("train accuracy curve")
    plt.legend()
    plt.show()

    for key in loss_dict.keys():
        plt.plot(x, loss_dict[key], color=colors[key], label=key)
    plt.xlabel("epoch")
    plt.ylabel("loss")
    plt.title("train loss curve")
    plt.legend()
    plt.show()


# Entry point: run the full two-optimizer training comparison as a script.
if __name__ == "__main__":
    main()
