import torch
import torch.nn as nn
import torchvision.models as tvmodel
from torch.autograd import Variable

import os
import cv2
import numpy as np
import matplotlib.pyplot as plt

from sklearn.model_selection import train_test_split
from torch.utils.data import TensorDataset, DataLoader

import random
import seaborn as sbn
from sklearn.metrics import confusion_matrix, precision_score, f1_score, recall_score

# Seed every RNG in use (python, numpy, torch) so runs are reproducible.
random.seed(1)
np.random.seed(1)
torch.random.manual_seed(1)

# Earlier dataset locations, kept for reference:
# DATADIR = 'C:\DL_DATA\dl2_DataSets\catvsdog1000'
# DATADIR = 'C:\DL_DATA\dl2_DataSets\zoo'
BASE_DIR, FILE_NAME = os.path.split(__file__)
rel_path = '../../../../../../large_data/DL1/_many_files/zoo'
# Dataset root, resolved relative to this file; expected layout is one
# subdirectory per class (see readData).
DATADIR = BASE_DIR + '/' + rel_path

IMG_W = 224  # input width expected by the ImageNet-pretrained backbone
IMG_H = 224  # input height
BATCH_SIZE = 16
EPOCH = 5  # number of training epochs


# data sets
# data sets
def readData(path):
    """Load an image-folder dataset rooted at *path*.

    Expects one subdirectory per class; a class's label is its index in
    the sorted list of subdirectory names.  Each image is resized to
    (IMG_W, IMG_H) and scaled to [0, 1].

    Args:
        path: dataset root directory.

    Returns:
        (x_image, y_label): float array of shape (N, IMG_H, IMG_W, 3)
        in BGR channel order (cv2 default), and int array of N labels.
    """
    x_image = []
    y_label = []
    # sorted() makes the label<->class mapping deterministic; bare
    # os.listdir order is arbitrary and platform-dependent.
    for label, class_name in enumerate(sorted(os.listdir(path))):
        class_dir = os.path.join(path, class_name)
        for file_name in sorted(os.listdir(class_dir)):
            image = cv2.imread(os.path.join(class_dir, file_name))
            if image is None:
                # unreadable or non-image file: skip instead of crashing
                # inside cv2.resize on a None
                continue
            image = cv2.resize(image, (IMG_W, IMG_H)) / 255
            x_image.append(image)
            y_label.append(label)
    return np.array(x_image), np.array(y_label)


x_image, y_label = readData(DATADIR)

# 80% train, 10% validation, 10% test.
x_train, x_test, y_train, y_test = train_test_split(x_image, y_label, train_size=0.8)
x_test, x_val, y_test, y_val = train_test_split(x_test, y_test, train_size=0.5)

# torch.Tensor() already produces float32 autograd-capable tensors; the
# Variable wrapper has been a no-op since torch 0.4 and is dropped here.
x_train = torch.Tensor(x_train)
x_test = torch.Tensor(x_test)
x_val = torch.Tensor(x_val)

# NHWC -> NCHW.  permute(0, 3, 1, 2) keeps the images upright; the
# previous transpose(x, 3, 1) swapped H and W as well (N, C, W, H), so
# the pretrained backbone was fed transposed images.
x_train = x_train.permute(0, 3, 1, 2)
x_test = x_test.permute(0, 3, 1, 2)
x_val = x_val.permute(0, 3, 1, 2)

# data visualization: 9 random test images with their labels
plt.figure(figsize=(9, 9))
for i in range(9):
    plt.subplot(3, 3, i + 1)
    r = random.randint(0, len(x_test) - 1)
    img = x_test[r]
    # CHW -> HWC for matplotlib; BGR (cv2) -> RGB for correct colors
    plt.imshow(cv2.cvtColor(img.permute(1, 2, 0).numpy(), cv2.COLOR_BGR2RGB))
    label = y_test[r]
    # NOTE(review): hard-coded cat/dog titles, but DATADIR points at a
    # 'zoo' dataset -- confirm the class names
    plt.title('cat' if label == 0 else 'dog')
    plt.axis('off')
print('Please check and close the drawing window to continue ...')
plt.show()

y_train = torch.LongTensor(y_train)
y_test = torch.LongTensor(y_test)
y_val = torch.LongTensor(y_val)

ds_train = TensorDataset(x_train, y_train)
ds_val = TensorDataset(x_val, y_val)
ds_test = TensorDataset(x_test, y_test)

dl_train = DataLoader(ds_train, BATCH_SIZE, shuffle=True)
dl_val = DataLoader(ds_val, BATCH_SIZE, shuffle=True)
dl_test = DataLoader(ds_test, BATCH_SIZE, shuffle=True)


class MyNet(nn.Module):
    """ImageNet-pretrained ResNet-34 feature extractor with a fresh
    2-class linear head.  The backbone is frozen; only the head trains."""

    def __init__(self):
        super(MyNet, self).__init__()
        resnet = tvmodel.resnet34(pretrained=True)
        # Freeze the backbone so only self.fc receives gradients.
        for param in resnet.parameters():
            param.requires_grad = False
        resnet_out_channel = resnet.fc.in_features
        # Keep everything up to (and including) global average pooling,
        # dropping resnet's own fully-connected classifier.
        self.resnet = nn.Sequential(*list(resnet.children())[:-1])
        self.fc = nn.Linear(resnet_out_channel, 2)

    def forward(self, x):
        """Map a (N, 3, H, W) float batch to (N, 2) class logits."""
        x = self.resnet(x)       # (N, C, 1, 1) after global avg pool
        x = torch.flatten(x, 1)  # (N, C) -- same as squeezing dims 3 and 2
        x = self.fc(x)
        return x


def accuracy(y_pred, y_true):
    """Fraction of correct predictions.

    Args:
        y_pred: (N, num_classes) tensor of logits/scores.
        y_true: (N,) tensor of integer class labels.

    Returns:
        0-dim float tensor holding the mean accuracy in [0, 1].
    """
    # argmax over the class dimension gives the predicted label
    return (y_pred.argmax(dim=1) == y_true).float().mean()


def process_data(dl, is_train, label, device):
    """Run one full pass over dataloader *dl*.

    When *is_train* is True an optimization step is taken per batch;
    otherwise the model is only evaluated.  Relies on the module-level
    globals ``model``, ``optim``, ``criterion`` and ``epoch`` being set
    (done in the __main__ section).

    Args:
        dl: DataLoader yielding (inputs, labels) batches.
        is_train: whether to backprop and step the optimizer.
        label: tag for the per-batch progress printout.
        device: torch.device the batches are moved onto.

    Returns:
        (avg_loss, avg_acc): metrics averaged over the batches of *dl*;
        (0., 0.) for an empty loader.
    """
    total_loss = 0.
    total_acc = 0.
    # Explicit counter: the original divided by the leaked loop variable
    # ``i``, which raises NameError on an empty loader.
    n_batches = 0
    model.train(is_train)  # set the mode once instead of per batch
    for i, (bx, by) in enumerate(dl):
        bx = bx.float().to(device)
        by = by.long().to(device)
        if is_train:
            optim.zero_grad()
            pred = model(bx)
            loss = criterion(pred, by)
            loss.backward()
            optim.step()
        else:
            # no_grad skips autograd bookkeeping during evaluation
            with torch.no_grad():
                pred = model(bx)
                loss = criterion(pred, by)
        acc = accuracy(pred, by)
        loss = loss.detach().cpu().item()
        acc = acc.detach().cpu().item()
        total_loss += loss
        total_acc += acc
        n_batches += 1
        print(f'{label}: epoch#{epoch + 1}: #{i + 1} loss = {loss}, acc = {acc}')
    model.train(False)  # leave the model in eval mode, as before
    if n_batches == 0:
        return 0., 0.
    return total_loss / n_batches, total_acc / n_batches


if '__main__' == __name__:

    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
    print('device:', device)
    device = torch.device(device)

    model = MyNet().to(device)
    print(model)
    criterion = torch.nn.CrossEntropyLoss()
    # Only the unfrozen head actually receives updates (backbone params
    # have requires_grad=False).
    optim = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.9, weight_decay=1e-4)

    # ---- training loop: record per-epoch train/val metrics ----
    loss_his = []
    acc_his = []
    loss_his_val = []
    acc_his_val = []
    for epoch in range(EPOCH):
        avg_loss, avg_acc = process_data(dl_train, True, 'train', device)
        avg_loss_val, avg_acc_val = process_data(dl_val, False, 'val', device)
        loss_his.append(avg_loss)
        loss_his_val.append(avg_loss_val)
        acc_his.append(avg_acc)
        acc_his_val.append(avg_acc_val)
        print(
            f'epoch#{epoch + 1}: loss = {avg_loss} acc = {avg_acc}, loss_val = {avg_loss_val}, acc_val = {avg_acc_val}')
    avg_loss_test, avg_acc_test = process_data(dl_test, False, 'test', device)
    print(f'loss_test = {avg_loss_test}, acc_test = {avg_acc_test}')

    # ---- reporting: 2x2 grid (loss, acc, pie, confusion matrix) ----
    spr = 2
    spc = 2
    spn = 0
    plt.figure(figsize=[6, 6])

    spn += 1
    plt.subplot(spr, spc, spn)
    plt.plot(loss_his, 'ro-', label='train')
    plt.plot(loss_his_val, 'go--', label='validation')
    plt.legend()
    plt.title('loss')

    spn += 1
    plt.subplot(spr, spc, spn)
    plt.plot(acc_his, 'ro-', label='train')
    plt.plot(acc_his_val, 'go--', label='validation')
    plt.legend()
    plt.title('acc')

    # Collect test-set predictions batch by batch (the whole x_test may
    # not fit through the model at once).  NOTE: y_test is rebound here
    # from the tensor above to a numpy array in dataloader order.
    pred_test = None
    y_test = None
    model.train(False)
    with torch.no_grad():  # inference only: no autograd bookkeeping
        for bx, by in dl_test:
            bx = bx.float().to(device)
            pred = model(bx).cpu().numpy()
            by = by.cpu().numpy()
            if pred_test is None:
                pred_test = pred
                y_test = by
            else:
                pred_test = np.concatenate([pred_test, pred], axis=0)
                y_test = np.concatenate([y_test, by], axis=0)
    pred_test = np.argmax(pred_test, 1)

    # Reuse the accuracy computed during the test pass above.
    acc_test = avg_acc_test

    spn += 1
    plt.subplot(spr, spc, spn)
    plt.pie([acc_test, 1 - acc_test], explode=[0, 0.02], labels=['True predicted', 'False predicted'],
            autopct='%1.1f%%')

    spn += 1
    plt.subplot(spr, spc, spn)
    test_matrix = confusion_matrix(y_test, pred_test)
    print('test matrix shape:', np.shape(test_matrix))
    sbn.heatmap(test_matrix, annot=True)

    # Precision / recall / f1.  NOTE(review): these default to binary
    # averaging and raise for multi-class labels -- confirm the 'zoo'
    # data really has only 2 classes, else pass average='macro'.
    precision = precision_score(y_test, pred_test)
    print(f'查准率:{precision:.3f}')
    # recall
    recall = recall_score(y_test, pred_test)
    print(f'查全率:{recall:.3f}')
    # f1 score
    f1 = f1_score(y_test, pred_test)
    print(f'f1分值:{f1:.3f}')

    plt.show()
