# -*- coding: utf-8 -*-
"""
Created on Thu Jun 25 12:45:02 2020

@author: wangrong
"""

import torchvision
import torch
import torch.nn.functional as F
from torchvision import transforms

import os
# noinspection PyUnresolvedReferences
import shutil
# noinspection PyUnresolvedReferences
import random
import numpy as np
from pathlib import Path
import matplotlib.pyplot as plt

# noinspection PyUnresolvedReferences
from sklearn import metrics
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix, precision_score

from models import *

# Module-level compute device and network architecture.  Both are globals:
# ``load_model`` below loads checkpoint weights into this ``net`` instance.
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
# SENet50 is the backbone currently in use; other architectures from
# ``models`` (RegNetX_400MF, ResNeXt29_2x64d, ResNet50, ResNet18, ...)
# can be swapped in here.
net = SENet50()
class Classification(object):
    """Training/evaluation wrapper around the module-level ``net`` model.

    The network architecture itself is created at module level (``net``,
    built from the ``models`` package); this class loads checkpoint weights
    into it, moves it to the requested device, and provides train /
    evaluate / predict entry points.
    """

    def __init__(self, model_name=None, ctx_id=0):
        """
        Args:
            model_name: path to a saved ``state_dict`` checkpoint, or None
                to keep the model's freshly initialised weights.
            ctx_id: CUDA device index; any value < 0 forces the CPU.
        """
        self.model_name = model_name
        # Bug fix: the original unconditionally built a CUDA device when
        # ctx_id > -1, crashing on CPU-only machines.  Fall back to CPU
        # whenever CUDA is unavailable.
        if ctx_id > -1 and torch.cuda.is_available():
            self.device = torch.device("cuda:" + str(ctx_id))
        else:
            self.device = torch.device("cpu")
        self.net = self.load_model()

    def load_model(self):
        """Load weights into the module-level ``net`` and prepare it.

        Returns:
            The model, placed on ``self.device`` and switched to eval mode.
        """
        if self.model_name is not None:
            # map_location lets a GPU-trained checkpoint load on a CPU box.
            net.load_state_dict(torch.load(
                self.model_name,
                map_location=None if torch.cuda.is_available() else 'cpu'))
        # Moving to CPU is a no-op, so the .to() call is safe unconditionally
        # (the original skipped it entirely on CPU-only machines).
        net.to(self.device)
        net.eval()
        return net

    def train(self, dataset=None, batch_size=256, lr=0.05, num_epochs=5):
        """Train ``self.net`` on ``dataset`` with plain SGD, evaluating on
        the module-level ``test_loader`` after every epoch.

        Bug fix: the original training loop was entirely commented out, so
        ``train`` only ran evaluation; the loop below restores it.

        NOTE(review): this reads the module-level ``test_loader`` (defined
        in the ``__main__`` block) -- confirm it exists before calling this
        from another entry point.
        """
        train_loader = torch.utils.data.DataLoader(
            dataset=dataset, batch_size=batch_size, shuffle=True)
        optimizer = torch.optim.SGD(self.net.parameters(), lr=lr)
        loss_list = []
        test_acc = []
        for epoch in range(num_epochs):
            self.net.train()
            train_loss = 0.0
            for features, targets in train_loader:
                features = features.to(self.device)
                targets = targets.to(self.device)
                # Project models return a (logits, probas) pair.
                logits, probas = self.net(features)
                loss = F.cross_entropy(logits, targets)
                # .item() detaches the scalar so each batch's autograd graph
                # is freed (the original accumulated live tensors).
                train_loss += loss.item()
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            # Evaluate in eval mode so BatchNorm/Dropout behave correctly;
            # the original evaluated while still in train mode.
            self.net.eval()
            with torch.no_grad():
                te_acc = self.compute_accuracy(test_loader)
            print('Epoch: %03d/%03d | Loss: %.4f | Test acc: %.2f%%'
                  % (epoch + 1, num_epochs,
                     train_loss / len(train_loader), te_acc))
            loss_list.append(train_loss / len(train_loader))
            test_acc.append(te_acc)

    def calculate_result(self, actual, pred):
        """Print accuracy, macro precision/recall/F1 and the confusion
        matrix for the given ground-truth and predicted label lists."""
        actual = np.array(actual)
        pred = np.array(pred)
        m_accuracy = accuracy_score(actual, pred)
        # 'macro' weights every class equally, regardless of support.
        m_precision = metrics.precision_score(actual, pred, average='macro')
        m_recall = metrics.recall_score(actual, pred, average='macro')
        m_f1score = metrics.f1_score(actual, pred, average='macro')
        conf_matrix = confusion_matrix(actual, pred)
        print('accuracy:{0:.5f}'.format(m_accuracy), 'precision:{0:.5f}'.format(m_precision),
              'recall:{0:0.5f}'.format(m_recall), 'F1-score:{0:.5f}'.format(m_f1score), "\n", conf_matrix)

    def compute_accuracy(self, data_loader):
        """Run the model over ``data_loader``, print detailed metrics and
        return the accuracy as a percentage (scalar tensor in [0, 100])."""
        prediction = []
        gt = []
        correct_pred, num_examples = 0, 0
        for features, targets in data_loader:
            # Bug fix: use self.device (the original used the module-level
            # ``device``, which can disagree with the model's placement).
            features = features.to(self.device)
            targets = targets.to(self.device)
            gt += targets.cpu().numpy().tolist()
            logits, probas = self.net(features)
            _, predicted_labels = torch.max(probas, 1)
            prediction += predicted_labels.cpu().numpy().tolist()
            num_examples += targets.size(0)
            correct_pred += (predicted_labels == targets).sum()
        self.calculate_result(gt, prediction)
        return correct_pred.float() / num_examples * 100

    def predict(self, image, transform):
        """Classify a single image.

        Args:
            image: raw image accepted by ``transform``.
            transform: callable producing a CHW tensor for one image.

        Returns:
            Tensor holding the argmax class index for the image.
        """
        # Non-mutating unsqueeze; the original's in-place unsqueeze_ gained
        # nothing and mutated the freshly created tensor.
        image_tensor = transform(image).float().unsqueeze(0)
        image_tensor = image_tensor.to(self.device)
        _, output = self.net(image_tensor)
        _, index = torch.max(output.data, 1)
        return index

if __name__ == '__main__':

    # Dataset roots: one sub-directory per class under each folder.
    src_dir = 'G:/dataset/PlantVillage-Dataset-train'
    to_dir = 'G:/dataset/PlantVillage-Dataset-test'

    # Both splits share the same preprocessing pipeline.
    preprocess = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
    ])
    train_dataset = torchvision.datasets.ImageFolder(root=str(src_dir), transform=preprocess)
    test_dataset = torchvision.datasets.ImageFolder(root=str(to_dir), transform=preprocess)

    # Module-level loader consumed by Classification.train().
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=256, shuffle=True)

    # Class names are derived from the sub-directory names,
    # e.g. ['cat', 'dog']; indices are assigned in that order.
    print(train_dataset.classes)
    print(test_dataset.classes)

    cls = Classification(model_name='H:/code/classes/class_traing1/SENet50/model100.pth')
    cls.train(train_dataset)
