# -*- coding: utf-8 -*-
"""
Created on Thu Jun 25 12:45:02 2020

@author: wangrong
"""

import torchvision
import torch
import torch.nn.functional as F
from torchvision import transforms
# from wastesorting.seresnet import SENet
# from wastesorting.seresnet import BasicBlock
import os
import shutil
import random
import numpy as np
import matplotlib.pyplot as plt
# from models.resnext import ResNeXt
# from models.resnext import Block
# from models.resnet1 import BasicBlock
import models.regnet
from models import *
from models import lenet
from models.regnet import RegNetX_400MF
# from vgg import VGG19

# Module-level default device: first CUDA GPU when present, otherwise CPU.
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
# net = RegNetX_400MF()
# net = ResNeXt29_2x64d()
# net = DPN26()
# net = SENet18()

# net = ResNet101()
#net = models.lenet.LeNet()
# net = ResNet50()

# net = models.regnet.RegNetX_200MF()
# net = VGG('VGG16')
# net = VGG('VGG16')
class Classification(object):
    """Wraps a torch model for training, evaluation and single-image inference.

    The wrapped network's forward pass is expected to return a
    ``(logits, probas)`` tuple, as the models under ``models/`` here do.
    """

    def __init__(self, model_name=None, train_net=None, ctx_id=0):
        """
        Args:
            model_name: optional path to a ``state_dict`` checkpoint to load.
            train_net: the model instance to wrap.
            ctx_id: CUDA device index, or -1 to run on CPU.
        """
        self.train_net = train_net
        self.model_name = model_name
        self.device = torch.device("cuda:" + str(ctx_id)) if ctx_id > -1 else torch.device("cpu")
        self.net = self.load_model()

    def load_model(self):
        """Optionally load a checkpoint, move the net to ``self.device``,
        switch it to eval mode and return it."""
        if self.model_name is not None:
            # map_location=self.device (not a cuda.is_available() check) keeps
            # GPU-saved checkpoints loadable when this instance targets the CPU.
            self.train_net.load_state_dict(
                torch.load(self.model_name, map_location=self.device))
        # .to() is a no-op when the net is already on the target device.
        self.train_net.to(self.device)
        self.train_net.eval()
        return self.train_net

    def train(self, dataset=None, batch_size=64, lr=0.05, num_epochs=1001, eval_loader=None):
        """Train ``self.net`` with SGD + cross-entropy.

        Per-batch losses are appended to ./train_resnet50.txt, per-epoch
        accuracies to ./test_resnet50.txt (final txt files are saved at these
        paths - change them together), a checkpoint is written every 100
        epochs, and loss/accuracy curves are saved to loss_acc.jpg.

        Args:
            dataset: training dataset (wrapped in a shuffling DataLoader).
            batch_size: minibatch size.
            lr: SGD learning rate.
            num_epochs: number of epochs to run.
            eval_loader: DataLoader used for test accuracy; when None, falls
                back to the module-level ``test_loader`` global (the original
                behaviour of this script).
        """
        if eval_loader is None:
            eval_loader = test_loader  # module-level global, set in __main__
        train_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True)
        optimizer = torch.optim.SGD(self.net.parameters(), lr=lr)
        loss_list = []
        train_acc = []
        test_acc = []
        for epoch in range(num_epochs):
            self.net.train()

            train_loss = 0.0
            correct_pred, num_examples = 0, 0
            for batch_idx, (features, targets) in enumerate(train_loader):
                # Fix: honour self.device (ctx_id) instead of the module-level
                # `device` global.
                features = features.to(self.device)
                targets = targets.to(self.device)
                logits, probas = self.net(features)

                loss = F.cross_entropy(logits, targets)
                # .item() detaches the value: accumulating the raw tensor
                # would keep every batch's autograd graph alive all epoch.
                train_loss += loss.item()
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                _, predicted_labels = torch.max(probas, 1)
                num_examples += targets.size(0)
                correct_pred += (predicted_labels == targets).sum().item()

                msg = ('Epoch: %03d/%03d | Batch %04d/%04d | Loss: %.4f'
                       % (epoch + 1, num_epochs, batch_idx, len(train_loader), loss))
                print(msg)
                with open('./train_resnet50.txt', 'a') as fw:
                    fw.write(msg + '\n')
            tr_acc = correct_pred / num_examples * 100
            with torch.no_grad():
                te_acc = self.compute_accuracy(eval_loader)
            msg = ('Epoch: %03d/%03d training accuracy: %.2f%% testing accuracy: %.2f%%'
                   % (epoch + 1, num_epochs, tr_acc, te_acc))
            print(msg)
            with open('./test_resnet50.txt', 'a') as fn:
                fn.write(msg + '\n')
            loss_list.append(train_loss / len(train_loader))
            train_acc.append(tr_acc)
            test_acc.append(te_acc)

            if epoch % 100 == 0:
                torch.save(self.net.state_dict(), 'model' + str(epoch) + '.pth')

        self._plot_curves(loss_list, train_acc, test_acc)

    def _plot_curves(self, loss_list, train_acc, test_acc):
        """Save the loss and accuracy curves to loss_acc.jpg and display them."""
        x = np.arange(len(loss_list))
        plt.figure(figsize=(12, 6))
        plt.subplot(121)
        plt.plot(x, loss_list)
        plt.xlabel('epoch')
        plt.ylabel('loss')
        plt.title('loss function curve')
        plt.subplot(122)
        plt.plot(x, train_acc, color='r')
        plt.plot(x, test_acc, color='g')
        plt.xlabel('epoch')
        plt.ylabel('accuracy')
        plt.legend(['train_acc', 'test_acc'], loc=4)
        plt.title('accuracy curve')
        plt.savefig("loss_acc.jpg")
        plt.show()

    def compute_accuracy(self, data_loader):
        """Return the accuracy of ``self.net`` over ``data_loader`` as a
        percentage (Python float)."""
        correct_pred, num_examples = 0, 0
        with torch.no_grad():  # pure inference; no graphs needed
            for features, targets in data_loader:
                features = features.to(self.device)
                targets = targets.to(self.device)
                logits, probas = self.net(features)
                _, predicted_labels = torch.max(probas, 1)
                num_examples += targets.size(0)
                correct_pred += (predicted_labels == targets).sum().item()
        return correct_pred / num_examples * 100

    def predict(self, image, transform):
        """Classify a single image.

        Args:
            image: raw input accepted by ``transform``.
            transform: callable producing a CHW tensor from ``image``.

        Returns:
            A 1-element tensor holding the predicted class index.
        """
        image_tensor = transform(image).float().unsqueeze(0).to(self.device)
        with torch.no_grad():
            _, output = self.net(image_tensor)
        _, index = torch.max(output.data, 1)
        return index


if __name__ == '__main__':

    # --- data preparation -------------------------------------------------
    def train_test_split(img_src_dir, img_to_dir, rate=0.3):
        """Move a random ``rate`` fraction of images from each class folder
        under ``img_src_dir`` into the matching folder under ``img_to_dir``.

        Args:
            img_src_dir: root directory with one sub-folder per class.
            img_to_dir: destination root; matching class sub-folders are
                created if missing (fix: otherwise shutil.move would rename
                each file onto the missing directory path instead of moving
                it inside).
            rate: fraction of each class's images to move out.
        """
        for fold in os.listdir(img_src_dir):
            src_fold = os.path.join(img_src_dir, fold)
            dst_fold = os.path.join(img_to_dir, fold)
            os.makedirs(dst_fold, exist_ok=True)
            images = os.listdir(src_fold)
            pick_number = int(len(images) * rate)  # images to move for this class
            for name in random.sample(images, pick_number):
                shutil.move(os.path.join(src_fold, name), os.path.join(dst_fold, name))

    src_dir = 'F:/pycode/traindata'
    to_dir = 'F:/pycode/testdata'
    # train_test_split(src_dir, to_dir)  # run once to carve out a test split

    # 32x32 inputs match the CIFAR-style models under models/.
    train_dataset = torchvision.datasets.ImageFolder(
        root=src_dir,
        transform=transforms.Compose([transforms.Resize((32, 32)),
                                      transforms.ToTensor()]))
    test_dataset = torchvision.datasets.ImageFolder(
        root=to_dir,
        transform=transforms.Compose([transforms.Resize((32, 32)),
                                      transforms.ToTensor()]))

    # NOTE: Classification.train reads this module-level name when no eval
    # loader is supplied to it.
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=64, shuffle=True)

    print(train_dataset.classes)
    print(test_dataset.classes)

    cls = Classification(train_net=VGG('VGG16'))
    cls.train(train_dataset)
