# -*- coding: utf-8 -*-
# @Create On    : 2020/4/26 10:22
# @Author  : Bao Linfeng
# @Function    : dogs vs cats  train
# @File    : train.py
# @Software: PyCharm

from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import torch.optim as optim
import torch.nn as nn
import torch
import pickle
from PIL import Image
import os
import logging
import datetime
import configparser
import argparse
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import models

# Restrict CUDA to physical GPU 1; must be set before torch initialises CUDA.
os.environ["CUDA_VISIBLE_DEVICES"]='1'
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = models.AlexNet()
# model=nn.DataParallel(model,device_ids=[5,6]) # multi-GPU
model = model.to(DEVICE)
# Per-epoch metric series, filled by train()/test() and plotted/saved at the end.
train_loss = []  # training loss recorded by train() once per epoch
total_loss = []  # mean validation loss per epoch (appended by test())
total_acc = []   # validation accuracy (%) per epoch (appended by test())

class KaggleLoader(Dataset):
    """Dataset over the Kaggle dogs-vs-cats image directory.

    Train/validation files are named ``<class>.<index>.jpg`` (e.g. ``dog.42.jpg``);
    test files are named ``<index>.jpg``.  Labels: dog -> 1, cat -> 0.
    """

    def __init__(self, root_dir, train=True, test=False, transform=None, split=0.7):
        """
        :param root_dir: directory containing the image files
        :param train: True for the training split, False for the validation
                      split (ignored when ``test`` is True)
        :param test: True to load the unlabeled test images
        :param transform: optional callable applied to each PIL image
        :param split: fraction of the sorted file list used for training
        """
        self.root_dir = root_dir
        self.train = train
        self.test = test
        self.transform = transform
        self.images_name = os.listdir(self.root_dir)
        # Sort by numeric index so the train/validation split is reproducible
        # regardless of the order os.listdir() happens to return.
        if self.test:
            # test files look like "<index>.jpg"
            self.images_name = sorted(self.images_name, key=lambda x: int(x.replace(".jpg", "")))
        else:
            # labeled files look like "<class>.<index>.jpg"
            self.images_name = sorted(self.images_name, key=lambda x: int(x.split(".")[1]))
            sum_images = len(self.images_name)
            if self.train:  # training split: first `split` fraction
                self.images_name = self.images_name[:int(split * sum_images)]
            else:  # validation split: the remainder
                self.images_name = self.images_name[int(split * sum_images):]

    def __getitem__(self, item):
        image_name = self.images_name[item]
        label = 1 if "dog" in image_name else 0
        # Force 3 channels: some JPEGs decode as grayscale/palette images,
        # which would break a network expecting RGB input.
        data = Image.open(os.path.join(self.root_dir, image_name)).convert("RGB")
        # The original called self.transform unconditionally and crashed with
        # TypeError when the declared default (transform=None) was used.
        if self.transform is not None:
            data = self.transform(data)
        return data, label

    def __len__(self):
        return len(self.images_name)


def train(train_loader, optimizer, epoch):
    """Train the module-level `model` for one epoch.

    :param train_loader: DataLoader yielding (image batch, label batch)
    :param optimizer: optimizer over model.parameters()
    :param epoch: 1-based epoch number, used only for logging
    Side effects: updates model weights in place and appends the final
    batch's loss to the module-level `train_loss` list.
    """
    model.train()
    criterion = nn.CrossEntropyLoss()
    last_loss = None
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(DEVICE), target.to(DEVICE)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        last_loss = loss.item()
        if (batch_idx + 1) % 30 == 0:
            # (batch_idx + 1): the original used batch_idx and undercounted
            # the processed-sample count by one batch at each log line.
            logging.info('Train Epoch: {} [{}/{} ({:.0f} %)]\tLoss: {:.6f}'.format(
                epoch, (batch_idx + 1) * len(data), len(train_loader.dataset),
                       100. * (batch_idx + 1) / len(train_loader), last_loss))
    # The original read `loss` after the loop, raising NameError if the
    # loader yielded no batches.
    if last_loss is not None:
        train_loss.append(last_loss)




def test(test_loader):
    """Evaluate the module-level `model` on `test_loader`.

    :param test_loader: DataLoader yielding (image batch, label batch)
    Side effects: appends the mean loss to `total_loss`, the accuracy (%) to
    `total_acc`, and logs both.
    """
    import torch.nn.functional as F
    model.eval()
    test_loss = 0.0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(DEVICE), target.to(DEVICE)
            output = model(data)
            # Training optimises nn.CrossEntropyLoss on raw logits, so the
            # matching eval loss is cross_entropy. The original used
            # F.nll_loss, which expects log-probabilities and therefore
            # produced meaningless (often negative) loss values on logits.
            # .item() so total_loss stores floats, not tensors.
            test_loss += F.cross_entropy(output, target, reduction='sum').item()
            pred = output.max(1, keepdim=True)[1]  # index of the max logit
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    total_loss.append(test_loss)
    total_acc.append(100.0 * correct / len(test_loader.dataset))
    logging.info("\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.3f}%) \n".format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)
    ))


def launch(args):
    """Unfinished CLI entry-point stub.

    NOTE(review): the ArgumentParser is created and immediately discarded,
    and ``args`` is never read — presumably a placeholder for a future
    command-line interface. Confirm intent before removing.
    """
    argparse.ArgumentParser()
    pass

if __name__ == '__main__':
    # Read the global training configuration.
    cfg = configparser.ConfigParser()
    cfg.read('parameter.cfg')
    batch_size = int(cfg.get("config","batch_size"))
    learning_rate = float(cfg.get("config","learning_rate"))
    learning_drop = int(cfg.get("config","learning_drop"))
    max_epochs = int(cfg.get("config","max_epochs"))
    num_workers = int(cfg.get("config","num_workers"))
    root_dir = cfg.get("config","root_dir")

    # Log to a timestamped file (used for long V100 training runs).
    os.makedirs("output", exist_ok=True)  # logging fails if the dir is missing
    time = datetime.datetime.now()
    log_file_name = "output/%02d_%02d__%02d_%02d_%02d.log" % (
        time.month, time.day, time.hour, time.minute, time.second
    )
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                        filename=log_file_name)

    train_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),  # horizontal-flip augmentation
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    dataSet = KaggleLoader(root_dir=root_dir, train=True, transform=train_transform)

    # Record the model and hyper-parameters in the log for reproducibility.
    logging.info(str(model))
    for key in ("root_dir", "output", "batch_size",
                "learning_rate", "max_epochs", "learning_drop"):
        logging.info("%s = % s" % (key, cfg.get("config", key)))

    trainLoader = DataLoader(dataSet, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    optimizer = optim.Adam(params=model.parameters(), lr=learning_rate)
    # NOTE(review): learning_drop is read from the config but unused; the
    # scheduler's step_size/gamma are hard-coded — confirm intent.
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.8)

    test_transform = transforms.Compose([
        transforms.Resize(224),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    # Use the configured root_dir: the original hard-coded "data/train" here,
    # silently ignoring the config value used for the training split.
    validateDataSet = KaggleLoader(root_dir=root_dir, train=False, transform=test_transform)
    validateLoader = DataLoader(validateDataSet, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    logging.info("validation set size = %d" % len(validateDataSet))

    for epoch in range(1, max_epochs + 1):
        train(trainLoader, optimizer, epoch)
        scheduler.step()
        test(validateLoader)

    # Save the trained model. Pass a path so torch.save opens and closes the
    # file itself; the original passed open(..., 'wb') and leaked the handle.
    torch.save(model, log_file_name.replace(".log", "_model.pkl"))

    # Loss curves (train vs. validation) per epoch.
    plt.figure()
    plt.title("batchsize = %d lr = %0.4f minTestLoss=%f" % (batch_size, learning_rate, min(total_loss)))
    plt.plot(total_loss)
    plt.plot(train_loss)
    plt.legend(["test_loss", "train_loss"])
    plt.xlabel("epoch")
    plt.ylabel("loss")
    plt.savefig(fname=log_file_name.replace(".log", ".loss.png"))
    plt.close()

    # Validation-accuracy curve per epoch.
    plt.figure()
    plt.title("batchsize = %d lr = %0.4f maxTestAcc= %0.3f%%" % (batch_size, learning_rate, max(total_acc)))
    plt.plot(total_acc)
    plt.xlabel("epoch")
    plt.ylabel("acc")
    plt.savefig(fname=log_file_name.replace(".log", ".acc.png"))
    plt.close()

    # Dump the raw metric series as text.
    txtFileName = log_file_name.replace(".log", '.result.txt')
    with open(txtFileName, 'w') as f:
        for value in total_loss:
            f.write("%0.5f " % value)
        f.write('\n')
        # NOTE(review): the train-loss and accuracy series run together on
        # one line (no newline between them) — kept as-is for compatibility
        # with any existing result parser; confirm whether a '\n' is missing.
        for value in train_loss:
            f.write("%0.5f " % value)
        for value in total_acc:
            f.write("%0.5f " % value)
        f.write('\n')
    logging.info("finished")
