# -*- coding: utf-8  -*-
# @Time : 2021/10/25  16:15
# @Author : zhangnengbo
# @File : train_reg.py
# @Company : HPY

import os
import sys
import argparse
import time
from datetime import datetime

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms

from torch.utils.data import DataLoader
# from torch.utils.tensorboard import SummaryWriter

from conf import settings
from utils import get_network, get_training_dataloader, get_test_dataloader, WarmUpLR, \
    most_recent_folder, most_recent_weights, last_epoch, best_acc_weights, get_training_dataloader_target, get_test_dataloader_target

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable


def train(epoch):
    """Run one training epoch of the regression net over ``stone_train``.

    Relies on module-level globals set up in ``__main__``: ``net``,
    ``args``, ``optimizer``, ``loss_function`` and the ``stone_train``
    dataloader.

    Args:
        epoch: 1-based epoch index, used only for progress logging.
    """
    start = time.time()
    net.train()
    for batch_index, (images, labels) in enumerate(stone_train):

        if args.gpu:
            labels = labels.cuda()
            images = images.cuda()

        optimizer.zero_grad()
        outputs = net(images)

        # MSELoss requires matching float dtypes for output and target.
        outputs = outputs.to(torch.float32)
        labels = labels.to(torch.float32)
        loss = loss_function(outputs, labels)

        loss.backward()
        optimizer.step()

        print('Training Epoch: {epoch} [{trained_samples}/{total_samples}]\tLoss: {:0.4f}\tLR: {:0.6f}'.format(
            loss.item(),
            optimizer.param_groups[0]['lr'],
            epoch=epoch,
            trained_samples=batch_index * args.b + len(images),
            total_samples=len(stone_train.dataset)
        ))

    finish = time.time()

    print('epoch {} training time consumed: {:.2f}s'.format(epoch, finish - start))

@torch.no_grad()
def eval_training(epoch=0, tb=True):
    """Evaluate ``net`` on the two test dataloaders and report accuracy.

    ``stone_mine`` holds positive samples and ``stone_waste`` negative
    ones; a prediction counts as correct when the regression output falls
    on the matching side of zero.

    Relies on module-level globals: ``net``, ``args``, ``loss_function``,
    ``stone_mine`` and ``stone_waste``.

    Args:
        epoch: epoch index, used only for logging.
        tb: unused (kept for interface compatibility with callers).

    Returns:
        Overall accuracy in percent (0.0-100.0). Previously this
        returned a hard-coded 100, which made the best-checkpoint
        comparison in the caller meaningless.
    """
    start = time.time()
    net.eval()  # freeze BatchNorm running stats and disable dropout

    test_loss = 0.0        # summed MSE loss over both test sets
    correct_number_0 = 0.0  # waste samples correctly predicted negative
    correct_number_1 = 0.0  # mine samples correctly predicted positive

    for (images, labels) in stone_mine:

        if args.gpu:
            images = images.cuda()
            labels = labels.cuda()

        outputs = net(images)
        loss = loss_function(outputs, labels)
        # Accumulate into test_loss so the reported average loss is real;
        # previously the losses went into variables that were never printed.
        test_loss += loss.item()
        # Output > 0 means the sample is predicted "mine" (positive class).
        correct_number_1 = correct_number_1 + (outputs > 0).sum()

    for (images, labels) in stone_waste:

        if args.gpu:
            images = images.cuda()
            labels = labels.cuda()

        outputs = net(images)
        loss = loss_function(outputs, labels)
        test_loss += loss.item()
        # Output < 0 means the sample is predicted "waste" (negative class).
        correct_number_0 = correct_number_0 + (outputs < 0).sum()

    number_1 = len(stone_mine.dataset)
    number_0 = len(stone_waste.dataset)
    length = number_1 + number_0
    finish = time.time()
    if args.gpu:
        print('GPU INFO.....')
        print(torch.cuda.memory_summary(), end='')
    print('Evaluating Network.....')
    accuracy = (correct_number_1 + correct_number_0).float() / length
    print('Test set: Epoch: {}, Average loss: {:.4f}, Accuracy: {:.4f}, Time consumed:{:.2f}s'.format(
        epoch,
        test_loss / length,
        accuracy,
        finish - start
    ))
    print('the number of mine is {:.4f},the number of correcting mine is {:.4f}, the number of waste is {:.4f}, the number of correcting waste {:.4f}'.format(
        number_1,
        correct_number_1 / number_1,
        number_0,
        correct_number_0 / number_0
    ))

    return 100.0 * float(accuracy)

def mian_run(dataPath='G:\\dingding\\ImageTrain\\', savePath='G:\\workspace\\torch_cifar100\\checkpoint_save', modelType='companyReg'):
    """Train the regression net on images under *dataPath* for 10 epochs,
    saving a checkpoint after every epoch under *savePath*.

    Args:
        dataPath: root directory containing the training/test images.
        savePath: root directory where checkpoint files are written.
        modelType: network name passed to ``get_network`` and embedded in
            the checkpoint file names (previously ignored in favor of a
            hard-coded 'companyReg'; the default keeps old behavior).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-net', type=str, required=False, help='net type')
    parser.add_argument('-gpu', action='store_true', default=False, help='use gpu or not')
    parser.add_argument('-b', type=int, default=32, help='batch size for dataloader')
    parser.add_argument('-warm', type=int, default=1, help='warm up training phase')
    parser.add_argument('-lr', type=float, default=0.01, help='initial learning rate')
    parser.add_argument('-resume', action='store_true', default=False, help='resume training')
    # Parse an empty argv: this function is called programmatically, so the
    # host process command line must not leak into (or crash) the parser.
    args = parser.parse_args([])
    args.net = modelType
    print(modelType)

    args.gpu = True
    net = get_network(args)

    print("model have {} paramerters in total".format(sum(x.numel() for x in net.parameters())))

    # data preprocessing:
    stone_train = get_training_dataloader_target(
        dataPath,
        settings.CIFAR100_TRAIN_MEAN,
        settings.CIFAR100_TRAIN_STD,
        num_workers=1,
        batch_size=args.b,
        shuffle=True
    )

    stone_mine, stone_waste = get_test_dataloader_target(
        dataPath,
        settings.CIFAR100_TRAIN_MEAN,
        settings.CIFAR100_TRAIN_STD,
        num_workers=1,
        batch_size=args.b,
        shuffle=True
    )

    loss_function = nn.MSELoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9, weight_decay=0.0002)

    checkpoint_dir = os.path.join(savePath, args.net, settings.TIME_NOW)
    # torch.save does not create parent directories; without this the very
    # first checkpoint save would raise FileNotFoundError.
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    checkpoint_path = os.path.join(checkpoint_dir, '{net}-{epoch}-{type}.pth')

    for epoch in range(1, 10 + 1):
        start = time.time()
        net.train()
        for batch_index, (images, labels) in enumerate(stone_train):

            if args.gpu:
                labels = labels.cuda()
                images = images.cuda()

            optimizer.zero_grad()
            outputs = net(images)

            # MSELoss requires matching float dtypes for output and target.
            outputs = outputs.to(torch.float32)
            labels = labels.to(torch.float32)
            loss = loss_function(outputs, labels)

            loss.backward()
            optimizer.step()

        finish = time.time()
        print('epoch {} training time consumed: {:.2f}s'.format(epoch, finish - start))

        # Save a checkpoint after every epoch.
        weights_path = checkpoint_path.format(net=args.net, epoch=epoch, type='regular')
        print('saving weights file to {}'.format(weights_path))
        torch.save(net.state_dict(), weights_path)




if __name__ == '__main__':

    # Command-line options; note that -net and -gpu are overridden below,
    # so only -b, -warm, -lr and -resume actually take effect from argv.
    parser = argparse.ArgumentParser()
    parser.add_argument('-net', type=str, required=False, help='net type')
    parser.add_argument('-gpu', action='store_true', default=False, help='use gpu or not')
    parser.add_argument('-b', type=int, default=32, help='batch size for dataloader')
    parser.add_argument('-warm', type=int, default=1, help='warm up training phase')
    parser.add_argument('-lr', type=float, default=0.01, help='initial learning rate')
    parser.add_argument('-resume', action='store_true', default=False, help='resume training')
    args = parser.parse_args()
    # args.net = 'resnet18'
    # Forced overrides: always the regression net, always on GPU.
    args.net = 'companyReg'

    args.gpu = True
    net = get_network(args)

    print("model have {} paramerters in total".format(sum(x.numel() for x in net.parameters())))

    # data preprocessing: these dataloaders become the module-level globals
    # that train() and eval_training() read.
    stone_train = get_training_dataloader(
        settings.CIFAR100_TRAIN_MEAN,
        settings.CIFAR100_TRAIN_STD,
        num_workers=4,
        batch_size=args.b,
        shuffle=True
    )

    # Two separate test loaders: one for positive ("mine") samples and one
    # for negative ("waste") samples.
    stone_mine, stone_waste = get_test_dataloader(
        settings.CIFAR100_TRAIN_MEAN,
        settings.CIFAR100_TRAIN_STD,
        num_workers=4,
        batch_size=args.b,
        shuffle=True
    )

    # loss_function = nn.CrossEntropyLoss()  # focal_loss() #
    # Regression objective; NOTE(review): lr is hard-coded to 0.001 here,
    # ignoring the -lr command-line option — confirm that is intended.
    loss_function = nn.MSELoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9, weight_decay=0.0002)

    # train_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=settings.MILESTONES, gamma=0.2) #learning rate decay
    iter_per_epoch = len(stone_train)
    # warmup_scheduler = WarmUpLR(optimizer, iter_per_epoch * args.warm)

    # Checkpoints go under <CHECKPOINT_PATH>/<net>/<timestamp>/.
    checkpoint_path = os.path.join(settings.CHECKPOINT_PATH, args.net, settings.TIME_NOW)

    #use tensorboard
    if not os.path.exists(settings.LOG_DIR):
        os.mkdir(settings.LOG_DIR)

    # Create the checkpoint directory before training so torch.save works.
    if not os.path.exists(checkpoint_path):
        os.makedirs(checkpoint_path)
    checkpoint_path = os.path.join(checkpoint_path, '{net}-{epoch}-{type}.pth')

    best_acc = 0.0


    for epoch in range(1, settings.EPOCH + 1):
       #  if epoch > args.warm:
          #  train_scheduler.step(epoch)

        train(epoch)
        acc = eval_training(epoch)

        #start to save best performance model after learning rate decay to 0.01
        # NOTE(review): eval_training() currently returns a constant 100, so
        # this branch saves a 'best' checkpoint every epoch past the milestone.
        if epoch > settings.MILESTONES[1] and best_acc < acc:
            weights_path = checkpoint_path.format(net=args.net, epoch=epoch, type='best')
            print('saving weights file to {}'.format(weights_path))
            torch.save(net.state_dict(), weights_path)
            best_acc = acc
            continue

        # Periodic checkpoint every SAVE_EPOCH epochs.
        if not epoch % settings.SAVE_EPOCH:
            weights_path = checkpoint_path.format(net=args.net, epoch=epoch, type='regular')
            print('saving weights file to {}'.format(weights_path))
            torch.save(net.state_dict(), weights_path)