# -*- coding:utf-8 -*-
'''
Compute the cosine similarity between the overall-sample update direction and
each individual clean sample's update direction, and between the overall-sample
update direction and each individual noisy sample's update direction.
'''
import os
import time
import argparse
import datetime

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

from loss.loss import *
from data.load import load_dataset
from model.cnn import cnn, resnet34, resnet50, resnet101
from utils.acc_eva import accuracy, evaluate
from utils.build_vector import build_gradient_vector, build_gradient_vector_list,build_dis_vector_list, build_weight_vector, dis


# ---- Command-line configuration -------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--method', default='coteaching', type=str, help='method to deal with noise')
parser.add_argument('--model', default='resnet34', type=str, help='[cnn, resnet34, resnet50, resnet101]')
parser.add_argument('--lr', default=0.001, type=float)
parser.add_argument('--result_dir', default='results/', type=str, help='dir to save result txt files')
parser.add_argument('--noise_type', default='pairflip', type=str, help='[pairflip, symmetric]')
parser.add_argument('--noise_rate', default=0.2, type=float, help='corruption rate, should be less than 1')
parser.add_argument('--dataset', default='mnist', type=str, help='mnist, cifar10, or cifar100')
parser.add_argument('--batch_size', default=128, type=int)
parser.add_argument('--epoch_begin_record', default=25, type=int)
parser.add_argument('--n_epoch', default=200, type=int)
parser.add_argument('--seed', default=1, type=int)
parser.add_argument('--num_workers', default=4, type=int, help='how many subprocesses to use for data loading')

args = parser.parse_args()

# Run on the GPU when one is available, otherwise fall back to the CPU.
if torch.cuda.is_available():
    device = torch.device('cuda')
else:
    device = torch.device('cpu')

# Dataset: input channel count, class count, and the train/test splits
# (labels in train_dataset are corrupted according to noise_type/noise_rate).
input_channel, num_classes, train_dataset, test_dataset = load_dataset(args.dataset, args.noise_type, args.noise_rate)

# Dispatch table: --method flag -> noise-handling loss routine (from loss.loss).
method_dic = dict(
    none=none,
    coteaching=loss_coteaching,
    coteaching_plus=loss_coteaching_plus,
    my_teaching_soft=my_loss_coteaching,
    my_teaching_self=my_loss_coteaching_2,
    my_teaching_hard=my_loss_coteaching_hard,
    loss_coteaching_pseudo=loss_coteaching_pseudo,
)
method = method_dic[args.method]

# Dispatch table: --model flag -> network constructor (from model.cnn).
model_dic = dict(
    cnn=cnn,
    resnet34=resnet34,
    resnet50=resnet50,
    resnet101=resnet101,
)
model = model_dic[args.model]

# Reproducibility: seed both the CPU and the current GPU RNGs.
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)


# Output location: <result_dir>/<dataset>/single_gradient/<model>/
# os.makedirs is the portable, shell-free equivalent of `mkdir -p`.
save_dir = os.path.join(args.result_dir, args.dataset, 'single_gradient', args.model)
os.makedirs(save_dir, exist_ok=True)

model_str = args.dataset+'_'+args.model+'_'+args.noise_type+'_'+str(args.noise_rate)
txtfile = os.path.join(save_dir, model_str + '.txt')
nowTime = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
# Preserve the results of a previous run instead of appending to them.
if os.path.exists(txtfile):
    os.replace(txtfile, txtfile + '.bak-%s' % nowTime)

# Train the Model
# Train the Model
def train(method, train_loader, epoch, model, optimizer):
    """Run one training epoch, recording per-sample gradient distances.

    Args:
        method: noise-handling loss routine (unused here; kept so this script
            shares the training interface of its siblings).
        train_loader: yields (images, labels, cleans, indexes) batches.
        epoch: current epoch index (0-based).
        model: the network being trained.
        optimizer: optimizer for ``model``.

    Returns:
        Tuple ``(dis_vector, clean_or_not, train_acc, train_loss)``.  The first
        two are ``None`` for epochs before ``args.epoch_begin_record``;
        otherwise they are sorted by original dataset index so the rows written
        each epoch refer to the same samples.
    """
    print ('Training %s...' % (model_str))

    train_total = 0
    train_correct = 0
    train_loss = 0.0
    dis_vector, indices, clean_or_not = None, None, None

    for i, (images, labels, cleans, indexes) in enumerate(train_loader):
        images, labels, cleans, indexes = images.to(device), labels.to(device), cleans.to(device), indexes.to(device)

        logits = model(images)
        prec = accuracy(logits, labels, topk=(1,))
        # Per-sample losses (reduction='none'): build_dis_vector_list needs
        # them individually before they are averaged for the optimizer step.
        loss = F.cross_entropy(logits, labels, reduction='none')

        if epoch >= args.epoch_begin_record:
            batch_dis = build_dis_vector_list(model, optimizer, loss, dis)
            batch_clean = labels == cleans.reshape(-1)
            if dis_vector is None:
                dis_vector, indices, clean_or_not = batch_dis, indexes, batch_clean
            else:
                dis_vector = torch.cat((dis_vector, batch_dis))
                indices = torch.cat((indices, indexes))
                clean_or_not = torch.cat((clean_or_not, batch_clean))

        loss = loss.mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        train_total += 1
        train_correct += prec[0]
        # .item() detaches the scalar; accumulating the tensor itself would
        # keep every batch's loss tensor alive on the GPU for the whole epoch.
        train_loss += loss.item()
        if i % 100 == 0:
            print ('Epoch [%d/%d], Iter [%d/%d] Training Accuracy: %.4F, Loss: %.4f' % (epoch+1, args.n_epoch, i+1, len(train_dataset)//args.batch_size, prec[0], loss.item()))

    train_acc = train_correct / train_total
    train_loss = train_loss / train_total

    # Reorder the recorded vectors by original dataset index so rows align
    # across epochs (the loader shuffles, so batch order differs every epoch).
    if isinstance(indices, torch.Tensor):
        _, order = indices.sort()
        dis_vector = dis_vector[order]
        clean_or_not = clean_or_not[order]

    return dis_vector, clean_or_not, train_acc, train_loss

def main():
    """Train on the noisy dataset and log per-sample gradient distances.

    Writes to ``txtfile``: one line with the clean/noisy mask (1 = label is
    clean) at the first recorded epoch, then one line of per-sample gradient
    distances for every epoch from ``args.epoch_begin_record`` on.
    """
    # Data Loader (Input Pipeline)
    print ('loading dataset...')
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=args.batch_size,
                                               num_workers=args.num_workers,
                                               drop_last=True,
                                               shuffle=True)

    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              batch_size=5000,
                                              num_workers=args.num_workers,
                                              drop_last=True,
                                              shuffle=False)

    # Define models.  Named `net` so it does not shadow the imported `cnn`
    # model constructor.
    print ('building model...')
    net = nn.DataParallel(model(input_channel, num_classes))
    net = net.to(device)
    optimizer = torch.optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-3)

    # training
    for epoch in range(args.n_epoch):
        start_time = time.time()
        net.train()
        dis_vector, clean_or_not, train_acc, train_loss = train(method, train_loader, epoch, net, optimizer)

        # NOTE(review): the model is left in train() mode during evaluation;
        # confirm that `evaluate` switches it to eval() internally.
        test_acc, test_loss = evaluate(test_loader, net, device)
        print('Epoch [%d/%d] Test Accuracy on the %s test images: %.4f %%' % (epoch+1, args.n_epoch, len(test_dataset), test_acc))

        if epoch == args.epoch_begin_record:
            # First recorded epoch: write the clean/noisy mask once.
            last_clean_or_not = clean_or_not
            with open(txtfile, "a") as myfile:
                for con in clean_or_not:
                    myfile.write(str(int(con)) + ' ')
                myfile.write('\n')
        if epoch >= args.epoch_begin_record:
            # Sanity check: the mask must be identical every epoch (rows are
            # sorted by dataset index inside train()).
            if not last_clean_or_not.equal(clean_or_not):
                print(epoch, 'last_clean_or_not != clean_or_not')
            last_clean_or_not = clean_or_not

            with open(txtfile, "a") as myfile:
                for dv in dis_vector:
                    myfile.write(str(dv.item()) + ' ')
                myfile.write('\n')
        print('time:', time.time() - start_time, 's')
        print()


if __name__ == '__main__':
    main()
