__author__ = 'Yubo Wang'

import os
import sys
import pickle
import torch
import torchvision
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import save_image
from sklearn.metrics import recall_score, precision_score, f1_score, accuracy_score, roc_auc_score, precision_recall_curve
import sklearn.metrics as metrics
import torch.nn.functional as F
from dataset import ChemDataset
from network import ChemNet
import numpy as np
from utils.metriclearning import construct_k_neighbor, compute_distance_matrix
import argparse
import torch.utils.data as Data
from torchvision import datasets,transforms
from sklearn import svm
from utils.DataTool import simulate_data
from sklearn.neighbors import LocalOutlierFactor

# Command-line configuration for this pre-training run.
parser = argparse.ArgumentParser()
parser.add_argument('-b', '--beta', type=float, default=0.5)    # NOTE(review): unused in this script — confirm whether a later stage reads it
parser.add_argument('-m', '--margin', type=float, default=0.1)  # NOTE(review): unused in this script
parser.add_argument('-p', '--pos', type=int, default=0)         # index of the class treated as the inlier ("positive") class
parser.add_argument('-ds', '--dataset', type=str, default='kmnist')  # dataset name forwarded to simulate_data
args = parser.parse_args()

os.environ["CUDA_VISIBLE_DEVICES"]="4,5,6,7"
num_epochs = 2000
ratio = 0.3              # outlier contamination ratio passed to simulate_data
batch_size = 128         # NOTE(review): unused — the loop below trains full-batch on test_data
learning_rate = 1e-2
denoising = True         # NOTE(review): unused in this script
use_cuda = torch.cuda.is_available()

# Build an inlier/outlier split for the chosen dataset.
# Assumes simulate_data returns torch tensors — TODO confirm against utils.DataTool.
train_data, train_targets, test_data, test_targets = simulate_data(args.dataset, args.pos, ratio)

# Cast inputs to float32 for the network and loss computations below.
train_data = train_data.float()
test_data = test_data.float()

def MSELoss(pred, target, size_average=False):
    """Per-sample reconstruction error between ``pred`` and ``target``.

    Despite the name, this is not a mean-squared error: each sample is
    flattened and the Euclidean (L2) distance between the two flattened
    vectors is divided by ``pred.size(1)`` (the size of dim 1, not the
    total element count).

    Args:
        pred: predictions, shape ``(batch, ...)``.
        target: targets, broadcast-compatible after flattening to
            ``(batch, -1)``.
        size_average: if True, return the scalar mean over the batch;
            otherwise return the per-sample loss vector of shape ``(batch,)``.
    """
    flat_pred = pred.view(pred.size(0), -1)
    flat_target = target.view(pred.size(0), -1)
    loss = F.pairwise_distance(flat_pred, flat_target, p=2) / pred.size(1)
    return loss.mean() if size_average else loss

# Two-decoder autoencoder: decoder 1 reconstructs inliers, decoder 2 outliers
# (see the model(img, 1) / model(img, 2) calls in the loop below).
# Assumes ChemNet(input_dim, latent_dim) — TODO confirm against network.py.
model = ChemNet(train_data.size(1), 32)
device = 'cuda' if use_cuda else 'cpu'
model.to(device)

# regularization parameter
# NOTE(review): `center` is never used below — presumably a leftover from a
# Deep-SVDD-style center regularizer; confirm before removing.
center = torch.rand(1, 32, requires_grad=True, device=device)

criterion = MSELoss  # per-sample reconstruction error (vector of shape (batch,))
optimizer = torch.optim.Adam(
    [{'params': model.parameters(), 'lr': learning_rate, 'weight_decay': 1e-5}, ])

# Pseudo-label the evaluation set with an unsupervised outlier detector;
# LOF outputs {-1, +1}, mapped here to {0 = outlier, 1 = inlier}.
print('generating pesudo targets')
outliers_fraction = ratio/(1+ratio)
#clf = svm.OneClassSVM(nu=0.001, kernel="linear", gamma=0.1, max_iter=1000)
#pesudo_targets = (clf.fit(train_data).predict(test_data)+1)/2

# Transductive use of LOF: fit_predict labels the same samples it was fit on.
clf = LocalOutlierFactor(n_neighbors=15, contamination=outliers_fraction, n_jobs=-1)
pesudo_targets = (clf.fit_predict(test_data)+1)/2

#clf = LocalOutlierFactor(n_neighbors=15, novelty=True, contamination=0.001, n_jobs=-1)
#pesudo_targets = (clf.fit(train_data).predict(test_data)+1)/2

print('begin training')

for epoch in range(num_epochs):

    ulb_img = test_data.to(device)
    # ===================forward=====================
    # Decoder 1 reconstructs pseudo-inliers, decoder 2 pseudo-outliers.
    ulb_out_d1, _ = model(ulb_img, 1)
    ulb_out_d2, _ = model(ulb_img, 2)

    ulb_error_d1 = criterion(ulb_out_d1, ulb_img)
    ulb_error_d2 = criterion(ulb_out_d2, ulb_img)

    # Route each sample's reconstruction error to the decoder matching its
    # LOF pseudo label (1 = inlier -> decoder 1, 0 = outlier -> decoder 2).
    loss = torch.cat((ulb_error_d1[pesudo_targets==1], ulb_error_d2[pesudo_targets==0])).mean()

    # ===================backward====================
    optimizer.zero_grad()
    # FIX: retain_graph=True was unnecessary — the forward graph is rebuilt
    # every epoch, so retaining it only held extra memory alive.
    loss.backward()
    optimizer.step()

    # ===================evaluation==================
    # FIX: score without autograd bookkeeping; values are unchanged but no
    # graph is built for the evaluation forward passes.
    with torch.no_grad():
        test_img = test_data.to(device)
        y_true = test_targets.detach()
        R_in, _ = model(test_img, 1)
        R_out, _ = model(test_img, 2)
        L_in = criterion(R_in, test_img)
        L_out = criterion(R_out, test_img)
        # Positive score => decoder 1 (inlier decoder) fits the sample better.
        y_score = (L_out - L_in).to('cpu')
    y_pred = (y_score > 0).int()
    Miss_in = (y_score[y_true==1]<=0).sum().item()   # true inliers scored as outliers
    Miss_out = (y_score[y_true==0]>0).sum().item()   # true outliers scored as inliers
    f1 = f1_score(y_true, y_pred)
    auc = roc_auc_score(y_true, y_score)
    print("epoch = %4d, auc = %.4f, f1 = %.4f, Miss_in = %d, Miss_out = %d, loss = %.4f" % (epoch, auc, f1, Miss_in, Miss_out, loss.item()))

# FIX: create the checkpoint directory so a fresh checkout does not crash
# with FileNotFoundError on the first save.
os.makedirs("saved_model", exist_ok=True)
torch.save(model.state_dict(), "saved_model/%s_%d_pre.pth" % (args.dataset, args.pos))

