__author__ = 'Yubo Wang'

import os
import sys
import pickle
import torch
import torchvision
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import save_image
from sklearn.metrics import recall_score, precision_score, f1_score, accuracy_score, roc_auc_score, precision_recall_curve
import sklearn.metrics as metrics
import torch.nn.functional as F
from dataset import ChemDataset
from network import ChemNet
import numpy as np
from utils.metriclearning import construct_k_neighbor, compute_distance_matrix
import argparse
import torch.utils.data as Data
from torchvision import datasets,transforms
from utils.DataTool import simulate_data

# ----- command-line configuration -----
parser = argparse.ArgumentParser()
parser.add_argument('-b', '--beta', type=float, default=0.5)      # weight of the latent margin terms
parser.add_argument('-m', '--margin', type=float, default=0.1)    # hypersphere margin around the learned center
parser.add_argument('-p', '--pos', type=int, default=0)
parser.add_argument('-ds', '--dataset', type=str, default='kmnist')
parser.add_argument('-pt', '--preTrain', type=int, default=1)
args = parser.parse_args()

os.environ["CUDA_VISIBLE_DEVICES"] = "4,5,6,7"
num_epochs = 4000
ratio = 0.3
batch_size = 128
learning_rate = 1e-3
denoising = True
use_cuda = torch.cuda.is_available()

# Shared preprocessing: resize to 28px, convert to single-channel, and
# flatten to a 1-D vector (matching the ChemNet(784, 32) input size).
# Defined once instead of being duplicated for the train and test datasets.
_transform = transforms.Compose([
    transforms.Resize(28),
    transforms.Grayscale(),
    transforms.ToTensor(),
    transforms.Lambda(lambda i: i.view(-1)),
])

# Train split holds the labeled positives; test split doubles as the
# unlabeled pool during training and the evaluation set afterwards.
train_dataset = torchvision.datasets.ImageFolder(root='KolektorSDD_cora/train', transform=_transform)
test_dataset = torchvision.datasets.ImageFolder(root='KolektorSDD_cora/test', transform=_transform)

train_loader = Data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = Data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)


def MSELoss(pred, target, size_average=False):
    """Per-sample reconstruction error.

    Computes the L2 distance between the flattened prediction and target,
    normalised by the feature dimension ``pred.size(1)``.

    Returns a 1-D tensor of per-sample losses, or their mean when
    ``size_average`` is True.
    """
    flat_pred = pred.view(pred.size(0), -1)
    flat_target = target.view(pred.size(0), -1)
    per_sample = F.pairwise_distance(flat_pred, flat_target, p=2) / pred.size(1)
    return per_sample.mean() if size_average else per_sample

# Pick the device FIRST so the model and the center live on the same device
# and the script still runs on CPU-only machines. (Previously the model was
# pushed to 'cuda' unconditionally, before `device` was even computed, which
# crashes when CUDA is unavailable.)
device = 'cuda' if use_cuda else 'cpu'

# Autoencoder: 784-d flattened image -> 32-d latent code.
model = ChemNet(784, 32).to(device)

# Learnable hypersphere center in the 32-d latent space (regularization
# target for the margin losses below).
center = torch.rand(1, 32, requires_grad=True, device=device)

criterion = MSELoss

# Two optimizers: one for the network weights (with weight decay), one for
# the center (no weight decay, so it is free to move).
optimizer1 = torch.optim.Adam(
    [{'params': model.parameters(), 'lr': learning_rate, 'weight_decay': 1e-5}])

optimizer2 = torch.optim.Adam(
    [{'params': center, 'lr': learning_rate}])

def kmeans(pos, ulb):
    '''
    2-means initialization of the latent pseudo-labels.

    Cluster 0 is anchored by the known-positive features ``pos``; cluster 1
    is free. Returns a LongTensor of shape (ulb.size(0),) assigning each
    unlabeled sample to cluster 0 (positive-like) or cluster 1.
    '''
    center = torch.rand(2, pos.size(1))
    # Random initial assignment; ~half the samples start in each cluster.
    lbl = torch.rand(ulb.size(0)).gt(0.5).long()
    if use_cuda:
        center = center.cuda()
        lbl = lbl.cuda()
    prev_lbl = lbl

    for i in range(100):
        # Per-dimension centroids (dim=0). Without dim=0, torch.mean
        # collapses to a single scalar that is broadcast across the row,
        # so the "centroid" loses all per-feature information.
        # Boolean-mask indexing keeps a 2-D result even for a
        # single-element cluster (nonzero().squeeze() did not).
        center[0] = torch.mean(torch.cat((pos, ulb[lbl.eq(0)])), dim=0)
        # NOTE(review): an empty cluster 1 still yields a NaN centroid here,
        # as in the original code.
        center[1] = torch.mean(ulb[lbl.eq(1)], dim=0)

        dist = compute_distance_matrix(ulb, center)
        loss, lbl = dist.topk(k=1, dim=1, largest=False)
        lbl = lbl.squeeze()
        # Stop when the assignment is unchanged. Comparing element-wise:
        # a plain sum of (lbl - prev_lbl) can be zero even when labels
        # changed (a 0->1 swap cancels a 1->0 swap).
        if torch.equal(lbl, prev_lbl):
            break
        prev_lbl = lbl
    return lbl


def corruption(feat, mean=0, stddev=0.1):
    '''
    Return a copy of ``feat`` corrupted with additive Gaussian noise
    (denoising-autoencoder style input corruption).
    '''
    perturbation = torch.zeros(feat.size()).normal_(mean, stddev)
    if use_cuda:
        perturbation = perturbation.cuda()
    return feat + perturbation


def margin_loss(x, c, margin):
    """Hinge distance outside the sphere: how far each row of ``x`` lies
    beyond radius ``margin`` from center ``c`` (zero inside the margin)."""
    expanded_center = c.repeat(x.size(0), 1)
    excess = F.pairwise_distance(x, expanded_center) - margin
    return excess.clamp(min=0)


def inverse_margin_loss(x, c, margin):
    """Hinge distance inside the sphere: how far each row of ``x`` falls
    short of radius ``margin`` from center ``c`` (zero once pushed beyond)."""
    tiled_center = c.repeat(x.size(0), 1)
    shortfall = margin - F.pairwise_distance(x, tiled_center)
    return shortfall.clamp(min=0)

# Evaluation set: load the whole test split as a single batch so the
# epoch-end metrics are computed over every test sample at once.
valid_loader = Data.DataLoader(dataset=test_dataset, batch_size=len(test_dataset), shuffle=True)
# next(iter(...)) replaces the Python-2-only .next() method, which no
# longer exists on modern Python / PyTorch DataLoader iterators.
test_img, y_true = next(iter(valid_loader))
test_img = test_img.to(device)

print('begin training')
max_f1 = 0.0
max_auc = 0.0

for epoch in range(num_epochs):

    # Labeled-positive batches come from train_loader; the iterator is
    # re-created whenever it is exhausted so every unlabeled batch (drawn
    # from test_loader) gets a positive batch to pair with.
    it = iter(train_loader)
    for ib, data in enumerate(test_loader):
        ulb_img, _ = data
        N = ulb_img.size(0)
        try:
            # next(it) replaces the Python-2-only it.next(), which raises
            # AttributeError on modern Python / PyTorch iterators.
            img, _ = next(it)
        except StopIteration:
            it = iter(train_loader)
            img, _ = next(it)
        # Trim both batches to a common length so the per-sample terms align.
        if len(ulb_img) != len(img):
            l = min(len(img), len(ulb_img))
            img = img[:l]
            ulb_img = ulb_img[:l]

        img = img.to(device)
        ulb_img = ulb_img.to(device)
        # ===================forward=====================
        # The second argument selects a decoder path: 1 for the "positive"
        # branch, 2 for the "negative" branch.
        # NOTE(review): exact semantics depend on ChemNet.forward -- confirm
        # against network.py.
        pos_out, pos_z = model(img, 1)
        ulb_out, ulb_z = model(ulb_img, 2)
        ulb_out_pos, ulb_z_pos = model(ulb_img, 1)

        if epoch == 0:
            # First epoch: bootstrap pseudo-labels for the unlabeled batch
            # with k-means (idx is a 1-D 0/1 label vector).
            idx = kmeans(img.detach(), ulb_img.detach())
            pos_loss = criterion(pos_out, img)  # positive reconstruction loss
            pos_loss = pos_loss.mean()
            neg_loss_1 = criterion(ulb_out_pos, ulb_img)
            neg_loss_2 = criterion(ulb_out, ulb_img)
            neg_loss = torch.cat((neg_loss_1.unsqueeze(1), neg_loss_2.unsqueeze(1)), 1)

            # Unlabeled samples assigned to the positive cluster: fold their
            # positive-branch reconstruction loss into pos_loss and pull
            # their latent codes inside the margin sphere around `center`.
            l1 = neg_loss_1[idx.eq(0)]
            if l1.nelement() > 1:
                pos_loss = (pos_loss * img.size(0) + torch.sum(l1)) / (img.size(0) + l1.nelement())
                loss = args.beta * (torch.sum(margin_loss(pos_z, center, args.margin)) +
                    torch.sum(margin_loss(ulb_z_pos[idx.eq(0).nonzero()[:, 0]], center, args.margin))) / (N + l1.nelement())
            else:
                loss = args.beta * torch.mean(margin_loss(pos_z, center, args.margin))

            # Samples assigned to the negative cluster: minimise their
            # negative-branch reconstruction and push their latent codes
            # OUT beyond twice the margin.
            l2 = neg_loss_2[idx.eq(1)]
            if l2.nelement() > 1:
                loss = loss + torch.mean(l2) + args.beta * torch.mean(inverse_margin_loss(ulb_z_pos[idx.eq(1).nonzero()[:, 0]],
                         center, 2 * args.margin))
            else:
                loss = loss + neg_loss_2[0]

        else:
            # Later epochs: pseudo-label each unlabeled sample by whichever
            # branch reconstructs it with smaller loss (idx is N x 1 here,
            # hence the .squeeze() before masking).
            pos_loss = criterion(pos_out, img)  # positive reconstruction loss
            pos_loss = pos_loss.mean()
            neg_loss_1 = criterion(ulb_out_pos, ulb_img)
            neg_loss_2 = criterion(ulb_out, ulb_img)
            neg_loss = torch.cat((neg_loss_1.unsqueeze(1), neg_loss_2.unsqueeze(1)), 1)
            loss_, idx = torch.topk(neg_loss, k=1, largest=False)

            l1 = neg_loss_1[idx.eq(0).squeeze()]
            if l1.nelement() > 1:
                pos_loss = (pos_loss * img.size(0) + torch.sum(l1)) / (img.size(0) + l1.nelement())
                loss = args.beta * (torch.sum(margin_loss(pos_z, center, args.margin)) + torch.sum(margin_loss(ulb_z_pos[idx.eq(0).nonzero()[:, 0]], center, args.margin))) / (N + l1.nelement())
            else:
                loss = args.beta * torch.mean(margin_loss(pos_z, center, args.margin))

            l2 = neg_loss_2[idx.eq(1).squeeze()]
            if l2.nelement() > 1:
                loss = loss + torch.mean(l2) + args.beta * torch.mean(inverse_margin_loss(ulb_z_pos[idx.eq(1).nonzero()[:, 0]], center, 2 * args.margin))
            else:
                loss = loss + loss_[0]

        # ===================backward====================
        optimizer2.zero_grad()
        optimizer1.zero_grad()

        # pos_loss and loss share parts of the graph, so the first backward
        # must retain it for the second.
        pos_loss.backward(retain_graph=True)
        loss.backward()

        optimizer1.step()
        optimizer2.step()

    # ---- epoch-end evaluation on the full test split ----
    # no_grad: identical outputs, but avoids building an autograd graph
    # over the entire test set just to compute metrics.
    with torch.no_grad():
        R_in, _ = model(test_img, 1)
        R_out, _ = model(test_img, 2)
        L_in = criterion(R_in, test_img)
        L_out = criterion(R_out, test_img)
    # Anomaly score: positive if the "negative" branch reconstructs better.
    y_score = (L_out - L_in).detach().to('cpu')
    y_pred = (y_score > 0).int()
    Miss_in = (y_score[y_true == 1] <= 0).sum().item()   # positives scored as negative
    Miss_out = (y_score[y_true == 0] > 0).sum().item()   # negatives scored as positive
    f1 = f1_score(y_true, y_pred)
    auc = roc_auc_score(y_true, y_score)
    print("epoch = %4d, auc = %.4f, f1 = %.4f, Miss_in = %d, Miss_out = %d, loss = %.4f" % (epoch, auc, f1, Miss_in, Miss_out, pos_loss.item() + loss.item()))
