import pickle
import time

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel

class AverageMeter(object):
    """Track the latest value and running average of a streaming metric.

    Adapted from
    https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0    # most recent value passed to update()
        self.avg = 0    # running weighted average (sum / count)
        self.sum = 0    # weighted sum of all values seen so far
        self.count = 0  # total weight (typically number of samples)

    def update(self, val, n=1):
        """Record ``val`` with weight ``n`` and refresh the running average."""
        self.val = val
        self.count = self.count + n
        self.sum = self.sum + val * n
        self.avg = self.sum / self.count

        
        
def accuracy(output, target, topk=(1,)):
    """Compute precision@k of ``output`` against ``target`` for each k in ``topk``.

    ``output`` is a (batch, num_classes) score tensor, ``target`` a (batch,)
    tensor of class indices. Returns a list of scalar tensors, one per k,
    each in percent (0-100).
    """
    maxk = max(topk)
    batch_size = target.size(0)

    # Indices of the maxk highest-scoring classes, transposed to (maxk, batch).
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    # Boolean grid: row j is True where the (j+1)-th ranked guess hits the label.
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    # A sample counts for precision@k if any of its top-k rows is True;
    # summing the flattened first k rows is equivalent because at most one
    # row per column can match a single target label.
    return [
        correct[:k].reshape(-1).float().sum(0).mul_(100.0 / batch_size)
        for k in topk
    ]
        
        
           
def train(train_data,labels,model,criterion,optimizer,epoch,use_cuda,device=torch.device('cuda'),num_batchs=999999,debug_='MEDIUM',batch_size=32, uniform_reg=False):
    """Run one training pass of ``model`` over ``train_data`` / ``labels``.

    Data is sliced into contiguous mini-batches of ``batch_size``; the last
    full batch and any remainder are dropped (``len_t`` subtracts one).
    When ``uniform_reg`` is true, a KL-divergence term toward the uniform
    distribution over classes is added to the loss.

    ``epoch`` and ``debug_`` are accepted for interface compatibility but
    unused here (the progress print was permanently disabled upstream and
    has been removed as dead code).

    Returns (average loss, average top-1 accuracy in percent).
    """
    # switch to train mode
    model.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    end = time.time()
    len_t = (len(train_data)//batch_size)-1

    for ind in range(len_t):
        if ind > num_batchs:
            break
        # measure data loading time
        inputs = train_data[ind*batch_size:(ind+1)*batch_size]
        targets = labels[ind*batch_size:(ind+1)*batch_size]

        data_time.update(time.time() - end)

        if use_cuda:
            inputs, targets = inputs.to(device), targets.to(device)

        # Models in this project return (logits, hidden, extra), (logits,
        # hidden) or bare logits; unpack whichever applies. Narrowed from a
        # bare `except:` so unrelated errors (e.g. CUDA OOM) still propagate.
        try:
            outputs, _, _ = model(inputs)
        except (TypeError, ValueError):
            try:
                outputs, _ = model(inputs)
            except (TypeError, ValueError):
                outputs = model(inputs)

        if uniform_reg == True:
            # Fix: the uniform target must live on the same device as
            # `outputs` and match its (batch, num_classes) shape; the
            # original built a CPU vector of length batch_size, and `F`
            # was never imported (NameError on this path).
            # NOTE(review): F.kl_div expects log-probabilities as its first
            # argument; passing raw scores here mirrors the original call
            # order — confirm against the intended regularizer.
            uniform_ = torch.ones_like(outputs) / outputs.size(1)
            loss = criterion(outputs, targets) + F.kl_div(uniform_, outputs)
        else:
            loss = criterion(outputs, targets)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
        losses.update(loss.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

    return (losses.avg, top1.avg)


# In[9]:


def test(test_data,labels,model,criterion,use_cuda,device=torch.device('cuda'), debug_='MEDIUM',batch_size=16):
    """Evaluate ``model`` on ``test_data`` / ``labels``.

    Unlike train(), evaluation covers every sample: a final partial batch
    is included when ``len(test_data)`` is not a multiple of ``batch_size``.

    NOTE(review): inputs are always moved to ``device``; ``use_cuda`` and
    ``debug_`` are accepted for interface compatibility but unused here.

    Returns (average loss, average top-1 accuracy in percent).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()

    len_t = len(test_data)//batch_size
    if len(test_data)%batch_size:
        len_t += 1

    data_time.update(time.time() - end)

    total = 0
    # Fix: evaluation previously ran with autograd enabled, building
    # computation graphs that no backward pass ever freed.
    with torch.no_grad():
        for ind in range(len_t):
            inputs = test_data[ind*batch_size:(ind+1)*batch_size].to(device)
            targets = labels[ind*batch_size:(ind+1)*batch_size].to(device)

            total += len(inputs)
            # Unpack (logits, hidden, extra) / (logits, hidden) / logits,
            # narrowed from a bare `except:` so real errors still propagate.
            try:
                outputs, _, _ = model(inputs)
            except (TypeError, ValueError):
                try:
                    outputs, _ = model(inputs)
                except (TypeError, ValueError):
                    outputs = model(inputs)

            loss = criterion(outputs, targets)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
            losses.update(loss.item(), inputs.size(0))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))

    return (losses.avg, top1.avg)


def train_attack(train_data,labels,attack_data,attack_label,model,attack_model,criterion,attack_criterion,optimizer,
                 attack_optimizer,epoch,use_cuda,num_batchs=100000,skip_batch=0,debug_='MEDIUM',batch_size=16):
    """Train the membership-inference ``attack_model`` for (part of) one epoch.

    Each attack batch pairs ``batch_size // 2`` member samples (from
    ``train_data``, labelled 1) with the same number of non-member samples
    (from a shuffled ``attack_data``, labelled 0) and trains the attack
    model to separate the halves.

    ``criterion``, ``optimizer`` and ``epoch`` are accepted but unused here.
    Returns (average attack loss, average attack accuracy in [0, 1]).
    """
    # Target model runs with inference-time dropout/batchnorm behaviour;
    # only attack_optimizer steps, so only attack_model's weights change.
    model.eval()
    attack_model.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    
    # Random permutation over the non-member pool so each call sees a fresh pairing.
    r=np.arange(len(attack_data))
    np.random.shuffle(r)
    
    end = time.time()
    # Halve the batch size: each attack batch is half members, half non-members.
    batch_size = batch_size//2
    len_t =  min((len(attack_data)//batch_size) ,(len(train_data)//batch_size))-1

    for ind in range(skip_batch, len_t):

        if ind >= skip_batch+num_batchs:
            break
        # measure data loading time
        inputs = train_data[ind*batch_size:(ind+1)*batch_size]
        targets = labels[ind*batch_size:(ind+1)*batch_size]
        
        inputs_attack = attack_data[r[ind*batch_size:(ind+1)*batch_size]]
        targets_attack = attack_label[r[ind*batch_size:(ind+1)*batch_size]]

        data_time.update(time.time() - end)

        # NOTE(review): moves data with .cuda() directly rather than the
        # `device` convention used by train()/test() above.
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
            inputs_attack , targets_attack = inputs_attack.cuda(), targets_attack.cuda()

        # Variable wrapping is a no-op on modern PyTorch (kept as-is).
        inputs, targets = torch.autograd.Variable(inputs), torch.autograd.Variable(targets)
        inputs_attack , targets_attack = torch.autograd.Variable(inputs_attack), torch.autograd.Variable(targets_attack)

        # compute output — the target model must return (logits, hidden layer)
        outputs, h_layer = model(inputs)
        outputs_non, h_layer_non = model(inputs_attack)

        # Concatenate member (first half) and non-member (second half) batches.
        comb_inputs_h = torch.cat((h_layer,h_layer_non))
        comb_inputs = torch.cat((outputs,outputs_non))

        attack_input = comb_inputs
        
        # One-hot encoding of the true class labels for the whole attack batch.
        # NOTE(review): hard-codes CUDA tensors; this path fails on CPU-only runs.
        one_hot_tr = torch.from_numpy((np.zeros((attack_input.size(0),outputs.size(1))))).cuda().type(torch.cuda.FloatTensor)
        target_one_hot_tr=one_hot_tr.scatter_(1,torch.cat((targets,targets_attack)).type(torch.cuda.LongTensor).view([-1,1]).data,1)
        
        infer_input_one_hot = torch.autograd.Variable(target_one_hot_tr)

        # Attack model sees (class scores, hidden activations, one-hot label).
        attack_output = attack_model(attack_input,comb_inputs_h,infer_input_one_hot).view([-1])

        # Ground-truth membership: 1 for members, 0 for non-members.
        att_labels = np.zeros((inputs.size(0)+inputs_attack.size(0)))
        att_labels [:inputs.size(0)] =1.0
        att_labels [inputs.size(0):] =0.0
        is_member_labels = torch.from_numpy(att_labels).type(torch.FloatTensor)

        if use_cuda:
            is_member_labels = is_member_labels.cuda()

        v_is_member_labels = torch.autograd.Variable(is_member_labels)
        
        loss_attack = attack_criterion(attack_output, v_is_member_labels)
        
        # Attack accuracy: fraction classified correctly at a 0.5 threshold.
        prec1=np.mean(np.equal((attack_output.data.cpu().numpy() >0.5),(v_is_member_labels.data.cpu().numpy()> 0.5)))
        losses.update(loss_attack.item(), attack_input.size(0))
        top1.update(prec1, attack_input.size(0))
        
        #print ( attack_output.data.cpu().numpy(),v_is_member_labels.data.cpu().numpy() ,attack_input.data.cpu().numpy())

        # compute gradient and do SGD step (attack model only)
        attack_optimizer.zero_grad()
        loss_attack.backward()
        attack_optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress (dead code: the leading `False` disables this print)
        if False and debug_=='HIGH' and ind%100==0:
            print('Attack model: ({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | | Loss: {loss:.4f} | top1: {top1: .4f}'
                  .format(
                      batch=ind + 1,
                      size=len_t,
                      data=data_time.avg,
                      bt=batch_time.avg,
                      loss=losses.avg,
                      top1=top1.avg,
                  ))

    return (losses.avg, top1.avg)


def test_attack(train_data,labels,attack_data,attack_label,model,attack_model,criterion,attack_criterion,
                optimizer,attack_optimizer,epoch,use_cuda,batch_size=16,debug_='MEDIUM'):
    """Evaluate the membership-inference ``attack_model``.

    Each batch pairs ``batch_size`` member samples (from ``train_data``,
    labelled 1) with ``batch_size`` non-member samples (from
    ``attack_data``, labelled 0, taken in order — no shuffling here,
    unlike train_attack).

    ``criterion``, ``optimizer``, ``attack_optimizer``, ``epoch`` and
    ``debug_`` are accepted but unused here.
    Returns (average attack loss, average attack accuracy in [0, 1]).
    """
    model.eval()
    attack_model.eval()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    end = time.time()
    len_t = min((len(attack_data)//batch_size), (len(train_data)//batch_size))-1

    # Fix: torch.no_grad() previously wrapped only the (no-op) Variable
    # creation, so every forward pass below still built autograd graphs
    # during evaluation.
    with torch.no_grad():
        for ind in range(len_t):
            inputs = train_data[ind*batch_size:(ind+1)*batch_size]
            targets = labels[ind*batch_size:(ind+1)*batch_size]

            inputs_attack = attack_data[ind*batch_size:(ind+1)*batch_size]
            targets_attack = attack_label[ind*batch_size:(ind+1)*batch_size]

            data_time.update(time.time() - end)

            # NOTE(review): moves data with .cuda() directly rather than the
            # `device` convention used by train()/test() above.
            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()
                inputs_attack, targets_attack = inputs_attack.cuda(), targets_attack.cuda()

            # The target model must return (logits, hidden layer); member and
            # non-member halves are concatenated in that order.
            outputs, h_layer = model(inputs)
            outputs_non, h_layer_non = model(inputs_attack)

            comb_inputs_h = torch.cat((h_layer, h_layer_non))
            comb_inputs = torch.cat((outputs, outputs_non))

            attack_input = comb_inputs

            # One-hot encoding of the true class labels for the attack model.
            # NOTE(review): hard-codes CUDA tensors; fails on CPU-only runs.
            one_hot_tr = torch.from_numpy((np.zeros((attack_input.size(0),outputs.size(1))))).cuda().type(torch.cuda.FloatTensor)
            infer_input_one_hot = one_hot_tr.scatter_(1,torch.cat((targets,targets_attack)).type(torch.cuda.LongTensor).view([-1,1]).data,1)

            attack_output = attack_model(attack_input, comb_inputs_h, infer_input_one_hot).view([-1])

            # Ground-truth membership: 1 for members, 0 for non-members
            # (the array is zero-initialised, so only the first half is set).
            att_labels = np.zeros((inputs.size(0)+inputs_attack.size(0)))
            att_labels[:inputs.size(0)] = 1.0

            is_member_labels = torch.from_numpy(att_labels).type(torch.FloatTensor)

            if use_cuda:
                is_member_labels = is_member_labels.cuda()

            loss = attack_criterion(attack_output, is_member_labels)

            # measure accuracy (0.5 decision threshold) and record loss
            prec1 = np.mean(np.equal((attack_output.data.cpu().numpy() > 0.5), (is_member_labels.data.cpu().numpy() > 0.5)))
            losses.update(loss.item(), attack_input.size(0))
            top1.update(prec1, attack_input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

    return (losses.avg, top1.avg)


def train_privatly(train_data,labels,model,inference_model,criterion,optimizer,epoch,use_cuda,
                   num_batchs=10000,skip_batch=0,alpha=0.5,verbose=False,batch_size=16,loss_fun='mean'):
    """Adversarially train ``model`` against a frozen ``inference_model``.

    The classification loss is augmented with an attack-gain penalty,
    ``alpha * (mean attack output - 0.5)``, pushing the target model toward
    predictions the membership-inference model scores near chance (0.5).
    Only ``optimizer`` steps; ``inference_model`` stays in eval mode.

    ``epoch``, ``verbose`` and ``loss_fun`` are accepted for interface
    compatibility but unused here (the progress print was permanently
    disabled upstream and has been removed as dead code).

    Returns (average combined loss, average top-1 accuracy in percent).
    """
    model.train()
    inference_model.eval()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()

    # The last full batch and any remainder are dropped.
    len_t = (len(train_data)//batch_size)-1

    for ind in range(skip_batch, len_t):
        if ind >= skip_batch+num_batchs:
            break

        inputs = train_data[ind*batch_size:(ind+1)*batch_size]
        targets = labels[ind*batch_size:(ind+1)*batch_size]

        data_time.update(time.time() - end)

        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()

        # The target model must return (logits, hidden-layer activations).
        outputs, h_layer = model(inputs)

        # One-hot encode the true labels for the inference model.
        # NOTE(review): hard-codes CUDA tensors; fails on CPU-only runs.
        one_hot_tr = torch.from_numpy((np.zeros((outputs.size(0),outputs.size(1))))).cuda().type(torch.cuda.FloatTensor)
        infer_input_one_hot = one_hot_tr.scatter_(1, targets.type(torch.cuda.LongTensor).view([-1,1]).data,1)

        # Attack model's membership score for this (all-member) batch.
        inference_output = inference_model(outputs, h_layer, infer_input_one_hot)

        # Dead code removed: an unused nn.MSELoss instance and an unused
        # all-ones membership-label tensor were built here but never
        # consumed by the loss below.
        loss = criterion(outputs, targets) + (alpha*(inference_output.mean()-0.5))

        # measure accuracy and record loss
        prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
        losses.update(loss.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))

        # compute gradient and do SGD step (target model only)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

    return (losses.avg, top1.avg)



class InferenceAttack_HZ(nn.Module):
    """Membership-inference attack network.

    Consumes the target model's class-score vector, a hidden-layer
    activation (accepted by forward() but currently unused), and a one-hot
    label vector, and emits a membership probability in (0, 1).
    """

    def __init__(self, num_classes):
        self.num_classes = num_classes
        super(InferenceAttack_HZ, self).__init__()

        # Branch over the target model's class scores.
        self.features = nn.Sequential(
            nn.Linear(num_classes, 1024),
            nn.ReLU(),
            nn.Linear(1024, 512),
            nn.ReLU(),
            nn.Linear(512, 64),
            nn.ReLU(),
        )
        # Branch over the one-hot ground-truth label.
        self.labels = nn.Sequential(
            nn.Linear(num_classes, 128),
            nn.ReLU(),
            nn.Linear(128, 64),
            nn.ReLU(),
        )
        # Joint head over the two concatenated 64-d branch embeddings.
        self.combine = nn.Sequential(
            nn.Linear(64 * 2, 512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Linear(64, 1),
        )
        # Initialise every weight from N(0, 0.01) and zero every bias,
        # walking the state dict in registration order.
        for name, tensor in self.state_dict().items():
            suffix = name.rsplit('.', 1)[-1]
            if suffix == 'weight':
                nn.init.normal_(tensor, std=0.01)
            elif suffix == 'bias':
                tensor[...] = 0
        self.output = nn.Sigmoid()

    def forward(self, x1, x2, l):
        """Return membership probability for each row; ``x2`` is unused."""
        score_emb = self.features(x1)
        label_emb = self.labels(l)
        joined = torch.cat((score_emb, label_emb), 1)
        return self.output(self.combine(joined))