import torch 
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np


# Loss functions
def none(logits, labels, cleans):
    """Per-sample cross-entropy bucketed by prediction outcome.

    Buckets (index into positions 1..5 of the returned tensors; position 0
    is the total over all samples):
        1: clean label, predicted correctly
        2: clean label, predicted wrongly
        3: noisy label, prediction matches the clean label
        4: noisy label, prediction matches the noisy label
        5: noisy label, prediction matches neither

    Args:
        logits: (batch, classes) raw network outputs.
        labels: (batch,) training labels (possibly noisy).
        cleans: (batch,) ground-truth clean labels.

    Returns:
        (loss_list, num_list): accumulated CE loss and sample count per bucket,
        both float32 tensors of length 6 on logits' device.
    """
    # Bug fix: follow the input tensors' device instead of cuda-availability,
    # which crashed with a cross-device add when inputs lived on CPU.
    device = logits.device

    loss_list = torch.zeros(6, dtype=torch.float32, device=device)
    num_list = torch.zeros(6, dtype=torch.float32, device=device)
    # One vectorized CE call replaces a per-sample Python-loop cross_entropy.
    losses = F.cross_entropy(logits, labels, reduction='none')
    _, preds = torch.max(logits.data, 1)
    for clean, label, pred, loss in zip(cleans, labels, preds, losses):
        # The five cases are mutually exclusive and exhaustive.
        if clean == label == pred:
            bucket = 0  # clean true
        elif clean == label != pred:
            bucket = 1  # clean false noise false
        elif clean != label and clean == pred:
            bucket = 2  # noise true noise true
        elif clean != label and label == pred:
            bucket = 3  # noise false clean false
        else:
            bucket = 4  # noise other
        loss_list[bucket + 1] += loss
        num_list[bucket + 1] += 1
    loss_list[0] = losses.sum()
    num_list[0] = len(losses)
    return loss_list, num_list


def loss_coteaching(y_1, y_2, t, forget_rate, ind, noise_or_not):
    """Standard co-teaching loss.

    Each network selects its small-loss samples; the PEER is then trained on
    that selection (cross-update). Returns the two mean losses plus, for each
    network, the fraction of genuinely clean labels among its kept samples.
    """
    keep_count = int((1 - forget_rate) * len(t))

    per_sample_1 = F.cross_entropy(y_1, t, reduction='none')
    per_sample_2 = F.cross_entropy(y_2, t, reduction='none')
    # indices of the keep_count smallest per-sample losses for each network
    _, keep_1 = torch.topk(per_sample_1, keep_count, largest=False)
    _, keep_2 = torch.topk(per_sample_2, keep_count, largest=False)

    # purity of each selection, judged against the clean-label indicator
    clean_1 = torch.sum(noise_or_not[ind[keep_1]], dtype=torch.float)
    clean_2 = torch.sum(noise_or_not[ind[keep_2]], dtype=torch.float)
    pure_ratio_1 = clean_1 / keep_count
    pure_ratio_2 = clean_2 / keep_count

    # exchange: net1 learns from net2's selection and vice versa
    loss_1_update = per_sample_1[keep_2].mean()
    loss_2_update = per_sample_2[keep_1].mean()

    return loss_1_update, loss_2_update, pure_ratio_1, pure_ratio_2

def loss_coteaching_plus(y_1, y_2, t, forget_rate, ind, noise_or_not):
    """Co-teaching+ loss: small-loss cross-update restricted to samples on
    which the two networks DISAGREE.

    Returns (loss_1, loss_2, -1, -1); the pure ratios are not computed for
    this variant, so sentinel tensors are returned in their place.
    """
    _, pred_1 = torch.max(y_1.data, 1)
    _, pred_2 = torch.max(y_2.data, 1)
    disagree = pred_1 != pred_2
    # Bug fix: when the networks agree on every sample the masked selection is
    # empty and torch.mean over it returns NaN, poisoning training. Fall back
    # to selecting over the whole batch in that case.
    if not disagree.any():
        disagree = torch.ones_like(disagree)

    loss_1 = F.cross_entropy(y_1, t, reduction = 'none')
    loss_2 = F.cross_entropy(y_2, t, reduction = 'none')
    loss_1_selected = torch.masked_select(loss_1, disagree)
    loss_2_selected = torch.masked_select(loss_2, disagree)

    remember_rate = 1 - forget_rate
    # Bug fix: int() truncation could yield 0 remembered samples (another NaN
    # mean); always keep at least one.
    num_remember = max(1, int(remember_rate * len(loss_1_selected)))

    # each network keeps the peer's small-loss disagreement samples (exchange)
    _, ind_1_selected = torch.topk(loss_1_selected, num_remember, largest=False)
    _, ind_2_selected = torch.topk(loss_2_selected, num_remember, largest=False)

    loss_1_update = loss_1_selected[ind_2_selected]
    loss_2_update = loss_2_selected[ind_1_selected]

    return torch.mean(loss_1_update), torch.mean(loss_2_update), torch.tensor(-1), torch.tensor(-1)

# Rewritten Loss functions
def my_loss_coteaching(y_1, y_2, t, forget_rate, ind, noise_or_not):
    # self small loss and other teaches
    def my_cross_entropy(x1, x2):
        x1 = F.softmax(x1, dim=1)
        x2 = F.softmax(x2, dim=1)
        return -torch.sum(x2 * x1.log(), dim=1)

    loss_1_2 = my_cross_entropy(y_1, y_2.detach())
    loss_2_1 = my_cross_entropy(y_2, y_1.detach())
    loss_1_t = F.cross_entropy(y_1, t, reduction = 'none')
    loss_2_t = F.cross_entropy(y_2, t, reduction = 'none')
    loss_1_t_sorted, ind_1_t_sorted = torch.sort(loss_1_t)
    loss_2_t_sorted, ind_2_t_sorted = torch.sort(loss_2_t)
    
    remember_rate = 1 - forget_rate
    num_remember = int(remember_rate * len(t))

    num_clean_1 = torch.sum(noise_or_not[ind[ind_1_t_sorted[:num_remember]]], dtype=torch.float)
    num_clean_2 = torch.sum(noise_or_not[ind[ind_2_t_sorted[:num_remember]]], dtype=torch.float)
    pure_ratio_1 = num_clean_1 / num_remember
    pure_ratio_2 = num_clean_2 / num_remember

    # exchange
    loss_1_update = torch.cat((loss_1_t_sorted[:num_remember], loss_1_2[ind_1_t_sorted[num_remember:]]))
    loss_2_update = torch.cat((loss_2_t_sorted[:num_remember], loss_2_1[ind_2_t_sorted[num_remember:]]))

    return torch.mean(loss_1_update), torch.mean(loss_2_update), pure_ratio_1, pure_ratio_2

# Rewritten Loss functions
def my_loss_coteaching_2(y_1, y_2, t, forget_rate, ind, noise_or_not):
    """Co-teaching variant: the PEER's small-loss samples keep the label CE,
    and the peer's large-loss samples are taught with the peer's soft labels.

    Args:
        y_1, y_2: logits of the two networks, shape (batch, classes).
        t: integer training labels (possibly noisy), shape (batch,).
        forget_rate: fraction of large-loss samples switched to soft teaching.
        ind: dataset indices of this batch (used to index noise_or_not).
        noise_or_not: per-sample clean-label indicator (1 = clean).

    Returns:
        (loss_1, loss_2, pure_ratio_1, pure_ratio_2) where pure_ratio_k is the
        clean fraction among net k's small-loss samples.
    """
    def my_cross_entropy(x1, x2):
        # Soft CE of student logits x1 against teacher distribution softmax(x2).
        # Fix: F.log_softmax instead of softmax(x).log(), which produces
        # -inf/NaN when a probability underflows to zero.
        return -torch.sum(F.softmax(x2, dim=1) * F.log_softmax(x1, dim=1), dim=1)

    loss_1_2 = my_cross_entropy(y_1, y_2.detach())  # net1 taught by net2
    loss_2_1 = my_cross_entropy(y_2, y_1.detach())  # net2 taught by net1
    loss_1_t = F.cross_entropy(y_1, t, reduction = 'none')
    loss_2_t = F.cross_entropy(y_2, t, reduction = 'none')
    _, ind_1_t_sorted = torch.sort(loss_1_t)
    _, ind_2_t_sorted = torch.sort(loss_2_t)

    remember_rate = 1 - forget_rate
    num_remember = int(remember_rate * len(t))

    # purity of each net's own small-loss selection
    num_clean_1 = torch.sum(noise_or_not[ind[ind_1_t_sorted[:num_remember]]], dtype=torch.float)
    num_clean_2 = torch.sum(noise_or_not[ind[ind_2_t_sorted[:num_remember]]], dtype=torch.float)
    pure_ratio_1 = num_clean_1 / num_remember
    pure_ratio_2 = num_clean_2 / num_remember

    # exchange: selection comes from the PEER's loss ordering
    loss_1_update = torch.cat((loss_1_t[ind_2_t_sorted[:num_remember]], loss_1_2[ind_2_t_sorted[num_remember:]]))
    loss_2_update = torch.cat((loss_2_t[ind_1_t_sorted[:num_remember]], loss_2_1[ind_1_t_sorted[num_remember:]]))

    return torch.mean(loss_1_update), torch.mean(loss_2_update), pure_ratio_1, pure_ratio_2

# Rewritten Loss functions
def my_loss_coteaching_hard(y_1, y_2, t, forget_rate, ind, noise_or_not):
    """Co-teaching with hard pseudo-labels: the peer's small-loss samples keep
    the label CE, while its large-loss samples use the peer's argmax
    predictions as hard targets.

    Returns the two mean losses plus, per network, the clean fraction among
    its own small-loss samples.
    """
    pred_1 = y_1.data.max(1)[1]
    pred_2 = y_2.data.max(1)[1]

    # CE against the peer's hard predictions (distillation targets)
    loss_1_2 = F.cross_entropy(y_1, pred_2.detach(), reduction='none')
    loss_2_1 = F.cross_entropy(y_2, pred_1.detach(), reduction='none')
    # CE against the given (possibly noisy) labels
    loss_1_t = F.cross_entropy(y_1, t, reduction='none')
    loss_2_t = F.cross_entropy(y_2, t, reduction='none')
    order_1 = torch.argsort(loss_1_t)
    order_2 = torch.argsort(loss_2_t)

    keep = int((1 - forget_rate) * len(t))

    # purity of each net's own small-loss selection
    clean_1 = torch.sum(noise_or_not[ind[order_1[:keep]]], dtype=torch.float)
    clean_2 = torch.sum(noise_or_not[ind[order_2[:keep]]], dtype=torch.float)
    pure_ratio_1 = clean_1 / keep
    pure_ratio_2 = clean_2 / keep

    # exchange: peer's small-loss part keeps label CE, the rest uses hard pseudo-labels
    loss_1_update = torch.cat((loss_1_t[order_2[:keep]], loss_1_2[order_2[keep:]]))
    loss_2_update = torch.cat((loss_2_t[order_1[:keep]], loss_2_1[order_1[keep:]]))

    return loss_1_update.mean(), loss_2_update.mean(), pure_ratio_1, pure_ratio_2

def loss_coteaching_pseudo(y_1, y_2, t, alpha1, alpha2, beta, device):
    """Build per-sample soft pseudo-labels for two co-trained networks.

    NOTE(review): y_1 / y_2 appear to be stacks of logits where index 0 is the
    base view and indices 1: are augmented views of the same batch (y_k[0] has
    shape (batch, classes)) — confirm against the caller.

    Args:
        y_1, y_2: stacked network outputs (see note above).
        t: integer class labels for the batch, shape (batch,).
        alpha1: temperature applied to the per-sample label losses.
        alpha2: temperature applied to the augmentation-consistency losses.
        beta: interpolation weight between the label term and the output term.
        device: device on which the intermediate loss buffers are allocated.

    Returns:
        (pseudo_label_1, pseudo_label_2): per-sample weighted soft targets,
        each shaped like y_k[0].
    """
    # soft cross-entropy of x1's distribution against x2's distribution
    def my_cross_entropy(x1, x2):
        x1 = F.softmax(x1, dim=1)
        x2 = F.softmax(x2, dim=1)
        return -torch.sum(x2 * x1.log(), dim=1)

    # per-sample CE of the base view against the (possibly noisy) labels
    loss_1_t = F.cross_entropy(y_1[0], t, reduction='none')
    loss_2_t = F.cross_entropy(y_2[0], t, reduction='none')

    # omega_1_*: softmax over the batch of label losses -> per-sample weights, shape (batch, 1)
    omega_1_1 = F.softmax(alpha1 * loss_1_t, dim=0).unsqueeze(dim=1)
    omega_1_2 = F.softmax(alpha1 * loss_2_t, dim=0).unsqueeze(dim=1)
    loss_1_t_augment = torch.empty(y_1[1:].shape[0], loss_1_t.shape[0]).to(device)
    loss_2_t_augment = torch.empty(y_2[1:].shape[0], loss_2_t.shape[0]).to(device)

    # consistency: soft CE of each remaining view against the base view, summed over views.
    # NOTE(review): y_*[0] is not detached here, so gradients would flow into the
    # base view through these losses — confirm that is intended.
    for i, (T1, T2) in enumerate(zip(y_1[1:], y_2[1:])):
        loss_1_t_augment[i] = my_cross_entropy(T1, y_1[0])
        loss_2_t_augment[i] = my_cross_entropy(T2, y_2[0])
    loss_1_t_augment = torch.sum(loss_1_t_augment, dim=0)
    loss_2_t_augment = torch.sum(loss_2_t_augment, dim=0)

    # omega_2_*: per-sample weights from the consistency losses, shape (batch, 1)
    omega_2_1 = F.softmax(alpha2 * loss_1_t_augment, dim=0).unsqueeze(dim=1)
    omega_2_2 = F.softmax(alpha2 * loss_2_t_augment, dim=0).unsqueeze(dim=1)
    # one-hot encode the labels into shape (batch, classes)
    t = torch.unsqueeze(t, dim=-1)
    t = torch.zeros_like(y_1[0]).scatter_(1, t, 1)

    # blend weighted one-hot labels with the weighted base-view outputs.
    # NOTE(review): y_*[0] enters the blend as raw outputs (no softmax applied
    # here) — verify the consumer expects that.
    pseudo_label_1 = beta * omega_1_1 * t + (1 - beta) * omega_2_1 * y_1[0]
    pseudo_label_2 = beta * omega_1_2 * t + (1 - beta) * omega_2_2 * y_2[0]

    return pseudo_label_1, pseudo_label_2

def teaching_student(y_1, y_2, t, forget_rate, ind, noise_or_not, train_clean_labels, device):
    '''
    Student-teacher loss with small-loss sample selection.

    y_1: student net outputs, shape (batch, classes)
    y_2: teacher net outputs, shape (batch, classes)
    t:   noise labels, shape (batch,)
    forget_rate: fraction of large-loss samples switched to distillation
    ind: dataset indices of this batch (indexes noise_or_not / train_clean_labels)
    noise_or_not: per-sample clean-label indicator (1 = clean)
    train_clean_labels: ground-truth clean labels, shape (N, 1)
    device: device for the sorting indices

    Returns:
        (student_loss, teacher_loss, pure_ratio_remember, pure_ratio_forget,
         loss_forget) — teacher loss is averaged only over the remembered
        subset; loss_forget is the teacher's CE against CLEAN labels on the
        forgotten subset (diagnostic).
    '''
    _, pred_y_2 = torch.max(y_2.data, 1)
    clean_labels = torch.squeeze(train_clean_labels[ind], 1)  # (batch,)
    correct_or_not = pred_y_2 == clean_labels

    # distillation loss: student distribution against detached teacher targets
    prob_y_1 = F.softmax(y_1, dim=1)
    prob_y_2 = F.softmax(y_2, dim=1).detach()
    loss_0 = -torch.sum(prob_y_2 * prob_y_1.log(), dim=1)
    loss_1 = F.cross_entropy(y_1, t, reduction = 'none')
    loss_2 = F.cross_entropy(y_2, t, reduction = 'none')

    # Fix: torch.argsort instead of np.argsort — keeps the computation on the
    # tensors' device with no host round-trip.
    ind_0_sorted = torch.argsort(loss_0).to(device)
    loss_0_sorted = loss_0[ind_0_sorted]
    ind_1_sorted = torch.argsort(loss_1).to(device)
    loss_1_sorted = loss_1[ind_1_sorted]

    remember_rate = 1 - forget_rate
    num_remember = int(remember_rate * len(loss_1_sorted))
    num_forget = len(loss_1_sorted) - num_remember

    num_clean = torch.sum(noise_or_not[ind[ind_1_sorted[:num_remember]]], dtype=torch.float)
    pure_ratio_remember = num_clean / num_remember
    if num_forget > 0:
        num_correct = torch.sum(correct_or_not[ind_1_sorted[num_remember:]], dtype=torch.float)
        pure_ratio_forget = num_correct / num_forget
        # Bug fix: F.cross_entropy needs 1-D class-index targets; the raw
        # (k, 1) slice of train_clean_labels raised at runtime. Use the
        # squeezed clean_labels (already gathered by ind) instead.
        loss_forget = F.cross_entropy(y_2[ind_1_sorted[num_remember:]], clean_labels[ind_1_sorted[num_remember:]])
    else:
        # forget_rate == 0: nothing is forgotten; avoid 0/0 and an empty CE
        pure_ratio_forget = torch.tensor(0.)
        loss_forget = torch.tensor(0.)

    # exchange: student keeps label CE on small-loss samples and distills from
    # the teacher on the rest; the teacher trains only on the small-loss subset.
    # (Leftover debug print()/input() calls removed — input() blocked training.)
    loss_1_update = torch.cat((loss_1_sorted[:num_remember], loss_0_sorted[num_remember:]), 0)
    loss_2_update = loss_2[ind_1_sorted[:num_remember]]
    return torch.sum(loss_1_update)/len(loss_1_sorted), torch.sum(loss_2_update)/num_remember, pure_ratio_remember, pure_ratio_forget, loss_forget


if __name__ == '__main__':
    # Smoke check: torch.sum(..., dtype=...) casts the accumulator only and
    # must not mutate the input tensor's dtype.
    a = torch.tensor([[0.1, 0.3], [0.2, 1.2], [3.2, 0.3]])
    print(a.dtype)
    print(torch.sum(a, dtype=torch.float) / 3)
    print(a.dtype)  # still the original dtype
