import os
import sys
import math
import time

import torch
import torch.nn.functional as F

def build_gradient_vector(model, optimizer, loss, retain=True):
    """Backpropagate *loss* and return all parameter gradients flattened to one 1-D tensor.

    Args:
        model: module whose parameter gradients are collected, in
            ``model.parameters()`` order.
        optimizer: optimizer bound to *model*; used only to zero stale gradients.
        loss: scalar tensor to differentiate.
        retain: forwarded to ``backward(retain_graph=...)`` so the graph can be
            reused for further backward passes on losses sharing the same graph.

    Returns:
        1-D tensor whose length equals the model's total parameter count.
    """
    optimizer.zero_grad()
    loss.backward(retain_graph=retain)
    # Single cat over a list replaces the original incremental-cat loop, which
    # was quadratic in time/memory and used a bare ``except`` just to seed the
    # accumulator on the first iteration (silently masking any real error,
    # e.g. a parameter whose ``.grad`` is still None).
    return torch.cat([param.grad.reshape(-1) for param in model.parameters()])


def build_gradient_vector_list(model, optimizer, loss, retain=False):
    """Compute a flattened gradient vector per loss and stack them row-wise.

    All but the last backward pass retain the autograd graph (the losses are
    assumed to share one graph); the final pass uses *retain*, so by default
    the graph is freed once every gradient has been extracted.

    Fixes vs. the original:
      * the incremental ``torch.cat`` inside the loop was quadratic in
        time/memory (the old "OOM when i == 22" note) — gradients are now
        collected in a Python list and stacked once;
      * with a single-element *loss* the original ran backward twice on the
        same tensor (``loss[0]`` and ``loss[-1]`` coincide).

    Args:
        model: module whose gradients are collected.
        optimizer: optimizer used to zero gradients between passes.
        loss: sequence of scalar loss tensors sharing one autograd graph.
        retain: whether the final backward pass keeps the graph alive.

    Returns:
        2-D tensor of shape ``(len(loss), num_parameters)``.
    """
    gradients = []
    last = len(loss) - 1
    for i, single_loss in enumerate(loss):
        keep_graph = True if i < last else retain
        gradients.append(build_gradient_vector(model, optimizer, single_loss, keep_graph))
    return torch.stack(gradients, dim=0)

def build_dis_vector_list(model, optimizer, loss, dis, retain=True):
    """Measure *dis* between each per-sample gradient and the mean-loss gradient.

    Fix vs. the original: the *retain* parameter was accepted but never used,
    so the final backward pass always retained the autograd graph. It is now
    applied to the last backward only (default ``True`` preserves the old
    behavior exactly).

    Args:
        model: module whose gradients are compared.
        optimizer: optimizer used to zero gradients between passes.
        loss: tensor of per-sample losses (``.mean()`` must be valid on it).
        dis: callable taking two 1-D gradient tensors and returning a scalar
            tensor (e.g. the module-level ``dis`` cosine-similarity helper).
        retain: whether the final backward pass keeps the graph alive.

    Returns:
        1-D tensor of length ``len(loss)`` holding the distance values.
    """
    mean_loss_gradient = build_gradient_vector(model, optimizer, loss.mean())
    distances = []
    last = len(loss) - 1
    for i, single_loss in enumerate(loss):
        keep_graph = True if i < last else retain
        single_gradient = build_gradient_vector(model, optimizer, single_loss, keep_graph)
        distances.append(dis(mean_loss_gradient, single_gradient).unsqueeze(dim=0))
    return torch.cat(distances)

def build_weight_vector(model):
    """Flatten all of *model*'s parameters into a single 1-D tensor.

    Parameters are concatenated in ``model.parameters()`` order. Replaces the
    original incremental-cat loop that used a bare ``except`` to seed the
    accumulator on the first iteration (quadratic, and swallowed real errors).

    Returns:
        1-D tensor whose length equals the model's total parameter count.
    """
    return torch.cat([param.reshape(-1) for param in model.parameters()])

def dis(gradient_1, gradient_2):
    """Cosine similarity between two flattened gradient vectors.

    Args:
        gradient_1: 1-D gradient tensor.
        gradient_2: 1-D gradient tensor of the same length.

    Returns:
        Scalar tensor with the cosine similarity, or ``0.`` when either
        vector has zero norm (guards against division by zero).
    """
    # Renamed the local from ``dis`` — it shadowed the function's own name.
    dot = torch.sum(gradient_1 * gradient_2)
    norm_1 = gradient_1.norm(dim=-1, keepdim=True)
    norm_2 = gradient_2.norm(dim=-1, keepdim=True)
    # Assumes 1-D inputs, so each norm is a single-element tensor and the
    # truth test below is unambiguous.
    if norm_1 * norm_2 == 0:
        return torch.tensor(0., device=dot.device)
    return dot / torch.sum(norm_1 * norm_2)
