import torch
from tqdm import tqdm
from torch.nn.functional import one_hot
import random
import torch.nn as nn
import numpy as np
import os

def get_l2_loss(output, target):
    """Mean relative L2 error between prediction and target.

    Both tensors are assumed shaped (batch, N, c): the norm is taken over
    the N (sample/node) axis, normalized by the target norm, then averaged
    over channels and over the batch.

    Args:
        output: predicted tensor, shape (batch, N, c).
        target: reference tensor, same shape as ``output``.

    Returns:
        Scalar tensor: batch-averaged relative L2 error.
    """
    residual = output - target
    # Per-channel relative norm; 1e-6 guards against a zero target norm.
    rel_norm = torch.norm(residual, dim=-2) / (torch.norm(target, dim=-2) + 1e-6)
    per_sample = (
        rel_norm.squeeze(-1)
        if rel_norm.shape[-1] == 1
        else rel_norm.mean(dim=-1)
    )
    return per_sample.mean()

class LpLoss(object):
    def __init__(self, d=2, p=2, size_average=True, reduction=True):
        super(LpLoss, self).__init__()
        # Dimension and Lp-norm type are postive
        assert d > 0 and p > 0

        self.d = d
        self.p = p
        self.reduction = reduction
        self.size_average = size_average

    def abs(self, x, y):
        num_examples = x.size()[0]

        # Assume uniform mesh
        h = 1.0 / (x.size()[1] - 1.0)

        all_norms = (h ** (self.d / self.p)) * np.linalg.norm(
            x.reshape((num_examples, -1)) - y.reshape((num_examples, -1)), self.p, 1
        ) ###指定在哪个轴计算

        if self.reduction:
            if self.size_average:
                return np.mean(all_norms)
            else:
                return np.sum(all_norms)

        return all_norms

    def rel(self, x, y):
        diff_norms = np.linalg.norm(x-y, 2)
        y_norms = np.linalg.norm(y, self.p)

        if self.reduction:
            if self.size_average:
                return np.mean(diff_norms / y_norms)
            else:
                return np.sum(diff_norms / y_norms)

        return diff_norms / y_norms

    def __call__(self, x, y):
        return self.rel(x, y)

def get_val_loss(output_p_hat, p, if_rescale, info):
    """Compute validation errors for the pressure prediction.

    Args:
        output_p_hat: model output tensor; channel 0 is the (normalized)
            pressure prediction.
        p: ground-truth pressure tensor (moved to the model's device here).
        if_rescale: if truthy, un-normalize the prediction with
            ``info['p_std']`` / ``info['p_mean']`` before the physical-space
            error; otherwise the raw model output is compared directly.
        info: dict with normalization statistics 'p_mean' and 'p_std'.

    Returns:
        dict with 'L2_p_norm' (relative L2 in normalized space) and
        'L2_p' (relative L2 in physical space, computed on NumPy arrays).
    """
    device = output_p_hat.device
    p_target = p.to(device)

    losses = {}

    # Relative L2 error in normalized pressure space.
    losses['L2_p_norm'] = get_l2_loss(
        output_p_hat[..., :1], (p_target - info['p_mean']) / info['p_std']
    ).item()

    # Bug fix: p_hat was previously assigned only inside the if-branch and
    # raised a NameError below whenever if_rescale was falsy.
    p_hat = output_p_hat[..., :1]
    if if_rescale:
        p_hat = p_hat * info['p_std'] + info['p_mean']

    loss_fn = LpLoss(size_average=True)
    p_target = p_target.squeeze(0).detach().cpu().numpy()
    p_hat = p_hat.squeeze(0).detach().cpu().numpy()
    losses['L2_p'] = loss_fn(p_hat, p_target)

    return losses


def get_train_loss(output_p_hat, p, loss_flag, if_rescale, info):
    """Compute the training loss plus monitoring errors.

    Args:
        output_p_hat: model output tensor; channel 0 is the (normalized)
            pressure prediction.
        p: ground-truth pressure tensor (moved to the model's device here).
        loss_flag: one of 'L2_loss_norm', 'MSE_loss_norm' (normalized
            space) or 'L2_loss', 'MSE_loss' (physical space); selects which
            value is stored under losses['loss'].
        if_rescale: if truthy, un-normalize the prediction with
            ``info['p_std']`` / ``info['p_mean']`` for the physical-space
            terms.
        info: dict with normalization statistics 'p_mean' and 'p_std'.

    Returns:
        dict with 'loss' (tensor to backprop, only set for a recognized
        loss_flag), 'L2_p_norm' and 'L2_p' (floats for logging).
    """
    device = output_p_hat.device
    p_target = p.to(device)

    losses = {}
    criterion = nn.MSELoss()

    # Hoisted: the normalized target is used three times below.
    normalized_target = (p_target - info['p_mean']) / info['p_std']

    if loss_flag == 'L2_loss_norm':
        losses['loss'] = get_l2_loss(output_p_hat[..., :1], normalized_target)
    elif loss_flag == 'MSE_loss_norm':
        losses['loss'] = criterion(output_p_hat[..., :1], normalized_target)

    losses['L2_p_norm'] = get_l2_loss(output_p_hat[..., :1], normalized_target).item()

    # Bug fix: p_hat was previously assigned only inside the if-branch,
    # so the unconditional L2_p computation below raised a NameError
    # whenever if_rescale was falsy.
    p_hat = output_p_hat[..., :1]
    if if_rescale:
        p_hat = p_hat * info['p_std'] + info['p_mean']

    if loss_flag == 'L2_loss':
        losses['loss'] = get_l2_loss(p_hat, p_target)
    elif loss_flag == 'MSE_loss':
        losses['loss'] = criterion(p_hat, p_target)

    losses['L2_p'] = get_l2_loss(p_hat, p_target).item()

    return losses

def train(args, model, train_dataloader, optim, device):
    """Run one training epoch and return epoch-averaged errors.

    Each dataloader item is a ``[batch_dict, name]`` pair where the dict
    carries 'pressure', 'node_pos', 'areas' and 'edges'. Loss settings are
    read from ``args.train``.

    Returns:
        dict with epoch means of 'loss', 'L2_p' and 'L2_p_norm'.
    """
    model.train()

    running = {'loss': 0.0, 'L2_p': 0.0, 'L2_p_norm': 0.0}
    n_batches = 0

    for batch, name in train_dataloader:
        optim.zero_grad()

        pressure = batch['pressure']
        prediction = model(
            batch['node_pos'].to(device),
            batch['areas'].to(device),
            batch['edges'],
        )

        costs = get_train_loss(
            prediction,
            pressure,
            args.train["loss_flag"],
            args.train["if_rescale"],
            args.train["info"],
        )
        costs['loss'].backward()
        optim.step()

        running['loss'] += costs['loss'].item()
        running['L2_p'] += costs['L2_p']
        running['L2_p_norm'] += costs['L2_p_norm']
        n_batches += 1

    # Average every accumulated quantity over the number of batches.
    return {key: total / n_batches for key, total in running.items()}

def validate(args, model, val_dataloader, device):
    """Evaluate the model on the validation set without gradients.

    Each dataloader item is a ``[batch_dict, name]`` pair where the dict
    carries 'pressure', 'node_pos', 'areas' and 'edges'.

    Returns:
        dict with validation-set means of 'L2_p' and 'L2_p_norm'.
    """
    model.eval()

    totals = {'L2_p': 0.0, 'L2_p_norm': 0.0}
    n_batches = 0

    with torch.no_grad():
        for batch, _ in val_dataloader:
            prediction = model(
                batch['node_pos'].to(device),
                batch['areas'].to(device),
                batch['edges'],
            )

            costs = get_val_loss(
                prediction,
                batch['pressure'],
                args.train["if_rescale"],
                args.train["info"],
            )

            totals['L2_p'] += costs['L2_p']
            totals['L2_p_norm'] += costs['L2_p_norm']
            n_batches += 1

    # Average every accumulated quantity over the number of batches.
    return {key: value / n_batches for key, value in totals.items()}