import os
import sys
import argparse

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import pytorch3d
from pytorch3d.io import load_obj, save_obj
from pytorch3d.structures import Meshes
from pytorch3d.ops import sample_points_from_meshes
from pytorch3d.loss import (
    chamfer_distance,
    mesh_edge_loss,
    mesh_laplacian_smoothing,
    mesh_normal_consistency,
    )
from pytorch3d.ops import cot_laplacian

import numpy as np
import scipy.io as sio
import potpourri3d as pp3d

from utils.functions import compute_variables, compute_variables_area

from src import diffusion_net
from src.diffusion_net.utils import toNP
from src.diffusion_net.geometry import to_basis, from_basis
from src.diffusion_net.layers import DiffusionNetBlock

from utils.model import SURFMNet

import src.pyFM
from src.pyFM.spectral.precise_map import point_to_triangles_projection

import time


def in_plane_angles(verts, normals, faces):
    """Return a per-corner angle measure for every face corner of a mesh.

    For each triangle corner, the dot product of the two unit edge vectors
    meeting at that corner is corrected by subtracting the product of their
    components along the corner's vertex normal (i.e. the dot product of the
    tangent-plane projections), and the arccosine of that value is returned.

    Args:
        verts: (V, 3) float tensor of vertex positions.
        normals: (V, 3) float tensor of per-vertex normals.
        faces: (F, 3) long tensor of triangle vertex indices.

    Returns:
        (3*F,) tensor: corner angles ordered as all first corners, then all
        second corners, then all third corners.
    """
    def _unit(vectors):
        return torch.nn.functional.normalize(vectors, dim=1)

    # Directed edge vectors around each triangle, normalized to unit length.
    edge_a = _unit(verts[faces[:, 1]] - verts[faces[:, 0]])
    edge_b = _unit(verts[faces[:, 2]] - verts[faces[:, 1]])
    edge_c = _unit(verts[faces[:, 0]] - verts[faces[:, 2]])

    def _corner_angle(outgoing, incoming, corner_normals):
        # dot(a, b) - dot(a, n) * dot(b, n): dot product of the edges'
        # tangent-plane projections at the corner vertex.
        plain = torch.sum(outgoing * incoming, 1)
        along_normal = (torch.sum(outgoing * corner_normals, 1)
                        * torch.sum(incoming * corner_normals, 1))
        return torch.acos(plain - along_normal)

    angle_0 = _corner_angle(edge_a, -edge_c, normals[faces[:, 0]])
    angle_1 = _corner_angle(edge_b, -edge_a, normals[faces[:, 1]])
    angle_2 = _corner_angle(edge_c, -edge_b, normals[faces[:, 2]])

    return torch.cat((angle_0, angle_1, angle_2), 0)

class Model(nn.Module):
    """Optimize per-vertex tangent-plane offsets of a mesh.

    The only trainable parameters are 2D offsets (one per entry of
    ``offset_indices``) expressed in each vertex's local tangent frame.
    ``forward`` deforms the mesh by these offsets and returns a weighted sum
    of shape-matching (chamfer), normal-consistency, angle-based
    conformality, Laplacian-based conformality, and deflection-limit losses.

    Args:
        mesh: pytorch3d Meshes to be deformed.
        mesh_x: pytorch3d Meshes used as the conformality reference.
        mesh_y: pytorch3d Meshes target shape (chamfer reference).
        frames: (V, 3, 3) per-vertex frames; only the first two axes
            (tangent directions) are kept.
        radii: (V,) per-vertex search radii bounding the allowed deflection.
        device: torch device everything is moved to.
        offset_indices: indices of the vertices that receive an offset.
        radius_coeff: fraction of each radius actually allowed.
        w_conformality: weight of the angle-based conformality loss.
        w_lap_conform: weight of the Laplacian-based conformality loss.
        w_deflection: weight of the deflection penalty.
    """

    def __init__(self, mesh, mesh_x, mesh_y, frames, radii, device, offset_indices, radius_coeff=0.9, w_conformality=1, w_lap_conform=5e5, w_deflection=1.0):
        super().__init__()

        # Load mesh
        self.mesh = mesh.to(device)
        # Keep the two tangent axes and transpose to (V, 3, 2) so that
        # bmm with a (V, 2, 1) offset yields a (V, 3, 1) displacement.
        self.frames = frames[:, :2, :].permute([0, 2, 1]).to(device)
        self.mesh_x = mesh_x.to(device)
        self.mesh_y = mesh_y.to(device)
        self.radii = radii.to(device)
        self.device = device
        self.offset_indices = offset_indices
        # Reference value of in-plane angles at each face corner.
        self.ref_angles = in_plane_angles(verts=self.mesh_x.verts_packed(), normals=self.mesh_x.verts_normals_packed(),
                                          faces=self.mesh_x.faces_packed())
        self.radius_coeff = radius_coeff

        # Cotangent Laplacian of the reference mesh, densified and made to
        # have zero row sums by placing negated row sums on the diagonal.
        self.ref_laplacian, _ = cot_laplacian(verts=self.mesh_x.verts_packed(), faces=self.mesh_x.faces_packed())
        self.ref_laplacian = self.ref_laplacian.to_dense()
        self.ref_laplacian = self.ref_laplacian - torch.diag(torch.sum(self.ref_laplacian, dim=1)).to(self.device)
        # Fix: the original wrote `self.ref_laplacian.requires_grad_ = False`,
        # which only shadowed the bound method with `False` (a no-op).
        self.ref_laplacian = self.ref_laplacian.detach()

        # Weights of losses
        self.w_chamfer = 1.0
        self.w_normal = 50
        self.w_conformality = w_conformality
        self.w_deflection = w_deflection
        self.w_lap_conform = w_lap_conform

        # Trainable tangent-plane offsets, initialized near zero.
        # Fix: do not call `.to(device)` on the Parameter -- that can return a
        # plain (non-leaf) Tensor; the source tensor is already on `device`.
        shape_template = torch.zeros(offset_indices.shape[0], 2, 1).to(device)
        self.deform_verts = nn.parameter.Parameter(torch.randn_like(shape_template) * 0.001)

    def forward(self):
        """Deform the mesh by the current offsets and compute all losses.

        Returns:
            (loss, deformed_mesh, loss_chamfer, loss_conformality,
             loss_lap_conform, loss_deflection, loss_normal)
        """
        # Scatter the 2D offsets into a full per-vertex matrix, then map them
        # through the tangent frames to 3D displacements.
        offset_matrix = torch.zeros([self.mesh_x.verts_packed().shape[0], 2, 1]).to(self.device)
        offset_matrix[self.offset_indices.squeeze(), :, :] = self.deform_verts
        deform_verts_in_tangent = torch.bmm(self.frames, offset_matrix).squeeze()
        deformed_mesh = self.mesh.offset_verts(deform_verts_in_tangent)
        # Magnitude of each vertex displacement.
        deflections = torch.sqrt(torch.sum(deform_verts_in_tangent ** 2, dim=1))

        # Sample the meshes so the chamfer comparison uses equal-size clouds.
        sample_mesh = sample_points_from_meshes(deformed_mesh, 5000)
        sample_y = sample_points_from_meshes(self.mesh_y, 5000)

        # Angle preservation at each corner; NaNs (degenerate corners) are
        # replaced with the reference value so they contribute zero loss.
        computed_angles = in_plane_angles(verts=deformed_mesh.verts_packed(), normals=deformed_mesh.verts_normals_packed(), faces=deformed_mesh.faces_packed())
        computed_angles[torch.isnan(computed_angles)] = self.ref_angles[torch.isnan(computed_angles)]

        # Laplacian-based conformality: penalize the tangential component of
        # the (reference-Laplacian) mean-curvature-normal vectors.
        ref_mcn = self.ref_laplacian @ deformed_mesh.verts_packed()
        current_normals = deformed_mesh.verts_normals_packed()

        tangent_residues = ref_mcn - torch.sum(ref_mcn * current_normals, dim=-1).reshape(-1, 1) * current_normals
        residues = tangent_residues.norm(dim=-1)

        # Compute the losses
        loss_chamfer, _ = chamfer_distance(sample_mesh, sample_y)  # keep the deformed mesh close to the target shape
        loss_normal = mesh_normal_consistency(deformed_mesh)
        loss_conformality = torch.sum((computed_angles - self.ref_angles) ** 2)
        loss_lap_conform = torch.sum(residues ** 2)
        # Penalize displacements exceeding radius_coeff * radius, scaled
        # inversely by the radius (conservative localization constraint).
        loss_deflection = torch.sum(torch.nn.functional.relu(deflections - self.radii * self.radius_coeff) * (10 / self.radii))

        loss = loss_chamfer * self.w_chamfer + loss_conformality * self.w_conformality + loss_lap_conform * self.w_lap_conform + loss_deflection * self.w_deflection + loss_normal * self.w_normal

        return loss, deformed_mesh, loss_chamfer, loss_conformality, loss_lap_conform, loss_deflection, loss_normal


# The main function of inference

def inference(args):
    """Predict a pointwise correspondence between two meshes and save it.

    Loads a cached SURFMNet, computes spectral operators for the two meshes
    in ``args.fnames``, predicts functional maps C1/C2, converts C1 into a
    pointwise map via gradient-weighted nearest neighbours in the spectral
    embedding, and writes the result (plus spectral data for ZoomOut
    refinement) to ``<base_dir>/<out_folder>/zoomout_input.mat``.
    """

    # Params
    k_eig = args.k_eig  # number of eigenbases for spectral acceleration
    input_features = args.input_features  # one of ['xyz', 'hks', 'wks', 'uni']
    hks_dim = 16
    wks_dim = 16
    uni_dim = 1
    C_in = {'xyz': 3, 'hks': hks_dim, 'uni': uni_dim, 'wks': wks_dim}[input_features]  # dimension of input features
    C_out = 256  # Enlarged

    save_dir = args.save_dir

    # Set the device
    if torch.cuda.is_available():
        device = torch.device("cuda:0")
    else:
        device = torch.device("cpu")
        print("WARNING: CPU only, this will be slow!")

    # Build model
    model = SURFMNet(C_in=C_in,
                     C_out=C_out,
                     C_width=256,  # More channels
                     N_block=args.n_blocks,  # Also downsized
                     outputs_at='vertices',
                     dropout=True)
    model = model.to(device)

    # Load state dict. map_location makes a GPU-trained checkpoint loadable
    # on the CPU fallback path above (fix: previously omitted).
    cache_file = os.path.join(save_dir, args.cache_file)
    model.load_state_dict(torch.load(cache_file, map_location=device))

    # Read meshes and their spectral operators.
    verts_list, faces_list, frames_list, massvec_list, L_list, evals_list, evecs_list, gradX_list, gradY_list, evecs_trans_list = compute_variables_area(base_dir=args.base_dir, fnames=args.fnames, k_eig=k_eig)

    # Load inputs: shape x (source) and shape y (target).
    verts_x, faces_x, frames_x, mass_x, L_x, evals_x, evecs_x, gradX_x, gradY_x, evecs_trans_x = verts_list[0], faces_list[0], frames_list[0], massvec_list[0], L_list[0], evals_list[0], evecs_list[0], gradX_list[0], gradY_list[0], evecs_trans_list[0]
    verts_y, faces_y, frames_y, mass_y, L_y, evals_y, evecs_y, gradX_y, gradY_y, evecs_trans_y = verts_list[1], faces_list[1], frames_list[1], massvec_list[1], L_list[1], evals_list[1], evecs_list[1], gradX_list[1], gradY_list[1], evecs_trans_list[1]

    # Make sure the output directory exists.
    output_dir = os.path.join(args.base_dir, args.out_folder)
    diffusion_net.utils.ensure_dir_exists(output_dir)

    # Compute input features for the network.
    if input_features == 'xyz':
        features_x = verts_x
        features_y = verts_y
    elif input_features == 'hks':
        features_x = diffusion_net.geometry.compute_hks_autoscale(evals_x, evecs_x, hks_dim)
        features_y = diffusion_net.geometry.compute_hks_autoscale(evals_y, evecs_y, hks_dim)
    elif input_features == 'wks':
        # NOTE(review): auto_WKS is neither imported nor defined in this file,
        # so this branch raises NameError -- confirm the intended import.
        features_x = auto_WKS(evals_x, evecs_x, wks_dim, scaled=False)
        features_y = auto_WKS(evals_y, evecs_y, wks_dim, scaled=False)
    elif input_features == 'uni':
        features_x = torch.ones(verts_x.shape[0], uni_dim).to(device)
        features_y = torch.ones(verts_y.shape[0], uni_dim).to(device)

    # Add a leading batch dimension and move everything to the device.
    verts_x = verts_x.unsqueeze(0).to(device)
    faces_x = faces_x.unsqueeze(0).to(device)
    frames_x = frames_x.unsqueeze(0).to(device)
    mass_x = mass_x.unsqueeze(0).to(device)
    L_x = L_x.unsqueeze(0).to(device)
    evals_x = evals_x.unsqueeze(0).to(device)
    evecs_x = evecs_x.unsqueeze(0).to(device)
    gradX_x = gradX_x.unsqueeze(0).to(device)
    gradY_x = gradY_x.unsqueeze(0).to(device)
    evecs_trans_x = evecs_trans_x.unsqueeze(0).to(device)

    verts_y = verts_y.unsqueeze(0).to(device)
    faces_y = faces_y.unsqueeze(0).to(device)
    frames_y = frames_y.unsqueeze(0).to(device)
    mass_y = mass_y.unsqueeze(0).to(device)
    L_y = L_y.unsqueeze(0).to(device)
    evals_y = evals_y.unsqueeze(0).to(device)
    evecs_y = evecs_y.unsqueeze(0).to(device)
    gradX_y = gradX_y.unsqueeze(0).to(device)
    gradY_y = gradY_y.unsqueeze(0).to(device)
    evecs_trans_y = evecs_trans_y.unsqueeze(0).to(device)

    features_x = features_x.unsqueeze(0).to(device)
    features_y = features_y.unsqueeze(0).to(device)

    # Evaluation: predict the functional maps C1 (x->y) and C2 (y->x).
    model.eval()
    eval_n_basis = args.eval_n_basis

    with torch.no_grad():
         C1, C2, feat_x, feat_y = model(x_in=features_x, mass_x=mass_x, L_x=L_x, evals_x=evals_x, evecs_x=evecs_x, gradX_x=gradX_x, gradY_x=gradY_x, evecs_trans_x=evecs_trans_x[:, :eval_n_basis, :], y_in=features_y, mass_y=mass_y, L_y=L_y, evals_y=evals_y, evecs_y=evecs_y, gradX_y=gradX_y, gradY_y=gradY_y, evecs_trans_y=evecs_trans_y[:, :eval_n_basis, :])

    # Convert the functional map into per-vertex spectral embeddings whose
    # nearest neighbours give the pointwise correspondence.
    x_to_y = toNP(C1.squeeze() @ evecs_x[:, :, :eval_n_basis].squeeze().t())
    y = toNP(evecs_y[:, :, :eval_n_basis].squeeze().t())

    # Gradient weight: per-vertex gradient magnitude of each eigenfunction,
    # normalized per row, used to weight the nearest-neighbour distance.
    gradient_Xx = gradX_x.to_dense().squeeze() @ evecs_x[:, :, :eval_n_basis].squeeze()
    gradient_Yx = gradY_x.to_dense().squeeze() @ evecs_x[:, :, :eval_n_basis].squeeze()
    gradient_x = torch.sqrt(gradient_Xx ** 2 + gradient_Yx ** 2)
    gradient_x = gradient_x / torch.linalg.norm(gradient_x, dim=1).reshape(-1, 1)

    # Gradient-weighted nearest-neighbour search: for each source vertex,
    # pick the target vertex minimizing the weighted squared distance.
    corresponded_idx_list = []
    x_feats = x_to_y.transpose()
    grads_x = toNP(gradient_x)
    y_feats = y.transpose()

    for idx in range(verts_x.squeeze().shape[0]):
        x_feat = x_feats[idx, :]
        distance = np.sum(((y_feats - x_feat) * grads_x[idx, :]) ** 2, axis=1)
        corresponded_idx_list.append(np.argmin(distance))

    corresponded_idx = np.array(corresponded_idx_list)

    # Output everything ZoomOut refinement needs.
    to_save = {'T12_initial': corresponded_idx, 'C21_initial': toNP(C2).squeeze().astype(np.float64), 'evecs_x': toNP(evecs_x.squeeze()), 'evecs_y': toNP(evecs_y.squeeze()), 'feat_x': toNP(feat_x.squeeze()), 'feat_y': toNP(feat_y.squeeze()), 'evecs_trans_x': toNP(evecs_trans_x.squeeze()), 'evecs_trans_y': toNP(evecs_trans_y.squeeze())}
    sio.savemat(os.path.join(output_dir, 'zoomout_input.mat'), to_save)

    return

# __main__
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Pass parameters."
    )
    parser.add_argument("--lr", type=float, default=0.000025, help="refined inference: learning rate")
    parser.add_argument("--Niter", type=int, default=5000, help="refined inference: number of iterations")
    parser.add_argument("--decay_every", type=int, default=1000, help="refined inference: interval for lr decay")
    parser.add_argument("--decay_rate", type=float, default=0.707, help="refined inference: coefficient for lr decay")
    parser.add_argument("--radius_coeff", type=float, default=0.9, help="refined inference: coefficient of searching radius")
    parser.add_argument('--input_features', default='hks', help='type of the input features, one of xyz, hks and wks')
    parser.add_argument("--anchor_ratio", type=float, default=0.001, help="ratio of anchors in optimization")
    parser.add_argument("--deflect_margin", type=float, default=0.4, help="margin threshold in judging deflection")
    parser.add_argument("--batch_interval", type=int, default=100, help="cases in a batch during batch least-square inference")
    parser.add_argument("--edge_ratio", type=float, default=10, help="cases in a batch during batch least-square inference")
    parser.add_argument("--coeff_threshold", type=float, default=-0.1, help="the allowed threshold margin in finding legal barycentric parameters")
    parser.add_argument("--n_blocks", type=int, default=6, help="number of DiffusionBlocks")

    parser.add_argument('--base_dir', help='root directory of the meshes')
    parser.add_argument('--save_dir', help='path to the cached model')
    parser.add_argument('--cache_file', help='name of the cache file')
    parser.add_argument('--fnames', nargs='+', help='list of the meshes')
    parser.add_argument("--eval_n_basis", type=int, default=48, help="number of eigenbases for evaluation")
    parser.add_argument("--k_eig", type=int, default=128, help="number of eigenbases for spectral acceleration")
    parser.add_argument('--raw_inference', help='name of the raw inference')
    parser.add_argument('--refined_inference', default='', help='name of the refined inference')
    parser.add_argument('--out_folder', default='infered_correspondence', help='name of the refined inference')
    parser.add_argument("--w_conformality", type=float, default=1.0, help="weight of angle-based conformality loss")
    parser.add_argument("--w_lap_conform", type=float, default=5e5, help="weight of mcn-based conformality loss")
    parser.add_argument("--w_deflection", type=float, default=1.0, help="weight of the penalty in terms of localization")

    parser.add_argument("--raw_only", type=bool, default=False, help="refined inference: opt-out")
    parser.add_argument("--original_mesh", type=bool, default=False, help="output the scaled/centered original mesh or not")
    parser.add_argument("--target_mesh", type=bool, default=False, help="output the scaled/centered target mesh or not")

    args = parser.parse_args()
    inference(args)
