import os, sys, argparse, yaml
import torch
import torch.nn as nn
import numpy as np
import open3d as o3d
sys.path.append('./')
sys.path.append('./dif')
from lib.fk.FK_layer import FK_layer
from dif.dif_net import DeformedImplicitField
from lib.fk.utils import o3d_read_mesh, get_4x4_matrix, R2q
from lib.tf.utils import get_grasp_hand_lineset
from lib.tf.FC_loss import FCLoss

class ContactPointsRefiner:
    """Refine grasp contact points against an object's implicit surface.

    Learns a small per-point offset (``delta_p``) for every contact point so
    that the points (a) lie on the object's SDF zero level set and (b) improve
    a force-closure objective, via gradient descent.
    """

    def __init__(self, category, pts, codes, inst_list, verbose=False):
        """Build the refiner and load the pretrained DIF model.

        Args:
            category: object category name; selects ``dif/configs/generate/<category>.yml``.
            pts: numpy array of contact points, shape (batch, num_points, 3),
                in the normalized object frame.
            codes: numpy array of per-instance DIF latent codes (one row per batch item).
            inst_list: instance identifiers, parallel to the batch dimension of ``pts``.
            verbose: if True, print per-epoch losses during ``run``.
        """
        bs = pts.shape[0]       # batch size (number of grasps)
        pts_num = pts.shape[1]  # contact points per grasp
        # Learnable flat offsets (bs, pts_num*3); reshaped to (bs, pts_num, 3)
        # in forward(). Zero-initialized so optimization starts from the
        # unrefined input points.
        self.delta_p = nn.Embedding(bs, pts_num * 3)
        self.delta_p.cuda()
        nn.init.zeros_(self.delta_p.weight)
        self.pts = torch.from_numpy(pts).float().cuda()
        self.codes = torch.from_numpy(codes).float().cuda()
        self.loss = FCLoss()

        # Load the pretrained deformed implicit field for this category.
        dif_config = 'dif/configs/generate/{0}.yml'.format(category)
        with open(dif_config, 'r') as stream:
            meta_params = yaml.safe_load(stream)
        self.dif_model = DeformedImplicitField(**meta_params)
        self.dif_model.load_state_dict(torch.load(meta_params['checkpoint_path']))
        self.dif_model.cuda()

        self.inst_list = inst_list
        self.verbose = verbose

    def forward(self):
        """Evaluate the refinement losses at the current offsets.

        Returns:
            Tuple ``(l_fc_a, l_fc_b, l_reg, l_t, cur_pts)`` where the first
            four are scalar loss tensors (force-closure terms a/b, offset
            regularizer, surface/SDF term) and ``cur_pts`` are the refined
            contact points of shape (bs, pts_num, 3).
        """
        delta = self.delta_p.weight
        bs = delta.size(0)
        delta = delta.reshape(bs, -1, 3)
        cur_pts = self.pts + delta  # refined contact points
        sdfs = self.dif_model.inference_with_grad(cur_pts, self.codes)
        # Surface normals are the normalized SDF gradient at the query points.
        # create_graph=True so the force-closure loss can backprop through them.
        normals = torch.autograd.grad(sdfs, cur_pts,
                                      grad_outputs=torch.ones_like(sdfs),
                                      create_graph=True)[0]
        normals = normals / torch.norm(normals, dim=2, keepdim=True)
        l_fc_a, l_fc_b = self.loss.fc_loss(cur_pts, normals)
        l_fc_a = torch.mean(l_fc_a)
        l_fc_b = torch.mean(l_fc_b)

        l_reg = torch.mean(torch.norm(delta, dim=1))  # keep offsets small
        l_t = torch.mean(torch.norm(sdfs, dim=1))     # pull points onto the surface
        return l_fc_a, l_fc_b, l_reg, l_t, cur_pts

    def run(self, lr=1e-3, nepoch=50, vis_id=-1):
        """Optimize the point offsets with SGD, optionally visualizing one grasp.

        Args:
            lr: SGD learning rate.
            nepoch: number of optimization epochs.
            vis_id: batch index to animate in an Open3D window, or -1 to
                disable visualization.
        """
        optim = torch.optim.SGD(self.delta_p.parameters(), lr=lr, momentum=0.9)

        if vis_id != -1:
            vis = o3d.visualization.Visualizer()
            vis.create_window()
            mesh_path = f"datasets/obj/{self.inst_list[vis_id]}.ply"
            mesh = o3d_read_mesh(mesh_path, scale=2.0)
            contact_pts = o3d.geometry.PointCloud()
            contact_pts.points = o3d.utility.Vector3dVector(self.pts[vis_id].detach().cpu().numpy())
            contact_pts.paint_uniform_color([1, 0, 0])  # contact points in red
            vis.add_geometry(mesh)
            vis.add_geometry(contact_pts)

        for epoch in range(nepoch):
            l_fc1, l_fc2, l_reg, l_t, cur_pts = self.forward()
            # Loss weighting.
            l_fc2 *= 1e-4
            l_t *= 1e2
            l_reg *= 1e0
            # NOTE(review): l_reg is weighted and logged but NOT included in
            # l_total — confirm this exclusion is intentional.
            l_total = l_fc1 + l_fc2 + l_t
            if self.verbose:
                print(f'Epoch [{epoch:3d}/{nepoch:3d}] || L_fc1: {l_fc1.item():6.2f} | L_fc2: {l_fc2.item():6.2f} | L_t: {l_t.item():6.2f} | L_reg:{l_reg.item():6.2f}')
            optim.zero_grad()
            # NOTE(review): a fresh graph is built each epoch by forward(), so
            # retain_graph=True looks unnecessary — kept as-is pending confirmation.
            l_total.backward(retain_graph=True)
            optim.step()

            if vis_id != -1 and epoch % 1 == 0:
                # Animate the refined points for the selected grasp.
                contact_pts.points = o3d.utility.Vector3dVector(cur_pts[vis_id].detach().cpu().numpy())
                vis.update_geometry(contact_pts)
                vis.poll_events()
                vis.update_renderer()

        if vis_id != -1:
            vis.run()  # keep the window open after optimization finishes

if __name__ == '__main__':
    # Script configuration: refine contact points for every instance of one
    # category/split, visualizing the grasp at index `vis_id`.
    category = 'camera'
    mode = 'eval'
    grasp_id = 13972
    vis_id = 8
    g_pts_file = "grasp_{}_points_on_surface.npz".format(grasp_id)

    norm_pts_list = []
    tf_pos_list = []
    inst_list = []
    scale_list = []
    code_list = []
    inv_scale_list = []

    data_root = os.path.join("grasp_data", category, mode)
    # sorted(): os.listdir returns entries in arbitrary, filesystem-dependent
    # order; sorting makes inst_list (and hence vis_id) reproducible.
    for inst_name in sorted(os.listdir(data_root)):
        for number in sorted(os.listdir(os.path.join(data_root, inst_name))):
            inst_list.append(os.path.join(category, mode, inst_name, str(number)))
            npz_path = os.path.join(data_root, inst_name, str(number), g_pts_file)
            tf_pos = np.load(npz_path)

            # use predition of object scale when input data are single-view point clouds (Sec.IV-C)
            # use ground truth scale when input data are complete models (Ablation Study)
            if 'pred_scale' in tf_pos:
                scale = tf_pos['pred_scale']
            else:
                scale = tf_pos['obj_scale']

            norm_points = tf_pos["grasp_points"]
            points = tf_pos["grasp_points"] * scale * 0.5  # de-normalized points
            norm_pts_list.append(norm_points)
            tf_pos_list.append(points)
            scale_list.append(scale)
            inv_scale_list.append(1.0 / (scale * 0.5))
            code_list.append(tf_pos["code"])

    norm_points = np.array(norm_pts_list, np.float32)
    codes = np.array(code_list)
    worker = ContactPointsRefiner(category, norm_points, codes, inst_list, verbose=True)
    worker.run(nepoch=200, vis_id=vis_id)