"""
the path of this script:
$ROOT/refine.py
"""
import os, sys, argparse, yaml
import torch
import numpy as np
import open3d as o3d
sys.path.append('./')
sys.path.append('./dif')
from lib.fk.FK_layer import FK_layer
from dif.dif_net import DeformedImplicitField
from lib.fk.utils import o3d_read_mesh
from lib.tf.utils import get_grasp_hand_lineset
from lib.tf.FC_loss import FCLoss

# Command-line interface.
parser = argparse.ArgumentParser(description='Arguments for the program')
parser.add_argument('--category', default="mug", type=str)
parser.add_argument('--grasp_id', default="2", type=str)
parser.add_argument('--epochs', default=150, type=int, help='epoch num')
parser.add_argument('--lr', default=1e-3, type=float, help='learning rate')
# Fixed typo in the help text ("visualiztion" -> "visualization").
parser.add_argument('--vis_id', default=-1, type=int, help='-1 represents no visualization!')
parser.add_argument('--exp_name', default="tf", type=str, choices=["all", "tf", "fc", "trans", "colli"])
args = parser.parse_args()

# Number of actuated Shadow Hand DoFs being optimized.
SHADOW_HAND_DOF_NUM = 22
# Per-instance npz file holding the transferred grasp points on the object surface.
g_pts_file = "grasp_{}_points_on_surface.npz".format(args.grasp_id)

def get_transferred_grasp_points(category, mode="eval", pts_file=None):
    """Collect transferred grasp supervision points for every instance of a category.

    Walks ``grasp_data/<category>/<mode>/<inst_name>/<number>/`` and loads the
    npz file named *pts_file* from each numbered instance directory.

    Args:
        category: object category name, e.g. "mug".
        mode: dataset split subdirectory (default "eval", the original behavior).
        pts_file: npz filename to load per instance; ``None`` falls back to the
            module-level ``g_pts_file`` (backward compatible).

    Returns:
        Tuple of:
          - (N, P, 3) float32 array of grasp points, scaled by
            ``obj_scale * 0.5`` (the inverse of the inv_scale computed below),
          - list of N relative instance paths "<category>/<mode>/<inst>/<number>",
          - (N,) array of object scales,
          - (N,) array of inverse scales ``1 / (obj_scale * 0.5)``,
          - (N, ...) array of shape codes.
    """
    if pts_file is None:
        pts_file = g_pts_file  # original behavior: read the module-level global
    tf_pos_list = []
    inst_list = []
    scale_list = []
    inv_scale_list = []
    code_list = []
    root = os.path.join("grasp_data", category, mode)
    for inst_name in os.listdir(root):
        inst_dir = os.path.join(root, inst_name)
        for number in os.listdir(inst_dir):
            inst_list.append(os.path.join(category, mode, inst_name, str(number)))
            npz_path = os.path.join(inst_dir, str(number), pts_file)
            tf_pos = np.load(npz_path)
            points = tf_pos["grasp_points"] * tf_pos["obj_scale"] * 0.5
            tf_pos_list.append(points)
            scale_list.append(tf_pos["obj_scale"])
            inv_scale_list.append(1.0 / (tf_pos["obj_scale"] * 0.5))
            code_list.append(tf_pos["code"])
    return (np.array(tf_pos_list, np.float32), inst_list, np.array(scale_list),
            np.array(inv_scale_list), np.array(code_list))

def get_init_grasp_config(category, grasp_id):
    """Load the hand-labeled grasp that serves as the optimization start point.

    Returns a tuple ``(base, rotations, grasp_label, contact_label)`` where
    ``base`` is the 7-dim palm pose [tx, ty, tz, qw, qx, qy, qz], ``rotations``
    are the 22 joint angles (entries 2:24 of the labeled joint vector), and the
    last two entries are the raw npz archives.
    """
    # One labeled reference model per category.
    model_ids = {
        "mug":    "62634df2ad8f19b87d1b7935311a2ed0",
        "bottle": "3108a736282eec1bc58e834f0b160845",
        "bowl":   "8bb057d18e2fcc4779368d1198f406e7",
    }
    inst_dir = "grasp_data/{}/train/{}/0".format(category, model_ids[category])

    grasp_label = np.load("{}/label_grasp_{}.npz".format(inst_dir, grasp_id))
    # Reorder the quaternion [x, y, z, w] -> [w, x, y, z].
    palm_q = np.roll(grasp_label['palm_q'], 1)
    base = np.concatenate([grasp_label['palm_t'], palm_q], axis=0)
    rotations = grasp_label['joints'][2:24]

    contact_label = np.load("{}/label_grasp_{}_contact_info.npz".format(inst_dir, grasp_id))
    return base, rotations, grasp_label, contact_label

def main() -> None:
    """Refine a labeled initial grasp so it fits every evaluation instance.

    Pipeline:
      1. Load transferred per-instance contact points (supervision targets).
      2. Load the human-labeled grasp as the initial hand configuration.
      3. Jointly optimize the hand root offset and joint-angle embedding with
         SGD against transfer / contact / force-closure / collision losses
         (the active combination is selected by ``--exp_name``).
      4. Save the refined grasp for each instance under ``grasp_data/``.
    """

    # Grasp points used as supervision; obtained by transfer (lib/tf/transfer.py).
    tf_pos, inst_list, scales, inv_scales, codes = get_transferred_grasp_points(args.category)
    inst_num = tf_pos.shape[0]
    gt_positions = torch.from_numpy(tf_pos).float().cuda()
    codes_cuda = torch.from_numpy(codes).float().cuda()

    # The labeled grasp serves as the initial grasp configuration.
    base, rotations, grasp_label, contact_label = get_init_grasp_config(args.category, args.grasp_id)
    contact_idx = contact_label['contact_idx']
    non_contact_idx = contact_label['non_contact_idx']
    pre_bases = torch.FloatTensor(base).reshape(1, -1).repeat(inst_num, 1).cuda()
    pre_rotations = torch.FloatTensor(rotations).reshape(1, -1).repeat(inst_num, 1).cuda()

    # NOTE(review): the repeat assumes the FK layer outputs exactly 2000 hand
    # surface points of dim 3 per instance — confirm against FK_layer.
    inv_scales_cuda = torch.from_numpy(inv_scales).float().reshape(-1, 1, 1).repeat(1, 2000, 3).cuda()
    
    # Visualization (enabled when --vis_id is a valid instance index).
    vis_id = args.vis_id
    if vis_id != -1:
        vis = o3d.visualization.Visualizer()
        vis.create_window()
        mesh_path = "assets/urdf/shapenet/{}.ply".format(inst_list[vis_id])
        mesh = o3d_read_mesh(mesh_path, scale=scales[vis_id])
        # mesh = o3d_read_mesh('mesh.ply', scale=scales[vis_id] * 0.5)
        hand_key_pts = get_grasp_hand_lineset(tf_pos[vis_id])
        hand_mesh = o3d.geometry.PointCloud()
        hand_mesh.points = o3d.utility.Vector3dVector(tf_pos[vis_id])
        contact_pts = o3d.geometry.PointCloud()
        contact_pts.points = o3d.utility.Vector3dVector(tf_pos[vis_id])
        contact_pts.paint_uniform_color([1, 0, 0])
        vis.add_geometry(mesh)
        vis.add_geometry(hand_key_pts)
        vis.add_geometry(hand_mesh)
        vis.add_geometry(contact_pts)

    # Models: pretrained deformed implicit field (object SDF) plus the
    # differentiable forward-kinematics layer for the Shadow Hand.
    dif_config = 'dif/configs/generate/{0}.yml'.format(args.category)
    with open(os.path.join(dif_config),'r') as stream: 
        meta_params = yaml.safe_load(stream)
    dif_model = DeformedImplicitField(**meta_params)
    dif_model.load_state_dict(torch.load(meta_params['checkpoint_path']))
    dif_model.cuda()
    fk_model = FK_layer(pre_bases, pre_rotations, inst_num, SHADOW_HAND_DOF_NUM)
    fk_model.cuda()

    mseloss = torch.nn.MSELoss()
    fcloss = FCLoss()

    # Optimizers and learning rates: the root translation gets a much smaller
    # step (lr * 1e-3) than the joint-angle embedding.
    optim_pos = torch.optim.SGD(fk_model.root_offset.parameters(), lr=args.lr * 1e-3, momentum=0.9)
    optim_rot = torch.optim.SGD(fk_model.emb.parameters(), lr=args.lr, momentum=0.9)

    # NOTE(review): loss_min is never read below — the saved "best_*" values
    # are simply those of the final epoch, not of the lowest-loss epoch.
    loss_min = 1e7
    for epoch in range(args.epochs+1):
        positions, hand_pts = fk_model()
        # Transfer loss: pull contact points toward the transferred targets.
        sup_positions = hand_pts[:, contact_idx, :]
        l_transfer = mseloss(sup_positions, gt_positions)
        
        # Compute SDF values for the sampled points of the Shadow Hand.
        # NOTE(review): `contact_pts` below shadows the o3d point cloud of the
        # same name created in the visualization block above.
        normlized_hand_pts = hand_pts * inv_scales_cuda
        contact_pts = torch.clone(normlized_hand_pts[:, contact_idx, :])
        contact_pts_sdfs = dif_model.inference_with_grad(contact_pts, codes_cuda)
        non_contact_pts = torch.clone(normlized_hand_pts[:, non_contact_idx, :])
        non_contact_pts_sdfs = dif_model.inference_with_grad(non_contact_pts, codes_cuda)
        
        # Contact loss: squared SDF of contact points, with a 5e-3 dead zone.
        abs_contact_pts_sdfs = contact_pts_sdfs * contact_pts_sdfs
        l_contact = torch.where(abs_contact_pts_sdfs>5e-3, abs_contact_pts_sdfs, torch.zeros_like(abs_contact_pts_sdfs))
        l_contact = torch.mean(l_contact)
        
        # Force-closure loss: surface normals are taken as the SDF gradient at
        # the contact points (create_graph=True keeps them differentiable).
        contact_pts_normals = torch.autograd.grad(contact_pts_sdfs, contact_pts, 
                                                  grad_outputs=torch.ones_like(contact_pts_sdfs), 
                                                  create_graph=True)[0]
        normals = contact_pts_normals / torch.norm(contact_pts_normals, dim=2, keepdim=True)
        l_fc_a, l_fc_b = fcloss.fc_loss(contact_pts, normals)
        l_fc_a = torch.mean(l_fc_a)
        l_fc_b = torch.mean(l_fc_b)
        
        # Collision loss: penalize non-contact points with negative SDF
        # (i.e. points inside the object).
        l_collision = -torch.where(non_contact_pts_sdfs<0, non_contact_pts_sdfs, torch.zeros_like(non_contact_pts_sdfs))
        l_collision = torch.mean(l_collision)

        # Empirically chosen loss weights.
        l_collision *= 1e4
        l_transfer *= 1e6
        l_fc_a *= 1
        l_fc_b *= 1e-2
        l_contact *= 1e3
        
        if args.exp_name == 'all':
            # Warm up with transfer + collision only, then add FC/contact terms.
            if epoch > args.epochs / 2:
                loss = l_collision + l_transfer + l_fc_a + l_fc_b + l_contact
            else:
                loss = l_collision + l_transfer
        elif args.exp_name == 'tf':
            loss = l_collision + l_transfer
        elif args.exp_name == 'fc':
            loss = l_fc_a + l_fc_b + l_contact
        elif args.exp_name == 'trans':
            loss = l_transfer
        elif args.exp_name == 'colli':
            loss = l_collision

        optim_pos.zero_grad()
        optim_rot.zero_grad()
        # NOTE(review): retain_graph=True — presumably needed because part of
        # the graph is reused; confirm, otherwise it only wastes memory.
        loss.backward(retain_graph=True)
        optim_pos.step()
        optim_rot.step()
        
        fk_model.set_rotations_into_window()
        params = fk_model.state_dict()
        # The embedding weights are interpreted as per-instance joint deltas.
        deltas = params['emb.weight'].data.cpu().numpy()
        
        # Refined joint angles = labeled initial angles + learned deltas.
        ref_rotations = rotations[np.newaxis, ...] + deltas[:, :SHADOW_HAND_DOF_NUM]
        
        best_ref_rotations = ref_rotations
        best_delta_positions = params['root_offset.weight'].data.cpu().numpy()
        
        # Magnitude of the learned deltas, logged as a convergence indicator.
        diff = np.linalg.norm(deltas)

        if epoch % 10 == 0:
            print(f"Epoch: [{epoch}/{args.epochs}], L_trans: {l_transfer.item():.4f}, L_coll: {l_collision.item():.4f}, L_FC(a/b/c): {l_fc_a.item():.4f} / {l_fc_b.item():.4f} / {l_contact.item():.4f}, diff: {diff:.4f}")
        
        # Live display of optimization progress.
        if vis_id != -1 and epoch % 1 == 0:
            hand_key_pts.points = o3d.utility.Vector3dVector(positions[vis_id].detach().cpu().numpy())
            hand_mesh.points = o3d.utility.Vector3dVector(hand_pts[vis_id].detach().cpu().numpy())
            hand_mesh.paint_uniform_color([0,0,0])
            vis.update_geometry(hand_key_pts)
            vis.update_geometry(hand_mesh)
            vis.poll_events()
            vis.update_renderer()
            # if epoch % 10 == 0: # block
            #     vis.run()
    if vis_id != -1:
        vis.destroy_window()

    # Save the refined grasp per instance into
    # grasp_data/*/<exp_name>_grasp_<grasp_id>.npz. The two leading zeros
    # restore the 2 joint entries dropped in get_init_grasp_config
    # (joints[2:24]).
    # NOTE(review): the label npz is read here with keys "q"/"t", while
    # get_init_grasp_config reads "palm_q"/"palm_t" — confirm both key sets
    # exist in the label files.
    sim_joints = np.concatenate([np.zeros([inst_num, 2]), best_ref_rotations], axis=1, dtype=np.float32) # [inst_num, 24]
    for i in range(sim_joints.shape[0]):
        save_dir = os.path.join("grasp_data", inst_list[i])
        np.savez(os.path.join(save_dir, "{}_grasp_{}".format(args.exp_name, args.grasp_id)), q=grasp_label["q"], t=grasp_label["t"]+best_delta_positions[i], joints=sim_joints[i], obj_scale=scales[i])

# Script entry point.
if __name__ == "__main__":
    main()