import os, sys, argparse, yaml, math
import torch
import numpy as np
import open3d as o3d
sys.path.append('./')
sys.path.append('./dif')
from lib.fk.FK_layer import FK_layer
from dif.dif_net import DeformedImplicitField
from lib.fk.utils import o3d_read_mesh, get_4x4_matrix, R2q
from lib.tf.utils import get_grasp_hand_lineset
from lib.tf.FC_loss import FCLoss
from lib.tf.refine_contact_points import ContactPointsRefiner

# ---- Command-line configuration --------------------------------------------
parser = argparse.ArgumentParser(description='Arguments for the program')
parser.add_argument('--category', default="mug", type=str)
parser.add_argument('--grasp_id', default="0", type=str)
parser.add_argument('--epochs', default=200, type=int, help='epoch num')
parser.add_argument('--lr', default=1e-3, type=float, help='learning rate')
parser.add_argument('--vis_id', default=-1, type=int, help='-1 represents no visualiztion!')
# exp_name selects which loss terms are combined during refinement (see refine()).
parser.add_argument('--exp_name', default="all", type=str)
# pc_input: input data are single-view point clouds (use predicted object scale).
parser.add_argument('--pc_input', action='store_true', default=False)
args = parser.parse_args()
print(args)

# Number of controllable DoFs of the Shadow Hand.
SHADOW_HAND_DOF_NUM = 22
# Per-instance npz filename holding the transferred grasp points.
g_pts_file = "grasp_{}_points_on_surface.npz".format(args.grasp_id)

def get_transferred_grasp_points(category):
    """Collect transferred grasp points for every instance of *category*.

    Scans grasp_data/<category>/<mode>/<inst>/<num>/ for the grasp-point npz
    produced by the transfer stage (lib/tf/transfer.py) and stacks the
    per-instance entries into arrays.

    Returns:
        (tf_pos, inst_list, scales, inv_scales, codes, g_pts_wts) where
        tf_pos holds the grasp points rescaled to object space.
    """
    mode = "eval_pc" if args.pc_input else "eval"
    inst_list = []
    norm_pts_list, scale_list, inv_scale_list = [], [], []
    code_list, g_pts_wt_list = [], []

    root = f"grasp_data/{category}/{mode}"
    for inst_name in os.listdir(root):
        for number in os.listdir(f"{root}/{inst_name}"):
            inst_list.append(os.path.join(category, mode, inst_name, str(number)))
            data = np.load(os.path.join("grasp_data", category, mode, inst_name, str(number), g_pts_file))

            # Use the predicted object scale when inputs are single-view point
            # clouds (Sec.IV-C); use the ground-truth scale when inputs are
            # complete models (ablation study).
            if args.pc_input:
                scale = data['pred_scale']
            else:
                assert 'pred_scale' not in data.keys()
                scale = data['obj_scale']

            norm_pts_list.append(data["grasp_points"])
            scale_list.append(scale)
            inv_scale_list.append(1.0 / (scale * 0.5))
            code_list.append(data["code"])
            g_pts_wt_list.append(data["grasp_points_weight"])

    norm_points = np.array(norm_pts_list, np.float32)
    codes = np.array(code_list)
    scales = np.array(scale_list)
    inv_scales = np.array(inv_scale_list)
    g_pts_wts = np.array(g_pts_wt_list, np.float32)

    # NOTE(review): an optional force-closure refinement of the contact points
    # (ContactPointsRefiner) was disabled here in the original code.

    # Rescale the normalized grasp points back into object space.
    tf_pos = norm_points * scales[:, np.newaxis, np.newaxis] * 0.5

    return tf_pos, inst_list, scales, inv_scales, codes, g_pts_wts

def get_init_grasp_config(category, grasp_id):
    """Load the labeled source grasp used to initialize the optimization.

    Reads the source instance name from cfg/class_cfg.yml, then loads the
    grasp label and its contact annotation for the given grasp id.

    Returns:
        (base, rotations, grasp_label, contact_label) where base is the
        7-dim palm pose [t(3), q_wxyz(4)] and rotations are entries 2..23
        of the label's joint vector (the 22 Shadow Hand DoFs).
    """
    with open('cfg/class_cfg.yml', 'r') as cfg_file:
        src_inst = yaml.safe_load(cfg_file)[category]['src_inst_name']

    grasp_label = np.load(
        f"grasp_label/{category}/train/{src_inst}/0/label_grasp_{grasp_id}.npz")
    contact_label = np.load(
        f"grasp_label/{category}/train/{src_inst}/0/label_grasp_{grasp_id}_contact_info.npz")

    # Reorder the quaternion from [x, y, z, w] to [w, x, y, z].
    palm_q = grasp_label['palm_q'][[3, 0, 1, 2]]
    base = np.concatenate([grasp_label['palm_t'], palm_q], axis=0)
    rotations = grasp_label['joints'][2:24]
    return base, rotations, grasp_label, contact_label

def robot_hand_self_collision_loss(key_pts):
    """Penalize self-collision between Shadow Hand keypoints.

    Computes pairwise distances between keypoints of neighboring fingers and
    between the thumb and all other finger links, and applies a hinge penalty
    to any pair closer than the 0.022 safety margin.

    Args:
        key_pts: [B, K, 3] tensor of hand keypoint positions (K >= 28).
            Index semantics follow the FK layer's keypoint ordering
            (per the original lineset comment: links 2-4 index, 6-8 middle,
            10-12 ring, 15-17 little, 21-22 thumb; 23-27 fingertips) —
            assumed from context, confirm against lib/fk.

    Returns:
        [B] tensor: mean hinge penalty over all monitored pairs
        (0 when no pair is within the margin).
    """
    def _pair_dists(pts, idx1, idx2):
        # Euclidean distances between all (idx1, idx2) keypoint pairs,
        # flattened to [B, len(idx1) * len(idx2)].
        diff = pts[:, idx1].unsqueeze(2) - pts[:, idx2].unsqueeze(1)
        return diff.norm(dim=-1).reshape(pts.shape[0], -1)

    d1 = _pair_dists(key_pts, [3, 4], [7, 8])            # index vs middle links
    d2 = _pair_dists(key_pts, [7, 8], [11, 12])          # middle vs ring links
    d3 = _pair_dists(key_pts, [11, 12], [16, 17])        # ring vs little links
    # Thumb joints vs every other finger link.
    d4 = _pair_dists(key_pts, [21, 22, 27],
                     [2, 3, 4, 6, 7, 8, 10, 11, 12, 15, 16, 17])
    # Signed margin: negative means the pair is closer than 0.022 m.
    d = torch.cat([d1, d2, d3, d4], dim=1) - 0.022
    # Hinge loss: only penalize pairs inside the margin.
    loss = torch.relu(-d)
    return loss.mean(dim=1)

def refine(inst_num, tf_pos, inst_list, scales, inv_scales, codes, gt_positions, codes_cuda, g_pts_wts_cuda):
    """Optimize grasp configurations for a batch of instances.

    Starts from the labeled source grasp and refines the hand root pose and
    joint angles with SGD so that the hand's contact points match the
    transferred grasp points, while penalizing object penetration, missed
    contacts, and hand self-collision (loss terms selected by args.exp_name).
    When args.vis_id == -1 the results are saved per instance under
    grasp_data/<inst>/<exp_name>_grasp_<grasp_id>.npz.

    Args:
        inst_num: number of instances in this batch.
        tf_pos: transferred grasp points in object scale (numpy) — assumed
            shape [inst_num, P, 3]; confirm against get_transferred_grasp_points.
        inst_list: per-instance relative paths under grasp_data/.
        scales: per-instance object scales (numpy).
        inv_scales: per-instance inverse half-scales, 1 / (scale * 0.5).
        codes: latent shape codes (numpy); codes_cuda: same data on GPU.
        gt_positions: tf_pos as a CUDA tensor (supervision targets).
        g_pts_wts_cuda: per-contact-point supervision weights on GPU.
    """
    # Labeled grasp, used as the initial grasp configuration
    base, rotations, grasp_label, contact_label = get_init_grasp_config(args.category, args.grasp_id)
    contact_idx = contact_label['contact_idx']
    non_contact_idx = contact_label['non_contact_idx']
    pre_bases = torch.FloatTensor(base).reshape(1, -1).repeat(inst_num, 1).cuda()
    pre_rotations = torch.FloatTensor(rotations).reshape(1, -1).repeat(inst_num, 1).cuda()

    pts_dict = np.load('./assets/sampled_pts_of_shadow_hand.npy', allow_pickle=True).item()
    
    # Count the total number of sampled hand-surface points. Point sets whose
    # name starts with 'F' are finger-link templates shared by 4 fingers.
    num = 0
    for name in pts_dict:
        # ['palm', 'lfmetacarpal', 'F3', 'F2', 'F1', 'TH3_z', 'TH2_z', 'TH1_z']
        if name[0] == 'F':
            num += 4 * pts_dict[name].shape[0]
        else:
            num += pts_dict[name].shape[0]
    inv_scales_cuda = torch.from_numpy(inv_scales).float().reshape(-1, 1, 1).repeat(1, num, 3).cuda()
    
    # Visualization setup (only when a specific instance id was requested)
    vis_id = args.vis_id
    if vis_id != -1:
        vis = o3d.visualization.Visualizer()
        vis.create_window()
        inst_path = inst_list[vis_id].replace('eval_pc', 'eval')
        mesh_path = f"datasets/obj/{inst_path}.ply"
        mesh = o3d_read_mesh(mesh_path, scale=scales[vis_id])
        # mesh = o3d_read_mesh('mesh.ply', scale=scales[vis_id] * 0.5)
        hand_key_pts = get_grasp_hand_lineset(tf_pos[vis_id])
        hand_mesh = o3d.geometry.PointCloud()
        hand_mesh.points = o3d.utility.Vector3dVector(tf_pos[vis_id])
        contact_pts = o3d.geometry.PointCloud()
        contact_pts.points = o3d.utility.Vector3dVector(tf_pos[vis_id])
        contact_pts.paint_uniform_color([1, 0, 0])
        vis.add_geometry(mesh)
        vis.add_geometry(hand_key_pts)
        vis.add_geometry(hand_mesh)
        vis.add_geometry(contact_pts)

    # Models: implicit object shape (DIF) and forward-kinematics hand layer
    dif_config = 'dif/configs/generate/{0}.yml'.format(args.category)
    with open(os.path.join(dif_config),'r') as stream: 
        meta_params = yaml.safe_load(stream)
    dif_model = DeformedImplicitField(**meta_params)
    dif_model.load_state_dict(torch.load(meta_params['checkpoint_path']))
    dif_model.cuda()
    fk_model = FK_layer(pre_bases, pre_rotations, inst_num, SHADOW_HAND_DOF_NUM)
    fk_model.cuda()

    mseloss = torch.nn.MSELoss()
    fcloss = FCLoss()

    # Optimizers and learning rates (root translation uses a much smaller lr)
    optim_pos = torch.optim.SGD(fk_model.root_offset.parameters(), lr=args.lr * 1e-3, momentum=0.9)
    optim_rot = torch.optim.SGD(fk_model.emb.parameters(), lr=args.lr, momentum=0.9)

    loss_min = 1e7

    if args.exp_name != 'allfc':
        # fc_refine_nepoch = int(args.epochs / 4)
        # nepoch = args.epochs + 1 + fc_refine_nepoch        
        nepoch = args.epochs + 1
    else:
        # fc_refine_nepoch = int(args.epochs / 4)
        # nepoch = args.epochs + 1 + fc_refine_nepoch
        nepoch = args.epochs + 1

    for epoch in range(nepoch):
        positions, hand_pts = fk_model()
        
        # Live-update the visualization with the current optimization state
        if vis_id != -1 and epoch % 1 == 0:
            hand_key_pts.points = o3d.utility.Vector3dVector(positions[vis_id].detach().cpu().numpy())
            hand_mesh.points = o3d.utility.Vector3dVector(hand_pts[vis_id].detach().cpu().numpy())
            hand_mesh.paint_uniform_color([0,0,0])
            vis.update_geometry(hand_key_pts)
            vis.update_geometry(hand_mesh)
            vis.poll_events()
            vis.update_renderer()
            
        # if True:
        #     import trimesh
        #     pc1 = trimesh.PointCloud(positions[vis_id][-5:].detach().cpu().numpy(), colors=np.array([255,0,0,255]))
        #     pc2 = trimesh.PointCloud(hand_pts[vis_id].detach().cpu().numpy())
        #     trimesh.Scene([pc2,pc1,]).show()
                    
        # Transfer loss: weighted squared error between the hand's contact
        # points and the transferred grasp points.
        sup_positions = hand_pts[:, contact_idx, :]
        # l_transfer = mseloss(sup_positions, gt_positions)
        # l_transfer_2 = torch.sum((sup_positions - gt_positions) ** 2, dim=[1,2])
        _l_transfer = ((sup_positions - gt_positions) ** 2) * g_pts_wts_cuda
        l_transfer = torch.mean(_l_transfer)
        
        # index = torch.argmax(l_transfer_2)
        # print(f'\033[1;33m baddest instance {index.item()}\033[0m')
        
        # Compute SDF values for the sampled points of the shadow hand
        # (points are normalized into the DIF model's canonical scale first).
        normlized_hand_pts = hand_pts * inv_scales_cuda
        contact_pts = torch.clone(normlized_hand_pts[:, contact_idx, :])
        contact_pts_sdfs = dif_model.inference_with_grad(contact_pts, codes_cuda)
        non_contact_pts = torch.clone(normlized_hand_pts[:, non_contact_idx, :])
        non_contact_pts_sdfs = dif_model.inference_with_grad(non_contact_pts, codes_cuda)

        # Force-closure loss: surface normals are the (normalized) SDF gradients
        contact_pts_normals = torch.autograd.grad(contact_pts_sdfs, contact_pts, 
                                                  grad_outputs=torch.ones_like(contact_pts_sdfs), 
                                                  create_graph=True)[0]
        normals = contact_pts_normals / torch.norm(contact_pts_normals, dim=2, keepdim=True)
        # l_fc_a_1, l_fc_b_1 = fcloss.fc_loss(contact_pts, normals)
        l_fc_a, l_fc_b = fcloss.fc_loss(hand_pts[:, contact_idx, :], normals)
        l_fc1 = torch.mean(l_fc_a)
        # flag = l_fc_b > 200
        # if epoch > args.epochs and torch.sum(flag) == 0:
        #     break
        # l_fc_b = l_fc_b * flag
        l_fc2 = torch.where(l_fc_b > 0.5, l_fc_b, torch.zeros_like(l_fc_b))
        l_fc2 = torch.mean(l_fc2)
        
        l_fc1 *= 1e-3
        l_fc2 *= 1e-3

        # Contact loss: contact points should lie on/inside the surface (SDF <= 0)
        l_contact = torch.where(contact_pts_sdfs>0, contact_pts_sdfs, torch.zeros_like(contact_pts_sdfs))
        # if epoch > args.epochs:
        #     l_contact = torch.where(l_fc_b > 500, l_contact, torch.zeros_like(l_contact))
        l_contact = torch.mean(l_contact)
        # Hand-object collision loss: non-contact points must stay outside (SDF >= 0)
        l_collision = -torch.where(non_contact_pts_sdfs<0, non_contact_pts_sdfs, torch.zeros_like(non_contact_pts_sdfs))
        # if epoch > args.epochs:
        #     l_collision = torch.where(l_fc_b > 500, l_collision, torch.zeros_like(l_collision))
        l_collision = torch.mean(l_collision)
        # Self-collision loss of the robot hand
        l_self_colli = robot_hand_self_collision_loss(positions)
        # if epoch > args.epochs:
        #     l_self_colli = torch.where(l_fc_b > 500, l_self_colli, torch.zeros_like(l_self_colli))
        l_self_colli = torch.mean(l_self_colli)
        
        # Empirical loss weights
        l_collision *= 1e4
        l_transfer *= 1e6
        # l_transfer_decay = l_transfer * (nepoch - epoch) / nepoch
        # l_transfer_decay = l_transfer * math.exp(-0.02 * epoch)
        l_transfer_decay = l_transfer
        l_contact *= 1e2
        l_self_colli *= 1e4

        # if epoch > args.epochs and l_fc2 < 0.12:
        #     break
        
        # Select the loss combination for the requested ablation experiment
        if args.exp_name == 'all':
            loss = l_collision + l_transfer_decay + l_contact + l_self_colli
        elif args.exp_name == 'tf':
            loss = l_collision + l_transfer_decay + l_self_colli
        elif args.exp_name == 'ct':
            loss = l_transfer_decay + l_contact + l_self_colli
            # loss = l_transfer_decay + l_contact
        elif args.exp_name == 'tt':
            loss = l_collision + l_contact + l_self_colli
        elif args.exp_name == 'trans':
            loss = l_transfer_decay + l_self_colli
            # loss = l_transfer_decay
        elif args.exp_name == 'touch':
            loss = l_contact + l_self_colli
        elif args.exp_name == 'colli':
            loss = l_collision + l_self_colli
        elif args.exp_name == 'awsc':
            loss = l_collision + l_transfer_decay + l_contact
        elif args.exp_name == 'allfc':
            # if epoch <= args.epochs:
            #     loss = l_collision + l_transfer_decay + l_contact + l_self_colli
            # else:
            #     loss = l_collision + l_fc1 + l_fc2 + l_contact + l_self_colli
            loss = l_collision + l_transfer_decay + l_contact + l_self_colli + l_fc1 + l_fc2
        elif args.exp_name == 'fc':
            loss = l_collision + l_fc1 + l_fc2 + l_contact + l_self_colli

                        
        optim_pos.zero_grad()
        optim_rot.zero_grad()
        loss.backward(retain_graph=True)
        optim_pos.step()
        optim_rot.step()
        # for name, param in fk_model.named_parameters():
        #     print('name :', name)
        #     print('param:', param[4])
        #     print('grad required:', param.requires_grad)
        #     print('grad value   :', param.grad[4])
        
        fk_model.set_rotations_into_window()
        params = fk_model.state_dict()
        deltas = params['emb.weight'].data.cpu().numpy()
        
        # Refined joint angles = initial rotations + learned joint deltas
        ref_rotations = rotations[np.newaxis, ...] + deltas[:, :SHADOW_HAND_DOF_NUM]
        
        ## choice 1: choose min(l_sum) as final results
        # if l_sum < loss_min:
        #     loss_min = l_sum
        #     best_ref_rotations = ref_rotations
        #     best_delta_positions = deltas[:, SHADOW_HAND_DOF_NUM:]
        ## choice 2: choose last epoch results
        best_ref_rotations = ref_rotations
        best_delta_positions = params['root_offset.weight'].data.cpu().numpy()
        
        diff = np.linalg.norm(deltas)

        if epoch % 20 == 0:
            # print(f"Epoch: [{epoch:3d}/{args.epochs}], L_trans: {l_transfer.item():6.2f}, L_cont: {l_contact.item():6.2f}, L_coll: {l_collision.item():6.2f}, L_self: {l_self_colli.item():6.2f}, L_fc1: {l_fc1.item():6.2f}, L_fc2: {l_fc2.item():6.2f}, diff: {diff:6.2f}")
            print(f"Epoch: [{epoch:3d}/{args.epochs}], L_trans: {l_transfer.item():6.2f}, L_cont: {l_contact.item():6.2f}, L_coll: {l_collision.item():6.2f}, L_self: {l_self_colli.item():6.2f}, diff: {diff:6.2f}")

    if vis_id != -1:
        # vis.run()
        vis.destroy_window()

    # Save results into grasp_data/*/<exp_name>_grasp_<id>.npz
    # joints: [inst_num, 22] (first 2 wrist entries zero-padded for the simulator)
    # Save only when visualization is disabled (interactive runs are for inspection).
    if vis_id == -1:
        sim_joints = np.concatenate([np.zeros([inst_num, 2]), best_ref_rotations], axis=1, dtype=np.float32)
        for i in range(sim_joints.shape[0]):
            save_dir = os.path.join("grasp_data", inst_list[i])

            npz_path = os.path.join("grasp_data", inst_list[i], g_pts_file)
            tf_pos = np.load(npz_path)

            actor_q = grasp_label["q"]
            actor_palm_q = grasp_label["palm_q"]
            actor_t = grasp_label["t"]+best_delta_positions[i]
            actor_palm_t = grasp_label["palm_t"]+best_delta_positions[i]
            
            # If the instance carries an extra rigid transform (single-view
            # point-cloud input), apply it to both actor and palm poses.
            if 'trans_offset' in tf_pos.keys():
                trans_offset = tf_pos['trans_offset']
                actor_Rt = get_4x4_matrix(actor_q, actor_t)
                actor_Rt = trans_offset @ actor_Rt
                actor_q = R2q(actor_Rt[:3, :3])
                actor_t = actor_Rt[:3, 3]
                
                actor_palm_Rt = get_4x4_matrix(actor_palm_q, actor_palm_t)
                actor_palm_Rt = trans_offset @ actor_palm_Rt
                actor_palm_q = R2q(actor_palm_Rt[:3, :3])
                actor_palm_t = actor_palm_Rt[:3, 3]
            
            np.savez(os.path.join(save_dir, "{}_grasp_{}".format(args.exp_name, args.grasp_id)), 
                    palm_q=actor_palm_q,
                    palm_t=actor_palm_t,
                    q=actor_q,
                    t=actor_t, 
                    joints=sim_joints[i],
                    obj_scale=scales[i])       

def split_list_by_n(list_collection, n):
    """Split a sliceable collection into consecutive chunks of at most n items.

    Works for lists, numpy arrays, torch tensors, or any object supporting
    len() and slicing (the original raised NameError for other types because
    the length variable was only set for three hard-coded types).

    :param list_collection: sequence to split
    :param n: chunk size (positive int); the last chunk may be shorter
    :return: list of chunks, preserving order
    """
    # len() returns the first-dimension size for list/ndarray/Tensor alike.
    total = len(list_collection)
    return [list_collection[i: i + n] for i in range(0, total, n)]

def main():
    """Refine transferred grasps for every instance of args.category.

    Loads the supervision grasp points produced by the transfer stage
    (lib/tf/transfer.py), moves them to the GPU, and runs the refinement
    in batches of at most 100 instances.
    """
    tf_pos, inst_list, scales, inv_scales, codes, g_pts_wts = get_transferred_grasp_points(args.category)
    gt_positions = torch.from_numpy(tf_pos).float().cuda()
    codes_cuda = torch.from_numpy(codes).float().cuda()
    g_pts_wts_cuda = torch.from_numpy(g_pts_wts).float().cuda()

    # Process instances in fixed-size batches to bound GPU memory.
    batch = 100
    batches = zip(
        split_list_by_n(tf_pos, batch),
        split_list_by_n(inst_list, batch),
        split_list_by_n(scales, batch),
        split_list_by_n(inv_scales, batch),
        split_list_by_n(codes, batch),
        split_list_by_n(gt_positions, batch),
        split_list_by_n(codes_cuda, batch),
        split_list_by_n(g_pts_wts_cuda, batch),
    )
    for pos_b, inst_b, scale_b, inv_b, code_b, gt_b, code_cuda_b, wts_b in batches:
        refine(pos_b.shape[0], pos_b, inst_b, scale_b, inv_b, code_b,
               gt_b, code_cuda_b, wts_b)
    

# Script entry point: refine transferred grasps for the chosen category/grasp id.
if __name__ == "__main__":
    main()