import torch
import json
from torch.utils.data import DataLoader
from dataset import PointClouds, PointCloudsDex, PointCloudsDexMy
from trainer import Trainer
import os
from os.path import join as pjoin
import numpy as np

# Experiment tag used to namespace checkpoints, logs, and visualization output.
RUN_TAG = "run03"
CKPT_ROOT = f'ckpt/{RUN_TAG}/'
# Single-GPU evaluation; NOTE(review): hard-coded to cuda:0 — confirm device availability.
DEVICE = torch.device('cuda:0')
# Dataset subsets passed to PointCloudsDexMy for validation.
val_labels = ['sem', "core"]
# val_labels = ['sem', "core", "ddg", "mujoco"]
dataset_path = '/data2/haoran/3DGeneration/3DAutoEncoder/data/meshdatav3_pc_fps_new'
# JSON file for evaluation logs (presumably one record per run — verify against consumers).
TRAIN_LOGS = f'logs/{RUN_TAG}.json'
# Checkpoint restored before evaluation; "34" is presumably the epoch number — TODO confirm.
CKPT_PATH = f"ckpt/{RUN_TAG}/34.pth"
# Output directory for raw-vs-reconstructed PLY dumps (used only by commented-out code below).
VISU_ROOT = f"/data2/haoran/3DGeneration/3DAutoEncoder/visu_new/{RUN_TAG}"

def save_point_cloud_to_ply(points, colors, save_name='01.ply', save_root='/home/haorangeng/PointGroup_raw/dataset/visualization_self_space'):
    '''
    Save a point cloud as an ASCII PLY file.

    Args:
        points: sequence of N points, each indexable as [x, y, z].
        colors: sequence of N color triples; components are cast to int
            (expected to fit the PLY uchar range 0-255).
        save_name: output file name.
        save_root: directory the file is written into (must already exist).
    '''
    header = (
        "ply\nformat ascii 1.0\n"
        f"element vertex {len(points)}\n"
        "property float x\nproperty float y\nproperty float z\n"
        "property uchar red\nproperty uchar green\nproperty uchar blue\n"
        "end_header\n"
    )
    # Build each vertex line once and join: avoids quadratic += concatenation.
    body = "".join(
        f'{p[0]} {p[1]} {p[2]} {int(c[0])} {int(c[1])} {int(c[2])}\n'
        for p, c in zip(points, colors)
    )
    # 'with' guarantees the handle is closed even if the write raises.
    with open(pjoin(save_root, save_name), 'w') as f:
        f.write(header + body)



def train_and_evaluate():
    """Evaluate the autoencoder on the validation split.

    Loads the checkpoint at CKPT_PATH, runs every validation sample through
    ``model.evaluate``, prints the per-sample loss, writes the loss list to
    TRAIN_LOGS as JSON, and saves the collected 128-d latent features as a
    NumPy array next to the logs.
    """
    val = PointCloudsDexMy(dataset_path, val_labels, is_training=False)
    val_loader = DataLoader(
        dataset=val, batch_size=1, shuffle=False,
        num_workers=1, pin_memory=True
    )

    model = Trainer(0, DEVICE)
    model.load(CKPT_PATH)

    model.network.eval()
    eval_losses = []
    # Start with an empty (0, 128) tensor so torch.cat works on the first batch.
    feats = torch.ones((0, 128), device=DEVICE)
    for i, x in enumerate(val_loader):
        x = x.to(DEVICE)
        loss, x_restored, feat = model.evaluate(x)
        feats = torch.cat((feats, feat.reshape(1, 128)), dim=0)
        eval_losses.append(loss)
        print(f'i: {i}, loss: {loss:.3f}')

    print(eval_losses)

    # Persist results. The original code called np.save("") — missing the
    # mandatory array argument (TypeError) and an empty path — and then
    # dropped into pdb; both replaced with real serialization.
    os.makedirs(os.path.dirname(TRAIN_LOGS) or '.', exist_ok=True)
    with open(TRAIN_LOGS, 'w') as f:
        json.dump([float(l) for l in eval_losses], f)
    # TODO(review): confirm the intended destination for the latent features.
    np.save(pjoin(os.path.dirname(TRAIN_LOGS) or '.', f"{RUN_TAG}_feats.npy"),
            feats.cpu().numpy())



# Guard the entry point so importing this module (e.g. to reuse
# save_point_cloud_to_ply) does not trigger a full evaluation run.
if __name__ == "__main__":
    train_and_evaluate()
