Spaces: Running on Zero

Commit 1da48bb · Parent(s): 0619e07
update

This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
- .gitignore +1 -0
- app.py +766 -0
- bash_raw_cospeech_download.sh +4 -0
- ckpt/beatx2_cospeech_diffusion/0403_212319_diffusion_rvqvae_128.txt +0 -0
- ckpt/beatx2_cospeech_diffusion/0403_212319_diffusion_rvqvae_128.yaml +54 -0
- ckpt/beatx2_cospeech_diffusion/1001_203942_diffusion_rvqvae_128_gaps-210-0.txt +451 -0
- ckpt/beatx2_cospeech_diffusion/last_500.bin +3 -0
- ckpt/beatx2_rvqvae/RVQVAE_hands/net_300000.pth +3 -0
- ckpt/beatx2_rvqvae/RVQVAE_hands/run.log +0 -0
- ckpt/beatx2_rvqvae/RVQVAE_lower/net_300000.pth +3 -0
- ckpt/beatx2_rvqvae/RVQVAE_lower/run.log +0 -0
- ckpt/beatx2_rvqvae/RVQVAE_lower_trans/net_300000.pth +3 -0
- ckpt/beatx2_rvqvae/RVQVAE_lower_trans/run.log +0 -0
- ckpt/beatx2_rvqvae/RVQVAE_upper/net_300000.pth +3 -0
- ckpt/beatx2_rvqvae/RVQVAE_upper/run.log +0 -0
- configs/beat2_rvqvae.yaml +134 -0
- configs/diffusion_rvqvae_128.yaml +118 -0
- configs/diffusion_rvqvae_128_hf.yaml +118 -0
- dataloaders/amass_sep_lower.py +713 -0
- dataloaders/beat_sep.py +772 -0
- dataloaders/beat_sep_lower.py +876 -0
- dataloaders/beat_sep_lower_single.py +730 -0
- dataloaders/beat_smplx2020.py +763 -0
- dataloaders/build_vocab.py +199 -0
- dataloaders/data_tools.py +1756 -0
- dataloaders/mix_sep.py +637 -0
- dataloaders/pymo/Quaternions.py +468 -0
- dataloaders/pymo/__init__.py +0 -0
- dataloaders/pymo/data.py +53 -0
- dataloaders/pymo/features.py +43 -0
- dataloaders/pymo/mocapplayer/data-template.js +3 -0
- dataloaders/pymo/mocapplayer/js/skeletonFactory.js +233 -0
- dataloaders/pymo/mocapplayer/libs/jquery.min.js +4 -0
- dataloaders/pymo/mocapplayer/libs/math.min.js +0 -0
- dataloaders/pymo/mocapplayer/libs/mocapjs.js +1312 -0
- dataloaders/pymo/mocapplayer/libs/pace.min.js +2 -0
- dataloaders/pymo/mocapplayer/libs/papaparse.min.js +6 -0
- dataloaders/pymo/mocapplayer/libs/threejs/Detector.js +78 -0
- dataloaders/pymo/mocapplayer/libs/threejs/OrbitControls.js +1037 -0
- dataloaders/pymo/mocapplayer/libs/threejs/dat.gui.min.js +14 -0
- dataloaders/pymo/mocapplayer/libs/threejs/three.min.js +0 -0
- dataloaders/pymo/mocapplayer/playBuffer.html +418 -0
- dataloaders/pymo/mocapplayer/playURL.html +269 -0
- dataloaders/pymo/mocapplayer/styles/pace.css +76 -0
- dataloaders/pymo/parsers.py +274 -0
- dataloaders/pymo/preprocessing.py +726 -0
- dataloaders/pymo/rotation_tools.py +153 -0
- dataloaders/pymo/rotation_tools.py! +69 -0
- dataloaders/pymo/viz_tools.py +236 -0
- dataloaders/pymo/writers.py +55 -0
.gitignore
ADDED
@@ -0,0 +1 @@
+__pycache__
app.py
ADDED
@@ -0,0 +1,766 @@
+import os
+import signal
+import time
+import csv
+import sys
+import warnings
+import random
+import gradio as gr
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.distributed as dist
+from torch.nn.parallel import DistributedDataParallel as DDP
+import torch.multiprocessing as mp
+import numpy as np
+import time
+import pprint
+from loguru import logger
+import smplx
+from torch.utils.tensorboard import SummaryWriter
+import wandb
+import matplotlib.pyplot as plt
+from utils import config, logger_tools, other_tools_hf, metric, data_transfer, other_tools
+from dataloaders import data_tools
+from dataloaders.build_vocab import Vocab
+from optimizers.optim_factory import create_optimizer
+from optimizers.scheduler_factory import create_scheduler
+from optimizers.loss_factory import get_loss_func
+from dataloaders.data_tools import joints_list
+from utils import rotation_conversions as rc
+import soundfile as sf
+import librosa
+import subprocess
+from transformers import pipeline
+from diffusion.model_util import create_gaussian_diffusion
+from diffusion.resample import create_named_schedule_sampler
+from models.vq.model import RVQVAE
+import train
+import spaces
+
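+# Run the bundled install script once at import time; presumably this installs the
+# Montreal Forced Aligner ("mfa") that is invoked during BaseTrainer setup below.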
+command = ["bash","./demo/install_mfs.sh"]
+result = subprocess.run(command, capture_output=True, text=True)
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+pipe = pipeline(
+    "automatic-speech-recognition",
+    model="openai/whisper-tiny.en",
+    chunk_length_s=30,
+    device=device,
+)
+
+debug = False
+
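+# BaseTrainer wires up the whole demo for one uploaded clip: it writes the audio to a
+# temp dir, transcribes it with Whisper, force-aligns the transcript with MFA, then
+# loads the test dataloader, the MDM denoiser, the SMPL-X body model, and the
+# part-wise (upper/hands/lower) RVQ-VAE codebooks.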
+class BaseTrainer(object):
+    def __init__(self, args, ap):
+        args.use_ddim=True
+        hf_dir = "hf"
+        time_local = time.localtime()
+        time_name_expend = "%02d%02d_%02d%02d%02d_"%(time_local[1], time_local[2],time_local[3], time_local[4], time_local[5])
+        self.time_name_expend = time_name_expend
+        tmp_dir = args.out_path + "custom/"+ time_name_expend + hf_dir
+        if not os.path.exists(tmp_dir + "/"):
+            os.makedirs(tmp_dir + "/")
+        self.audio_path = tmp_dir + "/tmp.wav"
+        sf.write(self.audio_path, ap[1], ap[0])
+
+
+        audio, ssr = librosa.load(self.audio_path, sr=args.audio_sr)
+
+        # use asr model to get corresponding text transcripts
+        file_path = tmp_dir + "/tmp.lab"
+        self.textgrid_path = tmp_dir + "/tmp.TextGrid"
+        if not debug:
+            text = pipe(audio, batch_size=8)["text"]
+            with open(file_path, "w", encoding="utf-8") as file:
+                file.write(text)
+
+            # use montreal forced aligner to get textgrid
+
+            command = ["mfa", "align", tmp_dir, "english_us_arpa", "english_us_arpa", tmp_dir]
+            result = subprocess.run(command, capture_output=True, text=True)
+
+
+        ap = (ssr, audio)
+        self.args = args
+        self.rank = 0  # dist.get_rank()
+
+        args.textgrid_file_path = self.textgrid_path
+        args.audio_file_path = self.audio_path
+
+
+        self.rank = 0  # dist.get_rank()
+
+        self.checkpoint_path = tmp_dir
+        args.tmp_dir = tmp_dir
+        if self.rank == 0:
+            self.test_data = __import__(f"dataloaders.{args.dataset}", fromlist=["something"]).CustomDataset(args, "test")
+            self.test_loader = torch.utils.data.DataLoader(
+                self.test_data,
+                batch_size=1,
+                shuffle=False,
+                num_workers=args.loader_workers,
+                drop_last=False,
+            )
+            logger.info(f"Init test dataloader success")
+        model_module = __import__(f"models.{args.model}", fromlist=["something"])
+
+        self.model = torch.nn.DataParallel(getattr(model_module, args.g_name)(args), args.gpus).cuda()
+
+        if self.rank == 0:
+            logger.info(self.model)
+            logger.info(f"init {args.g_name} success")
+
+        self.smplx = smplx.create(
+            self.args.data_path_1 + "smplx_models/",
+            model_type='smplx',
+            gender='NEUTRAL_2020',
+            use_face_contour=False,
+            num_betas=300,
+            num_expression_coeffs=100,
+            ext='npz',
+            use_pca=False,
+        ).to(self.rank).eval()
+
+
+
+
+
+        self.args = args
+        self.ori_joint_list = joints_list[self.args.ori_joints]
+        self.tar_joint_list_face = joints_list["beat_smplx_face"]
+        self.tar_joint_list_upper = joints_list["beat_smplx_upper"]
+        self.tar_joint_list_hands = joints_list["beat_smplx_hands"]
+        self.tar_joint_list_lower = joints_list["beat_smplx_lower"]
+
+        self.joint_mask_face = np.zeros(len(list(self.ori_joint_list.keys()))*3)
+        self.joints = 55
+        for joint_name in self.tar_joint_list_face:
+            self.joint_mask_face[self.ori_joint_list[joint_name][1] - self.ori_joint_list[joint_name][0]:self.ori_joint_list[joint_name][1]] = 1
+        self.joint_mask_upper = np.zeros(len(list(self.ori_joint_list.keys()))*3)
+        for joint_name in self.tar_joint_list_upper:
+            self.joint_mask_upper[self.ori_joint_list[joint_name][1] - self.ori_joint_list[joint_name][0]:self.ori_joint_list[joint_name][1]] = 1
+        self.joint_mask_hands = np.zeros(len(list(self.ori_joint_list.keys()))*3)
+        for joint_name in self.tar_joint_list_hands:
+            self.joint_mask_hands[self.ori_joint_list[joint_name][1] - self.ori_joint_list[joint_name][0]:self.ori_joint_list[joint_name][1]] = 1
+        self.joint_mask_lower = np.zeros(len(list(self.ori_joint_list.keys()))*3)
+        for joint_name in self.tar_joint_list_lower:
+            self.joint_mask_lower[self.ori_joint_list[joint_name][1] - self.ori_joint_list[joint_name][0]:self.ori_joint_list[joint_name][1]] = 1
+
+        self.tracker = other_tools.EpochTracker(["fid", "l1div", "bc", "rec", "trans", "vel", "transv", 'dis', 'gen', 'acc', 'transa', 'exp', 'lvd', 'mse', "cls", "rec_face", "latent", "cls_full", "cls_self", "cls_word", "latent_word","latent_self","predict_x0_loss"], [False,True,True, False, False, False, False, False, False, False, False, False, False, False, False, False, False,False, False, False,False,False,False])
+
+        vq_model_module = __import__(f"models.motion_representation", fromlist=["something"])
+        self.args.vae_layer = 2
+        self.args.vae_length = 256
+        self.args.vae_test_dim = 106
+        self.vq_model_face = getattr(vq_model_module, "VQVAEConvZero")(self.args).to(self.rank)
+        other_tools.load_checkpoints(self.vq_model_face, "./datasets/hub/pretrained_vq/face_vertex_1layer_790.bin", args.e_name)
+
+
+        vq_type = self.args.vqvae_type
+        if vq_type=="vqvae":
+
+            self.args.vae_layer = 4
+            self.args.vae_test_dim = 78
+            self.vq_model_upper = getattr(vq_model_module, "VQVAEConvZero")(self.args).to(self.rank)
+            other_tools.load_checkpoints(self.vq_model_upper, args.vqvae_upper_path, args.e_name)
+            self.args.vae_test_dim = 180
+            self.vq_model_hands = getattr(vq_model_module, "VQVAEConvZero")(self.args).to(self.rank)
+            other_tools.load_checkpoints(self.vq_model_hands, args.vqvae_hands_path, args.e_name)
+            self.args.vae_test_dim = 54
+            self.args.vae_layer = 4
+            self.vq_model_lower = getattr(vq_model_module, "VQVAEConvZero")(self.args).to(self.rank)
+            other_tools.load_checkpoints(self.vq_model_lower, args.vqvae_lower_path, args.e_name)
+
+        elif vq_type=="rvqvae":
+
+            args.num_quantizers = 6
+            args.shared_codebook = False
+            args.quantize_dropout_prob = 0.2
+            args.mu = 0.99
+
+            args.nb_code = 512
+            args.code_dim = 512
+            args.code_dim = 512
+            args.down_t = 2
+            args.stride_t = 2
+            args.width = 512
+            args.depth = 3
+            args.dilation_growth_rate = 3
+            args.vq_act = "relu"
+            args.vq_norm = None
+
+            dim_pose = 78
+            args.body_part = "upper"
+            self.vq_model_upper = RVQVAE(args,
+                dim_pose,
+                args.nb_code,
+                args.code_dim,
+                args.code_dim,
+                args.down_t,
+                args.stride_t,
+                args.width,
+                args.depth,
+                args.dilation_growth_rate,
+                args.vq_act,
+                args.vq_norm)
+
+            dim_pose = 180
+            args.body_part = "hands"
+            self.vq_model_hands = RVQVAE(args,
+                dim_pose,
+                args.nb_code,
+                args.code_dim,
+                args.code_dim,
+                args.down_t,
+                args.stride_t,
+                args.width,
+                args.depth,
+                args.dilation_growth_rate,
+                args.vq_act,
+                args.vq_norm)
+
+            dim_pose = 54
+            if args.use_trans:
+                dim_pose = 57
+                self.args.vqvae_lower_path = self.args.vqvae_lower_trans_path
+            args.body_part = "lower"
+            self.vq_model_lower = RVQVAE(args,
+                dim_pose,
+                args.nb_code,
+                args.code_dim,
+                args.code_dim,
+                args.down_t,
+                args.stride_t,
+                args.width,
+                args.depth,
+                args.dilation_growth_rate,
+                args.vq_act,
+                args.vq_norm)
+
+            self.vq_model_upper.load_state_dict(torch.load(self.args.vqvae_upper_path)['net'])
+            self.vq_model_hands.load_state_dict(torch.load(self.args.vqvae_hands_path)['net'])
+            self.vq_model_lower.load_state_dict(torch.load(self.args.vqvae_lower_path)['net'])
+
+            self.vqvae_latent_scale = self.args.vqvae_latent_scale
+
+            self.vq_model_upper.eval().to(self.rank)
+            self.vq_model_hands.eval().to(self.rank)
+            self.vq_model_lower.eval().to(self.rank)
+
+
+
+
+
+        self.args.vae_test_dim = 61
+        self.args.vae_layer = 4
+        self.args.vae_test_dim = 330
+        self.args.vae_layer = 4
+        self.args.vae_length = 240
+
+
+        self.vq_model_face.eval()
+        self.vq_model_upper.eval()
+        self.vq_model_hands.eval()
+        self.vq_model_lower.eval()
+
+        self.cls_loss = nn.NLLLoss().to(self.rank)
+        self.reclatent_loss = nn.MSELoss().to(self.rank)
+        self.vel_loss = torch.nn.L1Loss(reduction='mean').to(self.rank)
+        self.rec_loss = get_loss_func("GeodesicLoss").to(self.rank)
+        self.log_softmax = nn.LogSoftmax(dim=2).to(self.rank)
+
+        self.diffusion = create_gaussian_diffusion(use_ddim=args.use_ddim)
+        self.schedule_sampler_type = 'uniform'
+        self.schedule_sampler = create_named_schedule_sampler(self.schedule_sampler_type, self.diffusion)
+        self.mean = np.load(args.mean_pose_path)
+        self.std = np.load(args.std_pose_path)
+
+        self.use_trans = args.use_trans
+        if self.use_trans:
+            self.trans_mean = np.load(args.mean_trans_path)
+            self.trans_std = np.load(args.std_trans_path)
+            self.trans_mean = torch.from_numpy(self.trans_mean).cuda()
+            self.trans_std = torch.from_numpy(self.trans_std).cuda()
+
+
+        joints = [3,6,9,12,13,14,15,16,17,18,19,20,21]
+        upper_body_mask = []
+        for i in joints:
+            upper_body_mask.extend([i*6, i*6+1, i*6+2, i*6+3, i*6+4, i*6+5])
+
+        joints = list(range(25,55))
+        hands_body_mask = []
+        for i in joints:
+            hands_body_mask.extend([i*6, i*6+1, i*6+2, i*6+3, i*6+4, i*6+5])
+
+        joints = [0,1,2,4,5,7,8,10,11]
+        lower_body_mask = []
+        for i in joints:
+            lower_body_mask.extend([i*6, i*6+1, i*6+2, i*6+3, i*6+4, i*6+5])
+
+        self.mean_upper = self.mean[upper_body_mask]
+        self.mean_hands = self.mean[hands_body_mask]
+        self.mean_lower = self.mean[lower_body_mask]
+        self.std_upper = self.std[upper_body_mask]
+        self.std_hands = self.std[hands_body_mask]
+        self.std_lower = self.std[lower_body_mask]
+
+        self.mean_upper = torch.from_numpy(self.mean_upper).cuda()
+        self.mean_hands = torch.from_numpy(self.mean_hands).cuda()
+        self.mean_lower = torch.from_numpy(self.mean_lower).cuda()
+        self.std_upper = torch.from_numpy(self.std_upper).cuda()
+        self.std_hands = torch.from_numpy(self.std_hands).cuda()
+        self.std_lower = torch.from_numpy(self.std_lower).cuda()
+
+
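+    # Scatter per-part joint values back into the full-length pose vector at the
+    # positions marked 1 in the selection mask (165 = 55 joints x 3 axis-angle dims).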
+    def inverse_selection(self, filtered_t, selection_array, n):
+        original_shape_t = np.zeros((n, selection_array.size))
+        selected_indices = np.where(selection_array == 1)[0]
+        for i in range(n):
+            original_shape_t[i, selected_indices] = filtered_t[i]
+        return original_shape_t
+
+    def inverse_selection_tensor(self, filtered_t, selection_array, n):
+        selection_array = torch.from_numpy(selection_array).cuda()
+        original_shape_t = torch.zeros((n, 165)).cuda()
+        selected_indices = torch.where(selection_array == 1)[0]
+        for i in range(n):
+            original_shape_t[i, selected_indices] = filtered_t[i]
+        return original_shape_t
+
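+    # Split the raw pose into jaw/face, upper, hands, and lower parts, convert each to
+    # 6D rotations, normalize, and encode them into their RVQ-VAE latent spaces.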
+    def _load_data(self, dict_data):
+        tar_pose_raw = dict_data["pose"]
+        tar_pose = tar_pose_raw[:, :, :165].to(self.rank)
+        tar_contact = tar_pose_raw[:, :, 165:169].to(self.rank)
+        tar_trans = dict_data["trans"].to(self.rank)
+        tar_trans_v = dict_data["trans_v"].to(self.rank)
+        tar_exps = dict_data["facial"].to(self.rank)
+        in_audio = dict_data["audio"].to(self.rank)
+        in_word = dict_data["word"].to(self.rank)
+        tar_beta = dict_data["beta"].to(self.rank)
+        tar_id = dict_data["id"].to(self.rank).long()
+        bs, n, j = tar_pose.shape[0], tar_pose.shape[1], self.joints
+
+        tar_pose_jaw = tar_pose[:, :, 66:69]
+        tar_pose_jaw = rc.axis_angle_to_matrix(tar_pose_jaw.reshape(bs, n, 1, 3))
+        tar_pose_jaw = rc.matrix_to_rotation_6d(tar_pose_jaw).reshape(bs, n, 1*6)
+        tar_pose_face = torch.cat([tar_pose_jaw, tar_exps], dim=2)
+
+        tar_pose_hands = tar_pose[:, :, 25*3:55*3]
+        tar_pose_hands = rc.axis_angle_to_matrix(tar_pose_hands.reshape(bs, n, 30, 3))
+        tar_pose_hands = rc.matrix_to_rotation_6d(tar_pose_hands).reshape(bs, n, 30*6)
+
+        tar_pose_upper = tar_pose[:, :, self.joint_mask_upper.astype(bool)]
+        tar_pose_upper = rc.axis_angle_to_matrix(tar_pose_upper.reshape(bs, n, 13, 3))
+        tar_pose_upper = rc.matrix_to_rotation_6d(tar_pose_upper).reshape(bs, n, 13*6)
+
+        tar_pose_leg = tar_pose[:, :, self.joint_mask_lower.astype(bool)]
+        tar_pose_leg = rc.axis_angle_to_matrix(tar_pose_leg.reshape(bs, n, 9, 3))
+        tar_pose_leg = rc.matrix_to_rotation_6d(tar_pose_leg).reshape(bs, n, 9*6)
+
+        tar_pose_lower = tar_pose_leg
+
+
+        tar4dis = torch.cat([tar_pose_jaw, tar_pose_upper, tar_pose_hands, tar_pose_leg], dim=2)
+
+
+        if self.args.pose_norm:
+            tar_pose_upper = (tar_pose_upper - self.mean_upper) / self.std_upper
+            tar_pose_hands = (tar_pose_hands - self.mean_hands) / self.std_hands
+            tar_pose_lower = (tar_pose_lower - self.mean_lower) / self.std_lower
+
+        if self.use_trans:
+            tar_trans_v = (tar_trans_v - self.trans_mean)/self.trans_std
+            tar_pose_lower = torch.cat([tar_pose_lower,tar_trans_v], dim=-1)
+
+        latent_face_top = self.vq_model_face.map2latent(tar_pose_face) # bs*n/4
+        latent_upper_top = self.vq_model_upper.map2latent(tar_pose_upper)
+        latent_hands_top = self.vq_model_hands.map2latent(tar_pose_hands)
+        latent_lower_top = self.vq_model_lower.map2latent(tar_pose_lower)
+
+        latent_in = torch.cat([latent_upper_top, latent_hands_top, latent_lower_top], dim=2)/self.args.vqvae_latent_scale
+
+
+        tar_pose_6d = rc.axis_angle_to_matrix(tar_pose.reshape(bs, n, 55, 3))
+        tar_pose_6d = rc.matrix_to_rotation_6d(tar_pose_6d).reshape(bs, n, 55*6)
+        latent_all = torch.cat([tar_pose_6d, tar_trans, tar_contact], dim=-1)
+        style_feature = None
+        if self.args.use_motionclip:
+            motionclip_feat = tar_pose_6d[...,:22*6]
+            batch = {}
+            bs,seq,feat = motionclip_feat.shape
+            batch['x'] = motionclip_feat.permute(0,2,1).contiguous()
+            batch['y'] = torch.zeros(bs).int().cuda()
+            batch['mask'] = torch.ones([bs,seq]).bool().cuda()
+            style_feature = self.motionclip.encoder(batch)['mu'].detach().float()
+
+
+
+        # print(tar_index_value_upper_top.shape, index_in.shape)
+        return {
+            "tar_pose_jaw": tar_pose_jaw,
+            "tar_pose_face": tar_pose_face,
+            "tar_pose_upper": tar_pose_upper,
+            "tar_pose_lower": tar_pose_lower,
+            "tar_pose_hands": tar_pose_hands,
+            'tar_pose_leg': tar_pose_leg,
+            "in_audio": in_audio,
+            "in_word": in_word,
+            "tar_trans": tar_trans,
+            "tar_exps": tar_exps,
+            "tar_beta": tar_beta,
+            "tar_pose": tar_pose,
+            "tar4dis": tar4dis,
+            "latent_face_top": latent_face_top,
+            "latent_upper_top": latent_upper_top,
+            "latent_hands_top": latent_hands_top,
+            "latent_lower_top": latent_lower_top,
+            "latent_in": latent_in,
+            "tar_id": tar_id,
+            "latent_all": latent_all,
+            "tar_pose_6d": tar_pose_6d,
+            "tar_contact": tar_contact,
+            "style_feature": style_feature,
+        }
+
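+    # Diffusion sampling over the latent sequence in overlapping windows: every window
+    # after the first is seeded with the last pre_frames latents of the previous sample.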
+    def _g_test(self, loaded_data):
+        sample_fn = self.diffusion.p_sample_loop
+        if self.args.use_ddim:
+            sample_fn = self.diffusion.ddim_sample_loop
+        mode = 'test'
+        bs, n, j = loaded_data["tar_pose"].shape[0], loaded_data["tar_pose"].shape[1], self.joints
+        tar_pose = loaded_data["tar_pose"]
+        tar_beta = loaded_data["tar_beta"]
+        tar_exps = loaded_data["tar_exps"]
+        tar_contact = loaded_data["tar_contact"]
+        tar_trans = loaded_data["tar_trans"]
+        in_word = loaded_data["in_word"]
+        in_audio = loaded_data["in_audio"]
+        in_x0 = loaded_data['latent_in']
+        in_seed = loaded_data['latent_in']
+
+        remain = n%8
+        if remain != 0:
+            tar_pose = tar_pose[:, :-remain, :]
+            tar_beta = tar_beta[:, :-remain, :]
+            tar_trans = tar_trans[:, :-remain, :]
+            in_word = in_word[:, :-remain]
+            tar_exps = tar_exps[:, :-remain, :]
+            tar_contact = tar_contact[:, :-remain, :]
+            in_x0 = in_x0[:, :in_x0.shape[1]-(remain//self.args.vqvae_squeeze_scale), :]
+            in_seed = in_seed[:, :in_x0.shape[1]-(remain//self.args.vqvae_squeeze_scale), :]
+            n = n - remain
+
+        tar_pose_jaw = tar_pose[:, :, 66:69]
+        tar_pose_jaw = rc.axis_angle_to_matrix(tar_pose_jaw.reshape(bs, n, 1, 3))
+        tar_pose_jaw = rc.matrix_to_rotation_6d(tar_pose_jaw).reshape(bs, n, 1*6)
+        tar_pose_face = torch.cat([tar_pose_jaw, tar_exps], dim=2)
+
+        tar_pose_hands = tar_pose[:, :, 25*3:55*3]
+        tar_pose_hands = rc.axis_angle_to_matrix(tar_pose_hands.reshape(bs, n, 30, 3))
+        tar_pose_hands = rc.matrix_to_rotation_6d(tar_pose_hands).reshape(bs, n, 30*6)
+
+        tar_pose_upper = tar_pose[:, :, self.joint_mask_upper.astype(bool)]
+        tar_pose_upper = rc.axis_angle_to_matrix(tar_pose_upper.reshape(bs, n, 13, 3))
+        tar_pose_upper = rc.matrix_to_rotation_6d(tar_pose_upper).reshape(bs, n, 13*6)
+
+        tar_pose_leg = tar_pose[:, :, self.joint_mask_lower.astype(bool)]
+        tar_pose_leg = rc.axis_angle_to_matrix(tar_pose_leg.reshape(bs, n, 9, 3))
+        tar_pose_leg = rc.matrix_to_rotation_6d(tar_pose_leg).reshape(bs, n, 9*6)
+        tar_pose_lower = torch.cat([tar_pose_leg, tar_trans, tar_contact], dim=2)
+
+        tar_pose_6d = rc.axis_angle_to_matrix(tar_pose.reshape(bs, n, 55, 3))
+        tar_pose_6d = rc.matrix_to_rotation_6d(tar_pose_6d).reshape(bs, n, 55*6)
+        latent_all = torch.cat([tar_pose_6d, tar_trans, tar_contact], dim=-1)
+
+        rec_all_face = []
+        rec_all_upper = []
+        rec_all_lower = []
+        rec_all_hands = []
+        vqvae_squeeze_scale = self.args.vqvae_squeeze_scale
+        roundt = (n - self.args.pre_frames * vqvae_squeeze_scale) // (self.args.pose_length - self.args.pre_frames * vqvae_squeeze_scale)
+        remain = (n - self.args.pre_frames * vqvae_squeeze_scale) % (self.args.pose_length - self.args.pre_frames * vqvae_squeeze_scale)
+        round_l = self.args.pose_length - self.args.pre_frames * vqvae_squeeze_scale
+
+
+        for i in range(0, roundt):
+            in_word_tmp = in_word[:, i*(round_l):(i+1)*(round_l)+self.args.pre_frames * vqvae_squeeze_scale]
+
+            in_audio_tmp = in_audio[:, i*(16000//30*round_l):(i+1)*(16000//30*round_l)+16000//30*self.args.pre_frames * vqvae_squeeze_scale]
+            in_id_tmp = loaded_data['tar_id'][:, i*(round_l):(i+1)*(round_l)+self.args.pre_frames]
+            in_seed_tmp = in_seed[:, i*(round_l)//vqvae_squeeze_scale:(i+1)*(round_l)//vqvae_squeeze_scale+self.args.pre_frames]
+            in_x0_tmp = in_x0[:, i*(round_l)//vqvae_squeeze_scale:(i+1)*(round_l)//vqvae_squeeze_scale+self.args.pre_frames]
+            mask_val = torch.ones(bs, self.args.pose_length, self.args.pose_dims+3+4).float().cuda()
+            mask_val[:, :self.args.pre_frames, :] = 0.0
+            if i == 0:
+                in_seed_tmp = in_seed_tmp[:, :self.args.pre_frames, :]
+            else:
+                in_seed_tmp = last_sample[:, -self.args.pre_frames:, :]
+
+            cond_ = {'y':{}}
+            cond_['y']['audio'] = in_audio_tmp
+            cond_['y']['word'] = in_word_tmp
+            cond_['y']['id'] = in_id_tmp
+            cond_['y']['seed'] = in_seed_tmp
+            cond_['y']['mask'] = (torch.zeros([self.args.batch_size, 1, 1, self.args.pose_length]) < 1).cuda()
+
+
+
+            cond_['y']['style_feature'] = torch.zeros([bs, 512]).cuda()
+
+            shape_ = (bs, 1536, 1, 32)
+            sample = sample_fn(
+                self.model,
+                shape_,
+                clip_denoised=False,
+                model_kwargs=cond_,
+                skip_timesteps=0,
+                init_image=None,
+                progress=True,
+                dump_steps=None,
+                noise=None,
+                const_noise=False,
+            )
+            sample = sample.squeeze().permute(1,0).unsqueeze(0)
+
+            last_sample = sample.clone()
+
+            rec_latent_upper = sample[...,:512]
+            rec_latent_hands = sample[...,512:1024]
+            rec_latent_lower = sample[...,1024:1536]
+
+
+
+            if i == 0:
+                rec_all_upper.append(rec_latent_upper)
+                rec_all_hands.append(rec_latent_hands)
+                rec_all_lower.append(rec_latent_lower)
+            else:
+                rec_all_upper.append(rec_latent_upper[:, self.args.pre_frames:])
+                rec_all_hands.append(rec_latent_hands[:, self.args.pre_frames:])
+                rec_all_lower.append(rec_latent_lower[:, self.args.pre_frames:])
+
+        rec_all_upper = torch.cat(rec_all_upper, dim=1) * self.vqvae_latent_scale
+        rec_all_hands = torch.cat(rec_all_hands, dim=1) * self.vqvae_latent_scale
+        rec_all_lower = torch.cat(rec_all_lower, dim=1) * self.vqvae_latent_scale
+
+        rec_upper = self.vq_model_upper.latent2origin(rec_all_upper)[0]
+        rec_hands = self.vq_model_hands.latent2origin(rec_all_hands)[0]
+        rec_lower = self.vq_model_lower.latent2origin(rec_all_lower)[0]
+
+
+        if self.use_trans:
+            rec_trans_v = rec_lower[...,-3:]
+            rec_trans_v = rec_trans_v * self.trans_std + self.trans_mean
+            rec_trans = torch.zeros_like(rec_trans_v)
+            rec_trans = torch.cumsum(rec_trans_v, dim=-2)
+            rec_trans[...,1] = rec_trans_v[...,1]
+            rec_lower = rec_lower[...,:-3]
+
+        if self.args.pose_norm:
+            rec_upper = rec_upper * self.std_upper + self.mean_upper
+            rec_hands = rec_hands * self.std_hands + self.mean_hands
+            rec_lower = rec_lower * self.std_lower + self.mean_lower
+
+
+
+
+        n = n - remain
+        tar_pose = tar_pose[:, :n, :]
+        tar_exps = tar_exps[:, :n, :]
+        tar_trans = tar_trans[:, :n, :]
+        tar_beta = tar_beta[:, :n, :]
+
+
+        rec_exps = tar_exps
+        #rec_pose_jaw = rec_face[:, :, :6]
+        rec_pose_legs = rec_lower[:, :, :54]
+        bs, n = rec_pose_legs.shape[0], rec_pose_legs.shape[1]
+        rec_pose_upper = rec_upper.reshape(bs, n, 13, 6)
+        rec_pose_upper = rc.rotation_6d_to_matrix(rec_pose_upper)#
+        rec_pose_upper = rc.matrix_to_axis_angle(rec_pose_upper).reshape(bs*n, 13*3)
+        rec_pose_upper_recover = self.inverse_selection_tensor(rec_pose_upper, self.joint_mask_upper, bs*n)
+        rec_pose_lower = rec_pose_legs.reshape(bs, n, 9, 6)
+        rec_pose_lower = rc.rotation_6d_to_matrix(rec_pose_lower)
+        rec_lower2global = rc.matrix_to_rotation_6d(rec_pose_lower.clone()).reshape(bs, n, 9*6)
+        rec_pose_lower = rc.matrix_to_axis_angle(rec_pose_lower).reshape(bs*n, 9*3)
+        rec_pose_lower_recover = self.inverse_selection_tensor(rec_pose_lower, self.joint_mask_lower, bs*n)
+        rec_pose_hands = rec_hands.reshape(bs, n, 30, 6)
+        rec_pose_hands = rc.rotation_6d_to_matrix(rec_pose_hands)
+        rec_pose_hands = rc.matrix_to_axis_angle(rec_pose_hands).reshape(bs*n, 30*3)
+        rec_pose_hands_recover = self.inverse_selection_tensor(rec_pose_hands, self.joint_mask_hands, bs*n)
+        rec_pose = rec_pose_upper_recover + rec_pose_lower_recover + rec_pose_hands_recover
+        rec_pose[:, 66:69] = tar_pose.reshape(bs*n, 55*3)[:, 66:69]
+
+        rec_pose = rc.axis_angle_to_matrix(rec_pose.reshape(bs*n, j, 3))
+        rec_pose = rc.matrix_to_rotation_6d(rec_pose).reshape(bs, n, j*6)
+        tar_pose = rc.axis_angle_to_matrix(tar_pose.reshape(bs*n, j, 3))
+        tar_pose = rc.matrix_to_rotation_6d(tar_pose).reshape(bs, n, j*6)
+
+        return {
+            'rec_pose': rec_pose,
+            'rec_trans': rec_trans,
+            'tar_pose': tar_pose,
+            'tar_exps': tar_exps,
+            'tar_beta': tar_beta,
+            'tar_trans': tar_trans,
+            'rec_exps': rec_exps,
+        }
+
+
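+    # Generate motion for the uploaded audio, save it as an SMPL-X npz, render a video,
+    # and return the Gradio outputs; no losses or metrics are computed here.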
+    def test_demo(self, epoch):
+        '''
+        input audio and text, output motion
+        do not calculate loss and metric
+        save video
+        '''
+        results_save_path = self.checkpoint_path + f"/{epoch}/"
+        if os.path.exists(results_save_path):
+            import shutil
+            shutil.rmtree(results_save_path)
+        os.makedirs(results_save_path)
+        start_time = time.time()
+        total_length = 0
+        test_seq_list = self.test_data.selected_file
+        align = 0
+        latent_out = []
+        latent_ori = []
+        l2_all = 0
+        lvel = 0
+        self.model.eval()
+        self.smplx.eval()
+        # self.eval_copy.eval()
+        with torch.no_grad():
+            for its, batch_data in enumerate(self.test_loader):
+                loaded_data = self._load_data(batch_data)
+                net_out = self._g_test(loaded_data)
+                tar_pose = net_out['tar_pose']
+                rec_pose = net_out['rec_pose']
+                tar_exps = net_out['tar_exps']
+                tar_beta = net_out['tar_beta']
+                rec_trans = net_out['rec_trans']
+                tar_trans = net_out['tar_trans']
+                rec_exps = net_out['rec_exps']
+                bs, n, j = tar_pose.shape[0], tar_pose.shape[1], self.joints
+                if (30/self.args.pose_fps) != 1:
+                    assert 30%self.args.pose_fps == 0
+                    n *= int(30/self.args.pose_fps)
+                    tar_pose = torch.nn.functional.interpolate(tar_pose.permute(0, 2, 1), scale_factor=30/self.args.pose_fps, mode='linear').permute(0,2,1)
+                    rec_pose = torch.nn.functional.interpolate(rec_pose.permute(0, 2, 1), scale_factor=30/self.args.pose_fps, mode='linear').permute(0,2,1)
+
+
+                rec_pose = rc.rotation_6d_to_matrix(rec_pose.reshape(bs*n, j, 6))
+                rec_pose = rc.matrix_to_rotation_6d(rec_pose).reshape(bs, n, j*6)
+                tar_pose = rc.rotation_6d_to_matrix(tar_pose.reshape(bs*n, j, 6))
+                tar_pose = rc.matrix_to_rotation_6d(tar_pose).reshape(bs, n, j*6)
+
+                rec_pose = rc.rotation_6d_to_matrix(rec_pose.reshape(bs*n, j, 6))
+                rec_pose = rc.matrix_to_axis_angle(rec_pose).reshape(bs*n, j*3)
+                tar_pose = rc.rotation_6d_to_matrix(tar_pose.reshape(bs*n, j, 6))
+                tar_pose = rc.matrix_to_axis_angle(tar_pose).reshape(bs*n, j*3)
+
+
+                tar_pose_np = tar_pose.detach().cpu().numpy()
+                rec_pose_np = rec_pose.detach().cpu().numpy()
+                rec_trans_np = rec_trans.detach().cpu().numpy().reshape(bs*n, 3)
+                rec_exp_np = rec_exps.detach().cpu().numpy().reshape(bs*n, 100)
+                tar_exp_np = tar_exps.detach().cpu().numpy().reshape(bs*n, 100)
+                tar_trans_np = tar_trans.detach().cpu().numpy().reshape(bs*n, 3)
+                gt_npz = np.load("./demo/examples/2_scott_0_1_1.npz", allow_pickle=True)
+
+                results_npz_file_save_path = results_save_path+f"result_{self.time_name_expend[:-1]}"+'.npz'
+                np.savez(results_npz_file_save_path,
+                    betas=gt_npz["betas"],
+                    poses=rec_pose_np,
+                    expressions=rec_exp_np,
+                    trans=rec_trans_np,
+                    model='smplx2020',
+                    gender='neutral',
+                    mocap_frame_rate = 30,
+                )
+                total_length += n
+                render_vid_path = other_tools_hf.render_one_sequence_no_gt(
+                    results_npz_file_save_path,
+                    # results_save_path+"gt_"+test_seq_list.iloc[its]['id']+'.npz',
+                    results_save_path,
+                    self.audio_path,
+                    self.args.data_path_1+"smplx_models/",
+                    use_matplotlib = False,
+                    args = self.args,
+                )
+
+                result = [
+                    gr.Video(value=render_vid_path, visible=True),
+                    gr.File(value=results_npz_file_save_path, label="download motion and visualize in blender"),
+                ]
+
+        end_time = time.time() - start_time
+        logger.info(f"total inference time: {int(end_time)} s for {int(total_length/self.args.pose_fps)} s motion")
+        return result
+
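+# Gradio entry point: choose DDIM (0) or DDPM (1) sampling, build a BaseTrainer around
+# the uploaded audio, load the diffusion checkpoint, and run the demo.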
+@logger.catch
+@spaces.GPU
+def syntalker(audio_path, sample_stratege):
+    args = config.parse_args()
+    if sample_stratege==0:
+        args.use_ddim=True
+    elif sample_stratege==1:
+        args.use_ddim=False
+    print(sample_stratege)
+    print(args.use_ddim)
+    #os.environ['TRANSFORMERS_CACHE'] = args.data_path_1 + "hub/"
+    if not sys.warnoptions:
+        warnings.simplefilter("ignore")
+    # dist.init_process_group(backend="gloo", rank=rank, world_size=world_size)
+
+    #logger_tools.set_args_and_logger(args, rank)
+    other_tools_hf.set_random_seed(args)
+    other_tools_hf.print_exp_info(args)
+
+    # return one instance of trainer
+    trainer = BaseTrainer(args, ap = audio_path)
+    other_tools.load_checkpoints(trainer.model, args.test_ckpt, args.g_name)
+
+    result = trainer.test_demo(999)
+    return result
+
+examples = [
+    ["demo/examples/2_scott_0_1_1.wav"],
+    ["demo/examples/2_scott_0_2_2.wav"],
+    ["demo/examples/2_scott_0_3_3.wav"],
+    ["demo/examples/2_scott_0_4_4.wav"],
+    ["demo/examples/2_scott_0_5_5.wav"],
+]
+
+demo = gr.Interface(
+    syntalker,  # function
+    inputs=[
+        # gr.File(label="Please upload SMPL-X file with npz format here.", file_types=["npz", "NPZ"]),
+        gr.Audio(),
+        gr.Radio(choices=["DDIM", "DDPM"], label="Please select a sample strategy", type="index", value="DDIM"),  # 0 for DDIM, 1 for DDPM
+        # gr.File(label="Please upload textgrid format file here.", file_types=["TextGrid", "Textgrid", "textgrid"])
+    ],  # input type
+    outputs=[
+        gr.Video(format="mp4", visible=True),
+        gr.File(label="download motion and visualize in blender")
+    ],
+    title='SynTalker: Enabling Synergistic Full-Body Control in Prompt-Based Co-Speech Motion Generation',
+    description="1. Upload your audio. <br/>\
+        2. Then, sit back and wait for the rendering to happen! This may take a while (e.g. 2 minutes). <br/>\
+        3. Afterwards, you can view the videos. <br/>\
+        4. Note that we use a fixed face animation; our method only produces body motion. <br/>\
+        5. Using the DDPM sample strategy will generate a better result, but it will take more inference time. \
+        ",
+    article="Project links: [SynTalker](https://robinwitch.github.io/SynTalker-Page). <br/>\
+        Reference links: [EMAGE](https://pantomatrix.github.io/EMAGE/). ",
+    examples=examples,
+)
+
+
+if __name__ == "__main__":
+    os.environ["MASTER_ADDR"] = '127.0.0.1'
+    os.environ["MASTER_PORT"] = '8675'
+    #os.environ["TORCH_DISTRIBUTED_DEBUG"] = "DETAIL"
+    demo.launch(share=True)
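The inverse_selection helpers above recombine per-part joint predictions into the full pose vector by scattering values back through a 0/1 selection mask. A minimal, self-contained sketch of that pattern with a toy six-dimensional pose (the sizes and values here are illustrative, not taken from the repo):

    import numpy as np

    selection = np.array([0, 0, 1, 1, 1, 0])    # toy mask: entries 2..4 belong to one body part
    filtered = np.array([[1.0, 2.0, 3.0],
                         [4.0, 5.0, 6.0]])      # two frames of part-local values

    def inverse_selection(filtered_t, selection_array, n):
        # same scatter as in app.py: write part values back at their masked positions
        original = np.zeros((n, selection_array.size))
        idx = np.where(selection_array == 1)[0]
        for i in range(n):
            original[i, idx] = filtered_t[i]
        return original

    print(inverse_selection(filtered, selection, n=2))
    # [[0. 0. 1. 2. 3. 0.]
    #  [0. 0. 4. 5. 6. 0.]]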
bash_raw_cospeech_download.sh
ADDED
@@ -0,0 +1,4 @@
+mkdir -p datasets/BEAT_SMPL
+cd datasets/BEAT_SMPL
+gdown https://drive.google.com/uc?id=1_iXr0XiT_EdslXe4b0HwDr2OoOCrtlrB
+unzip beat_v2.0.0.zip
ckpt/beatx2_cospeech_diffusion/0403_212319_diffusion_rvqvae_128.txt
ADDED
The diff for this file is too large to render.
ckpt/beatx2_cospeech_diffusion/0403_212319_diffusion_rvqvae_128.yaml
ADDED
@@ -0,0 +1,54 @@
+{a_encoder: null, a_fix_pre: false, a_pre_encoder: null, acc: 1, acc_weight: 0.0,
+  additional_data: false, adv_weight: 20.0, ali_weight: 0.0, amsgrad: false, apex: false,
+  asmr: 0.0, atcont: 0.0, atmr: 0.0, aud_prob: 1.0, audio_dims: 1, audio_f: 256, audio_fps: 16000,
+  audio_norm: false, audio_rep: onset+amplitude, audio_sr: 16000, batch_size: 40,
+  beat_align: true, benchmark: true, cache_only: false, cache_path: datasets/beat_cache/beat_smplx_en_emage_2_128/,
+  cf: 0.0, ch: 1.0, cl: 1.0, clean_final_seconds: 0, clean_first_seconds: 0, commit: 0.02,
+  config: configs/diffusion_rvqvae_128.yaml, csv_name: a2g_0, cu: 1.0, cudnn_enabled: true,
+  d_lr_weight: 0.2, d_name: null, data_path: /mnt/fu09a/chenbohong/PantoMatrix/scripts/EMAGE_2024/datasets/BEAT_SMPL/beat_v2.0.0/beat_english_v2.0.0/,
+  data_path_1: /mnt/fu09a/chenbohong/PantoMatrix/scripts/EMAGE_2024/datasets/hub/,
+  dataset: beat_sep_lower, ddp: false, debug: false, decay_epochs: 200, decay_rate: 0.1,
+  decode_fusion: null, depth: 3, deterministic: true, dilation_growth_rate: 3, disable_filtering: false,
+  div_reg_weight: 0.0, downs_t: [3], dropout_prob: 0.3, e_name: VAESKConv, e_path: weights/AESKConv_240_100.bin,
+  emb_width: 512, emo_rep: null, emotion_dims: 8, emotion_f: 0, epoch_stage: 0, epochs: 1000,
+  eval_model: motion_representation, f_encoder: 'null', f_fix_pre: false, f_pre_encoder: 'null',
+  fac_prob: 1.0, facial_dims: 100, facial_f: 0, facial_fps: 15, facial_norm: false,
+  facial_rep: smplxflame_30, fid_weight: 0.0, finger_net: original, freeze_wordembed: false,
+  fsmr: 0.0, ftmr: 0.0, fusion_mode: sum, g_name: MDM, gap_weight: 0.0, gpus: [0],
+  grad_norm: 0.99, hidden_size: 768, hvqvae_multipliers: [1], id_rep: onehot, input_context: both,
+  is_train: true, ita_weight: 0.0, iwa_weight: 0.0, joint_channel: 3, kld_aud_weight: 0.0,
+  kld_fac_weight: 0.0, kld_weight: 0.0, l: 4, l_bins: 512, l_mu: 0.99, levels: 1,
+  lf: 3.0, lh: 3.0, ll: 3.0, loader_workers: 0, log_period: 10, loss_contrastive_neg_weight: 0.005,
+  loss_contrastive_pos_weight: 0.2, loss_gan_weight: 5.0, loss_kld_weight: 0.1, loss_physical_weight: 0.0,
+  loss_reg_weight: 0.05, loss_regression_weight: 70.0, lr_base: 5.0e-05, lr_min: 1.0e-07,
+  lr_policy: step, lu: 3.0, m_conv: 1.0, m_decoder: null, m_encoder: 'null', m_fix_pre: false,
+  m_pre_encoder: 'null', mean_pose_path: /mnt/fu09a/chenbohong/PantoMatrix/beatx_2_330_mean.npy,
+  mean_trans_path: /mnt/fu09a/chenbohong/PantoMatrix/beatx_2_trans_mean.npy, model: denoiser,
+  momentum: 0.8, motion_f: 256, msmr: 0.0, mtmr: 0.0, multi_length_training: [1.0],
+  n_layer: 1, n_poses: 34, n_pre_poses: 4, name: 0403_212319_diffusion_rvqvae_128,
+  nesterov: true, new_cache: false, no_adv_epoch: 999, notes: '', opt: adam, opt_betas: [
+    0.5, 0.999], ori_joints: beat_smplx_joints, out_path: /mnt/fu09a/chenbohong/PantoMatrix/scripts/EMAGE_2024/outputs/audio2pose/,
+  pos_encoding_type: sin, pos_prob: 1.0, pose_dims: 330, pose_fps: 30, pose_length: 128,
+  pose_norm: true, pose_rep: smplxflame_30, pre_frames: 4, pre_type: zero, pretrain: false,
+  project: s2g, queue_size: 1024, random_seed: 2021, rec_aud_weight: 0.0, rec_fac_weight: 0.0,
+  rec_pos_weight: 0.0, rec_txt_weight: 0.0, rec_ver_weight: 0.0, rec_weight: 1.0,
+  root_path: /mnt/fu09a/chenbohong/PantoMatrix/scripts/EMAGE_2024/, root_weight: 1.0,
+  rot6d: true, sample_length: 34, sem_rep: null, sparse: 1, speaker_dims: 4, speaker_f: 0,
+  speaker_id: onehot, stat: ts, std_pose_path: /mnt/fu09a/chenbohong/PantoMatrix/beatx_2_330_std.npy,
+  std_trans_path: /mnt/fu09a/chenbohong/PantoMatrix/beatx_2_trans_std.npy, stride: 20,
+  strides_t: [2], t_encoder: 'null', t_fix_pre: false, t_pre_encoder: fasttext, tar_joints: beat_smplx_full,
+  test_ckpt: /mnt/fu09a/chenbohong/PantoMatrix/scripts/EMAGE_2024/outputs/audio2pose/custom/0330_140056_diffusion_rvqvae/last_300.bin,
+  test_data_path: /datasets/trinity/test/, test_length: 128, test_period: 20, train_data_path: /datasets/trinity/train/,
+  train_trans: true, trainer: diffusion_rvqvae, training_speakers: [2], tsmr: 0.0,
+  ttmr: 0.0, txt_prob: 1.0, use_amass: false, use_aug: false, use_bottleneck: true,
+  use_trans: true, vae_codebook_size: 256, vae_grow: [1, 1, 2, 1], vae_layer: 4, vae_length: 240,
+  vae_quantizer_lambda: 1.0, vae_test_dim: 330, vae_test_len: 32, vae_test_stride: 20,
+  val_data_path: /datasets/trinity/val/, variational: false, vel: 1, vel_weight: 0.0,
+  vqvae_ckpt: null, vqvae_hands_path: /mnt/fu09a/chenbohong/gdc/T2M-GPT/output_beatx2/RVQVAE_hands/net_300000.pth,
+  vqvae_latent_scale: 5.0, vqvae_lower_path: /mnt/fu09a/chenbohong/gdc/T2M-GPT/output_beatx2/RVQVAE_lower/net_300000.pth,
+  vqvae_lower_trans_path: /mnt/fu09a/chenbohong/gdc/T2M-GPT/output_beatx2/RVQVAE_lower_trans/net_300000.pth,
+  vqvae_reverse_decoder_dilation: true, vqvae_squeeze_scale: 4, vqvae_type: rvqvae,
+  vqvae_upper_path: /mnt/fu09a/chenbohong/gdc/T2M-GPT/output_beatx2/RVQVAE_upper/net_300000.pth,
+  warmup_epochs: 0, warmup_lr: 0.0005, wei_weight: 0.0, weight_decay: 0.0, width: 512,
+  word_cache: false, word_dims: 300, word_f: 256, word_index_num: 11195, word_rep: textgrid,
+  z_type: speaker}
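The checkpoint ships its training configuration as a single flow-style YAML mapping, so any YAML parser can read it back. A minimal sketch of loading it into an attribute-style object (PyYAML and SimpleNamespace are assumptions here; the repo's own utils.config loader may parse it differently):

    import yaml
    from types import SimpleNamespace

    with open("ckpt/beatx2_cospeech_diffusion/0403_212319_diffusion_rvqvae_128.yaml") as f:
        cfg = SimpleNamespace(**yaml.safe_load(f))

    print(cfg.vqvae_type, cfg.pose_length, cfg.vqvae_latent_scale)
    # rvqvae 128 5.0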
ckpt/beatx2_cospeech_diffusion/1001_203942_diffusion_rvqvae_128_gaps-210-0.txt
ADDED
@@ -0,0 +1,451 @@
+10-01 20:39:43 | {'a_encoder': None,
+ 'a_fix_pre': False,
+ 'a_pre_encoder': None,
+ 'acc': 1,
+ 'acc_weight': 0.0,
+ 'additional_data': False,
+ 'adv_weight': 20.0,
+ 'ali_weight': 0.0,
+ 'amsgrad': False,
+ 'apex': False,
+ 'asmr': 0.0,
+ 'atcont': 0.0,
+ 'atmr': 0.0,
+ 'aud_prob': 1.0,
+ 'audio_dims': 1,
+ 'audio_f': 256,
+ 'audio_fps': 16000,
+ 'audio_norm': False,
+ 'audio_rep': 'onset+amplitude',
+ 'audio_sr': 16000,
+ 'batch_size': 40,
+ 'beat_align': True,
+ 'benchmark': True,
+ 'cache_only': False,
+ 'cache_path': 'datasets/beat_cache/beat_smplx_en_emage_2_128/',
+ 'cf': 0.0,
+ 'ch': 1.0,
+ 'cl': 1.0,
+ 'clean_final_seconds': 0,
+ 'clean_first_seconds': 0,
+ 'commit': 0.02,
+ 'config': 'configs/diffusion_rvqvae_128_gaps-210-0.yaml',
+ 'csv_name': 'a2g_0',
+ 'cu': 1.0,
+ 'cudnn_enabled': True,
+ 'd_lr_weight': 0.2,
+ 'd_name': None,
+ 'data_path': './datasets/BEAT_SMPL/beat_v2.0.0/beat_english_v2.0.0/',
+ 'data_path_1': './datasets/hub/',
+ 'dataset': 'beat_sep_lower',
+ 'ddp': False,
+ 'debug': False,
+ 'decay_epochs': 500,
+ 'decay_rate': 0.1,
+ 'decode_fusion': None,
+ 'depth': 3,
+ 'deterministic': True,
+ 'dilation_growth_rate': 3,
+ 'disable_filtering': False,
+ 'div_reg_weight': 0.0,
+ 'downs_t': [3],
+ 'dropout_prob': 0.3,
+ 'e_name': 'VAESKConv',
+ 'e_path': 'weights/AESKConv_240_100.bin',
+ 'emb_width': 512,
+ 'emo_rep': None,
+ 'emotion_dims': 8,
+ 'emotion_f': 0,
+ 'epoch_stage': 0,
+ 'epochs': 2000,
+ 'eval_model': 'motion_representation',
+ 'f_encoder': 'null',
+ 'f_fix_pre': False,
+ 'f_pre_encoder': 'null',
+ 'fac_prob': 1.0,
+ 'facial_dims': 100,
+ 'facial_f': 0,
+ 'facial_fps': 15,
+ 'facial_norm': False,
+ 'facial_rep': 'smplxflame_30',
+ 'fid_weight': 0.0,
+ 'finger_net': 'original',
+ 'freeze_wordembed': False,
+ 'fsmr': 0.0,
+ 'ftmr': 0.0,
+ 'fusion_mode': 'sum',
+ 'g_name': 'MDM',
+ 'gap_weight': 0.0,
+ 'gpus': [0],
+ 'grad_norm': 0.99,
+ 'hidden_size': 768,
+ 'hvqvae_multipliers': [1],
+ 'id_rep': 'onehot',
+ 'input_context': 'both',
+ 'is_train': True,
+ 'ita_weight': 0.0,
+ 'iwa_weight': 0.0,
+ 'joint_channel': 3,
+ 'kld_aud_weight': 0.0,
+ 'kld_fac_weight': 0.0,
+ 'kld_weight': 0.0,
+ 'l': 4,
+ 'l_bins': 512,
+ 'l_mu': 0.99,
+ 'levels': 1,
+ 'lf': 3.0,
+ 'lh': 3.0,
+ 'll': 3.0,
+ 'loader_workers': 0,
+ 'log_period': 10,
+ 'loss_contrastive_neg_weight': 0.005,
+ 'loss_contrastive_pos_weight': 0.2,
+ 'loss_gan_weight': 5.0,
+ 'loss_kld_weight': 0.1,
+ 'loss_physical_weight': 0.0,
+ 'loss_reg_weight': 0.05,
+ 'loss_regression_weight': 70.0,
+ 'lr_base': 5e-05,
+ 'lr_min': 1e-07,
+ 'lr_policy': 'step',
+ 'lu': 3.0,
+ 'm_conv': 1.0,
+ 'm_decoder': None,
+ 'm_encoder': 'null',
+ 'm_fix_pre': False,
+ 'm_pre_encoder': 'null',
+ 'mean_pose_path': '../../beatx_2_330_mean.npy',
+ 'mean_trans_path': '../../beatx_2_trans_mean.npy',
+ 'model': 'denoiser',
+ 'momentum': 0.8,
+ 'motion_f': 256,
+ 'msmr': 0.0,
+ 'mtmr': 0.0,
+ 'multi_length_training': [1.0],
+ 'n_layer': 1,
+ 'n_poses': 34,
+ 'n_pre_poses': 4,
+ 'name': '1001_203942_diffusion_rvqvae_128_gaps-210-0',
+ 'nesterov': True,
+ 'new_cache': False,
+ 'no_adv_epoch': 999,
+ 'notes': '',
+ 'opt': 'adam',
+ 'opt_betas': [0.5, 0.999],
+ 'ori_joints': 'beat_smplx_joints',
+ 'out_path': './outputs/audio2pose/',
+ 'pos_encoding_type': 'sin',
+ 'pos_prob': 1.0,
+ 'pose_dims': 330,
+ 'pose_fps': 30,
+ 'pose_length': 128,
+ 'pose_norm': True,
+ 'pose_rep': 'smplxflame_30',
+ 'pre_frames': 4,
+ 'pre_type': 'zero',
+ 'pretrain': False,
+ 'project': 's2g',
+ 'queue_size': 1024,
+ 'random_seed': 2021,
+ 'rec_aud_weight': 0.0,
+ 'rec_fac_weight': 0.0,
+ 'rec_pos_weight': 0.0,
+ 'rec_txt_weight': 0.0,
+ 'rec_ver_weight': 0.0,
+ 'rec_weight': 1.0,
+ 'root_path': './',
+ 'root_weight': 1.0,
+ 'rot6d': True,
+ 'sample_length': 34,
+ 'sem_rep': None,
+ 'sparse': 1,
+ 'speaker_dims': 4,
+ 'speaker_f': 0,
+ 'speaker_id': 'onehot',
+ 'stat': 'ts',
+ 'std_pose_path': '../../beatx_2_330_std.npy',
+ 'std_trans_path': '../../beatx_2_trans_std.npy',
+ 'stride': 20,
+ 'strides_t': [2],
+ 't_encoder': 'null',
+ 't_fix_pre': False,
+ 't_pre_encoder': 'fasttext',
+ 'tar_joints': 'beat_smplx_full',
+ 'test_ckpt': './outputs/audio2pose/custom/0403_212319_diffusion_rvqvae_128/last_500.bin',
+ 'test_data_path': '/datasets/trinity/test/',
+ 'test_length': 128,
+ 'test_period': 20,
+ 'train_data_path': '/datasets/trinity/train/',
+ 'train_trans': True,
+ 'trainer': 'diffusion_rvqvae',
+ 'training_speakers': [2],
+ 'tsmr': 0.0,
+ 'ttmr': 0.0,
+ 'txt_prob': 1.0,
+ 'use_amass': False,
+ 'use_aug': False,
+ 'use_bottleneck': True,
+ 'use_motionclip': False,
+ 'use_trans': True,
+ 'vae_codebook_size': 256,
+ 'vae_grow': [1, 1, 2, 1],
+ 'vae_layer': 4,
+ 'vae_length': 240,
+ 'vae_quantizer_lambda': 1.0,
+ 'vae_test_dim': 330,
+ 'vae_test_len': 32,
+ 'vae_test_stride': 20,
+ 'val_data_path': '/datasets/trinity/val/',
+ 'variational': False,
+ 'vel': 1,
+ 'vel_weight': 0.0,
+ 'vqvae_ckpt': None,
+ 'vqvae_hands_path': './datasets/hub/output_beatx2/RVQVAE_hands/net_300000.pth',
+ 'vqvae_latent_scale': 5.0,
+ 'vqvae_lower_path': './datasets/hub/output_beatx2/RVQVAE_lower/net_300000.pth',
+ 'vqvae_lower_trans_path': './datasets/hub/output_beatx2/RVQVAE_lower_trans/net_300000.pth',
+ 'vqvae_reverse_decoder_dilation': True,
+ 'vqvae_squeeze_scale': 4,
+ 'vqvae_type': 'rvqvae',
+ 'vqvae_upper_path': './datasets/hub/output_beatx2/RVQVAE_upper/net_300000.pth',
+ 'warmup_epochs': 0,
+ 'warmup_lr': 0.0005,
+ 'wei_weight': 0.0,
+ 'weight_decay': 0.0,
+ 'width': 512,
+ 'word_cache': False,
+ 'word_dims': 300,
+ 'word_f': 256,
+ 'word_index_num': 11195,
+ 'word_rep': 'textgrid',
+ 'z_type': 'speaker'}
+10-01 20:39:43 | # ------------ 1001_203942_diffusion_rvqvae_128_gaps-210-0 ----------- #
+10-01 20:39:43 | PyTorch version: 2.4.1+cu121
+10-01 20:39:43 | CUDA version: 12.1
+10-01 20:39:43 | 1 GPUs
+10-01 20:39:43 | Random Seed: 2021
+10-01 20:39:46 | Audio bit rate: 16000
+10-01 20:39:46 | Reading data './datasets/BEAT_SMPL/beat_v2.0.0/beat_english_v2.0.0/'...
+10-01 20:39:46 | Creating the dataset cache...
+10-01 20:39:46 | Found the cache ./datasets/beat_cache/beat_smplx_en_emage_2_128/train/smplxflame_30_cache
+10-01 20:39:46 | Init train dataloader success
+10-01 20:39:46 | Init val dataloader success
+10-01 20:39:46 | Audio bit rate: 16000
+10-01 20:39:46 | Reading data './datasets/BEAT_SMPL/beat_v2.0.0/beat_english_v2.0.0/'...
+10-01 20:39:46 | Creating the dataset cache...
+10-01 20:39:46 | Found the cache ./datasets/beat_cache/beat_smplx_en_emage_2_128/test/smplxflame_30_cache
+10-01 20:39:46 | Init test dataloader success
+10-01 20:39:46 | DataParallel(
+  (module): MDM(
+    (WavEncoder): WavEncoder(
+      (feat_extractor): Sequential(
+        (0): BasicBlock(
+          (conv1): Conv1d(2, 64, kernel_size=(15,), stride=(5,), padding=(1700,))
+          (bn1): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+          (act1): LeakyReLU(negative_slope=0.01, inplace=True)
+          (conv2): Conv1d(64, 64, kernel_size=(15,), stride=(1,), padding=(7,))
+          (bn2): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+          (act2): LeakyReLU(negative_slope=0.01, inplace=True)
+          (downsample): Sequential(
+            (0): Conv1d(2, 64, kernel_size=(15,), stride=(5,), padding=(1700,))
+            (1): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+          )
+        )
+        (1): BasicBlock(
+          (conv1): Conv1d(64, 64, kernel_size=(15,), stride=(6,))
+          (bn1): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+          (act1): LeakyReLU(negative_slope=0.01, inplace=True)
+          (conv2): Conv1d(64, 64, kernel_size=(15,), stride=(1,), padding=(7,))
+          (bn2): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+          (act2): LeakyReLU(negative_slope=0.01, inplace=True)
+          (downsample): Sequential(
+            (0): Conv1d(64, 64, kernel_size=(15,), stride=(6,))
+            (1): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+          )
+        )
+        (2): BasicBlock(
+          (conv1): Conv1d(64, 64, kernel_size=(15,), stride=(1,), padding=(7,))
+          (bn1): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+          (act1): LeakyReLU(negative_slope=0.01, inplace=True)
+          (conv2): Conv1d(64, 64, kernel_size=(15,), stride=(1,), padding=(7,))
+          (bn2): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+          (act2): LeakyReLU(negative_slope=0.01, inplace=True)
+        )
+        (3): BasicBlock(
+          (conv1): Conv1d(64, 128, kernel_size=(15,), stride=(6,))
+          (bn1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+          (act1): LeakyReLU(negative_slope=0.01, inplace=True)
+          (conv2): Conv1d(128, 128, kernel_size=(15,), stride=(1,), padding=(7,))
+          (bn2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+          (act2): LeakyReLU(negative_slope=0.01, inplace=True)
+          (downsample): Sequential(
+            (0): Conv1d(64, 128, kernel_size=(15,), stride=(6,))
+            (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+          )
+        )
+        (4): BasicBlock(
+          (conv1): Conv1d(128, 128, kernel_size=(15,), stride=(1,), padding=(7,))
+          (bn1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+          (act1): LeakyReLU(negative_slope=0.01, inplace=True)
+          (conv2): Conv1d(128, 128, kernel_size=(15,), stride=(1,), padding=(7,))
+          (bn2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+          (act2): LeakyReLU(negative_slope=0.01, inplace=True)
+        )
+        (5): BasicBlock(
+          (conv1): Conv1d(128, 256, kernel_size=(15,), stride=(3,))
+          (bn1): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+          (act1): LeakyReLU(negative_slope=0.01, inplace=True)
+          (conv2): Conv1d(256, 256, kernel_size=(15,), stride=(1,), padding=(7,))
+          (bn2): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+          (act2): LeakyReLU(negative_slope=0.01, inplace=True)
+          (downsample): Sequential(
+            (0): Conv1d(128, 256, kernel_size=(15,), stride=(3,))
|
303 |
+
(1): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
|
304 |
+
)
|
305 |
+
)
|
306 |
+
)
|
307 |
+
)
|
308 |
+
(text_encoder_body): Linear(in_features=300, out_features=256, bias=True)
|
309 |
+
(text_pre_encoder_body): Embedding(11195, 300)
|
310 |
+
(sequence_pos_encoder): PositionalEncoding(
|
311 |
+
(dropout): Dropout(p=0.1, inplace=False)
|
312 |
+
)
|
313 |
+
(mytimmblocks): ModuleList(
|
314 |
+
(0-7): 8 x Block(
|
315 |
+
(norm1): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
|
316 |
+
(attn): Attention(
|
317 |
+
(qkv): Linear(in_features=512, out_features=1536, bias=False)
|
318 |
+
(q_norm): Identity()
|
319 |
+
(k_norm): Identity()
|
320 |
+
(attn_drop): Dropout(p=0.0, inplace=False)
|
321 |
+
(proj): Linear(in_features=512, out_features=512, bias=True)
|
322 |
+
(proj_drop): Dropout(p=0.0, inplace=False)
|
323 |
+
)
|
324 |
+
(ls1): Identity()
|
325 |
+
(drop_path1): DropPath(drop_prob=0.100)
|
326 |
+
(norm2): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
|
327 |
+
(mlp): Mlp(
|
328 |
+
(fc1): Linear(in_features=512, out_features=1024, bias=True)
|
329 |
+
(act): GELU(approximate='none')
|
330 |
+
(drop1): Dropout(p=0.0, inplace=False)
|
331 |
+
(norm): Identity()
|
332 |
+
(fc2): Linear(in_features=1024, out_features=512, bias=True)
|
333 |
+
(drop2): Dropout(p=0.0, inplace=False)
|
334 |
+
)
|
335 |
+
(ls2): Identity()
|
336 |
+
(drop_path2): DropPath(drop_prob=0.100)
|
337 |
+
)
|
338 |
+
)
|
339 |
+
(embed_timestep): TimestepEmbedder(
|
340 |
+
(sequence_pos_encoder): PositionalEncoding(
|
341 |
+
(dropout): Dropout(p=0.1, inplace=False)
|
342 |
+
)
|
343 |
+
(time_embed): Sequential(
|
344 |
+
(0): Linear(in_features=512, out_features=512, bias=True)
|
345 |
+
(1): SiLU()
|
346 |
+
(2): Linear(in_features=512, out_features=512, bias=True)
|
347 |
+
)
|
348 |
+
)
|
349 |
+
(embed_style): Linear(in_features=6, out_features=64, bias=True)
|
350 |
+
(embed_text): Linear(in_features=6144, out_features=512, bias=True)
|
351 |
+
(output_process): OutputProcess(
|
352 |
+
(poseFinal): Linear(in_features=512, out_features=1536, bias=True)
|
353 |
+
)
|
354 |
+
(rel_pos): SinusoidalEmbeddings()
|
355 |
+
(input_process): InputProcess(
|
356 |
+
(poseEmbedding): Linear(in_features=1536, out_features=512, bias=True)
|
357 |
+
)
|
358 |
+
(input_process2): Linear(in_features=1280, out_features=512, bias=True)
|
359 |
+
(mix_audio_text): Linear(in_features=512, out_features=256, bias=True)
|
360 |
+
)
|
361 |
+
)
|
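A note on the WavEncoder printout above: the strided convolutions (strides 5, 6, 6 and 3 in blocks 0, 1, 3 and 5) downsample their input by 5 x 6 x 6 x 3 = 540 overall. Assuming the 2-channel input runs at the 16 kHz audio rate from the config, that gives roughly 16000 / 540 ≈ 29.6 feature frames per second, close to the 30 fps pose rate used throughout. A minimal sketch of that arithmetic (plain Python, not code from this repo):

    # Overall temporal downsampling of the WavEncoder, strides read off the printout above.
    strides = [5, 6, 6, 3]
    total = 1
    for s in strides:
        total *= s
    print(total, 16000 / total)  # 540, ~29.63 frames/s, approximately the 30 fps pose rate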
362 |
+
10-01 20:39:46 | init MDM success
|
363 |
+
10-01 20:39:46 | load self-pretrained checkpoints for VAESKConv
|
364 |
+
10-01 20:39:46 | load self-pretrained checkpoints for VAESKConv
|
365 |
+
10-01 20:39:46 | VAESKConv(
|
366 |
+
(encoder): LocalEncoder(
|
367 |
+
(layers): ModuleList(
|
368 |
+
(0): Sequential(
|
369 |
+
(0): SkeletonResidual(
|
370 |
+
(residual): Sequential(
|
371 |
+
(0): SkeletonConv()
|
372 |
+
(1): GroupNorm(10, 330, eps=1e-05, affine=True)
|
373 |
+
)
|
374 |
+
(shortcut): SkeletonConv()
|
375 |
+
(common): Sequential(
|
376 |
+
(0): SkeletonPool()
|
377 |
+
(1): Tanh()
|
378 |
+
)
|
379 |
+
)
|
380 |
+
)
|
381 |
+
(1): Sequential(
|
382 |
+
(0): SkeletonResidual(
|
383 |
+
(residual): Sequential(
|
384 |
+
(0): SkeletonConv()
|
385 |
+
(1): GroupNorm(10, 210, eps=1e-05, affine=True)
|
386 |
+
)
|
387 |
+
(shortcut): SkeletonConv()
|
388 |
+
(common): Sequential(
|
389 |
+
(0): SkeletonPool()
|
390 |
+
(1): Tanh()
|
391 |
+
)
|
392 |
+
)
|
393 |
+
)
|
394 |
+
(2-3): 2 x Sequential(
|
395 |
+
(0): SkeletonResidual(
|
396 |
+
(residual): Sequential(
|
397 |
+
(0): SkeletonConv()
|
398 |
+
(1): GroupNorm(10, 240, eps=1e-05, affine=True)
|
399 |
+
)
|
400 |
+
(shortcut): SkeletonConv()
|
401 |
+
(common): Sequential(
|
402 |
+
(0): Tanh()
|
403 |
+
)
|
404 |
+
)
|
405 |
+
)
|
406 |
+
)
|
407 |
+
)
|
408 |
+
(decoder): VQDecoderV3(
|
409 |
+
(main): Sequential(
|
410 |
+
(0): ResBlock(
|
411 |
+
(model): Sequential(
|
412 |
+
(0): Conv1d(240, 240, kernel_size=(3,), stride=(1,), padding=(1,))
|
413 |
+
(1): LeakyReLU(negative_slope=0.2, inplace=True)
|
414 |
+
(2): Conv1d(240, 240, kernel_size=(3,), stride=(1,), padding=(1,))
|
415 |
+
)
|
416 |
+
)
|
417 |
+
(1): ResBlock(
|
418 |
+
(model): Sequential(
|
419 |
+
(0): Conv1d(240, 240, kernel_size=(3,), stride=(1,), padding=(1,))
|
420 |
+
(1): LeakyReLU(negative_slope=0.2, inplace=True)
|
421 |
+
(2): Conv1d(240, 240, kernel_size=(3,), stride=(1,), padding=(1,))
|
422 |
+
)
|
423 |
+
)
|
424 |
+
(2): Upsample(scale_factor=2.0, mode='nearest')
|
425 |
+
(3): Conv1d(240, 240, kernel_size=(3,), stride=(1,), padding=(1,))
|
426 |
+
(4): LeakyReLU(negative_slope=0.2, inplace=True)
|
427 |
+
(5): Upsample(scale_factor=2.0, mode='nearest')
|
428 |
+
(6): Conv1d(240, 240, kernel_size=(3,), stride=(1,), padding=(1,))
|
429 |
+
(7): LeakyReLU(negative_slope=0.2, inplace=True)
|
430 |
+
(8): Upsample(scale_factor=2.0, mode='nearest')
|
431 |
+
(9): Conv1d(240, 240, kernel_size=(3,), stride=(1,), padding=(1,))
|
432 |
+
(10): LeakyReLU(negative_slope=0.2, inplace=True)
|
433 |
+
(11): Upsample(scale_factor=2.0, mode='nearest')
|
434 |
+
(12): Conv1d(240, 330, kernel_size=(3,), stride=(1,), padding=(1,))
|
435 |
+
(13): LeakyReLU(negative_slope=0.2, inplace=True)
|
436 |
+
(14): Conv1d(330, 330, kernel_size=(3,), stride=(1,), padding=(1,))
|
437 |
+
)
|
438 |
+
)
|
439 |
+
(fc_mu): Linear(in_features=240, out_features=240, bias=True)
|
440 |
+
(fc_logvar): Linear(in_features=240, out_features=240, bias=True)
|
441 |
+
)
|
442 |
+
10-01 20:39:46 | init VAESKConv success
|
443 |
+
10-01 20:39:47 | load self-pretrained checkpoints for VAESKConv
|
444 |
+
10-01 20:39:47 | load self-pretrained checkpoints for VAESKConv
|
445 |
+
10-01 20:39:48 | load self-pretrained checkpoints for MDM
|
446 |
+
10-01 21:08:57 | l2 loss: 0.0
|
447 |
+
10-01 21:08:57 | lvel loss: 0.0
|
448 |
+
10-01 21:08:58 | fid score: 0.46525881529758983
|
449 |
+
10-01 21:08:58 | align score: 0.7361291368819373
|
450 |
+
10-01 21:08:58 | l1div score: 12.30848217010498
|
451 |
+
10-01 21:08:58 | total inference time: 1749 s for 945 s motion
|
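The "fid score" logged above is a Fréchet distance between Gaussian fits of real and generated motion features (extracted here by the VAESKConv evaluator loaded just before). The repository's own evaluation code is not shown in this diff; a generic sketch of the distance itself, assuming two (N, D) feature arrays:

    import numpy as np
    from scipy import linalg

    def frechet_distance(feats_a, feats_b):
        # Fit a Gaussian (mean, covariance) to each feature set and compare the fits.
        mu_a, mu_b = feats_a.mean(axis=0), feats_b.mean(axis=0)
        cov_a = np.cov(feats_a, rowvar=False)
        cov_b = np.cov(feats_b, rowvar=False)
        covmean = linalg.sqrtm(cov_a @ cov_b)
        if np.iscomplexobj(covmean):
            covmean = covmean.real  # drop tiny imaginary parts from sqrtm
        diff = mu_a - mu_b
        return diff @ diff + np.trace(cov_a + cov_b - 2.0 * covmean)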
ckpt/beatx2_cospeech_diffusion/last_500.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d79b6fd3e412f7e3cb61eb6795ff686f6cdf80d32ce2bf941cd985d8cae24cc1
+size 128770342
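This checkpoint is stored as a Git LFS pointer: a three-line text stub (spec version, sha256 oid, size in bytes) that stands in for the full 128,770,342-byte binary until it is fetched. A minimal sketch of parsing such a pointer (read_lfs_pointer is a hypothetical helper, not part of this repo):

    def read_lfs_pointer(path):
        # Each line of an LFS pointer is "key value"; collect them into a dict.
        fields = {}
        with open(path) as f:
            for line in f:
                key, _, value = line.strip().partition(" ")
                fields[key] = value
        return fields  # e.g. {'version': '...', 'oid': 'sha256:d79b...', 'size': '128770342'}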
ckpt/beatx2_rvqvae/RVQVAE_hands/net_300000.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f4eb84ff69009be0b3e68419c5382aa10443b73739dfe2e2928b046e2db59a8b
+size 83048747
ckpt/beatx2_rvqvae/RVQVAE_hands/run.log
ADDED
The diff for this file is too large to render. See raw diff.
ckpt/beatx2_rvqvae/RVQVAE_lower/net_300000.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a29217af4f33b7b50ae9aebfdfc2bf2c0e80bed48316ab218cdc40043bb03d20
+size 81499947
ckpt/beatx2_rvqvae/RVQVAE_lower/run.log
ADDED
The diff for this file is too large to render. See raw diff.
ckpt/beatx2_rvqvae/RVQVAE_lower_trans/net_300000.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb81af9ebd6c34b473db39e4c343e76fb3b30e4dbbab60d56460544f9cea7f6f
+size 81536811
ckpt/beatx2_rvqvae/RVQVAE_lower_trans/run.log
ADDED
The diff for this file is too large to render. See raw diff.
ckpt/beatx2_rvqvae/RVQVAE_upper/net_300000.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:959d066138b293455a98fb0175b1fe2fcc31da9a1de83c9bfbf093ffea746a0e
+size 81794923
ckpt/beatx2_rvqvae/RVQVAE_upper/run.log
ADDED
The diff for this file is too large to render. See raw diff.
configs/beat2_rvqvae.yaml
ADDED
@@ -0,0 +1,134 @@
+is_train: True
+ddp: False
+stat: ts
+root_path: ./
+out_path: ./outputs/audio2pose/
+project: s2g
+data_path: ./datasets/BEAT_SMPL/beat_v2.0.0/beat_english_v2.0.0/
+e_path: weights/AESKConv_240_100.bin
+eval_model: motion_representation
+e_name: VAESKConv
+test_ckpt: ./outputs/audio2pose/custom/0112_001634_emage/last_200.bin
+data_path_1: ./datasets/hub/
+
+vae_test_len: 32
+vae_test_dim: 330
+vae_test_stride: 20
+vae_length: 240
+vae_codebook_size: 256
+vae_layer: 4
+vae_grow: [1,1,2,1]
+variational: False
+
+# data config
+training_speakers: [2] #[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30] #[2]
+additional_data: False
+cache_path: datasets/beat_cache/beat_smplx_en_emage_2_rvqvae/
+dataset: mix_sep
+new_cache: True
+use_amass: False
+# motion config
+ori_joints: beat_smplx_joints
+tar_joints: beat_smplx_full
+pose_rep: smplxflame_30
+pose_norm: False
+pose_fps: 30
+rot6d: True
+pre_frames: 4
+pose_dims: 330
+pose_length: 64
+stride: 20
+test_length: 64
+motion_f: 256
+m_pre_encoder: null
+m_encoder: null
+m_fix_pre: False
+
+# audio config
+audio_rep: onset+amplitude
+audio_sr: 16000
+audio_fps: 16000
+audio_norm: False
+audio_f: 256
+# a_pre_encoder: tcn_camn
+# a_encoder: none
+# a_fix_pre: False
+
+# text config
+word_rep: textgrid
+word_index_num: 11195
+word_dims: 300
+freeze_wordembed: False
+word_f: 256
+t_pre_encoder: fasttext
+t_encoder: null
+t_fix_pre: False
+
+# facial config
+facial_rep: smplxflame_30
+facial_dims: 100
+facial_norm: False
+facial_f: 0
+f_pre_encoder: null
+f_encoder: null
+f_fix_pre: False
+
+# speaker config
+id_rep: onehot
+speaker_f: 0
+
+# model config
+batch_size: 80 #80
+# warmup_epochs: 1
+# warmup_lr: 1e-6
+lr_base: 4e-4
+model: motion_representation
+g_name: VQVAEConvZero
+trainer: ae_total
+hidden_size: 768
+n_layer: 1
+
+rec_weight: 1
+grad_norm: 0.99
+epochs: 200
+test_period: 20
+ll: 3
+lf: 3
+lu: 3
+lh: 3
+cl: 1
+cf: 0
+cu: 1
+ch: 1
+
+
+
+# below is the vqvae config, copied from QPGESTURE
+# Codebook Configs
+levels: 1
+downs_t: [3]
+strides_t: [2]
+emb_width: 512
+l_bins: 512
+l_mu: 0.99
+commit: 0.1
+hvqvae_multipliers: [1]
+width: 512
+depth: 3
+m_conv: 1.0
+dilation_growth_rate: 3
+sample_length: 80
+use_bottleneck: True
+joint_channel: 6
+# depth: 3
+# width: 128
+# m_conv: 1.0
+# dilation_growth_rate: 1
+# dilation_cycle: None
+vel: 1 # 1 -> 0
+acc: 1 # 1 -> 0
+vqvae_reverse_decoder_dilation: True
+
+
+## below is special for emage
+rec_pos_weight: 1.0
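These configs are flat key/value YAML, so for quick experiments they can be loaded into attribute-style access like this (a sketch only; the repo presumably merges them into its own argument parser rather than a SimpleNamespace):

    import yaml
    from types import SimpleNamespace

    with open("configs/beat2_rvqvae.yaml") as f:
        cfg = SimpleNamespace(**yaml.safe_load(f))
    print(cfg.vae_length, cfg.vae_codebook_size, cfg.g_name)  # 240 256 VQVAEConvZero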
configs/diffusion_rvqvae_128.yaml
ADDED
@@ -0,0 +1,118 @@
+is_train: True
+ddp: False
+stat: ts
+root_path: ./
+out_path: ./outputs/audio2pose/
+project: s2g
+data_path: ./datasets/BEAT_SMPL/beat_v2.0.0/beat_english_v2.0.0/
+e_path: weights/AESKConv_240_100.bin
+eval_model: motion_representation
+e_name: VAESKConv
+test_ckpt: ./ckpt/beatx2_cospeech_diffusion/last_500.bin
+data_path_1: ./datasets/hub/
+pose_norm: True
+
+
+mean_pose_path: ./mean_std/beatx_2_330_mean.npy
+std_pose_path: ./mean_std/beatx_2_330_std.npy
+
+mean_trans_path: ./mean_std/beatx_2_trans_mean.npy
+std_trans_path: ./mean_std/beatx_2_trans_std.npy
+
+
+vqvae_upper_path: ./ckpt/beatx2_rvqvae/RVQVAE_upper/net_300000.pth
+vqvae_hands_path: ./ckpt/beatx2_rvqvae/RVQVAE_hands/net_300000.pth
+vqvae_lower_path: ./ckpt/beatx2_rvqvae/RVQVAE_lower/net_300000.pth
+
+vqvae_lower_trans_path: ./ckpt/beatx2_rvqvae/RVQVAE_lower_trans/net_300000.pth
+use_trans: True
+
+decay_epoch: 500
+
+vqvae_squeeze_scale: 4
+vqvae_type: rvqvae
+vqvae_latent_scale: 5
+
+vae_test_len: 32
+vae_test_dim: 330
+vae_test_stride: 20
+vae_length: 240
+vae_codebook_size: 256
+vae_layer: 4
+vae_grow: [1,1,2,1]
+variational: False
+
+# data config
+training_speakers: [2] #[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
+additional_data: False
+cache_path: datasets/beat_cache/beat_smplx_en_emage_2_128/
+dataset: beat_sep_lower
+new_cache: False
+
+# motion config
+ori_joints: beat_smplx_joints
+tar_joints: beat_smplx_full
+pose_rep: smplxflame_30
+pose_fps: 30
+rot6d: True
+pre_frames: 4
+pose_dims: 330
+pose_length: 128
+stride: 20
+test_length: 128
+motion_f: 256
+m_pre_encoder: null
+m_encoder: null
+m_fix_pre: False
+
+
+audio_rep: onset+amplitude
+audio_sr: 16000
+audio_fps: 16000
+audio_norm: False
+audio_f: 256
+
+
+word_rep: textgrid
+word_index_num: 11195
+word_dims: 300
+freeze_wordembed: False
+word_f: 256
+t_pre_encoder: fasttext
+t_encoder: null
+t_fix_pre: False
+
+
+facial_rep: smplxflame_30
+facial_dims: 100
+facial_norm: False
+facial_f: 0
+f_pre_encoder: null
+f_encoder: null
+f_fix_pre: False
+
+
+id_rep: onehot
+speaker_f: 0
+
+
+batch_size: 40
+lr_base: 5e-5
+model: denoiser
+g_name: MDM
+trainer: diffusion_rvqvae
+hidden_size: 768
+n_layer: 1
+
+rec_weight: 1
+grad_norm: 0.99
+epochs: 2000
+test_period: 20
+ll: 3
+lf: 3
+lu: 3
+lh: 3
+cl: 1
+cf: 0
+cu: 1
+ch: 1
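If vqvae_squeeze_scale is the temporal downsampling factor of the RVQ-VAE (as the name suggests), each 128-frame training clip maps to 128 / 4 = 32 latent steps, and vqvae_latent_scale: 5 would then rescale latent magnitudes for the diffusion model; the latter interpretation is an assumption. A sketch of the length arithmetic only:

    pose_length = 128      # frames per clip, from the config above
    squeeze_scale = 4      # vqvae_squeeze_scale
    print(pose_length // squeeze_scale)  # 32 latent steps per clip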
configs/diffusion_rvqvae_128_hf.yaml
ADDED
@@ -0,0 +1,118 @@
+is_train: True
+ddp: False
+stat: ts
+root_path: ./
+out_path: ./outputs/audio2pose/
+project: s2g
+data_path: ./datasets/BEAT_SMPL/beat_v2.0.0/beat_english_v2.0.0/
+e_path: weights/AESKConv_240_100.bin
+eval_model: motion_representation
+e_name: VAESKConv
+test_ckpt: ./ckpt/beatx2_cospeech_diffusion/last_500.bin
+data_path_1: ./datasets/hub/
+pose_norm: True
+
+
+mean_pose_path: ./mean_std/beatx_2_330_mean.npy
+std_pose_path: ./mean_std/beatx_2_330_std.npy
+
+mean_trans_path: ./mean_std/beatx_2_trans_mean.npy
+std_trans_path: ./mean_std/beatx_2_trans_std.npy
+
+
+vqvae_upper_path: ./ckpt/beatx2_rvqvae/RVQVAE_upper/net_300000.pth
+vqvae_hands_path: ./ckpt/beatx2_rvqvae/RVQVAE_hands/net_300000.pth
+vqvae_lower_path: ./ckpt/beatx2_rvqvae/RVQVAE_lower/net_300000.pth
+
+vqvae_lower_trans_path: ./ckpt/beatx2_rvqvae/RVQVAE_lower_trans/net_300000.pth
+use_trans: True
+
+decay_epoch: 500
+
+vqvae_squeeze_scale: 4
+vqvae_type: rvqvae
+vqvae_latent_scale: 5
+
+vae_test_len: 32
+vae_test_dim: 330
+vae_test_stride: 20
+vae_length: 240
+vae_codebook_size: 256
+vae_layer: 4
+vae_grow: [1,1,2,1]
+variational: False
+
+# data config
+training_speakers: [2] #[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
+additional_data: False
+cache_path: datasets/beat_cache/web_demo_test/
+dataset: beat_sep_lower_single
+new_cache: True
+
+# motion config
+ori_joints: beat_smplx_joints
+tar_joints: beat_smplx_full
+pose_rep: smplxflame_30
+pose_fps: 30
+rot6d: True
+pre_frames: 4
+pose_dims: 330
+pose_length: 128
+stride: 20
+test_length: 128
+motion_f: 256
+m_pre_encoder: null
+m_encoder: null
+m_fix_pre: False
+
+
+audio_rep: onset+amplitude
+audio_sr: 16000
+audio_fps: 16000
+audio_norm: False
+audio_f: 256
+
+
+word_rep: textgrid
+word_index_num: 11195
+word_dims: 300
+freeze_wordembed: False
+word_f: 256
+t_pre_encoder: fasttext
+t_encoder: null
+t_fix_pre: False
+
+
+facial_rep: smplxflame_30
+facial_dims: 100
+facial_norm: False
+facial_f: 0
+f_pre_encoder: null
+f_encoder: null
+f_fix_pre: False
+
+
+id_rep: onehot
+speaker_f: 0
+
+
+batch_size: 40
+lr_base: 5e-5
+model: denoiser
+g_name: MDM
+trainer: diffusion_rvqvae
+hidden_size: 768
+n_layer: 1
+
+rec_weight: 1
+grad_norm: 0.99
+epochs: 2000
+test_period: 20
+ll: 3
+lf: 3
+lu: 3
+lh: 3
+cl: 1
+cf: 0
+cu: 1
+ch: 1
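This _hf variant appears to differ from configs/diffusion_rvqvae_128.yaml in only three keys, retargeting it at the single-sequence web-demo dataloader and forcing a fresh cache:

    -cache_path: datasets/beat_cache/beat_smplx_en_emage_2_128/
    +cache_path: datasets/beat_cache/web_demo_test/
    -dataset: beat_sep_lower
    +dataset: beat_sep_lower_single
    -new_cache: False
    +new_cache: True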
dataloaders/amass_sep_lower.py
ADDED
@@ -0,0 +1,713 @@
+import os
+import pickle
+import math
+import shutil
+import numpy as np
+import lmdb as lmdb
+import textgrid as tg
+import pandas as pd
+import torch
+import glob
+import json
+from termcolor import colored
+from loguru import logger
+from collections import defaultdict
+from torch.utils.data import Dataset
+import torch.distributed as dist
+#import pyarrow
+import pickle
+import librosa
+import smplx
+import glob
+
+from .build_vocab import Vocab
+from .utils.audio_features import Wav2Vec2Model
+from .data_tools import joints_list
+from .utils import rotation_conversions as rc
+from .utils import other_tools
+
+# ACCAD 120
+# BioMotionLab_NTroje 120
+# CMU: complicated
+# EKUT 100
+# Eyes_Japan_Dataset: complicated
+# HumanEva: complicated
+# KIT 100
+# MPI_HDM05 120
+# MPI_Limits 120
+# MPI_mosh: complicated
+# SFU 120
+# SSM_synced: complicated
+# TCD_handMocap: complicated
+# TotalCapture 60
+# Transitions_mocap 120
+
+all_sequences = [
+    'ACCAD',
+    'BioMotionLab_NTroje',
+    'CMU',
+    'EKUT',
+    'Eyes_Japan_Dataset',
+    'HumanEva',
+    'KIT',
+    'MPI_HDM05',
+    'MPI_Limits',
+    'MPI_mosh',
+    'SFU',
+    'SSM_synced',
+    'TCD_handMocap',
+    'TotalCapture',
+    'Transitions_mocap',
+]
+amass_test_split = ['Transitions_mocap', 'SSM_synced']
+amass_vald_split = ['HumanEva', 'MPI_HDM05', 'SFU', 'MPI_mosh']
+amass_train_split = ['BioMotionLab_NTroje', 'Eyes_Japan_Dataset', 'TotalCapture', 'KIT', 'ACCAD', 'CMU', 'MPI_Limits',
+                     'TCD_handMocap', 'EKUT']
+
+# The splits above follow MOTION CLIP, but since the framerate handling in motionx is problematic, only part of the datasets are used for training for now
+# These are all 120 fps
+# amass_test_split = ['SFU']
+# amass_vald_split = ['MPI_Limits']
+# amass_train_split = ['BioMotionLab_NTroje', 'MPI_HDM05', 'ACCAD','Transitions_mocap']
+
+
+amass_splits = {
+    'test': amass_test_split,
+    'val': amass_vald_split,
+    'train': amass_train_split
+}
+class CustomDataset(Dataset):
+    def __init__(self, args, loader_type, augmentation=None, kwargs=None, build_cache=True):
+        self.args = args
+        self.loader_type = loader_type
+
+        self.rank = dist.get_rank()
+        self.ori_stride = self.args.stride
+        self.ori_length = self.args.pose_length
+        self.alignment = [0,0] # for trinity
+
+        self.ori_joint_list = joints_list[self.args.ori_joints]
+        self.tar_joint_list = joints_list[self.args.tar_joints]
+        if 'smplx' in self.args.pose_rep:
+            self.joint_mask = np.zeros(len(list(self.ori_joint_list.keys()))*3)
+            self.joints = len(list(self.tar_joint_list.keys()))
+            for joint_name in self.tar_joint_list:
+                self.joint_mask[self.ori_joint_list[joint_name][1] - self.ori_joint_list[joint_name][0]:self.ori_joint_list[joint_name][1]] = 1
+        else:
+            self.joints = len(list(self.ori_joint_list.keys()))+1
+            self.joint_mask = np.zeros(self.joints*3)
+            for joint_name in self.tar_joint_list:
+                if joint_name == "Hips":
+                    self.joint_mask[3:6] = 1
+                else:
+                    self.joint_mask[self.ori_joint_list[joint_name][1] - self.ori_joint_list[joint_name][0]:self.ori_joint_list[joint_name][1]] = 1
+        # select trainable joints
+        self.smplx = smplx.create(
+            self.args.data_path_1+"smplx_models/",
+            model_type='smplx',
+            gender='NEUTRAL_2020',
+            use_face_contour=False,
+            num_betas=300,
+            num_expression_coeffs=100,
+            ext='npz',
+            use_pca=False,
+        ).cuda().eval()
+
+        split_rule = pd.read_csv(args.data_path+"train_test_split.csv")
+        self.selected_file = split_rule.loc[(split_rule['type'] == loader_type) & (split_rule['id'].str.split("_").str[0].astype(int).isin(self.args.training_speakers))]
+        if args.additional_data and loader_type == 'train':
+            split_b = split_rule.loc[(split_rule['type'] == 'additional') & (split_rule['id'].str.split("_").str[0].astype(int).isin(self.args.training_speakers))]
+            #self.selected_file = split_rule.loc[(split_rule['type'] == 'additional') & (split_rule['id'].str.split("_").str[0].astype(int).isin(self.args.training_speakers))]
+            self.selected_file = pd.concat([self.selected_file, split_b])
+        if self.selected_file.empty:
+            logger.warning(f"{loader_type} is empty for speaker {self.args.training_speakers}, use train set 0-8 instead")
+            self.selected_file = split_rule.loc[(split_rule['type'] == 'train') & (split_rule['id'].str.split("_").str[0].astype(int).isin(self.args.training_speakers))]
+            self.selected_file = self.selected_file.iloc[0:8]
+        self.data_dir = args.data_path
+
+        if loader_type == "test":
+            self.args.multi_length_training = [1.0]
+        self.max_length = int(args.pose_length * self.args.multi_length_training[-1])
+        self.max_audio_pre_len = math.floor(args.pose_length / args.pose_fps * self.args.audio_sr)
+        if self.max_audio_pre_len > self.args.test_length*self.args.audio_sr:
+            self.max_audio_pre_len = self.args.test_length*self.args.audio_sr
+
+        if args.word_rep is not None:
+            with open(f"{args.data_path}weights/vocab.pkl", 'rb') as f:
+                self.lang_model = pickle.load(f)
+
+        preloaded_dir = self.args.root_path + 'datasets/beat_cache/amass_smplx_en_emage_new/' + loader_type + f"/{args.pose_rep}_cache"
+        # if args.pose_norm:
+        #     # careful for rotation vectors
+        #     if not os.path.exists(args.data_path+args.mean_pose_path+f"{args.pose_rep.split('_')[0]}/bvh_mean.npy"):
+        #         self.calculate_mean_pose()
+        #     self.mean_pose = np.load(args.data_path+args.mean_pose_path+f"{args.pose_rep.split('_')[0]}/bvh_mean.npy")
+        #     self.std_pose = np.load(args.data_path+args.mean_pose_path+f"{args.pose_rep.split('_')[0]}/bvh_std.npy")
+        # if args.audio_norm:
+        #     if not os.path.exists(args.data_path+args.mean_pose_path+f"{args.audio_rep.split('_')[0]}/bvh_mean.npy"):
+        #         self.calculate_mean_audio()
+        #     self.mean_audio = np.load(args.data_path+args.mean_pose_path+f"{args.audio_rep.split('_')[0]}/npy_mean.npy")
+        #     self.std_audio = np.load(args.data_path+args.mean_pose_path+f"{args.audio_rep.split('_')[0]}/npy_std.npy")
+        # if args.facial_norm:
+        #     if not os.path.exists(args.data_path+args.mean_pose_path+f"{args.pose_rep.split('_')[0]}/bvh_mean.npy"):
+        #         self.calculate_mean_face()
+        #     self.mean_facial = np.load(args.data_path+args.mean_pose_path+f"{args.facial_rep}/json_mean.npy")
+        #     self.std_facial = np.load(args.data_path+args.mean_pose_path+f"{args.facial_rep}/json_std.npy")
+        if self.args.beat_align:
+            if not os.path.exists(args.data_path+f"weights/mean_vel_{args.pose_rep}.npy"):
+                self.calculate_mean_velocity(args.data_path+f"weights/mean_vel_{args.pose_rep}.npy")
+            self.avg_vel = np.load(args.data_path+f"weights/mean_vel_{args.pose_rep}.npy")
+
+        if build_cache and self.rank == 0:
+            self.build_cache(preloaded_dir)
+        self.lmdb_env = lmdb.open(preloaded_dir, readonly=True, lock=False)
+        with self.lmdb_env.begin() as txn:
+            self.n_samples = txn.stat()["entries"]
+
+
+    def calculate_mean_velocity(self, save_path):
+        self.smplx = smplx.create(
+            self.args.data_path_1+"smplx_models/",
+            model_type='smplx',
+            gender='NEUTRAL_2020',
+            use_face_contour=False,
+            num_betas=300,
+            num_expression_coeffs=100,
+            ext='npz',
+            use_pca=False,
+        ).cuda().eval()
+        dir_p = self.data_dir + self.args.pose_rep + "/"
+        all_list = []
+        from tqdm import tqdm
+        for tar in tqdm(os.listdir(dir_p)):
+            if tar.endswith(".npz"):
+                m_data = np.load(dir_p+tar, allow_pickle=True)
+                betas, poses, trans, exps = m_data["betas"], m_data["poses"], m_data["trans"], m_data["expressions"]
+                n, c = poses.shape[0], poses.shape[1]
+                betas = betas.reshape(1, 300)
+                betas = np.tile(betas, (n, 1))
+                betas = torch.from_numpy(betas).cuda().float()
+                poses = torch.from_numpy(poses.reshape(n, c)).cuda().float()
+                exps = torch.from_numpy(exps.reshape(n, 100)).cuda().float()
+                trans = torch.from_numpy(trans.reshape(n, 3)).cuda().float()
+                max_length = 128
+                s, r = n//max_length, n%max_length
+                #print(n, s, r)
+                all_tensor = []
+                for i in range(s):
+                    with torch.no_grad():
+                        joints = self.smplx(
+                            betas=betas[i*max_length:(i+1)*max_length],
+                            transl=trans[i*max_length:(i+1)*max_length],
+                            expression=exps[i*max_length:(i+1)*max_length],
+                            jaw_pose=poses[i*max_length:(i+1)*max_length, 66:69],
+                            global_orient=poses[i*max_length:(i+1)*max_length,:3],
+                            body_pose=poses[i*max_length:(i+1)*max_length,3:21*3+3],
+                            left_hand_pose=poses[i*max_length:(i+1)*max_length,25*3:40*3],
+                            right_hand_pose=poses[i*max_length:(i+1)*max_length,40*3:55*3],
+                            return_verts=True,
+                            return_joints=True,
+                            leye_pose=poses[i*max_length:(i+1)*max_length, 69:72],
+                            reye_pose=poses[i*max_length:(i+1)*max_length, 72:75],
+                        )['joints'][:, :55, :].reshape(max_length, 55*3)
+                    all_tensor.append(joints)
+                if r != 0:
+                    with torch.no_grad():
+                        joints = self.smplx(
+                            betas=betas[s*max_length:s*max_length+r],
+                            transl=trans[s*max_length:s*max_length+r],
+                            expression=exps[s*max_length:s*max_length+r],
+                            jaw_pose=poses[s*max_length:s*max_length+r, 66:69],
+                            global_orient=poses[s*max_length:s*max_length+r,:3],
+                            body_pose=poses[s*max_length:s*max_length+r,3:21*3+3],
+                            left_hand_pose=poses[s*max_length:s*max_length+r,25*3:40*3],
+                            right_hand_pose=poses[s*max_length:s*max_length+r,40*3:55*3],
+                            return_verts=True,
+                            return_joints=True,
+                            leye_pose=poses[s*max_length:s*max_length+r, 69:72],
+                            reye_pose=poses[s*max_length:s*max_length+r, 72:75],
+                        )['joints'][:, :55, :].reshape(r, 55*3)
+                    all_tensor.append(joints)
+                joints = torch.cat(all_tensor, axis=0)
+                joints = joints.permute(1, 0)
+                dt = 1/30
+                # first step is forward diff (t+1 - t) / dt
+                init_vel = (joints[:, 1:2] - joints[:, :1]) / dt
+                # middle steps are second order (t+1 - t-1) / 2dt
+                middle_vel = (joints[:, 2:] - joints[:, 0:-2]) / (2 * dt)
+                # last step is backward diff (t - t-1) / dt
+                final_vel = (joints[:, -1:] - joints[:, -2:-1]) / dt
+                #print(joints.shape, init_vel.shape, middle_vel.shape, final_vel.shape)
+                vel_seq = torch.cat([init_vel, middle_vel, final_vel], dim=1).permute(1, 0).reshape(n, 55, 3)
+                #print(vel_seq.shape)
+                #.permute(1, 0).reshape(n, 55, 3)
+                vel_seq_np = vel_seq.cpu().numpy()
+                vel_joints_np = np.linalg.norm(vel_seq_np, axis=2) # n * 55
+                all_list.append(vel_joints_np)
+        avg_vel = np.mean(np.concatenate(all_list, axis=0),axis=0) # 55
+        np.save(save_path, avg_vel)
+
+
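calculate_mean_velocity above uses the standard finite-difference stencil named in its comments: forward difference at the first frame, second-order central difference in the middle, backward difference at the last frame. The same scheme on a toy 1-D signal (illustrative numbers, not repo code):

    import numpy as np

    x = np.array([0.0, 1.0, 4.0, 9.0, 16.0])  # positions sampled at dt = 1/30 s
    dt = 1 / 30
    v = np.empty_like(x)
    v[0] = (x[1] - x[0]) / dt                # forward difference at the first sample
    v[1:-1] = (x[2:] - x[:-2]) / (2 * dt)    # central difference inside
    v[-1] = (x[-1] - x[-2]) / dt             # backward difference at the last sample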
+    def build_cache(self, preloaded_dir):
+        logger.info(f"Audio bit rate: {self.args.audio_fps}")
+        logger.info("Reading data '{}'...".format(self.data_dir))
+        logger.info("Creating the dataset cache...")
+        if self.args.new_cache:
+            if os.path.exists(preloaded_dir):
+                shutil.rmtree(preloaded_dir)
+        if os.path.exists(preloaded_dir):
+            logger.info("Found the cache {}".format(preloaded_dir))
+        elif self.loader_type == "test":
+            self.cache_generation(
+                preloaded_dir, True,
+                0, 0,
+                is_test=True)
+        else:
+            self.cache_generation(
+                preloaded_dir, self.args.disable_filtering,
+                self.args.clean_first_seconds, self.args.clean_final_seconds,
+                is_test=False)
+
+    def __len__(self):
+        return self.n_samples
+
+
+    def load_amass(self,data):
+        ## This is used to fix the orientation of the AMASS data:
+        # it is originally Z-axis-up (as in Blender), the target is Y-axis-up; the facing direction is left unchanged for now
+
+        data_dict = {key: data[key] for key in data}
+        frames = data_dict['poses'].shape[0]
+        b = data_dict['poses'][...,:3]
+        b = rc.axis_angle_to_matrix(torch.from_numpy(b))
+        rot_matrix = np.array([[1.0, 0.0, 0.0], [0.0 , 0.0, 1.0], [0.0, -1.0, 0.0]])
+        c = np.einsum('ij,kjl->kil',rot_matrix,b)
+        c = rc.matrix_to_axis_angle(torch.from_numpy(c))
+        data_dict['poses'][...,:3] = c
+
+        trans_matrix1 = np.array([[1.0, 0.0, 0.0], [0.0 , 0.0, -1.0], [0.0, 1.0, 0.0]])
+        data_dict['trans'] = np.einsum("bi,ij->bj",data_dict['trans'],trans_matrix1)
+
+        betas300 = np.zeros(300)
+        betas300[:16] = data_dict['betas']
+        data_dict['betas'] = betas300
+        data_dict["expressions"] = np.zeros((frames,100))
+
+        return data_dict
+
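load_amass rotates AMASS's Z-up convention into Y-up by applying a fixed 90-degree rotation about X to both the root orientation and the translations. A quick numpy check that trans_matrix1 indeed sends the old up axis (+Z) to the new one (+Y):

    import numpy as np

    M = np.array([[1.0, 0.0, 0.0],
                  [0.0, 0.0, -1.0],
                  [0.0, 1.0, 0.0]])          # trans_matrix1 from load_amass above
    z_up = np.array([[0.0, 0.0, 1.0]])       # a point one unit along the old up axis
    print(np.einsum("bi,ij->bj", z_up, M))   # [[0. 1. 0.]], i.e. now along +Y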
+    def cache_generation(self, out_lmdb_dir, disable_filtering, clean_first_seconds, clean_final_seconds, is_test=False):
+        # if "wav2vec2" in self.args.audio_rep:
+        #     self.wav2vec_model = Wav2Vec2Model.from_pretrained(f"{self.args.data_path_1}/hub/transformer/wav2vec2-base-960h")
+        #     self.wav2vec_model.feature_extractor._freeze_parameters()
+        #     self.wav2vec_model = self.wav2vec_model.cuda()
+        #     self.wav2vec_model.eval()
+
+        self.n_out_samples = 0
+        # create db for samples
+        if not os.path.exists(out_lmdb_dir): os.makedirs(out_lmdb_dir)
+        dst_lmdb_env = lmdb.open(out_lmdb_dir, map_size= int(1024 ** 3 * 500))# 500G
+        n_filtered_out = defaultdict(int)
+
+
+        if self.args.use_amass:
+            amass_dir = '/mnt/fu09a/chenbohong/PantoMatrix/scripts/EMAGE_2024/datasets/AMASS_SMPLX'
+            for dataset in amass_splits[self.loader_type]:
+                search_path = os.path.join(amass_dir,dataset, '**', '*.npz')
+                npz_files = glob.glob(search_path, recursive=True)
+                for index, file_name in enumerate(npz_files):
+                    f_name = file_name.split('/')[-1]
+                    ext = ".npz" if "smplx" in self.args.pose_rep else ".bvh"
+                    pose_file = file_name
+                    pose_each_file = []
+                    trans_each_file = []
+                    trans_v_each_file = []
+                    shape_each_file = []
+                    audio_each_file = []
+                    facial_each_file = []
+                    word_each_file = []
+                    emo_each_file = []
+                    sem_each_file = []
+                    vid_each_file = []
+                    id_pose = f_name #1_wayne_0_1_1
+                    get_foot_contact = True
+                    logger.info(colored(f"# ---- Building cache for Pose {id_pose} ---- #", "blue"))
+                    if "smplx" in self.args.pose_rep:
+                        pose_data = np.load(pose_file, allow_pickle=True)
+                        if len(pose_data.files)==6:
+                            logger.info(colored(f"# ---- state file ---- #", "red"))
+                            continue
+                        assert 30%self.args.pose_fps == 0, 'pose_fps should be an aliquot part of 30'
+                        assert self.args.pose_fps == 30, "should 30"
+                        m_data = np.load(pose_file, allow_pickle=True)
+                        m_data= self.load_amass(m_data)
+                        betas, poses, trans, exps = m_data["betas"], m_data["poses"], m_data["trans"], m_data["expressions"]
+                        mocap_framerate = float(m_data['mocap_frame_rate'])
+                        stride = round(mocap_framerate / self.args.pose_fps)
+                        pose_each_file = poses[::stride]
+                        trans_each_file = trans[::stride]
+                        trans_each_file[:,0] = trans_each_file[:,0] - trans_each_file[0,0]
+                        trans_each_file[:,2] = trans_each_file[:,2] - trans_each_file[0,2]
+                        trans_v_each_file = np.zeros_like(trans_each_file)
+                        trans_v_each_file[1:,0] = trans_each_file[1:,0] - trans_each_file[:-1,0]
+                        trans_v_each_file[0,0] = trans_v_each_file[1,0]
+                        trans_v_each_file[1:,2] = trans_each_file[1:,2] - trans_each_file[:-1,2]
+                        trans_v_each_file[0,2] = trans_v_each_file[1,2]
+                        trans_v_each_file[:,1] = trans_each_file[:,1]
+
+
+                        shape_each_file = np.repeat(betas.reshape(1, -1), pose_each_file.shape[0], axis=0)
+
+                        n, c = poses.shape[0], poses.shape[1]
+                        betas = betas.reshape(1, 300)
+                        betas = np.tile(betas, (n, 1))
+                        betas = torch.from_numpy(betas).cuda().float()
+                        poses = torch.from_numpy(poses.reshape(n, c)).cuda().float()
+                        exps = torch.from_numpy(exps.reshape(n, 100)).cuda().float()
+                        trans = torch.from_numpy(trans.reshape(n, 3)).cuda().float()
+
+                        if get_foot_contact:
+                            max_length = 128
+                            s, r = n//max_length, n%max_length
+                            #print(n, s, r)
+                            all_tensor = []
+                            for i in range(s):
+                                with torch.no_grad():
+                                    joints = self.smplx(
+                                        betas=betas[i*max_length:(i+1)*max_length],
+                                        transl=trans[i*max_length:(i+1)*max_length],
+                                        expression=exps[i*max_length:(i+1)*max_length],
+                                        jaw_pose=poses[i*max_length:(i+1)*max_length, 66:69],
+                                        global_orient=poses[i*max_length:(i+1)*max_length,:3],
+                                        body_pose=poses[i*max_length:(i+1)*max_length,3:21*3+3],
+                                        left_hand_pose=poses[i*max_length:(i+1)*max_length,25*3:40*3],
+                                        right_hand_pose=poses[i*max_length:(i+1)*max_length,40*3:55*3],
+                                        return_verts=True,
+                                        return_joints=True,
+                                        leye_pose=poses[i*max_length:(i+1)*max_length, 69:72],
+                                        reye_pose=poses[i*max_length:(i+1)*max_length, 72:75],
+                                    )['joints'][:, (7,8,10,11), :].reshape(max_length, 4, 3).cpu()
+                                all_tensor.append(joints)
+                            if r != 0:
+                                with torch.no_grad():
+                                    joints = self.smplx(
+                                        betas=betas[s*max_length:s*max_length+r],
+                                        transl=trans[s*max_length:s*max_length+r],
+                                        expression=exps[s*max_length:s*max_length+r],
+                                        jaw_pose=poses[s*max_length:s*max_length+r, 66:69],
+                                        global_orient=poses[s*max_length:s*max_length+r,:3],
+                                        body_pose=poses[s*max_length:s*max_length+r,3:21*3+3],
+                                        left_hand_pose=poses[s*max_length:s*max_length+r,25*3:40*3],
+                                        right_hand_pose=poses[s*max_length:s*max_length+r,40*3:55*3],
+                                        return_verts=True,
+                                        return_joints=True,
+                                        leye_pose=poses[s*max_length:s*max_length+r, 69:72],
+                                        reye_pose=poses[s*max_length:s*max_length+r, 72:75],
+                                    )['joints'][:, (7,8,10,11), :].reshape(r, 4, 3).cpu()
+                                all_tensor.append(joints)
+                            joints = torch.cat(all_tensor, axis=0) # all, 4, 3
+                            # print(joints.shape)
+                            feetv = torch.zeros(joints.shape[1], joints.shape[0])
+                            joints = joints.permute(1, 0, 2)
+                            #print(joints.shape, feetv.shape)
+                            feetv[:, :-1] = (joints[:, 1:] - joints[:, :-1]).norm(dim=-1)
+                            #print(feetv.shape)
+                            contacts = (feetv < 0.01).numpy().astype(float)
+                            # print(contacts.shape, contacts)
+                            contacts = contacts.transpose(1, 0)[::stride]
+                            pose_each_file = pose_each_file * self.joint_mask
+                            pose_each_file = pose_each_file[:, self.joint_mask.astype(bool)]
+                            pose_each_file = np.concatenate([pose_each_file, contacts], axis=1)
+                            # print(pose_each_file.shape)
+                        else:
+                            pose_each_file = pose_each_file * self.joint_mask
+                            pose_each_file = pose_each_file[:, self.joint_mask.astype(bool)]
+
+                        # print(pose_each_file.shape)
+
+
+                    if self.args.id_rep is not None:
+                        vid_each_file = np.repeat(np.array(int(100)-1).reshape(1, 1), pose_each_file.shape[0], axis=0)
+
+                    filtered_result = self._sample_from_clip(
+                        dst_lmdb_env,
+                        audio_each_file, pose_each_file, trans_each_file, trans_v_each_file,shape_each_file, facial_each_file, word_each_file,
+                        vid_each_file, emo_each_file, sem_each_file,
+                        disable_filtering, clean_first_seconds, clean_final_seconds, is_test,
+                    )
+                    for type in filtered_result.keys():
+                        n_filtered_out[type] += filtered_result[type]
+
+
+
+        with dst_lmdb_env.begin() as txn:
+            logger.info(colored(f"no. of samples: {txn.stat()['entries']}", "cyan"))
+            n_total_filtered = 0
+            for type, n_filtered in n_filtered_out.items():
+                logger.info("{}: {}".format(type, n_filtered))
+                n_total_filtered += n_filtered
+            logger.info(colored("no. of excluded samples: {} ({:.1f}%)".format(
+                n_total_filtered, 100 * n_total_filtered / (txn.stat()["entries"] + n_total_filtered)), "cyan"))
+        dst_lmdb_env.sync()
+        dst_lmdb_env.close()
+
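The foot-contact labels built in cache_generation are a simple velocity threshold: an ankle or toe joint (SMPL-X joints 7, 8, 10, 11) that moves less than 0.01 between consecutive frames counts as planted. The heuristic in isolation (a sketch; the loader itself leaves the last frame's velocity at zero before thresholding):

    import torch

    def foot_contacts(foot_joints, thresh=0.01):
        # foot_joints: (T, 4, 3) positions of two ankles and two toes over T frames
        vel = (foot_joints[1:] - foot_joints[:-1]).norm(dim=-1)  # (T-1, 4) displacement per frame
        contacts = (vel < thresh).float()                        # 1.0 where a foot barely moves
        return torch.cat([contacts, contacts[-1:]], dim=0)       # pad back to T frames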
+    def _sample_from_clip(
+        self, dst_lmdb_env, audio_each_file, pose_each_file, trans_each_file, trans_v_each_file,shape_each_file, facial_each_file, word_each_file,
+        vid_each_file, emo_each_file, sem_each_file,
+        disable_filtering, clean_first_seconds, clean_final_seconds, is_test,
+        ):
+        """
+        for data cleaning, we ignore the data for first and final n s
+        for test, we return all data
+        """
+        # audio_start = int(self.alignment[0] * self.args.audio_fps)
+        # pose_start = int(self.alignment[1] * self.args.pose_fps)
+        #logger.info(f"before: {audio_each_file.shape} {pose_each_file.shape}")
+        # audio_each_file = audio_each_file[audio_start:]
+        # pose_each_file = pose_each_file[pose_start:]
+        # trans_each_file =
+        #logger.info(f"after alignment: {audio_each_file.shape} {pose_each_file.shape}")
+        #print(pose_each_file.shape)
+        round_seconds_skeleton = pose_each_file.shape[0] // self.args.pose_fps  # assume 1500 frames / 15 fps = 100 s
+        #print(round_seconds_skeleton)
+        if audio_each_file != []:
+            if self.args.audio_rep != "wave16k":
+                round_seconds_audio = len(audio_each_file) // self.args.audio_fps # assume 16,000,00 / 16,000 = 100 s
+            elif self.args.audio_rep == "mfcc":
+                round_seconds_audio = audio_each_file.shape[0] // self.args.audio_fps
+            else:
+                round_seconds_audio = audio_each_file.shape[0] // self.args.audio_sr
+            if facial_each_file != []:
+                round_seconds_facial = facial_each_file.shape[0] // self.args.pose_fps
+                logger.info(f"audio: {round_seconds_audio}s, pose: {round_seconds_skeleton}s, facial: {round_seconds_facial}s")
+                round_seconds_skeleton = min(round_seconds_audio, round_seconds_skeleton, round_seconds_facial)
+                max_round = max(round_seconds_audio, round_seconds_skeleton, round_seconds_facial)
+                if round_seconds_skeleton != max_round:
+                    logger.warning(f"reduce to {round_seconds_skeleton}s, ignore {max_round-round_seconds_skeleton}s")
+            else:
+                logger.info(f"pose: {round_seconds_skeleton}s, audio: {round_seconds_audio}s")
+                round_seconds_skeleton = min(round_seconds_audio, round_seconds_skeleton)
+                max_round = max(round_seconds_audio, round_seconds_skeleton)
+                if round_seconds_skeleton != max_round:
+                    logger.warning(f"reduce to {round_seconds_skeleton}s, ignore {max_round-round_seconds_skeleton}s")
+
+        clip_s_t, clip_e_t = clean_first_seconds, round_seconds_skeleton - clean_final_seconds # assume [10, 90]s
+        clip_s_f_audio, clip_e_f_audio = self.args.audio_fps * clip_s_t, clip_e_t * self.args.audio_fps # [160,000,90*160,000]
+        clip_s_f_pose, clip_e_f_pose = clip_s_t * self.args.pose_fps, clip_e_t * self.args.pose_fps # [150,90*15]
+
+
+        for ratio in self.args.multi_length_training:
+            if is_test:# stride = length for test
+                cut_length = clip_e_f_pose - clip_s_f_pose
+                self.args.stride = cut_length
+                self.max_length = cut_length
+            else:
+                self.args.stride = int(ratio*self.ori_stride)
+                cut_length = int(self.ori_length*ratio)
+
+            num_subdivision = math.floor((clip_e_f_pose - clip_s_f_pose - cut_length) / self.args.stride) + 1
+            logger.info(f"pose from frame {clip_s_f_pose} to {clip_e_f_pose}, length {cut_length}")
+            logger.info(f"{num_subdivision} clips is expected with stride {self.args.stride}")
+
+            if audio_each_file != []:
+                audio_short_length = math.floor(cut_length / self.args.pose_fps * self.args.audio_fps)
+                """
+                for audio sr = 16000, fps = 15, pose_length = 34,
+                audio short length = 36266.7 -> 36266
+                this error is fine.
+                """
+                logger.info(f"audio from frame {clip_s_f_audio} to {clip_e_f_audio}, length {audio_short_length}")
+
+            n_filtered_out = defaultdict(int)
+            sample_pose_list = []
+            sample_audio_list = []
+            sample_facial_list = []
+            sample_shape_list = []
+            sample_word_list = []
+            sample_emo_list = []
+            sample_sem_list = []
+            sample_vid_list = []
+            sample_trans_list = []
+            sample_trans_v_list = []
+
+            for i in range(num_subdivision): # cut into around 2s chip, (self npose)
+                start_idx = clip_s_f_pose + i * self.args.stride
+                fin_idx = start_idx + cut_length
+                sample_pose = pose_each_file[start_idx:fin_idx]
+
+                sample_trans = trans_each_file[start_idx:fin_idx]
+                sample_trans_v = trans_v_each_file[start_idx:fin_idx]
+                sample_shape = shape_each_file[start_idx:fin_idx]
+                # print(sample_pose.shape)
+                if self.args.audio_rep is not None and audio_each_file != []:
+                    audio_start = clip_s_f_audio + math.floor(i * self.args.stride * self.args.audio_fps / self.args.pose_fps)
+                    audio_end = audio_start + audio_short_length
+                    sample_audio = audio_each_file[audio_start:audio_end]
+                else:
+                    sample_audio = np.array([-1])
+                sample_facial = facial_each_file[start_idx:fin_idx] if self.args.facial_rep is not None else np.array([-1])
+                sample_word = word_each_file[start_idx:fin_idx] if self.args.word_rep is not None else np.array([-1])
+                sample_emo = emo_each_file[start_idx:fin_idx] if self.args.emo_rep is not None else np.array([-1])
+                sample_sem = sem_each_file[start_idx:fin_idx] if self.args.sem_rep is not None else np.array([-1])
+                sample_vid = vid_each_file[start_idx:fin_idx] if self.args.id_rep is not None else np.array([-1])
+
+                if sample_pose.any() != None:
+                    # filtering motion skeleton data
+                    sample_pose, filtering_message = MotionPreprocessor(sample_pose).get()
+                    is_correct_motion = (sample_pose != [])
+                    if is_correct_motion or disable_filtering:
+                        sample_pose_list.append(sample_pose)
+                        sample_audio_list.append(sample_audio)
+                        sample_facial_list.append(sample_facial)
+                        sample_shape_list.append(sample_shape)
+                        sample_word_list.append(sample_word)
+                        sample_vid_list.append(sample_vid)
+                        sample_emo_list.append(sample_emo)
+                        sample_sem_list.append(sample_sem)
+                        sample_trans_list.append(sample_trans)
+                        sample_trans_v_list.append(sample_trans_v)
+                    else:
+                        n_filtered_out[filtering_message] += 1
+
+            if len(sample_pose_list) > 0:
+                with dst_lmdb_env.begin(write=True) as txn:
+                    for pose, audio, facial, shape, word, vid, emo, sem, trans,trans_v in zip(
+                        sample_pose_list,
+                        sample_audio_list,
+                        sample_facial_list,
+                        sample_shape_list,
+                        sample_word_list,
+                        sample_vid_list,
+                        sample_emo_list,
+                        sample_sem_list,
+                        sample_trans_list,
+                        sample_trans_v_list,):
+                        k = "{:005}".format(self.n_out_samples).encode("ascii")
+                        v = [pose, audio, facial, shape, word, emo, sem, vid, trans,trans_v]
+                        v = pickle.dumps(v,5)
+                        txn.put(k, v)
+                        self.n_out_samples += 1
+        return n_filtered_out
+
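For intuition on the windowing in _sample_from_clip: with 30 fps pose data, a 100 s recording trimmed to frames [0, 3000), cut_length 128 and stride 20 yields floor((3000 - 128) / 20) + 1 = 144 overlapping training windows. As a one-liner with those illustrative numbers:

    import math

    clip_s_f_pose, clip_e_f_pose = 0, 3000   # 100 s of 30 fps pose frames
    cut_length, stride = 128, 20
    print(math.floor((clip_e_f_pose - clip_s_f_pose - cut_length) / stride) + 1)  # 144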
+    def __getitem__(self, idx):
+        with self.lmdb_env.begin(write=False) as txn:
+            key = "{:005}".format(idx).encode("ascii")
+            sample = txn.get(key)
+            sample = pickle.loads(sample)
+            tar_pose, in_audio, in_facial, in_shape, in_word, emo, sem, vid, trans,trans_v = sample
+            #print(in_shape)
+            #vid = torch.from_numpy(vid).int()
+            emo = torch.from_numpy(emo).int()
+            sem = torch.from_numpy(sem).float()
+            in_audio = np.zeros([68266,2])
+            in_audio = torch.from_numpy(in_audio).float()
+            in_word = np.zeros([128])
+            in_facial = np.zeros([128,100])
+            in_word = torch.from_numpy(in_word).float() if self.args.word_cache else torch.from_numpy(in_word).int()
+            if self.loader_type == "test":
+                tar_pose = torch.from_numpy(tar_pose).float()
+                trans = torch.from_numpy(trans).float()
+                trans_v = torch.from_numpy(trans_v).float()
+                in_facial = torch.from_numpy(in_facial).float()
+                vid = torch.from_numpy(vid).float()
+                in_shape = torch.from_numpy(in_shape).float()
+            else:
+                in_shape = torch.from_numpy(in_shape).reshape((in_shape.shape[0], -1)).float()
+                trans = torch.from_numpy(trans).reshape((trans.shape[0], -1)).float()
+                trans_v = torch.from_numpy(trans_v).reshape((trans_v.shape[0], -1)).float()
+                vid = torch.from_numpy(vid).reshape((vid.shape[0], -1)).float()
+                tar_pose = torch.from_numpy(tar_pose).reshape((tar_pose.shape[0], -1)).float()
+                in_facial = torch.from_numpy(in_facial).reshape((in_facial.shape[0], -1)).float()
+            return {"pose":tar_pose, "audio":in_audio, "facial":in_facial, "beta": in_shape, "word":in_word, "id":vid, "emo":emo, "sem":sem, "trans":trans,"trans_v":trans_v}
+
+
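Each cached clip is a pickled list stored under a zero-padded ASCII key, so a cache can be inspected outside the Dataset class the same way __getitem__ reads it (the cache path below is a placeholder):

    import lmdb
    import pickle

    env = lmdb.open("path/to/smplxflame_30_cache", readonly=True, lock=False)
    with env.begin() as txn:
        raw = txn.get("{:005}".format(0).encode("ascii"))  # first sample
    pose, audio, facial, shape, word, emo, sem, vid, trans, trans_v = pickle.loads(raw)
    print(pose.shape)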
+class MotionPreprocessor:
+    def __init__(self, skeletons):
+        self.skeletons = skeletons
+        #self.mean_pose = mean_pose
+        self.filtering_message = "PASS"
+
+    def get(self):
+        assert (self.skeletons is not None)
+
+        # filtering
+        if self.skeletons != []:
+            if self.check_pose_diff():
+                self.skeletons = []
+                self.filtering_message = "pose"
+            # elif self.check_spine_angle():
+            #     self.skeletons = []
+            #     self.filtering_message = "spine angle"
+            # elif self.check_static_motion():
+            #     self.skeletons = []
+            #     self.filtering_message = "motion"
+
+        # if self.skeletons != []:
+        #     self.skeletons = self.skeletons.tolist()
+        #     for i, frame in enumerate(self.skeletons):
+        #         assert not np.isnan(self.skeletons[i]).any()  # missing joints
+
+        return self.skeletons, self.filtering_message
+
+    def check_static_motion(self, verbose=True):
+        def get_variance(skeleton, joint_idx):
+            wrist_pos = skeleton[:, joint_idx]
+            variance = np.sum(np.var(wrist_pos, axis=0))
+            return variance
+
+        left_arm_var = get_variance(self.skeletons, 6)
+        right_arm_var = get_variance(self.skeletons, 9)
+
+        th = 0.0014 # exclude 13110
+        # th = 0.002 # exclude 16905
+        if left_arm_var < th and right_arm_var < th:
+            if verbose:
+                print("skip - check_static_motion left var {}, right var {}".format(left_arm_var, right_arm_var))
+            return True
+        else:
+            if verbose:
+                print("pass - check_static_motion left var {}, right var {}".format(left_arm_var, right_arm_var))
+            return False
+
+
+    def check_pose_diff(self, verbose=False):
+        # diff = np.abs(self.skeletons - self.mean_pose) # 186*1
+        # diff = np.mean(diff)
+
+        # # th = 0.017
+        # th = 0.02 #0.02 # exclude 3594
+        # if diff < th:
+        #     if verbose:
+        #         print("skip - check_pose_diff {:.5f}".format(diff))
+        #     return True
+        # # th = 3.5 #0.02 # exclude 3594
+        # # if 3.5 < diff < 5:
+        # #     if verbose:
+        # #         print("skip - check_pose_diff {:.5f}".format(diff))
+        # #     return True
+        # else:
+        #     if verbose:
+        #         print("pass - check_pose_diff {:.5f}".format(diff))
+        return False
+
+
+    def check_spine_angle(self, verbose=True):
+        def angle_between(v1, v2):
+            v1_u = v1 / np.linalg.norm(v1)
+            v2_u = v2 / np.linalg.norm(v2)
+            return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
+
+        angles = []
+        for i in range(self.skeletons.shape[0]):
+            spine_vec = self.skeletons[i, 1] - self.skeletons[i, 0]
+            angle = angle_between(spine_vec, [0, -1, 0])
+            angles.append(angle)
+
+        if np.rad2deg(max(angles)) > 30 or np.rad2deg(np.mean(angles)) > 20: # exclude 4495
+        # if np.rad2deg(max(angles)) > 20: # exclude 8270
+            if verbose:
+                print("skip - check_spine_angle {:.5f}, {:.5f}".format(max(angles), np.mean(angles)))
+            return True
+        else:
+            if verbose:
+                print("pass - check_spine_angle {:.5f}".format(max(angles)))
+            return False
dataloaders/beat_sep.py ADDED
@@ -0,0 +1,772 @@
+import os
+import pickle
+import math
+import shutil
+import numpy as np
+import lmdb as lmdb
+import textgrid as tg
+import pandas as pd
+import torch
+import glob
+import json
+from termcolor import colored
+from loguru import logger
+from collections import defaultdict
+from torch.utils.data import Dataset
+import torch.distributed as dist
+#import pyarrow
+import pickle
+import librosa
+import smplx
+
+from .build_vocab import Vocab
+from .utils.audio_features import Wav2Vec2Model
+from .data_tools import joints_list
+from .utils import rotation_conversions as rc
+from .utils import other_tools
+
+class CustomDataset(Dataset):
+    def __init__(self, args, loader_type, augmentation=None, kwargs=None, build_cache=True):
+        self.args = args
+        self.loader_type = loader_type
+
+        self.rank = dist.get_rank()
+        self.ori_stride = self.args.stride
+        self.ori_length = self.args.pose_length
+        self.alignment = [0,0] # for trinity
+
+        self.ori_joint_list = joints_list[self.args.ori_joints]
+        self.tar_joint_list = joints_list[self.args.tar_joints]
+        if 'smplx' in self.args.pose_rep:
+            self.joint_mask = np.zeros(len(list(self.ori_joint_list.keys()))*3)
+            self.joints = len(list(self.tar_joint_list.keys()))
+            for joint_name in self.tar_joint_list:
+                self.joint_mask[self.ori_joint_list[joint_name][1] - self.ori_joint_list[joint_name][0]:self.ori_joint_list[joint_name][1]] = 1
+        else:
+            self.joints = len(list(self.ori_joint_list.keys()))+1
+            self.joint_mask = np.zeros(self.joints*3)
+            for joint_name in self.tar_joint_list:
+                if joint_name == "Hips":
+                    self.joint_mask[3:6] = 1
+                else:
+                    self.joint_mask[self.ori_joint_list[joint_name][1] - self.ori_joint_list[joint_name][0]:self.ori_joint_list[joint_name][1]] = 1
+        # select trainable joints
+
+        split_rule = pd.read_csv(args.data_path+"train_test_split.csv")
+        self.selected_file = split_rule.loc[(split_rule['type'] == loader_type) & (split_rule['id'].str.split("_").str[0].astype(int).isin(self.args.training_speakers))]
+        if args.additional_data and loader_type == 'train':
+            split_b = split_rule.loc[(split_rule['type'] == 'additional') & (split_rule['id'].str.split("_").str[0].astype(int).isin(self.args.training_speakers))]
+            #self.selected_file = split_rule.loc[(split_rule['type'] == 'additional') & (split_rule['id'].str.split("_").str[0].astype(int).isin(self.args.training_speakers))]
+            self.selected_file = pd.concat([self.selected_file, split_b])
+        if self.selected_file.empty:
+            logger.warning(f"{loader_type} is empty for speaker {self.args.training_speakers}, use train set 0-8 instead")
+            self.selected_file = split_rule.loc[(split_rule['type'] == 'train') & (split_rule['id'].str.split("_").str[0].astype(int).isin(self.args.training_speakers))]
+            self.selected_file = self.selected_file.iloc[0:8]
+        self.data_dir = args.data_path
+
+        if loader_type == "test":
+            self.args.multi_length_training = [1.0]
+        self.max_length = int(args.pose_length * self.args.multi_length_training[-1])
+        self.max_audio_pre_len = math.floor(args.pose_length / args.pose_fps * self.args.audio_sr)
+        if self.max_audio_pre_len > self.args.test_length*self.args.audio_sr:
+            self.max_audio_pre_len = self.args.test_length*self.args.audio_sr
+
+        if args.word_rep is not None:
+            with open(f"{args.data_path}weights/vocab.pkl", 'rb') as f:
+                self.lang_model = pickle.load(f)
+
+        preloaded_dir = self.args.root_path + self.args.cache_path + loader_type + f"/{args.pose_rep}_cache"
+        # if args.pose_norm:
+        #     # careful for rotation vectors
+        #     if not os.path.exists(args.data_path+args.mean_pose_path+f"{args.pose_rep.split('_')[0]}/bvh_mean.npy"):
+        #         self.calculate_mean_pose()
+        #     self.mean_pose = np.load(args.data_path+args.mean_pose_path+f"{args.pose_rep.split('_')[0]}/bvh_mean.npy")
+        #     self.std_pose = np.load(args.data_path+args.mean_pose_path+f"{args.pose_rep.split('_')[0]}/bvh_std.npy")
+        # if args.audio_norm:
+        #     if not os.path.exists(args.data_path+args.mean_pose_path+f"{args.audio_rep.split('_')[0]}/bvh_mean.npy"):
+        #         self.calculate_mean_audio()
+        #     self.mean_audio = np.load(args.data_path+args.mean_pose_path+f"{args.audio_rep.split('_')[0]}/npy_mean.npy")
+        #     self.std_audio = np.load(args.data_path+args.mean_pose_path+f"{args.audio_rep.split('_')[0]}/npy_std.npy")
+        # if args.facial_norm:
+        #     if not os.path.exists(args.data_path+args.mean_pose_path+f"{args.pose_rep.split('_')[0]}/bvh_mean.npy"):
+        #         self.calculate_mean_face()
+        #     self.mean_facial = np.load(args.data_path+args.mean_pose_path+f"{args.facial_rep}/json_mean.npy")
+        #     self.std_facial = np.load(args.data_path+args.mean_pose_path+f"{args.facial_rep}/json_std.npy")
+        if self.args.beat_align:
+            if not os.path.exists(args.data_path+f"weights/mean_vel_{args.pose_rep}.npy"):
+                self.calculate_mean_velocity(args.data_path+f"weights/mean_vel_{args.pose_rep}.npy")
+            self.avg_vel = np.load(args.data_path+f"weights/mean_vel_{args.pose_rep}.npy")
+
+        if build_cache and self.rank == 0:
+            self.build_cache(preloaded_dir)
+        self.lmdb_env = lmdb.open(preloaded_dir, readonly=True, lock=False)
+        with self.lmdb_env.begin() as txn:
+            self.n_samples = txn.stat()["entries"]
+
+
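The joint selection in this constructor treats each `joints_list` entry as a pair (channel count, end offset) into the flattened rotation vector, so `mask[end - count:end] = 1` switches a joint's three channels on. A toy sketch with a hypothetical three-entry layout:

import numpy as np

# hypothetical layout: {name: [n_channels, end_offset_in_flat_vector]}
ori = {"Hips": [3, 3], "Spine": [3, 6], "LeftArm": [3, 9]}
tar = ["LeftArm"]  # keep only this joint trainable

mask = np.zeros(len(ori) * 3)
for name in tar:
    n, end = ori[name]
    mask[end - n:end] = 1
print(mask)  # [0. 0. 0. 0. 0. 0. 1. 1. 1.]

The smplx branch of `cache_generation` below multiplies pose rows by this mask and then compacts them to the selected channels with `pose_each_file[:, self.joint_mask.astype(bool)]`.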
+    def calculate_mean_velocity(self, save_path):
+        self.smplx = smplx.create(
+            self.args.data_path_1+"smplx_models/",
+            model_type='smplx',
+            gender='NEUTRAL_2020',
+            use_face_contour=False,
+            num_betas=300,
+            num_expression_coeffs=100,
+            ext='npz',
+            use_pca=False,
+        ).cuda().eval()
+        dir_p = self.data_dir + self.args.pose_rep + "/"
+        all_list = []
+        from tqdm import tqdm
+        for tar in tqdm(os.listdir(dir_p)):
+            if tar.endswith(".npz"):
+                m_data = np.load(dir_p+tar, allow_pickle=True)
+                betas, poses, trans, exps = m_data["betas"], m_data["poses"], m_data["trans"], m_data["expressions"]
+                n, c = poses.shape[0], poses.shape[1]
+                betas = betas.reshape(1, 300)
+                betas = np.tile(betas, (n, 1))
+                betas = torch.from_numpy(betas).cuda().float()
+                poses = torch.from_numpy(poses.reshape(n, c)).cuda().float()
+                exps = torch.from_numpy(exps.reshape(n, 100)).cuda().float()
+                trans = torch.from_numpy(trans.reshape(n, 3)).cuda().float()
+                max_length = 128
+                s, r = n//max_length, n%max_length
+                #print(n, s, r)
+                all_tensor = []
+                for i in range(s):
+                    with torch.no_grad():
+                        joints = self.smplx(
+                            betas=betas[i*max_length:(i+1)*max_length],
+                            transl=trans[i*max_length:(i+1)*max_length],
+                            expression=exps[i*max_length:(i+1)*max_length],
+                            jaw_pose=poses[i*max_length:(i+1)*max_length, 66:69],
+                            global_orient=poses[i*max_length:(i+1)*max_length,:3],
+                            body_pose=poses[i*max_length:(i+1)*max_length,3:21*3+3],
+                            left_hand_pose=poses[i*max_length:(i+1)*max_length,25*3:40*3],
+                            right_hand_pose=poses[i*max_length:(i+1)*max_length,40*3:55*3],
+                            return_verts=True,
+                            return_joints=True,
+                            leye_pose=poses[i*max_length:(i+1)*max_length, 69:72],
+                            reye_pose=poses[i*max_length:(i+1)*max_length, 72:75],
+                        )['joints'][:, :55, :].reshape(max_length, 55*3)
+                    all_tensor.append(joints)
+                if r != 0:
+                    with torch.no_grad():
+                        joints = self.smplx(
+                            betas=betas[s*max_length:s*max_length+r],
+                            transl=trans[s*max_length:s*max_length+r],
+                            expression=exps[s*max_length:s*max_length+r],
+                            jaw_pose=poses[s*max_length:s*max_length+r, 66:69],
+                            global_orient=poses[s*max_length:s*max_length+r,:3],
+                            body_pose=poses[s*max_length:s*max_length+r,3:21*3+3],
+                            left_hand_pose=poses[s*max_length:s*max_length+r,25*3:40*3],
+                            right_hand_pose=poses[s*max_length:s*max_length+r,40*3:55*3],
+                            return_verts=True,
+                            return_joints=True,
+                            leye_pose=poses[s*max_length:s*max_length+r, 69:72],
+                            reye_pose=poses[s*max_length:s*max_length+r, 72:75],
+                        )['joints'][:, :55, :].reshape(r, 55*3)
+                    all_tensor.append(joints)
+                joints = torch.cat(all_tensor, axis=0)
+                joints = joints.permute(1, 0)
+                dt = 1/30
+                # first steps is forward diff (t+1 - t) / dt
+                init_vel = (joints[:, 1:2] - joints[:, :1]) / dt
+                # middle steps are second order (t+1 - t-1) / 2dt
+                middle_vel = (joints[:, 2:] - joints[:, 0:-2]) / (2 * dt)
+                # last step is backward diff (t - t-1) / dt
+                final_vel = (joints[:, -1:] - joints[:, -2:-1]) / dt
+                #print(joints.shape, init_vel.shape, middle_vel.shape, final_vel.shape)
+                vel_seq = torch.cat([init_vel, middle_vel, final_vel], dim=1).permute(1, 0).reshape(n, 55, 3)
+                #print(vel_seq.shape)
+                #.permute(1, 0).reshape(n, 55, 3)
+                vel_seq_np = vel_seq.cpu().numpy()
+                vel_joints_np = np.linalg.norm(vel_seq_np, axis=2) # n * 55
+                all_list.append(vel_joints_np)
+        avg_vel = np.mean(np.concatenate(all_list, axis=0),axis=0) # 55
+        np.save(save_path, avg_vel)
+
+
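`calculate_mean_velocity` estimates per-joint speed with a forward difference at the first frame, central differences in the middle, and a backward difference at the last frame, at dt = 1/30 s. The same stencil in isolation, on a 1-D trajectory:

import numpy as np

x = np.array([0.0, 1.0, 4.0, 9.0])   # positions sampled at 30 fps
dt = 1 / 30
v = np.empty_like(x)
v[0] = (x[1] - x[0]) / dt             # forward difference at the start
v[1:-1] = (x[2:] - x[:-2]) / (2*dt)   # central difference, O(dt^2), in the middle
v[-1] = (x[-1] - x[-2]) / dt          # backward difference at the end
print(v)  # [ 30.  60. 120. 150.]

The per-frame speeds are then reduced to a single 55-value mean, saved as mean_vel_{pose_rep}.npy and loaded back as `self.avg_vel` when `beat_align` is set.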
+    def build_cache(self, preloaded_dir):
+        logger.info(f"Audio bit rate: {self.args.audio_fps}")
+        logger.info("Reading data '{}'...".format(self.data_dir))
+        logger.info("Creating the dataset cache...")
+        if self.args.new_cache:
+            if os.path.exists(preloaded_dir):
+                shutil.rmtree(preloaded_dir)
+        if os.path.exists(preloaded_dir):
+            logger.info("Found the cache {}".format(preloaded_dir))
+        elif self.loader_type == "test":
+            self.cache_generation(
+                preloaded_dir, True,
+                0, 0,
+                is_test=True)
+        else:
+            self.cache_generation(
+                preloaded_dir, self.args.disable_filtering,
+                self.args.clean_first_seconds, self.args.clean_final_seconds,
+                is_test=False)
+
+    def __len__(self):
+        return self.n_samples
+
+
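One operational note on the cache lifecycle above: a cache is rebuilt only when its directory is missing (or `new_cache` forces an `rmtree` first), and the test split is always generated unfiltered. The LMDB environment opened in `cache_generation` below reserves its full `map_size` as address space up front; on Linux the data file grows sparsely, but on some platforms this can pre-allocate disk, so the constant is worth knowing about. A minimal sketch (the path is a placeholder):

import lmdb

GiB = 1024 ** 3
# map_size is an upper bound on database size, not necessarily an immediate allocation
env = lmdb.open("/tmp/example_cache", map_size=10 * GiB)
with env.begin(write=True) as txn:
    txn.put(b"00000", b"payload")
env.close()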
+    def cache_generation(self, out_lmdb_dir, disable_filtering, clean_first_seconds, clean_final_seconds, is_test=False):
+        # if "wav2vec2" in self.args.audio_rep:
+        #     self.wav2vec_model = Wav2Vec2Model.from_pretrained(f"{self.args.data_path_1}/hub/transformer/wav2vec2-base-960h")
+        #     self.wav2vec_model.feature_extractor._freeze_parameters()
+        #     self.wav2vec_model = self.wav2vec_model.cuda()
+        #     self.wav2vec_model.eval()
+
+        self.n_out_samples = 0
+        # create db for samples
+        if not os.path.exists(out_lmdb_dir): os.makedirs(out_lmdb_dir)
+        dst_lmdb_env = lmdb.open(out_lmdb_dir, map_size= int(1024 ** 3 * 50))# 50G
+        n_filtered_out = defaultdict(int)
+
+        for index, file_name in self.selected_file.iterrows():
+            f_name = file_name["id"]
+            ext = ".npz" if "smplx" in self.args.pose_rep else ".bvh"
+            pose_file = self.data_dir + self.args.pose_rep + "/" + f_name + ext
+            pose_each_file = []
+            trans_each_file = []
+            shape_each_file = []
+            audio_each_file = []
+            facial_each_file = []
+            word_each_file = []
+            emo_each_file = []
+            sem_each_file = []
+            vid_each_file = []
+            id_pose = f_name #1_wayne_0_1_1
+
+            logger.info(colored(f"# ---- Building cache for Pose {id_pose} ---- #", "blue"))
+            if "smplx" in self.args.pose_rep:
+                pose_data = np.load(pose_file, allow_pickle=True)
+                assert 30%self.args.pose_fps == 0, 'pose_fps should be an aliquot part of 30'
+                stride = int(30/self.args.pose_fps)
+                pose_each_file = pose_data["poses"][::stride] * self.joint_mask
+                pose_each_file = pose_each_file[:, self.joint_mask.astype(bool)]
+                # print(pose_each_file.shape)
+                trans_each_file = pose_data["trans"][::stride]
+                shape_each_file = np.repeat(pose_data["betas"].reshape(1, 300), pose_each_file.shape[0], axis=0)
+                if self.args.facial_rep is not None:
+                    logger.info(f"# ---- Building cache for Facial {id_pose} and Pose {id_pose} ---- #")
+                    facial_each_file = pose_data["expressions"][::stride]
+                    if self.args.facial_norm:
+                        facial_each_file = (facial_each_file - self.mean_facial) / self.std_facial
+
+            else:
+                assert 120%self.args.pose_fps == 0, 'pose_fps should be an aliquot part of 120'
+                stride = int(120/self.args.pose_fps)
+                with open(pose_file, "r") as pose_data:
+                    for j, line in enumerate(pose_data.readlines()):
+                        if j < 431: continue
+                        if j%stride != 0:continue
+                        data = np.fromstring(line, dtype=float, sep=" ")
+                        rot_data = rc.euler_angles_to_matrix(torch.from_numpy(np.deg2rad(data)).reshape(-1, self.joints,3), "XYZ")
+                        rot_data = rc.matrix_to_axis_angle(rot_data).reshape(-1, self.joints*3)
+                        rot_data = rot_data.numpy() * self.joint_mask
+
+                        pose_each_file.append(rot_data)
+                        trans_each_file.append(data[:3])
+
+                pose_each_file = np.array(pose_each_file)
+                # print(pose_each_file.shape)
+                trans_each_file = np.array(trans_each_file)
+                shape_each_file = np.repeat(np.array(-1).reshape(1, 1), pose_each_file.shape[0], axis=0)
+                if self.args.facial_rep is not None:
+                    logger.info(f"# ---- Building cache for Facial {id_pose} and Pose {id_pose} ---- #")
+                    facial_file = pose_file.replace(self.args.pose_rep, self.args.facial_rep).replace("bvh", "json")
+                    assert 60%self.args.pose_fps == 0, 'pose_fps should be an aliquot part of 120'
+                    stride = int(60/self.args.pose_fps)
+                    if not os.path.exists(facial_file):
+                        logger.warning(f"# ---- file not found for Facial {id_pose}, skip all files with the same id ---- #")
+                        self.selected_file = self.selected_file.drop(self.selected_file[self.selected_file['id'] == id_pose].index)
+                        continue
+                    with open(facial_file, 'r') as facial_data_file:
+                        facial_data = json.load(facial_data_file)
+                        for j, frame_data in enumerate(facial_data['frames']):
+                            if j%stride != 0:continue
+                            facial_each_file.append(frame_data['weights'])
+                    facial_each_file = np.array(facial_each_file)
+                    if self.args.facial_norm:
+                        facial_each_file = (facial_each_file - self.mean_facial) / self.std_facial
+
+            if self.args.id_rep is not None:
+                vid_each_file = np.repeat(np.array(int(f_name.split("_")[0])-1).reshape(1, 1), pose_each_file.shape[0], axis=0)
+
+            if self.args.audio_rep is not None:
+                logger.info(f"# ---- Building cache for Audio {id_pose} and Pose {id_pose} ---- #")
+                audio_file = pose_file.replace(self.args.pose_rep, 'wave16k').replace(ext, ".wav")
+                if not os.path.exists(audio_file):
+                    logger.warning(f"# ---- file not found for Audio {id_pose}, skip all files with the same id ---- #")
+                    self.selected_file = self.selected_file.drop(self.selected_file[self.selected_file['id'] == id_pose].index)
+                    continue
+                audio_each_file, sr = librosa.load(audio_file)
+                audio_each_file = librosa.resample(audio_each_file, orig_sr=sr, target_sr=self.args.audio_sr)
+                if self.args.audio_rep == "onset+amplitude":
+                    from numpy.lib import stride_tricks
+                    frame_length = 1024
+                    # hop_length = 512
+                    shape = (audio_each_file.shape[-1] - frame_length + 1, frame_length)
+                    strides = (audio_each_file.strides[-1], audio_each_file.strides[-1])
+                    rolling_view = stride_tricks.as_strided(audio_each_file, shape=shape, strides=strides)
+                    amplitude_envelope = np.max(np.abs(rolling_view), axis=1)
+                    # pad the last frame_length-1 samples
+                    amplitude_envelope = np.pad(amplitude_envelope, (0, frame_length-1), mode='constant', constant_values=amplitude_envelope[-1])
+                    audio_onset_f = librosa.onset.onset_detect(y=audio_each_file, sr=self.args.audio_sr, units='frames')
+                    onset_array = np.zeros(len(audio_each_file), dtype=float)
+                    onset_array[audio_onset_f] = 1.0
+                    # print(amplitude_envelope.shape, audio_each_file.shape, onset_array.shape)
+                    audio_each_file = np.concatenate([amplitude_envelope.reshape(-1, 1), onset_array.reshape(-1, 1)], axis=1)
+                elif self.args.audio_rep == "mfcc":
+                    audio_each_file = librosa.feature.melspectrogram(y=audio_each_file, sr=self.args.audio_sr, n_mels=128, hop_length=int(self.args.audio_sr/self.args.audio_fps))
+                    audio_each_file = audio_each_file.transpose(1, 0)
+                    # print(audio_each_file.shape, pose_each_file.shape)
+                if self.args.audio_norm and self.args.audio_rep == "wave16k":
+                    audio_each_file = (audio_each_file - self.mean_audio) / self.std_audio
+                # print(audio_each_file.shape)
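The "onset+amplitude" branch above computes a per-sample amplitude envelope by sliding a 1024-sample window over the waveform with a hop of one sample (via `as_strided`), taking each window's max, then padding back to the input length; onsets become a 0/1 track and the two are stacked into a (samples, 2) feature. An equivalent sketch using the bounds-checked `sliding_window_view` (NumPy >= 1.20):

import numpy as np

audio = np.sin(np.linspace(0, 20, 16000)).astype(np.float32)
frame = 1024
windows = np.lib.stride_tricks.sliding_window_view(audio, frame)  # (N - frame + 1, frame)
envelope = np.max(np.abs(windows), axis=1)
envelope = np.pad(envelope, (0, frame - 1), mode="constant",
                  constant_values=envelope[-1])
assert envelope.shape == audio.shape

One caveat worth flagging: `onset_detect(..., units='frames')` returns STFT-frame indices (default hop 512), while `onset_array` here is indexed per raw sample, so the onset marks sit on a different time base than the envelope unless that scaling is intended.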
+            time_offset = 0
+            if self.args.word_rep is not None:
+                logger.info(f"# ---- Building cache for Word {id_pose} and Pose {id_pose} ---- #")
+                word_file = f"{self.data_dir}{self.args.word_rep}/{id_pose}.TextGrid"
+                if not os.path.exists(word_file):
+                    logger.warning(f"# ---- file not found for Word {id_pose}, skip all files with the same id ---- #")
+                    self.selected_file = self.selected_file.drop(self.selected_file[self.selected_file['id'] == id_pose].index)
+                    continue
+                tgrid = tg.TextGrid.fromFile(word_file)
+                if self.args.t_pre_encoder == "bert":
+                    from transformers import AutoTokenizer, BertModel
+                    tokenizer = AutoTokenizer.from_pretrained(self.args.data_path_1 + "hub/bert-base-uncased", local_files_only=True)
+                    model = BertModel.from_pretrained(self.args.data_path_1 + "hub/bert-base-uncased", local_files_only=True).eval()
+                    list_word = []
+                    all_hidden = []
+                    max_len = 400
+                    last = 0
+                    word_token_mapping = []
+                    first = True
+                    for i, word in enumerate(tgrid[0]):
+                        last = i
+                        if (i%max_len != 0) or (i==0):
+                            if word.mark == "":
+                                list_word.append(".")
+                            else:
+                                list_word.append(word.mark)
+                        else:
+                            max_counter = max_len
+                            str_word = ' '.join(map(str, list_word))
+                            if first:
+                                global_len = 0
+                            end = -1
+                            offset_word = []
+                            for k, wordvalue in enumerate(list_word):
+                                start = end+1
+                                end = start+len(wordvalue)
+                                offset_word.append((start, end))
+                            #print(offset_word)
+                            token_scan = tokenizer.encode_plus(str_word, return_offsets_mapping=True)['offset_mapping']
+                            #print(token_scan)
+                            for start, end in offset_word:
+                                sub_mapping = []
+                                for i, (start_t, end_t) in enumerate(token_scan[1:-1]):
+                                    if int(start) <= int(start_t) and int(end_t) <= int(end):
+                                        #print(i+global_len)
+                                        sub_mapping.append(i+global_len)
+                                word_token_mapping.append(sub_mapping)
+                            #print(len(word_token_mapping))
+                            global_len = word_token_mapping[-1][-1] + 1
+                            list_word = []
+                            if word.mark == "":
+                                list_word.append(".")
+                            else:
+                                list_word.append(word.mark)
+
+                            with torch.no_grad():
+                                inputs = tokenizer(str_word, return_tensors="pt")
+                                outputs = model(**inputs)
+                                last_hidden_states = outputs.last_hidden_state.reshape(-1, 768).cpu().numpy()[1:-1, :]
+                            all_hidden.append(last_hidden_states)
+
+                    #list_word = list_word[:10]
+                    if list_word == []:
+                        pass
+                    else:
+                        if first:
+                            global_len = 0
+                        str_word = ' '.join(map(str, list_word))
+                        end = -1
+                        offset_word = []
+                        for k, wordvalue in enumerate(list_word):
+                            start = end+1
+                            end = start+len(wordvalue)
+                            offset_word.append((start, end))
+                        #print(offset_word)
+                        token_scan = tokenizer.encode_plus(str_word, return_offsets_mapping=True)['offset_mapping']
+                        #print(token_scan)
+                        for start, end in offset_word:
+                            sub_mapping = []
+                            for i, (start_t, end_t) in enumerate(token_scan[1:-1]):
+                                if int(start) <= int(start_t) and int(end_t) <= int(end):
+                                    sub_mapping.append(i+global_len)
+                                    #print(sub_mapping)
+                            word_token_mapping.append(sub_mapping)
+                        #print(len(word_token_mapping))
+                        with torch.no_grad():
+                            inputs = tokenizer(str_word, return_tensors="pt")
+                            outputs = model(**inputs)
+                            last_hidden_states = outputs.last_hidden_state.reshape(-1, 768).cpu().numpy()[1:-1, :]
+                        all_hidden.append(last_hidden_states)
+                    last_hidden_states = np.concatenate(all_hidden, axis=0)
+
+                for i in range(pose_each_file.shape[0]):
+                    found_flag = False
+                    current_time = i/self.args.pose_fps + time_offset
+                    j_last = 0
+                    for j, word in enumerate(tgrid[0]):
+                        word_n, word_s, word_e = word.mark, word.minTime, word.maxTime
+                        if word_s<=current_time and current_time<=word_e:
+                            if self.args.word_cache and self.args.t_pre_encoder == 'bert':
+                                mapping_index = word_token_mapping[j]
+                                #print(mapping_index, word_s, word_e)
+                                s_t = np.linspace(word_s, word_e, len(mapping_index)+1)
+                                #print(s_t)
+                                for tt, t_sep in enumerate(s_t[1:]):
+                                    if current_time <= t_sep:
+                                        #if len(mapping_index) > 1: print(mapping_index[tt])
+                                        word_each_file.append(last_hidden_states[mapping_index[tt]])
+                                        break
+                            else:
+                                if word_n == " ":
+                                    word_each_file.append(self.lang_model.PAD_token)
+                                else:
+                                    word_each_file.append(self.lang_model.get_word_index(word_n))
+                            found_flag = True
+                            j_last = j
+                            break
+                        else: continue
+                    if not found_flag:
+                        if self.args.word_cache and self.args.t_pre_encoder == 'bert':
+                            word_each_file.append(last_hidden_states[j_last])
+                        else:
+                            word_each_file.append(self.lang_model.UNK_token)
+                word_each_file = np.array(word_each_file)
+                #print(word_each_file.shape)
+
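The BERT path above aligns TextGrid words to wordpiece tokens by reconstructing each word's character span inside the space-joined string and intersecting those spans with the tokenizer's `return_offsets_mapping` output (skipping the [CLS]/[SEP] positions via `[1:-1]`). The mapping step in isolation, with the public checkpoint standing in for the local `hub/bert-base-uncased` copy used by the commit:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
words = ["hello", "world", "tokenization"]
text = " ".join(words)

# character span of each word inside the joined string, mirroring offset_word above
spans, end = [], -1
for w in words:
    start = end + 1
    end = start + len(w)
    spans.append((start, end))

offsets = tokenizer.encode_plus(text, return_offsets_mapping=True)["offset_mapping"]
for start, end in spans:
    toks = [i for i, (s, t) in enumerate(offsets[1:-1]) if start <= s and t <= end]
    print(toks)  # wordpiece indices of this word, e.g. [2, 3] for "tokenization"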
+            if self.args.emo_rep is not None:
+                logger.info(f"# ---- Building cache for Emo {id_pose} and Pose {id_pose} ---- #")
+                rtype, start = int(id_pose.split('_')[3]), int(id_pose.split('_')[3])
+                if rtype == 0 or rtype == 2 or rtype == 4 or rtype == 6:
+                    if start >= 1 and start <= 64:
+                        score = 0
+                    elif start >= 65 and start <= 72:
+                        score = 1
+                    elif start >= 73 and start <= 80:
+                        score = 2
+                    elif start >= 81 and start <= 86:
+                        score = 3
+                    elif start >= 87 and start <= 94:
+                        score = 4
+                    elif start >= 95 and start <= 102:
+                        score = 5
+                    elif start >= 103 and start <= 110:
+                        score = 6
+                    elif start >= 111 and start <= 118:
+                        score = 7
+                    else: pass
+                else:
+                    # you may denote as unknown in the future
+                    score = 0
+                emo_each_file = np.repeat(np.array(score).reshape(1, 1), pose_each_file.shape[0], axis=0)
+                #print(emo_each_file)
+
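The elif ladder above encodes a range table: for recording types 0/2/4/6, indices 1–64 map to score 0, then the bands 65–72, 73–80, ..., 111–118 map to 1–7; other types default to 0. Two details to note: `rtype` and `start` are parsed from the same underscore field of `id_pose`, and the trailing `else: pass` leaves `score` unbound if the index falls outside 1–118 for an even type. The same table as a compact lookup, offered only as an alternative sketch (the `% 2` generalization beyond type 6 is an assumption):

import bisect

bounds = [64, 72, 80, 86, 94, 102, 110, 118]  # inclusive upper bound of each band

def emo_score(rtype: int, start: int) -> int:
    if rtype % 2 != 0 or not (1 <= start <= 118):
        return 0  # odd types and out-of-range indices default to neutral
    return bisect.bisect_left(bounds, start)

assert emo_score(0, 64) == 0 and emo_score(0, 65) == 1 and emo_score(2, 118) == 7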
+            if self.args.sem_rep is not None:
+                logger.info(f"# ---- Building cache for Sem {id_pose} and Pose {id_pose} ---- #")
+                sem_file = f"{self.data_dir}{self.args.sem_rep}/{id_pose}.txt"
+                sem_all = pd.read_csv(sem_file,
+                    sep='\t',
+                    names=["name", "start_time", "end_time", "duration", "score", "keywords"])
+                # we adopt motion-level semantic score here.
+                for i in range(pose_each_file.shape[0]):
+                    found_flag = False
+                    for j, (start, end, score) in enumerate(zip(sem_all['start_time'],sem_all['end_time'], sem_all['score'])):
+                        current_time = i/self.args.pose_fps + time_offset
+                        if start<=current_time and current_time<=end:
+                            sem_each_file.append(score)
+                            found_flag=True
+                            break
+                        else: continue
+                    if not found_flag: sem_each_file.append(0.)
+                sem_each_file = np.array(sem_each_file)
+                #print(sem_each_file)
+
+            filtered_result = self._sample_from_clip(
+                dst_lmdb_env,
+                audio_each_file, pose_each_file, trans_each_file, shape_each_file, facial_each_file, word_each_file,
+                vid_each_file, emo_each_file, sem_each_file,
+                disable_filtering, clean_first_seconds, clean_final_seconds, is_test,
+            )
+            for type in filtered_result.keys():
+                n_filtered_out[type] += filtered_result[type]
+
+        with dst_lmdb_env.begin() as txn:
+            logger.info(colored(f"no. of samples: {txn.stat()['entries']}", "cyan"))
+            n_total_filtered = 0
+            for type, n_filtered in n_filtered_out.items():
+                logger.info("{}: {}".format(type, n_filtered))
+                n_total_filtered += n_filtered
+            logger.info(colored("no. of excluded samples: {} ({:.1f}%)".format(
+                n_total_filtered, 100 * n_total_filtered / (txn.stat()["entries"] + n_total_filtered)), "cyan"))
+        dst_lmdb_env.sync()
+        dst_lmdb_env.close()
+
+    def _sample_from_clip(
+        self, dst_lmdb_env, audio_each_file, pose_each_file, trans_each_file, shape_each_file, facial_each_file, word_each_file,
+        vid_each_file, emo_each_file, sem_each_file,
+        disable_filtering, clean_first_seconds, clean_final_seconds, is_test,
+        ):
+        """
+        for data cleaning, we ignore the data for first and final n s
+        for test, we return all data
+        """
+        # audio_start = int(self.alignment[0] * self.args.audio_fps)
+        # pose_start = int(self.alignment[1] * self.args.pose_fps)
+        #logger.info(f"before: {audio_each_file.shape} {pose_each_file.shape}")
+        # audio_each_file = audio_each_file[audio_start:]
+        # pose_each_file = pose_each_file[pose_start:]
+        # trans_each_file =
+        #logger.info(f"after alignment: {audio_each_file.shape} {pose_each_file.shape}")
+        #print(pose_each_file.shape)
+        round_seconds_skeleton = pose_each_file.shape[0] // self.args.pose_fps  # assume 1500 frames / 15 fps = 100 s
+        #print(round_seconds_skeleton)
+        if audio_each_file != []:
+            if self.args.audio_rep != "wave16k":
+                round_seconds_audio = len(audio_each_file) // self.args.audio_fps  # assume 16,000,00 / 16,000 = 100 s
+            elif self.args.audio_rep == "mfcc":
+                round_seconds_audio = audio_each_file.shape[0] // self.args.audio_fps
+            else:
+                round_seconds_audio = audio_each_file.shape[0] // self.args.audio_sr
+            if facial_each_file != []:
+                round_seconds_facial = facial_each_file.shape[0] // self.args.pose_fps
+                logger.info(f"audio: {round_seconds_audio}s, pose: {round_seconds_skeleton}s, facial: {round_seconds_facial}s")
+                round_seconds_skeleton = min(round_seconds_audio, round_seconds_skeleton, round_seconds_facial)
+                max_round = max(round_seconds_audio, round_seconds_skeleton, round_seconds_facial)
+                if round_seconds_skeleton != max_round:
+                    logger.warning(f"reduce to {round_seconds_skeleton}s, ignore {max_round-round_seconds_skeleton}s")
+            else:
+                logger.info(f"pose: {round_seconds_skeleton}s, audio: {round_seconds_audio}s")
+                round_seconds_skeleton = min(round_seconds_audio, round_seconds_skeleton)
+                max_round = max(round_seconds_audio, round_seconds_skeleton)
+                if round_seconds_skeleton != max_round:
+                    logger.warning(f"reduce to {round_seconds_skeleton}s, ignore {max_round-round_seconds_skeleton}s")
+
+        clip_s_t, clip_e_t = clean_first_seconds, round_seconds_skeleton - clean_final_seconds  # assume [10, 90]s
+        clip_s_f_audio, clip_e_f_audio = self.args.audio_fps * clip_s_t, clip_e_t * self.args.audio_fps  # [160,000,90*160,000]
+        clip_s_f_pose, clip_e_f_pose = clip_s_t * self.args.pose_fps, clip_e_t * self.args.pose_fps  # [150,90*15]
+
+
+        for ratio in self.args.multi_length_training:
+            if is_test:# stride = length for test
+                cut_length = clip_e_f_pose - clip_s_f_pose
+                self.args.stride = cut_length
+                self.max_length = cut_length
+            else:
+                self.args.stride = int(ratio*self.ori_stride)
+                cut_length = int(self.ori_length*ratio)
+
+            num_subdivision = math.floor((clip_e_f_pose - clip_s_f_pose - cut_length) / self.args.stride) + 1
+            logger.info(f"pose from frame {clip_s_f_pose} to {clip_e_f_pose}, length {cut_length}")
+            logger.info(f"{num_subdivision} clips is expected with stride {self.args.stride}")
+
+            if audio_each_file != []:
+                audio_short_length = math.floor(cut_length / self.args.pose_fps * self.args.audio_fps)
+                """
+                for audio sr = 16000, fps = 15, pose_length = 34,
+                audio short length = 36266.7 -> 36266
+                this error is fine.
+                """
+                logger.info(f"audio from frame {clip_s_f_audio} to {clip_e_f_audio}, length {audio_short_length}")
+
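The window count above is the standard sliding-window formula, floor((span - window) / stride) + 1, and the audio window length simply rescales the pose window by the ratio of audio to pose rates. A worked example reproducing the docstring's numbers (stride = 10 and the [10 s, 90 s] clip window are assumed values):

import math

pose_fps, audio_fps = 15, 16000
cut_length, stride = 34, 10
clip_s_f_pose, clip_e_f_pose = 150, 1350   # 10 s .. 90 s at 15 fps

num_subdivision = math.floor((clip_e_f_pose - clip_s_f_pose - cut_length) / stride) + 1
audio_short_length = math.floor(cut_length / pose_fps * audio_fps)
print(num_subdivision)     # 117 windows
print(audio_short_length)  # 36266 samples: 34 frames of 1/15 s each at 16 kHz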
+            n_filtered_out = defaultdict(int)
+            sample_pose_list = []
+            sample_audio_list = []
+            sample_facial_list = []
+            sample_shape_list = []
+            sample_word_list = []
+            sample_emo_list = []
+            sample_sem_list = []
+            sample_vid_list = []
+            sample_trans_list = []
+
+            for i in range(num_subdivision):  # cut into around 2s chip, (self npose)
+                start_idx = clip_s_f_pose + i * self.args.stride
+                fin_idx = start_idx + cut_length
+                sample_pose = pose_each_file[start_idx:fin_idx]
+                sample_trans = trans_each_file[start_idx:fin_idx]
+                sample_shape = shape_each_file[start_idx:fin_idx]
+                # print(sample_pose.shape)
+                if self.args.audio_rep is not None:
+                    audio_start = clip_s_f_audio + math.floor(i * self.args.stride * self.args.audio_fps / self.args.pose_fps)
+                    audio_end = audio_start + audio_short_length
+                    sample_audio = audio_each_file[audio_start:audio_end]
+                else:
+                    sample_audio = np.array([-1])
+                sample_facial = facial_each_file[start_idx:fin_idx] if self.args.facial_rep is not None else np.array([-1])
+                sample_word = word_each_file[start_idx:fin_idx] if self.args.word_rep is not None else np.array([-1])
+                sample_emo = emo_each_file[start_idx:fin_idx] if self.args.emo_rep is not None else np.array([-1])
+                sample_sem = sem_each_file[start_idx:fin_idx] if self.args.sem_rep is not None else np.array([-1])
+                sample_vid = vid_each_file[start_idx:fin_idx] if self.args.id_rep is not None else np.array([-1])
+
+                if sample_pose.any() != None:
+                    # filtering motion skeleton data
+                    sample_pose, filtering_message = MotionPreprocessor(sample_pose).get()
+                    is_correct_motion = (sample_pose != [])
+                    if is_correct_motion or disable_filtering:
+                        sample_pose_list.append(sample_pose)
+                        sample_audio_list.append(sample_audio)
+                        sample_facial_list.append(sample_facial)
+                        sample_shape_list.append(sample_shape)
+                        sample_word_list.append(sample_word)
+                        sample_vid_list.append(sample_vid)
+                        sample_emo_list.append(sample_emo)
+                        sample_sem_list.append(sample_sem)
+                        sample_trans_list.append(sample_trans)
+                    else:
+                        n_filtered_out[filtering_message] += 1
+
+            if len(sample_pose_list) > 0:
+                with dst_lmdb_env.begin(write=True) as txn:
+                    for pose, audio, facial, shape, word, vid, emo, sem, trans in zip(
+                        sample_pose_list,
+                        sample_audio_list,
+                        sample_facial_list,
+                        sample_shape_list,
+                        sample_word_list,
+                        sample_vid_list,
+                        sample_emo_list,
+                        sample_sem_list,
+                        sample_trans_list,):
+                        k = "{:005}".format(self.n_out_samples).encode("ascii")
+                        v = [pose, audio, facial, shape, word, emo, sem, vid, trans]
+                        v = pickle.dumps(v,5)
+                        txn.put(k, v)
+                        self.n_out_samples += 1
+        return n_filtered_out
+
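A review note on the guards in `_sample_from_clip`: `audio_each_file != []` and `(sample_pose != [])` compare NumPy arrays against an empty list, which NumPy resolves as a failed elementwise comparison (a scalar with a DeprecationWarning on older releases, with behavior that has tightened in newer ones), and `sample_pose.any() != None` is always truthy because `.any()` returns a bool. They happen to work here, but an explicit emptiness test is sturdier; a sketch, not a drop-in change to the commit:

import numpy as np

def has_data(x) -> bool:
    # explicit emptiness check that behaves the same for lists and ndarrays
    if isinstance(x, np.ndarray):
        return x.size > 0
    return len(x) > 0

assert has_data(np.ones((3, 2)))
assert not has_data([]) and not has_data(np.empty(0))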
+    def __getitem__(self, idx):
+        with self.lmdb_env.begin(write=False) as txn:
+            key = "{:005}".format(idx).encode("ascii")
+            sample = txn.get(key)
+            sample = pickle.loads(sample)
+            tar_pose, in_audio, in_facial, in_shape, in_word, emo, sem, vid, trans = sample
+            #print(in_shape)
+            #vid = torch.from_numpy(vid).int()
+            emo = torch.from_numpy(emo).int()
+            sem = torch.from_numpy(sem).float()
+            in_audio = torch.from_numpy(in_audio).float()
+            in_word = torch.from_numpy(in_word).float() if self.args.word_cache else torch.from_numpy(in_word).int()
+            if self.loader_type == "test":
+                tar_pose = torch.from_numpy(tar_pose).float()
+                trans = torch.from_numpy(trans).float()
+                in_facial = torch.from_numpy(in_facial).float()
+                vid = torch.from_numpy(vid).float()
+                in_shape = torch.from_numpy(in_shape).float()
+            else:
+                in_shape = torch.from_numpy(in_shape).reshape((in_shape.shape[0], -1)).float()
+                trans = torch.from_numpy(trans).reshape((trans.shape[0], -1)).float()
+                vid = torch.from_numpy(vid).reshape((vid.shape[0], -1)).float()
+                tar_pose = torch.from_numpy(tar_pose).reshape((tar_pose.shape[0], -1)).float()
+                in_facial = torch.from_numpy(in_facial).reshape((in_facial.shape[0], -1)).float()
+            return {"pose":tar_pose, "audio":in_audio, "facial":in_facial, "beta": in_shape, "word":in_word, "id":vid, "emo":emo, "sem":sem, "trans":trans}
+
+
+class MotionPreprocessor:
+    def __init__(self, skeletons):
+        self.skeletons = skeletons
+        #self.mean_pose = mean_pose
+        self.filtering_message = "PASS"
+
+    def get(self):
+        assert (self.skeletons is not None)
+
+        # filtering
+        if self.skeletons != []:
+            if self.check_pose_diff():
+                self.skeletons = []
+                self.filtering_message = "pose"
+            # elif self.check_spine_angle():
+            #     self.skeletons = []
+            #     self.filtering_message = "spine angle"
+            # elif self.check_static_motion():
+            #     self.skeletons = []
+            #     self.filtering_message = "motion"
+
+        # if self.skeletons != []:
+        #     self.skeletons = self.skeletons.tolist()
+        #     for i, frame in enumerate(self.skeletons):
+        #         assert not np.isnan(self.skeletons[i]).any()  # missing joints
+
+        return self.skeletons, self.filtering_message
+
+    def check_static_motion(self, verbose=True):
+        def get_variance(skeleton, joint_idx):
+            wrist_pos = skeleton[:, joint_idx]
+            variance = np.sum(np.var(wrist_pos, axis=0))
+            return variance
+
+        left_arm_var = get_variance(self.skeletons, 6)
+        right_arm_var = get_variance(self.skeletons, 9)
+
+        th = 0.0014 # exclude 13110
+        # th = 0.002 # exclude 16905
+        if left_arm_var < th and right_arm_var < th:
+            if verbose:
+                print("skip - check_static_motion left var {}, right var {}".format(left_arm_var, right_arm_var))
+            return True
+        else:
+            if verbose:
+                print("pass - check_static_motion left var {}, right var {}".format(left_arm_var, right_arm_var))
+            return False
+
+
+    def check_pose_diff(self, verbose=False):
+        # diff = np.abs(self.skeletons - self.mean_pose) # 186*1
+        # diff = np.mean(diff)
+
+        # # th = 0.017
+        # th = 0.02 #0.02 # exclude 3594
+        # if diff < th:
+        #     if verbose:
+        #         print("skip - check_pose_diff {:.5f}".format(diff))
+        #     return True
+        # # th = 3.5 #0.02 # exclude 3594
+        # # if 3.5 < diff < 5:
+        # #     if verbose:
+        # #         print("skip - check_pose_diff {:.5f}".format(diff))
+        # #     return True
+        # else:
+        #     if verbose:
+        #         print("pass - check_pose_diff {:.5f}".format(diff))
+        return False
+
+
+    def check_spine_angle(self, verbose=True):
+        def angle_between(v1, v2):
+            v1_u = v1 / np.linalg.norm(v1)
+            v2_u = v2 / np.linalg.norm(v2)
+            return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
+
+        angles = []
+        for i in range(self.skeletons.shape[0]):
+            spine_vec = self.skeletons[i, 1] - self.skeletons[i, 0]
+            angle = angle_between(spine_vec, [0, -1, 0])
+            angles.append(angle)
+
+        if np.rad2deg(max(angles)) > 30 or np.rad2deg(np.mean(angles)) > 20: # exclude 4495
+        # if np.rad2deg(max(angles)) > 20: # exclude 8270
+            if verbose:
+                print("skip - check_spine_angle {:.5f}, {:.5f}".format(max(angles), np.mean(angles)))
+            return True
+        else:
+            if verbose:
+                print("pass - check_spine_angle {:.5f}".format(max(angles)))
+            return False
dataloaders/beat_sep_lower.py ADDED
@@ -0,0 +1,876 @@
1 |
+
import os
|
2 |
+
import pickle
|
3 |
+
import math
|
4 |
+
import shutil
|
5 |
+
import numpy as np
|
6 |
+
import lmdb as lmdb
|
7 |
+
import textgrid as tg
|
8 |
+
import pandas as pd
|
9 |
+
import torch
|
10 |
+
import glob
|
11 |
+
import json
|
12 |
+
from termcolor import colored
|
13 |
+
from loguru import logger
|
14 |
+
from collections import defaultdict
|
15 |
+
from torch.utils.data import Dataset
|
16 |
+
import torch.distributed as dist
|
17 |
+
#import pyarrow
|
18 |
+
import pickle
|
19 |
+
import librosa
|
20 |
+
import smplx
|
21 |
+
|
22 |
+
from .build_vocab import Vocab
|
23 |
+
from .utils.audio_features import Wav2Vec2Model
|
24 |
+
from .data_tools import joints_list
|
25 |
+
from .utils import rotation_conversions as rc
|
26 |
+
from .utils import other_tools
|
27 |
+
|
28 |
+
class CustomDataset(Dataset):
|
29 |
+
def __init__(self, args, loader_type, augmentation=None, kwargs=None, build_cache=True):
|
30 |
+
self.args = args
|
31 |
+
self.loader_type = loader_type
|
32 |
+
|
33 |
+
self.rank = dist.get_rank()
|
34 |
+
self.ori_stride = self.args.stride
|
35 |
+
self.ori_length = self.args.pose_length
|
36 |
+
self.alignment = [0,0] # for trinity
|
37 |
+
|
38 |
+
self.ori_joint_list = joints_list[self.args.ori_joints]
|
39 |
+
self.tar_joint_list = joints_list[self.args.tar_joints]
|
40 |
+
if 'smplx' in self.args.pose_rep:
|
41 |
+
self.joint_mask = np.zeros(len(list(self.ori_joint_list.keys()))*3)
|
42 |
+
self.joints = len(list(self.tar_joint_list.keys()))
|
43 |
+
for joint_name in self.tar_joint_list:
|
44 |
+
self.joint_mask[self.ori_joint_list[joint_name][1] - self.ori_joint_list[joint_name][0]:self.ori_joint_list[joint_name][1]] = 1
|
45 |
+
else:
|
46 |
+
self.joints = len(list(self.ori_joint_list.keys()))+1
|
47 |
+
self.joint_mask = np.zeros(self.joints*3)
|
48 |
+
for joint_name in self.tar_joint_list:
|
49 |
+
if joint_name == "Hips":
|
50 |
+
self.joint_mask[3:6] = 1
|
51 |
+
else:
|
52 |
+
self.joint_mask[self.ori_joint_list[joint_name][1] - self.ori_joint_list[joint_name][0]:self.ori_joint_list[joint_name][1]] = 1
|
53 |
+
# select trainable joints
|
54 |
+
self.smplx = smplx.create(
|
55 |
+
self.args.data_path_1+"smplx_models/",
|
56 |
+
model_type='smplx',
|
57 |
+
gender='NEUTRAL_2020',
|
58 |
+
use_face_contour=False,
|
59 |
+
num_betas=300,
|
60 |
+
num_expression_coeffs=100,
|
61 |
+
ext='npz',
|
62 |
+
use_pca=False,
|
63 |
+
).cuda().eval()
|
64 |
+
|
65 |
+
split_rule = pd.read_csv(args.data_path+"train_test_split.csv")
|
66 |
+
self.selected_file = split_rule.loc[(split_rule['type'] == loader_type) & (split_rule['id'].str.split("_").str[0].astype(int).isin(self.args.training_speakers))]
|
67 |
+
if args.additional_data and loader_type == 'train':
|
68 |
+
split_b = split_rule.loc[(split_rule['type'] == 'additional') & (split_rule['id'].str.split("_").str[0].astype(int).isin(self.args.training_speakers))]
|
69 |
+
#self.selected_file = split_rule.loc[(split_rule['type'] == 'additional') & (split_rule['id'].str.split("_").str[0].astype(int).isin(self.args.training_speakers))]
|
70 |
+
self.selected_file = pd.concat([self.selected_file, split_b])
|
71 |
+
if self.selected_file.empty:
|
72 |
+
logger.warning(f"{loader_type} is empty for speaker {self.args.training_speakers}, use train set 0-8 instead")
|
73 |
+
self.selected_file = split_rule.loc[(split_rule['type'] == 'train') & (split_rule['id'].str.split("_").str[0].astype(int).isin(self.args.training_speakers))]
|
74 |
+
self.selected_file = self.selected_file.iloc[0:8]
|
75 |
+
self.data_dir = args.data_path
|
76 |
+
|
77 |
+
if loader_type == "test":
|
78 |
+
self.args.multi_length_training = [1.0]
|
79 |
+
self.max_length = int(args.pose_length * self.args.multi_length_training[-1])
|
80 |
+
self.max_audio_pre_len = math.floor(args.pose_length / args.pose_fps * self.args.audio_sr)
|
81 |
+
if self.max_audio_pre_len > self.args.test_length*self.args.audio_sr:
|
82 |
+
self.max_audio_pre_len = self.args.test_length*self.args.audio_sr
|
83 |
+
|
84 |
+
if args.word_rep is not None:
|
85 |
+
with open(f"{args.data_path}weights/vocab.pkl", 'rb') as f:
|
86 |
+
self.lang_model = pickle.load(f)
|
87 |
+
|
88 |
+
preloaded_dir = self.args.root_path + self.args.cache_path + loader_type + f"/{args.pose_rep}_cache"
|
89 |
+
# if args.pose_norm:
|
90 |
+
# # careful for rotation vectors
|
91 |
+
# if not os.path.exists(args.data_path+args.mean_pose_path+f"{args.pose_rep.split('_')[0]}/bvh_mean.npy"):
|
92 |
+
# self.calculate_mean_pose()
|
93 |
+
# self.mean_pose = np.load(args.data_path+args.mean_pose_path+f"{args.pose_rep.split('_')[0]}/bvh_mean.npy")
|
94 |
+
# self.std_pose = np.load(args.data_path+args.mean_pose_path+f"{args.pose_rep.split('_')[0]}/bvh_std.npy")
|
95 |
+
# if args.audio_norm:
|
96 |
+
# if not os.path.exists(args.data_path+args.mean_pose_path+f"{args.audio_rep.split('_')[0]}/bvh_mean.npy"):
|
97 |
+
# self.calculate_mean_audio()
|
98 |
+
# self.mean_audio = np.load(args.data_path+args.mean_pose_path+f"{args.audio_rep.split('_')[0]}/npy_mean.npy")
|
99 |
+
# self.std_audio = np.load(args.data_path+args.mean_pose_path+f"{args.audio_rep.split('_')[0]}/npy_std.npy")
|
100 |
+
# if args.facial_norm:
|
101 |
+
# if not os.path.exists(args.data_path+args.mean_pose_path+f"{args.pose_rep.split('_')[0]}/bvh_mean.npy"):
|
102 |
+
# self.calculate_mean_face()
|
103 |
+
# self.mean_facial = np.load(args.data_path+args.mean_pose_path+f"{args.facial_rep}/json_mean.npy")
|
104 |
+
# self.std_facial = np.load(args.data_path+args.mean_pose_path+f"{args.facial_rep}/json_std.npy")
|
105 |
+
if self.args.beat_align:
|
106 |
+
if not os.path.exists(args.data_path+f"weights/mean_vel_{args.pose_rep}.npy"):
|
107 |
+
self.calculate_mean_velocity(args.data_path+f"weights/mean_vel_{args.pose_rep}.npy")
|
108 |
+
self.avg_vel = np.load(args.data_path+f"weights/mean_vel_{args.pose_rep}.npy")
|
109 |
+
|
110 |
+
if build_cache and self.rank == 0:
|
111 |
+
self.build_cache(preloaded_dir)
|
112 |
+
self.lmdb_env = lmdb.open(preloaded_dir, readonly=True, lock=False)
|
113 |
+
with self.lmdb_env.begin() as txn:
|
114 |
+
self.n_samples = txn.stat()["entries"]
|
115 |
+
|
116 |
+
|
    def calculate_mean_velocity(self, save_path):
        self.smplx = smplx.create(
            self.args.data_path_1+"smplx_models/",
            model_type='smplx',
            gender='NEUTRAL_2020',
            use_face_contour=False,
            num_betas=300,
            num_expression_coeffs=100,
            ext='npz',
            use_pca=False,
        ).cuda().eval()
        dir_p = self.data_dir + self.args.pose_rep + "/"
        all_list = []
        from tqdm import tqdm
        for tar in tqdm(os.listdir(dir_p)):
            if tar.endswith(".npz"):
                m_data = np.load(dir_p+tar, allow_pickle=True)
                betas, poses, trans, exps = m_data["betas"], m_data["poses"], m_data["trans"], m_data["expressions"]
                n, c = poses.shape[0], poses.shape[1]
                betas = betas.reshape(1, 300)
                betas = np.tile(betas, (n, 1))
                betas = torch.from_numpy(betas).cuda().float()
                poses = torch.from_numpy(poses.reshape(n, c)).cuda().float()
                exps = torch.from_numpy(exps.reshape(n, 100)).cuda().float()
                trans = torch.from_numpy(trans.reshape(n, 3)).cuda().float()
                max_length = 128  # run SMPL-X in fixed-size chunks to bound GPU memory
                s, r = n//max_length, n%max_length
                all_tensor = []
                for i in range(s):
                    with torch.no_grad():
                        joints = self.smplx(
                            betas=betas[i*max_length:(i+1)*max_length],
                            transl=trans[i*max_length:(i+1)*max_length],
                            expression=exps[i*max_length:(i+1)*max_length],
                            jaw_pose=poses[i*max_length:(i+1)*max_length, 66:69],
                            global_orient=poses[i*max_length:(i+1)*max_length, :3],
                            body_pose=poses[i*max_length:(i+1)*max_length, 3:21*3+3],
                            left_hand_pose=poses[i*max_length:(i+1)*max_length, 25*3:40*3],
                            right_hand_pose=poses[i*max_length:(i+1)*max_length, 40*3:55*3],
                            return_verts=True,
                            return_joints=True,
                            leye_pose=poses[i*max_length:(i+1)*max_length, 69:72],
                            reye_pose=poses[i*max_length:(i+1)*max_length, 72:75],
                        )['joints'][:, :55, :].reshape(max_length, 55*3)
                    all_tensor.append(joints)
                if r != 0:
                    with torch.no_grad():
                        joints = self.smplx(
                            betas=betas[s*max_length:s*max_length+r],
                            transl=trans[s*max_length:s*max_length+r],
                            expression=exps[s*max_length:s*max_length+r],
                            jaw_pose=poses[s*max_length:s*max_length+r, 66:69],
                            global_orient=poses[s*max_length:s*max_length+r, :3],
                            body_pose=poses[s*max_length:s*max_length+r, 3:21*3+3],
                            left_hand_pose=poses[s*max_length:s*max_length+r, 25*3:40*3],
                            right_hand_pose=poses[s*max_length:s*max_length+r, 40*3:55*3],
                            return_verts=True,
                            return_joints=True,
                            leye_pose=poses[s*max_length:s*max_length+r, 69:72],
                            reye_pose=poses[s*max_length:s*max_length+r, 72:75],
                        )['joints'][:, :55, :].reshape(r, 55*3)
                    all_tensor.append(joints)
                joints = torch.cat(all_tensor, axis=0)
                joints = joints.permute(1, 0)
                dt = 1/30
                # first step is a forward difference: (t+1 - t) / dt
                init_vel = (joints[:, 1:2] - joints[:, :1]) / dt
                # middle steps are second-order central differences: (t+1 - t-1) / 2dt
                middle_vel = (joints[:, 2:] - joints[:, 0:-2]) / (2 * dt)
                # last step is a backward difference: (t - t-1) / dt
                final_vel = (joints[:, -1:] - joints[:, -2:-1]) / dt
                vel_seq = torch.cat([init_vel, middle_vel, final_vel], dim=1).permute(1, 0).reshape(n, 55, 3)
                vel_seq_np = vel_seq.cpu().numpy()
                vel_joints_np = np.linalg.norm(vel_seq_np, axis=2)  # n * 55
                all_list.append(vel_joints_np)
        avg_vel = np.mean(np.concatenate(all_list, axis=0), axis=0)  # 55
        np.save(save_path, avg_vel)
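    # The finite-difference scheme above is second-order accurate in the interior and
    # first-order at the two endpoints. A minimal sketch on a toy 1-D trajectory
    # (illustrative values only, not part of this repo):
    #   >>> x = torch.tensor([[0., 1., 4., 9.]]); dt = 1.0   # x = t**2 sampled at t = 0..3
    #   >>> init = (x[:, 1:2] - x[:, :1]) / dt               # tensor([[1.]])
    #   >>> mid = (x[:, 2:] - x[:, :-2]) / (2 * dt)          # tensor([[2., 4.]])
    #   >>> fin = (x[:, -1:] - x[:, -2:-1]) / dt             # tensor([[5.]])
    #   >>> torch.cat([init, mid, fin], dim=1)               # tensor([[1., 2., 4., 5.]]), true derivative is [0, 2, 4, 6]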
    def build_cache(self, preloaded_dir):
        logger.info(f"Audio bit rate: {self.args.audio_fps}")
        logger.info("Reading data '{}'...".format(self.data_dir))
        logger.info("Creating the dataset cache...")
        if self.args.new_cache:
            if os.path.exists(preloaded_dir):
                shutil.rmtree(preloaded_dir)
        if os.path.exists(preloaded_dir):
            logger.info("Found the cache {}".format(preloaded_dir))
        elif self.loader_type == "test":
            self.cache_generation(
                preloaded_dir, True,
                0, 0,
                is_test=True)
        else:
            self.cache_generation(
                preloaded_dir, self.args.disable_filtering,
                self.args.clean_first_seconds, self.args.clean_final_seconds,
                is_test=False)

    def __len__(self):
        return self.n_samples
    def cache_generation(self, out_lmdb_dir, disable_filtering, clean_first_seconds, clean_final_seconds, is_test=False):
        # if "wav2vec2" in self.args.audio_rep:
        #     self.wav2vec_model = Wav2Vec2Model.from_pretrained(f"{self.args.data_path_1}/hub/transformer/wav2vec2-base-960h")
        #     self.wav2vec_model.feature_extractor._freeze_parameters()
        #     self.wav2vec_model = self.wav2vec_model.cuda()
        #     self.wav2vec_model.eval()

        self.n_out_samples = 0
        # create db for samples
        if not os.path.exists(out_lmdb_dir): os.makedirs(out_lmdb_dir)
        dst_lmdb_env = lmdb.open(out_lmdb_dir, map_size=int(1024 ** 3 * 500))  # 500 GB
        n_filtered_out = defaultdict(int)

        for index, file_name in self.selected_file.iterrows():
            f_name = file_name["id"]
            ext = ".npz" if "smplx" in self.args.pose_rep else ".bvh"
            pose_file = self.data_dir + self.args.pose_rep + "/" + f_name + ext
            pose_each_file = []
            trans_each_file = []
            trans_v_each_file = []
            shape_each_file = []
            audio_each_file = []
            facial_each_file = []
            word_each_file = []
            emo_each_file = []
            sem_each_file = []
            vid_each_file = []
            id_pose = f_name  # id format example: 1_wayne_0_1_1

            logger.info(colored(f"# ---- Building cache for Pose {id_pose} ---- #", "blue"))
            if "smplx" in self.args.pose_rep:
                pose_data = np.load(pose_file, allow_pickle=True)
                assert 30 % self.args.pose_fps == 0, 'pose_fps should evenly divide 30'
                stride = int(30/self.args.pose_fps)
                pose_each_file = pose_data["poses"][::stride]
                trans_each_file = pose_data["trans"][::stride]
                trans_each_file[:, 0] = trans_each_file[:, 0] - trans_each_file[0, 0]
                trans_each_file[:, 2] = trans_each_file[:, 2] - trans_each_file[0, 2]
                trans_v_each_file = np.zeros_like(trans_each_file)
                trans_v_each_file[1:, 0] = trans_each_file[1:, 0] - trans_each_file[:-1, 0]
                trans_v_each_file[0, 0] = trans_v_each_file[1, 0]
                trans_v_each_file[1:, 2] = trans_each_file[1:, 2] - trans_each_file[:-1, 2]
                trans_v_each_file[0, 2] = trans_v_each_file[1, 2]
                trans_v_each_file[:, 1] = trans_each_file[:, 1]
                shape_each_file = np.repeat(pose_data["betas"].reshape(1, 300), pose_each_file.shape[0], axis=0)

                assert self.args.pose_fps == 30, "pose_fps should be 30"
                m_data = np.load(pose_file, allow_pickle=True)
                betas, poses, trans, exps = m_data["betas"], m_data["poses"], m_data["trans"], m_data["expressions"]
                n, c = poses.shape[0], poses.shape[1]
                betas = betas.reshape(1, 300)
                betas = np.tile(betas, (n, 1))
                betas = torch.from_numpy(betas).cuda().float()
                poses = torch.from_numpy(poses.reshape(n, c)).cuda().float()
                exps = torch.from_numpy(exps.reshape(n, 100)).cuda().float()
                trans = torch.from_numpy(trans.reshape(n, 3)).cuda().float()
                max_length = 128  # why a max_length here: run SMPL-X in fixed-size chunks to bound GPU memory
                s, r = n//max_length, n%max_length
                all_tensor = []
                for i in range(s):
                    with torch.no_grad():
                        joints = self.smplx(
                            betas=betas[i*max_length:(i+1)*max_length],
                            transl=trans[i*max_length:(i+1)*max_length],
                            expression=exps[i*max_length:(i+1)*max_length],
                            jaw_pose=poses[i*max_length:(i+1)*max_length, 66:69],
                            global_orient=poses[i*max_length:(i+1)*max_length, :3],
                            body_pose=poses[i*max_length:(i+1)*max_length, 3:21*3+3],
                            left_hand_pose=poses[i*max_length:(i+1)*max_length, 25*3:40*3],
                            right_hand_pose=poses[i*max_length:(i+1)*max_length, 40*3:55*3],
                            return_verts=True,
                            return_joints=True,
                            leye_pose=poses[i*max_length:(i+1)*max_length, 69:72],
                            reye_pose=poses[i*max_length:(i+1)*max_length, 72:75],
                        )['joints'][:, (7, 8, 10, 11), :].reshape(max_length, 4, 3).cpu()
                    all_tensor.append(joints)
                if r != 0:
                    with torch.no_grad():
                        joints = self.smplx(
                            betas=betas[s*max_length:s*max_length+r],
                            transl=trans[s*max_length:s*max_length+r],
                            expression=exps[s*max_length:s*max_length+r],
                            jaw_pose=poses[s*max_length:s*max_length+r, 66:69],
                            global_orient=poses[s*max_length:s*max_length+r, :3],
                            body_pose=poses[s*max_length:s*max_length+r, 3:21*3+3],
                            left_hand_pose=poses[s*max_length:s*max_length+r, 25*3:40*3],
                            right_hand_pose=poses[s*max_length:s*max_length+r, 40*3:55*3],
                            return_verts=True,
                            return_joints=True,
                            leye_pose=poses[s*max_length:s*max_length+r, 69:72],
                            reye_pose=poses[s*max_length:s*max_length+r, 72:75],
                        )['joints'][:, (7, 8, 10, 11), :].reshape(r, 4, 3).cpu()
                    all_tensor.append(joints)
                joints = torch.cat(all_tensor, axis=0)  # all, 4, 3
                feetv = torch.zeros(joints.shape[1], joints.shape[0])
                joints = joints.permute(1, 0, 2)
                feetv[:, :-1] = (joints[:, 1:] - joints[:, :-1]).norm(dim=-1)
                contacts = (feetv < 0.01).numpy().astype(float)
                contacts = contacts.transpose(1, 0)
                pose_each_file = pose_each_file * self.joint_mask
                pose_each_file = pose_each_file[:, self.joint_mask.astype(bool)]
                pose_each_file = np.concatenate([pose_each_file, contacts], axis=1)
+
if self.args.facial_rep is not None:
|
335 |
+
logger.info(f"# ---- Building cache for Facial {id_pose} and Pose {id_pose} ---- #")
|
336 |
+
facial_each_file = pose_data["expressions"][::stride]
|
337 |
+
if self.args.facial_norm:
|
338 |
+
facial_each_file = (facial_each_file - self.mean_facial) / self.std_facial
|
339 |
+
|
340 |
+
else:
|
341 |
+
assert 120%self.args.pose_fps == 0, 'pose_fps should be an aliquot part of 120'
|
342 |
+
stride = int(120/self.args.pose_fps)
|
343 |
+
with open(pose_file, "r") as pose_data:
|
344 |
+
for j, line in enumerate(pose_data.readlines()):
|
345 |
+
if j < 431: continue
|
346 |
+
if j%stride != 0:continue
|
347 |
+
data = np.fromstring(line, dtype=float, sep=" ")
|
348 |
+
rot_data = rc.euler_angles_to_matrix(torch.from_numpy(np.deg2rad(data)).reshape(-1, self.joints,3), "XYZ")
|
349 |
+
rot_data = rc.matrix_to_axis_angle(rot_data).reshape(-1, self.joints*3)
|
350 |
+
rot_data = rot_data.numpy() * self.joint_mask
|
351 |
+
|
352 |
+
pose_each_file.append(rot_data)
|
353 |
+
trans_each_file.append(data[:3])
|
354 |
+
|
355 |
+
pose_each_file = np.array(pose_each_file)
|
356 |
+
# print(pose_each_file.shape)
|
357 |
+
trans_each_file = np.array(trans_each_file)
|
358 |
+
shape_each_file = np.repeat(np.array(-1).reshape(1, 1), pose_each_file.shape[0], axis=0)
|
359 |
+
if self.args.facial_rep is not None:
|
360 |
+
logger.info(f"# ---- Building cache for Facial {id_pose} and Pose {id_pose} ---- #")
|
361 |
+
facial_file = pose_file.replace(self.args.pose_rep, self.args.facial_rep).replace("bvh", "json")
|
362 |
+
assert 60%self.args.pose_fps == 0, 'pose_fps should be an aliquot part of 120'
|
363 |
+
stride = int(60/self.args.pose_fps)
|
364 |
+
if not os.path.exists(facial_file):
|
365 |
+
logger.warning(f"# ---- file not found for Facial {id_pose}, skip all files with the same id ---- #")
|
366 |
+
self.selected_file = self.selected_file.drop(self.selected_file[self.selected_file['id'] == id_pose].index)
|
367 |
+
continue
|
368 |
+
with open(facial_file, 'r') as facial_data_file:
|
369 |
+
facial_data = json.load(facial_data_file)
|
370 |
+
for j, frame_data in enumerate(facial_data['frames']):
|
371 |
+
if j%stride != 0:continue
|
372 |
+
facial_each_file.append(frame_data['weights'])
|
373 |
+
facial_each_file = np.array(facial_each_file)
|
374 |
+
if self.args.facial_norm:
|
375 |
+
facial_each_file = (facial_each_file - self.mean_facial) / self.std_facial
|
376 |
+
|
377 |
+
if self.args.id_rep is not None:
|
378 |
+
vid_each_file = np.repeat(np.array(int(f_name.split("_")[0])-1).reshape(1, 1), pose_each_file.shape[0], axis=0)
|
379 |
+
|
            if self.args.audio_rep is not None:
                logger.info(f"# ---- Building cache for Audio {id_pose} and Pose {id_pose} ---- #")
                audio_file = pose_file.replace(self.args.pose_rep, 'wave16k').replace(ext, ".wav")
                if not os.path.exists(audio_file):
                    logger.warning(f"# ---- file not found for Audio {id_pose}, skip all files with the same id ---- #")
                    self.selected_file = self.selected_file.drop(self.selected_file[self.selected_file['id'] == id_pose].index)
                    continue
                audio_save_path = audio_file.replace("wave16k", "onset_amplitude").replace(".wav", ".npy")
                if self.args.audio_rep == "onset+amplitude" and os.path.exists(audio_save_path):
                    audio_each_file = np.load(audio_save_path)
                    logger.warning(f"# ---- found cached Audio features for {id_pose} ---- #")
                elif self.args.audio_rep == "onset+amplitude":
                    audio_each_file, sr = librosa.load(audio_file)
                    audio_each_file = librosa.resample(audio_each_file, orig_sr=sr, target_sr=self.args.audio_sr)
                    from numpy.lib import stride_tricks
                    frame_length = 1024
                    # hop_length = 512
                    shape = (audio_each_file.shape[-1] - frame_length + 1, frame_length)
                    strides = (audio_each_file.strides[-1], audio_each_file.strides[-1])
                    rolling_view = stride_tricks.as_strided(audio_each_file, shape=shape, strides=strides)
                    amplitude_envelope = np.max(np.abs(rolling_view), axis=1)
                    # pad the last frame_length-1 samples
                    amplitude_envelope = np.pad(amplitude_envelope, (0, frame_length-1), mode='constant', constant_values=amplitude_envelope[-1])
                    # note: units='frames' returns STFT-frame indices (hop 512 by default),
                    # yet onset_array below is indexed per raw sample
                    audio_onset_f = librosa.onset.onset_detect(y=audio_each_file, sr=self.args.audio_sr, units='frames')
                    onset_array = np.zeros(len(audio_each_file), dtype=float)
                    onset_array[audio_onset_f] = 1.0
                    audio_each_file = np.concatenate([amplitude_envelope.reshape(-1, 1), onset_array.reshape(-1, 1)], axis=1)
                    np.save(audio_save_path, audio_each_file)

                elif self.args.audio_rep == "mfcc":
                    audio_each_file = librosa.feature.melspectrogram(y=audio_each_file, sr=self.args.audio_sr, n_mels=128, hop_length=int(self.args.audio_sr/self.args.audio_fps))
                    audio_each_file = audio_each_file.transpose(1, 0)
                if self.args.audio_norm and self.args.audio_rep == "wave16k":
                    audio_each_file = (audio_each_file - self.mean_audio) / self.std_audio
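            # The "onset+amplitude" feature is therefore a (n_samples, 2) array: column 0
            # is a per-sample amplitude envelope (max |x| over a 1024-sample window),
            # column 1 a binary onset indicator. A minimal sketch of the envelope trick
            # with NumPy's sliding window (illustrative only, not part of this repo):
            #   >>> x = np.array([0., 1., -2., 0.5])
            #   >>> w = np.lib.stride_tricks.sliding_window_view(x, 2)
            #   >>> np.max(np.abs(w), axis=1)
            #   array([1., 2., 2.])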
            time_offset = 0
            if self.args.word_rep is not None:
                logger.info(f"# ---- Building cache for Word {id_pose} and Pose {id_pose} ---- #")
                word_file = f"{self.data_dir}{self.args.word_rep}/{id_pose}.TextGrid"
                if not os.path.exists(word_file):
                    logger.warning(f"# ---- file not found for Word {id_pose}, skip all files with the same id ---- #")
                    self.selected_file = self.selected_file.drop(self.selected_file[self.selected_file['id'] == id_pose].index)
                    continue
                word_save_path = f"{self.data_dir}{self.args.t_pre_encoder}/{id_pose}.npy"
                if os.path.exists(word_save_path):
                    word_each_file = np.load(word_save_path)
                    logger.warning(f"# ---- found cached Word features for {id_pose} ---- #")
                else:
                    tgrid = tg.TextGrid.fromFile(word_file)
                    if self.args.t_pre_encoder == "bert":
                        from transformers import AutoTokenizer, BertModel
                        tokenizer = AutoTokenizer.from_pretrained(self.args.data_path_1 + "hub/bert-base-uncased", local_files_only=True)
                        model = BertModel.from_pretrained(self.args.data_path_1 + "hub/bert-base-uncased", local_files_only=True).eval()
                        list_word = []
                        all_hidden = []
                        max_len = 400
                        last = 0
                        word_token_mapping = []
                        first = True
                        for i, word in enumerate(tgrid[0]):
                            last = i
                            if (i % max_len != 0) or (i == 0):
                                if word.mark == "":
                                    list_word.append(".")
                                else:
                                    list_word.append(word.mark)
                            else:  # flush a full chunk of max_len words through BERT
                                max_counter = max_len
                                str_word = ' '.join(map(str, list_word))
                                if first:
                                    global_len = 0
                                end = -1
                                offset_word = []
                                for k, wordvalue in enumerate(list_word):
                                    start = end+1
                                    end = start+len(wordvalue)
                                    offset_word.append((start, end))
                                token_scan = tokenizer.encode_plus(str_word, return_offsets_mapping=True)['offset_mapping']
                                for start, end in offset_word:
                                    sub_mapping = []
                                    # use a separate index here: the enclosing loop's `i` must not be shadowed
                                    for ti, (start_t, end_t) in enumerate(token_scan[1:-1]):
                                        if int(start) <= int(start_t) and int(end_t) <= int(end):
                                            sub_mapping.append(ti+global_len)
                                    word_token_mapping.append(sub_mapping)
                                global_len = word_token_mapping[-1][-1] + 1
                                list_word = []
                                if word.mark == "":
                                    list_word.append(".")
                                else:
                                    list_word.append(word.mark)

                                with torch.no_grad():
                                    inputs = tokenizer(str_word, return_tensors="pt")
                                    outputs = model(**inputs)
                                    last_hidden_states = outputs.last_hidden_state.reshape(-1, 768).cpu().numpy()[1:-1, :]
                                all_hidden.append(last_hidden_states)

                        if list_word == []:
                            pass
                        else:  # flush the trailing partial chunk
                            if first:
                                global_len = 0
                            str_word = ' '.join(map(str, list_word))
                            end = -1
                            offset_word = []
                            for k, wordvalue in enumerate(list_word):
                                start = end+1
                                end = start+len(wordvalue)
                                offset_word.append((start, end))
                            token_scan = tokenizer.encode_plus(str_word, return_offsets_mapping=True)['offset_mapping']
                            for start, end in offset_word:
                                sub_mapping = []
                                for ti, (start_t, end_t) in enumerate(token_scan[1:-1]):
                                    if int(start) <= int(start_t) and int(end_t) <= int(end):
                                        sub_mapping.append(ti+global_len)
                                word_token_mapping.append(sub_mapping)
                            with torch.no_grad():
                                inputs = tokenizer(str_word, return_tensors="pt")
                                outputs = model(**inputs)
                                last_hidden_states = outputs.last_hidden_state.reshape(-1, 768).cpu().numpy()[1:-1, :]
                            all_hidden.append(last_hidden_states)
                        last_hidden_states = np.concatenate(all_hidden, axis=0)

                    for i in range(pose_each_file.shape[0]):
                        found_flag = False
                        current_time = i/self.args.pose_fps + time_offset
                        j_last = 0
                        for j, word in enumerate(tgrid[0]):
                            word_n, word_s, word_e = word.mark, word.minTime, word.maxTime
                            if word_s <= current_time and current_time <= word_e:
                                if self.args.word_cache and self.args.t_pre_encoder == 'bert':
                                    mapping_index = word_token_mapping[j]
                                    # split the word's time span evenly across its sub-tokens
                                    s_t = np.linspace(word_s, word_e, len(mapping_index)+1)
                                    for tt, t_sep in enumerate(s_t[1:]):
                                        if current_time <= t_sep:
                                            word_each_file.append(last_hidden_states[mapping_index[tt]])
                                            break
                                else:
                                    if word_n == " ":
                                        word_each_file.append(self.lang_model.PAD_token)
                                    else:
                                        word_each_file.append(self.lang_model.get_word_index(word_n))
                                found_flag = True
                                j_last = j
                                break
                            else: continue
                        if not found_flag:
                            if self.args.word_cache and self.args.t_pre_encoder == 'bert':
                                word_each_file.append(last_hidden_states[j_last])
                            else:
                                word_each_file.append(self.lang_model.UNK_token)
                    word_each_file = np.array(word_each_file)
                    np.save(word_save_path, word_each_file)
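            # Character offsets from `encode_plus(..., return_offsets_mapping=True)` map
            # each TextGrid word to its WordPiece token indices, so a word's time span
            # can be divided evenly across its sub-tokens. A minimal sketch (illustrative,
            # assuming a fast bert-base-uncased tokenizer is available):
            #   >>> enc = tokenizer.encode_plus("hello world", return_offsets_mapping=True)
            #   >>> enc['offset_mapping']   # (0, 0) entries are the [CLS]/[SEP] specials
            #   [(0, 0), (0, 5), (6, 11), (0, 0)]
            #   The word spanning characters [0, 5) thus maps to index 0 of the stripped
            #   token list, and [6, 11) to index 1.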
            if self.args.emo_rep is not None:
                logger.info(f"# ---- Building cache for Emo {id_pose} and Pose {id_pose} ---- #")
                rtype, start = int(id_pose.split('_')[3]), int(id_pose.split('_')[3])
                if rtype == 0 or rtype == 2 or rtype == 4 or rtype == 6:
                    if start >= 1 and start <= 64:
                        score = 0
                    elif start >= 65 and start <= 72:
                        score = 1
                    elif start >= 73 and start <= 80:
                        score = 2
                    elif start >= 81 and start <= 86:
                        score = 3
                    elif start >= 87 and start <= 94:
                        score = 4
                    elif start >= 95 and start <= 102:
                        score = 5
                    elif start >= 103 and start <= 110:
                        score = 6
                    elif start >= 111 and start <= 118:
                        score = 7
                    else: pass
                else:
                    # you may denote as unknown in the future
                    score = 0
                emo_each_file = np.repeat(np.array(score).reshape(1, 1), pose_each_file.shape[0], axis=0)

            if self.args.sem_rep is not None:
                logger.info(f"# ---- Building cache for Sem {id_pose} and Pose {id_pose} ---- #")
                sem_file = f"{self.data_dir}{self.args.sem_rep}/{id_pose}.txt"
                sem_all = pd.read_csv(sem_file,
                    sep='\t',
                    names=["name", "start_time", "end_time", "duration", "score", "keywords"])
                # we adopt motion-level semantic score here.
                for i in range(pose_each_file.shape[0]):
                    found_flag = False
                    for j, (start, end, score) in enumerate(zip(sem_all['start_time'], sem_all['end_time'], sem_all['score'])):
                        current_time = i/self.args.pose_fps + time_offset
                        if start <= current_time and current_time <= end:
                            sem_each_file.append(score)
                            found_flag = True
                            break
                        else: continue
                    if not found_flag: sem_each_file.append(0.)
                sem_each_file = np.array(sem_each_file)

            filtered_result = self._sample_from_clip(
                dst_lmdb_env,
                audio_each_file, pose_each_file, trans_each_file, trans_v_each_file, shape_each_file, facial_each_file, word_each_file,
                vid_each_file, emo_each_file, sem_each_file,
                disable_filtering, clean_first_seconds, clean_final_seconds, is_test,
            )
            for type in filtered_result.keys():
                n_filtered_out[type] += filtered_result[type]

        with dst_lmdb_env.begin() as txn:
            logger.info(colored(f"no. of samples: {txn.stat()['entries']}", "cyan"))
            n_total_filtered = 0
            for type, n_filtered in n_filtered_out.items():
                logger.info("{}: {}".format(type, n_filtered))
                n_total_filtered += n_filtered
            logger.info(colored("no. of excluded samples: {} ({:.1f}%)".format(
                n_total_filtered, 100 * n_total_filtered / (txn.stat()["entries"] + n_total_filtered)), "cyan"))
        dst_lmdb_env.sync()
        dst_lmdb_env.close()
    def _sample_from_clip(
        self, dst_lmdb_env, audio_each_file, pose_each_file, trans_each_file, trans_v_each_file, shape_each_file, facial_each_file, word_each_file,
        vid_each_file, emo_each_file, sem_each_file,
        disable_filtering, clean_first_seconds, clean_final_seconds, is_test,
        ):
        """
        for data cleaning, we ignore the data for the first and final n seconds
        for test, we return all data
        """
        # audio_start = int(self.alignment[0] * self.args.audio_fps)
        # pose_start = int(self.alignment[1] * self.args.pose_fps)
        # audio_each_file = audio_each_file[audio_start:]
        # pose_each_file = pose_each_file[pose_start:]
        round_seconds_skeleton = pose_each_file.shape[0] // self.args.pose_fps  # e.g. 1500 frames / 15 fps = 100 s
        if len(audio_each_file) != 0:  # robust for both the empty-list default and loaded arrays
            if self.args.audio_rep != "wave16k":
                round_seconds_audio = len(audio_each_file) // self.args.audio_fps  # e.g. 1,600,000 / 16,000 = 100 s
            elif self.args.audio_rep == "mfcc":  # note: unreachable, "mfcc" already matches the branch above
                round_seconds_audio = audio_each_file.shape[0] // self.args.audio_fps
            else:
                round_seconds_audio = audio_each_file.shape[0] // self.args.audio_sr
            if len(facial_each_file) != 0:
                round_seconds_facial = facial_each_file.shape[0] // self.args.pose_fps
                logger.info(f"audio: {round_seconds_audio}s, pose: {round_seconds_skeleton}s, facial: {round_seconds_facial}s")
                round_seconds_skeleton = min(round_seconds_audio, round_seconds_skeleton, round_seconds_facial)
                max_round = max(round_seconds_audio, round_seconds_skeleton, round_seconds_facial)
                if round_seconds_skeleton != max_round:
                    logger.warning(f"reduce to {round_seconds_skeleton}s, ignore {max_round-round_seconds_skeleton}s")
            else:
                logger.info(f"pose: {round_seconds_skeleton}s, audio: {round_seconds_audio}s")
                round_seconds_skeleton = min(round_seconds_audio, round_seconds_skeleton)
                max_round = max(round_seconds_audio, round_seconds_skeleton)
                if round_seconds_skeleton != max_round:
                    logger.warning(f"reduce to {round_seconds_skeleton}s, ignore {max_round-round_seconds_skeleton}s")

        clip_s_t, clip_e_t = clean_first_seconds, round_seconds_skeleton - clean_final_seconds  # e.g. [10, 90] s
        clip_s_f_audio, clip_e_f_audio = self.args.audio_fps * clip_s_t, clip_e_t * self.args.audio_fps  # e.g. [160,000, 90*16,000]
        clip_s_f_pose, clip_e_f_pose = clip_s_t * self.args.pose_fps, clip_e_t * self.args.pose_fps  # e.g. [150, 90*15]

        for ratio in self.args.multi_length_training:
            if is_test:  # stride = length for test
                cut_length = clip_e_f_pose - clip_s_f_pose
                self.args.stride = cut_length
                self.max_length = cut_length
            else:
                self.args.stride = int(ratio*self.ori_stride)
                cut_length = int(self.ori_length*ratio)

            num_subdivision = math.floor((clip_e_f_pose - clip_s_f_pose - cut_length) / self.args.stride) + 1
            logger.info(f"pose from frame {clip_s_f_pose} to {clip_e_f_pose}, length {cut_length}")
            logger.info(f"{num_subdivision} clips are expected with stride {self.args.stride}")
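            # Sliding-window count: with window `cut_length` frames and step
            # `self.args.stride` over `clip_e_f_pose - clip_s_f_pose` frames,
            # floor((span - window) / step) + 1 windows fit. With illustrative numbers:
            # span 2400 frames, window 128, stride 20 -> floor(2272 / 20) + 1 = 114 clips.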
            if len(audio_each_file) != 0:
                audio_short_length = math.floor(cut_length / self.args.pose_fps * self.args.audio_fps)
                """
                for audio sr = 16000, fps = 15, pose_length = 34,
                audio short length = 36266.7 -> 36266
                this rounding error is fine.
                """
                logger.info(f"audio from frame {clip_s_f_audio} to {clip_e_f_audio}, length {audio_short_length}")

            n_filtered_out = defaultdict(int)
            sample_pose_list = []
            sample_audio_list = []
            sample_facial_list = []
            sample_shape_list = []
            sample_word_list = []
            sample_emo_list = []
            sample_sem_list = []
            sample_vid_list = []
            sample_trans_list = []
            sample_trans_v_list = []

            for i in range(num_subdivision):  # cut into clips of about 2 s (pose_length frames)
                start_idx = clip_s_f_pose + i * self.args.stride
                fin_idx = start_idx + cut_length
                sample_pose = pose_each_file[start_idx:fin_idx]

                sample_trans = trans_each_file[start_idx:fin_idx]
                sample_trans_v = trans_v_each_file[start_idx:fin_idx]
                sample_shape = shape_each_file[start_idx:fin_idx]
                if self.args.audio_rep is not None:
                    audio_start = clip_s_f_audio + math.floor(i * self.args.stride * self.args.audio_fps / self.args.pose_fps)
                    audio_end = audio_start + audio_short_length
                    sample_audio = audio_each_file[audio_start:audio_end]
                else:
                    sample_audio = np.array([-1])
                sample_facial = facial_each_file[start_idx:fin_idx] if self.args.facial_rep is not None else np.array([-1])
                sample_word = word_each_file[start_idx:fin_idx] if self.args.word_rep is not None else np.array([-1])
                sample_emo = emo_each_file[start_idx:fin_idx] if self.args.emo_rep is not None else np.array([-1])
                sample_sem = sem_each_file[start_idx:fin_idx] if self.args.sem_rep is not None else np.array([-1])
                sample_vid = vid_each_file[start_idx:fin_idx] if self.args.id_rep is not None else np.array([-1])

                if sample_pose is not None:
                    # filtering motion skeleton data
                    sample_pose, filtering_message = MotionPreprocessor(sample_pose).get()
                    is_correct_motion = len(sample_pose) != 0  # the preprocessor returns [] for rejected clips
                    if is_correct_motion or disable_filtering:
                        sample_pose_list.append(sample_pose)
                        sample_audio_list.append(sample_audio)
                        sample_facial_list.append(sample_facial)
                        sample_shape_list.append(sample_shape)
                        sample_word_list.append(sample_word)
                        sample_vid_list.append(sample_vid)
                        sample_emo_list.append(sample_emo)
                        sample_sem_list.append(sample_sem)
                        sample_trans_list.append(sample_trans)
                        sample_trans_v_list.append(sample_trans_v)
                    else:
                        n_filtered_out[filtering_message] += 1

            if len(sample_pose_list) > 0:
                with dst_lmdb_env.begin(write=True) as txn:
                    for pose, audio, facial, shape, word, vid, emo, sem, trans, trans_v in zip(
                        sample_pose_list,
                        sample_audio_list,
                        sample_facial_list,
                        sample_shape_list,
                        sample_word_list,
                        sample_vid_list,
                        sample_emo_list,
                        sample_sem_list,
                        sample_trans_list,
                        sample_trans_v_list,):
                        k = "{:005}".format(self.n_out_samples).encode("ascii")
                        v = [pose, audio, facial, shape, word, emo, sem, vid, trans, trans_v]
                        v = pickle.dumps(v, 5)
                        txn.put(k, v)
                        self.n_out_samples += 1
        return n_filtered_out
    def __getitem__(self, idx):
        with self.lmdb_env.begin(write=False) as txn:
            key = "{:005}".format(idx).encode("ascii")
            sample = txn.get(key)
            sample = pickle.loads(sample)
            tar_pose, in_audio, in_facial, in_shape, in_word, emo, sem, vid, trans, trans_v = sample
            emo = torch.from_numpy(emo).int()
            sem = torch.from_numpy(sem).float()
            in_audio = torch.from_numpy(in_audio).float()
            in_word = torch.from_numpy(in_word).float() if self.args.word_cache else torch.from_numpy(in_word).int()
            if self.loader_type == "test":
                tar_pose = torch.from_numpy(tar_pose).float()
                trans = torch.from_numpy(trans).float()
                trans_v = torch.from_numpy(trans_v).float()
                in_facial = torch.from_numpy(in_facial).float()
                vid = torch.from_numpy(vid).float()
                in_shape = torch.from_numpy(in_shape).float()
            else:
                in_shape = torch.from_numpy(in_shape).reshape((in_shape.shape[0], -1)).float()
                trans = torch.from_numpy(trans).reshape((trans.shape[0], -1)).float()
                trans_v = torch.from_numpy(trans_v).reshape((trans_v.shape[0], -1)).float()
                vid = torch.from_numpy(vid).reshape((vid.shape[0], -1)).float()
                tar_pose = torch.from_numpy(tar_pose).reshape((tar_pose.shape[0], -1)).float()
                in_facial = torch.from_numpy(in_facial).reshape((in_facial.shape[0], -1)).float()
            return {"pose": tar_pose, "audio": in_audio, "facial": in_facial, "beta": in_shape, "word": in_word, "id": vid, "emo": emo, "sem": sem, "trans": trans, "trans_v": trans_v}
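    # A minimal usage sketch (assuming the class is named CustomDataset as in the
    # sibling dataloaders of this repo, and that `args` is populated from the YAML
    # configs under configs/):
    #   >>> dataset = CustomDataset(args, loader_type="train")
    #   >>> loader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)
    #   >>> batch = next(iter(loader))
    #   >>> batch["pose"].shape  # (32, frames, masked pose channels + 4 foot-contact labels)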
class MotionPreprocessor:
    def __init__(self, skeletons):
        self.skeletons = skeletons
        # self.mean_pose = mean_pose
        self.filtering_message = "PASS"

    def get(self):
        assert (self.skeletons is not None)

        # filtering
        if self.skeletons != []:
            if self.check_pose_diff():
                self.skeletons = []
                self.filtering_message = "pose"
            # elif self.check_spine_angle():
            #     self.skeletons = []
            #     self.filtering_message = "spine angle"
            # elif self.check_static_motion():
            #     self.skeletons = []
            #     self.filtering_message = "motion"

        # if self.skeletons != []:
        #     self.skeletons = self.skeletons.tolist()
        #     for i, frame in enumerate(self.skeletons):
        #         assert not np.isnan(self.skeletons[i]).any()  # missing joints

        return self.skeletons, self.filtering_message

    def check_static_motion(self, verbose=True):
        def get_variance(skeleton, joint_idx):
            wrist_pos = skeleton[:, joint_idx]
            variance = np.sum(np.var(wrist_pos, axis=0))
            return variance

        left_arm_var = get_variance(self.skeletons, 6)
        right_arm_var = get_variance(self.skeletons, 9)

        th = 0.0014  # excludes 13110 samples
        # th = 0.002  # excludes 16905 samples
        if left_arm_var < th and right_arm_var < th:
            if verbose:
                print("skip - check_static_motion left var {}, right var {}".format(left_arm_var, right_arm_var))
            return True
        else:
            if verbose:
                print("pass - check_static_motion left var {}, right var {}".format(left_arm_var, right_arm_var))
            return False

    def check_pose_diff(self, verbose=False):
        # diff = np.abs(self.skeletons - self.mean_pose)  # 186*1
        # diff = np.mean(diff)

        # # th = 0.017
        # th = 0.02  # excludes 3594 samples
        # if diff < th:
        #     if verbose:
        #         print("skip - check_pose_diff {:.5f}".format(diff))
        #     return True
        # # th = 3.5  # excludes 3594 samples
        # # if 3.5 < diff < 5:
        # #     if verbose:
        # #         print("skip - check_pose_diff {:.5f}".format(diff))
        # #     return True
        # else:
        #     if verbose:
        #         print("pass - check_pose_diff {:.5f}".format(diff))
        return False

    def check_spine_angle(self, verbose=True):
        def angle_between(v1, v2):
            v1_u = v1 / np.linalg.norm(v1)
            v2_u = v2 / np.linalg.norm(v2)
            return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))

        angles = []
        for i in range(self.skeletons.shape[0]):
            spine_vec = self.skeletons[i, 1] - self.skeletons[i, 0]
            angle = angle_between(spine_vec, [0, -1, 0])
            angles.append(angle)

        if np.rad2deg(max(angles)) > 30 or np.rad2deg(np.mean(angles)) > 20:  # excludes 4495 samples
            # if np.rad2deg(max(angles)) > 20:  # excludes 8270 samples
            if verbose:
                print("skip - check_spine_angle {:.5f}, {:.5f}".format(max(angles), np.mean(angles)))
            return True
        else:
            if verbose:
                print("pass - check_spine_angle {:.5f}".format(max(angles)))
            return False
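    # check_static_motion (currently disabled in get()) sums per-axis positional
    # variance at two arm joints and drops a clip when both fall below 0.0014.
    # A minimal numeric sketch (illustrative values only, not from this repo):
    #   >>> skel = np.zeros((30, 10, 3)); skel[:, 6, 0] = np.linspace(0, 0.5, 30)
    #   >>> np.sum(np.var(skel[:, 6], axis=0))   # moving arm: ~0.022, above the threshold
    #   >>> np.sum(np.var(skel[:, 9], axis=0))   # static arm: 0.0, below the threshold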
dataloaders/beat_sep_lower_single.py
ADDED
@@ -0,0 +1,730 @@
import os
import pickle
import math
import shutil
import numpy as np
import lmdb as lmdb
import textgrid as tg
import pandas as pd
import torch
import glob
import json
from termcolor import colored
from loguru import logger
from collections import defaultdict
from torch.utils.data import Dataset
import torch.distributed as dist
#import pyarrow
import librosa
import smplx

from .build_vocab import Vocab
from .utils.audio_features import Wav2Vec2Model
from .data_tools import joints_list
from .utils import rotation_conversions as rc
from .utils import other_tools

class CustomDataset(Dataset):
    def __init__(self, args, loader_type, augmentation=None, kwargs=None, build_cache=True):

        self.audio_file_path = args.audio_file_path
        self.textgrid_file_path = args.textgrid_file_path
        self.default_pose_file = "./demo/examples/2_scott_0_1_1.npz"

        self.args = args
        self.loader_type = loader_type

        self.rank = 0
        self.ori_stride = self.args.stride
        self.ori_length = self.args.pose_length
        self.alignment = [0, 0]  # for trinity

        self.ori_joint_list = joints_list[self.args.ori_joints]
        self.tar_joint_list = joints_list[self.args.tar_joints]
        if 'smplx' in self.args.pose_rep:
            self.joint_mask = np.zeros(len(list(self.ori_joint_list.keys()))*3)
            self.joints = len(list(self.tar_joint_list.keys()))
            for joint_name in self.tar_joint_list:
                self.joint_mask[self.ori_joint_list[joint_name][1] - self.ori_joint_list[joint_name][0]:self.ori_joint_list[joint_name][1]] = 1
        else:
            self.joints = len(list(self.ori_joint_list.keys()))+1
            self.joint_mask = np.zeros(self.joints*3)
            for joint_name in self.tar_joint_list:
                if joint_name == "Hips":
                    self.joint_mask[3:6] = 1
                else:
                    self.joint_mask[self.ori_joint_list[joint_name][1] - self.ori_joint_list[joint_name][0]:self.ori_joint_list[joint_name][1]] = 1
        # select trainable joints
        self.smplx = smplx.create(
            self.args.data_path_1+"smplx_models/",
            model_type='smplx',
            gender='NEUTRAL_2020',
            use_face_contour=False,
            num_betas=300,
            num_expression_coeffs=100,
            ext='npz',
            use_pca=False,
        ).cuda().eval()

        split_rule = pd.read_csv(args.data_path+"train_test_split.csv")
        self.selected_file = split_rule.loc[(split_rule['type'] == loader_type) & (split_rule['id'].str.split("_").str[0].astype(int).isin(self.args.training_speakers))]
        if args.additional_data and loader_type == 'train':
            split_b = split_rule.loc[(split_rule['type'] == 'additional') & (split_rule['id'].str.split("_").str[0].astype(int).isin(self.args.training_speakers))]
            self.selected_file = pd.concat([self.selected_file, split_b])
        if self.selected_file.empty:
            logger.warning(f"{loader_type} is empty for speaker {self.args.training_speakers}, use train set 0-8 instead")
            self.selected_file = split_rule.loc[(split_rule['type'] == 'train') & (split_rule['id'].str.split("_").str[0].astype(int).isin(self.args.training_speakers))]
            self.selected_file = self.selected_file.iloc[0:8]
        self.data_dir = args.data_path

        if loader_type == "test":
            self.args.multi_length_training = [1.0]
        self.max_length = int(args.pose_length * self.args.multi_length_training[-1])
        self.max_audio_pre_len = math.floor(args.pose_length / args.pose_fps * self.args.audio_sr)
        if self.max_audio_pre_len > self.args.test_length*self.args.audio_sr:
            self.max_audio_pre_len = self.args.test_length*self.args.audio_sr

        if args.word_rep is not None:
            with open(f"{args.data_path}weights/vocab.pkl", 'rb') as f:
                self.lang_model = pickle.load(f)

        preloaded_dir = self.args.tmp_dir+'/' + loader_type + f"/{args.pose_rep}_cache"

        if self.args.beat_align:
            if not os.path.exists(args.data_path+f"weights/mean_vel_{args.pose_rep}.npy"):
                self.calculate_mean_velocity(args.data_path+f"weights/mean_vel_{args.pose_rep}.npy")
            self.avg_vel = np.load(args.data_path+f"weights/mean_vel_{args.pose_rep}.npy")

        if build_cache and self.rank == 0:
            self.build_cache(preloaded_dir)
        self.lmdb_env = lmdb.open(preloaded_dir, readonly=True, lock=False)
        with self.lmdb_env.begin() as txn:
            self.n_samples = txn.stat()["entries"]
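    # The joint mask keeps only the rotation channels of the target joints: each entry
    # of joints_list maps a joint name to (dims, end_offset) into the flattened pose
    # vector. A minimal sketch of the masking (hypothetical 2-joint layout, 3 channels
    # per joint, not the real BEAT layout):
    #   >>> ori = {"Hips": (3, 3), "Spine": (3, 6)}; tar = ["Spine"]
    #   >>> mask = np.zeros(6)
    #   >>> for name in tar: mask[ori[name][1]-ori[name][0]:ori[name][1]] = 1
    #   >>> mask
    #   array([0., 0., 0., 1., 1., 1.])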
    def calculate_mean_velocity(self, save_path):
        self.smplx = smplx.create(
            self.args.data_path_1+"smplx_models/",
            model_type='smplx',
            gender='NEUTRAL_2020',
            use_face_contour=False,
            num_betas=300,
            num_expression_coeffs=100,
            ext='npz',
            use_pca=False,
        ).cuda().eval()
        dir_p = self.data_dir + self.args.pose_rep + "/"
        all_list = []
        from tqdm import tqdm
        for tar in tqdm(os.listdir(dir_p)):
            if tar.endswith(".npz"):
                m_data = np.load(dir_p+tar, allow_pickle=True)
                betas, poses, trans, exps = m_data["betas"], m_data["poses"], m_data["trans"], m_data["expressions"]
                n, c = poses.shape[0], poses.shape[1]
                betas = betas.reshape(1, 300)
                betas = np.tile(betas, (n, 1))
                betas = torch.from_numpy(betas).cuda().float()
                poses = torch.from_numpy(poses.reshape(n, c)).cuda().float()
                exps = torch.from_numpy(exps.reshape(n, 100)).cuda().float()
                trans = torch.from_numpy(trans.reshape(n, 3)).cuda().float()
                max_length = 128
                s, r = n//max_length, n%max_length
                all_tensor = []
                for i in range(s):
                    with torch.no_grad():
                        joints = self.smplx(
                            betas=betas[i*max_length:(i+1)*max_length],
                            transl=trans[i*max_length:(i+1)*max_length],
                            expression=exps[i*max_length:(i+1)*max_length],
                            jaw_pose=poses[i*max_length:(i+1)*max_length, 66:69],
                            global_orient=poses[i*max_length:(i+1)*max_length, :3],
                            body_pose=poses[i*max_length:(i+1)*max_length, 3:21*3+3],
                            left_hand_pose=poses[i*max_length:(i+1)*max_length, 25*3:40*3],
                            right_hand_pose=poses[i*max_length:(i+1)*max_length, 40*3:55*3],
                            return_verts=True,
                            return_joints=True,
                            leye_pose=poses[i*max_length:(i+1)*max_length, 69:72],
                            reye_pose=poses[i*max_length:(i+1)*max_length, 72:75],
                        )['joints'][:, :55, :].reshape(max_length, 55*3)
                    all_tensor.append(joints)
                if r != 0:
                    with torch.no_grad():
                        joints = self.smplx(
                            betas=betas[s*max_length:s*max_length+r],
                            transl=trans[s*max_length:s*max_length+r],
                            expression=exps[s*max_length:s*max_length+r],
                            jaw_pose=poses[s*max_length:s*max_length+r, 66:69],
                            global_orient=poses[s*max_length:s*max_length+r, :3],
                            body_pose=poses[s*max_length:s*max_length+r, 3:21*3+3],
                            left_hand_pose=poses[s*max_length:s*max_length+r, 25*3:40*3],
                            right_hand_pose=poses[s*max_length:s*max_length+r, 40*3:55*3],
                            return_verts=True,
                            return_joints=True,
                            leye_pose=poses[s*max_length:s*max_length+r, 69:72],
                            reye_pose=poses[s*max_length:s*max_length+r, 72:75],
                        )['joints'][:, :55, :].reshape(r, 55*3)
                    all_tensor.append(joints)
                joints = torch.cat(all_tensor, axis=0)
                joints = joints.permute(1, 0)
                dt = 1/30
                # first step is a forward difference: (t+1 - t) / dt
                init_vel = (joints[:, 1:2] - joints[:, :1]) / dt
                # middle steps are second-order central differences: (t+1 - t-1) / 2dt
                middle_vel = (joints[:, 2:] - joints[:, 0:-2]) / (2 * dt)
                # last step is a backward difference: (t - t-1) / dt
                final_vel = (joints[:, -1:] - joints[:, -2:-1]) / dt
                vel_seq = torch.cat([init_vel, middle_vel, final_vel], dim=1).permute(1, 0).reshape(n, 55, 3)
                vel_seq_np = vel_seq.cpu().numpy()
                vel_joints_np = np.linalg.norm(vel_seq_np, axis=2)  # n * 55
                all_list.append(vel_joints_np)
        avg_vel = np.mean(np.concatenate(all_list, axis=0), axis=0)  # 55
        np.save(save_path, avg_vel)
    def build_cache(self, preloaded_dir):
        logger.info(f"Audio bit rate: {self.args.audio_fps}")
        logger.info("Reading data '{}'...".format(self.data_dir))
        logger.info("Creating the dataset cache...")
        if self.args.new_cache:
            if os.path.exists(preloaded_dir):
                shutil.rmtree(preloaded_dir)
        if os.path.exists(preloaded_dir):
            logger.info("Found the cache {}".format(preloaded_dir))
        elif self.loader_type == "test":
            self.cache_generation(
                preloaded_dir, True,
                0, 0,
                is_test=True)
        else:
            self.cache_generation(
                preloaded_dir, self.args.disable_filtering,
                self.args.clean_first_seconds, self.args.clean_final_seconds,
                is_test=False)

    def __len__(self):
        return self.n_samples
216 |
+
def cache_generation(self, out_lmdb_dir, disable_filtering, clean_first_seconds, clean_final_seconds, is_test=False):
|
217 |
+
# if "wav2vec2" in self.args.audio_rep:
|
218 |
+
# self.wav2vec_model = Wav2Vec2Model.from_pretrained(f"{self.args.data_path_1}/hub/transformer/wav2vec2-base-960h")
|
219 |
+
# self.wav2vec_model.feature_extractor._freeze_parameters()
|
220 |
+
# self.wav2vec_model = self.wav2vec_model.cuda()
|
221 |
+
# self.wav2vec_model.eval()
|
222 |
+
|
223 |
+
self.n_out_samples = 0
|
224 |
+
# create db for samples
|
225 |
+
if not os.path.exists(out_lmdb_dir): os.makedirs(out_lmdb_dir)
|
226 |
+
dst_lmdb_env = lmdb.open(out_lmdb_dir, map_size= int(1024 ** 3 * 500))# 500G
|
227 |
+
n_filtered_out = defaultdict(int)
|
228 |
+
|
229 |
+
|
230 |
+
#f_name = file_name["id"]
|
231 |
+
ext = ".npz" if "smplx" in self.args.pose_rep else ".bvh"
|
232 |
+
pose_file = self.default_pose_file
|
233 |
+
pose_each_file = []
|
234 |
+
trans_each_file = []
|
235 |
+
trans_v_each_file = []
|
236 |
+
shape_each_file = []
|
237 |
+
audio_each_file = []
|
238 |
+
facial_each_file = []
|
239 |
+
word_each_file = []
|
240 |
+
emo_each_file = []
|
241 |
+
sem_each_file = []
|
242 |
+
vid_each_file = []
|
243 |
+
id_pose = "tmp" #1_wayne_0_1_1
|
244 |
+
|
245 |
+
logger.info(colored(f"# ---- Building cache for Pose {id_pose} ---- #", "blue"))
|
246 |
+
if "smplx" in self.args.pose_rep:
|
247 |
+
pose_data = np.load(pose_file, allow_pickle=True)
|
248 |
+
assert 30%self.args.pose_fps == 0, 'pose_fps should be an aliquot part of 30'
|
249 |
+
stride = int(30/self.args.pose_fps)
|
250 |
+
pose_each_file = pose_data["poses"][::stride]
|
251 |
+
trans_each_file = pose_data["trans"][::stride]
|
252 |
+
trans_each_file[:,0] = trans_each_file[:,0] - trans_each_file[0,0]
|
253 |
+
trans_each_file[:,2] = trans_each_file[:,2] - trans_each_file[0,2]
|
254 |
+
trans_v_each_file = np.zeros_like(trans_each_file)
|
255 |
+
trans_v_each_file[1:,0] = trans_each_file[1:,0] - trans_each_file[:-1,0]
|
256 |
+
trans_v_each_file[0,0] = trans_v_each_file[1,0]
|
257 |
+
trans_v_each_file[1:,2] = trans_each_file[1:,2] - trans_each_file[:-1,2]
|
258 |
+
trans_v_each_file[0,2] = trans_v_each_file[1,2]
|
259 |
+
trans_v_each_file[:,1] = trans_each_file[:,1]
|
260 |
+
shape_each_file = np.repeat(pose_data["betas"].reshape(1, 300), pose_each_file.shape[0], axis=0)
|
261 |
+
|
262 |
+
assert self.args.pose_fps == 30, "should 30"
|
263 |
+
m_data = np.load(pose_file, allow_pickle=True)
|
264 |
+
betas, poses, trans, exps = m_data["betas"], m_data["poses"], m_data["trans"], m_data["expressions"]
|
265 |
+
n, c = poses.shape[0], poses.shape[1]
|
266 |
+
betas = betas.reshape(1, 300)
|
267 |
+
betas = np.tile(betas, (n, 1))
|
268 |
+
betas = torch.from_numpy(betas).cuda().float()
|
269 |
+
poses = torch.from_numpy(poses.reshape(n, c)).cuda().float()
|
270 |
+
exps = torch.from_numpy(exps.reshape(n, 100)).cuda().float()
|
271 |
+
trans = torch.from_numpy(trans.reshape(n, 3)).cuda().float()
|
272 |
+
max_length = 128 # 为什么这里需要一个max_length
|
273 |
+
s, r = n//max_length, n%max_length
|
274 |
+
#print(n, s, r)
|
275 |
+
all_tensor = []
|
276 |
+
for i in range(s):
|
277 |
+
with torch.no_grad():
|
278 |
+
joints = self.smplx(
|
279 |
+
betas=betas[i*max_length:(i+1)*max_length],
|
280 |
+
transl=trans[i*max_length:(i+1)*max_length],
|
281 |
+
expression=exps[i*max_length:(i+1)*max_length],
|
282 |
+
jaw_pose=poses[i*max_length:(i+1)*max_length, 66:69],
|
283 |
+
global_orient=poses[i*max_length:(i+1)*max_length,:3],
|
284 |
+
body_pose=poses[i*max_length:(i+1)*max_length,3:21*3+3],
|
285 |
+
left_hand_pose=poses[i*max_length:(i+1)*max_length,25*3:40*3],
|
286 |
+
right_hand_pose=poses[i*max_length:(i+1)*max_length,40*3:55*3],
|
287 |
+
return_verts=True,
|
288 |
+
return_joints=True,
|
289 |
+
leye_pose=poses[i*max_length:(i+1)*max_length, 69:72],
|
290 |
+
reye_pose=poses[i*max_length:(i+1)*max_length, 72:75],
|
291 |
+
)['joints'][:, (7,8,10,11), :].reshape(max_length, 4, 3).cpu()
|
292 |
+
all_tensor.append(joints)
|
293 |
+
if r != 0:
|
294 |
+
with torch.no_grad():
|
295 |
+
joints = self.smplx(
|
296 |
+
betas=betas[s*max_length:s*max_length+r],
|
297 |
+
transl=trans[s*max_length:s*max_length+r],
|
298 |
+
expression=exps[s*max_length:s*max_length+r],
|
299 |
+
jaw_pose=poses[s*max_length:s*max_length+r, 66:69],
|
300 |
+
global_orient=poses[s*max_length:s*max_length+r,:3],
|
301 |
+
body_pose=poses[s*max_length:s*max_length+r,3:21*3+3],
|
302 |
+
left_hand_pose=poses[s*max_length:s*max_length+r,25*3:40*3],
|
303 |
+
right_hand_pose=poses[s*max_length:s*max_length+r,40*3:55*3],
|
304 |
+
return_verts=True,
|
305 |
+
return_joints=True,
|
306 |
+
leye_pose=poses[s*max_length:s*max_length+r, 69:72],
|
307 |
+
reye_pose=poses[s*max_length:s*max_length+r, 72:75],
|
308 |
+
)['joints'][:, (7,8,10,11), :].reshape(r, 4, 3).cpu()
|
309 |
+
all_tensor.append(joints)
|
310 |
+
joints = torch.cat(all_tensor, axis=0) # all, 4, 3
|
311 |
+
# print(joints.shape)
|
312 |
+
feetv = torch.zeros(joints.shape[1], joints.shape[0])
|
313 |
+
joints = joints.permute(1, 0, 2)
|
314 |
+
#print(joints.shape, feetv.shape)
|
315 |
+
feetv[:, :-1] = (joints[:, 1:] - joints[:, :-1]).norm(dim=-1)
|
316 |
+
#print(feetv.shape)
|
317 |
+
contacts = (feetv < 0.01).numpy().astype(float)
|
318 |
+
# print(contacts.shape, contacts)
|
319 |
+
contacts = contacts.transpose(1, 0)
|
320 |
+
pose_each_file = pose_each_file * self.joint_mask
|
321 |
+
pose_each_file = pose_each_file[:, self.joint_mask.astype(bool)]
|
322 |
+
pose_each_file = np.concatenate([pose_each_file, contacts], axis=1)
|
323 |
+
# print(pose_each_file.shape)
|
324 |
+
|
325 |
+
|
326 |
+
if self.args.facial_rep is not None:
|
327 |
+
logger.info(f"# ---- Building cache for Facial {id_pose} and Pose {id_pose} ---- #")
|
328 |
+
facial_each_file = pose_data["expressions"][::stride]
|
329 |
+
if self.args.facial_norm:
|
330 |
+
facial_each_file = (facial_each_file - self.mean_facial) / self.std_facial
|
331 |
+
|
332 |
+
if self.args.id_rep is not None:
|
333 |
+
vid_each_file = np.repeat(np.array(int(999)-1).reshape(1, 1), pose_each_file.shape[0], axis=0)
|
334 |
+
|
335 |
+
if self.args.audio_rep is not None:
|
336 |
+
logger.info(f"# ---- Building cache for Audio {id_pose} and Pose {id_pose} ---- #")
|
337 |
+
audio_file = self.audio_file_path
|
338 |
+
if not os.path.exists(audio_file):
|
339 |
+
logger.warning(f"# ---- file not found for Audio {id_pose}, skip all files with the same id ---- #")
|
340 |
+
self.selected_file = self.selected_file.drop(self.selected_file[self.selected_file['id'] == id_pose].index)
|
341 |
+
|
342 |
+
audio_save_path = audio_file.replace("wave16k", "onset_amplitude").replace(".wav", ".npy")
|
343 |
+
|
344 |
+
if self.args.audio_rep == "onset+amplitude":
|
345 |
+
audio_each_file, sr = librosa.load(audio_file)
|
346 |
+
audio_each_file = librosa.resample(audio_each_file, orig_sr=sr, target_sr=self.args.audio_sr)
|
347 |
+
from numpy.lib import stride_tricks
|
348 |
+
frame_length = 1024
|
349 |
+
# hop_length = 512
|
350 |
+
shape = (audio_each_file.shape[-1] - frame_length + 1, frame_length)
|
351 |
+
strides = (audio_each_file.strides[-1], audio_each_file.strides[-1])
|
352 |
+
rolling_view = stride_tricks.as_strided(audio_each_file, shape=shape, strides=strides)
|
353 |
+
amplitude_envelope = np.max(np.abs(rolling_view), axis=1)
|
354 |
+
# pad the last frame_length-1 samples
|
355 |
+
amplitude_envelope = np.pad(amplitude_envelope, (0, frame_length-1), mode='constant', constant_values=amplitude_envelope[-1])
|
356 |
+
audio_onset_f = librosa.onset.onset_detect(y=audio_each_file, sr=self.args.audio_sr, units='frames')
|
357 |
+
onset_array = np.zeros(len(audio_each_file), dtype=float)
|
358 |
+
onset_array[audio_onset_f] = 1.0
|
359 |
+
# print(amplitude_envelope.shape, audio_each_file.shape, onset_array.shape)
|
360 |
+
audio_each_file = np.concatenate([amplitude_envelope.reshape(-1, 1), onset_array.reshape(-1, 1)], axis=1)
|
361 |
+
|
362 |
+
|
363 |
+
elif self.args.audio_rep == "mfcc":
|
364 |
+
audio_each_file = librosa.feature.melspectrogram(y=audio_each_file, sr=self.args.audio_sr, n_mels=128, hop_length=int(self.args.audio_sr/self.args.audio_fps))
|
365 |
+
audio_each_file = audio_each_file.transpose(1, 0)
|
366 |
+
# print(audio_each_file.shape, pose_each_file.shape)
|
367 |
+
if self.args.audio_norm and self.args.audio_rep == "wave16k":
|
368 |
+
audio_each_file = (audio_each_file - self.mean_audio) / self.std_audio
|
369 |
+
|
370 |
+
time_offset = 0
|
371 |
+
if self.args.word_rep is not None:
|
372 |
+
logger.info(f"# ---- Building cache for Word {id_pose} and Pose {id_pose} ---- #")
|
373 |
+
word_file = self.textgrid_file_path
|
374 |
+
if not os.path.exists(word_file):
|
375 |
+
logger.warning(f"# ---- file not found for Word {id_pose}, skip all files with the same id ---- #")
|
376 |
+
self.selected_file = self.selected_file.drop(self.selected_file[self.selected_file['id'] == id_pose].index)
|
377 |
+
word_save_path = f"{self.data_dir}{self.args.t_pre_encoder}/{id_pose}.npy"
|
378 |
+
|
379 |
+
tgrid = tg.TextGrid.fromFile(word_file)
|
380 |
+
|
381 |
+
for i in range(pose_each_file.shape[0]):
|
382 |
+
found_flag = False
|
383 |
+
current_time = i/self.args.pose_fps + time_offset
|
384 |
+
j_last = 0
|
385 |
+
for j, word in enumerate(tgrid[0]):
|
386 |
+
word_n, word_s, word_e = word.mark, word.minTime, word.maxTime
|
387 |
+
if word_s<=current_time and current_time<=word_e:
|
388 |
+
if word_n == " ":
|
389 |
+
word_each_file.append(self.lang_model.PAD_token)
|
390 |
+
else:
|
391 |
+
word_each_file.append(self.lang_model.get_word_index(word_n))
|
392 |
+
found_flag = True
|
393 |
+
j_last = j
|
394 |
+
break
|
395 |
+
else: continue
|
396 |
+
if not found_flag:
|
397 |
+
word_each_file.append(self.lang_model.UNK_token)
|
398 |
+
word_each_file = np.array(word_each_file)
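The loop above walks the TextGrid word tier once per pose frame, assigning the vocabulary index of whichever interval contains the frame's timestamp, using PAD for silences and falling back to UNK when no interval matches. A condensed sketch of that alignment, assuming `tier` behaves like `tgrid[0]` above and `lang_model` exposes the same `Vocab` interface:

```python
import numpy as np

def frame_word_indices(tier, lang_model, n_frames: int, pose_fps: int) -> np.ndarray:
    """Map each pose frame to a word index from a TextGrid interval tier."""
    out = []
    for i in range(n_frames):
        t = i / pose_fps
        idx = lang_model.UNK_token
        for word in tier:
            if word.minTime <= t <= word.maxTime:
                idx = lang_model.PAD_token if word.mark == " " else lang_model.get_word_index(word.mark)
                break
        out.append(idx)
    return np.array(out)
```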
|
399 |
+
|
400 |
+
|
401 |
+
|
402 |
+
if self.args.emo_rep is not None:
|
403 |
+
logger.info(f"# ---- Building cache for Emo {id_pose} and Pose {id_pose} ---- #")
|
404 |
+
rtype, start = int(id_pose.split('_')[3]), int(id_pose.split('_')[3])
|
405 |
+
if rtype == 0 or rtype == 2 or rtype == 4 or rtype == 6:
|
406 |
+
if start >= 1 and start <= 64:
|
407 |
+
score = 0
|
408 |
+
elif start >= 65 and start <= 72:
|
409 |
+
score = 1
|
410 |
+
elif start >= 73 and start <= 80:
|
411 |
+
score = 2
|
412 |
+
elif start >= 81 and start <= 86:
|
413 |
+
score = 3
|
414 |
+
elif start >= 87 and start <= 94:
|
415 |
+
score = 4
|
416 |
+
elif start >= 95 and start <= 102:
|
417 |
+
score = 5
|
418 |
+
elif start >= 103 and start <= 110:
|
419 |
+
score = 6
|
420 |
+
elif start >= 111 and start <= 118:
|
421 |
+
score = 7
|
422 |
+
else: pass
|
423 |
+
else:
|
424 |
+
# you may denote as unknown in the future
|
425 |
+
score = 0
|
426 |
+
emo_each_file = np.repeat(np.array(score).reshape(1, 1), pose_each_file.shape[0], axis=0)
|
427 |
+
#print(emo_each_file)
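The emotion label above is recovered from the sequence index encoded in the file name through a chain of range checks. The same mapping can be written as a small range table, which is easier to audit; the ranges below are copied from the branches above, and the fallback of 0 mirrors the else clause:

```python
EMO_RANGES = [  # (first sequence index, last sequence index, emotion label)
    (1, 64, 0), (65, 72, 1), (73, 80, 2), (81, 86, 3),
    (87, 94, 4), (95, 102, 5), (103, 110, 6), (111, 118, 7),
]

def emotion_label(start: int) -> int:
    for lo, hi, label in EMO_RANGES:
        if lo <= start <= hi:
            return label
    return 0  # unknown / neutral fallback
```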
|
428 |
+
|
429 |
+
if self.args.sem_rep is not None:
|
430 |
+
logger.info(f"# ---- Building cache for Sem {id_pose} and Pose {id_pose} ---- #")
|
431 |
+
sem_file = f"{self.data_dir}{self.args.sem_rep}/{id_pose}.txt"
|
432 |
+
sem_all = pd.read_csv(sem_file,
|
433 |
+
sep='\t',
|
434 |
+
names=["name", "start_time", "end_time", "duration", "score", "keywords"])
|
435 |
+
# we adopt motion-level semantic score here.
|
436 |
+
for i in range(pose_each_file.shape[0]):
|
437 |
+
found_flag = False
|
438 |
+
for j, (start, end, score) in enumerate(zip(sem_all['start_time'],sem_all['end_time'], sem_all['score'])):
|
439 |
+
current_time = i/self.args.pose_fps + time_offset
|
440 |
+
if start<=current_time and current_time<=end:
|
441 |
+
sem_each_file.append(score)
|
442 |
+
found_flag=True
|
443 |
+
break
|
444 |
+
else: continue
|
445 |
+
if not found_flag: sem_each_file.append(0.)
|
446 |
+
sem_each_file = np.array(sem_each_file)
|
447 |
+
#print(sem_each_file)
|
448 |
+
|
449 |
+
filtered_result = self._sample_from_clip(
|
450 |
+
dst_lmdb_env,
|
451 |
+
audio_each_file, pose_each_file, trans_each_file, trans_v_each_file,shape_each_file, facial_each_file, word_each_file,
|
452 |
+
vid_each_file, emo_each_file, sem_each_file,
|
453 |
+
disable_filtering, clean_first_seconds, clean_final_seconds, is_test,
|
454 |
+
)
|
455 |
+
for type in filtered_result.keys():
|
456 |
+
n_filtered_out[type] += filtered_result[type]
|
457 |
+
|
458 |
+
|
459 |
+
|
460 |
+
|
461 |
+
#### ---------for_end------------ ####
|
462 |
+
with dst_lmdb_env.begin() as txn:
|
463 |
+
logger.info(colored(f"no. of samples: {txn.stat()['entries']}", "cyan"))
|
464 |
+
n_total_filtered = 0
|
465 |
+
for type, n_filtered in n_filtered_out.items():
|
466 |
+
logger.info("{}: {}".format(type, n_filtered))
|
467 |
+
n_total_filtered += n_filtered
|
468 |
+
logger.info(colored("no. of excluded samples: {} ({:.1f}%)".format(
|
469 |
+
n_total_filtered, 100 * n_total_filtered / (txn.stat()["entries"] + n_total_filtered)), "cyan"))
|
470 |
+
dst_lmdb_env.sync()
|
471 |
+
dst_lmdb_env.close()
|
472 |
+
|
473 |
+
def _sample_from_clip(
|
474 |
+
self, dst_lmdb_env, audio_each_file, pose_each_file, trans_each_file, trans_v_each_file,shape_each_file, facial_each_file, word_each_file,
|
475 |
+
vid_each_file, emo_each_file, sem_each_file,
|
476 |
+
disable_filtering, clean_first_seconds, clean_final_seconds, is_test,
|
477 |
+
):
|
478 |
+
"""
|
479 |
+
for data cleaning, we ignore the data for first and final n s
|
480 |
+
for test, we return all data
|
481 |
+
"""
|
482 |
+
# audio_start = int(self.alignment[0] * self.args.audio_fps)
|
483 |
+
# pose_start = int(self.alignment[1] * self.args.pose_fps)
|
484 |
+
#logger.info(f"before: {audio_each_file.shape} {pose_each_file.shape}")
|
485 |
+
# audio_each_file = audio_each_file[audio_start:]
|
486 |
+
# pose_each_file = pose_each_file[pose_start:]
|
487 |
+
# trans_each_file =
|
488 |
+
#logger.info(f"after alignment: {audio_each_file.shape} {pose_each_file.shape}")
|
489 |
+
#print(pose_each_file.shape)
|
490 |
+
round_seconds_skeleton = pose_each_file.shape[0] // self.args.pose_fps # assume 1500 frames / 15 fps = 100 s
|
491 |
+
#print(round_seconds_skeleton)
|
492 |
+
if audio_each_file is not None:
|
493 |
+
if self.args.audio_rep != "wave16k":
|
494 |
+
round_seconds_audio = len(audio_each_file) // self.args.audio_fps # assume 1,600,000 / 16,000 = 100 s
|
495 |
+
elif self.args.audio_rep == "mfcc":
|
496 |
+
round_seconds_audio = audio_each_file.shape[0] // self.args.audio_fps
|
497 |
+
else:
|
498 |
+
round_seconds_audio = audio_each_file.shape[0] // self.args.audio_sr
|
499 |
+
if facial_each_file is not None:
|
500 |
+
round_seconds_facial = facial_each_file.shape[0] // self.args.pose_fps
|
501 |
+
logger.info(f"audio: {round_seconds_audio}s, pose: {round_seconds_skeleton}s, facial: {round_seconds_facial}s")
|
502 |
+
round_seconds_skeleton = min(round_seconds_audio, round_seconds_skeleton, round_seconds_facial)
|
503 |
+
max_round = max(round_seconds_audio, round_seconds_skeleton, round_seconds_facial)
|
504 |
+
if round_seconds_skeleton != max_round:
|
505 |
+
logger.warning(f"reduce to {round_seconds_skeleton}s, ignore {max_round-round_seconds_skeleton}s")
|
506 |
+
else:
|
507 |
+
logger.info(f"pose: {round_seconds_skeleton}s, audio: {round_seconds_audio}s")
|
508 |
+
round_seconds_skeleton = min(round_seconds_audio, round_seconds_skeleton)
|
509 |
+
max_round = max(round_seconds_audio, round_seconds_skeleton)
|
510 |
+
if round_seconds_skeleton != max_round:
|
511 |
+
logger.warning(f"reduce to {round_seconds_skeleton}s, ignore {max_round-round_seconds_skeleton}s")
|
512 |
+
|
513 |
+
clip_s_t, clip_e_t = clean_first_seconds, round_seconds_skeleton - clean_final_seconds # assume [10, 90]s
|
514 |
+
clip_s_f_audio, clip_e_f_audio = self.args.audio_fps * clip_s_t, clip_e_t * self.args.audio_fps # [160,000,90*160,000]
|
515 |
+
clip_s_f_pose, clip_e_f_pose = clip_s_t * self.args.pose_fps, clip_e_t * self.args.pose_fps # [150,90*15]
|
516 |
+
|
517 |
+
|
518 |
+
for ratio in self.args.multi_length_training:
|
519 |
+
if is_test:# stride = length for test
|
520 |
+
cut_length = clip_e_f_pose - clip_s_f_pose
|
521 |
+
self.args.stride = cut_length
|
522 |
+
self.max_length = cut_length
|
523 |
+
else:
|
524 |
+
self.args.stride = int(ratio*self.ori_stride)
|
525 |
+
cut_length = int(self.ori_length*ratio)
|
526 |
+
|
527 |
+
num_subdivision = math.floor((clip_e_f_pose - clip_s_f_pose - cut_length) / self.args.stride) + 1
|
528 |
+
logger.info(f"pose from frame {clip_s_f_pose} to {clip_e_f_pose}, length {cut_length}")
|
529 |
+
logger.info(f"{num_subdivision} clips is expected with stride {self.args.stride}")
|
530 |
+
|
531 |
+
if audio_each_file is not None:
|
532 |
+
audio_short_length = math.floor(cut_length / self.args.pose_fps * self.args.audio_fps)
|
533 |
+
"""
|
534 |
+
for audio sr = 16000, fps = 15, pose_length = 34,
|
535 |
+
audio short length = 36266.7 -> 36266
|
536 |
+
this error is fine.
|
537 |
+
"""
|
538 |
+
logger.info(f"audio from frame {clip_s_f_audio} to {clip_e_f_audio}, length {audio_short_length}")
|
539 |
+
|
540 |
+
n_filtered_out = defaultdict(int)
|
541 |
+
sample_pose_list = []
|
542 |
+
sample_audio_list = []
|
543 |
+
sample_facial_list = []
|
544 |
+
sample_shape_list = []
|
545 |
+
sample_word_list = []
|
546 |
+
sample_emo_list = []
|
547 |
+
sample_sem_list = []
|
548 |
+
sample_vid_list = []
|
549 |
+
sample_trans_list = []
|
550 |
+
sample_trans_v_list = []
|
551 |
+
|
552 |
+
for i in range(num_subdivision): # cut into around 2s chip, (self npose)
|
553 |
+
start_idx = clip_s_f_pose + i * self.args.stride
|
554 |
+
fin_idx = start_idx + cut_length
|
555 |
+
sample_pose = pose_each_file[start_idx:fin_idx]
|
556 |
+
|
557 |
+
sample_trans = trans_each_file[start_idx:fin_idx]
|
558 |
+
sample_trans_v = trans_v_each_file[start_idx:fin_idx]
|
559 |
+
sample_shape = shape_each_file[start_idx:fin_idx]
|
560 |
+
# print(sample_pose.shape)
|
561 |
+
if self.args.audio_rep is not None:
|
562 |
+
audio_start = clip_s_f_audio + math.floor(i * self.args.stride * self.args.audio_fps / self.args.pose_fps)
|
563 |
+
audio_end = audio_start + audio_short_length
|
564 |
+
sample_audio = audio_each_file[audio_start:audio_end]
|
565 |
+
else:
|
566 |
+
sample_audio = np.array([-1])
|
567 |
+
sample_facial = facial_each_file[start_idx:fin_idx] if self.args.facial_rep is not None else np.array([-1])
|
568 |
+
sample_word = word_each_file[start_idx:fin_idx] if self.args.word_rep is not None else np.array([-1])
|
569 |
+
sample_emo = emo_each_file[start_idx:fin_idx] if self.args.emo_rep is not None else np.array([-1])
|
570 |
+
sample_sem = sem_each_file[start_idx:fin_idx] if self.args.sem_rep is not None else np.array([-1])
|
571 |
+
sample_vid = vid_each_file[start_idx:fin_idx] if self.args.id_rep is not None else np.array([-1])
|
572 |
+
|
573 |
+
if sample_pose is not None:
|
574 |
+
# filtering motion skeleton data
|
575 |
+
sample_pose, filtering_message = MotionPreprocessor(sample_pose).get()
|
576 |
+
is_correct_motion = (sample_pose is not None)
|
577 |
+
if is_correct_motion or disable_filtering:
|
578 |
+
sample_pose_list.append(sample_pose)
|
579 |
+
sample_audio_list.append(sample_audio)
|
580 |
+
sample_facial_list.append(sample_facial)
|
581 |
+
sample_shape_list.append(sample_shape)
|
582 |
+
sample_word_list.append(sample_word)
|
583 |
+
sample_vid_list.append(sample_vid)
|
584 |
+
sample_emo_list.append(sample_emo)
|
585 |
+
sample_sem_list.append(sample_sem)
|
586 |
+
sample_trans_list.append(sample_trans)
|
587 |
+
sample_trans_v_list.append(sample_trans_v)
|
588 |
+
else:
|
589 |
+
n_filtered_out[filtering_message] += 1
|
590 |
+
|
591 |
+
if len(sample_pose_list) > 0:
|
592 |
+
with dst_lmdb_env.begin(write=True) as txn:
|
593 |
+
for pose, audio, facial, shape, word, vid, emo, sem, trans,trans_v in zip(
|
594 |
+
sample_pose_list,
|
595 |
+
sample_audio_list,
|
596 |
+
sample_facial_list,
|
597 |
+
sample_shape_list,
|
598 |
+
sample_word_list,
|
599 |
+
sample_vid_list,
|
600 |
+
sample_emo_list,
|
601 |
+
sample_sem_list,
|
602 |
+
sample_trans_list,
|
603 |
+
sample_trans_v_list,):
|
604 |
+
k = "{:005}".format(self.n_out_samples).encode("ascii")
|
605 |
+
v = [pose, audio, facial, shape, word, emo, sem, vid, trans,trans_v]
|
606 |
+
v = pickle.dumps(v,5)
|
607 |
+
txn.put(k, v)
|
608 |
+
self.n_out_samples += 1
|
609 |
+
return n_filtered_out
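Every accepted clip is written to LMDB under a zero-padded ascii key as a pickled list of per-modality arrays, and `__getitem__` later reads it back with the same key scheme. A minimal round-trip sketch of that storage pattern; the path and array shapes are placeholders, not values from this file:

```python
import lmdb
import pickle
import numpy as np

env = lmdb.open("/tmp/demo_cache", map_size=1024 ** 3)        # hypothetical cache directory
with env.begin(write=True) as txn:
    sample = [np.zeros((64, 169)), np.zeros((68266, 2))]      # e.g. pose and audio chunks
    txn.put("{:005}".format(0).encode("ascii"), pickle.dumps(sample, 5))

with env.begin(write=False) as txn:
    pose, audio = pickle.loads(txn.get("{:005}".format(0).encode("ascii")))
print(pose.shape, audio.shape)
```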
|
610 |
+
|
611 |
+
def __getitem__(self, idx):
|
612 |
+
with self.lmdb_env.begin(write=False) as txn:
|
613 |
+
key = "{:005}".format(idx).encode("ascii")
|
614 |
+
sample = txn.get(key)
|
615 |
+
sample = pickle.loads(sample)
|
616 |
+
tar_pose, in_audio, in_facial, in_shape, in_word, emo, sem, vid, trans,trans_v = sample
|
617 |
+
#print(in_shape)
|
618 |
+
#vid = torch.from_numpy(vid).int()
|
619 |
+
emo = torch.from_numpy(emo).int()
|
620 |
+
sem = torch.from_numpy(sem).float()
|
621 |
+
in_audio = torch.from_numpy(in_audio).float()
|
622 |
+
in_word = torch.from_numpy(in_word).float() if self.args.word_cache else torch.from_numpy(in_word).int()
|
623 |
+
if self.loader_type == "test":
|
624 |
+
tar_pose = torch.from_numpy(tar_pose).float()
|
625 |
+
trans = torch.from_numpy(trans).float()
|
626 |
+
trans_v = torch.from_numpy(trans_v).float()
|
627 |
+
in_facial = torch.from_numpy(in_facial).float()
|
628 |
+
vid = torch.from_numpy(vid).float()
|
629 |
+
in_shape = torch.from_numpy(in_shape).float()
|
630 |
+
else:
|
631 |
+
in_shape = torch.from_numpy(in_shape).reshape((in_shape.shape[0], -1)).float()
|
632 |
+
trans = torch.from_numpy(trans).reshape((trans.shape[0], -1)).float()
|
633 |
+
trans_v = torch.from_numpy(trans_v).reshape((trans_v.shape[0], -1)).float()
|
634 |
+
vid = torch.from_numpy(vid).reshape((vid.shape[0], -1)).float()
|
635 |
+
tar_pose = torch.from_numpy(tar_pose).reshape((tar_pose.shape[0], -1)).float()
|
636 |
+
in_facial = torch.from_numpy(in_facial).reshape((in_facial.shape[0], -1)).float()
|
637 |
+
return {"pose":tar_pose, "audio":in_audio, "facial":in_facial, "beta": in_shape, "word":in_word, "id":vid, "emo":emo, "sem":sem, "trans":trans,"trans_v":trans_v}
|
638 |
+
|
639 |
+
|
640 |
+
class MotionPreprocessor:
|
641 |
+
def __init__(self, skeletons):
|
642 |
+
self.skeletons = skeletons
|
643 |
+
#self.mean_pose = mean_pose
|
644 |
+
self.filtering_message = "PASS"
|
645 |
+
|
646 |
+
def get(self):
|
647 |
+
assert (self.skeletons is not None)
|
648 |
+
|
649 |
+
# filtering
|
650 |
+
if self.skeletons is not None:
|
651 |
+
if self.check_pose_diff():
|
652 |
+
self.skeletons = []
|
653 |
+
self.filtering_message = "pose"
|
654 |
+
# elif self.check_spine_angle():
|
655 |
+
# self.skeletons = []
|
656 |
+
# self.filtering_message = "spine angle"
|
657 |
+
# elif self.check_static_motion():
|
658 |
+
# self.skeletons = []
|
659 |
+
# self.filtering_message = "motion"
|
660 |
+
|
661 |
+
# if self.skeletons is not None:
|
662 |
+
# self.skeletons = self.skeletons.tolist()
|
663 |
+
# for i, frame in enumerate(self.skeletons):
|
664 |
+
# assert not np.isnan(self.skeletons[i]).any() # missing joints
|
665 |
+
|
666 |
+
return self.skeletons, self.filtering_message
|
667 |
+
|
668 |
+
def check_static_motion(self, verbose=True):
|
669 |
+
def get_variance(skeleton, joint_idx):
|
670 |
+
wrist_pos = skeleton[:, joint_idx]
|
671 |
+
variance = np.sum(np.var(wrist_pos, axis=0))
|
672 |
+
return variance
|
673 |
+
|
674 |
+
left_arm_var = get_variance(self.skeletons, 6)
|
675 |
+
right_arm_var = get_variance(self.skeletons, 9)
|
676 |
+
|
677 |
+
th = 0.0014 # exclude 13110
|
678 |
+
# th = 0.002 # exclude 16905
|
679 |
+
if left_arm_var < th and right_arm_var < th:
|
680 |
+
if verbose:
|
681 |
+
print("skip - check_static_motion left var {}, right var {}".format(left_arm_var, right_arm_var))
|
682 |
+
return True
|
683 |
+
else:
|
684 |
+
if verbose:
|
685 |
+
print("pass - check_static_motion left var {}, right var {}".format(left_arm_var, right_arm_var))
|
686 |
+
return False
|
687 |
+
|
688 |
+
|
689 |
+
def check_pose_diff(self, verbose=False):
|
690 |
+
# diff = np.abs(self.skeletons - self.mean_pose) # 186*1
|
691 |
+
# diff = np.mean(diff)
|
692 |
+
|
693 |
+
# # th = 0.017
|
694 |
+
# th = 0.02 #0.02 # exclude 3594
|
695 |
+
# if diff < th:
|
696 |
+
# if verbose:
|
697 |
+
# print("skip - check_pose_diff {:.5f}".format(diff))
|
698 |
+
# return True
|
699 |
+
# # th = 3.5 #0.02 # exclude 3594
|
700 |
+
# # if 3.5 < diff < 5:
|
701 |
+
# # if verbose:
|
702 |
+
# # print("skip - check_pose_diff {:.5f}".format(diff))
|
703 |
+
# # return True
|
704 |
+
# else:
|
705 |
+
# if verbose:
|
706 |
+
# print("pass - check_pose_diff {:.5f}".format(diff))
|
707 |
+
return False
|
708 |
+
|
709 |
+
|
710 |
+
def check_spine_angle(self, verbose=True):
|
711 |
+
def angle_between(v1, v2):
|
712 |
+
v1_u = v1 / np.linalg.norm(v1)
|
713 |
+
v2_u = v2 / np.linalg.norm(v2)
|
714 |
+
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
|
715 |
+
|
716 |
+
angles = []
|
717 |
+
for i in range(self.skeletons.shape[0]):
|
718 |
+
spine_vec = self.skeletons[i, 1] - self.skeletons[i, 0]
|
719 |
+
angle = angle_between(spine_vec, [0, -1, 0])
|
720 |
+
angles.append(angle)
|
721 |
+
|
722 |
+
if np.rad2deg(max(angles)) > 30 or np.rad2deg(np.mean(angles)) > 20: # exclude 4495
|
723 |
+
# if np.rad2deg(max(angles)) > 20: # exclude 8270
|
724 |
+
if verbose:
|
725 |
+
print("skip - check_spine_angle {:.5f}, {:.5f}".format(max(angles), np.mean(angles)))
|
726 |
+
return True
|
727 |
+
else:
|
728 |
+
if verbose:
|
729 |
+
print("pass - check_spine_angle {:.5f}".format(max(angles)))
|
730 |
+
return False
|
dataloaders/beat_smplx2020.py
ADDED
@@ -0,0 +1,763 @@
1 |
+
import os
|
2 |
+
import pickle
|
3 |
+
import math
|
4 |
+
import shutil
|
5 |
+
import numpy as np
|
6 |
+
import lmdb as lmdb
|
7 |
+
import textgrid as tg
|
8 |
+
import pandas as pd
|
9 |
+
import torch
|
10 |
+
import glob
|
11 |
+
import json
|
12 |
+
from termcolor import colored
|
13 |
+
from loguru import logger
|
14 |
+
from collections import defaultdict
|
15 |
+
from torch.utils.data import Dataset
|
16 |
+
import torch.distributed as dist
|
17 |
+
import pyarrow
|
18 |
+
import librosa
|
19 |
+
import smplx
|
20 |
+
|
21 |
+
from .build_vocab import Vocab
|
22 |
+
from .utils.audio_features import Wav2Vec2Model
|
23 |
+
from .data_tools import joints_list
|
24 |
+
from .utils import rotation_conversions as rc
|
25 |
+
from .utils import other_tools
|
26 |
+
|
27 |
+
class CustomDataset(Dataset):
|
28 |
+
def __init__(self, args, loader_type, augmentation=None, kwargs=None, build_cache=True):
|
29 |
+
self.args = args
|
30 |
+
self.loader_type = loader_type
|
31 |
+
|
32 |
+
self.rank = dist.get_rank()
|
33 |
+
self.ori_stride = self.args.stride
|
34 |
+
self.ori_length = self.args.pose_length
|
35 |
+
self.alignment = [0,0] # for trinity
|
36 |
+
|
37 |
+
self.ori_joint_list = joints_list[self.args.ori_joints]
|
38 |
+
self.tar_joint_list = joints_list[self.args.tar_joints]
|
39 |
+
if 'smplx' in self.args.pose_rep:
|
40 |
+
self.joint_mask = np.zeros(len(list(self.ori_joint_list.keys()))*3)
|
41 |
+
self.joints = len(list(self.ori_joint_list.keys()))
|
42 |
+
for joint_name in self.tar_joint_list:
|
43 |
+
self.joint_mask[self.ori_joint_list[joint_name][1] - self.ori_joint_list[joint_name][0]:self.ori_joint_list[joint_name][1]] = 1
|
44 |
+
else:
|
45 |
+
self.joints = len(list(self.ori_joint_list.keys()))+1
|
46 |
+
self.joint_mask = np.zeros(self.joints*3)
|
47 |
+
for joint_name in self.tar_joint_list:
|
48 |
+
if joint_name == "Hips":
|
49 |
+
self.joint_mask[3:6] = 1
|
50 |
+
else:
|
51 |
+
self.joint_mask[self.ori_joint_list[joint_name][1] - self.ori_joint_list[joint_name][0]:self.ori_joint_list[joint_name][1]] = 1
|
52 |
+
# select trainable joints
|
53 |
+
|
54 |
+
split_rule = pd.read_csv(args.data_path+"train_test_split.csv")
|
55 |
+
self.selected_file = split_rule.loc[(split_rule['type'] == loader_type) & (split_rule['id'].str.split("_").str[0].astype(int).isin(self.args.training_speakers))]
|
56 |
+
if args.additional_data and loader_type == 'train':
|
57 |
+
split_b = split_rule.loc[(split_rule['type'] == 'additional') & (split_rule['id'].str.split("_").str[0].astype(int).isin(self.args.training_speakers))]
|
58 |
+
#self.selected_file = split_rule.loc[(split_rule['type'] == 'additional') & (split_rule['id'].str.split("_").str[0].astype(int).isin(self.args.training_speakers))]
|
59 |
+
self.selected_file = pd.concat([self.selected_file, split_b])
|
60 |
+
if self.selected_file.empty:
|
61 |
+
logger.warning(f"{loader_type} is empty for speaker {self.args.training_speakers}, use train set 0-8 instead")
|
62 |
+
self.selected_file = split_rule.loc[(split_rule['type'] == 'train') & (split_rule['id'].str.split("_").str[0].astype(int).isin(self.args.training_speakers))]
|
63 |
+
self.selected_file = self.selected_file.iloc[0:8]
|
64 |
+
self.data_dir = args.data_path
|
65 |
+
|
66 |
+
if loader_type == "test":
|
67 |
+
self.args.multi_length_training = [1.0]
|
68 |
+
self.max_length = int(args.pose_length * self.args.multi_length_training[-1])
|
69 |
+
self.max_audio_pre_len = math.floor(args.pose_length / args.pose_fps * self.args.audio_sr)
|
70 |
+
if self.max_audio_pre_len > self.args.test_length*self.args.audio_sr:
|
71 |
+
self.max_audio_pre_len = self.args.test_length*self.args.audio_sr
|
72 |
+
|
73 |
+
if args.word_rep is not None:
|
74 |
+
with open(f"{args.data_path}weights/vocab.pkl", 'rb') as f:
|
75 |
+
self.lang_model = pickle.load(f)
|
76 |
+
|
77 |
+
preloaded_dir = self.args.root_path + self.args.cache_path + loader_type + f"/{args.pose_rep}_cache"
|
78 |
+
# if args.pose_norm:
|
79 |
+
# # careful for rotation vectors
|
80 |
+
# if not os.path.exists(args.data_path+args.mean_pose_path+f"{args.pose_rep.split('_')[0]}/bvh_mean.npy"):
|
81 |
+
# self.calculate_mean_pose()
|
82 |
+
# self.mean_pose = np.load(args.data_path+args.mean_pose_path+f"{args.pose_rep.split('_')[0]}/bvh_mean.npy")
|
83 |
+
# self.std_pose = np.load(args.data_path+args.mean_pose_path+f"{args.pose_rep.split('_')[0]}/bvh_std.npy")
|
84 |
+
# if args.audio_norm:
|
85 |
+
# if not os.path.exists(args.data_path+args.mean_pose_path+f"{args.audio_rep.split('_')[0]}/bvh_mean.npy"):
|
86 |
+
# self.calculate_mean_audio()
|
87 |
+
# self.mean_audio = np.load(args.data_path+args.mean_pose_path+f"{args.audio_rep.split('_')[0]}/npy_mean.npy")
|
88 |
+
# self.std_audio = np.load(args.data_path+args.mean_pose_path+f"{args.audio_rep.split('_')[0]}/npy_std.npy")
|
89 |
+
# if args.facial_norm:
|
90 |
+
# if not os.path.exists(args.data_path+args.mean_pose_path+f"{args.pose_rep.split('_')[0]}/bvh_mean.npy"):
|
91 |
+
# self.calculate_mean_face()
|
92 |
+
# self.mean_facial = np.load(args.data_path+args.mean_pose_path+f"{args.facial_rep}/json_mean.npy")
|
93 |
+
# self.std_facial = np.load(args.data_path+args.mean_pose_path+f"{args.facial_rep}/json_std.npy")
|
94 |
+
if self.args.beat_align:
|
95 |
+
if not os.path.exists(args.data_path+f"weights/mean_vel_{args.pose_rep}.npy"):
|
96 |
+
self.calculate_mean_velocity(args.data_path+f"weights/mean_vel_{args.pose_rep}.npy")
|
97 |
+
self.avg_vel = np.load(args.data_path+f"weights/mean_vel_{args.pose_rep}.npy")
|
98 |
+
|
99 |
+
if build_cache and self.rank == 0:
|
100 |
+
self.build_cache(preloaded_dir)
|
101 |
+
self.lmdb_env = lmdb.open(preloaded_dir, readonly=True, lock=False)
|
102 |
+
with self.lmdb_env.begin() as txn:
|
103 |
+
self.n_samples = txn.stat()["entries"]
|
104 |
+
|
105 |
+
|
106 |
+
def calculate_mean_velocity(self, save_path):
|
107 |
+
self.smplx = smplx.create(
|
108 |
+
self.args.data_path_1+"smplx_models/",
|
109 |
+
model_type='smplx',
|
110 |
+
gender='NEUTRAL_2020',
|
111 |
+
use_face_contour=False,
|
112 |
+
num_betas=300,
|
113 |
+
num_expression_coeffs=100,
|
114 |
+
ext='npz',
|
115 |
+
use_pca=False,
|
116 |
+
).cuda().eval()
|
117 |
+
dir_p = self.data_dir + self.args.pose_rep + "/"
|
118 |
+
all_list = []
|
119 |
+
from tqdm import tqdm
|
120 |
+
for tar in tqdm(os.listdir(dir_p)):
|
121 |
+
if tar.endswith(".npz"):
|
122 |
+
m_data = np.load(dir_p+tar, allow_pickle=True)
|
123 |
+
betas, poses, trans, exps = m_data["betas"], m_data["poses"], m_data["trans"], m_data["expressions"]
|
124 |
+
n, c = poses.shape[0], poses.shape[1]
|
125 |
+
betas = betas.reshape(1, 300)
|
126 |
+
betas = np.tile(betas, (n, 1))
|
127 |
+
betas = torch.from_numpy(betas).cuda().float()
|
128 |
+
poses = torch.from_numpy(poses.reshape(n, c)).cuda().float()
|
129 |
+
exps = torch.from_numpy(exps.reshape(n, 100)).cuda().float()
|
130 |
+
trans = torch.from_numpy(trans.reshape(n, 3)).cuda().float()
|
131 |
+
max_length = 128
|
132 |
+
s, r = n//max_length, n%max_length
|
133 |
+
#print(n, s, r)
|
134 |
+
all_tensor = []
|
135 |
+
for i in range(s):
|
136 |
+
with torch.no_grad():
|
137 |
+
joints = self.smplx(
|
138 |
+
betas=betas[i*max_length:(i+1)*max_length],
|
139 |
+
transl=trans[i*max_length:(i+1)*max_length],
|
140 |
+
expression=exps[i*max_length:(i+1)*max_length],
|
141 |
+
jaw_pose=poses[i*max_length:(i+1)*max_length, 66:69],
|
142 |
+
global_orient=poses[i*max_length:(i+1)*max_length,:3],
|
143 |
+
body_pose=poses[i*max_length:(i+1)*max_length,3:21*3+3],
|
144 |
+
left_hand_pose=poses[i*max_length:(i+1)*max_length,25*3:40*3],
|
145 |
+
right_hand_pose=poses[i*max_length:(i+1)*max_length,40*3:55*3],
|
146 |
+
return_verts=True,
|
147 |
+
return_joints=True,
|
148 |
+
leye_pose=poses[i*max_length:(i+1)*max_length, 69:72],
|
149 |
+
reye_pose=poses[i*max_length:(i+1)*max_length, 72:75],
|
150 |
+
)['joints'][:, :55, :].reshape(max_length, 55*3)
|
151 |
+
all_tensor.append(joints)
|
152 |
+
if r != 0:
|
153 |
+
with torch.no_grad():
|
154 |
+
joints = self.smplx(
|
155 |
+
betas=betas[s*max_length:s*max_length+r],
|
156 |
+
transl=trans[s*max_length:s*max_length+r],
|
157 |
+
expression=exps[s*max_length:s*max_length+r],
|
158 |
+
jaw_pose=poses[s*max_length:s*max_length+r, 66:69],
|
159 |
+
global_orient=poses[s*max_length:s*max_length+r,:3],
|
160 |
+
body_pose=poses[s*max_length:s*max_length+r,3:21*3+3],
|
161 |
+
left_hand_pose=poses[s*max_length:s*max_length+r,25*3:40*3],
|
162 |
+
right_hand_pose=poses[s*max_length:s*max_length+r,40*3:55*3],
|
163 |
+
return_verts=True,
|
164 |
+
return_joints=True,
|
165 |
+
leye_pose=poses[s*max_length:s*max_length+r, 69:72],
|
166 |
+
reye_pose=poses[s*max_length:s*max_length+r, 72:75],
|
167 |
+
)['joints'][:, :55, :].reshape(r, 55*3)
|
168 |
+
all_tensor.append(joints)
|
169 |
+
joints = torch.cat(all_tensor, axis=0)
|
170 |
+
joints = joints.permute(1, 0)
|
171 |
+
dt = 1/30
|
172 |
+
# first steps is forward diff (t+1 - t) / dt
|
173 |
+
init_vel = (joints[:, 1:2] - joints[:, :1]) / dt
|
174 |
+
# middle steps are second order (t+1 - t-1) / 2dt
|
175 |
+
middle_vel = (joints[:, 2:] - joints[:, 0:-2]) / (2 * dt)
|
176 |
+
# last step is backward diff (t - t-1) / dt
|
177 |
+
final_vel = (joints[:, -1:] - joints[:, -2:-1]) / dt
|
178 |
+
#print(joints.shape, init_vel.shape, middle_vel.shape, final_vel.shape)
|
179 |
+
vel_seq = torch.cat([init_vel, middle_vel, final_vel], dim=1).permute(1, 0).reshape(n, 55, 3)
|
180 |
+
#print(vel_seq.shape)
|
181 |
+
#.permute(1, 0).reshape(n, 55, 3)
|
182 |
+
vel_seq_np = vel_seq.cpu().numpy()
|
183 |
+
vel_joints_np = np.linalg.norm(vel_seq_np, axis=2) # n * 55
|
184 |
+
all_list.append(vel_joints_np)
|
185 |
+
avg_vel = np.mean(np.concatenate(all_list, axis=0),axis=0) # 55
|
186 |
+
np.save(save_path, avg_vel)
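The comments above spell out the finite-difference scheme used for the mean-velocity statistic: a forward difference at the first frame, central differences in the middle, and a backward difference at the last frame, all at 30 fps. A compact sketch of that estimate for a (T, J, 3) joint sequence:

```python
import numpy as np

def joint_velocity(joints: np.ndarray, fps: float = 30.0) -> np.ndarray:
    """joints: (T, J, 3) positions; returns (T, J, 3) velocities in units per second."""
    dt = 1.0 / fps
    vel = np.empty_like(joints)
    vel[0] = (joints[1] - joints[0]) / dt                  # forward difference
    vel[1:-1] = (joints[2:] - joints[:-2]) / (2.0 * dt)    # central difference
    vel[-1] = (joints[-1] - joints[-2]) / dt               # backward difference
    return vel

# per-joint speed, averaged over time, gives the kind of statistic saved above:
# speeds = np.linalg.norm(joint_velocity(joints), axis=2).mean(axis=0)
```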
|
187 |
+
|
188 |
+
|
189 |
+
def build_cache(self, preloaded_dir):
|
190 |
+
logger.info(f"Audio bit rate: {self.args.audio_fps}")
|
191 |
+
logger.info("Reading data '{}'...".format(self.data_dir))
|
192 |
+
logger.info("Creating the dataset cache...")
|
193 |
+
if self.args.new_cache:
|
194 |
+
if os.path.exists(preloaded_dir):
|
195 |
+
shutil.rmtree(preloaded_dir)
|
196 |
+
if os.path.exists(preloaded_dir):
|
197 |
+
logger.info("Found the cache {}".format(preloaded_dir))
|
198 |
+
elif self.loader_type == "test":
|
199 |
+
self.cache_generation(
|
200 |
+
preloaded_dir, True,
|
201 |
+
0, 0,
|
202 |
+
is_test=True)
|
203 |
+
else:
|
204 |
+
self.cache_generation(
|
205 |
+
preloaded_dir, self.args.disable_filtering,
|
206 |
+
self.args.clean_first_seconds, self.args.clean_final_seconds,
|
207 |
+
is_test=False)
|
208 |
+
|
209 |
+
def __len__(self):
|
210 |
+
return self.n_samples
|
211 |
+
|
212 |
+
|
213 |
+
def cache_generation(self, out_lmdb_dir, disable_filtering, clean_first_seconds, clean_final_seconds, is_test=False):
|
214 |
+
# if "wav2vec2" in self.args.audio_rep:
|
215 |
+
# self.wav2vec_model = Wav2Vec2Model.from_pretrained(f"{self.args.data_path_1}/hub/transformer/wav2vec2-base-960h")
|
216 |
+
# self.wav2vec_model.feature_extractor._freeze_parameters()
|
217 |
+
# self.wav2vec_model = self.wav2vec_model.cuda()
|
218 |
+
# self.wav2vec_model.eval()
|
219 |
+
|
220 |
+
self.n_out_samples = 0
|
221 |
+
# create db for samples
|
222 |
+
if not os.path.exists(out_lmdb_dir): os.makedirs(out_lmdb_dir)
|
223 |
+
dst_lmdb_env = lmdb.open(out_lmdb_dir, map_size= int(1024 ** 3 * 50))# 50G
|
224 |
+
n_filtered_out = defaultdict(int)
|
225 |
+
|
226 |
+
for index, file_name in self.selected_file.iterrows():
|
227 |
+
f_name = file_name["id"]
|
228 |
+
ext = ".npz" if "smplx" in self.args.pose_rep else ".bvh"
|
229 |
+
pose_file = self.data_dir + self.args.pose_rep + "/" + f_name + ext
|
230 |
+
pose_each_file = []
|
231 |
+
trans_each_file = []
|
232 |
+
shape_each_file = []
|
233 |
+
audio_each_file = []
|
234 |
+
facial_each_file = []
|
235 |
+
word_each_file = []
|
236 |
+
emo_each_file = []
|
237 |
+
sem_each_file = []
|
238 |
+
vid_each_file = []
|
239 |
+
id_pose = f_name #1_wayne_0_1_1
|
240 |
+
|
241 |
+
logger.info(colored(f"# ---- Building cache for Pose {id_pose} ---- #", "blue"))
|
242 |
+
if "smplx" in self.args.pose_rep:
|
243 |
+
pose_data = np.load(pose_file, allow_pickle=True)
|
244 |
+
assert 30%self.args.pose_fps == 0, 'pose_fps should be an aliquot part of 30'
|
245 |
+
stride = int(30/self.args.pose_fps)
|
246 |
+
pose_each_file = pose_data["poses"][::stride] * self.joint_mask
|
247 |
+
trans_each_file = pose_data["trans"][::stride]
|
248 |
+
shape_each_file = np.repeat(pose_data["betas"].reshape(1, 300), pose_each_file.shape[0], axis=0)
|
249 |
+
if self.args.facial_rep is not None:
|
250 |
+
logger.info(f"# ---- Building cache for Facial {id_pose} and Pose {id_pose} ---- #")
|
251 |
+
facial_each_file = pose_data["expressions"][::stride]
|
252 |
+
if self.args.facial_norm:
|
253 |
+
facial_each_file = (facial_each_file - self.mean_facial) / self.std_facial
|
254 |
+
|
255 |
+
else:
|
256 |
+
assert 120%self.args.pose_fps == 0, 'pose_fps should be an aliquot part of 120'
|
257 |
+
stride = int(120/self.args.pose_fps)
|
258 |
+
with open(pose_file, "r") as pose_data:
|
259 |
+
for j, line in enumerate(pose_data.readlines()):
|
260 |
+
if j < 431: continue
|
261 |
+
if j%stride != 0:continue
|
262 |
+
data = np.fromstring(line, dtype=float, sep=" ")
|
263 |
+
rot_data = rc.euler_angles_to_matrix(torch.from_numpy(np.deg2rad(data)).reshape(-1, self.joints,3), "XYZ")
|
264 |
+
rot_data = rc.matrix_to_axis_angle(rot_data).reshape(-1, self.joints*3)
|
265 |
+
rot_data = rot_data.numpy() * self.joint_mask
|
266 |
+
|
267 |
+
pose_each_file.append(rot_data)
|
268 |
+
trans_each_file.append(data[:3])
|
269 |
+
|
270 |
+
pose_each_file = np.array(pose_each_file)
|
271 |
+
# print(pose_each_file.shape)
|
272 |
+
trans_each_file = np.array(trans_each_file)
|
273 |
+
shape_each_file = np.repeat(np.array(-1).reshape(1, 1), pose_each_file.shape[0], axis=0)
|
274 |
+
if self.args.facial_rep is not None:
|
275 |
+
logger.info(f"# ---- Building cache for Facial {id_pose} and Pose {id_pose} ---- #")
|
276 |
+
facial_file = pose_file.replace(self.args.pose_rep, self.args.facial_rep).replace("bvh", "json")
|
277 |
+
assert 60%self.args.pose_fps == 0, 'pose_fps should be an aliquot part of 60'
|
278 |
+
stride = int(60/self.args.pose_fps)
|
279 |
+
if not os.path.exists(facial_file):
|
280 |
+
logger.warning(f"# ---- file not found for Facial {id_pose}, skip all files with the same id ---- #")
|
281 |
+
self.selected_file = self.selected_file.drop(self.selected_file[self.selected_file['id'] == id_pose].index)
|
282 |
+
continue
|
283 |
+
with open(facial_file, 'r') as facial_data_file:
|
284 |
+
facial_data = json.load(facial_data_file)
|
285 |
+
for j, frame_data in enumerate(facial_data['frames']):
|
286 |
+
if j%stride != 0:continue
|
287 |
+
facial_each_file.append(frame_data['weights'])
|
288 |
+
facial_each_file = np.array(facial_each_file)
|
289 |
+
if self.args.facial_norm:
|
290 |
+
facial_each_file = (facial_each_file - self.mean_facial) / self.std_facial
|
291 |
+
|
292 |
+
if self.args.id_rep is not None:
|
293 |
+
vid_each_file = np.repeat(np.array(int(f_name.split("_")[0])-1).reshape(1, 1), pose_each_file.shape[0], axis=0)
|
294 |
+
|
295 |
+
if self.args.audio_rep is not None:
|
296 |
+
logger.info(f"# ---- Building cache for Audio {id_pose} and Pose {id_pose} ---- #")
|
297 |
+
audio_file = pose_file.replace(self.args.pose_rep, 'wave16k').replace(ext, ".wav")
|
298 |
+
if not os.path.exists(audio_file):
|
299 |
+
logger.warning(f"# ---- file not found for Audio {id_pose}, skip all files with the same id ---- #")
|
300 |
+
self.selected_file = self.selected_file.drop(self.selected_file[self.selected_file['id'] == id_pose].index)
|
301 |
+
continue
|
302 |
+
audio_each_file, sr = librosa.load(audio_file)
|
303 |
+
audio_each_file = librosa.resample(audio_each_file, orig_sr=sr, target_sr=self.args.audio_sr)
|
304 |
+
if self.args.audio_rep == "onset+amplitude":
|
305 |
+
from numpy.lib import stride_tricks
|
306 |
+
frame_length = 1024
|
307 |
+
# hop_length = 512
|
308 |
+
shape = (audio_each_file.shape[-1] - frame_length + 1, frame_length)
|
309 |
+
strides = (audio_each_file.strides[-1], audio_each_file.strides[-1])
|
310 |
+
rolling_view = stride_tricks.as_strided(audio_each_file, shape=shape, strides=strides)
|
311 |
+
amplitude_envelope = np.max(np.abs(rolling_view), axis=1)
|
312 |
+
# pad the last frame_length-1 samples
|
313 |
+
amplitude_envelope = np.pad(amplitude_envelope, (0, frame_length-1), mode='constant', constant_values=amplitude_envelope[-1])
|
314 |
+
audio_onset_f = librosa.onset.onset_detect(y=audio_each_file, sr=self.args.audio_sr, units='frames')
|
315 |
+
onset_array = np.zeros(len(audio_each_file), dtype=float)
|
316 |
+
onset_array[audio_onset_f] = 1.0
|
317 |
+
# print(amplitude_envelope.shape, audio_each_file.shape, onset_array.shape)
|
318 |
+
audio_each_file = np.concatenate([amplitude_envelope.reshape(-1, 1), onset_array.reshape(-1, 1)], axis=1)
|
319 |
+
elif self.args.audio_rep == "mfcc":
|
320 |
+
audio_each_file = librosa.feature.mfcc(y=audio_each_file, sr=self.args.audio_sr, n_mfcc=13, hop_length=int(self.args.audio_sr/self.args.audio_fps))
|
321 |
+
|
322 |
+
if self.args.audio_norm and self.args.audio_rep == "wave16k":
|
323 |
+
audio_each_file = (audio_each_file - self.mean_audio) / self.std_audio
|
324 |
+
|
325 |
+
time_offset = 0
|
326 |
+
if self.args.word_rep is not None:
|
327 |
+
logger.info(f"# ---- Building cache for Word {id_pose} and Pose {id_pose} ---- #")
|
328 |
+
word_file = f"{self.data_dir}{self.args.word_rep}/{id_pose}.TextGrid"
|
329 |
+
if not os.path.exists(word_file):
|
330 |
+
logger.warning(f"# ---- file not found for Word {id_pose}, skip all files with the same id ---- #")
|
331 |
+
self.selected_file = self.selected_file.drop(self.selected_file[self.selected_file['id'] == id_pose].index)
|
332 |
+
continue
|
333 |
+
tgrid = tg.TextGrid.fromFile(word_file)
|
334 |
+
if self.args.t_pre_encoder == "bert":
|
335 |
+
from transformers import AutoTokenizer, BertModel
|
336 |
+
tokenizer = AutoTokenizer.from_pretrained(self.args.data_path_1 + "hub/bert-base-uncased", local_files_only=True)
|
337 |
+
model = BertModel.from_pretrained(self.args.data_path_1 + "hub/bert-base-uncased", local_files_only=True).eval()
|
338 |
+
list_word = []
|
339 |
+
all_hidden = []
|
340 |
+
max_len = 400
|
341 |
+
last = 0
|
342 |
+
word_token_mapping = []
|
343 |
+
first = True
|
344 |
+
for i, word in enumerate(tgrid[0]):
|
345 |
+
last = i
|
346 |
+
if (i%max_len != 0) or (i==0):
|
347 |
+
if word.mark == "":
|
348 |
+
list_word.append(".")
|
349 |
+
else:
|
350 |
+
list_word.append(word.mark)
|
351 |
+
else:
|
352 |
+
max_counter = max_len
|
353 |
+
str_word = ' '.join(map(str, list_word))
|
354 |
+
if first:
|
355 |
+
global_len = 0
|
356 |
+
end = -1
|
357 |
+
offset_word = []
|
358 |
+
for k, wordvalue in enumerate(list_word):
|
359 |
+
start = end+1
|
360 |
+
end = start+len(wordvalue)
|
361 |
+
offset_word.append((start, end))
|
362 |
+
#print(offset_word)
|
363 |
+
token_scan = tokenizer.encode_plus(str_word, return_offsets_mapping=True)['offset_mapping']
|
364 |
+
#print(token_scan)
|
365 |
+
for start, end in offset_word:
|
366 |
+
sub_mapping = []
|
367 |
+
for i, (start_t, end_t) in enumerate(token_scan[1:-1]):
|
368 |
+
if int(start) <= int(start_t) and int(end_t) <= int(end):
|
369 |
+
#print(i+global_len)
|
370 |
+
sub_mapping.append(i+global_len)
|
371 |
+
word_token_mapping.append(sub_mapping)
|
372 |
+
#print(len(word_token_mapping))
|
373 |
+
global_len = word_token_mapping[-1][-1] + 1
|
374 |
+
list_word = []
|
375 |
+
if word.mark == "":
|
376 |
+
list_word.append(".")
|
377 |
+
else:
|
378 |
+
list_word.append(word.mark)
|
379 |
+
|
380 |
+
with torch.no_grad():
|
381 |
+
inputs = tokenizer(str_word, return_tensors="pt")
|
382 |
+
outputs = model(**inputs)
|
383 |
+
last_hidden_states = outputs.last_hidden_state.reshape(-1, 768).cpu().numpy()[1:-1, :]
|
384 |
+
all_hidden.append(last_hidden_states)
|
385 |
+
|
386 |
+
#list_word = list_word[:10]
|
387 |
+
if list_word == []:
|
388 |
+
pass
|
389 |
+
else:
|
390 |
+
if first:
|
391 |
+
global_len = 0
|
392 |
+
str_word = ' '.join(map(str, list_word))
|
393 |
+
end = -1
|
394 |
+
offset_word = []
|
395 |
+
for k, wordvalue in enumerate(list_word):
|
396 |
+
start = end+1
|
397 |
+
end = start+len(wordvalue)
|
398 |
+
offset_word.append((start, end))
|
399 |
+
#print(offset_word)
|
400 |
+
token_scan = tokenizer.encode_plus(str_word, return_offsets_mapping=True)['offset_mapping']
|
401 |
+
#print(token_scan)
|
402 |
+
for start, end in offset_word:
|
403 |
+
sub_mapping = []
|
404 |
+
for i, (start_t, end_t) in enumerate(token_scan[1:-1]):
|
405 |
+
if int(start) <= int(start_t) and int(end_t) <= int(end):
|
406 |
+
sub_mapping.append(i+global_len)
|
407 |
+
#print(sub_mapping)
|
408 |
+
word_token_mapping.append(sub_mapping)
|
409 |
+
#print(len(word_token_mapping))
|
410 |
+
with torch.no_grad():
|
411 |
+
inputs = tokenizer(str_word, return_tensors="pt")
|
412 |
+
outputs = model(**inputs)
|
413 |
+
last_hidden_states = outputs.last_hidden_state.reshape(-1, 768).cpu().numpy()[1:-1, :]
|
414 |
+
all_hidden.append(last_hidden_states)
|
415 |
+
last_hidden_states = np.concatenate(all_hidden, axis=0)
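The block above aligns TextGrid words with BERT wordpiece tokens through the fast tokenizer's character offset mapping, so that each word can later be tied to the hidden state of one of its sub-tokens. A reduced sketch of that alignment for a single chunk of words; the model name below is the public `bert-base-uncased` checkpoint rather than the local path used above:

```python
from transformers import AutoTokenizer

def word_to_token_indices(words, tokenizer):
    """For each word, return the indices of its wordpiece tokens (special tokens excluded)."""
    text = " ".join(words)
    spans, end = [], -1
    for w in words:                        # character span of each word in the joined string
        start = end + 1
        end = start + len(w)
        spans.append((start, end))
    offsets = tokenizer.encode_plus(text, return_offsets_mapping=True)["offset_mapping"][1:-1]
    return [[i for i, (s, e) in enumerate(offsets) if start <= s and e <= end]
            for start, end in spans]

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
print(word_to_token_indices(["gestures", "follow", "speech"], tokenizer))
```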
|
416 |
+
|
417 |
+
for i in range(pose_each_file.shape[0]):
|
418 |
+
found_flag = False
|
419 |
+
current_time = i/self.args.pose_fps + time_offset
|
420 |
+
j_last = 0
|
421 |
+
for j, word in enumerate(tgrid[0]):
|
422 |
+
word_n, word_s, word_e = word.mark, word.minTime, word.maxTime
|
423 |
+
if word_s<=current_time and current_time<=word_e:
|
424 |
+
if self.args.word_cache and self.args.t_pre_encoder == 'bert':
|
425 |
+
mapping_index = word_token_mapping[j]
|
426 |
+
#print(mapping_index, word_s, word_e)
|
427 |
+
s_t = np.linspace(word_s, word_e, len(mapping_index)+1)
|
428 |
+
#print(s_t)
|
429 |
+
for tt, t_sep in enumerate(s_t[1:]):
|
430 |
+
if current_time <= t_sep:
|
431 |
+
#if len(mapping_index) > 1: print(mapping_index[tt])
|
432 |
+
word_each_file.append(last_hidden_states[mapping_index[tt]])
|
433 |
+
break
|
434 |
+
else:
|
435 |
+
if word_n == " ":
|
436 |
+
word_each_file.append(self.lang_model.PAD_token)
|
437 |
+
else:
|
438 |
+
word_each_file.append(self.lang_model.get_word_index(word_n))
|
439 |
+
found_flag = True
|
440 |
+
j_last = j
|
441 |
+
break
|
442 |
+
else: continue
|
443 |
+
if not found_flag:
|
444 |
+
if self.args.word_cache and self.args.t_pre_encoder == 'bert':
|
445 |
+
word_each_file.append(last_hidden_states[j_last])
|
446 |
+
else:
|
447 |
+
word_each_file.append(self.lang_model.UNK_token)
|
448 |
+
word_each_file = np.array(word_each_file)
|
449 |
+
#print(word_each_file.shape)
|
450 |
+
|
451 |
+
if self.args.emo_rep is not None:
|
452 |
+
logger.info(f"# ---- Building cache for Emo {id_pose} and Pose {id_pose} ---- #")
|
453 |
+
rtype, start = int(id_pose.split('_')[3]), int(id_pose.split('_')[3])
|
454 |
+
if rtype == 0 or rtype == 2 or rtype == 4 or rtype == 6:
|
455 |
+
if start >= 1 and start <= 64:
|
456 |
+
score = 0
|
457 |
+
elif start >= 65 and start <= 72:
|
458 |
+
score = 1
|
459 |
+
elif start >= 73 and start <= 80:
|
460 |
+
score = 2
|
461 |
+
elif start >= 81 and start <= 86:
|
462 |
+
score = 3
|
463 |
+
elif start >= 87 and start <= 94:
|
464 |
+
score = 4
|
465 |
+
elif start >= 95 and start <= 102:
|
466 |
+
score = 5
|
467 |
+
elif start >= 103 and start <= 110:
|
468 |
+
score = 6
|
469 |
+
elif start >= 111 and start <= 118:
|
470 |
+
score = 7
|
471 |
+
else: pass
|
472 |
+
else:
|
473 |
+
# you may denote as unknown in the future
|
474 |
+
score = 0
|
475 |
+
emo_each_file = np.repeat(np.array(score).reshape(1, 1), pose_each_file.shape[0], axis=0)
|
476 |
+
#print(emo_each_file)
|
477 |
+
|
478 |
+
if self.args.sem_rep is not None:
|
479 |
+
logger.info(f"# ---- Building cache for Sem {id_pose} and Pose {id_pose} ---- #")
|
480 |
+
sem_file = f"{self.data_dir}{self.args.sem_rep}/{id_pose}.txt"
|
481 |
+
sem_all = pd.read_csv(sem_file,
|
482 |
+
sep='\t',
|
483 |
+
names=["name", "start_time", "end_time", "duration", "score", "keywords"])
|
484 |
+
# we adopt motion-level semantic score here.
|
485 |
+
for i in range(pose_each_file.shape[0]):
|
486 |
+
found_flag = False
|
487 |
+
for j, (start, end, score) in enumerate(zip(sem_all['start_time'],sem_all['end_time'], sem_all['score'])):
|
488 |
+
current_time = i/self.args.pose_fps + time_offset
|
489 |
+
if start<=current_time and current_time<=end:
|
490 |
+
sem_each_file.append(score)
|
491 |
+
found_flag=True
|
492 |
+
break
|
493 |
+
else: continue
|
494 |
+
if not found_flag: sem_each_file.append(0.)
|
495 |
+
sem_each_file = np.array(sem_each_file)
|
496 |
+
#print(sem_each_file)
|
497 |
+
|
498 |
+
filtered_result = self._sample_from_clip(
|
499 |
+
dst_lmdb_env,
|
500 |
+
audio_each_file, pose_each_file, trans_each_file, shape_each_file, facial_each_file, word_each_file,
|
501 |
+
vid_each_file, emo_each_file, sem_each_file,
|
502 |
+
disable_filtering, clean_first_seconds, clean_final_seconds, is_test,
|
503 |
+
)
|
504 |
+
for type in filtered_result.keys():
|
505 |
+
n_filtered_out[type] += filtered_result[type]
|
506 |
+
|
507 |
+
with dst_lmdb_env.begin() as txn:
|
508 |
+
logger.info(colored(f"no. of samples: {txn.stat()['entries']}", "cyan"))
|
509 |
+
n_total_filtered = 0
|
510 |
+
for type, n_filtered in n_filtered_out.items():
|
511 |
+
logger.info("{}: {}".format(type, n_filtered))
|
512 |
+
n_total_filtered += n_filtered
|
513 |
+
logger.info(colored("no. of excluded samples: {} ({:.1f}%)".format(
|
514 |
+
n_total_filtered, 100 * n_total_filtered / (txn.stat()["entries"] + n_total_filtered)), "cyan"))
|
515 |
+
dst_lmdb_env.sync()
|
516 |
+
dst_lmdb_env.close()
|
517 |
+
|
518 |
+
def _sample_from_clip(
|
519 |
+
self, dst_lmdb_env, audio_each_file, pose_each_file, trans_each_file, shape_each_file, facial_each_file, word_each_file,
|
520 |
+
vid_each_file, emo_each_file, sem_each_file,
|
521 |
+
disable_filtering, clean_first_seconds, clean_final_seconds, is_test,
|
522 |
+
):
|
523 |
+
"""
|
524 |
+
for data cleaning, we ignore the data for first and final n s
|
525 |
+
for test, we return all data
|
526 |
+
"""
|
527 |
+
# audio_start = int(self.alignment[0] * self.args.audio_fps)
|
528 |
+
# pose_start = int(self.alignment[1] * self.args.pose_fps)
|
529 |
+
#logger.info(f"before: {audio_each_file.shape} {pose_each_file.shape}")
|
530 |
+
# audio_each_file = audio_each_file[audio_start:]
|
531 |
+
# pose_each_file = pose_each_file[pose_start:]
|
532 |
+
# trans_each_file =
|
533 |
+
#logger.info(f"after alignment: {audio_each_file.shape} {pose_each_file.shape}")
|
534 |
+
#print(pose_each_file.shape)
|
535 |
+
round_seconds_skeleton = pose_each_file.shape[0] // self.args.pose_fps # assume 1500 frames / 15 fps = 100 s
|
536 |
+
#print(round_seconds_skeleton)
|
537 |
+
if len(audio_each_file) != 0:
|
538 |
+
round_seconds_audio = len(audio_each_file) // self.args.audio_fps # assume 1,600,000 / 16,000 = 100 s
|
539 |
+
if len(facial_each_file) != 0:
|
540 |
+
round_seconds_facial = facial_each_file.shape[0] // self.args.pose_fps
|
541 |
+
logger.info(f"audio: {round_seconds_audio}s, pose: {round_seconds_skeleton}s, facial: {round_seconds_facial}s")
|
542 |
+
round_seconds_skeleton = min(round_seconds_audio, round_seconds_skeleton, round_seconds_facial)
|
543 |
+
max_round = max(round_seconds_audio, round_seconds_skeleton, round_seconds_facial)
|
544 |
+
if round_seconds_skeleton != max_round:
|
545 |
+
logger.warning(f"reduce to {round_seconds_skeleton}s, ignore {max_round-round_seconds_skeleton}s")
|
546 |
+
else:
|
547 |
+
logger.info(f"pose: {round_seconds_skeleton}s, audio: {round_seconds_audio}s")
|
548 |
+
round_seconds_skeleton = min(round_seconds_audio, round_seconds_skeleton)
|
549 |
+
max_round = max(round_seconds_audio, round_seconds_skeleton)
|
550 |
+
if round_seconds_skeleton != max_round:
|
551 |
+
logger.warning(f"reduce to {round_seconds_skeleton}s, ignore {max_round-round_seconds_skeleton}s")
|
552 |
+
|
553 |
+
clip_s_t, clip_e_t = clean_first_seconds, round_seconds_skeleton - clean_final_seconds # assume [10, 90]s
|
554 |
+
clip_s_f_audio, clip_e_f_audio = self.args.audio_fps * clip_s_t, clip_e_t * self.args.audio_fps # [160,000,90*160,000]
|
555 |
+
clip_s_f_pose, clip_e_f_pose = clip_s_t * self.args.pose_fps, clip_e_t * self.args.pose_fps # [150,90*15]
|
556 |
+
|
557 |
+
|
558 |
+
for ratio in self.args.multi_length_training:
|
559 |
+
if is_test:# stride = length for test
|
560 |
+
cut_length = clip_e_f_pose - clip_s_f_pose
|
561 |
+
self.args.stride = cut_length
|
562 |
+
self.max_length = cut_length
|
563 |
+
else:
|
564 |
+
self.args.stride = int(ratio*self.ori_stride)
|
565 |
+
cut_length = int(self.ori_length*ratio)
|
566 |
+
|
567 |
+
num_subdivision = math.floor((clip_e_f_pose - clip_s_f_pose - cut_length) / self.args.stride) + 1
|
568 |
+
logger.info(f"pose from frame {clip_s_f_pose} to {clip_e_f_pose}, length {cut_length}")
|
569 |
+
logger.info(f"{num_subdivision} clips is expected with stride {self.args.stride}")
|
570 |
+
|
571 |
+
if len(audio_each_file) != 0:
|
572 |
+
audio_short_length = math.floor(cut_length / self.args.pose_fps * self.args.audio_fps)
|
573 |
+
"""
|
574 |
+
for audio sr = 16000, fps = 15, pose_length = 34,
|
575 |
+
audio short length = 36266.7 -> 36266
|
576 |
+
this error is fine.
|
577 |
+
"""
|
578 |
+
logger.info(f"audio from frame {clip_s_f_audio} to {clip_e_f_audio}, length {audio_short_length}")
|
579 |
+
|
580 |
+
n_filtered_out = defaultdict(int)
|
581 |
+
sample_pose_list = []
|
582 |
+
sample_audio_list = []
|
583 |
+
sample_facial_list = []
|
584 |
+
sample_shape_list = []
|
585 |
+
sample_word_list = []
|
586 |
+
sample_emo_list = []
|
587 |
+
sample_sem_list = []
|
588 |
+
sample_vid_list = []
|
589 |
+
sample_trans_list = []
|
590 |
+
|
591 |
+
for i in range(num_subdivision): # cut into around 2s chip, (self npose)
|
592 |
+
start_idx = clip_s_f_pose + i * self.args.stride
|
593 |
+
fin_idx = start_idx + cut_length
|
594 |
+
sample_pose = pose_each_file[start_idx:fin_idx]
|
595 |
+
sample_trans = trans_each_file[start_idx:fin_idx]
|
596 |
+
sample_shape = shape_each_file[start_idx:fin_idx]
|
597 |
+
# print(sample_pose.shape)
|
598 |
+
if self.args.audio_rep is not None:
|
599 |
+
audio_start = clip_s_f_audio + math.floor(i * self.args.stride * self.args.audio_fps / self.args.pose_fps)
|
600 |
+
audio_end = audio_start + audio_short_length
|
601 |
+
sample_audio = audio_each_file[audio_start:audio_end]
|
602 |
+
else:
|
603 |
+
sample_audio = np.array([-1])
|
604 |
+
sample_facial = facial_each_file[start_idx:fin_idx] if self.args.facial_rep is not None else np.array([-1])
|
605 |
+
sample_word = word_each_file[start_idx:fin_idx] if self.args.word_rep is not None else np.array([-1])
|
606 |
+
sample_emo = emo_each_file[start_idx:fin_idx] if self.args.emo_rep is not None else np.array([-1])
|
607 |
+
sample_sem = sem_each_file[start_idx:fin_idx] if self.args.sem_rep is not None else np.array([-1])
|
608 |
+
sample_vid = vid_each_file[start_idx:fin_idx] if self.args.id_rep is not None else np.array([-1])
|
609 |
+
|
610 |
+
if sample_pose is not None:
|
611 |
+
# filtering motion skeleton data
|
612 |
+
sample_pose, filtering_message = MotionPreprocessor(sample_pose).get()
|
613 |
+
is_correct_motion = (len(sample_pose) > 0)
|
614 |
+
if is_correct_motion or disable_filtering:
|
615 |
+
sample_pose_list.append(sample_pose)
|
616 |
+
sample_audio_list.append(sample_audio)
|
617 |
+
sample_facial_list.append(sample_facial)
|
618 |
+
sample_shape_list.append(sample_shape)
|
619 |
+
sample_word_list.append(sample_word)
|
620 |
+
sample_vid_list.append(sample_vid)
|
621 |
+
sample_emo_list.append(sample_emo)
|
622 |
+
sample_sem_list.append(sample_sem)
|
623 |
+
sample_trans_list.append(sample_trans)
|
624 |
+
else:
|
625 |
+
n_filtered_out[filtering_message] += 1
|
626 |
+
|
627 |
+
if len(sample_pose_list) > 0:
|
628 |
+
with dst_lmdb_env.begin(write=True) as txn:
|
629 |
+
for pose, audio, facial, shape, word, vid, emo, sem, trans in zip(
|
630 |
+
sample_pose_list,
|
631 |
+
sample_audio_list,
|
632 |
+
sample_facial_list,
|
633 |
+
sample_shape_list,
|
634 |
+
sample_word_list,
|
635 |
+
sample_vid_list,
|
636 |
+
sample_emo_list,
|
637 |
+
sample_sem_list,
|
638 |
+
sample_trans_list,):
|
639 |
+
k = "{:005}".format(self.n_out_samples).encode("ascii")
|
640 |
+
v = [pose, audio, facial, shape, word, emo, sem, vid, trans]
|
641 |
+
v = pickle.dumps(v, 5)  # pyarrow.serialize has been removed from pyarrow; pickle matches the newer loaders
|
642 |
+
txn.put(k, v)
|
643 |
+
self.n_out_samples += 1
|
644 |
+
return n_filtered_out
|
645 |
+
|
646 |
+
def __getitem__(self, idx):
|
647 |
+
with self.lmdb_env.begin(write=False) as txn:
|
648 |
+
key = "{:005}".format(idx).encode("ascii")
|
649 |
+
sample = txn.get(key)
|
650 |
+
sample = pickle.loads(sample)
|
651 |
+
tar_pose, in_audio, in_facial, in_shape, in_word, emo, sem, vid, trans = sample
|
652 |
+
#print(in_shape)
|
653 |
+
#vid = torch.from_numpy(vid).int()
|
654 |
+
emo = torch.from_numpy(emo).int()
|
655 |
+
sem = torch.from_numpy(sem).float()
|
656 |
+
in_audio = torch.from_numpy(in_audio).float()
|
657 |
+
in_word = torch.from_numpy(in_word).float() if self.args.word_cache else torch.from_numpy(in_word).int()
|
658 |
+
if self.loader_type == "test":
|
659 |
+
tar_pose = torch.from_numpy(tar_pose).float()
|
660 |
+
trans = torch.from_numpy(trans).float()
|
661 |
+
in_facial = torch.from_numpy(in_facial).float()
|
662 |
+
vid = torch.from_numpy(vid).float()
|
663 |
+
in_shape = torch.from_numpy(in_shape).float()
|
664 |
+
else:
|
665 |
+
in_shape = torch.from_numpy(in_shape).reshape((in_shape.shape[0], -1)).float()
|
666 |
+
trans = torch.from_numpy(trans).reshape((trans.shape[0], -1)).float()
|
667 |
+
vid = torch.from_numpy(vid).reshape((vid.shape[0], -1)).float()
|
668 |
+
tar_pose = torch.from_numpy(tar_pose).reshape((tar_pose.shape[0], -1)).float()
|
669 |
+
in_facial = torch.from_numpy(in_facial).reshape((in_facial.shape[0], -1)).float()
|
670 |
+
return {"pose":tar_pose, "audio":in_audio, "facial":in_facial, "beta": in_shape, "word":in_word, "id":vid, "emo":emo, "sem":sem, "trans":trans}
|
671 |
+
|
672 |
+
|
673 |
+
class MotionPreprocessor:
|
674 |
+
def __init__(self, skeletons):
|
675 |
+
self.skeletons = skeletons
|
676 |
+
#self.mean_pose = mean_pose
|
677 |
+
self.filtering_message = "PASS"
|
678 |
+
|
679 |
+
def get(self):
|
680 |
+
assert (self.skeletons is not None)
|
681 |
+
|
682 |
+
# filtering
|
683 |
+
if self.skeletons is not None:
|
684 |
+
if self.check_pose_diff():
|
685 |
+
self.skeletons = []
|
686 |
+
self.filtering_message = "pose"
|
687 |
+
# elif self.check_spine_angle():
|
688 |
+
# self.skeletons = []
|
689 |
+
# self.filtering_message = "spine angle"
|
690 |
+
# elif self.check_static_motion():
|
691 |
+
# self.skeletons = []
|
692 |
+
# self.filtering_message = "motion"
|
693 |
+
|
694 |
+
# if self.skeletons != []:
|
695 |
+
# self.skeletons = self.skeletons.tolist()
|
696 |
+
# for i, frame in enumerate(self.skeletons):
|
697 |
+
# assert not np.isnan(self.skeletons[i]).any() # missing joints
|
698 |
+
|
699 |
+
return self.skeletons, self.filtering_message
|
700 |
+
|
701 |
+
def check_static_motion(self, verbose=True):
|
702 |
+
def get_variance(skeleton, joint_idx):
|
703 |
+
wrist_pos = skeleton[:, joint_idx]
|
704 |
+
variance = np.sum(np.var(wrist_pos, axis=0))
|
705 |
+
return variance
|
706 |
+
|
707 |
+
left_arm_var = get_variance(self.skeletons, 6)
|
708 |
+
right_arm_var = get_variance(self.skeletons, 9)
|
709 |
+
|
710 |
+
th = 0.0014 # exclude 13110
|
711 |
+
# th = 0.002 # exclude 16905
|
712 |
+
if left_arm_var < th and right_arm_var < th:
|
713 |
+
if verbose:
|
714 |
+
print("skip - check_static_motion left var {}, right var {}".format(left_arm_var, right_arm_var))
|
715 |
+
return True
|
716 |
+
else:
|
717 |
+
if verbose:
|
718 |
+
print("pass - check_static_motion left var {}, right var {}".format(left_arm_var, right_arm_var))
|
719 |
+
return False
|
720 |
+
|
721 |
+
|
722 |
+
def check_pose_diff(self, verbose=False):
|
723 |
+
# diff = np.abs(self.skeletons - self.mean_pose) # 186*1
|
724 |
+
# diff = np.mean(diff)
|
725 |
+
|
726 |
+
# # th = 0.017
|
727 |
+
# th = 0.02 #0.02 # exclude 3594
|
728 |
+
# if diff < th:
|
729 |
+
# if verbose:
|
730 |
+
# print("skip - check_pose_diff {:.5f}".format(diff))
|
731 |
+
# return True
|
732 |
+
# # th = 3.5 #0.02 # exclude 3594
|
733 |
+
# # if 3.5 < diff < 5:
|
734 |
+
# # if verbose:
|
735 |
+
# # print("skip - check_pose_diff {:.5f}".format(diff))
|
736 |
+
# # return True
|
737 |
+
# else:
|
738 |
+
# if verbose:
|
739 |
+
# print("pass - check_pose_diff {:.5f}".format(diff))
|
740 |
+
return False
|
741 |
+
|
742 |
+
|
743 |
+
def check_spine_angle(self, verbose=True):
|
744 |
+
def angle_between(v1, v2):
|
745 |
+
v1_u = v1 / np.linalg.norm(v1)
|
746 |
+
v2_u = v2 / np.linalg.norm(v2)
|
747 |
+
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
|
748 |
+
|
749 |
+
angles = []
|
750 |
+
for i in range(self.skeletons.shape[0]):
|
751 |
+
spine_vec = self.skeletons[i, 1] - self.skeletons[i, 0]
|
752 |
+
angle = angle_between(spine_vec, [0, -1, 0])
|
753 |
+
angles.append(angle)
|
754 |
+
|
755 |
+
if np.rad2deg(max(angles)) > 30 or np.rad2deg(np.mean(angles)) > 20: # exclude 4495
|
756 |
+
# if np.rad2deg(max(angles)) > 20: # exclude 8270
|
757 |
+
if verbose:
|
758 |
+
print("skip - check_spine_angle {:.5f}, {:.5f}".format(max(angles), np.mean(angles)))
|
759 |
+
return True
|
760 |
+
else:
|
761 |
+
if verbose:
|
762 |
+
print("pass - check_spine_angle {:.5f}".format(max(angles)))
|
763 |
+
return False
|
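A short sketch of how MotionPreprocessor is meant to be driven during dataset building; the (n_frames, n_joints, 3) skeleton shape is an assumption inferred from the joint indexing in check_static_motion and check_spine_angle:

import numpy as np

skeletons = np.random.randn(120, 55, 3)  # placeholder for per-frame joint positions
skeletons, message = MotionPreprocessor(skeletons).get()
if message != "PASS":  # "pose", "spine angle" or "motion", depending on which check fired
    print("clip filtered out:", message)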
dataloaders/build_vocab.py  ADDED  @@ -0,0 +1,199 @@
import numpy as np
import glob
import os
import pickle
import lmdb
#import pyarrow
import fasttext
from loguru import logger
from scipy import linalg


class Vocab:
    PAD_token = 0
    SOS_token = 1
    EOS_token = 2
    UNK_token = 3

    def __init__(self, name, insert_default_tokens=True):
        self.name = name
        self.trimmed = False
        self.word_embedding_weights = None
        self.reset_dictionary(insert_default_tokens)

    def reset_dictionary(self, insert_default_tokens=True):
        self.word2index = {}
        self.word2count = {}
        if insert_default_tokens:
            self.index2word = {self.PAD_token: "<PAD>", self.SOS_token: "<SOS>",
                               self.EOS_token: "<EOS>", self.UNK_token: "<UNK>"}
        else:
            self.index2word = {self.UNK_token: "<UNK>"}
        self.n_words = len(self.index2word)  # count default tokens

    def index_word(self, word):
        if word not in self.word2index:
            self.word2index[word] = self.n_words
            self.word2count[word] = 1
            self.index2word[self.n_words] = word
            self.n_words += 1
        else:
            self.word2count[word] += 1

    def add_vocab(self, other_vocab):
        for word, _ in other_vocab.word2count.items():
            self.index_word(word)

    # remove words below a certain count threshold
    def trim(self, min_count):
        if self.trimmed:
            return
        self.trimmed = True

        keep_words = []

        for k, v in self.word2count.items():
            if v >= min_count:
                keep_words.append(k)

        print('  word trimming, kept %s / %s = %.4f' % (
            len(keep_words), len(self.word2index), len(keep_words) / len(self.word2index)
        ))

        # reinitialize dictionary
        self.reset_dictionary()
        for word in keep_words:
            self.index_word(word)

    def get_word_index(self, word):
        if word in self.word2index:
            return self.word2index[word]
        else:
            return self.UNK_token

    def load_word_vectors(self, pretrained_path, embedding_dim=300):
        print("  loading word vectors from '{}'...".format(pretrained_path))

        # initialize embeddings to random values for special words
        init_sd = 1 / np.sqrt(embedding_dim)
        weights = np.random.normal(0, scale=init_sd, size=[self.n_words, embedding_dim])
        weights = weights.astype(np.float32)

        # read word vectors
        word_model = fasttext.load_model(pretrained_path)
        for word, id in self.word2index.items():
            vec = word_model.get_word_vector(word)
            weights[id] = vec
        self.word_embedding_weights = weights

    def __get_embedding_weight(self, pretrained_path, embedding_dim=300):
        """ function modified from http://ronny.rest/blog/post_2017_08_04_glove/ """
        print("Loading word embedding '{}'...".format(pretrained_path))
        cache_path = pretrained_path
        weights = None

        # use cached file if it exists
        if os.path.exists(cache_path):
            with open(cache_path, 'rb') as f:
                print('  using cached result from {}'.format(cache_path))
                weights = pickle.load(f)
                if weights.shape != (self.n_words, embedding_dim):
                    logger.warning('  failed to load word embedding weights. reinitializing...')
                    weights = None

        if weights is None:
            # initialize embeddings to random values for special and OOV words
            init_sd = 1 / np.sqrt(embedding_dim)
            weights = np.random.normal(0, scale=init_sd, size=[self.n_words, embedding_dim])
            weights = weights.astype(np.float32)

            with open(pretrained_path, encoding="utf-8", mode="r") as textFile:
                num_embedded_words = 0
                for line_raw in textFile:
                    # extract the word, and embeddings vector
                    line = line_raw.split()
                    try:
                        word, vector = (line[0], np.array(line[1:], dtype=np.float32))
                        # if word == 'love':  # debugging
                        #     print(word, vector)

                        # if it is in our vocab, then update the corresponding weights
                        id = self.word2index.get(word, None)
                        if id is not None:
                            weights[id] = vector
                            num_embedded_words += 1
                    except ValueError:
                        print('  parsing error at {}...'.format(line_raw[:50]))
                        continue
                print('  {} / {} word vectors are found in the embedding'.format(num_embedded_words, len(self.word2index)))

            with open(cache_path, 'wb') as f:
                pickle.dump(weights, f)
        return weights


def build_vocab(name, data_path, cache_path, word_vec_path=None, feat_dim=None):
    print('  building a language model...')
    #if not os.path.exists(cache_path):
    lang_model = Vocab(name)
    print('    indexing words from {}'.format(data_path))
    index_words_from_textgrid(lang_model, data_path)

    if word_vec_path is not None:
        lang_model.load_word_vectors(word_vec_path, feat_dim)
    else:
        print('    loaded from {}'.format(cache_path))
        with open(cache_path, 'rb') as f:
            lang_model = pickle.load(f)
        if word_vec_path is None:
            lang_model.word_embedding_weights = None
        elif lang_model.word_embedding_weights.shape[0] != lang_model.n_words:
            logger.warning('    failed to load word embedding weights. check this')
            assert False

    with open(cache_path, 'wb') as f:
        pickle.dump(lang_model, f)

    return lang_model


def index_words(lang_model, data_path):
    # index words from text
    with open(data_path, "r") as f:
        for line in f.readlines():
            line = line.replace(",", " ")
            line = line.replace(".", " ")
            line = line.replace("?", " ")
            line = line.replace("!", " ")
            for word in line.split():
                lang_model.index_word(word)
    print('    indexed %d words' % lang_model.n_words)


def index_words_from_textgrid(lang_model, data_path):
    import textgrid as tg
    from tqdm import tqdm
    #trainvaltest=os.listdir(data_path)
    # for loadtype in trainvaltest:
    #     if "." in loadtype: continue  #ignore .ipynb_checkpoints
    texts = os.listdir(data_path + "/textgrid/")
    #print(texts)
    for textfile in tqdm(texts):
        tgrid = tg.TextGrid.fromFile(data_path + "/textgrid/" + textfile)
        for word in tgrid[0]:
            word_n, word_s, word_e = word.mark, word.minTime, word.maxTime
            word_n = word_n.replace(",", " ")
            word_n = word_n.replace(".", " ")
            word_n = word_n.replace("?", " ")
            word_n = word_n.replace("!", " ")
            #print(word_n)
            lang_model.index_word(word_n)
    print('    indexed %d words' % lang_model.n_words)
    print(lang_model.word2index, lang_model.word2count)


if __name__ == "__main__":
    # 11195 for all, 5793 for 4 speakers
    # build_vocab("beat_english_15_141", "/home/ma-user/work/datasets/beat_cache/beat_english_15_141/", "/home/ma-user/work/datasets/beat_cache/beat_english_15_141/vocab.pkl", "/home/ma-user/work/datasets/cc.en.300.bin", 300)
    build_vocab("beat_chinese_v1.0.0", "/data/datasets/beat_chinese_v1.0.0/", "/data/datasets/beat_chinese_v1.0.0/weights/vocab.pkl", "/home/ma-user/work/cc.zh.300.bin", 300)
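The cached vocab built here is what the gesture dataloaders read back; a minimal sketch of the round trip (all paths below are placeholders, not the ones used in this repo):

# one-off: index every TextGrid under <data_root>/textgrid/ and attach fastText vectors
lang_model = build_vocab("beat_english", "<data_root>", "<data_root>/vocab.pkl",
                         word_vec_path="<weights>/cc.en.300.bin", feat_dim=300)

# later: map transcript words to embedding rows; unseen words fall back to <UNK>
idx = lang_model.get_word_index("hello")
vec = lang_model.word_embedding_weights[idx]  # (300,) float32 row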
dataloaders/data_tools.py  ADDED  @@ -0,0 +1,1756 @@
1 |
+
import numpy as np
|
2 |
+
import glob
|
3 |
+
import os
|
4 |
+
import pickle
|
5 |
+
import lmdb
|
6 |
+
#import pyarrow
|
7 |
+
import fasttext
|
8 |
+
from loguru import logger
|
9 |
+
from scipy import linalg
|
10 |
+
from .pymo.parsers import BVHParser
|
11 |
+
from .pymo.viz_tools import *
|
12 |
+
from .pymo.preprocessing import *
|
13 |
+
|
14 |
+
|
15 |
+
|
16 |
+
|
17 |
+
# pose version fpsxx_trinity/japanese_joints(_xxx)
|
18 |
+
joints_list = {
|
19 |
+
"trinity_joints":{
|
20 |
+
'Hips': [6,6],
|
21 |
+
'Spine': [3,9],
|
22 |
+
'Spine1': [3,12],
|
23 |
+
'Spine2': [3,15],
|
24 |
+
'Spine3': [3,18],
|
25 |
+
'Neck': [3,21],
|
26 |
+
'Neck1': [3,24],
|
27 |
+
'Head': [3,27],
|
28 |
+
'RShoulder': [3,30],
|
29 |
+
'RArm': [3,33],
|
30 |
+
'RArm1': [3,36],
|
31 |
+
'RHand': [3,39],
|
32 |
+
'RHandT1': [3,42],
|
33 |
+
'RHandT2': [3,45],
|
34 |
+
'RHandT3': [3,48],
|
35 |
+
'RHandI1': [3,51],
|
36 |
+
'RHandI2': [3,54],
|
37 |
+
'RHandI3': [3,57],
|
38 |
+
'RHandM1': [3,60],
|
39 |
+
'RHandM2': [3,63],
|
40 |
+
'RHandM3': [3,66],
|
41 |
+
'RHandR1': [3,69],
|
42 |
+
'RHandR2': [3,72],
|
43 |
+
'RHandR3': [3,75],
|
44 |
+
'RHandP1': [3,78],
|
45 |
+
'RHandP2': [3,81],
|
46 |
+
'RHandP3': [3,84],
|
47 |
+
'LShoulder': [3,87],
|
48 |
+
'LArm': [3,90],
|
49 |
+
'LArm1': [3,93],
|
50 |
+
'LHand': [3,96],
|
51 |
+
'LHandT1': [3,99],
|
52 |
+
'LHandT2': [3,102],
|
53 |
+
'LHandT3': [3,105],
|
54 |
+
'LHandI1': [3,108],
|
55 |
+
'LHandI2': [3,111],
|
56 |
+
'LHandI3': [3,114],
|
57 |
+
'LHandM1': [3,117],
|
58 |
+
'LHandM2': [3,120],
|
59 |
+
'LHandM3': [3,123],
|
60 |
+
'LHandR1': [3,126],
|
61 |
+
'LHandR2': [3,129],
|
62 |
+
'LHandR3': [3,132],
|
63 |
+
'LHandP1': [3,135],
|
64 |
+
'LHandP2': [3,138],
|
65 |
+
'LHandP3': [3,141],
|
66 |
+
'RUpLeg': [3,144],
|
67 |
+
'RLeg': [3,147],
|
68 |
+
'RFoot': [3,150],
|
69 |
+
'RFootF': [3,153],
|
70 |
+
'RToeBase': [3,156],
|
71 |
+
'LUpLeg': [3,159],
|
72 |
+
'LLeg': [3,162],
|
73 |
+
'LFoot': [3,165],
|
74 |
+
'LFootF': [3,168],
|
75 |
+
'LToeBase': [3,171],},
|
76 |
+
"trinity_joints_123":{
|
77 |
+
'Spine': 3 ,
|
78 |
+
'Neck': 3 ,
|
79 |
+
'Neck1': 3 ,
|
80 |
+
'RShoulder': 3 ,
|
81 |
+
'RArm': 3 ,
|
82 |
+
'RArm1': 3 ,
|
83 |
+
'RHand': 3 ,
|
84 |
+
'RHandT1': 3 ,
|
85 |
+
'RHandT2': 3 ,
|
86 |
+
'RHandT3': 3 ,
|
87 |
+
'RHandI1': 3 ,
|
88 |
+
'RHandI2': 3 ,
|
89 |
+
'RHandI3': 3 ,
|
90 |
+
'RHandM1': 3 ,
|
91 |
+
'RHandM2': 3 ,
|
92 |
+
'RHandM3': 3 ,
|
93 |
+
'RHandR1': 3 ,
|
94 |
+
'RHandR2': 3 ,
|
95 |
+
'RHandR3': 3 ,
|
96 |
+
'RHandP1': 3 ,
|
97 |
+
'RHandP2': 3 ,
|
98 |
+
'RHandP3': 3 ,
|
99 |
+
'LShoulder': 3 ,
|
100 |
+
'LArm': 3 ,
|
101 |
+
'LArm1': 3 ,
|
102 |
+
'LHand': 3 ,
|
103 |
+
'LHandT1': 3 ,
|
104 |
+
'LHandT2': 3 ,
|
105 |
+
'LHandT3': 3 ,
|
106 |
+
'LHandI1': 3 ,
|
107 |
+
'LHandI2': 3 ,
|
108 |
+
'LHandI3': 3 ,
|
109 |
+
'LHandM1': 3 ,
|
110 |
+
'LHandM2': 3 ,
|
111 |
+
'LHandM3': 3 ,
|
112 |
+
'LHandR1': 3 ,
|
113 |
+
'LHandR2': 3 ,
|
114 |
+
'LHandR3': 3 ,
|
115 |
+
'LHandP1': 3 ,
|
116 |
+
'LHandP2': 3 ,
|
117 |
+
'LHandP3': 3 ,},
|
118 |
+
"trinity_joints_168":{
|
119 |
+
'Hips': 3 ,
|
120 |
+
'Spine': 3 ,
|
121 |
+
'Spine1': 3 ,
|
122 |
+
'Spine2': 3 ,
|
123 |
+
'Spine3': 3 ,
|
124 |
+
'Neck': 3 ,
|
125 |
+
'Neck1': 3 ,
|
126 |
+
'Head': 3 ,
|
127 |
+
'RShoulder': 3 ,
|
128 |
+
'RArm': 3 ,
|
129 |
+
'RArm1': 3 ,
|
130 |
+
'RHand': 3 ,
|
131 |
+
'RHandT1': 3 ,
|
132 |
+
'RHandT2': 3 ,
|
133 |
+
'RHandT3': 3 ,
|
134 |
+
'RHandI1': 3 ,
|
135 |
+
'RHandI2': 3 ,
|
136 |
+
'RHandI3': 3 ,
|
137 |
+
'RHandM1': 3 ,
|
138 |
+
'RHandM2': 3 ,
|
139 |
+
'RHandM3': 3 ,
|
140 |
+
'RHandR1': 3 ,
|
141 |
+
'RHandR2': 3 ,
|
142 |
+
'RHandR3': 3 ,
|
143 |
+
'RHandP1': 3 ,
|
144 |
+
'RHandP2': 3 ,
|
145 |
+
'RHandP3': 3 ,
|
146 |
+
'LShoulder': 3 ,
|
147 |
+
'LArm': 3 ,
|
148 |
+
'LArm1': 3 ,
|
149 |
+
'LHand': 3 ,
|
150 |
+
'LHandT1': 3 ,
|
151 |
+
'LHandT2': 3 ,
|
152 |
+
'LHandT3': 3 ,
|
153 |
+
'LHandI1': 3 ,
|
154 |
+
'LHandI2': 3 ,
|
155 |
+
'LHandI3': 3 ,
|
156 |
+
'LHandM1': 3 ,
|
157 |
+
'LHandM2': 3 ,
|
158 |
+
'LHandM3': 3 ,
|
159 |
+
'LHandR1': 3 ,
|
160 |
+
'LHandR2': 3 ,
|
161 |
+
'LHandR3': 3 ,
|
162 |
+
'LHandP1': 3 ,
|
163 |
+
'LHandP2': 3 ,
|
164 |
+
'LHandP3': 3 ,
|
165 |
+
'RUpLeg': 3 ,
|
166 |
+
'RLeg': 3 ,
|
167 |
+
'RFoot': 3 ,
|
168 |
+
'RFootF': 3 ,
|
169 |
+
'RToeBase': 3 ,
|
170 |
+
'LUpLeg': 3 ,
|
171 |
+
'LLeg': 3 ,
|
172 |
+
'LFoot': 3 ,
|
173 |
+
'LFootF': 3 ,
|
174 |
+
'LToeBase': 3 ,},
|
175 |
+
"trinity_joints_138":{
|
176 |
+
"Hips": 3 ,
|
177 |
+
'Spine': 3 ,
|
178 |
+
'Spine1': 3 ,
|
179 |
+
'Spine2': 3 ,
|
180 |
+
'Spine3': 3 ,
|
181 |
+
'Neck': 3 ,
|
182 |
+
'Neck1': 3 ,
|
183 |
+
'Head': 3 ,
|
184 |
+
'RShoulder': 3 ,
|
185 |
+
'RArm': 3 ,
|
186 |
+
'RArm1': 3 ,
|
187 |
+
'RHand': 3 ,
|
188 |
+
'RHandT1': 3 ,
|
189 |
+
'RHandT2': 3 ,
|
190 |
+
'RHandT3': 3 ,
|
191 |
+
'RHandI1': 3 ,
|
192 |
+
'RHandI2': 3 ,
|
193 |
+
'RHandI3': 3 ,
|
194 |
+
'RHandM1': 3 ,
|
195 |
+
'RHandM2': 3 ,
|
196 |
+
'RHandM3': 3 ,
|
197 |
+
'RHandR1': 3 ,
|
198 |
+
'RHandR2': 3 ,
|
199 |
+
'RHandR3': 3 ,
|
200 |
+
'RHandP1': 3 ,
|
201 |
+
'RHandP2': 3 ,
|
202 |
+
'RHandP3': 3 ,
|
203 |
+
'LShoulder': 3 ,
|
204 |
+
'LArm': 3 ,
|
205 |
+
'LArm1': 3 ,
|
206 |
+
'LHand': 3 ,
|
207 |
+
'LHandT1': 3 ,
|
208 |
+
'LHandT2': 3 ,
|
209 |
+
'LHandT3': 3 ,
|
210 |
+
'LHandI1': 3 ,
|
211 |
+
'LHandI2': 3 ,
|
212 |
+
'LHandI3': 3 ,
|
213 |
+
'LHandM1': 3 ,
|
214 |
+
'LHandM2': 3 ,
|
215 |
+
'LHandM3': 3 ,
|
216 |
+
'LHandR1': 3 ,
|
217 |
+
'LHandR2': 3 ,
|
218 |
+
'LHandR3': 3 ,
|
219 |
+
'LHandP1': 3 ,
|
220 |
+
'LHandP2': 3 ,
|
221 |
+
'LHandP3': 3 ,},
|
222 |
+
"beat_smplx_joints": {
|
223 |
+
'pelvis': [3,3],
|
224 |
+
'left_hip': [3,6],
|
225 |
+
'right_hip': [3,9],
|
226 |
+
'spine1': [3,12],
|
227 |
+
'left_knee': [3,15],
|
228 |
+
'right_knee': [3,18],
|
229 |
+
'spine2': [3,21],
|
230 |
+
'left_ankle': [3,24],
|
231 |
+
'right_ankle': [3,27],
|
232 |
+
|
233 |
+
'spine3': [3,30],
|
234 |
+
'left_foot': [3,33],
|
235 |
+
'right_foot': [3,36],
|
236 |
+
'neck': [3,39],
|
237 |
+
'left_collar': [3,42],
|
238 |
+
'right_collar': [3,45],
|
239 |
+
'head': [3,48],
|
240 |
+
'left_shoulder': [3,51],
|
241 |
+
|
242 |
+
'right_shoulder': [3,54],
|
243 |
+
'left_elbow': [3,57],
|
244 |
+
'right_elbow': [3,60],
|
245 |
+
'left_wrist': [3,63],
|
246 |
+
'right_wrist': [3,66],
|
247 |
+
|
248 |
+
'jaw': [3,69],
|
249 |
+
'left_eye_smplhf': [3,72],
|
250 |
+
'right_eye_smplhf': [3,75],
|
251 |
+
'left_index1': [3,78],
|
252 |
+
'left_index2': [3,81],
|
253 |
+
|
254 |
+
'left_index3': [3,84],
|
255 |
+
'left_middle1': [3,87],
|
256 |
+
'left_middle2': [3,90],
|
257 |
+
'left_middle3': [3,93],
|
258 |
+
'left_pinky1': [3,96],
|
259 |
+
|
260 |
+
'left_pinky2': [3,99],
|
261 |
+
'left_pinky3': [3,102],
|
262 |
+
'left_ring1': [3,105],
|
263 |
+
'left_ring2': [3,108],
|
264 |
+
|
265 |
+
'left_ring3': [3,111],
|
266 |
+
'left_thumb1': [3,114],
|
267 |
+
'left_thumb2': [3,117],
|
268 |
+
'left_thumb3': [3,120],
|
269 |
+
'right_index1': [3,123],
|
270 |
+
'right_index2': [3,126],
|
271 |
+
'right_index3': [3,129],
|
272 |
+
'right_middle1': [3,132],
|
273 |
+
|
274 |
+
'right_middle2': [3,135],
|
275 |
+
'right_middle3': [3,138],
|
276 |
+
'right_pinky1': [3,141],
|
277 |
+
'right_pinky2': [3,144],
|
278 |
+
'right_pinky3': [3,147],
|
279 |
+
|
280 |
+
'right_ring1': [3,150],
|
281 |
+
'right_ring2': [3,153],
|
282 |
+
'right_ring3': [3,156],
|
283 |
+
'right_thumb1': [3,159],
|
284 |
+
'right_thumb2': [3,162],
|
285 |
+
'right_thumb3': [3,165],
|
286 |
+
|
287 |
+
# 'nose': [3,168],
|
288 |
+
# 'right_eye': [3,171],
|
289 |
+
# 'left_eye': [3,174],
|
290 |
+
# 'right_ear': [3,177],
|
291 |
+
|
292 |
+
# 'left_ear': [3,180],
|
293 |
+
# 'left_big_toe': [3,183],
|
294 |
+
# 'left_small_toe': [3,186],
|
295 |
+
# 'left_heel': [3,189],
|
296 |
+
|
297 |
+
# 'right_big_toe': [3,192],
|
298 |
+
# 'right_small_toe': [3,195],
|
299 |
+
# 'right_heel': [3,198],
|
300 |
+
# 'left_thumb': [3,201],
|
301 |
+
# 'left_index': [3,204],
|
302 |
+
# 'left_middle': [3,207],
|
303 |
+
|
304 |
+
# 'left_ring': [3,210],
|
305 |
+
# 'left_pinky': [3,213],
|
306 |
+
# 'right_thumb': [3,216],
|
307 |
+
# 'right_index': [3,219],
|
308 |
+
# 'right_middle': [3,222],
|
309 |
+
# 'right_ring': [3,225],
|
310 |
+
|
311 |
+
# 'right_pinky': [3,228],
|
312 |
+
# 'right_eye_brow1': [3,231],
|
313 |
+
# 'right_eye_brow2': [3,234],
|
314 |
+
# 'right_eye_brow3': [3,237],
|
315 |
+
|
316 |
+
# 'right_eye_brow4': [3,240],
|
317 |
+
# 'right_eye_brow5': [3,243],
|
318 |
+
# 'left_eye_brow5': [3,246],
|
319 |
+
# 'left_eye_brow4': [3,249],
|
320 |
+
|
321 |
+
# 'left_eye_brow3': [3,252],
|
322 |
+
# 'left_eye_brow2': [3,255],
|
323 |
+
# 'left_eye_brow1': [3,258],
|
324 |
+
# 'nose1': [3,261],
|
325 |
+
# 'nose2': [3,264],
|
326 |
+
# 'nose3': [3,267],
|
327 |
+
|
328 |
+
# 'nose4': [3,270],
|
329 |
+
# 'right_nose_2': [3,273],
|
330 |
+
# 'right_nose_1': [3,276],
|
331 |
+
# 'nose_middle': [3,279],
|
332 |
+
# 'left_nose_1': [3,282],
|
333 |
+
# 'left_nose_2': [3,285],
|
334 |
+
|
335 |
+
# 'right_eye1': [3,288],
|
336 |
+
# 'right_eye2': [3,291],
|
337 |
+
# 'right_eye3': [3,294],
|
338 |
+
# 'right_eye4': [3,297],
|
339 |
+
|
340 |
+
# 'right_eye5': [3,300],
|
341 |
+
# 'right_eye6': [3,303],
|
342 |
+
# 'left_eye4': [3,306],
|
343 |
+
# 'left_eye3': [3,309],
|
344 |
+
|
345 |
+
# 'left_eye2': [3,312],
|
346 |
+
# 'left_eye1': [3,315],
|
347 |
+
# 'left_eye6': [3,318],
|
348 |
+
# 'left_eye5': [3,321],
|
349 |
+
# 'right_mouth_1': [3,324],
|
350 |
+
# 'right_mouth_2': [3,327],
|
351 |
+
# 'right_mouth_3': [3,330],
|
352 |
+
# 'mouth_top': [3,333],
|
353 |
+
# 'left_mouth_3': [3,336],
|
354 |
+
# 'left_mouth_2': [3,339],
|
355 |
+
# 'left_mouth_1': [3,342],
|
356 |
+
# 'left_mouth_5': [3,345],
|
357 |
+
# 'left_mouth_4': [3,348],
|
358 |
+
# 'mouth_bottom': [3,351],
|
359 |
+
# 'right_mouth_4': [3,354],
|
360 |
+
# 'right_mouth_5': [3,357],
|
361 |
+
# 'right_lip_1': [3,360],
|
362 |
+
# 'right_lip_2': [3,363],
|
363 |
+
# 'lip_top': [3,366],
|
364 |
+
# 'left_lip_2': [3,369],
|
365 |
+
|
366 |
+
# 'left_lip_1': [3,372],
|
367 |
+
# 'left_lip_3': [3,375],
|
368 |
+
# 'lip_bottom': [3,378],
|
369 |
+
# 'right_lip_3': [3,381],
|
370 |
+
# 'right_contour_1': [3,384],
|
371 |
+
# 'right_contour_2': [3,387],
|
372 |
+
# 'right_contour_3': [3,390],
|
373 |
+
# 'right_contour_4': [3,393],
|
374 |
+
# 'right_contour_5': [3,396],
|
375 |
+
# 'right_contour_6': [3,399],
|
376 |
+
# 'right_contour_7': [3,402],
|
377 |
+
# 'right_contour_8': [3,405],
|
378 |
+
# 'contour_middle': [3,408],
|
379 |
+
# 'left_contour_8': [3,411],
|
380 |
+
# 'left_contour_7': [3,414],
|
381 |
+
# 'left_contour_6': [3,417],
|
382 |
+
# 'left_contour_5': [3,420],
|
383 |
+
# 'left_contour_4': [3,423],
|
384 |
+
# 'left_contour_3': [3,426],
|
385 |
+
# 'left_contour_2': [3,429],
|
386 |
+
# 'left_contour_1': [3,432],
|
387 |
+
},
|
388 |
+
|
389 |
+
"beat_smplx_no_eyes": {
|
390 |
+
"pelvis":3,
|
391 |
+
"left_hip":3,
|
392 |
+
"right_hip":3,
|
393 |
+
"spine1":3,
|
394 |
+
"left_knee":3,
|
395 |
+
"right_knee":3,
|
396 |
+
"spine2":3,
|
397 |
+
"left_ankle":3,
|
398 |
+
"right_ankle":3,
|
399 |
+
"spine3":3,
|
400 |
+
"left_foot":3,
|
401 |
+
"right_foot":3,
|
402 |
+
"neck":3,
|
403 |
+
"left_collar":3,
|
404 |
+
"right_collar":3,
|
405 |
+
"head":3,
|
406 |
+
"left_shoulder":3,
|
407 |
+
"right_shoulder":3,
|
408 |
+
"left_elbow":3,
|
409 |
+
"right_elbow":3,
|
410 |
+
"left_wrist":3,
|
411 |
+
"right_wrist":3,
|
412 |
+
"jaw":3,
|
413 |
+
# "left_eye_smplhf":3,
|
414 |
+
# "right_eye_smplhf":3,
|
415 |
+
"left_index1":3,
|
416 |
+
"left_index2":3,
|
417 |
+
"left_index3":3,
|
418 |
+
"left_middle1":3,
|
419 |
+
"left_middle2":3,
|
420 |
+
"left_middle3":3,
|
421 |
+
"left_pinky1":3,
|
422 |
+
"left_pinky2":3,
|
423 |
+
"left_pinky3":3,
|
424 |
+
"left_ring1":3,
|
425 |
+
"left_ring2":3,
|
426 |
+
"left_ring3":3,
|
427 |
+
"left_thumb1":3,
|
428 |
+
"left_thumb2":3,
|
429 |
+
"left_thumb3":3,
|
430 |
+
"right_index1":3,
|
431 |
+
"right_index2":3,
|
432 |
+
"right_index3":3,
|
433 |
+
"right_middle1":3,
|
434 |
+
"right_middle2":3,
|
435 |
+
"right_middle3":3,
|
436 |
+
"right_pinky1":3,
|
437 |
+
"right_pinky2":3,
|
438 |
+
"right_pinky3":3,
|
439 |
+
"right_ring1":3,
|
440 |
+
"right_ring2":3,
|
441 |
+
"right_ring3":3,
|
442 |
+
"right_thumb1":3,
|
443 |
+
"right_thumb2":3,
|
444 |
+
"right_thumb3":3,
|
445 |
+
},
|
446 |
+
|
447 |
+
"beat_smplx_full": {
|
448 |
+
"pelvis":3,
|
449 |
+
"left_hip":3,
|
450 |
+
"right_hip":3,
|
451 |
+
"spine1":3,
|
452 |
+
"left_knee":3,
|
453 |
+
"right_knee":3,
|
454 |
+
"spine2":3,
|
455 |
+
"left_ankle":3,
|
456 |
+
"right_ankle":3,
|
457 |
+
"spine3":3,
|
458 |
+
"left_foot":3,
|
459 |
+
"right_foot":3,
|
460 |
+
"neck":3,
|
461 |
+
"left_collar":3,
|
462 |
+
"right_collar":3,
|
463 |
+
"head":3,
|
464 |
+
"left_shoulder":3,
|
465 |
+
"right_shoulder":3,
|
466 |
+
"left_elbow":3,
|
467 |
+
"right_elbow":3,
|
468 |
+
"left_wrist":3,
|
469 |
+
"right_wrist":3,
|
470 |
+
"jaw":3,
|
471 |
+
"left_eye_smplhf":3,
|
472 |
+
"right_eye_smplhf":3,
|
473 |
+
"left_index1":3,
|
474 |
+
"left_index2":3,
|
475 |
+
"left_index3":3,
|
476 |
+
"left_middle1":3,
|
477 |
+
"left_middle2":3,
|
478 |
+
"left_middle3":3,
|
479 |
+
"left_pinky1":3,
|
480 |
+
"left_pinky2":3,
|
481 |
+
"left_pinky3":3,
|
482 |
+
"left_ring1":3,
|
483 |
+
"left_ring2":3,
|
484 |
+
"left_ring3":3,
|
485 |
+
"left_thumb1":3,
|
486 |
+
"left_thumb2":3,
|
487 |
+
"left_thumb3":3,
|
488 |
+
"right_index1":3,
|
489 |
+
"right_index2":3,
|
490 |
+
"right_index3":3,
|
491 |
+
"right_middle1":3,
|
492 |
+
"right_middle2":3,
|
493 |
+
"right_middle3":3,
|
494 |
+
"right_pinky1":3,
|
495 |
+
"right_pinky2":3,
|
496 |
+
"right_pinky3":3,
|
497 |
+
"right_ring1":3,
|
498 |
+
"right_ring2":3,
|
499 |
+
"right_ring3":3,
|
500 |
+
"right_thumb1":3,
|
501 |
+
"right_thumb2":3,
|
502 |
+
"right_thumb3":3,
|
503 |
+
},
|
504 |
+
|
505 |
+
"beat_smplx_upall": {
|
506 |
+
# "pelvis":3,
|
507 |
+
# "left_hip":3,
|
508 |
+
# "right_hip":3,
|
509 |
+
"spine1":3,
|
510 |
+
# "left_knee":3,
|
511 |
+
# "right_knee":3,
|
512 |
+
"spine2":3,
|
513 |
+
# "left_ankle":3,
|
514 |
+
# "right_ankle":3,
|
515 |
+
"spine3":3,
|
516 |
+
# "left_foot":3,
|
517 |
+
# "right_foot":3,
|
518 |
+
"neck":3,
|
519 |
+
"left_collar":3,
|
520 |
+
"right_collar":3,
|
521 |
+
"head":3,
|
522 |
+
"left_shoulder":3,
|
523 |
+
"right_shoulder":3,
|
524 |
+
"left_elbow":3,
|
525 |
+
"right_elbow":3,
|
526 |
+
"left_wrist":3,
|
527 |
+
"right_wrist":3,
|
528 |
+
# "jaw":3,
|
529 |
+
# "left_eye_smplhf":3,
|
530 |
+
# "right_eye_smplhf":3,
|
531 |
+
"left_index1":3,
|
532 |
+
"left_index2":3,
|
533 |
+
"left_index3":3,
|
534 |
+
"left_middle1":3,
|
535 |
+
"left_middle2":3,
|
536 |
+
"left_middle3":3,
|
537 |
+
"left_pinky1":3,
|
538 |
+
"left_pinky2":3,
|
539 |
+
"left_pinky3":3,
|
540 |
+
"left_ring1":3,
|
541 |
+
"left_ring2":3,
|
542 |
+
"left_ring3":3,
|
543 |
+
"left_thumb1":3,
|
544 |
+
"left_thumb2":3,
|
545 |
+
"left_thumb3":3,
|
546 |
+
"right_index1":3,
|
547 |
+
"right_index2":3,
|
548 |
+
"right_index3":3,
|
549 |
+
"right_middle1":3,
|
550 |
+
"right_middle2":3,
|
551 |
+
"right_middle3":3,
|
552 |
+
"right_pinky1":3,
|
553 |
+
"right_pinky2":3,
|
554 |
+
"right_pinky3":3,
|
555 |
+
"right_ring1":3,
|
556 |
+
"right_ring2":3,
|
557 |
+
"right_ring3":3,
|
558 |
+
"right_thumb1":3,
|
559 |
+
"right_thumb2":3,
|
560 |
+
"right_thumb3":3,
|
561 |
+
},
|
562 |
+
|
563 |
+
"beat_smplx_upper": {
|
564 |
+
#"pelvis":3,
|
565 |
+
# "left_hip":3,
|
566 |
+
# "right_hip":3,
|
567 |
+
"spine1":3,
|
568 |
+
# "left_knee":3,
|
569 |
+
# "right_knee":3,
|
570 |
+
"spine2":3,
|
571 |
+
# "left_ankle":3,
|
572 |
+
# "right_ankle":3,
|
573 |
+
"spine3":3,
|
574 |
+
# "left_foot":3,
|
575 |
+
# "right_foot":3,
|
576 |
+
"neck":3,
|
577 |
+
"left_collar":3,
|
578 |
+
"right_collar":3,
|
579 |
+
"head":3,
|
580 |
+
"left_shoulder":3,
|
581 |
+
"right_shoulder":3,
|
582 |
+
"left_elbow":3,
|
583 |
+
"right_elbow":3,
|
584 |
+
"left_wrist":3,
|
585 |
+
"right_wrist":3,
|
586 |
+
# "jaw":3,
|
587 |
+
# "left_eye_smplhf":3,
|
588 |
+
# "right_eye_smplhf":3,
|
589 |
+
# "left_index1":3,
|
590 |
+
# "left_index2":3,
|
591 |
+
# "left_index3":3,
|
592 |
+
# "left_middle1":3,
|
593 |
+
# "left_middle2":3,
|
594 |
+
# "left_middle3":3,
|
595 |
+
# "left_pinky1":3,
|
596 |
+
# "left_pinky2":3,
|
597 |
+
# "left_pinky3":3,
|
598 |
+
# "left_ring1":3,
|
599 |
+
# "left_ring2":3,
|
600 |
+
# "left_ring3":3,
|
601 |
+
# "left_thumb1":3,
|
602 |
+
# "left_thumb2":3,
|
603 |
+
# "left_thumb3":3,
|
604 |
+
# "right_index1":3,
|
605 |
+
# "right_index2":3,
|
606 |
+
# "right_index3":3,
|
607 |
+
# "right_middle1":3,
|
608 |
+
# "right_middle2":3,
|
609 |
+
# "right_middle3":3,
|
610 |
+
# "right_pinky1":3,
|
611 |
+
# "right_pinky2":3,
|
612 |
+
# "right_pinky3":3,
|
613 |
+
# "right_ring1":3,
|
614 |
+
# "right_ring2":3,
|
615 |
+
# "right_ring3":3,
|
616 |
+
# "right_thumb1":3,
|
617 |
+
# "right_thumb2":3,
|
618 |
+
# "right_thumb3":3,
|
619 |
+
},
|
620 |
+
|
621 |
+
"beat_smplx_hands": {
|
622 |
+
#"pelvis":3,
|
623 |
+
# "left_hip":3,
|
624 |
+
# "right_hip":3,
|
625 |
+
# "spine1":3,
|
626 |
+
# "left_knee":3,
|
627 |
+
# "right_knee":3,
|
628 |
+
# "spine2":3,
|
629 |
+
# "left_ankle":3,
|
630 |
+
# "right_ankle":3,
|
631 |
+
# "spine3":3,
|
632 |
+
# "left_foot":3,
|
633 |
+
# "right_foot":3,
|
634 |
+
# "neck":3,
|
635 |
+
# "left_collar":3,
|
636 |
+
# "right_collar":3,
|
637 |
+
# "head":3,
|
638 |
+
# "left_shoulder":3,
|
639 |
+
# "right_shoulder":3,
|
640 |
+
# "left_elbow":3,
|
641 |
+
# "right_elbow":3,
|
642 |
+
# "left_wrist":3,
|
643 |
+
# "right_wrist":3,
|
644 |
+
# "jaw":3,
|
645 |
+
# "left_eye_smplhf":3,
|
646 |
+
# "right_eye_smplhf":3,
|
647 |
+
"left_index1":3,
|
648 |
+
"left_index2":3,
|
649 |
+
"left_index3":3,
|
650 |
+
"left_middle1":3,
|
651 |
+
"left_middle2":3,
|
652 |
+
"left_middle3":3,
|
653 |
+
"left_pinky1":3,
|
654 |
+
"left_pinky2":3,
|
655 |
+
"left_pinky3":3,
|
656 |
+
"left_ring1":3,
|
657 |
+
"left_ring2":3,
|
658 |
+
"left_ring3":3,
|
659 |
+
"left_thumb1":3,
|
660 |
+
"left_thumb2":3,
|
661 |
+
"left_thumb3":3,
|
662 |
+
"right_index1":3,
|
663 |
+
"right_index2":3,
|
664 |
+
"right_index3":3,
|
665 |
+
"right_middle1":3,
|
666 |
+
"right_middle2":3,
|
667 |
+
"right_middle3":3,
|
668 |
+
"right_pinky1":3,
|
669 |
+
"right_pinky2":3,
|
670 |
+
"right_pinky3":3,
|
671 |
+
"right_ring1":3,
|
672 |
+
"right_ring2":3,
|
673 |
+
"right_ring3":3,
|
674 |
+
"right_thumb1":3,
|
675 |
+
"right_thumb2":3,
|
676 |
+
"right_thumb3":3,
|
677 |
+
},
|
678 |
+
|
679 |
+
"beat_smplx_lower": {
|
680 |
+
"pelvis":3,
|
681 |
+
"left_hip":3,
|
682 |
+
"right_hip":3,
|
683 |
+
# "spine1":3,
|
684 |
+
"left_knee":3,
|
685 |
+
"right_knee":3,
|
686 |
+
# "spine2":3,
|
687 |
+
"left_ankle":3,
|
688 |
+
"right_ankle":3,
|
689 |
+
# "spine3":3,
|
690 |
+
"left_foot":3,
|
691 |
+
"right_foot":3,
|
692 |
+
# "neck":3,
|
693 |
+
# "left_collar":3,
|
694 |
+
# "right_collar":3,
|
695 |
+
# "head":3,
|
696 |
+
# "left_shoulder":3,
|
697 |
+
# "right_shoulder":3,
|
698 |
+
# "left_elbow":3,
|
699 |
+
# "right_elbow":3,
|
700 |
+
# "left_wrist":3,
|
701 |
+
# "right_wrist":3,
|
702 |
+
# "jaw":3,
|
703 |
+
# "left_eye_smplhf":3,
|
704 |
+
# "right_eye_smplhf":3,
|
705 |
+
# "left_index1":3,
|
706 |
+
# "left_index2":3,
|
707 |
+
# "left_index3":3,
|
708 |
+
# "left_middle1":3,
|
709 |
+
# "left_middle2":3,
|
710 |
+
# "left_middle3":3,
|
711 |
+
# "left_pinky1":3,
|
712 |
+
# "left_pinky2":3,
|
713 |
+
# "left_pinky3":3,
|
714 |
+
# "left_ring1":3,
|
715 |
+
# "left_ring2":3,
|
716 |
+
# "left_ring3":3,
|
717 |
+
# "left_thumb1":3,
|
718 |
+
# "left_thumb2":3,
|
719 |
+
# "left_thumb3":3,
|
720 |
+
# "right_index1":3,
|
721 |
+
# "right_index2":3,
|
722 |
+
# "right_index3":3,
|
723 |
+
# "right_middle1":3,
|
724 |
+
# "right_middle2":3,
|
725 |
+
# "right_middle3":3,
|
726 |
+
# "right_pinky1":3,
|
727 |
+
# "right_pinky2":3,
|
728 |
+
# "right_pinky3":3,
|
729 |
+
# "right_ring1":3,
|
730 |
+
# "right_ring2":3,
|
731 |
+
# "right_ring3":3,
|
732 |
+
# "right_thumb1":3,
|
733 |
+
# "right_thumb2":3,
|
734 |
+
# "right_thumb3":3,
|
735 |
+
},
|
736 |
+
|
737 |
+
"beat_smplx_face": {
|
738 |
+
# "pelvis":3,
|
739 |
+
# "left_hip":3,
|
740 |
+
# "right_hip":3,
|
741 |
+
# # "spine1":3,
|
742 |
+
# "left_knee":3,
|
743 |
+
# "right_knee":3,
|
744 |
+
# # "spine2":3,
|
745 |
+
# "left_ankle":3,
|
746 |
+
# "right_ankle":3,
|
747 |
+
# # "spine3":3,
|
748 |
+
# "left_foot":3,
|
749 |
+
# "right_foot":3,
|
750 |
+
# "neck":3,
|
751 |
+
# "left_collar":3,
|
752 |
+
# "right_collar":3,
|
753 |
+
# "head":3,
|
754 |
+
# "left_shoulder":3,
|
755 |
+
# "right_shoulder":3,
|
756 |
+
# "left_elbow":3,
|
757 |
+
# "right_elbow":3,
|
758 |
+
# "left_wrist":3,
|
759 |
+
# "right_wrist":3,
|
760 |
+
"jaw":3,
|
761 |
+
# "left_eye_smplhf":3,
|
762 |
+
# "right_eye_smplhf":3,
|
763 |
+
# "left_index1":3,
|
764 |
+
# "left_index2":3,
|
765 |
+
# "left_index3":3,
|
766 |
+
# "left_middle1":3,
|
767 |
+
# "left_middle2":3,
|
768 |
+
# "left_middle3":3,
|
769 |
+
# "left_pinky1":3,
|
770 |
+
# "left_pinky2":3,
|
771 |
+
# "left_pinky3":3,
|
772 |
+
# "left_ring1":3,
|
773 |
+
# "left_ring2":3,
|
774 |
+
# "left_ring3":3,
|
775 |
+
# "left_thumb1":3,
|
776 |
+
# "left_thumb2":3,
|
777 |
+
# "left_thumb3":3,
|
778 |
+
# "right_index1":3,
|
779 |
+
# "right_index2":3,
|
780 |
+
# "right_index3":3,
|
781 |
+
# "right_middle1":3,
|
782 |
+
# "right_middle2":3,
|
783 |
+
# "right_middle3":3,
|
784 |
+
# "right_pinky1":3,
|
785 |
+
# "right_pinky2":3,
|
786 |
+
# "right_pinky3":3,
|
787 |
+
# "right_ring1":3,
|
788 |
+
# "right_ring2":3,
|
789 |
+
# "right_ring3":3,
|
790 |
+
# "right_thumb1":3,
|
791 |
+
# "right_thumb2":3,
|
792 |
+
# "right_thumb3":3,
|
793 |
+
},
|
794 |
+
|
795 |
+
"beat_joints": {
|
796 |
+
'Hips': [6,6],
|
797 |
+
'Spine': [3,9],
|
798 |
+
'Spine1': [3,12],
|
799 |
+
'Spine2': [3,15],
|
800 |
+
'Spine3': [3,18],
|
801 |
+
'Neck': [3,21],
|
802 |
+
'Neck1': [3,24],
|
803 |
+
'Head': [3,27],
|
804 |
+
'HeadEnd': [3,30],
|
805 |
+
|
806 |
+
'RShoulder': [3,33],
|
807 |
+
'RArm': [3,36],
|
808 |
+
'RArm1': [3,39],
|
809 |
+
'RHand': [3,42],
|
810 |
+
'RHandM1': [3,45],
|
811 |
+
'RHandM2': [3,48],
|
812 |
+
'RHandM3': [3,51],
|
813 |
+
'RHandM4': [3,54],
|
814 |
+
|
815 |
+
'RHandR': [3,57],
|
816 |
+
'RHandR1': [3,60],
|
817 |
+
'RHandR2': [3,63],
|
818 |
+
'RHandR3': [3,66],
|
819 |
+
'RHandR4': [3,69],
|
820 |
+
|
821 |
+
'RHandP': [3,72],
|
822 |
+
'RHandP1': [3,75],
|
823 |
+
'RHandP2': [3,78],
|
824 |
+
'RHandP3': [3,81],
|
825 |
+
'RHandP4': [3,84],
|
826 |
+
|
827 |
+
'RHandI': [3,87],
|
828 |
+
'RHandI1': [3,90],
|
829 |
+
'RHandI2': [3,93],
|
830 |
+
'RHandI3': [3,96],
|
831 |
+
'RHandI4': [3,99],
|
832 |
+
|
833 |
+
'RHandT1': [3,102],
|
834 |
+
'RHandT2': [3,105],
|
835 |
+
'RHandT3': [3,108],
|
836 |
+
'RHandT4': [3,111],
|
837 |
+
|
838 |
+
'LShoulder': [3,114],
|
839 |
+
'LArm': [3,117],
|
840 |
+
'LArm1': [3,120],
|
841 |
+
'LHand': [3,123],
|
842 |
+
'LHandM1': [3,126],
|
843 |
+
'LHandM2': [3,129],
|
844 |
+
'LHandM3': [3,132],
|
845 |
+
'LHandM4': [3,135],
|
846 |
+
|
847 |
+
'LHandR': [3,138],
|
848 |
+
'LHandR1': [3,141],
|
849 |
+
'LHandR2': [3,144],
|
850 |
+
'LHandR3': [3,147],
|
851 |
+
'LHandR4': [3,150],
|
852 |
+
|
853 |
+
'LHandP': [3,153],
|
854 |
+
'LHandP1': [3,156],
|
855 |
+
'LHandP2': [3,159],
|
856 |
+
'LHandP3': [3,162],
|
857 |
+
'LHandP4': [3,165],
|
858 |
+
|
859 |
+
'LHandI': [3,168],
|
860 |
+
'LHandI1': [3,171],
|
861 |
+
'LHandI2': [3,174],
|
862 |
+
'LHandI3': [3,177],
|
863 |
+
'LHandI4': [3,180],
|
864 |
+
|
865 |
+
'LHandT1': [3,183],
|
866 |
+
'LHandT2': [3,186],
|
867 |
+
'LHandT3': [3,189],
|
868 |
+
'LHandT4': [3,192],
|
869 |
+
|
870 |
+
'RUpLeg': [3,195],
|
871 |
+
'RLeg': [3,198],
|
872 |
+
'RFoot': [3,201],
|
873 |
+
'RFootF': [3,204],
|
874 |
+
'RToeBase': [3,207],
|
875 |
+
'RToeBaseEnd': [3,210],
|
876 |
+
|
877 |
+
'LUpLeg': [3,213],
|
878 |
+
'LLeg': [3,216],
|
879 |
+
'LFoot': [3,219],
|
880 |
+
'LFootF': [3,222],
|
881 |
+
'LToeBase': [3,225],
|
882 |
+
'LToeBaseEnd': [3,228],},
|
883 |
+
|
884 |
+
"beat_full":{
|
885 |
+
'Hips': 3,
|
886 |
+
'Spine': 3 ,
|
887 |
+
'Spine1': 3 ,
|
888 |
+
'Spine2': 3 ,
|
889 |
+
'Spine3': 3 ,
|
890 |
+
'Neck': 3 ,
|
891 |
+
'Neck1': 3 ,
|
892 |
+
'Head' : 3,
|
893 |
+
'HeadEnd' : 3,
|
894 |
+
'RShoulder': 3 ,
|
895 |
+
'RArm': 3 ,
|
896 |
+
'RArm1': 3 ,
|
897 |
+
'RHand': 3 ,
|
898 |
+
'RHandM1': 3 ,
|
899 |
+
'RHandM2': 3 ,
|
900 |
+
'RHandM3': 3 ,
|
901 |
+
'RHandM4': 3 ,
|
902 |
+
'RHandR': 3 ,
|
903 |
+
'RHandR1': 3 ,
|
904 |
+
'RHandR2': 3 ,
|
905 |
+
'RHandR3': 3 ,
|
906 |
+
'RHandR4': 3 ,
|
907 |
+
'RHandP': 3 ,
|
908 |
+
'RHandP1': 3 ,
|
909 |
+
'RHandP2': 3 ,
|
910 |
+
'RHandP3': 3 ,
|
911 |
+
'RHandP4': 3 ,
|
912 |
+
'RHandI': 3 ,
|
913 |
+
'RHandI1': 3 ,
|
914 |
+
'RHandI2': 3 ,
|
915 |
+
'RHandI3': 3 ,
|
916 |
+
'RHandI4': 3 ,
|
917 |
+
'RHandT1': 3 ,
|
918 |
+
'RHandT2': 3 ,
|
919 |
+
'RHandT3': 3 ,
|
920 |
+
'RHandT4': 3 ,
|
921 |
+
'LShoulder': 3 ,
|
922 |
+
'LArm': 3 ,
|
923 |
+
'LArm1': 3 ,
|
924 |
+
'LHand': 3 ,
|
925 |
+
'LHandM1': 3 ,
|
926 |
+
'LHandM2': 3 ,
|
927 |
+
'LHandM3': 3 ,
|
928 |
+
'LHandM4': 3 ,
|
929 |
+
'LHandR': 3 ,
|
930 |
+
'LHandR1': 3 ,
|
931 |
+
'LHandR2': 3 ,
|
932 |
+
'LHandR3': 3 ,
|
933 |
+
'LHandR4': 3 ,
|
934 |
+
'LHandP': 3 ,
|
935 |
+
'LHandP1': 3 ,
|
936 |
+
'LHandP2': 3 ,
|
937 |
+
'LHandP3': 3 ,
|
938 |
+
'LHandP4': 3 ,
|
939 |
+
'LHandI': 3 ,
|
940 |
+
'LHandI1': 3 ,
|
941 |
+
'LHandI2': 3 ,
|
942 |
+
'LHandI3': 3 ,
|
943 |
+
'LHandI4': 3 ,
|
944 |
+
'LHandT1': 3 ,
|
945 |
+
'LHandT2': 3 ,
|
946 |
+
'LHandT3': 3 ,
|
947 |
+
'LHandT4': 3 ,
|
948 |
+
'RUpLeg': 3,
|
949 |
+
'RLeg': 3,
|
950 |
+
'RFoot': 3,
|
951 |
+
'RFootF': 3,
|
952 |
+
'RToeBase': 3,
|
953 |
+
'RToeBaseEnd': 3,
|
954 |
+
'LUpLeg': 3,
|
955 |
+
'LLeg': 3,
|
956 |
+
'LFoot': 3,
|
957 |
+
'LFootF': 3,
|
958 |
+
'LToeBase': 3,
|
959 |
+
'LToeBaseEnd': 3,
|
960 |
+
},
|
961 |
+
|
962 |
+
"japanese_joints":{
|
963 |
+
'Hips': [6,6],
|
964 |
+
'Spine': [6,12],
|
965 |
+
'Spine1': [6,18],
|
966 |
+
'Spine2': [6,24],
|
967 |
+
'Spine3': [6,30],
|
968 |
+
'Neck': [6,36],
|
969 |
+
'Neck1': [6,42],
|
970 |
+
'Head': [6,48],
|
971 |
+
'RShoulder': [6,54],
|
972 |
+
'RArm': [6,60],
|
973 |
+
'RArm1': [6,66],
|
974 |
+
'RHand': [6,72],
|
975 |
+
'RHandM1': [6,78],
|
976 |
+
'RHandM2': [6,84],
|
977 |
+
'RHandM3': [6,90],
|
978 |
+
'RHandR': [6,96],
|
979 |
+
'RHandR1': [6,102],
|
980 |
+
'RHandR2': [6,108],
|
981 |
+
'RHandR3': [6,114],
|
982 |
+
'RHandP': [6,120],
|
983 |
+
'RHandP1': [6,126],
|
984 |
+
'RHandP2': [6,132],
|
985 |
+
'RHandP3': [6,138],
|
986 |
+
'RHandI': [6,144],
|
987 |
+
'RHandI1': [6,150],
|
988 |
+
'RHandI2': [6,156],
|
989 |
+
'RHandI3': [6,162],
|
990 |
+
'RHandT1': [6,168],
|
991 |
+
'RHandT2': [6,174],
|
992 |
+
'RHandT3': [6,180],
|
993 |
+
'LShoulder': [6,186],
|
994 |
+
'LArm': [6,192],
|
995 |
+
'LArm1': [6,198],
|
996 |
+
'LHand': [6,204],
|
997 |
+
'LHandM1': [6,210],
|
998 |
+
'LHandM2': [6,216],
|
999 |
+
'LHandM3': [6,222],
|
1000 |
+
'LHandR': [6,228],
|
1001 |
+
'LHandR1': [6,234],
|
1002 |
+
'LHandR2': [6,240],
|
1003 |
+
'LHandR3': [6,246],
|
1004 |
+
'LHandP': [6,252],
|
1005 |
+
'LHandP1': [6,258],
|
1006 |
+
'LHandP2': [6,264],
|
1007 |
+
'LHandP3': [6,270],
|
1008 |
+
'LHandI': [6,276],
|
1009 |
+
'LHandI1': [6,282],
|
1010 |
+
'LHandI2': [6,288],
|
1011 |
+
'LHandI3': [6,294],
|
1012 |
+
'LHandT1': [6,300],
|
1013 |
+
'LHandT2': [6,306],
|
1014 |
+
'LHandT3': [6,312],
|
1015 |
+
'RUpLeg': [6,318],
|
1016 |
+
'RLeg': [6,324],
|
1017 |
+
'RFoot': [6,330],
|
1018 |
+
'RFootF': [6,336],
|
1019 |
+
'RToeBase': [6,342],
|
1020 |
+
'LUpLeg': [6,348],
|
1021 |
+
'LLeg': [6,354],
|
1022 |
+
'LFoot': [6,360],
|
1023 |
+
'LFootF': [6,366],
|
1024 |
+
'LToeBase': [6,372],},
|
1025 |
+
|
1026 |
+
"yostar":{
|
1027 |
+
'Hips': [6,6],
|
1028 |
+
'Spine': [3,9],
|
1029 |
+
'Spine1': [3,12],
|
1030 |
+
'Bone040': [3,15],
|
1031 |
+
'Bone041': [3,18],
|
1032 |
+
|
1033 |
+
'Bone034': [3,21],
|
1034 |
+
'Bone035': [3,24],
|
1035 |
+
'Bone036': [3,27],
|
1036 |
+
'Bone037': [3,30],
|
1037 |
+
'Bone038': [3,33],
|
1038 |
+
'Bone039': [3,36],
|
1039 |
+
|
1040 |
+
'RibbonL1': [3,39],
|
1041 |
+
'RibbonL1_end': [3,42],
|
1042 |
+
|
1043 |
+
'Chest': [3,45],
|
1044 |
+
'L_eri': [3,48],
|
1045 |
+
'R_eri': [3,51],
|
1046 |
+
'Neck': [3,54],
|
1047 |
+
'Head': [3,57],
|
1048 |
+
'Head_end': [3,60],
|
1049 |
+
|
1050 |
+
'RBackHair_1': [3,63],
|
1051 |
+
'RBackHair_2': [3,66],
|
1052 |
+
'RBackHair_3': [3,69],
|
1053 |
+
'RBackHair_4': [3,72],
|
1054 |
+
'RBackHair_end': [3,75],
|
1055 |
+
|
1056 |
+
'RFrontHair': [3,78],
|
1057 |
+
'CFrontHair_1': [3,81],
|
1058 |
+
'CFrontHair_2': [3,84],
|
1059 |
+
'CFrontHair_3': [3,87],
|
1060 |
+
'CFrontHair_emd': [3,90],
|
1061 |
+
|
1062 |
+
'LFrontHair_1': [3,93],
|
1063 |
+
'LFrontHair_2': [3,96],
|
1064 |
+
'LFrontHair_3': [3,99],
|
1065 |
+
|
1066 |
+
'LBackHair_1': [3,102],
|
1067 |
+
'LBackHair_2': [3,105],
|
1068 |
+
'LBackHair_3': [3,108],
|
1069 |
+
'LBackHair_4': [3,111],
|
1070 |
+
'LBackHair_end': [3,114],
|
1071 |
+
|
1072 |
+
'LSideHair_1': [3,117],
|
1073 |
+
'LSideHair_2': [3,120],
|
1074 |
+
'LSideHair_3': [3,123],
|
1075 |
+
'LSideHair_4': [3,126],
|
1076 |
+
'LSideHair_5': [3,129],
|
1077 |
+
'LSideHair_6': [3,132],
|
1078 |
+
'LSideHair_7': [3,135],
|
1079 |
+
'LSideHair_end': [3,138],
|
1080 |
+
|
1081 |
+
'CBackHair_1': [3,141],
|
1082 |
+
'CBackHair_2': [3,144],
|
1083 |
+
'CBackHair_3': [3,147],
|
1084 |
+
'CBackHair_4': [3,150],
|
1085 |
+
'CBackHair_end': [3,153],
|
1086 |
+
|
1087 |
+
'RSideHair_1': [3,156],
|
1088 |
+
'RSideHair_2': [3,159],
|
1089 |
+
'RSideHair_3': [3,162],
|
1090 |
+
'RSideHair_4': [3,165],
|
1091 |
+
|
1092 |
+
'RibbonR_1': [3,168],
|
1093 |
+
'RibbonR_2': [3,171],
|
1094 |
+
'RibbonR_3': [3,174],
|
1095 |
+
|
1096 |
+
'RibbonL_1': [3,177],
|
1097 |
+
'RibbonL_2': [3,180],
|
1098 |
+
'RibbonL_3': [3,183],
|
1099 |
+
|
1100 |
+
'LeftEye': [3,186],
|
1101 |
+
'LeftEye_end': [3,189],
|
1102 |
+
'RightEye': [3,192],
|
1103 |
+
'RightEye_end': [3,195],
|
1104 |
+
|
1105 |
+
'LeftShoulder': [3,198],
|
1106 |
+
'LeftArm': [3,201],
|
1107 |
+
'LeftForearm': [3,204],
|
1108 |
+
'LeftHand': [3,207],
|
1109 |
+
'LeftHandThumb1': [3,210],
|
1110 |
+
'LeftHandThumb2': [3,213],
|
1111 |
+
'LeftHandThumb3': [3,216],
|
1112 |
+
'LeftHandThumb_end': [3,219],
|
1113 |
+
|
1114 |
+
'LeftHandIndex1': [3,222],
|
1115 |
+
'LeftHandIndex2': [3,225],
|
1116 |
+
'LeftHandIndex3': [3,228],
|
1117 |
+
'LeftHandIndex_end': [3,231],
|
1118 |
+
|
1119 |
+
'LeftHandMiddle1': [3,234],
|
1120 |
+
'LeftHandMiddle2': [3,237],
|
1121 |
+
'LeftHandMiddle3': [3,240],
|
1122 |
+
'LeftHandMiddle_end': [3,243],
|
1123 |
+
|
1124 |
+
'LeftHandRing1': [3,246],
|
1125 |
+
'LeftHandRing2': [3,249],
|
1126 |
+
'LeftHandRing3': [3,252],
|
1127 |
+
'LeftHandRing_end': [3,255],
|
1128 |
+
|
1129 |
+
'LeftHandPinky1': [3,258],
|
1130 |
+
'LeftHandPinky2': [3,261],
|
1131 |
+
'LeftHandPinky3': [3,264],
|
1132 |
+
'LeftHandPinky_end': [3,267],
|
1133 |
+
|
1134 |
+
'RightShoulder': [3,270],
|
1135 |
+
'RightArm': [3,273],
|
1136 |
+
'RightForearm': [3,276],
|
1137 |
+
'RightHand': [3,279],
|
1138 |
+
'RightHandThumb1': [3,282],
|
1139 |
+
'RightHandThumb2': [3,285],
|
1140 |
+
'RightHandThumb3': [3,288],
|
1141 |
+
'RightHandThumb_end': [3,291],
|
1142 |
+
|
1143 |
+
'RightHandIndex1': [3,294],
|
1144 |
+
'RightHandIndex2': [3,297],
|
1145 |
+
'RightHandIndex3': [3,300],
|
1146 |
+
'RightHandIndex_end': [3,303],
|
1147 |
+
|
1148 |
+
'RightHandMiddle1': [3,306],
|
1149 |
+
'RightHandMiddle2': [3,309],
|
1150 |
+
'RightHandMiddle3': [3,312],
|
1151 |
+
'RightHandMiddle_end': [3,315],
|
1152 |
+
|
1153 |
+
'RightHandRing1': [3,318],
|
1154 |
+
'RightHandRing2': [3,321],
|
1155 |
+
'RightHandRing3': [3,324],
|
1156 |
+
'RightHandRing_end': [3,327],
|
1157 |
+
|
1158 |
+
'RightHandPinky1': [3,330],
|
1159 |
+
'RightHandPinky2': [3,333],
|
1160 |
+
'RightHandPinky3': [3,336],
|
1161 |
+
'RightHandPinky_end': [3,339],
|
1162 |
+
|
1163 |
+
'RibbonR1': [3,342],
|
1164 |
+
'RibbonR1_end': [3,345],
|
1165 |
+
'RibbonR2': [3,348],
|
1166 |
+
'RibbonR2_end': [3,351],
|
1167 |
+
'RibbonL2': [3,354],
|
1168 |
+
'RibbonL2_end': [3,357],
|
1169 |
+
|
1170 |
+
'LeftUpLeg': [3,360],
|
1171 |
+
'LeftLeg': [3,363],
|
1172 |
+
'LeftFoot': [3,366],
|
1173 |
+
'LeftToe': [3,369],
|
1174 |
+
'LeftToe_end': [3,372],
|
1175 |
+
|
1176 |
+
'RightUpLeg': [3,375],
|
1177 |
+
'RightLEg': [3,378],
|
1178 |
+
'RightFoot': [3,381],
|
1179 |
+
'RightToe': [3,384],
|
1180 |
+
'RightToe_end': [3,387],
|
1181 |
+
|
1182 |
+
'bone_skirtF00': [3, 390],
|
1183 |
+
'bone_skirtF01': [3, 393],
|
1184 |
+
'bone_skirtF02': [3, 396],
|
1185 |
+
'bone_skirtF03': [3, 399],
|
1186 |
+
'Bone020': [3, 402],
|
1187 |
+
'Bone026': [3, 405],
|
1188 |
+
|
1189 |
+
'bone_skirtF_R_00': [3, 408],
|
1190 |
+
'bone_skirtF_R_01': [3, 411],
|
1191 |
+
'bone_skirtF_R_02': [3, 414],
|
1192 |
+
'bone_skirtF_R_03': [3, 417],
|
1193 |
+
'Bone019': [3, 420],
|
1194 |
+
'Bone028': [3, 423],
|
1195 |
+
|
1196 |
+
'bone_skirtR00': [3, 426],
|
1197 |
+
'bone_skirtR01': [3, 429],
|
1198 |
+
'bone_skirtR02': [3, 432],
|
1199 |
+
'bone_skirtR03': [3, 435],
|
1200 |
+
'Bone018': [3, 438],
|
1201 |
+
'Bone029': [3, 441],
|
1202 |
+
|
1203 |
+
'bone_skirtF_L_00': [3, 444],
|
1204 |
+
'bone_skirtF_L_01': [3, 447],
|
1205 |
+
'bone_skirtF_L_02': [3, 450],
|
1206 |
+
'bone_skirtF_L_03': [3, 453],
|
1207 |
+
'Bone021': [3, 456],
|
1208 |
+
'Bone027': [3, 459],
|
1209 |
+
|
1210 |
+
'bone_skirtL00': [3, 462],
|
1211 |
+
'bone_skirtL01': [3, 465],
|
1212 |
+
'bone_skirtL02': [3, 468],
|
1213 |
+
'bone_skirtL03': [3, 471],
|
1214 |
+
'Bone022': [3, 474],
|
1215 |
+
'Bone033': [3, 477],
|
1216 |
+
|
1217 |
+
'bone_skirtB_L_00': [3, 480],
|
1218 |
+
'bone_skirtB_L_01': [3, 483],
|
1219 |
+
'bone_skirtB_L_02': [3, 486],
|
1220 |
+
'bone_skirtB_L_03': [3, 489],
|
1221 |
+
'Bone023': [3, 492],
|
1222 |
+
'Bone032': [3, 495],
|
1223 |
+
|
1224 |
+
'bone_skirtB00': [3, 498],
|
1225 |
+
'bone_skirtB01': [3, 501],
|
1226 |
+
'bone_skirtB02': [3, 504],
|
1227 |
+
'bone_skirtB03': [3, 507],
|
1228 |
+
'Bone024': [3, 510],
|
1229 |
+
'Bone031': [3, 513],
|
1230 |
+
|
1231 |
+
'bone_skirtB_R_00': [3, 516],
|
1232 |
+
'bone_skirtB_R_01': [3, 519],
|
1233 |
+
'bone_skirtB_R_02': [3, 521],
|
1234 |
+
'bone_skirtB_R_03': [3, 524],
|
1235 |
+
'Bone025': [3, 527],
|
1236 |
+
'Bone030': [3, 530],
|
1237 |
+
},
|
1238 |
+
|
1239 |
+
"yostar_fullbody_213":{
|
1240 |
+
'Hips': 3 ,
|
1241 |
+
'Spine': 3 ,
|
1242 |
+
'Spine1': 3 ,
|
1243 |
+
'Chest': 3 ,
|
1244 |
+
'L_eri': 3 ,
|
1245 |
+
'R_eri': 3 ,
|
1246 |
+
'Neck': 3 ,
|
1247 |
+
'Head': 3 ,
|
1248 |
+
'Head_end': 3 ,
|
1249 |
+
|
1250 |
+
'LeftEye': 3,
|
1251 |
+
'LeftEye_end': 3,
|
1252 |
+
'RightEye': 3,
|
1253 |
+
'RightEye_end': 3,
|
1254 |
+
|
1255 |
+
'LeftShoulder': 3,
|
1256 |
+
'LeftArm': 3,
|
1257 |
+
'LeftForearm': 3,
|
1258 |
+
'LeftHand': 3,
|
1259 |
+
'LeftHandThumb1': 3,
|
1260 |
+
'LeftHandThumb2': 3,
|
1261 |
+
'LeftHandThumb3': 3,
|
1262 |
+
'LeftHandThumb_end': 3,
|
1263 |
+
|
1264 |
+
'LeftHandIndex1': 3,
|
1265 |
+
'LeftHandIndex2': 3,
|
1266 |
+
'LeftHandIndex3': 3,
|
1267 |
+
'LeftHandIndex_end': 3,
|
1268 |
+
|
1269 |
+
'LeftHandMiddle1': 3,
|
1270 |
+
'LeftHandMiddle2': 3,
|
1271 |
+
'LeftHandMiddle3': 3,
|
1272 |
+
'LeftHandMiddle_end': 3,
|
1273 |
+
|
1274 |
+
'LeftHandRing1': 3,
|
1275 |
+
'LeftHandRing2': 3,
|
1276 |
+
'LeftHandRing3': 3,
|
1277 |
+
'LeftHandRing_end': 3,
|
1278 |
+
|
1279 |
+
'LeftHandPinky1': 3,
|
1280 |
+
'LeftHandPinky2': 3,
|
1281 |
+
'LeftHandPinky3': 3,
|
1282 |
+
'LeftHandPinky_end':3,
|
1283 |
+
|
1284 |
+
'RightShoulder': 3,
|
1285 |
+
'RightArm': 3,
|
1286 |
+
'RightForearm': 3,
|
1287 |
+
'RightHand': 3,
|
1288 |
+
'RightHandThumb1': 3,
|
1289 |
+
'RightHandThumb2': 3,
|
1290 |
+
'RightHandThumb3': 3,
|
1291 |
+
'RightHandThumb_end': 3,
|
1292 |
+
|
1293 |
+
'RightHandIndex1': 3,
|
1294 |
+
'RightHandIndex2': 3,
|
1295 |
+
'RightHandIndex3': 3,
|
1296 |
+
'RightHandIndex_end': 3,
|
1297 |
+
|
1298 |
+
'RightHandMiddle1': 3,
|
1299 |
+
'RightHandMiddle2': 3,
|
1300 |
+
'RightHandMiddle3': 3,
|
1301 |
+
'RightHandMiddle_end': 3,
|
1302 |
+
|
1303 |
+
'RightHandRing1': 3,
|
1304 |
+
        'RightHandRing2': 3,
        'RightHandRing3': 3,
        'RightHandRing_end': 3,

        'RightHandPinky1': 3,
        'RightHandPinky2': 3,
        'RightHandPinky3': 3,
        'RightHandPinky_end': 3,

        'LeftUpLeg': 3,
        'LeftLeg': 3,
        'LeftFoot': 3,
        'LeftToe': 3,
        'LeftToe_end': 3,

        'RightUpLeg': 3,
        'RightLEg': 3,  # (sic) spelling kept to match the source skeleton's joint naming
        'RightFoot': 3,
        'RightToe': 3,
        'RightToe_end': 3,
    },
    "yostar_mainbody_48": {
        #'Hips': 3,
        'Spine': 3,
        'Spine1': 3,
        'Chest': 3,
        'L_eri': 3,
        'R_eri': 3,
        'Neck': 3,
        'Head': 3,
        'Head_end': 3,

        'LeftShoulder': 3,
        'LeftArm': 3,
        'LeftForearm': 3,
        'LeftHand': 3,

        'RightShoulder': 3,
        'RightArm': 3,
        'RightForearm': 3,
        'RightHand': 3,
    },
    "yostar_mainbody_69": {
        'Hips': 3,
        'Spine': 3,
        'Spine1': 3,
        'Chest': 3,
        'L_eri': 3,
        'R_eri': 3,
        'Neck': 3,
        'Head': 3,
        'Head_end': 3,

        'LeftShoulder': 3,
        'LeftArm': 3,
        'LeftForearm': 3,
        'LeftHand': 3,

        'RightShoulder': 3,
        'RightArm': 3,
        'RightForearm': 3,
        'RightHand': 3,

        'LeftUpLeg': 3,
        'LeftLeg': 3,
        'LeftFoot': 3,

        'RightUpLeg': 3,
        'RightLEg': 3,  # (sic) see note above
        'RightFoot': 3,
    },

    "yostar_upbody_168": {
        #'Hips': 3,
        'Spine': 3,
        'Spine1': 3,
        'Chest': 3,
        'L_eri': 3,
        'R_eri': 3,
        'Neck': 3,
        'Head': 3,
        'Head_end': 3,

        'LeftShoulder': 3,
        'LeftArm': 3,
        'LeftForearm': 3,
        'LeftHand': 3,
        'LeftHandThumb1': 3,
        'LeftHandThumb2': 3,
        'LeftHandThumb3': 3,
        'LeftHandThumb_end': 3,

        'LeftHandIndex1': 3,
        'LeftHandIndex2': 3,
        'LeftHandIndex3': 3,
        'LeftHandIndex_end': 3,

        'LeftHandMiddle1': 3,
        'LeftHandMiddle2': 3,
        'LeftHandMiddle3': 3,
        'LeftHandMiddle_end': 3,

        'LeftHandRing1': 3,
        'LeftHandRing2': 3,
        'LeftHandRing3': 3,
        'LeftHandRing_end': 3,

        'LeftHandPinky1': 3,
        'LeftHandPinky2': 3,
        'LeftHandPinky3': 3,
        'LeftHandPinky_end': 3,

        'RightShoulder': 3,
        'RightArm': 3,
        'RightForearm': 3,
        'RightHand': 3,
        'RightHandThumb1': 3,
        'RightHandThumb2': 3,
        'RightHandThumb3': 3,
        'RightHandThumb_end': 3,

        'RightHandIndex1': 3,
        'RightHandIndex2': 3,
        'RightHandIndex3': 3,
        'RightHandIndex_end': 3,

        'RightHandMiddle1': 3,
        'RightHandMiddle2': 3,
        'RightHandMiddle3': 3,
        'RightHandMiddle_end': 3,

        'RightHandRing1': 3,
        'RightHandRing2': 3,
        'RightHandRing3': 3,
        'RightHandRing_end': 3,

        'RightHandPinky1': 3,
        'RightHandPinky2': 3,
        'RightHandPinky3': 3,
        'RightHandPinky_end': 3,
    },
    "spine_neck_141": {
        'Spine': 3,
        'Neck': 3,
        'Neck1': 3,
        'RShoulder': 3,
        'RArm': 3,
        'RArm1': 3,
        'RHand': 3,
        'RHandM1': 3,
        'RHandM2': 3,
        'RHandM3': 3,
        'RHandR': 3,
        'RHandR1': 3,
        'RHandR2': 3,
        'RHandR3': 3,
        'RHandP': 3,
        'RHandP1': 3,
        'RHandP2': 3,
        'RHandP3': 3,
        'RHandI': 3,
        'RHandI1': 3,
        'RHandI2': 3,
        'RHandI3': 3,
        'RHandT1': 3,
        'RHandT2': 3,
        'RHandT3': 3,
        'LShoulder': 3,
        'LArm': 3,
        'LArm1': 3,
        'LHand': 3,
        'LHandM1': 3,
        'LHandM2': 3,
        'LHandM3': 3,
        'LHandR': 3,
        'LHandR1': 3,
        'LHandR2': 3,
        'LHandR3': 3,
        'LHandP': 3,
        'LHandP1': 3,
        'LHandP2': 3,
        'LHandP3': 3,
        'LHandI': 3,
        'LHandI1': 3,
        'LHandI2': 3,
        'LHandI3': 3,
        'LHandT1': 3,
        'LHandT2': 3,
        'LHandT3': 3,
    },
}
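
# Each entry above maps a joint name to its number of rotation channels (3 here).
# A minimal sketch (toy table, hypothetical values) of how downstream loaders combine
# these channel counts with per-joint end offsets to build a boolean channel mask:
#
#     import numpy as np
#     ori = {'Hips': (3, 3), 'Spine': (3, 6), 'Neck': (3, 9)}  # name -> (dof, cumulative end)
#     tar = {'Spine': 3, 'Neck': 3}                            # joints to keep
#     mask = np.zeros(3 * len(ori))
#     for name, dof in tar.items():
#         mask[ori[name][1] - dof:ori[name][1]] = 1
#     # mask -> [0. 0. 0. 1. 1. 1. 1. 1. 1.]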


class FIDCalculator(object):
    '''
    Computes FID-style (Frechet) distances between ground-truth ('gt') and
    generated ('op') motion loaded from BVH files.
    '''
    def __init__(self):
        self.gt_rot = None # pandas dataframe for n frames * joints * 6
        self.gt_pos = None # n frames * (joints + 13) * 3
        self.op_rot = None # pandas dataframe for n frames * joints * 6
        self.op_pos = None # n frames * (joints + 13) * 3

    def load(self, path, load_type, save_pos=False):
        '''
        select 'gt' or 'op' for load_type
        '''
        parser = BVHParser()
        parsed_data = parser.parse(path)
        if load_type == 'gt':
            self.gt_rot = parsed_data.values
        elif load_type == 'op':
            self.op_rot = parsed_data.values
        else: print('error, select gt or op for load_type')

        if save_pos:
            mp = MocapParameterizer('position')
            positions = mp.fit_transform([parsed_data])
            if load_type == 'gt':
                self.gt_pos = positions[0].values
            elif load_type == 'op':
                self.op_pos = positions[0].values
            else: print('error, select gt or op for load_type')

    def _joint_selector(self, selected_joints, ori_data):
        selected_data = pd.DataFrame(columns=[])
        for joint_name in selected_joints:
            selected_data[joint_name] = ori_data[joint_name]
        return selected_data.to_numpy()

    def cal_vol(self, dtype):
        if dtype == 'pos':
            gt = self.gt_pos
            op = self.op_pos
        else:
            gt = self.gt_rot
            op = self.op_rot

        gt_v = gt.to_numpy()[1:, :] - gt.to_numpy()[0:-1, :]
        op_v = op.to_numpy()[1:, :] - op.to_numpy()[0:-1, :]
        if dtype == 'pos':
            self.gt_vol_pos = pd.DataFrame(gt_v, columns=gt.columns.tolist())
            self.op_vol_pos = pd.DataFrame(op_v, columns=gt.columns.tolist())
        else:
            self.gt_vol_rot = pd.DataFrame(gt_v, columns=gt.columns.tolist())
            self.op_vol_rot = pd.DataFrame(op_v, columns=gt.columns.tolist())

    @staticmethod
    def frechet_distance(samples_A, samples_B):
        A_mu = np.mean(samples_A, axis=0)
        A_sigma = np.cov(samples_A, rowvar=False)
        B_mu = np.mean(samples_B, axis=0)
        B_sigma = np.cov(samples_B, rowvar=False)
        try:
            frechet_dist = FIDCalculator.calculate_frechet_distance(A_mu, A_sigma, B_mu, B_sigma)
        except ValueError:
            frechet_dist = 1e+10
        return frechet_dist

    @staticmethod
    def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
        """Numpy implementation of the Frechet Distance,
        from https://github.com/mseitzer/pytorch-fid/blob/master/fid_score.py

        The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
        and X_2 ~ N(mu_2, C_2) is
            d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
        Stable version by Dougal J. Sutherland.
        Params:
        -- mu1   : Numpy array containing the activations of a layer of the
                   inception net (like returned by the function 'get_predictions')
                   for generated samples.
        -- mu2   : The sample mean over activations, precalculated on a
                   representative data set.
        -- sigma1: The covariance matrix over activations for generated samples.
        -- sigma2: The covariance matrix over activations, precalculated on a
                   representative data set.
        Returns:
        --   : The Frechet Distance.
        """
        mu1 = np.atleast_1d(mu1)
        mu2 = np.atleast_1d(mu2)
        #print(mu1[0], mu2[0])
        sigma1 = np.atleast_2d(sigma1)
        sigma2 = np.atleast_2d(sigma2)
        #print(sigma1[0], sigma2[0])
        assert mu1.shape == mu2.shape, \
            'Training and test mean vectors have different lengths'
        assert sigma1.shape == sigma2.shape, \
            'Training and test covariances have different dimensions'

        diff = mu1 - mu2

        # Product might be almost singular
        covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
        #print(diff, covmean[0])
        if not np.isfinite(covmean).all():
            msg = ('fid calculation produces singular product; '
                   'adding %s to diagonal of cov estimates') % eps
            print(msg)
            offset = np.eye(sigma1.shape[0]) * eps
            covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))

        # Numerical error might give slight imaginary component
        if np.iscomplexobj(covmean):
            if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
                m = np.max(np.abs(covmean.imag))
                raise ValueError('Imaginary component {}'.format(m))
            covmean = covmean.real

        tr_covmean = np.trace(covmean)

        return (diff.dot(diff) + np.trace(sigma1) +
                np.trace(sigma2) - 2 * tr_covmean)

    def calculate_fid(self, cal_type, joint_type, high_level_opt):
        if cal_type == 'pos':
            if self.gt_pos.shape != self.op_pos.shape:
                min_val = min(self.gt_pos.shape[0], self.op_pos.shape[0])
                gt = self.gt_pos[:min_val]
                op = self.op_pos[:min_val]
            else:
                gt = self.gt_pos
                op = self.op_pos
            full_body = gt.columns.tolist()
        elif cal_type == 'rot':
            if self.gt_rot.shape != self.op_rot.shape:
                min_val = min(self.gt_rot.shape[0], self.op_rot.shape[0])
                gt = self.gt_rot[:min_val]
                op = self.op_rot[:min_val]
            else:
                gt = self.gt_rot
                op = self.op_rot
            full_body_with_offset = gt.columns.tolist()
            full_body = [o for o in full_body_with_offset if ('position' not in o)]
        elif cal_type == 'pos_vol':
            assert self.gt_vol_pos.shape == self.op_vol_pos.shape
            gt = self.gt_vol_pos
            op = self.op_vol_pos
            full_body_with_offset = gt.columns.tolist()
            full_body = gt.columns.tolist()
        elif cal_type == 'rot_vol':
            assert self.gt_vol_rot.shape == self.op_vol_rot.shape
            gt = self.gt_vol_rot
            op = self.op_vol_rot
            full_body_with_offset = gt.columns.tolist()
            full_body = [o for o in full_body_with_offset if ('position' not in o)]
        #print(f'full_body contains {len(full_body)//3} joints')

        if joint_type == 'full_upper_body':
            selected_body = [o for o in full_body if ('Leg' not in o) and ('Foot' not in o) and ('Toe' not in o)]
        elif joint_type == 'upper_body':
            selected_body = [o for o in full_body if ('Hand' not in o) and ('Leg' not in o) and ('Foot' not in o) and ('Toe' not in o)]
        elif joint_type == 'fingers':
            selected_body = [o for o in full_body if ('Hand' in o)]
        elif joint_type == 'indivdual':  # (sic) spelling kept for API compatibility
            pass
        else: print('error, plz select correct joint type')
        #print(f'calculate fid for {len(selected_body)//3} joints')

        gt = self._joint_selector(selected_body, gt)
        op = self._joint_selector(selected_body, op)

        if high_level_opt == 'fid':
            fid = FIDCalculator.frechet_distance(gt, op)
            return fid
        elif high_level_opt == 'var':
            var_gt = gt.var()
            var_op = op.var()
            return var_gt, var_op
        elif high_level_opt == 'mean':
            mean_gt = gt.mean()
            mean_op = op.mean()
            return mean_gt, mean_op
        else: return 0
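
# A minimal usage sketch for FIDCalculator (file paths are hypothetical; assumes the
# pymo BVHParser/MocapParameterizer imports used by this module are available):
#
#     calc = FIDCalculator()
#     calc.load('gt_clip.bvh', 'gt', save_pos=True)    # ground truth
#     calc.load('res_clip.bvh', 'op', save_pos=True)   # generated result
#     fid = calc.calculate_fid('pos', 'upper_body', 'fid')
#
#     # frechet_distance itself only needs two (n_samples, n_features) arrays:
#     a = np.random.randn(200, 6)
#     b = np.random.randn(200, 6) + 0.5
#     d = FIDCalculator.frechet_distance(a, b)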

def result2target_vis(pose_version, res_bvhlist, save_path, demo_name, verbose=True):
    if "trinity" in pose_version:
        ori_list = joints_list[pose_version[6:-4]]
        target_list = joints_list[pose_version[6:]]
        file_content_length = 336
    elif "beat" in pose_version or "spine_neck_141" in pose_version:
        ori_list = joints_list["beat_joints"]
        target_list = joints_list["spine_neck_141"]
        file_content_length = 431
    elif "yostar" in pose_version:
        ori_list = joints_list["yostar"]
        target_list = joints_list[pose_version]
        file_content_length = 1056
    else:
        ori_list = joints_list["japanese_joints"]
        target_list = joints_list[pose_version]
        file_content_length = 366

    bvh_files_dirs = sorted(glob.glob(f'{res_bvhlist}*.bvh'), key=str)
    #test_seq_list = os.list_dir(demo_name).sort()

    counter = 0
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    for i, bvh_file_dir in enumerate(bvh_files_dirs):
        short_name = bvh_file_dir.split("/")[-1][11:]
        #print(short_name)
        write_file = open(os.path.join(save_path, f'res_{short_name}'), 'w+')
        with open(f"{demo_name}{short_name}", 'r') as pose_data_pre:
            pose_data_pre_file = pose_data_pre.readlines()
            # copy the skeleton header from the reference BVH
            for j, line in enumerate(pose_data_pre_file[0:file_content_length]):
                write_file.write(line)
            offset_data = pose_data_pre_file[file_content_length]
            offset_data = np.fromstring(offset_data, dtype=float, sep=' ')
        write_file.close()

        write_file = open(os.path.join(save_path, f'res_{short_name}'), 'r')
        ori_lines = write_file.readlines()
        with open(bvh_file_dir, 'r') as pose_data:
            pose_data_file = pose_data.readlines()
            ori_lines[file_content_length-2] = 'Frames: ' + str(len(pose_data_file)-1) + '\n'
        write_file.close()

        write_file = open(os.path.join(save_path, f'res_{short_name}'), 'w+')
        write_file.writelines(i for i in ori_lines[:file_content_length])
        write_file.close()

        with open(os.path.join(save_path, f'res_{short_name}'), 'a+') as write_file:
            with open(bvh_file_dir, 'r') as pose_data:
                data_each_file = []
                pose_data_file = pose_data.readlines()
                for j, line in enumerate(pose_data_file):
                    if not j:
                        pass
                    else:
                        data = np.fromstring(line, dtype=float, sep=' ')
                        data_rotation = offset_data.copy()
                        for iii, (k, v) in enumerate(target_list.items()): # here is 147 rotations by 3
                            #print(data_rotation[ori_list[k][1]-v:ori_list[k][1]], data[iii*3:iii*3+3])
                            data_rotation[ori_list[k][1]-v:ori_list[k][1]] = data[iii*3:iii*3+3]
                        data_each_file.append(data_rotation)

            for line_data in data_each_file:
                line_data = np.array2string(line_data, max_line_width=np.inf, precision=6, suppress_small=False, separator=' ')
                write_file.write(line_data[1:-2]+'\n')

        counter += 1
        if verbose:
            logger.info(f'data_shape: {data_rotation.shape}, process: {counter}/{len(bvh_files_dirs)}')
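
# A toy illustration of the channel splice in the inner loop above: each target joint's
# three predicted rotation values overwrite its slice of the template frame
# (indices hypothetical):
#
#     frame = np.zeros(12)                 # template frame from the reference BVH
#     ori_list = {'Spine': (3, 6)}         # joint -> (channels, cumulative end index)
#     target_list = {'Spine': 3}
#     pred = np.array([10.0, 20.0, 30.0])  # predicted XYZ rotation for 'Spine'
#     k, v = 'Spine', target_list['Spine']
#     frame[ori_list[k][1]-v:ori_list[k][1]] = pred   # writes frame[3:6]
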
dataloaders/mix_sep.py
ADDED
@@ -0,0 +1,637 @@
import os
import pickle
import math
import shutil
import numpy as np
import lmdb as lmdb
import textgrid as tg
import pandas as pd
import torch
import glob
import json
from termcolor import colored
from loguru import logger
from collections import defaultdict
from torch.utils.data import Dataset
import torch.distributed as dist
#import pyarrow
import librosa
import smplx

from .build_vocab import Vocab
from .utils.audio_features import Wav2Vec2Model
from .data_tools import joints_list
from .utils import rotation_conversions as rc
from .utils import other_tools

# Source frame rates of the AMASS subsets:
# ACCAD 120
# BioMotionLab_NTroje 120
# CMU complicated (mixed frame rates)
# EKUT 100
# Eyes_Japan_Dataset complicated (mixed frame rates)
# HumanEva complicated (mixed frame rates)
# KIT 100
# MPI_HDM05 120
# MPI_Limits 120
# MPI_mosh complicated (mixed frame rates)
# SFU 120
# SSM_synced complicated (mixed frame rates)
# TCD_handMocap complicated (mixed frame rates)
# TotalCapture 60
# Transitions_mocap 120

all_sequences = [
    'ACCAD',
    'BioMotionLab_NTroje',
    'CMU',
    'EKUT',
    'Eyes_Japan_Dataset',
    'HumanEva',
    'KIT',
    'MPI_HDM05',
    'MPI_Limits',
    'MPI_mosh',
    'SFU',
    'SSM_synced',
    'TCD_handMocap',
    'TotalCapture',
    'Transitions_mocap',
]
amass_test_split = ['Transitions_mocap', 'SSM_synced']
amass_vald_split = ['HumanEva', 'MPI_HDM05', 'SFU', 'MPI_mosh']
amass_train_split = ['BioMotionLab_NTroje', 'Eyes_Japan_Dataset', 'TotalCapture', 'KIT', 'ACCAD', 'CMU', 'MPI_Limits',
                     'TCD_handMocap', 'EKUT']

# The splits above follow MotionCLIP, but because the frame-rate handling in motionx
# is problematic, for now only a subset of datasets is used for training.
# These are all 120 fps:
# amass_test_split = ['SFU']
# amass_vald_split = ['MPI_Limits']
# amass_train_split = ['BioMotionLab_NTroje', 'MPI_HDM05', 'ACCAD', 'Transitions_mocap']

amass_splits = {
    'test': amass_test_split,
    'val': amass_vald_split,
    'train': amass_train_split
}
# assert len(amass_splits['train'] + amass_splits['test'] + amass_splits['vald']) == len(all_sequences) == 15
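
# All sources are normalized to 30 fps by integer-stride slicing, which is why the
# per-dataset frame rates listed above matter. A toy sketch:
#
#     poses = np.arange(240)         # 240 frames at 120 fps (e.g. ACCAD / SFU)
#     stride = round(120 / 30)       # 4 -> keep every 4th frame
#     poses_30fps = poses[::stride]  # 60 frames = 2 seconds at 30 fps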
class CustomDataset(Dataset):
    def __init__(self, args, loader_type, augmentation=None, kwargs=None, build_cache=True):
        self.args = args
        self.loader_type = loader_type

        self.rank = 0
        self.ori_stride = self.args.stride
        self.ori_length = self.args.pose_length
        self.alignment = [0, 0] # for trinity

        self.ori_joint_list = joints_list[self.args.ori_joints]
        self.tar_joint_list = joints_list[self.args.tar_joints]
        if 'smplx' in self.args.pose_rep:
            self.joint_mask = np.zeros(len(list(self.ori_joint_list.keys()))*3)
            self.joints = len(list(self.tar_joint_list.keys()))
            for joint_name in self.tar_joint_list:
                self.joint_mask[self.ori_joint_list[joint_name][1] - self.ori_joint_list[joint_name][0]:self.ori_joint_list[joint_name][1]] = 1
        else:
            self.joints = len(list(self.ori_joint_list.keys()))+1
            self.joint_mask = np.zeros(self.joints*3)
            for joint_name in self.tar_joint_list:
                if joint_name == "Hips":
                    self.joint_mask[3:6] = 1
                else:
                    self.joint_mask[self.ori_joint_list[joint_name][1] - self.ori_joint_list[joint_name][0]:self.ori_joint_list[joint_name][1]] = 1
        # select trainable joints

        split_rule = pd.read_csv(args.data_path+"train_test_split.csv")
        self.selected_file = split_rule.loc[(split_rule['type'] == loader_type) & (split_rule['id'].str.split("_").str[0].astype(int).isin(self.args.training_speakers))]
        if args.additional_data and loader_type == 'train':
            split_b = split_rule.loc[(split_rule['type'] == 'additional') & (split_rule['id'].str.split("_").str[0].astype(int).isin(self.args.training_speakers))]
            #self.selected_file = split_rule.loc[(split_rule['type'] == 'additional') & (split_rule['id'].str.split("_").str[0].astype(int).isin(self.args.training_speakers))]
            self.selected_file = pd.concat([self.selected_file, split_b])
        if self.selected_file.empty:
            logger.warning(f"{loader_type} is empty for speaker {self.args.training_speakers}, use train set 0-8 instead")
            self.selected_file = split_rule.loc[(split_rule['type'] == 'train') & (split_rule['id'].str.split("_").str[0].astype(int).isin(self.args.training_speakers))]
            self.selected_file = self.selected_file.iloc[0:8]
        self.data_dir = args.data_path
        self.use_amass = args.use_amass
        self.beatx_during_time = 0
        self.amass_during_time = 0

        if loader_type == "test":
            self.args.multi_length_training = [1.0]
        self.max_length = int(args.pose_length * self.args.multi_length_training[-1])
        self.max_audio_pre_len = math.floor(args.pose_length / args.pose_fps * self.args.audio_sr)
        if self.max_audio_pre_len > self.args.test_length*self.args.audio_sr:
            self.max_audio_pre_len = self.args.test_length*self.args.audio_sr
        preloaded_dir = self.args.root_path + self.args.cache_path + loader_type + f"/{args.pose_rep}_cache"

        if self.args.beat_align:
            if not os.path.exists(args.data_path+f"weights/mean_vel_{args.pose_rep}.npy"):
                self.calculate_mean_velocity(args.data_path+f"weights/mean_vel_{args.pose_rep}.npy")
            self.avg_vel = np.load(args.data_path+f"weights/mean_vel_{args.pose_rep}.npy")

        if build_cache and self.rank == 0:
            self.build_cache(preloaded_dir)
        self.lmdb_env = lmdb.open(preloaded_dir, readonly=True, lock=False)
        with self.lmdb_env.begin() as txn:
            self.n_samples = txn.stat()["entries"]

        self.norm = True
        self.mean = np.load('./mean_std/beatx_2_330_mean.npy')
        self.std = np.load('./mean_std/beatx_2_330_std.npy')

        self.trans_mean = np.load('./mean_std/beatx_2_trans_mean.npy')
        self.trans_std = np.load('./mean_std/beatx_2_trans_std.npy')

    def load_amass(self, data):
        # Fixes the orientation of AMASS data: it is Z-up in Blender, the target is
        # Y-up; the facing direction has not been changed yet.
        data_dict = {key: data[key] for key in data}
        frames = data_dict['poses'].shape[0]
        b = data_dict['poses'][..., :3]
        b = rc.axis_angle_to_matrix(torch.from_numpy(b))
        rot_matrix = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, -1.0, 0.0]])
        c = np.einsum('ij,kjl->kil', rot_matrix, b)
        c = rc.matrix_to_axis_angle(torch.from_numpy(c))
        data_dict['poses'][..., :3] = c

        trans_matrix1 = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 1.0, 0.0]])
        data_dict['trans'] = np.einsum("bi,ij->bj", data_dict['trans'], trans_matrix1)

        betas300 = np.zeros(300)
        betas300[:16] = data_dict['betas']
        data_dict['betas'] = betas300
        data_dict["expressions"] = np.zeros((frames, 100))

        return data_dict
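
    # Sanity check for the axis swap in load_amass above: the rotation matrix maps
    # (x, y, z) -> (x, z, -y), so the old +Z (Blender "up") becomes the new +Y.
    #
    #     rot = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, -1.0, 0.0]])
    #     rot @ np.array([0.0, 0.0, 1.0])   # -> [0. 1. 0.]
    #
    # The translation matrix, applied as `trans @ M`, realizes the same mapping.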
    def calculate_mean_velocity(self, save_path):
        self.smplx = smplx.create(
            self.args.data_path_1+"smplx_models/",
            model_type='smplx',
            gender='NEUTRAL_2020',
            use_face_contour=False,
            num_betas=300,
            num_expression_coeffs=100,
            ext='npz',
            use_pca=False,
        ).cuda().eval()
        dir_p = self.data_dir + self.args.pose_rep + "/"
        all_list = []
        from tqdm import tqdm
        for tar in tqdm(os.listdir(dir_p)):
            if tar.endswith(".npz"):
                m_data = np.load(dir_p+tar, allow_pickle=True)
                betas, poses, trans, exps = m_data["betas"], m_data["poses"], m_data["trans"], m_data["expressions"]
                n, c = poses.shape[0], poses.shape[1]
                betas = betas.reshape(1, 300)
                betas = np.tile(betas, (n, 1))
                betas = torch.from_numpy(betas).cuda().float()
                poses = torch.from_numpy(poses.reshape(n, c)).cuda().float()
                exps = torch.from_numpy(exps.reshape(n, 100)).cuda().float()
                trans = torch.from_numpy(trans.reshape(n, 3)).cuda().float()
                max_length = 128
                s, r = n//max_length, n%max_length
                #print(n, s, r)
                all_tensor = []
                for i in range(s):
                    with torch.no_grad():
                        joints = self.smplx(
                            betas=betas[i*max_length:(i+1)*max_length],
                            transl=trans[i*max_length:(i+1)*max_length],
                            expression=exps[i*max_length:(i+1)*max_length],
                            jaw_pose=poses[i*max_length:(i+1)*max_length, 66:69],
                            global_orient=poses[i*max_length:(i+1)*max_length, :3],
                            body_pose=poses[i*max_length:(i+1)*max_length, 3:21*3+3],
                            left_hand_pose=poses[i*max_length:(i+1)*max_length, 25*3:40*3],
                            right_hand_pose=poses[i*max_length:(i+1)*max_length, 40*3:55*3],
                            return_verts=True,
                            return_joints=True,
                            leye_pose=poses[i*max_length:(i+1)*max_length, 69:72],
                            reye_pose=poses[i*max_length:(i+1)*max_length, 72:75],
                        )['joints'][:, :55, :].reshape(max_length, 55*3)
                    all_tensor.append(joints)
                if r != 0:
                    with torch.no_grad():
                        joints = self.smplx(
                            betas=betas[s*max_length:s*max_length+r],
                            transl=trans[s*max_length:s*max_length+r],
                            expression=exps[s*max_length:s*max_length+r],
                            jaw_pose=poses[s*max_length:s*max_length+r, 66:69],
                            global_orient=poses[s*max_length:s*max_length+r, :3],
                            body_pose=poses[s*max_length:s*max_length+r, 3:21*3+3],
                            left_hand_pose=poses[s*max_length:s*max_length+r, 25*3:40*3],
                            right_hand_pose=poses[s*max_length:s*max_length+r, 40*3:55*3],
                            return_verts=True,
                            return_joints=True,
                            leye_pose=poses[s*max_length:s*max_length+r, 69:72],
                            reye_pose=poses[s*max_length:s*max_length+r, 72:75],
                        )['joints'][:, :55, :].reshape(r, 55*3)
                    all_tensor.append(joints)
                joints = torch.cat(all_tensor, axis=0)
                joints = joints.permute(1, 0)
                dt = 1/30
                # first step is forward diff (t+1 - t) / dt
                init_vel = (joints[:, 1:2] - joints[:, :1]) / dt
                # middle steps are second order (t+1 - t-1) / 2dt
                middle_vel = (joints[:, 2:] - joints[:, 0:-2]) / (2 * dt)
                # last step is backward diff (t - t-1) / dt
                final_vel = (joints[:, -1:] - joints[:, -2:-1]) / dt
                #print(joints.shape, init_vel.shape, middle_vel.shape, final_vel.shape)
                vel_seq = torch.cat([init_vel, middle_vel, final_vel], dim=1).permute(1, 0).reshape(n, 55, 3)
                #print(vel_seq.shape)
                #.permute(1, 0).reshape(n, 55, 3)
                vel_seq_np = vel_seq.cpu().numpy()
                vel_joints_np = np.linalg.norm(vel_seq_np, axis=2) # n * 55
                all_list.append(vel_joints_np)
        avg_vel = np.mean(np.concatenate(all_list, axis=0), axis=0) # 55
        np.save(save_path, avg_vel)
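
    # The finite-difference scheme above (forward at the first frame, central in the
    # middle, backward at the last) on a 1-D toy signal:
    #
    #     x = torch.tensor([[0.0, 1.0, 4.0, 9.0]])   # one coordinate over 4 frames
    #     dt = 1 / 30
    #     init  = (x[:, 1:2] - x[:, :1]) / dt         # -> 30
    #     mid   = (x[:, 2:] - x[:, :-2]) / (2 * dt)   # -> 60, 120
    #     final = (x[:, -1:] - x[:, -2:-1]) / dt      # -> 150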
    def build_cache(self, preloaded_dir):
        logger.info(f"Audio bit rate: {self.args.audio_fps}")
        logger.info("Reading data '{}'...".format(self.data_dir))
        logger.info("Creating the dataset cache...")
        if self.args.new_cache:
            if os.path.exists(preloaded_dir):
                shutil.rmtree(preloaded_dir)
        if os.path.exists(preloaded_dir):
            logger.info("Found the cache {}".format(preloaded_dir))
        elif self.loader_type == "test":
            self.cache_generation(
                preloaded_dir, True,
                0, 0,
                is_test=True)
        else:
            self.cache_generation(
                preloaded_dir, self.args.disable_filtering,
                self.args.clean_first_seconds, self.args.clean_final_seconds,
                is_test=False)
        logger.info(f"BEATX total duration is {self.beatx_during_time}s!")
        logger.info(f"AMASS total duration is {self.amass_during_time}s!")

        ## BEATX train, val, test durations: 69800 s, 7695 s, 18672 s; 26.7 h in total

    def __len__(self):
        return self.n_samples

    def cache_generation(self, out_lmdb_dir, disable_filtering, clean_first_seconds, clean_final_seconds, is_test=False):
        # if "wav2vec2" in self.args.audio_rep:
        #     self.wav2vec_model = Wav2Vec2Model.from_pretrained(f"{self.args.data_path_1}/hub/transformer/wav2vec2-base-960h")
        #     self.wav2vec_model.feature_extractor._freeze_parameters()
        #     self.wav2vec_model = self.wav2vec_model.cuda()
        #     self.wav2vec_model.eval()

        self.n_out_samples = 0
        # create db for samples
        if not os.path.exists(out_lmdb_dir): os.makedirs(out_lmdb_dir)
        dst_lmdb_env = lmdb.open(out_lmdb_dir, map_size=int(1024 ** 3 * 50)) # 50 GB
        n_filtered_out = defaultdict(int)

        for index, file_name in self.selected_file.iterrows():
            f_name = file_name["id"]
            ext = ".npz" if "smplx" in self.args.pose_rep else ".bvh"
            pose_file = self.data_dir + self.args.pose_rep + "/" + f_name + ext
            pose_each_file = []
            trans_each_file = []
            trans_v_each_file = []
            shape_each_file = []
            audio_each_file = []
            facial_each_file = []
            word_each_file = []
            emo_each_file = []
            sem_each_file = []
            vid_each_file = []
            id_pose = f_name # e.g. 1_wayne_0_1_1

            logger.info(colored(f"# ---- Building cache for Pose {id_pose} ---- #", "blue"))
            if "smplx" in self.args.pose_rep:
                pose_data = np.load(pose_file, allow_pickle=True)
                assert 30 % self.args.pose_fps == 0, 'pose_fps should be an aliquot part of 30'
                stride = int(30/self.args.pose_fps)
                pose_each_file = pose_data["poses"][::stride] * self.joint_mask
                pose_each_file = pose_each_file[:, self.joint_mask.astype(bool)]
                # print(pose_each_file.shape)
                self.beatx_during_time += pose_each_file.shape[0]/30
                trans_each_file = pose_data["trans"][::stride]
                trans_each_file[:, 0] = trans_each_file[:, 0] - trans_each_file[0, 0]
                trans_each_file[:, 2] = trans_each_file[:, 2] - trans_each_file[0, 2]
                trans_v_each_file = np.zeros_like(trans_each_file)
                trans_v_each_file[1:, 0] = trans_each_file[1:, 0] - trans_each_file[:-1, 0]
                trans_v_each_file[0, 0] = trans_v_each_file[1, 0]
                trans_v_each_file[1:, 2] = trans_each_file[1:, 2] - trans_each_file[:-1, 2]
                trans_v_each_file[0, 2] = trans_v_each_file[1, 2]
                trans_v_each_file[:, 1] = trans_each_file[:, 1]

                shape_each_file = np.repeat(pose_data["betas"].reshape(1, 300), pose_each_file.shape[0], axis=0)
                if self.args.facial_rep is not None:
                    logger.info(f"# ---- Building cache for Facial {id_pose} and Pose {id_pose} ---- #")
                    facial_each_file = pose_data["expressions"][::stride]
                    if self.args.facial_norm:
                        facial_each_file = (facial_each_file - self.mean_facial) / self.std_facial

            if self.args.id_rep is not None:
                vid_each_file = np.repeat(np.array(int(f_name.split("_")[0])-1).reshape(1, 1), pose_each_file.shape[0], axis=0)

            filtered_result = self._sample_from_clip(
                dst_lmdb_env,
                pose_each_file, trans_each_file, trans_v_each_file, shape_each_file,
                vid_each_file,
                disable_filtering, clean_first_seconds, clean_final_seconds, is_test,
            )
            for type in filtered_result.keys():
                n_filtered_out[type] += filtered_result[type]

        if self.args.use_amass:
            amass_dir = '/mnt/fu09a/chenbohong/PantoMatrix/scripts/EMAGE_2024/datasets/AMASS_SMPLX'
            for dataset in amass_splits[self.loader_type]:
                search_path = os.path.join(amass_dir, dataset, '**', '*.npz')
                npz_files = glob.glob(search_path, recursive=True)
                for index, file_name in enumerate(npz_files):
                    f_name = file_name.split('/')[-1]
                    ext = ".npz" if "smplx" in self.args.pose_rep else ".bvh"
                    pose_file = file_name
                    pose_each_file = []
                    trans_each_file = []
                    trans_v_each_file = []
                    shape_each_file = []
                    audio_each_file = []
                    facial_each_file = []
                    word_each_file = []
                    emo_each_file = []
                    sem_each_file = []
                    vid_each_file = []
                    id_pose = f_name

                    logger.info(colored(f"# ---- Building cache for Pose {id_pose} ---- #", "blue"))
                    if "smplx" in self.args.pose_rep:
                        pose_data = np.load(pose_file, allow_pickle=True)
                        if len(pose_data.files) == 6:
                            logger.info(colored(f"# ---- state file ---- #", "red"))
                            continue
                        assert 30 % self.args.pose_fps == 0, 'pose_fps should be an aliquot part of 30'
                        # Z-up -> Y-up conversion; the original assigned this result to
                        # pose_each_file and immediately overwrote it, discarding the fix.
                        pose_data = self.load_amass(pose_data)
                        fps = pose_data['mocap_frame_rate']
                        stride = round(fps/30)
                        pose_each_file = pose_data["poses"][::stride] * self.joint_mask
                        pose_each_file = pose_each_file[:, self.joint_mask.astype(bool)]
                        trans_each_file = pose_data["trans"][::stride]

                        trans_each_file[:, 0] = trans_each_file[:, 0] - trans_each_file[0, 0]
                        trans_each_file[:, 2] = trans_each_file[:, 2] - trans_each_file[0, 2]
                        trans_v_each_file = np.zeros_like(trans_each_file)
                        trans_v_each_file[1:, 0] = trans_each_file[1:, 0] - trans_each_file[:-1, 0]
                        trans_v_each_file[0, 0] = trans_v_each_file[1, 0]
                        trans_v_each_file[1:, 2] = trans_each_file[1:, 2] - trans_each_file[:-1, 2]
                        trans_v_each_file[0, 2] = trans_v_each_file[1, 2]
                        trans_v_each_file[:, 1] = trans_each_file[:, 1]

                        shape_each_file = np.repeat(pose_data["betas"].reshape(1, -1), pose_each_file.shape[0], axis=0)

                    if self.args.id_rep is not None:
                        vid_each_file = np.repeat(np.array(int(100)-1).reshape(1, 1), pose_each_file.shape[0], axis=0)

                    filtered_result = self._sample_from_clip(
                        dst_lmdb_env,
                        pose_each_file, trans_each_file, trans_v_each_file, shape_each_file,
                        vid_each_file,
                        disable_filtering, clean_first_seconds, clean_final_seconds, is_test,
                    )
                    for type in filtered_result.keys():
                        n_filtered_out[type] += filtered_result[type]

        with dst_lmdb_env.begin() as txn:
            logger.info(colored(f"no. of samples: {txn.stat()['entries']}", "cyan"))
            n_total_filtered = 0
            for type, n_filtered in n_filtered_out.items():
                logger.info("{}: {}".format(type, n_filtered))
                n_total_filtered += n_filtered
            logger.info(colored("no. of excluded samples: {} ({:.1f}%)".format(
                n_total_filtered, 100 * n_total_filtered / (txn.stat()["entries"] + n_total_filtered)), "cyan"))
        dst_lmdb_env.sync()
        dst_lmdb_env.close()
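
    # Each cached sample is a pickled list stored under a zero-padded ASCII key
    # (written in _sample_from_clip below, read back in __getitem__). A self-contained
    # round-trip sketch with a temporary directory and a toy payload:
    #
    #     import lmdb, pickle, tempfile
    #     env = lmdb.open(tempfile.mkdtemp(), map_size=1024 ** 3)
    #     with env.begin(write=True) as txn:
    #         txn.put("{:005}".format(0).encode("ascii"), pickle.dumps([np.zeros((64, 3))], 5))
    #     with env.begin(write=False) as txn:
    #         (pose,) = pickle.loads(txn.get("{:005}".format(0).encode("ascii")))
    #     # pose.shape -> (64, 3)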
    def _sample_from_clip(
        self, dst_lmdb_env, pose_each_file, trans_each_file, trans_v_each_file, shape_each_file,
        vid_each_file,
        disable_filtering, clean_first_seconds, clean_final_seconds, is_test,
    ):
        """
        for data cleaning, we ignore the data for the first and final n seconds
        for test, we return all data
        """
        # audio_start = int(self.alignment[0] * self.args.audio_fps)
        # pose_start = int(self.alignment[1] * self.args.pose_fps)
        #logger.info(f"before: {audio_each_file.shape} {pose_each_file.shape}")
        # audio_each_file = audio_each_file[audio_start:]
        # pose_each_file = pose_each_file[pose_start:]
        # trans_each_file =
        #logger.info(f"after alignment: {audio_each_file.shape} {pose_each_file.shape}")
        #print(pose_each_file.shape)
        round_seconds_skeleton = pose_each_file.shape[0] // self.args.pose_fps # assume 1500 frames / 15 fps = 100 s
        #print(round_seconds_skeleton)

        clip_s_t, clip_e_t = clean_first_seconds, round_seconds_skeleton - clean_final_seconds # assume [10, 90]s
        clip_s_f_audio, clip_e_f_audio = self.args.audio_fps * clip_s_t, clip_e_t * self.args.audio_fps # [160,000, 90*160,000]
        clip_s_f_pose, clip_e_f_pose = clip_s_t * self.args.pose_fps, clip_e_t * self.args.pose_fps # [150, 90*15]

        for ratio in self.args.multi_length_training:
            if is_test: # stride = length for test
                cut_length = clip_e_f_pose - clip_s_f_pose
                self.args.stride = cut_length
                self.max_length = cut_length
            else:
                self.args.stride = int(ratio*self.ori_stride)
                cut_length = int(self.ori_length*ratio)

            num_subdivision = math.floor((clip_e_f_pose - clip_s_f_pose - cut_length) / self.args.stride) + 1
            logger.info(f"pose from frame {clip_s_f_pose} to {clip_e_f_pose}, length {cut_length}")
            logger.info(f"{num_subdivision} clips are expected with stride {self.args.stride}")

            n_filtered_out = defaultdict(int)
            sample_pose_list = []
            sample_audio_list = []
            sample_shape_list = []
            sample_vid_list = []
            sample_trans_list = []
            sample_trans_v_list = []

            for i in range(num_subdivision): # cut into around 2 s clips (self npose)
                start_idx = clip_s_f_pose + i * self.args.stride
                fin_idx = start_idx + cut_length
                sample_pose = pose_each_file[start_idx:fin_idx]
                sample_trans = trans_each_file[start_idx:fin_idx]
                sample_trans_v = trans_v_each_file[start_idx:fin_idx]
                sample_shape = shape_each_file[start_idx:fin_idx]
                # print(sample_pose.shape)

                sample_vid = vid_each_file[start_idx:fin_idx] if self.args.id_rep is not None else np.array([-1])

                if sample_pose is not None:  # originally `sample_pose.any() != None`, which is always True
                    # filtering motion skeleton data
                    sample_pose, filtering_message = MotionPreprocessor(sample_pose).get()
                    is_correct_motion = (sample_pose is not None)
                    if is_correct_motion or disable_filtering:
                        sample_pose_list.append(sample_pose)
                        sample_shape_list.append(sample_shape)
                        sample_vid_list.append(sample_vid)
                        sample_trans_list.append(sample_trans)
                        sample_trans_v_list.append(sample_trans_v)
                    else:
                        n_filtered_out[filtering_message] += 1

            if len(sample_pose_list) > 0:
                with dst_lmdb_env.begin(write=True) as txn:
                    for pose, shape, vid, trans, trans_v in zip(
                        sample_pose_list,
                        sample_shape_list,
                        sample_vid_list,
                        sample_trans_list,
                        sample_trans_v_list,
                    ):
                        k = "{:005}".format(self.n_out_samples).encode("ascii")
                        v = [pose, shape, vid, trans, trans_v]
                        v = pickle.dumps(v, 5)
                        txn.put(k, v)
                        self.n_out_samples += 1
        return n_filtered_out
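
    # The window count above is the usual sliding-window formula floor((T - L) / S) + 1;
    # e.g. with T = 300 usable frames, L = 128, S = 20:
    #
    #     math.floor((300 - 128) / 20) + 1   # -> 9 windows: [0:128], [20:148], ..., [160:288]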
    def __getitem__(self, idx):
        with self.lmdb_env.begin(write=False) as txn:
            key = "{:005}".format(idx).encode("ascii")
            sample = txn.get(key)
            sample = pickle.loads(sample)
            tar_pose, in_shape, vid, trans, trans_v = sample
            tar_pose = torch.from_numpy(tar_pose).float()
            tar_pose = rc.axis_angle_to_matrix(tar_pose.reshape(-1, 55, 3))
            tar_pose = rc.matrix_to_rotation_6d(tar_pose).reshape(-1, 55*6)

            if self.norm:
                tar_pose = (tar_pose - self.mean) / self.std
                trans_v = (trans_v - self.trans_mean) / self.trans_std

            if self.loader_type == "test":
                tar_pose = tar_pose.float()
                trans = torch.from_numpy(trans).float()
                trans_v = torch.from_numpy(trans_v).float()
                vid = torch.from_numpy(vid).float()
                in_shape = torch.from_numpy(in_shape).float()
            else:
                in_shape = torch.from_numpy(in_shape).reshape((in_shape.shape[0], -1)).float()
                trans = torch.from_numpy(trans).reshape((trans.shape[0], -1)).float()
                trans_v = torch.from_numpy(trans_v).reshape((trans_v.shape[0], -1)).float()
                vid = torch.from_numpy(vid).reshape((vid.shape[0], -1)).float()
                tar_pose = tar_pose.reshape((tar_pose.shape[0], -1)).float()
            tar_pose = torch.cat([tar_pose, trans_v], dim=1)
            return tar_pose

class MotionPreprocessor:
    def __init__(self, skeletons):
        self.skeletons = skeletons
        #self.mean_pose = mean_pose
        self.filtering_message = "PASS"

    def get(self):
        assert (self.skeletons is not None)

        # filtering
        if self.skeletons is not None:
            if self.check_pose_diff():
                self.skeletons = []
                self.filtering_message = "pose"
            # elif self.check_spine_angle():
            #     self.skeletons = []
            #     self.filtering_message = "spine angle"
            # elif self.check_static_motion():
            #     self.skeletons = []
            #     self.filtering_message = "motion"

        # if self.skeletons != []:
        #     self.skeletons = self.skeletons.tolist()
        #     for i, frame in enumerate(self.skeletons):
        #         assert not np.isnan(self.skeletons[i]).any() # missing joints

        return self.skeletons, self.filtering_message

    def check_static_motion(self, verbose=True):
        def get_variance(skeleton, joint_idx):
            wrist_pos = skeleton[:, joint_idx]
            variance = np.sum(np.var(wrist_pos, axis=0))
            return variance

        left_arm_var = get_variance(self.skeletons, 6)
        right_arm_var = get_variance(self.skeletons, 9)

        th = 0.0014 # excludes 13110
        # th = 0.002 # excludes 16905
        if left_arm_var < th and right_arm_var < th:
            if verbose:
                print("skip - check_static_motion left var {}, right var {}".format(left_arm_var, right_arm_var))
            return True
        else:
            if verbose:
                print("pass - check_static_motion left var {}, right var {}".format(left_arm_var, right_arm_var))
            return False

    def check_pose_diff(self, verbose=False):
        # diff = np.abs(self.skeletons - self.mean_pose) # 186*1
        # diff = np.mean(diff)

        # # th = 0.017
        # th = 0.02 #0.02 # excludes 3594
        # if diff < th:
        #     if verbose:
        #         print("skip - check_pose_diff {:.5f}".format(diff))
        #     return True
        # # th = 3.5 #0.02 # excludes 3594
        # # if 3.5 < diff < 5:
        # #     if verbose:
        # #         print("skip - check_pose_diff {:.5f}".format(diff))
        # #     return True
        # else:
        #     if verbose:
        #         print("pass - check_pose_diff {:.5f}".format(diff))
        return False

    def check_spine_angle(self, verbose=True):
        def angle_between(v1, v2):
            v1_u = v1 / np.linalg.norm(v1)
            v2_u = v2 / np.linalg.norm(v2)
            return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))

        angles = []
        for i in range(self.skeletons.shape[0]):
            spine_vec = self.skeletons[i, 1] - self.skeletons[i, 0]
            angle = angle_between(spine_vec, [0, -1, 0])
            angles.append(angle)

        if np.rad2deg(max(angles)) > 30 or np.rad2deg(np.mean(angles)) > 20: # excludes 4495
        # if np.rad2deg(max(angles)) > 20: # excludes 8270
            if verbose:
                print("skip - check_spine_angle {:.5f}, {:.5f}".format(max(angles), np.mean(angles)))
            return True
        else:
            if verbose:
                print("pass - check_spine_angle {:.5f}".format(max(angles)))
            return False
dataloaders/pymo/Quaternions.py
ADDED
@@ -0,0 +1,468 @@
import numpy as np

class Quaternions:
    """
    Quaternions is a wrapper around a numpy ndarray
    that allows it to act as if it were an ndarray of
    a quaternion data type.

    Therefore addition, subtraction, multiplication,
    division, negation, absolute, are all defined
    in terms of quaternion operations such as quaternion
    multiplication.

    This allows for much neater code and many routines
    which conceptually do the same thing to be written
    in the same way for point data and for rotation data.

    The Quaternions class has been designed such that it
    should support broadcasting and slicing in all of the
    usual ways.
    """

    def __init__(self, qs):
        if isinstance(qs, np.ndarray):
            if len(qs.shape) == 1: qs = np.array([qs])
            self.qs = qs
            return

        if isinstance(qs, Quaternions):
            self.qs = qs.qs
            return

        raise TypeError('Quaternions must be constructed from iterable, numpy array, or Quaternions, not %s' % type(qs))

    def __str__(self): return "Quaternions(" + str(self.qs) + ")"
    def __repr__(self): return "Quaternions(" + repr(self.qs) + ")"

    """ Helper Methods for Broadcasting and Data extraction """

    @classmethod
    def _broadcast(cls, sqs, oqs, scalar=False):
        if isinstance(oqs, float): return sqs, oqs * np.ones(sqs.shape[:-1])

        ss = np.array(sqs.shape) if not scalar else np.array(sqs.shape[:-1])
        os = np.array(oqs.shape)

        if len(ss) != len(os):
            raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape))

        if np.all(ss == os): return sqs, oqs

        if not np.all((ss == os) | (os == np.ones(len(os))) | (ss == np.ones(len(ss)))):
            raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape))

        sqsn, oqsn = sqs.copy(), oqs.copy()

        for a in np.where(ss == 1)[0]: sqsn = sqsn.repeat(os[a], axis=a)
        for a in np.where(os == 1)[0]: oqsn = oqsn.repeat(ss[a], axis=a)

        return sqsn, oqsn

    """ Adding Quaternions is just Defined as Multiplication """

    def __add__(self, other): return self * other
    def __sub__(self, other): return self / other

    """ Quaternion Multiplication """

    def __mul__(self, other):
        """
        Quaternion multiplication has three main methods.

        When multiplying a Quaternions array by Quaternions
        normal quaternion multiplication is performed.

        When multiplying a Quaternions array by a vector
        array of the same shape, where the last axis is 3,
        it is assumed to be a Quaternion by 3D-Vector
        multiplication and the 3D-Vectors are rotated
        in space by the Quaternions.

        When multiplying a Quaternions array by a scalar
        or vector of different shape it is assumed to be
        a Quaternions by Scalars multiplication and the
        Quaternions are scaled using Slerp and the identity
        quaternions.
        """

        """ If Quaternions type do Quaternions * Quaternions """
        if isinstance(other, Quaternions):
            sqs, oqs = Quaternions._broadcast(self.qs, other.qs)

            q0 = sqs[...,0]; q1 = sqs[...,1];
            q2 = sqs[...,2]; q3 = sqs[...,3];
            r0 = oqs[...,0]; r1 = oqs[...,1];
            r2 = oqs[...,2]; r3 = oqs[...,3];

            qs = np.empty(sqs.shape)
            qs[...,0] = r0 * q0 - r1 * q1 - r2 * q2 - r3 * q3
            qs[...,1] = r0 * q1 + r1 * q0 - r2 * q3 + r3 * q2
            qs[...,2] = r0 * q2 + r1 * q3 + r2 * q0 - r3 * q1
            qs[...,3] = r0 * q3 - r1 * q2 + r2 * q1 + r3 * q0

            return Quaternions(qs)

        """ If array type do Quaternions * Vectors """
        if isinstance(other, np.ndarray) and other.shape[-1] == 3:
            vs = Quaternions(np.concatenate([np.zeros(other.shape[:-1] + (1,)), other], axis=-1))
            return (self * (vs * -self)).imaginaries

        """ If float do Quaternions * Scalars """
        if isinstance(other, np.ndarray) or isinstance(other, float):
            return Quaternions.slerp(Quaternions.id_like(self), self, other)

        raise TypeError('Cannot multiply/add Quaternions with type %s' % str(type(other)))
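
    # Sketch of the vector-rotation path of __mul__ above: Quaternions.exp takes
    # axis * (angle / 2), so a 90-degree rotation about Z sends +X to +Y.
    #
    #     q = Quaternions.exp(np.array([[0.0, 0.0, np.pi / 4]]))
    #     q * np.array([[1.0, 0.0, 0.0]])   # -> ~[[0., 1., 0.]]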
    def __div__(self, other):
        """
        When a Quaternion type is supplied, division is defined
        as multiplication by the inverse of that Quaternion.

        When a scalar or vector is supplied it is defined
        as multiplication by one over the supplied value.
        Essentially a scaling.
        """
        if isinstance(other, Quaternions): return self * (-other)
        if isinstance(other, np.ndarray): return self * (1.0 / other)
        if isinstance(other, float): return self * (1.0 / other)
        raise TypeError('Cannot divide/subtract Quaternions with type %s' % str(type(other)))

    __truediv__ = __div__  # Python 3 resolves `/` to __truediv__

    def __eq__(self, other): return self.qs == other.qs
    def __ne__(self, other): return self.qs != other.qs

    def __neg__(self):
        """ Invert Quaternions """
        return Quaternions(self.qs * np.array([[1, -1, -1, -1]]))

    def __abs__(self):
        """ Unify Quaternions To Single Pole """
        qabs = self.normalized().copy()
        top = np.sum(( qabs.qs) * np.array([1,0,0,0]), axis=-1)
        bot = np.sum((-qabs.qs) * np.array([1,0,0,0]), axis=-1)
        qabs.qs[top < bot] = -qabs.qs[top < bot]
        return qabs

    def __iter__(self): return iter(self.qs)
    def __len__(self): return len(self.qs)

    def __getitem__(self, k): return Quaternions(self.qs[k])
    def __setitem__(self, k, v): self.qs[k] = v.qs

    @property
    def lengths(self):
        return np.sum(self.qs**2.0, axis=-1)**0.5

    @property
    def reals(self):
        return self.qs[...,0]

    @property
    def imaginaries(self):
        return self.qs[...,1:4]

    @property
    def shape(self): return self.qs.shape[:-1]

    def repeat(self, n, **kwargs):
        return Quaternions(self.qs.repeat(n, **kwargs))

    def normalized(self):
        return Quaternions(self.qs / self.lengths[...,np.newaxis])

    def log(self):
        norm = abs(self.normalized())
        imgs = norm.imaginaries
        lens = np.sqrt(np.sum(imgs**2, axis=-1))
        lens = np.arctan2(lens, norm.reals) / (lens + 1e-10)
        return imgs * lens[...,np.newaxis]

    def constrained(self, axis):
        rl = self.reals
        im = np.sum(axis * self.imaginaries, axis=-1)

        t1 = -2 * np.arctan2(rl, im) + np.pi
        t2 = -2 * np.arctan2(rl, im) - np.pi

        top = Quaternions.exp(axis[np.newaxis] * (t1[:,np.newaxis] / 2.0))
        bot = Quaternions.exp(axis[np.newaxis] * (t2[:,np.newaxis] / 2.0))
        img = self.dot(top) > self.dot(bot)

        ret = top.copy()
        ret[ img] = top[ img]
        ret[~img] = bot[~img]
        return ret

    def constrained_x(self): return self.constrained(np.array([1,0,0]))
    def constrained_y(self): return self.constrained(np.array([0,1,0]))
    def constrained_z(self): return self.constrained(np.array([0,0,1]))

    def dot(self, q): return np.sum(self.qs * q.qs, axis=-1)

    def copy(self): return Quaternions(np.copy(self.qs))

    def reshape(self, s):
        self.qs.reshape(s)
        return self

    def interpolate(self, ws):
        return Quaternions.exp(np.average(abs(self).log(), axis=0, weights=ws))

    def euler(self, order='xyz'):
        q = self.normalized().qs
        q0 = q[...,0]
        q1 = q[...,1]
        q2 = q[...,2]
        q3 = q[...,3]
        es = np.zeros(self.shape + (3,))

        if order == 'xyz':
            es[...,0] = np.arctan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2))
            es[...,1] = np.arcsin((2 * (q0 * q2 - q3 * q1)).clip(-1,1))
            es[...,2] = np.arctan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3))
        elif order == 'yzx':
            es[...,0] = np.arctan2(2 * (q1 * q0 - q2 * q3), -q1 * q1 + q2 * q2 - q3 * q3 + q0 * q0)
            es[...,1] = np.arctan2(2 * (q2 * q0 - q1 * q3), q1 * q1 - q2 * q2 - q3 * q3 + q0 * q0)
            es[...,2] = np.arcsin((2 * (q1 * q2 + q3 * q0)).clip(-1,1))
        else:
            raise NotImplementedError('Cannot convert from ordering %s' % order)

        """

        # These conversions don't appear to work correctly for Maya.
        # http://bediyap.com/programming/convert-quaternion-to-euler-rotations/

        if order == 'xyz':
            es[...,0] = np.arctan2(2 * (q0 * q3 - q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
            es[...,1] = np.arcsin((2 * (q1 * q3 + q0 * q2)).clip(-1,1))
            es[...,2] = np.arctan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
        elif order == 'yzx':
            es[...,0] = np.arctan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
            es[...,1] = np.arcsin((2 * (q1 * q2 + q0 * q3)).clip(-1,1))
            es[...,2] = np.arctan2(2 * (q0 * q2 - q1 * q3), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
        elif order == 'zxy':
            es[...,0] = np.arctan2(2 * (q0 * q2 - q1 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
            es[...,1] = np.arcsin((2 * (q0 * q1 + q2 * q3)).clip(-1,1))
            es[...,2] = np.arctan2(2 * (q0 * q3 - q1 * q2), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
        elif order == 'xzy':
            es[...,0] = np.arctan2(2 * (q0 * q2 + q1 * q3), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
            es[...,1] = np.arcsin((2 * (q0 * q3 - q1 * q2)).clip(-1,1))
            es[...,2] = np.arctan2(2 * (q0 * q1 + q2 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
        elif order == 'yxz':
            es[...,0] = np.arctan2(2 * (q1 * q2 + q0 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
            es[...,1] = np.arcsin((2 * (q0 * q1 - q2 * q3)).clip(-1,1))
            es[...,2] = np.arctan2(2 * (q1 * q3 + q0 * q2), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
        elif order == 'zyx':
            es[...,0] = np.arctan2(2 * (q0 * q1 + q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
            es[...,1] = np.arcsin((2 * (q0 * q2 - q1 * q3)).clip(-1,1))
            es[...,2] = np.arctan2(2 * (q0 * q3 + q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
        else:
            raise KeyError('Unknown ordering %s' % order)

        """

        # https://github.com/ehsan/ogre/blob/master/OgreMain/src/OgreMatrix3.cpp
        # Use this class and convert from matrix

        return es

    def average(self):
        if len(self.shape) == 1:
            import numpy.core.umath_tests as ut
            system = ut.matrix_multiply(self.qs[:,:,np.newaxis], self.qs[:,np.newaxis,:]).sum(axis=0)
            w, v = np.linalg.eigh(system)
            qiT_dot_qref = (self.qs[:,:,np.newaxis] * v[np.newaxis,:,:]).sum(axis=1)
            return Quaternions(v[:,np.argmin((1.-qiT_dot_qref**2).sum(axis=0))])
        else:
            raise NotImplementedError('Cannot average multi-dimensional Quaternions')

    def angle_axis(self):
        norm = self.normalized()
        s = np.sqrt(1 - (norm.reals**2.0))
        s[s == 0] = 0.001

        angles = 2.0 * np.arccos(norm.reals)
        axis = norm.imaginaries / s[...,np.newaxis]

        return angles, axis

    def transforms(self):
        qw = self.qs[...,0]
        qx = self.qs[...,1]
        qy = self.qs[...,2]
        qz = self.qs[...,3]

        x2 = qx + qx; y2 = qy + qy; z2 = qz + qz;
        xx = qx * x2; yy = qy * y2; wx = qw * x2;
        xy = qx * y2; yz = qy * z2; wy = qw * y2;
        xz = qx * z2; zz = qz * z2; wz = qw * z2;

        m = np.empty(self.shape + (3,3))
        m[...,0,0] = 1.0 - (yy + zz)
        m[...,0,1] = xy - wz
        m[...,0,2] = xz + wy
        m[...,1,0] = xy + wz
        m[...,1,1] = 1.0 - (xx + zz)
        m[...,1,2] = yz - wx
        m[...,2,0] = xz - wy
        m[...,2,1] = yz + wx
        m[...,2,2] = 1.0 - (xx + yy)

        return m

    def ravel(self):
        return self.qs.ravel()

    @classmethod
    def id(cls, n):
        if isinstance(n, tuple):
            qs = np.zeros(n + (4,))
            qs[...,0] = 1.0
            return Quaternions(qs)

        if isinstance(n, int):  # (the Python 2 `long` check has been removed)
            qs = np.zeros((n,4))
            qs[:,0] = 1.0
            return Quaternions(qs)

        raise TypeError('Cannot Construct Quaternion from %s type' % str(type(n)))

    @classmethod
    def id_like(cls, a):
        qs = np.zeros(a.shape + (4,))
        qs[...,0] = 1.0
        return Quaternions(qs)

    @classmethod
    def exp(cls, ws):
        ts = np.sum(ws**2.0, axis=-1)**0.5
        ts[ts == 0] = 0.001
        ls = np.sin(ts) / ts

        qs = np.empty(ws.shape[:-1] + (4,))
        qs[...,0] = np.cos(ts)
        qs[...,1] = ws[...,0] * ls
        qs[...,2] = ws[...,1] * ls
        qs[...,3] = ws[...,2] * ls

        return Quaternions(qs).normalized()

    @classmethod
+
def slerp(cls, q0s, q1s, a):
|
368 |
+
|
369 |
+
fst, snd = cls._broadcast(q0s.qs, q1s.qs)
|
370 |
+
fst, a = cls._broadcast(fst, a, scalar=True)
|
371 |
+
snd, a = cls._broadcast(snd, a, scalar=True)
|
372 |
+
|
373 |
+
len = np.sum(fst * snd, axis=-1)
|
374 |
+
|
375 |
+
neg = len < 0.0
|
376 |
+
len[neg] = -len[neg]
|
377 |
+
snd[neg] = -snd[neg]
|
378 |
+
|
379 |
+
amount0 = np.zeros(a.shape)
|
380 |
+
amount1 = np.zeros(a.shape)
|
381 |
+
|
382 |
+
linear = (1.0 - len) < 0.01
|
383 |
+
omegas = np.arccos(len[~linear])
|
384 |
+
sinoms = np.sin(omegas)
|
385 |
+
|
386 |
+
amount0[ linear] = 1.0 - a[linear]
|
387 |
+
amount1[ linear] = a[linear]
|
388 |
+
amount0[~linear] = np.sin((1.0 - a[~linear]) * omegas) / sinoms
|
389 |
+
amount1[~linear] = np.sin( a[~linear] * omegas) / sinoms
|
390 |
+
|
391 |
+
return Quaternions(
|
392 |
+
amount0[...,np.newaxis] * fst +
|
393 |
+
amount1[...,np.newaxis] * snd)
|
394 |
+
|
395 |
+
@classmethod
|
396 |
+
def between(cls, v0s, v1s):
|
397 |
+
a = np.cross(v0s, v1s)
|
398 |
+
w = np.sqrt((v0s**2).sum(axis=-1) * (v1s**2).sum(axis=-1)) + (v0s * v1s).sum(axis=-1)
|
399 |
+
return Quaternions(np.concatenate([w[...,np.newaxis], a], axis=-1)).normalized()
|
400 |
+
|
401 |
+
@classmethod
|
402 |
+
def from_angle_axis(cls, angles, axis):
|
403 |
+
axis = axis / (np.sqrt(np.sum(axis**2, axis=-1)) + 1e-10)[...,np.newaxis]
|
404 |
+
sines = np.sin(angles / 2.0)[...,np.newaxis]
|
405 |
+
cosines = np.cos(angles / 2.0)[...,np.newaxis]
|
406 |
+
return Quaternions(np.concatenate([cosines, axis * sines], axis=-1))
|
407 |
+
|
408 |
+
@classmethod
|
409 |
+
def from_euler(cls, es, order='xyz', world=False):
|
410 |
+
|
411 |
+
axis = {
|
412 |
+
'x' : np.array([1,0,0]),
|
413 |
+
'y' : np.array([0,1,0]),
|
414 |
+
'z' : np.array([0,0,1]),
|
415 |
+
}
|
416 |
+
|
417 |
+
q0s = Quaternions.from_angle_axis(es[...,0], axis[order[0]])
|
418 |
+
q1s = Quaternions.from_angle_axis(es[...,1], axis[order[1]])
|
419 |
+
q2s = Quaternions.from_angle_axis(es[...,2], axis[order[2]])
|
420 |
+
|
421 |
+
return (q2s * (q1s * q0s)) if world else (q0s * (q1s * q2s))
|
422 |
+
|
423 |
+
@classmethod
|
424 |
+
def from_transforms(cls, ts):
|
425 |
+
|
426 |
+
d0, d1, d2 = ts[...,0,0], ts[...,1,1], ts[...,2,2]
|
427 |
+
|
428 |
+
q0 = ( d0 + d1 + d2 + 1.0) / 4.0
|
429 |
+
q1 = ( d0 - d1 - d2 + 1.0) / 4.0
|
430 |
+
q2 = (-d0 + d1 - d2 + 1.0) / 4.0
|
431 |
+
q3 = (-d0 - d1 + d2 + 1.0) / 4.0
|
432 |
+
|
433 |
+
q0 = np.sqrt(q0.clip(0,None))
|
434 |
+
q1 = np.sqrt(q1.clip(0,None))
|
435 |
+
q2 = np.sqrt(q2.clip(0,None))
|
436 |
+
q3 = np.sqrt(q3.clip(0,None))
|
437 |
+
|
438 |
+
c0 = (q0 >= q1) & (q0 >= q2) & (q0 >= q3)
|
439 |
+
c1 = (q1 >= q0) & (q1 >= q2) & (q1 >= q3)
|
440 |
+
c2 = (q2 >= q0) & (q2 >= q1) & (q2 >= q3)
|
441 |
+
c3 = (q3 >= q0) & (q3 >= q1) & (q3 >= q2)
|
442 |
+
|
443 |
+
q1[c0] *= np.sign(ts[c0,2,1] - ts[c0,1,2])
|
444 |
+
q2[c0] *= np.sign(ts[c0,0,2] - ts[c0,2,0])
|
445 |
+
q3[c0] *= np.sign(ts[c0,1,0] - ts[c0,0,1])
|
446 |
+
|
447 |
+
q0[c1] *= np.sign(ts[c1,2,1] - ts[c1,1,2])
|
448 |
+
q2[c1] *= np.sign(ts[c1,1,0] + ts[c1,0,1])
|
449 |
+
q3[c1] *= np.sign(ts[c1,0,2] + ts[c1,2,0])
|
450 |
+
|
451 |
+
q0[c2] *= np.sign(ts[c2,0,2] - ts[c2,2,0])
|
452 |
+
q1[c2] *= np.sign(ts[c2,1,0] + ts[c2,0,1])
|
453 |
+
q3[c2] *= np.sign(ts[c2,2,1] + ts[c2,1,2])
|
454 |
+
|
455 |
+
q0[c3] *= np.sign(ts[c3,1,0] - ts[c3,0,1])
|
456 |
+
q1[c3] *= np.sign(ts[c3,2,0] + ts[c3,0,2])
|
457 |
+
q2[c3] *= np.sign(ts[c3,2,1] + ts[c3,1,2])
|
458 |
+
|
459 |
+
qs = np.empty(ts.shape[:-2] + (4,))
|
460 |
+
qs[...,0] = q0
|
461 |
+
qs[...,1] = q1
|
462 |
+
qs[...,2] = q2
|
463 |
+
qs[...,3] = q3
|
464 |
+
|
465 |
+
return cls(qs)
|
466 |
+
|
467 |
+
|
468 |
+
|
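A quick end-to-end sanity check of the class above, as a minimal sketch: it assumes the earlier part of Quaternions.py (not shown here) defines `_broadcast`, `normalized`, multiplication, and indexing as in upstream PyMO, and that the repo layout makes the module importable.

import numpy as np
from dataloaders.pymo.Quaternions import Quaternions

# Four rotations about the y-axis, built from Euler angles.
es = np.zeros((4, 3))
es[:, 1] = np.linspace(0.0, np.pi / 2, 4)
qs = Quaternions.from_euler(es, order='xyz')

# Round-trip back to Euler angles ('xyz' and 'yzx' are the only
# orders implemented by euler()).
print(qs.euler(order='xyz')[:, 1])

# Slerp halfway between the first and last rotation.
half = Quaternions.slerp(qs[:1], qs[-1:], np.array([0.5]))
print(half.qs)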
dataloaders/pymo/__init__.py
ADDED
File without changes
dataloaders/pymo/data.py
ADDED
@@ -0,0 +1,53 @@
import numpy as np

class Joint():
    def __init__(self, name, parent=None, children=None):
        self.name = name
        self.parent = parent
        self.children = children

class MocapData():
    def __init__(self):
        self.skeleton = {}
        self.values = None
        self.channel_names = []
        self.framerate = 0.0
        self.root_name = ''

    def traverse(self, j=None):
        stack = [self.root_name]
        while stack:
            joint = stack.pop()
            yield joint
            for c in self.skeleton[joint]['children']:
                stack.append(c)

    def clone(self):
        import copy
        new_data = MocapData()
        new_data.skeleton = copy.copy(self.skeleton)
        new_data.values = copy.copy(self.values)
        new_data.channel_names = copy.copy(self.channel_names)
        new_data.root_name = copy.copy(self.root_name)
        new_data.framerate = copy.copy(self.framerate)
        return new_data

    def get_all_channels(self):
        '''Returns all of the channels parsed from the file as a 2D numpy array'''

        frames = [f[1] for f in self.values]
        return np.asarray([[channel[2] for channel in frame] for frame in frames])

    def get_skeleton_tree(self):
        tree = []
        root_key = [j for j in self.skeleton if self.skeleton[j]['parent'] is None][0]

        root_joint = Joint(root_key)
        # The original stub fell through and returned None; returning the
        # constructed root node at least makes the method usable.
        return root_joint

    def get_empty_channels(self):
        #TODO
        pass

    def get_constant_channels(self):
        #TODO
        pass
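As a quick illustration of how `MocapData.traverse` walks the skeleton, here is a sketch with hypothetical joint names; a real instance is normally populated by the BVH parser rather than by hand.

from dataloaders.pymo.data import MocapData

data = MocapData()
data.root_name = 'Hips'
data.skeleton = {
    'Hips':      {'parent': None,   'children': ['Spine', 'LeftUpLeg']},
    'Spine':     {'parent': 'Hips', 'children': []},
    'LeftUpLeg': {'parent': 'Hips', 'children': []},
}

# Depth-first, stack-based walk: children are visited last-pushed-first.
print(list(data.traverse()))  # ['Hips', 'LeftUpLeg', 'Spine']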
dataloaders/pymo/features.py
ADDED
@@ -0,0 +1,43 @@
'''
A set of mocap feature extraction functions

Created by Omid Alemi | Nov 17 2017

'''
import numpy as np
import pandas as pd
import peakutils
import matplotlib.pyplot as plt

def get_foot_contact_idxs(signal, t=0.02, min_dist=120):
    up_idxs = peakutils.indexes(signal, thres=t/max(signal), min_dist=min_dist)
    down_idxs = peakutils.indexes(-signal, thres=t/min(signal), min_dist=min_dist)

    return [up_idxs, down_idxs]


def create_foot_contact_signal(mocap_track, col_name, start=1, t=0.02, min_dist=120):
    signal = mocap_track.values[col_name].values
    idxs = get_foot_contact_idxs(signal, t, min_dist)

    step_signal = []

    c = start
    for f in range(len(signal)):
        if f in idxs[1]:
            c = 0
        elif f in idxs[0]:
            c = 1

        step_signal.append(c)

    return step_signal

def plot_foot_up_down(mocap_track, col_name, t=0.02, min_dist=120):

    signal = mocap_track.values[col_name].values
    idxs = get_foot_contact_idxs(signal, t, min_dist)

    plt.plot(mocap_track.values.index, signal)
    plt.plot(mocap_track.values.index[idxs[0]], signal[idxs[0]], 'ro')
    plt.plot(mocap_track.values.index[idxs[1]], signal[idxs[1]], 'go')
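For example, `create_foot_contact_signal` can be exercised on a synthetic foot-height track; this is a sketch, with an illustrative column name and a sine wave standing in for a real mocap trajectory (`peakutils` must be installed).

import numpy as np
import pandas as pd
from dataloaders.pymo.data import MocapData
from dataloaders.pymo.features import create_foot_contact_signal

# A slow sine wave stands in for a foot's vertical position over 2000 frames.
track = MocapData()
frames = np.arange(2000)
track.values = pd.DataFrame({'LeftFoot_Yposition': np.sin(frames / 60.0)},
                            index=frames)

contact = create_foot_contact_signal(track, 'LeftFoot_Yposition')
print(len(contact), sum(contact))  # one 0/1 contact flag per frame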
dataloaders/pymo/mocapplayer/data-template.js
ADDED
@@ -0,0 +1,3 @@
var dataBuffer = `$$DATA$$`;

start(dataBuffer);
dataloaders/pymo/mocapplayer/js/skeletonFactory.js
ADDED
@@ -0,0 +1,233 @@
bm_v = new THREE.MeshPhongMaterial({
    color: 0x08519c,
    emissive: 0x08306b,
    specular: 0x08519c,
    shininess: 10,
    side: THREE.DoubleSide
});

jm_v = new THREE.MeshPhongMaterial({
    color: 0x08306b,
    emissive: 0x000000,
    specular: 0x111111,
    shininess: 90,
    side: THREE.DoubleSide
});

bm_a = new THREE.MeshPhongMaterial({
    color: 0x980043,
    emissive: 0x67001f,
    specular: 0x6a51a3,
    shininess: 10,
    side: THREE.DoubleSide
});

jm_a = new THREE.MeshPhongMaterial({
    color: 0x67001f,
    emissive: 0x000000,
    specular: 0x111111,
    shininess: 90,
    side: THREE.DoubleSide
});

bm_b = new THREE.MeshPhongMaterial({
    color: 0x3f007d,
    emissive: 0x3f007d,
    specular: 0x807dba,
    shininess: 2,
    side: THREE.DoubleSide
});

jm_b = new THREE.MeshPhongMaterial({
    color: 0x3f007d,
    emissive: 0x000000,
    specular: 0x807dba,
    shininess: 90,
    side: THREE.DoubleSide
});

//------------------


jointmaterial = new THREE.MeshLambertMaterial({
    color: 0xc57206,
    emissive: 0x271c18,
    side: THREE.DoubleSide,
    // shading: THREE.FlatShading,
    wireframe: false,
    shininess: 90,
});

bonematerial = new THREE.MeshPhongMaterial({
    color: 0xbd9a6d,
    emissive: 0x271c18,
    side: THREE.DoubleSide,
    // shading: THREE.FlatShading,
    wireframe: false
});

jointmaterial2 = new THREE.MeshPhongMaterial({
    color: 0x1562a2,
    emissive: 0x000000,
    specular: 0x111111,
    shininess: 30,
    side: THREE.DoubleSide
});

bonematerial2 = new THREE.MeshPhongMaterial({
    color: 0x552211,
    emissive: 0x882211,
    // emissive: 0x000000,
    specular: 0x111111,
    shininess: 30,
    side: THREE.DoubleSide
});

bonematerial3 = new THREE.MeshPhongMaterial({
    color: 0x176793,
    emissive: 0x000000,
    specular: 0x111111,
    shininess: 90,
    side: THREE.DoubleSide
});


jointmaterial4 = new THREE.MeshPhongMaterial({
    color: 0xFF8A00,
    emissive: 0x000000,
    specular: 0x111111,
    shininess: 90,
    side: THREE.DoubleSide
});


bonematerial4 = new THREE.MeshPhongMaterial({
    color: 0x53633D,
    emissive: 0x000000,
    specular: 0xFFC450,
    shininess: 90,
    side: THREE.DoubleSide
});


bonematerial44 = new THREE.MeshPhongMaterial({
    color: 0x582A72,
    emissive: 0x000000,
    specular: 0xFFC450,
    shininess: 90,
    side: THREE.DoubleSide
});

jointmaterial5 = new THREE.MeshPhongMaterial({
    color: 0xAA5533,
    emissive: 0x000000,
    specular: 0x111111,
    shininess: 30,
    side: THREE.DoubleSide
});

bonematerial5 = new THREE.MeshPhongMaterial({
    color: 0x552211,
    emissive: 0x772211,
    specular: 0x111111,
    shininess: 30,
    side: THREE.DoubleSide
});


markermaterial = new THREE.MeshPhongMaterial({
    color: 0xc57206,
    emissive: 0x271c18,
    side: THREE.DoubleSide,
    // shading: THREE.FlatShading,
    wireframe: false,
    shininess: 20,
});

markermaterial2 = new THREE.MeshPhongMaterial({
    color: 0x1562a2,
    emissive: 0x271c18,
    side: THREE.DoubleSide,
    // shading: THREE.FlatShading,
    wireframe: false,
    shininess: 20,
});

markermaterial3 = new THREE.MeshPhongMaterial({
    color: 0x555555,
    emissive: 0x999999,
    side: THREE.DoubleSide,
    // shading: THREE.FlatShading,
    wireframe: false,
    shininess: 20,
});


var makeMarkerGeometry_Sphere10 = function(markerName, scale) {
    return new THREE.SphereGeometry(10, 60, 60);
};

var makeMarkerGeometry_Sphere3 = function(markerName, scale) {
    return new THREE.SphereGeometry(3, 60, 60);
};

var makeMarkerGeometry_SphereX = function(markerName, scale) {
    return new THREE.SphereGeometry(5, 60, 60);
};

var makeJointGeometry_SphereX = function(X) {
    return function(jointName, scale) {
        return new THREE.SphereGeometry(X, 60, 60);
    };
};


var makeJointGeometry_Sphere1 = function(jointName, scale) {
    return new THREE.SphereGeometry(2 / scale, 60, 60);
};

var makeJointGeometry_Sphere2 = function(jointName, scale) {
    return new THREE.SphereGeometry(1 / scale, 60, 60);
};

var makeJointGeometry_Dode = function(jointName, scale) {
    return new THREE.DodecahedronGeometry(1 / scale, 0);
};

var makeBoneGeometry_Cylinder1 = function(joint1Name, joint2Name, length, scale) {
    return new THREE.CylinderGeometry(1.5 / scale, 0.7 / scale, length, 40);
};

var makeBoneGeometry_Cylinder2 = function(joint1Name, joint2Name, length, scale) {
    // if (joint1Name.includes("LeftHip"))
    //     length = 400;
    return new THREE.CylinderGeometry(1.5 / scale, 0.2 / scale, length, 40);
};

var makeBoneGeometry_Cylinder3 = function(joint1Name, joint2Name, length, scale) {
    var c1 = new THREE.CylinderGeometry(1.5 / scale, 0.2 / scale, length / 1, 20);
    var c2 = new THREE.CylinderGeometry(0.2 / scale, 1.5 / scale, length / 1, 40);

    var material = new THREE.MeshPhongMaterial({
        color: 0xF7FE2E
    });
    var mmesh = new THREE.Mesh(c1, material);
    mmesh.updateMatrix();
    c2.merge(mmesh.geometry, mmesh.matrix);
    return c2;
};

var makeBoneGeometry_Box1 = function(joint1Name, joint2Name, length, scale) {
    return new THREE.BoxGeometry(1 / scale, length, 1 / scale, 40);
};


var makeJointGeometry_Empty = function(jointName, scale) {
    return new THREE.SphereGeometry(0.001, 60, 60);
};

var makeBoneGeometry_Empty = function(joint1Name, joint2Name, length, scale) {
    return new THREE.CylinderGeometry(0.001, 0.001, 0.001, 40);
};
dataloaders/pymo/mocapplayer/libs/jquery.min.js
ADDED
@@ -0,0 +1,4 @@
/*! jQuery v2.2.3 | (c) jQuery Foundation | jquery.org/license */
[minified jQuery 2.2.3 vendor library, added verbatim; minified source omitted]
b.events)e[d]?n.event.remove(c,d):n.removeEvent(c,d,b.handle);c[N.expando]=void 0}c[O.expando]&&(c[O.expando]=void 0)}}}),n.fn.extend({domManip:ua,detach:function(a){return va(this,a,!0)},remove:function(a){return va(this,a)},text:function(a){return K(this,function(a){return void 0===a?n.text(this):this.empty().each(function(){1!==this.nodeType&&11!==this.nodeType&&9!==this.nodeType||(this.textContent=a)})},null,a,arguments.length)},append:function(){return ua(this,arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=pa(this,a);b.appendChild(a)}})},prepend:function(){return ua(this,arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=pa(this,a);b.insertBefore(a,b.firstChild)}})},before:function(){return ua(this,arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return ua(this,arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},empty:function(){for(var a,b=0;null!=(a=this[b]);b++)1===a.nodeType&&(n.cleanData(_(a,!1)),a.textContent="");return this},clone:function(a,b){return a=null==a?!1:a,b=null==b?a:b,this.map(function(){return n.clone(this,a,b)})},html:function(a){return K(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a&&1===b.nodeType)return b.innerHTML;if("string"==typeof a&&!la.test(a)&&!$[(Y.exec(a)||["",""])[1].toLowerCase()]){a=n.htmlPrefilter(a);try{for(;d>c;c++)b=this[c]||{},1===b.nodeType&&(n.cleanData(_(b,!1)),b.innerHTML=a);b=0}catch(e){}}b&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(){var a=[];return ua(this,arguments,function(b){var c=this.parentNode;n.inArray(this,a)<0&&(n.cleanData(_(this)),c&&c.replaceChild(b,this))},a)}}),n.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){n.fn[a]=function(a){for(var c,d=[],e=n(a),f=e.length-1,h=0;f>=h;h++)c=h===f?this:this.clone(!0),n(e[h])[b](c),g.apply(d,c.get());return this.pushStack(d)}});var wa,xa={HTML:"block",BODY:"block"};function ya(a,b){var c=n(b.createElement(a)).appendTo(b.body),d=n.css(c[0],"display");return c.detach(),d}function za(a){var b=d,c=xa[a];return c||(c=ya(a,b),"none"!==c&&c||(wa=(wa||n("<iframe frameborder='0' width='0' height='0'/>")).appendTo(b.documentElement),b=wa[0].contentDocument,b.write(),b.close(),c=ya(a,b),wa.detach()),xa[a]=c),c}var Aa=/^margin/,Ba=new RegExp("^("+S+")(?!px)[a-z%]+$","i"),Ca=function(b){var c=b.ownerDocument.defaultView;return c&&c.opener||(c=a),c.getComputedStyle(b)},Da=function(a,b,c,d){var e,f,g={};for(f in b)g[f]=a.style[f],a.style[f]=b[f];e=c.apply(a,d||[]);for(f in b)a.style[f]=g[f];return e},Ea=d.documentElement;!function(){var b,c,e,f,g=d.createElement("div"),h=d.createElement("div");if(h.style){h.style.backgroundClip="content-box",h.cloneNode(!0).style.backgroundClip="",l.clearCloneStyle="content-box"===h.style.backgroundClip,g.style.cssText="border:0;width:8px;height:0;top:0;left:-9999px;padding:0;margin-top:1px;position:absolute",g.appendChild(h);function i(){h.style.cssText="-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;position:relative;display:block;margin:auto;border:1px;padding:1px;top:1%;width:50%",h.innerHTML="",Ea.appendChild(g);var d=a.getComputedStyle(h);b="1%"!==d.top,f="2px"===d.marginLeft,c="4px"===d.width,h.style.marginRight="50%",e="4px"===d.marginRight,Ea.removeChild(g)}n.extend(l,{pixelPosition:function(){return 
i(),b},boxSizingReliable:function(){return null==c&&i(),c},pixelMarginRight:function(){return null==c&&i(),e},reliableMarginLeft:function(){return null==c&&i(),f},reliableMarginRight:function(){var b,c=h.appendChild(d.createElement("div"));return c.style.cssText=h.style.cssText="-webkit-box-sizing:content-box;box-sizing:content-box;display:block;margin:0;border:0;padding:0",c.style.marginRight=c.style.width="0",h.style.width="1px",Ea.appendChild(g),b=!parseFloat(a.getComputedStyle(c).marginRight),Ea.removeChild(g),h.removeChild(c),b}})}}();function Fa(a,b,c){var d,e,f,g,h=a.style;return c=c||Ca(a),g=c?c.getPropertyValue(b)||c[b]:void 0,""!==g&&void 0!==g||n.contains(a.ownerDocument,a)||(g=n.style(a,b)),c&&!l.pixelMarginRight()&&Ba.test(g)&&Aa.test(b)&&(d=h.width,e=h.minWidth,f=h.maxWidth,h.minWidth=h.maxWidth=h.width=g,g=c.width,h.width=d,h.minWidth=e,h.maxWidth=f),void 0!==g?g+"":g}function Ga(a,b){return{get:function(){return a()?void delete this.get:(this.get=b).apply(this,arguments)}}}var Ha=/^(none|table(?!-c[ea]).+)/,Ia={position:"absolute",visibility:"hidden",display:"block"},Ja={letterSpacing:"0",fontWeight:"400"},Ka=["Webkit","O","Moz","ms"],La=d.createElement("div").style;function Ma(a){if(a in La)return a;var b=a[0].toUpperCase()+a.slice(1),c=Ka.length;while(c--)if(a=Ka[c]+b,a in La)return a}function Na(a,b,c){var d=T.exec(b);return d?Math.max(0,d[2]-(c||0))+(d[3]||"px"):b}function Oa(a,b,c,d,e){for(var f=c===(d?"border":"content")?4:"width"===b?1:0,g=0;4>f;f+=2)"margin"===c&&(g+=n.css(a,c+U[f],!0,e)),d?("content"===c&&(g-=n.css(a,"padding"+U[f],!0,e)),"margin"!==c&&(g-=n.css(a,"border"+U[f]+"Width",!0,e))):(g+=n.css(a,"padding"+U[f],!0,e),"padding"!==c&&(g+=n.css(a,"border"+U[f]+"Width",!0,e)));return g}function Pa(b,c,e){var f=!0,g="width"===c?b.offsetWidth:b.offsetHeight,h=Ca(b),i="border-box"===n.css(b,"boxSizing",!1,h);if(d.msFullscreenElement&&a.top!==a&&b.getClientRects().length&&(g=Math.round(100*b.getBoundingClientRect()[c])),0>=g||null==g){if(g=Fa(b,c,h),(0>g||null==g)&&(g=b.style[c]),Ba.test(g))return g;f=i&&(l.boxSizingReliable()||g===b.style[c]),g=parseFloat(g)||0}return g+Oa(b,c,e||(i?"border":"content"),f,h)+"px"}function Qa(a,b){for(var c,d,e,f=[],g=0,h=a.length;h>g;g++)d=a[g],d.style&&(f[g]=N.get(d,"olddisplay"),c=d.style.display,b?(f[g]||"none"!==c||(d.style.display=""),""===d.style.display&&V(d)&&(f[g]=N.access(d,"olddisplay",za(d.nodeName)))):(e=V(d),"none"===c&&e||N.set(d,"olddisplay",e?c:n.css(d,"display"))));for(g=0;h>g;g++)d=a[g],d.style&&(b&&"none"!==d.style.display&&""!==d.style.display||(d.style.display=b?f[g]||"":"none"));return a}n.extend({cssHooks:{opacity:{get:function(a,b){if(b){var c=Fa(a,"opacity");return""===c?"1":c}}}},cssNumber:{animationIterationCount:!0,columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":"cssFloat"},style:function(a,b,c,d){if(a&&3!==a.nodeType&&8!==a.nodeType&&a.style){var e,f,g,h=n.camelCase(b),i=a.style;return b=n.cssProps[h]||(n.cssProps[h]=Ma(h)||h),g=n.cssHooks[b]||n.cssHooks[h],void 0===c?g&&"get"in g&&void 0!==(e=g.get(a,!1,d))?e:i[b]:(f=typeof c,"string"===f&&(e=T.exec(c))&&e[1]&&(c=W(a,b,e),f="number"),null!=c&&c===c&&("number"===f&&(c+=e&&e[3]||(n.cssNumber[h]?"":"px")),l.clearCloneStyle||""!==c||0!==b.indexOf("background")||(i[b]="inherit"),g&&"set"in g&&void 0===(c=g.set(a,c,d))||(i[b]=c)),void 0)}},css:function(a,b,c,d){var e,f,g,h=n.camelCase(b);return 
b=n.cssProps[h]||(n.cssProps[h]=Ma(h)||h),g=n.cssHooks[b]||n.cssHooks[h],g&&"get"in g&&(e=g.get(a,!0,c)),void 0===e&&(e=Fa(a,b,d)),"normal"===e&&b in Ja&&(e=Ja[b]),""===c||c?(f=parseFloat(e),c===!0||isFinite(f)?f||0:e):e}}),n.each(["height","width"],function(a,b){n.cssHooks[b]={get:function(a,c,d){return c?Ha.test(n.css(a,"display"))&&0===a.offsetWidth?Da(a,Ia,function(){return Pa(a,b,d)}):Pa(a,b,d):void 0},set:function(a,c,d){var e,f=d&&Ca(a),g=d&&Oa(a,b,d,"border-box"===n.css(a,"boxSizing",!1,f),f);return g&&(e=T.exec(c))&&"px"!==(e[3]||"px")&&(a.style[b]=c,c=n.css(a,b)),Na(a,c,g)}}}),n.cssHooks.marginLeft=Ga(l.reliableMarginLeft,function(a,b){return b?(parseFloat(Fa(a,"marginLeft"))||a.getBoundingClientRect().left-Da(a,{marginLeft:0},function(){return a.getBoundingClientRect().left}))+"px":void 0}),n.cssHooks.marginRight=Ga(l.reliableMarginRight,function(a,b){return b?Da(a,{display:"inline-block"},Fa,[a,"marginRight"]):void 0}),n.each({margin:"",padding:"",border:"Width"},function(a,b){n.cssHooks[a+b]={expand:function(c){for(var d=0,e={},f="string"==typeof c?c.split(" "):[c];4>d;d++)e[a+U[d]+b]=f[d]||f[d-2]||f[0];return e}},Aa.test(a)||(n.cssHooks[a+b].set=Na)}),n.fn.extend({css:function(a,b){return K(this,function(a,b,c){var d,e,f={},g=0;if(n.isArray(b)){for(d=Ca(a),e=b.length;e>g;g++)f[b[g]]=n.css(a,b[g],!1,d);return f}return void 0!==c?n.style(a,b,c):n.css(a,b)},a,b,arguments.length>1)},show:function(){return Qa(this,!0)},hide:function(){return Qa(this)},toggle:function(a){return"boolean"==typeof a?a?this.show():this.hide():this.each(function(){V(this)?n(this).show():n(this).hide()})}});function Ra(a,b,c,d,e){return new Ra.prototype.init(a,b,c,d,e)}n.Tween=Ra,Ra.prototype={constructor:Ra,init:function(a,b,c,d,e,f){this.elem=a,this.prop=c,this.easing=e||n.easing._default,this.options=b,this.start=this.now=this.cur(),this.end=d,this.unit=f||(n.cssNumber[c]?"":"px")},cur:function(){var a=Ra.propHooks[this.prop];return a&&a.get?a.get(this):Ra.propHooks._default.get(this)},run:function(a){var b,c=Ra.propHooks[this.prop];return this.options.duration?this.pos=b=n.easing[this.easing](a,this.options.duration*a,0,1,this.options.duration):this.pos=b=a,this.now=(this.end-this.start)*b+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),c&&c.set?c.set(this):Ra.propHooks._default.set(this),this}},Ra.prototype.init.prototype=Ra.prototype,Ra.propHooks={_default:{get:function(a){var b;return 1!==a.elem.nodeType||null!=a.elem[a.prop]&&null==a.elem.style[a.prop]?a.elem[a.prop]:(b=n.css(a.elem,a.prop,""),b&&"auto"!==b?b:0)},set:function(a){n.fx.step[a.prop]?n.fx.step[a.prop](a):1!==a.elem.nodeType||null==a.elem.style[n.cssProps[a.prop]]&&!n.cssHooks[a.prop]?a.elem[a.prop]=a.now:n.style(a.elem,a.prop,a.now+a.unit)}}},Ra.propHooks.scrollTop=Ra.propHooks.scrollLeft={set:function(a){a.elem.nodeType&&a.elem.parentNode&&(a.elem[a.prop]=a.now)}},n.easing={linear:function(a){return a},swing:function(a){return.5-Math.cos(a*Math.PI)/2},_default:"swing"},n.fx=Ra.prototype.init,n.fx.step={};var Sa,Ta,Ua=/^(?:toggle|show|hide)$/,Va=/queueHooks$/;function Wa(){return a.setTimeout(function(){Sa=void 0}),Sa=n.now()}function Xa(a,b){var c,d=0,e={height:a};for(b=b?1:0;4>d;d+=2-b)c=U[d],e["margin"+c]=e["padding"+c]=a;return b&&(e.opacity=e.width=a),e}function Ya(a,b,c){for(var d,e=(_a.tweeners[b]||[]).concat(_a.tweeners["*"]),f=0,g=e.length;g>f;f++)if(d=e[f].call(c,b,a))return d}function Za(a,b,c){var 
d,e,f,g,h,i,j,k,l=this,m={},o=a.style,p=a.nodeType&&V(a),q=N.get(a,"fxshow");c.queue||(h=n._queueHooks(a,"fx"),null==h.unqueued&&(h.unqueued=0,i=h.empty.fire,h.empty.fire=function(){h.unqueued||i()}),h.unqueued++,l.always(function(){l.always(function(){h.unqueued--,n.queue(a,"fx").length||h.empty.fire()})})),1===a.nodeType&&("height"in b||"width"in b)&&(c.overflow=[o.overflow,o.overflowX,o.overflowY],j=n.css(a,"display"),k="none"===j?N.get(a,"olddisplay")||za(a.nodeName):j,"inline"===k&&"none"===n.css(a,"float")&&(o.display="inline-block")),c.overflow&&(o.overflow="hidden",l.always(function(){o.overflow=c.overflow[0],o.overflowX=c.overflow[1],o.overflowY=c.overflow[2]}));for(d in b)if(e=b[d],Ua.exec(e)){if(delete b[d],f=f||"toggle"===e,e===(p?"hide":"show")){if("show"!==e||!q||void 0===q[d])continue;p=!0}m[d]=q&&q[d]||n.style(a,d)}else j=void 0;if(n.isEmptyObject(m))"inline"===("none"===j?za(a.nodeName):j)&&(o.display=j);else{q?"hidden"in q&&(p=q.hidden):q=N.access(a,"fxshow",{}),f&&(q.hidden=!p),p?n(a).show():l.done(function(){n(a).hide()}),l.done(function(){var b;N.remove(a,"fxshow");for(b in m)n.style(a,b,m[b])});for(d in m)g=Ya(p?q[d]:0,d,l),d in q||(q[d]=g.start,p&&(g.end=g.start,g.start="width"===d||"height"===d?1:0))}}function $a(a,b){var c,d,e,f,g;for(c in a)if(d=n.camelCase(c),e=b[d],f=a[c],n.isArray(f)&&(e=f[1],f=a[c]=f[0]),c!==d&&(a[d]=f,delete a[c]),g=n.cssHooks[d],g&&"expand"in g){f=g.expand(f),delete a[d];for(c in f)c in a||(a[c]=f[c],b[c]=e)}else b[d]=e}function _a(a,b,c){var d,e,f=0,g=_a.prefilters.length,h=n.Deferred().always(function(){delete i.elem}),i=function(){if(e)return!1;for(var b=Sa||Wa(),c=Math.max(0,j.startTime+j.duration-b),d=c/j.duration||0,f=1-d,g=0,i=j.tweens.length;i>g;g++)j.tweens[g].run(f);return h.notifyWith(a,[j,f,c]),1>f&&i?c:(h.resolveWith(a,[j]),!1)},j=h.promise({elem:a,props:n.extend({},b),opts:n.extend(!0,{specialEasing:{},easing:n.easing._default},c),originalProperties:b,originalOptions:c,startTime:Sa||Wa(),duration:c.duration,tweens:[],createTween:function(b,c){var d=n.Tween(a,j.opts,b,c,j.opts.specialEasing[b]||j.opts.easing);return j.tweens.push(d),d},stop:function(b){var c=0,d=b?j.tweens.length:0;if(e)return this;for(e=!0;d>c;c++)j.tweens[c].run(1);return b?(h.notifyWith(a,[j,1,0]),h.resolveWith(a,[j,b])):h.rejectWith(a,[j,b]),this}}),k=j.props;for($a(k,j.opts.specialEasing);g>f;f++)if(d=_a.prefilters[f].call(j,a,k,j.opts))return n.isFunction(d.stop)&&(n._queueHooks(j.elem,j.opts.queue).stop=n.proxy(d.stop,d)),d;return n.map(k,Ya,j),n.isFunction(j.opts.start)&&j.opts.start.call(a,j),n.fx.timer(n.extend(i,{elem:a,anim:j,queue:j.opts.queue})),j.progress(j.opts.progress).done(j.opts.done,j.opts.complete).fail(j.opts.fail).always(j.opts.always)}n.Animation=n.extend(_a,{tweeners:{"*":[function(a,b){var c=this.createTween(a,b);return W(c.elem,a,T.exec(b),c),c}]},tweener:function(a,b){n.isFunction(a)?(b=a,a=["*"]):a=a.match(G);for(var c,d=0,e=a.length;e>d;d++)c=a[d],_a.tweeners[c]=_a.tweeners[c]||[],_a.tweeners[c].unshift(b)},prefilters:[Za],prefilter:function(a,b){b?_a.prefilters.unshift(a):_a.prefilters.push(a)}}),n.speed=function(a,b,c){var d=a&&"object"==typeof a?n.extend({},a):{complete:c||!c&&b||n.isFunction(a)&&a,duration:a,easing:c&&b||b&&!n.isFunction(b)&&b};return d.duration=n.fx.off?0:"number"==typeof d.duration?d.duration:d.duration in 
n.fx.speeds?n.fx.speeds[d.duration]:n.fx.speeds._default,null!=d.queue&&d.queue!==!0||(d.queue="fx"),d.old=d.complete,d.complete=function(){n.isFunction(d.old)&&d.old.call(this),d.queue&&n.dequeue(this,d.queue)},d},n.fn.extend({fadeTo:function(a,b,c,d){return this.filter(V).css("opacity",0).show().end().animate({opacity:b},a,c,d)},animate:function(a,b,c,d){var e=n.isEmptyObject(a),f=n.speed(b,c,d),g=function(){var b=_a(this,n.extend({},a),f);(e||N.get(this,"finish"))&&b.stop(!0)};return g.finish=g,e||f.queue===!1?this.each(g):this.queue(f.queue,g)},stop:function(a,b,c){var d=function(a){var b=a.stop;delete a.stop,b(c)};return"string"!=typeof a&&(c=b,b=a,a=void 0),b&&a!==!1&&this.queue(a||"fx",[]),this.each(function(){var b=!0,e=null!=a&&a+"queueHooks",f=n.timers,g=N.get(this);if(e)g[e]&&g[e].stop&&d(g[e]);else for(e in g)g[e]&&g[e].stop&&Va.test(e)&&d(g[e]);for(e=f.length;e--;)f[e].elem!==this||null!=a&&f[e].queue!==a||(f[e].anim.stop(c),b=!1,f.splice(e,1));!b&&c||n.dequeue(this,a)})},finish:function(a){return a!==!1&&(a=a||"fx"),this.each(function(){var b,c=N.get(this),d=c[a+"queue"],e=c[a+"queueHooks"],f=n.timers,g=d?d.length:0;for(c.finish=!0,n.queue(this,a,[]),e&&e.stop&&e.stop.call(this,!0),b=f.length;b--;)f[b].elem===this&&f[b].queue===a&&(f[b].anim.stop(!0),f.splice(b,1));for(b=0;g>b;b++)d[b]&&d[b].finish&&d[b].finish.call(this);delete c.finish})}}),n.each(["toggle","show","hide"],function(a,b){var c=n.fn[b];n.fn[b]=function(a,d,e){return null==a||"boolean"==typeof a?c.apply(this,arguments):this.animate(Xa(b,!0),a,d,e)}}),n.each({slideDown:Xa("show"),slideUp:Xa("hide"),slideToggle:Xa("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(a,b){n.fn[a]=function(a,c,d){return this.animate(b,a,c,d)}}),n.timers=[],n.fx.tick=function(){var a,b=0,c=n.timers;for(Sa=n.now();b<c.length;b++)a=c[b],a()||c[b]!==a||c.splice(b--,1);c.length||n.fx.stop(),Sa=void 0},n.fx.timer=function(a){n.timers.push(a),a()?n.fx.start():n.timers.pop()},n.fx.interval=13,n.fx.start=function(){Ta||(Ta=a.setInterval(n.fx.tick,n.fx.interval))},n.fx.stop=function(){a.clearInterval(Ta),Ta=null},n.fx.speeds={slow:600,fast:200,_default:400},n.fn.delay=function(b,c){return b=n.fx?n.fx.speeds[b]||b:b,c=c||"fx",this.queue(c,function(c,d){var e=a.setTimeout(c,b);d.stop=function(){a.clearTimeout(e)}})},function(){var a=d.createElement("input"),b=d.createElement("select"),c=b.appendChild(d.createElement("option"));a.type="checkbox",l.checkOn=""!==a.value,l.optSelected=c.selected,b.disabled=!0,l.optDisabled=!c.disabled,a=d.createElement("input"),a.value="t",a.type="radio",l.radioValue="t"===a.value}();var ab,bb=n.expr.attrHandle;n.fn.extend({attr:function(a,b){return K(this,n.attr,a,b,arguments.length>1)},removeAttr:function(a){return this.each(function(){n.removeAttr(this,a)})}}),n.extend({attr:function(a,b,c){var d,e,f=a.nodeType;if(3!==f&&8!==f&&2!==f)return"undefined"==typeof a.getAttribute?n.prop(a,b,c):(1===f&&n.isXMLDoc(a)||(b=b.toLowerCase(),e=n.attrHooks[b]||(n.expr.match.bool.test(b)?ab:void 0)),void 0!==c?null===c?void n.removeAttr(a,b):e&&"set"in e&&void 0!==(d=e.set(a,c,b))?d:(a.setAttribute(b,c+""),c):e&&"get"in e&&null!==(d=e.get(a,b))?d:(d=n.find.attr(a,b),null==d?void 0:d))},attrHooks:{type:{set:function(a,b){if(!l.radioValue&&"radio"===b&&n.nodeName(a,"input")){var c=a.value;return a.setAttribute("type",b),c&&(a.value=c),b}}}},removeAttr:function(a,b){var 
c,d,e=0,f=b&&b.match(G);if(f&&1===a.nodeType)while(c=f[e++])d=n.propFix[c]||c,n.expr.match.bool.test(c)&&(a[d]=!1),a.removeAttribute(c)}}),ab={set:function(a,b,c){return b===!1?n.removeAttr(a,c):a.setAttribute(c,c),c}},n.each(n.expr.match.bool.source.match(/\w+/g),function(a,b){var c=bb[b]||n.find.attr;bb[b]=function(a,b,d){var e,f;return d||(f=bb[b],bb[b]=e,e=null!=c(a,b,d)?b.toLowerCase():null,bb[b]=f),e}});var cb=/^(?:input|select|textarea|button)$/i,db=/^(?:a|area)$/i;n.fn.extend({prop:function(a,b){return K(this,n.prop,a,b,arguments.length>1)},removeProp:function(a){return this.each(function(){delete this[n.propFix[a]||a]})}}),n.extend({prop:function(a,b,c){var d,e,f=a.nodeType;if(3!==f&&8!==f&&2!==f)return 1===f&&n.isXMLDoc(a)||(b=n.propFix[b]||b,
e=n.propHooks[b]),void 0!==c?e&&"set"in e&&void 0!==(d=e.set(a,c,b))?d:a[b]=c:e&&"get"in e&&null!==(d=e.get(a,b))?d:a[b]},propHooks:{tabIndex:{get:function(a){var b=n.find.attr(a,"tabindex");return b?parseInt(b,10):cb.test(a.nodeName)||db.test(a.nodeName)&&a.href?0:-1}}},propFix:{"for":"htmlFor","class":"className"}}),l.optSelected||(n.propHooks.selected={get:function(a){var b=a.parentNode;return b&&b.parentNode&&b.parentNode.selectedIndex,null},set:function(a){var b=a.parentNode;b&&(b.selectedIndex,b.parentNode&&b.parentNode.selectedIndex)}}),n.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){n.propFix[this.toLowerCase()]=this});var eb=/[\t\r\n\f]/g;function fb(a){return a.getAttribute&&a.getAttribute("class")||""}n.fn.extend({addClass:function(a){var b,c,d,e,f,g,h,i=0;if(n.isFunction(a))return this.each(function(b){n(this).addClass(a.call(this,b,fb(this)))});if("string"==typeof a&&a){b=a.match(G)||[];while(c=this[i++])if(e=fb(c),d=1===c.nodeType&&(" "+e+" ").replace(eb," ")){g=0;while(f=b[g++])d.indexOf(" "+f+" ")<0&&(d+=f+" ");h=n.trim(d),e!==h&&c.setAttribute("class",h)}}return this},removeClass:function(a){var b,c,d,e,f,g,h,i=0;if(n.isFunction(a))return this.each(function(b){n(this).removeClass(a.call(this,b,fb(this)))});if(!arguments.length)return this.attr("class","");if("string"==typeof a&&a){b=a.match(G)||[];while(c=this[i++])if(e=fb(c),d=1===c.nodeType&&(" "+e+" ").replace(eb," ")){g=0;while(f=b[g++])while(d.indexOf(" "+f+" ")>-1)d=d.replace(" "+f+" "," ");h=n.trim(d),e!==h&&c.setAttribute("class",h)}}return this},toggleClass:function(a,b){var c=typeof a;return"boolean"==typeof b&&"string"===c?b?this.addClass(a):this.removeClass(a):n.isFunction(a)?this.each(function(c){n(this).toggleClass(a.call(this,c,fb(this),b),b)}):this.each(function(){var b,d,e,f;if("string"===c){d=0,e=n(this),f=a.match(G)||[];while(b=f[d++])e.hasClass(b)?e.removeClass(b):e.addClass(b)}else void 0!==a&&"boolean"!==c||(b=fb(this),b&&N.set(this,"__className__",b),this.setAttribute&&this.setAttribute("class",b||a===!1?"":N.get(this,"__className__")||""))})},hasClass:function(a){var b,c,d=0;b=" "+a+" ";while(c=this[d++])if(1===c.nodeType&&(" "+fb(c)+" ").replace(eb," ").indexOf(b)>-1)return!0;return!1}});var gb=/\r/g,hb=/[\x20\t\r\n\f]+/g;n.fn.extend({val:function(a){var b,c,d,e=this[0];{if(arguments.length)return d=n.isFunction(a),this.each(function(c){var e;1===this.nodeType&&(e=d?a.call(this,c,n(this).val()):a,null==e?e="":"number"==typeof e?e+="":n.isArray(e)&&(e=n.map(e,function(a){return null==a?"":a+""})),b=n.valHooks[this.type]||n.valHooks[this.nodeName.toLowerCase()],b&&"set"in b&&void 0!==b.set(this,e,"value")||(this.value=e))});if(e)return b=n.valHooks[e.type]||n.valHooks[e.nodeName.toLowerCase()],b&&"get"in b&&void 0!==(c=b.get(e,"value"))?c:(c=e.value,"string"==typeof c?c.replace(gb,""):null==c?"":c)}}}),n.extend({valHooks:{option:{get:function(a){var b=n.find.attr(a,"value");return null!=b?b:n.trim(n.text(a)).replace(hb," ")}},select:{get:function(a){for(var b,c,d=a.options,e=a.selectedIndex,f="select-one"===a.type||0>e,g=f?null:[],h=f?e+1:d.length,i=0>e?h:f?e:0;h>i;i++)if(c=d[i],(c.selected||i===e)&&(l.optDisabled?!c.disabled:null===c.getAttribute("disabled"))&&(!c.parentNode.disabled||!n.nodeName(c.parentNode,"optgroup"))){if(b=n(c).val(),f)return b;g.push(b)}return g},set:function(a,b){var 
c,d,e=a.options,f=n.makeArray(b),g=e.length;while(g--)d=e[g],(d.selected=n.inArray(n.valHooks.option.get(d),f)>-1)&&(c=!0);return c||(a.selectedIndex=-1),f}}}}),n.each(["radio","checkbox"],function(){n.valHooks[this]={set:function(a,b){return n.isArray(b)?a.checked=n.inArray(n(a).val(),b)>-1:void 0}},l.checkOn||(n.valHooks[this].get=function(a){return null===a.getAttribute("value")?"on":a.value})});var ib=/^(?:focusinfocus|focusoutblur)$/;n.extend(n.event,{trigger:function(b,c,e,f){var g,h,i,j,l,m,o,p=[e||d],q=k.call(b,"type")?b.type:b,r=k.call(b,"namespace")?b.namespace.split("."):[];if(h=i=e=e||d,3!==e.nodeType&&8!==e.nodeType&&!ib.test(q+n.event.triggered)&&(q.indexOf(".")>-1&&(r=q.split("."),q=r.shift(),r.sort()),l=q.indexOf(":")<0&&"on"+q,b=b[n.expando]?b:new n.Event(q,"object"==typeof b&&b),b.isTrigger=f?2:3,b.namespace=r.join("."),b.rnamespace=b.namespace?new RegExp("(^|\\.)"+r.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,b.result=void 0,b.target||(b.target=e),c=null==c?[b]:n.makeArray(c,[b]),o=n.event.special[q]||{},f||!o.trigger||o.trigger.apply(e,c)!==!1)){if(!f&&!o.noBubble&&!n.isWindow(e)){for(j=o.delegateType||q,ib.test(j+q)||(h=h.parentNode);h;h=h.parentNode)p.push(h),i=h;i===(e.ownerDocument||d)&&p.push(i.defaultView||i.parentWindow||a)}g=0;while((h=p[g++])&&!b.isPropagationStopped())b.type=g>1?j:o.bindType||q,m=(N.get(h,"events")||{})[b.type]&&N.get(h,"handle"),m&&m.apply(h,c),m=l&&h[l],m&&m.apply&&L(h)&&(b.result=m.apply(h,c),b.result===!1&&b.preventDefault());return b.type=q,f||b.isDefaultPrevented()||o._default&&o._default.apply(p.pop(),c)!==!1||!L(e)||l&&n.isFunction(e[q])&&!n.isWindow(e)&&(i=e[l],i&&(e[l]=null),n.event.triggered=q,e[q](),n.event.triggered=void 0,i&&(e[l]=i)),b.result}},simulate:function(a,b,c){var d=n.extend(new n.Event,c,{type:a,isSimulated:!0});n.event.trigger(d,null,b),d.isDefaultPrevented()&&c.preventDefault()}}),n.fn.extend({trigger:function(a,b){return this.each(function(){n.event.trigger(a,b,this)})},triggerHandler:function(a,b){var c=this[0];return c?n.event.trigger(a,b,c,!0):void 0}}),n.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error contextmenu".split(" "),function(a,b){n.fn[b]=function(a,c){return arguments.length>0?this.on(b,null,a,c):this.trigger(b)}}),n.fn.extend({hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)}}),l.focusin="onfocusin"in a,l.focusin||n.each({focus:"focusin",blur:"focusout"},function(a,b){var c=function(a){n.event.simulate(b,a.target,n.event.fix(a))};n.event.special[b]={setup:function(){var d=this.ownerDocument||this,e=N.access(d,b);e||d.addEventListener(a,c,!0),N.access(d,b,(e||0)+1)},teardown:function(){var d=this.ownerDocument||this,e=N.access(d,b)-1;e?N.access(d,b,e):(d.removeEventListener(a,c,!0),N.remove(d,b))}}});var jb=a.location,kb=n.now(),lb=/\?/;n.parseJSON=function(a){return JSON.parse(a+"")},n.parseXML=function(b){var c;if(!b||"string"!=typeof b)return null;try{c=(new a.DOMParser).parseFromString(b,"text/xml")}catch(d){c=void 0}return c&&!c.getElementsByTagName("parsererror").length||n.error("Invalid XML: "+b),c};var mb=/#.*$/,nb=/([?&])_=[^&]*/,ob=/^(.*?):[ \t]*([^\r\n]*)$/gm,pb=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,qb=/^(?:GET|HEAD)$/,rb=/^\/\//,sb={},tb={},ub="*/".concat("*"),vb=d.createElement("a");vb.href=jb.href;function wb(a){return function(b,c){"string"!=typeof b&&(c=b,b="*");var 
d,e=0,f=b.toLowerCase().match(G)||[];if(n.isFunction(c))while(d=f[e++])"+"===d[0]?(d=d.slice(1)||"*",(a[d]=a[d]||[]).unshift(c)):(a[d]=a[d]||[]).push(c)}}function xb(a,b,c,d){var e={},f=a===tb;function g(h){var i;return e[h]=!0,n.each(a[h]||[],function(a,h){var j=h(b,c,d);return"string"!=typeof j||f||e[j]?f?!(i=j):void 0:(b.dataTypes.unshift(j),g(j),!1)}),i}return g(b.dataTypes[0])||!e["*"]&&g("*")}function yb(a,b){var c,d,e=n.ajaxSettings.flatOptions||{};for(c in b)void 0!==b[c]&&((e[c]?a:d||(d={}))[c]=b[c]);return d&&n.extend(!0,a,d),a}function zb(a,b,c){var d,e,f,g,h=a.contents,i=a.dataTypes;while("*"===i[0])i.shift(),void 0===d&&(d=a.mimeType||b.getResponseHeader("Content-Type"));if(d)for(e in h)if(h[e]&&h[e].test(d)){i.unshift(e);break}if(i[0]in c)f=i[0];else{for(e in c){if(!i[0]||a.converters[e+" "+i[0]]){f=e;break}g||(g=e)}f=f||g}return f?(f!==i[0]&&i.unshift(f),c[f]):void 0}function Ab(a,b,c,d){var e,f,g,h,i,j={},k=a.dataTypes.slice();if(k[1])for(g in a.converters)j[g.toLowerCase()]=a.converters[g];f=k.shift();while(f)if(a.responseFields[f]&&(c[a.responseFields[f]]=b),!i&&d&&a.dataFilter&&(b=a.dataFilter(b,a.dataType)),i=f,f=k.shift())if("*"===f)f=i;else if("*"!==i&&i!==f){if(g=j[i+" "+f]||j["* "+f],!g)for(e in j)if(h=e.split(" "),h[1]===f&&(g=j[i+" "+h[0]]||j["* "+h[0]])){g===!0?g=j[e]:j[e]!==!0&&(f=h[0],k.unshift(h[1]));break}if(g!==!0)if(g&&a["throws"])b=g(b);else try{b=g(b)}catch(l){return{state:"parsererror",error:g?l:"No conversion from "+i+" to "+f}}}return{state:"success",data:b}}n.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:jb.href,type:"GET",isLocal:pb.test(jb.protocol),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":ub,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/\bxml\b/,html:/\bhtml/,json:/\bjson\b/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":n.parseJSON,"text xml":n.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(a,b){return b?yb(yb(a,n.ajaxSettings),b):yb(n.ajaxSettings,a)},ajaxPrefilter:wb(sb),ajaxTransport:wb(tb),ajax:function(b,c){"object"==typeof b&&(c=b,b=void 0),c=c||{};var e,f,g,h,i,j,k,l,m=n.ajaxSetup({},c),o=m.context||m,p=m.context&&(o.nodeType||o.jquery)?n(o):n.event,q=n.Deferred(),r=n.Callbacks("once memory"),s=m.statusCode||{},t={},u={},v=0,w="canceled",x={readyState:0,getResponseHeader:function(a){var b;if(2===v){if(!h){h={};while(b=ob.exec(g))h[b[1].toLowerCase()]=b[2]}b=h[a.toLowerCase()]}return null==b?null:b},getAllResponseHeaders:function(){return 2===v?g:null},setRequestHeader:function(a,b){var c=a.toLowerCase();return v||(a=u[c]=u[c]||a,t[a]=b),this},overrideMimeType:function(a){return v||(m.mimeType=a),this},statusCode:function(a){var b;if(a)if(2>v)for(b in a)s[b]=[s[b],a[b]];else x.always(a[x.status]);return this},abort:function(a){var b=a||w;return e&&e.abort(b),z(0,b),this}};if(q.promise(x).complete=r.add,x.success=x.done,x.error=x.fail,m.url=((b||m.url||jb.href)+"").replace(mb,"").replace(rb,jb.protocol+"//"),m.type=c.method||c.type||m.method||m.type,m.dataTypes=n.trim(m.dataType||"*").toLowerCase().match(G)||[""],null==m.crossDomain){j=d.createElement("a");try{j.href=m.url,j.href=j.href,m.crossDomain=vb.protocol+"//"+vb.host!=j.protocol+"//"+j.host}catch(y){m.crossDomain=!0}}if(m.data&&m.processData&&"string"!=typeof m.data&&(m.data=n.param(m.data,m.traditional)),xb(sb,m,c,x),2===v)return 
x;k=n.event&&m.global,k&&0===n.active++&&n.event.trigger("ajaxStart"),m.type=m.type.toUpperCase(),m.hasContent=!qb.test(m.type),f=m.url,m.hasContent||(m.data&&(f=m.url+=(lb.test(f)?"&":"?")+m.data,delete m.data),m.cache===!1&&(m.url=nb.test(f)?f.replace(nb,"$1_="+kb++):f+(lb.test(f)?"&":"?")+"_="+kb++)),m.ifModified&&(n.lastModified[f]&&x.setRequestHeader("If-Modified-Since",n.lastModified[f]),n.etag[f]&&x.setRequestHeader("If-None-Match",n.etag[f])),(m.data&&m.hasContent&&m.contentType!==!1||c.contentType)&&x.setRequestHeader("Content-Type",m.contentType),x.setRequestHeader("Accept",m.dataTypes[0]&&m.accepts[m.dataTypes[0]]?m.accepts[m.dataTypes[0]]+("*"!==m.dataTypes[0]?", "+ub+"; q=0.01":""):m.accepts["*"]);for(l in m.headers)x.setRequestHeader(l,m.headers[l]);if(m.beforeSend&&(m.beforeSend.call(o,x,m)===!1||2===v))return x.abort();w="abort";for(l in{success:1,error:1,complete:1})x[l](m[l]);if(e=xb(tb,m,c,x)){if(x.readyState=1,k&&p.trigger("ajaxSend",[x,m]),2===v)return x;m.async&&m.timeout>0&&(i=a.setTimeout(function(){x.abort("timeout")},m.timeout));try{v=1,e.send(t,z)}catch(y){if(!(2>v))throw y;z(-1,y)}}else z(-1,"No Transport");function z(b,c,d,h){var j,l,t,u,w,y=c;2!==v&&(v=2,i&&a.clearTimeout(i),e=void 0,g=h||"",x.readyState=b>0?4:0,j=b>=200&&300>b||304===b,d&&(u=zb(m,x,d)),u=Ab(m,u,x,j),j?(m.ifModified&&(w=x.getResponseHeader("Last-Modified"),w&&(n.lastModified[f]=w),w=x.getResponseHeader("etag"),w&&(n.etag[f]=w)),204===b||"HEAD"===m.type?y="nocontent":304===b?y="notmodified":(y=u.state,l=u.data,t=u.error,j=!t)):(t=y,!b&&y||(y="error",0>b&&(b=0))),x.status=b,x.statusText=(c||y)+"",j?q.resolveWith(o,[l,y,x]):q.rejectWith(o,[x,y,t]),x.statusCode(s),s=void 0,k&&p.trigger(j?"ajaxSuccess":"ajaxError",[x,m,j?l:t]),r.fireWith(o,[x,y]),k&&(p.trigger("ajaxComplete",[x,m]),--n.active||n.event.trigger("ajaxStop")))}return x},getJSON:function(a,b,c){return n.get(a,b,c,"json")},getScript:function(a,b){return n.get(a,void 0,b,"script")}}),n.each(["get","post"],function(a,b){n[b]=function(a,c,d,e){return n.isFunction(c)&&(e=e||d,d=c,c=void 0),n.ajax(n.extend({url:a,type:b,dataType:e,data:c,success:d},n.isPlainObject(a)&&a))}}),n._evalUrl=function(a){return n.ajax({url:a,type:"GET",dataType:"script",async:!1,global:!1,"throws":!0})},n.fn.extend({wrapAll:function(a){var b;return n.isFunction(a)?this.each(function(b){n(this).wrapAll(a.call(this,b))}):(this[0]&&(b=n(a,this[0].ownerDocument).eq(0).clone(!0),this[0].parentNode&&b.insertBefore(this[0]),b.map(function(){var a=this;while(a.firstElementChild)a=a.firstElementChild;return a}).append(this)),this)},wrapInner:function(a){return n.isFunction(a)?this.each(function(b){n(this).wrapInner(a.call(this,b))}):this.each(function(){var b=n(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){var b=n.isFunction(a);return this.each(function(c){n(this).wrapAll(b?a.call(this,c):a)})},unwrap:function(){return this.parent().each(function(){n.nodeName(this,"body")||n(this).replaceWith(this.childNodes)}).end()}}),n.expr.filters.hidden=function(a){return!n.expr.filters.visible(a)},n.expr.filters.visible=function(a){return a.offsetWidth>0||a.offsetHeight>0||a.getClientRects().length>0};var Bb=/%20/g,Cb=/\[\]$/,Db=/\r?\n/g,Eb=/^(?:submit|button|image|reset|file)$/i,Fb=/^(?:input|select|textarea|keygen)/i;function Gb(a,b,c,d){var e;if(n.isArray(b))n.each(b,function(b,e){c||Cb.test(a)?d(a,e):Gb(a+"["+("object"==typeof e&&null!=e?b:"")+"]",e,c,d)});else if(c||"object"!==n.type(b))d(a,b);else for(e in 
b)Gb(a+"["+e+"]",b[e],c,d)}n.param=function(a,b){var c,d=[],e=function(a,b){b=n.isFunction(b)?b():null==b?"":b,d[d.length]=encodeURIComponent(a)+"="+encodeURIComponent(b)};if(void 0===b&&(b=n.ajaxSettings&&n.ajaxSettings.traditional),n.isArray(a)||a.jquery&&!n.isPlainObject(a))n.each(a,function(){e(this.name,this.value)});else for(c in a)Gb(c,a[c],b,e);return d.join("&").replace(Bb,"+")},n.fn.extend({serialize:function(){return n.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var a=n.prop(this,"elements");return a?n.makeArray(a):this}).filter(function(){var a=this.type;return this.name&&!n(this).is(":disabled")&&Fb.test(this.nodeName)&&!Eb.test(a)&&(this.checked||!X.test(a))}).map(function(a,b){var c=n(this).val();return null==c?null:n.isArray(c)?n.map(c,function(a){return{name:b.name,value:a.replace(Db,"\r\n")}}):{name:b.name,value:c.replace(Db,"\r\n")}}).get()}}),n.ajaxSettings.xhr=function(){try{return new a.XMLHttpRequest}catch(b){}};var Hb={0:200,1223:204},Ib=n.ajaxSettings.xhr();l.cors=!!Ib&&"withCredentials"in Ib,l.ajax=Ib=!!Ib,n.ajaxTransport(function(b){var c,d;return l.cors||Ib&&!b.crossDomain?{send:function(e,f){var g,h=b.xhr();if(h.open(b.type,b.url,b.async,b.username,b.password),b.xhrFields)for(g in b.xhrFields)h[g]=b.xhrFields[g];b.mimeType&&h.overrideMimeType&&h.overrideMimeType(b.mimeType),b.crossDomain||e["X-Requested-With"]||(e["X-Requested-With"]="XMLHttpRequest");for(g in e)h.setRequestHeader(g,e[g]);c=function(a){return function(){c&&(c=d=h.onload=h.onerror=h.onabort=h.onreadystatechange=null,"abort"===a?h.abort():"error"===a?"number"!=typeof h.status?f(0,"error"):f(h.status,h.statusText):f(Hb[h.status]||h.status,h.statusText,"text"!==(h.responseType||"text")||"string"!=typeof h.responseText?{binary:h.response}:{text:h.responseText},h.getAllResponseHeaders()))}},h.onload=c(),d=h.onerror=c("error"),void 0!==h.onabort?h.onabort=d:h.onreadystatechange=function(){4===h.readyState&&a.setTimeout(function(){c&&d()})},c=c("abort");try{h.send(b.hasContent&&b.data||null)}catch(i){if(c)throw i}},abort:function(){c&&c()}}:void 0}),n.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/\b(?:java|ecma)script\b/},converters:{"text script":function(a){return n.globalEval(a),a}}}),n.ajaxPrefilter("script",function(a){void 0===a.cache&&(a.cache=!1),a.crossDomain&&(a.type="GET")}),n.ajaxTransport("script",function(a){if(a.crossDomain){var b,c;return{send:function(e,f){b=n("<script>").prop({charset:a.scriptCharset,src:a.url}).on("load error",c=function(a){b.remove(),c=null,a&&f("error"===a.type?404:200,a.type)}),d.head.appendChild(b[0])},abort:function(){c&&c()}}}});var Jb=[],Kb=/(=)\?(?=&|$)|\?\?/;n.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var a=Jb.pop()||n.expando+"_"+kb++;return this[a]=!0,a}}),n.ajaxPrefilter("json jsonp",function(b,c,d){var e,f,g,h=b.jsonp!==!1&&(Kb.test(b.url)?"url":"string"==typeof b.data&&0===(b.contentType||"").indexOf("application/x-www-form-urlencoded")&&Kb.test(b.data)&&"data");return h||"jsonp"===b.dataTypes[0]?(e=b.jsonpCallback=n.isFunction(b.jsonpCallback)?b.jsonpCallback():b.jsonpCallback,h?b[h]=b[h].replace(Kb,"$1"+e):b.jsonp!==!1&&(b.url+=(lb.test(b.url)?"&":"?")+b.jsonp+"="+e),b.converters["script json"]=function(){return g||n.error(e+" was not called"),g[0]},b.dataTypes[0]="json",f=a[e],a[e]=function(){g=arguments},d.always(function(){void 
0===f?n(a).removeProp(e):a[e]=f,b[e]&&(b.jsonpCallback=c.jsonpCallback,Jb.push(e)),g&&n.isFunction(f)&&f(g[0]),g=f=void 0}),"script"):void 0}),n.parseHTML=function(a,b,c){if(!a||"string"!=typeof a)return null;"boolean"==typeof b&&(c=b,b=!1),b=b||d;var e=x.exec(a),f=!c&&[];return e?[b.createElement(e[1])]:(e=ca([a],b,f),f&&f.length&&n(f).remove(),n.merge([],e.childNodes))};var Lb=n.fn.load;n.fn.load=function(a,b,c){if("string"!=typeof a&&Lb)return Lb.apply(this,arguments);var d,e,f,g=this,h=a.indexOf(" ");return h>-1&&(d=n.trim(a.slice(h)),a=a.slice(0,h)),n.isFunction(b)?(c=b,b=void 0):b&&"object"==typeof b&&(e="POST"),g.length>0&&n.ajax({url:a,type:e||"GET",dataType:"html",data:b}).done(function(a){f=arguments,g.html(d?n("<div>").append(n.parseHTML(a)).find(d):a)}).always(c&&function(a,b){g.each(function(){c.apply(this,f||[a.responseText,b,a])})}),this},n.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(a,b){n.fn[b]=function(a){return this.on(b,a)}}),n.expr.filters.animated=function(a){return n.grep(n.timers,function(b){return a===b.elem}).length};function Mb(a){return n.isWindow(a)?a:9===a.nodeType&&a.defaultView}n.offset={setOffset:function(a,b,c){var d,e,f,g,h,i,j,k=n.css(a,"position"),l=n(a),m={};"static"===k&&(a.style.position="relative"),h=l.offset(),f=n.css(a,"top"),i=n.css(a,"left"),j=("absolute"===k||"fixed"===k)&&(f+i).indexOf("auto")>-1,j?(d=l.position(),g=d.top,e=d.left):(g=parseFloat(f)||0,e=parseFloat(i)||0),n.isFunction(b)&&(b=b.call(a,c,n.extend({},h))),null!=b.top&&(m.top=b.top-h.top+g),null!=b.left&&(m.left=b.left-h.left+e),"using"in b?b.using.call(a,m):l.css(m)}},n.fn.extend({offset:function(a){if(arguments.length)return void 0===a?this:this.each(function(b){n.offset.setOffset(this,a,b)});var b,c,d=this[0],e={top:0,left:0},f=d&&d.ownerDocument;if(f)return b=f.documentElement,n.contains(b,d)?(e=d.getBoundingClientRect(),c=Mb(f),{top:e.top+c.pageYOffset-b.clientTop,left:e.left+c.pageXOffset-b.clientLeft}):e},position:function(){if(this[0]){var a,b,c=this[0],d={top:0,left:0};return"fixed"===n.css(c,"position")?b=c.getBoundingClientRect():(a=this.offsetParent(),b=this.offset(),n.nodeName(a[0],"html")||(d=a.offset()),d.top+=n.css(a[0],"borderTopWidth",!0),d.left+=n.css(a[0],"borderLeftWidth",!0)),{top:b.top-d.top-n.css(c,"marginTop",!0),left:b.left-d.left-n.css(c,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var a=this.offsetParent;while(a&&"static"===n.css(a,"position"))a=a.offsetParent;return a||Ea})}}),n.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(a,b){var c="pageYOffset"===b;n.fn[a]=function(d){return K(this,function(a,d,e){var f=Mb(a);return void 0===e?f?f[b]:a[d]:void(f?f.scrollTo(c?f.pageXOffset:e,c?e:f.pageYOffset):a[d]=e)},a,d,arguments.length)}}),n.each(["top","left"],function(a,b){n.cssHooks[b]=Ga(l.pixelPosition,function(a,c){return c?(c=Fa(a,b),Ba.test(c)?n(a).position()[b]+"px":c):void 0})}),n.each({Height:"height",Width:"width"},function(a,b){n.each({padding:"inner"+a,content:b,"":"outer"+a},function(c,d){n.fn[d]=function(d,e){var f=arguments.length&&(c||"boolean"!=typeof d),g=c||(d===!0||e===!0?"margin":"border");return K(this,function(b,c,d){var e;return n.isWindow(b)?b.document.documentElement["client"+a]:9===b.nodeType?(e=b.documentElement,Math.max(b.body["scroll"+a],e["scroll"+a],b.body["offset"+a],e["offset"+a],e["client"+a])):void 0===d?n.css(b,c,g):n.style(b,c,d,g)},b,f?d:void 0,f,null)}})}),n.fn.extend({bind:function(a,b,c){return 
this.on(a,null,b,c)},unbind:function(a,b){return this.off(a,null,b)},delegate:function(a,b,c,d){return this.on(b,a,c,d)},undelegate:function(a,b,c){return 1===arguments.length?this.off(a,"**"):this.off(b,a||"**",c)},size:function(){return this.length}}),n.fn.andSelf=n.fn.addBack,"function"==typeof define&&define.amd&&define("jquery",[],function(){return n});var Nb=a.jQuery,Ob=a.$;return n.noConflict=function(b){return a.$===n&&(a.$=Ob),b&&a.jQuery===n&&(a.jQuery=Nb),n},b||(a.jQuery=a.$=n),n});
dataloaders/pymo/mocapplayer/libs/math.min.js
ADDED
The diff for this file is too large to render.
See raw diff
dataloaders/pymo/mocapplayer/libs/mocapjs.js
ADDED
@@ -0,0 +1,1312 @@
1 |
+
/*!
|
2 |
+
* The MIT License (MIT)
|
3 |
+
*
|
4 |
+
* Copyright (c) 2016 Omid Alemi
|
5 |
+
*
|
6 |
+
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
7 |
+
* of this software and associated documentation files (the "Software"), to deal
|
8 |
+
* in the Software without restriction, including without limitation the rights
|
9 |
+
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
10 |
+
* copies of the Software, and to permit persons to whom the Software is
|
11 |
+
* furnished to do so, subject to the following conditions:
|
12 |
+
*
|
13 |
+
* The above copyright notice and this permission notice shall be included in all
|
14 |
+
* copies or substantial portions of the Software.
|
15 |
+
*
|
16 |
+
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
17 |
+
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
18 |
+
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
19 |
+
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
20 |
+
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
21 |
+
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
22 |
+
* SOFTWARE.
|
23 |
+
*
|
24 |
+
*/
|
25 |
+
/******/ (function(modules) { // webpackBootstrap
|
26 |
+
/******/ // The module cache
|
27 |
+
/******/ var installedModules = {};
|
28 |
+
/******/
|
29 |
+
/******/ // The require function
|
30 |
+
/******/ function __webpack_require__(moduleId) {
|
31 |
+
/******/
|
32 |
+
/******/ // Check if module is in cache
|
33 |
+
/******/ if(installedModules[moduleId])
|
34 |
+
/******/ return installedModules[moduleId].exports;
|
35 |
+
/******/
|
36 |
+
/******/ // Create a new module (and put it into the cache)
|
37 |
+
/******/ var module = installedModules[moduleId] = {
|
38 |
+
/******/ exports: {},
|
39 |
+
/******/ id: moduleId,
|
40 |
+
/******/ loaded: false
|
41 |
+
/******/ };
|
42 |
+
/******/
|
43 |
+
/******/ // Execute the module function
|
44 |
+
/******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__);
|
45 |
+
/******/
|
46 |
+
/******/ // Flag the module as loaded
|
47 |
+
/******/ module.loaded = true;
|
48 |
+
/******/
|
49 |
+
/******/ // Return the exports of the module
|
50 |
+
/******/ return module.exports;
|
51 |
+
/******/ }
|
52 |
+
/******/
|
53 |
+
/******/
|
54 |
+
/******/ // expose the modules object (__webpack_modules__)
|
55 |
+
/******/ __webpack_require__.m = modules;
|
56 |
+
/******/
|
57 |
+
/******/ // expose the module cache
|
58 |
+
/******/ __webpack_require__.c = installedModules;
|
59 |
+
/******/
|
60 |
+
/******/ // __webpack_public_path__
|
61 |
+
/******/ __webpack_require__.p = "";
|
62 |
+
/******/
|
63 |
+
/******/ // Load entry module and return exports
|
64 |
+
/******/ return __webpack_require__(0);
|
65 |
+
/******/ })
|
66 |
+
/************************************************************************/
|
67 |
+
/******/ ([
|
68 |
+
/* 0 */
|
69 |
+
/***/ function(module, exports, __webpack_require__) {
|
70 |
+
|
71 |
+
BVHCharacter = __webpack_require__(1);
|
72 |
+
C3DCharacter = __webpack_require__(5);
|
73 |
+
MocapParsers = __webpack_require__(2);
|
74 |
+
|
75 |
+
/***/ },
|
76 |
+
/* 1 */
|
77 |
+
/***/ function(module, exports, __webpack_require__) {
|
78 |
+
|
79 |
+
var parsers = __webpack_require__(2);
|
80 |
+
|
81 |
+
var BVHCharacter = BVHCharacter || {};
|
82 |
+
|
83 |
+
|
84 |
+
BVHCharacter = function(n, jm, bm, jg, bg) {
|
85 |
+
this.name = n;
|
86 |
+
|
87 |
+
this.jointMaterial = jm;
|
88 |
+
this.boneMaterial = bm;
|
89 |
+
this.makeJointGeometryFCN = jg;
|
90 |
+
this.makeBoneGeometryFCN = bg;
|
91 |
+
|
92 |
+
this.bvh = [];
|
93 |
+
this.skeleton = new THREE.Group();
|
94 |
+
|
95 |
+
this.skelScale = 1;
|
96 |
+
this.jointMeshes = [];
|
97 |
+
this.boneMeshes = [];
|
98 |
+
this.rootMeshes = [];
|
99 |
+
|
100 |
+
this.originPosition = new THREE.Vector3(0, 0, 0);
|
101 |
+
|
102 |
+
this.ready = false;
|
103 |
+
this.frameTime = 1 / 30;
|
104 |
+
this.frameCount = 0;
|
105 |
+
this.animIndex = 0;
|
106 |
+
this.animStartTimeRef = 0;
|
107 |
+
this.animOffset = 0;
|
108 |
+
this.playing = true;
|
109 |
+
|
110 |
+
this.debug = true;
|
111 |
+
this.useWorker = true;
|
112 |
+
|
113 |
+
this.webSocket = [];
|
114 |
+
this.streamProtocol = "BVHStream";
|
115 |
+
this.keepStreamedFrames = true;
|
116 |
+
this.isStreaming = false;
|
117 |
+
|
118 |
+
var self = this;
|
119 |
+
|
120 |
+
//
|
121 |
+
|
122 |
+
this.log = function(m) {
|
123 |
+
if (self.debug)
|
124 |
+
console.log(self.name + ": " + m.toString());
|
125 |
+
};
|
126 |
+
|
127 |
+
this.loadFromURL = function(url, callback) {
|
128 |
+
self.log("Loading the mocap file ...");
|
129 |
+
//Pace.start();
|
130 |
+
reader = new parsers.bvhParser(this.name + "READER");
|
131 |
+
this.url = url;
|
132 |
+
reader.load(url, self.createSkel, self.fillFrames);
|
133 |
+
|
134 |
+
this.callb = callback;
|
135 |
+
};
|
136 |
+
|
137 |
+
this.fillFrames = function() {
|
138 |
+
// self.log("Ready!");
|
139 |
+
self.ready = true;
|
140 |
+
self.playing = true;
|
141 |
+
|
142 |
+
if (self.callb)
|
143 |
+
self.callb();
|
144 |
+
}
|
145 |
+
|
146 |
+
this.createSkel = function(data) {
|
147 |
+
self.bvh = data;
|
148 |
+
self.frameCount = data.frameCount;
|
149 |
+
self.frameTime = data.frameTime;
|
150 |
+
|
151 |
+
self.log("Mocap file loaded.");
|
152 |
+
|
153 |
+
self.log("Creating the WebGL Joints.");
|
154 |
+
self.buildSkelJoints(self.bvh.getSkeleton(), 0);
|
155 |
+
|
156 |
+
self.log("Creating the WebGL Bones.");
|
157 |
+
self.buildSkelBones(self.jointMeshes[0]);
|
158 |
+
|
159 |
+
self.skeleton.add(self.jointMeshes[0]);
|
160 |
+
self.setSkeletonScale(self.skelScale);
|
161 |
+
self.setSkelUp();
|
162 |
+
};
|
163 |
+
|
164 |
+
|
165 |
+
// Beginning of the Stream Code
|
166 |
+
this.onHeaderReceived = function(data) {
|
167 |
+
self.log("Loading the mocap header (skeleton) from the stream...");
|
168 |
+
headerReader = new parsers.bvhStreamParser();
|
169 |
+
headerReader.readHeader(data, self.createSkel);
|
170 |
+
|
171 |
+
if (self.callb)
|
172 |
+
self.callb();
|
173 |
+
|
174 |
+
Pace.stop();
|
175 |
+
}
|
176 |
+
|
177 |
+
this.onDataChunkReceived = function(rawFrames) {
|
178 |
+
var aa = [];
|
179 |
+
|
180 |
+
for (f = 1; f < rawFrames.length; f++) {
|
181 |
+
var parts = rawFrames[f].trim().split(" ");
|
182 |
+
for (var j = 0; j < parts.length; j++)
|
183 |
+
parts[j] = +parts[j];
|
184 |
+
aa.push(parts);
|
185 |
+
}
|
186 |
+
diff = self.bvh.fillFrameArray(aa);
|
187 |
+
self.frameCount = self.bvh.frameArray.length;
|
188 |
+
|
189 |
+
|
190 |
+
if (!self.playing) {
|
191 |
+
self.animStartTimeRef = Date.now();
|
192 |
+
// self.animOffset -= rawFrames.length;
|
193 |
+
}
|
194 |
+
/*
|
195 |
+
// else
|
196 |
+
// self.animOffset = self.animIndex;
|
197 |
+
if (diff > 0)
|
198 |
+
self.animOffset -= rawFrames.length + 1;
|
199 |
+
// self.animIndex -= rawFrames.length; //math.max(0,math.min(rawFrames.length, self.bvh.bufferSize));
|
200 |
+
*/
|
201 |
+
self.fillFrames();
|
202 |
+
Pace.stop();
|
203 |
+
}
|
204 |
+
|
205 |
+
this.loadFromStream = function(url, callback) {
|
206 |
+
self.log("Connecting to the stream server...");
|
207 |
+
self.isStreaming = true;
|
208 |
+
this.callb = callback;
|
209 |
+
self.webSocket = new WebSocket(url);
|
210 |
+
|
211 |
+
self.webSocket.onerror = function(event) {
|
212 |
+
self.log("Error connecting to the stream server " + event.origin);
|
213 |
+
};
|
214 |
+
|
215 |
+
self.webSocket.onopen = function(event) {
|
216 |
+
self.log("Connected to the stream server " + event.origin);
|
217 |
+
Pace.stop();
|
218 |
+
};
|
219 |
+
|
220 |
+
self.webSocket.onmessage = function(event) {
|
221 |
+
// I'm not doing much type or content checking here. Let's just trust the sender for now!
|
222 |
+
// Protocol for header:
|
223 |
+
// $HEADER$
|
224 |
+
// BVH...
|
225 |
+
// Protocol for data chunk with id#:
|
226 |
+
// $FRAMES$id#$
|
227 |
+
|
228 |
+
var messageLines = event.data.split('\n');
|
229 |
+
|
230 |
+
// self.log("Received somthing!");
|
231 |
+
// self.log("The first line is : " + messageLines[0]);
|
232 |
+
|
233 |
+
if (messageLines.length < 1)
|
234 |
+
return;
|
235 |
+
|
236 |
+
if (messageLines[0] == "$HEADER$") {
|
237 |
+
self.onHeaderReceived(event.data);
|
238 |
+
|
239 |
+
} else if (messageLines[0].startsWith("$FRAMES$")) {
|
240 |
+
chunkID = parseInt(messageLines[0].split("$")[2]);
|
241 |
+
self.onDataChunkReceived(messageLines, chunkID);
|
242 |
+
}
|
243 |
+
};
|
244 |
+
|
245 |
+
};
|
246 |
+
|
247 |
+
this.requestFrames = function(i) {
|
248 |
+
self.webSocket.send("$GETFRAMES" + i + "$");
|
249 |
+
}
|
250 |
+
|
251 |
+
// End of the Stream Code
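// Illustrative sketch (not part of the library): one possible round trip over
// the ad-hoc protocol described above. The URL, port, and chunk id are made up.
//
//   server -> "$HEADER$\nHIERARCHY\nROOT Hips\n..."            (skeleton header)
//   client -> "$GETFRAMES100$"                                 (requestFrames(100))
//   server -> "$FRAMES$7$\n0.0 92.1 0.0 ...\n0.1 92.0 0.1 ..." (one frame per line)
//
// var ch = new BVHCharacter("demo", jointMat, boneMat, makeJointGeo, makeBoneGeo);
// ch.loadFromStream("ws://localhost:8080/bvh", function () {
//   ch.requestFrames(100);
// });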
|
252 |
+
|
253 |
+
this.setOriginPosition = function(x, y, z) {
|
254 |
+
self.originPosition.set(x, y, z);
|
255 |
+
};
|
256 |
+
|
257 |
+
this.setSkeletonScale = function(s) {
|
258 |
+
self.rootMeshes.forEach(function(c) {
|
259 |
+
c.scale.set(s, s, s);
|
260 |
+
});
|
261 |
+
self.jointMeshes[0].scale.set(s, s, s);
|
262 |
+
self.jointMeshes[0].position.multiplyScalar(s);
|
263 |
+
};
|
264 |
+
|
265 |
+
this.buildSkelJoints = function(joint, parent) {
|
266 |
+
var jointMesh = new THREE.Mesh(self.makeJointGeometryFCN(joint.name, self.skelScale), self.jointMaterial);
|
267 |
+
jointMesh.bvhIndex = joint.jointIndex;
|
268 |
+
jointMesh.offsetVec = new THREE.Vector3(joint.offset[0], joint.offset[1], joint.offset[2]);
|
269 |
+
jointMesh.name = joint.name;
|
270 |
+
jointMesh.jointparent = parent;
|
271 |
+
var a, b, c;
|
272 |
+
if (!joint.isEndSite()) {
|
273 |
+
a = joint.channelNames[joint.channelNames.length - 3][0];
|
274 |
+
b = joint.channelNames[joint.channelNames.length - 2][0];
|
275 |
+
c = joint.channelNames[joint.channelNames.length - 1][0];
|
276 |
+
}
|
277 |
+
jointMesh.rotOrder = joint.isEndSite() ? "XYZ" : a + b + c; // end sites have no rotation channels, so fall back to a valid default order
|
278 |
+
self.jointMeshes.push(jointMesh);
|
279 |
+
|
280 |
+
jointMesh.position.set(jointMesh.offsetVec.x, jointMesh.offsetVec.y, jointMesh.offsetVec.z);
|
281 |
+
|
282 |
+
// var axisHelper = new THREE.AxisHelper( 10 / self.skelScale );
|
283 |
+
// jointMesh.add( axisHelper );
|
284 |
+
|
285 |
+
|
286 |
+
joint.children.forEach(function(child) {
|
287 |
+
jointMesh.add(self.buildSkelJoints(child, 1));
|
288 |
+
});
|
289 |
+
|
290 |
+
return jointMesh;
|
291 |
+
};
|
292 |
+
|
293 |
+
this.buildSkelBones = function(rootJointMesh) {
|
294 |
+
rootJointMesh.traverse(function(childJointMesh) {
|
295 |
+
if (childJointMesh.parent !== null)
|
296 |
+
{
|
297 |
+
if (typeof childJointMesh.bvhIndex === "undefined")
|
298 |
+
return;
|
299 |
+
// move origin (.translate)
|
300 |
+
// rotate
|
301 |
+
// translate (offset + position)
|
302 |
+
h = math.abs(childJointMesh.offsetVec.length());
|
303 |
+
var bgeometry = self.makeBoneGeometryFCN(childJointMesh.parent.name, childJointMesh.name, h, self.skelScale);
|
304 |
+
|
305 |
+
//BEGIN - Universal
|
306 |
+
if (childJointMesh.offsetVec.y !== 0)
|
307 |
+
// bgeometry.translate(0, Math.sign(childJointMesh.offsetVec.y) * h / 2, 0);
|
308 |
+
bgeometry.translate(0, -h/2, 0);
|
309 |
+
else
|
310 |
+
bgeometry.translate(0, -h / 2, 0);
|
311 |
+
|
312 |
+
|
313 |
+
dx = Math.atan2(childJointMesh.offsetVec.z,childJointMesh.offsetVec.y);
|
314 |
+
dy = Math.atan2(childJointMesh.offsetVec.x,childJointMesh.offsetVec.z);
|
315 |
+
dz = Math.atan2(childJointMesh.offsetVec.x,childJointMesh.offsetVec.y);
|
316 |
+
|
317 |
+
|
318 |
+
osx = math.sign(childJointMesh.offsetVec.x) === 0 ? 0: math.sign(childJointMesh.offsetVec.x);
|
319 |
+
osy = math.sign(childJointMesh.offsetVec.y) === 0 ? 0: math.sign(childJointMesh.offsetVec.y);
|
320 |
+
osz = math.sign(childJointMesh.offsetVec.z) === 0 ? 0: math.sign(childJointMesh.offsetVec.z);
|
321 |
+
|
322 |
+
osxy = math.sign(childJointMesh.offsetVec.x) === 0 ? 0: math.sign(childJointMesh.offsetVec.y);
|
323 |
+
osyx = math.sign(childJointMesh.offsetVec.y) === 0 ? 0: math.sign(childJointMesh.offsetVec.x);
|
324 |
+
osyz = math.sign(childJointMesh.offsetVec.y) === 0 ? 0: math.sign(childJointMesh.offsetVec.z);
|
325 |
+
oszy = math.sign(childJointMesh.offsetVec.z) === 0 ? 0: math.sign(childJointMesh.offsetVec.y);
|
326 |
+
|
327 |
+
|
328 |
+
if (osz <0)
|
329 |
+
bgeometry.rotateZ(1*(math.pi-dz));
|
330 |
+
else if (osz === 0)
|
331 |
+
bgeometry.rotateZ(1*(math.pi-dz));
|
332 |
+
// console.log();
|
333 |
+
else if (osz > 0)
|
334 |
+
bgeometry.rotateZ(1*(2*math.pi-dz));
|
335 |
+
|
336 |
+
|
337 |
+
if (oszy >0)
|
338 |
+
bgeometry.rotateX(-1 *(2*math.pi-dx));
|
339 |
+
else if (childJointMesh.offsetVec.z === 0)
|
340 |
+
// bgeometry.rotateX(-1*(math.pi-dx));
|
341 |
+
console.log();
|
342 |
+
else if (oszy < 0)
|
343 |
+
bgeometry.rotateX(-1*(2*math.pi-dx));
|
344 |
+
|
345 |
+
// bgeometry.rotateY(math.pi-dy);
|
346 |
+
|
347 |
+
//END - Universal
|
348 |
+
|
349 |
+
var boneMesh = new THREE.Mesh(bgeometry, self.boneMaterial);
|
350 |
+
|
351 |
+
boneMesh.joint = childJointMesh.parent;
|
352 |
+
boneMesh.name = childJointMesh.parent.name + " > " + childJointMesh.name;
|
353 |
+
|
354 |
+
childJointMesh.parent.add(boneMesh);
|
355 |
+
self.boneMeshes.push(boneMesh);
|
356 |
+
}
|
357 |
+
});
|
358 |
+
};
|
359 |
+
|
360 |
+
this.animFrame = function(frame) {
|
361 |
+
var torad = Math.PI / 180;
|
362 |
+
|
363 |
+
if (frame >= self.frameCount) {
|
364 |
+
self.playing = false;
|
365 |
+
return;
|
366 |
+
}
|
367 |
+
|
368 |
+
|
369 |
+
this.jointMeshes[0].traverse(function(joint) {
|
370 |
+
|
371 |
+
if (typeof joint.bvhIndex === "undefined") {
|
372 |
+
return;
|
373 |
+
}
|
374 |
+
|
375 |
+
|
376 |
+
var bj = self.bvh.jointArray[joint.bvhIndex];
|
377 |
+
var offsetVec = joint.offsetVec;
|
378 |
+
|
379 |
+
var thisEuler = [];
|
380 |
+
|
381 |
+
|
382 |
+
thisEuler = new THREE.Euler(
|
383 |
+
(bj.channels[frame][bj.rotationIndex.x] * torad),
|
384 |
+
(bj.channels[frame][bj.rotationIndex.y] * torad),
|
385 |
+
(bj.channels[frame][bj.rotationIndex.z] * torad), joint.rotOrder);
|
386 |
+
|
387 |
+
|
388 |
+
joint.localRotMat = new THREE.Matrix4();
|
389 |
+
joint.localRotMat.makeRotationFromEuler(thisEuler);
|
390 |
+
joint.rotation.setFromRotationMatrix(joint.localRotMat);
|
391 |
+
|
392 |
+
if (joint.jointparent !== 0) {
|
393 |
+
// joint.position.set(offsetVec.x, offsetVec.y, offsetVec.z);
|
394 |
+
} else { // root
|
395 |
+
joint.position.set(
|
396 |
+
bj.channels[frame][bj.positionIndex.x] * self.skelScale + self.originPosition.x,
|
397 |
+
bj.channels[frame][bj.positionIndex.y] * self.skelScale + self.originPosition.y,
|
398 |
+
bj.channels[frame][bj.positionIndex.z] * self.skelScale + self.originPosition.z);
|
399 |
+
}
|
400 |
+
});
|
401 |
+
|
402 |
+
if (self.isStreaming) {
|
403 |
+
self.bvh.consumeFrames(frame);
|
404 |
+
self.frameCount = self.bvh.frameArray.length;
|
405 |
+
// console.log(self.frameCount);
|
406 |
+
if (self.frameCount <= 0)
|
407 |
+
self.playing = false;
|
408 |
+
|
409 |
+
self.animOffset = 0; // self.animOffset - frame;
|
410 |
+
self.animStartTimeRef = Date.now();
|
411 |
+
}
|
412 |
+
};
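// Driving sketch (an assumption, not part of this file): a render loop can map
// wall-clock time onto the fields maintained above to pick the frame to show.
//
// function tick(ch) {
//   var elapsedSec = (Date.now() - ch.animStartTimeRef) / 1000;
//   var frame = Math.floor(elapsedSec / ch.frameTime) + ch.animOffset;
//   if (ch.ready && ch.playing) ch.animFrame(frame);
//   requestAnimationFrame(function () { tick(ch); });
// }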
|
413 |
+
|
414 |
+
this.setSkelUp = function() {
|
415 |
+
this.jointMeshes[0].traverse(function(joint) {
|
416 |
+
if (typeof joint.bvhIndex === "undefined")
|
417 |
+
return;
|
418 |
+
|
419 |
+
var bj = self.bvh.jointArray[joint.bvhIndex];
|
420 |
+
|
421 |
+
var offsetVec = joint.offsetVec;
|
422 |
+
var torad = Math.PI / 180;
|
423 |
+
var thisEuler = [];
|
424 |
+
|
425 |
+
thisEuler = new THREE.Euler(0, 0, 0, joint.rotOrder);
|
426 |
+
|
427 |
+
joint.localRotMat = new THREE.Matrix4();
|
428 |
+
joint.localRotMat.makeRotationFromEuler(thisEuler);
|
429 |
+
joint.rotation.setFromRotationMatrix(joint.localRotMat);
|
430 |
+
|
431 |
+
if (joint.jointparent !== 0) {
|
432 |
+
// joint.position.set(offsetVec.x, offsetVec.y, offsetVec.z);
|
433 |
+
} else { // root
|
434 |
+
joint.position.set(self.originPosition.x, self.originPosition.y, self.originPosition.z);
|
435 |
+
}
|
436 |
+
});
|
437 |
+
};
|
438 |
+
};
|
439 |
+
|
440 |
+
|
441 |
+
module.exports = BVHCharacter;
|
442 |
+
|
443 |
+
/***/ },
|
444 |
+
/* 2 */
|
445 |
+
/***/ function(module, exports, __webpack_require__) {
|
446 |
+
|
447 |
+
module.exports ={
|
448 |
+
bvhParser: __webpack_require__(3),
|
449 |
+
bvhStreamParser: __webpack_require__(4)
|
450 |
+
};
|
451 |
+
|
452 |
+
/***/ },
|
453 |
+
/* 3 */
|
454 |
+
/***/ function(module, exports) {
|
455 |
+
|
456 |
+
// By Ankit
|
457 |
+
var BVHReader = function () {
|
458 |
+
this.load = function (url, callbackHeader, callbackFrameArray) {
|
459 |
+
$.get(url, function (str) {
|
460 |
+
|
461 |
+
var dataReturn = parse(str);
|
462 |
+
|
463 |
+
|
464 |
+
var jointStack = dataReturn[0];
|
465 |
+
var jointMap = dataReturn[1];
|
466 |
+
var jointArray = dataReturn[2];
|
467 |
+
var connectivityMatrix = dataReturn[3];
|
468 |
+
_bvh = new BVHReader.BVH.Skeleton(jointStack[0], jointMap, jointArray, dataReturn[3], dataReturn[4], dataReturn[5], []);
|
469 |
+
|
470 |
+
if (callbackHeader)
|
471 |
+
callbackHeader(_bvh,'BVH');
|
472 |
+
console.log("Blah");
|
473 |
+
_bvh.fillFrameArray(dataReturn[6]);
|
474 |
+
|
475 |
+
if (callbackFrameArray)
|
476 |
+
callbackFrameArray();
|
477 |
+
|
478 |
+
});
|
479 |
+
};
|
480 |
+
|
481 |
+
function parse(str) {
|
482 |
+
var lines = str.split('\n');
|
483 |
+
var jointStack = [];
|
484 |
+
var jointMap = {};
|
485 |
+
var jointArray = [];
|
486 |
+
var connectivityMatrix = [];
|
487 |
+
var frameCount, frameTime, frameArray = [];
|
488 |
+
var i = 0;
|
489 |
+
//parse structure
|
490 |
+
for (i = 1; i < lines.length; i++) {
|
491 |
+
if (!parseLine(lines[i], jointStack, jointMap, jointArray, connectivityMatrix)) {
|
492 |
+
break;
|
493 |
+
}
|
494 |
+
}
|
495 |
+
|
496 |
+
for (i = i + 1; i < lines.length; i++) {
|
497 |
+
var line = lines[i].trim();
|
498 |
+
//when encountering last line
|
499 |
+
if (line === "")
|
500 |
+
break;
|
501 |
+
if (line.indexOf("Frames") === 0) {
|
502 |
+
frameCount = +(line.split(/\b/)[2]);
|
503 |
+
} else if (line.indexOf("Frame Time") === 0) {
|
504 |
+
frameTime = +(line.substr(line.indexOf(":") + 1).trim());
|
505 |
+
} else {
|
506 |
+
var parts = line.split(" ");
|
507 |
+
for (var j = 0; j < parts.length; j++)
|
508 |
+
parts[j] = +parts[j];
|
509 |
+
frameArray.push(parts);
|
510 |
+
}
|
511 |
+
}
|
512 |
+
|
513 |
+
//parse motion
|
514 |
+
return [jointStack, jointMap, jointArray, connectivityMatrix, frameCount, frameTime, frameArray];
|
515 |
+
}
|
516 |
+
|
517 |
+
//parses individual line in the bvh file.
|
518 |
+
var parseLine = function (line, jointStack, jointMap, jointArray, connectivityMatrix) {
|
519 |
+
line = line.trim();
|
520 |
+
if (line.indexOf("ROOT") > -1 || line.indexOf("JOINT") > -1 || line.indexOf("End") > -1) {
|
521 |
+
var parts = line.split(" ");
|
522 |
+
var title = parts[1]; //temporary variable to be used after creating the joint object
|
523 |
+
parts[1] = parts[1] + "-" + jointArray.length;
|
524 |
+
var joint = new BVHReader.BVH.Joint(parts[1]);
|
525 |
+
joint.title = title;
|
526 |
+
jointStack.push(joint);
|
527 |
+
|
528 |
+
joint.jointIndex = Object.keys(jointMap).length;
|
529 |
+
jointMap[parts[1]] = joint;
|
530 |
+
jointArray.push(joint);
|
531 |
+
//if the joint is not an end site
|
532 |
+
if( line.indexOf("End") != 0 ){
|
533 |
+
if (jointArray.length == 1) {
|
534 |
+
joint.channelOffset = 0;
|
535 |
+
} else {
|
536 |
+
joint.channelOffset = jointArray[jointArray.length - 2].channelOffset + jointArray[jointArray.length - 2].channelLength;
|
537 |
+
}
|
538 |
+
}else{
|
539 |
+
//channelLength is 0 for end joints
|
540 |
+
joint.channelLength = 0;
|
541 |
+
joint.channelOffset = jointArray[jointArray.length - 2].channelOffset + jointArray[jointArray.length - 2].channelLength;
|
542 |
+
}
|
543 |
+
|
544 |
+
} else if (line.indexOf("{") === 0) {
|
545 |
+
|
546 |
+
} else if (line.indexOf("OFFSET") === 0) {
|
547 |
+
var parts = line.split(" ");
|
548 |
+
jointStack[jointStack.length - 1]["offset"] = parts.slice(1);
|
549 |
+
for(x in jointStack[jointStack.length - 1]["offset"]){
|
550 |
+
jointStack[jointStack.length - 1]["offset"][x] = +jointStack[jointStack.length - 1]["offset"][x]
|
551 |
+
}
|
552 |
+
} else if (line.indexOf("CHANNELS") === 0) {
|
553 |
+
var parts = line.split(" ");
|
554 |
+
jointStack[jointStack.length - 1].setChannelNames(parts.slice(2));
|
555 |
+
jointStack[jointStack.length - 1]["channelLength"] = +parts[1];
|
556 |
+
} else if (line.indexOf("}") === 0) {
|
557 |
+
if (jointStack.length > 1) {
|
558 |
+
child = jointStack.pop();
|
559 |
+
jointStack[jointStack.length - 1].children.push(child);
|
560 |
+
child.parent = jointStack[jointStack.length - 1];
|
561 |
+
|
562 |
+
connectivityMatrix.push([child.parent, child]);
|
563 |
+
|
564 |
+
// if(!connectivityMatrix[child.name]){
|
565 |
+
// connectivityMatrix[child.name] = {}
|
566 |
+
// }
|
567 |
+
// connectivityMatrix[child.name][child.parent.name] = 1;
|
568 |
+
|
569 |
+
// if(!connectivityMatrix[child.parent.name]){
|
570 |
+
// connectivityMatrix[child.parent.name] = {}
|
571 |
+
// }
|
572 |
+
// connectivityMatrix[child.parent.name][child.name] = 1;
|
573 |
+
}
|
574 |
+
} else if (line.indexOf("MOTION") == 0) {
|
575 |
+
return false;
|
576 |
+
}
|
577 |
+
|
578 |
+
return true;
|
579 |
+
};
|
580 |
+
};
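// Worked example (illustrative) of the channel bookkeeping in parseLine above:
//   ROOT Hips    CHANNELS 6 ...  -> channelOffset 0, channelLength 6
//   JOINT Spine  CHANNELS 3 ...  -> channelOffset 6, channelLength 3
// so Spine's values for frame f are frameArray[f].slice(6, 9), which is exactly
// what Joint.getChannelsAt(f) below computes.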
|
581 |
+
|
582 |
+
BVHReader.BVH = BVHReader.BVH || {};
|
583 |
+
|
584 |
+
BVHReader.BVH.Joint = function (name, index) {
|
585 |
+
|
586 |
+
this.name = name;
|
587 |
+
this.children = [];
|
588 |
+
this.isEndSite = function () {
|
589 |
+
return this.children.length == 0;
|
590 |
+
};
|
591 |
+
this.rotationIndex = {};
|
592 |
+
this.positionIndex = {};
|
593 |
+
|
594 |
+
this.getChannels = function () {
|
595 |
+
var allChannels = [];
|
596 |
+
for (i = 0; i < this.skeleton.frameArray.length; i++) {
|
597 |
+
allChannels.push(this.getChannelsAt(i));
|
598 |
+
}
|
599 |
+
return allChannels;
|
600 |
+
};
|
601 |
+
this.getChannelsAt = function (frameNum) {
|
602 |
+
var channelsAtFrame = this.skeleton.frameArray[frameNum];
|
603 |
+
return channelsAtFrame.slice(this.channelOffset, this.channelOffset + this.channelLength);
|
604 |
+
};
|
605 |
+
|
606 |
+
this.setChannelNames = function (nameArr){
|
607 |
+
this.channelNames = nameArr;
|
608 |
+
for(i in this.channelNames){
|
609 |
+
var name = this.channelNames[i];
|
610 |
+
switch(name){
|
611 |
+
case "Xposition": this.positionIndex.x = i; break;
|
612 |
+
case "Yposition": this.positionIndex.y = i; break;
|
613 |
+
case "Zposition": this.positionIndex.z = i; break;
|
614 |
+
|
615 |
+
case "Xrotation": this.rotationIndex.x = i; break;
|
616 |
+
case "Yrotation": this.rotationIndex.y = i; break;
|
617 |
+
case "Zrotation": this.rotationIndex.z = i; break;
|
618 |
+
}
|
619 |
+
}
|
620 |
+
}
|
621 |
+
};
|
622 |
+
|
623 |
+
BVHReader.BVH.Skeleton = function (root, map, arr, connectivityMatrix, frameCount, frameTime, frameArray) {
|
624 |
+
thisSkeleton = this;
|
625 |
+
this.root = root;
|
626 |
+
this.jointMap = map;
|
627 |
+
this.jointArray = arr;
|
628 |
+
this.connectivityMatrix = connectivityMatrix;
|
629 |
+
this.frameCount = frameCount;
|
630 |
+
this.frameTime = frameTime;
|
631 |
+
this.frameArray = frameArray;
|
632 |
+
|
633 |
+
for (i = 0; i < this.jointArray.length; i++) {
|
634 |
+
this.jointArray[i].skeleton = thisSkeleton;
|
635 |
+
}
|
636 |
+
|
637 |
+
|
638 |
+
|
639 |
+
this.fillFrameArray = function (fa) {
|
640 |
+
this.frameArray = fa;
|
641 |
+
this.frameCount = fa.length;
|
642 |
+
//all the structures are ready. let's calculate the positions
|
643 |
+
for(j=0; j < this.jointArray.length; j++){
|
644 |
+
var joint = this.jointArray[j];
|
645 |
+
updateWithPositions(joint);
|
646 |
+
}
|
647 |
+
}
|
648 |
+
|
649 |
+
this.getChannels = function () {
|
650 |
+
return frameArray;
|
651 |
+
};
|
652 |
+
this.getChannelsAt = function (frameNum) {
|
653 |
+
//How do I know which column is what?
|
654 |
+
//Why do you need the column index?
|
655 |
+
return frameArray[frameNum];
|
656 |
+
};
|
657 |
+
this.getFrameRate = function () {
|
658 |
+
return 1 / frameTime; // frameTime is seconds per frame, so the rate is its reciprocal
|
659 |
+
};
|
660 |
+
this.getSkeleton = function () {
|
661 |
+
return root;
|
662 |
+
};
|
663 |
+
|
664 |
+
this.getHeadJoint = function () {
|
665 |
+
// do a quick search in the joint names to see if any of them matches "Head"; if none does, this returns undefined
|
666 |
+
return jointMap["Head"];
|
667 |
+
};
|
668 |
+
this.getPositionsAt = function (frameNum) {
|
669 |
+
//for each joint, calculate its position in XYZ
|
670 |
+
//return an array of joints, each with .x, .y, and .z properties
|
671 |
+
posFrame = [];
|
672 |
+
|
673 |
+
for (j=0;j<this.jointArray.length;j++) {
|
674 |
+
posFrame.push(this.jointArray[j].positions[frameNum]);
|
675 |
+
}
|
676 |
+
|
677 |
+
posFrame = posFrame.map(function(d) {
|
678 |
+
return {
|
679 |
+
x : d[0],
|
680 |
+
y : d[1],
|
681 |
+
z : d[2],
|
682 |
+
};
|
683 |
+
});
|
684 |
+
|
685 |
+
return posFrame;
|
686 |
+
};
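// Usage note: getPositionsAt(f) returns one {x, y, z} per joint in jointArray
// order. In this build updateWithPositions only assigns positions for the root
// joint (the non-root branch below is commented out), so only the root's
// positions are populated; the mapping above assumes every entry exists.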
|
687 |
+
this.getTPose = function () {
|
688 |
+
// This function is basically the same as getPositionsAt, except that all rotations are 0
|
689 |
+
console.log("Not yet implemented");
|
690 |
+
};
|
691 |
+
|
692 |
+
function updatePositions(rootOffset, removeRoot, orientation, camera) {
|
693 |
+
// TODO: complete the specification of this
|
694 |
+
|
695 |
+
for(j=0; j < this.jointArray.length; j++){
|
696 |
+
var joint = this.jointArray[j];
|
697 |
+
updateWithPositions(joint);
|
698 |
+
}
|
699 |
+
}
|
700 |
+
|
701 |
+
function updateWithPositions(joint){
|
702 |
+
var channelNames = joint.channelNames;
|
703 |
+
joint.channels = joint.getChannels();
|
704 |
+
joint.rotations = [];
|
705 |
+
joint.positions = [];
|
706 |
+
joint.rotmat = [];
|
707 |
+
for(i in joint.channels){
|
708 |
+
var channel = joint.channels[i];
|
709 |
+
var xpos = channel[joint.positionIndex.x] || 0,
|
710 |
+
ypos = channel[joint.positionIndex.y] || 0,
|
711 |
+
zpos = channel[joint.positionIndex.z] || 0,
|
712 |
+
xangle = deg2rad(channel[joint.rotationIndex.x] || 0),
|
713 |
+
yangle = deg2rad(channel[joint.rotationIndex.y] || 0),
|
714 |
+
zangle= deg2rad(channel[joint.rotationIndex.z] || 0);
|
715 |
+
|
716 |
+
// var rotMatrix = math.transpose(getRotationMatrix(xangle, yangle, zangle, "xyz"));
|
717 |
+
// var rotMatrix = getRotationMatrix1(xangle, yangle, zangle, "xyz"); //this also works
|
718 |
+
var posMatrix = [xpos, ypos, zpos];
|
719 |
+
|
720 |
+
if(joint.parent){
|
721 |
+
posMatrix = [0,0,0]; //At least for the bvhs that we have, this should be set to 0
|
722 |
+
|
723 |
+
// var t = vectorAdd(joint.offset, posMatrix);
|
724 |
+
// var u = matrixMultiply(t, joint.parent.rotations[i]);
|
725 |
+
|
726 |
+
// joint.positions[i] = vectorAdd(u, joint.parent.positions[i]);
|
727 |
+
// joint.rotations[i] = matrixMultiply( rotMatrix, joint.parent.rotations[i]);
|
728 |
+
// joint.rotmat[i] = rotMatrix;
|
729 |
+
|
730 |
+
if (i==0 && (joint.name == "Spine" || joint.name == "L_Femur")) {
|
731 |
+
/*console.log("head's rot mat: ");
|
732 |
+
console.log(joint.rotations[i]);
|
733 |
+
console.log(t);
|
734 |
+
console.log(u);
|
735 |
+
|
736 |
+
console.log("x: "+xangle + "y: "+yangle + "z: "+zangle );
|
737 |
+
console.log(posMatrix);
|
738 |
+
*/
|
739 |
+
}
|
740 |
+
|
741 |
+
}else{
|
742 |
+
// it's the root
|
743 |
+
// joint.rotations[i] = rotMatrix;
|
744 |
+
// joint.rotmat[i] = rotMatrix;
|
745 |
+
joint.positions[i] = posMatrix;//vectorAdd(joint.offset , posMatrix);
|
746 |
+
// ^ we can safely ignore the root's offset
|
747 |
+
}
|
748 |
+
}
|
749 |
+
}
|
750 |
+
|
751 |
+
function deg2rad(deg){
|
752 |
+
return deg * (Math.PI/180);
|
753 |
+
}
|
754 |
+
|
755 |
+
|
756 |
+
function getRotationMatrix(alpha, beta, gamma) {
|
757 |
+
|
758 |
+
// inputs are the intrinsic rotation angles in RADIANS
|
759 |
+
var ca = Math.cos(alpha),
|
760 |
+
sa = Math.sin(alpha),
|
761 |
+
|
762 |
+
cb = Math.cos(beta),
|
763 |
+
sb = Math.sin(beta),
|
764 |
+
|
765 |
+
cg = Math.cos(gamma),
|
766 |
+
sg = Math.sin(gamma),
|
767 |
+
|
768 |
+
Rx = [[1, 0, 0], [0, ca, -sa], [0, sa, ca]];
|
769 |
+
|
770 |
+
Ry = [[cb, 0, sb], [0, 1, 0], [-sb, 0, cb]];
|
771 |
+
|
772 |
+
Rz = [[cg, -sg, 0], [sg, cg, 0], [0, 0, 1]];
|
773 |
+
|
774 |
+
|
775 |
+
|
776 |
+
|
777 |
+
var Rzm = math.matrix(Rz);
|
778 |
+
var Rym = math.matrix(Ry);
|
779 |
+
var Rxm = math.matrix(Rx);
|
780 |
+
|
781 |
+
var tt = math.multiply(Rzm, Rym);
|
782 |
+
|
783 |
+
return math.multiply(tt,Rxm).toArray();
|
784 |
+
//rotationMatrix = math. //Rz*Ry*Rx;
|
785 |
+
|
786 |
+
// R = Rx*Ry*Rz;
|
787 |
+
}
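// Sanity check (illustrative): the composition above is R = Rz * Ry * Rx, so
// with beta = gamma = 0 the result is Rx alone:
//   getRotationMatrix(Math.PI / 2, 0, 0) -> [[1, 0, 0], [0, 0, -1], [0, 1, 0]]
// i.e. a right-handed 90-degree rotation about X that maps +Y onto +Z.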
|
788 |
+
|
789 |
+
function getRotationMatrix1 (xangle, yangle, zangle, order){
|
790 |
+
var c1 = Math.cos(xangle),
|
791 |
+
c2 = Math.cos(yangle),
|
792 |
+
c3 = Math.cos(zangle),
|
793 |
+
s1 = Math.sin(xangle),
|
794 |
+
s2 = Math.sin(yangle),
|
795 |
+
s3 = Math.sin(zangle);
|
796 |
+
|
797 |
+
if(order === undefined || order.trim() === ""){
|
798 |
+
order = "zxy";
|
799 |
+
}
|
800 |
+
|
801 |
+
var rotMat = [
|
802 |
+
[1,0,0],
|
803 |
+
[0,1,0],
|
804 |
+
[0,0,1]
|
805 |
+
];
|
806 |
+
|
807 |
+
switch(order){
|
808 |
+
case "___zxy":
|
809 |
+
rotMat = [
|
810 |
+
[c2*c3-s1*s2*s3, c2*s3+s1*s2*c3, -s2*c1],
|
811 |
+
[-c1*s3, c1*c3, s1],
|
812 |
+
[s2*c3+c2*s1*s3, s2*s3-c2*s1*c3, c2*c1]
|
813 |
+
];
|
814 |
+
break;
|
815 |
+
default:
|
816 |
+
for (o in order){
|
817 |
+
var axis = order[o];
|
818 |
+
var t;
|
819 |
+
switch(axis){
|
820 |
+
case "x":
|
821 |
+
t = [
|
822 |
+
[1, 0, 0],
|
823 |
+
[0, c1, s1],
|
824 |
+
[0, -s1, c1],
|
825 |
+
]
|
826 |
+
break;
|
827 |
+
case "y":
|
828 |
+
t = [
|
829 |
+
[c2,0,-s2],
|
830 |
+
[0,1,0],
|
831 |
+
[s2,0,c2]
|
832 |
+
]
|
833 |
+
break;
|
834 |
+
case "z":
|
835 |
+
t = [[c3,s3,0],[-s3,c3,0],[0,0,1]]
|
836 |
+
break;
|
837 |
+
}
|
838 |
+
|
839 |
+
rotMat = matrixMultiply(t, rotMat)
|
840 |
+
}
|
841 |
+
}
|
842 |
+
|
843 |
+
return rotMat;
|
844 |
+
}
|
845 |
+
};
|
846 |
+
|
847 |
+
function vectorAdd(a, b){
|
848 |
+
return math.add(math.matrix(a), math.matrix(b)).toArray();
|
849 |
+
}
|
850 |
+
|
851 |
+
function matrixMultiply(m1, m2) {
|
852 |
+
var a = math.matrix(m1);
|
853 |
+
var b = math.matrix(m2);
|
854 |
+
return math.multiply(a, b).toArray();
|
855 |
+
}
|
856 |
+
|
857 |
+
|
858 |
+
module.exports = BVHReader;
|
859 |
+
|
860 |
+
/***/ },
|
861 |
+
/* 4 */
|
862 |
+
/***/ function(module, exports) {
|
863 |
+
|
864 |
+
// BVH parser by Ankit
|
865 |
+
// Stream by Omid
|
866 |
+
|
867 |
+
|
868 |
+
var BVHStreamParser = function () {
|
869 |
+
this.readHeader = function (str, callback) {
|
870 |
+
var dataReturn = parseHeader(str);
|
871 |
+
var jointStack = dataReturn[0];
|
872 |
+
var jointMap = dataReturn[1];
|
873 |
+
var jointArray = dataReturn[2];
|
874 |
+
var connectivityMatrix = dataReturn[3];
|
875 |
+
if (callback)
|
876 |
+
callback(new BVHStreamParser.BVH.Skeleton(jointStack[0], jointMap, jointArray, dataReturn[3], 0, dataReturn[5], dataReturn[6]),'BVH');
|
877 |
+
};
|
878 |
+
|
879 |
+
function parseHeader(str) {
|
880 |
+
var lines = str.split('\n');
|
881 |
+
var jointStack = [];
|
882 |
+
var jointMap = {};
|
883 |
+
var jointArray = [];
|
884 |
+
var connectivityMatrix = [];
|
885 |
+
var frameCount, frameTime, frameArray = [];
|
886 |
+
var i = 0;
|
887 |
+
//parse structure
|
888 |
+
for (i = 2; i < lines.length; i++) { // start from 2 to skip the $HEADER$ command
|
889 |
+
if (!parseLine(lines[i], jointStack, jointMap, jointArray, connectivityMatrix)) {
|
890 |
+
break;
|
891 |
+
}
|
892 |
+
}
|
893 |
+
|
894 |
+
for (i = i + 1; i < lines.length; i++) {
|
895 |
+
var line = lines[i].trim();
|
896 |
+
//when encountering last line
|
897 |
+
if (line === "")
|
898 |
+
break;
|
899 |
+
if (line.indexOf("Frames") === 0) {
|
900 |
+
frameCount = +(line.split(/\b/)[2]);
|
901 |
+
} else if (line.indexOf("Frame Time") === 0) {
|
902 |
+
frameTime = +(line.substr(line.indexOf(":") + 1).trim());
|
903 |
+
} else { /// maybe this should be removed
|
904 |
+
var parts = line.split(" ");
|
905 |
+
for (var j = 0; j < parts.length; j++)
|
906 |
+
parts[j] = +parts[j];
|
907 |
+
frameArray.push(parts);
|
908 |
+
}
|
909 |
+
}
|
910 |
+
|
911 |
+
//parse motion
|
912 |
+
return [jointStack, jointMap, jointArray, connectivityMatrix, frameCount, frameTime, frameArray];
|
913 |
+
}
|
914 |
+
|
915 |
+
//parses individual line in the bvh file.
|
916 |
+
var parseLine = function (line, jointStack, jointMap, jointArray, connectivityMatrix) {
|
917 |
+
line = line.trim();
|
918 |
+
if (line.indexOf("ROOT") > -1 || line.indexOf("JOINT") > -1 || line.indexOf("End") > -1) {
|
919 |
+
var parts = line.split(" ");
|
920 |
+
var title = parts[1]; //temporary variable to be used after creating the joint object
|
921 |
+
parts[1] = parts[1] + "-" + jointArray.length;
|
922 |
+
var joint = new BVHStreamParser.BVH.Joint(parts[1]);
|
923 |
+
joint.title = title;
|
924 |
+
jointStack.push(joint);
|
925 |
+
|
926 |
+
joint.jointIndex = Object.keys(jointMap).length;
|
927 |
+
jointMap[parts[1]] = joint;
|
928 |
+
jointArray.push(joint);
|
929 |
+
//if the joint is not an end site
|
930 |
+
if( line.indexOf("End") != 0 ){
|
931 |
+
if (jointArray.length == 1) {
|
932 |
+
joint.channelOffset = 0;
|
933 |
+
} else {
|
934 |
+
joint.channelOffset = jointArray[jointArray.length - 2].channelOffset + jointArray[jointArray.length - 2].channelLength;
|
935 |
+
}
|
936 |
+
}else{
|
937 |
+
//channelLength is 0 for end joints
|
938 |
+
joint.channelLength = 0;
|
939 |
+
joint.channelOffset = jointArray[jointArray.length - 2].channelOffset + jointArray[jointArray.length - 2].channelLength;
|
940 |
+
}
|
941 |
+
|
942 |
+
} else if (line.indexOf("{") === 0) {
|
943 |
+
|
944 |
+
} else if (line.indexOf("OFFSET") === 0) {
|
945 |
+
var parts = line.split(" ");
|
946 |
+
jointStack[jointStack.length - 1]["offset"] = parts.slice(1);
|
947 |
+
for(x in jointStack[jointStack.length - 1]["offset"]){
|
948 |
+
jointStack[jointStack.length - 1]["offset"][x] = +jointStack[jointStack.length - 1]["offset"][x]
|
949 |
+
}
|
950 |
+
} else if (line.indexOf("CHANNELS") === 0) {
|
951 |
+
var parts = line.split(" ");
|
952 |
+
jointStack[jointStack.length - 1].setChannelNames(parts.slice(2));
|
953 |
+
jointStack[jointStack.length - 1]["channelLength"] = +parts[1];
|
954 |
+
} else if (line.indexOf("}") === 0) {
|
955 |
+
if (jointStack.length > 1) {
|
956 |
+
child = jointStack.pop();
|
957 |
+
jointStack[jointStack.length - 1].children.push(child);
|
958 |
+
child.parent = jointStack[jointStack.length - 1];
|
959 |
+
|
960 |
+
connectivityMatrix.push([child.parent, child]);
|
961 |
+
}
|
962 |
+
} else if (line.indexOf("MOTION") == 0) {
|
963 |
+
return false;
|
964 |
+
}
|
965 |
+
|
966 |
+
return true;
|
967 |
+
};
|
968 |
+
};
|
969 |
+
|
970 |
+
BVHStreamParser.BVH = BVHStreamParser.BVH || {};
|
971 |
+
|
972 |
+
BVHStreamParser.BVH.Joint = function (name, index) {
|
973 |
+
|
974 |
+
this.name = name;
|
975 |
+
this.children = [];
|
976 |
+
this.isEndSite = function () {
|
977 |
+
return this.children.length == 0;
|
978 |
+
};
|
979 |
+
this.rotationIndex = {};
|
980 |
+
this.positionIndex = {};
|
981 |
+
|
982 |
+
this.getChannels = function () {
|
983 |
+
var allChannels = [];
|
984 |
+
for (i = 0; i < this.skeleton.frameArray.length; i++) {
|
985 |
+
allChannels.push(this.getChannelsAt(i));
|
986 |
+
}
|
987 |
+
return allChannels;
|
988 |
+
};
|
989 |
+
this.getChannelsAt = function (frameNum) {
|
990 |
+
var channelsAtFrame = this.skeleton.frameArray[frameNum];
|
991 |
+
return channelsAtFrame.slice(this.channelOffset, this.channelOffset + this.channelLength);
|
992 |
+
};
|
993 |
+
|
994 |
+
this.setChannelNames = function (nameArr){
|
995 |
+
this.channelNames = nameArr;
|
996 |
+
for(i in this.channelNames){
|
997 |
+
var name = this.channelNames[i];
|
998 |
+
switch(name){
|
999 |
+
case "Xposition": this.positionIndex.x = i; break;
|
1000 |
+
case "Yposition": this.positionIndex.y = i; break;
|
1001 |
+
case "Zposition": this.positionIndex.z = i; break;
|
1002 |
+
|
1003 |
+
case "Xrotation": this.rotationIndex.x = i; break;
|
1004 |
+
case "Yrotation": this.rotationIndex.y = i; break;
|
1005 |
+
case "Zrotation": this.rotationIndex.z = i; break;
|
1006 |
+
}
|
1007 |
+
}
|
1008 |
+
}
|
1009 |
+
};
|
1010 |
+
|
1011 |
+
BVHStreamParser.BVH.Skeleton = function (root, map, arr, connectivityMatrix, frameCount, frameTime, frameArray) {
|
1012 |
+
thisSkeleton = this;
|
1013 |
+
this.root = root;
|
1014 |
+
this.jointMap = map;
|
1015 |
+
this.jointArray = arr;
|
1016 |
+
this.connectivityMatrix = connectivityMatrix;
|
1017 |
+
this.frameCount = frameCount;
|
1018 |
+
this.frameTime = frameTime;
|
1019 |
+
this.frameArray = frameArray;
|
1020 |
+
this.bufferSize = 500;
|
1021 |
+
|
1022 |
+
for (i = 0; i < this.jointArray.length; i++) {
|
1023 |
+
this.jointArray[i].skeleton = thisSkeleton;
|
1024 |
+
}
|
1025 |
+
|
1026 |
+
this.fillFrameArray = function (fa) {
|
1027 |
+
this.frameArray.push.apply(this.frameArray,fa);
|
1028 |
+
//this.frameArray.push.apply(this.frameArray,fa);
|
1029 |
+
|
1030 |
+
diff = this.frameArray.length - this.bufferSize;
|
1031 |
+
// console.log('diff = ' + diff);
|
1032 |
+
|
1033 |
+
/*
|
1034 |
+
if (diff > 0)
|
1035 |
+
for (i=0;i<diff;i++)
|
1036 |
+
this.frameArray.shift();
|
1037 |
+
|
1038 |
+
this.frameCount = this.frameArray.length;
|
1039 |
+
*/
|
1040 |
+
|
1041 |
+
if (diff > 0)
|
1042 |
+
addedCount = this.frameCount;
|
1043 |
+
else
|
1044 |
+
addedCount = fa.length;
|
1045 |
+
|
1046 |
+
for(j=0; j < this.jointArray.length; j++){
|
1047 |
+
var joint = this.jointArray[j];
|
1048 |
+
updateWithPositionsSinceLast(joint, addedCount);
|
1049 |
+
}
|
1050 |
+
|
1051 |
+
return diff;
|
1052 |
+
}
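// Example with made-up numbers: with bufferSize 500, a buffer holding 480
// frames plus a 40-frame chunk gives diff = 520 - 500 = 20 > 0, so addedCount
// falls back to this.frameCount. The caller (onDataChunkReceived) stores the
// returned diff; the offset-rewind logic that would consume it is currently
// commented out there.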
|
1053 |
+
|
1054 |
+
this.consumeFrames = function (index) {
|
1055 |
+
for (i=0;i<=index;i++) {
|
1056 |
+
this.frameArray.shift();
|
1057 |
+
for (j=0;j<this.jointArray.length;j++)
|
1058 |
+
this.jointArray[j].channels.shift();
|
1059 |
+
}
|
1060 |
+
this.frameCount = this.frameArray.length;
|
1061 |
+
}
|
1062 |
+
|
1063 |
+
this.getChannels = function () {
|
1064 |
+
return frameArray;
|
1065 |
+
};
|
1066 |
+
this.getChannelsAt = function (frameNum) {
|
1067 |
+
//How do I know which column is what?
|
1068 |
+
//Why do you need the column index?
|
1069 |
+
return frameArray[frameNum];
|
1070 |
+
};
|
1071 |
+
this.getFrameRate = function () {
|
1072 |
+
return 1 / frameTime; // frameTime is seconds per frame, so the rate is its reciprocal
|
1073 |
+
};
|
1074 |
+
this.getSkeleton = function () {
|
1075 |
+
return root;
|
1076 |
+
};
|
1077 |
+
|
1078 |
+
this.getHeadJoint = function () {
|
1079 |
+
// do a quick search in the joint names to see if any of them matches "Head"; if none does, this returns undefined
|
1080 |
+
return jointMap["Head"];
|
1081 |
+
};
|
1082 |
+
this.getPositionsAt = function (frameNum) {
|
1083 |
+
//for each joint, calculate its position in XYZ
|
1084 |
+
//return an array of joints, each with .x, .y, and .z properties
|
1085 |
+
posFrame = [];
|
1086 |
+
|
1087 |
+
for (j=0;j<this.jointArray.length;j++) {
|
1088 |
+
posFrame.push(this.jointArray[j].positions[frameNum]);
|
1089 |
+
}
|
1090 |
+
|
1091 |
+
posFrame = posFrame.map(function(d) {
|
1092 |
+
return {
|
1093 |
+
x : d[0],
|
1094 |
+
y : d[1],
|
1095 |
+
z : d[2],
|
1096 |
+
};
|
1097 |
+
});
|
1098 |
+
|
1099 |
+
return posFrame;
|
1100 |
+
};
|
1101 |
+
this.getTPose = function () {
|
1102 |
+
// This function is basically the same as getPositionsAt, except that all rotations are 0
|
1103 |
+
console.log("Not yet implemented");
|
1104 |
+
};
|
1105 |
+
|
1106 |
+
function updatePositions(rootOffset, removeRoot, orientation, camera) {
|
1107 |
+
// TODO: complete the specification of this
|
1108 |
+
|
1109 |
+
for(j=0; j < this.jointArray.length; j++){
|
1110 |
+
var joint = this.jointArray[j];
|
1111 |
+
updateWithPositions(joint);
|
1112 |
+
}
|
1113 |
+
}
|
1114 |
+
|
1115 |
+
function updateWithPositions(joint){
|
1116 |
+
var channelNames = joint.channelNames;
|
1117 |
+
joint.channels = joint.getChannels();
|
1118 |
+
joint.rotations = [];
|
1119 |
+
joint.positions = [];
|
1120 |
+
joint.rotmat = [];
|
1121 |
+
for(i in joint.channels){
|
1122 |
+
var channel = joint.channels[i];
|
1123 |
+
var xpos = channel[joint.positionIndex.x] || 0,
|
1124 |
+
ypos = channel[joint.positionIndex.y] || 0,
|
1125 |
+
zpos = channel[joint.positionIndex.z] || 0;
|
1126 |
+
// xangle = deg2rad(channel[joint.rotationIndex.x] || 0),
|
1127 |
+
// yangle = deg2rad(channel[joint.rotationIndex.y] || 0),
|
1128 |
+
// zangle= deg2rad(channel[joint.rotationIndex.z] || 0);
|
1129 |
+
|
1130 |
+
var posMatrix = [xpos, ypos, zpos];
|
1131 |
+
|
1132 |
+
if(!joint.parent){
|
1133 |
+
// it's the root
|
1134 |
+
joint.positions[i] = posMatrix;//vectorAdd(joint.offset , posMatrix);
|
1135 |
+
// ^ we can safely ignore the root's offset
|
1136 |
+
}
|
1137 |
+
}
|
1138 |
+
}
|
1139 |
+
|
1140 |
+
function updateWithPositionsSinceLast(joint, addedCount){
|
1141 |
+
var channelNames = joint.channelNames;
|
1142 |
+
joint.channels = joint.getChannels();
|
1143 |
+
joint.rotations = [];
|
1144 |
+
joint.positions = [];
|
1145 |
+
joint.rotmat = [];
|
1146 |
+
for(i=joint.channels.length - addedCount;i < joint.channels.length; i++){
|
1147 |
+
var channel = joint.channels[i];
|
1148 |
+
var xpos = channel[joint.positionIndex.x] || 0,
|
1149 |
+
ypos = channel[joint.positionIndex.y] || 0,
|
1150 |
+
zpos = channel[joint.positionIndex.z] || 0;
|
1151 |
+
// xangle = deg2rad(channel[joint.rotationIndex.x] || 0),
|
1152 |
+
// yangle = deg2rad(channel[joint.rotationIndex.y] || 0),
|
1153 |
+
// zangle= deg2rad(channel[joint.rotationIndex.z] || 0);
|
1154 |
+
|
1155 |
+
var posMatrix = [xpos, ypos, zpos];
|
1156 |
+
|
1157 |
+
if(!joint.parent){
|
1158 |
+
// it's the root
|
1159 |
+
joint.positions[i] = posMatrix;//vectorAdd(joint.offset , posMatrix);
|
1160 |
+
// ^ we can safely ignore the root's offset
|
1161 |
+
}
|
1162 |
+
}
|
1163 |
+
}
|
1164 |
+
|
1165 |
+
function deg2rad(deg){
|
1166 |
+
return deg * (Math.PI/180);
|
1167 |
+
}
|
1168 |
+
};
|
1169 |
+
|
1170 |
+
module.exports = BVHStreamParser;
|
1171 |
+
|
1172 |
+
/***/ },
|
1173 |
+
/* 5 */
|
1174 |
+
/***/ function(module, exports) {
|
1175 |
+
|
1176 |
+
var C3DCharacter = C3DCharacter || {};
|
1177 |
+
|
1178 |
+
C3DCharacter = function(n, jm, jg){
|
1179 |
+
this.name = n;
|
1180 |
+
|
1181 |
+
this.markerMaterial = jm;
|
1182 |
+
this.makeMarkerGeometryFCN = jg;
|
1183 |
+
|
1184 |
+
this.originPosition = new THREE.Vector3(0,0,0);
|
1185 |
+
|
1186 |
+
this.markerdata = [];
|
1187 |
+
this.ready = false;
|
1188 |
+
this.scale = 0.5;
|
1189 |
+
this.markerMeshes = [];
|
1190 |
+
|
1191 |
+
this.frameTime = 1/30;
|
1192 |
+
this.frameCount = 0;
|
1193 |
+
|
1194 |
+
this.animIndex = 0;
|
1195 |
+
this.animStartTimeRef = 0;
|
1196 |
+
this.animOffset = 0;
|
1197 |
+
this.playing = true;
|
1198 |
+
|
1199 |
+
this.debug = true;
|
1200 |
+
|
1201 |
+
var self = this;
|
1202 |
+
|
1203 |
+
//
|
1204 |
+
|
1205 |
+
this.log = function(m) {
|
1206 |
+
if (self.debug)
|
1207 |
+
console.log(self.name + ": "+m.toString());
|
1208 |
+
};
|
1209 |
+
|
1210 |
+
this.loadFromURL = function(url, callback) {
|
1211 |
+
self.log("Loading the mocap file ...");
|
1212 |
+
Pace.start();
|
1213 |
+
url2 = "../" + url;
|
1214 |
+
self.url = url;
|
1215 |
+
|
1216 |
+
Papa.parse(url2, {
|
1217 |
+
worker: true,
|
1218 |
+
delimiter: ",",
|
1219 |
+
dynamicTyping: true,
|
1220 |
+
download: true,
|
1221 |
+
header: false,
|
1222 |
+
complete: function(results) {
|
1223 |
+
self.processData(results);
|
1224 |
+
if (callback)
|
1225 |
+
callback();
|
1226 |
+
}
|
1227 |
+
});
|
1228 |
+
};
|
1229 |
+
|
1230 |
+
this.loadFromBuffer = function(data, callback) {
|
1231 |
+
self.log("Loading the mocap from buffer...");
|
1232 |
+
Pace.start();
|
1233 |
+
var preData = data.split('\n');
|
1234 |
+
preData = preData.map(function(d,i){
|
1235 |
+
var cols = d.split(',');
|
1236 |
+
var floats = cols;
|
1237 |
+
// console.log(i);
|
1238 |
+
if (i!=0) {
|
1239 |
+
floats = cols.map(function(p, j){
|
1240 |
+
return parseFloat(p);
|
1241 |
+
});
|
1242 |
+
}
|
1243 |
+
|
1244 |
+
return floats;
|
1245 |
+
});
|
1246 |
+
preData.pop();
|
1247 |
+
|
1248 |
+
this.processData({data: preData});
|
1249 |
+
if (callback)
|
1250 |
+
callback();
|
1251 |
+
}
|
1252 |
+
|
1253 |
+
this.processData = function(results) {
|
1254 |
+
//self.markerdata = results.data;
|
1255 |
+
// console.log(results);
|
1256 |
+
|
1257 |
+
for (i=0;i<results.data[0].length-3;i+=3) {
|
1258 |
+
var markerMesh = new THREE.Mesh(self.makeMarkerGeometryFCN(results.data[0][i], self.scale), self.markerMaterial);
|
1259 |
+
markerMesh.markerIndex = i;
|
1260 |
+
markerMesh.name = results.data[0][i];
|
1261 |
+
scene.add(markerMesh);
|
1262 |
+
self.markerMeshes.push(markerMesh);
|
1263 |
+
}
|
1264 |
+
|
1265 |
+
self.markerNames = results.data[0];
|
1266 |
+
|
1267 |
+
for (f=1;f<results.data.length;f++) {
|
1268 |
+
self.markerdata[f-1] = [];
|
1269 |
+
for (m=0;m<results.data[f].length-3;m+=3) {
|
1270 |
+
marker = {};
|
1271 |
+
marker.x = results.data[f][m];
|
1272 |
+
marker.y = results.data[f][m+1];
|
1273 |
+
marker.z = results.data[f][m+2];
|
1274 |
+
marker.name = self.markerNames[m];
|
1275 |
+
|
1276 |
+
self.markerdata[f-1].push(marker);
|
1277 |
+
}
|
1278 |
+
}
|
1279 |
+
|
1280 |
+
self.frameCount = self.markerdata.length;
|
1281 |
+
self.log("Done parsing!");
|
1282 |
+
self.ready = true;
|
1283 |
+
}
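// Expected input layout (illustrative values): row 0 names each marker once per
// x/y/z triplet and every later row is one frame, three floats per marker:
//   Head,,,Neck,,,Hip,,,
//   0.0,171.2,0.0,0.0,155.4,0.1,...
// Marker m of frame f then occupies columns 3m .. 3m+2.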
|
1284 |
+
|
1285 |
+
this.setOriginPosition = function (x, y, z) {
|
1286 |
+
self.originPosition.set(x,y,z);
|
1287 |
+
};
|
1288 |
+
|
1289 |
+
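// NOTE: the method below looks copied from BVHCharacter; C3DCharacter defines
// markerMeshes but never rootMeshes or jointMeshes, so calling it will throw.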
this.setSkeletonScale = function(s) {
|
1290 |
+
self.rootMeshes.forEach(function (c) {
|
1291 |
+
c.scale.set(s,s,s);
|
1292 |
+
});
|
1293 |
+
self.jointMeshes[0].scale.set(s,s,s);
|
1294 |
+
self.jointMeshes[0].position.multiplyScalar(s);
|
1295 |
+
};
|
1296 |
+
|
1297 |
+
|
1298 |
+
this.animFrame = function (frame) {
|
1299 |
+
for (m=0;m<self.markerMeshes.length; m++) {
|
1300 |
+
self.markerMeshes[m].position.set(
|
1301 |
+
self.markerdata[frame][m].x * self.scale + self.originPosition.x,
|
1302 |
+
self.markerdata[frame][m].y * self.scale + self.originPosition.y,
|
1303 |
+
self.markerdata[frame][m].z * self.scale + self.originPosition.z);
|
1304 |
+
}
|
1305 |
+
};
|
1306 |
+
};
|
1307 |
+
|
1308 |
+
module.exports = C3DCharacter;
|
1309 |
+
|
1310 |
+
/***/ }
|
1311 |
+
/******/ ]);
|
1312 |
+
//# sourceMappingURL=mocapjs.js.map
|
dataloaders/pymo/mocapplayer/libs/pace.min.js
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
/*! pace 1.0.2 */
|
2 |
+
(function(){var a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z,A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X=[].slice,Y={}.hasOwnProperty,Z=function(a,b){function c(){this.constructor=a}for(var d in b)Y.call(b,d)&&(a[d]=b[d]);return c.prototype=b.prototype,a.prototype=new c,a.__super__=b.prototype,a},$=[].indexOf||function(a){for(var b=0,c=this.length;c>b;b++)if(b in this&&this[b]===a)return b;return-1};for(u={catchupTime:100,initialRate:.03,minTime:250,ghostTime:100,maxProgressPerFrame:20,easeFactor:1.25,startOnPageLoad:!0,restartOnPushState:!0,restartOnRequestAfter:500,target:"body",elements:{checkInterval:100,selectors:["body"]},eventLag:{minSamples:10,sampleCount:3,lagThreshold:3},ajax:{trackMethods:["GET"],trackWebSockets:!0,ignoreURLs:[]}},C=function(){var a;return null!=(a="undefined"!=typeof performance&&null!==performance&&"function"==typeof performance.now?performance.now():void 0)?a:+new Date},E=window.requestAnimationFrame||window.mozRequestAnimationFrame||window.webkitRequestAnimationFrame||window.msRequestAnimationFrame,t=window.cancelAnimationFrame||window.mozCancelAnimationFrame,null==E&&(E=function(a){return setTimeout(a,50)},t=function(a){return clearTimeout(a)}),G=function(a){var b,c;return b=C(),(c=function(){var d;return d=C()-b,d>=33?(b=C(),a(d,function(){return E(c)})):setTimeout(c,33-d)})()},F=function(){var a,b,c;return c=arguments[0],b=arguments[1],a=3<=arguments.length?X.call(arguments,2):[],"function"==typeof c[b]?c[b].apply(c,a):c[b]},v=function(){var a,b,c,d,e,f,g;for(b=arguments[0],d=2<=arguments.length?X.call(arguments,1):[],f=0,g=d.length;g>f;f++)if(c=d[f])for(a in c)Y.call(c,a)&&(e=c[a],null!=b[a]&&"object"==typeof b[a]&&null!=e&&"object"==typeof e?v(b[a],e):b[a]=e);return b},q=function(a){var b,c,d,e,f;for(c=b=0,e=0,f=a.length;f>e;e++)d=a[e],c+=Math.abs(d),b++;return c/b},x=function(a,b){var c,d,e;if(null==a&&(a="options"),null==b&&(b=!0),e=document.querySelector("[data-pace-"+a+"]")){if(c=e.getAttribute("data-pace-"+a),!b)return c;try{return JSON.parse(c)}catch(f){return d=f,"undefined"!=typeof console&&null!==console?console.error("Error parsing inline pace options",d):void 0}}},g=function(){function a(){}return a.prototype.on=function(a,b,c,d){var e;return null==d&&(d=!1),null==this.bindings&&(this.bindings={}),null==(e=this.bindings)[a]&&(e[a]=[]),this.bindings[a].push({handler:b,ctx:c,once:d})},a.prototype.once=function(a,b,c){return this.on(a,b,c,!0)},a.prototype.off=function(a,b){var c,d,e;if(null!=(null!=(d=this.bindings)?d[a]:void 0)){if(null==b)return delete this.bindings[a];for(c=0,e=[];c<this.bindings[a].length;)e.push(this.bindings[a][c].handler===b?this.bindings[a].splice(c,1):c++);return e}},a.prototype.trigger=function(){var a,b,c,d,e,f,g,h,i;if(c=arguments[0],a=2<=arguments.length?X.call(arguments,1):[],null!=(g=this.bindings)?g[c]:void 0){for(e=0,i=[];e<this.bindings[c].length;)h=this.bindings[c][e],d=h.handler,b=h.ctx,f=h.once,d.apply(null!=b?b:this,a),i.push(f?this.bindings[c].splice(e,1):e++);return i}},a}(),j=window.Pace||{},window.Pace=j,v(j,g.prototype),D=j.options=v({},u,window.paceOptions,x()),U=["ajax","document","eventLag","elements"],Q=0,S=U.length;S>Q;Q++)K=U[Q],D[K]===!0&&(D[K]=u[K]);i=function(a){function b(){return V=b.__super__.constructor.apply(this,arguments)}return Z(b,a),b}(Error),b=function(){function a(){this.progress=0}return a.prototype.getElement=function(){var a;if(null==this.el){if(a=document.querySelector(D.target),!a)throw new i;this.el=document.createElement("div"),this.el.className="pace 
pace-active",document.body.className=document.body.className.replace(/pace-done/g,""),document.body.className+=" pace-running",this.el.innerHTML='<div class="pace-progress">\n <div class="pace-progress-inner"></div>\n</div>\n<div class="pace-activity"></div>',null!=a.firstChild?a.insertBefore(this.el,a.firstChild):a.appendChild(this.el)}return this.el},a.prototype.finish=function(){var a;return a=this.getElement(),a.className=a.className.replace("pace-active",""),a.className+=" pace-inactive",document.body.className=document.body.className.replace("pace-running",""),document.body.className+=" pace-done"},a.prototype.update=function(a){return this.progress=a,this.render()},a.prototype.destroy=function(){try{this.getElement().parentNode.removeChild(this.getElement())}catch(a){i=a}return this.el=void 0},a.prototype.render=function(){var a,b,c,d,e,f,g;if(null==document.querySelector(D.target))return!1;for(a=this.getElement(),d="translate3d("+this.progress+"%, 0, 0)",g=["webkitTransform","msTransform","transform"],e=0,f=g.length;f>e;e++)b=g[e],a.children[0].style[b]=d;return(!this.lastRenderedProgress||this.lastRenderedProgress|0!==this.progress|0)&&(a.children[0].setAttribute("data-progress-text",""+(0|this.progress)+"%"),this.progress>=100?c="99":(c=this.progress<10?"0":"",c+=0|this.progress),a.children[0].setAttribute("data-progress",""+c)),this.lastRenderedProgress=this.progress},a.prototype.done=function(){return this.progress>=100},a}(),h=function(){function a(){this.bindings={}}return a.prototype.trigger=function(a,b){var c,d,e,f,g;if(null!=this.bindings[a]){for(f=this.bindings[a],g=[],d=0,e=f.length;e>d;d++)c=f[d],g.push(c.call(this,b));return g}},a.prototype.on=function(a,b){var c;return null==(c=this.bindings)[a]&&(c[a]=[]),this.bindings[a].push(b)},a}(),P=window.XMLHttpRequest,O=window.XDomainRequest,N=window.WebSocket,w=function(a,b){var c,d,e;e=[];for(d in b.prototype)try{e.push(null==a[d]&&"function"!=typeof b[d]?"function"==typeof Object.defineProperty?Object.defineProperty(a,d,{get:function(){return b.prototype[d]},configurable:!0,enumerable:!0}):a[d]=b.prototype[d]:void 0)}catch(f){c=f}return e},A=[],j.ignore=function(){var a,b,c;return b=arguments[0],a=2<=arguments.length?X.call(arguments,1):[],A.unshift("ignore"),c=b.apply(null,a),A.shift(),c},j.track=function(){var a,b,c;return b=arguments[0],a=2<=arguments.length?X.call(arguments,1):[],A.unshift("track"),c=b.apply(null,a),A.shift(),c},J=function(a){var b;if(null==a&&(a="GET"),"track"===A[0])return"force";if(!A.length&&D.ajax){if("socket"===a&&D.ajax.trackWebSockets)return!0;if(b=a.toUpperCase(),$.call(D.ajax.trackMethods,b)>=0)return!0}return!1},k=function(a){function b(){var a,c=this;b.__super__.constructor.apply(this,arguments),a=function(a){var b;return b=a.open,a.open=function(d,e){return J(d)&&c.trigger("request",{type:d,url:e,request:a}),b.apply(a,arguments)}},window.XMLHttpRequest=function(b){var c;return c=new P(b),a(c),c};try{w(window.XMLHttpRequest,P)}catch(d){}if(null!=O){window.XDomainRequest=function(){var b;return b=new O,a(b),b};try{w(window.XDomainRequest,O)}catch(d){}}if(null!=N&&D.ajax.trackWebSockets){window.WebSocket=function(a,b){var d;return d=null!=b?new N(a,b):new N(a),J("socket")&&c.trigger("request",{type:"socket",url:a,protocols:b,request:d}),d};try{w(window.WebSocket,N)}catch(d){}}}return Z(b,a),b}(h),R=null,y=function(){return null==R&&(R=new k),R},I=function(a){var b,c,d,e;for(e=D.ajax.ignoreURLs,c=0,d=e.length;d>c;c++)if(b=e[c],"string"==typeof b){if(-1!==a.indexOf(b))return!0}else 
if(b.test(a))return!0;return!1},y().on("request",function(b){var c,d,e,f,g;return f=b.type,e=b.request,g=b.url,I(g)?void 0:j.running||D.restartOnRequestAfter===!1&&"force"!==J(f)?void 0:(d=arguments,c=D.restartOnRequestAfter||0,"boolean"==typeof c&&(c=0),setTimeout(function(){var b,c,g,h,i,k;if(b="socket"===f?e.readyState<2:0<(h=e.readyState)&&4>h){for(j.restart(),i=j.sources,k=[],c=0,g=i.length;g>c;c++){if(K=i[c],K instanceof a){K.watch.apply(K,d);break}k.push(void 0)}return k}},c))}),a=function(){function a(){var a=this;this.elements=[],y().on("request",function(){return a.watch.apply(a,arguments)})}return a.prototype.watch=function(a){var b,c,d,e;return d=a.type,b=a.request,e=a.url,I(e)?void 0:(c="socket"===d?new n(b):new o(b),this.elements.push(c))},a}(),o=function(){function a(a){var b,c,d,e,f,g,h=this;if(this.progress=0,null!=window.ProgressEvent)for(c=null,a.addEventListener("progress",function(a){return h.progress=a.lengthComputable?100*a.loaded/a.total:h.progress+(100-h.progress)/2},!1),g=["load","abort","timeout","error"],d=0,e=g.length;e>d;d++)b=g[d],a.addEventListener(b,function(){return h.progress=100},!1);else f=a.onreadystatechange,a.onreadystatechange=function(){var b;return 0===(b=a.readyState)||4===b?h.progress=100:3===a.readyState&&(h.progress=50),"function"==typeof f?f.apply(null,arguments):void 0}}return a}(),n=function(){function a(a){var b,c,d,e,f=this;for(this.progress=0,e=["error","open"],c=0,d=e.length;d>c;c++)b=e[c],a.addEventListener(b,function(){return f.progress=100},!1)}return a}(),d=function(){function a(a){var b,c,d,f;for(null==a&&(a={}),this.elements=[],null==a.selectors&&(a.selectors=[]),f=a.selectors,c=0,d=f.length;d>c;c++)b=f[c],this.elements.push(new e(b))}return a}(),e=function(){function a(a){this.selector=a,this.progress=0,this.check()}return a.prototype.check=function(){var a=this;return document.querySelector(this.selector)?this.done():setTimeout(function(){return a.check()},D.elements.checkInterval)},a.prototype.done=function(){return this.progress=100},a}(),c=function(){function a(){var a,b,c=this;this.progress=null!=(b=this.states[document.readyState])?b:100,a=document.onreadystatechange,document.onreadystatechange=function(){return null!=c.states[document.readyState]&&(c.progress=c.states[document.readyState]),"function"==typeof a?a.apply(null,arguments):void 0}}return a.prototype.states={loading:0,interactive:50,complete:100},a}(),f=function(){function a(){var a,b,c,d,e,f=this;this.progress=0,a=0,e=[],d=0,c=C(),b=setInterval(function(){var g;return g=C()-c-50,c=C(),e.push(g),e.length>D.eventLag.sampleCount&&e.shift(),a=q(e),++d>=D.eventLag.minSamples&&a<D.eventLag.lagThreshold?(f.progress=100,clearInterval(b)):f.progress=100*(3/(a+3))},50)}return a}(),m=function(){function a(a){this.source=a,this.last=this.sinceLastUpdate=0,this.rate=D.initialRate,this.catchup=0,this.progress=this.lastProgress=0,null!=this.source&&(this.progress=F(this.source,"progress"))}return a.prototype.tick=function(a,b){var c;return 
null==b&&(b=F(this.source,"progress")),b>=100&&(this.done=!0),b===this.last?this.sinceLastUpdate+=a:(this.sinceLastUpdate&&(this.rate=(b-this.last)/this.sinceLastUpdate),this.catchup=(b-this.progress)/D.catchupTime,this.sinceLastUpdate=0,this.last=b),b>this.progress&&(this.progress+=this.catchup*a),c=1-Math.pow(this.progress/100,D.easeFactor),this.progress+=c*this.rate*a,this.progress=Math.min(this.lastProgress+D.maxProgressPerFrame,this.progress),this.progress=Math.max(0,this.progress),this.progress=Math.min(100,this.progress),this.lastProgress=this.progress,this.progress},a}(),L=null,H=null,r=null,M=null,p=null,s=null,j.running=!1,z=function(){return D.restartOnPushState?j.restart():void 0},null!=window.history.pushState&&(T=window.history.pushState,window.history.pushState=function(){return z(),T.apply(window.history,arguments)}),null!=window.history.replaceState&&(W=window.history.replaceState,window.history.replaceState=function(){return z(),W.apply(window.history,arguments)}),l={ajax:a,elements:d,document:c,eventLag:f},(B=function(){var a,c,d,e,f,g,h,i;for(j.sources=L=[],g=["ajax","elements","document","eventLag"],c=0,e=g.length;e>c;c++)a=g[c],D[a]!==!1&&L.push(new l[a](D[a]));for(i=null!=(h=D.extraSources)?h:[],d=0,f=i.length;f>d;d++)K=i[d],L.push(new K(D));return j.bar=r=new b,H=[],M=new m})(),j.stop=function(){return j.trigger("stop"),j.running=!1,r.destroy(),s=!0,null!=p&&("function"==typeof t&&t(p),p=null),B()},j.restart=function(){return j.trigger("restart"),j.stop(),j.start()},j.go=function(){var a;return j.running=!0,r.render(),a=C(),s=!1,p=G(function(b,c){var d,e,f,g,h,i,k,l,n,o,p,q,t,u,v,w;for(l=100-r.progress,e=p=0,f=!0,i=q=0,u=L.length;u>q;i=++q)for(K=L[i],o=null!=H[i]?H[i]:H[i]=[],h=null!=(w=K.elements)?w:[K],k=t=0,v=h.length;v>t;k=++t)g=h[k],n=null!=o[k]?o[k]:o[k]=new m(g),f&=n.done,n.done||(e++,p+=n.tick(b));return d=p/e,r.update(M.tick(b,d)),r.done()||f||s?(r.update(100),j.trigger("done"),setTimeout(function(){return r.finish(),j.running=!1,j.trigger("hide")},Math.max(D.ghostTime,Math.max(D.minTime-(C()-a),0)))):c()})},j.start=function(a){v(D,a),j.running=!0;try{r.render()}catch(b){i=b}return document.querySelector(".pace")?(j.trigger("start"),j.go()):setTimeout(j.start,50)},"function"==typeof define&&define.amd?define(["pace"],function(){return j}):"object"==typeof exports?module.exports=j:D.startOnPageLoad&&j.start()}).call(this);
|
dataloaders/pymo/mocapplayer/libs/papaparse.min.js
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
/*!
|
2 |
+
Papa Parse
|
3 |
+
v4.1.2
|
4 |
+
https://github.com/mholt/PapaParse
|
5 |
+
*/
|
6 |
+
!function(e){"use strict";function t(t,r){if(r=r||{},r.worker&&S.WORKERS_SUPPORTED){var n=f();return n.userStep=r.step,n.userChunk=r.chunk,n.userComplete=r.complete,n.userError=r.error,r.step=m(r.step),r.chunk=m(r.chunk),r.complete=m(r.complete),r.error=m(r.error),delete r.worker,void n.postMessage({input:t,config:r,workerId:n.id})}var o=null;return"string"==typeof t?o=r.download?new i(r):new a(r):(e.File&&t instanceof File||t instanceof Object)&&(o=new s(r)),o.stream(t)}function r(e,t){function r(){"object"==typeof t&&("string"==typeof t.delimiter&&1==t.delimiter.length&&-1==S.BAD_DELIMITERS.indexOf(t.delimiter)&&(u=t.delimiter),("boolean"==typeof t.quotes||t.quotes instanceof Array)&&(o=t.quotes),"string"==typeof t.newline&&(h=t.newline))}function n(e){if("object"!=typeof e)return[];var t=[];for(var r in e)t.push(r);return t}function i(e,t){var r="";"string"==typeof e&&(e=JSON.parse(e)),"string"==typeof t&&(t=JSON.parse(t));var n=e instanceof Array&&e.length>0,i=!(t[0]instanceof Array);if(n){for(var a=0;a<e.length;a++)a>0&&(r+=u),r+=s(e[a],a);t.length>0&&(r+=h)}for(var o=0;o<t.length;o++){for(var f=n?e.length:t[o].length,c=0;f>c;c++){c>0&&(r+=u);var d=n&&i?e[c]:c;r+=s(t[o][d],c)}o<t.length-1&&(r+=h)}return r}function s(e,t){if("undefined"==typeof e||null===e)return"";e=e.toString().replace(/"/g,'""');var r="boolean"==typeof o&&o||o instanceof Array&&o[t]||a(e,S.BAD_DELIMITERS)||e.indexOf(u)>-1||" "==e.charAt(0)||" "==e.charAt(e.length-1);return r?'"'+e+'"':e}function a(e,t){for(var r=0;r<t.length;r++)if(e.indexOf(t[r])>-1)return!0;return!1}var o=!1,u=",",h="\r\n";if(r(),"string"==typeof e&&(e=JSON.parse(e)),e instanceof Array){if(!e.length||e[0]instanceof Array)return i(null,e);if("object"==typeof e[0])return i(n(e[0]),e)}else if("object"==typeof e)return"string"==typeof e.data&&(e.data=JSON.parse(e.data)),e.data instanceof Array&&(e.fields||(e.fields=e.data[0]instanceof Array?e.fields:n(e.data[0])),e.data[0]instanceof Array||"object"==typeof e.data[0]||(e.data=[e.data])),i(e.fields||[],e.data||[]);throw"exception: Unable to serialize unrecognized input"}function n(t){function r(e){var t=_(e);t.chunkSize=parseInt(t.chunkSize),e.step||e.chunk||(t.chunkSize=null),this._handle=new o(t),this._handle.streamer=this,this._config=t}this._handle=null,this._paused=!1,this._finished=!1,this._input=null,this._baseIndex=0,this._partialLine="",this._rowCount=0,this._start=0,this._nextChunk=null,this.isFirstChunk=!0,this._completeResults={data:[],errors:[],meta:{}},r.call(this,t),this.parseChunk=function(t){if(this.isFirstChunk&&m(this._config.beforeFirstChunk)){var r=this._config.beforeFirstChunk(t);void 0!==r&&(t=r)}this.isFirstChunk=!1;var n=this._partialLine+t;this._partialLine="";var i=this._handle.parse(n,this._baseIndex,!this._finished);if(!this._handle.paused()&&!this._handle.aborted()){var s=i.meta.cursor;this._finished||(this._partialLine=n.substring(s-this._baseIndex),this._baseIndex=s),i&&i.data&&(this._rowCount+=i.data.length);var a=this._finished||this._config.preview&&this._rowCount>=this._config.preview;if(y)e.postMessage({results:i,workerId:S.WORKER_ID,finished:a});else if(m(this._config.chunk)){if(this._config.chunk(i,this._handle),this._paused)return;i=void 0,this._completeResults=void 0}return 
this._config.step||this._config.chunk||(this._completeResults.data=this._completeResults.data.concat(i.data),this._completeResults.errors=this._completeResults.errors.concat(i.errors),this._completeResults.meta=i.meta),!a||!m(this._config.complete)||i&&i.meta.aborted||this._config.complete(this._completeResults),a||i&&i.meta.paused||this._nextChunk(),i}},this._sendError=function(t){m(this._config.error)?this._config.error(t):y&&this._config.error&&e.postMessage({workerId:S.WORKER_ID,error:t,finished:!1})}}function i(e){function t(e){var t=e.getResponseHeader("Content-Range");return parseInt(t.substr(t.lastIndexOf("/")+1))}e=e||{},e.chunkSize||(e.chunkSize=S.RemoteChunkSize),n.call(this,e);var r;this._nextChunk=k?function(){this._readChunk(),this._chunkLoaded()}:function(){this._readChunk()},this.stream=function(e){this._input=e,this._nextChunk()},this._readChunk=function(){if(this._finished)return void this._chunkLoaded();if(r=new XMLHttpRequest,k||(r.onload=g(this._chunkLoaded,this),r.onerror=g(this._chunkError,this)),r.open("GET",this._input,!k),this._config.chunkSize){var e=this._start+this._config.chunkSize-1;r.setRequestHeader("Range","bytes="+this._start+"-"+e),r.setRequestHeader("If-None-Match","webkit-no-cache")}try{r.send()}catch(t){this._chunkError(t.message)}k&&0==r.status?this._chunkError():this._start+=this._config.chunkSize},this._chunkLoaded=function(){if(4==r.readyState){if(r.status<200||r.status>=400)return void this._chunkError();this._finished=!this._config.chunkSize||this._start>t(r),this.parseChunk(r.responseText)}},this._chunkError=function(e){var t=r.statusText||e;this._sendError(t)}}function s(e){e=e||{},e.chunkSize||(e.chunkSize=S.LocalChunkSize),n.call(this,e);var t,r,i="undefined"!=typeof FileReader;this.stream=function(e){this._input=e,r=e.slice||e.webkitSlice||e.mozSlice,i?(t=new FileReader,t.onload=g(this._chunkLoaded,this),t.onerror=g(this._chunkError,this)):t=new FileReaderSync,this._nextChunk()},this._nextChunk=function(){this._finished||this._config.preview&&!(this._rowCount<this._config.preview)||this._readChunk()},this._readChunk=function(){var e=this._input;if(this._config.chunkSize){var n=Math.min(this._start+this._config.chunkSize,this._input.size);e=r.call(e,this._start,n)}var s=t.readAsText(e,this._config.encoding);i||this._chunkLoaded({target:{result:s}})},this._chunkLoaded=function(e){this._start+=this._config.chunkSize,this._finished=!this._config.chunkSize||this._start>=this._input.size,this.parseChunk(e.target.result)},this._chunkError=function(){this._sendError(t.error)}}function a(e){e=e||{},n.call(this,e);var t,r;this.stream=function(e){return t=e,r=e,this._nextChunk()},this._nextChunk=function(){if(!this._finished){var e=this._config.chunkSize,t=e?r.substr(0,e):r;return r=e?r.substr(e):"",this._finished=!r,this.parseChunk(t)}}}function o(e){function t(){if(b&&d&&(h("Delimiter","UndetectableDelimiter","Unable to auto-detect delimiting character; defaulted to '"+S.DefaultDelimiter+"'"),d=!1),e.skipEmptyLines)for(var t=0;t<b.data.length;t++)1==b.data[t].length&&""==b.data[t][0]&&b.data.splice(t--,1);return r()&&n(),i()}function r(){return e.header&&0==y.length}function n(){if(b){for(var e=0;r()&&e<b.data.length;e++)for(var t=0;t<b.data[e].length;t++)y.push(b.data[e][t]);b.data.splice(0,1)}}function i(){if(!b||!e.header&&!e.dynamicTyping)return b;for(var t=0;t<b.data.length;t++){for(var r={},n=0;n<b.data[t].length;n++){if(e.dynamicTyping){var 
i=b.data[t][n];b.data[t][n]="true"==i||"TRUE"==i?!0:"false"==i||"FALSE"==i?!1:o(i)}e.header&&(n>=y.length?(r.__parsed_extra||(r.__parsed_extra=[]),r.__parsed_extra.push(b.data[t][n])):r[y[n]]=b.data[t][n])}e.header&&(b.data[t]=r,n>y.length?h("FieldMismatch","TooManyFields","Too many fields: expected "+y.length+" fields but parsed "+n,t):n<y.length&&h("FieldMismatch","TooFewFields","Too few fields: expected "+y.length+" fields but parsed "+n,t))}return e.header&&b.meta&&(b.meta.fields=y),b}function s(t){for(var r,n,i,s=[","," ","|",";",S.RECORD_SEP,S.UNIT_SEP],a=0;a<s.length;a++){var o=s[a],h=0,f=0;i=void 0;for(var c=new u({delimiter:o,preview:10}).parse(t),d=0;d<c.data.length;d++){var l=c.data[d].length;f+=l,"undefined"!=typeof i?l>1&&(h+=Math.abs(l-i),i=l):i=l}c.data.length>0&&(f/=c.data.length),("undefined"==typeof n||n>h)&&f>1.99&&(n=h,r=o)}return e.delimiter=r,{successful:!!r,bestDelimiter:r}}function a(e){e=e.substr(0,1048576);var t=e.split("\r");if(1==t.length)return"\n";for(var r=0,n=0;n<t.length;n++)"\n"==t[n][0]&&r++;return r>=t.length/2?"\r\n":"\r"}function o(e){var t=l.test(e);return t?parseFloat(e):e}function h(e,t,r,n){b.errors.push({type:e,code:t,message:r,row:n})}var f,c,d,l=/^\s*-?(\d*\.?\d+|\d+\.?\d*)(e[-+]?\d+)?\s*$/i,p=this,g=0,v=!1,k=!1,y=[],b={data:[],errors:[],meta:{}};if(m(e.step)){var R=e.step;e.step=function(n){if(b=n,r())t();else{if(t(),0==b.data.length)return;g+=n.data.length,e.preview&&g>e.preview?c.abort():R(b,p)}}}this.parse=function(r,n,i){if(e.newline||(e.newline=a(r)),d=!1,!e.delimiter){var o=s(r);o.successful?e.delimiter=o.bestDelimiter:(d=!0,e.delimiter=S.DefaultDelimiter),b.meta.delimiter=e.delimiter}var h=_(e);return e.preview&&e.header&&h.preview++,f=r,c=new u(h),b=c.parse(f,n,i),t(),v?{meta:{paused:!0}}:b||{meta:{paused:!1}}},this.paused=function(){return v},this.pause=function(){v=!0,c.abort(),f=f.substr(c.getCharIndex())},this.resume=function(){v=!1,p.streamer.parseChunk(f)},this.aborted=function(){return k},this.abort=function(){k=!0,c.abort(),b.meta.aborted=!0,m(e.complete)&&e.complete(b),f=""}}function u(e){e=e||{};var t=e.delimiter,r=e.newline,n=e.comments,i=e.step,s=e.preview,a=e.fastMode;if(("string"!=typeof t||S.BAD_DELIMITERS.indexOf(t)>-1)&&(t=","),n===t)throw"Comment character same as delimiter";n===!0?n="#":("string"!=typeof n||S.BAD_DELIMITERS.indexOf(n)>-1)&&(n=!1),"\n"!=r&&"\r"!=r&&"\r\n"!=r&&(r="\n");var o=0,u=!1;this.parse=function(e,h,f){function c(e){b.push(e),S=o}function d(t){return f?p():("undefined"==typeof t&&(t=e.substr(o)),w.push(t),o=g,c(w),y&&_(),p())}function l(t){o=t,c(w),w=[],O=e.indexOf(r,o)}function p(e){return{data:b,errors:R,meta:{delimiter:t,linebreak:r,aborted:u,truncated:!!e,cursor:S+(h||0)}}}function _(){i(p()),b=[],R=[]}if("string"!=typeof e)throw"Input must be a string";var g=e.length,m=t.length,v=r.length,k=n.length,y="function"==typeof i;o=0;var b=[],R=[],w=[],S=0;if(!e)return p();if(a||a!==!1&&-1===e.indexOf('"')){for(var C=e.split(r),E=0;E<C.length;E++){var w=C[E];if(o+=w.length,E!==C.length-1)o+=r.length;else if(f)return p();if(!n||w.substr(0,k)!=n){if(y){if(b=[],c(w.split(t)),_(),u)return p()}else c(w.split(t));if(s&&E>=s)return b=b.slice(0,s),p(!0)}}return p()}for(var x=e.indexOf(t,o),O=e.indexOf(r,o);;)if('"'!=e[o])if(n&&0===w.length&&e.substr(o,k)===n){if(-1==O)return p();o=O+v,O=e.indexOf(r,o),x=e.indexOf(t,o)}else if(-1!==x&&(O>x||-1===O))w.push(e.substring(o,x)),o=x+m,x=e.indexOf(t,o);else{if(-1===O)break;if(w.push(e.substring(o,O)),l(O+v),y&&(_(),u))return p();if(s&&b.length>=s)return 
p(!0)}else{var I=o;for(o++;;){var I=e.indexOf('"',I+1);if(-1===I)return f||R.push({type:"Quotes",code:"MissingQuotes",message:"Quoted field unterminated",row:b.length,index:o}),d();if(I===g-1){var D=e.substring(o,I).replace(/""/g,'"');return d(D)}if('"'!=e[I+1]){if(e[I+1]==t){w.push(e.substring(o,I).replace(/""/g,'"')),o=I+1+m,x=e.indexOf(t,o),O=e.indexOf(r,o);break}if(e.substr(I+1,v)===r){if(w.push(e.substring(o,I).replace(/""/g,'"')),l(I+1+v),x=e.indexOf(t,o),y&&(_(),u))return p();if(s&&b.length>=s)return p(!0);break}}else I++}}return d()},this.abort=function(){u=!0},this.getCharIndex=function(){return o}}function h(){var e=document.getElementsByTagName("script");return e.length?e[e.length-1].src:""}function f(){if(!S.WORKERS_SUPPORTED)return!1;if(!b&&null===S.SCRIPT_PATH)throw new Error("Script path cannot be determined automatically when Papa Parse is loaded asynchronously. You need to set Papa.SCRIPT_PATH manually.");var t=S.SCRIPT_PATH||v;t+=(-1!==t.indexOf("?")?"&":"?")+"papaworker";var r=new e.Worker(t);return r.onmessage=c,r.id=w++,R[r.id]=r,r}function c(e){var t=e.data,r=R[t.workerId],n=!1;if(t.error)r.userError(t.error,t.file);else if(t.results&&t.results.data){var i=function(){n=!0,d(t.workerId,{data:[],errors:[],meta:{aborted:!0}})},s={abort:i,pause:l,resume:l};if(m(r.userStep)){for(var a=0;a<t.results.data.length&&(r.userStep({data:[t.results.data[a]],errors:t.results.errors,meta:t.results.meta},s),!n);a++);delete t.results}else m(r.userChunk)&&(r.userChunk(t.results,s,t.file),delete t.results)}t.finished&&!n&&d(t.workerId,t.results)}function d(e,t){var r=R[e];m(r.userComplete)&&r.userComplete(t),r.terminate(),delete R[e]}function l(){throw"Not implemented."}function p(t){var r=t.data;if("undefined"==typeof S.WORKER_ID&&r&&(S.WORKER_ID=r.workerId),"string"==typeof r.input)e.postMessage({workerId:S.WORKER_ID,results:S.parse(r.input,r.config),finished:!0});else if(e.File&&r.input instanceof File||r.input instanceof Object){var n=S.parse(r.input,r.config);n&&e.postMessage({workerId:S.WORKER_ID,results:n,finished:!0})}}function _(e){if("object"!=typeof e)return e;var t=e instanceof Array?[]:{};for(var r in e)t[r]=_(e[r]);return t}function g(e,t){return function(){e.apply(t,arguments)}}function m(e){return"function"==typeof e}var v,k=!e.document&&!!e.postMessage,y=k&&/(\?|&)papaworker(=|&|$)/.test(e.location.search),b=!1,R={},w=0,S={};if(S.parse=t,S.unparse=r,S.RECORD_SEP=String.fromCharCode(30),S.UNIT_SEP=String.fromCharCode(31),S.BYTE_ORDER_MARK="",S.BAD_DELIMITERS=["\r","\n",'"',S.BYTE_ORDER_MARK],S.WORKERS_SUPPORTED=!k&&!!e.Worker,S.SCRIPT_PATH=null,S.LocalChunkSize=10485760,S.RemoteChunkSize=5242880,S.DefaultDelimiter=",",S.Parser=u,S.ParserHandle=o,S.NetworkStreamer=i,S.FileStreamer=s,S.StringStreamer=a,"undefined"!=typeof module&&module.exports?module.exports=S:m(e.define)&&e.define.amd?define(function(){return S}):e.Papa=S,e.jQuery){var C=e.jQuery;C.fn.parse=function(t){function r(){if(0==a.length)return void(m(t.complete)&&t.complete());var e=a[0];if(m(t.before)){var r=t.before(e.file,e.inputElem);if("object"==typeof r){if("abort"==r.action)return void n("AbortError",e.file,e.inputElem,r.reason);if("skip"==r.action)return void i();"object"==typeof r.config&&(e.instanceConfig=C.extend(e.instanceConfig,r.config))}else if("skip"==r)return void i()}var s=e.instanceConfig.complete;e.instanceConfig.complete=function(t){m(s)&&s(t,e.file,e.inputElem),i()},S.parse(e.file,e.instanceConfig)}function n(e,r,n,i){m(t.error)&&t.error({name:e},r,n,i)}function i(){a.splice(0,1),r()}var 
s=t.config||{},a=[];return this.each(function(){var t="INPUT"==C(this).prop("tagName").toUpperCase()&&"file"==C(this).attr("type").toLowerCase()&&e.FileReader;if(!t||!this.files||0==this.files.length)return!0;for(var r=0;r<this.files.length;r++)a.push({file:this.files[r],inputElem:this,instanceConfig:C.extend({},s)})}),r(),this}}y?e.onmessage=p:S.WORKERS_SUPPORTED&&(v=h(),document.body?document.addEventListener("DOMContentLoaded",function(){b=!0},!0):b=!0),i.prototype=Object.create(n.prototype),i.prototype.constructor=i,s.prototype=Object.create(n.prototype),s.prototype.constructor=s,a.prototype=Object.create(a.prototype),a.prototype.constructor=a}("undefined"!=typeof window?window:this);
|
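For reference, a minimal sketch of how Papa Parse v4 is typically invoked; the CSV string and field names below are hypothetical and not taken from this repo:

var csv = "frame,x,y\n0,1.0,2.0\n1,1.5,2.5";
var results = Papa.parse(csv, {
  header: true,        // first row becomes the field names
  dynamicTyping: true  // numeric strings are converted to numbers
});
console.log(results.data[0].x); // 1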
dataloaders/pymo/mocapplayer/libs/threejs/Detector.js
ADDED
@@ -0,0 +1,78 @@
1 |
+
/**
|
2 |
+
* @author alteredq / http://alteredqualia.com/
|
3 |
+
* @author mr.doob / http://mrdoob.com/
|
4 |
+
*/
|
5 |
+
|
6 |
+
var Detector = {
|
7 |
+
|
8 |
+
canvas: !! window.CanvasRenderingContext2D,
|
9 |
+
webgl: ( function () {
|
10 |
+
|
11 |
+
try {
|
12 |
+
|
13 |
+
var canvas = document.createElement( 'canvas' ); return !! ( window.WebGLRenderingContext && ( canvas.getContext( 'webgl' ) || canvas.getContext( 'experimental-webgl' ) ) );
|
14 |
+
|
15 |
+
} catch ( e ) {
|
16 |
+
|
17 |
+
return false;
|
18 |
+
|
19 |
+
}
|
20 |
+
|
21 |
+
} )(),
|
22 |
+
workers: !! window.Worker,
|
23 |
+
fileapi: window.File && window.FileReader && window.FileList && window.Blob,
|
24 |
+
|
25 |
+
getWebGLErrorMessage: function () {
|
26 |
+
|
27 |
+
var element = document.createElement( 'div' );
|
28 |
+
element.id = 'webgl-error-message';
|
29 |
+
element.style.fontFamily = 'monospace';
|
30 |
+
element.style.fontSize = '13px';
|
31 |
+
element.style.fontWeight = 'normal';
|
32 |
+
element.style.textAlign = 'center';
|
33 |
+
element.style.background = '#fff';
|
34 |
+
element.style.color = '#000';
|
35 |
+
element.style.padding = '1.5em';
|
36 |
+
element.style.width = '400px';
|
37 |
+
element.style.margin = '5em auto 0';
|
38 |
+
|
39 |
+
if ( ! this.webgl ) {
|
40 |
+
|
41 |
+
element.innerHTML = window.WebGLRenderingContext ? [
|
42 |
+
'Your graphics card does not seem to support <a href="http://khronos.org/webgl/wiki/Getting_a_WebGL_Implementation" style="color:#000">WebGL</a>.<br />',
|
43 |
+
'Find out how to get it <a href="http://get.webgl.org/" style="color:#000">here</a>.'
|
44 |
+
].join( '\n' ) : [
|
45 |
+
'Your browser does not seem to support <a href="http://khronos.org/webgl/wiki/Getting_a_WebGL_Implementation" style="color:#000">WebGL</a>.<br/>',
|
46 |
+
'Find out how to get it <a href="http://get.webgl.org/" style="color:#000">here</a>.'
|
47 |
+
].join( '\n' );
|
48 |
+
|
49 |
+
}
|
50 |
+
|
51 |
+
return element;
|
52 |
+
|
53 |
+
},
|
54 |
+
|
55 |
+
addGetWebGLMessage: function ( parameters ) {
|
56 |
+
|
57 |
+
var parent, id, element;
|
58 |
+
|
59 |
+
parameters = parameters || {};
|
60 |
+
|
61 |
+
parent = parameters.parent !== undefined ? parameters.parent : document.body;
|
62 |
+
id = parameters.id !== undefined ? parameters.id : 'oldie';
|
63 |
+
|
64 |
+
element = Detector.getWebGLErrorMessage();
|
65 |
+
element.id = id;
|
66 |
+
|
67 |
+
parent.appendChild( element );
|
68 |
+
|
69 |
+
}
|
70 |
+
|
71 |
+
};
|
72 |
+
|
73 |
+
// browserify support
|
74 |
+
if ( typeof module === 'object' ) {
|
75 |
+
|
76 |
+
module.exports = Detector;
|
77 |
+
|
78 |
+
}
|
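For reference, the usual Detector.js pattern is a capability check before creating a renderer; a minimal sketch, where the init function name is a placeholder:

if ( Detector.webgl ) {
    initScene();                                                   // hypothetical scene setup
} else {
    document.body.appendChild( Detector.getWebGLErrorMessage() ); // shows the fallback message defined above
}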
dataloaders/pymo/mocapplayer/libs/threejs/OrbitControls.js
ADDED
@@ -0,0 +1,1037 @@
1 |
+
/**
|
2 |
+
* @author qiao / https://github.com/qiao
|
3 |
+
* @author mrdoob / http://mrdoob.com
|
4 |
+
* @author alteredq / http://alteredqualia.com/
|
5 |
+
* @author WestLangley / http://github.com/WestLangley
|
6 |
+
* @author erich666 / http://erichaines.com
|
7 |
+
*/
|
8 |
+
|
9 |
+
// This set of controls performs orbiting, dollying (zooming), and panning.
|
10 |
+
// Unlike TrackballControls, it maintains the "up" direction object.up (+Y by default).
|
11 |
+
//
|
12 |
+
// Orbit - left mouse / touch: one finger move
|
13 |
+
// Zoom - middle mouse, or mousewheel / touch: two finger spread or squish
|
14 |
+
// Pan - right mouse, or arrow keys / touch: three finger swipe
|
15 |
+
|
16 |
+
THREE.OrbitControls = function ( object, domElement ) {
|
17 |
+
|
18 |
+
this.object = object;
|
19 |
+
|
20 |
+
this.domElement = ( domElement !== undefined ) ? domElement : document;
|
21 |
+
|
22 |
+
// Set to false to disable this control
|
23 |
+
this.enabled = true;
|
24 |
+
|
25 |
+
// "target" sets the location of focus, where the object orbits around
|
26 |
+
this.target = new THREE.Vector3();
|
27 |
+
|
28 |
+
// How far you can dolly in and out ( PerspectiveCamera only )
|
29 |
+
this.minDistance = 0;
|
30 |
+
this.maxDistance = Infinity;
|
31 |
+
|
32 |
+
// How far you can zoom in and out ( OrthographicCamera only )
|
33 |
+
this.minZoom = 0;
|
34 |
+
this.maxZoom = Infinity;
|
35 |
+
|
36 |
+
// How far you can orbit vertically, upper and lower limits.
|
37 |
+
// Range is 0 to Math.PI radians.
|
38 |
+
this.minPolarAngle = 0; // radians
|
39 |
+
this.maxPolarAngle = Math.PI; // radians
|
40 |
+
|
41 |
+
// How far you can orbit horizontally, upper and lower limits.
|
42 |
+
// If set, must be a sub-interval of the interval [ - Math.PI, Math.PI ].
|
43 |
+
this.minAzimuthAngle = - Infinity; // radians
|
44 |
+
this.maxAzimuthAngle = Infinity; // radians
|
45 |
+
|
46 |
+
// Set to true to enable damping (inertia)
|
47 |
+
// If damping is enabled, you must call controls.update() in your animation loop
|
48 |
+
this.enableDamping = false;
|
49 |
+
this.dampingFactor = 0.25;
|
50 |
+
|
51 |
+
// This option actually enables dollying in and out; left as "zoom" for backwards compatibility.
|
52 |
+
// Set to false to disable zooming
|
53 |
+
this.enableZoom = true;
|
54 |
+
this.zoomSpeed = 1.0;
|
55 |
+
|
56 |
+
// Set to false to disable rotating
|
57 |
+
this.enableRotate = true;
|
58 |
+
this.rotateSpeed = 1.0;
|
59 |
+
|
60 |
+
// Set to false to disable panning
|
61 |
+
this.enablePan = true;
|
62 |
+
this.keyPanSpeed = 7.0; // pixels moved per arrow key push
|
63 |
+
|
64 |
+
// Set to true to automatically rotate around the target
|
65 |
+
// If auto-rotate is enabled, you must call controls.update() in your animation loop
|
66 |
+
this.autoRotate = false;
|
67 |
+
this.autoRotateSpeed = 2.0; // 30 seconds per round when fps is 60
|
68 |
+
|
69 |
+
// Set to false to disable use of the keys
|
70 |
+
this.enableKeys = true;
|
71 |
+
|
72 |
+
// The four arrow keys
|
73 |
+
this.keys = { LEFT: 37, UP: 38, RIGHT: 39, BOTTOM: 40 };
|
74 |
+
|
75 |
+
// Mouse buttons
|
76 |
+
this.mouseButtons = { ORBIT: THREE.MOUSE.LEFT, ZOOM: THREE.MOUSE.MIDDLE, PAN: THREE.MOUSE.RIGHT };
|
77 |
+
|
78 |
+
// for reset
|
79 |
+
this.target0 = this.target.clone();
|
80 |
+
this.position0 = this.object.position.clone();
|
81 |
+
this.zoom0 = this.object.zoom;
|
82 |
+
|
83 |
+
//
|
84 |
+
// public methods
|
85 |
+
//
|
86 |
+
|
87 |
+
this.getPolarAngle = function () {
|
88 |
+
|
89 |
+
return spherical.phi;
|
90 |
+
|
91 |
+
};
|
92 |
+
|
93 |
+
this.getAzimuthalAngle = function () {
|
94 |
+
|
95 |
+
return spherical.theta;
|
96 |
+
|
97 |
+
};
|
98 |
+
|
99 |
+
this.reset = function () {
|
100 |
+
|
101 |
+
scope.target.copy( scope.target0 );
|
102 |
+
scope.object.position.copy( scope.position0 );
|
103 |
+
scope.object.zoom = scope.zoom0;
|
104 |
+
|
105 |
+
scope.object.updateProjectionMatrix();
|
106 |
+
scope.dispatchEvent( changeEvent );
|
107 |
+
|
108 |
+
scope.update();
|
109 |
+
|
110 |
+
state = STATE.NONE;
|
111 |
+
|
112 |
+
};
|
113 |
+
|
114 |
+
// this method is exposed, but perhaps it would be better if we can make it private...
|
115 |
+
this.update = function() {
|
116 |
+
|
117 |
+
var offset = new THREE.Vector3();
|
118 |
+
|
119 |
+
// so camera.up is the orbit axis
|
120 |
+
var quat = new THREE.Quaternion().setFromUnitVectors( object.up, new THREE.Vector3( 0, 1, 0 ) );
|
121 |
+
var quatInverse = quat.clone().inverse();
|
122 |
+
|
123 |
+
var lastPosition = new THREE.Vector3();
|
124 |
+
var lastQuaternion = new THREE.Quaternion();
|
125 |
+
|
126 |
+
return function () {
|
127 |
+
|
128 |
+
var position = scope.object.position;
|
129 |
+
|
130 |
+
offset.copy( position ).sub( scope.target );
|
131 |
+
|
132 |
+
// rotate offset to "y-axis-is-up" space
|
133 |
+
offset.applyQuaternion( quat );
|
134 |
+
|
135 |
+
// angle from z-axis around y-axis
|
136 |
+
spherical.setFromVector3( offset );
|
137 |
+
|
138 |
+
if ( scope.autoRotate && state === STATE.NONE ) {
|
139 |
+
|
140 |
+
rotateLeft( getAutoRotationAngle() );
|
141 |
+
|
142 |
+
}
|
143 |
+
|
144 |
+
spherical.theta += sphericalDelta.theta;
|
145 |
+
spherical.phi += sphericalDelta.phi;
|
146 |
+
|
147 |
+
// restrict theta to be between desired limits
|
148 |
+
spherical.theta = Math.max( scope.minAzimuthAngle, Math.min( scope.maxAzimuthAngle, spherical.theta ) );
|
149 |
+
|
150 |
+
// restrict phi to be between desired limits
|
151 |
+
spherical.phi = Math.max( scope.minPolarAngle, Math.min( scope.maxPolarAngle, spherical.phi ) );
|
152 |
+
|
153 |
+
spherical.makeSafe();
|
154 |
+
|
155 |
+
|
156 |
+
spherical.radius *= scale;
|
157 |
+
|
158 |
+
// restrict radius to be between desired limits
|
159 |
+
spherical.radius = Math.max( scope.minDistance, Math.min( scope.maxDistance, spherical.radius ) );
|
160 |
+
|
161 |
+
// move target to panned location
|
162 |
+
scope.target.add( panOffset );
|
163 |
+
|
164 |
+
offset.setFromSpherical( spherical );
|
165 |
+
|
166 |
+
// rotate offset back to "camera-up-vector-is-up" space
|
167 |
+
offset.applyQuaternion( quatInverse );
|
168 |
+
|
169 |
+
position.copy( scope.target ).add( offset );
|
170 |
+
|
171 |
+
scope.object.lookAt( scope.target );
|
172 |
+
|
173 |
+
if ( scope.enableDamping === true ) {
|
174 |
+
|
175 |
+
sphericalDelta.theta *= ( 1 - scope.dampingFactor );
|
176 |
+
sphericalDelta.phi *= ( 1 - scope.dampingFactor );
|
177 |
+
|
178 |
+
} else {
|
179 |
+
|
180 |
+
sphericalDelta.set( 0, 0, 0 );
|
181 |
+
|
182 |
+
}
|
183 |
+
|
184 |
+
scale = 1;
|
185 |
+
panOffset.set( 0, 0, 0 );
|
186 |
+
|
187 |
+
// update condition is:
|
188 |
+
// min(camera displacement, camera rotation in radians)^2 > EPS
|
189 |
+
// using small-angle approximation cos(x/2) = 1 - x^2 / 8
|
190 |
+
|
191 |
+
if ( zoomChanged ||
|
192 |
+
lastPosition.distanceToSquared( scope.object.position ) > EPS ||
|
193 |
+
8 * ( 1 - lastQuaternion.dot( scope.object.quaternion ) ) > EPS ) {
|
194 |
+
|
195 |
+
scope.dispatchEvent( changeEvent );
|
196 |
+
|
197 |
+
lastPosition.copy( scope.object.position );
|
198 |
+
lastQuaternion.copy( scope.object.quaternion );
|
199 |
+
zoomChanged = false;
|
200 |
+
|
201 |
+
return true;
|
202 |
+
|
203 |
+
}
|
204 |
+
|
205 |
+
return false;
|
206 |
+
|
207 |
+
};
|
208 |
+
|
209 |
+
}();
|
210 |
+
|
211 |
+
this.dispose = function() {
|
212 |
+
|
213 |
+
scope.domElement.removeEventListener( 'contextmenu', onContextMenu, false );
|
214 |
+
scope.domElement.removeEventListener( 'mousedown', onMouseDown, false );
|
215 |
+
scope.domElement.removeEventListener( 'mousewheel', onMouseWheel, false );
|
216 |
+
scope.domElement.removeEventListener( 'MozMousePixelScroll', onMouseWheel, false ); // firefox
|
217 |
+
|
218 |
+
scope.domElement.removeEventListener( 'touchstart', onTouchStart, false );
|
219 |
+
scope.domElement.removeEventListener( 'touchend', onTouchEnd, false );
|
220 |
+
scope.domElement.removeEventListener( 'touchmove', onTouchMove, false );
|
221 |
+
|
222 |
+
document.removeEventListener( 'mousemove', onMouseMove, false );
|
223 |
+
document.removeEventListener( 'mouseup', onMouseUp, false );
|
224 |
+
document.removeEventListener( 'mouseout', onMouseUp, false );
|
225 |
+
|
226 |
+
window.removeEventListener( 'keydown', onKeyDown, false );
|
227 |
+
|
228 |
+
//scope.dispatchEvent( { type: 'dispose' } ); // should this be added here?
|
229 |
+
|
230 |
+
};
|
231 |
+
|
232 |
+
//
|
233 |
+
// internals
|
234 |
+
//
|
235 |
+
|
236 |
+
var scope = this;
|
237 |
+
|
238 |
+
var changeEvent = { type: 'change' };
|
239 |
+
var startEvent = { type: 'start' };
|
240 |
+
var endEvent = { type: 'end' };
|
241 |
+
|
242 |
+
var STATE = { NONE : - 1, ROTATE : 0, DOLLY : 1, PAN : 2, TOUCH_ROTATE : 3, TOUCH_DOLLY : 4, TOUCH_PAN : 5 };
|
243 |
+
|
244 |
+
var state = STATE.NONE;
|
245 |
+
|
246 |
+
var EPS = 0.000001;
|
247 |
+
|
248 |
+
// current position in spherical coordinates
|
249 |
+
var spherical = new THREE.Spherical();
|
250 |
+
var sphericalDelta = new THREE.Spherical();
|
251 |
+
|
252 |
+
var scale = 1;
|
253 |
+
var panOffset = new THREE.Vector3();
|
254 |
+
var zoomChanged = false;
|
255 |
+
|
256 |
+
var rotateStart = new THREE.Vector2();
|
257 |
+
var rotateEnd = new THREE.Vector2();
|
258 |
+
var rotateDelta = new THREE.Vector2();
|
259 |
+
|
260 |
+
var panStart = new THREE.Vector2();
|
261 |
+
var panEnd = new THREE.Vector2();
|
262 |
+
var panDelta = new THREE.Vector2();
|
263 |
+
|
264 |
+
var dollyStart = new THREE.Vector2();
|
265 |
+
var dollyEnd = new THREE.Vector2();
|
266 |
+
var dollyDelta = new THREE.Vector2();
|
267 |
+
|
268 |
+
function getAutoRotationAngle() {
|
269 |
+
|
270 |
+
return 2 * Math.PI / 60 / 60 * scope.autoRotateSpeed;
|
271 |
+
|
272 |
+
}
|
273 |
+
|
274 |
+
function getZoomScale() {
|
275 |
+
|
276 |
+
return Math.pow( 0.95, scope.zoomSpeed );
|
277 |
+
|
278 |
+
}
|
279 |
+
|
280 |
+
function rotateLeft( angle ) {
|
281 |
+
|
282 |
+
sphericalDelta.theta -= angle;
|
283 |
+
|
284 |
+
}
|
285 |
+
|
286 |
+
function rotateUp( angle ) {
|
287 |
+
|
288 |
+
sphericalDelta.phi -= angle;
|
289 |
+
|
290 |
+
}
|
291 |
+
|
292 |
+
var panLeft = function() {
|
293 |
+
|
294 |
+
var v = new THREE.Vector3();
|
295 |
+
|
296 |
+
return function panLeft( distance, objectMatrix ) {
|
297 |
+
|
298 |
+
v.setFromMatrixColumn( objectMatrix, 0 ); // get X column of objectMatrix
|
299 |
+
v.multiplyScalar( - distance );
|
300 |
+
|
301 |
+
panOffset.add( v );
|
302 |
+
|
303 |
+
};
|
304 |
+
|
305 |
+
}();
|
306 |
+
|
307 |
+
var panUp = function() {
|
308 |
+
|
309 |
+
var v = new THREE.Vector3();
|
310 |
+
|
311 |
+
return function panUp( distance, objectMatrix ) {
|
312 |
+
|
313 |
+
v.setFromMatrixColumn( objectMatrix, 1 ); // get Y column of objectMatrix
|
314 |
+
v.multiplyScalar( distance );
|
315 |
+
|
316 |
+
panOffset.add( v );
|
317 |
+
|
318 |
+
};
|
319 |
+
|
320 |
+
}();
|
321 |
+
|
322 |
+
// deltaX and deltaY are in pixels; right and down are positive
|
323 |
+
var pan = function() {
|
324 |
+
|
325 |
+
var offset = new THREE.Vector3();
|
326 |
+
|
327 |
+
return function( deltaX, deltaY ) {
|
328 |
+
|
329 |
+
var element = scope.domElement === document ? scope.domElement.body : scope.domElement;
|
330 |
+
|
331 |
+
if ( scope.object instanceof THREE.PerspectiveCamera ) {
|
332 |
+
|
333 |
+
// perspective
|
334 |
+
var position = scope.object.position;
|
335 |
+
offset.copy( position ).sub( scope.target );
|
336 |
+
var targetDistance = offset.length();
|
337 |
+
|
338 |
+
// half of the fov is center to top of screen
|
339 |
+
targetDistance *= Math.tan( ( scope.object.fov / 2 ) * Math.PI / 180.0 );
|
340 |
+
|
341 |
+
// we actually don't use screenWidth, since perspective camera is fixed to screen height
|
342 |
+
panLeft( 2 * deltaX * targetDistance / element.clientHeight, scope.object.matrix );
|
343 |
+
panUp( 2 * deltaY * targetDistance / element.clientHeight, scope.object.matrix );
|
344 |
+
|
345 |
+
} else if ( scope.object instanceof THREE.OrthographicCamera ) {
|
346 |
+
|
347 |
+
// orthographic
|
348 |
+
panLeft( deltaX * ( scope.object.right - scope.object.left ) / scope.object.zoom / element.clientWidth, scope.object.matrix );
|
349 |
+
panUp( deltaY * ( scope.object.top - scope.object.bottom ) / scope.object.zoom / element.clientHeight, scope.object.matrix );
|
350 |
+
|
351 |
+
} else {
|
352 |
+
|
353 |
+
// camera neither orthographic nor perspective
|
354 |
+
console.warn( 'WARNING: OrbitControls.js encountered an unknown camera type - pan disabled.' );
|
355 |
+
scope.enablePan = false;
|
356 |
+
|
357 |
+
}
|
358 |
+
|
359 |
+
};
|
360 |
+
|
361 |
+
}();
|
362 |
+
|
363 |
+
function dollyIn( dollyScale ) {
|
364 |
+
|
365 |
+
if ( scope.object instanceof THREE.PerspectiveCamera ) {
|
366 |
+
|
367 |
+
scale /= dollyScale;
|
368 |
+
|
369 |
+
} else if ( scope.object instanceof THREE.OrthographicCamera ) {
|
370 |
+
|
371 |
+
scope.object.zoom = Math.max( scope.minZoom, Math.min( scope.maxZoom, scope.object.zoom * dollyScale ) );
|
372 |
+
scope.object.updateProjectionMatrix();
|
373 |
+
zoomChanged = true;
|
374 |
+
|
375 |
+
} else {
|
376 |
+
|
377 |
+
console.warn( 'WARNING: OrbitControls.js encountered an unknown camera type - dolly/zoom disabled.' );
|
378 |
+
scope.enableZoom = false;
|
379 |
+
|
380 |
+
}
|
381 |
+
|
382 |
+
}
|
383 |
+
|
384 |
+
function dollyOut( dollyScale ) {
|
385 |
+
|
386 |
+
if ( scope.object instanceof THREE.PerspectiveCamera ) {
|
387 |
+
|
388 |
+
scale *= dollyScale;
|
389 |
+
|
390 |
+
} else if ( scope.object instanceof THREE.OrthographicCamera ) {
|
391 |
+
|
392 |
+
scope.object.zoom = Math.max( scope.minZoom, Math.min( scope.maxZoom, scope.object.zoom / dollyScale ) );
|
393 |
+
scope.object.updateProjectionMatrix();
|
394 |
+
zoomChanged = true;
|
395 |
+
|
396 |
+
} else {
|
397 |
+
|
398 |
+
console.warn( 'WARNING: OrbitControls.js encountered an unknown camera type - dolly/zoom disabled.' );
|
399 |
+
scope.enableZoom = false;
|
400 |
+
|
401 |
+
}
|
402 |
+
|
403 |
+
}
|
404 |
+
|
405 |
+
//
|
406 |
+
// event callbacks - update the object state
|
407 |
+
//
|
408 |
+
|
409 |
+
function handleMouseDownRotate( event ) {
|
410 |
+
|
411 |
+
//console.log( 'handleMouseDownRotate' );
|
412 |
+
|
413 |
+
rotateStart.set( event.clientX, event.clientY );
|
414 |
+
|
415 |
+
}
|
416 |
+
|
417 |
+
function handleMouseDownDolly( event ) {
|
418 |
+
|
419 |
+
//console.log( 'handleMouseDownDolly' );
|
420 |
+
|
421 |
+
dollyStart.set( event.clientX, event.clientY );
|
422 |
+
|
423 |
+
}
|
424 |
+
|
425 |
+
function handleMouseDownPan( event ) {
|
426 |
+
|
427 |
+
//console.log( 'handleMouseDownPan' );
|
428 |
+
|
429 |
+
panStart.set( event.clientX, event.clientY );
|
430 |
+
|
431 |
+
}
|
432 |
+
|
433 |
+
function handleMouseMoveRotate( event ) {
|
434 |
+
|
435 |
+
//console.log( 'handleMouseMoveRotate' );
|
436 |
+
|
437 |
+
rotateEnd.set( event.clientX, event.clientY );
|
438 |
+
rotateDelta.subVectors( rotateEnd, rotateStart );
|
439 |
+
|
440 |
+
var element = scope.domElement === document ? scope.domElement.body : scope.domElement;
|
441 |
+
|
442 |
+
// rotating across whole screen goes 360 degrees around
|
443 |
+
rotateLeft( 2 * Math.PI * rotateDelta.x / element.clientWidth * scope.rotateSpeed );
|
444 |
+
|
445 |
+
// rotating up and down along whole screen attempts to go 360, but limited to 180
|
446 |
+
rotateUp( 2 * Math.PI * rotateDelta.y / element.clientHeight * scope.rotateSpeed );
|
447 |
+
|
448 |
+
rotateStart.copy( rotateEnd );
|
449 |
+
|
450 |
+
scope.update();
|
451 |
+
|
452 |
+
}
|
453 |
+
|
454 |
+
function handleMouseMoveDolly( event ) {
|
455 |
+
|
456 |
+
//console.log( 'handleMouseMoveDolly' );
|
457 |
+
|
458 |
+
dollyEnd.set( event.clientX, event.clientY );
|
459 |
+
|
460 |
+
dollyDelta.subVectors( dollyEnd, dollyStart );
|
461 |
+
|
462 |
+
if ( dollyDelta.y > 0 ) {
|
463 |
+
|
464 |
+
dollyIn( getZoomScale() );
|
465 |
+
|
466 |
+
} else if ( dollyDelta.y < 0 ) {
|
467 |
+
|
468 |
+
dollyOut( getZoomScale() );
|
469 |
+
|
470 |
+
}
|
471 |
+
|
472 |
+
dollyStart.copy( dollyEnd );
|
473 |
+
|
474 |
+
scope.update();
|
475 |
+
|
476 |
+
}
|
477 |
+
|
478 |
+
function handleMouseMovePan( event ) {
|
479 |
+
|
480 |
+
//console.log( 'handleMouseMovePan' );
|
481 |
+
|
482 |
+
panEnd.set( event.clientX, event.clientY );
|
483 |
+
|
484 |
+
panDelta.subVectors( panEnd, panStart );
|
485 |
+
|
486 |
+
pan( panDelta.x, panDelta.y );
|
487 |
+
|
488 |
+
panStart.copy( panEnd );
|
489 |
+
|
490 |
+
scope.update();
|
491 |
+
|
492 |
+
}
|
493 |
+
|
494 |
+
function handleMouseUp( event ) {
|
495 |
+
|
496 |
+
//console.log( 'handleMouseUp' );
|
497 |
+
|
498 |
+
}
|
499 |
+
|
500 |
+
function handleMouseWheel( event ) {
|
501 |
+
|
502 |
+
//console.log( 'handleMouseWheel' );
|
503 |
+
|
504 |
+
var delta = 0;
|
505 |
+
|
506 |
+
if ( event.wheelDelta !== undefined ) {
|
507 |
+
|
508 |
+
// WebKit / Opera / Explorer 9
|
509 |
+
|
510 |
+
delta = event.wheelDelta;
|
511 |
+
|
512 |
+
} else if ( event.detail !== undefined ) {
|
513 |
+
|
514 |
+
// Firefox
|
515 |
+
|
516 |
+
delta = - event.detail;
|
517 |
+
|
518 |
+
}
|
519 |
+
|
520 |
+
if ( delta > 0 ) {
|
521 |
+
|
522 |
+
dollyOut( getZoomScale() );
|
523 |
+
|
524 |
+
} else if ( delta < 0 ) {
|
525 |
+
|
526 |
+
dollyIn( getZoomScale() );
|
527 |
+
|
528 |
+
}
|
529 |
+
|
530 |
+
scope.update();
|
531 |
+
|
532 |
+
}
|
533 |
+
|
534 |
+
function handleKeyDown( event ) {
|
535 |
+
|
536 |
+
//console.log( 'handleKeyDown' );
|
537 |
+
|
538 |
+
switch ( event.keyCode ) {
|
539 |
+
|
540 |
+
case scope.keys.UP:
|
541 |
+
pan( 0, scope.keyPanSpeed );
|
542 |
+
scope.update();
|
543 |
+
break;
|
544 |
+
|
545 |
+
case scope.keys.BOTTOM:
|
546 |
+
pan( 0, - scope.keyPanSpeed );
|
547 |
+
scope.update();
|
548 |
+
break;
|
549 |
+
|
550 |
+
case scope.keys.LEFT:
|
551 |
+
pan( scope.keyPanSpeed, 0 );
|
552 |
+
scope.update();
|
553 |
+
break;
|
554 |
+
|
555 |
+
case scope.keys.RIGHT:
|
556 |
+
pan( - scope.keyPanSpeed, 0 );
|
557 |
+
scope.update();
|
558 |
+
break;
|
559 |
+
|
560 |
+
}
|
561 |
+
|
562 |
+
}
|
563 |
+
|
564 |
+
function handleTouchStartRotate( event ) {
|
565 |
+
|
566 |
+
//console.log( 'handleTouchStartRotate' );
|
567 |
+
|
568 |
+
rotateStart.set( event.touches[ 0 ].pageX, event.touches[ 0 ].pageY );
|
569 |
+
|
570 |
+
}
|
571 |
+
|
572 |
+
function handleTouchStartDolly( event ) {
|
573 |
+
|
574 |
+
//console.log( 'handleTouchStartDolly' );
|
575 |
+
|
576 |
+
var dx = event.touches[ 0 ].pageX - event.touches[ 1 ].pageX;
|
577 |
+
var dy = event.touches[ 0 ].pageY - event.touches[ 1 ].pageY;
|
578 |
+
|
579 |
+
var distance = Math.sqrt( dx * dx + dy * dy );
|
580 |
+
|
581 |
+
dollyStart.set( 0, distance );
|
582 |
+
|
583 |
+
}
|
584 |
+
|
585 |
+
function handleTouchStartPan( event ) {
|
586 |
+
|
587 |
+
//console.log( 'handleTouchStartPan' );
|
588 |
+
|
589 |
+
panStart.set( event.touches[ 0 ].pageX, event.touches[ 0 ].pageY );
|
590 |
+
|
591 |
+
}
|
592 |
+
|
593 |
+
function handleTouchMoveRotate( event ) {
|
594 |
+
|
595 |
+
//console.log( 'handleTouchMoveRotate' );
|
596 |
+
|
597 |
+
rotateEnd.set( event.touches[ 0 ].pageX, event.touches[ 0 ].pageY );
|
598 |
+
rotateDelta.subVectors( rotateEnd, rotateStart );
|
599 |
+
|
600 |
+
var element = scope.domElement === document ? scope.domElement.body : scope.domElement;
|
601 |
+
|
602 |
+
// rotating across whole screen goes 360 degrees around
|
603 |
+
rotateLeft( 2 * Math.PI * rotateDelta.x / element.clientWidth * scope.rotateSpeed );
|
604 |
+
|
605 |
+
// rotating up and down along whole screen attempts to go 360, but limited to 180
|
606 |
+
rotateUp( 2 * Math.PI * rotateDelta.y / element.clientHeight * scope.rotateSpeed );
|
607 |
+
|
608 |
+
rotateStart.copy( rotateEnd );
|
609 |
+
|
610 |
+
scope.update();
|
611 |
+
|
612 |
+
}
|
613 |
+
|
614 |
+
function handleTouchMoveDolly( event ) {
|
615 |
+
|
616 |
+
//console.log( 'handleTouchMoveDolly' );
|
617 |
+
|
618 |
+
var dx = event.touches[ 0 ].pageX - event.touches[ 1 ].pageX;
|
619 |
+
var dy = event.touches[ 0 ].pageY - event.touches[ 1 ].pageY;
|
620 |
+
|
621 |
+
var distance = Math.sqrt( dx * dx + dy * dy );
|
622 |
+
|
623 |
+
dollyEnd.set( 0, distance );
|
624 |
+
|
625 |
+
dollyDelta.subVectors( dollyEnd, dollyStart );
|
626 |
+
|
627 |
+
if ( dollyDelta.y > 0 ) {
|
628 |
+
|
629 |
+
dollyOut( getZoomScale() );
|
630 |
+
|
631 |
+
} else if ( dollyDelta.y < 0 ) {
|
632 |
+
|
633 |
+
dollyIn( getZoomScale() );
|
634 |
+
|
635 |
+
}
|
636 |
+
|
637 |
+
dollyStart.copy( dollyEnd );
|
638 |
+
|
639 |
+
scope.update();
|
640 |
+
|
641 |
+
}
|
642 |
+
|
643 |
+
function handleTouchMovePan( event ) {
|
644 |
+
|
645 |
+
//console.log( 'handleTouchMovePan' );
|
646 |
+
|
647 |
+
panEnd.set( event.touches[ 0 ].pageX, event.touches[ 0 ].pageY );
|
648 |
+
|
649 |
+
panDelta.subVectors( panEnd, panStart );
|
650 |
+
|
651 |
+
pan( panDelta.x, panDelta.y );
|
652 |
+
|
653 |
+
panStart.copy( panEnd );
|
654 |
+
|
655 |
+
scope.update();
|
656 |
+
|
657 |
+
}
|
658 |
+
|
659 |
+
function handleTouchEnd( event ) {
|
660 |
+
|
661 |
+
//console.log( 'handleTouchEnd' );
|
662 |
+
|
663 |
+
}
|
664 |
+
|
665 |
+
//
|
666 |
+
// event handlers - FSM: listen for events and reset state
|
667 |
+
//
|
668 |
+
|
669 |
+
function onMouseDown( event ) {
|
670 |
+
|
671 |
+
if ( scope.enabled === false ) return;
|
672 |
+
|
673 |
+
event.preventDefault();
|
674 |
+
|
675 |
+
if ( event.button === scope.mouseButtons.ORBIT ) {
|
676 |
+
|
677 |
+
if ( scope.enableRotate === false ) return;
|
678 |
+
|
679 |
+
handleMouseDownRotate( event );
|
680 |
+
|
681 |
+
state = STATE.ROTATE;
|
682 |
+
|
683 |
+
} else if ( event.button === scope.mouseButtons.ZOOM ) {
|
684 |
+
|
685 |
+
if ( scope.enableZoom === false ) return;
|
686 |
+
|
687 |
+
handleMouseDownDolly( event );
|
688 |
+
|
689 |
+
state = STATE.DOLLY;
|
690 |
+
|
691 |
+
} else if ( event.button === scope.mouseButtons.PAN ) {
|
692 |
+
|
693 |
+
if ( scope.enablePan === false ) return;
|
694 |
+
|
695 |
+
handleMouseDownPan( event );
|
696 |
+
|
697 |
+
state = STATE.PAN;
|
698 |
+
|
699 |
+
}
|
700 |
+
|
701 |
+
if ( state !== STATE.NONE ) {
|
702 |
+
|
703 |
+
document.addEventListener( 'mousemove', onMouseMove, false );
|
704 |
+
document.addEventListener( 'mouseup', onMouseUp, false );
|
705 |
+
document.addEventListener( 'mouseout', onMouseUp, false );
|
706 |
+
|
707 |
+
scope.dispatchEvent( startEvent );
|
708 |
+
|
709 |
+
}
|
710 |
+
|
711 |
+
}
|
712 |
+
|
713 |
+
function onMouseMove( event ) {
|
714 |
+
|
715 |
+
if ( scope.enabled === false ) return;
|
716 |
+
|
717 |
+
event.preventDefault();
|
718 |
+
|
719 |
+
if ( state === STATE.ROTATE ) {
|
720 |
+
|
721 |
+
if ( scope.enableRotate === false ) return;
|
722 |
+
|
723 |
+
handleMouseMoveRotate( event );
|
724 |
+
|
725 |
+
} else if ( state === STATE.DOLLY ) {
|
726 |
+
|
727 |
+
if ( scope.enableZoom === false ) return;
|
728 |
+
|
729 |
+
handleMouseMoveDolly( event );
|
730 |
+
|
731 |
+
} else if ( state === STATE.PAN ) {
|
732 |
+
|
733 |
+
if ( scope.enablePan === false ) return;
|
734 |
+
|
735 |
+
handleMouseMovePan( event );
|
736 |
+
|
737 |
+
}
|
738 |
+
|
739 |
+
}
|
740 |
+
|
741 |
+
function onMouseUp( event ) {
|
742 |
+
|
743 |
+
if ( scope.enabled === false ) return;
|
744 |
+
|
745 |
+
handleMouseUp( event );
|
746 |
+
|
747 |
+
document.removeEventListener( 'mousemove', onMouseMove, false );
|
748 |
+
document.removeEventListener( 'mouseup', onMouseUp, false );
|
749 |
+
document.removeEventListener( 'mouseout', onMouseUp, false );
|
750 |
+
|
751 |
+
scope.dispatchEvent( endEvent );
|
752 |
+
|
753 |
+
state = STATE.NONE;
|
754 |
+
|
755 |
+
}
|
756 |
+
|
757 |
+
function onMouseWheel( event ) {
|
758 |
+
|
759 |
+
if ( scope.enabled === false || scope.enableZoom === false || ( state !== STATE.NONE && state !== STATE.ROTATE ) ) return;
|
760 |
+
|
761 |
+
event.preventDefault();
|
762 |
+
event.stopPropagation();
|
763 |
+
|
764 |
+
handleMouseWheel( event );
|
765 |
+
|
766 |
+
scope.dispatchEvent( startEvent ); // not sure why these are here...
|
767 |
+
scope.dispatchEvent( endEvent );
|
768 |
+
|
769 |
+
}
|
770 |
+
|
771 |
+
function onKeyDown( event ) {
|
772 |
+
|
773 |
+
if ( scope.enabled === false || scope.enableKeys === false || scope.enablePan === false ) return;
|
774 |
+
|
775 |
+
handleKeyDown( event );
|
776 |
+
|
777 |
+
}
|
778 |
+
|
779 |
+
function onTouchStart( event ) {
|
780 |
+
|
781 |
+
if ( scope.enabled === false ) return;
|
782 |
+
|
783 |
+
switch ( event.touches.length ) {
|
784 |
+
|
785 |
+
case 1: // one-fingered touch: rotate
|
786 |
+
|
787 |
+
if ( scope.enableRotate === false ) return;
|
788 |
+
|
789 |
+
handleTouchStartRotate( event );
|
790 |
+
|
791 |
+
state = STATE.TOUCH_ROTATE;
|
792 |
+
|
793 |
+
break;
|
794 |
+
|
795 |
+
case 2: // two-fingered touch: dolly
|
796 |
+
|
797 |
+
if ( scope.enableZoom === false ) return;
|
798 |
+
|
799 |
+
handleTouchStartDolly( event );
|
800 |
+
|
801 |
+
state = STATE.TOUCH_DOLLY;
|
802 |
+
|
803 |
+
break;
|
804 |
+
|
805 |
+
case 3: // three-fingered touch: pan
|
806 |
+
|
807 |
+
if ( scope.enablePan === false ) return;
|
808 |
+
|
809 |
+
handleTouchStartPan( event );
|
810 |
+
|
811 |
+
state = STATE.TOUCH_PAN;
|
812 |
+
|
813 |
+
break;
|
814 |
+
|
815 |
+
default:
|
816 |
+
|
817 |
+
state = STATE.NONE;
|
818 |
+
|
819 |
+
}
|
820 |
+
|
821 |
+
if ( state !== STATE.NONE ) {
|
822 |
+
|
823 |
+
scope.dispatchEvent( startEvent );
|
824 |
+
|
825 |
+
}
|
826 |
+
|
827 |
+
}
|
828 |
+
|
829 |
+
function onTouchMove( event ) {
|
830 |
+
|
831 |
+
if ( scope.enabled === false ) return;
|
832 |
+
|
833 |
+
event.preventDefault();
|
834 |
+
event.stopPropagation();
|
835 |
+
|
836 |
+
switch ( event.touches.length ) {
|
837 |
+
|
838 |
+
case 1: // one-fingered touch: rotate
|
839 |
+
|
840 |
+
if ( scope.enableRotate === false ) return;
|
841 |
+
if ( state !== STATE.TOUCH_ROTATE ) return; // is this needed?...
|
842 |
+
|
843 |
+
handleTouchMoveRotate( event );
|
844 |
+
|
845 |
+
break;
|
846 |
+
|
847 |
+
case 2: // two-fingered touch: dolly
|
848 |
+
|
849 |
+
if ( scope.enableZoom === false ) return;
|
850 |
+
if ( state !== STATE.TOUCH_DOLLY ) return; // is this needed?...
|
851 |
+
|
852 |
+
handleTouchMoveDolly( event );
|
853 |
+
|
854 |
+
break;
|
855 |
+
|
856 |
+
case 3: // three-fingered touch: pan
|
857 |
+
|
858 |
+
if ( scope.enablePan === false ) return;
|
859 |
+
if ( state !== STATE.TOUCH_PAN ) return; // is this needed?...
|
860 |
+
|
861 |
+
handleTouchMovePan( event );
|
862 |
+
|
863 |
+
break;
|
864 |
+
|
865 |
+
default:
|
866 |
+
|
867 |
+
state = STATE.NONE;
|
868 |
+
|
869 |
+
}
|
870 |
+
|
871 |
+
}
|
872 |
+
|
873 |
+
function onTouchEnd( event ) {
|
874 |
+
|
875 |
+
if ( scope.enabled === false ) return;
|
876 |
+
|
877 |
+
handleTouchEnd( event );
|
878 |
+
|
879 |
+
scope.dispatchEvent( endEvent );
|
880 |
+
|
881 |
+
state = STATE.NONE;
|
882 |
+
|
883 |
+
}
|
884 |
+
|
885 |
+
function onContextMenu( event ) {
|
886 |
+
|
887 |
+
event.preventDefault();
|
888 |
+
|
889 |
+
}
|
890 |
+
|
891 |
+
//
|
892 |
+
|
893 |
+
scope.domElement.addEventListener( 'contextmenu', onContextMenu, false );
|
894 |
+
|
895 |
+
scope.domElement.addEventListener( 'mousedown', onMouseDown, false );
|
896 |
+
scope.domElement.addEventListener( 'mousewheel', onMouseWheel, false );
|
897 |
+
scope.domElement.addEventListener( 'MozMousePixelScroll', onMouseWheel, false ); // firefox
|
898 |
+
|
899 |
+
scope.domElement.addEventListener( 'touchstart', onTouchStart, false );
|
900 |
+
scope.domElement.addEventListener( 'touchend', onTouchEnd, false );
|
901 |
+
scope.domElement.addEventListener( 'touchmove', onTouchMove, false );
|
902 |
+
|
903 |
+
window.addEventListener( 'keydown', onKeyDown, false );
|
904 |
+
|
905 |
+
// force an update at start
|
906 |
+
|
907 |
+
this.update();
|
908 |
+
|
909 |
+
};
|
910 |
+
|
911 |
+
THREE.OrbitControls.prototype = Object.create( THREE.EventDispatcher.prototype );
|
912 |
+
THREE.OrbitControls.prototype.constructor = THREE.OrbitControls;
|
913 |
+
|
914 |
+
Object.defineProperties( THREE.OrbitControls.prototype, {
|
915 |
+
|
916 |
+
center: {
|
917 |
+
|
918 |
+
get: function () {
|
919 |
+
|
920 |
+
console.warn( 'THREE.OrbitControls: .center has been renamed to .target' );
|
921 |
+
return this.target;
|
922 |
+
|
923 |
+
}
|
924 |
+
|
925 |
+
},
|
926 |
+
|
927 |
+
// backward compatibility
|
928 |
+
|
929 |
+
noZoom: {
|
930 |
+
|
931 |
+
get: function () {
|
932 |
+
|
933 |
+
console.warn( 'THREE.OrbitControls: .noZoom has been deprecated. Use .enableZoom instead.' );
|
934 |
+
return ! this.enableZoom;
|
935 |
+
|
936 |
+
},
|
937 |
+
|
938 |
+
set: function ( value ) {
|
939 |
+
|
940 |
+
console.warn( 'THREE.OrbitControls: .noZoom has been deprecated. Use .enableZoom instead.' );
|
941 |
+
this.enableZoom = ! value;
|
942 |
+
|
943 |
+
}
|
944 |
+
|
945 |
+
},
|
946 |
+
|
947 |
+
noRotate: {
|
948 |
+
|
949 |
+
get: function () {
|
950 |
+
|
951 |
+
console.warn( 'THREE.OrbitControls: .noRotate has been deprecated. Use .enableRotate instead.' );
|
952 |
+
return ! this.enableRotate;
|
953 |
+
|
954 |
+
},
|
955 |
+
|
956 |
+
set: function ( value ) {
|
957 |
+
|
958 |
+
console.warn( 'THREE.OrbitControls: .noRotate has been deprecated. Use .enableRotate instead.' );
|
959 |
+
this.enableRotate = ! value;
|
960 |
+
|
961 |
+
}
|
962 |
+
|
963 |
+
},
|
964 |
+
|
965 |
+
noPan: {
|
966 |
+
|
967 |
+
get: function () {
|
968 |
+
|
969 |
+
console.warn( 'THREE.OrbitControls: .noPan has been deprecated. Use .enablePan instead.' );
|
970 |
+
return ! this.enablePan;
|
971 |
+
|
972 |
+
},
|
973 |
+
|
974 |
+
set: function ( value ) {
|
975 |
+
|
976 |
+
console.warn( 'THREE.OrbitControls: .noPan has been deprecated. Use .enablePan instead.' );
|
977 |
+
this.enablePan = ! value;
|
978 |
+
|
979 |
+
}
|
980 |
+
|
981 |
+
},
|
982 |
+
|
983 |
+
noKeys: {
|
984 |
+
|
985 |
+
get: function () {
|
986 |
+
|
987 |
+
console.warn( 'THREE.OrbitControls: .noKeys has been deprecated. Use .enableKeys instead.' );
|
988 |
+
return ! this.enableKeys;
|
989 |
+
|
990 |
+
},
|
991 |
+
|
992 |
+
set: function ( value ) {
|
993 |
+
|
994 |
+
console.warn( 'THREE.OrbitControls: .noKeys has been deprecated. Use .enableKeys instead.' );
|
995 |
+
this.enableKeys = ! value;
|
996 |
+
|
997 |
+
}
|
998 |
+
|
999 |
+
},
|
1000 |
+
|
1001 |
+
staticMoving : {
|
1002 |
+
|
1003 |
+
get: function () {
|
1004 |
+
|
1005 |
+
console.warn( 'THREE.OrbitControls: .staticMoving has been deprecated. Use .enableDamping instead.' );
|
1006 |
+
return ! this.enableDamping;
|
1007 |
+
|
1008 |
+
},
|
1009 |
+
|
1010 |
+
set: function ( value ) {
|
1011 |
+
|
1012 |
+
console.warn( 'THREE.OrbitControls: .staticMoving has been deprecated. Use .enableDamping instead.' );
|
1013 |
+
this.enableDamping = ! value;
|
1014 |
+
|
1015 |
+
}
|
1016 |
+
|
1017 |
+
},
|
1018 |
+
|
1019 |
+
dynamicDampingFactor : {
|
1020 |
+
|
1021 |
+
get: function () {
|
1022 |
+
|
1023 |
+
console.warn( 'THREE.OrbitControls: .dynamicDampingFactor has been renamed. Use .dampingFactor instead.' );
|
1024 |
+
return this.dampingFactor;
|
1025 |
+
|
1026 |
+
},
|
1027 |
+
|
1028 |
+
set: function ( value ) {
|
1029 |
+
|
1030 |
+
console.warn( 'THREE.OrbitControls: .dynamicDampingFactor has been renamed. Use .dampingFactor instead.' );
|
1031 |
+
this.dampingFactor = value;
|
1032 |
+
|
1033 |
+
}
|
1034 |
+
|
1035 |
+
}
|
1036 |
+
|
1037 |
+
} );
|
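For reference, a minimal sketch of how these controls are typically wired into a three.js render loop; camera, renderer, and scene are assumed to exist and are not taken from this repo:

var controls = new THREE.OrbitControls( camera, renderer.domElement );
controls.enableDamping = true;   // inertia; requires controls.update() every frame
controls.dampingFactor = 0.25;
controls.target.set( 0, 1, 0 );  // arbitrary focus point

function animate() {
    requestAnimationFrame( animate );
    controls.update();           // needed when enableDamping or autoRotate is set
    renderer.render( scene, camera );
}
animate();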
dataloaders/pymo/mocapplayer/libs/threejs/dat.gui.min.js
ADDED
@@ -0,0 +1,14 @@
1 |
+
/**
|
2 |
+
* dat-gui JavaScript Controller Library
|
3 |
+
* http://code.google.com/p/dat-gui
|
4 |
+
*
|
5 |
+
* Copyright 2011 Data Arts Team, Google Creative Lab
|
6 |
+
*
|
7 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
8 |
+
* you may not use this file except in compliance with the License.
|
9 |
+
* You may obtain a copy of the License at
|
10 |
+
*
|
11 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
12 |
+
*/
|
13 |
+
!function(e,t){"object"==typeof exports&&"object"==typeof module?module.exports=t():"function"==typeof define&&define.amd?define([],t):"object"==typeof exports?exports.dat=t():e.dat=t()}(this,function(){return function(e){function t(o){if(n[o])return n[o].exports;var i=n[o]={exports:{},id:o,loaded:!1};return e[o].call(i.exports,i,i.exports,t),i.loaded=!0,i.exports}var n={};return t.m=e,t.c=n,t.p="",t(0)}([function(e,t,n){"use strict";t.__esModule=!0,t["default"]=n(1),e.exports=t["default"]},function(e,t,n){"use strict";t.__esModule=!0,t["default"]={color:{Color:n(2),math:n(6),interpret:n(3)},controllers:{Controller:n(7),BooleanController:n(8),OptionController:n(10),StringController:n(11),NumberController:n(12),NumberControllerBox:n(13),NumberControllerSlider:n(14),FunctionController:n(15),ColorController:n(16)},dom:{dom:n(9)},gui:{GUI:n(17)},GUI:n(17)},e.exports=t["default"]},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{"default":e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function a(e,t,n){Object.defineProperty(e,t,{get:function(){return"RGB"===this.__state.space?this.__state[t]:(p.recalculateRGB(this,t,n),this.__state[t])},set:function(e){"RGB"!==this.__state.space&&(p.recalculateRGB(this,t,n),this.__state.space="RGB"),this.__state[t]=e}})}function r(e,t){Object.defineProperty(e,t,{get:function(){return"HSV"===this.__state.space?this.__state[t]:(p.recalculateHSV(this),this.__state[t])},set:function(e){"HSV"!==this.__state.space&&(p.recalculateHSV(this),this.__state.space="HSV"),this.__state[t]=e}})}t.__esModule=!0;var s=n(3),l=o(s),d=n(6),u=o(d),c=n(4),f=o(c),h=n(5),_=o(h),p=function(){function e(){if(i(this,e),this.__state=l["default"].apply(this,arguments),this.__state===!1)throw new Error("Failed to interpret color arguments");this.__state.a=this.__state.a||1}return e.prototype.toString=function(){return f["default"](this)},e.prototype.toOriginal=function(){return this.__state.conversion.write(this)},e}();p.recalculateRGB=function(e,t,n){if("HEX"===e.__state.space)e.__state[t]=u["default"].component_from_hex(e.__state.hex,n);else{if("HSV"!==e.__state.space)throw new Error("Corrupted color state");_["default"].extend(e.__state,u["default"].hsv_to_rgb(e.__state.h,e.__state.s,e.__state.v))}},p.recalculateHSV=function(e){var t=u["default"].rgb_to_hsv(e.r,e.g,e.b);_["default"].extend(e.__state,{s:t.s,v:t.v}),_["default"].isNaN(t.h)?_["default"].isUndefined(e.__state.h)&&(e.__state.h=0):e.__state.h=t.h},p.COMPONENTS=["r","g","b","h","s","v","hex","a"],a(p.prototype,"r",2),a(p.prototype,"g",1),a(p.prototype,"b",0),r(p.prototype,"h"),r(p.prototype,"s"),r(p.prototype,"v"),Object.defineProperty(p.prototype,"a",{get:function(){return this.__state.a},set:function(e){this.__state.a=e}}),Object.defineProperty(p.prototype,"hex",{get:function(){return"HEX"!==!this.__state.space&&(this.__state.hex=u["default"].rgb_to_hex(this.r,this.g,this.b)),this.__state.hex},set:function(e){this.__state.space="HEX",this.__state.hex=e}}),t["default"]=p,e.exports=t["default"]},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{"default":e}}t.__esModule=!0;var i=n(4),a=o(i),r=n(5),s=o(r),l=[{litmus:s["default"].isString,conversions:{THREE_CHAR_HEX:{read:function(e){var t=e.match(/^#([A-F0-9])([A-F0-9])([A-F0-9])$/i);return null!==t&&{space:"HEX",hex:parseInt("0x"+t[1].toString()+t[1].toString()+t[2].toString()+t[2].toString()+t[3].toString()+t[3].toString(),0)}},write:a["default"]},SIX_CHAR_HEX:{read:function(e){var 
t=e.match(/^#([A-F0-9]{6})$/i);return null!==t&&{space:"HEX",hex:parseInt("0x"+t[1].toString(),0)}},write:a["default"]},CSS_RGB:{read:function(e){var t=e.match(/^rgb\(\s*(.+)\s*,\s*(.+)\s*,\s*(.+)\s*\)/);return null!==t&&{space:"RGB",r:parseFloat(t[1]),g:parseFloat(t[2]),b:parseFloat(t[3])}},write:a["default"]},CSS_RGBA:{read:function(e){var t=e.match(/^rgba\(\s*(.+)\s*,\s*(.+)\s*,\s*(.+)\s*,\s*(.+)\s*\)/);return null!==t&&{space:"RGB",r:parseFloat(t[1]),g:parseFloat(t[2]),b:parseFloat(t[3]),a:parseFloat(t[4])}},write:a["default"]}}},{litmus:s["default"].isNumber,conversions:{HEX:{read:function(e){return{space:"HEX",hex:e,conversionName:"HEX"}},write:function(e){return e.hex}}}},{litmus:s["default"].isArray,conversions:{RGB_ARRAY:{read:function(e){return 3===e.length&&{space:"RGB",r:e[0],g:e[1],b:e[2]}},write:function(e){return[e.r,e.g,e.b]}},RGBA_ARRAY:{read:function(e){return 4===e.length&&{space:"RGB",r:e[0],g:e[1],b:e[2],a:e[3]}},write:function(e){return[e.r,e.g,e.b,e.a]}}}},{litmus:s["default"].isObject,conversions:{RGBA_OBJ:{read:function(e){return!!(s["default"].isNumber(e.r)&&s["default"].isNumber(e.g)&&s["default"].isNumber(e.b)&&s["default"].isNumber(e.a))&&{space:"RGB",r:e.r,g:e.g,b:e.b,a:e.a}},write:function(e){return{r:e.r,g:e.g,b:e.b,a:e.a}}},RGB_OBJ:{read:function(e){return!!(s["default"].isNumber(e.r)&&s["default"].isNumber(e.g)&&s["default"].isNumber(e.b))&&{space:"RGB",r:e.r,g:e.g,b:e.b}},write:function(e){return{r:e.r,g:e.g,b:e.b}}},HSVA_OBJ:{read:function(e){return!!(s["default"].isNumber(e.h)&&s["default"].isNumber(e.s)&&s["default"].isNumber(e.v)&&s["default"].isNumber(e.a))&&{space:"HSV",h:e.h,s:e.s,v:e.v,a:e.a}},write:function(e){return{h:e.h,s:e.s,v:e.v,a:e.a}}},HSV_OBJ:{read:function(e){return!!(s["default"].isNumber(e.h)&&s["default"].isNumber(e.s)&&s["default"].isNumber(e.v))&&{space:"HSV",h:e.h,s:e.s,v:e.v}},write:function(e){return{h:e.h,s:e.s,v:e.v}}}}}],d=void 0,u=void 0,c=function(){u=!1;var e=arguments.length>1?s["default"].toArray(arguments):arguments[0];return s["default"].each(l,function(t){if(t.litmus(e))return s["default"].each(t.conversions,function(t,n){if(d=t.read(e),u===!1&&d!==!1)return u=d,d.conversionName=n,d.conversion=t,s["default"].BREAK}),s["default"].BREAK}),u};t["default"]=c,e.exports=t["default"]},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{"default":e}}t.__esModule=!0;var i=n(5),a=o(i);t["default"]=function(e){if(1===e.a||a["default"].isUndefined(e.a)){for(var t=e.hex.toString(16);t.length<6;)t="0"+t;return"#"+t}return"rgba("+Math.round(e.r)+","+Math.round(e.g)+","+Math.round(e.b)+","+e.a+")"},e.exports=t["default"]},function(e,t){"use strict";t.__esModule=!0;var n=Array.prototype.forEach,o=Array.prototype.slice,i={BREAK:{},extend:function(e){return this.each(o.call(arguments,1),function(t){if(!this.isUndefined(t)){var n=Object.keys(t);n.forEach(function(n){this.isUndefined(t[n])||(e[n]=t[n])}.bind(this))}},this),e},defaults:function(e){return this.each(o.call(arguments,1),function(t){if(!this.isUndefined(t)){var n=Object.keys(t);n.forEach(function(n){this.isUndefined(e[n])&&(e[n]=t[n])}.bind(this))}},this),e},compose:function(){var e=o.call(arguments);return function(){for(var t=o.call(arguments),n=e.length-1;n>=0;n--)t=[e[n].apply(this,t)];return t[0]}},each:function(e,t,o){if(e)if(n&&e.forEach&&e.forEach===n)e.forEach(t,o);else if(e.length===e.length+0){var i=void 0,a=void 0;for(i=0,a=e.length;i<a;i++)if(i in e&&t.call(o,e[i],i)===this.BREAK)return}else{if(this.isUndefined(e))return;var 
r=Object.keys(e);r.forEach(function(n){t.call(o,e[n],n)===this.BREAK}.bind(this))}},defer:function(e){setTimeout(e,0)},debounce:function(e,t){var n=void 0;return function(){function o(){n=null}var i=this,a=arguments,r=!n;clearTimeout(n),n=setTimeout(o,t),r&&e.apply(i,a)}},toArray:function(e){return e.toArray?e.toArray():o.call(e)},isUndefined:function(e){return void 0===e},isNull:function(e){return null===e},isNaN:function(e){function t(t){return e.apply(this,arguments)}return t.toString=function(){return e.toString()},t}(function(e){return isNaN(e)}),isArray:Array.isArray||function(e){return e.constructor===Array},isObject:function(e){return e===Object(e)},isNumber:function(e){return e===e+0},isString:function(e){return e===e+""},isBoolean:function(e){return e===!1||e===!0},isFunction:function(e){return"[object Function]"===Object.prototype.toString.call(e)}};t["default"]=i,e.exports=t["default"]},function(e,t){"use strict";t.__esModule=!0;var n=void 0,o={hsv_to_rgb:function(e,t,n){var o=Math.floor(e/60)%6,i=e/60-Math.floor(e/60),a=n*(1-t),r=n*(1-i*t),s=n*(1-(1-i)*t),l=[[n,s,a],[r,n,a],[a,n,s],[a,r,n],[s,a,n],[n,a,r]][o];return{r:255*l[0],g:255*l[1],b:255*l[2]}},rgb_to_hsv:function(e,t,n){var o=Math.min(e,t,n),i=Math.max(e,t,n),a=i-o,r=void 0,s=void 0;return 0===i?{h:NaN,s:0,v:0}:(s=a/i,r=e===i?(t-n)/a:t===i?2+(n-e)/a:4+(e-t)/a,r/=6,r<0&&(r+=1),{h:360*r,s:s,v:i/255})},rgb_to_hex:function(e,t,n){var o=this.hex_with_component(0,2,e);return o=this.hex_with_component(o,1,t),o=this.hex_with_component(o,0,n)},component_from_hex:function(e,t){return e>>8*t&255},hex_with_component:function(e,t,o){return o<<(n=8*t)|e&~(255<<n)}};t["default"]=o,e.exports=t["default"]},function(e,t){"use strict";function n(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}t.__esModule=!0;var o=function(){function e(t,o){n(this,e),this.initialValue=t[o],this.domElement=document.createElement("div"),this.object=t,this.property=o,this.__onChange=void 0,this.__onFinishChange=void 0}return e.prototype.onChange=function(e){return this.__onChange=e,this},e.prototype.onFinishChange=function(e){return this.__onFinishChange=e,this},e.prototype.setValue=function(e){return this.object[this.property]=e,this.__onChange&&this.__onChange.call(this,e),this.updateDisplay(),this},e.prototype.getValue=function(){return this.object[this.property]},e.prototype.updateDisplay=function(){return this},e.prototype.isModified=function(){return this.initialValue!==this.getValue()},e}();t["default"]=o,e.exports=t["default"]},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{"default":e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}t.__esModule=!0;var r=n(7),s=o(r),l=n(9),d=o(l),u=function(e){function t(n,o){function a(){r.setValue(!r.__prev)}i(this,t),e.call(this,n,o);var r=this;this.__prev=this.getValue(),this.__checkbox=document.createElement("input"),this.__checkbox.setAttribute("type","checkbox"),d["default"].bind(this.__checkbox,"change",a,!1),this.domElement.appendChild(this.__checkbox),this.updateDisplay()}return a(t,e),t.prototype.setValue=function(t){var n=e.prototype.setValue.call(this,t);return 
this.__onFinishChange&&this.__onFinishChange.call(this,this.getValue()),this.__prev=this.getValue(),n},t.prototype.updateDisplay=function(){return this.getValue()===!0?(this.__checkbox.setAttribute("checked","checked"),this.__checkbox.checked=!0):this.__checkbox.checked=!1,e.prototype.updateDisplay.call(this)},t}(s["default"]);t["default"]=u,e.exports=t["default"]},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{"default":e}}function i(e){if("0"===e||r["default"].isUndefined(e))return 0;var t=e.match(d);return r["default"].isNull(t)?0:parseFloat(t[1])}t.__esModule=!0;var a=n(5),r=o(a),s={HTMLEvents:["change"],MouseEvents:["click","mousemove","mousedown","mouseup","mouseover"],KeyboardEvents:["keydown"]},l={};r["default"].each(s,function(e,t){r["default"].each(e,function(e){l[e]=t})});var d=/(\d+(\.\d+)?)px/,u={makeSelectable:function(e,t){void 0!==e&&void 0!==e.style&&(e.onselectstart=t?function(){return!1}:function(){},e.style.MozUserSelect=t?"auto":"none",e.style.KhtmlUserSelect=t?"auto":"none",e.unselectable=t?"on":"off")},makeFullscreen:function(e,t,n){var o=n,i=t;r["default"].isUndefined(i)&&(i=!0),r["default"].isUndefined(o)&&(o=!0),e.style.position="absolute",i&&(e.style.left=0,e.style.right=0),o&&(e.style.top=0,e.style.bottom=0)},fakeEvent:function(e,t,n,o){var i=n||{},a=l[t];if(!a)throw new Error("Event type "+t+" not supported.");var s=document.createEvent(a);switch(a){case"MouseEvents":var d=i.x||i.clientX||0,u=i.y||i.clientY||0;s.initMouseEvent(t,i.bubbles||!1,i.cancelable||!0,window,i.clickCount||1,0,0,d,u,!1,!1,!1,!1,0,null);break;case"KeyboardEvents":var c=s.initKeyboardEvent||s.initKeyEvent;r["default"].defaults(i,{cancelable:!0,ctrlKey:!1,altKey:!1,shiftKey:!1,metaKey:!1,keyCode:void 0,charCode:void 0}),c(t,i.bubbles||!1,i.cancelable,window,i.ctrlKey,i.altKey,i.shiftKey,i.metaKey,i.keyCode,i.charCode);break;default:s.initEvent(t,i.bubbles||!1,i.cancelable||!0)}r["default"].defaults(s,o),e.dispatchEvent(s)},bind:function(e,t,n,o){var i=o||!1;return e.addEventListener?e.addEventListener(t,n,i):e.attachEvent&&e.attachEvent("on"+t,n),u},unbind:function(e,t,n,o){var i=o||!1;return e.removeEventListener?e.removeEventListener(t,n,i):e.detachEvent&&e.detachEvent("on"+t,n),u},addClass:function(e,t){if(void 0===e.className)e.className=t;else if(e.className!==t){var n=e.className.split(/ +/);n.indexOf(t)===-1&&(n.push(t),e.className=n.join(" ").replace(/^\s+/,"").replace(/\s+$/,""))}return u},removeClass:function(e,t){if(t)if(e.className===t)e.removeAttribute("class");else{var n=e.className.split(/ +/),o=n.indexOf(t);o!==-1&&(n.splice(o,1),e.className=n.join(" "))}else e.className=void 0;return u},hasClass:function(e,t){return new RegExp("(?:^|\\s+)"+t+"(?:\\s+|$)").test(e.className)||!1},getWidth:function(e){var t=getComputedStyle(e);return i(t["border-left-width"])+i(t["border-right-width"])+i(t["padding-left"])+i(t["padding-right"])+i(t.width)},getHeight:function(e){var t=getComputedStyle(e);return i(t["border-top-width"])+i(t["border-bottom-width"])+i(t["padding-top"])+i(t["padding-bottom"])+i(t.height)},getOffset:function(e){var t=e,n={left:0,top:0};if(t.offsetParent)do n.left+=t.offsetLeft,n.top+=t.offsetTop,t=t.offsetParent;while(t);return n},isActive:function(e){return e===document.activeElement&&(e.type||e.href)}};t["default"]=u,e.exports=t["default"]},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{"default":e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function 
a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}t.__esModule=!0;var r=n(7),s=o(r),l=n(9),d=o(l),u=n(5),c=o(u),f=function(e){function t(n,o,a){i(this,t),e.call(this,n,o);var r=a,s=this;this.__select=document.createElement("select"),c["default"].isArray(r)&&!function(){var e={};c["default"].each(r,function(t){e[t]=t}),r=e}(),c["default"].each(r,function(e,t){var n=document.createElement("option");n.innerHTML=t,n.setAttribute("value",e),s.__select.appendChild(n)}),this.updateDisplay(),d["default"].bind(this.__select,"change",function(){var e=this.options[this.selectedIndex].value;s.setValue(e)}),this.domElement.appendChild(this.__select)}return a(t,e),t.prototype.setValue=function(t){var n=e.prototype.setValue.call(this,t);return this.__onFinishChange&&this.__onFinishChange.call(this,this.getValue()),n},t.prototype.updateDisplay=function(){return d["default"].isActive(this.__select)?this:(this.__select.value=this.getValue(),e.prototype.updateDisplay.call(this))},t}(s["default"]);t["default"]=f,e.exports=t["default"]},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{"default":e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}t.__esModule=!0;var r=n(7),s=o(r),l=n(9),d=o(l),u=function(e){function t(n,o){function a(){s.setValue(s.__input.value)}function r(){s.__onFinishChange&&s.__onFinishChange.call(s,s.getValue())}i(this,t),e.call(this,n,o);var s=this;this.__input=document.createElement("input"),this.__input.setAttribute("type","text"),d["default"].bind(this.__input,"keyup",a),d["default"].bind(this.__input,"change",a),d["default"].bind(this.__input,"blur",r),d["default"].bind(this.__input,"keydown",function(e){13===e.keyCode&&this.blur()}),this.updateDisplay(),this.domElement.appendChild(this.__input)}return a(t,e),t.prototype.updateDisplay=function(){return d["default"].isActive(this.__input)||(this.__input.value=this.getValue()),e.prototype.updateDisplay.call(this)},t}(s["default"]);t["default"]=u,e.exports=t["default"]},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{"default":e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}function r(e){var t=e.toString();return t.indexOf(".")>-1?t.length-t.indexOf(".")-1:0}t.__esModule=!0;var s=n(7),l=o(s),d=n(5),u=o(d),c=function(e){function t(n,o,a){i(this,t),e.call(this,n,o);var 
s=a||{};this.__min=s.min,this.__max=s.max,this.__step=s.step,u["default"].isUndefined(this.__step)?0===this.initialValue?this.__impliedStep=1:this.__impliedStep=Math.pow(10,Math.floor(Math.log(Math.abs(this.initialValue))/Math.LN10))/10:this.__impliedStep=this.__step,this.__precision=r(this.__impliedStep)}return a(t,e),t.prototype.setValue=function(t){var n=t;return void 0!==this.__min&&n<this.__min?n=this.__min:void 0!==this.__max&&n>this.__max&&(n=this.__max),void 0!==this.__step&&n%this.__step!==0&&(n=Math.round(n/this.__step)*this.__step),e.prototype.setValue.call(this,n)},t.prototype.min=function(e){return this.__min=e,this},t.prototype.max=function(e){return this.__max=e,this},t.prototype.step=function(e){return this.__step=e,this.__impliedStep=e,this.__precision=r(e),this},t}(l["default"]);t["default"]=c,e.exports=t["default"]},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{"default":e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}function r(e,t){var n=Math.pow(10,t);return Math.round(e*n)/n}t.__esModule=!0;var s=n(12),l=o(s),d=n(9),u=o(d),c=n(5),f=o(c),h=function(e){function t(n,o,a){function r(){var e=parseFloat(h.__input.value);f["default"].isNaN(e)||h.setValue(e)}function s(){r(),h.__onFinishChange&&h.__onFinishChange.call(h,h.getValue())}function l(e){document.activeElement.blur();var t=_-e.clientY;h.setValue(h.getValue()+t*h.__impliedStep),_=e.clientY}function d(){u["default"].unbind(window,"mousemove",l),u["default"].unbind(window,"mouseup",d)}function c(e){u["default"].bind(window,"mousemove",l),u["default"].bind(window,"mouseup",d),_=e.clientY}i(this,t),e.call(this,n,o,a),this.__truncationSuspended=!1;var h=this,_=void 0;this.__input=document.createElement("input"),this.__input.setAttribute("type","text"),u["default"].bind(this.__input,"change",r),u["default"].bind(this.__input,"blur",s),u["default"].bind(this.__input,"mousedown",c),u["default"].bind(this.__input,"keydown",function(e){13===e.keyCode&&(h.__truncationSuspended=!0,this.blur(),h.__truncationSuspended=!1)}),this.updateDisplay(),this.domElement.appendChild(this.__input)}return a(t,e),t.prototype.updateDisplay=function(){return u["default"].isActive(this.__input)?this:(this.__input.value=this.__truncationSuspended?this.getValue():r(this.getValue(),this.__precision),e.prototype.updateDisplay.call(this))},t}(l["default"]);t["default"]=h,e.exports=t["default"]},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{"default":e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}function r(e,t,n,o,i){return o+(i-o)*((e-t)/(n-t))}t.__esModule=!0;var s=n(12),l=o(s),d=n(9),u=o(d),c=function(e){function t(n,o,a,s,l){function d(e){document.activeElement.blur(),u["default"].bind(window,"mousemove",c),u["default"].bind(window,"mouseup",f),c(e)}function c(e){e.preventDefault();var 
t=u["default"].getOffset(h.__background),n=u["default"].getWidth(h.__background);return h.setValue(r(e.clientX,t.left,t.left+n,h.__min,h.__max)),!1}function f(){u["default"].unbind(window,"mousemove",c),u["default"].unbind(window,"mouseup",f),h.__onFinishChange&&h.__onFinishChange.call(h,h.getValue())}i(this,t),e.call(this,n,o,{min:a,max:s,step:l});var h=this;this.__background=document.createElement("div"),this.__foreground=document.createElement("div"),u["default"].bind(this.__background,"mousedown",d),u["default"].addClass(this.__background,"slider"),u["default"].addClass(this.__foreground,"slider-fg"),this.updateDisplay(),this.__background.appendChild(this.__foreground),this.domElement.appendChild(this.__background)}return a(t,e),t.prototype.updateDisplay=function(){var t=(this.getValue()-this.__min)/(this.__max-this.__min);return this.__foreground.style.width=100*t+"%",e.prototype.updateDisplay.call(this)},t}(l["default"]);t["default"]=c,e.exports=t["default"]},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{"default":e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}t.__esModule=!0;var r=n(7),s=o(r),l=n(9),d=o(l),u=function(e){function t(n,o,a){i(this,t),e.call(this,n,o);var r=this;this.__button=document.createElement("div"),this.__button.innerHTML=void 0===a?"Fire":a,d["default"].bind(this.__button,"click",function(e){return e.preventDefault(),r.fire(),!1}),d["default"].addClass(this.__button,"button"),this.domElement.appendChild(this.__button)}return a(t,e),t.prototype.fire=function(){this.__onChange&&this.__onChange.call(this),this.getValue().call(this.object),this.__onFinishChange&&this.__onFinishChange.call(this,this.getValue())},t}(s["default"]);t["default"]=u,e.exports=t["default"]},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{"default":e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}function r(e,t,n,o){e.style.background="",b["default"].each(v,function(i){e.style.cssText+="background: "+i+"linear-gradient("+t+", "+n+" 0%, "+o+" 100%); "})}function s(e){e.style.background="",e.style.cssText+="background: -moz-linear-gradient(top, #ff0000 0%, #ff00ff 17%, #0000ff 34%, #00ffff 50%, #00ff00 67%, #ffff00 84%, #ff0000 100%);",e.style.cssText+="background: -webkit-linear-gradient(top, #ff0000 0%,#ff00ff 17%,#0000ff 34%,#00ffff 50%,#00ff00 67%,#ffff00 84%,#ff0000 100%);",e.style.cssText+="background: -o-linear-gradient(top, #ff0000 0%,#ff00ff 17%,#0000ff 34%,#00ffff 50%,#00ff00 67%,#ffff00 84%,#ff0000 100%);",e.style.cssText+="background: -ms-linear-gradient(top, #ff0000 0%,#ff00ff 17%,#0000ff 34%,#00ffff 50%,#00ff00 67%,#ffff00 84%,#ff0000 100%);",e.style.cssText+="background: linear-gradient(top, #ff0000 0%,#ff00ff 17%,#0000ff 34%,#00ffff 50%,#00ff00 67%,#ffff00 84%,#ff0000 100%);"}t.__esModule=!0;var 
l=n(7),d=o(l),u=n(9),c=o(u),f=n(2),h=o(f),_=n(3),p=o(_),m=n(5),b=o(m),g=function(e){function t(n,o){function a(e){_(e),c["default"].bind(window,"mousemove",_),c["default"].bind(window,"mouseup",l)}function l(){c["default"].unbind(window,"mousemove",_),c["default"].unbind(window,"mouseup",l),f()}function d(){var e=p["default"](this.value);e!==!1?(g.__color.__state=e,g.setValue(g.__color.toOriginal())):this.value=g.__color.toString()}function u(){c["default"].unbind(window,"mousemove",m),c["default"].unbind(window,"mouseup",u),f()}function f(){g.__onFinishChange&&g.__onFinishChange.call(g,g.__color.toString())}function _(e){e.preventDefault();var t=c["default"].getWidth(g.__saturation_field),n=c["default"].getOffset(g.__saturation_field),o=(e.clientX-n.left+document.body.scrollLeft)/t,i=1-(e.clientY-n.top+document.body.scrollTop)/t;return i>1?i=1:i<0&&(i=0),o>1?o=1:o<0&&(o=0),g.__color.v=i,g.__color.s=o,g.setValue(g.__color.toOriginal()),!1}function m(e){e.preventDefault();var t=c["default"].getHeight(g.__hue_field),n=c["default"].getOffset(g.__hue_field),o=1-(e.clientY-n.top+document.body.scrollTop)/t;return o>1?o=1:o<0&&(o=0),g.__color.h=360*o,g.setValue(g.__color.toOriginal()),!1}i(this,t),e.call(this,n,o),this.__color=new h["default"](this.getValue()),this.__temp=new h["default"](0);var g=this;this.domElement=document.createElement("div"),c["default"].makeSelectable(this.domElement,!1),this.__selector=document.createElement("div"),this.__selector.className="selector",this.__saturation_field=document.createElement("div"),this.__saturation_field.className="saturation-field",this.__field_knob=document.createElement("div"),this.__field_knob.className="field-knob",this.__field_knob_border="2px solid ",this.__hue_knob=document.createElement("div"),this.__hue_knob.className="hue-knob",this.__hue_field=document.createElement("div"),this.__hue_field.className="hue-field",this.__input=document.createElement("input"),this.__input.type="text",this.__input_textShadow="0 1px 1px ",c["default"].bind(this.__input,"keydown",function(e){13===e.keyCode&&d.call(this)}),c["default"].bind(this.__input,"blur",d),c["default"].bind(this.__selector,"mousedown",function(){c["default"].addClass(this,"drag").bind(window,"mouseup",function(){c["default"].removeClass(g.__selector,"drag")})});var v=document.createElement("div");b["default"].extend(this.__selector.style,{width:"122px",height:"102px",padding:"3px",backgroundColor:"#222",boxShadow:"0px 1px 3px rgba(0,0,0,0.3)"}),b["default"].extend(this.__field_knob.style,{position:"absolute",width:"12px",height:"12px",border:this.__field_knob_border+(this.__color.v<.5?"#fff":"#000"),boxShadow:"0px 1px 3px rgba(0,0,0,0.5)",borderRadius:"12px",zIndex:1}),b["default"].extend(this.__hue_knob.style,{position:"absolute",width:"15px",height:"2px",borderRight:"4px solid #fff",zIndex:1}),b["default"].extend(this.__saturation_field.style,{width:"100px",height:"100px",border:"1px solid #555",marginRight:"3px",display:"inline-block",cursor:"pointer"}),b["default"].extend(v.style,{width:"100%",height:"100%",background:"none"}),r(v,"top","rgba(0,0,0,0)","#000"),b["default"].extend(this.__hue_field.style,{width:"15px",height:"100px",border:"1px solid 
#555",cursor:"ns-resize",position:"absolute",top:"3px",right:"3px"}),s(this.__hue_field),b["default"].extend(this.__input.style,{outline:"none",textAlign:"center",color:"#fff",border:0,fontWeight:"bold",textShadow:this.__input_textShadow+"rgba(0,0,0,0.7)"}),c["default"].bind(this.__saturation_field,"mousedown",a),c["default"].bind(this.__field_knob,"mousedown",a),c["default"].bind(this.__hue_field,"mousedown",function(e){m(e),c["default"].bind(window,"mousemove",m),c["default"].bind(window,"mouseup",u)}),this.__saturation_field.appendChild(v),this.__selector.appendChild(this.__field_knob),this.__selector.appendChild(this.__saturation_field),this.__selector.appendChild(this.__hue_field),this.__hue_field.appendChild(this.__hue_knob),this.domElement.appendChild(this.__input),this.domElement.appendChild(this.__selector),this.updateDisplay()}return a(t,e),t.prototype.updateDisplay=function(){var e=p["default"](this.getValue());if(e!==!1){var t=!1;b["default"].each(h["default"].COMPONENTS,function(n){if(!b["default"].isUndefined(e[n])&&!b["default"].isUndefined(this.__color.__state[n])&&e[n]!==this.__color.__state[n])return t=!0,{}},this),t&&b["default"].extend(this.__color.__state,e)}b["default"].extend(this.__temp.__state,this.__color.__state),this.__temp.a=1;var n=this.__color.v<.5||this.__color.s>.5?255:0,o=255-n;b["default"].extend(this.__field_knob.style,{marginLeft:100*this.__color.s-7+"px",marginTop:100*(1-this.__color.v)-7+"px",backgroundColor:this.__temp.toString(),border:this.__field_knob_border+"rgb("+n+","+n+","+n+")"}),this.__hue_knob.style.marginTop=100*(1-this.__color.h/360)+"px",this.__temp.s=1,this.__temp.v=1,r(this.__saturation_field,"left","#fff",this.__temp.toString()),b["default"].extend(this.__input.style,{backgroundColor:this.__input.value=this.__color.toString(),color:"rgb("+n+","+n+","+n+")",textShadow:this.__input_textShadow+"rgba("+o+","+o+","+o+",.7)"})},t}(d["default"]),v=["-moz-","-o-","-webkit-","-ms-",""];t["default"]=g,e.exports=t["default"]},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{"default":e}}function i(e,t,n){var o=document.createElement("li");return t&&o.appendChild(t),n?e.__ul.insertBefore(o,n):e.__ul.appendChild(o),e.onResize(),o}function a(e,t){var n=e.__preset_select[e.__preset_select.selectedIndex];t?n.innerHTML=n.value+"*":n.innerHTML=n.value}function r(e,t,n){if(n.__li=t,n.__gui=e,U["default"].extend(n,{options:function(t){if(arguments.length>1){var o=n.__li.nextElementSibling;return n.remove(),l(e,n.object,n.property,{before:o,factoryArgs:[U["default"].toArray(arguments)]})}if(U["default"].isArray(t)||U["default"].isObject(t)){var o=n.__li.nextElementSibling;return n.remove(),l(e,n.object,n.property,{before:o,factoryArgs:[t]})}},name:function(e){return n.__li.firstElementChild.firstElementChild.innerHTML=e,n},listen:function(){return n.__gui.listen(n),n},remove:function(){return n.__gui.remove(n),n}}),n instanceof R["default"])!function(){var e=new N["default"](n.object,n.property,{min:n.__min,max:n.__max,step:n.__step});U["default"].each(["updateDisplay","onChange","onFinishChange","step"],function(t){var o=n[t],i=e[t];n[t]=e[t]=function(){var t=Array.prototype.slice.call(arguments);return i.apply(e,t),o.apply(n,t)}}),I["default"].addClass(t,"has-slider"),n.domElement.insertBefore(e.domElement,n.domElement.firstElementChild)}();else if(n instanceof N["default"]){var o=function(t){return 
U["default"].isNumber(n.__min)&&U["default"].isNumber(n.__max)?(n.remove(),l(e,n.object,n.property,{before:n.__li.nextElementSibling,factoryArgs:[n.__min,n.__max,n.__step]})):t};n.min=U["default"].compose(o,n.min),n.max=U["default"].compose(o,n.max)}else n instanceof S["default"]?(I["default"].bind(t,"click",function(){I["default"].fakeEvent(n.__checkbox,"click")}),I["default"].bind(n.__checkbox,"click",function(e){e.stopPropagation()})):n instanceof T["default"]?(I["default"].bind(t,"click",function(){I["default"].fakeEvent(n.__button,"click")}),I["default"].bind(t,"mouseover",function(){I["default"].addClass(n.__button,"hover")}),I["default"].bind(t,"mouseout",function(){I["default"].removeClass(n.__button,"hover")})):n instanceof j["default"]&&(I["default"].addClass(t,"color"),n.updateDisplay=U["default"].compose(function(e){return t.style.borderLeftColor=n.__color.toString(),
e},n.updateDisplay),n.updateDisplay());n.setValue=U["default"].compose(function(t){return e.getRoot().__preset_select&&n.isModified()&&a(e.getRoot(),!0),t},n.setValue)}function s(e,t){var n=e.getRoot(),o=n.__rememberedObjects.indexOf(t.object);if(o!==-1){var i=n.__rememberedObjectIndecesToControllers[o];if(void 0===i&&(i={},n.__rememberedObjectIndecesToControllers[o]=i),i[t.property]=t,n.load&&n.load.remembered){var a=n.load.remembered,r=void 0;if(a[e.preset])r=a[e.preset];else{if(!a[Q])return;r=a[Q]}if(r[o]&&void 0!==r[o][t.property]){var s=r[o][t.property];t.initialValue=s,t.setValue(s)}}}}function l(e,t,n,o){if(void 0===t[n])throw new Error('Object "'+t+'" has no property "'+n+'"');var a=void 0;if(o.color)a=new j["default"](t,n);else{var l=[t,n].concat(o.factoryArgs);a=E["default"].apply(e,l)}o.before instanceof A["default"]&&(o.before=o.before.__li),s(e,a),I["default"].addClass(a.domElement,"c");var d=document.createElement("span");I["default"].addClass(d,"property-name"),d.innerHTML=a.property;var u=document.createElement("div");u.appendChild(d),u.appendChild(a.domElement);var c=i(e,u,o.before);return I["default"].addClass(c,ne.CLASS_CONTROLLER_ROW),a instanceof j["default"]?I["default"].addClass(c,"color"):I["default"].addClass(c,typeof a.getValue()),r(e,c,a),e.__controllers.push(a),a}function d(e,t){return document.location.href+"."+t}function u(e,t,n){var o=document.createElement("option");o.innerHTML=t,o.value=t,e.__preset_select.appendChild(o),n&&(e.__preset_select.selectedIndex=e.__preset_select.length-1)}function c(e,t){t.style.display=e.useLocalStorage?"block":"none"}function f(e){var t=e.__save_row=document.createElement("li");I["default"].addClass(e.domElement,"has-save"),e.__ul.insertBefore(t,e.__ul.firstChild),I["default"].addClass(t,"save-row");var n=document.createElement("span");n.innerHTML=" ",I["default"].addClass(n,"button gears");var o=document.createElement("span");o.innerHTML="Save",I["default"].addClass(o,"button"),I["default"].addClass(o,"save");var i=document.createElement("span");i.innerHTML="New",I["default"].addClass(i,"button"),I["default"].addClass(i,"save-as");var a=document.createElement("span");a.innerHTML="Revert",I["default"].addClass(a,"button"),I["default"].addClass(a,"revert");var r=e.__preset_select=document.createElement("select");e.load&&e.load.remembered?U["default"].each(e.load.remembered,function(t,n){u(e,n,n===e.preset)}):u(e,Q,!1),I["default"].bind(r,"change",function(){for(var t=0;t<e.__preset_select.length;t++)e.__preset_select[t].innerHTML=e.__preset_select[t].value;e.preset=this.value}),t.appendChild(r),t.appendChild(n),t.appendChild(o),t.appendChild(i),t.appendChild(a),J&&!function(){var t=document.getElementById("dg-local-explain"),n=document.getElementById("dg-local-storage"),o=document.getElementById("dg-save-locally");o.style.display="block","true"===localStorage.getItem(d(e,"isLocal"))&&n.setAttribute("checked","checked"),c(e,t),I["default"].bind(n,"change",function(){e.useLocalStorage=!e.useLocalStorage,c(e,t)})}();var s=document.getElementById("dg-new-constructor");I["default"].bind(s,"keydown",function(e){!e.metaKey||67!==e.which&&67!==e.keyCode||q.hide()}),I["default"].bind(n,"click",function(){s.innerHTML=JSON.stringify(e.getSaveObject(),void 0,2),q.show(),s.focus(),s.select()}),I["default"].bind(o,"click",function(){e.save()}),I["default"].bind(i,"click",function(){var t=prompt("Enter a new preset name.");t&&e.saveAs(t)}),I["default"].bind(a,"click",function(){e.revert()})}function h(e){function t(t){return 
t.preventDefault(),e.width+=i-t.clientX,e.onResize(),i=t.clientX,!1}function n(){I["default"].removeClass(e.__closeButton,ne.CLASS_DRAG),I["default"].unbind(window,"mousemove",t),I["default"].unbind(window,"mouseup",n)}function o(o){return o.preventDefault(),i=o.clientX,I["default"].addClass(e.__closeButton,ne.CLASS_DRAG),I["default"].bind(window,"mousemove",t),I["default"].bind(window,"mouseup",n),!1}var i=void 0;e.__resize_handle=document.createElement("div"),U["default"].extend(e.__resize_handle.style,{width:"6px",marginLeft:"-3px",height:"200px",cursor:"ew-resize",position:"absolute"}),I["default"].bind(e.__resize_handle,"mousedown",o),I["default"].bind(e.__closeButton,"mousedown",o),e.domElement.insertBefore(e.__resize_handle,e.domElement.firstElementChild)}function _(e,t){e.domElement.style.width=t+"px",e.__save_row&&e.autoPlace&&(e.__save_row.style.width=t+"px"),e.__closeButton&&(e.__closeButton.style.width=t+"px")}function p(e,t){var n={};return U["default"].each(e.__rememberedObjects,function(o,i){var a={},r=e.__rememberedObjectIndecesToControllers[i];U["default"].each(r,function(e,n){a[n]=t?e.initialValue:e.getValue()}),n[i]=a}),n}function m(e){for(var t=0;t<e.__preset_select.length;t++)e.__preset_select[t].value===e.preset&&(e.__preset_select.selectedIndex=t)}function b(e){0!==e.length&&P["default"].call(window,function(){b(e)}),U["default"].each(e,function(e){e.updateDisplay()})}var g=n(18),v=o(g),y=n(19),w=o(y),x=n(20),E=o(x),C=n(7),A=o(C),k=n(8),S=o(k),O=n(15),T=o(O),L=n(13),N=o(L),M=n(14),R=o(M),B=n(16),j=o(B),H=n(21),P=o(H),D=n(22),V=o(D),F=n(9),I=o(F),z=n(5),U=o(z),G=n(23),X=o(G);v["default"].inject(X["default"]);var K="dg",W=72,Y=20,Q="Default",J=function(){try{return"localStorage"in window&&null!==window.localStorage}catch(e){return!1}}(),q=void 0,Z=!0,$=void 0,ee=!1,te=[],ne=function oe(e){function t(){var e=n.getRoot();e.width+=1,U["default"].defer(function(){e.width-=1})}var n=this,o=e||{};this.domElement=document.createElement("div"),this.__ul=document.createElement("ul"),this.domElement.appendChild(this.__ul),I["default"].addClass(this.domElement,K),this.__folders={},this.__controllers=[],this.__rememberedObjects=[],this.__rememberedObjectIndecesToControllers=[],this.__listening=[],o=U["default"].defaults(o,{autoPlace:!0,width:oe.DEFAULT_WIDTH}),o=U["default"].defaults(o,{resizable:o.autoPlace,hideable:o.autoPlace}),U["default"].isUndefined(o.load)?o.load={preset:Q}:o.preset&&(o.load.preset=o.preset),U["default"].isUndefined(o.parent)&&o.hideable&&te.push(this),o.resizable=U["default"].isUndefined(o.parent)&&o.resizable,o.autoPlace&&U["default"].isUndefined(o.scrollable)&&(o.scrollable=!0);var a=J&&"true"===localStorage.getItem(d(this,"isLocal")),r=void 0;if(Object.defineProperties(this,{parent:{get:function(){return o.parent}},scrollable:{get:function(){return o.scrollable}},autoPlace:{get:function(){return o.autoPlace}},preset:{get:function(){return n.parent?n.getRoot().preset:o.load.preset},set:function(e){n.parent?n.getRoot().preset=e:o.load.preset=e,m(this),n.revert()}},width:{get:function(){return o.width},set:function(e){o.width=e,_(n,e)}},name:{get:function(){return o.name},set:function(e){o.name=e,titleRowName&&(titleRowName.innerHTML=o.name)}},closed:{get:function(){return o.closed},set:function(e){o.closed=e,o.closed?I["default"].addClass(n.__ul,oe.CLASS_CLOSED):I["default"].removeClass(n.__ul,oe.CLASS_CLOSED),this.onResize(),n.__closeButton&&(n.__closeButton.innerHTML=e?oe.TEXT_OPEN:oe.TEXT_CLOSED)}},load:{get:function(){return 
o.load}},useLocalStorage:{get:function(){return a},set:function(e){J&&(a=e,e?I["default"].bind(window,"unload",r):I["default"].unbind(window,"unload",r),localStorage.setItem(d(n,"isLocal"),e))}}}),U["default"].isUndefined(o.parent)){if(o.closed=!1,I["default"].addClass(this.domElement,oe.CLASS_MAIN),I["default"].makeSelectable(this.domElement,!1),J&&a){n.useLocalStorage=!0;var s=localStorage.getItem(d(this,"gui"));s&&(o.load=JSON.parse(s))}this.__closeButton=document.createElement("div"),this.__closeButton.innerHTML=oe.TEXT_CLOSED,I["default"].addClass(this.__closeButton,oe.CLASS_CLOSE_BUTTON),this.domElement.appendChild(this.__closeButton),I["default"].bind(this.__closeButton,"click",function(){n.closed=!n.closed})}else{void 0===o.closed&&(o.closed=!0);var l=document.createTextNode(o.name);I["default"].addClass(l,"controller-name");var u=i(n,l),c=function(e){return e.preventDefault(),n.closed=!n.closed,!1};I["default"].addClass(this.__ul,oe.CLASS_CLOSED),I["default"].addClass(u,"title"),I["default"].bind(u,"click",c),o.closed||(this.closed=!1)}o.autoPlace&&(U["default"].isUndefined(o.parent)&&(Z&&($=document.createElement("div"),I["default"].addClass($,K),I["default"].addClass($,oe.CLASS_AUTO_PLACE_CONTAINER),document.body.appendChild($),Z=!1),$.appendChild(this.domElement),I["default"].addClass(this.domElement,oe.CLASS_AUTO_PLACE)),this.parent||_(n,o.width)),this.__resizeHandler=function(){n.onResize()},I["default"].bind(window,"resize",this.__resizeHandler),I["default"].bind(this.__ul,"webkitTransitionEnd",this.__resizeHandler),I["default"].bind(this.__ul,"transitionend",this.__resizeHandler),I["default"].bind(this.__ul,"oTransitionEnd",this.__resizeHandler),this.onResize(),o.resizable&&h(this),r=function(){J&&"true"===localStorage.getItem(d(n,"isLocal"))&&localStorage.setItem(d(n,"gui"),JSON.stringify(n.getSaveObject()))},this.saveToLocalStorageIfPossible=r,o.parent||t()};ne.toggleHide=function(){ee=!ee,U["default"].each(te,function(e){e.domElement.style.display=ee?"none":""})},ne.CLASS_AUTO_PLACE="a",ne.CLASS_AUTO_PLACE_CONTAINER="ac",ne.CLASS_MAIN="main",ne.CLASS_CONTROLLER_ROW="cr",ne.CLASS_TOO_TALL="taller-than-window",ne.CLASS_CLOSED="closed",ne.CLASS_CLOSE_BUTTON="close-button",ne.CLASS_DRAG="drag",ne.DEFAULT_WIDTH=245,ne.TEXT_CLOSED="Close Controls",ne.TEXT_OPEN="Open Controls",ne._keydownHandler=function(e){"text"===document.activeElement.type||e.which!==W&&e.keyCode!==W||ne.toggleHide()},I["default"].bind(window,"keydown",ne._keydownHandler,!1),U["default"].extend(ne.prototype,{add:function(e,t){return l(this,e,t,{factoryArgs:Array.prototype.slice.call(arguments,2)})},addColor:function(e,t){return l(this,e,t,{color:!0})},remove:function(e){this.__ul.removeChild(e.__li),this.__controllers.splice(this.__controllers.indexOf(e),1);var t=this;U["default"].defer(function(){t.onResize()})},destroy:function(){this.autoPlace&&$.removeChild(this.domElement),I["default"].unbind(window,"keydown",ne._keydownHandler,!1),I["default"].unbind(window,"resize",this.__resizeHandler),this.saveToLocalStorageIfPossible&&I["default"].unbind(window,"unload",this.saveToLocalStorageIfPossible)},addFolder:function(e){if(void 0!==this.__folders[e])throw new Error('You already have a folder in this GUI by the name "'+e+'"');var t={name:e,parent:this};t.autoPlace=this.autoPlace,this.load&&this.load.folders&&this.load.folders[e]&&(t.closed=this.load.folders[e].closed,t.load=this.load.folders[e]);var n=new ne(t);this.__folders[e]=n;var o=i(this,n.domElement);return 
I["default"].addClass(o,"folder"),n},open:function(){this.closed=!1},close:function(){this.closed=!0},onResize:U["default"].debounce(function(){var e=this.getRoot();if(e.scrollable){var t=I["default"].getOffset(e.__ul).top,n=0;U["default"].each(e.__ul.childNodes,function(t){e.autoPlace&&t===e.__save_row||(n+=I["default"].getHeight(t))}),window.innerHeight-t-Y<n?(I["default"].addClass(e.domElement,ne.CLASS_TOO_TALL),e.__ul.style.height=window.innerHeight-t-Y+"px"):(I["default"].removeClass(e.domElement,ne.CLASS_TOO_TALL),e.__ul.style.height="auto")}e.__resize_handle&&U["default"].defer(function(){e.__resize_handle.style.height=e.__ul.offsetHeight+"px"}),e.__closeButton&&(e.__closeButton.style.width=e.width+"px")},200),remember:function(){if(U["default"].isUndefined(q)&&(q=new V["default"],q.domElement.innerHTML=w["default"]),this.parent)throw new Error("You can only call remember on a top level GUI.");var e=this;U["default"].each(Array.prototype.slice.call(arguments),function(t){0===e.__rememberedObjects.length&&f(e),e.__rememberedObjects.indexOf(t)===-1&&e.__rememberedObjects.push(t)}),this.autoPlace&&_(this,this.width)},getRoot:function(){for(var e=this;e.parent;)e=e.parent;return e},getSaveObject:function(){var e=this.load;return e.closed=this.closed,this.__rememberedObjects.length>0&&(e.preset=this.preset,e.remembered||(e.remembered={}),e.remembered[this.preset]=p(this)),e.folders={},U["default"].each(this.__folders,function(t,n){e.folders[n]=t.getSaveObject()}),e},save:function(){this.load.remembered||(this.load.remembered={}),this.load.remembered[this.preset]=p(this),a(this,!1),this.saveToLocalStorageIfPossible()},saveAs:function(e){this.load.remembered||(this.load.remembered={},this.load.remembered[Q]=p(this,!0)),this.load.remembered[e]=p(this),this.preset=e,u(this,e,!0),this.saveToLocalStorageIfPossible()},revert:function(e){U["default"].each(this.__controllers,function(t){this.getRoot().load.remembered?s(e||this.getRoot(),t):t.setValue(t.initialValue),t.__onFinishChange&&t.__onFinishChange.call(t,t.getValue())},this),U["default"].each(this.__folders,function(e){e.revert(e)}),e||a(this.getRoot(),!1)},listen:function(e){var t=0===this.__listening.length;this.__listening.push(e),t&&b(this.__listening)},updateDisplay:function(){U["default"].each(this.__controllers,function(e){e.updateDisplay()}),U["default"].each(this.__folders,function(e){e.updateDisplay()})}}),e.exports=ne},function(e,t){"use strict";e.exports={load:function(e,t){var n=t||document,o=n.createElement("link");o.type="text/css",o.rel="stylesheet",o.href=e,n.getElementsByTagName("head")[0].appendChild(o)},inject:function(e,t){var n=t||document,o=document.createElement("style");o.type="text/css",o.innerHTML=e;var i=n.getElementsByTagName("head")[0];try{i.appendChild(o)}catch(a){}}}},function(e,t){e.exports='<div id=dg-save class="dg dialogue">Here\'s the new load parameter for your <code>GUI</code>\'s constructor:<textarea id=dg-new-constructor></textarea><div id=dg-save-locally><input id=dg-local-storage type="checkbox"> Automatically save values to <code>localStorage</code> on exit.<div id=dg-local-explain>The values saved to <code>localStorage</code> will override those passed to <code>dat.GUI</code>\'s constructor. 
This makes it easier to work incrementally, but <code>localStorage</code> is fragile, and your friends may not see the same values you do.</div></div></div>'},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{"default":e}}t.__esModule=!0;var i=n(10),a=o(i),r=n(13),s=o(r),l=n(14),d=o(l),u=n(11),c=o(u),f=n(15),h=o(f),_=n(8),p=o(_),m=n(5),b=o(m),g=function(e,t){var n=e[t];return b["default"].isArray(arguments[2])||b["default"].isObject(arguments[2])?new a["default"](e,t,arguments[2]):b["default"].isNumber(n)?b["default"].isNumber(arguments[2])&&b["default"].isNumber(arguments[3])?b["default"].isNumber(arguments[4])?new d["default"](e,t,arguments[2],arguments[3],arguments[4]):new d["default"](e,t,arguments[2],arguments[3]):b["default"].isNumber(arguments[4])?new s["default"](e,t,{min:arguments[2],max:arguments[3],step:arguments[4]}):new s["default"](e,t,{min:arguments[2],max:arguments[3]}):b["default"].isString(n)?new c["default"](e,t):b["default"].isFunction(n)?new h["default"](e,t,""):b["default"].isBoolean(n)?new p["default"](e,t):null};t["default"]=g,e.exports=t["default"]},function(e,t){"use strict";function n(e){setTimeout(e,1e3/60)}t.__esModule=!0,t["default"]=window.requestAnimationFrame||window.webkitRequestAnimationFrame||window.mozRequestAnimationFrame||window.oRequestAnimationFrame||window.msRequestAnimationFrame||n,e.exports=t["default"]},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{"default":e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}t.__esModule=!0;var a=n(9),r=o(a),s=n(5),l=o(s),d=function(){function e(){i(this,e),this.backgroundElement=document.createElement("div"),l["default"].extend(this.backgroundElement.style,{backgroundColor:"rgba(0,0,0,0.8)",top:0,left:0,display:"none",zIndex:"1000",opacity:0,WebkitTransition:"opacity 0.2s linear",transition:"opacity 0.2s linear"}),r["default"].makeFullscreen(this.backgroundElement),this.backgroundElement.style.position="fixed",this.domElement=document.createElement("div"),l["default"].extend(this.domElement.style,{position:"fixed",display:"none",zIndex:"1001",opacity:0,WebkitTransition:"-webkit-transform 0.2s ease-out, opacity 0.2s linear",transition:"transform 0.2s ease-out, opacity 0.2s linear"}),document.body.appendChild(this.backgroundElement),document.body.appendChild(this.domElement);var t=this;r["default"].bind(this.backgroundElement,"click",function(){t.hide()})}return e.prototype.show=function(){var e=this;this.backgroundElement.style.display="block",this.domElement.style.display="block",this.domElement.style.opacity=0,this.domElement.style.webkitTransform="scale(1.1)",this.layout(),l["default"].defer(function(){e.backgroundElement.style.opacity=1,e.domElement.style.opacity=1,e.domElement.style.webkitTransform="scale(1)"})},e.prototype.hide=function t(){var e=this,t=function 
n(){e.domElement.style.display="none",e.backgroundElement.style.display="none",r["default"].unbind(e.domElement,"webkitTransitionEnd",n),r["default"].unbind(e.domElement,"transitionend",n),r["default"].unbind(e.domElement,"oTransitionEnd",n)};r["default"].bind(this.domElement,"webkitTransitionEnd",t),r["default"].bind(this.domElement,"transitionend",t),r["default"].bind(this.domElement,"oTransitionEnd",t),this.backgroundElement.style.opacity=0,this.domElement.style.opacity=0,this.domElement.style.webkitTransform="scale(1.1)"},e.prototype.layout=function(){this.domElement.style.left=window.innerWidth/2-r["default"].getWidth(this.domElement)/2+"px",this.domElement.style.top=window.innerHeight/2-r["default"].getHeight(this.domElement)/2+"px"},e}();t["default"]=d,e.exports=t["default"]},function(e,t,n){t=e.exports=n(24)(),t.push([e.id,".dg ul{list-style:none;margin:0;padding:0;width:100%;clear:both}.dg.ac{position:fixed;top:0;left:0;right:0;height:0;z-index:0}.dg:not(.ac) .main{overflow:hidden}.dg.main{-webkit-transition:opacity .1s linear;transition:opacity .1s linear}.dg.main.taller-than-window{overflow-y:auto}.dg.main.taller-than-window .close-button{opacity:1;margin-top:-1px;border-top:1px solid #2c2c2c}.dg.main ul.closed .close-button{opacity:1!important}.dg.main .close-button.drag,.dg.main:hover .close-button{opacity:1}.dg.main .close-button{-webkit-transition:opacity .1s linear;transition:opacity .1s linear;border:0;position:absolute;line-height:19px;height:20px;cursor:pointer;text-align:center;background-color:#000}.dg.main .close-button:hover{background-color:#111}.dg.a{float:right;margin-right:15px;overflow-x:hidden}.dg.a.has-save>ul{margin-top:27px}.dg.a.has-save>ul.closed{margin-top:0}.dg.a .save-row{position:fixed;top:0;z-index:1002}.dg li{-webkit-transition:height .1s ease-out;transition:height .1s ease-out}.dg li:not(.folder){cursor:auto;height:27px;line-height:27px;overflow:hidden;padding:0 4px 0 5px}.dg li.folder{padding:0;border-left:4px solid transparent}.dg li.title{cursor:pointer;margin-left:-4px}.dg .closed li:not(.title),.dg .closed ul li,.dg .closed ul li>*{height:0;overflow:hidden;border:0}.dg .cr{clear:both;padding-left:3px;height:27px}.dg .property-name{cursor:default;float:left;clear:left;width:40%;overflow:hidden;text-overflow:ellipsis}.dg .c{float:left;width:60%}.dg .c input[type=text]{border:0;margin-top:4px;padding:3px;width:100%;float:right}.dg .has-slider input[type=text]{width:30%;margin-left:0}.dg .slider{float:left;width:66%;margin-left:-5px;margin-right:0;height:19px;margin-top:4px}.dg .slider-fg{height:100%}.dg .c input[type=checkbox]{margin-top:9px}.dg .c select{margin-top:5px}.dg .cr.boolean,.dg .cr.boolean *,.dg .cr.function,.dg .cr.function *,.dg .cr.function .property-name{cursor:pointer}.dg .selector{display:none;position:absolute;margin-left:-9px;margin-top:23px;z-index:10}.dg .c:hover .selector,.dg .selector.drag{display:block}.dg li.save-row{padding:0}.dg li.save-row .button{display:inline-block;padding:0 6px}.dg.dialogue{background-color:#222;width:460px;padding:15px;font-size:13px;line-height:15px}#dg-new-constructor{padding:10px;color:#222;font-family:Monaco,monospace;font-size:10px;border:0;resize:none;box-shadow:inset 1px 1px 1px #888;word-wrap:break-word;margin:9pt 0;display:block;width:440px;overflow-y:scroll;height:75pt;position:relative}#dg-local-explain{display:none;font-size:11px;line-height:17px;border-radius:3px;background-color:#333;padding:8px;margin-top:10px}#dg-local-explain 
code{font-size:10px}#dat-gui-save-locally{display:none}.dg{color:#eee;font:11px 'Lucida Grande',sans-serif;text-shadow:0 -1px 0 #111}.dg.main::-webkit-scrollbar{width:5px;background:#1a1a1a}.dg.main::-webkit-scrollbar-corner{height:0;display:none}.dg.main::-webkit-scrollbar-thumb{border-radius:5px;background:#676767}.dg li:not(.folder){background:#1a1a1a;border-bottom:1px solid #2c2c2c}.dg li.save-row{line-height:25px;background:#dad5cb;border:0}.dg li.save-row select{margin-left:5px;width:81pt}.dg li.save-row .button{margin-left:5px;margin-top:1px;border-radius:2px;font-size:9px;line-height:7px;padding:4px 4px 5px;background:#c5bdad;color:#fff;text-shadow:0 1px 0 #b0a58f;box-shadow:0 -1px 0 #b0a58f;cursor:pointer}.dg li.save-row .button.gears{background:#c5bdad url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAsAAAANCAYAAAB/9ZQ7AAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAQJJREFUeNpiYKAU/P//PwGIC/ApCABiBSAW+I8AClAcgKxQ4T9hoMAEUrxx2QSGN6+egDX+/vWT4e7N82AMYoPAx/evwWoYoSYbACX2s7KxCxzcsezDh3evFoDEBYTEEqycggWAzA9AuUSQQgeYPa9fPv6/YWm/Acx5IPb7ty/fw+QZblw67vDs8R0YHyQhgObx+yAJkBqmG5dPPDh1aPOGR/eugW0G4vlIoTIfyFcA+QekhhHJhPdQxbiAIguMBTQZrPD7108M6roWYDFQiIAAv6Aow/1bFwXgis+f2LUAynwoIaNcz8XNx3Dl7MEJUDGQpx9gtQ8YCueB+D26OECAAQDadt7e46D42QAAAABJRU5ErkJggg==) 2px 1px no-repeat;height:7px;width:8px}.dg li.save-row .button:hover{background-color:#bab19e;box-shadow:0 -1px 0 #b0a58f}.dg li.folder{border-bottom:0}.dg li.title{padding-left:1pc;background:#000 url(data:image/gif;base64,R0lGODlhBQAFAJEAAP////Pz8////////yH5BAEAAAIALAAAAAAFAAUAAAIIlI+hKgFxoCgAOw==) 6px 10px no-repeat;cursor:pointer;border-bottom:1px solid hsla(0,0%,100%,.2)}.dg .closed li.title{background-image:url(data:image/gif;base64,R0lGODlhBQAFAJEAAP////Pz8////////yH5BAEAAAIALAAAAAAFAAUAAAIIlGIWqMCbWAEAOw==)}.dg .cr.boolean{border-left:3px solid #806787}.dg .cr.color{border-left:3px solid}.dg .cr.function{border-left:3px solid #e61d5f}.dg .cr.number{border-left:3px solid #2fa1d6}.dg .cr.number input[type=text]{color:#2fa1d6}.dg .cr.string{border-left:3px solid #1ed36f}.dg .cr.string input[type=text]{color:#1ed36f}.dg .cr.boolean:hover,.dg .cr.function:hover{background:#111}.dg .c input[type=text]{background:#303030;outline:0}.dg .c input[type=text]:hover{background:#3c3c3c}.dg .c input[type=text]:focus{background:#494949;color:#fff}.dg .c .slider{background:#303030;cursor:ew-resize}.dg .c .slider-fg{background:#2fa1d6;max-width:100%}.dg .c .slider:hover{background:#3c3c3c}.dg .c .slider:hover .slider-fg{background:#44abda}",""])},function(e,t){e.exports=function(){var e=[];return e.toString=function(){for(var e=[],t=0;t<this.length;t++){var n=this[t];n[2]?e.push("@media "+n[2]+"{"+n[1]+"}"):e.push(n[1])}return e.join("")},e.i=function(t,n){"string"==typeof t&&(t=[[null,t,""]]);for(var o={},i=0;i<this.length;i++){var a=this[i][0];"number"==typeof a&&(o[a]=!0)}for(i=0;i<t.length;i++){var r=t[i];"number"==typeof r[0]&&o[r[0]]||(n&&!r[2]?r[2]=n:n&&(r[2]="("+r[2]+") and ("+n+")"),e.push(r))}},e}}])});
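
The file above is the stock minified dat.GUI build; even minified, its public surface is visible: a dat.GUI constructor plus controller methods such as add, addColor, addFolder, step, onChange, and onFinishChange. As a minimal illustrative sketch of how a viewer page could wire it to playback settings (the settings object and its fields are hypothetical, not part of this commit):

    // Hypothetical wiring for the bundled dat.GUI build above.
    var settings = { playing: false, speed: 1.0, skeletonColor: '#2fa1d6' };
    var gui = new dat.GUI();
    gui.add(settings, 'playing').onChange(function(v) { /* start or stop the clip */ });
    gui.add(settings, 'speed', 0.1, 3.0).step(0.1);  // numeric slider with min/max
    gui.addColor(settings, 'skeletonColor');         // color picker controller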
dataloaders/pymo/mocapplayer/libs/threejs/three.min.js
ADDED
The diff for this file is too large to render.
See raw diff
dataloaders/pymo/mocapplayer/playBuffer.html
ADDED
@@ -0,0 +1,418 @@
<!DOCTYPE html>
<!-- GrooveNet Player - V 0.3
     By Omid Alemi
-->
<html lang="en">

<head>
    <title>BVH Player</title>
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0">
    <style>
        body {
            margin: 0;
            overflow: hidden;
        }

        #metaoverlay {
            width: 600px;
            height: 150px;
            background-color: rgba(10, 10, 10, 0.5);
            position: fixed;
            bottom: 30px;
            margin: 0 auto;
            left: 20px;
            display: none;
            border-radius: 10px;
        }

        #labels {
            font-family: sans-serif;
            width: 300px;
            height: 50%;
            color: #e1e1e1;
            line-height: 1.5;
            /* border: 1px solid #f1f1f1; */
            display: flex;
            margin: 20px;
            padding: 10px;
        }

        .bar {
            fill: rgba(80, 180, 240, 0.99);
        }

        svg {
            /* border: 1px solid red; */
        }
    </style>
    <link rel="stylesheet" href="styles/pace.css">

    <script src="https://d3js.org/d3.v4.min.js"></script>

    <script src="libs/pace.min.js"></script>
    <script src="libs/math.min.js"></script>
    <script src="libs/jquery.min.js"></script>
    <script src="libs/threejs/three.min.js"></script>
    <script src="libs/threejs/OrbitControls.js"></script>
    <script src="libs/papaparse.min.js"></script>

    <script src="libs/mocapjs.js"></script>
    <script src="js/skeletonFactory.js"></script>
</head>

<body>
    <div id="metaoverlay">
        <div id="labels"></div>
        <div id="labelsChart"></div>
    </div>

    <script type="text/javascript">
        var characters = [];
        var playing = false;
        var showMeta = false;
        var metadata = [];
        var chart = {};

        // Spacebar (charCode 32) toggles playback for every loaded character.
        $(document).on("keypress", function(e) {
            if (e.charCode == 32)
                playing = !playing;

            characters.forEach(function(c) {
                c.playing = playing;

                if (playing)
                    c.animStartTimeRef = Date.now();
                else
                    c.animOffset = c.animIndex;
            });
        });

        $(window).on("resize", function(e) {
            camera.aspect = window.innerWidth / window.innerHeight;
            camera.updateProjectionMatrix();

            renderer.setSize(window.innerWidth, window.innerHeight);
        });

        // Returns the value of a query-string parameter, or true when the
        // parameter is present without a value.
        var getUrlParameter = function getUrlParameter(sParam) {
            var sPageURL = decodeURIComponent(window.location.search.substring(1)),
                sURLVariables = sPageURL.split('&'),
                sParameterName,
                i;

            for (i = 0; i < sURLVariables.length; i++) {
                sParameterName = sURLVariables[i].split('=');
                if (sParameterName[0] === sParam) {
                    return sParameterName[1] === undefined ? true : sParameterName[1];
                }
            }
        };
    </script>
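    <!--
      getUrlParameter above hand-parses window.location.search for older-browser
      compatibility; on current browsers the standard URLSearchParams API does the
      same lookup in one line. Illustrative only (getParam is a hypothetical
      helper, not part of this file):

          function getParam(name) {
              // null when absent, '' when present without a value
              return new URLSearchParams(window.location.search).get(name);
          }
    -->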

    <script>
        var scene, camera, renderer;
        var geometry, material, mesh;
        var played_count = 0;

        function set_the_scene3() {

            // Add the light
            light = new THREE.PointLight(0xffffff, 0.9, 0);
            light.position.set(0, 160, 40);
            scene.add(light);

            // Create a grid for the floor
            var size = 200,
                step = 20;

            // Draw the plane
            var planeGeometry = new THREE.PlaneGeometry(size * 2, size * 2);
            var planeMaterial = new THREE.MeshPhongMaterial({
                color: 0x444444,
                emissive: 0x000000,
                specular: 0x111111,
                side: THREE.DoubleSide,
                transparent: true,
                opacity: 0.6
            });
            var plane = new THREE.Mesh(planeGeometry, planeMaterial);
            plane.position.set(0, 0, 0);
            plane.rotation.set(math.pi / 2, 0, 0);
            scene.add(plane);

            // Draw the lines
            var lineGeometry = new THREE.Geometry();
            var lineMaterial = new THREE.LineBasicMaterial({
                color: 0x555555,
                linewidth: 1.2
            });
            for (var i = -size; i <= size; i += step) {
                lineGeometry.vertices.push(new THREE.Vector3(-size, -0.04, i));
                lineGeometry.vertices.push(new THREE.Vector3(size, -0.04, i));
                lineGeometry.vertices.push(new THREE.Vector3(i, -0.04, -size));
                lineGeometry.vertices.push(new THREE.Vector3(i, -0.04, size));
            }

            var line = new THREE.LineSegments(lineGeometry, lineMaterial);
            scene.add(line);
        }

        function add_basic_lights() {
            // Add the light
            var light = new THREE.HemisphereLight(0xffffff, 0x000000, 0.9);
            scene.add(light);
        }

        function add_floor_theo() {
            var textureLoader = new THREE.TextureLoader();

            var texture = textureLoader.load('theo_floor1.jpg', function(tt) {
            });

            texture.wrapS = THREE.RepeatWrapping;
            texture.wrapT = THREE.RepeatWrapping;
            texture.repeat = new THREE.Vector2(1, 1.66);

            var material = new THREE.MeshPhongMaterial({
                color: 0xffffff,
                specular: 0xffffff,
                shininess: 100,
                map: texture
            });
            var geometry = new THREE.CubeGeometry(300, 300, 5);

            var mesh = new THREE.Mesh(geometry, material);
            mesh.position.y = -2;
            mesh.rotation.x = -Math.PI / 2;
            scene.add(mesh);
        }

        function add_floor_grid() {
            // Create a grid for the floor
            var size = 400,
                step = 40;

            // Draw the plane
            var planeGeometry = new THREE.PlaneGeometry(size * 2, size * 2);
            var planeMaterial = new THREE.MeshPhongMaterial({
                color: 0x212121,
                emissive: 0x333333,
                specular: 0x222222,
                side: THREE.DoubleSide,
                transparent: true,
                opacity: 0.3
            });
            var plane = new THREE.Mesh(planeGeometry, planeMaterial);
            plane.position.set(0, 0, 0);
            plane.rotation.set(math.pi / 2, 0, 0);
            scene.add(plane);

            // Draw the lines
            var lineGeometry = new THREE.Geometry();
            var lineMaterial = new THREE.LineBasicMaterial({
                color: 0x333333,
                linewidth: 1.2
            });
            for (var i = -size; i <= size; i += step) {
                lineGeometry.vertices.push(new THREE.Vector3(-size, -0.04, i));
                lineGeometry.vertices.push(new THREE.Vector3(size, -0.04, i));
                lineGeometry.vertices.push(new THREE.Vector3(i, -0.04, -size));
                lineGeometry.vertices.push(new THREE.Vector3(i, -0.04, size));
            }

            var line = new THREE.LineSegments(lineGeometry, lineMaterial);
            scene.add(line);
        }

        function init(cz) {
            scene = new THREE.Scene();
            camera = new THREE.PerspectiveCamera(30, window.innerWidth / window.innerHeight, 1, 4000);
            camera.position.set(0, 250, 0);
            camera.position.z = cz;
            scene.add(camera);

            // set_the_scene3();
        }

        function loadBVHFile(bvhURL, cname, jm, bm, scale) {
            var c2 = new BVHCharacter(cname, jm, bm, makeJointGeometry_Dode, makeBoneGeometry_Cylinder2);
            // c2.setOriginPosition(0, 0, 0);
            c2.skelScale = scale;
            c2.loadFromURL(bvhURL, function() {
                scene.add(c2.skeleton);
            });

            characters.push(c2);
        }

        function loadPosFile(csvURL, cname, jm, bm, scale, fr) {
            var c2 = new C3DCharacter(cname, markermaterial, makeJointGeometry_Sphere1);
            // c2.setOriginPosition(0, 0, 0);
            c2.scale = scale;
            c2.frameTime = fr;
            c2.loadFromURL(csvURL, function() {});

            characters.push(c2);
        }

        function loadPosBuffer(data, cname, jm, bm, scale, fr) {
            var c2 = new C3DCharacter(cname, bonematerial5, makeJointGeometry_SphereX(2.5));
            // c2.setOriginPosition(0, 0, 0);
            c2.scale = scale;
            c2.frameTime = fr;
            c2.loadFromBuffer(data, function() {});

            characters.push(c2);
        }
271 |
+
|
272 |
+
function initRenderer() {
|
273 |
+
renderer = new THREE.WebGLRenderer({
|
274 |
+
antialias: true
|
275 |
+
});
|
276 |
+
renderer.setSize(window.innerWidth, window.innerHeight);
|
277 |
+
//renderer.setSize(600, 500);
|
278 |
+
renderer.gammaInput = true;
|
279 |
+
renderer.gammaOutput = true;
|
280 |
+
renderer.setPixelRatio(window.devicePixelRatio);
|
281 |
+
renderer.setClearColor(0x222222, 1);
|
282 |
+
// renderer.setClearColor(0xffffff, 1);
|
283 |
+
|
284 |
+
|
285 |
+
document.body.appendChild(renderer.domElement);
|
286 |
+
|
287 |
+
controls = new THREE.OrbitControls(camera, renderer.domElement);
|
288 |
+
}
|
289 |
+
|
290 |
+
function animate() {
|
291 |
+
// if (!playing)
|
292 |
+
// return;
|
293 |
+
|
294 |
+
requestAnimationFrame(animate);
|
295 |
+
|
296 |
+
characters.forEach(function(c) {
|
297 |
+
// console.log(c.playing);
|
298 |
+
if (c.ready) {
|
299 |
+
if (c.playing) {
|
300 |
+
c.animIndex = c.animOffset + Math.floor((Date.now() - c.animStartTimeRef) / c.frameTime / 1000);
|
301 |
+
|
302 |
+
if (c.animIndex >= c.frameCount-1) {
|
303 |
+
c.animOffset = 0;
|
304 |
+
c.animStartTimeRef = Date.now();
|
305 |
+
c.animIndex = 0;
|
306 |
+
played_count++;
|
307 |
+
// if (played_count> 1)
|
308 |
+
// c.playing = false;
|
309 |
+
|
310 |
+
}
|
311 |
+
c.animFrame(c.animIndex);
|
312 |
+
|
313 |
+
if (showMeta) {
|
314 |
+
var labeldata = '';
|
315 |
+
for (var i=0; i<metadata[c.animIndex].length;i++) {
|
316 |
+
var v = Math.round(metadata[c.animIndex][i]*100)/100;
|
317 |
+
labeldata+= 'Label '+i+ ': '+ v+'<br/>';
|
318 |
+
}
|
319 |
+
document.getElementById('labels').innerHTML = labeldata;
|
320 |
+
|
321 |
+
updateChart(metadata[c.animIndex]);
|
322 |
+
}
|
323 |
+
}
|
324 |
+
}
|
325 |
+
});
|
326 |
+
|
327 |
+
renderer.render(scene, camera);
|
328 |
+
}
|
329 |
+
|
330 |
+
function makeLabelsChart() {
|
331 |
+
var metadataFrame = metadata[0];
|
332 |
+
|
333 |
+
chart.width = 200;
|
334 |
+
chart.height = 150;
|
335 |
+
|
336 |
+
chart.svg = d3.select("#labelsChart")
|
337 |
+
.append("svg")
|
338 |
+
.attr("width", 200)
|
339 |
+
.attr("height", 150);
|
340 |
+
|
341 |
+
chart.x = d3.scaleBand().rangeRound([0, chart.width]).padding(0.1),
|
342 |
+
chart.y = d3.scaleLinear().rangeRound([chart.height, 0]),
|
343 |
+
chart.h = d3.scaleLinear().rangeRound([0, chart.height/2]);
|
344 |
+
|
345 |
+
chart.x.domain([0, 1, 2, 3]);
|
346 |
+
chart.y.domain([-2, 2]);
|
347 |
+
chart.h.domain([0, 2]);
|
348 |
+
|
349 |
+
var g = chart.svg.append("g");
|
350 |
+
|
351 |
+
g.append("line")
|
352 |
+
.attr('x1', 0)
|
353 |
+
.attr('x2', chart.width)
|
354 |
+
.attr('y1', chart.height/2)
|
355 |
+
.attr('y2', chart.height/2)
|
356 |
+
.attr('stroke', '#f1f1f1');
|
357 |
+
|
358 |
+
g.selectAll(".bar")
|
359 |
+
.data(metadataFrame)
|
360 |
+
.enter().append("rect")
|
361 |
+
.attr("class", "bar")
|
362 |
+
.attr("x", function(d, i) { return chart.x(i); })
|
363 |
+
.attr("y", function(d, i) { return chart.y(Math.max(0, d)); })
|
364 |
+
.attr("width", 20)
|
365 |
+
.attr("height", function(d) { return chart.h(Math.abs(d)); });
|
366 |
+
}
|
367 |
+
|
368 |
+
function updateChart(metadataFrame) {
|
369 |
+
var g = chart.svg.select("g");
|
370 |
+
|
371 |
+
g.selectAll(".bar")
|
372 |
+
.data(metadataFrame)
|
373 |
+
.attr("class", "bar")
|
374 |
+
.attr("x", function(d, i) { return chart.x(i); })
|
375 |
+
.attr("y", function(d, i) { return chart.y(Math.max(0, d)); })
|
376 |
+
.attr("width", 20)
|
377 |
+
.attr("height", function(d) { return chart.h(Math.abs(d)); });
|
378 |
+
}
|
379 |
+
|
380 |
+
function start(dataBuffer, metadata, cz, scale, frameTime) {
|
381 |
+
if (cz === undefined)
|
382 |
+
cz = 550;
|
383 |
+
|
384 |
+
if (scale === undefined)
|
385 |
+
scale = 1.0;
|
386 |
+
|
387 |
+
if (frameTime === undefined)
|
388 |
+
frameTime = 1/120;
|
389 |
+
|
390 |
+
init(cz);
|
391 |
+
add_basic_lights();
|
392 |
+
add_floor_grid();
|
393 |
+
initRenderer();
|
394 |
+
|
395 |
+
if (metadata.length > 1 ) {
|
396 |
+
showMeta = true;
|
397 |
+
document.getElementById("metaoverlay").style.display="flex";
|
398 |
+
// console.log(metadata.length);
|
399 |
+
makeLabelsChart();
|
400 |
+
}
|
401 |
+
|
402 |
+
if (dataBuffer !== undefined) {
|
403 |
+
console.log("Loading from internal buffer");
|
404 |
+
// console.log(dataBuffer.length);
|
405 |
+
loadPosBuffer(dataBuffer, 'Fig', jointmaterial4, bm_a, scale, frameTime);
|
406 |
+
|
407 |
+
}
|
408 |
+
|
409 |
+
|
410 |
+
|
411 |
+
animate();
|
412 |
+
}
|
413 |
+
</script>
|
414 |
+
|
415 |
+
<script src="data.js"></script>
|
416 |
+
</body>
|
417 |
+
|
418 |
+
</html>
|
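The buffer-based player above ends by loading data.js, which is expected to define the motion buffer and call start(dataBuffer, metadata, cz, scale, frameTime). Below is a minimal Python sketch of generating such a file; the variable names (dataBuffer, metadata) and the one-CSV-row-per-frame layout are assumptions about what loadPosBuffer()/PapaParse accept, so check mocapjs.js before relying on it.
import json
import numpy as np
def write_data_js(positions, out_path="data.js", frame_time=1.0 / 120):
    # positions: (frames, joints, 3) array of world-space joint positions.
    # One CSV row per frame, flattened to x,y,z triples -- an assumed layout.
    rows = "\n".join(
        ",".join("%.5f" % v for v in frame.reshape(-1)) for frame in positions
    )
    with open(out_path, "w") as f:
        f.write("var dataBuffer = %s;\n" % json.dumps(rows))
        f.write("var metadata = [[]];\n")  # length 1 keeps the label chart off
        f.write("start(dataBuffer, metadata, 550, 1.0, %.6f);\n" % frame_time)
write_data_js(np.zeros((10, 24, 3)))  # 10 dummy frames of 24 joints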
dataloaders/pymo/mocapplayer/playURL.html
ADDED
@@ -0,0 +1,269 @@
|
1 |
+
<!DOCTYPE html>
|
2 |
+
<!-- GrooveNet Player - V 0.3
|
3 |
+
By Omid Alemi
|
4 |
+
-->
|
5 |
+
<html lang="en">
|
6 |
+
|
7 |
+
<head>
|
8 |
+
<title>BVH Player</title>
|
9 |
+
<meta charset="utf-8">
|
10 |
+
<meta name="viewport" content="width=device-width, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0">
|
11 |
+
<style>
|
12 |
+
|
13 |
+
</style>
|
14 |
+
<link rel="stylesheet" href="styles/pace.css"></link>
|
15 |
+
|
16 |
+
<script src="libs/pace.min.js"></script>
|
17 |
+
<script src="libs/math.min.js"></script>
|
18 |
+
<script src="libs/jquery.min.js"></script>
|
19 |
+
<script src="libs/threejs/three.min.js"></script>
|
20 |
+
<script src="libs/threejs/OrbitControls.js"></script>
|
21 |
+
<script src="libs/papaparse.min.js"></script>
|
22 |
+
|
23 |
+
<script src="libs/mocapjs.js"></script>
|
24 |
+
<script src="js/skeletonFactory.js"></script>
|
25 |
+
</head>
|
26 |
+
|
27 |
+
<body>
|
28 |
+
<script type="text/javascript">
|
29 |
+
var characters = [];
|
30 |
+
var playing = false;
|
31 |
+
|
32 |
+
|
33 |
+
$(document).on("keypress", function(e) {
|
34 |
+
if (e.charCode == 32)
|
35 |
+
playing = !playing;
|
36 |
+
|
37 |
+
characters.forEach(function(c) {
|
38 |
+
c.playing = playing;
|
39 |
+
|
40 |
+
if (playing)
|
41 |
+
c.animStartTimeRef = Date.now();
|
42 |
+
else
|
43 |
+
c.animOffset = c.animIndex;
|
44 |
+
});
|
45 |
+
|
46 |
+
});
|
47 |
+
|
48 |
+
$(window).on("resize", function(e) {
|
49 |
+
camera.aspect = window.innerWidth / window.innerHeight;
|
50 |
+
camera.updateProjectionMatrix();
|
51 |
+
|
52 |
+
renderer.setSize(window.innerWidth, window.innerHeight);
|
53 |
+
});
|
54 |
+
|
55 |
+
var getUrlParameter = function getUrlParameter(sParam) {
|
56 |
+
var sPageURL = decodeURIComponent(window.location.search.substring(1)),
|
57 |
+
sURLVariables = sPageURL.split('&'),
|
58 |
+
sParameterName,
|
59 |
+
i;
|
60 |
+
|
61 |
+
for (i = 0; i < sURLVariables.length; i++) {
|
62 |
+
sParameterName = sURLVariables[i].split('=');
|
63 |
+
if (sParameterName[0] === sParam) {
|
64 |
+
return sParameterName[1] === undefined ? true : sParameterName[1];
|
65 |
+
}
|
66 |
+
}
|
67 |
+
};
|
68 |
+
</script>
|
69 |
+
|
70 |
+
<script>
|
71 |
+
var scene, camera, renderer;
|
72 |
+
var geometry, material, mesh;
|
73 |
+
var played_count = 0;
|
74 |
+
|
75 |
+
function set_the_scene3() {
|
76 |
+
|
77 |
+
// Add the light
|
78 |
+
light = new THREE.PointLight(0xffffff, 0.9, 0);
|
79 |
+
light.position.set(0, 160, 40);
|
80 |
+
scene.add(light);
|
81 |
+
|
82 |
+
// Create a grid for the floor
|
83 |
+
var size = 200,
|
84 |
+
step = 20;
|
85 |
+
|
86 |
+
// Draw the plane
|
87 |
+
var planeGeometry = new THREE.PlaneGeometry(size * 2, size * 2);
|
88 |
+
var planeMaterial = new THREE.MeshPhongMaterial({
|
89 |
+
color: 0x444444,
|
90 |
+
emissive: 0x000000,
|
91 |
+
specular: 0x111111,
|
92 |
+
side: THREE.DoubleSide,
|
93 |
+
transparent: true,
|
94 |
+
opacity: 0.6
|
95 |
+
});
|
96 |
+
var plane = new THREE.Mesh(planeGeometry, planeMaterial);
|
97 |
+
plane.position.set(0, 0, 0);
|
98 |
+
plane.rotation.set(Math.PI / 2, 0, 0);
|
99 |
+
scene.add(plane);
|
100 |
+
|
101 |
+
//Draw the lines
|
102 |
+
var lineGeometry = new THREE.Geometry();
|
103 |
+
var lineMaterial = new THREE.LineBasicMaterial({
|
104 |
+
color: 0x555555,
|
105 |
+
linewidth: 1.2
|
106 |
+
});
|
107 |
+
for (var i = -size; i <= size; i += step) {
|
108 |
+
lineGeometry.vertices.push(new THREE.Vector3(-size, -0.04, i));
|
109 |
+
lineGeometry.vertices.push(new THREE.Vector3(size, -0.04, i));
|
110 |
+
lineGeometry.vertices.push(new THREE.Vector3(i, -0.04, -size));
|
111 |
+
lineGeometry.vertices.push(new THREE.Vector3(i, -0.04, size));
|
112 |
+
}
|
113 |
+
|
114 |
+
var line = new THREE.LineSegments(lineGeometry, lineMaterial);
|
115 |
+
scene.add(line);
|
116 |
+
}
|
117 |
+
|
118 |
+
function add_basic_lights() {
|
119 |
+
// Add the light
|
120 |
+
var light = new THREE.HemisphereLight(0xffffff, 0x000000, 0.8);
|
121 |
+
scene.add(light);
|
122 |
+
}
|
123 |
+
|
124 |
+
function add_floor_theo() {
|
125 |
+
var textureLoader = new THREE.TextureLoader();
|
126 |
+
|
127 |
+
var texture = textureLoader.load('theo_floor1.jpg', function(tt) {
|
128 |
+
});
|
129 |
+
|
130 |
+
texture.wrapS = THREE.RepeatWrapping;
|
131 |
+
texture.wrapT = THREE.RepeatWrapping;
|
132 |
+
texture.repeat = new THREE.Vector2(1,1.66);
|
133 |
+
|
134 |
+
|
135 |
+
var material = new THREE.MeshPhongMaterial({
|
136 |
+
color: 0xffffff,
|
137 |
+
specular: 0xffffff,
|
138 |
+
shininess: 100,
|
139 |
+
map: texture,
|
140 |
+
|
141 |
+
});
|
142 |
+
var geometry = new THREE.CubeGeometry(300, 300, 5);
|
143 |
+
|
144 |
+
var mesh = new THREE.Mesh(geometry, material);
|
145 |
+
mesh.position.y = -2;
|
146 |
+
mesh.rotation.x = -Math.PI / 2;
|
147 |
+
scene.add(mesh);
|
148 |
+
}
|
149 |
+
|
150 |
+
function init(cz) {
|
151 |
+
scene = new THREE.Scene();
|
152 |
+
camera = new THREE.PerspectiveCamera(30, window.innerWidth / window.innerHeight, 1, 4000);
|
153 |
+
camera.position.set(0, 80, 0);
|
154 |
+
camera.position.z = cz;
|
155 |
+
scene.add(camera);
|
156 |
+
|
157 |
+
// set_the_scene3();
|
158 |
+
}
|
159 |
+
|
160 |
+
function loadBVHFile(bvhURL, cname, jm, bm, scale) {
|
161 |
+
var c2 = new BVHCharacter(cname, jm, bm, makeJointGeometry_Dode, makeBoneGeometry_Cylinder2);
|
162 |
+
// c2.setOriginPosition(0, 0, 0);
|
163 |
+
c2.skelScale = scale;
|
164 |
+
c2.loadFromURL(bvhURL, function() {
|
165 |
+
scene.add(c2.skeleton);
|
166 |
+
});
|
167 |
+
|
168 |
+
characters.push(c2);
|
169 |
+
}
|
170 |
+
|
171 |
+
function loadPosFile(csvURL, cname, jm, bm, scale, fr) {
|
172 |
+
var c2 = new C3DCharacter(cname, markermaterial, makeJointGeometry_Sphere2);
|
173 |
+
// c2.setOriginPosition(0, 0, 0);
|
174 |
+
c2.scale = scale;
|
175 |
+
c2.frameTime = fr;
|
176 |
+
c2.loadFromURL(csvURL, function() {});
|
177 |
+
|
178 |
+
characters.push(c2);
|
179 |
+
}
|
180 |
+
|
181 |
+
function initRenderer() {
|
182 |
+
renderer = new THREE.WebGLRenderer({
|
183 |
+
antialias: true
|
184 |
+
});
|
185 |
+
renderer.setSize(window.innerWidth, window.innerHeight);
|
186 |
+
//renderer.setSize(600, 500);
|
187 |
+
renderer.gammaInput = true;
|
188 |
+
renderer.gammaOutput = true;
|
189 |
+
renderer.setPixelRatio(window.devicePixelRatio);
|
190 |
+
// renderer.setClearColor(0xdddddd, 1);
|
191 |
+
renderer.setClearColor(0xffffff, 1);
|
192 |
+
|
193 |
+
|
194 |
+
document.body.appendChild(renderer.domElement);
|
195 |
+
|
196 |
+
controls = new THREE.OrbitControls(camera, renderer.domElement);
|
197 |
+
}
|
198 |
+
|
199 |
+
function animate() {
|
200 |
+
// if (!playing)
|
201 |
+
// return;
|
202 |
+
|
203 |
+
requestAnimationFrame(animate);
|
204 |
+
|
205 |
+
characters.forEach(function(c) {
|
206 |
+
// console.log(c.playing);
|
207 |
+
if (c.ready) {
|
208 |
+
if (c.playing) {
|
209 |
+
c.animIndex = c.animOffset + Math.floor((Date.now() - c.animStartTimeRef) / c.frameTime / 1000);
|
210 |
+
|
211 |
+
if (c.animIndex >= c.frameCount) {
|
212 |
+
c.animOffset = 0;
|
213 |
+
c.animStartTimeRef = Date.now();
|
214 |
+
c.animIndex = 0;
|
215 |
+
played_count++;
|
216 |
+
// if (played_count> 1)
|
217 |
+
// c.playing = false;
|
218 |
+
|
219 |
+
}
|
220 |
+
c.animFrame(c.animIndex);
|
221 |
+
}
|
222 |
+
}
|
223 |
+
});
|
224 |
+
|
225 |
+
renderer.render(scene, camera);
|
226 |
+
}
|
227 |
+
|
228 |
+
|
229 |
+
$(document).ready(function () {
|
230 |
+
var dataURL = getUrlParameter('data_url');
|
231 |
+
var cz = getUrlParameter('cz');
|
232 |
+
var scale = getUrlParameter('scale');
|
233 |
+
var frameTime = getUrlParameter('frame_time');
|
234 |
+
|
235 |
+
if (cz === undefined)
|
236 |
+
cz = 550;
|
237 |
+
|
238 |
+
if (scale === undefined)
|
239 |
+
scale = 1.0;
|
240 |
+
|
241 |
+
if (frameTime === undefined)
|
242 |
+
frameTime = 1/120;
|
243 |
+
|
244 |
+
init(cz);
|
245 |
+
add_basic_lights();
|
246 |
+
add_floor_theo();
|
247 |
+
initRenderer();
|
248 |
+
|
249 |
+
if (dataURL !== undefined) {
|
250 |
+
console.log("Loading " + dataURL);
|
251 |
+
|
252 |
+
if (dataURL.endsWith('.bvh'))
|
253 |
+
loadBVHFile(dataURL, 'Fig', jm_a, bm_a, scale);
|
254 |
+
else if (dataURL.endsWith('.csv'))
|
255 |
+
loadPosFile(dataURL, 'Fig', jm_a, bm_a, scale, frameTime);
|
256 |
+
else
|
257 |
+
console.error('Unsupported file.');
|
258 |
+
}
|
259 |
+
|
260 |
+
|
261 |
+
|
262 |
+
animate();
|
263 |
+
});
|
264 |
+
|
265 |
+
|
266 |
+
</script>
|
267 |
+
</body>
|
268 |
+
|
269 |
+
</html>
|
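playURL.html is parameterized entirely through the query string; the keys below are exactly the ones read by the getUrlParameter() calls in the ready() handler. A small Python sketch of building such a URL (the host and clip path are placeholders):
from urllib.parse import urlencode
base = "http://localhost:8000/dataloaders/pymo/mocapplayer/playURL.html"
params = {
    "data_url": "clips/2_scott_0_1_1.bvh",  # .bvh or .csv, per the loader branch
    "cz": 550,              # initial camera distance (page default: 550)
    "scale": 1.0,           # skeleton scale (page default: 1.0)
    "frame_time": 1 / 120,  # seconds per frame (page default: 1/120)
}
print(base + "?" + urlencode(params))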
dataloaders/pymo/mocapplayer/styles/pace.css
ADDED
@@ -0,0 +1,76 @@
|
1 |
+
.pace {
|
2 |
+
-webkit-pointer-events: none;
|
3 |
+
pointer-events: none;
|
4 |
+
-webkit-user-select: none;
|
5 |
+
-moz-user-select: none;
|
6 |
+
user-select: none;
|
7 |
+
}
|
8 |
+
|
9 |
+
.pace-inactive {
|
10 |
+
display: none;
|
11 |
+
}
|
12 |
+
|
13 |
+
.pace .pace-progress {
|
14 |
+
background: #29d;
|
15 |
+
position: fixed;
|
16 |
+
z-index: 2000;
|
17 |
+
top: 0;
|
18 |
+
right: 100%;
|
19 |
+
width: 100%;
|
20 |
+
height: 2px;
|
21 |
+
}
|
22 |
+
|
23 |
+
.pace .pace-progress-inner {
|
24 |
+
display: block;
|
25 |
+
position: absolute;
|
26 |
+
right: 0px;
|
27 |
+
width: 100px;
|
28 |
+
height: 100%;
|
29 |
+
box-shadow: 0 0 10px #29d, 0 0 5px #29d;
|
30 |
+
opacity: 1.0;
|
31 |
+
-webkit-transform: rotate(3deg) translate(0px, -4px);
|
32 |
+
-moz-transform: rotate(3deg) translate(0px, -4px);
|
33 |
+
-ms-transform: rotate(3deg) translate(0px, -4px);
|
34 |
+
-o-transform: rotate(3deg) translate(0px, -4px);
|
35 |
+
transform: rotate(3deg) translate(0px, -4px);
|
36 |
+
}
|
37 |
+
|
38 |
+
.pace .pace-activity {
|
39 |
+
display: block;
|
40 |
+
position: fixed;
|
41 |
+
z-index: 2000;
|
42 |
+
top: 15px;
|
43 |
+
right: 20px;
|
44 |
+
width: 34px;
|
45 |
+
height: 34px;
|
46 |
+
border: solid 2px transparent;
|
47 |
+
border-top-color: #9ea7ac;
|
48 |
+
border-left-color: #9ea7ac;
|
49 |
+
border-radius: 30px;
|
50 |
+
-webkit-animation: pace-spinner 700ms linear infinite;
|
51 |
+
-moz-animation: pace-spinner 700ms linear infinite;
|
52 |
+
-ms-animation: pace-spinner 700ms linear infinite;
|
53 |
+
-o-animation: pace-spinner 700ms linear infinite;
|
54 |
+
animation: pace-spinner 700ms linear infinite;
|
55 |
+
}
|
56 |
+
|
57 |
+
@-webkit-keyframes pace-spinner {
|
58 |
+
0% { -webkit-transform: rotate(0deg); transform: rotate(0deg); }
|
59 |
+
100% { -webkit-transform: rotate(360deg); transform: rotate(360deg); }
|
60 |
+
}
|
61 |
+
@-moz-keyframes pace-spinner {
|
62 |
+
0% { -moz-transform: rotate(0deg); transform: rotate(0deg); }
|
63 |
+
100% { -moz-transform: rotate(360deg); transform: rotate(360deg); }
|
64 |
+
}
|
65 |
+
@-o-keyframes pace-spinner {
|
66 |
+
0% { -o-transform: rotate(0deg); transform: rotate(0deg); }
|
67 |
+
100% { -o-transform: rotate(360deg); transform: rotate(360deg); }
|
68 |
+
}
|
69 |
+
@-ms-keyframes pace-spinner {
|
70 |
+
0% { -ms-transform: rotate(0deg); transform: rotate(0deg); }
|
71 |
+
100% { -ms-transform: rotate(360deg); transform: rotate(360deg); }
|
72 |
+
}
|
73 |
+
@keyframes pace-spinner {
|
74 |
+
0% { transform: rotate(0deg); }
|
75 |
+
100% { transform: rotate(360deg); }
|
76 |
+
}
|
dataloaders/pymo/parsers.py
ADDED
@@ -0,0 +1,274 @@
|
1 |
+
'''
|
2 |
+
BVH Parser Class
|
3 |
+
|
4 |
+
By Omid Alemi
|
5 |
+
Created: June 12, 2017
|
6 |
+
|
7 |
+
Based on: https://gist.github.com/johnfredcee/2007503
|
8 |
+
|
9 |
+
'''
|
10 |
+
import re
|
11 |
+
from unicodedata import name
|
12 |
+
import numpy as np
|
13 |
+
from .data import Joint, MocapData
|
14 |
+
|
15 |
+
class BVHScanner():
|
16 |
+
'''
|
17 |
+
A wrapper class for re.Scanner
|
18 |
+
'''
|
19 |
+
def __init__(self):
|
20 |
+
|
21 |
+
def identifier(scanner, token):
|
22 |
+
return 'IDENT', token
|
23 |
+
|
24 |
+
def operator(scanner, token):
|
25 |
+
return 'OPERATOR', token
|
26 |
+
|
27 |
+
def digit(scanner, token):
|
28 |
+
return 'DIGIT', token
|
29 |
+
|
30 |
+
def open_brace(scanner, token):
|
31 |
+
return 'OPEN_BRACE', token
|
32 |
+
|
33 |
+
def close_brace(scanner, token):
|
34 |
+
return 'CLOSE_BRACE', token
|
35 |
+
|
36 |
+
self.scanner = re.Scanner([
|
37 |
+
(r'[a-zA-Z_]\w*', identifier),
|
38 |
+
#(r'-*[0-9]+(\.[0-9]+)?', digit), # won't work for .34
|
39 |
+
#(r'[-+]?[0-9]*\.?[0-9]+', digit), # won't work for 4.56e-2
|
40 |
+
#(r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', digit),
|
41 |
+
(r'-*[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', digit),
|
42 |
+
(r'}', close_brace),
|
44 |
+
(r'{', open_brace),
|
45 |
+
(r':', None),
|
46 |
+
(r'\s+', None)
|
47 |
+
])
|
48 |
+
|
49 |
+
def scan(self, stuff):
|
50 |
+
return self.scanner.scan(stuff)
|
51 |
+
|
52 |
+
|
53 |
+
|
54 |
+
class BVHParser():
|
55 |
+
'''
|
56 |
+
A class to parse a BVH file.
|
57 |
+
|
58 |
+
Extracts the skeleton and channel values
|
59 |
+
'''
|
60 |
+
def __init__(self, filename=None):
|
61 |
+
self.reset()
|
62 |
+
|
63 |
+
def reset(self):
|
64 |
+
self._skeleton = {}
|
65 |
+
self.bone_context = []
|
66 |
+
self._motion_channels = []
|
67 |
+
self._motions = []
|
68 |
+
self.current_token = 0
|
69 |
+
self.framerate = 0.0
|
70 |
+
self.root_name = ''
|
71 |
+
|
72 |
+
self.scanner = BVHScanner()
|
73 |
+
|
74 |
+
self.data = MocapData()
|
75 |
+
|
76 |
+
|
77 |
+
def parse(self, filename, start=0, stop=-1):
|
78 |
+
self.reset()
|
79 |
+
self.correct_row_num = 0
|
80 |
+
with open(filename, 'r') as f:
|
81 |
+
for line in f.readlines():
|
82 |
+
self.correct_row_num += 1
|
83 |
+
|
84 |
+
with open(filename, 'r') as bvh_file:
|
85 |
+
raw_contents = bvh_file.read()
|
86 |
+
tokens, remainder = self.scanner.scan(raw_contents)
|
87 |
+
|
88 |
+
self._parse_hierarchy(tokens)
|
89 |
+
self.current_token = self.current_token + 1
|
90 |
+
self._parse_motion(tokens, start, stop)
|
91 |
+
|
92 |
+
self.data.skeleton = self._skeleton
|
93 |
+
self.data.channel_names = self._motion_channels
|
94 |
+
self.data.values = self._to_DataFrame()
|
95 |
+
self.data.root_name = self.root_name
|
96 |
+
self.data.framerate = self.framerate
|
97 |
+
|
98 |
+
return self.data
|
99 |
+
|
100 |
+
def _to_DataFrame(self):
|
101 |
+
'''Returns all of the channels parsed from the file as a pandas DataFrame'''
|
102 |
+
|
103 |
+
import pandas as pd
|
104 |
+
time_index = pd.to_timedelta([f[0] for f in self._motions], unit='s')
|
105 |
+
frames = [f[1] for f in self._motions]
|
106 |
+
channels = np.asarray([[channel[2] for channel in frame] for frame in frames])
|
107 |
+
column_names = ['%s_%s'%(c[0], c[1]) for c in self._motion_channels]
|
108 |
+
|
109 |
+
return pd.DataFrame(data=channels, index=time_index, columns=column_names)
|
110 |
+
|
111 |
+
|
112 |
+
def _new_bone(self, parent, name):
|
113 |
+
bone = {'parent': parent, 'channels': [], 'offsets': [], 'order': '','children': []}
|
114 |
+
return bone
|
115 |
+
|
116 |
+
def _push_bone_context(self,name):
|
117 |
+
self.bone_context.append(name)
|
118 |
+
|
119 |
+
def _get_bone_context(self):
|
120 |
+
return self.bone_context[len(self.bone_context)-1]
|
121 |
+
|
122 |
+
def _pop_bone_context(self):
|
123 |
+
self.bone_context = self.bone_context[:-1]
|
124 |
+
return self.bone_context[len(self.bone_context)-1]
|
125 |
+
|
126 |
+
def _read_offset(self, bvh, token_index):
|
127 |
+
if bvh[token_index] != ('IDENT', 'OFFSET'):
|
128 |
+
return None, None
|
129 |
+
token_index = token_index + 1
|
130 |
+
offsets = [0.0] * 3
|
131 |
+
for i in range(3):
|
132 |
+
offsets[i] = float(bvh[token_index][1])
|
133 |
+
token_index = token_index + 1
|
134 |
+
return offsets, token_index
|
135 |
+
|
136 |
+
def _read_channels(self, bvh, token_index):
|
137 |
+
if bvh[token_index] != ('IDENT', 'CHANNELS'):
|
138 |
+
return None, None
|
139 |
+
token_index = token_index + 1
|
140 |
+
channel_count = int(bvh[token_index][1])
|
141 |
+
token_index = token_index + 1
|
142 |
+
channels = [""] * channel_count
|
143 |
+
order = ""
|
144 |
+
for i in range(channel_count):
|
145 |
+
channels[i] = bvh[token_index][1]
|
146 |
+
token_index = token_index + 1
|
147 |
+
if(channels[i] == "Xrotation" or channels[i]== "Yrotation" or channels[i]== "Zrotation"):
|
148 |
+
order += channels[i][0]
|
149 |
+
else :
|
150 |
+
order = ""
|
151 |
+
return channels, token_index, order
|
152 |
+
|
153 |
+
def _parse_joint(self, bvh, token_index):
|
154 |
+
end_site = False
|
155 |
+
joint_id = bvh[token_index][1]
|
156 |
+
token_index = token_index + 1
|
157 |
+
joint_name = bvh[token_index][1]
|
158 |
+
token_index = token_index + 1
|
159 |
+
|
160 |
+
parent_name = self._get_bone_context()
|
161 |
+
|
162 |
+
if (joint_id == "End"):
|
163 |
+
joint_name = parent_name+ '_Nub'
|
164 |
+
end_site = True
|
165 |
+
joint = self._new_bone(parent_name, joint_name)
|
166 |
+
if bvh[token_index][0] != 'OPEN_BRACE':
|
167 |
+
print('Was expecting brace, got ', bvh[token_index])
|
168 |
+
return None
|
169 |
+
token_index = token_index + 1
|
170 |
+
offsets, token_index = self._read_offset(bvh, token_index)
|
171 |
+
joint['offsets'] = offsets
|
172 |
+
if not end_site:
|
173 |
+
channels, token_index, order = self._read_channels(bvh, token_index)
|
174 |
+
joint['channels'] = channels
|
175 |
+
joint['order'] = order
|
176 |
+
for channel in channels:
|
177 |
+
self._motion_channels.append((joint_name, channel))
|
178 |
+
|
179 |
+
self._skeleton[joint_name] = joint
|
180 |
+
self._skeleton[parent_name]['children'].append(joint_name)
|
181 |
+
|
182 |
+
while (bvh[token_index][0] == 'IDENT' and bvh[token_index][1] == 'JOINT') or (bvh[token_index][0] == 'IDENT' and bvh[token_index][1] == 'End'):
|
183 |
+
self._push_bone_context(joint_name)
|
184 |
+
token_index = self._parse_joint(bvh, token_index)
|
185 |
+
self._pop_bone_context()
|
186 |
+
|
187 |
+
if bvh[token_index][0] == 'CLOSE_BRACE':
|
188 |
+
return token_index + 1
|
189 |
+
|
190 |
+
print('Unexpected token ', bvh[token_index])
|
191 |
+
|
192 |
+
def _parse_hierarchy(self, bvh):
|
193 |
+
self.current_token = 0
|
194 |
+
if bvh[self.current_token] != ('IDENT', 'HIERARCHY'):
|
195 |
+
return None
|
196 |
+
self.current_token = self.current_token + 1
|
197 |
+
if bvh[self.current_token] != ('IDENT', 'ROOT'):
|
198 |
+
return None
|
199 |
+
self.current_token = self.current_token + 1
|
200 |
+
if bvh[self.current_token][0] != 'IDENT':
|
201 |
+
return None
|
202 |
+
|
203 |
+
root_name = bvh[self.current_token][1]
|
204 |
+
root_bone = self._new_bone(None, root_name)
|
205 |
+
self.current_token = self.current_token + 2 #skipping open brace
|
206 |
+
offsets, self.current_token = self._read_offset(bvh, self.current_token)
|
207 |
+
channels, self.current_token, order = self._read_channels(bvh, self.current_token)
|
208 |
+
root_bone['offsets'] = offsets
|
209 |
+
root_bone['channels'] = channels
|
210 |
+
root_bone['order'] = order
|
211 |
+
self._skeleton[root_name] = root_bone
|
212 |
+
self._push_bone_context(root_name)
|
213 |
+
|
214 |
+
for channel in channels:
|
215 |
+
self._motion_channels.append((root_name, channel))
|
216 |
+
|
217 |
+
while bvh[self.current_token][1] == 'JOINT':
|
218 |
+
self.current_token = self._parse_joint(bvh, self.current_token)
|
219 |
+
|
220 |
+
self.root_name = root_name
|
221 |
+
|
222 |
+
def _parse_motion(self, bvh, start, stop):
|
223 |
+
if bvh[self.current_token][0] != 'IDENT':
|
224 |
+
print('Unexpected text')
|
225 |
+
return None
|
226 |
+
if bvh[self.current_token][1] != 'MOTION':
|
227 |
+
print('No motion section')
|
228 |
+
return None
|
229 |
+
self.current_token = self.current_token + 1
|
230 |
+
if bvh[self.current_token][1] != 'Frames':
|
231 |
+
return None
|
232 |
+
self.current_token = self.current_token + 1
|
233 |
+
frame_count = int(bvh[self.current_token][1])
|
234 |
+
|
235 |
+
if stop<0 or stop>frame_count:
|
236 |
+
stop = min(frame_count, self.correct_row_num-431)
|
237 |
+
|
238 |
+
assert(start>=0)
|
239 |
+
assert(start<stop)
|
240 |
+
|
241 |
+
self.current_token = self.current_token + 1
|
242 |
+
if bvh[self.current_token][1] != 'Frame':
|
243 |
+
return None
|
244 |
+
self.current_token = self.current_token + 1
|
245 |
+
if bvh[self.current_token][1] != 'Time':
|
246 |
+
return None
|
247 |
+
self.current_token = self.current_token + 1
|
248 |
+
frame_rate = float(bvh[self.current_token][1])
|
249 |
+
|
250 |
+
self.framerate = frame_rate
|
251 |
+
|
252 |
+
self.current_token = self.current_token + 1
|
253 |
+
|
254 |
+
frame_time = 0.0
|
255 |
+
self._motions = [()] * (stop-start)
|
256 |
+
idx=0
|
257 |
+
for i in range(stop):
|
258 |
+
#print(i)
|
259 |
+
channel_values = []
|
260 |
+
|
261 |
+
for channel in self._motion_channels:
|
262 |
+
#print(channel)
|
263 |
+
channel_values.append((channel[0], channel[1], float(bvh[self.current_token][1])))
|
264 |
+
self.current_token = self.current_token + 1
|
265 |
+
|
266 |
+
if i>=start:
|
267 |
+
self._motions[idx] = (frame_time, channel_values)
|
268 |
+
frame_time = frame_time + frame_rate
|
269 |
+
idx+=1
|
270 |
+
|
271 |
+
|
272 |
+
if __name__ == "__main__":
|
273 |
+
p = BVHParser()
|
274 |
+
data = [p.parse("../../../datasets/beat_full/2/2_scott_0_1_1.bvh")]
|
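A minimal usage sketch for the parser above; the .bvh path is a placeholder. parse() returns a MocapData object whose values attribute is a pandas DataFrame with one column per joint channel.
from dataloaders.pymo.parsers import BVHParser
parser = BVHParser()
mocap = parser.parse("example.bvh")                    # whole clip
clip = parser.parse("example.bvh", start=0, stop=300)  # frames [0, 300)
print(mocap.root_name, mocap.framerate)  # root joint name, seconds per frame
print(mocap.values.shape)                # (frames, channels) DataFrame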
dataloaders/pymo/preprocessing.py
ADDED
@@ -0,0 +1,726 @@
|
1 |
+
'''
|
2 |
+
Preprocessing Transformers Based on scikit-learn's API
|
3 |
+
|
4 |
+
By Omid Alemi
|
5 |
+
Created on June 12, 2017
|
6 |
+
'''
|
7 |
+
import copy
|
8 |
+
import pandas as pd
|
9 |
+
import numpy as np
|
10 |
+
from sklearn.base import BaseEstimator, TransformerMixin
|
11 |
+
from .Quaternions import Quaternions
|
12 |
+
from .rotation_tools import Rotation
|
13 |
+
|
14 |
+
class MocapParameterizer(BaseEstimator, TransformerMixin):
|
15 |
+
def __init__(self, param_type = 'euler'):
|
16 |
+
'''
|
17 |
+
|
18 |
+
param_type = {'euler', 'quat', 'expmap', 'position'}
|
19 |
+
'''
|
20 |
+
self.param_type = param_type
|
21 |
+
|
22 |
+
def fit(self, X, y=None):
|
23 |
+
return self
|
24 |
+
|
25 |
+
def transform(self, X, y=None):
|
26 |
+
if self.param_type == 'euler':
|
27 |
+
return X
|
28 |
+
elif self.param_type == 'expmap':
|
29 |
+
return self._to_expmap(X)
|
30 |
+
elif self.param_type == 'quat':
|
31 |
+
return X
|
32 |
+
elif self.param_type == 'position':
|
33 |
+
return self._to_pos(X)
|
34 |
+
else:
|
35 |
+
raise UnsupportedParamError('Unsupported param: %s. Valid param types are: euler, quat, expmap, position' % self.param_type)
|
36 |
+
# return X
|
37 |
+
|
38 |
+
def inverse_transform(self, X, copy=None):
|
39 |
+
if self.param_type == 'euler':
|
40 |
+
return X
|
41 |
+
elif self.param_type == 'expmap':
|
42 |
+
return self._expmap_to_euler(X)
|
43 |
+
elif self.param_type == 'quat':
|
44 |
+
raise UnsupportedParamError('quat2euler is not supported')
|
45 |
+
elif self.param_type == 'position':
|
46 |
+
print('positions 2 eulers is not supported')
|
47 |
+
return X
|
48 |
+
else:
|
49 |
+
raise UnsupportedParamError('Unsupported param: %s. Valid param types are: euler, quat, expmap, position' % self.param_type)
|
50 |
+
|
51 |
+
def _to_pos(self, X):
|
52 |
+
'''Converts joint rotations in Euler angles to joint positions'''
|
53 |
+
|
54 |
+
Q = []
|
55 |
+
for track in X:
|
56 |
+
channels = []
|
57 |
+
titles = []
|
58 |
+
euler_df = track.values
|
59 |
+
|
60 |
+
# Create a new DataFrame to store the joint positions
|
61 |
+
pos_df = pd.DataFrame(index=euler_df.index)
|
62 |
+
|
63 |
+
# Copy the root rotations into the new DataFrame
|
64 |
+
# rxp = '%s_Xrotation'%track.root_name
|
65 |
+
# ryp = '%s_Yrotation'%track.root_name
|
66 |
+
# rzp = '%s_Zrotation'%track.root_name
|
67 |
+
# pos_df[rxp] = pd.Series(data=euler_df[rxp], index=pos_df.index)
|
68 |
+
# pos_df[ryp] = pd.Series(data=euler_df[ryp], index=pos_df.index)
|
69 |
+
# pos_df[rzp] = pd.Series(data=euler_df[rzp], index=pos_df.index)
|
70 |
+
|
71 |
+
# List the columns that contain rotation channels
|
72 |
+
rot_cols = [c for c in euler_df.columns if ('rotation' in c)]
|
73 |
+
|
74 |
+
# List the columns that contain position channels
|
75 |
+
pos_cols = [c for c in euler_df.columns if ('position' in c)]
|
76 |
+
|
77 |
+
# List the joints that are not end sites, i.e., have channels
|
78 |
+
joints = (joint for joint in track.skeleton)
|
79 |
+
|
80 |
+
tree_data = {}
|
81 |
+
|
82 |
+
for joint in track.traverse():
|
83 |
+
parent = track.skeleton[joint]['parent']
|
84 |
+
rot_order = track.skeleton[joint]['order']
|
85 |
+
#print("rot_order:" + joint + " :" + rot_order)
|
86 |
+
|
87 |
+
# Get the rotation columns that belong to this joint
|
88 |
+
rc = euler_df[[c for c in rot_cols if joint in c]]
|
89 |
+
|
90 |
+
# Get the position columns that belong to this joint
|
91 |
+
pc = euler_df[[c for c in pos_cols if joint in c]]
|
92 |
+
|
93 |
+
# Make sure the columns are organized in xyz order
|
94 |
+
if rc.shape[1] < 3:
|
95 |
+
euler_values = np.zeros((euler_df.shape[0], 3))
|
96 |
+
rot_order = "XYZ"
|
97 |
+
else:
|
98 |
+
euler_values = np.pi/180.0*np.transpose(np.array([track.values['%s_%srotation'%(joint, rot_order[0])], track.values['%s_%srotation'%(joint, rot_order[1])], track.values['%s_%srotation'%(joint, rot_order[2])]]))
|
99 |
+
|
100 |
+
if pc.shape[1] < 3:
|
101 |
+
pos_values = np.asarray([[0,0,0] for f in pc.iterrows()])
|
102 |
+
else:
|
103 |
+
pos_values =np.asarray([[f[1]['%s_Xposition'%joint],
|
104 |
+
f[1]['%s_Yposition'%joint],
|
105 |
+
f[1]['%s_Zposition'%joint]] for f in pc.iterrows()])
|
106 |
+
|
107 |
+
quats = Quaternions.from_euler(np.asarray(euler_values), order=rot_order.lower(), world=False)
|
108 |
+
|
109 |
+
tree_data[joint]=[
|
110 |
+
[], # to store the rotation matrix
|
111 |
+
[] # to store the calculated position
|
112 |
+
]
|
113 |
+
if track.root_name == joint:
|
114 |
+
tree_data[joint][0] = quats#rotmats
|
115 |
+
# tree_data[joint][1] = np.add(pos_values, track.skeleton[joint]['offsets'])
|
116 |
+
tree_data[joint][1] = pos_values
|
117 |
+
else:
|
118 |
+
# for every frame i, multiply this joint's rotmat to the rotmat of its parent
|
119 |
+
tree_data[joint][0] = tree_data[parent][0]*quats# np.matmul(rotmats, tree_data[parent][0])
|
120 |
+
|
121 |
+
# add the position channel to the offset and store it in k, for every frame i
|
122 |
+
k = pos_values + np.asarray(track.skeleton[joint]['offsets'])
|
123 |
+
|
124 |
+
# multiply k to the rotmat of the parent for every frame i
|
125 |
+
q = tree_data[parent][0]*k #np.matmul(k.reshape(k.shape[0],1,3), tree_data[parent][0])
|
126 |
+
|
127 |
+
# add q to the position of the parent, for every frame i
|
128 |
+
tree_data[joint][1] = tree_data[parent][1] + q #q.reshape(k.shape[0],3) + tree_data[parent][1]
|
129 |
+
|
130 |
+
# Create the corresponding columns in the new DataFrame
|
131 |
+
pos_df['%s_Xposition'%joint] = pd.Series(data=[e[0] for e in tree_data[joint][1]], index=pos_df.index)
|
132 |
+
pos_df['%s_Yposition'%joint] = pd.Series(data=[e[1] for e in tree_data[joint][1]], index=pos_df.index)
|
133 |
+
pos_df['%s_Zposition'%joint] = pd.Series(data=[e[2] for e in tree_data[joint][1]], index=pos_df.index)
|
134 |
+
|
135 |
+
|
136 |
+
new_track = track.clone()
|
137 |
+
new_track.values = pos_df
|
138 |
+
Q.append(new_track)
|
139 |
+
return Q
|
140 |
+
|
141 |
+
|
142 |
+
def _to_expmap(self, X):
|
143 |
+
'''Converts Euler angles to Exponential Maps'''
|
144 |
+
|
145 |
+
Q = []
|
146 |
+
for track in X:
|
147 |
+
channels = []
|
148 |
+
titles = []
|
149 |
+
euler_df = track.values
|
150 |
+
|
151 |
+
# Create a new DataFrame to store the exponential map rep
|
152 |
+
exp_df = pd.DataFrame(index=euler_df.index)
|
153 |
+
|
154 |
+
# Copy the root positions into the new DataFrame
|
155 |
+
rxp = '%s_Xposition'%track.root_name
|
156 |
+
ryp = '%s_Yposition'%track.root_name
|
157 |
+
rzp = '%s_Zposition'%track.root_name
|
158 |
+
exp_df[rxp] = pd.Series(data=euler_df[rxp], index=exp_df.index)
|
159 |
+
exp_df[ryp] = pd.Series(data=euler_df[ryp], index=exp_df.index)
|
160 |
+
exp_df[rzp] = pd.Series(data=euler_df[rzp], index=exp_df.index)
|
161 |
+
|
162 |
+
# List the columns that contain rotation channels
|
163 |
+
rots = [c for c in euler_df.columns if ('rotation' in c and 'Nub' not in c)]
|
164 |
+
|
165 |
+
# List the joints that are not end sites, i.e., have channels
|
166 |
+
joints = (joint for joint in track.skeleton if 'Nub' not in joint)
|
167 |
+
|
168 |
+
for joint in joints:
|
169 |
+
r = euler_df[[c for c in rots if joint in c]] # Get the columns that belong to this joint
|
170 |
+
euler = [[f[1]['%s_Xrotation'%joint], f[1]['%s_Yrotation'%joint], f[1]['%s_Zrotation'%joint]] for f in r.iterrows()] # Make sure the columns are organized in xyz order
|
171 |
+
exps = [Rotation(f, 'euler', from_deg=True).to_expmap() for f in euler] # Convert the eulers to exp maps
|
172 |
+
|
173 |
+
# Create the corresponding columns in the new DataFrame
|
174 |
+
|
175 |
+
exp_df['%s_alpha'%joint] = pd.Series(data=[e[0] for e in exps], index=exp_df.index)
|
176 |
+
exp_df['%s_beta'%joint] = pd.Series(data=[e[1] for e in exps], index=exp_df.index)
|
177 |
+
exp_df['%s_gamma'%joint] = pd.Series(data=[e[2] for e in exps], index=exp_df.index)
|
178 |
+
|
179 |
+
new_track = track.clone()
|
180 |
+
new_track.values = exp_df
|
181 |
+
Q.append(new_track)
|
182 |
+
|
183 |
+
return Q
|
184 |
+
|
185 |
+
def _expmap_to_euler(self, X):
|
186 |
+
Q = []
|
187 |
+
for track in X:
|
188 |
+
channels = []
|
189 |
+
titles = []
|
190 |
+
exp_df = track.values
|
191 |
+
|
192 |
+
# Create a new DataFrame to store the Euler angle rep
|
193 |
+
euler_df = pd.DataFrame(index=exp_df.index)
|
194 |
+
|
195 |
+
# Copy the root positions into the new DataFrame
|
196 |
+
rxp = '%s_Xposition'%track.root_name
|
197 |
+
ryp = '%s_Yposition'%track.root_name
|
198 |
+
rzp = '%s_Zposition'%track.root_name
|
199 |
+
euler_df[rxp] = pd.Series(data=exp_df[rxp], index=euler_df.index)
|
200 |
+
euler_df[ryp] = pd.Series(data=exp_df[ryp], index=euler_df.index)
|
201 |
+
euler_df[rzp] = pd.Series(data=exp_df[rzp], index=euler_df.index)
|
202 |
+
|
203 |
+
# List the columns that contain exponential map channels
|
204 |
+
exp_params = [c for c in exp_df.columns if ( any(p in c for p in ['alpha', 'beta','gamma']) and 'Nub' not in c)]
|
205 |
+
|
206 |
+
# List the joints that are not end sites, i.e., have channels
|
207 |
+
joints = (joint for joint in track.skeleton if 'Nub' not in joint)
|
208 |
+
|
209 |
+
for joint in joints:
|
210 |
+
r = exp_df[[c for c in exp_params if joint in c]] # Get the columns that belong to this joint
|
211 |
+
expmap = [[f[1]['%s_alpha'%joint], f[1]['%s_beta'%joint], f[1]['%s_gamma'%joint]] for f in r.iterrows()] # Make sure the columns are organized in alpha, beta, gamma order
|
212 |
+
euler_rots = [Rotation(f, 'expmap').to_euler(True)[0] for f in expmap] # Convert the exp maps back to Euler angles
|
213 |
+
|
214 |
+
# Create the corresponding columns in the new DataFrame
|
215 |
+
|
216 |
+
euler_df['%s_Xrotation'%joint] = pd.Series(data=[e[0] for e in euler_rots], index=euler_df.index)
|
217 |
+
euler_df['%s_Yrotation'%joint] = pd.Series(data=[e[1] for e in euler_rots], index=euler_df.index)
|
218 |
+
euler_df['%s_Zrotation'%joint] = pd.Series(data=[e[2] for e in euler_rots], index=euler_df.index)
|
219 |
+
|
220 |
+
new_track = track.clone()
|
221 |
+
new_track.values = euler_df
|
222 |
+
Q.append(new_track)
|
223 |
+
|
224 |
+
return Q
|
225 |
+
|
226 |
+
|
227 |
+
class JointSelector(BaseEstimator, TransformerMixin):
|
228 |
+
'''
|
229 |
+
Allows for filtering the mocap data to include only the selected joints
|
230 |
+
'''
|
231 |
+
def __init__(self, joints, include_root=False):
|
232 |
+
self.joints = joints
|
233 |
+
self.include_root = include_root
|
234 |
+
|
235 |
+
def fit(self, X, y=None):
|
236 |
+
return self
|
237 |
+
|
238 |
+
def transform(self, X, y=None):
|
239 |
+
selected_joints = []
|
240 |
+
selected_channels = []
|
241 |
+
|
242 |
+
if self.include_root:
|
243 |
+
selected_joints.append(X[0].root_name)
|
244 |
+
|
245 |
+
selected_joints.extend(self.joints)
|
246 |
+
|
247 |
+
for joint_name in selected_joints:
|
248 |
+
selected_channels.extend([o for o in X[0].values.columns if joint_name in o])
|
249 |
+
|
250 |
+
Q = []
|
251 |
+
|
252 |
+
|
253 |
+
for track in X:
|
254 |
+
t2 = track.clone()
|
255 |
+
|
256 |
+
for key in track.skeleton.keys():
|
257 |
+
if key not in selected_joints:
|
258 |
+
t2.skeleton.pop(key)
|
259 |
+
t2.values = track.values[selected_channels]
|
260 |
+
|
261 |
+
Q.append(t2)
|
262 |
+
|
263 |
+
|
264 |
+
return Q
|
265 |
+
|
266 |
+
|
267 |
+
class Numpyfier(BaseEstimator, TransformerMixin):
|
268 |
+
'''
|
269 |
+
Just converts the values in a MocapData object into a numpy array
|
270 |
+
Useful for the final stage of a pipeline before training
|
271 |
+
'''
|
272 |
+
def __init__(self):
|
273 |
+
pass
|
274 |
+
|
275 |
+
def fit(self, X, y=None):
|
276 |
+
self.org_mocap_ = X[0].clone()
|
277 |
+
self.org_mocap_.values.drop(self.org_mocap_.values.index, inplace=True)
|
278 |
+
|
279 |
+
return self
|
280 |
+
|
281 |
+
def transform(self, X, y=None):
|
282 |
+
Q = []
|
283 |
+
|
284 |
+
for track in X:
|
285 |
+
Q.append(track.values.values)
|
286 |
+
|
287 |
+
return np.array(Q)
|
288 |
+
|
289 |
+
def inverse_transform(self, X, copy=None):
|
290 |
+
Q = []
|
291 |
+
|
292 |
+
for track in X:
|
293 |
+
|
294 |
+
new_mocap = self.org_mocap_.clone()
|
295 |
+
time_index = pd.to_timedelta([f for f in range(track.shape[0])], unit='s')
|
296 |
+
|
297 |
+
new_df = pd.DataFrame(data=track, index=time_index, columns=self.org_mocap_.values.columns)
|
298 |
+
|
299 |
+
new_mocap.values = new_df
|
300 |
+
|
301 |
+
|
302 |
+
Q.append(new_mocap)
|
303 |
+
|
304 |
+
return Q
|
305 |
+
|
306 |
+
class RootTransformer(BaseEstimator, TransformerMixin):
|
307 |
+
def __init__(self, method):
|
308 |
+
"""
|
309 |
+
Accepted methods:
|
310 |
+
abdolute_translation_deltas
|
311 |
+
pos_rot_deltas
|
312 |
+
"""
|
313 |
+
self.method = method
|
314 |
+
|
315 |
+
def fit(self, X, y=None):
|
316 |
+
return self
|
317 |
+
|
318 |
+
def transform(self, X, y=None):
|
319 |
+
Q = []
|
320 |
+
|
321 |
+
for track in X:
|
322 |
+
if self.method == 'abdolute_translation_deltas':
|
323 |
+
new_df = track.values.copy()
|
324 |
+
xpcol = '%s_Xposition'%track.root_name
|
325 |
+
ypcol = '%s_Yposition'%track.root_name
|
326 |
+
zpcol = '%s_Zposition'%track.root_name
|
327 |
+
|
328 |
+
|
329 |
+
dxpcol = '%s_dXposition'%track.root_name
|
330 |
+
dzpcol = '%s_dZposition'%track.root_name
|
331 |
+
|
332 |
+
dx = track.values[xpcol].diff()
|
333 |
+
dz = track.values[zpcol].diff()
|
334 |
+
|
335 |
+
dx[0] = 0
|
336 |
+
dz[0] = 0
|
337 |
+
|
338 |
+
new_df.drop([xpcol, zpcol], axis=1, inplace=True)
|
339 |
+
|
340 |
+
new_df[dxpcol] = dx
|
341 |
+
new_df[dzpcol] = dz
|
342 |
+
|
343 |
+
new_track = track.clone()
|
344 |
+
new_track.values = new_df
|
345 |
+
# end of abdolute_translation_deltas
|
346 |
+
|
347 |
+
elif self.method == 'pos_rot_deltas':
|
348 |
+
new_track = track.clone()
|
349 |
+
|
350 |
+
# Absolute columns
|
351 |
+
xp_col = '%s_Xposition'%track.root_name
|
352 |
+
yp_col = '%s_Yposition'%track.root_name
|
353 |
+
zp_col = '%s_Zposition'%track.root_name
|
354 |
+
|
355 |
+
xr_col = '%s_Xrotation'%track.root_name
|
356 |
+
yr_col = '%s_Yrotation'%track.root_name
|
357 |
+
zr_col = '%s_Zrotation'%track.root_name
|
358 |
+
|
359 |
+
# Delta columns
|
360 |
+
dxp_col = '%s_dXposition'%track.root_name
|
361 |
+
dzp_col = '%s_dZposition'%track.root_name
|
362 |
+
|
363 |
+
dxr_col = '%s_dXrotation'%track.root_name
|
364 |
+
dyr_col = '%s_dYrotation'%track.root_name
|
365 |
+
dzr_col = '%s_dZrotation'%track.root_name
|
366 |
+
|
367 |
+
|
368 |
+
new_df = track.values.copy()
|
369 |
+
|
370 |
+
root_pos_x_diff = pd.Series(data=track.values[xp_col].diff(), index=new_df.index)
|
371 |
+
root_pos_z_diff = pd.Series(data=track.values[zp_col].diff(), index=new_df.index)
|
372 |
+
|
373 |
+
root_rot_y_diff = pd.Series(data=track.values[yr_col].diff(), index=new_df.index)
|
374 |
+
root_rot_x_diff = pd.Series(data=track.values[xr_col].diff(), index=new_df.index)
|
375 |
+
root_rot_z_diff = pd.Series(data=track.values[zr_col].diff(), index=new_df.index)
|
376 |
+
|
377 |
+
|
378 |
+
root_pos_x_diff[0] = 0
|
379 |
+
root_pos_z_diff[0] = 0
|
380 |
+
|
381 |
+
root_rot_y_diff[0] = 0
|
382 |
+
root_rot_x_diff[0] = 0
|
383 |
+
root_rot_z_diff[0] = 0
|
384 |
+
|
385 |
+
new_df.drop([xr_col, yr_col, zr_col, xp_col, zp_col], axis=1, inplace=True)
|
386 |
+
|
387 |
+
new_df[dxp_col] = root_pos_x_diff
|
388 |
+
new_df[dzp_col] = root_pos_z_diff
|
389 |
+
|
390 |
+
new_df[dxr_col] = root_rot_x_diff
|
391 |
+
new_df[dyr_col] = root_rot_y_diff
|
392 |
+
new_df[dzr_col] = root_rot_z_diff
|
393 |
+
|
394 |
+
new_track.values = new_df
|
395 |
+
|
396 |
+
Q.append(new_track)
|
397 |
+
|
398 |
+
return Q
|
399 |
+
|
400 |
+
def inverse_transform(self, X, copy=None, start_pos=None):
|
401 |
+
Q = []
|
402 |
+
|
403 |
+
#TODO: simplify this implementation
|
404 |
+
|
405 |
+
startx = 0
|
406 |
+
startz = 0
|
407 |
+
|
408 |
+
if start_pos is not None:
|
409 |
+
startx, startz = start_pos
|
410 |
+
|
411 |
+
for track in X:
|
412 |
+
new_track = track.clone()
|
413 |
+
if self.method == 'abdolute_translation_deltas':
|
414 |
+
new_df = new_track.values
|
415 |
+
xpcol = '%s_Xposition'%track.root_name
|
416 |
+
ypcol = '%s_Yposition'%track.root_name
|
417 |
+
zpcol = '%s_Zposition'%track.root_name
|
418 |
+
|
419 |
+
|
420 |
+
dxpcol = '%s_dXposition'%track.root_name
|
421 |
+
dzpcol = '%s_dZposition'%track.root_name
|
422 |
+
|
423 |
+
dx = track.values[dxpcol].values
|
424 |
+
dz = track.values[dzpcol].values
|
425 |
+
|
426 |
+
recx = [startx]
|
427 |
+
recz = [startz]
|
428 |
+
|
429 |
+
for i in range(dx.shape[0]-1):
|
430 |
+
recx.append(recx[i]+dx[i+1])
|
431 |
+
recz.append(recz[i]+dz[i+1])
|
432 |
+
|
433 |
+
# recx = [recx[i]+dx[i+1] for i in range(dx.shape[0]-1)]
|
434 |
+
# recz = [recz[i]+dz[i+1] for i in range(dz.shape[0]-1)]
|
435 |
+
# recx = dx[:-1] + dx[1:]
|
436 |
+
# recz = dz[:-1] + dz[1:]
|
437 |
+
|
438 |
+
new_df[xpcol] = pd.Series(data=recx, index=new_df.index)
|
439 |
+
new_df[zpcol] = pd.Series(data=recz, index=new_df.index)
|
440 |
+
|
441 |
+
new_df.drop([dxpcol, dzpcol], axis=1, inplace=True)
|
442 |
+
|
443 |
+
new_track.values = new_df
|
444 |
+
# end of abdolute_translation_deltas
|
445 |
+
|
446 |
+
elif self.method == 'pos_rot_deltas':
|
447 |
+
new_track = track.clone()
|
448 |
+
|
449 |
+
# Absolute columns
|
450 |
+
xp_col = '%s_Xposition'%track.root_name
|
451 |
+
yp_col = '%s_Yposition'%track.root_name
|
452 |
+
zp_col = '%s_Zposition'%track.root_name
|
453 |
+
|
454 |
+
xr_col = '%s_Xrotation'%track.root_name
|
455 |
+
yr_col = '%s_Yrotation'%track.root_name
|
456 |
+
zr_col = '%s_Zrotation'%track.root_name
|
457 |
+
|
458 |
+
# Delta columns
|
459 |
+
dxp_col = '%s_dXposition'%track.root_name
|
460 |
+
dzp_col = '%s_dZposition'%track.root_name
|
461 |
+
|
462 |
+
dxr_col = '%s_dXrotation'%track.root_name
|
463 |
+
dyr_col = '%s_dYrotation'%track.root_name
|
464 |
+
dzr_col = '%s_dZrotation'%track.root_name
|
465 |
+
|
466 |
+
|
467 |
+
new_df = track.values.copy()
|
468 |
+
|
469 |
+
dx = track.values[dxp_col].values
|
470 |
+
dz = track.values[dzp_col].values
|
471 |
+
|
472 |
+
drx = track.values[dxr_col].values
|
473 |
+
dry = track.values[dyr_col].values
|
474 |
+
drz = track.values[dzr_col].values
|
475 |
+
|
476 |
+
rec_xp = [startx]
|
477 |
+
rec_zp = [startz]
|
478 |
+
|
479 |
+
rec_xr = [0]
|
480 |
+
rec_yr = [0]
|
481 |
+
rec_zr = [0]
|
482 |
+
|
483 |
+
|
484 |
+
for i in range(dx.shape[0]-1):
|
485 |
+
rec_xp.append(rec_xp[i]+dx[i+1])
|
486 |
+
rec_zp.append(rec_zp[i]+dz[i+1])
|
487 |
+
|
488 |
+
rec_xr.append(rec_xr[i]+drx[i+1])
|
489 |
+
rec_yr.append(rec_yr[i]+dry[i+1])
|
490 |
+
rec_zr.append(rec_zr[i]+drz[i+1])
|
491 |
+
|
492 |
+
|
493 |
+
new_df[xp_col] = pd.Series(data=rec_xp, index=new_df.index)
|
494 |
+
new_df[zp_col] = pd.Series(data=rec_zp, index=new_df.index)
|
495 |
+
|
496 |
+
new_df[xr_col] = pd.Series(data=rec_xr, index=new_df.index)
|
497 |
+
new_df[yr_col] = pd.Series(data=rec_yr, index=new_df.index)
|
498 |
+
new_df[zr_col] = pd.Series(data=rec_zr, index=new_df.index)
|
499 |
+
|
500 |
+
new_df.drop([dxr_col, dyr_col, dzr_col, dxp_col, dzp_col], axis=1, inplace=True)
|
501 |
+
|
502 |
+
|
503 |
+
new_track.values = new_df
|
504 |
+
|
505 |
+
Q.append(new_track)
|
506 |
+
|
507 |
+
return Q
|
508 |
+
|
509 |
+
|
510 |
+
class RootCentricPositionNormalizer(BaseEstimator, TransformerMixin):
|
511 |
+
def __init__(self):
|
512 |
+
pass
|
513 |
+
|
514 |
+
def fit(self, X, y=None):
|
515 |
+
return self
|
516 |
+
|
517 |
+
def transform(self, X, y=None):
|
518 |
+
Q = []
|
519 |
+
|
520 |
+
for track in X:
|
521 |
+
new_track = track.clone()
|
522 |
+
|
523 |
+
rxp = '%s_Xposition'%track.root_name
|
524 |
+
ryp = '%s_Yposition'%track.root_name
|
525 |
+
rzp = '%s_Zposition'%track.root_name
|
526 |
+
|
527 |
+
projected_root_pos = track.values[[rxp, ryp, rzp]]
|
528 |
+
|
529 |
+
projected_root_pos.loc[:,ryp] = 0 # we want the root's projection on the floor plane as the ref
|
530 |
+
|
531 |
+
new_df = pd.DataFrame(index=track.values.index)
|
532 |
+
|
533 |
+
all_but_root = [joint for joint in track.skeleton if track.root_name not in joint]
|
534 |
+
            # all_but_root = [joint for joint in track.skeleton]

            for joint in all_but_root:
                new_df['%s_Xposition'%joint] = pd.Series(data=track.values['%s_Xposition'%joint]-projected_root_pos[rxp], index=new_df.index)
                new_df['%s_Yposition'%joint] = pd.Series(data=track.values['%s_Yposition'%joint]-projected_root_pos[ryp], index=new_df.index)
                new_df['%s_Zposition'%joint] = pd.Series(data=track.values['%s_Zposition'%joint]-projected_root_pos[rzp], index=new_df.index)

            # keep the root as it is now
            new_df[rxp] = track.values[rxp]
            new_df[ryp] = track.values[ryp]
            new_df[rzp] = track.values[rzp]

            new_track.values = new_df

            Q.append(new_track)

        return Q

    def inverse_transform(self, X, copy=None):
        Q = []

        for track in X:
            new_track = track.clone()

            rxp = '%s_Xposition'%track.root_name
            ryp = '%s_Yposition'%track.root_name
            rzp = '%s_Zposition'%track.root_name

            projected_root_pos = track.values[[rxp, ryp, rzp]]

            projected_root_pos.loc[:,ryp] = 0  # we want the root's projection on the floor plane as the ref

            new_df = pd.DataFrame(index=track.values.index)

            for joint in track.skeleton:
                new_df['%s_Xposition'%joint] = pd.Series(data=track.values['%s_Xposition'%joint]+projected_root_pos[rxp], index=new_df.index)
                new_df['%s_Yposition'%joint] = pd.Series(data=track.values['%s_Yposition'%joint]+projected_root_pos[ryp], index=new_df.index)
                new_df['%s_Zposition'%joint] = pd.Series(data=track.values['%s_Zposition'%joint]+projected_root_pos[rzp], index=new_df.index)

            new_track.values = new_df

            Q.append(new_track)

        return Q


class Flattener(BaseEstimator, TransformerMixin):
    def __init__(self):
        pass

    def fit(self, X, y=None):
        return self

    def transform(self, X, y=None):
        return np.concatenate(X, axis=0)


class ConstantsRemover(BaseEstimator, TransformerMixin):
    '''
    For now it just looks at the first track
    '''

    def __init__(self, eps=10e-10):
        self.eps = eps

    def fit(self, X, y=None):
        stds = X[0].values.std()
        cols = X[0].values.columns.values
        self.const_dims_ = [c for c in cols if (stds[c] < self.eps).any()]
        self.const_values_ = {c: X[0].values[c].values[0] for c in cols if (stds[c] < self.eps).any()}
        return self

    def transform(self, X, y=None):
        Q = []

        for track in X:
            t2 = track.clone()
            #for key in t2.skeleton.keys():
            #    if key in self.ConstDims_:
            #        t2.skeleton.pop(key)
            t2.values = track.values[track.values.columns.difference(self.const_dims_)]
            Q.append(t2)

        return Q

    def inverse_transform(self, X, copy=None):
        Q = []

        for track in X:
            t2 = track.clone()
            for d in self.const_dims_:
                t2.values[d] = self.const_values_[d]
            Q.append(t2)

        return Q


class ListStandardScaler(BaseEstimator, TransformerMixin):
    def __init__(self, is_DataFrame=False):
        self.is_DataFrame = is_DataFrame

    def fit(self, X, y=None):
        if self.is_DataFrame:
            X_train_flat = np.concatenate([m.values for m in X], axis=0)
        else:
            X_train_flat = np.concatenate([m for m in X], axis=0)

        self.data_mean_ = np.mean(X_train_flat, axis=0)
        self.data_std_ = np.std(X_train_flat, axis=0)

        return self

    def transform(self, X, y=None):
        Q = []

        for track in X:
            if self.is_DataFrame:
                normalized_track = track.copy()
                normalized_track.values = (track.values - self.data_mean_) / self.data_std_
            else:
                normalized_track = (track - self.data_mean_) / self.data_std_

            Q.append(normalized_track)

        if self.is_DataFrame:
            return Q
        else:
            return np.array(Q)

    def inverse_transform(self, X, copy=None):
        Q = []

        for track in X:
            if self.is_DataFrame:
                unnormalized_track = track.copy()
                unnormalized_track.values = (track.values * self.data_std_) + self.data_mean_
            else:
                unnormalized_track = (track * self.data_std_) + self.data_mean_

            Q.append(unnormalized_track)

        if self.is_DataFrame:
            return Q
        else:
            return np.array(Q)


class DownSampler(BaseEstimator, TransformerMixin):
    def __init__(self, rate):
        self.rate = rate

    def fit(self, X, y=None):
        return self

    def transform(self, X, y=None):
        Q = []

        for track in X:
            #print(track.values.size)
            #new_track = track.clone()
            #new_track.values = track.values[0:-1:self.rate]
            #print(new_track.values.size)
            new_track = track[0:-1:self.rate]
            Q.append(new_track)

        return Q

    def inverse_transform(self, X, copy=None):
        return X


#TODO: JointsSelector (x)
#TODO: SegmentMaker
#TODO: DynamicFeaturesAdder
#TODO: ShapeFeaturesAdder
#TODO: DataFrameNumpier (x)

class TemplateTransform(BaseEstimator, TransformerMixin):
    def __init__(self):
        pass

    def fit(self, X, y=None):
        return self

    def transform(self, X, y=None):
        return X

class UnsupportedParamError(Exception):
    def __init__(self, message):
        self.message = message
dataloaders/pymo/rotation_tools.py
ADDED
@@ -0,0 +1,153 @@
'''
Tools for Manipulating and Converting 3D Rotations

By Omid Alemi
Created: June 12, 2017

Adapted from that matlab file...
'''

import math
import numpy as np

def deg2rad(x):
    return x/180*math.pi


def rad2deg(x):
    return x/math.pi*180

class Rotation():
    def __init__(self, rot, param_type, rotation_order, **params):
        self.rotmat = []
        self.rotation_order = rotation_order
        if param_type == 'euler':
            self._from_euler(rot[0], rot[1], rot[2], params)
        elif param_type == 'expmap':
            self._from_expmap(rot[0], rot[1], rot[2], params)

    def _from_euler(self, alpha, beta, gamma, params):
        '''Expecting degrees'''

        if params['from_deg'] == True:
            alpha = deg2rad(alpha)
            beta = deg2rad(beta)
            gamma = deg2rad(gamma)

        ca = math.cos(alpha)
        cb = math.cos(beta)
        cg = math.cos(gamma)
        sa = math.sin(alpha)
        sb = math.sin(beta)
        sg = math.sin(gamma)

        Rx = np.asarray([[1, 0, 0],
                         [0, ca, sa],
                         [0, -sa, ca]])

        Ry = np.asarray([[cb, 0, -sb],
                         [0, 1, 0],
                         [sb, 0, cb]])

        Rz = np.asarray([[cg, sg, 0],
                         [-sg, cg, 0],
                         [0, 0, 1]])

        self.rotmat = np.eye(3)

        # inner product of the rotation matrices, in the order defined by the BVH file
        for axis in self.rotation_order:
            if axis == 'X':
                self.rotmat = np.matmul(Rx, self.rotmat)
            elif axis == 'Y':
                self.rotmat = np.matmul(Ry, self.rotmat)
            else:
                self.rotmat = np.matmul(Rz, self.rotmat)

    def _from_expmap(self, alpha, beta, gamma, params):
        if (alpha == 0 and beta == 0 and gamma == 0):
            self.rotmat = np.eye(3)
            return

        #TODO: Check exp map params

        theta = np.linalg.norm([alpha, beta, gamma])

        expmap = [alpha, beta, gamma] / theta

        x = expmap[0]
        y = expmap[1]
        z = expmap[2]

        s = math.sin(theta/2)
        c = math.cos(theta/2)

        self.rotmat = np.asarray([
            [2*(x**2-1)*s**2+1, 2*x*y*s**2-2*z*c*s, 2*x*z*s**2+2*y*c*s],
            [2*x*y*s**2+2*z*c*s, 2*(y**2-1)*s**2+1, 2*y*z*s**2-2*x*c*s],
            [2*x*z*s**2-2*y*c*s, 2*y*z*s**2+2*x*c*s, 2*(z**2-1)*s**2+1]
        ])

    def get_euler_axis(self):
        R = self.rotmat
        theta = math.acos((self.rotmat.trace() - 1) / 2)
        axis = np.asarray([R[2,1] - R[1,2], R[0,2] - R[2,0], R[1,0] - R[0,1]])
        axis = axis/(2*math.sin(theta))
        return theta, axis

    def to_expmap(self):
        theta, axis = self.get_euler_axis()
        rot_arr = theta * axis
        if np.isnan(rot_arr).any():
            rot_arr = [0, 0, 0]
        return rot_arr

    def to_euler(self, use_deg=False):
        eulers = np.zeros((2, 3))

        if np.absolute(np.absolute(self.rotmat[2, 0]) - 1) < 1e-12:
            # GIMBAL LOCK!
            print('Gimbal')
            if np.absolute(self.rotmat[2, 0] - 1) < 1e-12:  # rotmat[2, 0] ~ +1
                eulers[:,0] = math.atan2(-self.rotmat[0,1], -self.rotmat[0,2])
                eulers[:,1] = -math.pi/2
            else:  # rotmat[2, 0] ~ -1
                eulers[:,0] = math.atan2(self.rotmat[0,1], -self.rotmat[0,2])
                eulers[:,1] = math.pi/2

            return eulers

        theta = -math.asin(self.rotmat[2,0])
        theta2 = math.pi - theta

        # psi1, psi2
        eulers[0,0] = math.atan2(self.rotmat[2,1]/math.cos(theta), self.rotmat[2,2]/math.cos(theta))
        eulers[1,0] = math.atan2(self.rotmat[2,1]/math.cos(theta2), self.rotmat[2,2]/math.cos(theta2))

        # theta1, theta2
        eulers[0,1] = theta
        eulers[1,1] = theta2

        # phi1, phi2
        eulers[0,2] = math.atan2(self.rotmat[1,0]/math.cos(theta), self.rotmat[0,0]/math.cos(theta))
        eulers[1,2] = math.atan2(self.rotmat[1,0]/math.cos(theta2), self.rotmat[0,0]/math.cos(theta2))

        if use_deg:
            eulers = rad2deg(eulers)

        return eulers

    def to_quat(self):
        #TODO
        pass

    def __str__(self):
        return "Rotation Matrix: \n " + self.rotmat.__str__()
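A short sketch of how the Rotation class above is meant to be driven: Euler angles arrive in degrees, and rotation_order mirrors the channel order declared in the BVH file (the axis letters are consumed left to right). The values below are illustrative only.

from dataloaders.pymo.rotation_tools import Rotation

# 90 degrees about Z, composed in BVH channel order Z, X, Y
r = Rotation([0.0, 0.0, 90.0], 'euler', rotation_order='ZXY', from_deg=True)

print(r.rotmat)                   # accumulated 3x3 rotation matrix
print(r.to_expmap())              # axis-angle vector; sign follows the transposed matrix convention above
print(r.to_euler(use_deg=True))   # the two candidate Euler solutions, in degrees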
dataloaders/pymo/rotation_tools.py!
ADDED
@@ -0,0 +1,69 @@
'''
Tools for Manipulating and Converting 3D Rotations

By Omid Alemi
Created: June 12, 2017

Adapted from that matlab file...
'''

import math
import numpy as np

def deg2rad(x):
    return x/180*math.pi

class Rotation():
    def __init__(self, rot, param_type, **params):
        self.rotmat = []
        if param_type == 'euler':
            self._from_euler(rot[0], rot[1], rot[2], params)

    def _from_euler(self, alpha, beta, gamma, params):
        '''Expecting degrees'''

        if params['from_deg'] == True:
            alpha = deg2rad(alpha)
            beta = deg2rad(beta)
            gamma = deg2rad(gamma)

        Rx = np.asarray([[1, 0, 0],
                         [0, math.cos(alpha), -math.sin(alpha)],
                         [0, math.sin(alpha), math.cos(alpha)]])

        Ry = np.asarray([[math.cos(beta), 0, math.sin(beta)],
                         [0, 1, 0],
                         [-math.sin(beta), 0, math.cos(beta)]])

        Rz = np.asarray([[math.cos(gamma), -math.sin(gamma), 0],
                         [math.sin(gamma), math.cos(gamma), 0],
                         [0, 0, 1]])

        self.rotmat = np.matmul(np.matmul(Rz, Ry), Rx).T

    def get_euler_axis(self):
        R = self.rotmat
        theta = math.acos((self.rotmat.trace() - 1) / 2)
        axis = np.asarray([R[2,1] - R[1,2], R[0,2] - R[2,0], R[1,0] - R[0,1]])
        axis = axis/(2*math.sin(theta))
        return theta, axis

    def to_expmap(self):
        theta, axis = self.get_euler_axis()
        rot_arr = theta * axis
        if np.isnan(rot_arr).any():
            rot_arr = [0, 0, 0]
        return rot_arr

    def to_euler(self):
        #TODO
        pass

    def to_quat(self):
        #TODO
        pass
dataloaders/pymo/viz_tools.py
ADDED
@@ -0,0 +1,236 @@
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import IPython
import os

from .writers import BVHWriter  # needed by nb_play_mocap_fromurl below

def save_fig(fig_id, tight_layout=True):
    if tight_layout:
        plt.tight_layout()
    plt.savefig(fig_id + '.png', format='png', dpi=300)


def draw_stickfigure(mocap_track, frame, data=None, joints=None, draw_names=False, ax=None, figsize=(8,8)):
    if ax is None:
        fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(111)

    if joints is None:
        joints_to_draw = mocap_track.skeleton.keys()
    else:
        joints_to_draw = joints

    if data is None:
        df = mocap_track.values
    else:
        df = data

    for joint in joints_to_draw:
        ax.scatter(x=df['%s_Xposition'%joint][frame],
                   y=df['%s_Yposition'%joint][frame],
                   alpha=0.6, c='b', marker='o')

        parent_x = df['%s_Xposition'%joint][frame]
        parent_y = df['%s_Yposition'%joint][frame]

        children_to_draw = [c for c in mocap_track.skeleton[joint]['children'] if c in joints_to_draw]

        for c in children_to_draw:
            child_x = df['%s_Xposition'%c][frame]
            child_y = df['%s_Yposition'%c][frame]
            ax.plot([parent_x, child_x], [parent_y, child_y], 'k-', lw=2)

        if draw_names:
            ax.annotate(joint,
                        (df['%s_Xposition'%joint][frame] + 0.1,
                         df['%s_Yposition'%joint][frame] + 0.1))

    return ax

def draw_stickfigure3d(mocap_track, frame, data=None, joints=None, draw_names=False, ax=None, figsize=(8,8)):
    from mpl_toolkits.mplot3d import Axes3D

    if ax is None:
        fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(111, projection='3d')

    if joints is None:
        joints_to_draw = mocap_track.skeleton.keys()
    else:
        joints_to_draw = joints

    if data is None:
        df = mocap_track.values
    else:
        df = data

    for joint in joints_to_draw:
        parent_x = df['%s_Xposition'%joint][frame]
        parent_y = df['%s_Zposition'%joint][frame]
        parent_z = df['%s_Yposition'%joint][frame]
        # ^ In mocaps, Y is the up-right axis

        ax.scatter(xs=parent_x,
                   ys=parent_y,
                   zs=parent_z,
                   alpha=0.6, c='b', marker='o')

        children_to_draw = [c for c in mocap_track.skeleton[joint]['children'] if c in joints_to_draw]

        for c in children_to_draw:
            child_x = df['%s_Xposition'%c][frame]
            child_y = df['%s_Zposition'%c][frame]
            child_z = df['%s_Yposition'%c][frame]
            # ^ In mocaps, Y is the up-right axis

            ax.plot([parent_x, child_x], [parent_y, child_y], [parent_z, child_z], 'k-', lw=2)

        if draw_names:
            ax.text(x=parent_x + 0.1,
                    y=parent_y + 0.1,
                    z=parent_z + 0.1,
                    s=joint,
                    color=(0, 0, 0, 0.9))  # matplotlib expects an RGBA tuple, not a CSS 'rgba(...)' string

    return ax


def sketch_move(mocap_track, data=None, ax=None, figsize=(16,8)):
    if ax is None:
        fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(111)

    if data is None:
        data = mocap_track.values

    for frame in range(0, data.shape[0], 4):
        # draw_stickfigure(mocap_track, f, data=data, ax=ax)

        for joint in mocap_track.skeleton.keys():
            children_to_draw = [c for c in mocap_track.skeleton[joint]['children']]

            parent_x = data['%s_Xposition'%joint][frame]
            parent_y = data['%s_Yposition'%joint][frame]

            frame_alpha = frame/data.shape[0]

            for c in children_to_draw:
                child_x = data['%s_Xposition'%c][frame]
                child_y = data['%s_Yposition'%c][frame]

                ax.plot([parent_x, child_x], [parent_y, child_y], '-', lw=1, color='gray', alpha=frame_alpha)


def viz_cnn_filter(feature_to_viz, mocap_track, data, gap=25):
    fig = plt.figure(figsize=(16,4))
    ax = plt.subplot2grid((1,8),(0,0))
    ax.imshow(feature_to_viz.T, aspect='auto', interpolation='nearest')

    ax = plt.subplot2grid((1,8),(0,1), colspan=7)
    for frame in range(feature_to_viz.shape[0]):
        frame_alpha = 0.2  # frame/data.shape[0] * 2 + 0.2

        for joint_i, joint in enumerate(mocap_track.skeleton.keys()):
            children_to_draw = [c for c in mocap_track.skeleton[joint]['children']]

            parent_x = data['%s_Xposition'%joint][frame] + frame * gap
            parent_y = data['%s_Yposition'%joint][frame]

            ax.scatter(x=parent_x,
                       y=parent_y,
                       alpha=0.6,
                       cmap='RdBu',
                       c=feature_to_viz[frame][joint_i] * 10000,
                       marker='o',
                       s=abs(feature_to_viz[frame][joint_i] * 10000))
            plt.axis('off')
            for c in children_to_draw:
                child_x = data['%s_Xposition'%c][frame] + frame * gap
                child_y = data['%s_Yposition'%c][frame]

                ax.plot([parent_x, child_x], [parent_y, child_y], '-', lw=1, color='gray', alpha=frame_alpha)


def print_skel(X):
    stack = [X.root_name]
    tab = 0
    while stack:
        joint = stack.pop()
        tab = len(stack)
        print('%s- %s (%s)'%('| '*tab, joint, X.skeleton[joint]['parent']))
        for c in X.skeleton[joint]['children']:
            stack.append(c)


def nb_play_mocap_fromurl(mocap, mf, frame_time=1/30, scale=1, base_url='http://titan:8385'):
    if mf == 'bvh':
        bw = BVHWriter()
        with open('test.bvh', 'w') as ofile:
            bw.write(mocap, ofile)

        filepath = '../notebooks/test.bvh'
    elif mf == 'pos':
        # keep only the position channels; build a fresh list instead of
        # removing items from the list being iterated over
        c = [cc for cc in mocap.values.columns if 'rotation' not in cc]
        mocap.values.to_csv('test.csv', index=False, columns=c)

        filepath = '../notebooks/test.csv'
    else:
        return

    url = '%s/mocapplayer/player.html?data_url=%s&scale=%f&cz=200&order=xzyi&frame_time=%f'%(base_url, filepath, scale, frame_time)
    iframe = '<iframe src=' + url + ' width="100%" height=500></iframe>'
    link = '<a href=%s target="_blank">New Window</a>'%url
    return IPython.display.HTML(iframe+link)

def nb_play_mocap(mocap, mf, meta=None, frame_time=1/30, scale=1, camera_z=500, base_url=None):
    data_template = 'var dataBuffer = `$$DATA$$`;'
    data_template += 'var metadata = $$META$$;'
    data_template += 'start(dataBuffer, metadata, $$CZ$$, $$SCALE$$, $$FRAMETIME$$);'
    dir_path = os.path.dirname(os.path.realpath(__file__))

    if base_url is None:
        base_url = os.path.join(dir_path, 'mocapplayer/playBuffer.html')

    # print(dir_path)

    if mf == 'bvh':
        pass  # TODO: 'bvh' playback is not implemented; data_assigned is only built for 'pos'
    elif mf == 'pos':
        # keep only the position channels (see note in nb_play_mocap_fromurl)
        cols = [c for c in mocap.values.columns if 'rotation' not in c]

        data_csv = mocap.values.to_csv(index=False, columns=cols)

        if meta is not None:
            lines = [','.join(item) for item in meta.astype('str')]
            meta_csv = '[' + ','.join('[%s]'%l for l in lines) + ']'
        else:
            meta_csv = '[]'

        data_assigned = data_template.replace('$$DATA$$', data_csv)
        data_assigned = data_assigned.replace('$$META$$', meta_csv)
        data_assigned = data_assigned.replace('$$CZ$$', str(camera_z))
        data_assigned = data_assigned.replace('$$SCALE$$', str(scale))
        data_assigned = data_assigned.replace('$$FRAMETIME$$', str(frame_time))

    else:
        return

    with open(os.path.join(dir_path, 'mocapplayer/data.js'), 'w') as oFile:
        oFile.write(data_assigned)

    url = '%s?&cz=200&order=xzyi&frame_time=%f&scale=%f'%(base_url, frame_time, scale)
    iframe = '<iframe frameborder="0" src=' + url + ' width="100%" height=500></iframe>'
    link = '<a href=%s target="_blank">New Window</a>'%url
    return IPython.display.HTML(iframe+link)
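A hedged sketch of driving the 2D stick-figure helper above: the drawing functions expect *_Xposition/*_Yposition columns, so a parsed track is first run through a positional parameterization (the MocapParameterizer from preprocessing.py is assumed here), and the frame argument is used as a positional lookup into the values DataFrame, hence the index reset.

import matplotlib.pyplot as plt

from dataloaders.pymo.parsers import BVHParser
from dataloaders.pymo.preprocessing import MocapParameterizer
from dataloaders.pymo.viz_tools import draw_stickfigure, print_skel

track = BVHParser().parse('data/example.bvh')  # hypothetical path
pos_track = MocapParameterizer('position').fit_transform([track])[0]
pos_track.values = pos_track.values.reset_index(drop=True)  # integer frame lookup

print_skel(pos_track)                      # dump the joint hierarchy as text
ax = draw_stickfigure(pos_track, frame=0)  # scatter joints + bone segments for one frame
plt.savefig('frame0.png', dpi=150)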
dataloaders/pymo/writers.py
ADDED
@@ -0,0 +1,55 @@
import numpy as np
import pandas as pd

class BVHWriter():
    def __init__(self):
        pass

    def write(self, X, ofile):
        # Writing the skeleton info
        ofile.write('HIERARCHY\n')

        self.motions_ = []
        self._printJoint(X, X.root_name, 0, ofile)

        # Writing the motion header
        ofile.write('MOTION\n')
        ofile.write('Frames: %d\n'%X.values.shape[0])
        ofile.write('Frame Time: %f\n'%X.framerate)

        # Writing the data
        self.motions_ = np.asarray(self.motions_).T
        lines = [" ".join(item) for item in self.motions_.astype(str)]
        ofile.write("".join("%s\n"%l for l in lines))

    def _printJoint(self, X, joint, tab, ofile):
        if X.skeleton[joint]['parent'] is None:
            ofile.write('ROOT %s\n'%joint)
        elif len(X.skeleton[joint]['children']) > 0:
            ofile.write('%sJOINT %s\n'%('\t'*(tab), joint))
        else:
            ofile.write('%sEnd Site\n'%('\t'*(tab)))  # BVH spec capitalizes 'Site'

        ofile.write('%s{\n'%('\t'*(tab)))

        ofile.write('%sOFFSET %3.5f %3.5f %3.5f\n'%('\t'*(tab+1),
                                                    X.skeleton[joint]['offsets'][0],
                                                    X.skeleton[joint]['offsets'][1],
                                                    X.skeleton[joint]['offsets'][2]))
        channels = X.skeleton[joint]['channels']
        n_channels = len(channels)

        if n_channels > 0:
            for ch in channels:
                self.motions_.append(np.asarray(X.values['%s_%s'%(joint, ch)].values))

        if len(X.skeleton[joint]['children']) > 0:
            ch_str = ''.join(' %s'*n_channels%tuple(channels))
            ofile.write('%sCHANNELS %d%s\n'%('\t'*(tab+1), n_channels, ch_str))

            for c in X.skeleton[joint]['children']:
                self._printJoint(X, c, tab+1, ofile)

        ofile.write('%s}\n'%('\t'*(tab)))
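Finally, a minimal round-trip sketch for the writer, which is also how nb_play_mocap_fromurl in viz_tools.py uses it: parse a clip, then serialize it straight back to BVH. The input path is again hypothetical.

from dataloaders.pymo.parsers import BVHParser
from dataloaders.pymo.writers import BVHWriter

track = BVHParser().parse('data/example.bvh')  # hypothetical path

with open('roundtrip.bvh', 'w') as f:
    BVHWriter().write(track, f)  # writes HIERARCHY, then the MOTION block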