{
  "data": {
    "motion_loader": {
      "_target_": "src.data.amass_motion.AMASSMotionLoader_custom",
      "base_dir": "datasets/motions/amass_feats",
      "normalizer": {
        "_target_": "src.data.motion.Normalizer",
        "base_dir": "stats/humanml3d/amass_feats",
        "eps": 1e-12
      },
      "nfeats": 135
    },
    "_target_": "src.data.text_motion.TextMotionDataset",
    "path": "datasets/annotations/humanml3d",
    "text_to_token_emb": {
      "_target_": "src.data.text.TokenEmbeddings",
      "path": "datasets/annotations/humanml3d",
      "modelname": "distilbert-base-uncased",
      "preload": true
    },
    "text_to_sent_emb": {
      "_target_": "src.data.text.SentenceEmbeddings",
      "path": "datasets/annotations/humanml3d",
      "modelname": "sentence-transformers/all-mpnet-base-v2",
      "preload": true
    },
    "preload": true
  },
  "model": {
    "_target_": "src.model.TMR",
    "motion_encoder": {
      "_target_": "src.model.ACTORStyleEncoder",
      "nfeats": 135,
      "vae": true,
      "latent_dim": 256,
      "ff_size": 1024,
      "num_layers": 6,
      "num_heads": 4,
      "dropout": 0.1,
      "activation": "gelu"
    },
    "text_encoder": {
      "_target_": "src.model.ACTORStyleEncoder",
      "nfeats": 768,
      "vae": true,
      "latent_dim": 256,
      "ff_size": 1024,
      "num_layers": 6,
      "num_heads": 4,
      "dropout": 0.1,
      "activation": "gelu"
    },
    "motion_decoder": {
      "_target_": "src.model.ACTORStyleDecoder",
      "nfeats": 135,
      "latent_dim": 256,
      "ff_size": 1024,
      "num_layers": 6,
      "num_heads": 4,
      "dropout": 0.1,
      "activation": "gelu"
    },
    "vae": true,
    "lmd": {
      "recons": 1.0,
      "latent": 1e-05,
      "kl": 1e-05,
      "contrastive": 0.1
    },
    "lr": 0.0001,
    "temperature": 0.1,
    "threshold_selfsim": 0.8,
    "threshold_selfsim_metrics": 0.95
  },
  "trainer": {
    "_target_": "pytorch_lightning.Trainer",
    "max_epochs": 1000,
    "log_every_n_steps": 50,
    "num_sanity_val_steps": 0,
    "check_val_every_n_epoch": 1,
    "accelerator": "gpu",
    "devices": 1,
    "callbacks": [
      {
        "_target_": "pytorch_lightning.callbacks.ModelCheckpoint",
        "filename": "latest-{epoch}",
        "every_n_epochs": 1,
        "save_top_k": 1,
        "save_last": true
      },
      {
        "_target_": "pytorch_lightning.callbacks.ModelCheckpoint",
        "filename": "latest-{epoch}",
        "monitor": "step",
        "mode": "max",
        "every_n_epochs": 100,
        "save_top_k": -1,
        "save_last": false
      },
      {
        "_target_": "src.callback.progress.ProgressLogger",
        "precision": 3
      },
      {
        "_target_": "src.callback.tqdmbar.TQDMProgressBar"
      }
    ],
    "logger": {
      "_target_": "src.logger.csv.CSVLogger",
      "save_dir": "outputs/tmr_humanml3d_amass_feats",
      "name": "logs"
    }
  },
  "run_dir": "outputs/tmr_humanml3d_amass_feats",
  "seed": 1234,
  "logger_level": "INFO",
  "ckpt": "last",
  "resume_dir": null,
  "dataloader": {
    "_target_": "torch.utils.data.DataLoader",
    "batch_size": 32,
    "num_workers": 8
  }
}
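
For reference, a minimal sketch of how a Hydra-style config like the one above is typically consumed: every mapping carrying a "_target_" key is resolved to the named class or function by hydra.utils.instantiate, and nested "_target_" entries (such as the normalizer inside motion_loader) are instantiated recursively. The script below is an illustrative assumption, not the project's own entry point; in particular the config file path is hypothetical. hydra.utils.instantiate and OmegaConf.create are real APIs.

    # Minimal sketch, assuming this config is saved as config.json in the run
    # directory; the path and the wiring below are illustrative assumptions.
    import json

    from hydra.utils import instantiate
    from omegaconf import OmegaConf

    with open("outputs/tmr_humanml3d_amass_feats/config.json") as f:  # hypothetical path
        cfg = OmegaConf.create(json.load(f))

    # instantiate() resolves every "_target_" recursively, so the dataset is
    # built together with its motion loader, normalizer, and text embedders.
    dataset = instantiate(cfg.data)     # -> src.data.text_motion.TextMotionDataset
    model = instantiate(cfg.model)      # -> src.model.TMR
    trainer = instantiate(cfg.trainer)  # -> pytorch_lightning.Trainer, callbacks and logger included

    # Extra keyword arguments are forwarded to the target, so objects that
    # cannot live in the config (here, the dataset) are injected at call time.
    dataloader = instantiate(cfg.dataloader, dataset=dataset)

    trainer.fit(model, dataloader)  # the real training script may also wire a collate_fn, splits, etc.

Passing dataset=dataset at instantiation time is the usual way to combine config-driven construction with runtime objects: the DataLoader's batch_size and num_workers come from the config, while the dataset is supplied by the caller.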