asenella committed
Commit 607e199
1 Parent(s): ef5f1c9

Uploading JMVAE in asenella/translated_mmnist_resnets_2_JMVAE_23ljk6ks

Files changed (7)
  1. README.md +13 -0
  2. decoders.pkl +3 -0
  3. encoders.pkl +3 -0
  4. environment.json +1 -0
  5. joint_encoder.pkl +3 -0
  6. model.pt +3 -0
  7. model_config.json +1 -0
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ language: en
+ tags:
+ - multivae
+ license: apache-2.0
+ ---
+
+ ### Downloading this model from the Hub
+ This model was trained with multivae. It can be downloaded or reloaded using the method `load_from_hf_hub`:
+ ```python
+ >>> from multivae.models import AutoModel
+ >>> model = AutoModel.load_from_hf_hub(hf_hub_path="your_hf_username/repo_name")
+ ```
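For this particular upload, the generic snippet above can be pointed directly at the repository named in the commit message. A minimal sketch, assuming only the `load_from_hf_hub` method shown in the README:

```python
# Minimal sketch: reload this specific upload from the Hub.
# The repository path comes from the commit message above; the API call is
# the one shown in the README snippet.
from multivae.models import AutoModel

model = AutoModel.load_from_hf_hub(
    hf_hub_path="asenella/translated_mmnist_resnets_2_JMVAE_23ljk6ks"
)
print(type(model).__name__)  # expected to be a JMVAE instance
```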
decoders.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a2bac0b35504761a052384286b6faede74d148ba17a4e382e4e88d1e93b638a6
+ size 23753212
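The `.pkl` files in this commit are stored through Git LFS, so the diff shows only the pointer text (spec version, SHA-256 object id, byte size) rather than the binaries. As a hedged sketch (the repository id is taken from the commit message; `huggingface_hub` is assumed to be installed), a single LFS-backed file can be resolved to its actual content like this:

```python
# Minimal sketch: fetch one LFS-tracked file from the Hub and get a local
# path to the resolved binary (not the pointer text shown in the diff).
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="asenella/translated_mmnist_resnets_2_JMVAE_23ljk6ks",
    filename="decoders.pkl",
)
print(local_path)
```

In practice, `AutoModel.load_from_hf_hub` (shown in the README above) downloads and wires these files together, so the manual download is only needed for inspection.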
encoders.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08dcb7d6abc1a491d489ba2bc72b6ae9b31298443eddc162e89173e2f5552ba6
+ size 113836439
environment.json ADDED
@@ -0,0 +1 @@
+ {"name": "EnvironmentConfig", "python_version": "3.10"}
joint_encoder.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6fed9c9458ea7502589310208a0db8a8843defe94e10db8c5615941826fe91fb
+ size 33542288
model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:23cb9d07501d0d15651bb693d568f1695794c06ee35baeeb3422c07216810cc0
+ size 171013538
model_config.json ADDED
@@ -0,0 +1 @@
+ {"name": "JMVAEConfig", "n_modalities": 5, "latent_dim": 200, "input_dims": {"m0": [3, 28, 28], "m1": [3, 28, 28], "m2": [3, 28, 28], "m3": [3, 28, 28], "m4": [3, 28, 28]}, "uses_likelihood_rescaling": false, "rescale_factors": null, "decoders_dist": {"m0": "laplace", "m1": "laplace", "m2": "laplace", "m3": "laplace", "m4": "laplace"}, "decoder_dist_params": {"m0": {"scale": 0.75}, "m1": {"scale": 0.75}, "m2": {"scale": 0.75}, "m3": {"scale": 0.75}, "m4": {"scale": 0.75}}, "logits_to_std": "softplus", "custom_architectures": ["encoders", "decoders", "joint_encoder"], "alpha": 0.1, "warmup": 100, "beta": 0.5}