Uploading MMVAE in asenella/MHD_MMVAE_dhumjgtt

- README.md +13 -0
- decoders.pkl +3 -0
- encoders.pkl +3 -0
- environment.json +1 -0
- model.pt +3 -0
- model_config.json +1 -0
README.md
ADDED
@@ -0,0 +1,13 @@
+---
+language: en
+tags:
+- multivae
+license: apache-2.0
+---
+
+### Downloading this model from the Hub
+This model was trained with multivae. It can be downloaded or reloaded using the method `load_from_hf_hub`
+```python
+>>> from multivae.models import AutoModel
+>>> model = AutoModel.load_from_hf_hub(hf_hub_path="your_hf_username/repo_name")
+```
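The README snippet above uses a placeholder Hub path; for this particular upload the repository is asenella/MHD_MMVAE_dhumjgtt (from the commit title). A minimal sketch of reloading it, relying only on the `AutoModel.load_from_hf_hub` call shown in the README:

```python
# Sketch: reload this specific upload from the Hub with multivae.
# The repository path is taken from the commit title; adjust if the repo moves.
from multivae.models import AutoModel

model = AutoModel.load_from_hf_hub(hf_hub_path="asenella/MHD_MMVAE_dhumjgtt")
print(type(model).__name__)  # expected to be the MMVAE model class, per model_config.json below
```

Presumably `AutoModel` resolves the concrete model class from the uploaded model_config.json, so no manual class selection is needed.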
decoders.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a64e316ace4bc0ba7956c2d3fca53594f4b4ca67c7e0762e2c27217bb06076aa
+size 32019123
encoders.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a9327261229087b3bbeba4d79f5003cca38fc766e55582ab129daf2cc69a059
+size 49379186
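decoders.pkl and encoders.pkl are stored as Git LFS pointer files: each pointer records the SHA-256 of the real object (`oid`) and its size in bytes. After downloading the repository, the actual files can be checked against these pointers with the standard library; the local file paths below are assumptions.

```python
import hashlib
import os

# Verify downloaded LFS objects against the oid/size recorded in the pointers above.
# Paths assume the repository has been cloned into the current directory (hypothetical).
expected = {
    "decoders.pkl": ("a64e316ace4bc0ba7956c2d3fca53594f4b4ca67c7e0762e2c27217bb06076aa", 32019123),
    "encoders.pkl": ("5a9327261229087b3bbeba4d79f5003cca38fc766e55582ab129daf2cc69a059", 49379186),
}

for name, (oid, size) in expected.items():
    with open(name, "rb") as f:
        digest = hashlib.sha256(f.read()).hexdigest()
    assert os.path.getsize(name) == size, f"{name}: unexpected size"
    assert digest == oid, f"{name}: checksum mismatch"
    print(f"{name} OK")
```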
environment.json
ADDED
@@ -0,0 +1 @@
+{"name": "EnvironmentConfig", "python_version": "3.10"}
model.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:354570bf9754a0c8efa09bf34445e8f34538b409c392c18adc619af65fd83044
+size 81332018
model_config.json
ADDED
@@ -0,0 +1 @@
+{"name": "MMVAEConfig", "n_modalities": 3, "latent_dim": 64, "input_dims": {"image": [3, 28, 28], "audio": [1, 32, 128], "trajectory": [200]}, "uses_likelihood_rescaling": true, "rescale_factors": null, "decoders_dist": {"image": "normal", "audio": "normal", "trajectory": "normal"}, "decoder_dist_params": {}, "logits_to_std": "standard", "custom_architectures": ["encoders", "decoders"], "K": 10, "prior_and_posterior_dist": "normal", "learn_prior": false, "beta": 2.5}
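This configuration describes the uploaded MMVAE: 3 modalities (image, audio, trajectory), a 64-dimensional latent space, K = 10 importance samples, and beta = 2.5, with custom encoder/decoder architectures stored in the pickle files above. A small sketch for inspecting the config locally with only the standard library; the local filename is assumed to match the uploaded one.

```python
import json

# Read the uploaded configuration (assumes model_config.json has been downloaded locally).
with open("model_config.json") as f:
    cfg = json.load(f)

print(cfg["name"])                              # MMVAEConfig
print(cfg["n_modalities"], cfg["latent_dim"])   # 3 modalities, 64-dim latent space
print(cfg["input_dims"])                        # per-modality input shapes
print(cfg["K"], cfg["beta"])                    # 10 importance samples, beta = 2.5
```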