Upload 17 files

- .gitattributes +35 -35
- README.md +78 -0
- feature_extractor/preprocessor_config.json +20 -0
- model_index.json +33 -0
- safety_checker/model_config.json +16 -0
- safety_checker/model_state.pdparams +3 -0
- scheduler/scheduler_config.json +13 -0
- text_encoder/model_config.json +12 -0
- text_encoder/model_state.pdparams +3 -0
- tokenizer/merges.txt +0 -0
- tokenizer/special_tokens_map.json +1 -0
- tokenizer/tokenizer_config.json +1 -0
- tokenizer/vocab.json +0 -0
- unet/config.json +42 -0
- unet/model_state.pdparams +3 -0
- vae/config.json +31 -0
- vae/model_state.pdparams +3 -0
.gitattributes
CHANGED
@@ -1,35 +1,35 @@
 *.7z filter=lfs diff=lfs merge=lfs -text
 *.arrow filter=lfs diff=lfs merge=lfs -text
 *.bin filter=lfs diff=lfs merge=lfs -text
 *.bz2 filter=lfs diff=lfs merge=lfs -text
 *.ckpt filter=lfs diff=lfs merge=lfs -text
 *.ftz filter=lfs diff=lfs merge=lfs -text
 *.gz filter=lfs diff=lfs merge=lfs -text
 *.h5 filter=lfs diff=lfs merge=lfs -text
 *.joblib filter=lfs diff=lfs merge=lfs -text
 *.lfs.* filter=lfs diff=lfs merge=lfs -text
 *.mlmodel filter=lfs diff=lfs merge=lfs -text
 *.model filter=lfs diff=lfs merge=lfs -text
 *.msgpack filter=lfs diff=lfs merge=lfs -text
 *.npy filter=lfs diff=lfs merge=lfs -text
 *.npz filter=lfs diff=lfs merge=lfs -text
 *.onnx filter=lfs diff=lfs merge=lfs -text
 *.ot filter=lfs diff=lfs merge=lfs -text
 *.parquet filter=lfs diff=lfs merge=lfs -text
 *.pb filter=lfs diff=lfs merge=lfs -text
 *.pickle filter=lfs diff=lfs merge=lfs -text
 *.pkl filter=lfs diff=lfs merge=lfs -text
 *.pt filter=lfs diff=lfs merge=lfs -text
 *.pth filter=lfs diff=lfs merge=lfs -text
 *.rar filter=lfs diff=lfs merge=lfs -text
 *.safetensors filter=lfs diff=lfs merge=lfs -text
 saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.tar.* filter=lfs diff=lfs merge=lfs -text
 *.tflite filter=lfs diff=lfs merge=lfs -text
 *.tgz filter=lfs diff=lfs merge=lfs -text
 *.wasm filter=lfs diff=lfs merge=lfs -text
 *.xz filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.pdparams filter=lfs diff=lfs merge=lfs -text
README.md
CHANGED
@@ -1,3 +1,81 @@
 ---
 license: creativeml-openrail-m
+tags:
+- stable-diffusion
+- stable-diffusion-diffusers
+- text-to-image
+- text-to-audio
+inference: true
+extra_gated_prompt: |-
+  This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.
+  The CreativeML OpenRAIL License specifies:
+
+  1. You can't use the model to deliberately produce nor share illegal or harmful outputs or content
+  2. Riffusion claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license
+  3. You may re-distribute the weights and use the model commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully)
+  Please read the full license carefully here: https://huggingface.co/spaces/CompVis/stable-diffusion-license
+
+extra_gated_heading: Please read the LICENSE to access this model
 ---
+
+# Riffusion PaddlePaddle Version
+
+Riffusion is an app for real-time music generation with Stable Diffusion.
+
+Read about it at https://www.riffusion.com/about and try it at https://www.riffusion.com/.
+
+* Web app: https://github.com/hmartiro/riffusion-app
+* Inference server: https://github.com/hmartiro/riffusion-inference
+* Model checkpoint: https://huggingface.co/riffusion/riffusion-model-v1
+
+This repository contains the model files, including:
+
+* a diffusers-formatted library
+* a compiled checkpoint file
+* a traced unet for improved inference speed
+* a seed image library for use with riffusion-app
+
+## Riffusion v1 Model
+
+Riffusion is a latent text-to-image diffusion model capable of generating spectrogram images given any text input. These spectrograms can be converted into audio clips.
+
+The model was created by [Seth Forsgren](https://sethforsgren.com/) and [Hayk Martiros](https://haykmartiros.com/) as a hobby project.
+
+You can use the Riffusion model directly, or try the [Riffusion web app](https://www.riffusion.com/).
+
+The Riffusion model was created by fine-tuning the **Stable-Diffusion-v1-5** checkpoint. Read about Stable Diffusion in [🤗's Stable Diffusion blog](https://huggingface.co/blog/stable_diffusion).
+
+### Model Details
+- **Developed by:** Seth Forsgren, Hayk Martiros
+- **Model type:** Diffusion-based text-to-image generation model
+- **Language(s):** English
+- **License:** [The CreativeML OpenRAIL M license](https://huggingface.co/spaces/CompVis/stable-diffusion-license) is an [Open RAIL M license](https://www.licenses.ai/blog/2022/8/18/naming-convention-of-responsible-ai-licenses), adapted from the work that [BigScience](https://bigscience.huggingface.co/) and [the RAIL Initiative](https://www.licenses.ai/) are jointly carrying out in the area of responsible AI licensing. See also [the article about the BLOOM Open RAIL license](https://bigscience.huggingface.co/blog/the-bigscience-rail-license) on which our license is based.
+- **Model Description:** This is a model that can be used to generate and modify images based on text prompts. It is a [Latent Diffusion Model](https://arxiv.org/abs/2112.10752) that uses a fixed, pretrained text encoder ([CLIP ViT-L/14](https://arxiv.org/abs/2103.00020)) as suggested in the [Imagen paper](https://arxiv.org/abs/2205.11487).
+
+### Direct Use
+The model is intended for research purposes only. Possible research areas and tasks include
+
+- Generation of artworks, audio, and use in creative processes.
+- Applications in educational or creative tools.
+- Research on generative models.
+
+### Datasets
+The original Stable Diffusion v1.5 was trained on the [LAION-5B](https://arxiv.org/abs/2210.08402) dataset using the [CLIP text encoder](https://openai.com/blog/clip/), which provided an amazing starting point with an in-depth understanding of language, including musical concepts. The team at LAION also compiled a fantastic audio dataset from many general, speech, and music sources that we recommend at [LAION-AI/audio-dataset](https://github.com/LAION-AI/audio-dataset/blob/main/data_collection/README.md).
+
+### Fine Tuning
+
+Check out the [diffusers training examples](https://huggingface.co/docs/diffusers/training/overview) from Hugging Face. Fine tuning requires a dataset of spectrogram images of short audio clips, with associated text describing them. Note that the CLIP encoder is able to understand and connect many words even if they never appear in the dataset. It is also possible to use a [dreambooth](https://huggingface.co/blog/dreambooth) method to get custom styles.
+
+## Citation
+
+If you build on this work, please cite it as follows:
+
+```
+@article{Forsgren_Martiros_2022,
+  author = {Forsgren, Seth* and Martiros, Hayk*},
+  title = {{Riffusion - Stable diffusion for real-time music generation}},
+  url = {https://riffusion.com/about},
+  year = {2022}
+}
+```
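As a quick orientation for this PaddlePaddle port, here is a minimal sketch of text-to-spectrogram inference with ppdiffusers. The local path, prompt, and step count are illustrative assumptions, not part of this repo; converting the spectrogram image to audio is handled separately (e.g. by the riffusion-inference tools linked above).

```python
# Minimal sketch: generate a spectrogram image with the PaddlePaddle pipeline.
# Assumes ppdiffusers >= 0.9.0; "./riffusion-paddle" is a hypothetical local checkout.
from ppdiffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("./riffusion-paddle")
image = pipe("funk bassline with a jazzy saxophone solo",
             num_inference_steps=50).images[0]
image.save("spectrogram.png")  # convert to audio with riffusion-inference
```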
feature_extractor/preprocessor_config.json
ADDED
@@ -0,0 +1,20 @@
{
  "crop_size": 224,
  "do_center_crop": true,
  "do_convert_rgb": true,
  "do_normalize": true,
  "do_resize": true,
  "feature_extractor_type": "CLIPFeatureExtractor",
  "image_mean": [
    0.48145466,
    0.4578275,
    0.40821073
  ],
  "image_std": [
    0.26862954,
    0.26130258,
    0.27577711
  ],
  "resample": 3,
  "size": 224
}
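This config fully determines how images are prepared for the CLIP-based safety checker. A rough PIL/numpy sketch of the equivalent steps (the real work is done by `CLIPFeatureExtractor`; this is only to make the fields concrete):

```python
# Rough equivalent of this config's preprocessing (CLIP-style), using PIL + numpy.
import numpy as np
from PIL import Image

MEAN = np.array([0.48145466, 0.4578275, 0.40821073], dtype=np.float32)
STD = np.array([0.26862954, 0.26130258, 0.27577711], dtype=np.float32)

def preprocess(img: Image.Image) -> np.ndarray:
    img = img.convert("RGB")                                # do_convert_rgb
    w, h = img.size
    scale = 224 / min(w, h)                                 # do_resize, size=224
    img = img.resize((round(w * scale), round(h * scale)),
                     Image.BICUBIC)                         # resample=3 -> bicubic
    w, h = img.size
    left, top = (w - 224) // 2, (h - 224) // 2              # do_center_crop, crop_size=224
    img = img.crop((left, top, left + 224, top + 224))
    x = np.asarray(img, dtype=np.float32) / 255.0           # rescale to [0, 1]
    x = (x - MEAN) / STD                                    # do_normalize
    return x.transpose(2, 0, 1)                             # HWC -> CHW
```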
model_index.json
ADDED
@@ -0,0 +1,33 @@
{
  "_class_name": "StableDiffusionPipeline",
  "_ppdiffusers_version": "0.9.0",
  "feature_extractor": [
    "paddlenlp.transformers",
    "CLIPFeatureExtractor"
  ],
  "requires_safety_checker": true,
  "safety_checker": [
    "stable_diffusion",
    "StableDiffusionSafetyChecker"
  ],
  "scheduler": [
    "ppdiffusers",
    "PNDMScheduler"
  ],
  "text_encoder": [
    "paddlenlp.transformers",
    "CLIPTextModel"
  ],
  "tokenizer": [
    "paddlenlp.transformers",
    "CLIPTokenizer"
  ],
  "unet": [
    "ppdiffusers",
    "UNet2DConditionModel"
  ],
  "vae": [
    "ppdiffusers",
    "AutoencoderKL"
  ]
}
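model_index.json maps each pipeline slot to a (library, class) pair, which is how `from_pretrained` knows what to instantiate from each sub-folder. A sketch of loading the components individually, assuming the ppdiffusers/paddlenlp APIs mirror their diffusers/transformers counterparts (the local path is hypothetical):

```python
# Sketch: loading the pieces that model_index.json names, one sub-folder each.
# Assumption: from_pretrained(..., subfolder=...) works as in diffusers.
from ppdiffusers import AutoencoderKL, PNDMScheduler, UNet2DConditionModel
from paddlenlp.transformers import CLIPTextModel, CLIPTokenizer

root = "./riffusion-paddle"  # hypothetical local checkout of this repo
unet = UNet2DConditionModel.from_pretrained(root, subfolder="unet")
vae = AutoencoderKL.from_pretrained(root, subfolder="vae")
scheduler = PNDMScheduler.from_pretrained(root, subfolder="scheduler")
text_encoder = CLIPTextModel.from_pretrained(root + "/text_encoder")
tokenizer = CLIPTokenizer.from_pretrained(root + "/tokenizer")
```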
safety_checker/model_config.json
ADDED
@@ -0,0 +1,16 @@
{
  "init_args": [
    {
      "image_resolution": 224,
      "vision_layers": 24,
      "vision_heads": 16,
      "vision_embed_dim": 1024,
      "vision_patch_size": 14,
      "vision_mlp_ratio": 4,
      "vision_hidden_act": "quick_gelu",
      "projection_dim": 768,
      "init_class": "CLIPVisionModel"
    }
  ],
  "init_class": "StableDiffusionSafetyChecker"
}
safety_checker/model_state.pdparams
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1faa29e54299a89d50d40031ff732a193b1c985b70c814be65050b054c755462
size 1215974074
scheduler/scheduler_config.json
ADDED
@@ -0,0 +1,13 @@
{
  "_class_name": "PNDMScheduler",
  "_ppdiffusers_version": "0.9.0",
  "beta_end": 0.012,
  "beta_schedule": "scaled_linear",
  "beta_start": 0.00085,
  "num_train_timesteps": 1000,
  "prediction_type": "epsilon",
  "set_alpha_to_one": false,
  "skip_prk_steps": true,
  "steps_offset": 1,
  "trained_betas": null
}
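These fields pin down the diffusion noise schedule. "scaled_linear" is the Stable Diffusion convention: betas linear in sqrt-space, then squared. A small sketch of the implied schedule (the standard formula, not copied from ppdiffusers source):

```python
# The beta schedule these values imply ("scaled_linear": linear in sqrt(beta), squared).
import numpy as np

beta_start, beta_end, T = 0.00085, 0.012, 1000
betas = np.linspace(beta_start ** 0.5, beta_end ** 0.5, T) ** 2
alphas_cumprod = np.cumprod(1.0 - betas)  # used by epsilon-prediction sampling
print(alphas_cumprod[0], alphas_cumprod[-1])  # ~0.99915 at t=0 down to ~0.0047 at t=999
```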
text_encoder/model_config.json
ADDED
@@ -0,0 +1,12 @@
{
  "max_text_length": 77,
  "vocab_size": 49408,
  "text_embed_dim": 768,
  "text_heads": 12,
  "text_layers": 12,
  "text_hidden_act": "quick_gelu",
  "projection_dim": 768,
  "initializer_range": 0.02,
  "initializer_factor": 1.0,
  "init_class": "CLIPTextModel"
}
text_encoder/model_state.pdparams
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d538a142aff6871bf014aed26e16d5909ba3cb4192786a159b4475651304a71f
size 492264182
tokenizer/merges.txt
ADDED
The diff for this file is too large to render.
tokenizer/special_tokens_map.json
ADDED
@@ -0,0 +1 @@
{"bos_token": {"content": "<|startoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": "<|endoftext|>"}
tokenizer/tokenizer_config.json
ADDED
@@ -0,0 +1 @@
{"errors": "replace", "max_len": 77, "bos_token": {"content": "<|startoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": "<|endoftext|>", "add_prefix_space": false, "do_lower_case": true, "model_max_length": 77, "name_or_path": "/root/.cache/huggingface/diffusers/models--riffusion--riffusion-model-v1/snapshots/79993436c342ff529802d1dabb016ebe15b5c4ae/tokenizer", "special_tokens_map_file": "/tmp/tmphvapnz03/special_tokens_map.json", "tokenizer_class": "CLIPTokenizer"}
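Per this config, prompts become at most 77 token ids, wrapped in `<|startoftext|>`/`<|endoftext|>` and padded with the eos/pad token, exactly as in CLIP. A usage sketch, assuming paddlenlp's tokenizer accepts the transformers-style call (`return_tensors="pd"` for paddle tensors; the path is hypothetical):

```python
# Sketch: tokenizing a prompt to the fixed [1, 77] shape the text encoder expects.
from paddlenlp.transformers import CLIPTokenizer

tok = CLIPTokenizer.from_pretrained("./riffusion-paddle/tokenizer")
out = tok("acoustic folk fiddle solo", padding="max_length",
          max_length=77, truncation=True, return_tensors="pd")
print(out["input_ids"].shape)  # [1, 77]
```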
tokenizer/vocab.json
ADDED
The diff for this file is too large to render.
unet/config.json
ADDED
@@ -0,0 +1,42 @@
{
  "_class_name": "UNet2DConditionModel",
  "_diffusers_version": "0.4.0",
  "_name_or_path": "/root/.cache/huggingface/diffusers/models--riffusion--riffusion-model-v1/snapshots/79993436c342ff529802d1dabb016ebe15b5c4ae/unet",
  "_ppdiffusers_version": "0.9.0",
  "act_fn": "silu",
  "attention_head_dim": 8,
  "block_out_channels": [
    320,
    640,
    1280,
    1280
  ],
  "center_input_sample": false,
  "cross_attention_dim": 768,
  "down_block_types": [
    "CrossAttnDownBlock2D",
    "CrossAttnDownBlock2D",
    "CrossAttnDownBlock2D",
    "DownBlock2D"
  ],
  "downsample_padding": 1,
  "dual_cross_attention": false,
  "flip_sin_to_cos": true,
  "freq_shift": 0,
  "in_channels": 4,
  "layers_per_block": 2,
  "mid_block_scale_factor": 1,
  "norm_eps": 1e-05,
  "norm_num_groups": 32,
  "num_class_embeds": null,
  "only_cross_attention": false,
  "out_channels": 4,
  "sample_size": 64,
  "up_block_types": [
    "UpBlock2D",
    "CrossAttnUpBlock2D",
    "CrossAttnUpBlock2D",
    "CrossAttnUpBlock2D"
  ],
  "use_linear_projection": false
}
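For reference, this config implies the following shapes for a single denoising step: 4-channel 64x64 latents (512x512 pixels through the VAE's 8x factor) conditioned on 77 CLIP token embeddings of width 768. A shape sketch with paddle tensors (the call signature shown in comments is the diffusers convention, assumed to carry over):

```python
# Shape check for one denoising step implied by this config.
import paddle

latents = paddle.randn([1, 4, 64, 64])   # in_channels=4, sample_size=64
timestep = paddle.to_tensor([981])       # one of num_train_timesteps=1000
text_emb = paddle.randn([1, 77, 768])    # 77 CLIP tokens, cross_attention_dim=768
# With the unet loaded as in the model_index.json section above (assumed signature):
#   noise_pred = unet(latents, timestep, encoder_hidden_states=text_emb).sample
#   noise_pred.shape == [1, 4, 64, 64]   # out_channels=4
```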
unet/model_state.pdparams
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8a9a644295e79123ed6b9846a628bfb8422ba832eb9b94c024002132ba288672
size 3438161812
vae/config.json
ADDED
@@ -0,0 +1,31 @@
{
  "_class_name": "AutoencoderKL",
  "_diffusers_version": "0.4.0",
  "_name_or_path": "/root/.cache/huggingface/diffusers/models--riffusion--riffusion-model-v1/snapshots/79993436c342ff529802d1dabb016ebe15b5c4ae/vae",
  "_ppdiffusers_version": "0.9.0",
  "act_fn": "silu",
  "block_out_channels": [
    128,
    256,
    512,
    512
  ],
  "down_block_types": [
    "DownEncoderBlock2D",
    "DownEncoderBlock2D",
    "DownEncoderBlock2D",
    "DownEncoderBlock2D"
  ],
  "in_channels": 3,
  "latent_channels": 4,
  "layers_per_block": 2,
  "norm_num_groups": 32,
  "out_channels": 3,
  "sample_size": 256,
  "up_block_types": [
    "UpDecoderBlock2D",
    "UpDecoderBlock2D",
    "UpDecoderBlock2D",
    "UpDecoderBlock2D"
  ]
}
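Three of the four encoder blocks downsample by 2, which gives the 8x spatial factor linking the VAE and UNet configs. A quick check:

```python
# Why the UNet's sample_size is 64: three of the four encoder blocks halve resolution.
block_out_channels = [128, 256, 512, 512]
factor = 2 ** (len(block_out_channels) - 1)  # 8x spatial reduction
print(512 // factor)  # 64 -> a 512x512 spectrogram becomes a 4x64x64 latent
```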
vae/model_state.pdparams
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fb1fc7930e559cc10f9a41c682abe13c49c3ffa33df356abd7666e9282892076
size 334641404