camenduru committed
Commit c1209d0
1 Parent(s): eae919f

thanks to stabilityai ❤

.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ freesound_dataset_attribution2.csv filter=lfs diff=lfs merge=lfs -text
+ Stable_audio_open.jpg filter=lfs diff=lfs merge=lfs -text
LICENSE ADDED
@@ -0,0 +1,50 @@
+ STABILITY AI NON-COMMERCIAL RESEARCH COMMUNITY LICENSE AGREEMENT
+ Dated: June 5, 2024
+
+ By using or distributing any portion or element of the Models, Software, Software Products or Derivative Works, you agree to be bound by this Agreement.
+
+ "Agreement" means this Stable Non-Commercial Research Community License Agreement.
+
+ “AUP” means the Stability AI Acceptable Use Policy available at https://stability.ai/use-policy, as may be updated from time to time.
+
+ "Derivative Work(s)” means (a) any derivative work of the Software Products as recognized by U.S. copyright laws and (b) any modifications to a Model, and any other model created which is based on or derived from the Model or the Model’s output. For clarity, Derivative Works do not include the output of any Model.
+
+ “Documentation” means any specifications, manuals, documentation, and other written information provided by Stability AI related to the Software.
+
+ "Licensee" or "you" means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity's behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf.
+
+ “Model(s)" means, collectively, Stability AI’s proprietary models and algorithms, including machine-learning models, trained model weights and other elements of the foregoing, made available under this Agreement.
+
+ “Non-Commercial Uses” means exercising any of the rights granted herein for the purpose of research or non-commercial purposes. For the avoidance of doubt, personal creative use is permissible as “Non-Commercial Use.” Non-Commercial Use does not, however, include the sale of Stability’s underlying Models to third parties or use of outputs from Stability’s underlying Models to train or create a competing product or service.
+
+ "Stability AI" or "we" means Stability AI Ltd. and its affiliates.
+
+ "Software" means Stability AI’s proprietary software made available under this Agreement.
+
+ “Software Products” means the Models, Software and Documentation, individually or in any combination.
+
+ 1. License Rights and Redistribution.
+
+ a. Subject to your compliance with this Agreement, the AUP (which is hereby incorporated herein by reference), and the Documentation, Stability AI grants you a non-exclusive, worldwide, non-transferable, non-sublicensable, revocable, royalty free and limited license under Stability AI’s intellectual property or other rights owned or controlled by Stability AI embodied in the Software Products to use, reproduce, distribute, and create Derivative Works of, the Software Products, in each case for Non-Commercial Uses only, unless you subscribe to a membership via https://stability.ai/membership or otherwise obtain a commercial license from Stability AI.
+
+ b. You may not use the Software Products or Derivative Works to enable third parties to use the Software Products or Derivative Works as part of your hosted service or via your APIs, whether you are adding substantial additional functionality thereto or not. Merely distributing the Software Products or Derivative Works for download online without offering any related service (ex. by distributing the Models on HuggingFace) is not a violation of this subsection. If you wish to use the Software Products or any Derivative Works for commercial or production use or you wish to make the Software Products or any Derivative Works available to third parties via your hosted service or your APIs, contact Stability AI at https://stability.ai/contact.
+
+ c. If you distribute or make the Software Products, or any Derivative Works thereof, available to a third party, the Software Products, Derivative Works, or any portion thereof, respectively, will remain subject to this Agreement and you must (i) provide a copy of this Agreement to such third party, and (ii) retain the following attribution notice within a "Notice" text file distributed as a part of such copies: "This Stability AI Model is licensed under the Stability AI Non-Commercial Research Community License, Copyright (c) Stability AI Ltd. All Rights Reserved.” If you create a Derivative Work of a Software Product, you may add your own attribution notices to the Notice file included with the Software Product, provided that you clearly indicate which attributions apply to the Software Product and you must state in the NOTICE file that you changed the Software Product and how it was modified.
+
+
+ 2. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE SOFTWARE PRODUCTS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE SOFTWARE PRODUCTS, DERIVATIVE WORKS OR ANY OUTPUT OR RESULTS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE SOFTWARE PRODUCTS, DERIVATIVE WORKS AND ANY OUTPUT AND RESULTS.
+
+
+ 3. Limitation of Liability. IN NO EVENT WILL STABILITY AI OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF STABILITY AI OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.
+
+
+ 4. Intellectual Property.
+
+ a. No trademark licenses are granted under this Agreement, and in connection with the Software Products or Derivative Works, neither Stability AI nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Software Products or Derivative Works.
+
+ b. Subject to Stability AI’s ownership of the Software Products and Derivative Works made by or for Stability AI, with respect to any Derivative Works that are made by you, as between you and Stability AI, you are and will be the owner of such Derivative Works
+
+ c. If you institute litigation or other proceedings against Stability AI (including a cross-claim or counterclaim in a lawsuit) alleging that the Software Products, Derivative Works or associated outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Stability AI from and against any claim by any third party arising out of or related to your use or distribution of the Software Products or Derivative Works in violation of this Agreement.
+
+
+ 5. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Software Products and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Stability AI may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of any Software Products or Derivative Works. Sections 2-4 shall survive the termination of this Agreement.
README.md ADDED
@@ -0,0 +1,131 @@
+ ---
+ license: other
+ license_name: stable-audio-nc-community
+ license_link: LICENSE
+ extra_gated_prompt: >-
+   By clicking "Agree", you agree to the [License
+   Agreement](https://huggingface.co/stabilityai/stable-audio-open-1.0/blob/main/LICENSE)
+   and acknowledge Stability AI's [Privacy
+   Policy](https://stability.ai/privacy-policy).
+ extra_gated_fields:
+   Name: text
+   Email: text
+   Country: country
+   Organization or Affiliation: text
+   Receive email updates and promotions on Stability AI products, services, and research?:
+     type: select
+     options:
+       - 'Yes'
+       - 'No'
+ language:
+ - en
+ ---
+
+ # Stable Audio Open 1.0
+
+ ![Stable Audio Open logo](./stable_audio_light.png)
+
+ Please note: For commercial use, please refer to [https://stability.ai/membership](https://stability.ai/membership)
+
+ ## Model Description
+ `Stable Audio Open 1.0` generates variable-length (up to 47s) stereo audio at 44.1kHz from text prompts. It comprises three components: an autoencoder that compresses waveforms into a manageable sequence length, a T5-based text embedding for text conditioning, and a transformer-based diffusion (DiT) model that operates in the latent space of the autoencoder.
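The 47 s ceiling and the sequence length the DiT actually sees follow directly from the autoencoder settings in `model_config.json` later in this commit; a quick sanity check with those values:

```python
# Values copied from model_config.json in this repository.
sample_size = 2_097_152       # samples per generated clip
sample_rate = 44_100          # Hz
downsampling_ratio = 2_048    # temporal compression of the autoencoder

print(sample_size / sample_rate)          # ~47.55 s, hence "up to 47s"
print(sample_size // downsampling_ratio)  # 1024 latent frames processed by the DiT
```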
+
+ ## Usage
+ This model is made to be used with the [`stable-audio-tools`](https://github.com/Stability-AI/stable-audio-tools) library for inference, for example:
+
+ ```python
+ import torch
+ import torchaudio
+ from einops import rearrange
+ from stable_audio_tools import get_pretrained_model
+ from stable_audio_tools.inference.generation import generate_diffusion_cond
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ # Download model
+ model, model_config = get_pretrained_model("stabilityai/stable-audio-open-1.0")
+ sample_rate = model_config["sample_rate"]
+ sample_size = model_config["sample_size"]
+
+ model = model.to(device)
+
+ # Set up text and timing conditioning
+ conditioning = [{
+     "prompt": "128 BPM tech house drum loop",
+     "seconds_start": 0,
+     "seconds_total": 30
+ }]
+
+ # Generate stereo audio
+ output = generate_diffusion_cond(
+     model,
+     steps=100,
+     cfg_scale=7,
+     conditioning=conditioning,
+     sample_size=sample_size,
+     sigma_min=0.3,
+     sigma_max=500,
+     sampler_type="dpmpp-3m-sde",
+     device=device
+ )
+
+ # Rearrange audio batch to a single sequence
+ output = rearrange(output, "b d n -> d (b n)")
+
+ # Peak normalize, clip, convert to int16, and save to file
+ output = output.to(torch.float32).div(torch.max(torch.abs(output))).clamp(-1, 1).mul(32767).to(torch.int16).cpu()
+ torchaudio.save("output.wav", output, sample_rate)
+ ```
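`stable-audio-tools` can typically be installed from PyPI with `pip install stable-audio-tools`. Note that `generate_diffusion_cond` renders the full `sample_size` window (about 47 s) regardless of `seconds_total`, which only conditions the content; a common follow-up step, sketched below as an assumption rather than part of the official example, is to trim the tensor to the requested duration before saving:

```python
# Optional, illustrative trim to the requested duration (run before torchaudio.save).
requested_seconds = 30  # matches "seconds_total" in the conditioning above
output = output[:, : requested_seconds * sample_rate]
```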
+
+
+ ## Model Details
+ * **Model type**: `Stable Audio Open 1.0` is a latent diffusion model based on a transformer architecture.
+ * **Language(s)**: English
+ * **License**: See the [LICENSE file](https://huggingface.co/stabilityai/stable-audio-open-1.0/blob/main/LICENSE).
+ * **Commercial License**: to use this model commercially, please refer to [https://stability.ai/membership](https://stability.ai/membership)
+
+
+ ## Training dataset
+
+ ### Datasets Used
+ Our dataset consists of 486492 audio recordings, where 472618 are from Freesound and 13874 are from the Free Music Archive (FMA). All audio files are licensed under CC0, CC BY, or CC Sampling+. This data is used to train our autoencoder and DiT. We use a publicly available pre-trained T5 model ([t5-base](https://huggingface.co/google-t5/t5-base)) for text conditioning.
+
+ ### Attribution
+ Attribution for all audio recordings used to train Stable Audio Open 1.0 can be found in this repository.
+ - FreeSound attribution [[csv](https://huggingface.co/stabilityai/stable-audio-open-1.0/blob/main/freesound_dataset_attribution2.csv)]
+ - FMA attribution [[csv](https://huggingface.co/stabilityai/stable-audio-open-1.0/blob/main/fma_dataset_attribution2.csv)]
+
+ ### Mitigations
+ We conducted an in-depth analysis to ensure no unauthorized copyrighted music was present in our training data before we began training.
+
+ To that end, we first identified music samples in FreeSound using the [PANNs](https://github.com/qiuqiangkong/audioset_tagging_cnn) music classifier based on AudioSet classes. The identified music samples had at least 30 seconds of music that was predicted to belong to a music-related class with a threshold of 0.15 (PANNs output probabilities range from 0 to 1). This threshold was determined by classifying known music examples from FMA and ensuring no false negatives were present.
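For illustration only (this is not the released filtering code, and `per_second_music_probs` is a hypothetical precomputed array of PANNs music-class probabilities), the flagging rule described above amounts to:

```python
from typing import Sequence

MUSIC_PROB_THRESHOLD = 0.15  # PANNs probability threshold quoted above
MIN_MUSIC_SECONDS = 30       # minimum amount of predicted music to flag a recording

def is_flagged_as_music(per_second_music_probs: Sequence[float]) -> bool:
    """Flag a recording if at least 30 one-second windows score >= 0.15
    on a music-related AudioSet class (probabilities assumed precomputed with PANNs)."""
    music_seconds = sum(p >= MUSIC_PROB_THRESHOLD for p in per_second_music_probs)
    return music_seconds >= MIN_MUSIC_SECONDS
```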
+
+ The identified music samples were sent to Audible Magic’s identification services, a trusted content detection company, to ensure the absence of copyrighted music. Audible Magic flagged suspected copyrighted music, which we subsequently removed before training on the dataset. The majority of the removed content was field recordings in which copyrighted music was playing in the background. Following this procedure, we were left with 266324 CC0, 194840 CC-BY, and 11454 CC Sampling+ audio recordings.
+
+ We also conducted an in-depth analysis to ensure no copyrighted content was present in FMA's subset. In this case, the procedure was slightly different because the FMA subset consists of music signals. We did a metadata search against a large database of copyrighted music (https://www.kaggle.com/datasets/maharshipandya/-spotify-tracks-dataset) and flagged any potential match. The flagged content was reviewed individually by humans. After this process, we ended up with 8967 CC-BY and 4907 CC0 tracks.
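The exact matching logic is not published; as a rough sketch under assumptions (the column names `artists` and `track_name` are guesses at the Kaggle CSV's schema, and real matching would need fuzzier normalization), it could look like:

```python
import csv

def normalize(text: str) -> str:
    # crude lowercase/whitespace normalization for matching; illustrative only
    return " ".join(text.lower().split())

def load_reference_keys(spotify_csv_path: str) -> set:
    # Build (artist, title) keys from the referenced Spotify tracks dataset.
    with open(spotify_csv_path, newline="", encoding="utf-8") as f:
        return {(normalize(row["artists"]), normalize(row["track_name"]))
                for row in csv.DictReader(f)}

def flag_potential_matches(fma_tracks, reference_keys):
    # fma_tracks: iterable of (artist, title) pairs; flagged items go to human review.
    return [(artist, title) for artist, title in fma_tracks
            if (normalize(artist), normalize(title)) in reference_keys]
```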
+
+
+ ## Use and Limitations
+
+
+ ### Intended Use
+ The primary use of Stable Audio Open is research and experimentation on AI-based music and audio generation, including:
+
+ - Research efforts to better understand the limitations of generative models and further improve the state of science.
+ - Generation of music and audio guided by text to explore current abilities of generative AI models by machine learning practitioners and artists.
+
+
+ ### Out-of-Scope Use Cases
+ The model should not be used on downstream applications without further risk evaluation and mitigation. The model should not be used to intentionally create or disseminate audio or music pieces that create hostile or alienating environments for people.
+
+
+ ### Limitations
+ - The model is not able to generate realistic vocals.
+ - The model has been trained with English descriptions and will not perform as well in other languages.
+ - The model does not perform equally well for all music styles and cultures.
+ - The model is better at generating sound effects and field recordings than music.
+ - It is sometimes difficult to assess what types of text descriptions provide the best generations. Prompt engineering may be required to obtain satisfying results.
+
+
+ ### Biases
+ The data sources potentially lack diversity, and not all cultures are equally represented in the dataset. The model may not perform equally well on the wide variety of music genres and sound effects that exist. The generated samples from the model will reflect the biases from the training data.
fma_dataset_attribution2.csv ADDED
The diff for this file is too large to render.
 
freesound_dataset_attribution2.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8819b131646d3e48aa9e4571f7484c7a269bb9b639f2ce78a86e2cdefa5bb8eb
+ size 50433223
model.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6049ae92ec8362804cb4cb8a2845be93071439da2daff9997c285f8119d7ea40
+ size 4853997287
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7b20458a071231aaf32613b6fbc7945f28f34dbba4f295bb49bad56f5f66b57e
+ size 4853889016
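The `oid sha256:` values above are Git LFS content hashes of the actual weight files, so a downloaded copy can be verified with nothing but the standard library (file names as in this repository):

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file in 1 MiB chunks to avoid loading multi-GB weights into memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected digests copied from the LFS pointer files in this commit.
expected = {
    "model.ckpt": "6049ae92ec8362804cb4cb8a2845be93071439da2daff9997c285f8119d7ea40",
    "model.safetensors": "7b20458a071231aaf32613b6fbc7945f28f34dbba4f295bb49bad56f5f66b57e",
}
for name, digest in expected.items():
    assert sha256_of(name) == digest, f"checksum mismatch for {name}"
```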
model_config.json ADDED
@@ -0,0 +1,125 @@
+ {
+     "model_type": "diffusion_cond",
+     "sample_size": 2097152,
+     "sample_rate": 44100,
+     "audio_channels": 2,
+     "model": {
+         "pretransform": {
+             "type": "autoencoder",
+             "iterate_batch": true,
+             "config": {
+                 "encoder": {
+                     "type": "oobleck",
+                     "requires_grad": false,
+                     "config": {
+                         "in_channels": 2,
+                         "channels": 128,
+                         "c_mults": [1, 2, 4, 8, 16],
+                         "strides": [2, 4, 4, 8, 8],
+                         "latent_dim": 128,
+                         "use_snake": true
+                     }
+                 },
+                 "decoder": {
+                     "type": "oobleck",
+                     "config": {
+                         "out_channels": 2,
+                         "channels": 128,
+                         "c_mults": [1, 2, 4, 8, 16],
+                         "strides": [2, 4, 4, 8, 8],
+                         "latent_dim": 64,
+                         "use_snake": true,
+                         "final_tanh": false
+                     }
+                 },
+                 "bottleneck": {
+                     "type": "vae"
+                 },
+                 "latent_dim": 64,
+                 "downsampling_ratio": 2048,
+                 "io_channels": 2
+             }
+         },
+         "conditioning": {
+             "configs": [
+                 {
+                     "id": "prompt",
+                     "type": "t5",
+                     "config": {
+                         "t5_model_name": "t5-base",
+                         "max_length": 128
+                     }
+                 },
+                 {
+                     "id": "seconds_start",
+                     "type": "number",
+                     "config": {
+                         "min_val": 0,
+                         "max_val": 512
+                     }
+                 },
+                 {
+                     "id": "seconds_total",
+                     "type": "number",
+                     "config": {
+                         "min_val": 0,
+                         "max_val": 512
+                     }
+                 }
+             ],
+             "cond_dim": 768
+         },
+         "diffusion": {
+             "cross_attention_cond_ids": ["prompt", "seconds_start", "seconds_total"],
+             "global_cond_ids": ["seconds_start", "seconds_total"],
+             "type": "dit",
+             "config": {
+                 "io_channels": 64,
+                 "embed_dim": 1536,
+                 "depth": 24,
+                 "num_heads": 24,
+                 "cond_token_dim": 768,
+                 "global_cond_dim": 1536,
+                 "project_cond_tokens": false,
+                 "transformer_type": "continuous_transformer"
+             }
+         },
+         "io_channels": 64
+     },
+     "training": {
+         "use_ema": true,
+         "log_loss_info": false,
+         "optimizer_configs": {
+             "diffusion": {
+                 "optimizer": {
+                     "type": "AdamW",
+                     "config": {
+                         "lr": 5e-5,
+                         "betas": [0.9, 0.999],
+                         "weight_decay": 1e-3
+                     }
+                 },
+                 "scheduler": {
+                     "type": "InverseLR",
+                     "config": {
+                         "inv_gamma": 1000000,
+                         "power": 0.5,
+                         "warmup": 0.99
+                     }
+                 }
+             }
+         },
+         "demo": {
+             "demo_every": 2000,
+             "demo_steps": 250,
+             "num_demos": 4,
+             "demo_cond": [
+                 {"prompt": "Amen break 174 BPM", "seconds_start": 0, "seconds_total": 12},
+                 {"prompt": "A beautiful orchestral symphony, classical music", "seconds_start": 0, "seconds_total": 160},
+                 {"prompt": "Chill hip-hop beat, chillhop", "seconds_start": 0, "seconds_total": 190},
+                 {"prompt": "A pop song about love and loss", "seconds_start": 0, "seconds_total": 180}
+             ],
+             "demo_cfg_scales": [3, 6, 9]
+         }
+     }
+ }
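If the config and weights are already downloaded locally (instead of fetching them with `get_pretrained_model` as in the README above), `stable-audio-tools` also provides factory helpers for building a model from this file. The sketch below assumes your installed version exposes `create_model_from_config` and `load_ckpt_state_dict`; check the library's current API before relying on it:

```python
import json

import torch
# Import paths are an assumption about the stable-audio-tools layout; verify against your version.
from stable_audio_tools.models.factory import create_model_from_config
from stable_audio_tools.models.utils import load_ckpt_state_dict

with open("model_config.json") as f:
    model_config = json.load(f)

model = create_model_from_config(model_config)                    # builds autoencoder + DiT from this config
model.load_state_dict(load_ckpt_state_dict("model.safetensors"))  # or "model.ckpt"
model = model.to("cuda" if torch.cuda.is_available() else "cpu").eval()
```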
stable_audio_light.png ADDED