Fabio Grasso committed
Commit b1fdcc2
1 Parent(s): 1f7facb

init moseca

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. Dockerfile +20 -9
  2. README.md +191 -18
  3. app.py +0 -3
  4. app/_fastapi_server.py +20 -0
  5. app/examples.py +0 -58
  6. app/footer.py +118 -0
  7. app/header.py +68 -0
  8. app/helpers.py +153 -12
  9. app/main.py +0 -155
  10. app/pages/About.py +154 -0
  11. app/pages/Karaoke.py +176 -0
  12. app/pages/Separate.py +203 -0
  13. {lib → app/service}/__init__.py +0 -0
  14. app/{demucs_runner.py → service/demucs_runner.py} +26 -8
  15. app/service/vocal_remover/__init__.py +0 -0
  16. app/service/vocal_remover/layers.py +126 -0
  17. app/service/vocal_remover/nets.py +125 -0
  18. app/service/vocal_remover/runner.py +234 -0
  19. app/service/youtube.py +72 -0
  20. app/sidebar.py +0 -12
  21. app/style.py +131 -0
  22. img/bmc-button.png +0 -0
  23. img/image_stems.png +0 -0
  24. img/karaoke_fun.png +0 -0
  25. img/logo_moseca.png +0 -0
  26. img/state-of-art.png +0 -0
  27. lib/st_audiorec/.DS_Store +0 -0
  28. lib/st_audiorec/__init__.py +0 -1
  29. lib/st_audiorec/frontend/.DS_Store +0 -0
  30. lib/st_audiorec/frontend/.env +0 -6
  31. lib/st_audiorec/frontend/.prettierrc +0 -5
  32. lib/st_audiorec/frontend/build/.DS_Store +0 -0
  33. lib/st_audiorec/frontend/build/asset-manifest.json +0 -22
  34. lib/st_audiorec/frontend/build/bootstrap.min.css +0 -0
  35. lib/st_audiorec/frontend/build/index.html +0 -1
  36. lib/st_audiorec/frontend/build/precache-manifest.4829c060d313d0b0d13d9af3b0180289.js +0 -26
  37. lib/st_audiorec/frontend/build/service-worker.js +0 -39
  38. lib/st_audiorec/frontend/build/static/.DS_Store +0 -0
  39. lib/st_audiorec/frontend/build/static/css/2.bfbf028b.chunk.css +0 -2
  40. lib/st_audiorec/frontend/build/static/css/2.bfbf028b.chunk.css.map +0 -1
  41. lib/st_audiorec/frontend/build/static/js/2.270b84d8.chunk.js +0 -0
  42. lib/st_audiorec/frontend/build/static/js/2.270b84d8.chunk.js.LICENSE.txt +0 -58
  43. lib/st_audiorec/frontend/build/static/js/2.270b84d8.chunk.js.map +0 -0
  44. lib/st_audiorec/frontend/build/static/js/main.833ba252.chunk.js +0 -2
  45. lib/st_audiorec/frontend/build/static/js/main.833ba252.chunk.js.map +0 -1
  46. lib/st_audiorec/frontend/build/static/js/runtime-main.11ec9aca.js +0 -2
  47. lib/st_audiorec/frontend/build/static/js/runtime-main.11ec9aca.js.map +0 -1
  48. lib/st_audiorec/frontend/build/styles.css +0 -59
  49. lib/st_audiorec/frontend/package-lock.json +0 -0
  50. lib/st_audiorec/frontend/package.json +0 -44
Dockerfile CHANGED
@@ -1,23 +1,34 @@
-FROM python:3.9
-
-RUN pip install --user --upgrade pip
-
-RUN apt-get update && apt-get install -y ffmpeg
-
-COPY . .
-
+# syntax=docker/dockerfile:1
+
+FROM python:3.8
+
+RUN apt-get update && \
+    apt-get install -y ffmpeg jq curl && \
+    pip install --upgrade pip
+
+WORKDIR /app
+
+COPY requirements.txt .
 RUN pip install --no-cache-dir -r requirements.txt
 
-RUN mkdir -p /tmq
-RUN chmod 777 /tmp
-RUN mkdir -p /.cache
-RUN chmod 777 /.cache
+COPY scripts/ .
+COPY app ./app
+COPY img ./img
+
+RUN wget --progress=bar:force:noscroll https://huggingface.co/fabiogra/baseline_vocal_remover/resolve/main/baseline.pth
+
+RUN mkdir -p /tmp/ /tmp/vocal_remover /.cache /.config && \
+    chmod 777 /tmp /tmp/vocal_remover /.cache /.config
 
 ENV PYTHONPATH "${PYTHONPATH}:/app"
 
+RUN chmod +x prepare_samples.sh
+
 EXPOSE 7860
 
 HEALTHCHECK CMD curl --fail http://localhost:7860/_stcore/health
 
-ENTRYPOINT ["streamlit", "run", "app/main.py", "--server.port=7860", "--server.address=0.0.0.0"]
+RUN ["./prepare_samples.sh"]
+
+ENTRYPOINT ["streamlit", "run", "app/header.py", "--server.port=7860", "--server.address=0.0.0.0"]
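Note on the rewritten Dockerfile: the vocal-remover weights (`baseline.pth`) are downloaded and `prepare_samples.sh` (copied from `scripts/`, presumably fetching the demo tracks) is executed at build time, so the container starts with everything baked in. The `HEALTHCHECK` hits Streamlit's built-in health endpoint (`curl` is installed above for exactly this), and the same probe works by hand against a local container:

```bash
# assumes the image is running locally and publishing port 7860
curl --fail http://localhost:7860/_stcore/health && echo healthy
```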
README.md CHANGED
@@ -1,37 +1,210 @@
 ---
-title: Music Splitter
-emoji: 🎶
-colorFrom: indigo
-colorTo: yellow
+title: Test Moseca
+emoji: 🎤🎸🥁🎹
+colorFrom: yellow
+colorTo: purple
 sdk: docker
+app_port: 7860
+models: ["https://huggingface.co/fabiogra/baseline_vocal_remover"]
+tags: ["audio", "music", "vocal-removal", "karaoke", "music-separation", "music-source-separation"]
 pinned: true
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
-
-# Music Source Splitter 🎶
-<a href="https://huggingface.co/spaces/fabiogra/st-music-splitter"><img src="https://img.shields.io/badge/🤗%20Hugging%20Face-Spaces-blue" alt="Hugging Face Spaces"></a>
-
-This is a streamlit demo of the [Music Source Separation](https://huggingface.co/spaces/fabiogra/st-music-splitter).
-
-The model can separate the vocals, drums, bass, and other from a music track.
-
-## Usage
-
-You can use the demo [here](https://huggingface.co/spaces/fabiogra/st-music-splitter), or run it locally with:
-
+<p align="center">
+  <img src="img/logo_moseca.png" alt="logo" width="70" />
+</p>
+<h2 align="center">Moseca</h2>
+<p align="center">Music Source Separation & Karaoke</p>
+
+</a>
+<a href="https://huggingface.co/spaces/fabiogra/moseca?duplicate=true">
+  <img src="https://img.shields.io/badge/🤗%20Hugging%20Face-Spaces-blue"
+  alt="Hugging Face Spaces"></a>
+<a href="https://huggingface.co/spaces/fabiogra/moseca/discussions?docker=true">
+  <img src="https://img.shields.io/badge/-Docker%20Image-blue?logo=docker&labelColor=white"
+  alt="Docker"></a><a href="https://www.buymeacoffee.com/fabiogra">
+  <img src="https://img.shields.io/badge/Buy%20me%20a%20coffee--yellow.svg?logo=buy-me-a-coffee&logoColor=orange&style=social"
+  alt="Buy me a coffee"></a>
+
+---
+
+- [Setup](#setup)
+- [About](#about)
+  - [High-Quality Stem Separation](#high-quality-stem-separation)
+  - [Advanced AI Algorithms](#advanced-ai-algorithms)
+  - [Karaoke Fun](#karaoke-fun)
+  - [Easy Deployment](#easy-deployment)
+  - [Open-Source and Free](#open-source-and-free)
+  - [Support](#support)
+- [FAQs](#faqs)
+  - [What is Moseca?](#what-is-moseca)
+  - [Are there any limitations?](#are-there-any-limitations)
+  - [How does Moseca work?](#how-does-moseca-work)
+  - [How do I use Moseca?](#how-do-i-use-moseca)
+  - [Where can I find the code for Moseca?](#where-can-i-find-the-code-for-moseca)
+  - [How can I get in touch with you?](#how-can-i-get-in-touch-with-you)
+- [Disclaimer](#disclaimer)
+
+---
+
+## Setup
+### Local environment
+Create a new environment with Python 3.8 and install the requirements:
+```bash
+pip install -r requirements.txt
+```
+then run the app with:
+```bash
+streamlit run app/header.py
+```
+### Docker
+You can also run the app with Docker:
 ```bash
-streamlit run app.py
+docker build -t moseca .
+docker run -it --rm -p 7860:7860 moseca
 ```
-> **Note**: In order to run the demo locally, you need to install the dependencies with `pip install -r requirements.txt`.
-
-## How it works
-
-The app uses a pretrained model called Hybrid Spectrogram and Waveform Source Separation from <a href="https://github.com/facebookresearch/demucs">facebook/htdemucs</a>.
-
-## Acknowledgements
-- HtDemucs model from <a href="https://github.com/facebookresearch/demucs">facebook/htdemucs</a>
-- Streamlit Audio Recorder from <a href="https://github.com/stefanrmmr/streamlit_audio_recorder">stefanrmmr/streamlit_audio_recorder</a>
+or pull the image from Hugging Face Spaces:
+```bash
+docker run -it -p 7860:7860 --platform=linux/amd64 \
+  registry.hf.space/fabiogra-moseca:latest
+```
+
+You can set the following environment variables to limit the resources used by the app:
+- ENV_LIMITATION=true
+- LIMIT_CPU=true
+---
+## About
+
+Welcome to Moseca, your personal web application designed to redefine your music experience.
+Whether you're a musician looking to remix your favorite songs, a karaoke
+enthusiast, or a music lover wanting to dive deeper into your favorite tracks,
+Moseca is for you.
+
+<br>
+
+### High-Quality Stem Separation
+
+<img title="High-Quality Stem Separation" src="https://i.imgur.com/l7H8YWL.png" width="250" ></img>
+
+<br>
+
+Separate up to 6 stems including 🗣voice, 🥁drums, 🔉bass, 🎸guitar,
+🎹piano (beta), and 🎶 others.
+
+<br>
+
+### Advanced AI Algorithms
+
+<img title="Advanced AI Algorithms" src="https://i.imgur.com/I8Pvdav.png" width="250" ></img>
+
+<br>
+
+Moseca utilizes state-of-the-art AI technology to extract voice or music from
+your original songs accurately.
+
+<br>
+
+### Karaoke Fun
+
+<img title="Karaoke Fun" src="https://i.imgur.com/nsn3JGV.png" width="250" ></img>
+
+<br>
+
+Engage with your favorite tunes in a whole new way!
+
+Moseca offers an immersive online karaoke experience, allowing you to search
+for any song on YouTube and remove the vocals online.
+
+Enjoy singing along with high-quality instrumentals from the comfort of your home.
+
+<br>
+
+### Easy Deployment
+
+With Moseca, you can deploy your personal Moseca app in the
+<a href="https://huggingface.co/spaces/fabiogra/moseca?duplicate=true">
+  <img src="https://img.shields.io/badge/🤗%20Hugging%20Face-Spaces-blue"
+  alt="Hugging Face Spaces"></a> or locally with
+[![Docker Call](https://img.shields.io/badge/-Docker%20Image-blue?logo=docker&labelColor=white)](https://huggingface.co/spaces/fabiogra/moseca/discussions?docker=true)
+in just one click.
+
+<br>
+
+### Open-Source and Free
+
+Moseca is the free and open-source alternative to lalal.ai, splitter.ai or media.io vocal remover.
+
+You can modify, distribute, and use it free of charge. I believe in the power of community
+collaboration and encourage users to contribute to our source code, making Moseca better with
+each update.
+
+<br>
+
+### Support
+
+- Show your support by giving a star to the GitHub repository [![GitHub stars](https://img.shields.io/github/stars/fabiogra/moseca.svg?style=social&label=Star&maxAge=2592000)](https://github.com/fabiogra/moseca).
+- If you have found an issue or have a suggestion to improve Moseca, you can open an [![GitHub issues](https://img.shields.io/github/issues/fabiogra/moseca.svg)](https://github.com/fabiogra/moseca/issues/new)
+- Enjoy Moseca? [![Buymeacoffee](https://img.shields.io/badge/Buy%20me%20a%20coffee--yellow.svg?logo=buy-me-a-coffee&logoColor=orange&style=social)](https://www.buymeacoffee.com/fabiogra)
+
+------
+
+## FAQs
+
+### What is Moseca?
+
+Moseca is an open-source web app that utilizes advanced AI technology to separate vocals and
+instrumentals from music tracks. It also provides an online karaoke experience by allowing you
+to search for any song on YouTube and remove the vocals.
+
+### Are there any limitations?
+Yes, in this environment there are some limitations on processing length
+and CPU usage to allow a smooth experience for all users.
+
+<b>If you want to <u>remove these limitations</u> you can deploy a Moseca app in your personal
+environment like in the <a href="https://huggingface.co/spaces/fabiogra/moseca?duplicate=true"><img src="https://img.shields.io/badge/🤗%20Hugging%20Face-Spaces-blue" alt="Hugging Face Spaces"></a> or locally with [![Docker Call](https://img.shields.io/badge/-Docker%20Image-blue?logo=docker&labelColor=white)](https://huggingface.co/spaces/fabiogra/moseca/discussions?docker=true)</b>
+
+### How does Moseca work?
+Moseca utilizes the Hybrid Spectrogram and Waveform Source Separation ([DEMUCS](https://github.com/facebookresearch/demucs)) model from Facebook. For fast karaoke vocal removal, Moseca uses the AI vocal remover developed by [tsurumeso](https://github.com/tsurumeso/vocal-remover).
+
+### How do I use Moseca?
+1. Upload your file: choose your song and upload it to Moseca. It supports
+a wide range of music formats for your convenience.
+
+2. Choose separation mode: opt for voice only, 4-stem or 6-stem separation
+depending on your requirement.
+
+3. Let AI do its magic: Moseca’s advanced AI will work to separate vocals
+from music in a matter of minutes, giving you high-quality, separated audio tracks.
+
+4. Download and enjoy: preview and download your separated audio tracks.
+Now you can enjoy them anytime, anywhere!
+
+### Where can I find the code for Moseca?
+
+The code for Moseca is readily available on
+[GitHub](https://github.com/fabiogra/moseca) and
+[Hugging Face](https://huggingface.co/spaces/fabiogra/moseca).
+
+### How can I get in touch with you?
+
+For any questions or feedback, feel free to contact me on
+[![Twitter](https://badgen.net/badge/icon/twitter?icon=twitter&label)](https://twitter.com/grsFabio)
+or [LinkedIn](https://www.linkedin.com/in/fabio-grasso/en).
+
+------
+## Disclaimer
+
+Moseca is designed to separate vocals and instruments from copyrighted music for
+legally permissible purposes, such as learning, practicing, research, or other non-commercial
+activities that fall within the scope of fair use or exceptions to copyright. As a user, you are
+responsible for ensuring that your use of separated audio tracks complies with the legal
+requirements in your jurisdiction.
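A note on the two Setup environment variables: as the diffs below show, `ENV_LIMITATION` gates the clip-length limit in `app/pages/Separate.py` and `LIMIT_CPU` pins Demucs to a single thread in `app/service/demucs_runner.py`. To run the image locally with both limits enabled (the `moseca` tag follows the build step above):

```bash
docker run -it --rm -p 7860:7860 \
  -e ENV_LIMITATION=true \
  -e LIMIT_CPU=true \
  moseca
```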
app.py DELETED
@@ -1,3 +0,0 @@
-from app import main
-
-main.run()
app/_fastapi_server.py ADDED
@@ -0,0 +1,20 @@
+from fastapi import FastAPI
+from fastapi.responses import FileResponse
+from urllib.parse import unquote
+
+import os
+
+app = FastAPI()
+
+
+@app.get("/streaming/{path:path}")
+async def serve_streaming(path: str):
+    # Decode URL-encoded characters
+    decoded_path = unquote(path)
+    return FileResponse(decoded_path, filename=os.path.basename(decoded_path))
+
+
+if __name__ == "__main__":
+    import uvicorn
+
+    uvicorn.run(app, host="127.0.0.1", port=8000)
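This small FastAPI sidecar streams files back by path, presumably so the embedded audio players can fetch separated tracks. A quick manual check once it is running (module path and file path are illustrative):

```bash
# terminal 1: start the file server
uvicorn app._fastapi_server:app --host 127.0.0.1 --port 8000

# terminal 2: fetch a file; the double slash makes the captured {path:path}
# parameter absolute (/tmp/...), and -OJ saves it under the Content-Disposition
# filename set by FileResponse
curl -OJ "http://127.0.0.1:8000/streaming//tmp/vocal_remover/song/no_vocals.mp3"
```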
app/examples.py DELETED
@@ -1,58 +0,0 @@
-import streamlit as st
-
-from app.helpers import load_audio_segment, plot_audio
-
-def _load_example(name: str):
-    st.markdown("<center><h3> Original </h3></center>", unsafe_allow_html=True)
-
-    cols = st.columns(2)
-    with cols[0]:
-        auseg = load_audio_segment(f"samples/{name}", "mp3")
-        plot_audio(auseg, step=50)
-    with cols[1]:
-        audio_file = open(f"samples/{name}", "rb")
-        audio_bytes = audio_file.read()
-        st.audio(audio_bytes)
-
-    for file in ["vocals.mp3", "drums.mp3", "bass.mp3", "other.mp3"]:
-        st.markdown("<br>", unsafe_allow_html=True)
-        label = file.split(".")[0].capitalize()
-        label = {
-            "Drums": "🥁",
-            "Bass": "🎸",
-            "Other": "🎹",
-            "Vocals": "🎤",
-        }.get(label) + " " + label
-        st.markdown("<center><h3>" + label + "</h3></center>", unsafe_allow_html=True)
-
-        cols = st.columns(2)
-        with cols[0]:
-            auseg = load_audio_segment(f"samples/{name.split('.mp3')[0]}/{file}", "mp3")
-            plot_audio(auseg, step=50)
-        with cols[1]:
-            audio_file = open(f"samples/{name.split('.mp3')[0]}/{file}", "rb")
-            audio_bytes = audio_file.read()
-            st.audio(audio_bytes)
-
-
-def show_examples():
-    with st.columns([2, 8, 1])[1]:
-        selection = st.selectbox("Select an example music to quickly see results", ["Something About You - Marilyn Ford", "Broke Me - FASSounds", "Indie Rock"])
-        if selection == "Broke Me - FASSounds":
-            _load_example("broke-me-fassounds.mp3")
-            link = "https://pixabay.com/users/fassounds-3433550/"
-            st.markdown(
-                f"""Music by <a href="{link}">FASSounds</a> from <a href="{link}">Pixabay</a>""",
-                unsafe_allow_html=True)
-        elif selection == "Indie Rock":
-            _load_example("indie-rock.mp3")
-            link = "https://pixabay.com/music/indie-rock-112771/"
-            st.markdown(
-                f"""Music by <a href="{link}">Music_Unlimited</a> from <a href="{link}">Pixabay</a>""",
-                unsafe_allow_html=True)
-        elif selection == "Something About You - Marilyn Ford":
-            _load_example("something-about-you-marilyn-ford.mp3")
-            link = "https://pixabay.com/music/rnb-something-about-you-marilyn-ford-135781/"
-            st.markdown(
-                f"""Music by <a href="{link}">Marilyn Ford</a> from <a href="{link}">Pixabay</a>""",
-                unsafe_allow_html=True)
app/footer.py ADDED
@@ -0,0 +1,118 @@
+import streamlit as st
+
+from streamlit.components.v1 import html
+from htbuilder import HtmlElement, div, a, p, img, styles
+from htbuilder.units import percent, px
+
+
+def image(src_as_string, **style):
+    return img(src=src_as_string, style=styles(**style))
+
+
+def link(link, text, **style):
+    return a(_href=link, _target="_blank", style=styles(**style))(text)
+
+
+def layout(*args):
+    style = """
+    <style>
+      footer {visibility: hidden;}
+      .stApp { bottom: 50px; }
+    </style>
+    """
+
+    style_div = styles(
+        position="fixed",
+        left=0,
+        bottom=0,
+        margin=px(0, 0, 0, 0),
+        width=percent(100),
+        color="black",
+        text_align="center",
+        height="auto",
+        opacity=1,
+        align_items="center",
+        flex_direction="column",
+        display="flex",
+    )
+    body = p(
+        id="myFooter",
+        style=styles(
+            margin=px(0, 0, 0, 0),
+            padding=px(5),
+            font_size="0.8rem",
+            color="rgb(51,51,51)",
+            font_family="Exo",
+        ),
+    )
+    foot = div(style=style_div)(body)
+
+    st.markdown(style, unsafe_allow_html=True)
+
+    for arg in args:
+        if isinstance(arg, str):
+            body(arg)
+
+        elif isinstance(arg, HtmlElement):
+            body(arg)
+
+    st.markdown(str(foot), unsafe_allow_html=True)
+
+    js_code = """
+    <script>
+    function rgbReverse(rgb){
+        var r = rgb[0]*0.299;
+        var g = rgb[1]*0.587;
+        var b = rgb[2]*0.114;
+
+        if ((r + g + b)/255 > 0.5){
+            return "rgb(49, 51, 63)"
+        }else{
+            return "rgb(250, 250, 250)"
+        }
+
+    };
+    var stApp_css = window.parent.document.querySelector("#root > div:nth-child(1) > div > div > div");
+    window.onload = function () {
+        var mutationObserver = new MutationObserver(function(mutations) {
+            mutations.forEach(function(mutation) {
+                var bgColor = window.getComputedStyle(stApp_css).backgroundColor.replace("rgb(", "").replace(")", "").split(", ");
+                var fontColor = rgbReverse(bgColor);
+                var pTag = window.parent.document.getElementById("myFooter");
+                pTag.style.color = fontColor;
+            });
+        });
+
+        /**Element**/
+        mutationObserver.observe(stApp_css, {
+            attributes: true,
+            characterData: true,
+            childList: true,
+            subtree: true,
+            attributeOldValue: true,
+            characterDataOldValue: true
+        });
+    }
+
+
+    </script>
+    """
+    html(js_code)
+
+
+def footer():
+    myargs = [
+        "Made in ",
+        link(
+            "https://streamlit.io/",
+            image("https://streamlit.io/images/brand/streamlit-mark-color.png", width="20px"),
+        ),
+        " with ❤️ by ",
+        link("https://twitter.com/grsFabio", "@grsFabio"),
+        "&nbsp;&nbsp;&nbsp;",
+        link(
+            "https://www.buymeacoffee.com/fabiogra",
+            image("https://i.imgur.com/YFu6MMA.png", margin="0em", align="top", width="130px"),
+        ),
+    ]
+    layout(*myargs)
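Pages consume this module through the single `footer()` entry point at the bottom of each script; a minimal page using it (hypothetical file) would be:

```python
import streamlit as st

from footer import footer  # app/footer.py, above

st.title("Any page")
footer()  # renders the fixed footer and injects the background-watching JS
```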
app/header.py ADDED
@@ -0,0 +1,68 @@
+import streamlit as st
+
+from helpers import switch_page
+from style import CSS
+import logging
+
+from streamlit_option_menu import option_menu
+
+logging.basicConfig(
+    format="%(asctime)s %(levelname)-8s %(message)s",
+    level=logging.INFO,
+    datefmt="%Y-%m-%d %H:%M:%S",
+)
+
+
+def header(logo_and_title=True):
+    if "first_run" not in st.session_state:
+        st.session_state.first_run = True
+        for key in [
+            "search_results",
+            "selected_value",
+            "filename",
+            "executed",
+            "play_karaoke",
+            "url",
+            "random_song",
+            "last_dir",
+        ]:
+            st.session_state[key] = None
+        st.session_state.video_options = []
+        st.session_state.page = "Karaoke"
+        switch_page(st.session_state.page)
+
+    st.set_page_config(
+        page_title="Moseca - Music Separation and Karaoke - Free and Open Source alternative to lalal.ai, splitter.ai or media.io vocal remover.",
+        page_icon="img/logo_moseca.png",
+        layout="wide",
+        initial_sidebar_state="collapsed",
+    )
+    st.markdown(CSS, unsafe_allow_html=True)
+
+    options = ["Karaoke", "Separate", "About"]
+    page = option_menu(
+        menu_title=None,
+        options=options,
+        # bootstrap icons
+        icons=["play-btn-fill", "file-earmark-music", "info-circle"],
+        default_index=options.index(st.session_state.page),
+        orientation="horizontal",
+        styles={"nav-link": {"padding-left": "1.5rem", "padding-right": "1.5rem"}},
+        key="",
+    )
+    if page != st.session_state.page:
+        switch_page(page)
+
+    if logo_and_title:
+        head = st.columns([5, 1, 3, 5])
+        with head[1]:
+            st.image("img/logo_moseca.png", use_column_width=False, width=80)
+        with head[2]:
+            st.markdown(
+                "<h1>moseca</h1><p><b>Music Source Separation & Karaoke</b></p>",
+                unsafe_allow_html=True,
+            )
+
+
+if __name__ == "__main__":
+    header()
app/helpers.py CHANGED
@@ -1,19 +1,160 @@
-from pydub import AudioSegment
+import random
+from io import BytesIO
+import json
 
-import streamlit as st
-import plotly.graph_objs as go
-import plotly.express as px
-import pandas as pd
+import matplotlib.pyplot as plt
 import numpy as np
+import requests
+import streamlit as st
+from PIL import Image
+from pydub import AudioSegment
+from base64 import b64encode
+from pathlib import Path
+from streamlit.runtime.scriptrunner import RerunData, RerunException
+from streamlit.source_util import get_pages
+from streamlit_player import st_player
+
+extensions = ["mp3", "wav", "ogg", "flac"]  # we will look for all those file types.
+example_songs = [1, 2, 3]
+
+
+def img_to_bytes(img_path):
+    img_bytes = Path(img_path).read_bytes()
+    encoded = b64encode(img_bytes).decode()
+    return encoded
+
+
+# @st.cache_data(show_spinner=False)
+def img_to_html(img_path):
+    img_html = "<div style='display: flex; justify-content: center; align-items: center; height: 50vh;'><img src='data:image/png;base64,{}' class='img-fluid' style='max-width: 100%; max-height: 100%;' ></div>".format(
+        img_to_bytes(img_path)
+    )
+    return img_html
 
-@st.cache_data
+
+@st.cache_data(show_spinner=False)
+def url_is_valid(url):
+    if url.startswith("http") is False:
+        st.error("URL should start with http or https.")
+        return False
+    elif url.split(".")[-1] not in extensions:
+        st.error("Extension not supported.")
+        return False
+    try:
+        r = requests.get(url)
+        r.raise_for_status()
+        return True
+    except Exception:
+        st.error("URL is not valid.")
+        return False
+
+
+@st.cache_data(show_spinner=False)
 def load_audio_segment(path: str, format: str) -> AudioSegment:
     return AudioSegment.from_file(path, format=format)
 
-def plot_audio(_audio_segment: AudioSegment, title: str = None, step = 20) -> go.Figure:
+
+@st.cache_data(show_spinner=False)
+def plot_audio(_audio_segment: AudioSegment, *args, **kwargs) -> Image.Image:
     samples = _audio_segment.get_array_of_samples()
-    arr = np.array(samples[::step])
-    df = pd.DataFrame(arr)
-    fig = px.line(df, y=0, render_mode="webgl", line_shape="linear", width=1000, height=60, title=title)
-    fig.update_layout(xaxis_fixedrange=True, yaxis_fixedrange=True, yaxis_visible=False, xaxis_visible=False, hovermode=False, margin=dict(l=0, r=0, t=0, b=0))
-    st.plotly_chart(fig, use_container_width=True)
+    arr = np.array(samples)
+
+    fig, ax = plt.subplots(figsize=(10, 2))
+    ax.plot(arr, linewidth=0.05)
+    ax.set_axis_off()
+
+    # Set the background color to transparent
+    fig.patch.set_alpha(0)
+    ax.patch.set_alpha(0)
+
+    buf = BytesIO()
+    plt.savefig(buf, format="png", dpi=100, bbox_inches="tight")
+    buf.seek(0)
+    image = Image.open(buf)
+
+    plt.close(fig)
+    return image
+
+
+def get_random_song():
+    sample_songs = json.load(open("sample_songs.json"))
+    name, url = random.choice(list(sample_songs.items()))
+    return name, url
+
+
+def streamlit_player(
+    player,
+    url,
+    height,
+    is_active,
+    muted,
+    start,
+    key,
+    playback_rate=1,
+    events=None,
+    play_inline=False,
+    light=False,
+):
+    with player:
+        options = {
+            "progress_interval": 1000,
+            "playing": is_active,  # st.checkbox("Playing", False),
+            "muted": muted,
+            "light": light,
+            "play_inline": play_inline,
+            "playback_rate": playback_rate,
+            "height": height,
+            "config": {"start": start},
+            "events": events,
+        }
+        if url != "":
+            events = st_player(url, **options, key=key)
+            return events
+
+
+@st.cache_data(show_spinner=False)
+def local_audio(path, mime="audio/mp3"):
+    data = b64encode(Path(path).read_bytes()).decode()
+    return [{"type": mime, "src": f"data:{mime};base64,{data}"}]
+
+
+def _standardize_name(name: str) -> str:
+    return name.lower().replace("_", " ").strip()
+
+
+@st.cache_data(show_spinner=False)
+def switch_page(page_name: str):
+    st.session_state.page = page_name
+
+    page_name = _standardize_name(page_name)
+
+    pages = get_pages("header.py")  # OR whatever your main page is called
+
+    for page_hash, config in pages.items():
+        if _standardize_name(config["page_name"]) == page_name:
+            raise RerunException(
+                RerunData(
+                    page_script_hash=page_hash,
+                    page_name=page_name,
+                )
+            )
+
+    page_names = [_standardize_name(config["page_name"]) for config in pages.values()]
+    raise ValueError(f"Could not find page {page_name}. Must be one of {page_names}")
+
+
+def st_local_audio(pathname, key):
+    st_player(
+        local_audio(pathname),
+        **{
+            "progress_interval": 1000,
+            "playing": False,
+            "muted": False,
+            "light": False,
+            "play_inline": True,
+            "playback_rate": 1,
+            "height": 40,
+            "config": {"start": 0, "forceAudio": True, "forceHLS": True, "forceSafariHLS": True},
+        },
+        key=key,
+    )
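`switch_page` is the glue for the custom horizontal navigation: it looks the target script up in Streamlit's internal page registry and raises `RerunException`, so the app immediately reruns on that page instead of returning. A minimal sketch of how the pages use it (the name must match a script in `app/pages/`):

```python
import streamlit as st

from helpers import switch_page  # defined in app/helpers.py above

if st.button("Open the Separate page"):
    # Raises RerunException internally; execution never continues past this call.
    switch_page("Separate")
```

Note that the `@st.cache_data` decorator on `switch_page` is effectively inert: exceptions are not cached, so every call re-runs the page lookup.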
app/main.py DELETED
@@ -1,155 +0,0 @@
-import logging
-import os
-from pathlib import Path
-
-import requests
-import streamlit as st
-from app.examples import show_examples
-
-from demucs_runner import separator
-from lib.st_custom_components import st_audiorec
-from helpers import load_audio_segment, plot_audio
-from sidebar import text as text_side
-
-logging.basicConfig(
-    format="%(asctime)s %(levelname)-8s %(message)s",
-    level=logging.DEBUG,
-    datefmt="%Y-%m-%d %H:%M:%S",
-)
-
-max_duration = 10  # in seconds
-
-model = "htdemucs"
-extensions = ["mp3", "wav", "ogg", "flac"]  # we will look for all those file types.
-two_stems = None  # only separate one stems from the rest, for instance
-
-# Options for the output audio.
-mp3 = True
-mp3_rate = 320
-float32 = False  # output as float 32 wavs, unsused if 'mp3' is True.
-int24 = False  # output as int24 wavs, unused if 'mp3' is True.
-# You cannot set both `float32 = True` and `int24 = True` !!
-
-
-out_path = Path("/tmp")
-in_path = Path("/tmp")
-
-
-def url_is_valid(url):
-    if url.startswith("http") is False:
-        st.error("URL should start with http or https.")
-        return False
-    elif url.split(".")[-1] not in extensions:
-        st.error("Extension not supported.")
-        return False
-    try:
-        r = requests.get(url)
-        r.raise_for_status()
-        return True
-    except Exception:
-        st.error("URL is not valid.")
-        return False
-
-
-def run():
-    st.markdown("<h1><center>🎶 Music Source Splitter</center></h1>", unsafe_allow_html=True)
-    st.markdown("<center><i>Hight Quality Audio Source Separation</i></center>", unsafe_allow_html=True)
-    st.sidebar.markdown(text_side, unsafe_allow_html=True)
-    st.markdown("""
-    <style>
-    .st-af {
-        font-size: 1.5rem;
-        align-items: center;
-        padding-right: 2rem;
-    }
-
-    </style>
-    """,
-        unsafe_allow_html=True,
-    )
-    filename = None
-    choice = st.radio(label=" ", options=["🔗 From URL", "⬆️ Upload File", "🎤 Record Audio"], horizontal=True)
-    if choice == "🔗 From URL":
-        url = st.text_input("Paste the URL of the audio file", key="url", help="Supported formats: mp3, wav, ogg, flac.")
-        if url != "":
-            # check if the url is valid
-            if url_is_valid(url):
-                with st.spinner("Downloading audio..."):
-                    filename = url.split("/")[-1]
-                    os.system(f"wget -O {in_path / filename} {url}")
-
-    elif choice == "⬆️ Upload File":
-        uploaded_file = st.file_uploader("Choose a file", type=extensions, key="file", help="Supported formats: mp3, wav, ogg, flac.")
-        if uploaded_file is not None:
-            with open(in_path / uploaded_file.name, "wb") as f:
-                f.write(uploaded_file.getbuffer())
-                filename = uploaded_file.name
-    elif choice == "🎤 Record Audio":
-        wav_audio_data = st_audiorec()
-        if wav_audio_data is not None:
-            if wav_audio_data != b'RIFF,\x00\x00\x00WAVEfmt \x10\x00\x00\x00\x01\x00\x02\x00\x80>\x00\x00\x00\xfa\x00\x00\x04\x00\x10\x00data\x00\x00\x00\x00':
-                filename = "recording.wav"
-                with open(in_path / filename, "wb") as f:
-                    f.write(wav_audio_data)
-
-    if filename is not None:
-        song = load_audio_segment(in_path / filename, filename.split(".")[-1])
-
-        n_secs = round(len(song) / 1000)
-        audio_file = open(in_path / filename, "rb")
-        audio_bytes = audio_file.read()
-        start_time = st.slider("Choose the start time", min_value=0, max_value=n_secs, step=1, value=0, help=f"Maximum duration is {max_duration} seconds.")
-        _ = st.audio(audio_bytes, start_time=start_time)
-        end_time = min(start_time + max_duration, n_secs)
-        song = song[start_time*1000:end_time*1000]
-        tot_time = end_time - start_time
-        st.info(f"Audio source will be processed from {start_time} to {end_time} seconds.", icon="⏱")
-        execute = st.button("Split Music 🎶", type="primary")
-        if execute:
-            song.export(in_path / filename, format=filename.split(".")[-1])
-            with st.spinner(f"Splitting source audio, it will take almost {round(tot_time*3.6)} seconds..."):
-                separator(
-                    tracks=[in_path / filename],
-                    out=out_path,
-                    model=model,
-                    device="cpu",
-                    shifts=1,
-                    overlap=0.5,
-                    stem=two_stems,
-                    int24=int24,
-                    float32=float32,
-                    clip_mode="rescale",
-                    mp3=mp3,
-                    mp3_bitrate=mp3_rate,
-                    jobs=os.cpu_count(),
-                    verbose=True,
-                )
-
-            last_dir = ".".join(filename.split(".")[:-1])
-            for file in ["vocals.mp3", "drums.mp3", "bass.mp3", "other.mp3"]:
-                file = out_path / Path(model) / last_dir / file
-                st.markdown("<hr>", unsafe_allow_html=True)
-                label = file.name.split(".")[0].replace("_", " ").capitalize()
-                # add emoji to label
-                label = {
-                    "Drums": "🥁",
-                    "Bass": "🎸",
-                    "Other": "🎹",
-                    "Vocals": "🎤",
-                }.get(label) + " " + label
-                st.markdown("<center><h3>" + label + "</h3></center>", unsafe_allow_html=True)
-
-                cols = st.columns(2)
-                with cols[0]:
-                    auseg = load_audio_segment(file, "mp3")
-                    plot_audio(auseg)
-                with cols[1]:
-                    audio_file = open(file, "rb")
-                    audio_bytes = audio_file.read()
-                    st.audio(audio_bytes)
-
-if __name__ == "__main__":
-    run()
-    st.markdown("<br><br>", unsafe_allow_html=True)
-    with st.expander("Show examples", expanded=False):
-        show_examples()
app/pages/About.py ADDED
@@ -0,0 +1,154 @@
+import streamlit as st
+
+from header import header
+from footer import footer
+
+
+def body():
+    with st.columns([2, 3, 2])[1]:
+        st.markdown(
+            """
+<center>
+
+## Welcome to Moseca, your personal web application designed to redefine your music experience.
+<font size="3"> Whether you're a musician looking to remix your favorite songs, a karaoke
+enthusiast, or a music lover wanting to dive deeper into your favorite tracks,
+Moseca is for you. </font>
+
+<br>
+
+### High-Quality Stem Separation
+
+<center><img title="High-Quality Stem Separation" src="https://i.imgur.com/l7H8YWL.png" width="60%" ></img></center>
+
+<br>
+
+<font size="3"> Separate up to 6 stems including 🗣voice, 🥁drums, 🔉bass, 🎸guitar,
+🎹piano (beta), and 🎶 others. </font>
+
+<br>
+
+### Advanced AI Algorithms
+
+<center><img title="Advanced AI Algorithms" src="https://i.imgur.com/I8Pvdav.png" width="60%" ></img></center>
+
+<br>
+
+<font size="3"> Moseca utilizes state-of-the-art AI technology to extract voice or music from
+your original songs accurately. </font>
+
+<br>
+
+### Karaoke Fun
+
+<center><img title="Karaoke Fun" src="https://i.imgur.com/nsn3JGV.png" width="60%" ></img></center>
+
+<br>
+
+<font size="3"> Engage with your favorite tunes in a whole new way! </font>
+
+<font size="3"> Moseca offers an immersive online karaoke experience, allowing you to search
+for any song on YouTube and remove the vocals online. </font>
+
+<font size="3"> Enjoy singing along with high-quality instrumentals from the comfort of your home.
+</font>
+
+<br>
+
+### Easy Deployment
+
+<font size="3"> With Moseca, you can deploy your personal Moseca app in the
+<a href="https://huggingface.co/spaces/fabiogra/moseca?duplicate=true">
+<img src="https://img.shields.io/badge/🤗%20Hugging%20Face-Spaces-blue"
+alt="Hugging Face Spaces"></a> or locally with </font>
+[![Docker Call](https://img.shields.io/badge/-Docker%20Image-blue?logo=docker&labelColor=white)](https://huggingface.co/spaces/fabiogra/moseca/discussions?docker=true)
+<font size="3"> in just one click. </font>
+
+<br>
+
+### Open-Source and Free
+
+<font size="3"> Moseca is the free and open-source alternative to lalal.ai, splitter.ai or media.io vocal remover.
+
+You can modify, distribute, and use it free of charge. I believe in the power of community
+collaboration and encourage users to contribute to our source code, making Moseca better with
+each update.
+</font>
+
+<br>
+
+### Support
+
+- <font size="3"> Show your support by giving a star to the GitHub repository</font> [![GitHub stars](https://img.shields.io/github/stars/fabiogra/moseca.svg?style=social&label=Star&maxAge=2592000)](https://github.com/fabiogra/moseca).
+- <font size="3"> If you have found an issue or have a suggestion to improve Moseca, you can open an</font> [![GitHub issues](https://img.shields.io/github/issues/fabiogra/moseca.svg)](https://github.com/fabiogra/moseca/issues/new)
+- <font size="3"> Enjoy Moseca?</font> [![Buymeacoffee](https://img.shields.io/badge/Buy%20me%20a%20coffee--yellow.svg?logo=buy-me-a-coffee&logoColor=orange&style=social)](https://www.buymeacoffee.com/fabiogra)
+
+------
+
+## FAQs
+
+### What is Moseca?
+
+<font size="3"> Moseca is an open-source web app that utilizes advanced AI technology to separate vocals and
+instrumentals from music tracks. It also provides an online karaoke experience by allowing you
+to search for any song on YouTube and remove the vocals.</font>
+
+### Are there any limitations?
+<font size="3">Yes, in this environment there are some limitations on processing length
+and CPU usage to allow a smooth experience for all users.
+
+<b>If you want to <u>remove these limitations</u> you can deploy a Moseca app in your personal
+environment like in the <a href="https://huggingface.co/spaces/fabiogra/moseca?duplicate=true"><img src="https://img.shields.io/badge/🤗%20Hugging%20Face-Spaces-blue" alt="Hugging Face Spaces"></a> or locally with [![Docker Call](https://img.shields.io/badge/-Docker%20Image-blue?logo=docker&labelColor=white)](https://huggingface.co/spaces/fabiogra/moseca/discussions?docker=true)</b>
+</font>
+### How does Moseca work?
+<font size="3"> Moseca utilizes the Hybrid Spectrogram and Waveform Source Separation ([DEMUCS](https://github.com/facebookresearch/demucs)) model from Facebook. For fast karaoke vocal removal, Moseca uses the AI vocal remover developed by [tsurumeso](https://github.com/tsurumeso/vocal-remover).
+</font>
+### How do I use Moseca?
+<font size="3">1. Upload your file: choose your song and upload it to Moseca. It supports
+a wide range of music formats for your convenience.</font>
+
+<font size="3">2. Choose separation mode: opt for voice only, 4-stem or 6-stem separation
+depending on your requirement.</font>
+
+<font size="3">3. Let AI do its magic: Moseca’s advanced AI will work to separate vocals
+from music in a matter of minutes, giving you high-quality, separated audio tracks.</font>
+
+<font size="3">4. Download and enjoy: preview and download your separated audio tracks.
+Now you can enjoy them anytime, anywhere! </font>
+
+### Where can I find the code for Moseca?
+
+<font size="3">The code for Moseca is readily available on
+[GitHub](https://github.com/fabiogra/moseca) and
+[Hugging Face](https://huggingface.co/spaces/fabiogra/moseca).
+</font>
+
+### How can I get in touch with you?
+
+<font size="3">For any questions or feedback, feel free to contact me on </font>
+[![Twitter](https://badgen.net/badge/icon/twitter?icon=twitter&label)](https://twitter.com/grsFabio)
+<font size="3">or</font> [LinkedIn](https://www.linkedin.com/in/fabio-grasso/en).
+
+------
+## Disclaimer
+
+<font size="3">Moseca is designed to separate vocals and instruments from copyrighted music for
+legally permissible purposes, such as learning, practicing, research, or other non-commercial
+activities that fall within the scope of fair use or exceptions to copyright. As a user, you are
+responsible for ensuring that your use of separated audio tracks complies with the legal
+requirements in your jurisdiction.
+</font>
+
+</center>
+""",
+            unsafe_allow_html=True,
+        )
+
+
+if __name__ == "__main__":
+    header(logo_and_title=False)
+    body()
+    footer()
app/pages/Karaoke.py ADDED
@@ -0,0 +1,176 @@
+from pathlib import Path
+
+import streamlit as st
+from streamlit_player import st_player
+from streamlit_searchbox import st_searchbox
+
+from service.youtube import (
+    get_youtube_url,
+    search_youtube,
+    download_audio_from_youtube,
+)
+from helpers import (
+    get_random_song,
+    load_audio_segment,
+    streamlit_player,
+    local_audio,
+)
+
+from service.vocal_remover.runner import separate, load_model
+from footer import footer
+from header import header
+
+
+out_path = Path("/tmp")
+in_path = Path("/tmp")
+
+sess = st.session_state
+
+
+def show_karaoke(pathname, initial_player):
+    cols = st.columns([1, 1, 3, 1])
+    with cols[1]:
+        sess.delay = st.slider(
+            label="Start delay in karaoke (seconds)",
+            key="delay_slider",
+            value=2,
+            min_value=0,
+            max_value=5,
+            help="Synchronize youtube player with karaoke audio by adding a delay to the youtube player.",
+        )
+    with cols[2]:
+        events = st_player(
+            local_audio(pathname),
+            **{
+                "progress_interval": 1000,
+                "playing": False,
+                "muted": False,
+                "light": False,
+                "play_inline": True,
+                "playback_rate": 1,
+                "height": 40,
+                "config": {
+                    "start": 0,
+                    "forceAudio": True,
+                },
+                "events": ["onProgress", "onPlay"],
+            },
+            key="karaoke_player",
+        )
+        st.markdown(
+            "<center>⬆️ Click on the play button to start karaoke<center>",
+            unsafe_allow_html=True,
+        )
+    with st.columns([1, 4, 1])[1]:
+        if events.name == "onProgress" and events.data["playedSeconds"] > 0:
+            initial_player.empty()
+            st_player(
+                sess.url + f"&t={sess.delay}s",
+                **{
+                    "progress_interval": 1000,
+                    "playing": True,
+                    "muted": True,
+                    "light": False,
+                    "play_inline": False,
+                    "playback_rate": 1,
+                    "height": 250,
+                    "events": None,
+                },
+                key="yt_muted_player",
+            )
+
+
+def body():
+    st.markdown("<center>Search for a song on YouTube<center>", unsafe_allow_html=True)
+    yt_cols = st.columns([1, 3, 2, 1])
+    with yt_cols[1]:
+        selected_value = st_searchbox(
+            search_youtube,
+            label=None,
+            placeholder="Search by name...",
+            clear_on_submit=True,
+            key="yt_searchbox",
+        )
+        if selected_value is not None and selected_value in sess.video_options:
+            sess.random_song = None
+
+            if selected_value != sess.selected_value:  # New song selected
+                sess.executed = False
+
+            sess.selected_value = selected_value
+            sess.url = get_youtube_url(selected_value)
+
+    with yt_cols[2]:
+        if st.button("🎲 Random song", use_container_width=True):
+            sess.last_dir, sess.url = get_random_song()
+            sess.random_song = True
+            sess.video_options = []
+            sess.executed = False
+
+    if sess.url is not None:
+        player_cols = st.columns([2, 2, 1, 1], gap="medium")
+        with player_cols[1]:
+            player = st.empty()
+            streamlit_player(
+                player,
+                sess.url,
+                height=200,
+                is_active=False,
+                muted=False,
+                start=0,
+                key="yt_player",
+                events=["onProgress"],
+            )
+
+        # Separate vocals
+        cols_before_sep = st.columns([2, 4, 2])
+        with cols_before_sep[1]:
+            execute_button = st.empty()
+            execute = execute_button.button(
+                "Confirm and remove vocals 🎤 🎶",
+                type="primary",
+                use_container_width=True,
+            )
+        if execute or sess.executed:
+            execute_button.empty()
+            player.empty()
+            if execute:
+                sess.executed = False
+            if sess.random_song is None:
+                if not sess.executed:
+                    cols_spinners = st.columns([1, 2, 1])
+                    with cols_spinners[1]:
+                        with st.spinner(
+                            "Separating vocals from music, it will take a while..."
+                        ):
+                            sess.filename = download_audio_from_youtube(sess.url, in_path)
+                            if sess.filename is None:
+                                st.stop()
+                            sess.url = None
+                            filename = sess.filename
+                            song = load_audio_segment(
+                                in_path / filename, filename.split(".")[-1]
+                            )
+                            song.export(in_path / filename, format=filename.split(".")[-1])
+                            model, device = load_model(pretrained_model="baseline.pth")
+                            separate(
+                                input=in_path / filename,
+                                model=model,
+                                device=device,
+                                output_dir=out_path,
+                                only_no_vocals=True,
+                            )
+                            selected_value = None
+                            sess.last_dir = ".".join(sess.filename.split(".")[:-1])
+                            sess.executed = True
+            else:
+                sess.executed = True
+
+        if sess.executed:
+            show_karaoke(out_path / "vocal_remover" / sess.last_dir / "no_vocals.mp3", player)
+
+
+if __name__ == "__main__":
+    header()
+    body()
+    footer()
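The karaoke synchronization above is worth spelling out: the separated instrumental plays in one `st_player`, and only once its `onProgress` events report real playback does a second, muted YouTube player start at `&t={delay}s` to absorb startup latency. A stripped-down sketch of that event handling (placeholder URL; event fields as used in `show_karaoke`):

```python
import streamlit as st
from streamlit_player import st_player

events = st_player(
    "https://www.youtube.com/watch?v=VIDEO_ID",  # placeholder
    progress_interval=1000,
    events=["onProgress", "onPlay"],
    key="demo_player",
)
# The component returns the last event; playedSeconds > 0 means audio is rolling.
if events and events.name == "onProgress" and events.data["playedSeconds"] > 0:
    st.write("Start the muted companion video player here.")
```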
app/pages/Separate.py ADDED
@@ -0,0 +1,203 @@
+import os
+from pathlib import Path
+
+import streamlit as st
+from streamlit_option_menu import option_menu
+
+from service.demucs_runner import separator
+from helpers import (
+    load_audio_segment,
+    plot_audio,
+    st_local_audio,
+    url_is_valid,
+)
+
+from service.vocal_remover.runner import separate, load_model
+
+from footer import footer
+from header import header
+
+label_sources = {
+    "no_vocals.mp3": "🎶 Instrumental",
+    "vocals.mp3": "🎤 Vocals",
+    "drums.mp3": "🥁 Drums",
+    "bass.mp3": "🎸 Bass",
+    "guitar.mp3": "🎸 Guitar",
+    "piano.mp3": "🎹 Piano",
+    "other.mp3": "🎶 Other",
+}
+
+extensions = ["mp3", "wav", "ogg", "flac"]
+
+
+out_path = Path("/tmp")
+in_path = Path("/tmp")
+
+
+def reset_execution():
+    st.session_state.executed = False
+
+
+def body():
+    filename = None
+    cols = st.columns([1, 3, 2, 1])
+    with cols[1]:
+        with st.columns([1, 5, 1])[1]:
+            option = option_menu(
+                menu_title=None,
+                options=["Upload File", "From URL"],
+                icons=["cloud-upload-fill", "link-45deg"],
+                orientation="horizontal",
+                styles={"container": {"width": "100%", "margin": "0px", "padding": "0px"}},
+                key="option_separate",
+            )
+        if option == "Upload File":
+            uploaded_file = st.file_uploader(
+                "Choose a file",
+                type=extensions,
+                key="file",
+                help="Supported formats: mp3, wav, ogg, flac.",
+            )
+            if uploaded_file is not None:
+                with open(in_path / uploaded_file.name, "wb") as f:
+                    f.write(uploaded_file.getbuffer())
+                filename = uploaded_file.name
+                st_local_audio(in_path / filename, key="input_upload_file")
+
+        elif option == "From URL":  # TODO: show examples
+            url = st.text_input(
+                "Paste the URL of the audio file",
+                key="url_input",
+                help="Supported formats: mp3, wav, ogg, flac.",
+            )
+            if url != "":
+                if url_is_valid(url):
+                    with st.spinner("Downloading audio..."):
+                        filename = url.split("/")[-1]
+                        os.system(f"wget -O {in_path / filename} {url}")
+                    st_local_audio(in_path / filename, key="input_from_url")
+    with cols[2]:
+        separation_mode = st.selectbox(
+            "Choose the separation mode",
+            [
+                "Vocals & Instrumental (Faster)",
+                "Vocals & Instrumental (High Quality, Slower)",
+                "Vocals, Drums, Bass & Other (Slower)",
+                "Vocal, Drums, Bass, Guitar, Piano & Other (Slowest)",
+            ],
+            on_change=reset_execution(),
+            key="separation_mode",
+        )
+        if separation_mode == "Vocals & Instrumental (Faster)":
+            max_duration = 30
+        else:
+            max_duration = 15
+
+    if filename is not None:
+        song = load_audio_segment(in_path / filename, filename.split(".")[-1])
+        n_secs = round(len(song) / 1000)
+        if os.environ.get("ENV_LIMITATION", False):
+            with cols[2]:
+                start_time = st.number_input(
+                    "Choose the start time",
+                    min_value=0,
+                    max_value=n_secs,
+                    step=1,
+                    value=0,
+                    help=f"Maximum duration is {max_duration} seconds for this separation mode. Duplicate this space to remove any limit.",
+                    format="%d",
+                )
+                st.session_state.start_time = start_time
+                end_time = min(start_time + max_duration, n_secs)
+                song = song[start_time * 1000 : end_time * 1000]
+                st.info(
+                    f"Audio source will be processed from {start_time} to {end_time} seconds. Duplicate this space to remove any limit.",
+                    icon="⏱",
+                )
+        else:
+            start_time = 0
+            end_time = n_secs
+        with st.columns([1, 3, 1])[1]:
+            execute = st.button("Split Music 🎶", type="primary", use_container_width=True)
+        if execute or st.session_state.executed:
+            if execute:
+                st.session_state.executed = False
+
+            if not st.session_state.executed:
+                song.export(in_path / filename, format=filename.split(".")[-1])
+                with st.spinner("Separating source audio, it will take a while..."):
+                    if separation_mode == "Vocals & Instrumental (Faster)":
+                        model_name = "vocal_remover"
+                        model, device = load_model(pretrained_model="baseline.pth")
+                        separate(
+                            input=in_path / filename,
+                            model=model,
+                            device=device,
+                            output_dir=out_path,
+                        )
+                    else:
+                        stem = None
+                        model_name = "htdemucs"
+                        if (
+                            separation_mode
+                            == "Vocal, Drums, Bass, Guitar, Piano & Other (Slowest)"
+                        ):
+                            model_name = "htdemucs_6s"
+                        elif separation_mode == "Vocals & Instrumental (High Quality, Slower)":
+                            stem = "vocals"
+
+                        separator(
+                            tracks=[in_path / filename],
+                            out=out_path,
+                            model=model_name,
+                            shifts=1,
+                            overlap=0.5,
+                            stem=stem,
+                            int24=False,
+                            float32=False,
+                            clip_mode="rescale",
+                            mp3=True,
+                            mp3_bitrate=320,
+                            verbose=True,
+                            start_time=start_time,
+                            end_time=end_time,
+                        )
+                last_dir = ".".join(filename.split(".")[:-1])
+                filename = None
+                st.session_state.executed = True
+
+            def get_sources(path):
+                sources = {}
+                for file in [
+                    "no_vocals.mp3",
+                    "vocals.mp3",
+                    "drums.mp3",
+                    "bass.mp3",
+                    "guitar.mp3",
+                    "piano.mp3",
+                    "other.mp3",
+                ]:
+                    fullpath = path / file
+                    if fullpath.exists():
+                        sources[file] = fullpath
+                return sources
+
+            sources = get_sources(out_path / Path(model_name) / last_dir)
+            tab_sources = st.tabs([f"**{label_sources.get(k)}**" for k in sources.keys()])
+            for i, (file, pathname) in enumerate(sources.items()):
+                with tab_sources[i]:
+                    cols = st.columns(2)
+                    with cols[0]:
+                        auseg = load_audio_segment(pathname, "mp3")
+                        st.image(
+                            plot_audio(auseg, title="", file=file),
+                            use_column_width="always",
+                        )
+                    with cols[1]:
+                        st_local_audio(pathname, key=f"output_{file}")
+
+
+if __name__ == "__main__":
+    header()
+    body()
+    footer()
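For reference, the four separation modes map onto two backends: "Vocals & Instrumental (Faster)" uses the tsurumeso vocal remover (`baseline.pth`), while the other three call Demucs (`htdemucs`, `htdemucs_6s`, or `htdemucs` with `stem="vocals"`). A sketch of driving both backends directly, mirroring the branches above (paths are illustrative; imports assume `/app` is on `PYTHONPATH`, as the Dockerfile sets up):

```python
from pathlib import Path

from service.demucs_runner import separator
from service.vocal_remover.runner import load_model, separate

track = Path("/tmp/song.mp3")

# Fast two-stem split; baseline.pth is the checkpoint baked into the image.
model, device = load_model(pretrained_model="baseline.pth")
separate(input=track, model=model, device=device, output_dir=Path("/tmp"))

# Six-stem split with Demucs (extra kwargs are absorbed by *args/**kwargs).
separator(
    tracks=[track],
    out=Path("/tmp"),
    model="htdemucs_6s",
    shifts=1,
    overlap=0.5,
    stem=None,
    int24=False,
    float32=False,
    clip_mode="rescale",
    mp3=True,
    mp3_bitrate=320,
    verbose=True,
    start_time=0,
    end_time=30,
)
```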
{lib → app/service}/__init__.py RENAMED
File without changes
app/{demucs_runner.py → service/demucs_runner.py} RENAMED
@@ -2,7 +2,7 @@ import argparse
  import sys
  from pathlib import Path
  from typing import List
-
  from dora.log import fatal
  import torch as th

@@ -11,12 +11,14 @@ from demucs.audio import save_audio
  from demucs.pretrained import get_model_from_args, ModelLoadingError
  from demucs.separate import load_track


  def separator(
      tracks: List[Path],
      out: Path,
      model: str,
-     device: str,
      shifts: int,
      overlap: float,
      stem: str,
@@ -25,27 +27,43 @@ def separator(
      clip_mode: str,
      mp3: bool,
      mp3_bitrate: int,
-     jobs: int,
      verbose: bool,
  ):
      """Separate the sources for the given tracks

      Args:
          tracks (Path): Path to tracks
-         out (Path): Folder where to put extracted tracks. A subfolder with the model name will be created.
          model (str): Model name
-         device (str): Device to use, default is cuda if available else cpu
-         shifts (int): Number of random shifts for equivariant stabilization. Increase separation time but improves quality for Demucs. 10 was used in the original paper.
          overlap (float): Overlap
          stem (str): Only separate audio into {STEM} and no_{STEM}.
          int24 (bool): Save wav output as 24 bits wav.
          float32 (bool): Save wav output as float32 (2x bigger).
-         clip_mode (str): Strategy for avoiding clipping: rescaling entire signal if necessary (rescale) or hard clipping (clamp).
          mp3 (bool): Convert the output wavs to mp3.
          mp3_bitrate (int): Bitrate of converted mp3.
-         jobs (int): Number of jobs. This can increase memory usage but will be much faster when multiple cores are available.
          verbose (bool): Verbose
      """
      args = argparse.Namespace()
      args.tracks = tracks
      args.out = out

  import sys
  from pathlib import Path
  from typing import List
+ import os
  from dora.log import fatal
  import torch as th

  from demucs.pretrained import get_model_from_args, ModelLoadingError
  from demucs.separate import load_track

+ import streamlit as st
+

+ @st.cache_data(show_spinner=False)
  def separator(
      tracks: List[Path],
      out: Path,
      model: str,
      shifts: int,
      overlap: float,
      stem: str,
      clip_mode: str,
      mp3: bool,
      mp3_bitrate: int,
      verbose: bool,
+     *args,
+     **kwargs,
  ):
      """Separate the sources for the given tracks

      Args:
          tracks (Path): Path to tracks
+         out (Path): Folder where to put extracted tracks. A subfolder with the model name will be
+             created.
          model (str): Model name
+         shifts (int): Number of random shifts for equivariant stabilization.
+             Increase separation time but improves quality for Demucs.
+             10 was used in the original paper.
          overlap (float): Overlap
          stem (str): Only separate audio into {STEM} and no_{STEM}.
          int24 (bool): Save wav output as 24 bits wav.
          float32 (bool): Save wav output as float32 (2x bigger).
+         clip_mode (str): Strategy for avoiding clipping: rescaling entire signal if necessary
+             (rescale) or hard clipping (clamp).
          mp3 (bool): Convert the output wavs to mp3.
          mp3_bitrate (int): Bitrate of converted mp3.
          verbose (bool): Verbose
      """
+
+     if os.environ.get("LIMIT_CPU", False):
+         th.set_num_threads(1)
+         jobs = 1
+     else:
+         # Number of jobs. This can increase memory usage but will be much faster when
+         # multiple cores are available.
+         jobs = os.cpu_count()
+
+     if th.cuda.is_available():
+         device = "cuda"
+     else:
+         device = "cpu"
      args = argparse.Namespace()
      args.tracks = tracks
      args.out = out
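For orientation, a minimal sketch of how this cached wrapper might be invoked from the Separate page. The paths and option values below are illustrative assumptions (not taken from this commit), and the keyword set mirrors the docstring above; `int24`/`float32` sit in the hidden hunk lines but are documented, so they are assumed to be parameters:

from pathlib import Path

from app.service.demucs_runner import separator

separator(
    tracks=[Path("/tmp/song.mp3")],   # assumed input file
    out=Path("/tmp/separated"),       # a subfolder named after the model is created here
    model="htdemucs",                 # Demucs v4 hybrid transformer checkpoint
    shifts=1,
    overlap=0.25,
    stem="vocals",                    # produces vocals and no_vocals
    int24=False,
    float32=False,
    clip_mode="rescale",
    mp3=True,
    mp3_bitrate=320,
    verbose=False,
)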
app/service/vocal_remover/__init__.py ADDED
File without changes
app/service/vocal_remover/layers.py ADDED
@@ -0,0 +1,126 @@
+ import torch
+ from torch import nn
+ import torch.nn.functional as F
+
+
+ def crop_center(h1, h2):
+     h1_shape = h1.size()
+     h2_shape = h2.size()
+
+     if h1_shape[3] == h2_shape[3]:
+         return h1
+     elif h1_shape[3] < h2_shape[3]:
+         raise ValueError("h1_shape[3] must be greater than h2_shape[3]")
+
+     s_time = (h1_shape[3] - h2_shape[3]) // 2
+     e_time = s_time + h2_shape[3]
+     h1 = h1[:, :, :, s_time:e_time]
+
+     return h1
+
+
+ class Conv2DBNActiv(nn.Module):
+     def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
+         super(Conv2DBNActiv, self).__init__()
+         self.conv = nn.Sequential(
+             nn.Conv2d(
+                 nin,
+                 nout,
+                 kernel_size=ksize,
+                 stride=stride,
+                 padding=pad,
+                 dilation=dilation,
+                 bias=False,
+             ),
+             nn.BatchNorm2d(nout),
+             activ(),
+         )
+
+     def __call__(self, x):
+         return self.conv(x)
+
+
+ class Encoder(nn.Module):
+     def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
+         super(Encoder, self).__init__()
+         self.conv1 = Conv2DBNActiv(nin, nout, ksize, stride, pad, activ=activ)
+         self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ)
+
+     def __call__(self, x):
+         h = self.conv1(x)
+         h = self.conv2(h)
+
+         return h
+
+
+ class Decoder(nn.Module):
+     def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False):
+         super(Decoder, self).__init__()
+         self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
+         self.dropout = nn.Dropout2d(0.1) if dropout else None
+
+     def __call__(self, x, skip=None):
+         x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
+
+         if skip is not None:
+             skip = crop_center(skip, x)
+             x = torch.cat([x, skip], dim=1)
+
+         h = self.conv1(x)
+         # h = self.conv2(h)
+
+         if self.dropout is not None:
+             h = self.dropout(h)
+
+         return h
+
+
+ class ASPPModule(nn.Module):
+     def __init__(self, nin, nout, dilations=(4, 8, 12), activ=nn.ReLU, dropout=False):
+         super(ASPPModule, self).__init__()
+         self.conv1 = nn.Sequential(
+             nn.AdaptiveAvgPool2d((1, None)),
+             Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ),
+         )
+         self.conv2 = Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ)
+         self.conv3 = Conv2DBNActiv(nin, nout, 3, 1, dilations[0], dilations[0], activ=activ)
+         self.conv4 = Conv2DBNActiv(nin, nout, 3, 1, dilations[1], dilations[1], activ=activ)
+         self.conv5 = Conv2DBNActiv(nin, nout, 3, 1, dilations[2], dilations[2], activ=activ)
+         self.bottleneck = Conv2DBNActiv(nout * 5, nout, 1, 1, 0, activ=activ)
+         self.dropout = nn.Dropout2d(0.1) if dropout else None
+
+     def forward(self, x):
+         _, _, h, w = x.size()
+         feat1 = F.interpolate(self.conv1(x), size=(h, w), mode="bilinear", align_corners=True)
+         feat2 = self.conv2(x)
+         feat3 = self.conv3(x)
+         feat4 = self.conv4(x)
+         feat5 = self.conv5(x)
+         out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1)
+         out = self.bottleneck(out)
+
+         if self.dropout is not None:
+             out = self.dropout(out)
+
+         return out
+
+
+ class LSTMModule(nn.Module):
+     def __init__(self, nin_conv, nin_lstm, nout_lstm):
+         super(LSTMModule, self).__init__()
+         self.conv = Conv2DBNActiv(nin_conv, 1, 1, 1, 0)
+         self.lstm = nn.LSTM(input_size=nin_lstm, hidden_size=nout_lstm // 2, bidirectional=True)
+         self.dense = nn.Sequential(
+             nn.Linear(nout_lstm, nin_lstm), nn.BatchNorm1d(nin_lstm), nn.ReLU()
+         )
+
+     def forward(self, x):
+         N, _, nbins, nframes = x.size()
+         h = self.conv(x)[:, 0]  # N, nbins, nframes
+         h = h.permute(2, 0, 1)  # nframes, N, nbins
+         h, _ = self.lstm(h)
+         h = self.dense(h.reshape(-1, h.size()[-1]))  # nframes * N, nbins
+         h = h.reshape(nframes, N, 1, nbins)
+         h = h.permute(1, 2, 3, 0)
+
+         return h
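As a quick sanity check on these building blocks, a hedged sketch of their shape behavior on dummy tensors (not repo code): Encoder halves both spatial dims through its stride-2 first conv, and Decoder upsamples by 2 before convolving.

import torch
from app.service.vocal_remover.layers import Encoder, Decoder

x = torch.rand(1, 2, 256, 512)                  # (batch, channels, freq bins, frames)
enc = Encoder(2, 16, ksize=3, stride=2, pad=1)  # stride 2 halves bins and frames
h = enc(x)                                      # -> torch.Size([1, 16, 128, 256])
dec = Decoder(16, 8)
y = dec(h)                                      # bilinear upsample x2, then conv -> [1, 8, 256, 512]
print(h.shape, y.shape)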
app/service/vocal_remover/nets.py ADDED
@@ -0,0 +1,125 @@
+ import torch
+ from torch import nn
+ import torch.nn.functional as F
+
+ from app.service.vocal_remover import layers
+
+
+ class BaseNet(nn.Module):
+     def __init__(self, nin, nout, nin_lstm, nout_lstm, dilations=((4, 2), (8, 4), (12, 6))):
+         super(BaseNet, self).__init__()
+         self.enc1 = layers.Conv2DBNActiv(nin, nout, 3, 1, 1)
+         self.enc2 = layers.Encoder(nout, nout * 2, 3, 2, 1)
+         self.enc3 = layers.Encoder(nout * 2, nout * 4, 3, 2, 1)
+         self.enc4 = layers.Encoder(nout * 4, nout * 6, 3, 2, 1)
+         self.enc5 = layers.Encoder(nout * 6, nout * 8, 3, 2, 1)
+
+         self.aspp = layers.ASPPModule(nout * 8, nout * 8, dilations, dropout=True)
+
+         self.dec4 = layers.Decoder(nout * (6 + 8), nout * 6, 3, 1, 1)
+         self.dec3 = layers.Decoder(nout * (4 + 6), nout * 4, 3, 1, 1)
+         self.dec2 = layers.Decoder(nout * (2 + 4), nout * 2, 3, 1, 1)
+         self.lstm_dec2 = layers.LSTMModule(nout * 2, nin_lstm, nout_lstm)
+         self.dec1 = layers.Decoder(nout * (1 + 2) + 1, nout * 1, 3, 1, 1)
+
+     def __call__(self, x):
+         e1 = self.enc1(x)
+         e2 = self.enc2(e1)
+         e3 = self.enc3(e2)
+         e4 = self.enc4(e3)
+         e5 = self.enc5(e4)
+
+         h = self.aspp(e5)
+
+         h = self.dec4(h, e4)
+         h = self.dec3(h, e3)
+         h = self.dec2(h, e2)
+         h = torch.cat([h, self.lstm_dec2(h)], dim=1)
+         h = self.dec1(h, e1)
+
+         return h
+
+
+ class CascadedNet(nn.Module):
+     def __init__(self, n_fft, nout=32, nout_lstm=128):
+         super(CascadedNet, self).__init__()
+         self.max_bin = n_fft // 2
+         self.output_bin = n_fft // 2 + 1
+         self.nin_lstm = self.max_bin // 2
+         self.offset = 64
+
+         self.stg1_low_band_net = nn.Sequential(
+             BaseNet(2, nout // 2, self.nin_lstm // 2, nout_lstm),
+             layers.Conv2DBNActiv(nout // 2, nout // 4, 1, 1, 0),
+         )
+         self.stg1_high_band_net = BaseNet(2, nout // 4, self.nin_lstm // 2, nout_lstm // 2)
+
+         self.stg2_low_band_net = nn.Sequential(
+             BaseNet(nout // 4 + 2, nout, self.nin_lstm // 2, nout_lstm),
+             layers.Conv2DBNActiv(nout, nout // 2, 1, 1, 0),
+         )
+         self.stg2_high_band_net = BaseNet(
+             nout // 4 + 2, nout // 2, self.nin_lstm // 2, nout_lstm // 2
+         )
+
+         self.stg3_full_band_net = BaseNet(3 * nout // 4 + 2, nout, self.nin_lstm, nout_lstm)
+
+         self.out = nn.Conv2d(nout, 2, 1, bias=False)
+         self.aux_out = nn.Conv2d(3 * nout // 4, 2, 1, bias=False)
+
+     def forward(self, x):
+         x = x[:, :, : self.max_bin]
+
+         bandw = x.size()[2] // 2
+         l1_in = x[:, :, :bandw]
+         h1_in = x[:, :, bandw:]
+         l1 = self.stg1_low_band_net(l1_in)
+         h1 = self.stg1_high_band_net(h1_in)
+         aux1 = torch.cat([l1, h1], dim=2)
+
+         l2_in = torch.cat([l1_in, l1], dim=1)
+         h2_in = torch.cat([h1_in, h1], dim=1)
+         l2 = self.stg2_low_band_net(l2_in)
+         h2 = self.stg2_high_band_net(h2_in)
+         aux2 = torch.cat([l2, h2], dim=2)
+
+         f3_in = torch.cat([x, aux1, aux2], dim=1)
+         f3 = self.stg3_full_band_net(f3_in)
+
+         mask = torch.sigmoid(self.out(f3))
+         mask = F.pad(
+             input=mask,
+             pad=(0, 0, 0, self.output_bin - mask.size()[2]),
+             mode="replicate",
+         )
+
+         if self.training:
+             aux = torch.cat([aux1, aux2], dim=1)
+             aux = torch.sigmoid(self.aux_out(aux))
+             aux = F.pad(
+                 input=aux,
+                 pad=(0, 0, 0, self.output_bin - aux.size()[2]),
+                 mode="replicate",
+             )
+             return mask, aux
+         else:
+             return mask
+
+     def predict_mask(self, x):
+         mask = self.forward(x)
+
+         if self.offset > 0:
+             mask = mask[:, :, :, self.offset : -self.offset]
+             assert mask.size()[3] > 0
+
+         return mask
+
+     def predict(self, x):
+         mask = self.forward(x)
+         pred_mag = x * mask
+
+         if self.offset > 0:
+             pred_mag = pred_mag[:, :, :, self.offset : -self.offset]
+             assert pred_mag.size()[3] > 0
+
+         return pred_mag
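A hedged smoke test for the I/O contract of CascadedNet, using a dummy spectrogram with shapes inferred from the code above. The eval() call matters because forward() returns an extra aux mask in training mode, and predict_mask() trims offset=64 frames from each side:

import torch
from app.service.vocal_remover.nets import CascadedNet

net = CascadedNet(n_fft=2048)        # max_bin=1024, output_bin=1025, offset=64
net.eval()                           # otherwise forward() returns (mask, aux)
spec = torch.rand(1, 2, 1025, 256)   # (batch, stereo, n_fft//2+1 bins, frames)
with torch.no_grad():
    mask = net.predict_mask(spec)    # sigmoid mask, padded back to 1025 bins
print(mask.shape)                    # -> torch.Size([1, 2, 1025, 128])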
app/service/vocal_remover/runner.py ADDED
@@ -0,0 +1,234 @@
+ import os
+ import logging
+ import librosa
+ import numpy as np
+ import soundfile as sf
+ import torch
+ from stqdm import stqdm
+ import streamlit as st
+ from pydub import AudioSegment
+
+ from app.service.vocal_remover import nets
+
+
+ if os.environ.get("LIMIT_CPU", False):
+     torch.set_num_threads(1)
+
+
+ def merge_artifacts(y_mask, thres=0.05, min_range=64, fade_size=32):
+     if min_range < fade_size * 2:
+         raise ValueError("min_range must be >= fade_size * 2")
+
+     idx = np.where(y_mask.min(axis=(0, 1)) > thres)[0]
+     start_idx = np.insert(idx[np.where(np.diff(idx) != 1)[0] + 1], 0, idx[0])
+     end_idx = np.append(idx[np.where(np.diff(idx) != 1)[0]], idx[-1])
+     artifact_idx = np.where(end_idx - start_idx > min_range)[0]
+     weight = np.zeros_like(y_mask)
+     if len(artifact_idx) > 0:
+         start_idx = start_idx[artifact_idx]
+         end_idx = end_idx[artifact_idx]
+         old_e = None
+         for s, e in zip(start_idx, end_idx):
+             if old_e is not None and s - old_e < fade_size:
+                 s = old_e - fade_size * 2
+
+             if s != 0:
+                 weight[:, :, s : s + fade_size] = np.linspace(0, 1, fade_size)
+             else:
+                 s -= fade_size
+
+             if e != y_mask.shape[2]:
+                 weight[:, :, e - fade_size : e] = np.linspace(1, 0, fade_size)
+             else:
+                 e += fade_size
+
+             weight[:, :, s + fade_size : e - fade_size] = 1
+             old_e = e
+
+     v_mask = 1 - y_mask
+     y_mask += weight * v_mask
+
+     return y_mask
+
+
+ def make_padding(width, cropsize, offset):
+     left = offset
+     roi_size = cropsize - offset * 2
+     if roi_size == 0:
+         roi_size = cropsize
+     right = roi_size - (width % roi_size) + left
+
+     return left, right, roi_size
+
+
+ def wave_to_spectrogram(wave, hop_length, n_fft):
+     wave_left = np.asfortranarray(wave[0])
+     wave_right = np.asfortranarray(wave[1])
+
+     spec_left = librosa.stft(wave_left, n_fft=n_fft, hop_length=hop_length)
+     spec_right = librosa.stft(wave_right, n_fft=n_fft, hop_length=hop_length)
+     spec = np.asfortranarray([spec_left, spec_right])
+
+     return spec
+
+
+ def spectrogram_to_wave(spec, hop_length=1024):
+     if spec.ndim == 2:
+         wave = librosa.istft(spec, hop_length=hop_length)
+     elif spec.ndim == 3:
+         spec_left = np.asfortranarray(spec[0])
+         spec_right = np.asfortranarray(spec[1])
+
+         wave_left = librosa.istft(spec_left, hop_length=hop_length)
+         wave_right = librosa.istft(spec_right, hop_length=hop_length)
+         wave = np.asfortranarray([wave_left, wave_right])
+
+     return wave
+
+
+ class Separator(object):
+     def __init__(self, model, device, batchsize, cropsize, postprocess=False, progress_bar=None):
+         self.model = model
+         self.offset = model.offset
+         self.device = device
+         self.batchsize = batchsize
+         self.cropsize = cropsize
+         self.postprocess = postprocess
+         self.progress_bar = progress_bar
+
+     def _separate(self, X_mag_pad, roi_size):
+         X_dataset = []
+         patches = (X_mag_pad.shape[2] - 2 * self.offset) // roi_size
+         for i in range(patches):
+             start = i * roi_size
+             X_mag_crop = X_mag_pad[:, :, start : start + self.cropsize]
+             X_dataset.append(X_mag_crop)
+
+         X_dataset = np.asarray(X_dataset)
+
+         self.model.eval()
+         with torch.no_grad():
+             mask = []
+             # To reduce the overhead, dataloader is not used.
+             for i in stqdm(
+                 range(0, patches, self.batchsize),
+                 st_container=self.progress_bar,
+                 gui=False,
+             ):
+                 X_batch = X_dataset[i : i + self.batchsize]
+                 X_batch = torch.from_numpy(X_batch).to(self.device)
+
+                 pred = self.model.predict_mask(X_batch)
+
+                 pred = pred.detach().cpu().numpy()
+                 pred = np.concatenate(pred, axis=2)
+                 mask.append(pred)
+
+             mask = np.concatenate(mask, axis=2)
+
+         return mask
+
+     def _preprocess(self, X_spec):
+         X_mag = np.abs(X_spec)
+         X_phase = np.angle(X_spec)
+
+         return X_mag, X_phase
+
+     def _postprocess(self, mask, X_mag, X_phase):
+         if self.postprocess:
+             mask = merge_artifacts(mask)
+
+         y_spec = mask * X_mag * np.exp(1.0j * X_phase)
+         v_spec = (1 - mask) * X_mag * np.exp(1.0j * X_phase)
+
+         return y_spec, v_spec
+
+     def separate(self, X_spec):
+         X_mag, X_phase = self._preprocess(X_spec)
+
+         n_frame = X_mag.shape[2]
+         pad_l, pad_r, roi_size = make_padding(n_frame, self.cropsize, self.offset)
+         X_mag_pad = np.pad(X_mag, ((0, 0), (0, 0), (pad_l, pad_r)), mode="constant")
+         X_mag_pad /= X_mag_pad.max()
+
+         mask = self._separate(X_mag_pad, roi_size)
+         mask = mask[:, :, :n_frame]
+
+         y_spec, v_spec = self._postprocess(mask, X_mag, X_phase)
+
+         return y_spec, v_spec
+
+
+ @st.cache_resource(show_spinner=False)
+ def load_model(pretrained_model, n_fft=2048):
+     model = nets.CascadedNet(n_fft, 32, 128)
+     if torch.cuda.is_available():
+         device = torch.device("cuda:0")
+         model.to(device)
+     # elif torch.backends.mps.is_available() and torch.backends.mps.is_built():
+     #     device = torch.device("mps")
+     #     model.to(device)
+     else:
+         device = torch.device("cpu")
+     model.load_state_dict(torch.load(pretrained_model, map_location=device))
+     return model, device
+
+
+ # @st.cache_data(show_spinner=False)
+ def separate(
+     input,
+     model,
+     device,
+     output_dir,
+     batchsize=4,
+     cropsize=256,
+     postprocess=False,
+     hop_length=1024,
+     n_fft=2048,
+     sr=44100,
+     progress_bar=None,
+     only_no_vocals=False,
+ ):
+     X, sr = librosa.load(input, sr=sr, mono=False, dtype=np.float32, res_type="kaiser_fast")
+     basename = os.path.splitext(os.path.basename(input))[0]
+
+     if X.ndim == 1:
+         # mono to stereo
+         X = np.asarray([X, X])
+
+     X_spec = wave_to_spectrogram(X, hop_length, n_fft)
+
+     with torch.no_grad():
+         sp = Separator(model, device, batchsize, cropsize, postprocess, progress_bar=progress_bar)
+         y_spec, v_spec = sp.separate(X_spec)
+
+     base_dir = f"{output_dir}/vocal_remover/{basename}"
+     os.makedirs(base_dir, exist_ok=True)
+
+     wave = spectrogram_to_wave(y_spec, hop_length=hop_length)
+     try:
+         sf.write(f"{base_dir}/no_vocals.mp3", wave.T, sr)
+     except Exception:
+         logging.error("Failed to write no_vocals.mp3, trying pydub...")
+         pydub_write(wave, f"{base_dir}/no_vocals.mp3", sr)
+     if only_no_vocals:
+         return
+     wave = spectrogram_to_wave(v_spec, hop_length=hop_length)
+     try:
+         sf.write(f"{base_dir}/vocals.mp3", wave.T, sr)
+     except Exception:
+         logging.error("Failed to write vocals.mp3, trying pydub...")
+         pydub_write(wave, f"{base_dir}/vocals.mp3", sr)
+
+
+ def pydub_write(wave, output_path, frame_rate, audio_format="mp3"):
+     # Convert the float waveform to interleaved 16-bit PCM for pydub;
+     # wave arrives as (channels, samples), so transpose before serializing.
+     wave_16bit = (wave.T * 32767).astype(np.int16)
+     channels = wave_16bit.shape[1] if wave_16bit.ndim > 1 else 1
+
+     audio_segment = AudioSegment(
+         wave_16bit.tobytes(),
+         frame_rate=frame_rate,
+         sample_width=wave_16bit.dtype.itemsize,
+         channels=channels,
+     )
+     audio_segment.export(output_path, format=audio_format)
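For orientation, a minimal sketch of the intended call sequence. The checkpoint path and song path here are hypothetical; this commit does not show where the pretrained weights live:

from app.service.vocal_remover.runner import load_model, separate

model, device = load_model(pretrained_model="models/baseline.pth")  # assumed path
separate(
    input="/tmp/song.mp3",   # assumed input file
    model=model,
    device=device,
    output_dir="/tmp/output",
    only_no_vocals=True,     # karaoke case: write only the instrumental
)
# Writes /tmp/output/vocal_remover/song/no_vocals.mp3
# (and vocals.mp3 as well when only_no_vocals=False).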
app/service/youtube.py ADDED
@@ -0,0 +1,72 @@
+ import os
+ from typing import List
+ import yt_dlp
+ import string
+ import time
+ import re
+ import streamlit as st
+ from pytube import Search
+
+
+ def _sanitize_filename(filename):
+     safe_chars = "-_.() %s%s" % (
+         re.escape(string.ascii_letters),
+         re.escape(string.digits),
+     )
+     safe_filename = re.sub(f"[^{safe_chars}]", "_", filename)
+     return safe_filename.strip()
+
+
+ @st.cache_data(show_spinner=False)
+ def download_audio_from_youtube(url, output_path):
+     if not os.path.exists(output_path):
+         os.makedirs(output_path)
+
+     with yt_dlp.YoutubeDL() as ydl:
+         info_dict = ydl.extract_info(url, download=False)
+         if info_dict.get("duration") > 360:
+             st.error("Song is too long. Please use a song no longer than 6 minutes.")
+             return
+         video_title = info_dict.get("title", None)
+         video_title = _sanitize_filename(video_title)
+     ydl_opts = {
+         "format": "bestaudio/best",
+         "postprocessors": [
+             {
+                 "key": "FFmpegExtractAudio",
+                 "preferredcodec": "mp3",
+                 "preferredquality": "192",
+             }
+         ],
+         "outtmpl": os.path.join(output_path, video_title),
+         # 'quiet': True,
+     }
+     with yt_dlp.YoutubeDL(ydl_opts) as ydl:
+         ydl.download([url])
+     return f"{video_title}.mp3"
+
+
+ @st.cache_data(show_spinner=False)
+ def query_youtube(query: str) -> Search:
+     return Search(query)
+
+
+ def search_youtube(query: str) -> List:
+     if len(query) > 3:
+         time.sleep(0.5)
+         search = query_youtube(query + " lyrics")
+         st.session_state.search_results = search.results
+         video_options = [video.title for video in st.session_state.search_results]
+         st.session_state.video_options = video_options
+     else:
+         video_options = []
+     return video_options
+
+
+ def get_youtube_url(title: str) -> str:
+     video = st.session_state.search_results[st.session_state.video_options.index(title)]
+     return video.embed_url
+
+
+ def check_if_is_youtube_url(url: str) -> bool:
+     return url.startswith("http")
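A short, hedged example of the download helper above. The URL and output folder are placeholders; videos longer than 6 minutes are rejected with st.error before any download starts:

from app.service.youtube import download_audio_from_youtube

filename = download_audio_from_youtube(
    url="https://www.youtube.com/watch?v=dQw4w9WgXcQ",  # placeholder URL
    output_path="/tmp/downloads",                       # created if missing
)
# filename is the sanitized "<title>.mp3" written inside /tmp/downloads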
app/sidebar.py DELETED
@@ -1,12 +0,0 @@
- text = """
- <b>🎶 Music Source Splitter</b> is a web app that allows you to separate the vocals and the instrumental of a song.
- <hr>
- <h3>How does it work?</h3>
- The app uses a pretrained model called Hybrid Spectrogram and Waveform Source Separation from <a href="https://github.com/facebookresearch/demucs">facebook/htdemucs</a>.
- <br><br>
- <h3>Where can I find the code?</h3>
- The code for this app is available both on <a href="https://github.com/fabiogra/st-music-splitter">GitHub</a> and <a href="https://huggingface.co/spaces/fabiogra/st-music-splitter/tree/main">HuggingFace</a>.
- <br><br>
- <h3>Contact me</h3>
- Contact me on <a href="https://twitter.com/grsFabio">Twitter</a> or on <a href="https://www.linkedin.com/in/fabio-grasso/">LinkedIn</a> if you have any questions or feedback.
- """
app/style.py ADDED
@@ -0,0 +1,131 @@
+ _font_title = "Monoton"
+ _font_subtitle = "Exo"
+
+ CSS = (
+     """
+     <!-- Add the font link from Google Fonts -->
+     <link href="https://fonts.googleapis.com/css2?family="""
+     + _font_title
+     + """&display=swap" rel="stylesheet">
+     <link href="https://fonts.googleapis.com/css2?family="""
+     + _font_subtitle
+     + """&display=swap" rel="stylesheet">
+
+     <style>
+     /* Remove the streamlit header */
+     header[data-testid="stHeader"] {
+         display: none;
+     }
+     /* Remove the sidebar menu */
+     div[data-testid="collapsedControl"]{
+         display: none;
+     }
+     /* Background */
+     .css-z5fcl4 {
+         padding: 0.5rem;
+         padding-top: 0rem;
+     }
+
+     /* Distances between the title and the image in mobile */
+     .css-1uifejx.e1tzin5v1 {
+         margin-bottom: 0px;
+         padding-bottom: 0px;
+     }
+     h1 {
+         padding-top: 0px;
+     }
+
+     /* Center the image within its container */
+     .css-1kyxreq {
+         justify-content: center;
+     }
+
+     /* Remove fixed width from the image container */
+     .css-1kyxreq.etr89bj2 {
+         width: 100% !important;
+     }
+
+     /* Center the title */
+     .css-k7vsyb {
+         text-align: center;
+     }
+
+     /* Hide the anchor button */
+     .css-zt5igj.e16nr0p33 a {
+         display: none;
+     }
+     /* Hide the full screen button */
+     .css-e370rw.e19lei0e1 {
+         display: none;
+     }
+     .css-6awftf.e19lei0e1 {
+         display: none;
+     }
+
+     /* Desktop */
+     @media (min-width: 640px) {
+         .stMarkdown {
+             max-width: 100%;
+             width: auto;
+             display: inline-block;
+         }
+         /* Dynamically add space between the image and the title */
+         .css-1kyxreq {
+             justify-content: right;
+         }
+     }
+
+     /* Add space after the image and the title */
+     .css-1a32fsj {
+         margin-right: 0px;
+     }
+
+     /* Apply the futuristic font to the text title */
+     #moseca {
+         font-family: '"""
+     + _font_title
+     + """', sans-serif;
+         font-size: 3rem;
+         text-align: center;
+         /* Align the text to the center of the box */
+         align-items: center;
+         /* Set the line height to the same as the height of the box */
+         line-height: 3.5rem;
+         margin-bottom: -1rem;
+     }
+
+     /* subtitle */
+     .css-5rimss p, .css-nahz7x p {
+         font-family: """
+     + _font_subtitle
+     + """, sans-serif;
+         font-size: 0.8rem;
+         text-align: center;
+     }
+
+     /* Desktop */
+     @media (min-width: 640px) {
+         .css-zt5igj, .css-nahz7x p {
+             text-align: left;
+         }
+         .css-5rimss p {
+             text-align: left;
+         }
+     }
+
+     .st-af {
+         align-items: center;
+         padding-right: 2rem;
+     }
+
+     /* Remove the gap around the player */
+     .css-434r0z {
+         gap: 0rem;
+     }
+
+     </style>
+     """
+ )
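For context, a stylesheet string built this way is typically injected once per page with unsafe HTML enabled; a minimal sketch (how the individual pages actually consume CSS is not shown in this hunk):

import streamlit as st
from app.style import CSS

st.markdown(CSS, unsafe_allow_html=True)  # inject the Google Fonts links and CSS overrides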
img/bmc-button.png ADDED
img/image_stems.png ADDED
img/karaoke_fun.png ADDED
img/logo_moseca.png ADDED
img/state-of-art.png ADDED
lib/st_audiorec/.DS_Store DELETED
Binary file (6.15 kB)
 
lib/st_audiorec/__init__.py DELETED
@@ -1 +0,0 @@
-
lib/st_audiorec/frontend/.DS_Store DELETED
Binary file (8.2 kB)
 
lib/st_audiorec/frontend/.env DELETED
@@ -1,6 +0,0 @@
- # Run the component's dev server on :3001
- # (The Streamlit dev server already runs on :3000)
- PORT=3001
-
- # Don't automatically open the web browser on `npm run start`.
- BROWSER=none
lib/st_audiorec/frontend/.prettierrc DELETED
@@ -1,5 +0,0 @@
- {
-   "endOfLine": "lf",
-   "semi": false,
-   "trailingComma": "es5"
- }
lib/st_audiorec/frontend/build/.DS_Store DELETED
Binary file (6.15 kB)
 
lib/st_audiorec/frontend/build/asset-manifest.json DELETED
@@ -1,22 +0,0 @@
- {
-   "files": {
-     "main.js": "./static/js/main.833ba252.chunk.js",
-     "main.js.map": "./static/js/main.833ba252.chunk.js.map",
-     "runtime-main.js": "./static/js/runtime-main.11ec9aca.js",
-     "runtime-main.js.map": "./static/js/runtime-main.11ec9aca.js.map",
-     "static/css/2.bfbf028b.chunk.css": "./static/css/2.bfbf028b.chunk.css",
-     "static/js/2.270b84d8.chunk.js": "./static/js/2.270b84d8.chunk.js",
-     "static/js/2.270b84d8.chunk.js.map": "./static/js/2.270b84d8.chunk.js.map",
-     "index.html": "./index.html",
-     "precache-manifest.4829c060d313d0b0d13d9af3b0180289.js": "./precache-manifest.4829c060d313d0b0d13d9af3b0180289.js",
-     "service-worker.js": "./service-worker.js",
-     "static/css/2.bfbf028b.chunk.css.map": "./static/css/2.bfbf028b.chunk.css.map",
-     "static/js/2.270b84d8.chunk.js.LICENSE.txt": "./static/js/2.270b84d8.chunk.js.LICENSE.txt"
-   },
-   "entrypoints": [
-     "static/js/runtime-main.11ec9aca.js",
-     "static/css/2.bfbf028b.chunk.css",
-     "static/js/2.270b84d8.chunk.js",
-     "static/js/main.833ba252.chunk.js"
-   ]
- }
lib/st_audiorec/frontend/build/bootstrap.min.css DELETED
The diff for this file is too large to render. See raw diff
 
lib/st_audiorec/frontend/build/index.html DELETED
The diff for this file is too large to render. See raw diff
lib/st_audiorec/frontend/build/precache-manifest.4829c060d313d0b0d13d9af3b0180289.js DELETED
@@ -1,26 +0,0 @@
- self.__precacheManifest = (self.__precacheManifest || []).concat([
-   {
-     "revision": "de27ef444ab2ed520b64cb0c988a478a",
-     "url": "./index.html"
-   },
-   {
-     "revision": "1a47c80c81698454dced",
-     "url": "./static/css/2.bfbf028b.chunk.css"
-   },
-   {
-     "revision": "1a47c80c81698454dced",
-     "url": "./static/js/2.270b84d8.chunk.js"
-   },
-   {
-     "revision": "3fc7fb5bfeeec1534560a2c962e360a7",
-     "url": "./static/js/2.270b84d8.chunk.js.LICENSE.txt"
-   },
-   {
-     "revision": "3478f4c246f37a2cbb97",
-     "url": "./static/js/main.833ba252.chunk.js"
-   },
-   {
-     "revision": "7c26bca7e16783d14d15",
-     "url": "./static/js/runtime-main.11ec9aca.js"
-   }
- ]);
lib/st_audiorec/frontend/build/service-worker.js DELETED
@@ -1,39 +0,0 @@
- /**
-  * Welcome to your Workbox-powered service worker!
-  *
-  * You'll need to register this file in your web app and you should
-  * disable HTTP caching for this file too.
-  * See https://goo.gl/nhQhGp
-  *
-  * The rest of the code is auto-generated. Please don't update this file
-  * directly; instead, make changes to your Workbox build configuration
-  * and re-run your build process.
-  * See https://goo.gl/2aRDsh
-  */
-
- importScripts("https://storage.googleapis.com/workbox-cdn/releases/4.3.1/workbox-sw.js");
-
- importScripts(
-   "./precache-manifest.4829c060d313d0b0d13d9af3b0180289.js"
- );
-
- self.addEventListener('message', (event) => {
-   if (event.data && event.data.type === 'SKIP_WAITING') {
-     self.skipWaiting();
-   }
- });
-
- workbox.core.clientsClaim();
-
- /**
-  * The workboxSW.precacheAndRoute() method efficiently caches and responds to
-  * requests for URLs in the manifest.
-  * See https://goo.gl/S9QRab
-  */
- self.__precacheManifest = [].concat(self.__precacheManifest || []);
- workbox.precaching.precacheAndRoute(self.__precacheManifest, {});
-
- workbox.routing.registerNavigationRoute(workbox.precaching.getCacheKeyForURL("./index.html"), {
-
-   blacklist: [/^\/_/,/\/[^/?]+\.[^/]+$/],
- });
lib/st_audiorec/frontend/build/static/.DS_Store DELETED
Binary file (6.15 kB)
 
lib/st_audiorec/frontend/build/static/css/2.bfbf028b.chunk.css DELETED
@@ -1,2 +0,0 @@
- ._3ybTi{margin:2em;padding:.5em;border:2px solid #000;font-size:2em;text-align:center}
- /*# sourceMappingURL=2.bfbf028b.chunk.css.map */
lib/st_audiorec/frontend/build/static/css/2.bfbf028b.chunk.css.map DELETED
@@ -1 +0,0 @@
- {"version":3,"sources":["index.css"],"names":[],"mappings":"AAEA,QACE,UAAW,CACX,YAAc,CACd,qBAAsB,CACtB,aAAc,CACd,iBACF","file":"2.bfbf028b.chunk.css","sourcesContent":["/* add css module styles here (optional) */\n\n._3ybTi {\n margin: 2em;\n padding: 0.5em;\n border: 2px solid #000;\n font-size: 2em;\n text-align: center;\n}\n"]}
lib/st_audiorec/frontend/build/static/js/2.270b84d8.chunk.js DELETED
The diff for this file is too large to render. See raw diff
 
lib/st_audiorec/frontend/build/static/js/2.270b84d8.chunk.js.LICENSE.txt DELETED
@@ -1,58 +0,0 @@
- /*
- object-assign
- (c) Sindre Sorhus
- @license MIT
- */
-
- /**
-  * @license
-  * Copyright 2018-2021 Streamlit Inc.
-  *
-  * Licensed under the Apache License, Version 2.0 (the "License");
-  * you may not use this file except in compliance with the License.
-  * You may obtain a copy of the License at
-  *
-  *     http://www.apache.org/licenses/LICENSE-2.0
-  *
-  * Unless required by applicable law or agreed to in writing, software
-  * distributed under the License is distributed on an "AS IS" BASIS,
-  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  * See the License for the specific language governing permissions and
-  * limitations under the License.
-  */
-
- /** @license React v0.19.1
-  * scheduler.production.min.js
-  *
-  * Copyright (c) Facebook, Inc. and its affiliates.
-  *
-  * This source code is licensed under the MIT license found in the
-  * LICENSE file in the root directory of this source tree.
-  */
-
- /** @license React v16.13.1
-  * react-is.production.min.js
-  *
-  * Copyright (c) Facebook, Inc. and its affiliates.
-  *
-  * This source code is licensed under the MIT license found in the
-  * LICENSE file in the root directory of this source tree.
-  */
-
- /** @license React v16.14.0
-  * react-dom.production.min.js
-  *
-  * Copyright (c) Facebook, Inc. and its affiliates.
-  *
-  * This source code is licensed under the MIT license found in the
-  * LICENSE file in the root directory of this source tree.
-  */
-
- /** @license React v16.14.0
-  * react.production.min.js
-  *
-  * Copyright (c) Facebook, Inc. and its affiliates.
-  *
-  * This source code is licensed under the MIT license found in the
-  * LICENSE file in the root directory of this source tree.
-  */
lib/st_audiorec/frontend/build/static/js/2.270b84d8.chunk.js.map DELETED
The diff for this file is too large to render. See raw diff
 
lib/st_audiorec/frontend/build/static/js/main.833ba252.chunk.js DELETED
The diff for this file is too large to render. See raw diff
lib/st_audiorec/frontend/build/static/js/main.833ba252.chunk.js.map DELETED
The diff for this file is too large to render. See raw diff
lib/st_audiorec/frontend/build/static/js/runtime-main.11ec9aca.js DELETED
The diff for this file is too large to render. See raw diff
lib/st_audiorec/frontend/build/static/js/runtime-main.11ec9aca.js.map DELETED
The diff for this file is too large to render. See raw diff
lib/st_audiorec/frontend/build/styles.css DELETED
@@ -1,59 +0,0 @@
- * {
-   margin: 0;
-   padding: 0;
- }
-
- .container {
-   margin: 0 auto;
-   text-align: center;
- }
-
- .display {
-   width: 100%;
-   padding: 5px 0;
-   margin: 15px 0;
- }
-
- .controllers {
-   width: 100%;
-   padding: 5px 0;
-   margin-top: 15px;
-   margin-bottom: 35px;
- }
-
- button {
-   padding-top: 0.25rem;
-   padding-bottom: 0.25rem;
-   padding-right: 0.75rem;
-   padding-left: 0.75rem;
-   margin-right: 0.5rem;
-   margin-left: 0.1rem;
-   font-size: 16px;
-   background-color: #ffffff;
-   color: #000000;
-   border: 1px solid rgba(49, 51, 63, 0.2);
-   border-radius: 0.25rem;
-   margin-top: 0.75rem;
-   margin-bottom: 0.25rem;
- }
-
- button:hover {
-   padding-top: 0.25rem;
-   padding-bottom: 0.25rem;
-   padding-right: 0.75rem;
-   padding-left: 0.75rem;
-   margin-right: 0.5rem;
-   margin-left: 0.1rem;
-   font-size: 16px;
-   background-color: #ffffff;
-   color: #ff4c4b;
-   border: 1px solid #ff4c4b;
-   border-radius: 0.25rem;
-   margin-top: 0.75rem;
-   margin-bottom: 0.25rem;
- }
-
- audio {
-   width: 450px;
-   height: 45px;
- }
lib/st_audiorec/frontend/package-lock.json DELETED
The diff for this file is too large to render. See raw diff
 
lib/st_audiorec/frontend/package.json DELETED
@@ -1,44 +0,0 @@
- {
-   "name": "streamlit_component_template",
-   "version": "0.1.0",
-   "private": true,
-   "dependencies": {
-     "@types/jest": "^24.0.0",
-     "@types/node": "^12.0.0",
-     "@types/react": "^16.9.0",
-     "@types/react-dom": "^16.9.0",
-     "audio-react-recorder": "^1.0.4",
-     "axios": "^0.27.2",
-     "normalize.css": "^8.0.1",
-     "react": "^16.13.1",
-     "react-dom": "^16.13.1",
-     "react-media-recorder": "^1.6.4",
-     "react-scripts": "3.4.1",
-     "streamlit-component-lib": "^1.4.0",
-     "turbodepot-node": "^7.0.1",
-     "typescript": "~3.8.0",
-     "use-media-recorder": "^2.0.4"
-   },
-   "scripts": {
-     "start": "react-scripts start",
-     "build": "react-scripts build",
-     "test": "react-scripts test",
-     "eject": "react-scripts eject"
-   },
-   "eslintConfig": {
-     "extends": "react-app"
-   },
-   "browserslist": {
-     "production": [
-       ">0.2%",
-       "not dead",
-       "not op_mini all"
-     ],
-     "development": [
-       "last 1 chrome version",
-       "last 1 firefox version",
-       "last 1 safari version"
-     ]
-   },
-   "homepage": "."
- }