# Force CPU-only execution and silence TensorFlow logging.
# These variables must be set before TensorFlow is imported to take full effect.
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

import csv
import datetime
import sys
import tempfile

import gradio as gr
import joblib
import numpy as np
import tensorflow as tf
from huggingface_hub import hf_hub_download

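# Expected environment variables (typically provided as Space secrets):
#   REPO_ID     - id of the private Space holding the model code
#   MODEL_FILE  - path of the Python file to download from that Space
#   TOKEN       - Hugging Face token with read access to it
#   PRIVATE_DIR - local directory added to sys.path so that `models` can be imported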
# Hide the repo name: the private model code is fetched at runtime
python_path = hf_hub_download(repo_id=os.environ['REPO_ID'], repo_type="space",
                              filename=os.environ['MODEL_FILE'], use_auth_token=os.environ['TOKEN'])
print(python_path)
sys.path.append(os.environ['PRIVATE_DIR'])
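# The private `models` module (downloaded above) provides the retrieval helpers
# used below: get_models, get_durl, get_audio_names, get_index, get_predict,
# do_normalize, get_distance, get_url and the TOP constant.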
from models import *
preprocess_model, model = get_models()
url_dict = get_durl()
audio_names = get_audio_names()
index = get_index()
#encoder_text = get_encoder_text()
encoder_text = tf.keras.models.load_model("encoder_text_retrievaltext_bmg_221022_54_clean")

def process(prompt, lang):
    """Embed the text prompt, search the track index, and return the audio URLs of the five best matches."""
    # `lang` is currently unused: only English is exposed in the UI.
    now = datetime.datetime.now()
    print()
    print('*************')
    print("Current Time: ", str(now))
    print("Text input : ", prompt)
    print('*************')
    print()

    # Embed the prompt with the text encoder, normalize the embedding,
    # then query the index for the TOP nearest tracks (TOP comes from `models`).
    embed_query = get_predict(encoder_text, prompt, preprocess_model, model)
    do_normalize(embed_query)
    D, I = get_distance(index, embed_query, TOP)
    #print(I)
    #print(D)
    print("----")
    for i in range(len(I[0])):
        print(audio_names[I[0][i]], " with distance ", D[0][i])
        print("    url : ", get_url(I[0][i], audio_names, url_dict))

    # One URL per Audio output component, closest match first.
    return [get_url(I[0][k], audio_names, url_dict) for k in range(5)]
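# Usage sketch (assuming the private `models` helpers are available):
#   process("Chilled electronic", "en")  # -> list of five audio URLs, closest match first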

inputs = [gr.Textbox(label="Input", placeholder="type your description", max_lines=2),
          gr.Radio(label="Language", choices=["en"], value="en")]

poc_examples = [
          ["Mysterious filmscore with Arabic influenced instruments","en"],
          ["Let's go on a magical adventure with wizards, dragons and castles","en"],
          ["Creepy piano opening evolves and speeds up into a cinematic orchestral piece","en"],
          ["Chilled electronic","en"],
          #["","en"],
          ["Relax piano","en"],
          ["Halloween rock with creepy organ","en"],
          ["Rhythmic electro dance track for sport, motivation and sweating","en"],
          ["soundtrack for an action movie from the eighties in a retro synth wave style","en"],
          ["Choral female singing is rhythmically accompanied in a church with medieval instruments","en"],
          ["Christmas","en"],
          ["love romantic with piano, strings and vocals","en"],
          ["Electronic soundscapes for chilling and relaxing","en"],
          ["Minimal, emotional, melancholic piano","en"],
          ["A calm and romantic acoustic guitar melody","en"],
          ["horror suspense piano","en"],
          ["Big Band","en"],
          ["90 eurodance beat","en"],
]

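# Five audio players, one per URL returned by process().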
outputs = [gr.Audio(label="Track 1"), gr.Audio(label="Track 2"), gr.Audio(label="Track 3"), gr.Audio(label="Track 4"), gr.Audio(label="Track 5")]
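# cache_examples=False: example outputs are not precomputed at startup; each query runs on demand.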
demo1 = gr.Interface(fn=process, inputs=inputs, outputs=outputs, examples=poc_examples, cache_examples=False, examples_per_page=20)

demo1.launch(debug=False)