kboaten committed on
Commit b13c70a
1 Parent(s): 63e4d11

Create UI.ipynb

Files changed (1)
  1. UI.ipynb +81 -0
UI.ipynb ADDED
@@ -0,0 +1,81 @@
+ # install the UI library, the MIDI-to-WAV renderer, and the model library
+ !pip install gradio
+ !apt-get install -y timidity
+ !pip install transformers
+
+ import gradio as gr
+ from pathlib import Path
+ import subprocess
+ import numpy as np
+ from transformers import AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSequenceClassification
+
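+ # The Drive paths below assume this notebook runs in Colab with Google Drive already
+ # mounted; if it is not, the usual mount call is:
+ # from google.colab import drive
+ # drive.mount('/content/drive')
+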
+ # load the poem generation model saved on Drive; its tokenizer comes from the distilgpt2 base
+ model = TFAutoModelForCausalLM.from_pretrained('drive/MyDrive/FIRE_3rd Sem/peom_gn/')
+ base_model = "distilgpt2"
+ tokenizer = AutoTokenizer.from_pretrained(base_model)
+
+ # load the sentiment analysis model saved on Drive; its tokenizer comes from the distilbert base
+ model_v1 = TFAutoModelForSequenceClassification.from_pretrained('drive/MyDrive/FIRE_3rd Sem/sen_analysis/bert')
+ base_model = "distilbert-base-uncased"
+ tokenizer_v1 = AutoTokenizer.from_pretrained(base_model)
+
+ # music generation: render a MIDI file to WAV with timidity so Gradio can play it
+ base_path = "/content/drive/MyDrive/FIRE_3rd Sem/music_gn/"
+ # path_mid_file -> replace this with the model-generated MIDI file path
+ path_mid_file = base_path + "Comic_Relief.mid"
+ path_wav_file = base_path + "output_comic.wav"
+ # -Ow renders to a RIFF WAVE file, -o sets the output path
+ subprocess.call(['timidity', path_mid_file, "-Ow", "-o", path_wav_file])
+
+ # MUSIC GENERATION: this tab takes no input and plays back the rendered WAV file
+ def inference_music_gen():
+     return Path(path_wav_file)
+
+ music_gen_interface = gr.Interface(
+     inference_music_gen,
+     inputs=None,
+     outputs=gr.Audio(type="filepath", label="Output")
+ )
+
+ # SENTIMENT ANALYSIS: classify a sentence as positive or negative with the DistilBERT model
+ def inference_sentiment_analysis(sen):
+     tokenized_v1 = tokenizer_v1([sen], return_tensors="np", padding="longest")
+     outputs_v1 = model_v1(tokenized_v1).logits
+     classifications_v1 = np.argmax(outputs_v1, axis=1)
+     if classifications_v1[0] == 1:
+         res = "Positive :)"
+     else:
+         res = "Negative :("
+     return res
+
+ sentiment_analysis_interface = gr.Interface(
+     fn=inference_sentiment_analysis,
+     inputs=gr.Textbox(lines=2, placeholder="Enter a Sentence"),
+     outputs="text",
+ )
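+
+ # quick sanity check of the classifier; the input sentence is just a hypothetical example:
+ # print(inference_sentiment_analysis("what a lovely melody"))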
+
+ # POEM GENERATION: continue the user's prompt with the causal LM and restore line breaks
+ def inference_poem_gen(start):
+     tokenized = tokenizer(start, return_tensors="np")
+     outputs = model.generate(**tokenized, max_new_tokens=20)
+     res = tokenizer.decode(outputs[0])
+     return res.replace("<LINE>", "\n")
+
+ poem_gen_interface = gr.Interface(
+     fn=inference_poem_gen,
+     inputs=gr.Textbox(lines=2, placeholder="Start Here..."),
+     outputs="text",
+ )
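+
+ # quick sanity check of the generator; the prompt is just a hypothetical example:
+ # print(inference_poem_gen("The moon above the quiet sea"))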
+
+ # COMBINE ALL: expose the three interfaces as tabs in a single demo
+ title = "Music Generation"
+ description = "Add Project description"
+ article = "<p style='text-align: center'><a href='https://github.com/' target='_blank'>Github Repo</a></p>"
+ # we can add other project-related metadata as well; title/description/article are not
+ # passed to the interfaces yet (gr.Interface accepts title=, description=, and article=)
+
+ demo = gr.TabbedInterface([music_gen_interface, poem_gen_interface, sentiment_analysis_interface],
+                           ["Music Generation", "Poem Generation", "Sentiment Analysis"])
+ # share=True prints a temporary public link; debug=True keeps the cell running and surfaces errors
+ demo.launch(debug=True, share=True)