Spaces:
Runtime error
Runtime error
abhi1nandy2
commited on
Commit
·
f21b8d3
1
Parent(s):
0551f2f
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,165 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import openai
|
2 |
+
|
3 |
+
# System-prompt personas for each GPT "team member" in the workflow.
ai_role_dict = dict(
    music_director="You are an Experienced Music Director who has 15+ Years experience in the industry",
    lyricist="You are an Experienced Lyricist, who has written hit songs in several languages",
    freelance_lyricist="You are an Experienced Freelance Lyricist, who has helped writing songs in several languages",
    music_composer="You are an Experienced Music Composer, who has composed songs of several genre and arrangements over the years",
    sound_engineer="You are an Experienced Sound Engineer, who can provide expert feedback on the arrangement being used.",
)
|
10 |
+
|
11 |
+
from tenacity import (
|
12 |
+
retry,
|
13 |
+
stop_after_attempt,
|
14 |
+
wait_random_exponential,
|
15 |
+
) # for exponential backoff
|
16 |
+
|
17 |
+
@retry(wait=wait_random_exponential(min=1, max=80), stop=stop_after_attempt(6))
def get_response(ai_role, query, model):
    """Send one chat completion request, retrying with exponential backoff.

    Parameters
    ----------
    ai_role : str
        System-prompt persona (typically a value from ``ai_role_dict``).
    query : str
        User prompt for this turn.
    model : str
        OpenAI chat model name, e.g. ``"gpt-3.5-turbo"``.

    Returns
    -------
    str
        Content of the first choice of the completion.

    Raises
    ------
    tenacity.RetryError
        If all 6 attempts fail (backoff waits 1-80s between tries).
    """
    response = openai.ChatCompletion.create(
        model=model,
        messages=[
            # str() preserves the old `"{}".format(...)` coercion for
            # any non-string input, without the redundant format call.
            {"role": "system", "content": str(ai_role)},
            {"role": "user", "content": str(query)},
        ]
    )

    return response['choices'][0]['message']['content']
|
29 |
+
|
30 |
+
def write_intermediate_outputs(filename, text):
    """Persist one intermediate workflow result to a file.

    Parameters
    ----------
    filename : str
        Name of the file to create in the current working directory.
    text : str
        Content to write.

    Returns
    -------
    str
        Relative path ``'./<filename>'`` to the written file, suitable for
        handing to a ``gr.File`` output component.
    """
    with open(filename, 'w') as fw:
        fw.write(text)

    # Bug fix: the path previously contained no placeholder, so every call
    # returned the same non-existent literal path instead of the file written.
    sample_file_path = f'./{filename}'

    return sample_file_path
|
37 |
+
|
38 |
+
def write_and_compose(model, api_key, genre, keywords, emotion):
    """Run the multi-agent song-writing workflow end to end.

    Pipeline: draft lyrics -> two independent feedback rounds -> revised
    lyrics -> chord progression & arrangement -> engineering review ->
    final director sign-off and song title. Each intermediate result is
    written to a ``step_*.txt`` file for download.

    Returns a tuple of (final lyrics, final chord progression / arrangement
    / title text, list of intermediate-output file paths).
    """
    openai.api_key = api_key

    # Step 2: the freelance lyricist produces a first draft.
    draft = get_response(ai_role_dict['freelance_lyricist'], "Write structured lyrics of a {} song with the following keywords - {}, and use the following emotion - {}".format(genre, keywords, emotion), model)

    review_prompt = '''The Freelance Lyricist submitted these lyrics:

{}

Provide suitable feedback (in bullet-points)
'''

    # Step 3: director and lyricist each critique the draft.
    director_notes = get_response(ai_role_dict['music_director'], review_prompt.format(draft), model)
    lyricist_notes = get_response(ai_role_dict['lyricist'], review_prompt.format(draft), model)

    revision_prompt = '''After seeing the lyrics you initially submitted -

{}

the music director provided the following feedback -
{}

the lyricist provided the following feedback as well -
{}

Incorporate this feedback, and make suggested changes to the lyrics based on the feedback only
'''

    # Step 4: the draft is revised into the final lyrics.
    final_lyrics = get_response(ai_role_dict['freelance_lyricist'], revision_prompt.format(draft, director_notes, lyricist_notes), model)

    composer_prompt = '''Given the lyrics of the {} song on {} in the emotion - {} -

{}

write a suitable chord progression (for each line of the same lyrics), followed by the suitable arrangement required to sing and record the song (in bullet points)'''

    # Step 5: the composer proposes chords and an arrangement.
    arrangement = get_response(ai_role_dict['music_composer'], composer_prompt.format(genre, keywords, emotion, final_lyrics), model)

    engineer_prompt = '''Given the lyrics of the {} song on {} in the emotion - {} -

{}

with a Chord Progression and Arrangement (suggested by the Music Composer) -

{}

could you write improvements that could be made to the Arrangement (in bullet points)? If the current arrangement is upto the mark, write "No change in the arrangement required"
'''

    # Step 6: the sound engineer reviews the proposed arrangement.
    engineering_notes = get_response(ai_role_dict['sound_engineer'], engineer_prompt.format(genre, keywords, emotion, final_lyrics, arrangement), model)

    wrapup_prompt = '''Given the lyrics of the {} song on {} in the emotion - {} -

{}

with a Chord Progression and Arrangement (suggested by the Music Composer) -

{}

and further improvements on the Arrangement (suggested by the Sound Engineer)

{}

- suggest any further improvements that could be made to the (a) Chord Progression (b) Arrangement.
- After that, Write 10 "="s in the next line
- After that, Write the final Chord Progression and Arrangement
- Also, write a suitable title for the song

'''

    # Step 7: the director signs off and names the song.
    closing = get_response(ai_role_dict['music_director'], wrapup_prompt.format(genre, keywords, emotion, final_lyrics, arrangement, engineering_notes), model)

    # The model is asked to separate its remarks from the final chart with a
    # run of ten '=' characters; split the reply on that marker.
    final_improvements = closing.split('==========')[0]

    final_chord_prog_and_composition = closing.split('==========')[-1]

    # Persist intermediate artefacts (order matters: it mirrors the workflow
    # step numbering shown in the UI description).
    intermediate = [
        ('step_2.txt', draft),
        ('step_3A.txt', director_notes),
        ('step_3B.txt', lyricist_notes),
        ('step_5.txt', arrangement),
        ('step_6.txt', engineering_notes),
        ('step_7.txt', final_improvements),
    ]
    output_file_list = [write_intermediate_outputs(name, text) for name, text in intermediate]

    return final_lyrics, final_chord_prog_and_composition, output_file_list
|
128 |
+
|
129 |
+
import gradio as gr

# Markdown rendered above the interface: explains the goal, the agent
# personas, and which step_*.txt file each workflow stage writes.
description = '''## Objective -

Given specific Genre, Keywords, and Emotion, make a Brand New Song without lifting a finger!

1. Get lyrics of a new song
2. Get a suitable chord progression
3. Get a suitable musical arrangement for singing and recording the song.
4. Cherry on the top - Get a suitable song title!

## AI Music Team is composed of several GPT agents with the following "personas" -

1. Experienced Music Director who has 15+ Years experience in the industry
2. Experienced Lyricist, who has written hit songs in several languages
3. Experienced Freelance Lyricist, who has helped writing songs in several languages
4. Experienced Music Composer, who has composed songs of several genre and arrangements over the years
5. Experienced Sound Engineer, who can provide expert feedback on the arrangement being used.

## Workflow (Intermediate outputs/results are output as downloadable files) -

1. Get Inputs from user (OpenAI API Endpoint, API Key, keywords, genre, emotion for the song). Check out [this link](https://platform.openai.com/account/api-keys) to get your API Key
2. Experienced Freelance Lyricist writes a lyrics draft (**see `step_2.txt`**)
3. Experienced Music Director and Experienced Lyricist provide feedback (**see `step_3A.txt` & `step_3B.txt` respectively**)
4. Experienced Freelance Lyricist incorporates the feedback, lyrics is finalized here
5. Experienced Music Composer will provide a chord progression, and an arrangement of instruments (**see `step_5.txt`**)
6. Experienced Sound Engineer will provide ways to improve on the existing arrangement (**see `step_6.txt`**)
7. Finally, Music Director will provide improve on the chord progression and the existing arrangement, and give the song a name (**see `step_7.txt`**)
'''

# Gradio wiring: the five inputs map positionally onto
# write_and_compose(model, api_key, genre, keywords, emotion); the outputs
# are the final lyrics, the final chart/title text, and the downloadable
# intermediate files produced during the run.
demo = gr.Interface(title = 'Write and Compose brand new Songs using an Elite *AI Music Team*', description = description,
                    fn=write_and_compose,
                    inputs=[gr.Radio(["gpt-3.5-turbo", "gpt-4"], value="gpt-3.5-turbo", label = "Choose the OpenAI API Endpoint"), "text", "text", gr.Textbox(label="Keywords (separated by comma)"), "text"], # model, api_key, genre, keywords, emotion
                    outputs=[gr.Textbox(label="Final Lyrics of the song after Step #4"), gr.Textbox(label="Final Chord Progression, Arrangement, and Song Title"), gr.File(label='Intermediate Outputs')], # final_lyrics, final_chord_prog_and_composition, output_file_list
                    )
demo.launch()
|