Nafise committed on
Commit 2f49beb
1 Parent(s): ba73b65
Files changed (3)
  1. Dockerfile +17 -0
  2. main.py +58 -0
  3. requirements.txt +14 -0
Dockerfile ADDED
@@ -0,0 +1,17 @@
+ # Use the official Python image from Docker Hub
+ FROM python:3.9
+
+ # Set the working directory in the container
+ WORKDIR /code
+
+ # Copy the requirements file into the container at /code
+ COPY ./requirements.txt /code/requirements.txt
+
+ # Install the Python dependencies
+ RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+ # Copy the current directory contents into the container at /code
+ COPY . .
+
+ # Define the command to start the Flask app using Gunicorn
+ CMD ["gunicorn", "-b", "0.0.0.0:7860", "main:app"]
main.py ADDED
@@ -0,0 +1,58 @@
+ from flask import Flask, jsonify, request
+ import requests
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+
+ app = Flask(__name__)
+
+ # Initialize sentiment analysis model
+ sentiment_tokenizer = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-emotion")
+ sentiment_model = AutoModelForSeq2SeqLM.from_pretrained("mrm8488/t5-base-finetuned-emotion")
+
+ # Initialize dialogue generation model
+ tokenizer = AutoTokenizer.from_pretrained("microsoft/GODEL-v1_1-large-seq2seq")
+ model = AutoModelForSeq2SeqLM.from_pretrained("microsoft/GODEL-v1_1-large-seq2seq")
+
+ # Last.fm API key
+ API_KEY = "e554f25da26e93055f2780bbe2b9293b"
+
+ # Function to generate response
+ def generate_response(dialog):
+     knowledge = ''
+     instruction = f'Instruction: given a dialog context, you need to respond empathically.'
+     dialog_text = ' EOS '.join(dialog)
+     query = f"{instruction} [CONTEXT] {dialog_text} {knowledge}"
+
+     input_ids = tokenizer.encode(query, return_tensors="pt")
+     output = model.generate(input_ids, max_length=16, min_length=2, top_p=0.9, do_sample=True)
+     generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
+     return generated_text
+
+ # Function to perform sentiment analysis
+ def sentiment_finder(user_dialog):
+     input_ids = sentiment_tokenizer.encode(user_dialog + '</s>', return_tensors='pt')
+     output = sentiment_model.generate(input_ids=input_ids, max_length=2)
+     emotion = [sentiment_tokenizer.decode(ids) for ids in output][0]
+     return emotion[6:]
+
+ @app.route("/get_response", methods=["POST", "GET"])
+ def get_response():
+     data = request.json
+     dialog = data.get('dialog', [])
+     generated_text = generate_response(dialog)
+     user_dialog = dialog[-1]
+     emotion = sentiment_finder(user_dialog)
+
+     # Fetch music recommendations based on emotion
+     recommendations_url = f"http://ws.audioscrobbler.com/2.0/?method=tag.gettoptracks&tag={emotion}&api_key={API_KEY}&format=json&limit=4"
+     recommendations_response = requests.get(recommendations_url)
+
+     recommendations = []
+     if recommendations_response.ok:
+         recommendations_data = recommendations_response.json()
+         recommendations = recommendations_data["tracks"]["track"]
+
+     response_data = {'generated_response': generated_text, 'recommendations': recommendations}
+     return jsonify(response_data)
+
+ if __name__ == '__main__':
+     app.run(port=8000)
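
For reference, a minimal client-side sketch of calling the new /get_response endpoint, assuming the container built from the Dockerfile above is running locally with port 7860 exposed; the host, port, and sample dialog are illustrative and not part of this commit. The request body carries the conversation history under the 'dialog' key, and the response returns the generated reply plus up to four Last.fm track suggestions, as defined in main.py.

    import requests

    # Hypothetical local test call; the actual host or mapped port may differ.
    payload = {"dialog": ["Hi there!", "I had a really rough day at work."]}
    resp = requests.post("http://localhost:7860/get_response", json=payload)
    resp.raise_for_status()

    data = resp.json()
    print(data["generated_response"])  # empathic reply from the GODEL model
    for track in data.get("recommendations", []):
        # Track fields follow Last.fm's tag.gettoptracks JSON, not this commit.
        print(track.get("name"))
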
requirements.txt ADDED
@@ -0,0 +1,14 @@
+ transformers[sentencepiece]
+ flask~=2.2.2
+ flask-restful
+ flask-cors
+ streamlit
+ requests
+ torch
+ torchvision
+ torchaudio
+ pyttsx3
+ PyAudio
+ SpeechRecognition
+ setuptools
+ gunicorn