luonghuyquang committed on
Commit 1336b1e
1 Parent(s): 3566e33
Files changed (2)
  1. app.py +94 -0
  2. requirements.txt +7 -0
app.py ADDED
@@ -0,0 +1,94 @@
+ import requests
+
+ import random
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM
+ import torch
+ import gradio as gr
+ import pandas as pd
+ from datetime import datetime
+
+ # Load emotion model and tokenizer
+ emotion_tokenizer = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-emotion")
+ emotion_model = AutoModelForSeq2SeqLM.from_pretrained("mrm8488/t5-base-finetuned-emotion")
+
+ # Load text generation model and tokenizer
+ import os
+ token = os.getenv('hftoken')
+ text_tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b-it", token=token)
+ text_model = AutoModelForCausalLM.from_pretrained("google/gemma-2-2b-it", token=token)
+
+ # Set device to cpu
+ device = torch.device('cpu')
+ emotion_model.to(device)
+ text_model.to(device)
+
+
+ # Function to predict emotion
+ def get_emotion(text):
+     input_ids = emotion_tokenizer.encode(text + '</s>', return_tensors='pt').to(device)
+     output = emotion_model.generate(input_ids=input_ids, max_length=2)
+     dec = [emotion_tokenizer.decode(ids, skip_special_tokens=True) for ids in output]
+     label = dec[0].strip()
+     return label
+
+ def generate_quote(original_text, emotion):
+     # Generate one inspirational quote based on emotion and original text
+     input_text = f"Text: {original_text}\nEmotion: {emotion}\nInspirational Quote:"
+     input_ids = text_tokenizer(input_text, return_tensors="pt").to(device)
+     outputs = text_model.generate(**input_ids, max_new_tokens=70, do_sample=True, temperature=0.7)
+     generated_text = text_tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+     if "Inspirational Quote:" in generated_text:
+         quote = generated_text.split("Inspirational Quote:")[1].strip().split("\n")[0]
+     else:
+         quote = generated_text.strip()
+
+     return quote
+
+ import os
+ import pandas as pd
+
+ # Ensure file exists and get absolute path
+ csv_file = os.path.join(os.getcwd(), 'diary_entries.csv')
+ if not os.path.exists(csv_file):
+     df = pd.DataFrame(columns=["Date", "Diary Text", "Emotion", "Quote"])
+     df.to_csv(csv_file, index=False)
+ else:
+     df = pd.read_csv(csv_file)
+
+ # Function to handle emotion detection, quote generation, and CSV logging
+ def journal_interface(Diary):
+     try:
+         # Step 1: Detect Emotion
+         emotion = get_emotion(Diary)
+
+         # Step 2: Generate Inspirational Quote
+         quote = generate_quote(Diary, emotion)
+
+         # Step 3: Save to CSV
+         date_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+         new_entry = pd.DataFrame([[date_time, Diary, emotion, quote]], columns=["Date", "Diary Text", "Emotion", "Quote"])
+         global df
+         df = pd.concat([df, new_entry], ignore_index=True)
+         df.to_csv(csv_file, index=False)
+
+         return emotion, quote
+     except Exception as e:
+         print(f"Error encountered: {str(e)}")
+         return f"Error: {str(e)}", ""
+
+ # Update the Gradio interface
+ interface = gr.Interface(
+     fn=journal_interface,
+     inputs=gr.Textbox(lines=5, placeholder="Enter your thoughts here..."),
+     outputs=[
+         gr.Textbox(label="Detected Emotion"),
+         gr.Textbox(label="Generated Quote")
+     ],
+     title="AI-Powered Personal Journal",
+     description="Enter your thoughts, and the AI will detect the emotion and generate an inspirational quote based on it.",
+     theme=gr.themes.Soft()
+ )
+
+ # Launch the Gradio app
+ interface.launch(share=True)
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ requests
+ transformers
+ torch
+ gradio
+ pandas
+ tiktoken
+ sentencepiece