File size: 3,358 Bytes
1336b1e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
import requests

import random
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM
import torch
import gradio as gr
import pandas as pd
from datetime import datetime

# Load emotion model and tokenizer
# (seq2seq T5 fine-tuned to emit a single emotion word, e.g. "joy", "sadness")
emotion_tokenizer = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-emotion")
emotion_model = AutoModelForSeq2SeqLM.from_pretrained("mrm8488/t5-base-finetuned-emotion")

# Load text generation model and tokenizer
# Gemma is a gated model on the Hub, so a Hugging Face access token is read
# from the 'hftoken' environment variable and passed to from_pretrained.
import os
token=os.getenv('hftoken')
text_tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b-it", token=token)
text_model = AutoModelForCausalLM.from_pretrained("google/gemma-2-2b-it", token=token)

# Set device to cpu
# (both models are moved explicitly; all later .to(device) calls use this)
device = torch.device('cpu')
emotion_model.to(device)
text_model.to(device)


# Classify a piece of text into a single emotion label.
def get_emotion(text):
    """Return the emotion word the T5 emotion model predicts for *text*.

    The '</s>' suffix matches the model card's expected input format, and
    max_length=2 restricts generation to the one-token label.
    """
    encoded = emotion_tokenizer.encode(text + '</s>', return_tensors='pt').to(device)
    generated = emotion_model.generate(input_ids=encoded, max_length=2)
    decoded = [emotion_tokenizer.decode(seq, skip_special_tokens=True) for seq in generated]
    return decoded[0].strip()

def generate_quote(original_text, emotion):
    """Generate one inspirational quote conditioned on the diary text and its emotion.

    Samples from the causal LM (temperature 0.7, up to 70 new tokens) and
    returns only the first line produced after the prompt's quote marker.
    """
    prompt = f"Text: {original_text}\nEmotion: {emotion}\nInspirational Quote:"
    encoded = text_tokenizer(prompt, return_tensors="pt").to(device)
    sampled = text_model.generate(**encoded, max_new_tokens=70, do_sample=True, temperature=0.7)
    decoded = text_tokenizer.decode(sampled[0], skip_special_tokens=True)

    # Strip the echoed prompt: keep only the first line after the marker.
    marker = "Inspirational Quote:"
    if marker not in decoded:
        return decoded.strip()
    return decoded.split(marker)[1].strip().split("\n")[0]

import os
import pandas as pd

# Resolve the diary log as an absolute path in the working directory and
# make sure it exists with the expected header before the app starts.
csv_file = os.path.join(os.getcwd(), 'diary_entries.csv')
if os.path.exists(csv_file):
    df = pd.read_csv(csv_file)
else:
    df = pd.DataFrame(columns=["Date", "Diary Text", "Emotion", "Quote"])
    df.to_csv(csv_file, index=False)

# Handle one diary submission end-to-end: detect the emotion, generate a
# matching quote, and persist the entry to the CSV log.
def journal_interface(Diary):
    """Return (emotion, quote) for the diary text; on failure return an error message.

    Side effect: appends the entry to the module-level `df` and rewrites
    `csv_file` on every successful call.
    """
    global df
    try:
        # Step 1: Detect Emotion
        emotion = get_emotion(Diary)

        # Step 2: Generate Inspirational Quote
        quote = generate_quote(Diary, emotion)

        # Step 3: Save to CSV
        stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        row = pd.DataFrame(
            [[stamp, Diary, emotion, quote]],
            columns=["Date", "Diary Text", "Emotion", "Quote"],
        )
        df = pd.concat([df, row], ignore_index=True)
        df.to_csv(csv_file, index=False)

        return emotion, quote
    except Exception as e:
        # UI boundary: surface the error in the first output box rather than crash.
        print(f"Error encountered: {str(e)}")
        return f"Error: {str(e)}", ""

# Assemble the Gradio front-end around journal_interface.
diary_input = gr.Textbox(lines=5, placeholder="Enter your thoughts here...")
emotion_output = gr.Textbox(label="Detected Emotion")
quote_output = gr.Textbox(label="Generated Quote")

interface = gr.Interface(
    fn=journal_interface,
    inputs=diary_input,
    outputs=[emotion_output, quote_output],
    title="AI-Powered Personal Journal",
    description="Enter your thoughts, and the AI will detect the emotion and generate an inspirational quote based on it.",
    theme=gr.themes.Soft(),
)

# Launch the Gradio app with a public share link.
interface.launch(share=True)