# import required libraries
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
# sample data: a client's emotion ratios as percentages (sum to ~100)
# message = { 'angry': 13.207298517227173,
#             'disgust': 0.12863066513091326,
#             'fear': 0.11500244727358222,
#             'happy': 18.36708039045334,
#             'sad': 62.15316653251648,
#             'surprise': 0.025137534248642623,
#             'neutral': 6.003682315349579 }
def load_model():  # renamed from train_model: this loads a pretrained model, no training occurs
    # load the tokenizer for preprocessing any inputs
    tokenizer = AutoTokenizer.from_pretrained("stabilityai/StableBeluga-7B", use_fast=False)
    # load the LLM in half precision; device_map="auto" places layers on available devices
    model = AutoModelForCausalLM.from_pretrained(
        "stabilityai/StableBeluga-7B",
        torch_dtype=torch.float16,
        low_cpu_mem_usage=True,
        device_map="auto",
    )
    return (model, tokenizer)
def process_emotions(model, tokenizer, emotion_data):
    system_prompt = "### System:\nYou are StableBeluga, an AI programmed to follow instructions with high accuracy. Your current task is to assist with enhancing human emotional regulation and intelligence. You will receive information about the emotional ratios of a client. Based on that information, please provide exactly one original and creative journal prompt to facilitate the client's self-reflection and emotional exploration.\n\n"
    message = emotion_data
    # assemble the prompt in the StableBeluga instruction format
    prompt = f"{system_prompt}### User:\n{message}\n\n### Assistant:\n"
    # move inputs to the model's device rather than hard-coding "cuda",
    # since device_map="auto" decides placement
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # nucleus sampling: top_p=0.95 samples from the smallest token set covering
    # 95% cumulative probability; top_k=0 disables top-k filtering
    output = model.generate(**inputs, do_sample=True, top_p=0.95, top_k=0, max_new_tokens=256)
    # decode only the newly generated tokens, skipping the echoed prompt
    journal_prompt = tokenizer.decode(output[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
    return journal_prompt
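
# A minimal usage sketch (assumes a GPU with enough memory for the 7B model
# in fp16, roughly 14 GB). The emotion_data dict reuses the sample values from
# the comment above; any mapping of emotion name to percentage works the same way.
if __name__ == "__main__":
    model, tokenizer = load_model()
    emotion_data = {
        'angry': 13.207298517227173,
        'disgust': 0.12863066513091326,
        'fear': 0.11500244727358222,
        'happy': 18.36708039045334,
        'sad': 62.15316653251648,
        'surprise': 0.025137534248642623,
        'neutral': 6.003682315349579,
    }
    print(process_emotions(model, tokenizer, emotion_data))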