import os

import torch
from huggingface_hub import login
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import gradio as gr

# Retrieve the Hugging Face token from the environment variable
hf_token = os.getenv('HF_TOKEN')

# Authenticate using the token
if hf_token:
    login(token=hf_token)
else:
    print("Error: Hugging Face token not found.")
    exit(1)
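# On a Hugging Face Space, HF_TOKEN can be stored as a repository secret, which
# the platform exposes to the app as an environment variable.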

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3.1-70B")
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Meta-Llama-3.1-70B",
    rope_scaling={'type': 'llama3', 'factor': 8.0},  # Adjust the type and factor as needed
    torch_dtype=torch.bfloat16,  # half precision; a 70B model is far too large to load in fp32
    device_map="auto",           # requires the `accelerate` package; shards weights across available devices
)

# Create a text generation pipeline (defined for convenience; predict() below calls model.generate directly)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Load sentiment analysis pipeline
sentiment_analyzer = pipeline("sentiment-analysis")
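# With no model argument, this downloads the library's default sentiment
# checkpoint (currently distilbert-base-uncased-finetuned-sst-2-english), whose
# 'POSITIVE'/'NEGATIVE' labels are what predict_with_emotion() checks below.
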
# Initialize conversation context
context = []
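# Note: this module-level list is shared across all Gradio sessions, so
# concurrent users would interleave their histories; gr.State offers per-user
# state if that matters for your deployment.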

def predict(context, input_text):
    """Generate a response based on the conversation context and the new input."""
    context.append(input_text)
    inputs = tokenizer(" ".join(context), return_tensors="pt").to(model.device)
    outputs = model.generate(
        inputs.input_ids,
        attention_mask=inputs.attention_mask,
        max_new_tokens=200,  # bound the reply itself; max_length would also count the growing prompt
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode only the newly generated tokens; decoding outputs[0] in full would echo the prompt back
    response = tokenizer.decode(outputs[0][inputs.input_ids.shape[-1]:], skip_special_tokens=True)
    context.append(response)
    return response

def predict_with_emotion(context, input_text):
    """Generate response with emotion detection."""
    sentiment = sentiment_analyzer(input_text)[0]['label']
    response = predict(context, input_text)
    if sentiment == 'NEGATIVE':
        response = "I'm sorry to hear that. " + response
    elif sentiment == 'POSITIVE':
        response = "That's great! " + response
    return response
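
# Example (hypothetical input/output): for "I lost my keys today" the analyzer
# returns NEGATIVE, so the generated reply comes back prefixed with
# "I'm sorry to hear that. ".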

def chatbot(input_text):
    """Gradio chatbot function."""
    global context
    response = predict_with_emotion(context, input_text)
    return response

# Create Gradio interface
iface = gr.Interface(fn=chatbot, inputs="text", outputs="text", title="Contextual Emotion-Aware LLaMA-70B Chatbot")
iface.launch()
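
# When deployed as a Gradio Space, this app.py is the entry point the platform
# runs; launch() also works for local testing.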