import streamlit as st
from transformers import (
    MarianMTModel,
    MarianTokenizer,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    pipeline
)

st.title("Multi Chatbot")

# Registry of the available models and the descriptions shown in the sidebar.
models = {
    "English to French": {
        "name": "Helsinki-NLP/opus-mt-en-fr",
        "description": "Translate English text to French."
    },
    "Sentiment Analysis": {
        "name": "distilbert-base-uncased-finetuned-sst-2-english",
        "description": "Analyze the sentiment of input text."
    },
    "Story Generator": {
        "name": "distilgpt2",
        "description": "Generate creative stories based on input."
    }
}

st.sidebar.header("Choose a Model")
selected_model_key = st.sidebar.radio("Select a Model:", list(models.keys()))
model_name = models[selected_model_key]["name"]
model_description = models[selected_model_key]["description"]
st.sidebar.markdown(f"### Model Description\n{model_description}")

# Cache each loader so weights are downloaded and initialized only once per
# session rather than on every Streamlit rerun.
@st.cache_resource
def load_english_to_french():
    tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-fr")
    model = MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-fr")
    return tokenizer, model

@st.cache_resource
def load_sentiment_analysis():
    return pipeline(
        "sentiment-analysis",
        model="distilbert-base-uncased-finetuned-sst-2-english"
    )

@st.cache_resource
def load_story_generator():
    tokenizer = GPT2Tokenizer.from_pretrained("distilgpt2")
    model = GPT2LMHeadModel.from_pretrained("distilgpt2")
    tokenizer.pad_token = tokenizer.eos_token  # GPT-2 has no pad token; reuse EOS
    return tokenizer, model

# Load only the model the user selected in the sidebar.
if selected_model_key == "English to French":
    st.write("Loading English to French model...")
    en_fr_tokenizer, en_fr_model = load_english_to_french()
    st.write("English to French model loaded successfully.")
elif selected_model_key == "Sentiment Analysis":
    st.write("Loading Sentiment Analysis model...")
    sentiment_analyzer = load_sentiment_analysis()
    st.write("Sentiment Analysis model loaded successfully.")
elif selected_model_key == "Story Generator":
    st.write("Loading Story Generator model...")
    story_gen_tokenizer, story_gen_model = load_story_generator()
    st.write("Story Generator model loaded successfully.")

user_input = st.text_input("Enter your query:")

if user_input:
    if selected_model_key == "English to French":
        try:
            inputs = en_fr_tokenizer(user_input, return_tensors="pt", truncation=True, padding=True)
            outputs = en_fr_model.generate(inputs["input_ids"], max_length=150, num_return_sequences=1)
            translated_text = en_fr_tokenizer.decode(outputs[0], skip_special_tokens=True)
            st.write(f"Translated Text: {translated_text}")
        except Exception as e:
            st.error(f"Error during translation: {e}")
    elif selected_model_key == "Sentiment Analysis":
        try:
            result = sentiment_analyzer(user_input)[0]
            st.write(f"Sentiment: {result['label']}")
            st.write(f"Confidence: {result['score']:.2f}")
        except Exception as e:
            st.error(f"Error during sentiment analysis: {e}")
    elif selected_model_key == "Story Generator":
        try:
            inputs = story_gen_tokenizer(user_input, return_tensors="pt", truncation=True, padding=True)
            outputs = story_gen_model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],  # pass the attention mask explicitly
                max_length=200,
                num_return_sequences=1,
                do_sample=True,  # sampling must be enabled for temperature to take effect
                temperature=0.7,
                no_repeat_ngram_size=2
            )
            story = story_gen_tokenizer.decode(outputs[0], skip_special_tokens=True)
            st.write(f"Generated Story: {story}")
        except Exception as e:
            st.error(f"Error during story generation: {e}")
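
# Usage note (assumption: the script is saved as app.py; the filename is illustrative):
#   streamlit run app.py
# Requires the streamlit and transformers packages plus a backend such as PyTorch;
# the MarianMT tokenizer additionally depends on the sentencepiece package.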