import logging
import sys

import streamlit as st
from huggingface_hub import login
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Alternative: read the token from a .env file instead of Streamlit secrets.
# import os
# from dotenv import load_dotenv
# load_dotenv()
# HF_TOKEN = os.environ.get("HF_API_TOKEN")
HF_TOKEN = st.secrets["HF_API_TOKEN"]
login(token=HF_TOKEN)

# Set up logging to stdout. basicConfig already attaches a stdout handler,
# so adding a second StreamHandler would print every log line twice.
logging.basicConfig(stream=sys.stdout, level=logging.INFO)


# Cache the tokenizer, model, and pipeline so Streamlit does not reload the
# 7B weights on every rerun of the script. Building the pipeline once also
# removes the duplicate pipeline() call from the earlier draft.
@st.cache_resource
def load_pipeline():
    tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
    model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
    return pipeline("text-generation", model=model, tokenizer=tokenizer)


pipe = load_pipeline()

question = st.text_input("Enter your question", "Tell me about transformers.")

# Generate text: max_length caps prompt plus completion at 50 tokens, and
# truncation trims an over-long prompt to fit the model's context.
result = pipe(question, max_length=50, truncation=True)

# print() only reaches the server console; st.write() renders in the app UI.
print(result)
st.write(result[0]["generated_text"])
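# How to run (a sketch; the filename app.py is hypothetical): put the Hugging
# Face token in .streamlit/secrets.toml, which is where st.secrets reads from:
#
#   HF_API_TOKEN = "hf_..."
#
# then start the app with:
#
#   streamlit run app.py
#
# The login(token=...) call authenticates Hub downloads; depending on the
# repo's gating settings, the token's account may also need to have accepted
# the model's license for from_pretrained to succeed.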