"""Streamlit app that generates text with a quantized Mistral-7B model."""
import streamlit as st
from ctransformers import AutoModelForCausalLM

# Streamlit re-executes this whole script on every user interaction, so a
# plain module-level load would reload the multi-gigabyte model on each
# rerun.  st.cache_resource keeps one loaded instance alive across reruns.
@st.cache_resource
def _load_model():
    """Load the quantized Mistral-7B GGUF model via ctransformers.

    Returns:
        The ctransformers causal-LM object; calling it with a prompt
        string returns generated text.
    """
    return AutoModelForCausalLM.from_pretrained(
        "TheBloke/Mistral-7B-v0.1-GGUF",
        model_file="mistral-7b-v0.1.Q4_K_M.gguf",
        model_type="mistral",
        # NOTE(review): 50 layers offloaded to GPU — assumes a GPU build of
        # ctransformers with enough VRAM; confirm for the deploy target.
        gpu_layers=50,
    )

# Module-level name preserved so generate_response() keeps working unchanged.
llm = _load_model()

def generate_response(prompt):
    """Run the language model on *prompt* and return the generated text."""
    completion = llm(prompt)
    return completion

def main():
    """Render the Streamlit UI: a prompt box and the model's response."""
    st.title("AI Text Generation App")

    prompt = st.text_input("Enter your prompt:")
    # Guard clause: nothing to do until the user submits a non-empty prompt.
    if not prompt:
        return

    try:
        response = generate_response(prompt)
        # Show the model output directly below the input box.
        st.subheader("Generated Response")
        st.write(response)
    except Exception as e:
        # Surface any failure (model error, OOM, ...) in the UI rather
        # than crashing the app.
        st.error(f"Error generating response: {e}")


if __name__ == "__main__":
    main()