Giang07 commited on
Commit
5f546a5
1 Parent(s): 843ba45

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -42
app.py CHANGED
@@ -1,56 +1,24 @@
# app.py
#
# Streamlit demo: loads a QLoRA-tuned Llama-2 chat model from the Hugging Face
# Hub and exposes a single-field English->French translation UI via LangChain.
import os
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from langchain.llms import HuggingFacePipeline
from langchain import PromptTemplate, LLMChain

# Hub repo to load. NOTE(review): this is a 7B model — loading it on every
# Streamlit rerun is very expensive; consider st.cache_resource.
model_name = "Giang07/Llama-2-7b-chat-QLoRa"

# Local directory for the (currently disabled) on-disk cache below.
model_dir = "/home/user/model"

# Presence of config.json would indicate a previously saved local copy.
config_path = os.path.join(model_dir, "config.json")
# The local-cache branch is intentionally disabled for now: the model is
# always fetched from the Hub. Kept commented out pending a decision.
# if not os.path.exists(config_path):
st.write("Downloading model...")
# os.makedirs(model_dir, exist_ok=True)
model = AutoModelForCausalLM.from_pretrained(model_name)
# model.save_pretrained(model_dir)
tokenizer = AutoTokenizer.from_pretrained(model_name)
# tokenizer.save_pretrained(model_dir)
# else:
#     st.write("Loading model from cache...")
#     model = AutoModelForCausalLM.from_pretrained(model_dir)
#     tokenizer = AutoTokenizer.from_pretrained(model_dir)

# Wrap the model/tokenizer in a transformers text-generation pipeline.
hf_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Adapt the transformers pipeline to LangChain's LLM interface.
llm = HuggingFacePipeline(pipeline=hf_pipeline)

# Prompt template: one input variable, rendered into a translation request.
prompt_template = PromptTemplate(
    input_variables=["input_text"],
    template="Translate the following English text to French: {input_text}"
)

# BUG FIX: LLMChain's keyword is `prompt`, not `prompt_template` — the
# original call raised a validation error at startup.
llm_chain = LLMChain(prompt=prompt_template, llm=llm)

# Streamlit application
st.title("Ti6 Llama2-QLoRa Test")

# Text input field
input_text = st.text_input("What is your words:")

# Button to generate response
if st.button("Translate"):
    if input_text:
        # Generate response using LangChain
        response = llm_chain.run({"input_text": input_text})
        # Display the response
        st.write("Answer:", response)
    else:
        st.write("Please enter some text.")
 
 
 
# app.py
#
# Streamlit demo: loads a Hugging Face model via model_utils and exposes a
# single-field translation UI.
import streamlit as st
from model_utils import load_model, create_pipeline, generate_text


@st.cache_resource
def _get_pipeline():
    """Build the text-generation pipeline once per server process.

    Streamlit re-runs this entire script on every widget interaction;
    without st.cache_resource the model would be re-loaded from scratch
    on every button click, which is prohibitively slow for an LLM.

    Returns:
        The pipeline object produced by ``create_pipeline``.
    """
    model, tokenizer = load_model()
    return create_pipeline(model, tokenizer)


# Load model and tokenizer (cached — only slow on the first run).
st.write("Loading model...")
hf_pipeline = _get_pipeline()
st.write("Model loaded successfully!")

# Streamlit application
st.title("Hugging Face Model with LangChain and Streamlit")

# Text input field
input_text = st.text_input("Enter text to translate:")

# Button to generate response
if st.button("Translate"):
    if input_text:
        # Generate response using LangChain
        response = generate_text(hf_pipeline, input_text)
        # Display the response
        st.write("Translated text:", response)
    else:
        st.write("Please enter some text.")