Leri777 committed
Commit 7f13eee · verified · Parent: b78e9ba

Update app.py

Files changed (1): app.py +2 -5
app.py CHANGED
@@ -6,7 +6,6 @@ import torch
 from transformers import AutoModelForCausalLM, GemmaTokenizerFast, pipeline
 from langchain_huggingface import HuggingFacePipeline
 from langchain.prompts import PromptTemplate
-from langchain.chains import LLMChain
 
 # Logging setup
 log_file = '/tmp/app_debug.log'
@@ -33,7 +32,7 @@ else:
     logger.warning("GPU is not available. Proceeding with CPU setup.")
     model = AutoModelForCausalLM.from_pretrained(
         model_id,
-        device_map="cpu",
+        device_map="auto",
         low_cpu_mem_usage=True,
         token=os.getenv('HF_TOKEN'),
     )
@@ -92,9 +91,7 @@ def predict(message, chat_history=[]):
 # Gradio UI
 interface = gr.Interface(
     fn=predict,
-    inputs=[
-        gr.Textbox(label="User input")
-    ],
+    inputs=gr.Textbox(label="User input"),
     outputs="text",
     live=True,
 )
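For context on the two code-side changes: device_map="auto" lets the Accelerate integration place the weights on whatever hardware is available rather than pinning them to the CPU, and the LLMChain import could be dropped because LLMChain is deprecated in recent LangChain releases in favor of composing the prompt directly with the LLM via the runnable (LCEL) API. Below is a minimal sketch of that pattern, assuming a text-generation pipeline like the one the visible imports suggest; the model id, template, and generation settings are illustrative stand-ins, not taken from app.py.

import os
from transformers import AutoModelForCausalLM, GemmaTokenizerFast, pipeline
from langchain_huggingface import HuggingFacePipeline
from langchain.prompts import PromptTemplate

model_id = "google/gemma-1.1-2b-it"  # illustrative; the app's real model id is not shown in the diff

tokenizer = GemmaTokenizerFast.from_pretrained(model_id, token=os.getenv("HF_TOKEN"))
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",          # the value this commit switches to; requires the accelerate package
    low_cpu_mem_usage=True,
    token=os.getenv("HF_TOKEN"),
)

pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=128)
llm = HuggingFacePipeline(pipeline=pipe)

# Old style, whose import this commit removes:
#     from langchain.chains import LLMChain
#     chain = LLMChain(llm=llm, prompt=prompt)
# Runnable (LCEL) replacement:
prompt = PromptTemplate.from_template("{message}")
chain = prompt | llm
print(chain.invoke({"message": "Hello"}))

Note that device_map="auto" relies on accelerate being installed; with it, weights land on the GPU when one is present and fall back to CPU otherwise.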
 
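On the UI side, gr.Interface accepts a single component for inputs directly, so the one-element list wrapper was unnecessary. A minimal sketch of the resulting wiring, with a stub predict in place of the app's actual generation logic:

import gradio as gr

def predict(message, chat_history=[]):
    # Stub standing in for the app's real generation logic.
    return f"Echo: {message}"

interface = gr.Interface(
    fn=predict,
    inputs=gr.Textbox(label="User input"),  # a single component no longer needs the list wrapper
    outputs="text",
    live=True,
)

if __name__ == "__main__":
    interface.launch()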