Mohannad committed on
Commit e33935a
1 Parent(s): 8269cc1

Update app.py

Files changed (1)
  1. app.py +4 -5
app.py CHANGED
@@ -27,7 +27,7 @@ def load_llm():
     # Use a pipeline for later
     from transformers import pipeline
 
-    globals()['pipe'] = pipeline("text-generation",
+    pipe = pipeline("text-generation",
                      model=model,
                      tokenizer= tokenizer,
                      torch_dtype=torch.bfloat16,
@@ -39,11 +39,11 @@ def load_llm():
                      eos_token_id=tokenizer.eos_token_id
                      )
 
-    globals()['llm'] = HuggingFacePipeline(pipeline = pipe, model_kwargs = {'temperature':0})
+    llm = HuggingFacePipeline(pipeline = pipe, model_kwargs = {'temperature':0})
 
+    return pipe, llm
 
-
-load_llm()
+pipe, llm = load_llm()
 
 import json
 import textwrap
@@ -107,7 +107,6 @@ def answer(context, question):
     template = get_prompt(instruction, system_prompt)
     print(template)
 
-    llm = globals()['llm']
     prompt = PromptTemplate(template=template, input_variables=["text"])
     llm_chain = LLMChain(prompt=prompt, llm=llm)
     output = llm_chain.run(question)
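For orientation: the commit stops stashing objects in globals(). load_llm() now builds the pipeline, wraps it in a HuggingFacePipeline, and returns both, and a new module-level pipe, llm = load_llm() binds the names that answer() later uses. A minimal sketch of the refactored function follows; the checkpoint name, the model/tokenizer loading, and the generation kwargs elided by the diff (original lines 34-38) are assumptions for illustration, not part of the commit.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from langchain.llms import HuggingFacePipeline

def load_llm():
    # Hypothetical checkpoint; the diff does not show how model/tokenizer are built.
    model_id = "meta-llama/Llama-2-7b-chat-hf"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)

    # Use a pipeline for later
    pipe = pipeline("text-generation",
                    model=model,
                    tokenizer=tokenizer,
                    torch_dtype=torch.bfloat16,
                    max_new_tokens=512,  # assumed; the diff elides lines 34-38
                    eos_token_id=tokenizer.eos_token_id)

    llm = HuggingFacePipeline(pipeline=pipe, model_kwargs={'temperature': 0})

    # Returning the pair (instead of writing to globals()) makes the
    # dependency explicit for callers.
    return pipe, llm

pipe, llm = load_llm()

On the answer() side, the only change is dropping the llm = globals()['llm'] lookup, since llm is now an ordinary module-level name. A sketch of the surrounding function, with get_prompt, instruction, and system_prompt stubbed only so the example is self-contained (the real helpers live elsewhere in app.py):

from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

# Stubs standing in for helpers defined elsewhere in app.py.
system_prompt = "You are a helpful assistant."
instruction = "Answer using this context: {text}"

def get_prompt(instruction, system_prompt):
    return f"{system_prompt}\n\n{instruction}"

def answer(context, question):
    template = get_prompt(instruction, system_prompt)
    print(template)

    prompt = PromptTemplate(template=template, input_variables=["text"])
    llm_chain = LLMChain(prompt=prompt, llm=llm)  # llm comes from load_llm() above
    output = llm_chain.run(question)
    return output  # assumed; the quoted hunk ends at run()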