vishwask committed
Commit 75ded75
1 parent: 6d9ed4f

Update app.py

Files changed (1): app.py (+10 -10)
app.py CHANGED
@@ -48,8 +48,8 @@ def load_model(_docs):
     text_splitter = RecursiveCharacterTextSplitter(chunk_size=1024, chunk_overlap=256)
     texts = text_splitter.split_documents(docs)
     db = FAISS.from_documents(texts, embeddings)
-    model_name_or_path = "/home/user/app/Llama-2-13B-chat-GPTQ/"
-    #model_name_or_path = "/home/user/app/codeLlama"
+    #model_name_or_path = "/home/user/app/Llama-2-13B-chat-GPTQ/"
+    #model_name_or_path = "/home/user/app/codeLlama/"
 
     model_basename = "model"
 
@@ -57,8 +57,8 @@ def load_model(_docs):
 
     model = AutoGPTQForCausalLM.from_quantized(
         model_name_or_path,
-        revision="gptq-8bit-128g-actorder_False",
-        #revision="gptq-8bit-128g-actorder_True",
+        #revision="gptq-8bit-128g-actorder_False",
+        revision="gptq-8bit-128g-actorder_True",
         model_basename=model_basename,
         use_safetensors=True,
         trust_remote_code=True,
@@ -105,12 +105,12 @@ def load_model(_docs):
         streamer=streamer,)
     llm = HuggingFacePipeline(pipeline=text_pipeline, model_kwargs={"temperature": 0.1})
 
-    SYSTEM_PROMPT = ("Use the following pieces of context to answer the question at the end. "
-                     "If you don't know the answer, just say that you don't know, "
-                     "don't try to make up an answer.")
-    # SYSTEM_PROMPT = ("Use the following pieces of context along with general information you possess to answer the question at the end."
-    #                  "If you don't know the answer, just say that you don't know, "
-    #                  "don't try to make up an answer. Answer what is asked strictly and do not provide further questions at all.")
+    # SYSTEM_PROMPT = ("Use the following pieces of context to answer the question at the end. "
+    #                  "If you don't know the answer, just say that you don't know, "
+    #                  "don't try to make up an answer.")
+    SYSTEM_PROMPT = ("Use the following pieces of context along with general information you possess to answer the question at the end."
+                     "If you don't know the answer, just say that you don't know, "
+                     "don't try to make up an answer.")
 
     template = generate_prompt("""{context} Question: {question} """,system_prompt=SYSTEM_PROMPT,) #Enter memory here!
     prompt = PromptTemplate(template=template, input_variables=["context", "question"]) #Add history here
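
For reference, here is the loading pattern the second hunk retunes, as a standalone sketch. Note that after this commit both model_name_or_path assignments are commented out, so the variable must be defined elsewhere (or one line re-enabled) before from_quantized runs. The hub repo id and device below are assumptions, not values from app.py, which points at a local checkout instead.

# Standalone sketch of the GPTQ loading pattern from this diff; assumes the
# auto-gptq and transformers packages are installed.
from auto_gptq import AutoGPTQForCausalLM
from transformers import AutoTokenizer

model_name_or_path = "TheBloke/Llama-2-13B-chat-GPTQ"  # hypothetical hub id

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
model = AutoGPTQForCausalLM.from_quantized(
    model_name_or_path,
    # The commit flips this to the act-order variant of the 8-bit/128-group
    # quantization; `revision` selects the matching branch of the checkpoint.
    revision="gptq-8bit-128g-actorder_True",
    model_basename="model",
    use_safetensors=True,
    trust_remote_code=True,
    device="cuda:0",  # assumption; app.py's device handling is outside this diff
)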
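
generate_prompt is defined elsewhere in app.py and does not appear in this diff. For a Llama-2-chat model it most likely wraps the system prompt in the model's special markers; a minimal sketch, assuming the standard Llama-2 chat format:

# Hypothetical reconstruction of generate_prompt; the real helper lives
# elsewhere in app.py and may differ.
B_INST, E_INST = "[INST]", "[/INST]"
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"

def generate_prompt(prompt: str, system_prompt: str) -> str:
    # Llama-2-chat expects the system prompt inside <<SYS>> markers and the
    # whole turn inside [INST] ... [/INST].
    return f"{B_INST} {B_SYS}{system_prompt}{E_SYS}{prompt} {E_INST}"

With this template, LangChain later substitutes the retrieved chunks for {context} and the user query for {question}.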
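
The hunks end before db, llm, and prompt are consumed. A common way to wire them together is LangChain's RetrievalQA, sketched below under that assumption:

# Sketch only: RetrievalQA is an assumption, since the code that consumes
# db, llm, and prompt falls outside this diff.
from langchain.chains import RetrievalQA

def build_qa_chain(db, llm, prompt):
    return RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",  # stuff retrieved chunks into the {context} slot
        retriever=db.as_retriever(search_kwargs={"k": 2}),
        chain_type_kwargs={"prompt": prompt},
        return_source_documents=True,
    )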