vishwask committed
Commit: d61423a
Parent: 0459b74

Update app.py

Files changed (1):
  1. app.py +11 -6
app.py CHANGED
@@ -48,14 +48,17 @@ def load_model(_docs):
     text_splitter = RecursiveCharacterTextSplitter(chunk_size=1024, chunk_overlap=256)
     texts = text_splitter.split_documents(docs)
     db = FAISS.from_documents(texts, embeddings)
-    model_name_or_path = "/home/user/app/Llama-2-13B-chat-GPTQ/"
+    #model_name_or_path = "/home/user/app/Llama-2-13B-chat-GPTQ/"
+    model_name_or_path = "/home/user/app/codeLlama"
+
     model_basename = "model"

     tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)

     model = AutoGPTQForCausalLM.from_quantized(
         model_name_or_path,
-        revision="gptq-8bit-128g-actorder_False",
+        #revision="gptq-8bit-128g-actorder_False",
+        revision="gptq-8bit-128g-actorder_True",
         model_basename=model_basename,
         use_safetensors=True,
         trust_remote_code=True,
@@ -82,7 +85,6 @@ def load_model(_docs):

     DEFAULT_SYSTEM_PROMPT = """
     You are a helpful, respectful and honest assistant with knowledge of machine learning, data science, computer science, Python programming language, mathematics, probability and statistics.
-    Take a deep breath and work on the given problem step-by-step.
     """.strip()

     def generate_prompt(prompt: str, system_prompt: str = DEFAULT_SYSTEM_PROMPT) -> str:
@@ -99,9 +101,12 @@ def load_model(_docs):
         streamer=streamer,)
     llm = HuggingFacePipeline(pipeline=text_pipeline, model_kwargs={"temperature": 0.5})

-    SYSTEM_PROMPT = ("Use the following pieces of context to answer the question at the end. "
-                     "If you don't know the answer, just say that you don't know, "
-                     "don't try to make up an answer.")
+    # SYSTEM_PROMPT = ("Use the following pieces of context to answer the question at the end. "
+    #                  "If you don't know the answer, just say that you don't know, "
+    #                  "don't try to make up an answer.")
+    SYSTEM_PROMPT = ("Use the following pieces of context along with general information you possess to answer the question at the end. "
+                     "If you don't know the answer, just say that you don't know, "
+                     "don't try to make up an answer.")

     template = generate_prompt("""{context} Question: {question} """,system_prompt=SYSTEM_PROMPT,) #Enter memory here!
     prompt = PromptTemplate(template=template, input_variables=["context", "question"]) #Add history here
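
For readers following the change: below is a minimal sketch of how the model-loading path reads after this commit. It is not a verbatim copy of app.py; it assumes `docs` and `embeddings` are built earlier in the file (not shown in this diff), and the local checkpoint paths are specific to this Space.

```python
# Minimal sketch of the post-commit loading path. Assumes `docs` and
# `embeddings` exist from earlier in app.py, which this diff does not show.
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from transformers import AutoTokenizer
from auto_gptq import AutoGPTQForCausalLM

# Chunk the documents and index the chunks for similarity search.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1024, chunk_overlap=256)
texts = text_splitter.split_documents(docs)
db = FAISS.from_documents(texts, embeddings)

# The commit swaps the local Llama-2-13B-chat-GPTQ checkout for a local
# CodeLlama GPTQ checkout; both paths are specific to this Space.
model_name_or_path = "/home/user/app/codeLlama"
model_basename = "model"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
model = AutoGPTQForCausalLM.from_quantized(
    model_name_or_path,
    revision="gptq-8bit-128g-actorder_True",  # was "gptq-8bit-128g-actorder_False"
    model_basename=model_basename,
    use_safetensors=True,
    trust_remote_code=True,
    # ... the remaining kwargs are cut off in the diff
)
```

One caveat worth noting: `revision` normally selects a branch of a Hub repo, so with a purely local directory like this one it likely has no effect; the active GPTQ variant is whichever one was downloaded into the local folder.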
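The system-prompt change plugs into the prompt assembly at the end of the last hunk. Since the diff does not show the body of `generate_prompt`, the Llama-2 `[INST] <<SYS>>` wrapper in the sketch below is an assumption based on the standard Llama-2 chat format, not code from this repo.

```python
# Hypothetical sketch of the prompt assembly around this change. The
# generate_prompt body is assumed, following the standard Llama-2 chat markup.
from langchain.prompts import PromptTemplate

DEFAULT_SYSTEM_PROMPT = """
You are a helpful, respectful and honest assistant with knowledge of machine learning, data science, computer science, Python programming language, mathematics, probability and statistics.
""".strip()

def generate_prompt(prompt: str, system_prompt: str = DEFAULT_SYSTEM_PROMPT) -> str:
    # Assumed implementation: wrap the user prompt in Llama-2 chat markup.
    return f"[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n{prompt} [/INST]"

# The commit relaxes the strict "context only" instruction so the model may
# also draw on its general knowledge when the retrieved chunks fall short.
SYSTEM_PROMPT = ("Use the following pieces of context along with general information "
                 "you possess to answer the question at the end. "
                 "If you don't know the answer, just say that you don't know, "
                 "don't try to make up an answer.")

template = generate_prompt("""{context} Question: {question} """, system_prompt=SYSTEM_PROMPT)
prompt = PromptTemplate(template=template, input_variables=["context", "question"])
```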