herMaster committed on
Commit
b76f5cf
1 Parent(s): c0e4cdb

removed the demo chat function

Browse files
Files changed (1) hide show
  1. app.py +24 -25
app.py CHANGED
@@ -34,7 +34,7 @@ print("loading the LLM......................................")
34
  # )
35
 
36
  llm = AutoModelForCausalLM.from_pretrained("TheBloke/Llama-2-7B-Chat-GGUF",
37
- model_file="llama-2-7b-chat.Q8_0.gguf",
38
  model_type="llama",
39
  # config = ctransformers.hub.AutoConfig,
40
  # hf = True
@@ -66,6 +66,7 @@ pdf_path = './100 Weird Facts About the Human Body.pdf'
66
  reader = PdfReader(pdf_path)
67
  text = ""
68
  num_of_pages = len(reader.pages)
 
69
  for page in range(num_of_pages):
70
  current_page = reader.pages[page]
71
  text += current_page.extract_text()
@@ -114,41 +115,39 @@ client.upload_records(
114
  print("Records uploaded........................................")
115
  print("###########################################################")
116
 
117
- # def chat(question):
118
 
119
- # hits = client.search(
120
- # collection_name="my_facts",
121
- # query_vector=encoder.encode(question).tolist(),
122
- # limit=3
123
- # )
124
- # context = []
125
- # for hit in hits:
126
- # context.append(list(hit.payload.values())[0])
127
 
128
- # context = context[0] + context[1] + context[2]
129
 
130
- # system_prompt = """You are a helpful assistant, you will use the provided context to answer user questions.
131
- # Read the given context before answering questions and think step by step. If you can not answer a user question based on
132
- # the provided context, inform the user. Do not use any other information for answering user. Provide a detailed answer to the question."""
133
 
134
 
135
- # B_INST, E_INST = "[INST]", "[/INST]"
136
 
137
- # B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
138
 
139
- # SYSTEM_PROMPT = B_SYS + system_prompt + E_SYS
140
 
141
- # instruction = f"""
142
- # Context: {context}
143
- # User: {question}"""
144
 
145
- # prompt_template = B_INST + SYSTEM_PROMPT + instruction + E_INST
146
 
147
- # result = llm(prompt_template)
148
- # return result
149
 
150
- def chat(question):
151
- return "hello " + question
152
 
153
  screen = gr.Interface(
154
  fn = chat,
 
34
  # )
35
 
36
  llm = AutoModelForCausalLM.from_pretrained("TheBloke/Llama-2-7B-Chat-GGUF",
37
+ model_file="llama-2-7b-chat.Q3_K_S.gguf",
38
  model_type="llama",
39
  # config = ctransformers.hub.AutoConfig,
40
  # hf = True
 
66
  reader = PdfReader(pdf_path)
67
  text = ""
68
  num_of_pages = len(reader.pages)
69
+
70
  for page in range(num_of_pages):
71
  current_page = reader.pages[page]
72
  text += current_page.extract_text()
 
115
  print("Records uploaded........................................")
116
  print("###########################################################")
117
 
118
+ def chat(question):
119
 
120
+ hits = client.search(
121
+ collection_name="my_facts",
122
+ query_vector=encoder.encode(question).tolist(),
123
+ limit=3
124
+ )
125
+ context = []
126
+ for hit in hits:
127
+ context.append(list(hit.payload.values())[0])
128
 
129
+ context = context[0] + context[1] + context[2]
130
 
131
+ system_prompt = """You are a helpful assistant, you will use the provided context to answer user questions.
132
+ Read the given context before answering questions and think step by step. If you can not answer a user question based on
133
+ the provided context, inform the user. Do not use any other information for answering user. Provide a detailed answer to the question."""
134
 
135
 
136
+ B_INST, E_INST = "[INST]", "[/INST]"
137
 
138
+ B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
139
 
140
+ SYSTEM_PROMPT = B_SYS + system_prompt + E_SYS
141
 
142
+ instruction = f"""
143
+ Context: {context}
144
+ User: {question}"""
145
 
146
+ prompt_template = B_INST + SYSTEM_PROMPT + instruction + E_INST
147
 
148
+ result = llm(prompt_template)
149
+ return result
150
 
 
 
151
 
152
  screen = gr.Interface(
153
  fn = chat,