ECUiVADE committed
Commit 5ba76d3
Parent: d025dda

Update app.py

Files changed (1): app.py (+12, -10)
app.py CHANGED
@@ -59,7 +59,7 @@ Age=""
 chat_log_name =""
 
 from llama_cpp import Llama
-llm = Llama(model_path=model_file, model_type="mistral",n_ctx = 2048)
+llm = Llama(model_path=model_file, model_type="mistral",n_gpu_layers=-1,n_ctx = 2048)
 
 def get_drive_service():
     credentials = service_account.Credentials.from_service_account_file(
@@ -146,17 +146,19 @@ def generate(prompt, history):
 
     response = ""
 
-    # while(len(response) < 1):
-    #     output = llm(context, max_tokens=400, stop=["Nurse:"], echo=False)
-    #     response = output["choices"][0]["text"]
-    #     response = response.strip()
+    while(len(response) < 1):
+        output = llm(context, max_tokens=400, stop=["Nurse:"], echo=False)
+        response = output["choices"][0]["text"]
+        response = response.strip()
+    yield response
 
-    for output in llm(input, stream=True, max_tokens=100, ):
-        piece = output['choices'][0]['text']
-        response += piece
-        chatbot[-1] = (chatbot[-1][0], response)
 
-        yield response
+    # for output in llm(input, stream=True, max_tokens=100, ):
+    #     piece = output['choices'][0]['text']
+    #     response += piece
+    #     chatbot[-1] = (chatbot[-1][0], response)
+
+    #     yield response
 
     context += response
     print (context)
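
For reference, a minimal sketch of the two llama-cpp-python call styles this commit switches between: a blocking completion call (re-enabled) and a token-streaming call (commented out). The model path and prompt below are placeholders, not values from the repository.

# Minimal sketch, not from the repository: blocking vs. streaming
# completion with llama-cpp-python. Model path and prompt are placeholders.
from llama_cpp import Llama

llm = Llama(
    model_path="mistral-7b-instruct.Q4_K_M.gguf",  # placeholder model file
    n_gpu_layers=-1,  # offload all layers to the GPU, as this commit enables
    n_ctx=2048,
)

prompt = "Doctor: How are you feeling today?\nNurse:"  # illustrative prompt

# Blocking call (the path this commit re-enables): returns a single dict
# holding the full completion text.
output = llm(prompt, max_tokens=400, stop=["Nurse:"], echo=False)
print(output["choices"][0]["text"].strip())

# Streaming call (the path this commit comments out): yields chunk dicts
# one at a time as tokens are generated.
for chunk in llm(prompt, stream=True, max_tokens=100):
    print(chunk["choices"][0]["text"], end="", flush=True)

The tradeoff implied by the diff: the streaming path can update the chat UI piece by piece, while the new blocking loop retries until a non-empty response is produced and only then yields it once.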