AFischer1985 committed on
Commit
da429cd
·
verified ·
1 Parent(s): 59cb099

Update run.py

Browse files
Files changed (1) hide show
  1. run.py +7 -7
run.py CHANGED
@@ -342,8 +342,8 @@ def response(message, history):
342
 
343
  # Request Response from LLM:
344
  system2=None # system2 can be used as fictive first words of the AI, which are not displayed or stored
345
- print("RAG: "+rag)
346
- print("System: "+system+"\n\nMessage: "+message)
347
  prompt=extend_prompt(
348
  message, # current message of the user
349
  history, # complete history
@@ -352,7 +352,7 @@ def response(message, history):
352
  system2, # fictive first words of the AI (neither displayed nor stored)
353
  historylimit=historylimit # number of past messages to consider for response to current message
354
  )
355
- print(prompt)
356
 
357
  ## Request response from model
358
  #------------------------------
@@ -375,10 +375,10 @@ def response(message, history):
375
  )
376
  stream = client.text_generation(prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
377
  response = ""
378
- print("User: "+message+"\nAI: ")
379
  for text in stream:
380
  part=text.token.text
381
- print(part, end="", flush=True)
382
  response += part
383
  yield response
384
  if((myType=="1a")): #add RAG-results to chat-output if appropriate
@@ -406,7 +406,7 @@ def response(message, history):
406
  response="" #+"("+myType+")\n"
407
  buffer=""
408
  #print("URL: "+url)
409
- print("User: "+message+"\nAI: ")
410
  for text in requests.post(url, json=body, stream=True): #-H 'accept: application/json' -H 'Content-Type: application/json'
411
  if buffer is None: buffer=""
412
  buffer=str("".join(buffer))
@@ -423,7 +423,7 @@ def response(message, history):
423
  if(part.lstrip('\n\r').startswith("data: ")): part=part.lstrip('\n\r').replace("data: ", "")
424
  try:
425
  part = str(json.loads(part)["choices"][0]["text"])
426
- print(part, end="", flush=True)
427
  response=response+part
428
  buffer="" # reset buffer
429
  except Exception as e:
 
342
 
343
  # Request Response from LLM:
344
  system2=None # system2 can be used as fictive first words of the AI, which are not displayed or stored
345
+ #print("RAG: "+rag)
346
+ #print("System: "+system+"\n\nMessage: "+message)
347
  prompt=extend_prompt(
348
  message, # current message of the user
349
  history, # complete history
 
352
  system2, # fictive first words of the AI (neither displayed nor stored)
353
  historylimit=historylimit # number of past messages to consider for response to current message
354
  )
355
+ print("\n\nPrompt:\n"+prompt)
356
 
357
  ## Request response from model
358
  #------------------------------
 
375
  )
376
  stream = client.text_generation(prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
377
  response = ""
378
+ #print("User: "+message+"\nAI: ")
379
  for text in stream:
380
  part=text.token.text
381
+ #print(part, end="", flush=True)
382
  response += part
383
  yield response
384
  if((myType=="1a")): #add RAG-results to chat-output if appropriate
 
406
  response="" #+"("+myType+")\n"
407
  buffer=""
408
  #print("URL: "+url)
409
+ #print("User: "+message+"\nAI: ")
410
  for text in requests.post(url, json=body, stream=True): #-H 'accept: application/json' -H 'Content-Type: application/json'
411
  if buffer is None: buffer=""
412
  buffer=str("".join(buffer))
 
423
  if(part.lstrip('\n\r').startswith("data: ")): part=part.lstrip('\n\r').replace("data: ", "")
424
  try:
425
  part = str(json.loads(part)["choices"][0]["text"])
426
+ #print(part, end="", flush=True)
427
  response=response+part
428
  buffer="" # reset buffer
429
  except Exception as e: