Ashishkr committed on
Commit
ed744f1
1 Parent(s): fd6b9df

Update model.py

Browse files
Files changed (1) hide show
  1. model.py +9 -6
model.py CHANGED
@@ -55,20 +55,23 @@ tokenizer = transformers.AutoTokenizer.from_pretrained(
55
  # texts.append(f'{message} [/INST]')
56
  # return ''.join(texts)
57
 
58
-
59
def get_prompt(message: str, chat_history: list[tuple[str, str]], system_prompt: str) -> str:
    """Assemble the text prompt sent to the model.

    Args:
        message: The new user message to respond to.
        chat_history: Prior (user_input, response) exchanges, oldest first.
        system_prompt: Instruction text placed at the top of the prompt.

    Returns:
        The concatenated prompt string ending with ' Response: ' for the
        model to complete.
    """
    texts = [f'{system_prompt}\n']

    if chat_history:
        # All but the most recent exchange are rendered as plain
        # "user response" lines.
        for user_input, response in chat_history[:-1]:
            texts.append(f'{user_input} {response}\n')

        # The last exchange is merged with the new message into the
        # "input: ... Response:" template the model completes.
        last_user_input, last_response = chat_history[-1]
        texts.append(f' input: {last_user_input} {last_response} {message} Response: ')
    else:
        # Fix: an empty history previously crashed on chat_history[-1]
        # (IndexError). Fall back to a prompt with only the new message.
        texts.append(f' input: {message} Response: ')

    return ''.join(texts)
70
 
71
 
 
72
  def get_input_token_length(message: str, chat_history: list[tuple[str, str]], system_prompt: str) -> int:
73
  prompt = get_prompt(message, chat_history, system_prompt)
74
  input_ids = tokenizer([prompt], return_tensors='np', add_special_tokens=False)['input_ids']
 
55
  # texts.append(f'{message} [/INST]')
56
  # return ''.join(texts)
57
 
 
58
def get_prompt(message: str, chat_history: list[tuple[str, str]], system_prompt: str) -> str:
    """Build the full model prompt from the system prompt, chat history, and
    the new user message.

    Args:
        message: The new user message to respond to.
        chat_history: Prior (user_input, response) exchanges, oldest first.
        system_prompt: Instruction text placed at the top of the prompt.

    Returns:
        The concatenated prompt string ending with ' Response: '.
    """
    # With no history, the prompt is just the system text plus the message.
    if not chat_history:
        return f'{system_prompt}\n' + f' input: {message} Response: '

    # Split off the most recent exchange; earlier turns render as plain lines.
    *earlier_turns, (last_user_input, last_response) = chat_history

    parts = [f'{system_prompt}\n']
    parts.extend(f'{user_input} {response}\n' for user_input, response in earlier_turns)
    parts.append(f' input: {last_user_input} {last_response} {message} Response: ')
    return ''.join(parts)
72
 
73
 
74
+
75
  def get_input_token_length(message: str, chat_history: list[tuple[str, str]], system_prompt: str) -> int:
76
  prompt = get_prompt(message, chat_history, system_prompt)
77
  input_ids = tokenizer([prompt], return_tensors='np', add_special_tokens=False)['input_ids']