mabaochang committed
Commit 3b3e4a4
1 Parent(s): 9901c38

Update README.md

Files changed (1):
  1. README.md (+3 -9)
README.md CHANGED
@@ -82,20 +82,14 @@ import sys
 model_path = "./" # You can modify the path for storing the local model
 model = AutoModelForCausalLM.from_pretrained(model_path)
 tokenizer = AutoTokenizer.from_pretrained(model_path)
-history = "" # save dialog history
-max_history_length = 1024
-
 print("Human:")
 line = input()
 while line:
-    history += 'Human: ' + line.strip() + '\n\nAssistant:'
-    if len(history) > max_history_length:
-        history = history[-1024:]
-    input_ids = tokenizer(history, return_tensors="pt").input_ids
+    inputs = 'Human: ' + line.strip() + '\n\nAssistant:'
+    input_ids = tokenizer(inputs, return_tensors="pt").input_ids
     outputs = model.generate(input_ids, max_new_tokens=200, do_sample = True, top_k = 30, top_p = 0.85, temperature = 0.35, repetition_penalty=1.2)
     rets = tokenizer.batch_decode(outputs, skip_special_tokens=True)
-    print("Assistant:\n" + rets[0].strip().replace(history, ""))
-    history += "\n" + rets[0].strip().replace(history, "")
+    print("Assistant:\n" + rets[0].strip().replace(inputs, ""))
     print("\n------------------------------------------------\nHuman:")
     line = input()
 
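
For reference, here is the post-commit snippet reassembled as a self-contained script. This is a minimal sketch: the `from transformers import ...` line is assumed from the surrounding README rather than shown in this hunk, and everything else is taken directly from the diff above. With the history variables removed, each turn is prompted independently, so the model no longer sees earlier exchanges.

```python
# Minimal sketch of the updated single-turn chat loop from this commit.
# Assumes the standard transformers API; the model path and generation
# settings are taken from the README hunk above.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "./"  # You can modify the path for storing the local model
model = AutoModelForCausalLM.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_path)

print("Human:")
line = input()
while line:
    # Each turn is prompted on its own; dialog history is no longer kept.
    inputs = "Human: " + line.strip() + "\n\nAssistant:"
    input_ids = tokenizer(inputs, return_tensors="pt").input_ids
    outputs = model.generate(
        input_ids,
        max_new_tokens=200,
        do_sample=True,
        top_k=30,
        top_p=0.85,
        temperature=0.35,
        repetition_penalty=1.2,
    )
    rets = tokenizer.batch_decode(outputs, skip_special_tokens=True)
    # generate() echoes the prompt, so strip it to print only the reply.
    print("Assistant:\n" + rets[0].strip().replace(inputs, ""))
    print("\n------------------------------------------------\nHuman:")
    line = input()
```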