tanyuzhou committed on
Commit
d290873
β€’
1 Parent(s): bbeb0a6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -7,8 +7,8 @@ from transformers import TextStreamer
7
  import spaces
8
 
9
  # Load model and tokenizer
10
- model = AutoModelForCausalLM.from_pretrained("Rorical/0-roleplay", return_dict=True, trust_remote_code=True, low_cpu_mem_usage=False)
11
- tokenizer = AutoTokenizer.from_pretrained("Rorical/0-roleplay", trust_remote_code=True, low_cpu_mem_usage=False)
12
  tokenizer.chat_template = "{% for message in messages %}{{'' + ((message['role'] + '\n') if message['role'] != '' else '') + message['content'] + '' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ 'ζ˜Ÿι‡Ž\n' }}{% endif %}"
13
 
14
  # Define the response function
 
7
  import spaces
8
 
9
  # Load model and tokenizer
10
+ model = AutoModelForCausalLM.from_pretrained("Rorical/0-roleplay", return_dict=True, trust_remote_code=True)
11
+ tokenizer = AutoTokenizer.from_pretrained("Rorical/0-roleplay", trust_remote_code=True)
12
  tokenizer.chat_template = "{% for message in messages %}{{'' + ((message['role'] + '\n') if message['role'] != '' else '') + message['content'] + '' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ 'ζ˜Ÿι‡Ž\n' }}{% endif %}"
13
 
14
  # Define the response function