tanyuzhou committed on
Commit
6bb9255
β€’
1 Parent(s): f078519

update requirements

Browse files
Files changed (2) hide show
  1. app.py +2 -7
  2. requirements.txt +2 -2
app.py CHANGED
@@ -1,18 +1,13 @@
1
  import gradio as gr
2
 
3
  import torch
4
- from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
5
  from transformers import TextStreamer
6
 
7
  import spaces
8
 
9
- quantization_config = BitsAndBytesConfig(
10
- load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16
11
- )
12
-
13
  # Load model and tokenizer
14
- model = AutoModelForCausalLM.from_pretrained("Rorical/0-roleplay", return_dict=True, trust_remote_code=True).to(dtype=torch.float16)
15
- model = model.to(device="cuda")
16
  tokenizer = AutoTokenizer.from_pretrained("Rorical/0-roleplay", trust_remote_code=True)
17
  tokenizer.chat_template = "{% for message in messages %}{{'' + ((message['role'] + '\n') if message['role'] != '' else '') + message['content'] + '' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ 'ζ˜Ÿι‡Ž\n' }}{% endif %}"
18
 
 
1
  import gradio as gr
2
 
3
  import torch
4
+ from transformers import AutoModelForCausalLM, AutoTokenizer
5
  from transformers import TextStreamer
6
 
7
  import spaces
8
 
 
 
 
 
9
  # Load model and tokenizer
10
+ model = AutoModelForCausalLM.from_pretrained("Rorical/0-roleplay", return_dict=True, trust_remote_code=True, load_in_4bit=True)
 
11
  tokenizer = AutoTokenizer.from_pretrained("Rorical/0-roleplay", trust_remote_code=True)
12
  tokenizer.chat_template = "{% for message in messages %}{{'' + ((message['role'] + '\n') if message['role'] != '' else '') + message['content'] + '' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ 'ζ˜Ÿι‡Ž\n' }}{% endif %}"
13
 
requirements.txt CHANGED
@@ -1,8 +1,8 @@
1
  accelerate
 
2
  bitsandbytes
3
  timm
4
  einops
5
  torch
6
  torchvision
7
- Pillow
8
- transformers
 
1
  accelerate
2
+ transformers==4.34.0
3
  bitsandbytes
4
  timm
5
  einops
6
  torch
7
  torchvision
8
+ Pillow