rphrp1985 committed on
Commit
1d433d0
1 Parent(s): 9ccc3b3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -8
app.py CHANGED
@@ -46,7 +46,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
46
 
47
  # model_id = "mistralai/Mistral-7B-v0.3"
48
 
49
- model_id = "CohereForAI/c4ai-command-r-v01"
50
 
51
 
52
  tokenizer = AutoTokenizer.from_pretrained(
@@ -55,15 +55,15 @@ tokenizer = AutoTokenizer.from_pretrained(
55
  , token= token,)
56
 
57
 
58
- accelerator = Accelerator()
59
 
60
  model = AutoModelForCausalLM.from_pretrained(model_id, token= token,
61
  # torch_dtype= torch.uint8,
62
- torch_dtype=torch.float16,
63
- load_in_8bit=True,
64
- # # torch_dtype=torch.fl,
65
- attn_implementation="flash_attention_2",
66
- low_cpu_mem_usage=True,
67
  # device_map='cuda',
68
  # device_map=accelerator.device_map,
69
 
@@ -71,7 +71,7 @@ model = AutoModelForCausalLM.from_pretrained(model_id, token= token,
71
 
72
 
73
  #
74
- model = accelerator.prepare(model)
75
 
76
 
77
  # device_map = infer_auto_device_map(model, max_memory={0: "79GB", "cpu":"65GB" })
 
46
 
47
  # model_id = "mistralai/Mistral-7B-v0.3"
48
 
49
+ model_id = "CohereForAI/c4ai-command-r-plus-4bit"
50
 
51
 
52
  tokenizer = AutoTokenizer.from_pretrained(
 
55
  , token= token,)
56
 
57
 
58
+ # accelerator = Accelerator()
59
 
60
  model = AutoModelForCausalLM.from_pretrained(model_id, token= token,
61
  # torch_dtype= torch.uint8,
62
+ # torch_dtype=torch.float16,
63
+ # load_in_8bit=True,
64
+ # # # torch_dtype=torch.fl,
65
+ # attn_implementation="flash_attention_2",
66
+ # low_cpu_mem_usage=True,
67
  # device_map='cuda',
68
  # device_map=accelerator.device_map,
69
 
 
71
 
72
 
73
  #
74
+ # model = accelerator.prepare(model)
75
 
76
 
77
  # device_map = infer_auto_device_map(model, max_memory={0: "79GB", "cpu":"65GB" })