Daeyongkwon98 committed on
Commit
d14f87e
·
verified ·
1 Parent(s): 5fd0daa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -14,7 +14,7 @@ prompt_template = Template("Human: ${inst} </s> Assistant: ")
14
  # ๋ชจ๋ธ๊ณผ ํ† ํฌ๋‚˜์ด์ € ๋กœ๋“œ
15
  model_name = "meta-llama/Llama-3.2-3b-instruct" # 모델 경로
16
  tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
17
- model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="cuda").eval()
18
 
19
  # ์ƒ์„ฑ ์„ค์ • (Gradio UI์—์„œ ์ œ์–ดํ•  ์ˆ˜ ์žˆ๋Š” ๋ณ€์ˆ˜๋“ค)
20
  default_generation_config = GenerationConfig(
 
14
  # ๋ชจ๋ธ๊ณผ ํ† ํฌ๋‚˜์ด์ € ๋กœ๋“œ
15
  model_name = "meta-llama/Llama-3.2-3b-instruct" # 모델 경로
16
  tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
17
+ model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="cpu").eval()
18
 
19
  # ์ƒ์„ฑ ์„ค์ • (Gradio UI์—์„œ ์ œ์–ดํ•  ์ˆ˜ ์žˆ๋Š” ๋ณ€์ˆ˜๋“ค)
20
  default_generation_config = GenerationConfig(