Daeyongkwon98 committed (verified)
Commit 128b55e · 1 Parent(s): aaf3452

Changed from ChatMusician to Llama 2 7B

Files changed (1): app.py (+4 -5)
app.py CHANGED
@@ -6,11 +6,10 @@ from string import Template
 # Prompt template setup
 prompt_template = Template("Human: ${inst} </s> Assistant: ")
 
-# Load the ChatMusician model and tokenizer
-model_name = "m-a-p/ChatMusician"  # Hugging Face Hub path for the ChatMusician model
-tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
-# model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="cuda", resume_download=True).eval()
-model = AutoModelForCausalLM.from_pretrained(model_name, low_cpu_mem_usage=True).eval()
+# Load the LLaMA 2 7B model and tokenizer
+model_name = "meta-llama/Llama-2-7b-hf"  # Replace with the LLaMA 2 Hugging Face Hub path
+tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
+model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="cuda").eval()
 
 # Generation settings (variables controllable from the Gradio UI)
 default_generation_config = GenerationConfig(
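
For context, a minimal sketch of how the newly loaded Llama 2 model is expected to feed the prompt template and generation config elsewhere in app.py. The instruction string and sampling parameters below are illustrative assumptions, not part of the commit; note that meta-llama/Llama-2-7b-hf is a gated repository, so the Space needs an accepted license and a Hugging Face access token to download it.

import torch
from string import Template
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig

prompt_template = Template("Human: ${inst} </s> Assistant: ")

# Gated repo: requires an accepted license and an HF access token in the environment.
model_name = "meta-llama/Llama-2-7b-hf"
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(
    model_name, torch_dtype=torch.float16, device_map="cuda"
).eval()

# Illustrative sampling settings; the Space exposes these through the Gradio UI.
generation_config = GenerationConfig(max_new_tokens=256, do_sample=True, temperature=0.7)

# Hypothetical instruction, only to show the prompt flow end to end.
prompt = prompt_template.safe_substitute(inst="Describe a simple chord progression.")
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    output_ids = model.generate(**inputs, generation_config=generation_config)

# Decode only the newly generated tokens, skipping the echoed prompt.
print(tokenizer.decode(output_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))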