| | |
| | |
| | |
| |
|
| | """HF Jobs์์ ๋ชจ๋ธ ํ
์คํธ""" |
| |
|
| | from transformers import AutoModelForCausalLM, AutoTokenizer |
| | from peft import PeftModel |
| | import torch |
| |
|
# --- Configuration: base checkpoint and the LoRA adapter overlaid on it ---
BASE_MODEL = "Qwen/Qwen2.5-0.5B"
ADAPTER_MODEL = "epinfomax/youtube-thumbnail-trend-analyzer"

# Banner for the test run.
banner = "=" * 60
print(banner)
print("YouTube ์ธ๋ค์ผ ํธ๋ ๋ ๋ถ์ ๋ชจ๋ธ ํ์คํธ")
print(banner)
| |
|
# --- Model loading: tokenizer from the adapter repo, fp16 base model, then LoRA weights ---
print("\n๋ชจ๋ธ ๋ก๋ ์ค...")
tokenizer = AutoTokenizer.from_pretrained(ADAPTER_MODEL)

base = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL,
    torch_dtype=torch.float16,  # half precision to cut memory use
    device_map="auto",          # let accelerate place weights on available devices
    trust_remote_code=True,     # NOTE: runs code shipped in the model repo — trusted source only
)
model = PeftModel.from_pretrained(base, ADAPTER_MODEL)
model.eval()  # inference mode: disable dropout etc.
print("๋ชจ๋ธ ๋ก๋ ์๋ฃ!")
| |
|
# Fixed evaluation prompt: five thumbnail descriptions (music, gaming,
# entertainment, tech, know-how) the model should summarize into a trend
# report plus a Midjourney prompt recommendation.
test_input = """๋ค์ ์ธ๋ค์ผ ๋ถ์๋ค์ ๋ณด๊ณ ์ค๋์ ํธ๋ ๋๋ฅผ ์์ฝํ๊ณ Midjourney ํ๋กฌํํธ๋ฅผ ์ถ์ฒํด์ค:

[์์] ์์ํฌ - ์์ฌํ ๋จ์
- ๋ฐฐ๊ฒฝ: ์ค๋ ์ง์ ๊ทธ๋ผ๋ฐ์ด์
- ์ธ๋ฌผ: ์กธ๋ฆฐ ํ์ , ๊ณ ๊ฐ ์์
- ํ์คํธ: '์์ฌํ ๋จ์' ํฐ์ ์ธ๋ฆฌํ์ฒด
- ๋ถ์๊ธฐ: ๊ฐ์ฑ์ , ์์ ์ 

[๊ฒ์] ์นผ๋ฐ๋ ์นด๋ฅดํ
- ๋ฐฐ๊ฒฝ: ์ด๋์ด ๊ฒ์ ํ๋ฉด
- ์ธ๋ฌผ: ๊ฒ์ ์บ๋ฆญํฐ๋ค
- ํ์คํธ: '์ฐํ๋ณต๋กค' ๋ธ๋์ ๊ตต์ ๊ธ์จ
- ๋ถ์๊ธฐ: ์ ๋จธ๋ฌ์ค, ๊ฐ๋ฒผ์ด

[์ํฐํ์ธ๋จผํธ] ํฉ์ ๋ฏผ ์ ํ์ด
- ๋ฐฐ๊ฒฝ: ๋ฐฉ์ก ์คํ๋์ค
- ์ธ๋ฌผ: ํฉ์ ๋ฏผ, ์๋ ํ์
- ํ์คํธ: ์์
- ๋ถ์๊ธฐ: ์ฝ๋ฏน, ์น๊ทผํจ

[๊ณผํ๊ธฐ์ ] ์์ดํฐ ์ ๊ธฐ์
- ๋ฐฐ๊ฒฝ: ๊น๋ํ ํฐ์/ํ์
- ์ธ๋ฌผ: ์์
- ํ์คํธ: ๊ธฐ์ ๊ด๋ จ ํ์คํธ
- ๋ถ์๊ธฐ: ๋ฏธ๋์ , ๊น๋ํจ

[๋ธํ์ฐ] ๊ธฐ์84 ์์์ฅ
- ๋ฐฐ๊ฒฝ: ์์์ฅ, ํ๋์
- ์ธ๋ฌผ: ๊ธฐ์84, ๋ฐ์ด๋๋ ๋ชจ์ต
- ํ์คํธ: '์ํ4๋' ๋นจ๊ฐ์
- ๋ถ์๊ธฐ: ๋์ ์ , ์ ๋จธ๋ฌ์ค"""
| |
|
| | print("\n" + "=" * 60) |
| | print("์
๋ ฅ:") |
| | print("=" * 60) |
| | print(test_input[:500] + "...") |
| |
|
| | |
| | print("\n" + "=" * 60) |
| | print("๋ชจ๋ธ ์๋ต ์์ฑ ์ค...") |
| | print("=" * 60) |
| |
|
| | messages = [{"role": "user", "content": test_input}] |
| | text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) |
| | inputs = tokenizer(text, return_tensors="pt").to(model.device) |
| |
|
# --- Generation ---
# Qwen tokenizers often define no pad token; passing pad_token_id=None to
# generate() triggers warnings / undefined padding, so fall back to EOS.
pad_id = tokenizer.pad_token_id
if pad_id is None:
    pad_id = tokenizer.eos_token_id

with torch.no_grad():  # inference only — no autograd graph needed
    outputs = model.generate(
        **inputs,
        max_new_tokens=500,
        temperature=0.7,
        top_p=0.9,
        do_sample=True,
        pad_token_id=pad_id,
        eos_token_id=tokenizer.eos_token_id,
    )

# generate() returns prompt + completion; decode only the tokens after the
# prompt length. This is robust, unlike splitting the decoded text on a
# marker substring (the previous approach), which breaks if the model does
# not echo the marker verbatim.
prompt_len = inputs["input_ids"].shape[-1]
response = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True).strip()
| |
|
| | print("\n" + "=" * 60) |
| | print("๋ชจ๋ธ ์ถ๋ ฅ:") |
| | print("=" * 60) |
| | print(response) |
| |
|
| | print("\n" + "=" * 60) |
| | print("ํ
์คํธ ์๋ฃ!") |
| | print("=" * 60) |
| |
|