Load the model and tokenizer:

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Model repository and disaster-level class labels
model_name = "Jokh/disaster_class"
labels = ["해당 없음", "1단계", "2단계", "3단계", "4단계", "5단계"]  # "Not applicable", then levels 1-5

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=len(labels))
```
Check whether a GPU is available and move the model onto it:

```python
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

try:
    model.to(device)  # Move the model to the GPU if available, otherwise the CPU
    model.eval()      # Switch to inference mode (disables dropout, etc.)
    print("Model loaded successfully!")
except RuntimeError as e:
    print(f"Error while loading the model: {e}")
    raise SystemExit(1)  # Terminate the program on failure (e.g., out of GPU memory)
```
Prediction function:

```python
def predict(text):
    encoding = tokenizer(
        text,
        padding="max_length",
        truncation=True,
        max_length=128,
        return_tensors="pt",
    ).to(device)  # Move the inputs to the same device as the model
    with torch.no_grad():
        output = model(**encoding)
    prediction = torch.argmax(output.logits, dim=1).item()
    return labels[prediction]
```
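`predict` returns only the top label via argmax. If per-class confidence scores are also useful, applying softmax to the logits yields a probability distribution over the labels. A minimal sketch reusing the `tokenizer`, `model`, `device`, and `labels` defined above (`predict_with_scores` is a name introduced here for illustration, not part of the original script):

```python
def predict_with_scores(text):
    # Hypothetical helper: same preprocessing as predict(), but returns all class probabilities
    encoding = tokenizer(text, truncation=True, max_length=128, return_tensors="pt").to(device)
    with torch.no_grad():
        logits = model(**encoding).logits
    probs = torch.softmax(logits, dim=-1).squeeze(0)  # Convert logits to a probability distribution
    return {label: probs[i].item() for i, label in enumerate(labels)}
```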
Run predictions on user input in a loop:

```python
while True:
    text = input("Enter the report text (type 'exit' to quit): ")
    if text.strip().lower() == "exit":
        break
    result = predict(text)
    print(f"Predicted disaster level: {result}")
```
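For quick experiments, the same checkpoint can also be loaded through the `transformers` `text-classification` pipeline, which bundles tokenization and inference into a single call. A sketch under that assumption; note that the returned label names come from the model's config and may be generic (`LABEL_0` ... `LABEL_5`) rather than the human-readable names above:

```python
from transformers import pipeline

clf = pipeline("text-classification", model="Jokh/disaster_class", trust_remote_code=True)
print(clf("한강 수위가 급격히 상승하고 있습니다"))  # Example input; returns [{'label': ..., 'score': ...}]
```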
Model tree for Jokh/disaster_class

- Base model: beomi/kcbert-base