```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
```

๋ชจ๋ธ ๋ฐ ํ† ํฌ๋‚˜์ด์ € ๋กœ๋“œ

```python
model_name = "jokh/disaster_class"
# Class labels: "not applicable" followed by disaster alert levels 1-5
labels = ["ํ•ด๋‹น ์—†์Œ", "1๋‹จ๊ณ„", "2๋‹จ๊ณ„", "3๋‹จ๊ณ„", "4๋‹จ๊ณ„", "5๋‹จ๊ณ„"]
```

๋ชจ๋ธ๊ณผ ํ† ํฌ๋‚˜์ด์ € ๋ถˆ๋Ÿฌ์˜ค๊ธฐ

```python
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=len(labels))
```
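As an aside, the same checkpoint can also be driven through the `transformers` `pipeline` API, which bundles tokenization, device placement, and argmax. A minimal sketch, assuming the checkpoint's config does not define `id2label`, so the pipeline's generic `LABEL_<i>` names are mapped back into `labels` (`clf` is a hypothetical variable name, not part of this repository):

```python
from transformers import pipeline

# Hypothetical convenience wrapper; reuses the model/tokenizer loaded above.
clf = pipeline("text-classification", model=model, tokenizer=tokenizer,
               device=0 if torch.cuda.is_available() else -1)

raw = clf("์‹ ๊ณ  ๋‚ด์šฉ ์˜ˆ์‹œ")[0]   # e.g. {"label": "LABEL_2", "score": 0.91}
pred = labels[int(raw["label"].split("_")[-1])]  # map LABEL_<i> back to a class name
```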

Check GPU availability and move the model

```python
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
try:
    model.to(device)  # Move the model to GPU or CPU
    model.eval()      # Inference mode
    print("Model loaded successfully!")
except RuntimeError as e:
    print(f"Error while loading the model: {e}")
    exit()  # Terminate the program on error
```

Prediction function

```python
def predict(text):
    encoding = tokenizer(text, padding="max_length", truncation=True,
                         max_length=128, return_tensors="pt")
    encoding = encoding.to(device)  # Move inputs to the same device as the model
    with torch.no_grad():
        output = model(**encoding)
    prediction = torch.argmax(output.logits, dim=1).item()
    return labels[prediction]
```
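If you also want a confidence score alongside the predicted class, you can apply a softmax over the logits. A minimal sketch under the same assumptions as above (`predict_with_confidence` is a hypothetical helper, not part of this repository):

```python
import torch.nn.functional as F

def predict_with_confidence(text):
    # Same tokenization as predict() above
    encoding = tokenizer(text, padding="max_length", truncation=True,
                         max_length=128, return_tensors="pt").to(device)
    with torch.no_grad():
        logits = model(**encoding).logits
    probs = F.softmax(logits, dim=1).squeeze(0)  # shape: (num_labels,)
    idx = int(torch.argmax(probs).item())
    return labels[idx], probs[idx].item()        # class name plus its probability
```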

์‚ฌ์šฉ์ž ์ž…๋ ฅ ๋ฐ˜๋ณต ์‹คํ–‰

```python
while True:
    text = input("Enter the incident report (type 'exit' to quit): ")
    if text.lower() == "exit":
        break
    result = predict(text)
    print(f"Predicted disaster level: {result}")
```
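For scoring many reports at once, batching the tokenizer call is usually faster than calling `predict()` in a loop. A minimal sketch under the same assumptions (`predict_batch` is a hypothetical helper, not part of this repository):

```python
def predict_batch(texts, batch_size=32):
    results = []
    for i in range(0, len(texts), batch_size):
        batch = texts[i:i + batch_size]
        # Dynamic padding to the longest text in the batch
        enc = tokenizer(batch, padding=True, truncation=True,
                        max_length=128, return_tensors="pt").to(device)
        with torch.no_grad():
            logits = model(**enc).logits
        preds = torch.argmax(logits, dim=1).tolist()
        results.extend(labels[p] for p in preds)
    return results
```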

Safetensors · Model size: 109M params · Tensor type: F32

Model tree for Jokh/disaster_class: fine-tuned from the base model beomi/kcbert-base.