kootaeng2 committed on
Commit 136c55f · 1 Parent(s): 39c894a

Fix: Correct model path discovery for server environment

Files changed (1)
  1. src/emotion_engine.py +15 -20
src/emotion_engine.py CHANGED
@@ -1,37 +1,32 @@
-# emotion_engine.py
+# emotion_engine.py (final version after the fix)
 
 import torch
 from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
 import os
 
 def load_emotion_classifier():
-    # Get the directory that contains this script file.
-    base_path = os.path.dirname(os.path.abspath(__file__))
+    # --- Start of the modified section ---
+    # Find the absolute path of the current script file (e.g. /app/src/emotion_engine.py).
+    script_path = os.path.abspath(__file__)
+    # Find the directory that contains the script (e.g. /app/src).
+    src_dir = os.path.dirname(script_path)
+    # Find its parent directory, i.e. the project root (e.g. /app).
+    base_dir = os.path.dirname(src_dir)
+    # Join the project root and the model folder name to build the exact path.
+    MODEL_PATH = os.path.join(base_dir, "korean-emotion-classifier-final")
 
-    # Build the absolute path to the model folder.
-    MODEL_PATH = os.path.join(base_path, "korean-emotion-classifier-final")
-
-    # Check that the path is a local directory.
-    if not os.path.isdir(MODEL_PATH):
-        print(f"❌ Error: no model folder exists at the specified path '{MODEL_PATH}'.")
-        return None
-
-    print(f"--- Final model path check: [{MODEL_PATH}] ---")
-    print(f"Loading the model directly from the local absolute path '{MODEL_PATH}'...")
+    print(f"--- Deployment environment model path check: [{MODEL_PATH}] ---")
 
     try:
-        # 1. Pass the absolute path directly to from_pretrained().
-        # 2. Drop `local_files_only=True`; the library detects a local path automatically.
-        tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
-        model = AutoModelForSequenceClassification.from_pretrained(MODEL_PATH)
-
+        # Keeping local_files_only is safer when an explicit local path is given.
+        tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, local_files_only=True)
+        model = AutoModelForSequenceClassification.from_pretrained(MODEL_PATH, local_files_only=True)
         print("✅ Successfully loaded the local model files directly!")
 
     except Exception as e:
         print(f"❌ Error while loading the model: {e}")
-        # Print the exact cause of the error.
-        print(f"Detailed error message: {e}")
         return None
+    # --- End of the modified section ---
 
     device = 0 if torch.cuda.is_available() else -1
     emotion_classifier = pipeline("text-classification", model=model, tokenizer=tokenizer, device=device)
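
For reference, a minimal sketch of the path-resolution logic this commit introduces, pulled out into a standalone script. The layout (/app/src/emotion_engine.py with the model folder at the project root) and the helper name resolve_model_path are assumptions taken from the comments in the diff, not part of the repository.

# path_resolution_sketch.py -- illustrative only; mirrors the logic added in this commit.
# The /app/src layout and the helper name are hypothetical examples.
import os


def resolve_model_path(script_file: str) -> str:
    # e.g. /app/src/emotion_engine.py -> /app/src
    src_dir = os.path.dirname(os.path.abspath(script_file))
    # e.g. /app/src -> /app (project root)
    base_dir = os.path.dirname(src_dir)
    # e.g. /app -> /app/korean-emotion-classifier-final
    return os.path.join(base_dir, "korean-emotion-classifier-final")


if __name__ == "__main__":
    print(resolve_model_path("/app/src/emotion_engine.py"))
    # Expected output on the assumed layout: /app/korean-emotion-classifier-final

Resolving the path from __file__ rather than from the process working directory means the model folder is found regardless of where the server process is launched, which appears to be the failure mode the commit title refers to.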