talgatzh committed (verified)
Commit 64fbfc0 · 1 Parent(s): 4d90efd

Upload talgat.py

Files changed (1): talgat.py (+134 -0)
talgat.py ADDED
# -*- coding: utf-8 -*-
import re, json, sys, subprocess
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from datasets import load_dataset
from tqdm import tqdm

# ===== Parameters =====
BASE_MODEL = "google/gemma-3-4b-it"
MODEL_PATH = "talgatzh/gemma-finetuned-model2"
OUTPUT_FILE = "gemma_inference_results_from_multidomain_fixedzxcs555.jsonl"
MAX_NEW_TOKENS = 60
MAX_TEXTS = 20  # increase for a more stable metric (>= 200)

# ===== ROUGE (installed on demand) =====
try:
    import evaluate
except ImportError:
    subprocess.check_call([sys.executable, "-m", "pip", "install", "-q", "evaluate"])
    import evaluate

# ===== Model and tokenizer =====
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_PATH,
    device_map="auto",
    torch_dtype=torch.float16,
    trust_remote_code=True,
).eval()
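# Note: the tokenizer is loaded from BASE_MODEL while the weights come from the
# fine-tuned MODEL_PATH repo. This assumes the fine-tune left the base vocabulary
# unchanged; if it added tokens, both should be loaded from the same repository.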
# pad_token for stability (Gemma uses pad = eos)
if tokenizer.pad_token_id is None:
    tokenizer.pad_token = tokenizer.eos_token

# ===== Utilities =====
def is_kazakh(text: str) -> bool:
    return any(c in text.lower() for c in "қәөүңғұһі")

_SENT_SPLIT = re.compile(r'(?<=[\.\!\?…])\s+|\n+')
def lead_n(text: str, n=3) -> str:
    sents = [s.strip() for s in _SENT_SPLIT.split(text.strip()) if s.strip()]
    return " ".join(sents[:n])

def build_chat_prompt(text: str) -> str:
    # Kazakh instruction, roughly: "Goal: an extractive summary. Rule: copy only
    # sentences from the source text. Do not write in your own words. Do not use
    # synonyms. Choose only the 2-3 most important sentences and write them
    # verbatim. Format: sentences only, add no new words. Text: ... Summary:"
    instr = (
        "Мақсат: Экстрактивті қысқаша мазмұн.\n"
        "Ереже: Тек бастапқы мәтіндегі сөйлемдерді көшір. Өз сөзіңмен жазба. Синоним қолданба.\n"
        "Мәтіннен тек 2–3 ең маңызды сөйлемді таңда да, сол күйінде жаз.\n"
        "Формат: тек сөйлемдер, жаңа сөздер қоспа.\n\n"
        "Мәтін:\n"
        f"{text.strip()}\n\n"
        "Қысқаша мазмұн:"
    )
    messages = [{"role": "user", "content": instr}]
    # The proper chat template for Gemma-IT
    return tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
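# For reference, Gemma's chat template renders the turn roughly as
# "<start_of_turn>user\n{instr}<end_of_turn>\n<start_of_turn>model\n",
# which is why the post-processing below strips these markers if they
# leak into the decoded output.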
# ===== Data =====
dataset = load_dataset("kz-transformers/multidomain-kazakh-dataset",
                       split="train", streaming=True)

INPUT_TEXTS = []
for ex in dataset:
    txt = (ex.get("text") or "").strip()
    if is_kazakh(txt) and len(txt.split()) > 20:
        INPUT_TEXTS.append(txt)
    if len(INPUT_TEXTS) >= MAX_TEXTS:
        break

print(f"✔ Selected {len(INPUT_TEXTS)} Kazakh texts from multidomain")
# ===== Generation =====
results, preds, refs = [], [], []

for text in tqdm(INPUT_TEXTS, desc="Generating summaries"):
    prompt_text = build_chat_prompt(text)

    toks = tokenizer(prompt_text, return_tensors="pt", truncation=True, max_length=2048)
    toks = {k: v.to(model.device) for k, v in toks.items()}

    with torch.no_grad():
        out = model.generate(
            **toks,
            max_new_tokens=MAX_NEW_TOKENS,
            do_sample=False,  # greedy decoding; temperature is ignored here, so it is not passed
            repetition_penalty=1.05,
            no_repeat_ngram_size=6,
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=tokenizer.pad_token_id,
            use_cache=True,
        )

    # === Keep ONLY the new tokens after the input ===
    input_len = toks["input_ids"].shape[1]
    gen_ids = out[0, input_len:]
    generated = tokenizer.decode(gen_ids, skip_special_tokens=True).strip()

    # Strip possible role/turn-marker "leaks"
    for bad in ("model", "<start_of_turn>", "<end_of_turn>"):
        if generated.lower().startswith(bad):
            generated = generated[len(bad):].lstrip(": ").strip()
        generated = generated.replace(bad, "").strip()

    # Fallback: if empty, take the first 2-3 sentences of the source
    if not generated:
        generated = lead_n(text, n=3)

    reference = lead_n(text, n=3)

    results.append({"text": text, "summary": generated, "reference": reference})
    preds.append(generated)
    refs.append(reference)
114
+
115
+ # ===== Сохранение =====
116
+ with open(OUTPUT_FILE, "w", encoding="utf-8") as f:
117
+ for r in results:
118
+ f.write(json.dumps(r, ensure_ascii=False) + "\n")
119
+
120
+ print(f"✅ Сохранено {len(results)} суммаризаций → {OUTPUT_FILE}")
121
+
122
+ # ===== ROUGE к Lead-3 (прокси для быстрой диагностики) =====
123
+ rouge = evaluate.load("rouge")
124
+ scores = rouge.compute(predictions=preds, references=refs, use_stemmer=True)
125
+ scores_pct = {k: round(v * 100, 2) for k, v in scores.items()}
126
+ print("🔎 ROUGE vs Lead-3:")
127
+ for k in ("rouge1", "rouge2", "rougeL", "rougeLsum"):
128
+ print(f"{k.upper()}: {scores_pct.get(k, 0)}%")
129
+
130
+ # ===== Быстрый дебаг первых 3 пар =====
131
+ for i in range(min(3, len(results))):
132
+ print("\n--- SAMPLE", i+1, "---")
133
+ print("PRED:", results[i]["summary"])
134
+ print("REF :", results[i]["reference"])