"""
数据预处理：将法律案件数据转换为适用于罪名分类模型训练的 token 化数据，主要功能是对输入文本进行分词编码，
并将罪名标签映射为对应的 ID，最终生成可供模型直接使用的训练 / 测试数据文件。
{
  "input": "被告人段某于某日持刀将被害人刺伤...",
  "output": {
    "罪名": ["故意伤害", "非法持有武器"]
  }
}
=>
{"input": [101, 2345, 6789, ...], "output": 0}
{"input": [101, 2345, 6789, ...], "output": 3}
"""
import json

import tqdm
from transformers import AutoTokenizer

# Load the crime-name -> label-id mapping used to encode accusation labels.
with open("crime_types/accusation_id", encoding="utf-8") as mapping_file:
	accusation_id = json.loads(mapping_file.read())


def convert(line):
	"""Tokenize one case record and emit one JSON string per crime label.

	``line`` is a dict shaped like
	``{"input": <case text>, "output": {"罪名": [<crime name>, ...]}}``
	(see the module docstring for a full example record).

	Returns a list of JSON strings; each pairs the shared token ids of the
	case text with a single crime-label id looked up in ``accusation_id``.
	"""
	token_ids = tokenizer.encode(line["input"], truncation=True)
	return [
		json.dumps({"input": token_ids, "output": accusation_id[crime]}, ensure_ascii=False)
		for crime in line["output"]["罪名"]
	]


# The tokenizer must match the model that will later be trained / evaluated.
# model_name = "E:\\code\\qwen2.5-1.5B-Instruct"
model_name = "Qwen/Qwen2.5-1.5B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)

data_type = "test"

# Stream the input file line by line instead of materializing it with
# readlines(); tqdm still shows progress. Blank lines are skipped so a
# trailing newline in the input does not crash json.loads.
with open("data/{}_data.jsonl".format(data_type), encoding="utf-8") as f:
	lines = [convert(json.loads(line)) for line in tqdm.tqdm(f) if line.strip()]
# Flatten: each case record yields one output line per crime label.
lines = [record for per_case in lines for record in per_case]

with open("data/{}_data_classify_token".format(data_type), "w", encoding="utf-8") as f:
	# Fix: the original passed one joined string to writelines(), which
	# iterates it character by character, and omitted the final newline —
	# the output was not valid JSON-Lines. Write one terminated line per
	# record instead (an empty dataset correctly produces an empty file).
	f.writelines(record + "\n" for record in lines)
