#!/usr/bin/env python3
# Author: Armit
# Create Time: 周二 2025/07/22 

# Inspect dataset statistics at the character level
# NOTE: naive modeling — characters used by each attribute in the train set are mapped directly onto the test set

import json
from pathlib import Path
from collections import defaultdict
from pprint import pprint as pp

# Paths: data/ holds the raw dataset, out/ receives the generated statistics.
BASE_PATH = Path(__file__).parent
DATA_PATH = BASE_PATH / 'data'
OUT_PATH = BASE_PATH / 'out'
OUT_PATH.mkdir(exist_ok=True)

# Attribute names; these key the per-attribute character vocabularies below.
PLT_NAME = 'plant'
CLS_NAME = 'class'
ENV_NAME = 'environment'
ARE_NAME = 'area'
ALT_NAME = 'altitude'
# Pseudo-attribute for characters not covered by any label span.
NON_NAME = 'none'

# Per-attribute character vocabularies, filled in by stats_trainset() and
# consulted by stats_testset().
VOCABS = {
  PLT_NAME: set(),
  CLS_NAME: set(),
  ENV_NAME: set(),
  ARE_NAME: set(),
  ALT_NAME: set(),
  NON_NAME: set(),
}

# Function-word / punctuation table (prior knowledge): characters that are
# blanked out when building the 'virt' view of test texts.
VIRTUAL_CHARS = [
  '，', '。', '（', '）',
  '是', '的', '得', '着', '过', '了',
  '在', '从', '到', '至', '于', '对', '为',
]

def stats_trainset():
  '''Collect per-attribute character vocabularies from the train split.

  Reads data/train/train01.jsonl .. train05.jsonl, marks each character of
  every sample text with the attribute key of the label span covering it
  (NON_NAME when uncovered), accumulates the characters into the
  module-level VOCABS sets, and dumps the result to out/trainset-char.json.
  '''
  for part in range(1, 5 + 1):
    fp = DATA_PATH / 'train' / f'train0{part}.jsonl'
    with open(fp, 'r', encoding='utf-8') as fh:
      # one JSON object per line; skip blank lines instead of crashing on them
      items = [json.loads(line) for line in fh if line.strip()]
    print(f'>> file {fp}')
    print(f'>> nlen: {len(items)}')

    for it in items:
      text: str = it['text']
      mask = [NON_NAME] * len(text)
      for key, val in it['label'].items():
        for name, lcut in val.items():
          # NOTE(review): only the first span of each entity is used; if an
          # entity can occur multiple times per text, later spans are ignored
          # -- confirm this is intentional.
          l, r = lcut[0]
          for j in range(l, r + 1):  # spans are inclusive on both ends
            mask[j] = key
      # mask[k] labels text[k]; walk them in lockstep
      for ch, key in zip(text, mask):
        VOCABS[key].add(ch)

  save_fp = OUT_PATH / 'trainset-char.json'
  print(f'>> save file: {save_fp}')
  with open(save_fp, 'w', encoding='utf-8') as fh:
    json.dump({k: sorted(v) for k, v in VOCABS.items()}, fh, indent=2, ensure_ascii=False)


def stats_testset():
  '''Annotate the test split with character-level masks.

  For each test text produces:
    - 'virt':   text with prior function-word characters replaced by '.'
    - 'mask':   one view per attribute, keeping only characters present in
                that attribute's train vocabulary (requires stats_trainset()
                to have filled VOCABS first)
    - 'tokens': optional entropy-based segmentation, when mk_vocab and its
                vocab file are available
  Results are dumped to out/testset-char.json.
  '''
  fp = DATA_PATH / 'test.jsonl'
  with open(fp, 'r', encoding='utf-8') as fh:
    # one JSON object per line; skip blank lines instead of crashing on them
    items = [json.loads(line) for line in fh if line.strip()]
  print(f'>> file {fp}')
  print(f'>> nlen: {len(items)}')

  texts = sorted(it['text'] for it in items)

  # The tokenizer is strictly optional: the mk_vocab module or its vocab file
  # may be missing; fall back to no segmentation in that case.
  try:
    from mk_vocab import make_tokenizer
    # NOTE(review): 'bidrectional' looks like a typo for 'bidirectional', but
    # it must match make_tokenizer's actual keyword -- confirm upstream.
    tokenizer = make_tokenizer(OUT_PATH / 'kgram' / 'vocab.txt', bidrectional=True)
  except Exception:  # was a bare except; don't swallow KeyboardInterrupt/SystemExit
    tokenizer = None

  # (abbreviation used in the output, attribute name keying VOCABS);
  # explicit mapping replaces the former globals() reflection lookup
  props = [
    ('PLT', PLT_NAME),
    ('CLS', CLS_NAME),
    ('ENV', ENV_NAME),
    ('ARE', ARE_NAME),
    ('ALT', ALT_NAME),
    ('NON', NON_NAME),
  ]

  data = []
  for text in texts:
    # mask by the prior function-word table
    virt = ''.join(c if c not in VIRTUAL_CHARS else '.' for c in text)
    # mask by the per-attribute train-set character vocabularies
    mask = {}
    for prop, prop_name in props:
      vocab = VOCABS[prop_name]
      mask[prop] = ''.join(c if c in vocab else '.' for c in text)
    item = {
      'text': text,
      'virt': virt,
      'mask': mask,
    }
    # segmentation by the train-set word list (entropy-based)
    if tokenizer is not None:
      item['tokens'] = '/'.join(tokenizer(text))
    data.append(item)

  save_fp = OUT_PATH / 'testset-char.json'
  print(f'>> save file: {save_fp}')
  with open(save_fp, 'w', encoding='utf-8') as fh:
    json.dump(data, fh, indent=2, ensure_ascii=False)


if __name__ == '__main__':
  # Order matters: stats_trainset() fills the module-level VOCABS sets that
  # stats_testset() reads when building its per-attribute masks.
  stats_trainset()
  stats_testset()
