# zuele/tokenizer.py
import os
import re
from typing import Iterable, Iterator, List, Optional, Set, Tuple, Union

from .regex_rules import ECON_PATTERN


class Tokenizer:
    """Greedy longest-match tokenizer for Chinese economic/financial text.

    At each position in a sentence, matching is attempted in priority order:

    1. company names from a user-supplied list (tagged ``ntc``),
    2. the economic-entity regex ``ECON_PATTERN`` (tagged ``m``),
    3. the general lexicon, longest match first (tag from the dictionary),
    4. single-character fallback (stop characters tagged ``u``, others ``x``).

    Input text is first split into sentences on newlines; tokens are yielded
    lazily via :meth:`cut`.
    """

    # Upper bound on candidate word length for the greedy longest-match scans.
    _MAX_WORD_LEN = 30

    # Sentence splitter: any maximal run of non-newline characters.
    # Subclasses may override with a finer-grained pattern.
    _SENT_RE = re.compile(
        r'[^\n]+',
        flags=re.UNICODE
    )

    # Single characters emitted with the "u" tag when POS tagging is on.
    # Hoisted to the class (was rebuilt as a local set on every call).
    _STOP_CHARS = frozenset({"的", "了", "，", "、", "与", "或"})

    # ---------- public initialization ----------
    def __init__(
            self,
            dict_path: Optional[str] = None,
            company_path: Optional[str] = None,
    ) -> None:
        """Load the lexicon and company list.

        Args:
            dict_path: Path to a tab-separated ``word<TAB>freq<TAB>pos``
                dictionary. Defaults to the packaged ``data/zuele_dict.txt``.
            company_path: Path to a newline-separated company-name list.
                Defaults to the packaged ``data/company_names.txt``; a missing
                file yields an empty set (no error).

        Raises:
            OSError: If the lexicon file cannot be opened.
        """
        # 1. General dictionary path (falls back to the bundled data file).
        if dict_path is None:
            dict_path = os.path.join(
                os.path.dirname(__file__), "data", "zuele_dict.txt"
            )
        self._dict_path = dict_path
        self._lexicon: dict[str, Tuple[int, str]] = {}
        self._load_lexicon()

        # 2. Regex for economic entities (compiled pattern from regex_rules).
        self._regex = ECON_PATTERN

        # 3. Company-name list path (falls back to the bundled data file).
        if company_path is None:
            company_path = os.path.join(
                os.path.dirname(__file__), "data", "company_names.txt"
            )
        self._company: Set[str] = self._load_company_set(company_path)

    # ---------- private helpers ----------
    @staticmethod
    def _load_company_set(path: str) -> Set[str]:
        """Read the company list into a set for O(1) membership tests.

        A missing file is tolerated (returns an empty set) so the tokenizer
        still works without company-name recognition.
        """
        if not os.path.isfile(path):
            return set()
        with open(path, encoding="utf-8") as f:
            return {line.strip() for line in f if line.strip()}

    def _load_lexicon(self) -> None:
        """Populate ``self._lexicon`` from the tab-separated dictionary file.

        Each valid line is ``word<TAB>freq<TAB>pos``; blank lines, ``#``
        comments, and malformed lines are skipped silently. No-op if the
        lexicon is already loaded.
        """
        if self._lexicon:  # guard against double loading
            return
        with open(self._dict_path, encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line or line.startswith("#"):
                    continue
                try:
                    word, freq, pos = line.split("\t")
                    self._lexicon[word] = (int(freq), pos)
                except ValueError:
                    # wrong field count or non-integer freq — skip the line
                    continue

    # ---------- sentence splitting ----------
    def _split_sentences(self, text: str) -> Iterator[str]:
        """Yield non-empty, stripped newline-delimited sentences of *text*."""
        for m in self._SENT_RE.finditer(text):
            sent = m.group().strip()
            if sent:
                yield sent

    # ---------- single-sentence tokenization ----------
    def _cut_single(self, text: str, *, with_pos: bool) -> List[Union[str, Tuple[str, str]]]:
        """Tokenize one sentence.

        Args:
            text: A single sentence (no newlines).
            with_pos: If True, emit ``(token, tag)`` tuples; otherwise plain
                token strings.

        Returns:
            The list of tokens (or token/tag pairs) in input order.
        """
        res: List[Union[str, Tuple[str, str]]] = []
        i, n = 0, len(text)

        while i < n:
            # 1) Company names first, longest candidate first.
            for j in range(min(n, i + self._MAX_WORD_LEN), i, -1):
                w = text[i:j]
                if w in self._company:
                    res.append((w, "ntc") if with_pos else w)
                    i = j
                    break
            else:
                # 2) Economic-entity regex, anchored at position i.
                m = self._regex.match(text, i)
                if m:
                    ent = m.group()
                    res.append((ent, "m") if with_pos else ent)
                    i = m.end()
                    continue

                # 3) General lexicon, longest candidate first.
                #    Single .get() lookup instead of `in` + subscript.
                for j in range(min(n, i + self._MAX_WORD_LEN), i, -1):
                    w = text[i:j]
                    hit = self._lexicon.get(w)
                    if hit is not None:
                        res.append((w, hit[1]) if with_pos else w)
                        i = j
                        break
                else:
                    # 4) Single-character fallback.
                    ch = text[i]
                    if with_pos and ch in self._STOP_CHARS:
                        res.append((ch, "u"))
                    else:
                        res.append((ch, "x") if with_pos else ch)
                    i += 1
        return res

    # ---------- public streaming interface ----------
    def cut(self, text: str, *, with_pos: bool = False) -> Iterator[Union[str, Tuple[str, str]]]:
        """Lazily tokenize *text*, sentence by sentence.

        Args:
            text: Arbitrary input text; newlines delimit sentences.
            with_pos: If True, yield ``(token, tag)`` tuples instead of
                plain strings.

        Yields:
            Tokens (or token/tag pairs) in input order.
        """
        for sent in self._split_sentences(text):
            for token in self._cut_single(sent, with_pos=with_pos):
                yield token
