# -*- coding:utf-8 -*-
# @FileName :base.py
# @Date: 2024/8/3
# @Author: 天空之城
import ast

import jieba
import jieba.posseg as pseg
from spacy.lang.zh import Chinese
from address_normalizer.app.config import PATTERN_FILE, JIEBA_FILE, TOKENIZE_MODEL


class BaseModel:
    """Base NER model: initializes the tokenization/NLP backend.

    Depending on ``mode``, ``self.processor`` is either a spaCy Chinese
    pipeline with a rule-based entity ruler ('rule') or jieba's POS
    segmenter module ('jieba').
    """

    def __init__(self, mode=TOKENIZE_MODEL):
        """Select and initialize the processing backend.

        :param mode: tokenization backend, either 'rule' (spaCy entity
            ruler) or 'jieba' (jieba POS segmentation). Defaults to the
            project-configured ``TOKENIZE_MODEL``.
        :raises ValueError: if ``mode`` is neither 'rule' nor 'jieba'.
        """
        self.mode = mode
        if self.mode == 'rule':
            self.processor = self.init_nlp()
        elif self.mode == 'jieba':
            self.processor = self.init_jieba()
        else:
            # Fix: message previously said "Choose 'lac' or 'rule'.",
            # which did not match the modes actually handled above.
            raise ValueError("Unsupported mode. Choose 'rule' or 'jieba'.")

    def init_nlp(self):
        """Build a spaCy Chinese pipeline with a rule-based entity ruler.

        :return: the configured spaCy ``Language`` object.
        """
        nlp = Chinese()
        ruler = nlp.add_pipe('entity_ruler')
        patterns = self.load_patterns(PATTERN_FILE)  # load patterns from file
        # NOTE(review): select_pipes(enable='tagger') temporarily disables all
        # other pipes while patterns are added, but a blank Chinese() pipeline
        # has no 'tagger' component — confirm this context manager is intended.
        with nlp.select_pipes(enable='tagger'):
            ruler.add_patterns(patterns)
        return nlp

    def init_jieba(self):
        """Initialize jieba with the custom user dictionary.

        :return: the ``jieba.posseg`` module, used as the processor.
        """
        jieba.initialize()
        jieba.load_userdict(JIEBA_FILE)
        self.load_custom_words(JIEBA_FILE)
        return pseg

    def load_patterns(self, file_path):
        """Load entity-ruler patterns, one Python-literal dict per line.

        :param file_path: path to a UTF-8 text file; each non-blank line is
            a Python literal understood by spaCy's EntityRuler.
        :return: list of pattern dicts.
        """
        with open(file_path, 'r', encoding='utf-8') as file:
            # Skip blank lines: literal_eval('') raises SyntaxError, so a
            # trailing empty line would previously crash the whole load.
            # literal_eval (not eval) parses each line as a safe literal.
            return [
                ast.literal_eval(stripped)
                for line in file
                if (stripped := line.strip())
            ]

    @staticmethod
    def load_custom_words(file_path):
        """Register extra words and frequencies from a jieba userdict file.

        Only lines with exactly three whitespace-separated fields
        (word, frequency, POS tag) are registered; other lines are skipped.

        :param file_path: path to the UTF-8 jieba user-dictionary file.
        :return: the ``jieba`` module (for chaining/inspection).
        """
        with open(file_path, 'r', encoding='utf-8') as f:
            for line in f:
                fields = line.strip().split()
                if len(fields) == 3:
                    word, freq, _ = fields
                    jieba.add_word(word, freq=int(freq))
        return jieba
