#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/12/14 14:49
# @Author  : huidong.bai
# @File    : MongoNluGrammar.py
# @Software: PyCharm
# @Mail    : MasterBai2018@outlook.com
import os
import re
import sys
import copy
from jsgf import PublicRule, Literal, Grammar, Rule, Expansion, Sequence, Literal
from jsgf import parse_grammar_file
from itertools import product



class NTreeNode:
    """A node of an n-ary tree: a payload value plus an ordered child list."""

    def __init__(self, val=None):
        # Payload carried by this node (None when not supplied).
        self.val = val
        # Children are kept in insertion order.
        self.children = []

    def add_children(self, node):
        """Append ``node`` as the last child of this node."""
        self.children.append(node)


class NLUGrammar:
    """Load a JSGF grammar file and expose its rules for sentence generation.

    Attributes:
        grammar: the parsed ``jsgf.Grammar`` object.
        rules: dict mapping rule name -> list of alternative expansions
            (the ``|``-separated branches of each ``<name> = a | b;`` line).
        public_rules: names of the grammar's visible (public) rules.
    """

    def __init__(self, file):
        # NOTE: these used to be class-level attributes, which made every
        # NLUGrammar instance share the same dict/list — a latent aliasing
        # bug. They are now per-instance.
        self.rules = {}
        self.public_rules = []
        self.__load_grammar(file)
        # Re-read the raw file to capture each rule's alternatives verbatim.
        with open(file, "r", encoding="utf-8") as fh:
            for data in fh:
                # Skip blanks, comments (# or //) and directives (@).
                if data.isspace() or data.startswith(("#", "//", "@")) or len(data) == 0:
                    continue
                data = data.replace(";", "").strip()
                if "=" not in data:
                    continue
                # partition (not split) keeps any further "=" inside the body.
                header, _, body = data.partition("=")
                rule_list = [word.strip() for word in body.split("|")]
                # findall may be empty when the header has no <name> token;
                # guard instead of indexing [0] blindly (IndexError before).
                names = re.findall(r'<(.*?)>', header)
                if names and rule_list:
                    self.rules[names[0]] = rule_list

    def __load_grammar(self, jsgf_file):
        """Parse ``jsgf_file`` with pyjsgf and record the public rule names.

        Raises:
            Exception: wrapping any parse/compile failure from jsgf.
        """
        try:
            grammar: Grammar = parse_grammar_file(jsgf_file)
            grammar.compile()
            self.grammar = grammar
            for rule in grammar.visible_rules:
                self.public_rules.append(rule.name)
        except Exception as e:
            raise Exception(f"Paser jsgf file failed: {e}")

    def get_nlu_tag(self, tag):
        """Return the literal text of the rule named ``tag``.

        Assumes the rule's expansion is a single Literal — TODO confirm for
        grammars with composite expansions.
        """
        rule: Literal = self.grammar.get_rule_from_name(tag).expansion
        rule.case_sensitive = True
        return rule.text

    @staticmethod
    def slots_parser(text):
        """Extract ``_slot_value_`` markers from ``text``.

        Returns:
            (new_text, slots): ``new_text`` with each marker replaced by its
            bare value, and ``slots`` as ``"slot:value"`` pairs joined by ";".
        """
        new_text = text
        matches = re.findall(r'_(.*?)_(.*?)_', text)
        slots_list = []

        for slots, val in matches:
            slots_list.append(f"{slots}:{val}")
            # Replace only the first occurrence so repeated markers map 1:1.
            new_text = new_text.replace(f"_{slots}_{val}_", val, 1)
        return new_text, ";".join(slots_list)


if __name__ == '__main__':
    # Require exactly one argument: the grammar file path.
    if len(sys.argv) != 2:
        print("请输入Grammar文件路径.")
        sys.exit(1)

    grammar_path = sys.argv[1]

    # Validate the path before attempting to parse it.
    if not os.path.exists(grammar_path):
        print("指定的路径不存在，请重新输入有效的文件路径。")
        sys.exit(1)

    nlu_grammar = NLUGrammar(grammar_path)
    skill = nlu_grammar.get_nlu_tag("skill")
    # BUG FIX: the original fetched the "skill" tag twice; the variable name
    # indicates this should read the "intention" rule — confirm against the
    # grammar files in use.
    intention = nlu_grammar.get_nlu_tag("intention")
    strategy = nlu_grammar.get_nlu_tag("strategy")

    # Seed the worklist with every public rule's alternatives.
    # (.get with a default guards against a public rule missing from `rules`.)
    pending = []
    for public_rule in nlu_grammar.public_rules:
        pending.extend(nlu_grammar.rules.get(public_rule, []))

    # Expand <rule> placeholders breadth-first: each pass rewrites the first
    # remaining <tag> in every pending sentence with each of that rule's
    # alternatives. Sentences without placeholders are finished.
    # This replaces the original deepcopy + list.remove loop (O(n^2) per pass).
    result_list = []
    while pending:
        next_pending = []
        for sentence in pending:
            match = re.search(r'<(.*?)>', sentence)
            if not match:
                result_list.append(sentence)
                continue
            rule_tag = match.group(1)
            for child in nlu_grammar.rules.get(rule_tag, []):
                next_pending.append(sentence.replace(f"<{rule_tag}>", child, 1))
        pending = next_pending

    print("# 泛化个数：", len(result_list))

    # Emit one line per generalized sentence with its slot annotations.
    for text in result_list:
        new_text, slots = NLUGrammar.slots_parser(text)
        print(f"text:{new_text}\tskill:{skill};intention:{intention}\tstrategy:{strategy}")
