from genericpath import isfile
import brain
from new_parser import *
from pptree import *
from queue import Queue
import numpy as np
import os


class EnglishParser:
    """Shift-reduce parser for English sentences driven by neural "assemblies".

    Each input word activates an assembly whose area name serves as the
    grammar symbol looked up in an LR-style action/goto table; every
    reduction merges two child assemblies into a newly created brain area
    that represents the parent node of the parse tree.
    """

    def __init__(self):
        self.step = 1  # step counter
        self.index = 0  # index of the current word within the sentence
        self.sentence = ''  # the raw string being analysed
        self.sign = ['$']  # symbol stack ('$' is the bottom marker)
        self.status = ['0']  # state stack ('0' is the start state)
        self.brain = ParserBrain(0.01)  # the "super brain" hosting all assemblies
        self.table = ParserTable()  # action/goto table plus grammar rules
        self.acceptable= False  # whether the current sentence was accepted; checked before readout

    def reset(self):
        # Restore the parser to its initial state so the same instance can
        # analyse another sentence (the brain and table are kept as-is).
        self.step = 1  # step counter
        self.index = 0  # index of the current word within the sentence
        self.sign = ['$']  # symbol stack
        self.status = ['0']  # state stack
        self.acceptable= False  # whether the current sentence was accepted; checked before readout
  
    def analyse(self, sentence: str):
        """Run a shift-reduce parse of `sentence` (space-separated words).

        Returns True when the sentence is accepted, False when the action
        table has no entry for the current (state, symbol) pair.
        """
        now_status = "0"  # current parser state (start state is "0")
        self.sentence=sentence
        sentence = sentence.split(' ')
        while True:
            assembly = self.brain.getAssembly(sentence[self.index])  # assembly of the current lookahead word
            action = self.table.getAction(now_status, assembly.getArea())
            #print(action)
            if action is None:
                return False
            # Action strings are e.g. 's5' (shift to state 5), 'r3'
            # (reduce by rule 3) or 'acc' (accept).
            mode,aim=action[0],action[1:]
            if action == 'acc':
                self.acceptable=True
                break

            if mode== 's':  # shift: move to another state
                self.status.append(aim)  # push the next state onto the state stack
                self.sign.append(assembly.area_name)
                # print(assembly.area_name)
                # self.sign.append(assembly.getArea())  # push the current symbol
                self.index += 1

            elif mode == 'r':  # reduce by grammar rule `aim`
                parent = []  # the child symbols consumed by this reduction
                rule=self.table.rule[aim]
                # Pop one state/symbol pair per symbol on the rule's right-hand side.
                for i in range(len(rule[1])):
                    self.status.pop()
                    parent.append(self.sign[-1])
                    self.sign.pop()
                now_status=self.status[-1]  # state exposed after the pops
                result=self.table.getGoto(now_status,rule[0])
                comp_name = self.table.rule[aim][0]
                to_area_name = comp_name + f"_{len(stage_2_Areas[comp_name])}"  # name of the merged brain area
                stage_2_Areas[comp_name].setdefault(len(stage_2_Areas[comp_name]), to_area_name)  # register the new merged area
                # The new area has no stimulus yet; it can only be driven externally.
                self.brain.add_explicit_area(to_area_name, 20, 10, 0.1)  # one brain area per merged parse node
                # to_area: Area = self.brain.areas[to_area_name]
                to_area_assembly = self.brain.merge_by_assembly(parent[0],parent[1],to_area_name)
                # self.brain.areas[to_area_name].winners = to_area_assembly
                self.status.append(result)
                self.sign.append(to_area_name)  # push the merged area onto the symbol stack
            now_status=self.status[-1]
        return True

    # Generate and print the parse tree from the merged assemblies.
    def readout(self):
        """Breadth-first readout of the parse tree rooted at the start symbol.

        Prints the tree via pptree's `print_tree`; returns False (after a
        message) when the last analysed sentence was not accepted.
        """
        if not self.acceptable:
            print('当前句子无法解析')
            return False
        q=[]
        s=self.brain.getWord(self.sign[-1]).search_assembly()  # assembly of the start symbol
        head=Node(s.getArea())  # root node labelled with its area (grammar symbol)
        q.append((s,head))
        while q:
            front,node=q[0]
            if front.father:
                # Internal node: enqueue both children (right first, then left),
                # stripping the "_<n>" suffix from the area name for display.
                left,right=front.father
                left_s=left.split('_')[0]
                right_s=right.split('_')[0]
                q.append((self.brain.getWord(right).search_assembly(),Node(right_s,node)))
                q.append((self.brain.getWord(left).search_assembly(),Node(left_s,node)))
            else:  # terminal: attach the word itself as a leaf
                Node(front.index,node)
            q.pop(0)
        print_tree(head,horizontal=False)
        pass

# Randomly draw `num` test sentences from a sentence-library file.
def generate_test_set(library_name, num):
    """Sample `num` sentences (with replacement) from `library_name`.

    Args:
        library_name: path of a text file containing one sentence per line.
        num: how many sentences to draw.

    Returns:
        A list of `num` sentences (duplicates are possible).
    """
    with open(library_name, 'r') as f:
        sentences = f.read().splitlines()
    # Bug fix: bound the random indices by the actual library size instead of
    # the hard-coded 150, so shorter files no longer raise IndexError and
    # longer files have every sentence reachable.
    random_list = np.random.randint(0, len(sentences), num)
    return [sentences[i] for i in random_list]

# Draw num1/num2/num3 sentences from the VC/VT/VI sentence libraries,
# parse each one, and write the resulting parse trees to result.txt.
def get_result(num1, num2, num3):
    """Parse randomly sampled sentences and record the results in result.txt.

    Args:
        num1: number of sentences drawn from test_sentences_VC.txt.
        num2: number of sentences drawn from test_sentences_VT.txt.
        num3: number of sentences drawn from test_sentences_VI.txt.
    """
    print('启动解析器中...')
    p = EnglishParser()
    test = []
    test += generate_test_set("test_sentences_VC.txt", num1)
    test += generate_test_set("test_sentences_VT.txt", num2)
    test += generate_test_set("test_sentences_VI.txt", num3)
    # 'w' already truncates/creates the file, so no pre-delete or
    # end-of-file seek is needed for this purely sequential write.
    with open("result.txt", 'w') as f:
        for i in test:
            # Bug fix: splitlines() in generate_test_set already removed the
            # trailing newline, so `i[:-1]` was chopping off the last real
            # character of every sentence; strip defensively instead.
            sentence = i.rstrip('\n')
            print(f'{sentence}开始解析...')
            f.write(f'{sentence}\n')
            f.flush()
            p.analyse(i)
            p.readout()
            p.reset()  # reset the parser for the next sentence
            f.write('\n')
            f.flush()
            print(f'{sentence}解析完成!\n')
        print('所有句子解析完毕，具体结果请查看result.txt文件。')
            