from common import *
import Before_analyse
from lexicalAnalysis import lexicalAnalysis
import Analyse_output

# Input SQL file location.
FILE_FOLDER = './data/'
FILE_NAME = 'test0B.sql'
FILE_PATH = FILE_FOLDER + FILE_NAME

# Suffixes for the generated result files (lexical / grammar analysis).
OUT_PUT_LEX = 'lex.tsv'
OUT_PUT_GRA = 'gra.tsv'


# Placeholders; LEX_RESULT_PATH is recomputed in __main__ via _get_output_name.
# NOTE(review): GRA_RESULT_PATH is never reassigned or used in this file.
LEX_RESULT_PATH = ' '
GRA_RESULT_PATH = ' '
# Token list produced per statement by lexicalAnalysis; reset after each statement.
tokens = []


def _get_output_name(in_NAME, in_file):
    test_Name = in_NAME.split('.')
    type = str(test_Name[0])
    return type[-1] + in_file


def writeResult1(file, statement, tokens: list):
    """Write the statement header and its tokens, 4 tokens per line.

    Output layout:
        STATEMENT: <statement>
        ------------词法分析部分----------------
        <token><token><token><token>   (rows of 4, no separator)
        ...trailing partial row...
        ------------语法分析部分----------------

    The original ``index == 0`` special case was redundant: it behaved
    exactly like the generic "not end of row" branch, so it is removed.
    """
    tokenPerRow = 4
    file.write('STATEMENT: ' + statement + '\n')
    file.write('------------词法分析部分----------------\n')
    string = ''
    for index, token in enumerate(tokens):
        string += str(token)
        # Flush a full row of tokenPerRow tokens, newline-terminated.
        if (index + 1) % tokenPerRow == 0:
            file.write(string + '\n')
            string = ''
    # Trailing partial row (or a bare newline when tokens divide evenly
    # or the list is empty) — matches the original output exactly.
    file.write(string + '\n')
    file.write('------------语法分析部分----------------\n')


def writeResult2(file, statement, tokens: list):
    """Write one ``name\\t<category,attribute>`` line per token.

    Each token is a (name, category, attribute) triple. String attributes
    have their double quotes stripped before printing.

    ``statement`` is unused but kept for signature parity with
    writeResult1 (callers pass it).
    """
    for elm in tokens:
        name, category, attr = elm[0], elm[1], elm[2]
        # isinstance is the idiomatic type check (was: type(...) == str).
        if isinstance(attr, str):
            attr = attr.replace('"', '')
        file.write(f'{name}\t<{category},{attr}>\n')


if __name__ == '__main__':
    # Derive the lexer output file name, e.g. 'test0B.sql' -> 'Blex.tsv'.
    LEX_RESULT_PATH = _get_output_name(FILE_NAME, OUT_PUT_LEX)

    # Context managers guarantee both files are closed even if the
    # grammar stage raises (the original left them open on error).
    with open(FILE_PATH, 'r', encoding='utf-8') as codeFile, \
            open(LEX_RESULT_PATH, 'w', encoding='utf-8') as resultFile:
        for statement in codeFile:
            # Token structure: (name, category, attribute).
            tokens = lexicalAnalysis(statement)
            writeResult2(resultFile, statement, tokens)

            # Drop the attribute field and fuse two-word keywords into
            # single terminals before grammar analysis.
            for index, token in enumerate(tokens):
                if token[0] == 'GROUP BY':
                    tokens[index] = ('GROUPBY', token[1])
                elif token[0] == 'ORDER BY':
                    tokens[index] = ('ORDERBY', token[1])
                else:
                    tokens[index] = (token[0], token[1])

            ObtainGramar()  # load the grammar (from common import *)
            # Pre-reduction preparation: FIRST/FOLLOW sets and the
            # predictive parse table.
            Before_analyse.PreparedBefore()
            Analyse_output.AnalyseOutput(tokens)
