# coding=UTF-8
import random
from const_ import *
print("start to build train data pair!")

# Read the raw training file.  Decode explicitly as UTF-8 (the file holds
# non-ASCII text) instead of relying on the platform's locale default, which
# differs across systems and can raise UnicodeDecodeError.
with open("./data.txt", encoding="utf-8") as f:
    lines = f.readlines()

def extract_relations(line):
    """Extract every mso:/r-mso: relation name from one logic-form line.

    Forward relations are introduced by the marker "( mso:" and reversed
    relations by "r-mso:"; a relation name runs up to the next space.  The
    original copy-pasted if/elif branches only handled 1-3 occurrences per
    marker and silently dropped everything on lines with 4 or more; the loops
    below handle any count and still return [] when nothing matches.

    :param line: one logic-form line from the data file
    :return: list like ["mso.foo", "r-mso.bar"], forward relations first
    """
    relations = []
    # Each chunk after a "( mso:" marker starts with the relation name.
    for chunk in line.split("( mso:")[1:]:
        relations.append("mso." + chunk.split(" ")[0])
    # Same for reversed relations, marked "r-mso:".
    for chunk in line.split("r-mso:")[1:]:
        relations.append("r-mso." + chunk.split(" ")[0])
    return relations


# Pair every question with its candidate relations.  Samples span 3 lines and
# the logic form is the line with i % 3 == 1; the preceding line presumably
# carries the question (it is fed to extract_question).  Each pair is
# [id, question, relation, score]: the first extracted relation scores 2,
# every later one scores 1.
train_data = []
temp_data = []

for i, line in enumerate(lines):
    if i % 3 != 1:
        continue
    relations = extract_relations(line) if line.count("mso:") >= 1 else []
    if relations:
        temp_data.append(relations)
    for rank, relation in enumerate(relations):
        score = 2 if rank == 0 else 1
        train_data.append([extract_id(line), extract_question(lines[i - 1]), relation, score])

print("start to build word dict!")

def all_char_upper(word):
    """Return True when *word* contains no lowercase character (vacuously True for "")."""
    return not any(ch.islower() for ch in word)

def all_char_lower(word):
    """Return True when *word* contains no uppercase character (vacuously True for "")."""
    return not any(ch.isupper() for ch in word)

def first_char_upper(word):
    """Return True when word starts with an uppercase letter and the rest is lowercase.

    Guards against the empty string (the original raised IndexError on "").
    A single uppercase letter such as "A" returns False because "".islower()
    is False — this matches the original behavior for one-letter words.
    """
    if not word:
        return False
    return word[0].isupper() and word[1:].islower()

def judge_pure_english(keyword):
    """Return True when keyword is non-empty and all of its characters are alphabetic."""
    # Equivalent to str.isalpha(): isalpha() is False on the empty string.
    return bool(keyword) and all(ch.isalpha() for ch in keyword)


# Vocabulary seeded with the special placeholder tokens; order matters,
# because a word's id is its position in this list.
word_dict = [
    "UNKNOWN-TOKEN",
    "ENTITY",
    "mso",
    "r-mso",
    "s",
    "NUMBER",
    "DATE",
]

print("start to build corpus.txt !")

# Build the deduplicated sentence corpus from the training pairs and grow the
# word dictionary with every new token encountered.
sentence_set = []
for item in train_data:
    # item layout: [question id, question text, relation, score].
    # lines[int(item[0])*3-2] presumably maps the question id back to its
    # logic-form line (3 lines per sample) — TODO confirm the offset.
    new_sentence = replace_entity_in_logic_form(item[1],lines[int(item[0])*3-2])
    final_sentence = replace_number_and_date_after_entity_removed(new_sentence)
    # Store the normalized question back into the pair for the later stages.
    item[1] = final_sentence
    if final_sentence not in sentence_set:
        sentence_set.append(final_sentence)  # list keeps first-seen order
    word_list = final_sentence.split(" ")
    for word in word_list:
        if word not in word_dict:
            word_dict.append(word)


# Read the test questions — decode explicitly as UTF-8 rather than with the
# platform's locale default — normalize each one the same way as the training
# questions, and extend the shared sentence corpus and word dictionary.
with open("./data_test.txt", encoding="utf-8") as f:
    lines_test = f.readlines()

for i, item in enumerate(lines_test):
    # Samples span 3 lines; the question is the first line of each triple.
    if i % 3 != 0:
        continue
    question = extract_question(item)
    entity_list = predict_question_entities(question)
    final_sentence = update_question(question, entity_list)
    if final_sentence not in sentence_set:
        sentence_set.append(final_sentence)
    for word in final_sentence.split(" "):
        if word not in word_dict:
            word_dict.append(word)


# Normalize every relation string, add it (deduplicated) to the corpus, and
# fold its tokens into the word dictionary.
for pair in train_data:
    formatted = format_relation(pair[2])
    pair[2] = formatted
    if formatted not in sentence_set:
        sentence_set.append(formatted)
    for token in formatted.split(" "):
        if token not in word_dict:
            word_dict.append(token)


# Persist the vocabulary as "<word> <id>" lines; the id is the list position.
with open("./word_dict.txt", mode="w") as f:
    for word_id, word in enumerate(word_dict):
        f.write(word + " " + str(word_id) + "\n")

# Persist the corpus: one sentence per line, keyed "T<id>".
with open("./corpus.txt", mode="w") as f:
    for sent_id, sent in enumerate(sentence_set):
        f.write("T" + str(sent_id) + " " + sent + "\n")

def sentence_to_id_str(sentence):
    """Encode a sentence as a string of space-separated word-dict ids.

    Tokens missing from word_dict map to UNKNOWN-TOKEN; date and number
    literals collapse to the DATE / NUMBER placeholder ids.
    NOTE(review): the unknown-token check runs before the date/number checks,
    so a date or number that is absent from word_dict is encoded as
    UNKNOWN-TOKEN rather than DATE/NUMBER — confirm this order is intended.
    """
    ids = []
    for token in sentence.split(" "):
        if token == "":
            continue  # split(" ") yields empty strings for repeated spaces
        if token not in word_dict:
            token = "UNKNOWN-TOKEN"
        if check_is_date(token):
            token = "DATE"
        elif check_is_num(token):
            token = "NUMBER"
        ids.append(str(word_dict.index(token)))
    # join() needs no trailing-space trim, unlike the old "+= ... + ' '" build.
    return " ".join(ids)


# Persist the id-encoded corpus in the same "T<id> ..." layout as corpus.txt.
with open("./corpus_preprocessed.txt", mode="w") as f:
    for sent_id, sent in enumerate(sentence_set):
        f.write("T" + str(sent_id) + " " + sentence_to_id_str(sent) + "\n")



# Split the sentence-id space at its midpoint: a question's negative examples
# are relation ids drawn from the half that does not contain its true
# relation.  Integer division is required here — MID_SENTENCE_NUM feeds
# range(), which rejects floats on Python 3 (the original "/" produced one).
MID_SENTENCE_NUM = (MIN_SENTENCE_NUM + MAX_SENTENCE_NUM) // 2
with open("./relation_train.txt",mode="w") as f1,open("./relation_valid.txt",mode="w") as f2,open("./relation_test.txt",mode="w") as f3:
    for index,item in enumerate(train_data):
        # if random.randint(1,10)!=1:   # disabled 90/10 random train/valid split
        if True:  # every pair currently goes to the training file; the else branch is dead
            sentence_index = sentence_set.index(item[1])
            relation_index = sentence_set.index(item[2])
            # Positive example line: "<score> T<question-id> T<relation-id>".
            f1.write(str(item[3]) + " T" + str(sentence_index) + " T" + str(relation_index))
            f1.write("\n")

            # 100 negatives (score 0) from the opposite half of the id space.
            if relation_index < MID_SENTENCE_NUM:
                for i in range(MID_SENTENCE_NUM, MID_SENTENCE_NUM + 100):
                    f1.write(str(0) + " T" + str(sentence_index) + " T" + str(i))
                    f1.write("\n")
            else:
                for i in range(MIN_SENTENCE_NUM, MIN_SENTENCE_NUM + 100):
                    f1.write(str(0) + " T" + str(sentence_index) + " T" + str(i))
                    f1.write("\n")

        else:  # unreachable while the random split above is disabled
            sentence_index = sentence_set.index(item[1])
            relation_index = sentence_set.index(item[2])
            f2.write(str(item[3]) + " T" + str(sentence_index) + " T" + str(relation_index))  # positive example
            f2.write("\n")
            f3.write(str(item[3]) + " T" + str(sentence_index) + " T" + str(relation_index))  # positive example
            f3.write("\n")

            # Valid/test negatives span the whole opposite half, not just 100.
            if relation_index < MID_SENTENCE_NUM:
                for i in range(MID_SENTENCE_NUM, MAX_SENTENCE_NUM):
                    f2.write(str(0) + " T" + str(sentence_index) + " T" + str(i))
                    f2.write("\n")
                    f3.write(str(0) + " T" + str(sentence_index) + " T" + str(i))
                    f3.write("\n")
            else:
                for i in range(MIN_SENTENCE_NUM, MID_SENTENCE_NUM):
                    f2.write(str(0) + " T" + str(sentence_index) + " T" + str(i))
                    f2.write("\n")
                    f3.write(str(0) + " T" + str(sentence_index) + " T" + str(i))
                    f3.write("\n")
