# test_data = test_data_all[:1500]
# val_data = test_data_all[3427:]

import argparse
import glob
import logging
import os
import random
import csv
from clear import clean_str

import numpy as np
import stanfordnlp

# Load the Mohler short-answer grading dataset.
# Use csv.reader (the csv module is already imported above) rather than a
# raw str.split(','): the answer-text columns can contain quoted commas,
# which a naive split would scatter across the wrong columns.
mohler = []
labels = []
with open('mohler_dataset_edited.csv', encoding="utf-8") as fr:
    for row in csv.reader(fr):
        mohler.append(row)

# Drop the header row by position. list.remove() searches for the first
# *equal* element (O(n) and wrong if a data row ever equaled the header);
# `del` removes exactly index 0.
del mohler[0]

# One-time setup shared by get_dependencies(): the StanfordNLP parsing
# pipeline and the output file the dependency rows are appended to.
nlp = stanfordnlp.Pipeline()

# Explicit UTF-8 so the written token text matches the input file's
# encoding instead of depending on the platform default (which can fail
# on non-ASCII tokens).
fo = open("train.tsv", "w", encoding="utf-8")


def get_dependencies(text):
    """Parse `text` with the module-level pipeline `nlp` and write one
    dependency edge per line to the module-level output file `fo`.

    Each line is tab-separated:
        relation  head-token-text  head-position  dependent-token-text  dependent-position
    Positions are 1-based and global across all sentences of `text`
    (`offset` accumulates the token counts of preceding sentences).
    Root edges use the placeholder head "_ROOT" at position 0.
    For empty/None input, writes a single newline and returns.
    """
    if not text:  # covers both `None` and the empty string
        fo.write("\n")
        return
    offset = 0
    doc = nlp(text)
    for sentence in doc.sentences:
        # Hoist the dependency list: it is indexed again below to look up
        # each head token's surface form.
        deps = sentence.dependencies
        for idx, dep_edge in enumerate(deps):
            head_text = "_ROOT"
            head_pos = 0
            # A governor index of '0' marks the sentence root; otherwise
            # resolve the head token within this sentence (indices are
            # 1-based strings in stanfordnlp).
            if dep_edge[0].index != '0':
                head_text = deps[int(dep_edge[0].index) - 1][2].text
                head_pos = int(dep_edge[0].index) + offset
            fo.write(
                dep_edge[1] + "\t" + head_text + "\t" + str(head_pos) + "\t"
                + dep_edge[2].text + "\t" + str(idx + offset + 1) + "\n")
        offset += len(deps)


# Main pass: for every dataset row, record the numeric grade (column 6)
# and emit the dependency parses of the two text columns (columns 2 and 3
# — presumably the reference and student answers; confirm against the
# dataset header), each parse terminated by a blank line.
# NOTE: the former `enumerate` index was unused, so iterate rows directly.
for data in mohler:
    labels.append(float(data[6]))
    get_dependencies(clean_str(data[2]))
    fo.write("\n")
    get_dependencies(clean_str(data[3]))
    fo.write("\n")

fo.close()

# Persist the grades alongside train.tsv, one value per line.
labels = np.array(labels)
np.savetxt('train_labels.txt', labels, delimiter=',')