
def read_corpus(corpus_path):
    """
    Read a whitespace-separated tagging corpus and return its samples.

    The corpus has one "<char> <label>" pair per line; samples are
    separated by blank lines.

    :param corpus_path: path to the utf-8 encoded corpus file
    :return: list of (characters, labels) tuples, one per sample
    """
    data = []
    sentences, tags = [], []
    with open(corpus_path, encoding="utf-8") as f:
        # iterate the file lazily instead of readlines() -- no need to
        # materialize the whole corpus in memory
        for line in f:
            if line.strip():
                char, label = line.strip().split()
                sentences.append(char)
                tags.append(label)
            elif sentences:
                # blank line terminates the current sample; the `elif`
                # guard skips consecutive blank lines (no empty samples)
                data.append((sentences, tags))
                sentences, tags = [], []
    # BUG FIX: the original dropped the final sample whenever the file did
    # not end with a blank line -- flush any pending sample here
    if sentences:
        data.append((sentences, tags))
    return data

# Generate dataset.txt following the bi-lstm-crf sample format:
# one sample per line, "<sentence>\t["tag1","tag2",...]"
# https://github.com/jidasheng/bi-lstm-crf/tree/master/bi_lstm_crf/app/sample_corpus
def generate_dataset(corpus_path, dataset_file="dataset.txt"):
    """
    Convert a corpus into the bi-lstm-crf dataset format and append it
    to *dataset_file*.

    :param corpus_path: path of the raw corpus (parsed with read_corpus)
    :param dataset_file: output file; lines are appended, so repeated runs
        accumulate -- NOTE(review): confirm append (not overwrite) is intended
    :return: True on success, False on an I/O failure
    """
    all_data = read_corpus(corpus_path)
    try:
        # "a" replaces the original "+a": the file is only written, never
        # read back, so the read flag was unnecessary
        with open(dataset_file, "a", encoding="utf-8") as ds:
            for sentence, tags in all_data:
                sent = "".join(sentence)
                # tags rendered as a JSON-style list: ["B-LOC","I-LOC",...]
                tag_str = ",".join('"{}"'.format(tag) for tag in tags)
                ds.write(sent + "\t" + "[" + tag_str + "]\n")
    except OSError as err:
        # only trap I/O errors; the original bare `except:` also hid
        # genuine bugs (NameError, KeyboardInterrupt, ...) without a trace
        print("Failed to save dataset of {}: {}".format(corpus_path, err))
        return False
    print("Succeed to save dataset of {} to {}".format(corpus_path, dataset_file))
    return True


# TODO: build a vocabulary (dict of unique tokens) from training_data


if __name__ == '__main__':
    # NOTE(review): corpus_path (the train split) is assigned but never
    # used below -- presumably a generate_dataset(corpus_path) call for
    # dataset.txt is missing; confirm before adding it.
    corpus_path = r"D:\gitee\bilstm_crf\bilstm_crf\china-people-daily-ner-corpus\example.train"
    corpus_test = r"D:\gitee\bilstm_crf\bilstm_crf\china-people-daily-ner-corpus\example.test"
    corpus_dev = r"D:\gitee\bilstm_crf\bilstm_crf\china-people-daily-ner-corpus\example.dev"
    generate_dataset(corpus_test, dataset_file="dataset_test.txt")
    generate_dataset(corpus_dev, dataset_file="dataset_dev.txt")

