# _*_ coding:utf-8 _*_

import glob
import os
import random
import pandas as pd
from config import *

# Build the offset-to-label mapping from a brat-style annotation file
def get_annotation(ann_path):
    """Parse a brat-style .ann file into {char_offset: BIO_label}.

    Each entity line looks like 'T1\\tDisease 1845 1850\\t1型糖尿病'
    (id, 'Type start end', surface form, tab-separated). Every character
    position covered by an entity is mapped to 'B-<Type>' for the first
    character and 'I-<Type>' for the rest.

    :param ann_path: path to the .ann annotation file (UTF-8)
    :return: dict mapping character offset -> BIO label string
    """
    anns = {}
    with open(ann_path, 'r', encoding='utf-8') as file:
        for line in file:
            parts = line.split('\t')
            # Skip blank or malformed lines that carry no span field
            # (the original crashed with IndexError on such lines)
            if len(parts) < 2:
                continue
            arr = parts[1].split()  # e.g. ['Disease', '1845', '1850']
            name = arr[0]           # entity type, e.g. 'Disease'
            start = int(arr[1])     # start character offset
            # Last field as end offset; tolerates discontinuous spans
            # like 'Disease 1845 1850;1852 1855'
            end = int(arr[-1])
            # Drop implausibly long annotations (> 50 characters)
            if end - start > 50:
                continue
            anns[start] = 'B-' + name
            for i in range(start + 1, end):
                anns[i] = 'I-' + name
    return anns

def get_text(txt_path):
    """Return the entire contents of a UTF-8 text file as one string."""
    with open(txt_path, 'r', encoding='utf-8') as fh:
        content = fh.read()
    return content

# Align every character of each source text with its entity label
def generate_annotation():
    """For every .txt file in ORIGIN_DIR, pair each character with the
    BIO label from its matching .ann file and export the result as a
    two-column CSV (word, label) into ANNOTATION_DIR.
    """
    for txt_path in glob.glob(ORIGIN_DIR + '*.txt'):
        ann_path = txt_path[:-3] + 'ann'
        # Entity labels keyed by character offset, parsed from the .ann file
        anns = get_annotation(ann_path)
        # Full text of the matching .txt file
        text = get_text(txt_path)
        # Start with every character labeled 'O' (outside any entity)
        df = pd.DataFrame({'word': list(text), 'label': ['O'] * len(text)})
        # Overwrite 'O' with 'B-'/'I-' labels at the annotated offsets
        df.loc[list(anns.keys()), 'label'] = list(anns.values())
        # Export under the original file name, e.g. '147_10.txt'
        file_name = os.path.basename(txt_path)
        df.to_csv(ANNOTATION_DIR + file_name, header=None, index=None)

#  Split the annotated files into training and test sets
def split_sample(test_size=0.3):
    """Shuffle the annotated files deterministically, take the first
    `test_size` fraction as the test set and the rest as the training
    set, then merge each group into its sample file.

    :param test_size: fraction of files assigned to the test set
    """
    files = glob.glob(ANNOTATION_DIR + '*.txt')
    random.seed(1)  # fixed seed keeps the split reproducible
    random.shuffle(files)
    split_at = int(len(files) * test_size)
    test_files, train_files = files[:split_at], files[split_at:]
    # Concatenate each group into a single sample file
    merge_file(train_files, TRAIN_SAMPLE_PATH)
    merge_file(test_files, TEST_SAMPLE_PATH)

# Concatenate several files into one target file
def merge_file(files, target_path):
    """Append the contents of every file in `files` to `target_path`.

    NOTE: the target is opened in append mode ('a'), so re-running the
    pipeline without deleting the target first duplicates its content.

    :param files: iterable of source file paths (UTF-8 text)
    :param target_path: file the contents are appended to
    """
    with open(target_path, 'a', encoding='utf-8') as out:
        for path in files:
            # Context manager closes each source handle promptly
            # (the original leaked handles via bare open().read())
            with open(path, 'r', encoding='utf-8') as src:
                out.write(src.read())

# Build the vocabulary table from the training sample
def generate_vocab():
    """Write a word->index CSV to VOCAB_PATH.

    PAD and UNK occupy the first two slots; the remaining words are
    ordered by descending frequency in the training sample and the
    whole list is capped at VOCAB_SIZE entries.
    """
    df = pd.read_csv(TRAIN_SAMPLE_PATH, usecols=[0], names=['word'])
    # Special tokens first, then words from most to least frequent
    words = [WORD_PAD, WORD_UNK] + df['word'].value_counts().index.tolist()
    words = words[:VOCAB_SIZE]
    # Map each word to its position in the list
    vocab_dict = {word: idx for idx, word in enumerate(words)}
    vocab = pd.DataFrame(list(vocab_dict.items()))
    vocab.to_csv(VOCAB_PATH, header=None, index=None)

# Build the label table from the training sample
def generate_label():
    """Write a label->index CSV to LABEL_PATH, labels ordered by
    descending frequency in the training sample."""
    df = pd.read_csv(TRAIN_SAMPLE_PATH, usecols=[1], names=['label'])
    labels = df['label'].value_counts().index.tolist()
    # Map each label to its position in the frequency-ordered list
    label_dict = {lab: idx for idx, lab in enumerate(labels)}
    label = pd.DataFrame(list(label_dict.items()))
    label.to_csv(LABEL_PATH, header=None, index=None)

if __name__ == '__main__':
    # Example: inspect the annotation map for a single file
    # anns = get_annotation('./input/origin/0.ann')
    # print(anns)
    # {1845: 'B-Disease', 1846: 'I-Disease', 1847: 'I-Disease', 1848: 'I-Disease', 1849: 'I-Disease'}

    # Step 1: build the character-to-label correspondence files
    #generate_annotation()

    # Step 2: split into training and test sets
    # split_sample()

    # Step 3: generate the vocabulary and label tables
    # generate_vocab()
    # generate_label()
    print('end')