from sklearn.preprocessing import MultiLabelBinarizer as MLB
import pickle, os, random

def convert_to_multi_classes_idx(path="./baidu_95/baidu_95.csv", dataset="baidu_95", limit=1000):
    """Convert a raw multi-label CSV into train/dev/test/infer splits with one-hot labels.

    Each input line is expected to look like ``"<space-separated labels>,<text...>"``:
    column 0 holds the labels, the remaining columns are joined back into the text.
    (Different source files need their own customized converter.)

    Outputs written under ``{dataset}/data/``:
      - ``train.txt`` / ``dev.txt`` / ``test.txt``: ``text\\t<one-hot flags, tab-separated>``,
        split 80/10/10 after a seeded shuffle (seed 123, reproducible).
      - ``infer.txt``: text-only copy of the test split.
      - ``class.txt``: one label name per line, in binarizer column order.
      - ``mlb.pkl``: the fitted ``MultiLabelBinarizer`` for decoding predictions later.

    Args:
        path: Input CSV file path.
        dataset: Output directory prefix; files go to ``{dataset}/data/``.
        limit: Maximum number of input rows to process (default 1000, matching
            the previous hard-coded cap).
    """
    # makedirs (not the non-existent os.path.mkdirs) creates intermediate dirs;
    # exist_ok avoids a check-then-create race.
    os.makedirs(f"{dataset}/data", exist_ok=True)

    with open(path, 'r') as f1:
        from_file = f1.readlines()

    from_file = [line.split(",") for line in from_file[:limit]]

    # Each entry becomes [text, [label1, label2, ...]].
    # Text is everything after the first comma, with tabs stripped so it can't
    # collide with the tab-separated output format.
    to_file = [["".join(line[1:]).strip().replace("\t", ""), line[0].split()] for line in from_file]
    all_labels = [line[1] for line in to_file]

    mlb = MLB()
    one_hot_labels = mlb.fit_transform(all_labels)
    one_hot_labels = [[str(i) for i in line.tolist()] for line in one_hot_labels]

    # Serialize as "text\t0\t1\t0..." lines.
    to_file = [f"{line1[0]}\t" + '\t'.join(line2) for line1, line2 in zip(to_file, one_hot_labels)]
    random.seed(123)  # fixed seed so the split is reproducible across runs
    random.shuffle(to_file)

    # 80% train / 10% dev / 10% test.
    train_idx = int(len(to_file) * 0.8)
    dev_idx = int(len(to_file) * 0.9)

    with open(f"{dataset}/data/train.txt", 'w') as f1:
        for line in to_file[:train_idx]:
            f1.write(line + '\n')

    with open(f"{dataset}/data/dev.txt", 'w') as f1:
        for line in to_file[train_idx:dev_idx]:
            f1.write(line + '\n')

    with open(f"{dataset}/data/test.txt", 'w') as f1:
        for line in to_file[dev_idx:]:
            f1.write(line + '\n')

    # infer.txt mirrors the test split but keeps only the text column.
    with open(f"{dataset}/data/infer.txt", 'w') as f1:
        for line in to_file[dev_idx:]:
            f1.write(line.split('\t')[0] + '\n')

    label_names = mlb.classes_.tolist()
    with open(f"{dataset}/data/class.txt", 'w') as f1:
        for name in label_names:
            f1.write(name + '\n')

    # Persist the fitted binarizer so predictions can be mapped back to labels.
    with open(f"{dataset}/data/mlb.pkl", 'wb') as f1:
        pickle.dump(mlb, f1)

    print("DONE.")

def _main():
    """Script entry point: run the default baidu_95 conversion."""
    convert_to_multi_classes_idx()


if __name__ == "__main__":
    _main()

