import json
import random


def _convert_records(data):
    """Normalize raw items into ``{"text", "spo_list"}`` records.

    Only the ``predicate``/``subject``/``object`` keys of each SPO triple
    are kept; any extra keys present in the input triples are dropped.
    """
    return [
        {
            "text": item["text"],
            "spo_list": [
                {
                    "predicate": spo["predicate"],
                    "subject": spo["subject"],
                    "object": spo["object"],
                }
                for spo in item["spo_list"]
            ],
        }
        for item in data
    ]


def _split_records(records, train_ratio=0.8, test_ratio=0.1):
    """Shuffle *records* in place and split into (train, test, predict).

    The predict partition receives everything left after the train and test
    slices, so the three parts always cover every record exactly once even
    when the ratios do not divide the total evenly.
    """
    random.shuffle(records)
    total = len(records)
    train_size = int(train_ratio * total)
    test_size = int(test_ratio * total)
    train = records[:train_size]
    test = records[train_size:train_size + test_size]
    predict = records[train_size + test_size:]
    return train, test, predict


def _write_jsonl(path, records):
    """Write *records* to *path* as JSON Lines (one object per line).

    ``ensure_ascii=False`` keeps non-ASCII (e.g. Chinese) text readable.
    """
    with open(path, "w", encoding="utf-8") as f:
        for record in records:
            json.dump(record, f, ensure_ascii=False)
            f.write("\n")


def main():
    """Read the source JSON, shuffle, split 8:1:1, and write JSONL files."""
    # Load the annotated extraction data.
    with open("text_entity_relation_extraction.json", "r", encoding="utf-8") as f:
        data = json.load(f)

    records = _convert_records(data)
    train, test, predict = _split_records(records)

    _write_jsonl("train.json", train)
    _write_jsonl("test.json", test)
    _write_jsonl("predict.json", predict)


if __name__ == "__main__":
    main()