
# #  取训练数据
# count=0
# out=open('rmrb-48w.txt','w',encoding='utf-8')
# with open('/home/qincong/data3/news/新闻语料/baokan-done/service_json_with_segment/baokan_pred_segment_03.json','r',encoding='utf-8') as file:
#     for line in file:
#         if count<=480000:
#             out.write(line)
#             count+=1
#         else:
#             break
# out.close()


#  取验证集数据
# count=0
# out=open('val.txt','w',encoding='utf-8')
# with open('/home/qincong/data3/news/新闻语料/baokan-done/service_json_with_segment/baokan_pred_segment_02.json','r',encoding='utf-8') as file:
#     for line in file:
#         if count<=1000:
#             out.write(line)
#             count+=1
#         else:
#             break
# out.close()

#  取测试集数据
# count=0
# out=open('test_01.txt','w',encoding='utf-8')
# with open('/home/qincong/data3/news/新闻语料/baokan-done/service_json_with_segment/baokan_pred_segment_00.json','r',encoding='utf-8') as file:
#     for line in file:
#         if count>=1000 and count<=2000:
#             out.write(line)
#             count+=1
#         elif count>2000:
#             break
#         else:
#             count+=1
# out.close()

# 筛选出句子中有效词，可以找到义原的词占比80%以上，作为训练文件
# import json
# import re
# import pickle
# def get_sememe_vec():
#     sememe_vec=dict()
#     with open('sememe-vec.txt','r',encoding='utf-8') as file:
#         content=file.readlines()
#         for item in content[1:]:
#             w=item.strip().split()[0]
#             v=[float(i) for i in item.strip().split()[1:]]
#             sememe_vec[w]=v
#     with open(r"word2def.pkl",'rb') as file:
#         word2def=pickle.load(file)
#     with open(r"def2word.pkl",'rb') as file:
#         def2word=pickle.load(file)
#     return sememe_vec,word2def,def2word

# sememe_vec, word2def, def2word = get_sememe_vec()





#  取训练数据
# count=0
# out=open('rmrb-sememe12.txt','w',encoding='utf-8')
# with open('/home/qincong/data3/news/新闻语料/baokan-done/service_json_with_segment/baokan_pred_segment_03.json','r',encoding='utf-8') as file:
#     for line in file:
#         if count<=120000:
#             jsonData=line.strip()
#             text = json.loads(jsonData)
#             sentence,head,pos = [],[],[]
#             for item in text['Sentence']:
#                 pos.extend(item.split())
#             new, c = [], 0
#             for key in pos:
#                 if key.split('/')[1] not in ['w','u','c','a','f','m','d','y','p']:
#                     word=key.split('/')[0]
#                     new.append(word)
#                     _def = word2def.get(word,[])  
#                     if len(_def) !=0:
#                         c+=1
#             if len(new)!=0 and c/len(new) >=0.8:
#                 out.write(line)
#                 count+=1
#         else:
#             count+=1
# out.close()


# 取验证数据
# count=0
# out=open('val.txt','w',encoding='utf-8')
# with open('/home/qincong/data3/news/新闻语料/baokan-done/service_json_with_segment/baokan_pred_segment_02.json','r',encoding='utf-8') as file:
#     for line in file:
#         if count>=1000 and count<=2000:
#             jsonData=line.strip()
#             text = json.loads(jsonData)
#             sentence,head,pos = [],[],[]
#             for item in text['Sentence']:
#                 pos.extend(item.split())
#             new, c = [], 0
#             for key in pos:
#                 if key.split('/')[1] not in ['w','u','c','a','f','m','d','y','p']:
#                     word=key.split('/')[0]
#                     new.append(word)
#                     _def = word2def.get(word,[])  
#                     if len(_def) !=0:
#                         c+=1
#             if len(new)!=0 and c/len(new) >=0.8:
#                 out.write(line)
#                 count+=1
#         elif count>2000:
#             break
#         else:
#             count+=1
# out.close()



# 取测试数据
# count=0
# out=open('test.txt','w',encoding='utf-8')
# with open('/home/qincong/data3/news/新闻语料/baokan-done/service_json_with_segment/baokan_pred_segment_00.json','r',encoding='utf-8') as file:
#     for line in file:
#         if count>=1000 and count<=2000:
#             jsonData=line.strip()
#             text = json.loads(jsonData)
#             sentence,head,pos = [],[],[]
#             for item in text['Sentence']:
#                 pos.extend(item.split())
#             new, c = [], 0
#             for key in pos:
#                 if key.split('/')[1] not in ['w','u','c','a','f','m','d','y','p']:
#                     word=key.split('/')[0]
#                     new.append(word)
#                     _def = word2def.get(word,[])  
#                     if len(_def) !=0:
#                         c+=1
#             if len(new)!=0 and c/len(new) >=0.8:
#                 out.write(line)
#                 count+=1
#         elif count>2000:
#             break
#         else:
#             count+=1
# out.close()




# 取数据给贵荣 (extract a data sample for Guirong)
# Copy the first MAX_LINES lines of the segmented-news corpus into 200w_all.txt.
#
# Fixes vs. the previous version:
#   * Both files are managed by `with`, so the output handle is closed
#     (and flushed) even if an exception interrupts the copy.
#   * The old `count <= 500000` condition wrote 500,001 lines (off-by-one);
#     islice(src, MAX_LINES) copies exactly 500,000.
from itertools import islice

MAX_LINES = 500_000
SRC_PATH = '/home/qincong/data3/news/新闻语料/baokan-done/service_json_with_segment/baokan_pred_segment_03.json'

with open(SRC_PATH, 'r', encoding='utf-8') as src, \
        open('200w_all.txt', 'w', encoding='utf-8') as out:
    # islice stops reading after MAX_LINES lines without loading the
    # whole corpus into memory; writelines batches the writes.
    out.writelines(islice(src, MAX_LINES))