# -*- coding: utf-8 -*-
# @Time    : 2021/11/27 14:43
# @Author  : zhangyi
# @FileName: make_train_data.py
# @Software: vscode

'''
Generate train_data.pkl (and the val/test variants) to be used as the training set.
'''

import json
from pickle import NONE
import pickle
import re
from transformers import BertTokenizer
import pickle, random, copy
import torch
import numpy as np
random.seed(2021)

def prepare_data(file_path):
    """Read one JSON record per line and extract (sentence, head, pos) triples.

    Each line must be a JSON object with:
      - 'Sentence': list of chunks, each chunk a space-separated sequence of
        'word/POS' tokens (e.g. '为 提高/v 产品/n 质量/n').
      - 'Deps': list of {'Head': chunk_id,
                         'Dep': [{'key': 'obj'|'sbj', 'value': chunk_id}, ...]}.

    Returns a list of (sentence, head, pos) tuples where
      sentence: the chunks with '/POS' tags stripped,
      head:     (head_chunk_id, 'obj'|'sbj', dependent_chunk_id) triples,
      pos:      every individual 'word/POS' token, in order.
    """
    # Compile once (hoisted out of the loop); raw string avoids the invalid
    # '\/' escape warning the original pattern produced on Python >= 3.12.
    tag_pattern = re.compile(r'/[a-zA-Z]*')
    sent_head = []
    with open(file_path, 'r', encoding='utf-8') as file:
        content = file.readlines()
        print(len(content))
        for line in content:
            text = json.loads(line.strip())
            sentence, head, pos = [], [], []
            for item in text['Sentence']:
                sentence.append(tag_pattern.sub('', item))  # drop '/v', '/nr', ...
                pos.extend(item.split())
            for item in text['Deps']:
                head_id = item.get('Head')
                for d in item.get('Dep'):
                    # 'key' is a single value, so obj/sbj are mutually exclusive.
                    if d.get('key') == 'obj':
                        head.append((head_id, 'obj', d.get('value')))
                    elif d.get('key') == 'sbj':
                        head.append((head_id, 'sbj', d.get('value')))
            sent_head.append((sentence, head, pos))
    return sent_head
'''
# ['抓住', '这 一 环', '，', '就', '为 提高 产品 质量', '打下 了', '基础', '。'], 
# [(0, 'obj', 1), (5, 'obj', 6)]
# ['荆毅/nr', '定居/v', '城市/n', '的/u', '第一/m', '件/q', '事/n', '，/w', '是/v', '给/p', '三/m', '岁/q', '的/u', '女儿/n', '找/v', '幼儿园/n', '，/w', '妻/Ng', '命/n', '我/r', '去/v', '寻/v', '。/w']
'''
def step2(path):
    """Convert the global `sent_head` triples into character-level training
    records and pickle them to `path`.

    For every (sentence, head, pos) triple, BERT encodes by character, so
    this maps each word-level dependency (head_id, rel, dep_id) to
    word-slot indices in `num`, the per-word character-index table.

    Reads module globals: sent_head, word2def, punc, z, y.
    Side effects: updates z (all words seen) and y (words that have a
    sememe definition), writes the pickled result list to `path`, and
    prints a sample plus the sememe-coverage ratio.
    """
    result_sent,result_idx=[],[]
    with open(path,'wb') as file:
        for item in sent_head:
            sentence,head,pos=item[0],item[1],item[2]
            s=[re.sub(' ','',i) for i in sentence]  # chunks with inner spaces removed, e.g. ['图', '为', '遇难儿童的亲人', '悲痛欲绝', '。']
            # Build `num`: for every word (split from its chunk), the list of
            # character indices it occupies in the space-free sentence. BERT
            # encodes per character, so these index lists are needed later to
            # regroup characters back into words.
            num,i=[],0
            for word in sentence:
                for k in word.strip().split():
                    num.append(list(range(i,i+len(k))))
                    i=i+len(k)
            idx,record=[],[]
            for key in head:  # key is e.g. (0, 'obj', 1) over chunk ids
                h=sentence[key[0]].split()[0]  # head predicate = first word of the head chunk
                before=s[0:key[0]]  # all chunks before the head chunk, to compute its char offset
                id1,l=[],0
                for i in before:
                    l+=len(i)
                id1.append(l)
                for i in range(1,len(h)):
                    id1.append(l+i)          # id1 = character indices of the head predicate in the whole sentence
                head_def=word2def.get(h,'')  # sememe definition of the head predicate ('' if absent)
                z.add(h)
                if head_def!='':
                    y.add(h)
                rel=key[1]
                # Skip dependencies whose dependent chunk is empty.
                if len(sentence[key[2]].split())==0:
                    continue
                # Dependent word = last word of the dependent chunk, skipping a
                # trailing punctuation token; n = char length of the words
                # preceding it inside the chunk.
                if sentence[key[2]].split()[-1] in punc and len(sentence[key[2]].split())>1:
                    t=sentence[key[2]].split()[-2]
                    n=len(''.join(i for i in sentence[key[2]].split()[:-2]))
                else:
                    t=sentence[key[2]].split()[-1]
                    n=len(''.join(i for i in sentence[key[2]].split()[:-1]))
                _def=word2def.get(t,'')  # sememe definition of the dependent word
                z.add(t)  # track sememe coverage over all words seen
                if _def!='':
                    y.add(t)
                # Recompute the offset from chunk positions instead of
                # str.index, to stay correct when words repeat in the sentence.
                before=s[0:key[2]]
                id2,l=[],0
                for i in before:
                    l+=len(i)
                id2.append(l+n)
                for i in range(1,len(t)):
                    id2.append(l+n+i)
                # NOTE(review): num.index raises ValueError if id1/id2 is not a
                # word boundary in `num` — presumably guaranteed by the data.
                idx.append((num.index(id1),rel,num.index(id2)))
                record.append((h,rel,t))
            sent=[]
            # Keep only sentences with at least 10 words.
            if len(num)<10:
                continue
            else:
                sent.append(''.join(i for i in s))  # full sentence as one string, for BERT encoding
                sent.append(num)                    # per-word character-index lists
                sent.append(idx)                    # e.g. [([38], 'sbj', [7]), ([45, 46], 'sbj', [41, 42])]
                sent.append(record)                 # e.g. [('试', 'sbj', '我'), ('接受', 'sbj', '领导')]
                sent.append(pos)                    # tokens with POS tags
                result_sent.append(sent)
        print(result_sent[:2],len(result_sent))
        pickle.dump(result_sent,file)
        print(len(z),len(y),len(y)/len(z))


if __name__=="__main__":
    # z: every head/dependent word seen; y: the subset having a sememe
    # definition. Both are filled by step2() to report coverage.
    z,y=set(),set()
    with open(r"word2def.pkl",'rb') as file:
        word2def=pickle.load(file)  # word -> sememe definition mapping
    # Punctuation tokens skipped when taking the last word of a sbj/obj chunk.
    punc = ['，','。','！','？','；','“','”','：','、','）','（']
    # Pick exactly one input/output pair per run; the unused alternatives are
    # kept as comments (the original assigned them in sequence, leaving the
    # first assignments as dead code).
    # file_path = 'rmrb-12w.txt'
    # file_path = 'rmrb-sememe12.txt'
    # file_path = 'val.txt'
    file_path = 'test.txt'
    sent_head=prepare_data(file_path)  # head & sbj & obj info from the dependency data
    print(sent_head[0])
    # path = 'final/' + 'train_data.pkl'
    # path = 'final/' + 'val_data.pkl'
    path = 'final/'+ 'test_data.pkl'
    step2(path)

    
        
        
'''
train_data.pkl stores result_sent: a list in which each item holds the following fields:
item[0]:'荆毅定居城市的第一件事，是给三岁的女儿找幼儿园，妻命我去寻。'

item[1]:[[0, 1], [2, 3], [4, 5], [6], [7, 8], [9], [10], [11], [12], [13], [14], [15], [16], [17, 18], [19], [20, 21, 22], [23], [24], [25], [26], [27], [28], [29]]

item[2]:[(8, 'sbj', 6), (8, 'obj', 14), (14, 'obj', 15), (18, 'sbj', 17), (18, 'obj', 19), (21, 'sbj', 19)]
[([12], 'sbj', [10]), ([12], 'obj', [19]), ([19], 'obj', [20, 21, 22]), ([25], 'sbj', [24]), ([25], 'obj', [26]), ([28], 'sbj', [26])]

item[3]:[('是', 'sbj', '事'), ('是', 'obj', '找'), ('找', 'obj', '幼儿园'), ('命', 'sbj', '妻'), ('命', 'obj', '我'), ('寻', 'sbj', '我')]

item[4]:['荆毅/nr', '定居/v', '城市/n', '的/u', '第一/m', '件/q', '事/n', '，/w', '是/v', '给/p', '三/m', '岁/q', '的/u', '女儿/n', '找/v', '幼儿园/n', '，/w', '妻/Ng', '命/n', '我/r', '去/v', '寻/v', '。/w']
'''   
            
            
            
            
            
            
            
            
            
            

def train_Dataset():
    """Build the BERT training samples from train_data.pkl and pickle them
    to train_loader.pkl.

    For every sentence and every dependency triple it emits TWO samples:
      1. the encoding with the dependent word's characters replaced by the
         [MASK] token, and
      2. the original, unmasked encoding,
    each as [input_ids, token_type_ids, attention_mask, raw sentence,
    word char-index table, triple, (head, rel, dep) words].

    NOTE(review): relies on a module-level `tokenizer` (a BertTokenizer,
    per the import at the top of the file) that is not defined anywhere in
    this file — confirm it is created before this function is called.
    """
    with open('train_data.pkl', 'rb') as out_file:
        train_data = pickle.load(out_file)
        print("len(sent_relp):", len(train_data))
    train = []
    for item in train_data:
        encoded_dict = tokenizer(item[0], return_tensors="pt",
                                 padding='max_length', max_length=512,
                                 truncation=True)
        ids = encoded_dict['input_ids'].squeeze(0)
        for r, trip in enumerate(item[2]):
            t = item[1][trip[2]]  # character indices of the dependent word in item[0]
            # Replace the dependent word's characters with token id 103
            # ([MASK] in the BERT vocabulary); `i - 1` accounts for the
            # leading [CLS] token the tokenizer prepends.
            masked_ids = torch.from_numpy(np.array(
                [ids[i] if i - 1 not in t else 103 for i in range(len(ids))]))
            # Masked sample.
            train.append([masked_ids.unsqueeze(0),
                          encoded_dict['token_type_ids'],
                          encoded_dict['attention_mask'],
                          item[0], item[1], trip, item[3][r]])
            # Matching original (unmasked) sample.
            train.append([encoded_dict['input_ids'],
                          encoded_dict['token_type_ids'],
                          encoded_dict['attention_mask'],
                          item[0], item[1], trip, item[3][r]])
    with open('train_loader.pkl', 'wb') as file:
        pickle.dump(train, file)
                     

