# -*- coding: utf-8 -*-
"""
Created on Sat Apr 16 14:48:12 2022

@author: 11325
"""

import re
import pandas as pd
import numpy as np
import random as rd
import math
import copy
import tqdm 
import os
import torch


# Path to the bundled sample corpus, relative to this file.
# (The original mixed os.path.join with a Windows-only '.\data\\...' literal,
# which only resolved correctly on Windows.)
test_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', 'simple_data.txt')


class data_processing():
    """Build BERT-style pretraining samples (masked LM + next-sentence prediction).

    Workflow: call ``get_sentens()`` once to read the corpus and build the
    vocabulary, then ``make_batch()`` to sample a batch of training examples.
    Each example is ``[input_ids, segment_ids, masked_tokens, masked_positions,
    is_next]``.
    """

    def __init__(self, data_path, max_pred, max_len, batch_size):
        self.data_path = data_path      # path to a tab-separated text file of sentences
        self.max_pred = max_pred        # max number of masked tokens per sample
        self.max_len = max_len          # fixed (padded) sequence length
        self.batch_size = batch_size    # samples per batch (half positive, half negative)
        self.sentences_idlist = []      # each sentence as a list of token ids
        self.word_idx_dic = {}          # token -> id
        self.idx_word_dic = {}          # id -> token
        self.vocab_size = 0

    def get_sentens(self):
        """Read the corpus, build the vocabulary and id-encode every sentence.

        Populates ``sentences_idlist``, ``word_idx_dic``, ``idx_word_dic`` and
        ``vocab_size`` in place.
        """
        data = pd.read_table(self.data_path, header=None)
        for col in data.columns:
            # regex=True is required: pandas >= 2.0 treats the pattern as a
            # literal string by default, so punctuation would never be removed.
            data[col] = data[col].str.replace(r"[.,!?\-]", '', regex=True)
            data[col] = data[col].str.lower()

        # Flatten each row (possibly several columns) into one sentence string.
        sentences = [" ".join(row) for row in np.array(data).tolist()]

        # Vocabulary: reserved special tokens first, then corpus words.
        # Sorting makes token ids deterministic across runs (a raw set is not).
        corpus_words = sorted({w for s in sentences for w in s.split()})
        word_idx_dic = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[MASK]': 3}
        for offset, word in enumerate(corpus_words):
            word_idx_dic[word] = offset + 4

        # Ids are assigned in insertion order, so enumerating the dict keys
        # yields the correct id -> token mapping.
        idx_word_dic = {i: w for i, w in enumerate(word_idx_dic)}

        self.sentences_idlist = [[word_idx_dic[w] for w in s.split()] for s in sentences]
        self.word_idx_dic = word_idx_dic
        self.idx_word_dic = idx_word_dic
        self.vocab_size = len(word_idx_dic)

    def make_batch(self):
        """Sample a balanced batch of NSP pairs with MLM masking applied.

        Returns a list of ``2 * (batch_size // 2)`` samples, half positive
        (sentence b directly follows sentence a in the corpus) and half
        negative. Pairs longer than ``max_len`` are skipped so every sample
        pads to exactly ``max_len``.
        """
        batch = []
        # Integer halves: the original float comparison (!= batch_size/2)
        # never terminated for odd batch sizes.
        half = self.batch_size // 2
        positivenum = 0   # count of positive (is_next=True) samples
        negtivenum = 0    # count of negative samples
        while positivenum < half or negtivenum < half:
            a_idx = rd.randrange(len(self.sentences_idlist))
            b_idx = rd.randrange(len(self.sentences_idlist))
            token_a = self.sentences_idlist[a_idx]
            token_b = self.sentences_idlist[b_idx]

            # [CLS] sentence_a [SEP] sentence_b [SEP]
            input_id = ([self.word_idx_dic['[CLS]']] + token_a
                        + [self.word_idx_dic['[SEP]']] + token_b
                        + [self.word_idx_dic['[SEP]']])
            # Segment 0 covers [CLS]+a+[SEP]; segment 1 covers b+[SEP].
            segment_id = [0] * (1 + len(token_a) + 1) + [1] * (len(token_b) + 1)
            if len(input_id) > self.max_len:
                # Cannot be padded to max_len -> would break tensor stacking.
                continue

            # Mask ~15% of tokens, at least 1, capped at max_pred.
            num_pred = min(self.max_pred, max(1, int(len(input_id) * 0.15)))
            # Positions eligible for masking (never the special tokens).
            can_masked_position = [i for i, token in enumerate(input_id)
                                   if token != self.word_idx_dic['[CLS]']
                                   and token != self.word_idx_dic['[SEP]']]
            rd.shuffle(can_masked_position)
            masked_token, masked_token_pos = [], []
            for pos in can_masked_position[:num_pred]:
                masked_token_pos.append(pos)
                masked_token.append(input_id[pos])
                # Single draw gives the intended 80/10/10 split; the original
                # drew rd.random() twice, skewing it to roughly 80/18/2.
                r = rd.random()
                if r < 0.8:
                    input_id[pos] = self.word_idx_dic['[MASK]']
                elif r >= 0.9:
                    # Replace with a random real word. Ids 0-3 are special
                    # tokens and randint is inclusive, so sample 4..vocab_size-1
                    # (the original could emit the out-of-range id vocab_size).
                    input_id[pos] = rd.randint(4, self.vocab_size - 1)
                # else: 10% chance to keep the original token unchanged.

            # Zero-pad the sequence and segment ids to max_len.
            num_pading = self.max_len - len(input_id)
            input_id.extend([0] * num_pading)
            segment_id.extend([0] * num_pading)

            # Zero-pad the mask bookkeeping to max_pred.
            if self.max_pred > num_pred:
                pad = self.max_pred - num_pred
                masked_token.extend([0] * pad)
                masked_token_pos.extend([0] * pad)

            is_next = (a_idx + 1 == b_idx)
            if is_next and positivenum < half:
                batch.append([input_id, segment_id, masked_token, masked_token_pos, True])
                positivenum += 1
            elif not is_next and negtivenum < half:
                batch.append([input_id, segment_id, masked_token, masked_token_pos, False])
                negtivenum += 1
        return batch
    
class dataset(torch.utils.data.Dataset):
    """Torch Dataset over pre-built BERT pretraining samples.

    Each item is ``(input_ids, segment_ids, masked_tokens, masked_positions,
    is_next)``, all as LongTensors. Tensor conversion now happens in
    ``__init__`` so the Dataset works even if the caller forgets to invoke
    ``processing_batch()`` (previously it silently reported length 0).
    """

    def __init__(self, batch):
        # batch: list of [input_ids, segment_ids, masked_tokens,
        #                 masked_positions, is_next] rows from make_batch().
        self.batch = batch
        self.input_id = []
        self.segment_id = []
        self.masked_token = []
        self.masked_token_pos = []
        self.is_next = []
        # Tensorize up front; processing_batch() is kept public (and is
        # idempotent) for backward compatibility with existing callers.
        if self.batch:
            self.processing_batch()

    def processing_batch(self):
        """Unzip the batch columns and convert each field to a LongTensor."""
        input_id, segment_id, masked_token, masked_token_pos, is_next = zip(*self.batch)
        self.input_id = torch.LongTensor(input_id)
        self.segment_id = torch.LongTensor(segment_id)
        self.masked_token = torch.LongTensor(masked_token)
        self.masked_token_pos = torch.LongTensor(masked_token_pos)
        self.is_next = torch.LongTensor(is_next)

    def __len__(self):
        return len(self.input_id)

    def __getitem__(self, idx):
        return (self.input_id[idx], self.segment_id[idx], self.masked_token[idx],
                self.masked_token_pos[idx], self.is_next[idx])
            
            
if __name__ == '__main__':
    # Smoke run: build one batch of BERT pretraining data from the sample
    # corpus and wrap it in a shuffling DataLoader.
    data = data_processing(test_path, 5, 30, 6)
    data.get_sentens()
    batch = data.make_batch()
    dataSet = dataset(batch)
    dataSet.processing_batch()
    dataLoader = torch.utils.data.DataLoader(dataSet, batch_size=6, shuffle=True)