import torch
from torch import nn
from torch.nn import functional as F
from torch.utils import data
from torch.optim import Adam
import numpy as np
import math
from tqdm import tqdm
from torch import load,save
'''
Toy sentence "translation" task.

The target is the source sequence reversed, with every digit d mapped to
9-d, letters uppercased, and the source's last word repeated once.
'''


# Use the GPU when available, otherwise fall back to CPU.
device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Vocabulary definition: 3 special tokens + 10 digits + 26 letters.
zd_s ='<SOS>,<EOS>,<PAD>,0,1,2,3,4,5,6,7,8,9,q,w,e,r,t,y,u,i,o,p,a,s,d,f,g,h,j,k,l,z,x,c,v,b,n,m'
# 39 entries in total
zd_xr = zd_s.split(',')
# source vocabulary: token -> id (lowercase letters)
zd_x= {w: i for i, w in enumerate(zd_xr)}
# target vocabulary: token -> id (uppercase; the special tokens are unchanged by .upper())
zd_y = {w.upper(): i for i, w in enumerate(zd_xr)}
# inverse target vocabulary: id -> token
zd_y_index = {i: w.upper() for i, w in enumerate(zd_xr)}
# target token strings indexed by id
zd_yr = zd_s.upper().split(',')

# Total number of training samples (nominal epoch size for the DataLoader).
train_sample_count = 15*10**4

# Randomly generate one (source, target) sentence pair.
def get_data(print_data:bool = False):
    '''
    Generate a single training sample.

    The source is a random sequence of 30-47 words (digits and lowercase
    letters).  The target is the source with every digit d replaced by 9-d
    and letters uppercased, the LAST word duplicated once, and the whole
    sequence reversed.  Both sides are wrapped in <SOS>/<EOS>, padded with
    <PAD> and converted to LongTensors on `device`; the source is truncated
    to `sentence_words` tokens and the target to `sentence_words + 1`
    (one extra token, because training shifts the target window by one).

    :param print_data: when True, print the token strings for debugging.
    :return: (x, y) LongTensors of length 50 and 51 respectively.
    '''
    words='0,1,2,3,4,5,6,7,8,9,q,w,e,r,t,y,u,i,o,p,a,s,d,f,g,h,j,k,l,z,x,c,v,b,n,m'.split(',')

    # Sampling weights: 1..10 for the digits, 1..26 for the letters,
    # normalised to probabilities — later entries are more likely.
    weights = np.concatenate([np.arange(1, 11, 1), np.arange(1, 27, 1)])
    weights = weights / weights.sum()

    # Draw the sentence length (np.random.randint's high bound is exclusive).
    length = np.random.randint(30, 48)
    src = np.random.choice(words, size=length, replace=True, p=weights).tolist()

    def transform(tok: str):
        # Uppercase letters; map digit d to 9 - d.
        tok = tok.upper()
        if tok.isdigit():
            return str(9 - int(tok))
        return tok

    tgt = [transform(tok) for tok in src]
    tgt.append(tgt[-1])  # duplicate the last word
    tgt.reverse()        # then reverse the whole sequence

    # Add start/end markers and pad, then cut to the fixed lengths.
    src = (['<SOS>'] + src + ['<EOS>'] + ['<PAD>'] * sentence_words)[:sentence_words]
    # Target keeps one extra token for the one-step-shifted training window.
    tgt = (['<SOS>'] + tgt + ['<EOS>'] + ['<PAD>'] * sentence_words)[:(sentence_words + 1)]

    if print_data:
        orgx = ''.join(src)
        print(f'orgx={orgx}')
        orgy = ''.join(tgt)
        print(f'orgy={orgy}')

    # Convert token strings to id vectors on the training device.
    src_ids = [zd_x[tok] for tok in src]
    tgt_ids = [zd_y[tok] for tok in tgt]
    return (torch.LongTensor(src_ids).to(device),
            torch.LongTensor(tgt_ids).to(device))

class Mydataset(data.Dataset):
    """Synthetic dataset: every item is a freshly generated random (x, y) pair."""

    def __len__(self):
        # Nominal epoch size; samples are generated on the fly, not stored.
        return train_sample_count

    def __getitem__(self, item):
        # The index is ignored — each access produces a brand-new random sample.
        return get_data()

# Word-embedding dimension.
word_embeding_dim=32

# Vocabulary size: 26 letters + 10 digits + <SOS> + <EOS> + <PAD>.
vocabsize=39

# Fixed sentence length: 50 tokens per sentence, shorter ones padded with <PAD>.
sentence_words = 50



# Token embedding combined with a fixed sinusoidal position encoding.
class PositionEmbedding(nn.Module):
    """Embed token ids and add a non-trainable position encoding.

    pe[pos, i] = sin(pos / 1e4**(i / d_model)) for even i,
                 cos(pos / 1e4**(i / d_model)) for odd i.
    """

    def __init__(self):
        super().__init__()

        def encode(pos, dim):
            # Angle for word position `pos` at embedding dimension `dim`.
            angle = pos / (1e4 ** (dim / word_embeding_dim))
            return math.sin(angle) if dim % 2 == 0 else math.cos(angle)

        # [1, 50, 32] constant table; registered as a buffer so it is saved
        # with the model and moved by .to(device), but never updated.
        table = torch.tensor(
            [[encode(pos, dim) for dim in range(word_embeding_dim)]
             for pos in range(sentence_words)]
        ).unsqueeze(0)
        self.register_buffer('pe', table)

        # Learned word embedding, initialised N(0, 0.1).
        self.embed = nn.Embedding(vocabsize, word_embeding_dim)
        self.embed.weight.data.normal_(0, 0.1)

    def forward(self, x):
        # [b, 50] ids -> [b, 50, 32]; position table broadcasts over the batch:
        # [b, 50, 32] + [1, 50, 32] -> [b, 50, 32]
        return self.embed(x) + self.pe


# Build the <PAD> attention mask for the encoder input.
def mask_pad(data):
    """data: [b, 50] token ids (not yet embedded).

    Returns a bool mask of shape [b, 50, 50] where column j is True when
    token j is <PAD>: no query position may attend to padding.  The pad
    ROWS are left False — a pad token itself may still attend to others.
    """
    # [b, 50] flags "is this key a pad?", reshaped to one row per sentence:
    # [b, 50] -> [b, 1, 50]
    is_pad = (data == zd_x['<PAD>']).reshape(-1, 1, sentence_words)

    # Attention is a 50x50 matrix (every word against every word), so
    # replicate the key row for all 50 query positions:
    # [b, 1, 50] -> [b, 50, 50]
    is_pad = is_pad.expand(-1, sentence_words, sentence_words)

    return is_pad.to(device)


# Build the causal (upper-triangular) mask combined with the <PAD> mask
# for the decoder input.
def mask_tril(data):
    """data: [b, 50] token ids (not yet embedded).

    Returns a bool mask of shape [b, 50, 50]; True means "not visible".
    Each word may attend only to itself and earlier words, and <PAD>
    positions are never visible, e.g. for length 5:

    [[0, 1, 1, 1, 1],
     [0, 0, 1, 1, 1],
     [0, 0, 0, 1, 1],
     [0, 0, 0, 0, 1],
     [0, 0, 0, 0, 0]]
    """
    # Strict upper triangle (diagonal excluded): 1 = future word, hidden.
    tril = 1 - torch.tril(torch.ones(data.shape[0], sentence_words, sentence_words, dtype=torch.long))
    tril = tril.to(device)

    # Mark every <PAD> key column as invisible too.
    # [b, 50] -> [b, 1, 50] -> [b, 50, 50]
    mask = data == zd_y['<PAD>']
    mask = mask.reshape(-1, 1, sentence_words)
    mask = mask.expand(-1, sentence_words, sentence_words)
    # BUG FIX: the original computed `mask.to(device)` but discarded the
    # result (tensors are immutable w.r.t. .to), so the move never happened.
    # Assign it so masked_fill_ below is guaranteed to run on one device.
    mask = mask.to(device)

    # Merge the pad mask into the causal mask.
    tril.masked_fill_(mask, 1)

    # Convert to bool: True = position is masked out.
    mask = tril > 0
    return mask.to(device)

# Number of attention heads.
number_of_heads=2
class SentenceModel(nn.Module):
    """Encoder-decoder Transformer for the toy reverse-translation task."""

    def __init__(self):
        super().__init__()
        # Separate embedding tables for the source and target vocabularies.
        self.x_embeding = PositionEmbedding()
        self.y_embeding = PositionEmbedding()

        n_layers = 6
        # NOTE: nn.Transformer must not be built from d_model/nhead alone here.
        # Doing so makes it append an extra LayerNorm to the encoder and the
        # decoder stacks (encoder_norm / decoder_norm), and with that trailing
        # norm on the output, training was observed not to converge.  Building
        # the encoder/decoder ourselves — without the final norm — and passing
        # them in via custom_encoder/custom_decoder avoids the problem.
        enc_layer = nn.TransformerEncoderLayer(word_embeding_dim, number_of_heads, batch_first=True, norm_first=True)
        encoder = nn.TransformerEncoder(enc_layer, num_layers=n_layers)

        dec_layer = nn.TransformerDecoderLayer(word_embeding_dim, number_of_heads, batch_first=True, norm_first=True)
        decoder = nn.TransformerDecoder(dec_layer, num_layers=n_layers)

        # Batch-first, pre-norm, with the pre-built encoder/decoder plugged in.
        self.tranmodel = nn.Transformer(d_model=word_embeding_dim, nhead=number_of_heads,
                                        batch_first=True, norm_first=True,
                                        custom_encoder=encoder, custom_decoder=decoder)

        # Project the transformer output back onto the vocabulary.
        self.fc_out = nn.Linear(word_embeding_dim, vocabsize)

    def forward(self, x, y):
        src = self.x_embeding(x)
        tgt = self.y_embeding(y)

        src_mask = mask_pad(x)
        tgt_mask = mask_tril(y)

        # Per the nn.Transformer docs, a 3-D per-sample mask must have shape
        # [b * nhead, L, L]: replicate each batch's mask once per head.
        src_mask = torch.concatenate([src_mask] * number_of_heads)
        tgt_mask = torch.concatenate([tgt_mask] * number_of_heads)

        hidden = self.tranmodel(src, tgt, src_mask=src_mask,
                                tgt_mask=tgt_mask, memory_mask=src_mask)
        return self.fc_out(hidden)

# Module-level model instance, shared by train() and the __main__ evaluation code.
model = SentenceModel()
model.to(device)

def train():
    """Train the model over the synthetic dataset and save its weights to ckptfile."""
    # 8 samples per batch.
    samples_one_batch = 8
    loader = data.DataLoader(dataset=Mydataset(), batch_size=samples_one_batch, shuffle=True)
    optim = Adam(model.parameters(), lr=5e-3)
    sheduler = torch.optim.lr_scheduler.StepLR(optim, step_size=1250, gamma=0.95)
    model.train(True)

    with tqdm(total=train_sample_count) as bar:
        for batchindex,(x,y) in enumerate(loader):
            optim.zero_grad()
            orgy = y

            # Feed y[:, :-1]: the 50th target position has nothing left to predict.
            pred = model(x,y[:,:-1])
            # The training target is y shifted one step to the left.
            y = y[:,1:].reshape(-1)

            # <PAD> positions do not contribute to the loss: drop them.
            select = y!=zd_y['<PAD>']
            y = y[select]
            pred = pred.reshape(-1, vocabsize)[select]

            # cross_entropy = softmax + log + NLL loss.
            loss = F.cross_entropy(pred,y)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(),0.5)
            optim.step()
            sheduler.step()
            bar.update(samples_one_batch)
            if batchindex%1000==0:
                # Token-level accuracy on this batch (pads already excluded).
                pred_y = torch.argmax(pred, dim=-1)
                correct_count = (pred_y == y).sum()
                correct_percent = correct_count / y.shape[0]
                lr = optim.param_groups[0]['lr']
                print(f'{batchindex},lr={lr},loss={loss},accuracy={correct_percent}')

                # Greedy-decode evaluation on the same batch.
                evalmodel(model, x, orgy)

    # BUG FIX: torch.save takes (obj, f); the original called save(ckptfile, model),
    # passing the filename as the object and the model as the file target.
    # Save the state_dict so load_state_dict(load(ckptfile)) in __main__ works.
    save(model.state_dict(), ckptfile)
    print(f'模型已保存:{ckptfile}')


# Evaluate the model on a batch of (x, y) samples: greedy-decode (50-1) words
# starting from <SOS> and report the token accuracy up to the reference <EOS>.
def evalmodel(model,xs,ys,printflag=False):
    """Greedy-decode every sample in the batch and print the token accuracy.

    :param model: the trained SentenceModel.
    :param xs: [b, 50] source id tensor (as produced by get_data / DataLoader).
    :param ys: [b, 51] reference target id tensor; assumed to contain one <EOS>.
    :param printflag: when True, also print reference vs. predicted strings.
    """
    bs = xs.shape[0]
    correctcount=0
    totalcount=0
    # BUG FIX: inference was building an autograd graph for every decode step;
    # wrap everything in no_grad to avoid the useless graph and memory growth.
    with torch.no_grad():
        for i in range(bs):
            # Decoder input: <SOS> followed by pads, filled in one step at a time.
            target=torch.tensor([zd_y['<SOS>']]+[zd_y['<PAD>']]*(sentence_words-1),dtype=torch.long,device=device)
            for k in range(sentence_words-1):
                pred = model(xs[i].unsqueeze(0),target.unsqueeze(0))
                pred_y = torch.argmax(pred,dim=-1)
                out = pred_y[:,k]
                target[k+1] = out
            # Position of <EOS> in the reference (always present for generated data).
            search = torch.argwhere(ys[i]==zd_y['<EOS>'])
            if printflag:
                # Print the reference y and the predicted target as strings.
                print(f"i={i}")
                print(''.join([zd_yr[w] for w in ys[i,:search+1].tolist()]))
                print(''.join([zd_yr[w] for w in target[:search+1].tolist()]))
            # Count matches up to (excluding) <EOS>.
            correctcount += (target[:search]==ys[i,:search]).sum()
            totalcount+=search

    correctrate = correctcount/totalcount
    correctrate = correctrate.item()
    print(f'eval正确率={correctrate}')



# Checkpoint path used by both train() and the loading code below.
ckptfile='sentencemodel.ckpt'
if __name__ == '__main__':
    # Train and save the model weights.
    train()
    # Reload the saved state_dict.
    model.load_state_dict(load(ckptfile))
    model.eval()
    loader = data.DataLoader(dataset=Mydataset(), batch_size=8, shuffle=True)
    for x,y in loader:
        # Measure accuracy on one batch of 8 samples.
        evalmodel(model,x,y,printflag=True)
        break

