import os
import torch
from torch import nn, Tensor
import dltools
import matplotlib.pyplot as plt
import collections
import math

# Read the raw English–French parallel corpus (one tab-separated pair per line).
# os.path.join keeps the relative path portable instead of hard-coding the
# Windows "\" separator, which breaks on Linux/macOS.
with open(os.path.join("MNIST", "fra.txt"), "r", encoding="utf-8") as f:
    raw_text = f.read()
# Slicing works on strings just like on lists.
print(raw_text[:75])


# 数据预处理
def preprocess_nmt(text):
    """Normalize raw translation text.

    Replaces non-breaking-space characters with plain spaces, lowercases,
    and inserts a space before each punctuation mark (, . ! ?) that does not
    already have one, so a later whitespace split separates words from
    punctuation.
    """
    text = text.replace("\u202f", " ").replace("\xa0", " ").lower()
    chars = []
    for i, ch in enumerate(text):
        # Insert a separator only when punctuation directly follows a non-space.
        if i > 0 and ch in ",.!?" and text[i - 1] != " ":
            chars.append(" ")
        chars.append(ch)
    return "".join(chars)


# text = preprocess_nmt(raw_text)
# print(text[:80])


# 词元化
def tokenize_nmt(text, num_examples=None):
    """Split the preprocessed corpus into source/target word-token lists.

    Each line is expected to be "source<TAB>target"; tokens are separated by
    single spaces. Returns (source, target) as parallel lists of token lists,
    keeping at most `num_examples` pairs when a limit is given (None or 0
    means no limit, matching the original truthiness check).
    """
    source, target = [], []
    for i, line in enumerate(text.split("\n")):
        # BUGFIX: use `>=` — the original `i > num_examples` kept one extra
        # pair (num_examples + 1 in total).
        if num_examples and i >= num_examples:
            break
        parts = line.split("\t")
        # Skip malformed lines that do not contain exactly one tab.
        if len(parts) == 2:
            source.append(parts[0].split(" "))
            target.append(parts[1].split(" "))

    return source, target


# source, target = tokenize_nmt(text)
# print(source[:6])
# print(target[:6])


# 统计每句话的长度的分布情况
def show_list_len_pair_hist(legend, xlabel, ylabel, xlist, ylist):
    """Plot overlaid histograms of the per-sequence lengths of two corpora.

    xlist/ylist are lists of token lists; the second histogram is hatched so
    the two distributions are distinguishable.
    """
    # BUGFIX: the original did `plt.figure = (6, 3)` AFTER plotting, which
    # assigns a tuple over matplotlib's `figure` function (breaking later
    # calls) and never sets the size. Create a correctly sized figure first.
    plt.figure(figsize=(6, 3))
    _, _, patches = plt.hist([[len(l) for l in xlist], [len(l) for l in ylist]])
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)

    # Hatch the second dataset's bars to tell the two histograms apart.
    for patch in patches[1].patches:
        patch.set_hatch("/")

    plt.legend(legend)
    plt.show()


# show_list_len_pair_hist(["source", "target"], "# tokens per sequence", "count", source, target)

# source_vocab = dltools.Vocab(source, min_freq=2, reserved_tokens=["<pad>", "<bos>", "<eos>"])
# print(len(source_vocab))

def truncate_or_pad(line, num_steps, padding_token):
    """Force `line` to exactly `num_steps` tokens: clip if too long,
    right-pad with `padding_token` if too short."""
    clipped = line[:num_steps]
    return clipped + [padding_token] * (num_steps - len(clipped))


# print(truncate_or_pad(source_vocab[source[0]], 10, source_vocab["<pad>"]))

# 将机器翻译的文本序列转换成小批量tensor
def build_array_nmt(lines, vocab, num_steps):
    """Convert tokenized text sequences into a fixed-width index tensor.

    Each sequence is mapped through `vocab`, terminated with <eos>, then
    truncated or right-padded with <pad> to exactly `num_steps`. Returns
    (array, valid_len) where valid_len counts the non-<pad> entries per row.
    """
    pad = vocab["<pad>"]
    rows = []
    for line in lines:
        seq = vocab[line] + [vocab["<eos>"]]
        # Truncate or pad in place of the shared helper (same semantics).
        row = seq[:num_steps] if len(seq) > num_steps else seq + [pad] * (num_steps - len(seq))
        rows.append(row)
    array = torch.tensor(rows)
    # sum over dim 1 gives a per-sequence count of valid (non-pad) tokens.
    valid_len = (array != pad).type(torch.int32).sum(1)
    return array, valid_len


def load_data_nmt(batch_size, num_steps, num_examples=600):
    """Build the translation dataset: returns (batch iterator, source vocab,
    target vocab) over at most `num_examples` sentence pairs, each padded or
    truncated to `num_steps` tokens."""
    text = preprocess_nmt(raw_text)
    src_tokens, tgt_tokens = tokenize_nmt(text, num_examples)
    reserved = ["<pad>", "<bos>", "<eos>"]
    src_vocab = dltools.Vocab(src_tokens, min_freq=2, reserved_tokens=reserved)
    tgt_vocab = dltools.Vocab(tgt_tokens, min_freq=2, reserved_tokens=reserved)

    src_array, src_valid_len = build_array_nmt(src_tokens, src_vocab, num_steps)
    tgt_array, tgt_valid_len = build_array_nmt(tgt_tokens, tgt_vocab, num_steps)
    arrays = (src_array, src_valid_len, tgt_array, tgt_valid_len)
    return dltools.load_array(arrays, batch_size), src_vocab, tgt_vocab


# Smoke-test the data pipeline: build the iterator/vocabs and print one batch.
data_iter, source_vocab, target_vocab = load_data_nmt(batch_size=2, num_steps=8)

# Each batch: padded source/target index tensors plus per-sequence valid lengths.
for X, X_valid_len, Y, Y_valid_len in data_iter:
    print(f"X:{X}")
    print(f"X_valid_len:{X_valid_len}")
    print(f"Y:{Y}")
    print(f"Y_valid_len:{Y_valid_len}")
    break


# 编码器基类

class Encoder(nn.Module):
    """Abstract encoder interface for encoder-decoder architectures."""

    def __init__(self, **kwargs):
        # **kwargs (a dict under the hood) is forwarded to nn.Module.
        super().__init__(**kwargs)

    def forward(self, X, *args):
        # Subclasses must encode the input sequence X; *args gathers any
        # extra positional arguments into a tuple.
        raise NotImplementedError


# 解码器
class Decoder(nn.Module):
    """Abstract decoder interface for encoder-decoder architectures."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def init_state(self, enc_outputs, *args):
        # Subclasses derive the initial decoder state from the encoder outputs.
        raise NotImplementedError

    def forward(self, X, state):
        # Subclasses map (input tokens, state) to (outputs, new state).
        raise NotImplementedError


# 合并编码器和解码器
class EncoderDecoder(nn.Module):
    """Glue module: encode the source, seed the decoder state, decode."""

    def __init__(self, encoder, decoder, **kwargs):
        super().__init__(**kwargs)
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, enc_X, dec_Y, *args):
        # During training dec_Y is the ground-truth target prefixed with <bos>;
        # during inference it starts as <bos> and each predicted token becomes
        # the next decoder input.
        dec_state = self.decoder.init_state(self.encoder(enc_X, *args), *args)
        return self.decoder(dec_Y, dec_state)


# seq2seq encoder
class Seq2SeqEncoder(Encoder):
    """GRU encoder: token ids -> per-step features and final hidden state."""

    def __init__(self, vocab_size, embed_size, num_hiddens, num_layers, dropout=0.0, **kwargs):
        super().__init__(**kwargs)
        # Embedding maps integer token ids to dense vectors.
        self.embedding = nn.Embedding(vocab_size, embed_size)
        self.rnn = nn.GRU(embed_size, num_hiddens, num_layers, dropout=dropout, batch_first=True)

    def forward(self, X, *args):
        # X: (batch_size, num_steps) integer token ids.
        embedded = self.embedding(X)  # (batch_size, num_steps, embed_size)
        # No initial state is passed, so PyTorch starts the GRU from zeros.
        output, state = self.rnn(embedded)
        # output: (batch_size, num_steps, num_hiddens)
        # state:  (num_layers, batch_size, num_hiddens)
        return output, state


# Encoder smoke test on a dummy batch (batch_size=4, num_steps=7);
# `encoder` and `X` are reused below to seed the decoder demo.
encoder = Seq2SeqEncoder(vocab_size=10, embed_size=8, num_hiddens=16, num_layers=2)
encoder.eval()
X = torch.zeros((4, 7), dtype=torch.long)
output, state = encoder(X)


# print(f"output.shape:{output.shape}")
# print(f"state.shape:{state.shape}")


class Seq2SeqDecoder(Decoder):
    """GRU decoder that conditions every step on the encoder's final state."""

    def __init__(self, vocab_size, embed_size, num_hiddens, num_layers, dropout=0.0, **kwargs):
        super().__init__(**kwargs)
        self.embedding = nn.Embedding(vocab_size, embed_size)
        # Per-step input = embedded token concatenated with the context vector.
        self.rnn = nn.GRU(embed_size + num_hiddens, num_hiddens, num_layers, dropout=dropout, batch_first=True)
        # Projects hidden features to vocabulary logits.
        self.dense = nn.Linear(num_hiddens, vocab_size)

    def init_state(self, enc_outputs, *args):
        # Keep only the encoder's final hidden state,
        # shape (num_layers, batch_size, num_hiddens).
        return enc_outputs[1]

    def forward(self, Y, state):
        embedded = self.embedding(Y)  # (batch_size, num_steps, embed_size)
        num_steps = embedded.shape[1]
        # Broadcast the top-layer encoder state across every time step:
        # state[-1] is (batch, num_hiddens); repeat gives
        # (num_steps, batch, num_hiddens), permute makes it batch-first.
        context = state[-1].repeat(num_steps, 1, 1).permute(1, 0, 2)
        rnn_input = torch.cat((embedded, context), -1)
        output, state = self.rnn(rnn_input, state)
        logits = self.dense(output)
        # logits: (batch_size, num_steps, vocab_size)
        # state:  (num_layers, batch_size, num_hiddens)
        return logits, state


# Decoder smoke test: reuse the encoder's final state as the initial decoder state.
decoder = Seq2SeqDecoder(vocab_size=10, embed_size=8, num_hiddens=16, num_layers=2)
decoder.eval()
state = decoder.init_state(encoder(X))
Y = torch.zeros((4, 7), dtype=torch.long)
output, state = decoder(Y, state)


# print(f"output.shape:{output.shape}")
# print(f"state.shape:{state.shape}")


def sequence_mask(X, valid_len, value=0):
    """Overwrite (in place) positions of X beyond each row's valid length.

    X: (batch, maxlen); valid_len: (batch,). Entries at column j of row i are
    kept when j < valid_len[i], otherwise set to `value`. Returns the mutated X.
    """
    maxlen = X.size(1)
    positions = torch.arange(maxlen, dtype=torch.float32, device=X.device)
    # (1, maxlen) < (batch, 1) broadcasts to a (batch, maxlen) boolean mask.
    keep = positions.reshape(-1, maxlen) < valid_len.reshape(valid_len.shape[0], -1)
    X[~keep] = value
    return X


# Demonstrate the broadcasting used inside sequence_mask: a (1, n) row vector
# compared against an (m, 1) column vector yields an (m, n) boolean mask.
test = torch.arange((3))
print(f"test:{test}")
print(f"test.shape:{test.shape}")
test = test.reshape(-1, test.shape[0])
print(f"test:{test}")
print(f"test.shape:{test.shape}")

test2 = torch.tensor(([1, 1]))
print(f"test2:{test2}")
test2 = test2.reshape(test2.shape[0], -1)
print(f"test2:{test2}")
print(test < test2)

# sequence_mask keeps the first valid_len entries of each row, zeroes the rest.
X = torch.tensor([[1, 2, 3], [4, 5, 6]])
print(sequence_mask(X, torch.tensor([1, 2])))


# 重写交叉熵损失， 添加屏蔽无效的内容
class MaskedSoftmaxCELoss(nn.CrossEntropyLoss):
    """Cross-entropy loss that ignores padding positions via a 0/1 mask."""

    def forward(self, pred: Tensor, label: Tensor, valid_len: Tensor) -> Tensor:
        """pred: (batch, num_steps, vocab_size); label: (batch, num_steps);
        valid_len: (batch,). Returns a per-sequence loss of shape (batch,)."""
        # 1 where the position is within the valid length, 0 where padded.
        weights = sequence_mask(torch.ones_like(label), valid_len)
        # Keep per-element losses so padding can be masked before averaging.
        self.reduction = "none"
        # CrossEntropyLoss expects the class dimension at position 1, so the
        # logits are permuted to (batch, vocab_size, num_steps).
        raw_loss = super().forward(pred.permute(0, 2, 1), label)
        return (raw_loss * weights).mean(dim=1)


# Sanity-check the masked loss: identical predictions with shrinking valid
# lengths should yield decreasing per-sequence losses.
loss = MaskedSoftmaxCELoss()
pred = torch.ones(3, 4, 10)
label = torch.ones((3, 4), dtype=torch.long)
print(f"pred:{pred}")
print(f"label:{label}")
res = loss(pred, label, torch.tensor([4, 2, 1]))
print(res)


# 训练
# Training loop
def train_seq2seq(net, data_iter, lr, num_epochs, tgt_vocab, device):
    """Train an encoder-decoder net with teacher forcing and masked CE loss."""
    def xavier_init_weights(m):
        # Standard initialization for sequence models: Xavier-uniform on
        # Linear weights and on every GRU weight matrix.
        if type(m) == nn.Linear:
            nn.init.xavier_uniform_(m.weight)
        if type(m) == nn.GRU:
            for param in m._flat_weights_names:
                if "weight" in param:
                    nn.init.xavier_uniform_(m._parameters[param])

    net.apply(xavier_init_weights)
    net.to(device)
    print(f"net.state_dict():{net.state_dict()}")
    # net.parameters() recursively collects parameters from nested nn.Modules.
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    loss = MaskedSoftmaxCELoss()

    for epoch in range(num_epochs):
        timer = dltools.Timer()
        # Accumulates (total loss, total number of valid target tokens).
        metric = dltools.Accumulator(2)
        for batch in data_iter:
            # Reset gradients for this step.
            optimizer.zero_grad()
            # Unpack the batch and move everything to the training device.
            X, X_valid_len, Y, Y_valid_len = [x.to(device) for x in batch]
            bos = torch.tensor([tgt_vocab["<bos>"]] * Y.shape[0], device=device).reshape(-1, 1)
            # Teacher forcing: prepend <bos> and drop Y's last column so the
            # decoder input has the same length as Y.
            dec_input = torch.cat([bos, Y[:, :-1]], 1)
            Y_hat, _ = net(X, dec_input, X_valid_len)
            # Per-sequence masked loss.
            l = loss(Y_hat, Y, Y_valid_len)
            # Reduce to a scalar before backpropagating.
            l.sum().backward()
            # Clip gradients to norm 1 to stabilize RNN training.
            dltools.grad_clipping(net, 1)
            # Parameter update.
            optimizer.step()
            with torch.no_grad():
                metric.add(l.sum(), Y_valid_len.sum())

        if (epoch + 1) % 10 == 0:
            print(f"{epoch + 1} : {metric[0] / metric[1]}")

    print(f"last loss {metric[0] / metric[1]:.3f}, {metric[1] / timer.stop():.1f} tokens/s")


# Hyperparameters and end-to-end training of the plain (no-attention) model.
embed_size, num_hiddens, num_layers, dropout = 32, 32, 2, 0.1
batch_size, num_steps = 64, 10
lr, num_epochs, device = 0.005, 100, dltools.try_gpu()
data_iter, source_vocab, target_vocab = load_data_nmt(batch_size, num_steps)

encoder = Seq2SeqEncoder(len(source_vocab), embed_size, num_hiddens, num_layers, dropout)
decoder = Seq2SeqDecoder(len(target_vocab), embed_size, num_hiddens, num_layers, dropout)
net = EncoderDecoder(encoder, decoder)

train_seq2seq(net, data_iter, lr, num_epochs, target_vocab, device)


# 预测和评估
def predict_seq2seq(net, src_sentence, source_vocab, target_vocab, num_steps, device):
    """Greedy-decode one source sentence into a target-language string."""
    # Inference mode: disables dropout etc.
    net.eval()
    src_tokens = source_vocab[preprocess_nmt(src_sentence).split(" ")] + [source_vocab["<eos>"]]
    enc_valid_len = torch.tensor([len(src_tokens)], device=device)
    padded = truncate_or_pad(src_tokens, num_steps, source_vocab["<pad>"])

    # Leading batch dimension (batch_size = 1).
    enc_X = torch.unsqueeze(torch.tensor(padded, dtype=torch.long, device=device), dim=0)
    dec_state = net.decoder.init_state(net.encoder(enc_X, enc_valid_len), enc_valid_len)
    # Decoding starts from <bos>, again with a batch dimension.
    dec_X = torch.unsqueeze(torch.tensor([target_vocab["<bos>"]], dtype=torch.long, device=device), dim=0)

    output_seq = []
    for _ in range(num_steps):
        Y_hat, dec_state = net.decoder(dec_X, dec_state)
        # Feed the most likely token back in as the next decoder input.
        dec_X = Y_hat.argmax(dim=2)
        pred = dec_X.squeeze(dim=0).type(torch.int32).item()
        if pred == target_vocab["<eos>"]:
            break
        output_seq.append(pred)
    # Indices map back to word-level tokens.
    return " ".join(target_vocab.to_tokens(output_seq))


# seq2se2的评估指标：BLEU: bilingual evaluation understudy 双语互译质量评估辅助工具
def bleu(pred_seq, label_seq, k):
    pred_tokens, label_tokens = preprocess_nmt(pred_seq).split(" "), preprocess_nmt(label_seq).split(" ")
    pred_len, label_len = len(pred_tokens), len(label_tokens)
    score = math.exp(min(0.0, 1.0 - (label_len / pred_len)))

    for n in range(1, k + 1):
        num_matches, label_subs = 0, collections.defaultdict(int)
        for i in range(label_len - n + 1):
            label_subs[" ".join(label_tokens[i: i + n])] += 1

        for i in range(pred_len - n + 1):
            temp = " ".join(pred_tokens[i:i+n])
            if label_subs[temp] > 0:
                num_matches += 1
                label_subs[temp] -= 1
        score *= math.pow(num_matches/(pred_len - n + 1), math.pow(0.5, n))
    return score


# 开始预测
# Translate a few sample sentences and score each against its reference.
engs = ["go.", "i lost.", "he's calm.", "i'm home."]
fras = ["va!", "j'ai perdu.", "il est calme.", "je suis chez moi."]

for eng, fra in zip(engs, fras):
    translation = predict_seq2seq(net, eng, source_vocab, target_vocab, num_steps, device)
    print(f"{eng} => {translation}, bleu: {bleu(translation, fra, k=2):.3f}")


# 定义注意力解码器
class AttentionDecoder(Decoder):
    """Decoder interface that additionally exposes its attention weights."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @property
    def attention_weights(self):
        # Subclasses must expose the weights recorded during forward().
        raise NotImplementedError


def transpose_qkv(X, num_heads):
    """Reshape (batch, n, num_hiddens) to (batch*num_heads, n, num_hiddens/num_heads).

    n is the number of queries or key-value pairs. Splitting the feature dim
    into heads and folding heads into the batch dim lets one batched matmul
    serve all heads at once.
    """
    batch_size, n = X.shape[0], X.shape[1]
    # Split features into heads, then move heads next to the batch dimension.
    X = X.reshape(batch_size, n, num_heads, -1).permute(0, 2, 1, 3)
    return X.reshape(batch_size * num_heads, n, -1)


def transpose_output(X, num_heads):
    """Inverse of transpose_qkv: (batch*num_heads, n, d) -> (batch, n, num_heads*d)."""
    n, d = X.shape[1], X.shape[2]
    # Split heads back out of the leading dim, then concatenate feature-wise.
    X = X.reshape(-1, num_heads, n, d).permute(0, 2, 1, 3)
    return X.reshape(X.shape[0], n, num_heads * d)

class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product attention with learned Q/K/V/output projections."""

    def __init__(self, key_size, query_size, value_size, num_hiddens, num_heads, dropout=0.0, bias=False, **kwargs):
        super().__init__(**kwargs)
        self.num_heads = num_heads
        self.attention = dltools.DotProductAttention(dropout)
        # Project queries/keys/values into a shared num_hiddens space;
        # the head split happens later in transpose_qkv.
        self.W_q = nn.Linear(query_size, num_hiddens, bias=bias)
        self.W_k = nn.Linear(key_size, num_hiddens, bias=bias)
        self.W_v = nn.Linear(value_size, num_hiddens, bias=bias)
        self.W_o = nn.Linear(num_hiddens, num_hiddens, bias=bias)

    def forward(self, queries, keys, values, valid_lens):
        # Inputs: (batch_size, #queries or #kv pairs, num_hiddens).
        q = transpose_qkv(self.W_q(queries), self.num_heads)
        k = transpose_qkv(self.W_k(keys), self.num_heads)
        v = transpose_qkv(self.W_v(values), self.num_heads)
        if valid_lens is not None:
            # Every head shares the same mask, so replicate it per head
            # along the folded batch*heads dimension.
            valid_lens = torch.repeat_interleave(valid_lens, repeats=self.num_heads, dim=0)
        # heads_out: (batch_size*num_heads, #queries, num_hiddens/num_heads)
        heads_out = self.attention(q, k, v, valid_lens)
        return self.W_o(transpose_output(heads_out, self.num_heads))


# 添加Bahdanau的decoder
class Seq2SeqAttentionDecoder(AttentionDecoder):
    """Bahdanau-style attention decoder.

    At every decoding step the previous top-layer hidden state queries all
    encoder outputs to build a context vector, which is concatenated with the
    embedded input token and fed to the GRU.
    """

    def __init__(self, vocab_size, embed_size, num_hiddens, num_heads, num_layers, dropout=0.0, **kwargs):
        super().__init__(**kwargs)
        # self.attention = dltools.AdditiveAttention(num_hiddens, num_hiddens, num_hiddens, dropout=dropout)
        self.attention = MultiHeadAttention(num_hiddens, num_hiddens, num_hiddens, num_hiddens, num_heads, dropout)
        self.embedding = nn.Embedding(vocab_size, embed_size)
        self.rnn = nn.GRU(embed_size + num_hiddens, num_hiddens, num_layers, dropout=dropout, batch_first=True)
        self.dense = nn.Linear(num_hiddens, vocab_size)

    def init_state(self, enc_outputs, enc_valid_lens, *args):
        """Package encoder results as the decoder state.

        enc_outputs is (outputs, hidden_state): outputs has shape
        (batch_size, num_steps, num_hiddens), hidden_state has shape
        (num_layers, batch_size, num_hiddens).
        """
        outputs, hidden_state = enc_outputs
        return (outputs, hidden_state, enc_valid_lens)

    def forward(self, X, state):
        # state is the (enc_outputs, hidden_state, enc_valid_lens) triple
        # produced by init_state (or by a previous forward call).
        enc_outputs, hidden_state, enc_valid_lens = state
        # X: (batch_size, num_steps) token ids -> (batch, steps, embed_size),
        # then time-major so we can iterate one step at a time.
        X = self.embedding(X)
        X = X.permute(1, 0, 2)
        outputs, self._attention_weights = [], []

        for x in X:
            # Query with the previous top-layer hidden state: (batch, 1, num_hiddens).
            query = torch.unsqueeze(hidden_state[-1], dim=1)
            # context: (batch, 1, num_hiddens), weighted sum of encoder outputs.
            context = self.attention(query, enc_outputs, enc_outputs, enc_valid_lens)
            # Concatenate context with this step's embedded input token.
            x = torch.cat((context, torch.unsqueeze(x, dim=1)), dim=-1)
            out, hidden_state = self.rnn(x, hidden_state)
            outputs.append(out)
            # BUGFIX: the original appended `self.attention_weights`, i.e. the
            # property returning this very list, so the list ended up
            # containing itself instead of any weights. Record the inner
            # attention module's weights for this step instead.
            # NOTE(review): assumes dltools.DotProductAttention stores its last
            # softmax weights as `.attention_weights` (d2l convention) — confirm.
            self._attention_weights.append(self.attention.attention.attention_weights)

        # (batch_size, num_steps, vocab_size)
        outputs = self.dense(torch.cat(outputs, dim=1))
        return outputs, [enc_outputs, hidden_state, enc_valid_lens]

    @property
    def attention_weights(self):
        """Per-step attention weights recorded during the last forward pass."""
        return self._attention_weights

# Shape check for the attention decoder with dummy data.
encoder_1 = Seq2SeqEncoder(vocab_size=10, embed_size=8, num_hiddens=16, num_layers=2)
encoder_1.eval()
decoder_1 = Seq2SeqAttentionDecoder(vocab_size=10, embed_size=8, num_hiddens=16, num_layers=2, num_heads=2)
decoder_1.eval()

# batch_size:4 num_steps:7
X = torch.zeros((4, 7), dtype=torch.long)
temp = encoder_1(X)
# valid lengths are None here, so attention can see every encoder position.
state = decoder_1.init_state(temp, None)
outputs, state = decoder_1(X, state)
print(f"outputs.shape:{outputs.shape}")
print(f"len(state):{len(state)}")
print(f"state[0].shape:{state[0].shape}")
print(f"len(state[1]):{len(state[1])}")
print(f"state[1][0].shape:{state[1][0].shape}")

# Train the attention-based model end to end, then translate the samples.
embed_size, num_hiddens, num_layers, dropout = 32, 100, 2, 0.1
batch_size, num_steps, num_heads = 64, 10, 5
lr, num_epochs, device = 0.005, 100, dltools.try_gpu()

data_iter, source_vocab, target_vocab = load_data_nmt(batch_size, num_steps)

encoder_2 = Seq2SeqEncoder(len(source_vocab), embed_size, num_hiddens, num_layers, dropout)
decoder_2 = Seq2SeqAttentionDecoder(len(target_vocab), embed_size, num_hiddens, num_heads, num_layers, dropout)
net_2 = EncoderDecoder(encoder_2, decoder_2)

train_seq2seq(net_2, data_iter, lr, num_epochs, target_vocab, device)
# Prediction + BLEU evaluation for the attention model.
engs = ["go.", "i lost.", "he's calm.", "i'm home."]
fras = ["va!", "j'ai perdu.", "il est calme.", "je suis chez moi."]

for eng, fra in zip(engs, fras):
    translation = predict_seq2seq(net_2, eng, source_vocab, target_vocab, num_steps, device)
    print(f"{eng} => {translation}, bleu: {bleu(translation, fra, k=2):.3f}")




# Standalone multi-head attention smoke test: output keeps the query shape.
num_hiddens, num_heads = 100, 5
attention = MultiHeadAttention(num_hiddens, num_hiddens, num_hiddens, num_hiddens, num_heads, 0.2)
attention.eval()

batch_size, num_queries = 2, 4
# NOTE(review): "num_kvparis" looks like a typo for num_kvpairs; kept as-is
# since it is a code identifier.
num_kvparis, valid_lens = 6, torch.tensor([3, 2])
X = torch.ones((batch_size, num_queries, num_hiddens))
Y = torch.ones((batch_size, num_kvparis, num_hiddens))
res = attention(X, Y, Y, valid_lens)
print(f"res.shape:{res.shape}")





















