import pickle
import re
import time

import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Embedding, Dense, LayerNormalization, Dropout

# Global hyperparameters for preprocessing and training.
MAX_LENGTH = 50  # maximum (padded) token sequence length
EPOCHS = 50  # number of training epochs
BATCH_SIZE = 32  # minibatch size
MIN_DATASET_SIZE = 10  # minimum dataset size (not referenced below — possibly vestigial, TODO confirm)


def load_dataset(path):
    """Load tab-separated English/Chinese sentence pairs from ``path``.

    The pairs read from the file (if any) are merged with a built-in seed
    set, whitespace-normalized, written back to ``path``, and returned as
    ``zip(english_sentences, chinese_sentences)``.

    If the file does not exist it is created from the seed data alone.
    """
    # Seed pairs.  Defined BEFORE the try-block: the original code defined
    # this list inside the try, after open(), so the FileNotFoundError
    # branch referenced an unbound name and raised NameError.
    additional_data = [
        ("Hello", "你好"),
        ("Hello, how are you?", "你好，你好吗？"),
        ("I am fine, thank you.", "我很好，谢谢。"),
        ("What's your name?", "你叫什么名字？"),
        ("Nice to meet you.", "很高兴认识你。"),
        ("Where are you from?", "你来自哪里？"),
        ("I am from China.", "我来自中国。"),
        ("What time is it?", "现在几点了？"),
        ("Good morning.", "早上好。"),
        ("Good night.", "晚安。"),
        ("How's the weather today?", "今天天气怎么样？"),
        ("It's sunny today.", "今天是晴天。"),
        ("I love you.", "我爱你。"),
        ("Thank you very much.", "非常感谢。"),
        ("You're welcome.", "不客气。"),
        ("Goodbye.", "再见。"),
        ("See you later.", "回头见。"),
        ("Have a nice day.", "祝你今天愉快。"),
        ("I don't understand.", "我不明白。"),
        ("Please speak slowly.", "请说慢一点。")
    ]

    try:
        with open(path, 'r', encoding='utf-8') as f:
            lines = f.read().strip().split('\n')
        # Skip blank/malformed lines (no tab); split at the first tab only
        # so a stray tab inside a sentence cannot break the 2-tuple unpack.
        pairs = [line.split('\t', 1) for line in lines if '\t' in line]
    except FileNotFoundError:
        print("创建新的数据集...")
        pairs = []

    # Normalize whitespace and append the seed data.
    pairs = [(eng.strip(), chn.strip()) for eng, chn in pairs + additional_data]

    # Persist the augmented dataset (also creates the file on first run).
    with open(path, 'w', encoding='utf-8') as f:
        for eng, chn in pairs:
            f.write(f"{eng}\t{chn}\n")

    return zip(*pairs)


def preprocess_sentence(sentence):
    """Lower-case and normalize a sentence for tokenization.

    Sentences containing any CJK character only get whitespace collapsed;
    all other sentences additionally get ``?.!,`` padded with spaces and
    every other non-alphabetic character replaced by a space.
    """
    text = sentence.lower().strip()
    contains_cjk = any('\u4e00' <= ch <= '\u9fff' for ch in text)
    if not contains_cjk:
        text = re.sub(r"([?.!,])", r" \1 ", text)
        text = re.sub(r'[" "]+', " ", text)
        text = re.sub(r"[^a-zA-Z?.!,]+", " ", text)
    else:
        # Chinese text: no space padding around punctuation is needed.
        text = re.sub(r'[" "]+', " ", text)
    return text.strip()


def tokenize(lang):
    """Fit a Keras Tokenizer on ``lang`` and return (id matrix, tokenizer).

    The returned tensor has shape (len(lang), MAX_LENGTH), post-padded
    with 0.  Special markers are fitted first so they always receive ids.
    """
    lang_tokenizer = tf.keras.preprocessing.text.Tokenizer(
        filters='',
        oov_token='<unk>'
    )
    # Fit the special markers before the corpus so they are guaranteed
    # to be present in word_index.
    special_tokens = ['<pad>', '<start>', '<end>', '<unk>']
    lang_tokenizer.fit_on_texts(special_tokens + list(lang))

    tensor = lang_tokenizer.texts_to_sequences(lang)
    tensor = tf.keras.preprocessing.sequence.pad_sequences(
        tensor,
        maxlen=MAX_LENGTH,
        padding='post',
        dtype='int32'
    )

    # No clamping: texts_to_sequences only emits ids in 1..len(word_index)
    # (0 is reserved for padding).  The original
    # ``np.minimum(tensor, len(word_index) - 1)`` silently remapped the
    # highest-index vocabulary word onto a *different* word, corrupting data.
    return tensor, lang_tokenizer


def load_preprocessed_data(path):
    """Load the dataset and return padded id tensors plus both tokenizers.

    Returns (input_tensor, target_tensor, input_tokenizer, target_tokenizer)
    where each tensor has shape (num_pairs, MAX_LENGTH).
    """
    input_lang, target_lang = load_dataset(path)

    # Wrap every sentence in <start>/<end> markers.
    input_lang = ['<start> ' + preprocess_sentence(w) + ' <end>' for w in input_lang]
    target_lang = ['<start> ' + preprocess_sentence(w) + ' <end>' for w in target_lang]

    # Build vocabularies and padded id matrices.
    input_tensor, input_lang_tokenizer = tokenize(input_lang)
    target_tensor, target_lang_tokenizer = tokenize(target_lang)

    # Tokenizer ids run 1..len(word_index) inclusive, which is already in
    # range for an Embedding of size len(word_index) + 1.  The original
    # ``tf.where(t >= len(word_index), unk, t)`` was off by one: it remapped
    # the *valid* highest id to <unk>.  Removed.
    return input_tensor, target_tensor, input_lang_tokenizer, target_lang_tokenizer


# Load and preprocess the data (module-level side effect: also rewrites dataset.txt).
input_tensor, target_tensor, input_lang_tokenizer, target_lang_tokenizer = load_preprocessed_data('dataset.txt')


class PositionalEncoding(tf.keras.layers.Layer):
    """Adds fixed sinusoidal position information to token embeddings."""

    def __init__(self, position, d_model):
        super(PositionalEncoding, self).__init__()
        self.d_model = d_model
        # Precompute the full (1, position, d_model) table once.
        self.pos_encoding = self.positional_encoding(position, d_model)

    def get_angles(self, position, i, d_model):
        # Each pair of embedding dimensions shares one frequency.
        rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model))
        return position * rates

    def positional_encoding(self, position, d_model):
        rads = self.get_angles(
            np.arange(position)[:, np.newaxis],
            np.arange(d_model)[np.newaxis, :],
            d_model
        )
        # Even dimensions get sine, odd dimensions get cosine.
        rads[:, 0::2] = np.sin(rads[:, 0::2])
        rads[:, 1::2] = np.cos(rads[:, 1::2])
        return tf.cast(rads[np.newaxis, ...], dtype=tf.float32)

    def call(self, inputs):
        # Trim the precomputed table to the actual sequence length, then add.
        seq_len = tf.shape(inputs)[1]
        table = tf.slice(self.pos_encoding, [0, 0, 0], [-1, seq_len, -1])
        return inputs + table


class MultiHeadAttention(tf.keras.layers.Layer):
    """Multi-head scaled dot-product attention.

    Projects v/k/q to d_model, splits into num_heads heads of size
    d_model // num_heads, attends, re-merges heads, and applies a final
    dense projection.
    """

    def __init__(self, d_model, num_heads):
        super(MultiHeadAttention, self).__init__()
        self.num_heads = num_heads
        self.d_model = d_model

        # d_model must split evenly across heads.
        assert d_model % self.num_heads == 0

        self.depth = d_model // self.num_heads

        # Linear projections for queries, keys, and values.
        self.wq = Dense(d_model)
        self.wk = Dense(d_model)
        self.wv = Dense(d_model)

        # Output projection after heads are concatenated.
        self.dense = Dense(d_model)

    def split_heads(self, x, batch_size):
        # (batch, seq, d_model) -> (batch, num_heads, seq, depth)
        x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
        return tf.transpose(x, perm=[0, 2, 1, 3])

    def call(self, v, k, q, mask):
        """Return (output, attention_weights); output is (batch, seq_q, d_model)."""
        batch_size = tf.shape(q)[0]

        q = self.wq(q)
        k = self.wk(k)
        v = self.wv(v)

        q = self.split_heads(q, batch_size)
        k = self.split_heads(k, batch_size)
        v = self.split_heads(v, batch_size)

        scaled_attention, attention_weights = self.scaled_dot_product_attention(
            q, k, v, mask)

        # (batch, num_heads, seq_q, depth) -> (batch, seq_q, num_heads, depth)
        scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3])

        # Merge heads back into d_model.
        concat_attention = tf.reshape(scaled_attention,
                                      (batch_size, -1, self.d_model))

        output = self.dense(concat_attention)

        return output, attention_weights

    def scaled_dot_product_attention(self, q, k, v, mask):
        """Attention over q/k/v; mask positions (where mask == 1) get ~-inf logits."""
        matmul_qk = tf.matmul(q, k, transpose_b=True)

        # Scale by sqrt(key depth) to keep logits in a stable range.
        dk = tf.cast(tf.shape(k)[-1], tf.float32)
        scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)

        if mask is not None:
            # Masked positions are pushed to -1e9 so softmax zeroes them.
            scaled_attention_logits += (mask * -1e9)

        attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1)

        output = tf.matmul(attention_weights, v)

        return output, attention_weights


class EncoderLayer(tf.keras.layers.Layer):
    """One encoder block: self-attention then a position-wise feed-forward
    network, each wrapped in dropout + residual + LayerNorm."""

    def __init__(self, d_model, num_heads, dff, rate=0.1):
        super(EncoderLayer, self).__init__()
        self.mha = MultiHeadAttention(d_model, num_heads)
        self.ffn = self.point_wise_feed_forward_network(d_model, dff)
        self.layernorm1 = LayerNormalization(epsilon=1e-6)
        self.layernorm2 = LayerNormalization(epsilon=1e-6)
        self.dropout1 = Dropout(rate)
        self.dropout2 = Dropout(rate)

    def point_wise_feed_forward_network(self, d_model, dff):
        # The same two-layer MLP is applied at every sequence position.
        return tf.keras.Sequential([
            Dense(dff, activation='relu'),
            Dense(d_model),
        ])

    def call(self, x, mask):
        # Self-attention sub-layer with residual connection.
        attended, _ = self.mha(x, x, x, mask)
        normed = self.layernorm1(x + self.dropout1(attended))

        # Feed-forward sub-layer with residual connection.
        transformed = self.ffn(normed)
        return self.layernorm2(normed + self.dropout2(transformed))


class Encoder(tf.keras.layers.Layer):
    """Token embedding + positional encoding feeding a stack of EncoderLayers."""

    def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size,
                 maximum_position_encoding, rate=0.1):
        super(Encoder, self).__init__()

        self.d_model = d_model
        self.num_layers = num_layers

        self.embedding = Embedding(input_vocab_size, d_model)
        self.pos_encoding = PositionalEncoding(maximum_position_encoding, d_model)

        self.enc_layers = [EncoderLayer(d_model, num_heads, dff, rate)
                           for _ in range(num_layers)]

        self.dropout = Dropout(rate)

    def call(self, x, mask):
        """Encode token ids ``x`` -> (batch_size, input_seq_len, d_model)."""
        # (Removed the original unused local ``seq_len``.)
        x = self.embedding(x)
        # Scale embeddings by sqrt(d_model) so the positional encoding
        # does not dominate them.
        x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
        x = self.pos_encoding(x)

        x = self.dropout(x)

        for enc_layer in self.enc_layers:
            x = enc_layer(x, mask)

        return x  # (batch_size, input_seq_len, d_model)


class DecoderLayer(tf.keras.layers.Layer):
    """One decoder block: masked self-attention, encoder-decoder attention,
    then a position-wise feed-forward network, each with
    dropout + residual + LayerNorm."""

    def __init__(self, d_model, num_heads, dff, rate=0.1):
        super(DecoderLayer, self).__init__()
        self.mha1 = MultiHeadAttention(d_model, num_heads)
        self.mha2 = MultiHeadAttention(d_model, num_heads)
        self.ffn = self.point_wise_feed_forward_network(d_model, dff)
        self.layernorm1 = LayerNormalization(epsilon=1e-6)
        self.layernorm2 = LayerNormalization(epsilon=1e-6)
        self.layernorm3 = LayerNormalization(epsilon=1e-6)
        self.dropout1 = Dropout(rate)
        self.dropout2 = Dropout(rate)
        self.dropout3 = Dropout(rate)

    def point_wise_feed_forward_network(self, d_model, dff):
        # The same two-layer MLP is applied at every sequence position.
        return tf.keras.Sequential([
            Dense(dff, activation='relu'),
            Dense(d_model),
        ])

    def call(self, x, enc_output, look_ahead_mask, padding_mask):
        # Masked self-attention over the target sequence.
        self_attn, attn_weights_block1 = self.mha1(x, x, x, look_ahead_mask)
        out1 = self.layernorm1(self.dropout1(self_attn) + x)

        # Cross-attention: queries from out1, keys/values from the encoder.
        cross_attn, attn_weights_block2 = self.mha2(enc_output, enc_output, out1, padding_mask)
        out2 = self.layernorm2(self.dropout2(cross_attn) + out1)

        # Feed-forward sub-layer.
        out3 = self.layernorm3(self.dropout3(self.ffn(out2)) + out2)

        return out3, attn_weights_block1, attn_weights_block2


class Decoder(tf.keras.layers.Layer):
    """Target embedding + positional encoding feeding a stack of DecoderLayers.

    ``call`` returns the decoded sequence and a dict of per-layer attention
    weights keyed ``decoder_layer{i}_block{1,2}``.
    """

    def __init__(self, num_layers, d_model, num_heads, dff, target_vocab_size,
                 maximum_position_encoding, rate=0.1):
        super(Decoder, self).__init__()

        self.d_model = d_model
        self.num_layers = num_layers

        self.embedding = Embedding(target_vocab_size, d_model)
        self.pos_encoding = PositionalEncoding(maximum_position_encoding, d_model)

        self.dec_layers = [DecoderLayer(d_model, num_heads, dff, rate)
                           for _ in range(num_layers)]
        self.dropout = Dropout(rate)

    def call(self, x, enc_output, look_ahead_mask, padding_mask):
        """Decode target ids ``x`` against ``enc_output``."""
        # (Removed the original unused local ``seq_len``.)
        attention_weights = {}

        x = self.embedding(x)
        # Scale embeddings by sqrt(d_model) so the positional encoding
        # does not dominate them.
        x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
        x = self.pos_encoding(x)

        x = self.dropout(x)

        for i in range(self.num_layers):
            x, block1, block2 = self.dec_layers[i](x, enc_output, look_ahead_mask, padding_mask)
            attention_weights[f'decoder_layer{i + 1}_block1'] = block1
            attention_weights[f'decoder_layer{i + 1}_block2'] = block2

        return x, attention_weights


class Transformer(tf.keras.Model):
    """Full encoder-decoder Transformer with a final vocabulary projection."""

    def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size,
                 target_vocab_size, pe_input, pe_target, rate=0.1):
        super(Transformer, self).__init__()

        # Bug fix: honour pe_input/pe_target — the original ignored both
        # parameters and hard-coded the global MAX_LENGTH instead.  The
        # caller below passes MAX_LENGTH for both, so behavior is unchanged.
        self.encoder = Encoder(num_layers, d_model, num_heads, dff,
                               input_vocab_size, pe_input, rate)

        self.decoder = Decoder(num_layers, d_model, num_heads, dff,
                               target_vocab_size, pe_target, rate)

        self.final_layer = Dense(target_vocab_size)

    def call(self, inp, tar, enc_padding_mask, look_ahead_mask, dec_padding_mask):
        """Return (logits, attention_weights).

        ``logits`` has shape (batch_size, tar_seq_len, target_vocab_size).
        """
        enc_output = self.encoder(inp, enc_padding_mask)  # (batch_size, inp_seq_len, d_model)

        dec_output, attention_weights = self.decoder(
            tar, enc_output, look_ahead_mask, dec_padding_mask)

        final_output = self.final_layer(dec_output)  # (batch_size, tar_seq_len, target_vocab_size)

        return final_output, attention_weights


def create_padding_mask(seq):
    """Return a (batch, 1, 1, seq_len) float mask: 1.0 where id == 0 (padding)."""
    is_pad = tf.cast(tf.math.equal(seq, 0), tf.float32)
    # Insert broadcast axes for the head and query dimensions.
    return is_pad[:, tf.newaxis, tf.newaxis, :]


def create_look_ahead_mask(size):
    """Return a (size, size) mask: 1.0 strictly above the diagonal (future positions)."""
    lower_triangle = tf.linalg.band_part(tf.ones((size, size)), -1, 0)
    return 1 - lower_triangle


def create_masks(inp, tar):
    """Build the three attention masks for one (inp, tar) batch.

    Returns (encoder padding mask, combined decoder self-attention mask,
    encoder-decoder padding mask).
    """
    # Normalize dtypes so equality-with-0 checks behave consistently.
    inp = tf.cast(inp, tf.int32)
    tar = tf.cast(tar, tf.int32)

    enc_padding_mask = create_padding_mask(inp)
    dec_padding_mask = create_padding_mask(inp)

    # Decoder self-attention must hide both future tokens and padding.
    combined_mask = tf.maximum(
        create_padding_mask(tar),
        create_look_ahead_mask(tf.shape(tar)[1]),
    )
    return enc_padding_mask, combined_mask, dec_padding_mask


# Model hyperparameters.
num_layers = 4
d_model = 256  # embedding / hidden dimension
dff = 1024
num_heads = 8
# +1 because Tokenizer ids are 1-based (0 is reserved for padding).
input_vocab_size = len(input_lang_tokenizer.word_index) + 1
target_vocab_size = len(target_lang_tokenizer.word_index) + 1
dropout_rate = 0.1

# Build the Transformer model.
transformer = Transformer(num_layers, d_model, num_heads, dff,
                          input_vocab_size, target_vocab_size,
                          pe_input=MAX_LENGTH,
                          pe_target=MAX_LENGTH,
                          rate=dropout_rate)

# Per-token loss; reduction='none' so padding can be masked out in loss_function.
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')


def loss_function(real, pred):
    """Sparse categorical cross-entropy averaged over non-padding tokens only."""
    per_token = loss_object(real, pred)
    # 1.0 where the target is a real token, 0.0 where it is padding (id 0).
    valid = tf.cast(tf.math.logical_not(tf.math.equal(real, 0)), dtype=per_token.dtype)
    masked = per_token * valid
    return tf.reduce_sum(masked) / tf.reduce_sum(valid)


# Adam optimizer (beta_2=0.98, epsilon=1e-9; fixed learning rate rather than a warm-up schedule).
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.98, epsilon=1e-9)


# One optimization step, compiled to a graph by tf.function.
@tf.function
def train_step(inp, tar):
    """Run one teacher-forced training step; returns the unregularized loss."""
    # Shift targets: the decoder consumes tar[:-1] and learns to predict tar[1:].
    tar_inp = tar[:, :-1]
    tar_real = tar[:, 1:]

    enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp)

    with tf.GradientTape() as tape:
        predictions, _ = transformer(inp, tar_inp,
                                     enc_padding_mask,
                                     combined_mask,
                                     dec_padding_mask)
        loss = loss_function(tar_real, predictions)

        # L2 regularization over every trainable weight.
        l2_loss = 0
        for var in transformer.trainable_variables:
            l2_loss += tf.nn.l2_loss(var)

        # Regularization coefficient.  NOTE(review): 0.01 applied to *all*
        # weights (including LayerNorm scales and biases) is quite strong —
        # confirm this is intended.
        lambda_reg = 0.01
        total_loss = loss + lambda_reg * l2_loss

    gradients = tape.gradient(total_loss, transformer.trainable_variables)

    # Clip by global norm to stabilize training.
    gradients, _ = tf.clip_by_global_norm(gradients, clip_norm=1.0)

    optimizer.apply_gradients(zip(gradients, transformer.trainable_variables))

    # The returned value excludes the L2 term.
    return loss


# Training loop.  EPOCHS and BATCH_SIZE are defined once at the top of the
# file; the original redundantly reassigned both here with the same values.
dataset = tf.data.Dataset.from_tensor_slices((input_tensor, target_tensor)).shuffle(len(input_tensor))
# drop_remainder=True keeps every batch the same shape (avoids tf.function retracing).
dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)

for epoch in range(EPOCHS):
    start = time.time()

    total_loss = 0
    num_batches = 0  # count batches actually seen instead of deriving the count

    for (batch, (inp, tar)) in enumerate(dataset):
        batch_loss = train_step(inp, tar)
        total_loss += batch_loss
        num_batches += 1

        if batch % 50 == 0:
            print(f'Epoch {epoch + 1} Batch {batch} Loss {batch_loss.numpy():.4f}')

    # Bug fix: the original divided by len(input_tensor) // BATCH_SIZE, which
    # is 0 (ZeroDivisionError) whenever the dataset holds fewer than
    # BATCH_SIZE examples — exactly the case for the small seed dataset with
    # drop_remainder=True.
    print(f'Epoch {epoch + 1} Loss {total_loss / max(num_batches, 1):.4f}')
    print(f'Time taken for 1 epoch: {time.time() - start:.2f} secs\n')


# Greedy-decoding translation helper (defined before the smoke tests below).
def translate(sentence):
    """Translate an English sentence via greedy decoding.

    Uses the module-level transformer and both tokenizers; prints the
    echoed input and the translation, and returns the translated string.
    """
    # Preprocess the input sentence and add sequence markers.
    sentence = preprocess_sentence(sentence)
    sentence = '<start> ' + sentence + ' <end>'

    # Convert to token ids (unknown words map to <unk>) and pad to MAX_LENGTH.
    inputs = [input_lang_tokenizer.word_index.get(word, input_lang_tokenizer.word_index['<unk>'])
              for word in sentence.split()]
    inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs], maxlen=MAX_LENGTH, padding='post')
    inputs = tf.convert_to_tensor(inputs, dtype=tf.int32)

    # Seed the decoder with the <start> token.
    output = tf.convert_to_tensor([target_lang_tokenizer.word_index['<start>']], dtype=tf.int32)
    output = tf.expand_dims(output, 0)

    for i in range(MAX_LENGTH):
        enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inputs, output)

        # Predict logits for the whole sequence so far.
        predictions, _ = transformer(inputs, output,
                                     enc_padding_mask,
                                     combined_mask,
                                     dec_padding_mask)

        # Greedy choice from the last position only.
        predictions = predictions[:, -1:, :]
        predicted_id = tf.cast(tf.argmax(predictions, axis=-1), dtype=tf.int32)

        # Stop as soon as the <end> marker is produced.
        if predicted_id == target_lang_tokenizer.word_index['<end>']:
            break

        # Append the predicted token and continue decoding.
        output = tf.concat([output, predicted_id], axis=-1)

    # Convert ids back to words, dropping padding (id 0) and special markers.
    output = tf.squeeze(output, axis=0)
    output = [target_lang_tokenizer.index_word[int(i)] for i in output
              if i > 0 and target_lang_tokenizer.index_word[int(i)] not in ['<start>', '<end>']]

    # Print the result (special markers stripped from the echoed input).
    print(f'输入: {sentence.replace("<start>", "").replace("<end>", "").strip()}')
    print(f'翻译: {"".join(output)}')

    return "".join(output)


# Save the trained model and both tokenizers.
print("保存模型...")
try:
    transformer.save('transformer_model.keras')
    with open('input_tokenizer.pickle', 'wb') as handle:
        pickle.dump(input_lang_tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)
    with open('target_tokenizer.pickle', 'wb') as handle:
        pickle.dump(target_lang_tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)
    print("模型和分词器保存成功！")
except Exception as e:
    # Best-effort save: report the failure but keep running so the
    # translation smoke tests below still execute.
    print(f"保存模型时发生错误: {str(e)}")

# Smoke-test the model on a few sentences.
print("\n测试翻译结果：")
test_sentences = [
    "Hello, how are you?",
    "I am fine, thank you.",
    "What's your name?",
    "Nice to meet you.",
    "your name",
]

print("开始测试翻译...")
for sentence in test_sentences:
    translate(sentence)
    print()
