import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import numpy as np
import random
from collections import Counter

import pandas as pd
import re
import jieba


# Vocabulary definition
# Pre-training step: before model training starts, walk every sentence in the
# training corpus once to build the vocabulary.
# Built exactly once: training and inference then both reuse this same mapping.
class Vocabulary:
    """Word-level vocabulary mapping whitespace tokens to integer ids.

    Built once over the whole training corpus before training; both training
    and inference then reuse the same mapping. Ids 0-3 are reserved for the
    special tokens <PAD>, <SOS>, <EOS>, <UNK>.
    """

    def __init__(self):
        self.word2idx = {'<PAD>': 0, '<SOS>': 1, '<EOS>': 2, '<UNK>': 3}
        self.idx2word = {0: '<PAD>', 1: '<SOS>', 2: '<EOS>', 3: '<UNK>'}
        self.word_count = Counter()  # raw token frequencies accumulated by build_vocab

    def build_vocab(self, sentences, min_freq=2, is_chinese=False):
        """Add every token that appears at least `min_freq` times to the vocab.

        Sentences are split on whitespace, so Chinese text must be
        pre-segmented (e.g. with jieba) before calling this.
        `is_chinese` is currently unused; kept for interface compatibility.
        """
        for sentence in sentences:
            self.word_count.update(sentence.split())

        for word, count in self.word_count.items():
            if count >= min_freq and word not in self.word2idx:
                # a single shared index keeps word2idx and idx2word in lockstep
                idx = len(self.word2idx)
                self.word2idx[word] = idx
                self.idx2word[idx] = word

    def sentence_to_indices(self, sentence):
        """Map a whitespace-tokenized sentence to ids; OOV words become <UNK>."""
        unk = self.word2idx['<UNK>']
        return [self.word2idx.get(word, unk) for word in sentence.split()]

    def build_tgt_indices(self, sentence):
        """Like sentence_to_indices, but framed with <SOS> ... <EOS> for decoder targets."""
        return ([self.word2idx['<SOS>']]
                + self.sentence_to_indices(sentence)
                + [self.word2idx['<EOS>']])

#
class TranslationDataset(Dataset):
    """Paired source/target index sequences for seq2seq training.

    Each item is a (src, tgt) pair of LongTensors built on the fly from the
    pre-computed index lists.
    """

    def __init__(self, src_data, tgt_data):
        self.src_data = src_data
        self.tgt_data = tgt_data

    def __len__(self):
        return len(self.src_data)

    def __getitem__(self, idx):
        pair = (self.src_data[idx], self.tgt_data[idx])
        return tuple(torch.LongTensor(seq) for seq in pair)


# Input: batch is a list where each element is a (src_seq, tgt_seq) tuple.
def collate_fn(batch):
    """Pad a batch of (src, tgt) index sequences to common lengths.

    batch: list of (src_seq, tgt_seq) LongTensor pairs.
    Returns (src_padded, tgt_padded, src_lengths, tgt_lengths): batch-first
    tensors padded with id 0 (<PAD>) plus the original sequence lengths.
    """
    sources, targets = zip(*batch)

    def lengths_of(seqs):
        return torch.LongTensor([seq.size(0) for seq in seqs])

    def pad(seqs):
        return nn.utils.rnn.pad_sequence(seqs, padding_value=0, batch_first=True)

    return pad(sources), pad(targets), lengths_of(sources), lengths_of(targets)


# 4. RNN translation model
class RNNTranslator(nn.Module):
    """GRU encoder-decoder for sequence-to-sequence translation.

    The encoder consumes the whole source sequence; its final hidden state
    seeds the decoder, which generates target logits one step at a time with
    scheduled teacher forcing.
    """

    def __init__(self, src_vocab_size, tgt_vocab_size, embed_dim=256, hidden_dim=512):
        super().__init__()
        self.encoder_embed = nn.Embedding(src_vocab_size, embed_dim)
        self.decoder_embed = nn.Embedding(tgt_vocab_size, embed_dim)

        self.encoder_rnn = nn.GRU(embed_dim, hidden_dim, batch_first=True)
        self.decoder_rnn = nn.GRU(embed_dim, hidden_dim, batch_first=True)

        self.fc = nn.Linear(hidden_dim, tgt_vocab_size)

    def forward(self, src, tgt, teacher_forcing_ratio=0.5):
        """Return (batch, tgt_len, tgt_vocab) logits for a source/target batch.

        tgt is assumed to start with <SOS>; slot t=0 of the output stays
        all-zero and is skipped by the training loss.
        """
        assert src.device == tgt.device == next(self.parameters()).device

        # Encode the source; only the final hidden state feeds the decoder.
        encoder_outputs, hidden = self.encoder_rnn(self.encoder_embed(src))

        batch_size, tgt_len = tgt.shape
        vocab_size = self.fc.out_features

        # Allocate on the input's device so CPU/GPU runs both work.
        outputs = torch.zeros(batch_size, tgt_len, vocab_size, device=src.device)

        step_input = tgt[:, :1]  # decoding starts from the <SOS> column
        for step in range(1, tgt_len):
            step_output, hidden = self.decoder_rnn(self.decoder_embed(step_input), hidden)
            logits = self.fc(step_output.squeeze(1))
            outputs[:, step] = logits

            # Exactly one RNG draw per step: teacher forcing vs. own prediction.
            teach = random.random() < teacher_forcing_ratio
            if teach and step < tgt_len - 1:
                step_input = tgt[:, step:step + 1]
            else:
                step_input = logits.argmax(1, keepdim=True)

        return outputs


# 5. Training loop
def train(model, dataloader, epochs=10, optimizer=None, criterion=None, device=None):
    """Train a seq2seq model with teacher forcing.

    Args:
        model: module whose forward is `model(src, tgt)` -> (B, T, V) logits.
        dataloader: yields (src, tgt, src_lengths, tgt_lengths) batches.
        epochs: number of full passes over the dataloader.
        optimizer: defaults to Adam(lr=0.001) over model.parameters()
            (previously read from a module-level global).
        criterion: defaults to CrossEntropyLoss(ignore_index=0) so <PAD>
            positions contribute no loss.
        device: defaults to the device the model's parameters live on
            (previously read from a module-level global).

    Prints the mean batch loss after each epoch.
    """
    if device is None:
        device = next(model.parameters()).device
    if optimizer is None:
        optimizer = optim.Adam(model.parameters(), lr=0.001)
    if criterion is None:
        criterion = nn.CrossEntropyLoss(ignore_index=0)

    model.train()
    for epoch in range(epochs):
        total_loss = 0.0
        for src, tgt, src_len, tgt_len in dataloader:
            src, tgt = src.to(device), tgt.to(device)

            optimizer.zero_grad()
            output = model(src, tgt)

            # Position 0 is <SOS> (the model emits zeros there), so the loss
            # skips it; <PAD>=0 targets are ignored by the criterion.
            loss = criterion(
                output[:, 1:].reshape(-1, output.size(-1)),
                tgt[:, 1:].reshape(-1)
            )

            loss.backward()
            optimizer.step()
            total_loss += loss.item()

        print(f'Epoch {epoch + 1}, Loss: {total_loss / len(dataloader):.4f}')


# 6. Translation inference (greedy decoding)
def translate(model, sentence, src_vocab, tgt_vocab, max_len=20, device=None):
    """Greedy-decode a translation of `sentence` and return it as a string.

    Args:
        model: trained RNNTranslator-like module (needs encoder_embed,
            encoder_rnn, decoder_embed, decoder_rnn, fc attributes).
        sentence: whitespace-tokenizable source sentence.
        src_vocab / tgt_vocab: Vocabulary instances for the two languages.
        max_len: hard cap on the number of generated tokens.
        device: defaults to the device the model's parameters live on
            (previously read from a module-level global).

    Returns the space-joined decoded words (without <SOS>/<EOS>).
    """
    if device is None:
        device = next(model.parameters()).device

    model.eval()
    indices = src_vocab.sentence_to_indices(sentence)
    if not indices:
        # A zero-length sequence cannot be fed through the encoder GRU.
        return ''
    src = torch.LongTensor(indices).unsqueeze(0).to(device)

    with torch.no_grad():
        # Encoder: only the final hidden state is needed for decoding.
        src_embedded = model.encoder_embed(src)
        _, hidden = model.encoder_rnn(src_embedded)

        # Decoder: start from <SOS>, greedily pick the argmax token each step.
        decoder_input = torch.LongTensor([tgt_vocab.word2idx['<SOS>']]).to(device)
        decoded_words = []

        for _ in range(max_len):
            decoder_embedded = model.decoder_embed(decoder_input.unsqueeze(0))
            decoder_output, hidden = model.decoder_rnn(decoder_embedded, hidden)
            output = model.fc(decoder_output.squeeze(0))
            top_idx = output.argmax().item()

            if top_idx == tgt_vocab.word2idx['<EOS>']:
                break

            decoded_words.append(tgt_vocab.idx2word[top_idx])
            decoder_input = torch.LongTensor([top_idx]).to(device)

    return ' '.join(decoded_words)


def clean_text(text):
    """Normalize a raw sentence: strip punctuation, lowercase, trim whitespace."""
    without_punct = re.sub(r"[^\w\s]", "", text)
    return without_punct.lower().strip()


def new_chinese(sentence):
    """Segment a Chinese sentence with jieba and join the tokens with spaces."""
    return ' '.join(jieba.cut(sentence))


if __name__ == '__main__':
    # Expect a tab-separated parallel corpus: column 0 = English, column 1 = Chinese.
    data = pd.read_table('data/cmn.txt')
    data = data.iloc[:, :2]  # all rows, first two columns (indices 0 and 1)

    # Sample corpus (swap in a real parallel corpus such as WMT or OPUS for
    # serious use). NOTE: the target side is Chinese — the old name
    # "french_sentences" was misleading.
    english_sentences = data.iloc[:, 0].tolist()
    chinese_sentences = data.iloc[:, 1].tolist()

    english_sentences = [clean_text(sent) for sent in english_sentences]
    chinese_sentences = [clean_text(sent) for sent in chinese_sentences]
    # Chinese has no word boundaries; segment with jieba so Vocabulary's
    # whitespace split produces real tokens.
    chinese_sentences = [new_chinese(sent) for sent in chinese_sentences]

    # Build the vocabularies (once, before training).
    src_vocab = Vocabulary()
    tgt_vocab = Vocabulary()
    src_vocab.build_vocab(english_sentences, 2, False)
    tgt_vocab.build_vocab(chinese_sentences, 2, True)

    # Convert sentences to index sequences; targets get <SOS>/<EOS> framing.
    src_data = [src_vocab.sentence_to_indices(sent) for sent in english_sentences]
    tgt_data = [tgt_vocab.build_tgt_indices(sent) for sent in chinese_sentences]

    # Data pipeline.
    dataset = TranslationDataset(src_data, tgt_data)
    dataloader = DataLoader(dataset, batch_size=64, collate_fn=collate_fn)

    # Model, loss (ignore_index=0 skips <PAD>), optimizer.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = RNNTranslator(len(src_vocab.word2idx), len(tgt_vocab.word2idx)).to(device)
    criterion = nn.CrossEntropyLoss(ignore_index=0).to(device)
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # Train.
    train(model, dataloader, epochs=20)

    # Sanity-check translation (output is Chinese).
    print(translate(model, "i am happy all day", src_vocab, tgt_vocab))
