import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from collections import Counter
import re

class EncoderRNN(nn.Module):
    """Encoder: embeds a source-token batch and runs it through a stacked GRU.

    Produces the per-timestep GRU outputs together with the final hidden
    state, which a decoder consumes as its initial state.
    """

    def __init__(self, input_size, hidden_size, num_layers=2, dropout=0.1):
        super(EncoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers

        self.embedding = nn.Embedding(input_size, hidden_size)
        # Inter-layer dropout is only meaningful when the GRU is stacked.
        recurrent_dropout = dropout if num_layers > 1 else 0
        self.gru = nn.GRU(hidden_size, hidden_size, num_layers,
                          dropout=recurrent_dropout, batch_first=True)
        self.dropout = nn.Dropout(dropout)

    def forward(self, input_seq, hidden=None):
        """Encode (batch, seq_len) token ids; return (outputs, hidden)."""
        x = self.embedding(input_seq)
        x = self.dropout(x)
        return self.gru(x, hidden)

    def init_hidden(self, batch_size, device):
        """Zero-filled initial hidden state, shape (layers, batch, hidden)."""
        shape = (self.num_layers, batch_size, self.hidden_size)
        return torch.zeros(*shape, device=device)

class DecoderRNN(nn.Module):
    """Plain (no-attention) decoder: one GRU step per target token.

    Embeds the current target token, advances the GRU, and projects the
    step output to vocabulary logits.
    """

    def __init__(self, hidden_size, output_size, num_layers=2, dropout=0.1):
        super(DecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.num_layers = num_layers

        self.embedding = nn.Embedding(output_size, hidden_size)
        # Dropout between GRU layers only applies with more than one layer.
        recurrent_dropout = dropout if num_layers > 1 else 0
        self.gru = nn.GRU(hidden_size, hidden_size, num_layers,
                          dropout=recurrent_dropout, batch_first=True)
        self.out = nn.Linear(hidden_size, output_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, input_seq, hidden):
        """One decode step: returns (logits (batch, vocab), new hidden)."""
        x = self.dropout(self.embedding(input_seq))
        step_out, hidden = self.gru(x, hidden)
        logits = self.out(step_out.squeeze(1))
        return logits, hidden

class AttentionDecoderRNN(nn.Module):
    """Decoder with a simple concat-style attention over encoder outputs.

    Each step compares the top-layer decoder hidden state against every
    encoder output to form attention weights; the resulting context vector
    is concatenated with the token embedding before entering the GRU.
    """

    def __init__(self, hidden_size, output_size, num_layers=2, dropout=0.1):
        super(AttentionDecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.num_layers = num_layers

        self.embedding = nn.Embedding(output_size, hidden_size)
        # Scores a [hidden ; encoder_output] pair down to a single scalar.
        self.attention = nn.Linear(hidden_size * 2, 1)
        recurrent_dropout = dropout if num_layers > 1 else 0
        self.gru = nn.GRU(hidden_size * 2, hidden_size, num_layers,
                          dropout=recurrent_dropout, batch_first=True)
        self.out = nn.Linear(hidden_size, output_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, input_seq, hidden, encoder_outputs):
        """One decode step.

        Returns (logits (batch, vocab), new hidden,
        attention weights (batch, src_len, 1)).
        """
        embedded = self.dropout(self.embedding(input_seq))

        # Broadcast the top-layer hidden state across the source length,
        # score each position, then normalise over the source dimension.
        src_len = encoder_outputs.size(1)
        query = hidden[-1].unsqueeze(1).repeat(1, src_len, 1)
        scores = torch.tanh(
            self.attention(torch.cat((query, encoder_outputs), dim=2)))
        attn = torch.softmax(scores, dim=1)

        # Weighted sum of encoder outputs -> context of shape (batch, 1, hidden).
        context = torch.bmm(attn.transpose(1, 2), encoder_outputs)

        step_out, hidden = self.gru(torch.cat((embedded, context), dim=2), hidden)
        logits = self.out(step_out.squeeze(1))
        return logits, hidden, attn

class RNTranslator:
    """English-to-Chinese sequence-to-sequence translator.

    Wires an ``EncoderRNN`` to a decoder (attention-based by default) and
    owns vocabulary construction, teacher-forced training, greedy decoding,
    and checkpointing.  English text is tokenized into lowercased words and
    punctuation; Chinese text is tokenized per character.
    """

    def __init__(self, max_length=50, hidden_size=256, num_layers=2):
        self.max_length = max_length
        self.hidden_size = hidden_size
        self.num_layers = num_layers

        # Vocabulary lookup tables (filled by build_vocab / load_model).
        self.en_word2idx = {}
        self.en_idx2word = {}
        self.zh_word2idx = {}
        self.zh_idx2word = {}

        # Special token ids; must match the order of the sentinel entries
        # prepended to the vocabulary lists in _build_lang_vocab.
        self.PAD_token = 0
        self.SOS_token = 1
        self.EOS_token = 2
        self.UNK_token = 3

        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Created by create_model (via train or load_model).
        self.encoder = None
        self.decoder = None

    def preprocess_text(self, text, lang='en'):
        """Tokenize *text*.

        English ('en'): lowercase, then split into words/contractions and
        the punctuation marks ``.,!?;``.  Any other value of *lang* is
        treated as Chinese: split into individual characters.
        """
        if lang == 'en':
            text = text.lower()
            words = re.findall(r"[\w']+|[.,!?;]", text)
        else:
            words = list(text)
        return words

    def _build_lang_vocab(self, sentences, lang):
        """Return (word2idx, idx2word) for one language.

        Keeps only tokens that occur at least twice; rarer tokens map to
        <UNK> at encoding time.
        """
        tokens = []
        for sentence in sentences:
            tokens.extend(self.preprocess_text(sentence, lang))

        counts = Counter(tokens)
        vocab = ['<PAD>', '<SOS>', '<EOS>', '<UNK>'] + \
                [word for word, count in counts.items() if count >= 2]

        word2idx = {word: idx for idx, word in enumerate(vocab)}
        idx2word = {idx: word for idx, word in enumerate(vocab)}
        return word2idx, idx2word

    def build_vocab(self, en_sentences, zh_sentences):
        """Build the English (source) and Chinese (target) vocabularies."""
        self.en_word2idx, self.en_idx2word = self._build_lang_vocab(en_sentences, 'en')
        self.zh_word2idx, self.zh_idx2word = self._build_lang_vocab(zh_sentences, 'zh')

    def sentence_to_indices(self, sentence, lang='en'):
        """Convert a sentence to a fixed-length id sequence.

        The result is <SOS> + token ids + <EOS>, padded with <PAD> (or
        truncated, with the final position forced back to <EOS>) to exactly
        ``self.max_length`` entries.
        """
        words = self.preprocess_text(sentence, lang)
        word2idx = self.en_word2idx if lang == 'en' else self.zh_word2idx

        indices = [word2idx.get(word, self.UNK_token) for word in words]
        indices = [self.SOS_token] + indices + [self.EOS_token]

        if len(indices) < self.max_length:
            indices.extend([self.PAD_token] * (self.max_length - len(indices)))
        else:
            indices = indices[:self.max_length]
            indices[-1] = self.EOS_token  # keep the sequence terminated

        return indices

    def indices_to_sentence(self, indices, lang='zh'):
        """Convert an id sequence back to text, stopping at the first <EOS>.

        <PAD> and <SOS> are skipped; Chinese characters are joined without
        spaces, English words with single spaces.
        """
        idx2word = self.zh_idx2word if lang == 'zh' else self.en_idx2word

        words = []
        for idx in indices:
            if idx == self.EOS_token:
                break
            if idx != self.PAD_token and idx != self.SOS_token:
                words.append(idx2word.get(idx, '<UNK>'))

        if lang == 'zh':
            return ''.join(words)
        else:
            return ' '.join(words)

    def create_model(self, use_attention=True):
        """Instantiate encoder and decoder sized to the current vocabularies."""
        encoder_input_size = len(self.en_word2idx)
        decoder_output_size = len(self.zh_word2idx)

        self.encoder = EncoderRNN(encoder_input_size, self.hidden_size,
                                  self.num_layers).to(self.device)

        if use_attention:
            self.decoder = AttentionDecoderRNN(self.hidden_size, decoder_output_size,
                                               self.num_layers).to(self.device)
        else:
            self.decoder = DecoderRNN(self.hidden_size, decoder_output_size,
                                      self.num_layers).to(self.device)

    def train(self, en_sentences, zh_sentences, epochs=100, batch_size=32,
              learning_rate=0.001, use_attention=True):
        """Train the model with full teacher forcing.

        Builds vocabularies, creates the model, then iterates over the
        parallel corpus in fixed-order mini-batches, stepping the decoder
        one target token at a time.  Prints the average per-batch loss
        every 10 epochs.
        """
        self.build_vocab(en_sentences, zh_sentences)
        self.create_model(use_attention)

        en_indices = [self.sentence_to_indices(sent, 'en') for sent in en_sentences]
        zh_indices = [self.sentence_to_indices(sent, 'zh') for sent in zh_sentences]

        en_tensor = torch.tensor(en_indices, dtype=torch.long).to(self.device)
        zh_tensor = torch.tensor(zh_indices, dtype=torch.long).to(self.device)

        encoder_optimizer = optim.Adam(self.encoder.parameters(), lr=learning_rate)
        decoder_optimizer = optim.Adam(self.decoder.parameters(), lr=learning_rate)
        # ignore_index keeps padding positions out of the per-step average.
        criterion = nn.CrossEntropyLoss(ignore_index=self.PAD_token)

        self.encoder.train()
        self.decoder.train()

        # Actual number of mini-batches per epoch (ceiling division).
        num_batches = (len(en_sentences) + batch_size - 1) // batch_size

        for epoch in range(epochs):
            total_loss = 0

            for i in range(0, len(en_sentences), batch_size):
                batch_en = en_tensor[i:i + batch_size]
                batch_zh = zh_tensor[i:i + batch_size]

                encoder_outputs, encoder_hidden = self.encoder(batch_en)

                # Teacher forcing: feed the gold target shifted by one step.
                decoder_input = batch_zh[:, :-1]   # drop last position
                decoder_target = batch_zh[:, 1:]   # drop <SOS>

                decoder_hidden = encoder_hidden
                loss = 0

                for t in range(decoder_input.size(1)):
                    target_t = decoder_target[:, t]
                    # BUGFIX: when every target at this step is <PAD>,
                    # CrossEntropyLoss's mean over zero kept elements is NaN,
                    # which poisons the whole backward pass.  Padding is
                    # always trailing, so we can stop decoding here.
                    if (target_t == self.PAD_token).all():
                        break

                    if use_attention:
                        decoder_output, decoder_hidden, _ = self.decoder(
                            decoder_input[:, t:t + 1], decoder_hidden, encoder_outputs)
                    else:
                        decoder_output, decoder_hidden = self.decoder(
                            decoder_input[:, t:t + 1], decoder_hidden)

                    loss += criterion(decoder_output, target_t)

                encoder_optimizer.zero_grad()
                decoder_optimizer.zero_grad()
                loss.backward()

                # Clip gradients to stabilise RNN training.
                torch.nn.utils.clip_grad_norm_(self.encoder.parameters(), max_norm=1.0)
                torch.nn.utils.clip_grad_norm_(self.decoder.parameters(), max_norm=1.0)

                encoder_optimizer.step()
                decoder_optimizer.step()

                total_loss += loss.item()

            if (epoch + 1) % 10 == 0:
                # BUGFIX: divide by the true batch count, not the possibly
                # fractional len/batch_size.
                avg_loss = total_loss / num_batches
                print(f'Epoch [{epoch+1}/{epochs}], Loss: {avg_loss:.4f}')

    def translate(self, english_sentence, use_attention=True):
        """Greedily decode the Chinese translation of *english_sentence*.

        *use_attention* must match the flag the current model was created
        with.  Raises ValueError if no model has been trained or loaded.
        """
        if self.encoder is None or self.decoder is None:
            raise ValueError("模型未训练，请先调用train方法训练模型")

        self.encoder.eval()
        self.decoder.eval()

        with torch.no_grad():
            en_indices = self.sentence_to_indices(english_sentence, 'en')
            en_tensor = torch.tensor([en_indices], dtype=torch.long).to(self.device)

            encoder_outputs, encoder_hidden = self.encoder(en_tensor)

            # Start decoding from <SOS>, feeding each prediction back in.
            decoder_input = torch.tensor([[self.zh_word2idx['<SOS>']]],
                                         dtype=torch.long).to(self.device)
            decoder_hidden = encoder_hidden

            decoded_words = []

            for _ in range(self.max_length):
                if use_attention:
                    decoder_output, decoder_hidden, _ = self.decoder(
                        decoder_input, decoder_hidden, encoder_outputs)
                else:
                    decoder_output, decoder_hidden = self.decoder(
                        decoder_input, decoder_hidden)

                # Greedy choice: take the highest-scoring token.
                topv, topi = decoder_output.topk(1)
                decoder_input = topi.squeeze().detach().unsqueeze(0).unsqueeze(0)

                if topi.item() == self.zh_word2idx['<EOS>']:
                    break

                decoded_words.append(self.zh_idx2word.get(topi.item(), '<UNK>'))

            return ''.join(decoded_words)

    def save_model(self, filepath):
        """Persist weights plus everything needed to rebuild the translator."""
        torch.save({
            'encoder_state_dict': self.encoder.state_dict(),
            'decoder_state_dict': self.decoder.state_dict(),
            'en_word2idx': self.en_word2idx,
            'zh_word2idx': self.zh_word2idx,
            'en_idx2word': self.en_idx2word,
            'zh_idx2word': self.zh_idx2word,
            'max_length': self.max_length,
            'hidden_size': self.hidden_size,
            'num_layers': self.num_layers,
            # BUGFIX: record the decoder variant so load_model rebuilds the
            # matching architecture instead of always assuming attention.
            'use_attention': isinstance(self.decoder, AttentionDecoderRNN),
        }, filepath)

    def load_model(self, filepath):
        """Restore a translator saved with save_model."""
        checkpoint = torch.load(filepath, map_location=self.device)

        self.max_length = checkpoint['max_length']
        self.hidden_size = checkpoint['hidden_size']
        self.num_layers = checkpoint['num_layers']

        self.en_word2idx = checkpoint['en_word2idx']
        self.zh_word2idx = checkpoint['zh_word2idx']
        self.en_idx2word = checkpoint['en_idx2word']
        self.zh_idx2word = checkpoint['zh_idx2word']

        # Older checkpoints lack the flag; they were always saved from the
        # attention configuration this method used to hard-code.
        self.create_model(use_attention=checkpoint.get('use_attention', True))
        self.encoder.load_state_dict(checkpoint['encoder_state_dict'])
        self.decoder.load_state_dict(checkpoint['decoder_state_dict'])