import torch
import torch.nn as nn
import numpy as np

# Vocabulary sizes and special-token ids used by the synthetic data generator.
src_vocab_size = 10000
trg_vocab_size = 8000
BOS_TOKEN_IDX = 1  # beginning-of-sequence marker
EOS_TOKEN_IDX = 2  # end-of-sequence marker
PAD_TOKEN_IDX = 0  # padding id; NOTE(review): original comment claimed the Transformer uses padding_idx=1, but the model below is built with src_pad_ix=0/trg_pad_ix=0 — confirm against the Transformer class

def create_sample_data(num_samples=1000, max_length=20,
                       src_vocab=None, trg_vocab=None,
                       bos_idx=None, eos_idx=None, pad_idx=None):
    """Generate random parallel token sequences for smoke-testing the model.

    Every sequence is ``[BOS, random tokens..., EOS]`` right-padded with PAD
    up to ``max_length``.  Random token ids are drawn from ``[3, vocab)`` so
    they never collide with the PAD/BOS/EOS ids (0/1/2).

    Args:
        num_samples: number of (source, target) pairs to generate.
        max_length: fixed padded length of every sequence.
        src_vocab / trg_vocab: vocabulary sizes; default to the module-level
            ``src_vocab_size`` / ``trg_vocab_size``.
        bos_idx / eos_idx / pad_idx: special-token ids; default to the
            module-level ``BOS_TOKEN_IDX`` / ``EOS_TOKEN_IDX`` / ``PAD_TOKEN_IDX``.

    Returns:
        Tuple ``(src_data, trg_data)``: two lists of ``num_samples`` lists of
        plain Python ints, each inner list exactly ``max_length`` long.
    """
    # Fall back to the module-level defaults so existing callers are unchanged.
    if src_vocab is None:
        src_vocab = src_vocab_size
    if trg_vocab is None:
        trg_vocab = trg_vocab_size
    if bos_idx is None:
        bos_idx = BOS_TOKEN_IDX
    if eos_idx is None:
        eos_idx = EOS_TOKEN_IDX
    if pad_idx is None:
        pad_idx = PAD_TOKEN_IDX

    src_data = []
    trg_data = []

    for _ in range(num_samples):
        # Random total sequence length (including BOS/EOS): 5-15 tokens.
        src_len = np.random.randint(5, 16)
        trg_len = np.random.randint(5, 16)

        # [BOS, tokens..., EOS]; cast numpy scalars to plain Python ints so
        # the result is cleanly serializable and type-consistent.
        src_seq = [bos_idx] + \
                  [int(t) for t in np.random.randint(3, src_vocab, src_len - 2)] + \
                  [eos_idx]
        trg_seq = [bos_idx] + \
                  [int(t) for t in np.random.randint(3, trg_vocab, trg_len - 2)] + \
                  [eos_idx]

        # Right-pad to the fixed length.
        src_seq += [pad_idx] * (max_length - len(src_seq))
        trg_seq += [pad_idx] * (max_length - len(trg_seq))

        src_data.append(src_seq)
        trg_data.append(trg_seq)

    return src_data, trg_data


'''
Data loading with a real dataset (falls back to synthetic data on failure).
'''
# To use a Hugging Face tokenizer, implement it like this:
def create_real_data_with_hf_tokenizer(num_samples, max_length):
    """Build parallel token sequences from the opus_books en-fr corpus.

    Uses a pretrained BERT tokenizer for both languages and formats every
    sequence as ``[CLS/BOS, tokens..., SEP/EOS]`` right-padded to
    ``max_length``.  If ``transformers``/``datasets`` (or the downloads)
    are unavailable, falls back to :func:`create_sample_data`.

    Args:
        num_samples: maximum number of sentence pairs taken from the corpus.
        max_length: fixed padded length of every sequence.

    Returns:
        Tuple ``(src_data, trg_data)`` of lists of token-id lists.
    """
    try:
        from transformers import AutoTokenizer
        from datasets import load_dataset

        # Pretrained tokenizer (requires: pip install transformers)
        tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        # English-French parallel corpus
        dataset = load_dataset("opus_books", "en-fr", split='train')

        # Resolve special-token ids once, falling back to this file's
        # defaults only when the tokenizer does not define them.
        # (The previous `x or default` idiom would wrongly replace a
        # legitimate token id of 0 — BERT's pad_token_id IS 0.)
        bos_id = tokenizer.cls_token_id if tokenizer.cls_token_id is not None else 1
        eos_id = tokenizer.sep_token_id if tokenizer.sep_token_id is not None else 2
        pad_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else 0

        src_data = []
        trg_data = []

        for i, example in enumerate(dataset):
            if i >= num_samples:
                break

            # Raw sentence pair.
            src_text = example['translation']['en']
            trg_text = example['translation']['fr']

            # Tokenize and truncate, leaving room for the two special tokens.
            src_tokens = tokenizer.encode(src_text, add_special_tokens=False)[:max_length-2]
            trg_tokens = tokenizer.encode(trg_text, add_special_tokens=False)[:max_length-2]

            # Add BOS/EOS markers.  Use the SAME tokenizer ids for source and
            # target (previously the target hard-coded 1/2, which are
            # different tokens in the BERT vocabulary than CLS/SEP used for
            # the source).
            src_seq = [bos_id] + src_tokens + [eos_id]
            trg_seq = [bos_id] + trg_tokens + [eos_id]

            # Right-pad to the fixed length.
            src_seq += [pad_id] * (max_length - len(src_seq))
            trg_seq += [pad_id] * (max_length - len(trg_seq))

            src_data.append(src_seq)
            trg_data.append(trg_seq)

        return src_data, trg_data
    except Exception as e:
        # Deliberate best-effort fallback: any failure (missing libraries,
        # no network, dataset errors) degrades to synthetic data.
        print(f"无法加载真实数据集或tokenizer，使用示例数据: {e}")
        return create_sample_data(num_samples, max_length)

# Build the training data (falls back to synthetic samples if the real
# dataset or tokenizer cannot be loaded).
src_train_data, trg_train_data = create_real_data_with_hf_tokenizer(num_samples=10000, max_length=20)


# 示例数据加载器
from torch.utils.data import DataLoader, Dataset

class TranslationDataset(Dataset):
    """Paired source/target token sequences for translation training.

    Wraps two parallel lists of token-id sequences; each item is returned
    as a dict of LongTensors under the keys 'src' and 'trg'.
    """

    def __init__(self, src_data, trg_data):
        self.src_data = src_data
        self.trg_data = trg_data

    def __len__(self):
        # One example per source sequence.
        return len(self.src_data)

    def __getitem__(self, idx):
        src_seq = self.src_data[idx]
        trg_seq = self.trg_data[idx]
        return {
            'src': torch.tensor(src_seq, dtype=torch.long),
            'trg': torch.tensor(trg_seq, dtype=torch.long),
        }

# Wrap the data in a Dataset/DataLoader pair; reshuffle every epoch.
train_dataset = TranslationDataset(src_train_data, trg_train_data)
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)


"""
训练
"""
from Transformer import Transformer
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
src_vocab_size = tokenizer.vocab_size
trg_vocab_size = tokenizer.vocab_size
# 设置设备
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# 初始化模型
model = Transformer(
    src_pad_ix=0,
    trg_pad_ix=0,
    enc_voc_size=src_vocab_size,
    dec_voc_size=trg_vocab_size,
    d_model=512,
    max_len=512,
    n_heads=8,
    ffn_hidden=2048,
    n_layers=6,
    drop_prod=0.1,
    device=device
).to(device)

# 使用交叉熵损失，忽略填充位置
criterion = nn.CrossEntropyLoss(ignore_index=0)
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001, betas=(0.9, 0.98), eps=1e-9)

def train_epoch(model, data_loader, optimizer, criterion, device):
    """Run one optimization pass over ``data_loader``.

    Uses teacher forcing: the decoder input is the target shifted right
    (``trg[:, :-1]``) and the loss labels are ``trg[:, 1:]``.

    Args:
        model: seq2seq model called as ``model(src, decoder_input)``.
        data_loader: iterable of batches with 'src' and 'trg' LongTensors.
        optimizer: optimizer stepping over ``model``'s parameters.
        criterion: loss over flattened (positions, vocab) logits.
        device: device the batch tensors are moved to.

    Returns:
        Mean batch loss over the epoch (float).
    """
    model.train()
    running_loss = 0.0

    for batch_idx, batch in enumerate(data_loader):
        src = batch['src'].to(device)
        trg = batch['trg'].to(device)

        # Shift the target: predict token t+1 from tokens <= t.
        decoder_input, labels = trg[:, :-1], trg[:, 1:]

        logits = model(src, decoder_input)

        # Flatten (batch, seq, vocab) -> (batch*seq, vocab) for the loss.
        vocab = logits.shape[-1]
        loss = criterion(logits.reshape(-1, vocab), labels.reshape(-1))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        running_loss += loss.item()

        # Periodic progress report.
        if batch_idx % 100 == 0:
            print(f"Batch {batch_idx}, Loss: {loss.item():.4f}")

    return running_loss / len(data_loader)

# Track the best (lowest) average epoch loss seen so far.
best_loss = float('inf')

# Checkpoint helper
def save_checkpoint(model, optimizer, epoch, loss, filepath):
    """Serialize model/optimizer state plus training metadata to ``filepath``.

    The vocabulary sizes are stored alongside the weights so the model can
    be rebuilt with matching embedding shapes at load time.
    """
    state = {
        'epoch': epoch,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'loss': loss,
        # Module-level globals at save time (set from the tokenizer above).
        'src_vocab_size': src_vocab_size,
        'trg_vocab_size': trg_vocab_size
    }
    torch.save(state, filepath)
    print(f"Model saved to {filepath}")

# Train for a fixed number of epochs, checkpointing whenever the average
# epoch loss improves.
num_epochs = 80
for epoch in range(num_epochs):
    avg_loss = train_epoch(model, train_loader, optimizer, criterion, device)
    print(f"Epoch [{epoch+1}/{num_epochs}], Average Loss: {avg_loss:.4f}")

    # Keep only the best model (lowest average training loss so far).
    if avg_loss < best_loss:
        best_loss = avg_loss
        save_checkpoint(
            model,
            optimizer,
            epoch,
            avg_loss,
            'transformer_best_model.pth'
        )
        print(f"New best model saved with loss: {best_loss:.4f}")