# 导入必要的库
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import string
from tqdm import tqdm
import warnings

# Suppress library warnings to keep the console output readable.
warnings.filterwarnings("ignore")

# Seed both torch and numpy RNGs so runs are reproducible
# (weight init, shuffling, and any numpy draws below).
torch.manual_seed(42)
np.random.seed(42)
print("torch version:", torch.__version__)


# Read a file's contents and return them as a string.
def read_file(filename, encoding="utf-8"):
    """Return the full text contents of *filename*.

    Args:
        filename: path of the file to read.
        encoding: text encoding, UTF-8 by default. The original version used
            the platform locale default, which breaks on the UTF-8 enwik8
            data under non-UTF-8 locales (e.g. cp1252 on Windows).

    Returns:
        The entire file contents as a single string.
    """
    with open(filename, "r", encoding=encoding) as f:
        return f.read()


# Build the character vocabulary for a text.
def create_char_to_idx_mapping(text):
    """Build forward and reverse character vocabularies over *text*.

    Returns:
        A pair ``(char_to_idx, idx_to_char)``; indices follow the sorted
        order of the distinct characters in *text*.
    """
    vocabulary = sorted(set(text))
    idx_to_char = dict(enumerate(vocabulary))
    char_to_idx = {ch: i for i, ch in idx_to_char.items()}
    return char_to_idx, idx_to_char


# Convert text to a list of character indices.
def text_to_indices(text, char_to_idx, unk_idx=None):
    """Map every character of *text* to its integer index.

    Args:
        text: input string.
        char_to_idx: char -> index mapping (built from the training split).
        unk_idx: optional fallback index for characters missing from the
            mapping. With the default ``None`` the original behavior is
            kept: an unseen character raises ``KeyError``. NOTE(review):
            the vocabulary is built from the training text only, so a
            validation/test character absent from it will crash unless a
            fallback index is supplied.

    Returns:
        list[int] of indices, same length as *text*.
    """
    if unk_idx is None:
        return [char_to_idx[char] for char in text]
    return [char_to_idx.get(char, unk_idx) for char in text]


# Load the enwik8 splits, truncated so the model trains in reasonable time.
train_text = read_file("data/enwik8/train.txt")[:1000000]  # first 1M characters
valid_text = read_file("data/enwik8/valid.txt")[:50000]  # first 50k characters
test_text = read_file("data/enwik8/test.txt")[:5000]  # first 5k characters

# Build the character vocabulary from the training split only.
# NOTE(review): a validation/test character absent from the training split
# will raise KeyError in text_to_indices below — confirm the splits share
# one character set.
char_to_idx, idx_to_char = create_char_to_idx_mapping(train_text)

# Convert each split to a flat list of integer character indices.
train_indices = text_to_indices(train_text, char_to_idx)
valid_indices = text_to_indices(valid_text, char_to_idx)
test_indices = text_to_indices(test_text, char_to_idx)

print("train indices length:", len(train_indices))


# Dataset of fixed-length (input, next-character target) pairs.
class TextDataset(Dataset):
    """Chunk a flat index sequence into fixed-length training samples.

    Sample ``idx`` is ``data[idx*L : (idx+1)*L]`` as the input and the same
    span shifted right by one position as the next-character target.
    """

    def __init__(self, data, seq_length):
        # data: flat sequence of integer character indices.
        self.data = data
        # seq_length: number of characters per sample.
        self.seq_length = seq_length

    def __len__(self):
        # The target of sample idx ends at idx*L + L + 1, so the last sample
        # must leave one spare element at the end of the data. The original
        # ``len(data) // L`` let the final target slice run past the data
        # when len(data) is an exact multiple of L, yielding a target one
        # element shorter than its input and breaking default batch
        # collation. max(0, ...) also guards the empty-data case.
        return max(0, (len(self.data) - 1) // self.seq_length)

    def __getitem__(self, idx):
        start_idx = idx * self.seq_length
        end_idx = start_idx + self.seq_length
        input_seq = torch.tensor(self.data[start_idx:end_idx])
        # Target is the input shifted one character to the right.
        target_seq = torch.tensor(self.data[start_idx + 1 : end_idx + 1])
        return input_seq, target_seq


# Character-level Transformer language model.
# NOTE(review): despite the name, this is a plain TransformerEncoder stack —
# no Infini-attention / compressive memory is implemented here.
class InfiniTransformer(nn.Module):
    """Next-character model: embeddings -> N encoder layers -> vocab logits.

    Fixes over the original version:
      * ``batch_first=True`` — ``TextDataset`` yields (batch, seq) tensors,
        but the default encoder layer treats dim 0 as the sequence, so batch
        and sequence dimensions were silently swapped.
      * learned positional embeddings — without any positional signal the
        encoder is permutation-invariant over positions.
      * causal (upper-triangular) attention mask — the target at position t
        is the input at t+1, so unmasked self-attention could read the label
        directly from the future context.
    """

    def __init__(
        self, vocab_size, d_model=64, n_heads=4, n_layers=3, d_ff=256, seq_length=512
    ):
        super().__init__()

        self.d_model = d_model
        # Maximum sequence length supported by the positional table.
        self.seq_length = seq_length

        # Token and learned position embeddings.
        self.embedding = nn.Embedding(vocab_size, d_model)
        self.pos_embedding = nn.Embedding(seq_length, d_model)

        # Stack of standard encoder layers, batch-first to match the
        # (batch, seq) tensors coming out of the DataLoader.
        self.transformer_layers = nn.ModuleList(
            [
                nn.TransformerEncoderLayer(d_model, n_heads, d_ff, batch_first=True)
                for _ in range(n_layers)
            ]
        )

        # Projection from hidden states to per-character logits.
        self.fc_out = nn.Linear(d_model, vocab_size)

    def forward(self, x):
        # x: (batch, seq) integer indices; seq must be <= self.seq_length.
        seq_len = x.size(1)
        positions = torch.arange(seq_len, device=x.device)

        # Scaled token embedding plus position embedding (broadcast over batch).
        h = self.embedding(x) * (self.d_model ** 0.5) + self.pos_embedding(positions)

        # Causal mask: position t may only attend to positions <= t.
        causal_mask = torch.triu(
            torch.full((seq_len, seq_len), float("-inf"), device=x.device), diagonal=1
        )

        for layer in self.transformer_layers:
            h = layer(h, src_mask=causal_mask)

        # (batch, seq, vocab_size) logits.
        return self.fc_out(h)


# One training epoch over the whole loader.
def train(model, train_loader, criterion, optimizer, device):
    """Run one optimization epoch and return the mean per-batch loss."""
    model.train()
    running = 0.0

    # tqdm wraps the loader purely for the progress display.
    for inputs, targets in tqdm(train_loader, desc="Training"):
        inputs = inputs.to(device)
        targets = targets.to(device)

        optimizer.zero_grad()

        logits = model(inputs)
        # Flatten (batch, seq, vocab) logits against (batch, seq) targets.
        batch_loss = criterion(logits.view(-1, logits.size(-1)), targets.view(-1))

        batch_loss.backward()
        optimizer.step()

        running += batch_loss.item()

    return running / len(train_loader)


# Loss over a held-out loader, no weight updates.
def evaluate(model, valid_loader, criterion, device):
    """Return the mean per-batch loss over *valid_loader* in eval mode."""
    model.eval()
    running = 0.0

    # Gradients are unnecessary for evaluation; tqdm shows progress.
    with torch.no_grad():
        for inputs, targets in tqdm(valid_loader, desc="Evaluating"):
            inputs = inputs.to(device)
            targets = targets.to(device)

            logits = model(inputs)

            # Flatten (batch, seq, vocab) logits against (batch, seq) targets.
            running += criterion(
                logits.view(-1, logits.size(-1)), targets.view(-1)
            ).item()

    return running / len(valid_loader)


# Hyperparameters, deliberately small so the model trains on CPU in minutes.
batch_size = 16  # reduced batch size
seq_length = 512  # reduced sequence length
epochs = 5  # few training epochs
learning_rate = 1e-4
d_model = 64  # reduced model width
n_heads = 4
n_layers = 3
d_ff = 256
print("batch_size:", batch_size)

# Wrap each split in a Dataset and a DataLoader.
train_dataset = TextDataset(train_indices, seq_length)
valid_dataset = TextDataset(valid_indices, seq_length)
test_dataset = TextDataset(test_indices, seq_length)

# Single-process loading, no pinned memory — this run targets CPU only.
train_loader = DataLoader(
    train_dataset, batch_size=batch_size, shuffle=True, num_workers=0, pin_memory=False
)
valid_loader = DataLoader(
    valid_dataset, batch_size=batch_size, shuffle=False, num_workers=0, pin_memory=False
)
test_loader = DataLoader(
    test_dataset, batch_size=batch_size, shuffle=False, num_workers=0, pin_memory=False
)

print("train_loader length:", len(train_loader))
print("valid_loader length:", len(valid_loader))
print("test_loader length:", len(test_loader))

# Build the model and place it on the CPU.
vocab_size = len(char_to_idx)
device = torch.device("cpu")  # force CPU
model = InfiniTransformer(vocab_size, d_model, n_heads, n_layers, d_ff, seq_length).to(
    device
)
print(model)

# Cross-entropy over flattened per-character logits; Adam optimizer.
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# Training loop: one full pass over the training data, then validation,
# per epoch.
for epoch in range(epochs):
    train_loss = train(model, train_loader, criterion, optimizer, device)
    valid_loss = evaluate(model, valid_loader, criterion, device)

    print(
        f"Epoch [{epoch+1}/{epochs}], Train Loss: {train_loss:.4f}, Validation Loss: {valid_loss:.4f}"
    )

# Final evaluation on the held-out test split.
test_loss = evaluate(model, test_loader, criterion, device)
print(f"Test Loss: {test_loss:.4f}")
