import tiktoken
import torch
from torch.utils.data import Dataset
import pandas as pd  # pandas, aliased as pd
from torch.utils.data import DataLoader
from gpt2_download import download_and_load_gpt2
from gpt2_tools import GPTModel, load_weights_into_gpt
import time
import matplotlib.pyplot as plt

# Sanity-check the GPT-2 BPE tokenizer: "<|endoftext|>" must encode to id 50256.
tokenizer = tiktoken.get_encoding("gpt2")
eot_ids = tokenizer.encode("<|endoftext|>", allowed_special={"<|endoftext|>"})
print(eot_ids)


class SpamDataset(Dataset):
    """Dataset of tokenized texts with binary spam/ham labels.

    Each item is a ``(token_ids, label)`` pair of ``torch.long`` tensors.
    All sequences are truncated and right-padded to one common length.
    """

    def __init__(self, csv_file, tokenizer, max_length=None, pad_token_id=50256):
        """Load the CSV, tokenize the "Text" column, and normalize lengths.

        Args:
            csv_file: path to a CSV with "Text" and "Label" columns.
            tokenizer: object exposing ``encode(str) -> list[int]``.
            max_length: fixed sequence length; if None, use the longest text.
            pad_token_id: token appended to right-pad short sequences
                (50256 is GPT-2's "<|endoftext|>").
        """
        self.data = pd.read_csv(csv_file)
        # Tokenize every text once up front.
        self.encoded_texts = [tokenizer.encode(text) for text in self.data["Text"]]
        # Derive the target length from the data unless one was supplied.
        self.max_length = self._longest_encoded_length() if max_length is None else max_length
        # Truncate over-long sequences, then right-pad everything to max_length.
        normalized = []
        for token_ids in self.encoded_texts:
            token_ids = token_ids[:self.max_length]
            normalized.append(token_ids + [pad_token_id] * (self.max_length - len(token_ids)))
        self.encoded_texts = normalized

        # Map the string labels to integers ("spam" -> 1, "ham" -> 0).
        self.label_map = {"spam": 1, "ham": 0}
        self.data["Label"] = self.data["Label"].map(self.label_map)

    def __getitem__(self, index):
        """Return the (token_ids, label) tensor pair for sample *index*."""
        label = self.data.iloc[index]["Label"]
        return (
            torch.tensor(self.encoded_texts[index], dtype=torch.long),
            torch.tensor(label, dtype=torch.long),
        )

    def __len__(self):
        """Number of samples in the dataset."""
        return len(self.data)

    def _longest_encoded_length(self):
        """Length of the longest tokenized text (before padding)."""
        return max((len(token_ids) for token_ids in self.encoded_texts), default=0)

# Build the three splits. Validation/test reuse the training split's max
# length so every split shares one fixed sequence length.
train_dataset = SpamDataset(csv_file="train.csv", tokenizer=tokenizer, max_length=None)

print("train_dataset.length: ", train_dataset.max_length)

val_dataset = SpamDataset(csv_file="validation.csv", tokenizer=tokenizer, max_length=train_dataset.max_length)

test_dataset = SpamDataset(csv_file="test.csv", tokenizer=tokenizer, max_length=train_dataset.max_length)

# Settings chosen for broad hardware compatibility.
num_workers = 0
batch_size = 8
torch.manual_seed(123)

# Training loader: reshuffle each epoch and drop a trailing partial batch so
# every batch has a fixed size.
train_loader = DataLoader(
    train_dataset,
    batch_size=batch_size,
    shuffle=True,
    num_workers=num_workers,
    drop_last=True,
)
# Evaluation loaders keep every sample (no shuffling, partial batches allowed).
val_loader = DataLoader(
    val_dataset,
    batch_size=batch_size,
    num_workers=num_workers,
    drop_last=False,
)
test_loader = DataLoader(
    test_dataset,
    batch_size=batch_size,
    num_workers=num_workers,
    drop_last=False,
)

# Inspect one training batch (demonstration only). drop_last=True guarantees
# every training batch has the same shape, so the first batch is
# representative; grabbing it directly avoids the original loop that
# pointlessly iterated the entire loader just to keep the last batch.
input_batch, target_batch = next(iter(train_loader))
print("Input batch dimensions: ", input_batch.shape)
print("Label batch dimensions", target_batch.shape)
'''
 Input batch dimensions: torch.Size([8, 120])
 Label batch dimensions torch.Size([8])
'''

# Report how many batches each split yields.
print(f"{len(train_loader)} training batches")
print(f"{len(val_loader)} validation batches")
print(f"{len(test_loader)} test batches")
'''
 130 training batches
 19 validation batches
 38 test batches
'''

# Model selection and architecture configuration.
CHOOSE_MODEL = "gpt2-small (124M)"
INPUT_PROMPT = "Every effort moves"

# Settings shared by every GPT-2 size.
BASE_CONFIG = {
    "vocab_size": 50257, "drop_rate": 0.0, "qkv_bias": True, "context_length": 1024
}
# Per-size architecture parameters. Fixed key typo: "gpt2-xl", not "gpt2-x1" —
# selecting the XL model by its real name would previously raise a KeyError.
model_configs = {
    "gpt2-small (124M)": {"emb_dim": 768, "n_layers": 12, "n_heads": 12},
    "gpt2-medium (355M)": {"emb_dim": 1024, "n_layers": 24, "n_heads": 16},
    "gpt2-large (774M)": {"emb_dim": 1280, "n_layers": 36, "n_heads": 20},
    "gpt2-xl (1558M)": {"emb_dim": 1600, "n_layers": 48, "n_heads": 25},
}
BASE_CONFIG.update(model_configs[CHOOSE_MODEL])

# e.g. "gpt2-small (124M)" -> "124M"
model_size = CHOOSE_MODEL.split(" ")[-1].lstrip("(").rstrip(")")
settings, params = download_and_load_gpt2(model_size=model_size, models_dir="gpt2")

# Build the model, load the pretrained weights, and switch to inference mode.
model = GPTModel(BASE_CONFIG)
load_weights_into_gpt(model, params)
model.eval()

# text_1 = "Every effort moves you"
# token_ids = generate_text_simple(
#     model=model,
#     idx=text_to_token_ids(text_1, tokenizer),
#     max_new_tokens=15,
#     context_size=BASE_CONFIG["context_length"]
# )
# print(token_ids_to_text(token_ids, tokenizer))
#
# text_2=(
#     "Is the following text 'spam'? Answer with 'yes' or 'no':"
#     " 'You are a winner you have been specially"
#     " selected to receive $1000 cash or a $2000 award.'"
# )
# token_ids = generate_text_simple(
#     model=model,
#     idx=text_to_token_ids(text_2, tokenizer),
#     max_new_tokens=23,
#     context_size=BASE_CONFIG["context_length"]
# )
# print(token_ids_to_text(token_ids, tokenizer))

# Freeze every pretrained weight; fine-tuning will touch only a few layers.
for p in model.parameters():
    p.requires_grad = False

torch.manual_seed(123)
num_classes = 2
# Swap the language-model head for a freshly initialized 2-way classifier
# (the new Linear layer's parameters are trainable by default).
model.out_head = torch.nn.Linear(in_features=BASE_CONFIG["emb_dim"], out_features=num_classes)

# Unfreeze the last transformer block and the final layer norm so they can
# adapt to the classification task.
for p in model.trf_blocks[-1].parameters():
    p.requires_grad = True
for p in model.final_norm.parameters():
    p.requires_grad = True

def calc_accuracy_loader(data_loader, model, device, num_batches=None):
    """Compute classification accuracy over up to *num_batches* batches.

    The logits of the last output token serve as the class scores. This fixes
    the original control flow, where the evaluation loop only ran when
    *num_batches* was passed explicitly; with the default ``None`` the
    function never iterated and divided zero by zero.

    Args:
        data_loader: yields ``(input_batch, target_batch)`` pairs.
        model: callable returning ``(batch, seq_len, num_classes)`` logits.
        device: device the batches are moved to.
        num_batches: number of batches to evaluate; ``None`` means all.

    Returns:
        Fraction of correctly classified examples.
    """
    model.eval()  # disable dropout etc. for deterministic evaluation
    correct_predictions, num_examples = 0, 0
    # Clamp the requested batch count to what the loader can provide.
    if num_batches is None:
        num_batches = len(data_loader)
    else:
        num_batches = min(num_batches, len(data_loader))
    for i, (input_batch, target_batch) in enumerate(data_loader):
        if i >= num_batches:
            break
        input_batch = input_batch.to(device)
        target_batch = target_batch.to(device)
        # Only the last token's logits are used as class scores.
        with torch.no_grad():
            logits = model(input_batch)[:, -1, :]
        predicted_labels = torch.argmax(logits, dim=-1)
        num_examples += predicted_labels.shape[0]
        correct_predictions += (predicted_labels == target_batch).sum().item()
    return correct_predictions / num_examples

# Baseline accuracy before any fine-tuning (expected to be near chance).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
torch.manual_seed(123)

train_accuracy = calc_accuracy_loader(train_loader, model, device, num_batches=10)
val_accuracy = calc_accuracy_loader(val_loader, model, device, num_batches=10)
test_accuracy = calc_accuracy_loader(test_loader, model, device, num_batches=10)

# Fixed: the first two lines were missing the "%" suffix, inconsistent with
# the test line and the expected output below.
print(f"Training accuracy: {train_accuracy*100:.2f}%")
print(f"Validation accuracy: {val_accuracy*100:.2f}%")
print(f"Test accuracy: {test_accuracy*100:.2f}%")

'''
 Training accuracy: 46.25%
 Validation accuracy: 45.00%
 Test accuracy: 48.75%
'''

def calc_loss_batch(input_batch, target_batch, model, device):
    """Cross-entropy loss of one batch, scored on the last token's logits."""
    inputs = input_batch.to(device)
    targets = target_batch.to(device)
    last_token_logits = model(inputs)[:, -1, :]
    return torch.nn.functional.cross_entropy(last_token_logits, targets)

def calc_loss_loader(data_loader, model, device, num_batches=None):
    """Average ``calc_loss_batch`` over up to *num_batches* batches.

    This fixes the original control flow: the loop lived inside the ``else``
    branch, so calling with the default ``num_batches=None`` skipped
    evaluation entirely and silently returned 0.0.

    Args:
        data_loader: yields ``(input_batch, target_batch)`` pairs.
        model: classification model, forwarded to ``calc_loss_batch``.
        device: device the batches are moved to.
        num_batches: number of batches to average over; ``None`` means all.

    Returns:
        Mean loss over the evaluated batches, or NaN for an empty loader.
    """
    total_loss = 0.
    if len(data_loader) == 0:
        return float("nan")  # no batches -> average loss is undefined
    # Clamp the requested batch count to what the loader can provide.
    if num_batches is None:
        num_batches = len(data_loader)
    else:
        num_batches = min(num_batches, len(data_loader))
    for i, (input_batch, target_batch) in enumerate(data_loader):
        if i >= num_batches:
            break
        loss = calc_loss_batch(input_batch, target_batch, model, device)
        total_loss += loss.item()
    return total_loss / num_batches

# Compute baseline losses before any fine-tuning. Gradient tracking is
# disabled since no weights are updated here, which saves memory and compute.
with torch.no_grad():
    train_loss = calc_loss_loader(train_loader, model, device, num_batches=5)
    print(f"Training loss: {train_loss:.3f}")
    val_loss = calc_loss_loader(val_loader, model, device, num_batches=5)
    print(f"Validation loss: {val_loss:.3f}")
    test_loss = calc_loss_loader(test_loader, model, device, num_batches=5)
    print(f"Test loss: {test_loss:.3f}")

'''
 Training loss: 2.453
 Validation loss: 2.583
 Test loss: 2.322
'''


def train_classifier_simple(
        model, train_loader, val_loader, optimizer, device,
        num_epochs, eval_freq, eval_iter
):
    """Fine-tune *model* for classification with a standard training loop.

    Every *eval_freq* optimizer steps, train/val losses (over *eval_iter*
    batches) are logged; accuracy on both splits is estimated once per epoch.

    Returns:
        (train_losses, val_losses, train_accs, val_accs, examples_seen)
    """
    train_losses, val_losses, train_accs, val_accs = [], [], [], []
    examples_seen, global_step = 0, -1

    for epoch in range(num_epochs):
        model.train()  # enable training-mode behavior (e.g. dropout)

        for input_batch, target_batch in train_loader:
            # Standard step: clear grads -> loss -> backprop -> update.
            optimizer.zero_grad()
            batch_loss = calc_loss_batch(input_batch, target_batch, model, device)
            batch_loss.backward()
            optimizer.step()

            examples_seen += input_batch.shape[0]
            global_step += 1

            # Periodic loss evaluation on both splits.
            if global_step % eval_freq == 0:
                train_loss, val_loss = evaluate_model(
                    model, train_loader, val_loader, device, eval_iter
                )
                train_losses.append(train_loss)
                val_losses.append(val_loss)
                print(
                    f"Ep {epoch + 1} (Step {global_step:06d}): "
                    f"Train loss {train_loss:.3f}, "
                    f"Val loss {val_loss:.3f}"
                )

        # Per-epoch accuracy estimate over eval_iter batches of each split.
        train_accuracy = calc_accuracy_loader(
            train_loader, model, device, num_batches=eval_iter
        )
        val_accuracy = calc_accuracy_loader(
            val_loader, model, device, num_batches=eval_iter
        )
        print(f"Training accuracy: {train_accuracy * 100:.2f}% ", end="")
        print(f"Validation accuracy: {val_accuracy * 100:.2f}%")
        train_accs.append(train_accuracy)
        val_accs.append(val_accuracy)

    return train_losses, val_losses, train_accs, val_accs, examples_seen


def evaluate_model(model, train_loader, val_loader, device, eval_iter):
    """Estimate train/val losses over *eval_iter* batches without gradients.

    Temporarily switches the model to eval mode and restores training mode
    before returning.

    Args:
        model: model under evaluation.
        train_loader: training data loader.
        val_loader: validation data loader.
        device: device the batches are moved to.
        eval_iter: number of batches to average over per split.

    Returns:
        (train_loss, val_loss)
    """
    model.eval()
    with torch.no_grad():
        train_loss = calc_loss_loader(train_loader, model, device, num_batches=eval_iter)
        val_loss = calc_loss_loader(val_loader, model, device, num_batches=eval_iter)
    model.train()
    return train_loss, val_loss



# AdamW over all parameters; in practice only those left with
# requires_grad=True (last transformer block, final norm, new head) receive
# gradient updates.
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5, weight_decay=0.1)
start_time = time.time()
torch.manual_seed(123)  # reproducible data shuffling
num_epochs = 5
train_losses, val_losses, train_accs, val_accs, examples_seen = train_classifier_simple(
    model, train_loader, val_loader, optimizer, device,
    num_epochs=num_epochs, eval_freq=50, eval_iter=5
)
end_time = time.time()
execution_time_minutes = (end_time - start_time) / 60
print(f"Training completed in {execution_time_minutes:.2f} minutes.")

def plot_values(
    epochs_seen, examples_seen, train_values, val_values,
    label="loss"
):
    """Plot training vs. validation curves against two x-axes.

    The primary x-axis is epochs; a twin axis at the top shows cumulative
    examples seen. The figure is saved to "<label>-plot.pdf" and displayed.

    Args:
        epochs_seen: x-values in epoch units.
        examples_seen: x-values in examples-seen units (same length).
        train_values: training-metric values.
        val_values: validation-metric values.
        label: metric name used for legend, y-axis, and output filename.
    """
    fig, ax1 = plt.subplots(figsize=(5, 3))

    # Training and validation curves on the epoch axis.
    ax1.plot(epochs_seen, train_values, label=f"Training {label}")
    ax1.plot(epochs_seen, val_values, linestyle="-.", label=f"Validation {label}")
    ax1.set_xlabel("Epochs")
    ax1.set_ylabel(label.capitalize())
    ax1.legend()

    # An invisible plot on the twin axis aligns its tick range with the data.
    ax2 = ax1.twiny()
    ax2.plot(examples_seen, train_values, alpha=0)
    ax2.set_xlabel("Examples seen")

    fig.tight_layout()
    plt.savefig(f"{label}-plot.pdf")
    plt.show()


# X-axis values for the loss plot: epoch positions and cumulative examples
# corresponding to each logged loss value.
epochs_tensor = torch.linspace(0, num_epochs, len(train_losses))
examples_seen_tensor = torch.linspace(0, examples_seen, len(train_losses))
plot_values(epochs_tensor, examples_seen_tensor, train_losses, val_losses)

# Same plot for the per-epoch accuracies.
epochs_tensor = torch.linspace(0, num_epochs, len(train_accs))
examples_seen_tensor = torch.linspace(0, examples_seen, len(train_accs))
plot_values(epochs_tensor, examples_seen_tensor, train_accs, val_accs, label="accuracy")

# Final accuracy estimates after fine-tuning (first 10 batches per split).
train_accuracy = calc_accuracy_loader(train_loader, model, device, num_batches=10)
val_accuracy = calc_accuracy_loader(val_loader, model, device, num_batches=10)
test_accuracy = calc_accuracy_loader(test_loader, model, device, num_batches=10)
print(f"Training accuracy: {train_accuracy*100:.2f}%")
print(f"Validation accuracy: {val_accuracy*100:.2f}%")
print(f"Test accuracy: {test_accuracy*100:.2f}%")

# Persist the fine-tuned weights for later reuse.
torch.save(model.state_dict(), "review_classifier.pth")

# Sample console output from a previous full run of this script:
'''
[50256]
train_dataset.length:  120
Input batch dimensions:  torch.Size([8, 120])
Label batch dimensions torch.Size([8])
130 training batches
19 validation batches
38 test batches
Training accuracy: 53.75
Validation accuracy: 55.00
Test accuracy: 51.25%
Training loss: 2.688
Validation loss: 2.581
Test loss: 2.845
Ep 1 (Step 000000): Train loss 2.630, Val loss 2.389
Ep 1 (Step 000050): Train loss 0.667, Val loss 0.651
Ep 1 (Step 000100): Train loss 0.677, Val loss 0.544
Training accuracy: 65.00% Validation accuracy: 67.50%
Ep 2 (Step 000150): Train loss 0.557, Val loss 0.527
Ep 2 (Step 000200): Train loss 0.462, Val loss 0.432
Ep 2 (Step 000250): Train loss 0.425, Val loss 0.468
Training accuracy: 95.00% Validation accuracy: 77.50%
Ep 3 (Step 000300): Train loss 0.369, Val loss 0.421
Ep 3 (Step 000350): Train loss 0.380, Val loss 0.441
Training accuracy: 90.00% Validation accuracy: 82.50%
Ep 4 (Step 000400): Train loss 0.325, Val loss 0.451
Ep 4 (Step 000450): Train loss 0.388, Val loss 0.387
Ep 4 (Step 000500): Train loss 0.171, Val loss 0.205
Training accuracy: 87.50% Validation accuracy: 92.50%
Ep 5 (Step 000550): Train loss 0.263, Val loss 0.111
Ep 5 (Step 000600): Train loss 0.067, Val loss 0.103
Training accuracy: 97.50% Validation accuracy: 95.00%
Training completed in 0.24 minutes.
Training accuracy: 97.50%
Validation accuracy: 97.50%
Test accuracy: 93.75%
'''