import datetime
import os
from pathlib import Path

import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np

#1. 分词器（Tokenizer）-字符级
class CharTokenizer:
    """Character-level tokenizer built from the unique characters of a corpus."""

    def __init__(self, text):
        # Deterministic vocabulary: the sorted unique characters of the corpus.
        self.vocab = sorted(set(text))
        self.vocab_size = len(self.vocab)
        # Bidirectional lookup tables between characters and integer ids.
        self.char_to_idx = {}
        self.idx_to_char = {}
        for idx, char in enumerate(self.vocab):
            self.char_to_idx[char] = idx
            self.idx_to_char[idx] = char

    def encode(self, text):
        """Map a string to its list of integer token ids."""
        lookup = self.char_to_idx
        return [lookup[ch] for ch in text]

    def decode(self, indices):
        """Map a list of integer token ids back to a string."""
        lookup = self.idx_to_char
        return ''.join(lookup[i] for i in indices)
    
# 2. 模型组件

# 位置编码
# Sinusoidal positional encoding
class PositionEncoding(nn.Module):
    """Sinusoidal positional encoding (Vaswani et al., 2017).

    Precomputes a (1, max_len, d_model) table of interleaved sin/cos values
    and adds the first seq_len rows to the input embeddings on each forward.
    """

    def __init__(self, d_model, max_len=5000):
        super().__init__()
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)  # (max_len, 1)
        # Geometric progression of inverse wavelengths: 1 / 10000^(2i/d_model).
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * -(np.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)  # even columns
        # BUG FIX: slice the cos term so an odd d_model no longer raises a shape
        # mismatch (div_term has ceil(d_model/2) entries, the odd columns only
        # d_model // 2).
        pe[:, 1::2] = torch.cos(position * div_term)[:, : d_model // 2]  # odd columns
        pe = pe.unsqueeze(0)  # (1, max_len, d_model) so it broadcasts over the batch
        self.register_buffer('pe', pe)  # buffer: moves with .to(device), never trained

    def forward(self, x):
        """Add positional encodings to x of shape (batch, seq_len, d_model)."""
        seq_len = x.size(1)
        # Fail loudly instead of letting a too-long input hit a broadcast error.
        if seq_len > self.pe.size(1):
            raise ValueError(
                f"sequence length {seq_len} exceeds max_len {self.pe.size(1)}"
            )
        return x + self.pe[:, :seq_len, :]
    
# 3. Transformer模型（simplified transformer decode layer）
class SimpleDecoderLayer(nn.Module):
    """A single decoder block: masked self-attention followed by a position-wise
    feed-forward network, each wrapped in residual + LayerNorm (post-norm).
    """

    def __init__(self, d_model, nhead, dim_feedforward, dropout):
        super().__init__()
        # Multi-head self-attention over the target sequence.
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=True)
        self.norm1 = nn.LayerNorm(d_model)
        # Two-layer position-wise feed-forward network.
        self.ffn = nn.Sequential(
            nn.Linear(d_model, dim_feedforward),
            nn.ReLU(),
            nn.Linear(dim_feedforward, d_model)
        )
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, tgt, tgt_mask=None):
        """Run one decoder block.

        tgt:      (batch, seq_len, d_model) input sequence.
        tgt_mask: optional (seq_len, seq_len) additive mask hiding future tokens.
        """
        attended, _ = self.self_attn(tgt, tgt, tgt, attn_mask=tgt_mask)
        out = self.norm1(tgt + self.dropout(attended))          # residual + post-norm
        out = self.norm2(out + self.dropout(self.ffn(out)))     # residual + post-norm
        return out
    
# 4. 模型(SimpleTransformerLM)
class SimpleTransforerLM(nn.Module):
    """Decoder-only Transformer language model over a token vocabulary.

    NOTE: the (misspelled) class name is kept as-is because external callers
    and saved checkpoints reference it.
    """

    def __init__(self, vocab_size, d_model, nhead, num_layers, dim_feedforward, dropout, max_len):
        super(SimpleTransforerLM, self).__init__()
        self.embedding = nn.Embedding(vocab_size, d_model)  # token embedding
        # BUG FIX: max_len was previously ignored (PositionEncoding always fell
        # back to its default of 5000); propagate the constructor argument so
        # save_model records the real value.
        self.pos_encoder = PositionEncoding(d_model, max_len=max_len)
        decoder_layer = nn.TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout, batch_first=True)
        self.transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers)  # stacked decoder layers
        self.fc = nn.Linear(d_model, vocab_size)  # projects hidden states to vocabulary logits

        self.d_model = d_model
        self.init_weights()

    def init_weights(self):
        """Uniform init (range +/-0.1) for embedding and output projection."""
        initrange = 0.1
        self.embedding.weight.data.uniform_(-initrange, initrange)
        self.fc.bias.data.zero_()
        self.fc.weight.data.uniform_(-initrange, initrange)

    def forward(self, x, tgt_mask=None):
        """Return next-token logits of shape (batch, seq_len, vocab_size).

        x:        (batch, seq_len) token indices.
        tgt_mask: optional (seq_len, seq_len) additive causal mask.
        """
        src_emb = self.embedding(x) * np.sqrt(self.d_model)  # scale by sqrt(d_model), as in Vaswani et al.
        src_emb = self.pos_encoder(src_emb)
        # Decoder-only model: there is no encoder, so feed a zero "memory"
        # tensor just to satisfy nn.TransformerDecoder's interface.
        memory = torch.zeros_like(src_emb)
        output = self.transformer_decoder(tgt=src_emb, memory=memory, tgt_mask=tgt_mask)
        return self.fc(output)

    def generate_square_mask(self, size):
        """Return a (size, size) additive causal mask: -inf above the diagonal."""
        mask = torch.triu(torch.ones(size, size) * float('-inf'), diagonal=1)
        return mask

    def save_model(self, filepath):
        """Save weights plus the hyper-parameters needed to rebuild the model."""
        torch.save({
            'model_state_dict': self.state_dict(),
            'vocab_size': self.fc.out_features,
            'd_model': self.d_model,
            'nhead': self.transformer_decoder.layers[0].self_attn.num_heads,
            'num_layers': len(self.transformer_decoder.layers),
            'dim_feedforward': self.transformer_decoder.layers[0].linear1.out_features,
            'dropout': self.transformer_decoder.layers[0].dropout.p,
            'max_len': self.pos_encoder.pe.size(1)
        }, filepath)

    @classmethod
    def load_model(cls, filepath, device='cpu'):
        """Rebuild a model from a checkpoint written by save_model."""
        checkpoint = torch.load(filepath, map_location=device)
        model = cls(
            vocab_size=checkpoint['vocab_size'],
            d_model=checkpoint['d_model'],
            nhead=checkpoint['nhead'],
            num_layers=checkpoint['num_layers'],
            dim_feedforward=checkpoint['dim_feedforward'],
            dropout=checkpoint['dropout'],
            max_len=checkpoint['max_len']
        )
        model.load_state_dict(checkpoint['model_state_dict'])
        model.to(device)
        return model
    
# 4. 数据集
class TextDataset:
    """Sliding-window dataset for next-character language modelling.

    Each item is an (input, target) pair of seq_len token-id tensors where the
    target is the input shifted left by one position.
    """

    def __init__(self, text, tokenizer, seq_len):
        self.tokenizer = tokenizer
        self.indexed_text = tokenizer.encode(text)  # whole corpus as token ids
        self.seq_len = seq_len

    def __len__(self):
        # One window per starting offset that still leaves room for the
        # one-step-shifted target.
        return len(self.indexed_text) - self.seq_len

    def __getitem__(self, idx):
        start = idx
        stop = idx + self.seq_len
        inputs = torch.tensor(self.indexed_text[start:stop])
        targets = torch.tensor(self.indexed_text[start + 1:stop + 1])  # next-char labels
        return inputs, targets
    
# 5. 训练循环（train_model）
def train_model(model, dataset, tokenizer, epochs, batch_size, seq_len, learning_rate, device):
    """Train a causal language model with Adam + cross-entropy.

    model:     module whose forward(x, tgt_mask) returns (batch, seq, vocab)
               logits and which exposes fc.out_features and generate_square_mask.
    dataset:   indexable of (input_seq, target_seq) pairs of length seq_len.
    tokenizer: unused here; kept for interface compatibility with callers.
    Prints running loss every 100 batches and the average loss per epoch.
    """
    model.to(device)
    model.train()  # enable dropout etc.
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    criterion = nn.CrossEntropyLoss()
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, drop_last=True)
    # The causal mask depends only on seq_len, so build it once instead of
    # once per batch (it was loop-invariant).
    causal_mask = model.generate_square_mask(seq_len).to(device)

    for epoch in range(epochs):
        total_loss = 0.0
        for batch_idx, (input_seq, target_seq) in enumerate(dataloader):
            input_seq, target_seq = input_seq.to(device), target_seq.to(device)

            optimizer.zero_grad()
            output = model(input_seq, tgt_mask=causal_mask)

            # Flatten (batch, seq, vocab) -> (batch*seq, vocab) for CE loss.
            loss = criterion(output.view(-1, model.fc.out_features), target_seq.view(-1))
            loss.backward()
            optimizer.step()

            total_loss += loss.item()
            if batch_idx % 100 == 0:
                avg_loss = total_loss / (batch_idx + 1)  # running average so far
                print(f"Epoch [{epoch + 1}/{epochs}], Batch [{batch_idx + 1}/{len(dataloader)}], Loss: {avg_loss:.4f}")
        avg_epoch_loss = total_loss / len(dataloader)
        # BUG FIX: the original message had an unmatched '[' ("Epoch [1 finished...").
        print(f"Epoch [{epoch + 1}] finished, Average Loss: {avg_epoch_loss:.4f}")

# 6. 文本生成（generate_text）
# 6. Text generation
def generate_text(model, tokenizer, start_text, max_len, device, temperature=1.0):
    """Autoregressively sample max_len tokens following start_text.

    Each step re-runs the model on the full sequence so far, applies a causal
    mask, temperature-scales the last position's logits, and samples the next
    token from the resulting distribution. Returns start_text plus the
    generated continuation as a decoded string.
    """
    model.to(device)
    model.eval()  # disable dropout for deterministic layer behavior

    generated = list(tokenizer.encode(start_text))  # running token-id history
    tokens = torch.tensor(generated).unsqueeze(0).to(device)  # (1, cur_len)

    with torch.no_grad():
        for _ in range(max_len):
            mask = model.generate_square_mask(tokens.size(1)).to(device)
            logits = model(tokens, tgt_mask=mask)[:, -1, :]  # last position only
            # Temperature < 1 sharpens the distribution, > 1 flattens it.
            probs = nn.functional.softmax(logits / temperature, dim=-1)
            next_idx = torch.multinomial(probs, num_samples=1).item()

            generated.append(next_idx)
            tokens = torch.tensor([generated]).to(device)  # grow the context

    return tokenizer.decode(generated)

# 7. 主程序
# 7. Entry point: load a trained model + tokenizer from disk and sample text.
if __name__ == "__main__":
    # --- One-off training pipeline (disabled; uncomment to retrain from scratch) ---
    # print('加载数据', datetime.datetime.now())
    # # Read the raw text corpus
    # book_dir = Path(r'三国演义')  # directory holding the corpus .txt files
    # text_data = ""
    # # Concatenate every .txt file found in the corpus directory
    # book:Path
    # for book in book_dir.glob('*.txt'):
    #     text_data += book.read_text(encoding='utf-8')

    # print('内容长度', len(text_data))
    # print('分词', datetime.datetime.now())
    # tokenizer = CharTokenizer(text_data)  # build the char-level vocabulary

    # # Model hyper-parameters
    # vocab_size = tokenizer.vocab_size  # vocabulary size
    # d_model = 128  # embedding dimension
    # nhead = 8  # attention heads
    # num_layers = 8  # decoder layers
    # dim_feedforward = 256  # feed-forward hidden size
    # dropout = 0.1  # dropout rate
    # seq_len = 32  # training context window
    # max_len = 5000  # positional-encoding capacity

    # # Training hyper-parameters
    # epochs = 10  # training epochs
    # batch_size = 32  # batch size
    # learning_rate = 0.001  # learning rate
    # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # prefer GPU

    # # Build the model
    # model = SimpleTransforerLM(vocab_size, d_model, nhead, num_layers, dim_feedforward, dropout, max_len).to(device)

    # # Build the dataset
    # dataset = TextDataset(text_data, tokenizer, seq_len)

    # # Train
    # print('训练模型', datetime.datetime.now())
    # train_model(model, dataset, tokenizer, epochs, batch_size, seq_len, learning_rate, device)

    # # Save the model (weights + hyper-parameters)
    # print('保存模型', datetime.datetime.now())
    # model_dir = Path("saved_models")
    # model_dir.mkdir(exist_ok=True)
    # model_path = model_dir / "transformer_lm.pth"
    # model.save_model(model_path)
    # print(f"模型已保存到 {model_path}")

    # # Save the tokenizer (pickles the whole CharTokenizer object)
    # tokenizer_path = model_dir / "tokenizer.pkl"
    # torch.save(tokenizer, tokenizer_path)
    # print(f"分词器已保存到 {tokenizer_path}")

    # --- Inference: load the saved model and tokenizer ---
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # prefer GPU when available
    model_path = Path("saved_models/transformer_lm.pth")
    tokenizer_path = Path("saved_models/tokenizer.pkl")

    model = SimpleTransforerLM.load_model(model_path, device=device)
    # NOTE(review): on PyTorch >= 2.6 torch.load defaults to weights_only=True,
    # which cannot unpickle a CharTokenizer instance — this line may need
    # torch.load(tokenizer_path, weights_only=False); confirm against the
    # installed torch version.
    tokenizer = torch.load(tokenizer_path)

    # Sample 100 characters following the prompt with a mildly sharpened
    # (temperature 0.7) distribution.
    start_text = "刘备"
    generated_text = generate_text(model, tokenizer, start_text, max_len=100, device=device, temperature=0.7)
    print(generated_text)





