import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import jieba
import numpy as np

# 自定义数据集类
# Custom dataset: character-level language-modeling pairs
class TextDataset(Dataset):
    """Character-level dataset yielding (input, target) index windows.

    Each sample is a window of ``seq_length`` character indices and the same
    window shifted right by one position (the next-character targets).
    """

    def __init__(self, texts, seq_length):
        self.texts = texts
        self.seq_length = seq_length
        self.char_to_idx = {}
        self.idx_to_char = {}
        self.build_vocab()

    def build_vocab(self):
        """Build the char<->index tables and flatten texts into indices."""
        # Sort the characters: plain set iteration order depends on hash
        # randomization, which would make the vocabulary (and any saved
        # model) irreproducible between runs.
        unique_chars = sorted(set(''.join(self.texts)))
        self.char_to_idx = {char: idx + 1 for idx, char in enumerate(unique_chars)}  # indices start at 1; 0 is reserved for padding
        self.idx_to_char = {idx: char for char, idx in self.char_to_idx.items()}

        # Concatenate all texts into one flat index stream.
        self.numeric_texts = []
        for text in self.texts:
            self.numeric_texts.extend(self.char_to_idx[char] for char in text)

    def __len__(self):
        # Clamp at 0: if the corpus is shorter than seq_length the naive
        # subtraction would be negative, which breaks DataLoader.
        return max(0, len(self.numeric_texts) - self.seq_length)

    def __getitem__(self, index):
        x = self.numeric_texts[index:index + self.seq_length]
        y = self.numeric_texts[index + 1:index + self.seq_length + 1]
        return torch.tensor(x, dtype=torch.long), torch.tensor(y, dtype=torch.long)

# 定义GPT模型
# Minimal GPT-style model: embedding + learned positions + masked Transformer
class SimpleGPT(nn.Module):
    """Decoder-only char model built from a masked TransformerEncoder.

    Args:
        vocab_size: number of distinct token indices (including padding 0).
        embed_dim: embedding / model width.
        num_heads: attention heads per layer.
        num_layers: number of Transformer layers.
        seq_length: maximum sequence length supported by the learned
            positional table.
    """

    def __init__(self, vocab_size, embed_dim, num_heads, num_layers, seq_length):
        super(SimpleGPT, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        # Learned positional encodings, one slot per position up to seq_length.
        self.positional_encoding = nn.Parameter(torch.zeros(1, seq_length, embed_dim))
        encoder_layer = nn.TransformerEncoderLayer(d_model=embed_dim, nhead=num_heads, dim_feedforward=256)
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)
        self.fc_out = nn.Linear(embed_dim, vocab_size)

    def forward(self, x):
        """Map (batch, seq) token indices to (batch, seq, vocab) logits."""
        batch_size, seq_length = x.size()
        x = self.embedding(x) + self.positional_encoding[:, :seq_length, :]
        x = x.permute(1, 0, 2)  # to (seq_length, batch_size, embed_dim)
        # Causal mask: -inf above the diagonal so position i cannot attend to
        # positions > i. Without this the next-token objective sees its answer.
        causal_mask = torch.triu(
            torch.full((seq_length, seq_length), float('-inf'), device=x.device),
            diagonal=1,
        )
        x = self.transformer_encoder(x, mask=causal_mask)
        x = x.permute(1, 0, 2)  # back to (batch_size, seq_length, embed_dim)
        x = self.fc_out(x)
        return x

# 训练模型
# Training loop
def train(model, data_loader, optimizer, criterion, num_epochs=10):
    """Train ``model`` on ``data_loader`` for ``num_epochs`` epochs.

    Batches are moved to the model's device before the forward pass, so the
    loop works whether the model lives on CPU or GPU. Prints the mean batch
    loss once per epoch.
    """
    model.train()
    # Infer the device from the model's parameters so callers that moved the
    # model to CUDA don't hit a CPU/GPU tensor mismatch.
    device = next(model.parameters()).device
    for epoch in range(num_epochs):
        total_loss = 0
        for x, y in data_loader:
            x, y = x.to(device), y.to(device)
            optimizer.zero_grad()
            output = model(x)
            # Flatten (batch, seq, vocab) / (batch, seq) for cross-entropy.
            loss = criterion(output.view(-1, output.size(-1)), y.view(-1))
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
        print(f'Epoch {epoch + 1}, Loss: {total_loss / len(data_loader)}')

# 预测下一个词
# Greedy next-token generation
def predict_next_word(model, tokenizer, initial_text, max_length, device):
    """Greedily generate ``max_length`` token indices after ``initial_text``.

    Args:
        model: module mapping (batch, seq) indices to (batch, seq, vocab) logits.
        tokenizer: char -> index mapping (raises KeyError on unseen chars).
        initial_text: seed string to condition on.
        max_length: number of tokens to generate.
        device: device the input tensors are placed on.

    Returns:
        List of generated token indices (ints), in order.
    """
    model.eval()
    generated_sequence = []

    initial_indices = [tokenizer[char] for char in initial_text]
    input_tensor = torch.tensor(initial_indices, dtype=torch.long).unsqueeze(0).to(device)  # add batch dim

    # If the model has a learned positional table, never feed it a sequence
    # longer than the table — the growing context would otherwise overrun it
    # and crash the forward pass.
    pos = getattr(model, 'positional_encoding', None)
    max_context = pos.size(1) if pos is not None else None

    with torch.no_grad():
        for _ in range(max_length):
            context = input_tensor if max_context is None else input_tensor[:, -max_context:]
            output = model(context)
            next_token_logits = output[:, -1, :]  # logits at the last time step
            next_token = torch.argmax(next_token_logits, dim=-1)  # greedy pick

            generated_sequence.append(next_token.item())
            input_tensor = torch.cat((input_tensor, next_token.unsqueeze(0)), dim=1)  # append to context

    return generated_sequence

# 主函数
# Entry point: build data, train, and sample from the model
def main():
    """Train the toy char-GPT on a few sentences and print a greedy sample."""
    # Example corpus (short Chinese sentences).
    texts = [
        "我今天心情很好",
        "真是个美好的日子",
        "我希望一切都更好",
        "这部电影真的很精彩",
        "我对未来充满了期待",
        "天气变得越来越冷了",
        "我有时候会感到孤独",
        "学习新的东西总是令人兴奋",
        "我爱这个城市",
        "生活总是有惊喜"
    ]

    seq_length = 10  # context window length

    # Build the dataset and loader.
    dataset = TextDataset(texts, seq_length)
    data_loader = DataLoader(dataset, batch_size=2, shuffle=True)

    # Model hyperparameters.
    vocab_size = len(dataset.char_to_idx) + 1  # +1 because index 0 is reserved for padding
    embed_dim = 256
    num_heads = 4
    num_layers = 4

    # Create the model on the best available device.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = SimpleGPT(vocab_size, embed_dim, num_heads, num_layers, seq_length)
    model.to(device)

    # Train.
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    criterion = nn.CrossEntropyLoss()
    train(model, data_loader, optimizer, criterion)

    # Greedy generation from a seed string.
    initial_text = "我今天"
    predicted_indices = predict_next_word(model, dataset.char_to_idx, initial_text, max_length=5, device=device)

    # Map indices back to characters. Index 0 (padding) is a legal argmax
    # output but has no entry in idx_to_char, so fall back to '' instead of
    # raising KeyError.
    generated_text = ''.join(dataset.idx_to_char.get(idx, '') for idx in predicted_indices)
    print("生成的文本：", generated_text)

if __name__ == "__main__":
    main()