import sys

sys.path.append("/Users/yishanli/python/myProject/python-demo/TensorFlow/")

import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader

from llm.SimpleLLM import SimpleLLM

# Toy corpus for next-word prediction.
text = "hello world my name is liyishan hello"
# Whitespace tokenization.
tokens = text.split()
# Vocabulary: unique tokens, sorted for a deterministic index assignment.
vocab = sorted(set(tokens))
vocab_size = len(vocab)
# Bidirectional word <-> index lookup tables.
word_to_idx = {}
idx_to_word = {}
for idx, word in enumerate(vocab):
    word_to_idx[word] = idx
    idx_to_word[idx] = word
# Next-token prediction pairs: every position predicts its successor,
# so inputs drop the last token and targets drop the first.
encoded = [word_to_idx[w] for w in tokens]
input_seq = encoded[:-1]
target_seq = encoded[1:]
# 自定义数据集类
class TextDataset(Dataset):
    """Pairs of (input index, target index) for next-token prediction.

    Both sequences are held as 1-D long tensors; item *i* is the scalar
    tensor pair (input_seq[i], target_seq[i]).
    """

    def __init__(self, input_seq, target_seq):
        # Long dtype is what nn.Embedding / nn.CrossEntropyLoss expect.
        self.input_seq = torch.as_tensor(input_seq, dtype=torch.long)
        self.target_seq = torch.as_tensor(target_seq, dtype=torch.long)

    def __len__(self):
        # One sample per input position.
        return self.input_seq.shape[0]

    def __getitem__(self, idx):
        # Scalar (input, target) tensors at position idx.
        return self.input_seq[idx], self.target_seq[idx]
# Hyperparameters.
embedding_dim = 50   # token embedding width
hidden_size = 100    # recurrent/hidden layer width
learning_rate = 0.001
num_epochs = 100

# Dataset and loader. batch_size=1 keeps each (input, target) pair separate.
# NOTE(review): shuffle=False preserves corpus order each epoch — confirm
# this is intentional for training.
dataset = TextDataset(input_seq, target_seq)
batch_size = 1
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False)

# Model, loss, and optimizer.
model = SimpleLLM(vocab_size, embedding_dim, hidden_size)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Training loop: full pass over the loader per epoch, standard
# zero-grad / forward / loss / backward / step cycle.
# NOTE(review): model.train() is never called — fine if SimpleLLM has no
# dropout/batchnorm, otherwise confirm.
for epoch in range(num_epochs):
    running_loss = 0.0
    for batch_inputs, batch_targets in dataloader:
        optimizer.zero_grad()
        logits = model(batch_inputs)
        loss = criterion(logits, batch_targets)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    # Report mean per-batch loss every 10 epochs.
    if (epoch + 1) % 10 == 0:
        print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {running_loss / len(dataloader):.4f}')

# NOTE(review): this pickles the entire model object, which ties the
# checkpoint to the SimpleLLM class/module path; saving
# model.state_dict() is the more portable convention — confirm loaders.
torch.save(model, 'saved_model.pth')

