
import torch
import torch.nn as nn
import torch.optim as optim
import sys

sys.path.append('..')

from oformat import ConllDataset, load_conll, build_vocab

from torch.utils.data import DataLoader

# Load the CoNLL-formatted data and build the vocabulary lookup tables.
# NOTE(review): load_conll/build_vocab are project-local (oformat); their exact
# return shapes are assumed from how the results are used below — confirm there.
file_path = '../data/deppdata.txt'
sentences = load_conll(file_path)

# Mappings: word -> index, POS tag -> index, dependency relation -> index.
word_to_idx, pos_to_idx, deprel_to_idx = build_vocab(sentences, basepath="../")


from omod import ChinesePosParser

# 初始化模型
vocab_size = len(word_to_idx) + 1  # +1 用于未知词
pos_size = len(pos_to_idx)+1  # +1 用于未知标签
model = ChinesePosParser(vocab_size, pos_size)

dataset = ConllDataset(sentences, word_to_idx, pos_to_idx, deprel_to_idx)
dataloader = DataLoader(dataset)
# train(model, dataloader, )
# dataloader = DataLoader(sentences, batch_size=batch_size, shuffle=True, 
#                             collate_fn=lambda x: prepare_batch(x, word2idx, label2idx))

# ---- Training ----
# Use the module-level `optim` alias already imported by this file.
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Instantiate the loss module once instead of rebuilding it on every step.
criterion = nn.CrossEntropyLoss()

epochs = 20
for epoch in range(epochs):
    model.train()
    total_loss = 0
    for batch in dataloader:
        # Flatten (batch, seq) -> (batch*seq,) so each token is one sample.
        # NOTE(review): the batch also carries 'heads' and 'deprels', but this
        # script trains only the POS head, so they are intentionally unused.
        words = batch['words'].view(-1)
        pos = batch['pos_tags'].view(-1)

        # Conventional step order: clear grads, forward, backward, update.
        optimizer.zero_grad()
        pos_preds = model(words)
        loss = criterion(pos_preds, pos)
        loss.backward()
        optimizer.step()

        # Accumulate the scalar loss for epoch-level reporting.
        total_loss += loss.item()
    # Report the mean per-batch loss for this epoch.
    print(f'Epoch {epoch+1}/{epochs}, Loss: {total_loss / len(dataloader)}')

# Persist only the learned parameters (state_dict), not the whole module.
save_path = '../model/pos_parser.pkl'
torch.save(model.state_dict(), save_path)