
import torch
import torch.nn as nn
import torch.optim as optim

from oformat import ConllDataset, load_conll, build_vocab

from torch.utils.data import DataLoader

# Load the CoNLL-formatted treebank and build the index tables.
# Example: load a CoNLL file.
file_path = 'data/deppdata.txt'
sentences = load_conll(file_path)
# Example: build the word / POS-tag / dependency-relation vocabularies.
word_to_idx, pos_to_idx, deprel_to_idx = build_vocab(sentences, basepath='../')
dataset = ConllDataset(sentences, word_to_idx, pos_to_idx, deprel_to_idx)
# NOTE(review): DataLoader default batch_size is 1 and there is no padding
# collate_fn here -- presumably each batch is a single sentence; confirm.
dataloader = DataLoader(dataset)

from omod import ChineseDependencyParser

# Initialize model hyper-parameters.
vocab_size = len(word_to_idx) + 1  # +1 reserved for unknown words
embed_dim = 3  # NOTE(review): unusually small embedding dim -- confirm intended
hidden_dim = 200
num_labels = len(deprel_to_idx)+1  # +1 reserved for unknown labels
model = ChineseDependencyParser(vocab_size, embed_dim, hidden_dim, num_labels)

def train(model, dataloader, criterion, optimizer, epochs=10):
    """Train the dependency parser for ``epochs`` passes over ``dataloader``.

    Args:
        model: module mapping a flat word-index tensor to a tuple of
            ``(arc_head_scores, arc_dep_scores, rel_scores)``.
        dataloader: iterable yielding batches with ``'words'``, ``'heads'``,
            ``'pos_tags'`` and ``'deprels'`` tensors.
        criterion: loss applied to each (scores, target) pair; the three
            losses are summed before backpropagation.
        optimizer: optimizer constructed over ``model.parameters()``.
        epochs: number of full passes over the data (default 10).
    """
    model.train()
    for epoch in range(epochs):
        total_loss = 0.0
        for batch in dataloader:
            # Flatten the batch dimension so every token is scored flat.
            words = batch['words'].view(-1)
            heads = batch['heads'].view(-1)
            tags = batch['pos_tags'].view(-1)
            deprels = batch['deprels'].view(-1)

            optimizer.zero_grad()
            arc_head_scores, arc_dep_scores, rel_scores = model(words)
            # NOTE(review): with CrossEntropyLoss, float targets are treated
            # as class *probabilities*, not indices -- confirm the model emits
            # scores whose squeezed shape matches ``heads`` / ``tags``.
            loss = criterion(arc_head_scores.squeeze(), heads.to(torch.float)) + \
                   criterion(arc_dep_scores.squeeze(), tags.to(torch.float)) + \
                   criterion(rel_scores, deprels)
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
        # Guard against an empty dataloader (avoids ZeroDivisionError).
        num_batches = max(len(dataloader), 1)
        print(f'Epoch {epoch+1}/{epochs}, Loss: {total_loss / num_batches}')

# Train with summed cross-entropy losses and Adam, then persist the weights.
train(model, dataloader, nn.CrossEntropyLoss(), optim.Adam(model.parameters(), lr=0.001))

# Ensure the output directory exists before saving (torch.save does not
# create parent directories and would raise FileNotFoundError otherwise).
os.makedirs('model', exist_ok=True)
torch.save(model.state_dict(), 'model/dependency_parser.pkl')

