import torch
from torch.utils.data import Dataset

class ConllDataset(Dataset):
    """PyTorch Dataset wrapping parsed CoNLL sentences for dependency parsing.

    Each item is a dict of 1-D LongTensors: token ids, POS-tag ids,
    head indices, and dependency-relation ids for one sentence.
    """

    def __init__(self, sentences, word_to_idx, pos_to_idx, deprel_to_idx):
        """
        Args:
            sentences: list of sentences; each sentence is a list of dicts
                with at least the keys 'form', 'upos', 'head', 'deprel'
                (as produced by load_conll).
            word_to_idx: word form -> integer id; 0 is reserved for
                padding / unknown words.
            pos_to_idx: UPOS tag -> integer id; 0 reserved as above.
            deprel_to_idx: dependency relation -> integer id; 0 reserved.
        """
        self.sentences = sentences
        self.word_to_idx = word_to_idx
        self.pos_to_idx = pos_to_idx
        self.deprel_to_idx = deprel_to_idx

    def __len__(self):
        """Number of sentences in the dataset."""
        return len(self.sentences)

    def __getitem__(self, idx):
        """Return one sentence as a dict of 1-D LongTensors.

        Out-of-vocabulary words/tags/relations map to index 0.
        Tensors are unpadded; padding is left to the collate function.
        """
        sentence = self.sentences[idx]
        words = [self.word_to_idx.get(word['form'], 0) for word in sentence]
        pos_tags = [self.pos_to_idx.get(word['upos'], 0) for word in sentence]
        heads = [word['head'] for word in sentence]
        deprels = [self.deprel_to_idx.get(word['deprel'], 0) for word in sentence]

        return {
            'words': torch.tensor(words, dtype=torch.long),
            'pos_tags': torch.tensor(pos_tags, dtype=torch.long),
            'heads': torch.tensor(heads, dtype=torch.long),
            'deprels': torch.tensor(deprels, dtype=torch.long),
        }

def load_conll(file_path):
    """Load a CoNLL-U style file and parse it into structured data.

    Returns a list of sentences; each sentence is a list of dicts, one
    dict per token, carrying the ten standard CoNLL-U fields.

    Non-token lines are skipped: comment lines starting with '#',
    multiword-token ranges (id like '1-2'), empty nodes (id like '1.1'),
    and any line that does not have exactly 10 tab-separated columns.
    Previously such id formats crashed int(parts[0]) with ValueError.
    """
    sentences = []
    with open(file_path, 'r', encoding='utf-8') as f:
        sentence = []
        for line in f:
            line = line.strip()
            if not line:
                # A blank line terminates the current sentence.
                if sentence:
                    sentences.append(sentence)
                    sentence = []
                continue
            if line.startswith('#'):
                continue  # sentence-level comment / metadata line
            parts = line.split('\t')
            if len(parts) != 10 or not parts[0].isdigit():
                # Malformed line, multiword-token range, or empty node.
                continue
            sentence.append({
                'id': int(parts[0]),    # token index within the sentence
                'form': parts[1],       # surface form
                'lemma': parts[2],      # lemma / base form
                'upos': parts[3],       # universal POS tag
                'xpos': parts[4],       # language-specific POS tag (optional)
                'feats': parts[5],      # morphological features (optional)
                'head': int(parts[6]),  # id of the dependency head (0 = root)
                'deprel': parts[7],     # dependency relation to the head
                'deps': parts[8],       # enhanced dependencies (optional)
                'misc': parts[9],       # miscellaneous annotations (optional)
            })
        # Flush a final sentence not followed by a trailing blank line.
        if sentence:
            sentences.append(sentence)
    return sentences

from collections import defaultdict
import pickle as pkl

def build_vocab(sentences, basepath="./"):
    """Build word / POS-tag / dependency-relation vocabularies.

    Indices start at 1 in first-seen order; index 0 is reserved for
    padding and unknown entries (ConllDataset maps out-of-vocabulary
    items to 0). The three mappings are also pickled to
    ``<basepath>data/generate_pkl/depvocab.pkl``; missing directories
    are created.

    Args:
        sentences: parsed sentences as returned by load_conll.
        basepath: prefix for the pickle output path.

    Returns:
        Tuple ``(word2idx, tag2idx, rel2idx)`` of str -> int (>= 1) dicts.
    """
    import os

    word_counts = defaultdict(int)
    tag_counts = defaultdict(int)
    rel_counts = defaultdict(int)

    for sentence in sentences:
        for token in sentence:
            word_counts[token['form']] += 1
            tag_counts[token['upos']] += 1
            rel_counts[token['deprel']] += 1

    # Enumerate keys in first-seen order; +1 reserves index 0 for
    # padding/unknown.
    word2idx = {word: idx + 1 for idx, word in enumerate(word_counts)}
    tag2idx = {tag: idx + 1 for idx, tag in enumerate(tag_counts)}
    rel2idx = {rel: idx + 1 for idx, rel in enumerate(rel_counts)}

    out_path = basepath + 'data/generate_pkl/depvocab.pkl'
    # Create the output directory so the dump doesn't fail on a fresh tree.
    os.makedirs(os.path.dirname(out_path), exist_ok=True)
    with open(out_path, 'wb') as f:
        pkl.dump([word2idx, tag2idx, rel2idx], f)

    return word2idx, tag2idx, rel2idx