import json
import collections
import torch
from torch.utils.data import Dataset, DataLoader

def getdata(en_path, ch_path):
    """Load the pre-tokenized English and Chinese corpora from JSON files.

    Each file is expected to hold a UTF-8 JSON array (typically a list of
    token lists, one inner list per sentence). Returns the pair
    (english_tokens, chinese_tokens) in that order.
    """
    def _read_json(path):
        # Corpus files are produced by an earlier tokenization step.
        with open(path, 'r', encoding='utf-8') as handle:
            return json.load(handle)

    return _read_json(en_path), _read_json(ch_path)

def count_corpus(tokens):
    """Count token frequencies; `tokens` may be a flat list or a list of lines."""
    if len(tokens) == 0 or isinstance(tokens[0], list):
        # Flatten a 2-D token list (one inner list per sentence) before counting.
        tokens = [token for line in tokens for token in line]
    return collections.Counter(tokens)

class Vocab:
    """Token <-> index vocabulary.

    Index 0 is always '<unk>', followed by `reserved_tokens` in the given
    order, then corpus tokens with frequency >= `min_freq` in descending
    frequency order. Unknown tokens map to index 0 on lookup.
    """

    def __init__(self, tokens=None, min_freq=10, reserved_tokens=None):
        if tokens is None:
            tokens = []
        if reserved_tokens is None:
            reserved_tokens = []
        counter = count_corpus(tokens)
        # (token, freq) pairs, most frequent first; kept public for inspection.
        self.token_freqs = sorted(counter.items(), key=lambda x: x[1], reverse=True)
        self.unk = 0  # index of '<unk>', also the fallback for unknown lookups
        self.idx_to_token = ['<unk>'] + reserved_tokens
        self.token_to_idx = {token: idx for idx, token in enumerate(self.idx_to_token)}
        # Fix: membership is checked against the dict (O(1)) instead of scanning
        # a growing list (O(n) per token), so construction is O(n) not O(n^2).
        for token, freq in self.token_freqs:
            if freq >= min_freq and token not in self.token_to_idx:
                self.token_to_idx[token] = len(self.idx_to_token)
                self.idx_to_token.append(token)

    def __len__(self):
        return len(self.idx_to_token)

    def __getitem__(self, tokens):
        """Map a token (or nested list/tuple of tokens) to index/indices."""
        if not isinstance(tokens, (list, tuple)):
            return self.token_to_idx.get(tokens, self.unk)
        return [self.__getitem__(token) for token in tokens]

    def to_tokens(self, indices):
        """Inverse of __getitem__: map an index (or list of indices) to token(s)."""
        if not isinstance(indices, (list, tuple)):
            return self.idx_to_token[indices]
        return [self.idx_to_token[idx] for idx in indices]

def truncate_pad(line, num_steps, padding_token):
    """Clip `line` to `num_steps` tokens, or right-pad it with `padding_token`.

    Always returns a sequence of exactly `num_steps` items.
    """
    clipped = line[:num_steps]
    padding = [padding_token] * (num_steps - len(clipped))
    return clipped + padding

def build_batches(source, target, source_vocab, target_vocab):
    """Numericalize a parallel corpus, sorted by source-sentence length.

    Each sentence (a list of tokens) is mapped through its vocabulary and
    terminated with '<eos>'. Pairs are sorted by source length so later
    batching groups similarly sized sentences (less padding per batch).

    Returns a list of (source_ids, target_ids) tuples; [] for empty input.
    """
    combined = sorted(zip(source, target), key=lambda pair: len(pair[0]))
    if not combined:
        # Fix: zip(*[]) cannot be unpacked into two names (ValueError);
        # an empty corpus simply yields no batches.
        return []
    sorted_source, sorted_target = zip(*combined)
    source_ids = [source_vocab[s] + [source_vocab['<eos>']] for s in sorted_source]
    target_ids = [target_vocab[s] + [target_vocab['<eos>']] for s in sorted_target]
    return list(zip(source_ids, target_ids))

def load_vocab(source, target):
    """Build source and target vocabularies with the standard special tokens.

    Returns (source, target, src_vocab, tar_vocab); the token lists are
    passed through unchanged so callers can chain into dataset creation.
    """
    specials = ['<pad>', '<bos>', '<eos>']
    src_vocab = Vocab(source, min_freq=10, reserved_tokens=specials)
    tar_vocab = Vocab(target, min_freq=10, reserved_tokens=specials)
    return source, target, src_vocab, tar_vocab

class Seq2SeqDataset(Dataset):
    """Parallel-corpus dataset yielding (source_ids, target_ids) pairs.

    Sentence pairs are numericalized once, up front, and stored sorted by
    source length (see build_batches).
    """

    def __init__(self, source, target, source_vocab, target_vocab):
        self.source_vocab = source_vocab
        self.target_vocab = target_vocab
        # Pre-compute every (source_ids, target_ids) pair eagerly.
        self.batches = build_batches(source, target, source_vocab, target_vocab)

    def __len__(self):
        return len(self.batches)

    def __getitem__(self, idx):
        pair = self.batches[idx]
        return pair[0], pair[1]

def collate_fn(batch, source_vocab, target_vocab):
    """Collate (source_ids, target_ids) pairs into padded batch tensors.

    Returns (source_batch, source_valid_len, target_batch, target_valid_len),
    where the valid lengths count the non-<pad> positions per row as float32.
    """
    sources, targets = zip(*batch)
    src_pad = source_vocab['<pad>']
    tgt_pad = target_vocab['<pad>']

    # Pad every sequence to the longest one on its side of the batch.
    src_width = max(len(seq) for seq in sources)
    tgt_width = max(len(seq) for seq in targets)
    src_rows = [truncate_pad(seq, src_width, src_pad) for seq in sources]
    tgt_rows = [truncate_pad(seq, tgt_width, tgt_pad) for seq in targets]

    source_batch = torch.tensor(src_rows)
    target_batch = torch.tensor(tgt_rows)
    source_valid_len = (source_batch != src_pad).type(torch.float32).sum(dim=1)
    target_valid_len = (target_batch != tgt_pad).type(torch.float32).sum(dim=1)
    return source_batch, source_valid_len, target_batch, target_valid_len

def load_data(batch_size, source, target, num_workers=0, pin_memory=False):
    """Build vocabularies and wrap the parallel corpus in a DataLoader.

    Batches are deliberately not shuffled: sentence pairs were sorted by
    source length, so consecutive batches group similarly sized sentences.
    """
    source, target, src_vocab, tar_vocab = load_vocab(source, target)
    dataset = Seq2SeqDataset(source, target, src_vocab, tar_vocab)
    # Bind the vocabularies into the collate function expected by DataLoader.
    collate = lambda batch: collate_fn(batch, src_vocab, tar_vocab)
    return DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=pin_memory,
        collate_fn=collate,
    )

def save_vocab(vocab, file_path):
    """Serialize a vocabulary's lookup tables to a UTF-8 JSON file.

    Writes a JSON object with keys 'token_to_idx' and 'idx_to_token';
    non-ASCII tokens (e.g. Chinese characters) are written verbatim.
    """
    payload = {
        'token_to_idx': vocab.token_to_idx,
        'idx_to_token': vocab.idx_to_token,
    }
    with open(file_path, 'w', encoding='utf-8') as out:
        json.dump(payload, out, ensure_ascii=False)

    