import unittest
import json
import torch
from src.dataset import StreamingTranslationDataset
from src.dataset import Vocabulary
from unit_test.base_test_case import BaseTestCase


class TestStreamingTranslationDataset(BaseTestCase):
    """Unit tests for StreamingTranslationDataset backed by a temporary JSON file.

    setUp writes a tiny English→Chinese parallel corpus to disk, builds the
    two vocabularies from it, and constructs the dataset under test with
    max_len=5; tearDown removes the temporary file.
    """

    def setUp(self):
        # Write a small parallel corpus for the dataset to stream from.
        self.test_file = "test_data.json"
        data = [
            {"src": "Hello", "tgt": "你好"},
            {"src": "How are you?", "tgt": "你好吗？"},
            {"src": "What's your name?", "tgt": "你的名字是什么？"}
        ]
        with open(self.test_file, "w", encoding="utf-8") as f:
            json.dump(data, f, ensure_ascii=False)

        # Build source/target vocabularies from the corpus sentences.
        self.src_vocab = Vocabulary()
        self.tgt_vocab = Vocabulary()
        self.src_vocab.build([entry["src"] for entry in data])
        self.tgt_vocab.build([entry["tgt"] for entry in data])

        # Dataset under test; sentences are truncated/padded to max_len=5.
        self.dataset = StreamingTranslationDataset(
            self.test_file, self.src_vocab, self.tgt_vocab, max_len=5
        )

    def tearDown(self):
        # Remove the temporary corpus file (helper provided by BaseTestCase).
        self.cleanFile(self.test_file)

    def test_dataset_length(self):
        """len() reports the number of records written in setUp."""
        self.assertEqual(len(self.dataset), 3)

    def test_get_item(self):
        """A sample is a dict with 'src'/'tgt' tensors of length max_len + 2."""
        sample = self.dataset[0]

        # The item must be a dict exposing both the 'src' and 'tgt' keys.
        self.assertIsInstance(sample, dict)
        for key in ('src', 'tgt'):
            self.assertIn(key, sample)

        # max_len (5) plus the <sos> and <eos> markers gives length 7.
        expected_shape = torch.Size([7])
        self.assertEqual(sample['src'].shape, expected_shape)
        self.assertEqual(sample['tgt'].shape, expected_shape)

    def test_padding(self):
        """Sequences start with <sos> (id 1) and are zero-padded at the tail."""
        sample = self.dataset[0]
        for tensor in (sample['src'], sample['tgt']):
            self.assertEqual(tensor[0].item(), 1)   # leading <sos> token
            self.assertEqual(tensor[-1].item(), 0)  # trailing padding (id 0)

    def test_out_of_bounds(self):
        """Indexing past the end of the dataset yields None."""
        self.assertIsNone(self.dataset[100])

    def test_missing_fields(self):
        """Records lacking 'src'/'tgt' keys fall back to all-padding tensors."""
        # Overwrite the corpus with a record that has neither expected field.
        with open(self.test_file, "w", encoding="utf-8") as f:
            json.dump([{"english": "hello"}], f, ensure_ascii=False)

        dataset = StreamingTranslationDataset(self.test_file, self.src_vocab, self.tgt_vocab)
        sample = dataset[0]

        # Both tensors should be pure padding of length max_len + 2.
        padded = torch.LongTensor([0] * (dataset.max_len + 2))
        self.assertTrue(torch.equal(sample['src'], padded))
        self.assertTrue(torch.equal(sample['tgt'], padded))


# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
