import logging
import pytest
import common
import numpy as np

# Configure the root logger (note: basicConfig writes to stderr by default, not stdout)
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

def test_preprocess():
    """Verify that common.preprocess tokenizes text and builds both id mappings."""
    sample = "You say goodbye, I say hello."
    corpus, token2ids, id2tokens = common.preprocess(sample)

    # The corpus encodes each token as its vocabulary id, in order of appearance;
    # 'say' (id 1) occurs twice.
    np.testing.assert_array_equal(corpus, [0, 1, 2, 3, 4, 1, 5, 6])

    # Forward mapping: token -> id, assigned in first-seen order.
    vocab = {
        'you': 0,
        'say': 1,
        'goodbye': 2,
        ',': 3,
        'i': 4,
        'hello': 5,
        '.': 6,
    }
    assert token2ids == vocab

    # Reverse mapping must be the exact inverse of the forward one.
    assert id2tokens == {idx: tok for tok, idx in vocab.items()}

def test_create_co_matrix():
    """Verify the co-occurrence matrix for a known sentence with window_size=1."""
    corpus, token2ids, id2tokens = common.preprocess("You say goodbye, I say hello.")
    logging.debug("corpus: %s", corpus)
    logging.debug("vocab: %s", token2ids)

    actual = common.create_co_matrix(corpus, len(token2ids), 1)

    # Hand-computed counts over the 8-token corpus; row i counts neighbors of
    # word id i within one position on either side. The matrix is symmetric.
    expected = np.array([
        [0, 1, 0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1, 1, 0],
        [0, 1, 0, 1, 0, 0, 0],
        [0, 0, 1, 0, 1, 0, 0],
        [0, 1, 0, 1, 0, 0, 0],
        [0, 1, 0, 0, 0, 0, 1],
        [0, 0, 0, 0, 0, 1, 0],
    ])
    np.testing.assert_array_equal(actual, expected)

def test_cos_similarity():
    """Verify basic properties of cosine similarity on co-occurrence rows."""
    corpus, token2ids, id2tokens = common.preprocess("You say goodbye, I say hello.")
    logging.debug("corpus: %s", corpus)
    logging.debug("vocab: %s", token2ids)

    co_matrix = common.create_co_matrix(corpus, len(token2ids), 1)

    # A vector compared with itself has similarity exactly 1.
    self_sim = common.cos_similarity(co_matrix[0], co_matrix[0])
    np.testing.assert_allclose(self_sim, 1.0, rtol=1e-6)

    # Two non-negative count vectors must land in [0, 1]; a small epsilon
    # allows the boundary values 0 and 1 to pass.
    cross_sim = common.cos_similarity(co_matrix[0], co_matrix[1])
    np.testing.assert_array_less(-1e-10, cross_sim)
    np.testing.assert_array_less(cross_sim, 1 + 1e-10)

def test_ppmi():
    """Test PPMI (positive pointwise mutual information) matrix calculation.

    Checks shape, non-negativity, zero for never-co-occurring pairs,
    positivity for co-occurring pairs, and symmetry.
    """
    # Build the co-occurrence matrix for a known sentence.
    text = "You say goodbye, I say hello."
    corpus, token2ids, id2tokens = common.preprocess(text)
    logging.debug(f"corpus: {corpus}")
    logging.debug(f"vocab: {token2ids}")
    vocab_size = len(token2ids)
    window_size = 1

    co_matrix = common.create_co_matrix(corpus, vocab_size, window_size)
    ppmi_matrix = common.ppmi(co_matrix, eps=1e-8)

    # PPMI keeps the co-occurrence shape and is non-negative by definition.
    np.testing.assert_array_equal(ppmi_matrix.shape, co_matrix.shape)
    np.testing.assert_array_less(-1e-10, ppmi_matrix)

    # Words that never co-occur (window_size=1) must have PPMI == 0.
    word_you = token2ids['you']
    word_hello = token2ids['hello']
    np.testing.assert_allclose(ppmi_matrix[word_you, word_hello], 0, atol=1e-10)

    # Words that do co-occur ('say hello') must have PPMI > 0.
    # (Fixed: removed a redundant re-assignment of word_hello to the same value.)
    word_say = token2ids['say']
    np.testing.assert_array_less(0, ppmi_matrix[word_say, word_hello])

    # PPMI of a symmetric co-occurrence matrix is itself symmetric.
    np.testing.assert_allclose(ppmi_matrix, ppmi_matrix.T)

def test_create_context_target():
    """Verify (context, target) pair extraction with a one-word window."""
    corpus, token2ids, id2tokens = common.preprocess("You say goodbye, I say hello.")
    logging.debug("corpus: %s", corpus)
    logging.debug("vocab: %s", token2ids)

    contexts, target = common.create_context_target(corpus, 1)

    # Every interior position of the corpus becomes a target; its immediate
    # left/right neighbors form the context pair.
    np.testing.assert_array_equal(
        contexts,
        [[0, 2], [1, 3], [2, 4], [3, 1], [4, 5], [1, 6]],
    )
    np.testing.assert_array_equal(target, [1, 2, 3, 4, 1, 5])

def test_convert_one_hot():
    """Verify one-hot encoding for both 1D and 2D id arrays."""
    vocab_size = 7

    # 1D input: each id maps to one row of an identity-like matrix.
    flat_ids = np.array([0, 1, 2, 3, 4, 1, 5])
    encoded = common.convert_one_hot(flat_ids, vocab_size)

    want_flat = np.array([
        [1, 0, 0, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0, 0],
        [0, 0, 1, 0, 0, 0, 0],
        [0, 0, 0, 1, 0, 0, 0],
        [0, 0, 0, 0, 1, 0, 0],
        [0, 1, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 1, 0],
    ])
    np.testing.assert_array_equal(encoded.shape, (7, 7))
    np.testing.assert_array_equal(encoded, want_flat)

    # 2D input: a one-hot axis of length vocab_size is appended per element.
    nested_ids = np.array([[0, 1, 2], [3, 4, 1], [5, 6, 4]])
    encoded = common.convert_one_hot(nested_ids, vocab_size)

    want_nested = np.array([
        [[1, 0, 0, 0, 0, 0, 0],
         [0, 1, 0, 0, 0, 0, 0],
         [0, 0, 1, 0, 0, 0, 0]],
        [[0, 0, 0, 1, 0, 0, 0],
         [0, 0, 0, 0, 1, 0, 0],
         [0, 1, 0, 0, 0, 0, 0]],
        [[0, 0, 0, 0, 0, 1, 0],
         [0, 0, 0, 0, 0, 0, 1],
         [0, 0, 0, 0, 1, 0, 0]],
    ])
    np.testing.assert_array_equal(encoded.shape, (3, 3, 7))
    np.testing.assert_array_equal(encoded, want_nested)

def test_convert_one_hot_invalid_input():
    """A 3D corpus must be rejected with a descriptive ValueError."""
    # Same 2x2x2 array of 1..8 as an explicit literal, built arithmetically.
    three_dim = np.arange(1, 9).reshape(2, 2, 2)
    with pytest.raises(ValueError, match="corpus must be 1D or 2D array"):
        common.convert_one_hot(three_dim, 10)