import re
from typing import Tuple, List, Dict
import numpy as np
import logging

def preprocess(text: str) -> Tuple[List[int], Dict[str, int], Dict[int, str]]:
    """
    Preprocess text by tokenizing and creating token-id mappings.

    Args:
        text: Input text to preprocess

    Returns:
        Tuple containing:
        - List of token ids (one id per token, in order of appearance)
        - Dictionary mapping tokens to ids
        - Dictionary mapping ids to tokens
    """
    # Normalize case so "Hello" and "hello" share an id.
    text = text.lower()

    # Split into word tokens and standalone punctuation marks.
    tokens = re.findall(r'\w+|[^\w\s]', text)

    # Assign ids in first-seen order; `token_id` avoids shadowing builtin `id`.
    token_to_id: Dict[str, int] = {}
    token_ids: List[int] = []
    for token in tokens:
        if token not in token_to_id:
            token_to_id[token] = len(token_to_id)
        token_ids.append(token_to_id[token])

    id_to_token = {idx: token for token, idx in token_to_id.items()}

    return token_ids, token_to_id, id_to_token

def create_co_matrix(corpus: List[int], vocab_size: int, window_size: int = 1) -> np.ndarray:
    """
    Create a co-occurrence matrix from a corpus of tokens.

    Args:
        corpus: List of token ids
        vocab_size: Size of the vocabulary
        window_size: Size of the window for co-occurrence

    Returns:
        Co-occurrence matrix of shape (vocab_size, vocab_size), dtype int32;
        entry [i, j] counts how often token j appears within `window_size`
        positions of token i.
    """
    corpus_size = len(corpus)
    co_matrix = np.zeros((vocab_size, vocab_size), dtype=np.int32)

    for idx, word_id in enumerate(corpus):
        for i in range(1, window_size + 1):
            left_idx = idx - i
            right_idx = idx + i

            if left_idx >= 0:
                left_word_id = corpus[left_idx]
                # Lazy %-args: no string formatting unless DEBUG is enabled.
                logging.debug("%s and %s coexist", word_id, left_word_id)
                co_matrix[word_id, left_word_id] += 1

            if right_idx < corpus_size:
                right_word_id = corpus[right_idx]
                logging.debug("%s and %s coexist", word_id, right_word_id)
                co_matrix[word_id, right_word_id] += 1

    return co_matrix

def cos_similarity(x: np.ndarray, y: np.ndarray, eps: float = 1e-8) -> float:
    """
    Calculate the cosine similarity between two vectors.

    Args:
        x: First vector
        y: Second vector
        eps: Small constant added to each norm to avoid division by zero

    Returns:
        Cosine similarity
    """
    # Add eps to each L2 norm so a zero vector cannot cause a zero division.
    norm_x = np.sqrt(np.sum(np.square(x))) + eps
    norm_y = np.sqrt(np.sum(np.square(y))) + eps
    return np.dot(x, y) / (norm_x * norm_y)

def ppmi(co_matrix: np.ndarray, eps: float = 1e-8) -> np.ndarray:
    """
    Compute the PPMI (Positive Pointwise Mutual Information) matrix.

    Args:
        co_matrix: Co-occurrence matrix
        eps: Small value added to numerator and denominator to avoid
            division by zero / log of zero. Defaults to 1e-8.

    Returns:
        np.ndarray: float32 PPMI matrix where each entry is max(0, PMI).
    """
    # Marginal frequency of each token (column sums) and grand total.
    token_freq = np.sum(co_matrix, axis=0)
    total_freq = np.sum(co_matrix)

    # Vectorized replacement for the O(V^2) Python loop:
    # PMI[i, j] = log2((C[i, j] * N + eps) / (f[i] * f[j] + eps)).
    # Cast to float64 first so large counts cannot overflow integer math.
    numerator = co_matrix.astype(np.float64) * total_freq + eps
    denominator = np.outer(token_freq, token_freq).astype(np.float64) + eps
    pmi = np.log2(numerator / denominator)

    # Clamp negative PMI to zero and match the original float32 output dtype.
    return np.maximum(pmi, 0.0).astype(np.float32)

def create_context_target(corpus: List[int], window_size: int = 1) -> Tuple[List[List[int]], List[int]]:
    """
    Create context-target pairs for CBOW-style training.

    Args:
        corpus: Corpus encoded as integer token ids
        window_size: Size of the context window on each side

    Returns:
        Tuple[List[List[int]], List[int]]: contexts and targets, where each
        context is the list of ALL neighbor ids within `window_size` of the
        target (left neighbors first, then right), in corpus order.
    """
    target = []
    contexts = []

    for i in range(window_size, len(corpus) - window_size):
        target.append(corpus[i])
        # Bug fix: include every word within the window, not only the two
        # words at exactly `window_size` distance (unchanged for window_size=1).
        contexts.append([corpus[i + offset]
                         for offset in range(-window_size, window_size + 1)
                         if offset != 0])

    return contexts, target

def convert_one_hot(corpus: List[int], vocab_size: int) -> np.ndarray:
    """
    Convert a corpus to one-hot encoding.

    Args:
        corpus: Corpus of integer token ids; a Python list or a 1D/2D
            np.ndarray (lists are converted internally).
        vocab_size: Size of the vocabulary

    Returns:
        np.ndarray: int32 one-hot matrix of shape (N, vocab_size) for 1D
        input, or (N, C, vocab_size) for 2D input.

    Raises:
        ValueError: If the input is not 1D or 2D.
    """
    # Bug fix: the annotation promised List[int] but the body used
    # .shape/.ndim — normalize to an ndarray so plain lists work too.
    corpus = np.asarray(corpus)
    N = corpus.shape[0]

    if corpus.ndim == 1:
        one_hot = np.zeros((N, vocab_size), dtype=np.int32)
        # Advanced indexing sets every (row, word_id) in one vectorized step.
        one_hot[np.arange(N), corpus] = 1
        return one_hot

    elif corpus.ndim == 2:
        C = corpus.shape[1]
        one_hot = np.zeros((N, C, vocab_size), dtype=np.int32)
        # Broadcast row/column index grids against the id matrix.
        rows = np.arange(N)[:, None]
        cols = np.arange(C)[None, :]
        one_hot[rows, cols, corpus] = 1
        return one_hot

    else:
        raise ValueError("corpus must be 1D or 2D array")
