import numpy as np
from collections import defaultdict
import logging
from typing import List, Tuple

class HiddenMarkovModel:
    """First-order hidden Markov model for sequence tagging.

    Parameters are estimated by maximum likelihood with additive
    (Laplace-style) smoothing; decoding uses the Viterbi algorithm in
    log space for numerical stability.
    """

    def __init__(self):
        # All parameters are populated by train(); None until then.
        self.states = None                    # sorted unique state labels (tags)
        self.symbols = None                   # sorted unique observation symbols (words)
        self.symbol_to_index = None           # symbol -> column index
        self.state_to_index = None            # state -> row index
        self.initial_probabilities = None     # shape (S,), rows sum to 1
        self.transition_probabilities = None  # shape (S, S), each row sums to 1
        self.emission_probabilities = None    # shape (S, V), each row sums to 1

    def train(self, training_data: List[List[Tuple[str, str]]], smooth_factor: float = 0.1) -> None:
        """Estimate initial, transition and emission probabilities.

        Args:
            training_data: sentences, each a list of (word, tag) pairs.
            smooth_factor: additive smoothing constant added to every count
                so no probability is exactly zero.
        """
        # Extract the unique states and symbols (sorted for deterministic indexing).
        pairs = [pair for sentence in training_data for pair in sentence]
        self.states = sorted({tag for _, tag in pairs})
        self.symbols = sorted({word for word, _ in pairs})

        # Build label <-> index mappings.
        self.state_to_index = {state: i for i, state in enumerate(self.states)}
        self.symbol_to_index = {symbol: i for i, symbol in enumerate(self.symbols)}

        num_states = len(self.states)
        num_symbols = len(self.symbols)

        # Initialize counters.
        initial_counts = np.zeros(num_states)
        transition_counts = np.zeros((num_states, num_states))
        emission_counts = np.zeros((num_states, num_symbols))

        # Collect counts.
        for sentence in training_data:
            if not sentence:
                continue  # skip empty sentences instead of raising IndexError
            initial_counts[self.state_to_index[sentence[0][1]]] += 1
            prev_state_idx = None
            for word, state in sentence:
                state_idx = self.state_to_index[state]
                emission_counts[state_idx, self.symbol_to_index[word]] += 1
                if prev_state_idx is not None:
                    transition_counts[prev_state_idx, state_idx] += 1
                prev_state_idx = state_idx

        # Apply additive smoothing and normalize, vectorized: every row of the
        # resulting matrices sums to 1 (same arithmetic as per-cell division).
        self.initial_probabilities = (initial_counts + smooth_factor) / (
            initial_counts.sum() + smooth_factor * num_states
        )
        self.transition_probabilities = (transition_counts + smooth_factor) / (
            transition_counts.sum(axis=1, keepdims=True) + smooth_factor * num_states
        )
        self.emission_probabilities = (emission_counts + smooth_factor) / (
            emission_counts.sum(axis=1, keepdims=True) + smooth_factor * num_symbols
        )

    def viterbi(self, observation: List[str]) -> Tuple[List[str], float]:
        """Return the most likely state sequence and its probability.

        Out-of-vocabulary words receive a uniform emission probability
        (1 / |V|) for every state, so the decoder falls back on transition
        structure rather than aliasing unknown words to an arbitrary known
        symbol.

        Args:
            observation: sequence of words to tag; may contain unknown words.

        Returns:
            (best_path_states, probability). For long sequences the joint
            probability may underflow to 0.0; the returned path is unaffected
            because decoding is done entirely in log space.
        """
        # Handle the empty sequence.
        if not observation:
            return [], 0.0

        num_states = len(self.states)
        T = len(observation)
        floor = 1e-10  # tiny floor keeps log() finite for near-zero probabilities

        log_init = np.log(np.maximum(self.initial_probabilities, floor))
        log_trans = np.log(np.maximum(self.transition_probabilities, floor))
        # Uniform log-emission column used for unknown words.
        oov_log_emit = np.full(num_states, np.log(1.0 / len(self.symbols)))

        def log_emissions(symbol: str) -> np.ndarray:
            """Log emission probabilities of `symbol` for every state."""
            idx = self.symbol_to_index.get(symbol)
            if idx is None:
                return oov_log_emit
            return np.log(np.maximum(self.emission_probabilities[:, idx], floor))

        viterbi_matrix = np.zeros((num_states, T))
        backpointer = np.zeros((num_states, T), dtype=int)

        # Initialization step.
        viterbi_matrix[:, 0] = log_init + log_emissions(observation[0])

        # Recursion: scores[i, j] = best log-prob of being in state i at t-1
        # and then transitioning to state j. np.argmax takes the first maximum,
        # preserving the original tie-breaking behavior.
        for t in range(1, T):
            scores = viterbi_matrix[:, t - 1][:, None] + log_trans
            backpointer[:, t] = np.argmax(scores, axis=0)
            viterbi_matrix[:, t] = np.max(scores, axis=0) + log_emissions(observation[t])

        # Traceback from the best final state.
        best = int(np.argmax(viterbi_matrix[:, -1]))
        best_path = [best]
        for t in range(T - 1, 0, -1):
            best = int(backpointer[best, t])
            best_path.append(best)
        best_path.reverse()

        # Convert indices back to state labels.
        best_path_states = [self.states[i] for i in best_path]

        # Joint probability of the best path. exp() of a finite negative log
        # probability cannot produce NaN/Inf — it underflows toward 0.0.
        final_log_prob = float(np.max(viterbi_matrix[:, -1]))
        return best_path_states, float(np.exp(final_log_prob))