import numpy as np

# Hidden Markov model (HMM) with forward, backward, Viterbi and Baum-Welch.
class HMM:
    """Discrete HMM over the classic Healthy/Fever toy example.

    Attributes:
        states: hidden-state names; index i <-> states[i].
        observations: observation-symbol names; index k <-> observations[k].
        pi: initial state distribution, shape (N,).
        A: transition matrix, A[i, j] = P(q_{t+1}=j | q_t=i); rows sum to 1.
        B: emission matrix, B[i, k] = P(o_t=k | q_t=i); rows sum to 1.
    """

    def __init__(self):
        # Hidden state set (e.g. health condition).
        self.states = ['Healthy', 'Fever']
        self.state_index = {s: i for i, s in enumerate(self.states)}

        # Observation symbol set (e.g. symptoms).
        self.observations = ['normal', 'cold', 'dizzy']
        self.observation_index = {o: i for i, o in enumerate(self.observations)}

        # Initial distribution P(Healthy), P(Fever).
        self.pi = np.array([0.6, 0.4])

        # Transition matrix A (rows: from-state, columns: to-state).
        self.A = np.array([
            [0.7, 0.3],  # Healthy -> Healthy/Fever
            [0.4, 0.6]   # Fever   -> Healthy/Fever
        ])

        # Emission matrix B, P(observation | state); columns: normal/cold/dizzy.
        self.B = np.array([
            [0.5, 0.4, 0.1],  # Healthy
            [0.1, 0.3, 0.6]   # Fever
        ])

    def _obs_indices(self, obs_seq):
        """Map observation symbols to integer column indices into B.

        Raises:
            ValueError: if obs_seq is empty.
            KeyError: if a symbol is not a known observation.
        """
        if len(obs_seq) == 0:
            raise ValueError("obs_seq must be non-empty")
        return [self.observation_index[o] for o in obs_seq]

    def forward(self, obs_seq):
        """Forward algorithm.

        Args:
            obs_seq: sequence of observation symbols, e.g. ['normal', 'cold'].

        Returns:
            (alpha, p): alpha[t, i] = P(o_1..o_t, q_t = i);
            p = P(obs_seq) = sum_i alpha[T-1, i].
        """
        obs_idx = self._obs_indices(obs_seq)
        T, N = len(obs_idx), len(self.states)
        alpha = np.zeros((T, N))

        # Initialization: alpha_1(i) = pi_i * b_i(o_1).
        alpha[0] = self.pi * self.B[:, obs_idx[0]]

        # Induction, vectorized over states: alpha_t = (alpha_{t-1} @ A) * b(o_t).
        for t in range(1, T):
            alpha[t] = (alpha[t - 1] @ self.A) * self.B[:, obs_idx[t]]

        return alpha, alpha[-1].sum()

    def backward(self, obs_seq):
        """Backward algorithm.

        Returns:
            (beta, p): beta[t, i] = P(o_{t+1}..o_T | q_t = i);
            p = P(obs_seq), recovered from the first time step.
        """
        obs_idx = self._obs_indices(obs_seq)
        T, N = len(obs_idx), len(self.states)
        beta = np.zeros((T, N))

        # Termination anchor: beta_T(i) = 1.
        beta[-1] = 1.0

        # Backward induction: beta_t = A @ (b(o_{t+1}) * beta_{t+1}).
        for t in range(T - 2, -1, -1):
            beta[t] = self.A @ (self.B[:, obs_idx[t + 1]] * beta[t + 1])

        # P(obs_seq) = sum_i pi_i * b_i(o_1) * beta_1(i).
        p_obs = np.sum(self.pi * self.B[:, obs_idx[0]] * beta[0])
        return beta, p_obs

    def baum_welch(self, obs_seqs, max_iter=10, tol=1e-4):
        """Baum-Welch (EM) re-estimation of pi, A and B in place.

        Args:
            obs_seqs: list of observation sequences, e.g.
                [['normal', 'cold', ...], [...]].
            max_iter: maximum number of EM iterations.
            tol: stop when the total log-likelihood changes by less than this.

        Expectations are accumulated over ALL sequences using the current
        (old) parameters; the M-step updates pi/A/B only once per iteration,
        after the whole E-step — updating them per-sequence would corrupt the
        forward/backward passes of the remaining sequences.
        """
        num_states = len(self.states)
        num_obs = len(self.observations)
        eps = 1e-10  # guards divisions and log(0)
        prev_log_prob = None

        for iteration in range(max_iter):
            total_log_prob = 0.0

            # E-step accumulators (summed over all sequences).
            pi_sum = np.zeros(num_states)
            xi_sum = np.zeros((num_states, num_states))
            gamma_state_sum = np.zeros(num_states)
            gamma_obs_sum = np.zeros((num_states, num_obs))

            for obs_seq in obs_seqs:
                obs_idx = self._obs_indices(obs_seq)
                T = len(obs_idx)
                alpha, p_seq = self.forward(obs_seq)
                beta, _ = self.backward(obs_seq)
                total_log_prob += np.log(p_seq + eps)

                # gamma_t(i) = P(q_t=i | O) = alpha_t(i) * beta_t(i) / P(O).
                gamma = (alpha * beta) / (p_seq + eps)
                pi_sum += gamma[0]

                # xi_t(i,j) = P(q_t=i, q_{t+1}=j | O), for t = 0..T-2.
                for t in range(T - 1):
                    xi_t = (alpha[t][:, None] * self.A
                            * self.B[:, obs_idx[t + 1]][None, :]
                            * beta[t + 1][None, :])
                    xi_sum += xi_t / (p_seq + eps)

                # Emission counts: gamma mass per state, split by emitted symbol.
                for t in range(T):
                    gamma_state_sum += gamma[t]
                    gamma_obs_sum[:, obs_idx[t]] += gamma[t]

            # M-step: pi is the average start-state posterior over sequences;
            # A rows normalize expected transitions; B rows normalize expected
            # emissions.
            self.pi = pi_sum / len(obs_seqs)
            self.A = xi_sum / (xi_sum.sum(axis=1, keepdims=True) + eps)
            self.B = gamma_obs_sum / (gamma_state_sum[:, None] + eps)

            print(f"Iteration {iteration+1}, Log Probability: {total_log_prob:.6f}")

            if prev_log_prob is not None and abs(total_log_prob - prev_log_prob) < tol:
                print("Converged.")
                break
            prev_log_prob = total_log_prob

    def viterbi(self, obs_seq):
        """Most likely hidden-state path for obs_seq (Viterbi algorithm).

        Returns:
            List of state names, one per observation.
        """
        obs_idx = self._obs_indices(obs_seq)
        T, N = len(obs_idx), len(self.states)
        delta = np.zeros((T, N))
        psi = np.zeros((T, N), dtype=int)

        # Initialization.
        delta[0] = self.pi * self.B[:, obs_idx[0]]

        # Recursion; scores[i, j] = delta_{t-1}(i) * a_{ij}, computed once per
        # step and reused for both the max and the argmax.
        for t in range(1, T):
            scores = delta[t - 1][:, None] * self.A
            psi[t] = scores.argmax(axis=0)
            delta[t] = scores.max(axis=0) * self.B[:, obs_idx[t]]

        # Backtrack the best path from the best final state.
        path = np.zeros(T, dtype=int)
        path[-1] = delta[-1].argmax()
        for t in range(T - 2, -1, -1):
            path[t] = psi[t + 1, path[t + 1]]

        return [self.states[i] for i in path]


# Script entry point.
def main():
    """Interactive driver: read observation sequences, report forward/backward
    probabilities and the Viterbi path, and optionally train via Baum-Welch."""
    model = HMM()

    while True:
        raw = input("请输入观测序列（用空格分隔，例如：normal cold dizzy），直接按回车退出：").strip()
        if not raw:
            print("程序结束。")
            break

        sequence = raw.split()

        # Reject any symbol the model has no emission column for.
        invalid = [symbol for symbol in sequence if symbol not in model.observations]
        if invalid:
            print(f"错误：无效观测 {invalid}，请重新输入。")
            continue

        # Forward algorithm: P(observations | model).
        _, prob_forward = model.forward(sequence)
        print(f"\n前向算法计算的观测概率：{prob_forward:.6f}")

        # Backward algorithm: same probability, computed from the other end.
        _, prob_backward = model.backward(sequence)
        print(f"后向算法计算的观测概率：{prob_backward:.6f}")

        # Viterbi algorithm: most likely hidden-state path.
        best_path = model.viterbi(sequence)
        print(f"维特比算法推测的隐含状态序列：{' → '.join(best_path)}\n")

        # Optionally re-estimate the model with Baum-Welch.
        answer = input("是否使用鲍姆-韦尔奇算法训练模型？(y/n): ").strip().lower()
        if answer != 'y':
            continue

        sample_count = int(input("请输入用于训练的样本数量："))
        training_seqs = [
            input(f"请输入第 {i+1} 条观测序列（用空格分隔）：").strip().split()
            for i in range(sample_count)
        ]

        model.baum_welch(training_seqs)
        print("\n训练后模型参数：")
        print("初始概率 pi:", dict(zip(model.states, model.pi)))
        print("状态转移矩阵 A:")
        print(model.A)
        print("观测概率矩阵 B:")
        print(model.B)

if __name__ == '__main__':
    main()
