import numpy as np
import random
from scipy import stats

# Define the MDP: states, actions, rewards, and the transition matrix.
states = ['S1', 'S2', 'S3']  # set of states
actions = ['A1', 'A2']  # set of actions

# Transition probabilities: for each state and action, a probability
# distribution over next states, listed in the order [S1, S2, S3].
transition_prob = {
    'S1': {'A1': [0.8, 0.2, 0.0], 'A2': [0.0, 1.0, 0.0]},
    'S2': {'A1': [0.3, 0.3, 0.4], 'A2': [0.0, 0.6, 0.4]},
    'S3': {'A1': [0.1, 0.0, 0.9], 'A2': [0.0, 0.0, 1.0]},
}

# Reward function: reward received for occupying each state.
rewards = {
    'S1': 10,
    'S2': -5,
    'S3': 20,
}


# Policy: a simple uniform-random policy over the action set.
def choose_action(state):
    """Return an action drawn uniformly at random; the state is ignored."""
    return actions[random.randrange(len(actions))]


# Markov simulation: run several episodes and return each episode's total reward.
def simulate_mdp(num_simulations, max_steps):
    """Monte Carlo rollouts of the MDP under the random policy.

    Each episode starts in a uniformly random state and runs for exactly
    ``max_steps`` steps, summing the per-state reward along the way.

    :param num_simulations: number of independent episodes to simulate
    :param max_steps: number of steps per episode
    :return: list of per-episode cumulative rewards, one entry per episode
    """
    episode_returns = []

    for _ in range(num_simulations):
        current = random.choice(states)  # uniformly random initial state
        episode_total = 0

        for _ in range(max_steps):
            # Collect the reward for the state we are in, then transition
            # according to the policy's action (reward does not depend on
            # the action, so the order does not affect the RNG streams).
            episode_total += rewards[current]
            chosen = choose_action(current)
            current = np.random.choice(states, p=transition_prob[current][chosen])

        episode_returns.append(episode_total)

    return episode_returns


# Run the Monte Carlo simulation.
num_simulations = 1000  # number of simulated episodes
max_steps = 50  # maximum steps per episode
cumulative_rewards = simulate_mdp(num_simulations, max_steps)

# Statistical analysis.
mean_reward = np.mean(cumulative_rewards)
# ddof=1: sample standard deviation. np.std defaults to the population
# estimate (ddof=0), which understates the uncertainty of a sample.
std_reward = np.std(cumulative_rewards, ddof=1)
# Student-t interval with n-1 degrees of freedom for the mean, consistent
# with the one-sample t-test performed below.
confidence_interval = stats.t.interval(
    0.95,
    df=num_simulations - 1,
    loc=mean_reward,
    scale=std_reward / np.sqrt(num_simulations),
)

print(f"累计奖励的均值: {mean_reward}")
print(f"累计奖励的标准差: {std_reward}")
print(f"95% 置信区间: {confidence_interval}")

# Hypothesis test: H0 is that the mean cumulative reward equals 15.
expected_mean = 15
t_stat, p_value = stats.ttest_1samp(cumulative_rewards, expected_mean)

print(f"t统计量: {t_stat}, p值: {p_value}")

if p_value < 0.05:
    print("拒绝原假设：累计奖励的均值显著不同于15")
else:
    print("无法拒绝原假设：累计奖励的均值与15没有显著差异")