import numpy as np

S = ["s1", "s2", "s3", "s4", "s5"]  # state space
A = ["保持s1", "前往s1", "前往s2", "前往s3", "前往s4", "前往s5", "概率前往"]  # action space
# Transition function: key "s-action-s_next" -> probability of landing in
# s_next after taking `action` in state s (missing keys mean probability 0).
P = {
    "s1-保持s1-s1": 1.0,
    "s1-前往s2-s2": 1.0,
    "s2-前往s1-s1": 1.0,
    "s2-前往s3-s3": 1.0,
    "s3-前往s4-s4": 1.0,
    "s3-前往s5-s5": 1.0,
    "s4-前往s5-s5": 1.0,
    "s4-概率前往-s2": 0.2,
    "s4-概率前往-s3": 0.4,
    "s4-概率前往-s4": 0.4,
}
# Reward function: key "s-action" -> immediate reward for taking `action` in s.
R = {
    "s1-保持s1": -1,
    "s1-前往s2": 0,
    "s2-前往s1": -1,
    "s2-前往s3": -2,
    "s3-前往s4": -2,
    "s3-前往s5": 0,
    "s4-前往s5": 10,
    "s4-概率前往": 1,
}

# Policy 1: uniform random policy (key "s-action" -> probability of that action in s).
Pi_1 = {
    "s1-保持s1": 0.5,
    "s1-前往s2": 0.5,
    "s2-前往s1": 0.5,
    "s2-前往s3": 0.5,
    "s3-前往s4": 0.5,
    "s3-前往s5": 0.5,
    "s4-前往s5": 0.5,
    "s4-概率前往": 0.5,
}
# Policy 2: a biased policy (e.g. in s4 it picks "概率前往" with probability 0.9).
Pi_2 = {
    "s1-保持s1": 0.6,
    "s1-前往s2": 0.4,
    "s2-前往s1": 0.3,
    "s2-前往s3": 0.7,
    "s3-前往s4": 0.5,
    "s3-前往s5": 0.5,
    "s4-前往s5": 0.1,
    "s4-概率前往": 0.9,
}

def join(str1, str2):
    """Build a '-'-separated key from two parts (used for the P/R/Pi dict keys)."""
    return f"{str1}-{str2}"

gamma = 0.5  # discount factor
MDP = (S, A, P, R, gamma)  # the full MDP tuple consumed by sample()

def sample(MDP, Pi, timestep_max, number):
    """
    Sample `number` episodes from the MDP under policy `Pi`; each episode is
    at most `timestep_max` steps long.

    Parameters:
    MDP (tuple): the Markov decision process (S, A, P, R, gamma).
    Pi (dict): policy, mapping "state-action" keys to action probabilities.
    timestep_max (int): maximum number of steps per sampled episode.
    number (int): how many episodes to sample.

    Returns:
    list: a list of episodes; each episode is a list of
          (state, action, reward, next_state) tuples.

    NOTE(review): assumes Pi assigns total probability 1 over the actions of
    every non-terminal state, and likewise P over successor states — otherwise
    `a` / `s_next` could be left unbound below. Holds for Pi_1/Pi_2/P here.
    """
    S, A, P, R, gamma = MDP
    episodes = []
    for _ in range(number):
        episode = []
        timestep = 0
        # Random initial state, excluding the terminal state s5
        # (randint's upper bound is exclusive, so indices 0..len(S)-2).
        s = S[np.random.randint(0, len(S)-1)]  
        # Keep stepping until timestep_max steps or the terminal state is reached.
        while s != "s5" and timestep < timestep_max:
            timestep += 1
            # Sample an action via inverse-CDF over the policy's probabilities.
            pi_s_a_rand = np.random.rand()
            pi_s_a = 0
            for a_opt in A:
                pi_s_a_name = join(s, a_opt)  
                pi_s_a += Pi.get(pi_s_a_name, 0)
                if pi_s_a > pi_s_a_rand:
                    # Action chosen; look up its immediate reward.
                    a = a_opt
                    a_r = R.get(pi_s_a_name, 0)
                    break
            # Sample the next state via inverse-CDF over the transition probabilities.
            p_s_a_rand = np.random.rand()
            p_s_a = 0
            for s_opt in S:
                p_s_a_name = join(join(s, a), s_opt)    
                p_s_a += P.get(p_s_a_name, 0)
                if p_s_a > p_s_a_rand:
                    s_next = s_opt
                    break
            episode.append((s, a, a_r, s_next))
            s = s_next
        episodes.append(episode)
    return episodes

# Draw 5 demo episodes (at most 20 steps each) under the random policy and print them.
episodes = sample(MDP, Pi_1, 20, 5)
for idx, episode in enumerate(episodes, start=1):
    print("采样第%d序列：%s" % (idx, episode))
                    

def MC(episodes, V, N, gamma):
    """
    Every-visit Monte Carlo policy evaluation.

    Walks each episode backwards, accumulating the discounted return G, and
    updates the state-value dict V and visit-count dict N IN PLACE with the
    incremental-mean rule V[s] <- V[s] + (G - V[s]) / N[s].

    Parameters:
    episodes (list): sampled episodes of (state, action, reward, next_state) tuples.
    V (dict): state -> value estimate, updated in place.
    N (dict): state -> visit count, updated in place.
    gamma (float): discount factor.
    """
    for episode in episodes:
        G = 0
        # Iterate from the last step to the first so G is the return from each step.
        for s, a, a_r, s_next in reversed(episode):
            G = gamma * G + a_r
            N[s] += 1
            V[s] += (G - V[s]) / N[s]

# Evaluate the random policy with Monte Carlo over 100 sampled episodes.
gamma = 0.5
state_names = ("s1", "s2", "s3", "s4", "s5")
V = dict.fromkeys(state_names, 0)  # value estimates, all start at 0
N = dict.fromkeys(state_names, 0)  # visit counts, all start at 0
timestep_max = 20
episodes = sample(MDP, Pi_1, timestep_max, 100)
MC(episodes, V, N, gamma)
print("使用蒙特卡洛方法计算的状态价值：", V)

def occupancy(episodes, s, a, timestep_max, gamma):
    """
    Monte Carlo estimate of the normalized discounted occupancy measure of the
    state-action pair (s, a): (1 - gamma) * sum_t gamma^t * P(s_t = s, a_t = a).

    Parameters:
    episodes (list): sampled episodes of (state, action, reward, next_state) tuples.
    s (str): state of the queried state-action pair.
    a (str): action of the queried state-action pair.
    timestep_max (int): maximum episode length (sizes the per-timestep counters).
    gamma (float): discount factor in [0, 1).

    Returns:
    float: the occupancy-measure estimate (0.0 if (s, a) never occurs).
    """
    total_times = np.zeros(timestep_max)  # episodes that reached timestep i
    occur_times = np.zeros(timestep_max)  # episodes at (s, a) at timestep i
    for episode in episodes:
        for i, (s_opt, a_opt, _, _) in enumerate(episode):
            # BUG FIX: the original did `total_times += 1`, bumping the counter
            # of EVERY timestep once per visited step, which inflates the
            # denominators; only timestep i was actually reached here.
            total_times[i] += 1
            if s == s_opt and a == a_opt:
                occur_times[i] += 1
    rho = 0
    for i in range(timestep_max):
        if total_times[i]:
            # Per-timestep visit frequency, discounted by gamma^i.
            rho += gamma**i * occur_times[i] / total_times[i]
    return (1 - gamma) * rho

gamma = 0.5
timestep_max = 1000
np.random.seed(0)  # fixed seed so the two policies are compared reproducibly
episodes_1 = sample(MDP, Pi_1, timestep_max, 1000)
episodes_2 = sample(MDP, Pi_2, timestep_max, 1000)
# Occupancy measure of the pair (s4, "概率前往") under each policy; Pi_2 assigns
# that action probability 0.9 in s4 (vs 0.5 under Pi_1), so rho_2 should be larger.
rho_1 = occupancy(episodes_1, "s4", "概率前往", timestep_max, gamma)
rho_2 = occupancy(episodes_2, "s4", "概率前往", timestep_max, gamma)
print(rho_1, rho_2)
