import numpy as np


# 梯度下降原理
def gradient_desend():
    """Demonstrate full-batch gradient descent on a 2-feature linear model.

    Synthetic data is generated from a known relation y = x @ w + b, then
    randomly initialised parameters are fitted by minimising the summed
    squared error. The loss is printed once per epoch.
    """
    # Ground-truth parameters used to synthesise the training data.
    w, b = np.array([[1.4], [0.9]]), 0.2  # true weight (2x1) and scalar bias
    x = np.random.random((100, 2))  # sampled inputs, shape (100, 2)
    y = np.dot(x, w) + b  # noiseless targets, shape (100, 1)

    w, b = np.random.randn(2, 1), 0  # random initial parameters to be fitted
    lr = 0.01  # learning rate

    for epoch in range(20):
        # Compute the residual once so w and b are updated simultaneously
        # from the SAME parameter values (the original updated w first and
        # then reused the new w when forming the b gradient).
        error = np.dot(x, w) + b - y  # (100, 1): prediction - target
        # dl/dw = 2 * x^T (xw + b - y), summed over the batch
        w = w - lr * 2 * np.dot(x.T, error)
        # dl/db = 2 * (xw + b - y); b is a single scalar, so sum the batch
        b = b - lr * 2 * error.sum()
        print(f"Epoch: {epoch}, Loss: {((np.dot(x, w) + b - y) ** 2).sum():.3f}")

    # NOTE(review): illustrative only -- this inner function is never called
    # anywhere in the file, so it does not affect gradient_desend's output.
    def batch_gradient_decend(w, b):  # mini-batch gradient descent variant
        batch = 10  # rows of x and y consumed per parameter update
        for epoch in range(20):
            for i in range(int(len(x) / batch)):
                x_batch = x[batch * i: batch * (i + 1)]
                y_batch = y[batch * i: batch * (i + 1)]
                # Same simultaneous-update fix as the full-batch loop above.
                error = np.dot(x_batch, w) + b - y_batch
                w = w - lr * 2 * np.dot(x_batch.T, error)
                b = b - lr * 2 * error.sum()
            print(f"Epoch: {epoch}, Loss: {((np.dot(x, w) + b - y) ** 2).sum():.4f}")


# 迭代动态规划：策略评估与策略提升
def dynamic_program():
    """Policy iteration (iterative evaluation + greedy improvement) on a chain.

    Environment: states S0-S1-S2-S3-S4 laid out on a line, S4 terminal.
    Two actions per state: index 0 = left, index 1 = right. Every move is
    rewarded -1 except stepping right from S3 into the terminal state,
    which is rewarded +1. Moving left from S0 stays in S0 (still costs -1).

    Returns:
        (state_value, actions): the converged state values (length-5 list,
        terminal value fixed at 0) and the final policy as a 4x2 array of
        action probabilities (one row per non-terminal state).
    """
    state_value = [0 for _ in range(5)]  # V(s); terminal S4 is never updated
    actions = np.full((4, 2), 0.5)  # uniform initial policy: [P(left), P(right)]

    for epoch in range(3):
        # ---- policy evaluation: in-place (Gauss-Seidel) sweeps ----
        while True:
            delta = []  # per-state |V_new - V_old|, used as the stop criterion
            for i in range(5 - 1):  # sweep the non-terminal states S0..S3
                temp = state_value[i]  # value before this update

                # Left always costs -1; S0 bounces back onto itself.
                left_value = actions[i][0] * (-1 + state_value[i - 1 if i != 0 else i])
                # Right costs -1 except from S3, which earns the +1 goal reward.
                reward = 1 if i == 5 - 2 else -1
                right_value = actions[i][1] * (reward + state_value[i + 1])

                state_value[i] = left_value + right_value
                delta.append(abs(temp - state_value[i]))

            print(state_value)  # ———————— watch the values converge ———————— #
            if np.max(delta) < 0.1: break  # largest per-sweep change below 0.1

        # ---- policy improvement: act greedily w.r.t. the current values ----
        for i in range(5 - 1):
            if i != 5 - 2:  # both moves cost -1, so compare successor values only
                if state_value[i-1 if i != 0 else i] > state_value[i+1]:
                    actions[i][0], actions[i][1] = 1, 0
                else:
                    actions[i][0], actions[i][1] = 0, 1
            else:  # from S3 the rewards differ: include left -1, right +1
                if state_value[i-1 if i != 0 else i] - 1 > state_value[i+1] + 1:
                    actions[i][0], actions[i][1] = 1, 0
                else:
                    actions[i][0], actions[i][1] = 0, 1

    return state_value, actions


# 采样Q-learning算法
class Env:
    """Minimal S0..S4 chain environment (S4 terminal) for Q-learning sampling."""

    def reset(self):
        """Begin a new episode: agent placed at S0, done flag cleared."""
        self.state, self.done = 0, False
        return self.state

    def step(self, action):
        """Apply `action` (1 = right, anything else = left).

        Returns (state, reward, done). Every move costs -1 except the
        rightward move out of S3, which pays +1 and ends the episode.
        """
        self.reward = -1  # default cost of any move
        if action == 1:  # move right
            if self.state == 3:  # S3 -> terminal: the only rewarded transition
                self.reward = 1
                self.done = True
            self.state += 1
        else:  # move left; S0 acts as a wall
            if self.state > 0:
                self.state -= 1
        return self.state, self.reward, self.done


class Qlearning:
    """Tabular Q-learning agent for the 5-state chain environment."""

    def __init__(self, epsilon=0.3, lr=0.1, gamma=0.9):
        """Create a zero-initialised Q-table.

        Args:
            epsilon: exploration probability for epsilon-greedy selection.
            lr: learning rate for the TD update.
            gamma: discount factor.
        """
        # One row per state (5), one column per action (left/right).
        self.q_values = np.zeros((5, 2))
        self.epsilon = epsilon
        self.lr = lr
        self.gamma = gamma

    def choose_action(self, state):
        """Epsilon-greedy: exploit with probability 1-epsilon, else explore."""
        if np.random.uniform() < 1 - self.epsilon:
            return np.argmax(self.q_values[state])
        return np.random.randint(0, 2)

    def update(self, s, a, r, ns):
        """One-step Q-learning update for transition (s, a, r, ns).

        Works for the terminal next state ns=4 as well, since its Q-row is
        never written and stays all-zero.
        """
        # q_target = r + gamma * max_a' q(ns, a')
        q_target = r + self.gamma * self.q_values[ns].max()
        # q(s,a) += lr * (q_target - q(s,a))
        self.q_values[s][a] += self.lr * (q_target - self.q_values[s][a])


def run():
    """Train the Q-learning agent on the chain environment for 100 episodes.

    Performs online (single-step) updates during each episode and dumps the
    Q-table every 20 episodes for inspection.
    """
    env, agent = Env(), Qlearning()

    for epoch in range(100):
        state = env.reset()
        while True:
            action = agent.choose_action(state)
            next_state, reward, done = env.step(action)
            agent.update(state, action, reward, next_state)  # one-step update

            if done: break
            state = next_state

        # Print the Q-rows of the non-terminal states on epochs 0, 20, 40, ...
        # (plain loop instead of a side-effect list comprehension).
        if epoch % 20 == 0:
            for i in range(4):
                print(f"S{i}:{agent.q_values[i]}")
