import gym
from gym import spaces
from gym.core import ActType, ObsType
import numpy as np
from typing import Optional, Tuple


class RLPredEnv(gym.Env):
    """One-step prediction environment over a labelled dataset (X, y).

    Each episode picks a single random sample; the agent emits a 3-vector of
    confidences in [0, 1]: index 0 = hold, 1 = predict positive value,
    2 = predict negative value (inferred from the reward structure — TODO
    confirm with the training code). Reward is the target magnitude scaled by
    the chosen confidence when the prediction is right, and a 5x penalty
    when it is wrong.
    """

    def __init__(self, X, y):
        """Store the dataset and build action/observation spaces.

        Args:
            X: indexable collection of feature vectors (numpy-compatible).
            y: indexable collection of scalar targets aligned with X.
        """
        super(RLPredEnv, self).__init__()
        self.X = X
        self.y = y
        # reset() must run before observation_space is built below, because
        # observation() reads self.k / self.watch_idx set up by reset().
        self.reset()
        self.action_space = spaces.Box(low=0, high=1, shape=(3,), dtype=np.float32)

        # NOTE(review): assumes features in X are scaled to [-1, 1] — confirm
        # against the data-preparation pipeline.
        self.observation_space = spaces.Box(
            low=-1, high=1, shape=self.observation().shape, dtype=np.float32
        )

    def reset(
        self,
        *,
        seed: Optional[int] = None,
        options: Optional[dict] = None,
    ) -> Tuple[ObsType, dict]:
        """Start a new single-sample episode.

        Returns:
            (observation, info) per the gym >= 0.26 API.
        """
        # BUG FIX: `seed` was accepted but ignored. Seed the env RNG and draw
        # from self.np_random so episodes are reproducible when seeded.
        super().reset(seed=seed)
        self.done = False
        self.watch_idx = [int(self.np_random.integers(len(self.X)))]
        self.k = 0
        return self.observation(), {}

    def step(self, action: ActType) -> Tuple[ObsType, float, bool, bool, dict]:
        """Score the agent's prediction for the current sample.

        Args:
            action: 3-vector of confidences; the argmax selects the operation.

        Returns:
            (observation, reward, terminated, truncated, info).
        """
        op = np.argmax(action)
        watch_value = self.y[self.watch_idx[self.k]]
        if op == 0:
            # Hold: no reward either way.
            reward = 0
        elif op == 1:
            # Predicted positive.
            if watch_value >= 0:
                reward = watch_value * action[op]
            else:
                # BUG FIX: was `-5 * watch_value * action[op]`, which is
                # positive when watch_value < 0 — it rewarded a wrong
                # prediction. The 5x factor must be a penalty (negative).
                reward = 5 * watch_value * action[op]
        else:
            # Predicted negative.
            if watch_value <= 0:
                reward = -watch_value * action[op]
            else:
                # BUG FIX: was `5 * watch_value * action[op]`, which is
                # positive when watch_value > 0 — it rewarded a wrong
                # prediction. Penalize instead.
                reward = -5 * watch_value * action[op]
        self.k += 1
        self.done = self.k >= len(self.watch_idx)
        return self.observation(), reward, self.done, False, {}

    def observation(self):
        """Return the current sample's features, or zeros once the episode ends."""
        if self.k < len(self.watch_idx):
            return self.X[self.watch_idx[self.k]]
        return np.zeros_like(self.X[0])
