import gym
from gym import spaces
from gym.core import ActType, ObsType
import numpy as np
from typing import Optional, Tuple


class RLPredEnv2(gym.Env):
    """Trading-style prediction environment.

    The agent walks through the rows of ``X`` in order. At each step it
    adjusts an integer position by -1, 0 or +1 (discrete actions 0/1/2)
    and is rewarded by ``position * y[t]`` minus a transaction cost
    proportional to the position change. ``info["gain"]`` additionally
    reports the same per-step PnL scaled by a compounding portfolio value.
    """

    # Per-unit cost charged whenever the position changes.
    TRANSACTION_COST = 7.5e-4
    # Scale factor applied to the raw per-step PnL to form the reward.
    REWARD_SCALE = 1000
    # Penalty returned when an action would push the position out of bounds.
    ILLEGAL_MOVE_REWARD = -10

    def __init__(self, X, y):
        """Create the environment.

        Args:
            X: sequence of per-step feature vectors (1-D numpy arrays,
               all of the same length).
            y: sequence of per-step returns, aligned with ``X``.
        """
        super().__init__()
        self.X = X
        self.y = y
        self.maxpos = 10  # position magnitude bound (see step() for exact check)
        # reset() initializes the episode state used by observation() below.
        self.reset()
        # Three discrete actions, mapped to position deltas {-1, 0, +1} in step().
        self.action_space = spaces.Discrete(3, start=0)

        self.observation_space = spaces.Box(
            low=-1, high=1, shape=self.observation().shape, dtype=np.float32
        )

    def reset(
        self,
        *,
        seed: Optional[int] = None,
        options: Optional[dict] = None,
    ) -> Tuple[ObsType, dict]:
        """Start a new episode at the first row of X with a flat position.

        Returns:
            (observation, info) per the new-style gym API.
        """
        # Seed the base class RNG so a passed seed actually takes effect
        # (the original implementation silently ignored `seed`).
        super().reset(seed=seed)
        self.done = False
        # Visit every row in order; shuffling/sub-sampling could be done here.
        self.watch_idx = np.arange(len(self.X))
        self.k = 0       # index into watch_idx (current step)
        self.pos = 0     # current integer position
        self.value = 1   # compounded portfolio value, used for info["gain"]
        return self.observation(), {}

    def step(self, action: ActType) -> Tuple[ObsType, float, bool, bool, dict]:
        """Apply one action; returns (obs, reward, terminated, truncated, info)."""
        delta = action - 1  # map discrete action {0, 1, 2} -> {-1, 0, +1}
        info = {}
        ret = self.y[self.watch_idx[self.k]]  # return realized this step
        # NOTE(review): bounds are asymmetric — +maxpos is reachable but
        # -maxpos is not (`<=`). Preserved as-is; confirm intent.
        if self.pos + delta > self.maxpos or self.pos + delta <= -self.maxpos:
            # Illegal move: position is left unchanged and a fixed penalty paid.
            reward = self.ILLEGAL_MOVE_REWARD
            info["gain"] = 0
        else:
            self.pos += delta
            # Per-step PnL: position times return minus transaction cost
            # (computed once; previously duplicated for reward and gain).
            pnl = self.pos * ret - abs(delta) * self.TRANSACTION_COST
            reward = pnl * self.REWARD_SCALE
            info["gain"] = pnl * self.value
        # Portfolio value compounds by the realized return regardless of action.
        self.value *= 1 + ret
        self.k += 1
        self.done = self.k >= len(self.watch_idx)

        return self.observation(), reward, self.done, False, info

    def observation(self):
        """Current feature row plus normalized position; zeros past the end."""
        if self.k < len(self.watch_idx):
            obs = np.concatenate(
                [self.X[self.watch_idx[self.k]], [self.pos / self.maxpos]]
            )
        else:
            # Terminal placeholder with the same shape as a live observation.
            obs = np.zeros(self.X[0].shape[0] + 1)
        # Cast so the returned dtype matches the declared observation_space
        # (float32); np.concatenate would otherwise yield float64.
        return obs.astype(np.float32)
