import os, sys
import random
import numpy as np
from typing import Optional
from pathlib import Path
sys.path.append(str(Path(__file__).parent.parent.parent))
sys.path.append(str(Path(__file__).parent.parent))
from gym import Wrapper

from policy.random_agent import RandomAgent
from env.chooseenv import make
from consts import *
from collections import deque
from .policy_manager import PolicyManager
from utils.card_type import card_judge

class TexasWrapper(Wrapper):
    """Single-agent wrapper around a 4-player Texas Hold'em environment.

    One randomly chosen seat is controlled by the learning agent; the other
    three seats are driven by opponent policies obtained from a
    ``PolicyManager``.  Observations are flattened into a feature vector, and
    the opponents' private hands are exposed via ``oppo_obs`` (useful for
    centralized training).

    Discrete action encoding:
        0 - fold
        1 - check
        2 - call
        3 - raise by half the pot
        4 - raise by the full pot
        5 - all in
    """
    def __init__(self, env, policy_manager:Optional[PolicyManager] = None) -> None:
        # NOTE(review): gym.Wrapper.__init__ is intentionally not called; the
        # env built by chooseenv.make may not expose the attributes it
        # touches — confirm before adding super().__init__(env).
        self.env = env
        self.policy_manager = policy_manager
        # TODO(review): np.eye(...)[0] is a 1-D row, so this seeds the deque
        # with its scalar entries, not with one-hot vectors — confirm intent.
        self.action_deque = deque(np.eye(len(ACTION))[0], maxlen=4)
        self._action_space = env.env_core.action_spaces['player_0']
        self._observation_space = env.env_core.observation_spaces['player_0']
        # seat index -> opponent policy; filled per episode in game_init().
        self.oppo_policy = {}
        # Rolling window of the last 50 episode payoffs (reported as 'money').
        self._benifit = deque([0], maxlen=50)

    def render_mode(self):
        # Rendering is not supported by this wrapper.
        pass

    def seed(self, seed = None):
        # The underlying environment manages its own randomness.
        pass

    def game_init(self):
        """Start a new hand: pick the agent's seat and build opponent policies."""
        self.player_id_map = self.env.player_id_map
        self.player_id_reverse_map = self.env.player_id_reverse_map
        # The learning agent occupies a random seat each episode.
        self.player_id = random.choice(list(self.player_id_map.keys()))
        self.player_idx = self.env.player_id_map[self.player_id]
        self._step = 0
        # Seat indices of opponents that folded during this hand.
        self._fold_oppo = set()
        self.player_init_payoff = self.env.payoff[self.player_id]
        oppo_idxs = [_idx for _idx in range(4) if _idx != self.player_idx]
        oppo_agents = self.policy_manager.init_agents(oppo_idxs)
        for _idx in oppo_idxs:
            self.oppo_policy[_idx] = oppo_agents[_idx]

    def reset(self, *args, **kwargs):
        """Reset the env and fast-forward opponents until it is our turn.

        Returns ``(obs_dict, info_dict)`` in the gymnasium style.  If the
        hand finishes before the agent ever gets to act, a new hand is dealt.
        """
        self.game_init()
        obss = self.env.reset()
        obss, done = self.oppo_step(obss, False)
        if done:
            # Hand ended before our seat acted — deal again.
            return self.reset()
        self.hand_cards ={f'player_{i}': self.env.env_core.unwrapped.observe(f'player_{i}')['observation'][:52] for i in range(4)}

        obs_encode = self.obs_encode(obss[self.player_idx], self.player_idx)
        card_win = self.card_win(obss)
        mask = obss[self.player_idx]['obs']['action_mask']

        # Opponents' private hands, ordered clockwise from our seat.
        encode_oppo_obs = np.concatenate([self.hand_cards[self.player_id_reverse_map[(self.player_idx+i)%4]] for i in range(1,4)], axis=-1)
        self.last_stage = obss[self.player_idx]['obs']['stage']
        return {'obs':obs_encode, 'win':card_win, 'mask':mask, 'oppo_obs':encode_oppo_obs}, {}

    def oppo_step(self, obss, done=False):
        """Let opponent policies act until it is our turn or the hand ends.

        Our observation slot is None whenever another seat is to move.
        """
        while obss[self.player_idx]['obs'] is None and not done:
            # One all-zero one-hot slot per seat; only the mover's is filled.
            # (Built with a comprehension instead of `* 4` to avoid aliasing
            # a single inner list.)
            actions = [[[0]*6] for _ in range(4)]
            oppo_idx = self.player_id_map[obss[0]['current_move_player']]
            obs_encode = np.expand_dims(self.obs_encode(obss[oppo_idx], oppo_idx), 0)
            mask = obss[oppo_idx]['obs']['action_mask']
            if obss[oppo_idx]['obs']['stage'] in ['PREFLOP']:
                # Forbid folding and going all-in before the flop.
                mask[0] = 0
                mask[-1] = 0
            actions[oppo_idx] = self.oppo_policy[oppo_idx].get_action({'obs':obs_encode, 'mask':mask})
            obss, _, done, info_before, info_after = self.env.step(actions)
            if actions[oppo_idx][0][0] == 0:
                # Action 0 is fold — remember so the hand can be masked out.
                self._fold_oppo.add(oppo_idx)
        return obss, done

    def step(self, action):
        """Apply the agent's discrete action; returns a gymnasium 5-tuple."""
        # Consistency fix: filler actions now use the same nested one-hot
        # layout ([[0]*6] per seat) that oppo_step and _decode_action emit,
        # instead of a flat [0]*6; also built without list aliasing.
        actions = [[[0]*6] for _ in range(4)]
        actions[self.player_idx] = self._decode_action(action)
        obss, reward, done, info_before, info_after = self.env.step(actions)
        self._step += 1
        obss, done = self.oppo_step(obss, done=done)
        # Hide the hands of opponents that folded.
        for i in self._fold_oppo:
            self.hand_cards[self.player_id_reverse_map[i]] = np.zeros((52, ))
        obs_encode = self.obs_encode(obss[self.player_idx], self.player_idx)
        card_win = self.card_win(obss)
        # (Dead code removed: a local `stage` was computed here but never used.)
        if not done:
            mask = obss[self.player_idx]['obs']['action_mask']
        else:
            # Terminal observation: every action nominally legal.
            mask = [1]*6
        # Sparse reward: net chips won over the whole hand, only at the end.
        reward = self.env.payoff[self.player_id] - self.player_init_payoff if done else 0
        if done:
            self._benifit.append(reward)
            reward /= 100
            # Extra penalty for losing hands that ended in an early stage.
            if self.last_stage == 'PREFLOP' and reward < 0:
                reward -= 0.2
            elif self.last_stage == 'FLOP' and reward < 0:
                reward -= 0.1
            elif self.last_stage == 'TURN' and reward < 0:
                reward -= 0.05
        else:
            self.last_stage = obss[self.player_idx]['obs']['stage']
        encode_oppo_obs = np.concatenate([self.hand_cards[self.player_id_reverse_map[(self.player_idx+i)%4]] for i in range(1,4)], axis=-1)
        return {'obs':obs_encode, 'win':card_win, 'mask':mask, 'oppo_obs':encode_oppo_obs}, reward, done, False, {'money':sum(self._benifit)}

    def card_win(self, obss):
        """Label for win-rate prediction.

        Returns 1 if our current hand beats or ties every opponent's hand,
        else 0.  Also refreshes ``self.card_strength`` with each seat's score.
        """
        can_win = []
        self_card = self.env.env_core.unwrapped.observe(self.player_id)['observation'][:52]
        self_card_type, self_card_score = card_judge(self_card)  # hand strength
        self_card_score = SCORE_BASE[self_card_type]+self_card_score
        self.card_strength = {self.player_id:self_card_score}

        # NOTE(review): folded opponents are still compared here; if they
        # should count as automatic wins, check self._fold_oppo first.
        for i in range(1, 4):
            idx = (self.player_idx+i)%4
            oppo_card = self.env.env_core.unwrapped.observe(self.player_id_reverse_map[idx])['observation'][:52]

            oppo_card_type, oppo_card_score = card_judge(oppo_card)  # hand strength
            oppo_card_score = SCORE_BASE[oppo_card_type]+oppo_card_score
            # BUG fix: record the opponent's own score (was self_card_score).
            self.card_strength[self.player_id_reverse_map[idx]] = oppo_card_score
            if oppo_card_score <= self_card_score:
                can_win.append(1)
            else:
                can_win.append(0)
        return int(all(can_win))

    def obs_encode(self, obs, player_idx):
        """Flatten one player's raw observation into a feature vector (~180 dims).

        Falls back to the previously encoded vector when ``obs['obs']`` is
        None (i.e. it is not this player's turn).

        NOTE(review): the ``player_idx`` parameter is unused — chip arrays are
        rotated around the learning agent's seat (self.player_idx) even when
        encoding an opponent.  Confirm this is intended.
        """
        if obs['obs'] is None:
            return self.last_obs_encode
        visible_card = obs['obs']['observation'][:52]
        card_type, card_score = card_judge(visible_card)  # hand strength
        card_score = SCORE_BASE[card_type]+card_score

        encode_visible_card = visible_card
        encode_hand_card = encode_card(obs['obs']['hand'])
        encode_desk_card = encode_card(obs['obs']['public_cards'])
        encode_stage = np.eye(len(GAME_STATE_N))[GAME_STATE_N[obs['obs']['stage']]]
        # Chip counts rotated so index 0 is our own seat, scaled to ~[0, 1].
        encode_all_chip = np.array(obs['obs']['all_chips'][self.player_idx:]+obs['obs']['all_chips'][:self.player_idx])/100
        encode_stakes = np.array(obs['obs']['stakes'][self.player_idx:]+obs['obs']['stakes'][:self.player_idx])/100
        encode_pot = [obs['obs']['pot']/400]

        # ~180 features in total.
        encode_obs = np.concatenate([
            encode_visible_card,
            encode_hand_card,
            encode_desk_card,
            encode_stage,
            encode_all_chip,
            encode_stakes,
            encode_pot,
            np.eye(len(CARD_TYPE))[card_type],
            # Score normalized by the maximum achievable (royal straight flush).
            [card_score/(SCORE_BASE[CARD_TYPE.ROYAL_STRAIGHT]+SCORE_DIST[CARD_TYPE.ROYAL_STRAIGHT])]
        ], axis=-1)
        self.last_obs_encode = encode_obs
        return encode_obs

    def _decode_action(self, action):
        """Convert a discrete action index into the env's nested one-hot format."""
        return [np.eye(6, dtype=int)[action].tolist()]

if __name__ == '__main__':
    # BUG fix: TexasWrapper requires the base environment — the original
    # called TexasWrapper() with no arguments, which raises TypeError.
    base_env = make("texas_holdem_4p")  # TODO(review): confirm registered game name
    env = TexasWrapper(base_env)
    # reset() returns (obs_dict, info_dict) in the gymnasium style.
    obs, info = env.reset()