''' A DQN bot wrapper for Limit Hold'em: maintains one agent per user,
persists model checkpoints to disk, and feeds episodic experience
(with a sparse terminal reward) into the agent's replay buffer.
'''
#coding=utf-8

import torch
import os
import platform

# Fallback pretrained-model path (currently only referenced in Bot's
# commented-out fallback; kept for reference).
model_path = "model.pth"

# Directory where per-user model checkpoints are stored.
mode_dir = "/data/models/"
if platform.system().lower() == 'windows':
    mode_dir = "d:/temp/models/"
# exist_ok=True avoids the check-then-create race of a separate
# os.path.isdir() test (and is a no-op when the directory exists).
os.makedirs(mode_dir, exist_ok=True)

class Bot(object):
    """Per-user DQN agent wrapper with episodic training.

    Transitions are accumulated during an episode via ``step`` and pushed
    into the agent's replay buffer when ``game_over`` is called.  Only the
    terminal transition carries the episode reward (sparse reward); the
    agent is checkpointed to disk every 100 completed episodes.
    """

    def __init__(self, uid):
        """Load the user's saved agent if one exists, else build a fresh one.

        Args:
            uid: unique user id; the checkpoint lives at ``<mode_dir>/<uid>.pth``.
        """
        self.uid = uid
        uid_model_path = os.path.join(mode_dir, self.uid + ".pth")
        if os.path.exists(uid_model_path):
            # NOTE(review): torch.load unpickles arbitrary objects — only
            # load checkpoints from the trusted model directory.
            self.agent_0 = torch.load(uid_model_path)
        else:
            # Local import so rlcard is only required when no checkpoint exists.
            from rlcard.agents import DQNAgent
            self.agent_0 = DQNAgent(
                num_actions=5,  # (call, raise, check, fold, allin)
                state_shape=[72],
                mlp_layers=[64, 64],
                device="cpu",
            )

        self.uid_model_path = uid_model_path
        self.states = []   # states observed during the current episode
        self.actions = []  # actions taken during the current episode
        self.reward = 0    # episode reward, set by game_over
        self.count = 0     # number of completed episodes

    def step(self, state):
        """Pick an action for ``state`` and record the transition.

        Returns:
            The chosen action as an int.
        """
        info = self.agent_0.eval_step(state)
        print("action:", info)
        # eval_step returns (action, info_dict); index 0 is the action.
        action = int(info[0])
        self.states.append(state)
        self.actions.append(action)
        return action

    def feed(self):
        """Push the episode's transitions into the agent's replay buffer.

        Each transition is ``[state, action, reward, next_state, done]``.
        Only the last transition is terminal: it carries ``self.reward``
        and uses itself as ``next_state``; all earlier transitions get
        reward 0 and the following state as ``next_state``.
        """
        count = len(self.states)
        if count == 0:
            return
        for idx, state in enumerate(self.states):
            is_end = idx == count - 1
            reward = self.reward if is_end else 0
            next_state = state if is_end else self.states[idx + 1]
            data = [state, self.actions[idx], reward, next_state, is_end]
            print("feed data:", data)
            self.agent_0.feed(data)

    def game_over(self, reward):
        """Finish an episode: train on it, reset buffers, checkpoint periodically.

        Args:
            reward: final episode reward, assigned to the terminal transition.
        """
        self.count += 1
        self.reward = reward
        self.feed()
        self.states = []
        self.actions = []
        self.reward = 0
        if self.count % 100 == 0:  # checkpoint every 100 episodes
            self.save()

    def save(self):
        """Persist the whole agent object to the per-user checkpoint path."""
        torch.save(self.agent_0, self.uid_model_path)