import copy
import random
from typing import Dict, List, Tuple
import time
# import gym
# import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import pickle
import json
import logging
# Configure the root logger once at import time so all modules log with timestamps.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
import socket
import os
# Working directory captured at import time (also recomputed inside __main__ below).
HOME = os.getcwd()
print(HOME)
# from IPython.display import clear_output


class Actor(nn.Module):
    """Deterministic policy network: maps a state vector to an action in (-1, 1).

    Two tanh hidden layers followed by a tanh output layer, so every action
    component is bounded in (-1, 1). The output layer's weights and biases are
    initialized uniformly in [-init_w, init_w] to keep initial actions small,
    a standard DDPG trick.
    """

    def __init__(
        self,
        in_dim: int,
        out_dim: int,
        num_cells: int = 128,
        init_w: float = 3e-3,
    ):
        """Initialize.

        Args:
            in_dim: dimensionality of the state vector.
            out_dim: dimensionality of the action vector.
            num_cells: width of each hidden layer.
            init_w: half-width of the uniform init range for the output layer.
        """
        super(Actor, self).__init__()

        self.hidden1 = nn.Linear(in_dim, num_cells)
        self.hidden2 = nn.Linear(num_cells, num_cells)
        self.out = nn.Linear(num_cells, out_dim)

        # Small uniform init on the final layer -> near-zero initial actions.
        self.out.weight.data.uniform_(-init_w, init_w)
        self.out.bias.data.uniform_(-init_w, init_w)

    def forward(self, state: torch.Tensor) -> torch.Tensor:
        """Forward pass: state -> bounded action tensor.

        Uses torch.tanh instead of the deprecated F.tanh (same math, no
        deprecation warning on current PyTorch releases).
        """
        x = torch.tanh(self.hidden1(state))
        x = torch.tanh(self.hidden2(x))
        action = torch.tanh(self.out(x))

        return action
    
    
def decode_data(data):
    """Decode one JSON message from the simulator.

    The message carries an 'observation' mapping (each value is cast to int,
    in the order the keys appear in the JSON), plus scalar 'reward',
    'terminated' and 'truncated' fields.

    Returns:
        (observation, reward, terminated, truncated) where observation is a
        list of ints.
    """
    msg = json.loads(data)
    # dict preserves the JSON key order, so this matches iterating the keys.
    observation = [int(value) for value in msg['observation'].values()]
    return observation, msg['reward'], msg['terminated'], msg['truncated']

def encode_data(action, rudder, reset_flag=1):
    """Build the JSON command message for the boat controller.

    The action (in [-1, 1]) is scaled by 5 degrees and added to the current
    rudder angle, which is then clamped to [-30, 30] degrees. Both rudders
    get the same angle; propeller speeds are fixed at 1000.

    Args:
        action: policy output; anything float() accepts (scalar / 1-element array).
        rudder: current rudder angle in degrees.
        reset_flag: value sent in the 'restart' field (default 1).

    Returns:
        (encoded_bytes, new_rudder): the UTF-8 JSON payload and the clamped
        rudder angle to carry into the next step.
    """
    new_rudder = rudder + float(action) * 5
    # Clamp to the physical rudder range of +/-30 degrees.
    new_rudder = min(30.0, max(-30.0, new_rudder))

    message = {
        'boatname': 'SLM7001',
        'restart': reset_flag,
        'rudl': new_rudder,
        'rudr': new_rudder,
        'rspl': 1000,
        'rspr': 1000,
        'subSystem': "control",
    }

    encoded = json.dumps(message, sort_keys=True, indent=4, separators=(',', ':'))
    return encoded.encode('utf-8'), new_rudder



hidden_num = 64  # hidden layer width; must match the trained checkpoint

if __name__ == '__main__':

    # --- TCP connection to the boat simulator / controller ---
    REMOTE_HOST = '127.0.0.1'
    REMOTE_PORT = 10905
    tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    tcp_socket.connect((REMOTE_HOST, REMOTE_PORT))

    obs_dim = 3
    action_dim = 1

    HOME = os.getcwd()
    print(HOME)
    model_path = os.path.join(HOME, 'DDPG_result/ddpg_model.pt')

    # device: cpu / gpu
    device = torch.device(
        "cuda" if torch.cuda.is_available() else "cpu"
    )
    print(device)

    # map_location lets a checkpoint saved on GPU load on a CPU-only host.
    checkpoint = torch.load(model_path, map_location=device)

    actor = Actor(in_dim=obs_dim, out_dim=action_dim, num_cells=hidden_num).to(device=device)
    actor.load_state_dict(checkpoint['policy_net_state_dict'])
    actor.eval()  # inference only: disable dropout/batchnorm training behavior

    rudder = 0
    # reset
    action_data, rudder = encode_data(0, rudder=0, reset_flag=0)
    tcp_socket.send(action_data)  # action
    try:
        while True:
            # Receive one observation message from the simulator.
            # NOTE(review): assumes each message fits in a single 1024-byte
            # recv — TCP gives no framing guarantee; confirm with the sender.
            info = tcp_socket.recv(1024)
            observation, _, _, _ = decode_data(info)
            state = torch.tensor(observation, dtype=torch.float32, device=device).unsqueeze(0)

            # state is already a tensor on the right device; no grad needed
            # at inference time.
            with torch.no_grad():
                selected_action = actor(state)[0].cpu().numpy()

            print(f'select action is: {selected_action}')

            action_data, rudder = encode_data(selected_action, rudder=rudder, reset_flag=0)
            tcp_socket.send(action_data)  # action
    finally:
        # Reached on error/interrupt — the loop itself runs until killed.
        tcp_socket.close()
