from arguments import get_args
from fieldEnv import field
import numpy as np
import visualize
from RVO import RVO_update, compute_V_des

arg = get_args()

# Workspace model consumed by the RVO planner.
ws_model = {
    # robot radius
    'robot_radius': arg.radius_player,
    # circular obstacles, format [x, y, rad] -- empty: obstacle-free field
    # example with obstacles:
    # [[-0.3, 2.5, 0.3], [1.5, 2.5, 0.3], [3.3, 2.5, 0.3], [5.1, 2.5, 0.3]]
    'circular_obstacles': [],
    # rectangular boundary, format [x, y, width/2, height/2]
    'boundary': [],
}

####### initialize environment hyperparameters ######
# env_name = "RoboschoolWalker2d-v1"
env_name = arg.env_name
max_ep_len = arg.max_ep_len
env = field(arg)

# Start a new episode and build the per-agent goal list for the RVO planner.
X = env.reset()
# The first agent is steered to the horizontal center of the field at y = 0;
# every remaining coordinate keeps its current value from the reset state.
goal = np.array([arg.field_width / 2, 0])
goal = np.concatenate((goal, X[2:]))
goal = goal.reshape(-1, 2).tolist()  # list of [x, y] goals, one per agent
current_ep_reward = 0
# Initial velocities are all zero.
# NOTE(review): shape (len(X), 2) allocates one row per scalar coordinate,
# not per agent (len(X) // 2); the surplus rows appear unused after the first
# RVO_update call, but confirm against RVO.py before shrinking it.
V = np.zeros((len(X), 2))
V_max = [1.0] * len(goal)  # identical speed cap for every agent
trial_s = []  # per-step absolute states, collected for visualization
for t in range(1, max_ep_len + 1):
    # The RVO helpers operate on plain lists of [x, y] pairs.
    X = X.reshape(-1, 2).tolist()
    V = V.reshape((-1, 2)).tolist()
    # Desired velocities point each agent straight at its goal; RVO_update
    # then adjusts them for reciprocal collision avoidance.
    V_des = compute_V_des(X, goal, V_max)
    V = RVO_update(X, V_des, V, ws_model)

    # The environment expects a flat action vector.
    V = np.array(V).reshape(-1)
    X, reward, done = env.step(V)
    # Fix: reward was previously discarded even though current_ep_reward
    # exists to track the episode return.
    current_ep_reward += reward

    trial_s.append(env.derive_abs_state())
    if done:
        break
visualize.draw(trial_s)
