File size: 2,815 Bytes
8f07272 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 |
extends Node3D
class_name AIController
## Bridge between a controlled Player and an external RL framework:
## exposes observations, actions, rewards and episode state for polling.
# Controlled player; assigned via init(). NOTE(review): assumed to be set
# before _physics_process runs — confirm spawn/init order in the scene.
var _player : Player
# ------------------ Godot RL Agents Logic ------------------------------------#
# Control source selector, "human" or "model"; written by set_heuristic().
# NOTE(review): not read in this file — presumably consumed by the Player.
var heuristic := "human"
# Episode-finished flag; read via get_done(), cleared via set_done_false().
var done := false
# example actions
# Continuous 2D movement command, each axis clamped to [-1, 1] (set_action()).
var movement_action := Vector2(0.0, 0.0)
# Continuous 2D look/aim command, each axis clamped to [-1, 1] (set_action()).
var look_action := Vector2(0.0, 0.0)
# Discrete actions decoded from the policy output (true when the value is 1).
var jump_action := false
var shoot_action := false
# NOTE(review): not referenced in this file — presumably read/written by the
# RL plugin or the Player; verify before removing.
var needs_reset := false
# Immediate reward accumulator; consumed by get_reward(), cleared by
# zero_reward().
var reward := 0.0
# Bookkeeping counters: stagnation counter (get_reward()) and episode step
# counter (_physics_process()); both cleared by reset().
var n_steps_without_positive_reward = 0
var n_steps = 0
# Child sensor nodes; both expose get_observation() plus team / collision
# mask properties (see set_team()).
@onready var wide_raycast_sensor = $WideRaycastSensor
@onready var narrow_raycast_sensor = $NarrowRaycastSensor
func init(player):
	## Remember which player this controller drives.
	_player = player
func set_team(value):
	## Assign both raycast sensors to a team and point their collision masks
	## at the correct physics layers: team 0 lives on layer mask 8 and treats
	## 16 as enemy; team 1 is the mirror image. For any other team value the
	## team id is still stored but the masks are left untouched (matches the
	## original behaviour).
	var sensors = [wide_raycast_sensor, narrow_raycast_sensor]
	for sensor in sensors:
		sensor.team = value
	# Resolve (own layer, enemy layer) once instead of duplicating four
	# assignments per branch.
	var team_mask
	var enemy_mask
	if value == 0:
		team_mask = 8
		enemy_mask = 16
	elif value == 1:
		team_mask = 16
		enemy_mask = 8
	else:
		return
	for sensor in sensors:
		sensor.team_collision_mask = team_mask
		sensor.enemy_collision_mask = enemy_mask
func reset():
	## Begin a fresh episode: zero both step counters.
	n_steps = 0
	n_steps_without_positive_reward = 0
func reset_if_done():
	## Convenience hook: run reset() only when the episode has finished.
	if done: reset()
func get_obs():
	## Build the observation vector — wide-sensor readings followed by
	## narrow-sensor readings — wrapped in the dictionary shape the RL
	## framework expects.
	var readings = []
	readings.append_array(wide_raycast_sensor.get_observation())
	readings.append_array(narrow_raycast_sensor.get_observation())
	return {"obs": readings}
func get_reward():
	## Return the reward for this step (immediate reward plus any shaping
	## term) while maintaining the stagnation counter: positive reward
	## decays the counter (never below zero), otherwise it grows.
	var step_reward = reward + shaping_reward()
	if step_reward > 0.0:
		n_steps_without_positive_reward = max(0, n_steps_without_positive_reward - 1)
	else:
		n_steps_without_positive_reward += 1
	return step_reward
func zero_reward():
	## Clear the accumulated immediate reward once it has been consumed.
	reward = 0.0
func shaping_reward():
	## Placeholder for dense/shaping reward terms; contributes nothing yet.
	return 0.0
func set_heuristic(h):
	## Select the control source: "human" (player input) or "model"
	## (RL policy output). Nothing else to do here.
	heuristic = h
func get_obs_space():
	## Describe the observation space by measuring one live observation.
	var obs_len = len(get_obs()["obs"])
	return {
		"obs": {"size": [obs_len], "space": "box"},
	}
func get_action_space():
	## Describe the action space: two continuous 2D actions (movement, look)
	## and two binary discrete actions (jump, shoot). Key order matches the
	## fields decoded in set_action().
	var space = {}
	for key in ["movement_action", "look_action"]:
		space[key] = {"size": 2, "action_type": "continuous"}
	for key in ["jump_action", "shoot_action"]:
		space[key] = {"size": 2, "action_type": "discrete"}
	return space
func get_done():
	## Report whether the current episode has finished.
	return done
func set_done_false():
	## Acknowledge an episode end by clearing the done flag.
	done = false
func set_action(action):
	## Decode a raw action dictionary from the policy into typed fields.
	## Continuous pairs are clamped to [-1, 1] per axis; discrete entries
	## become booleans by comparison against 1.
	var move = action["movement_action"]
	var look = action["look_action"]
	movement_action = Vector2(clamp(move[0], -1.0, 1.0), clamp(move[1], -1.0, 1.0))
	look_action = Vector2(clamp(look[0], -1.0, 1.0), clamp(look[1], -1.0, 1.0))
	jump_action = action["jump_action"] == 1
	shoot_action = action["shoot_action"] == 1
func _physics_process(delta):
	## Per-physics-tick bookkeeping: count episode steps and force a respawn
	## once the hard step limit is exceeded (episode timeout).
	n_steps += 1
	# 4000 physics ticks is the episode timeout; keep in sync with the RL
	# training config if it changes.
	if n_steps > 4000 and _player != null:
		# Null guard: _player is only set by init(); without it the original
		# code crashed if physics ticked before initialization.
		_player.needs_respawn = true
|