﻿'''
这个文件是主要开发文件，涵盖了策略全部的四个接口
-on_event接收比赛状态变化的信息。
    参数event_type表示事件类型；
    参数EventArgument表示该事件的参数，如果不含参数，则为NULL。
-get_team_info控制队名。
    修改返回值的字符串即可修改自己的队名
-get_instruction控制5个机器人的轮速(leftspeed,rightspeed)，以及最后的reset(1即表明需要reset)
    通过返回值来给机器人赋轮速
    比赛中的每拍被调用，需要策略指定轮速，相当于旧接口的Strategy。
    参数field为In/Out参数，存储当前赛场信息，并允许策略修改己方轮速。
    ！！！所有策略的开发应该在此模块
-get_placement控制5个机器人及球在需要摆位时的位置
    通过返回值来控制机器人和球的摆位。
    每次自动摆位时被调用，需要策略指定摆位信息。
    定位球类的摆位需要符合规则，否则会被重摆
'''
import random
from typing import Tuple, Union, List
import os
import numpy as np
from V5RPC import *
import math
import pickle
from baseRobot import *
from GlobalVariable import *
from collections import deque
import copy

# Multi-agent reinforcement-learning model - simplified tabular Q-learning implementation
class MultiAgentRL:
    """Per-player tabular Q-learning with a discretised field state.

    One Q-table and one replay buffer is kept per agent (5 players).
    Exploration uses epsilon-greedy with heuristic guidance toward the ball.
    """

    def __init__(self):
        # Model parameters
        self.state_dim = 40  # declared state size (NOTE(review): get_state actually builds 24 values - confirm intent)
        self.action_dim = 9  # action space: 8 movement directions + 1 "stay in place"
        self.num_agents = 5  # number of agents (5 players)
        self.gamma = 0.99    # discount factor
        self.learning_rate = 0.1  # base learning rate
        self.epsilon = 1.0   # initial exploration rate
        self.epsilon_decay = 0.995  # multiplicative exploration decay per learn() call
        self.epsilon_min = 0.1  # exploration floor
        self.batch_size = 32  # minibatch size
        self.memory_size = 10000  # replay-buffer capacity per agent
        self.step_counter = 0  # learning-step counter
        self.success_counter = 0  # success count, used to speed up epsilon decay

        # One experience-replay buffer per agent
        self.memory = [deque(maxlen=self.memory_size) for _ in range(self.num_agents)]

        # Q-values stored in dicts keyed by the discretised state tuple
        self.q_tables = [{} for _ in range(self.num_agents)]

        # Previous state and actions (maintained by the caller)
        self.prev_state = None
        self.prev_actions = [None] * self.num_agents

        # Directory where Q-tables are persisted
        self.model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'rl_models')
        os.makedirs(self.model_dir, exist_ok=True)

        # Role-specific parameters (names kept in Chinese: striker, goalkeeper,
        # defender, midfielder, defender)
        self.role_params = {
            0: {'name': '前锋', 'move_distance': 25.0, 'reward_weights': {'goal': 2.0, 'ball_distance': 1.5}},
            1: {'name': '守门员', 'move_distance': 15.0, 'reward_weights': {'goal_defense': 2.0, 'position': 1.5}},
            2: {'name': '后卫', 'move_distance': 20.0, 'reward_weights': {'defense': 1.8, 'position': 1.2}},
            3: {'name': '中场', 'move_distance': 22.0, 'reward_weights': {'support': 1.5, 'ball_distance': 1.0}},
            4: {'name': '后卫', 'move_distance': 20.0, 'reward_weights': {'defense': 1.8, 'position': 1.2}}
        }

        # Try to load previously saved Q-tables
        self._load_q_tables()

    def _state_to_key(self, state):
        """Discretise a continuous state vector into a hashable key tuple."""
        # Key features: ball position plus the closest own/opponent players
        ball_pos = state[20:22]  # ball position (normalised)

        # Distance of every own player to the ball
        my_distances = []
        for i in range(5):
            my_pos = state[i*2:i*2+2]
            dist = np.sqrt((my_pos[0] - ball_pos[0])**2 + (my_pos[1] - ball_pos[1])**2)
            my_distances.append((i, dist))

        # Distance of every opponent player to the ball
        opp_distances = []
        for i in range(5):
            opp_pos = state[10+i*2:10+i*2+2]
            dist = np.sqrt((opp_pos[0] - ball_pos[0])**2 + (opp_pos[1] - ball_pos[1])**2)
            opp_distances.append((i, dist))

        # Sort to find the players nearest to the ball
        my_distances.sort(key=lambda x: x[1])
        opp_distances.sort(key=lambda x: x[1])

        # Indices of the two nearest own and opponent players
        closest_my = [my_distances[0][0], my_distances[1][0]]
        closest_opp = [opp_distances[0][0], opp_distances[1][0]]

        # Build the discretised state:
        # 1. Ball position on a 10x10 grid
        ball_x_bin = min(9, max(0, int((ball_pos[0] + 1) * 5)))
        ball_y_bin = min(9, max(0, int((ball_pos[1] + 1) * 5)))

        # 2. Positions of the nearest own/opponent players on a 5x5 grid
        my_pos_bins = []
        for idx in closest_my:
            my_pos = state[idx*2:idx*2+2]
            x_bin = min(4, max(0, int((my_pos[0] + 1) * 2.5)))
            y_bin = min(4, max(0, int((my_pos[1] + 1) * 2.5)))
            my_pos_bins.extend([x_bin, y_bin])

        opp_pos_bins = []
        for idx in closest_opp:
            opp_pos = state[10+idx*2:10+idx*2+2]
            x_bin = min(4, max(0, int((opp_pos[0] + 1) * 2.5)))
            y_bin = min(4, max(0, int((opp_pos[1] + 1) * 2.5)))
            opp_pos_bins.extend([x_bin, y_bin])

        # 3. Coarse field region code. NOTE(review): this yields values 4..10,
        # not 0..8 as the original comment claimed; harmless because the value
        # is only used as part of a dictionary key, kept as-is for
        # compatibility with persisted Q-tables.
        field_region = 3 * (1 + int(ball_pos[0] > 0.33) + int(ball_pos[0] > 0.66)) + \
                       (1 + int(ball_pos[1] > 0.33) + int(ball_pos[1] > 0.66))

        # Combine all features into the state key
        return (ball_x_bin, ball_y_bin, *my_pos_bins, *opp_pos_bins, field_region)

    def _load_q_tables(self):
        """Load previously saved Q-tables, if any."""
        try:
            q_table_path = os.path.join(self.model_dir, 'q_tables.pkl')
            if os.path.exists(q_table_path):
                with open(q_table_path, 'rb') as f:
                    self.q_tables = pickle.load(f)
                print("成功加载Q表")
                # After a successful load, lower the exploration rate but keep
                # some exploration capability
                self.epsilon = max(self.epsilon_min + 0.1, self.epsilon * 0.2)
        except Exception as e:
            print(f"加载Q表失败: {e}")

    def save_model(self):
        """Persist the Q-tables to disk."""
        try:
            q_table_path = os.path.join(self.model_dir, 'q_tables.pkl')
            with open(q_table_path, 'wb') as f:
                pickle.dump(self.q_tables, f)
            print("Q表保存成功")
        except Exception as e:
            print(f"保存Q表失败: {e}")

    def get_state(self, field):
        """Extract the state vector from the field info, including ball velocity."""
        state = []

        # Own players' positions (coordinates normalised by 100)
        for i in range(5):
            pos = field.self_robots[i].position
            state.extend([pos.x / 100.0, pos.y / 100.0])

        # Opponent players' positions (normalised)
        for i in range(5):
            pos = field.opponent_robots[i].position
            state.extend([pos.x / 100.0, pos.y / 100.0])

        # Ball position
        ball_pos = field.ball.position
        state.extend([ball_pos.x / 100.0, ball_pos.y / 100.0])

        # Ball velocity from the previous tick's recorded position
        # (module-level BallPos history)
        ball_vx, ball_vy = 0.0, 0.0
        if GlobalVariable.tick > 0:
            prev_ball_pos = BallPos[GlobalVariable.tick - 1]
            ball_vx = (ball_pos.x - prev_ball_pos.x) / 100.0
            ball_vy = (ball_pos.y - prev_ball_pos.y) / 100.0
        state.extend([ball_vx, ball_vy])

        return np.array(state)

    def select_action(self, state, agent_idx):
        """Select an action via an improved epsilon-greedy policy."""
        # Adapt the exploration rate to the player's role and the game state
        effective_epsilon = self.epsilon

        # Ball x coordinate (normalised)
        ball_pos_x = state[20]

        # Ball in our half: defenders explore more
        if ball_pos_x < 0 and (agent_idx == 2 or agent_idx == 4):
            effective_epsilon = min(1.0, self.epsilon * 1.2)

        # Ball in the opponent half: attackers explore more
        if ball_pos_x > 0 and (agent_idx == 0 or agent_idx == 3):
            effective_epsilon = min(1.0, self.epsilon * 1.2)

        # The goalkeeper explores less and relies on learned experience
        if agent_idx == 1:
            effective_epsilon = max(0.05, self.epsilon * 0.5)

        # Epsilon-greedy action selection
        if np.random.rand() <= effective_epsilon:
            # Guided exploration: with 70% probability use a heuristic
            if np.random.rand() < 0.7:
                # Ball position
                ball_x, ball_y = state[20], state[21]
                # This agent's position
                agent_x, agent_y = state[agent_idx*2], state[agent_idx*2+1]

                # Direction toward the ball
                dx, dy = ball_x - agent_x, ball_y - agent_y
                angle = math.atan2(dy, dx)

                # BUGFIX: map the angle to the action whose movement direction
                # (action * 45 deg in action_to_position) matches it. The
                # original formula added pi before scaling, which pointed the
                # robot in the opposite direction of the ball.
                action = int(round(angle / (math.pi / 4))) % 8

                # Goalkeeper special-casing: stay near the goal line
                if agent_idx == 1:
                    # Ball in the opponent half: return to the goal centre
                    # (x = -0.95 in normalised coordinates)
                    if ball_x > 0:
                        dx, dy = -0.95 - agent_x, 0 - agent_y
                        angle = math.atan2(dy, dx)
                        action = int(round(angle / (math.pi / 4))) % 8
                    # Ball close to our goal: keep moving toward the ball
                    elif ball_x < -0.7:
                        # keep the action computed above
                        pass
                    # Otherwise: track the ball along the goal line
                    else:
                        # Closest point on the goal line to the ball's y
                        goal_y = max(-0.3, min(0.3, ball_y))
                        dx, dy = -0.95 - agent_x, goal_y - agent_y
                        angle = math.atan2(dy, dx)
                        action = int(round(angle / (math.pi / 4))) % 8

                return action
            else:
                # Fully random exploration
                return np.random.randint(self.action_dim)

        # Exploitation: use the learned Q-values
        state_key = self._state_to_key(state)
        if state_key not in self.q_tables[agent_idx]:
            self.q_tables[agent_idx][state_key] = np.zeros(self.action_dim)

        # Pick the action with the highest Q-value
        return np.argmax(self.q_tables[agent_idx][state_key])

    def action_to_position(self, action, current_pos):
        """Convert an action index to a target position, scaled per role.

        Args:
            action: 0-7 = move in direction action*45 deg, 8 = stay.
            current_pos: object with .x/.y (and optionally .robot_index).
        """
        # Default to the striker's parameters
        agent_idx = 0

        # Use the robot's own index if the position object carries it
        if hasattr(current_pos, 'robot_index'):
            agent_idx = current_pos.robot_index

        # Role-specific step length
        move_distance = self.role_params.get(agent_idx, {'move_distance': 20.0})['move_distance']

        # Action 8 means "stay in place"
        if action == 8:
            return (current_pos.x, current_pos.y)

        # Movement direction in radians (action * 45 degrees)
        angle = action * (2 * np.pi / 8)

        # Target position
        target_x = current_pos.x + move_distance * np.cos(angle)
        target_y = current_pos.y + move_distance * np.sin(angle)

        # Clamp to the field, with role-specific limits
        if agent_idx == 1:  # goalkeeper
            # The goalkeeper stays close to the goal line
            target_x = max(-110, min(-90, target_x))
            target_y = max(-40, min(40, target_y))
        else:  # all other players
            target_x = max(-95, min(95, target_x))
            target_y = max(-65, min(65, target_y))

            # Keep field players out of the opponent penalty area
            if target_x > 80 and abs(target_y) < 40:
                if target_x > 90:
                    target_x = 80
                if abs(target_y) < 30:
                    target_y = 40 if target_y > 0 else -40

        return (target_x, target_y)

    def store_experience(self, agent_idx, state, action, reward, next_state, done):
        """Store an experience; high-reward transitions are over-sampled."""
        # Store goal / successful-defence experiences three times so they are
        # sampled more often
        if abs(reward) > 5.0:
            for _ in range(3):
                self.memory[agent_idx].append((state, action, reward, next_state, done))
            # Count successes (positive high rewards only)
            if reward > 5.0:
                self.success_counter += 1
        else:
            self.memory[agent_idx].append((state, action, reward, next_state, done))

    def calculate_reward(self, field, state):
        """Compute the per-agent reward vector for the current field state."""
        rewards = [0] * self.num_agents

        # Ball position
        ball_x = field.ball.position.x
        ball_y = field.ball.position.y

        # Ball velocity from the recorded history, if available
        ball_vx, ball_vy = 0, 0
        if GlobalVariable.tick > 0:
            prev_ball_pos = BallPos[GlobalVariable.tick - 1]
            ball_vx = ball_x - prev_ball_pos.x
            ball_vy = ball_y - prev_ball_pos.y
        ball_speed = math.sqrt(ball_vx**2 + ball_vy**2)

        # Base reward: ball in the opponent half, moving toward their goal
        base_reward = 0
        if ball_x > 0:
            base_reward = 1.0
            # Extra reward while the ball keeps moving toward the opponent goal
            if ball_vx > 0:
                base_reward += 0.5
        else:
            base_reward = -0.2
            # Extra penalty while the ball moves toward our goal
            if ball_vx < 0:
                base_reward -= 0.5

        # Goal scored (ball inside the opponent goal region)
        goal_reward = 15.0 if ball_x > 95 and abs(ball_y) < 30 else 0.0

        # Goal conceded (ball inside our goal region)
        concede_penalty = -15.0 if ball_x < -95 and abs(ball_y) < 30 else 0.0

        # Reward for the ball being close to the opponent goal
        goal_proximity_reward = 0
        if ball_x > 0:
            # Distance to the opponent goal centre (110, 0)
            distance_to_goal = math.sqrt((110 - ball_x)**2 + (0 - ball_y)**2)
            # Closer means larger reward
            goal_proximity_reward = 3.0 / (1.0 + distance_to_goal / 50.0)

        # Team-formation reward: reward the whole team pushing forward
        team_formation_reward = 0
        # Average x coordinate of our players
        avg_x = sum(field.self_robots[i].position.x for i in range(5)) / 5
        # Average position in the opponent half means the team is pressing
        if avg_x > 0:
            team_formation_reward = 1.0

        # Per-agent rewards
        for i in range(self.num_agents):
            # Agent position
            agent_x = field.self_robots[i].position.x
            agent_y = field.self_robots[i].position.y

            # Agent distance to the ball
            distance_to_ball = math.sqrt((agent_x - ball_x) ** 2 + (agent_y - ball_y) ** 2)

            # Role-specific reward
            role_reward = 0

            if i == 0:  # striker
                # The striker should be in the opponent half, near ball and goal
                if agent_x > 0:
                    role_reward += 1.0
                # Extra reward when the striker is our closest player to the ball
                is_closest = True
                for j in range(5):
                    if j != i:
                        other_distance = math.sqrt((field.self_robots[j].position.x - ball_x) ** 2 + 
                                                 (field.self_robots[j].position.y - ball_y) ** 2)
                        if other_distance < distance_to_ball:
                            is_closest = False
                            break
                if is_closest and ball_x > 0:
                    role_reward += 2.0
                # Reward for being close to the opponent goal
                distance_to_goal = math.sqrt((110 - agent_x)**2 + (0 - agent_y)**2)
                role_reward += 2.0 / (1.0 + distance_to_goal / 70.0)

            elif i == 1:  # goalkeeper
                # The goalkeeper should stay near our goal (-110, 0)
                distance_to_goal = math.sqrt((agent_x + 110)**2 + agent_y**2)
                role_reward += 3.0 / (1.0 + distance_to_goal / 20.0)
                # Extra reward when the ball is deep in our half and the keeper
                # is our closest player to it
                if ball_x < -50:
                    is_closest = True
                    for j in range(5):
                        if j != i:
                            other_distance = math.sqrt((field.self_robots[j].position.x - ball_x) ** 2 + 
                                                     (field.self_robots[j].position.y - ball_y) ** 2)
                            if other_distance < distance_to_ball:
                                is_closest = False
                                break
                    if is_closest:
                        role_reward += 2.0

            elif i == 2 or i == 4:  # defenders
                # Defenders should stay in our half
                if agent_x < 0:
                    role_reward += 1.0
                # Defenders should stand between the ball and our goal
                if ball_x > agent_x and agent_x > -90:
                    role_reward += 1.5
                # Defenders should close on the ball when it is in our half
                if ball_x < 0 and distance_to_ball < 50:
                    role_reward += 2.0 / (1.0 + distance_to_ball / 30.0)

            elif i == 3:  # midfielder
                # The midfielder shifts with the ball between the halves
                if (ball_x > 0 and agent_x > 0) or (ball_x < 0 and agent_x < 0):
                    role_reward += 1.0
                # The midfielder should stay near the ball
                role_reward += 1.5 / (1.0 + distance_to_ball / 40.0)

            # Distance reward, role- and situation-dependent
            distance_reward = 0
            if i == 0:  # striker
                # Close on the ball while it is in the opponent half
                if ball_x > 0:
                    distance_reward = 2.5 / (1.0 + distance_to_ball / 40.0)
            elif i == 1:  # goalkeeper
                # Approach the ball only when it is near our goal
                if ball_x < -70:
                    distance_reward = 2.0 / (1.0 + distance_to_ball / 30.0)
            else:  # other players
                if (i == 2 or i == 4) and ball_x < 0:  # defender, ball in our half
                    distance_reward = 2.0 / (1.0 + distance_to_ball / 40.0)
                elif i == 3:  # midfielder
                    distance_reward = 1.5 / (1.0 + distance_to_ball / 50.0)

            # Penalty for players bunching up
            crowding_penalty = 0
            for j in range(5):
                if j != i:
                    other_x = field.self_robots[j].position.x
                    other_y = field.self_robots[j].position.y
                    distance_to_teammate = math.sqrt((agent_x - other_x)**2 + (agent_y - other_y)**2)
                    if distance_to_teammate < 15:  # two players too close together
                        crowding_penalty -= 0.5

            # Combined reward
            rewards[i] = base_reward + goal_reward + concede_penalty + \
                         goal_proximity_reward * 0.5 + team_formation_reward * 0.3 + \
                         role_reward + distance_reward + crowding_penalty

            # Apply the role-specific reward weights (extra fraction on top)
            role_weights = self.role_params.get(i, {'reward_weights': {}})['reward_weights']
            for key, weight in role_weights.items():
                if key == 'goal' and goal_reward > 0:
                    rewards[i] += goal_reward * (weight - 1)
                elif key == 'goal_defense' and concede_penalty < 0:
                    rewards[i] += concede_penalty * (weight - 1)
                elif key == 'ball_distance' and distance_reward > 0:
                    rewards[i] += distance_reward * (weight - 1)
                elif key == 'position' and role_reward > 0:
                    rewards[i] += role_reward * (weight - 1)

        return rewards

    def learn(self):
        """Update the Q-tables from the replay buffers."""
        self.step_counter += 1

        # Anneal the exploration rate
        if self.epsilon > self.epsilon_min:
            decay_rate = self.epsilon_decay

            # Successful experiences accelerate decay.
            # BUGFIX: the original used max(decay, decay * (1 + c*0.01)),
            # which produced a factor > 1 and *increased* epsilon; dividing
            # makes the factor smaller (faster decay) as successes accumulate.
            if self.success_counter > 0:
                decay_rate = self.epsilon_decay / (1 + self.success_counter * 0.01)
                # Consume one success per learning step
                self.success_counter = max(0, self.success_counter - 1)

            self.epsilon *= decay_rate

        # Train each agent independently
        for agent_idx in range(self.num_agents):
            # Skip agents without enough experience
            if len(self.memory[agent_idx]) < self.batch_size:
                continue

            # Uniformly sample a minibatch from the replay buffer
            minibatch = random.sample(self.memory[agent_idx], self.batch_size)

            # Per-agent base learning rate (goalkeeper learns more slowly)
            base_lr = self.learning_rate
            if agent_idx == 1:
                base_lr *= 0.8

            for state, action, reward, next_state, done in minibatch:
                state_key = self._state_to_key(state)
                next_state_key = self._state_to_key(next_state)

                # Initialise missing Q-value rows
                if state_key not in self.q_tables[agent_idx]:
                    self.q_tables[agent_idx][state_key] = np.zeros(self.action_dim)
                if next_state_key not in self.q_tables[agent_idx]:
                    self.q_tables[agent_idx][next_state_key] = np.zeros(self.action_dim)

                # Bootstrap target
                if done:
                    target = reward
                else:
                    # Greedy action in the next state, then its Q-value
                    next_action = np.argmax(self.q_tables[agent_idx][next_state_key])
                    target = reward + self.gamma * self.q_tables[agent_idx][next_state_key][next_action]

                # BUGFIX: compute the boosted rate per experience. The original
                # mutated a single effective_lr inside this loop and never
                # reset it, so one high-reward sample raised the learning rate
                # for every later sample in the minibatch.
                effective_lr = base_lr
                if abs(reward) > 5.0:
                    effective_lr = min(1.0, self.learning_rate * 1.5)

                # Bellman update
                self.q_tables[agent_idx][state_key][action] += effective_lr * (target - self.q_tables[agent_idx][state_key][action])

                # Crude prioritisation: re-insert high-TD-error experiences so
                # they are sampled again more often
                td_error = abs(target - self.q_tables[agent_idx][state_key][action])
                if td_error > 1.0 and np.random.rand() < 0.3:
                    self.memory[agent_idx].append((state, action, reward, next_state, done))

# 球轨迹预测器类（优化版，移除TensorFlow依赖）
class BallTrajectoryPredictor:
    def __init__(self):
        # 球的历史位置记录
        self.history_positions = []
        # 场地参数
        self.field_width = 220  # 场地宽度
        self.field_height = 180  # 场地高度
        self.goal_width = 60  # 球门宽度
        # 物理参数
        self.friction_factor = 0.95  # 摩擦系数
        self.bounce_factor = 0.8  # 碰撞反弹系数
        # 预测参数
        self.max_prediction_steps = 15  # 最大预测步数
        self.prediction_accuracy = []  # 记录预测准确性
        self.last_prediction = None  # 上一次的预测结果
        self.is_trained = False  # 模型是否已训练
        # 动态参数
        self.prediction_weights = {  # 不同场景下的预测权重
            'normal': 1.0,  # 正常情况
            'high_speed': 1.2,  # 高速情况
            'near_wall': 0.8,  # 接近边界
            'near_goal': 1.5   # 接近球门
        }
        
    def update_history(self, position):
        """更新球的历史位置并评估预测准确性"""
        current_pos = (position.x, position.y)
        
        # 如果有上一次预测，评估其准确性
        if self.last_prediction and len(self.history_positions) > 0:
            # 上一次的位置
            prev_pos = self.history_positions[-1]
            # 实际移动向量
            actual_dx = current_pos[0] - prev_pos[0]
            actual_dy = current_pos[1] - prev_pos[1]
            # 预测移动向量
            pred_dx = self.last_prediction[0] - prev_pos[0]
            pred_dy = self.last_prediction[1] - prev_pos[1]
            # 计算预测误差
            error = math.sqrt((pred_dx - actual_dx)**2 + (pred_dy - actual_dy)**2)
            self.prediction_accuracy.append(error)
            # 保持最近10次预测的准确性记录
            if len(self.prediction_accuracy) > 10:
                self.prediction_accuracy.pop(0)
        
        # 更新历史位置
        self.history_positions.append(current_pos)
        # 保持历史记录在合理范围内
        if len(self.history_positions) > 100:
            self.history_positions = self.history_positions[-100:]
    
    def collect_data(self, current_position, history_positions):
        """简化版收集数据函数，不执行任何操作"""
        pass
    
    def save_data(self):
        """简化版保存数据函数，不执行任何操作"""
        pass
    
    def load_data(self):
        """简化版加载数据函数，不执行任何操作"""
        return False
    
    def train(self):
        """简化版训练函数，不执行任何操作"""
        return False
    
    def load_model(self):
        """简化版加载模型函数，不执行任何操作"""
        return False
    
    def predict(self, current_position, history_positions):
        """预测球的未来位置（使用增强的线性预测）"""
        result = self.enhanced_linear_predict(current_position, history_positions)
        self.last_prediction = result
        return result
    
    def enhanced_linear_predict(self, current_position, history_positions):
        """增强的线性预测，考虑边界碰撞和摩擦"""
        if len(history_positions) < 2:
            return current_position.x, current_position.y
        
        # 使用最近两帧计算速度
        prev_pos = history_positions[-1]
        velocity_x = current_position.x - prev_pos[0] if isinstance(prev_pos, tuple) else current_position.x - prev_pos.x
        velocity_y = current_position.y - prev_pos[1] if isinstance(prev_pos, tuple) else current_position.y - prev_pos.y
        
        # 计算球速
        ball_speed = math.sqrt(velocity_x**2 + velocity_y**2)
        
        # 确定预测场景
        scenario = self._determine_scenario(current_position, ball_speed)
        
        # 获取对应场景的预测权重
        weight = self.prediction_weights.get(scenario, 1.0)
        
        # 动态预测因子，根据球速和场景调整
        base_factor = min(8, max(5, 6 + ball_speed * 0.1))
        prediction_factor = base_factor * weight
        
        # 初始预测位置（不考虑碰撞）
        future_x = current_position.x + velocity_x * prediction_factor
        future_y = current_position.y + velocity_y * prediction_factor
        
        # 考虑边界碰撞
        future_x, future_y = self._handle_boundary_collision(current_position.x, current_position.y, 
                                                           velocity_x, velocity_y, prediction_factor)
        
        return future_x, future_y
    
    def _determine_scenario(self, position, ball_speed):
        """确定当前球的预测场景"""
        # 场地边界
        half_width = self.field_width / 2
        half_height = self.field_height / 2
        half_goal = self.goal_width / 2
        
        # 检查是否接近边界
        near_x_boundary = abs(position.x) > half_width - 20
        near_y_boundary = abs(position.y) > half_height - 20
        
        # 检查是否接近球门
        near_goal = (abs(position.x) > half_width - 30) and (abs(position.y) < half_goal + 10)
        
        # 根据情况返回场景
        if near_goal:
            return 'near_goal'
        elif near_x_boundary or near_y_boundary:
            return 'near_wall'
        elif ball_speed > 10:
            return 'high_speed'
        else:
            return 'normal'
    
    def _handle_boundary_collision(self, x, y, vx, vy, steps):
        """处理边界碰撞，返回考虑碰撞后的预测位置"""
        # 场地边界
        half_width = self.field_width / 2
        half_height = self.field_height / 2
        half_goal = self.goal_width / 2
        
        # 模拟球的运动，检查碰撞
        for _ in range(int(steps)):
            # 应用摩擦
            vx *= self.friction_factor
            vy *= self.friction_factor
            
            # 更新位置
            next_x = x + vx
            next_y = y + vy
            
            # 检查x方向边界（考虑球门）
            if next_x > half_width:
                if abs(next_y) < half_goal:  # 球门区域
                    # 进球，不反弹
                    pass
                else:  # 非球门区域，反弹
                    next_x = 2 * half_width - next_x
                    vx = -vx * self.bounce_factor
            elif next_x < -half_width:
                if abs(next_y) < half_goal:  # 球门区域
                    # 进球，不反弹
                    pass
                else:  # 非球门区域，反弹
                    next_x = -2 * half_width - next_x
                    vx = -vx * self.bounce_factor
            
            # 检查y方向边界
            if next_y > half_height:
                next_y = 2 * half_height - next_y
                vy = -vy * self.bounce_factor
            elif next_y < -half_height:
                next_y = -2 * half_height - next_y
                vy = -vy * self.bounce_factor
            
            # 更新位置
            x, y = next_x, next_y
        
        return x, y
    
    def predict_multiple_steps(self, current_position, history_positions, num_steps=5):
        """预测多个时间步的球位置"""
        if len(history_positions) < 2:
            return [(current_position.x, current_position.y)] * num_steps
        
        # 使用最近两帧计算速度
        prev_pos = history_positions[-1]
        velocity_x = current_position.x - prev_pos[0] if isinstance(prev_pos, tuple) else current_position.x - prev_pos.x
        velocity_y = current_position.y - prev_pos[1] if isinstance(prev_pos, tuple) else current_position.y - prev_pos.y
        
        # 预测多个步骤
        predictions = []
        x, y = current_position.x, current_position.y
        vx, vy = velocity_x, velocity_y
        
        for _ in range(num_steps):
            # 应用摩擦
            vx *= self.friction_factor
            vy *= self.friction_factor
            
            # 更新位置
            x += vx
            y += vy
            
            # 处理边界碰撞
            x, y = self._check_boundary(x, y, vx, vy)
            
            predictions.append((x, y))
        
        return predictions
    
    def _check_boundary(self, x, y, vx, vy):
        """检查是否超出边界，如果是则调整位置"""
        # 场地边界
        half_width = self.field_width / 2
        half_height = self.field_height / 2
        half_goal = self.goal_width / 2
        
        # 检查x方向边界（考虑球门）
        if x > half_width:
            if abs(y) < half_goal:  # 球门区域
                # 进球，不反弹
                pass
            else:  # 非球门区域，反弹
                x = 2 * half_width - x
        elif x < -half_width:
            if abs(y) < half_goal:  # 球门区域
                # 进球，不反弹
                pass
            else:  # 非球门区域，反弹
                x = -2 * half_width - x
        
        # 检查y方向边界
        if y > half_height:
            y = 2 * half_height - y
        elif y < -half_height:
            y = -2 * half_height - y
        
        return x, y
    
    def get_interception_point(self, robot_pos, robot_speed):
        """计算机器人最佳拦截点"""
        if len(self.history_positions) < 2:
            return robot_pos.x, robot_pos.y
        
        # 预测多个时间步的球位置
        future_positions = self.predict_multiple_steps(Vector2(self.history_positions[-1][0], self.history_positions[-1][1]), 
                                                     self.history_positions[:-1], 10)
        
        # 计算机器人到每个预测点的到达时间
        best_point = None
        min_time_diff = float('inf')
        
        for i, (ball_x, ball_y) in enumerate(future_positions):
            # 计算机器人到该点的距离
            distance = math.sqrt((ball_x - robot_pos.x)**2 + (ball_y - robot_pos.y)**2)
            # 估计机器人到达该点所需的时间（步数）
            robot_time = distance / max(1.0, robot_speed)
            # 球到达该点的时间（步数）
            ball_time = i + 1
            
            # 计算时间差
            time_diff = abs(robot_time - ball_time)
            
            # 如果这个点比之前找到的点更适合拦截，更新最佳拦截点
            if time_diff < min_time_diff:
                min_time_diff = time_diff
                best_point = (ball_x, ball_y)
        
        # 如果找不到合适的拦截点，返回球的当前位置
        if best_point is None:
            return self.history_positions[-1][0], self.history_positions[-1][1]
        
        return best_point
    
    def linear_predict(self, current_position, history_positions):
        """简单的线性预测作为备选"""
        # 调用增强的线性预测
        return self.enhanced_linear_predict(current_position, history_positions)

# Global ball-trajectory predictor instance
ball_predictor = BallTrajectoryPredictor()

# Global multi-agent reinforcement-learning model instance
marl_model = MultiAgentRL()

# Training period: train the model once every TRAIN_INTERVAL frames
TRAIN_INTERVAL = 100

# Feed one ball-position sample into the global trajectory predictor.
def collect_ball_trajectory_data(current_position, history_positions):
    """Record the current ball position and pass it to the (no-op) collector."""
    predictor = ball_predictor
    predictor.update_history(current_position)
    predictor.collect_data(current_position, history_positions)
    
# Compute the robot's best interception point for the ball.
def calculate_interception_point(robot_pos, robot_speed):
    """Return the best interception point for a robot.

    Args:
        robot_pos: the robot's current position.
        robot_speed: estimate of the robot's current speed.

    Returns:
        tuple: best interception point coordinates (x, y).
    """
    return ball_predictor.get_interception_point(robot_pos, robot_speed)

# Predict the ball trajectory using the enhanced linear model.
def predict_ball_trajectory_nn(current_position, history_positions):
    """Predict the ball's future position via the enhanced linear predictor."""
    return ball_predictor.enhanced_linear_predict(current_position, history_positions)

# Train the ball-trajectory model (simplified version: no real training).
def train_ball_trajectory_model(predictor):
    """Simplified stub: performs no training and always reports failure."""
    return False

baseRobots = []  # our robots
oppRobots = []  # opponent robots
data_loader = DataLoader()
race_state = -1  # current set-piece / judge state (set in on_event)
race_state_trigger = -1  # which team triggered the set piece
num_goalkepper = -1  # opponent goalkeeper number; 1 = has a goalkeeper (NOTE(review): original comment garbled - confirm meaning of other values)
penalty = 1  # penalty-kick decision tree (currently three variants)
flag_penalty = -1  # if 1, switch the penalty-kick strategy
defend_flag = 0  # 0 = normal defence, 1 = penalty defence

goal_time = 0
time = 0  # count of failed foul-drawing attempts on goal kicks
goal = 1  # goal-kick decision tree (currently two variants)
flag_goal = 0  # if 1, switch the goal-kick strategy
last_race_state = -1  # previous match state
last_race_state_trigger = -1  # previous triggering team
guard_goal = 1  # goal-kick defence decision tree (guards against opponent drawing a foul)

last_futuerballx = 0
last_futuerbally = 0
tickBeginPenalty = 0
tickBeginGoalKick = 0
tickBeginPlaceKick = 0
lastBallx = -110 + 37.5
lastBally = 0
BallPos = [Vector2(0, 0)] * 100000
# Opponent robot position history, used for prediction
OppRobotPos = [[Vector2(0, 0)] * 10 for _ in range(5)]
# Predicted opponent robot positions
OppRobotPredictPos = [Vector2(0, 0)] * 5
# Opponent robot threat scores
OppRobotThreat = [0] * 5
resetHistoryRecord = False
newMatch = False

# Print match-state changes; see v5rpc.py for the event codes.
@unbox_event
def on_event(event_type: int, args: EventArguments):
    """Handle a match event pushed by the server.

    Args:
        event_type: event code; 0 carries a judge result, 1-8 are phase changes.
        args: event payload; args.judge_result is only valid for event_type 0.
    """
    event = {
        0: lambda: print(args.judge_result.reason),
        1: lambda: print("Match Start"),
        2: lambda: print("Match Stop"),
        3: lambda: print("First Half Start"),
        4: lambda: print("Second Half Start"),
        5: lambda: print("Overtime Start"),
        6: lambda: print("Penalty Shootout Start"),
        7: lambda: print("MatchShootOutStart"),
        8: lambda: print("MatchBlockStart")
    }
    global race_state_trigger
    global race_state
    if event_type == 0:
        # Judge decision: record the set-piece type and the offensive team
        race_state = args.judge_result.type
        race_state_trigger = args.judge_result.offensive_team
        if race_state == JudgeResultEvent.ResultType.PlaceKick:
            print("Place Kick")
        elif race_state == JudgeResultEvent.ResultType.PenaltyKick:
            print("Penalty Kick")
        elif race_state == JudgeResultEvent.ResultType.GoalKick:
            print("Goal Kick")
        elif (race_state == JudgeResultEvent.ResultType.FreeKickLeftBot
              or race_state == JudgeResultEvent.ResultType.FreeKickRightBot
              or race_state == JudgeResultEvent.ResultType.FreeKickLeftTop
              or race_state == JudgeResultEvent.ResultType.FreeKickRightTop):
            print("Free Kick")

        actor = {
            Team.Self: lambda: print("By Self"),
            Team.Opponent: lambda: print("By Opp"),
            Team.Nobody: lambda: print("By Nobody"),
        }
        actor[race_state_trigger]()
    elif event_type == 6:
        race_state = 7  # penalty shootout

    # Robustness fix: an unrecognised event code no longer raises KeyError.
    event.get(event_type, lambda: print(f"Unknown event type: {event_type}"))()


@unbox_int
def get_team_info(server_version: int) -> str:
    """Report the team name and mark that a new match has begun.

    Args:
        server_version: RPC protocol version code (0 -> V1.0, 1 -> V1.1).

    Returns:
        The team name shown by the platform.
    """
    version_names = {0: "V1.0", 1: "V1.1"}
    print(f'server rpc version: {version_names.get(server_version, "V1.0")}')
    global newMatch
    newMatch = True
    # Change the string below to set your own team name.
    return '闲鱼买家'

# Goalkeeper function
def goalkeeper(field):
    """Drive robot 1 (the goalkeeper) for the current tick.

    Predicts where the ball is heading (trained predictor when available,
    otherwise speed-weighted linear extrapolation), checks for opponent
    shooting threats, and positions baseRobots[1] accordingly.  Our goal
    line is at x = 110, so positive x is our half.

    Args:
        field: current field state (ball + robots) from the server.

    Side effects: updates globals football_now_x/y and futureBallx/y, feeds
    the ball-trajectory predictor, and issues move commands to baseRobots[1].
    """
    global football_now_y, football_now_x
    global futureBally, futureBallx
    global defend_flag                   # penalty-defence flag
    global OppRobotPredictPos, OppRobotThreat

    football_now_x = field.ball.position.x
    football_now_y = field.ball.position.y

    # Collect ball-trajectory samples for training the predictor.
    collect_ball_trajectory_data(field.ball.position, BallPos[:GlobalVariable.tick])

    # Ball velocity estimate (computed before prediction so it is always
    # defined); bounds-checked so we never index before the history starts.
    if GlobalVariable.tick > 0:
        ball_velocity_x = football_now_x - BallPos[GlobalVariable.tick - 1].x
        ball_velocity_y = football_now_y - BallPos[GlobalVariable.tick - 1].y
    else:
        ball_velocity_x = 0
        ball_velocity_y = 0

    # Predict the ball trajectory.
    if ball_predictor.is_trained and GlobalVariable.tick > 5:
        # Predictor is trained and there is enough history: use it.
        futureBallx, futureBally = predict_ball_trajectory_nn(field.ball.position, BallPos[:GlobalVariable.tick])
    else:
        # Otherwise fall back to improved linear extrapolation.
        # Enhanced prediction factor based on ball speed.
        prediction_factor = min(12, 8 + 4 * math.sqrt(ball_velocity_x**2 + ball_velocity_y**2) / 10)

        # Bounds check to avoid indexing before the history starts.
        if GlobalVariable.tick > 0:
            futureBallx = prediction_factor * football_now_x - (prediction_factor - 1) * BallPos[GlobalVariable.tick - 1].x
            futureBally = prediction_factor * football_now_y - (prediction_factor - 1) * BallPos[GlobalVariable.tick - 1].y
        else:
            futureBallx = football_now_x
            futureBally = football_now_y

    # Calculate ball speed for adaptive positioning.
    ball_speed = math.sqrt(ball_velocity_x**2 + ball_velocity_y**2)

    # Determine if the ball is moving toward our goal.
    moving_toward_goal = ball_velocity_x > 0.5 and football_now_x > 0

    # Adjust the goalkeeper's y-limit based on the ball's speed and direction.
    goalie_y_limit = min(22, 18 + ball_speed/2) if moving_toward_goal else 18

    # Check whether an opponent is near our goal or poses a shooting threat.
    shooting_threat = False
    shooting_threat_y = 0
    highest_threat = 0
    threat_index = -1

    # Find the most threatening opponent.
    for i in range(5):
        # A shooting threat needs a high score AND being in our half (x > 50).
        if OppRobotThreat[i] > highest_threat and oppRobots[i].get_pos().x > 50:
            highest_threat = OppRobotThreat[i]
            threat_index = i

    # If a high-threat opponent exists, predict its shot line.
    if threat_index >= 0 and highest_threat > 60:
        # Current and predicted positions of the threatening opponent.
        threat_x = oppRobots[threat_index].get_pos().x
        threat_y = oppRobots[threat_index].get_pos().y
        pred_x = OppRobotPredictPos[threat_index].x
        pred_y = OppRobotPredictPos[threat_index].y

        # Movement vector.
        move_x = pred_x - threat_x
        move_y = pred_y - threat_y

        # If the opponent is moving toward our goal...
        if move_x > 0 and threat_x > 50:
            # ...intersect its path with the goal line.
            if abs(move_x) > 0.001:  # avoid division by zero
                # Slope of the movement line.
                slope = move_y / move_x
                # Intersection with our goal line (x = 110).
                intercept_y = threat_y + slope * (110 - threat_x)

                # Inside the goal mouth: treat it as a shooting threat.
                if abs(intercept_y) < 30:  # goal width
                    shooting_threat = True
                    shooting_threat_y = intercept_y

    if np.fabs(futureBally) <= 25: # ball heading inside the goal's y-range
        if futureBallx < 0:  # ball still in the opponent half
            #  baseRobots[0].moveto_dis(GlobalVariable.goalkepper_X, 0)
            if football_now_y*0.8 + futureBally*0.2 >= goalie_y_limit:
                baseRobots[1].moveto_dis(GlobalVariable.goalkepper_X, goalie_y_limit)
            elif football_now_y*0.8 + futureBally*0.2 <= -goalie_y_limit:
                baseRobots[1].moveto_dis(GlobalVariable.goalkepper_X, -goalie_y_limit)
            else:
                # More responsive goalkeeper positioning.
                baseRobots[1].moveto(GlobalVariable.goalkepper_X, football_now_y*0.7 + futureBally*0.3)
        else:   # ball in our half
                if defend_flag == 1:  # penalty defence (do not rush out)
                    # Improved penalty-defence positioning.
                    goalie_x = GlobalVariable.goalkepper_X
                    # Step slightly forward when the ball is close and central.
                    if futureBallx > 100 and abs(futureBally) < 15:
                        goalie_x = min(GlobalVariable.goalkepper_X + 2, 109)

                    # Use the interception-point helper to refine the position.
                    robot_pos = baseRobots[1].get_pos()
                    robot_speed = 8  # estimated goalkeeper speed
                    intercept_x, intercept_y = calculate_interception_point(robot_pos, robot_speed)

                    # Keep the goalkeeper on the goal line.
                    intercept_x = GlobalVariable.goalkepper_X

                    # Blend interception point with the predicted ball y.
                    target_y = (intercept_y * 0.6 + futureBally * 0.4)  # weighted average
                    # Clamp inside the goal mouth.
                    target_y = max(min(target_y, 25), -25)

                    baseRobots[1].moveto(goalie_x, target_y)
                elif shooting_threat:  # a shooting threat was detected
                    # Cover the predicted shot line first.
                    baseRobots[1].moveto(GlobalVariable.goalkepper_X, shooting_threat_y * 0.8)  # bias slightly toward the centre
                else:                   # normal defence
                    if futureBallx <= 65:    # ball not yet in the goal area
                        if futureBally >= goalie_y_limit:
                            baseRobots[1].moveto_dis(GlobalVariable.goalkepper_X, goalie_y_limit - 2)
                        elif futureBally <= -goalie_y_limit:
                            baseRobots[1].moveto_dis(GlobalVariable.goalkepper_X, -goalie_y_limit + 2)
                        else:
                            baseRobots[1].moveto_dis(GlobalVariable.goalkepper_X, futureBally)
                    else:                    # ball reached the goal area on the x axis
                        num_in_goal = 0
                        for i in range(0,5): # count opponents inside our goal area
                            if oppRobots[i].get_pos().x > 20 and np.fabs(oppRobots[i].get_pos().y) < 25 and oppRobots[i].get_pos().x-football_now_x<5: # opponent inside
                                num_in_goal = 1 + num_in_goal

                        if num_in_goal != 0:    # opponents entered the goal area
                            # More aggressive ball interception when opponents are in the goal area.
                            baseRobots[1].moveto(futureBallx - 1, futureBally)
                        else:
                            if futureBally >= goalie_y_limit:
                                baseRobots[1].moveto_dis(GlobalVariable.goalkepper_X, goalie_y_limit)
                            elif futureBally <= -goalie_y_limit:
                                baseRobots[1].moveto_dis(GlobalVariable.goalkepper_X, -goalie_y_limit)
                            else:
                                # Improved ball tracking with weighted prediction.
                                baseRobots[1].moveto(GlobalVariable.goalkepper_X, futureBally*0.6 + football_now_y*0.4)
    else:       # ball outside the goal's y-range
            if futureBallx <= 0:  # ball in the opponent half
                if futureBally > 20:
                    baseRobots[1].moveto_dis(GlobalVariable.goalkepper_X, min(18, futureBally * 0.4))
                elif futureBally < -20:
                    baseRobots[1].moveto_dis(GlobalVariable.goalkepper_X, max(-18, futureBally * 0.4))
                else:
                    baseRobots[1].moveto_dis(GlobalVariable.goalkepper_X, futureBally * 0.4)
            elif futureBallx <= 60:
                    # More central positioning when the ball is in midfield.
                    if futureBally >= 30:
                        baseRobots[1].moveto_dis(GlobalVariable.goalkepper_X, 15)
                    elif futureBally <= -30:
                        baseRobots[1].moveto_dis(GlobalVariable.goalkepper_X, -15)
                    else:  # within goal range
                        baseRobots[1].moveto_dis(GlobalVariable.goalkepper_X, futureBally * 0.5)
            elif futureBallx <= 95:
                    if futureBally >= 20:
                        baseRobots[1].moveto_dis(GlobalVariable.goalkepper_X, 18)
                    elif futureBally <= -20:
                        baseRobots[1].moveto_dis(GlobalVariable.goalkepper_X, -18)
                    else:  # within goal range
                        baseRobots[1].moveto(GlobalVariable.goalkepper_X, futureBally)
            else:
                    # More aggressive ball interception for close threats.
                    if abs(futureBally) < 25 and futureBallx > 100:
                        baseRobots[1].moveto(futureBallx - 1, futureBally)
                    else:
                        baseRobots[1].moveto(GlobalVariable.goalkepper_X, futureBally)

# Threat-score-based defence strategy, combined with the RL model.
def improved_defense_strategy(field):
    """Run special-case defensive plays based on opponent threat scores.

    Two plays are attempted in order:
      1. Man-mark the highest-threat opponent (score > 50) with robot 4 and
         form a dynamic second defensive line with robots 2 and 3.
      2. If an opponent is within 15 units of the ball, block the most
         likely pass lane with robot 3 and mark the likely receiver with
         robot 2.

    Args:
        field: current field state; positions actually come from the
               oppRobots / threat globals updated elsewhere.

    Returns:
        True if a special defensive play was executed, False to let the
        caller fall back to the default defence.
    """
    global OppRobotThreat, OppRobotPredictPos
    global football_now_x, football_now_y, futureBallx, futureBally

    # Find the opponent with the highest threat score.
    max_threat = 0
    threat_index = -1
    for i in range(5):
        if OppRobotThreat[i] > max_threat:
            max_threat = OppRobotThreat[i]
            threat_index = i

    # If a threatening opponent was found, arrange the defence.
    if threat_index >= 0 and max_threat > 50:  # threat threshold
        # Current and predicted positions of the threatening opponent.
        threat_x = oppRobots[threat_index].get_pos().x
        threat_y = oppRobots[threat_index].get_pos().y
        predicted_x = OppRobotPredictPos[threat_index].x
        predicted_y = OppRobotPredictPos[threat_index].y

        # Opponent movement direction.
        direction_x = predicted_x - threat_x
        direction_y = predicted_y - threat_y

        # Intercept earlier when the opponent is moving toward our goal
        # (our goal is on the positive-x side, goal line at x = 110).
        if direction_x > 0:  # moving toward our goal
            intercept_factor = 1.5  # more aggressive interception
        else:
            intercept_factor = 1.0

        # Interception point.
        # NOTE(review): subtracting puts the interceptor at a SMALLER x than
        # the attacker, i.e. on the midfield side rather than between the
        # attacker and our goal -- confirm this offset direction is intended.
        intercept_x = threat_x - 10 * intercept_factor  # offset from the threat
        intercept_y = threat_y + direction_y * intercept_factor  # follow its y movement

        # If the opponent is close to our goal, send robot 4 to intercept.
        if threat_x > 50:
            baseRobots[4].moveto(intercept_x, intercept_y)

            # Robots 2 and 3 form a dynamic second defensive line whose
            # position depends on how close the threat is.
            defense_line_x = 70
            if threat_x > 80:
                defense_line_x = 85  # threat is closer: retreat the line

            # Narrow the line when the threat is central.
            defense_width = 30
            if abs(threat_y) < 20:
                defense_width = 20  # central threat: tighten the line

            baseRobots[2].move_in_still_x(defense_line_x, defense_width)
            baseRobots[3].move_in_still_x(defense_line_x, -defense_width)

            return True  # threat defence was executed

    # Check whether any opponent is close to the ball.
    ball_threat_index = -1
    min_distance_to_ball = float('inf')
    for i in range(5):
        distance = math.sqrt((football_now_x - oppRobots[i].get_pos().x)**2 + 
                            (football_now_y - oppRobots[i].get_pos().y)**2)
        if distance < min_distance_to_ball:
            min_distance_to_ball = distance
            ball_threat_index = i

    # An opponent is on the ball: predict the pass lane and block it.
    if ball_threat_index >= 0 and min_distance_to_ball < 15:
        # Highest-threat opponent other than the carrier (likely receiver).
        second_threat = -1
        second_max_threat = 0
        for i in range(5):
            if i != ball_threat_index and OppRobotThreat[i] > second_max_threat:
                second_max_threat = OppRobotThreat[i]
                second_threat = i

        if second_threat >= 0:
            # Likely pass lane endpoints.
            ball_threat_pos = oppRobots[ball_threat_index].get_pos()
            second_threat_pos = oppRobots[second_threat].get_pos()

            # Pass direction vector.
            pass_dir_x = second_threat_pos.x - ball_threat_pos.x
            pass_dir_y = second_threat_pos.y - ball_threat_pos.y
            pass_dist = math.sqrt(pass_dir_x**2 + pass_dir_y**2)

            if pass_dist > 0.001:  # avoid division by zero
                # Normalise the direction vector.
                pass_dir_x /= pass_dist
                pass_dir_y /= pass_dist

                # Block point 1/3 along the lane (closer to the carrier).
                intercept_factor = pass_dist / 3
                pass_x = ball_threat_pos.x + pass_dir_x * intercept_factor
                pass_y = ball_threat_pos.y + pass_dir_y * intercept_factor

                # Robot 3 blocks the pass lane...
                baseRobots[3].moveto(pass_x, pass_y)

                # ...and robot 2 marks the likely receiver.
                baseRobots[2].moveto(second_threat_pos.x - 5, second_threat_pos.y)

                return True  # pass interception was executed

    # No clear threat: the RL-based layout (in get_instruction) handles it.

    return False  # no special defensive play was executed

# 策略行为主函数，可将以下函数用策略模式封装
def strategy_common(field):      #正常的时候 
    # 最基本最常规情况下的执行策略
    global football_now_x,football_now_y
    global marl_model  # 引用强化学习模型
    global prev_state  # 用于存储前一帧的状态

    football_now_x= field.ball.position.x
    football_now_y = field.ball.position.y    
    # 预测足球位置
    global futureBallx,futureBally
    
    # 更新球的历史位置
    ball_predictor.update_history(field.ball.position)
    
    # 使用线性预测球轨迹
    if GlobalVariable.tick > 0:
        # 使用线性预测
        futureBallx, futureBally = predict_ball_trajectory_nn(field.ball.position, BallPos[:GlobalVariable.tick])
    else:
        # 如果没有历史数据，使用当前位置
        futureBallx, futureBally = football_now_x, football_now_y
        
    # 获取当前状态，用于强化学习模型
    current_state = marl_model.get_state(field)
    
    # 初始化prev_state（如果是第一帧）
    global prev_state
    if 'prev_state' not in globals() or prev_state is None:
        prev_state = current_state
    
    # 在策略执行前，预先计算强化学习建议的位置
    # 这样可以在保证接口不变的情况下调整球员位置
    if GlobalVariable.tick > 100:  # 给模型一些时间学习
        # 使用强化学习模型为非守门员球员选择动作
        rl_actions = []
        rl_target_positions = []
        
        for i in range(5):
            if i != 1:  # 不调整守门员
                # 选择动作
                action = marl_model.select_action(current_state, i)
                rl_actions.append(action)
                
                # 将动作转换为目标位置
                target_pos = marl_model.action_to_position(action, baseRobots[i].get_pos())
                rl_target_positions.append(target_pos)
                
                # 将强化学习建议的位置和动作保存到机器人对象中，供后续策略使用
                baseRobots[i].rl_target_pos = target_pos
                baseRobots[i].rl_action = action  # 保存动作，用于经验存储
            else:
                rl_actions.append(0)  # 守门员占位
                rl_target_positions.append((0, 0))  # 守门员占位
                baseRobots[i].rl_target_pos = None  # 守门员不使用强化学习
                baseRobots[i].rl_action = None  # 守门员不使用强化学习

    # print ('dis',baseRobots[1].PredictInformation[GlobalVariable.tick_delay].position.x,'tick',GlobalVariable.tick)
    # baseRobots[1].moveto_dis (110,20)
    #1号一直作为防守人员   2号控场  0号主要进攻
    #********************************防守大策略***********************************************************                
    if football_now_x > 5:
        # 尝试使用改进的基于威胁度的防守策略
        defense_handled = improved_defense_strategy(field)
        
        # 如果改进的防守策略没有处理，则使用原来的策略结合强化学习模型
        if not defense_handled:
            # 计算球的速度和方向
            ball_vx = football_now_x - (BallPos[GlobalVariable.tick - 1].x if GlobalVariable.tick > 0 else football_now_x)
            ball_vy = football_now_y - (BallPos[GlobalVariable.tick - 1].y if GlobalVariable.tick > 0 else football_now_y)
            ball_speed = math.sqrt(ball_vx**2 + ball_vy**2)
            
            # 0号主攻
            # 使用预先计算好的强化学习建议位置
            rl_target_pos_0 = baseRobots[0].rl_target_pos if hasattr(baseRobots[0], 'rl_target_pos') and baseRobots[0].rl_target_pos else (0, 0)
            
            # 计算原有策略位置
            if (futureBallx > 80 or np.fabs(futureBally) < 40) or (football_now_x > 80 or np.fabs(football_now_y) < 40): #防止进入守门区
                original_x_0 = 65
                original_y_0 = futureBally*0.8
                # 结合强化学习建议和原有策略
                # 如果强化学习建议的位置在安全区域内，采纳强化学习建议
                if rl_target_pos_0[0] < 80 and np.fabs(rl_target_pos_0[1]) >= 40:
                    baseRobots[0].moveto(rl_target_pos_0[0], rl_target_pos_0[1])
                else:
                    baseRobots[0].moveto_dis(original_x_0, original_y_0)
            else:
                if futureBallx > baseRobots[1].get_pos().x:
                    # 在突破时考虑强化学习建议的方向
                    # 计算强化学习建议的方向向量
                    robot0_pos = baseRobots[0].get_pos()
                    rl_dir_x = rl_target_pos_0[0] - robot0_pos.x
                    rl_dir_y = rl_target_pos_0[1] - robot0_pos.y
                    rl_dist = math.sqrt(rl_dir_x**2 + rl_dir_y**2)
                    
                    # 如果强化学习建议明确且距离足够大，调整突破方向
                    if rl_dist > 10:
                        # 归一化方向向量
                        rl_dir_x /= rl_dist
                        rl_dir_y /= rl_dist
                        
                        # 调整突破目标点，结合原有目标和强化学习建议
                        adjusted_x = futureBallx + 1 + rl_dir_x * 5
                        adjusted_y = futureBally + rl_dir_y * 5
                        baseRobots[0].breakthrough(football_now_x, football_now_y, adjusted_x, adjusted_y)
                    else:
                        baseRobots[0].breakthrough(football_now_x, football_now_y, futureBallx+1, futureBally)
                else:
                    # 在射门时考虑强化学习建议的方向
                    # 计算强化学习建议的距离
                    robot0_pos = baseRobots[0].get_pos()
                    rl_dist = math.sqrt((rl_target_pos_0[0] - robot0_pos.x)**2 + (rl_target_pos_0[1] - robot0_pos.y)**2)
                    
                    if ball_speed > 1.0 and rl_dist > 10:  # 如果球速较快且强化学习建议明确
                        baseRobots[0].shoot(rl_target_pos_0[0], rl_target_pos_0[1], 10)
                    else:
                        baseRobots[0].shoot(futureBallx, futureBally, 10)
            # 0号主攻结束

            # 3号机器人
            # 使用预先计算好的强化学习建议位置
            rl_target_pos_3 = baseRobots[3].rl_target_pos if hasattr(baseRobots[3], 'rl_target_pos') and baseRobots[3].rl_target_pos else (0, 0)
            
            # 计算原有策略位置
            if (futureBallx > 70.5 or np.fabs(futureBally) < 40) or (football_now_x > 70 or np.fabs(football_now_y) < 40): #防止进入守门区
                original_x_3 = 60
                original_y_3 = -70
                # 结合强化学习建议和原有策略
                # 如果强化学习建议的位置在安全区域内，采纳强化学习建议
                if rl_target_pos_3[0] < 70 and np.fabs(rl_target_pos_3[1]) >= 40:
                    baseRobots[3].moveto(rl_target_pos_3[0], rl_target_pos_3[1])
                else:
                    baseRobots[3].moveto_dis(original_x_3, original_y_3)
            else:
                if baseRobots[0].get_pos().x > football_now_x and football_now_x > baseRobots[3].get_pos().x: #3号机器人挡住了0号主攻的路线就避开
                    # 结合强化学习建议调整避让位置
                    robot3_pos = baseRobots[3].get_pos()
                    rl_dist_3 = math.sqrt((rl_target_pos_3[0] - robot3_pos.x)**2 + (rl_target_pos_3[1] - robot3_pos.y)**2)
                    
                    if rl_dist_3 > 15:  # 如果强化学习建议明确
                        baseRobots[3].moveto(rl_target_pos_3[0], rl_target_pos_3[1])
                    else:
                        baseRobots[3].moveto(futureBallx, -futureBally)
                else:
                    if football_now_x > baseRobots[3].get_pos().x:
                        # 在突破时考虑强化学习建议的方向
                        robot3_pos = baseRobots[3].get_pos()
                        rl_dir_x = rl_target_pos_3[0] - robot3_pos.x
                        rl_dir_y = rl_target_pos_3[1] - robot3_pos.y
                        rl_dist = math.sqrt(rl_dir_x**2 + rl_dir_y**2)
                        
                        if rl_dist > 10:  # 如果强化学习建议明确
                            # 归一化方向向量
                            rl_dir_x /= rl_dist
                            rl_dir_y /= rl_dist
                            
                            # 调整突破目标点
                            adjusted_x = futureBallx + 1 + rl_dir_x * 5
                            adjusted_y = futureBally + rl_dir_y * 5
                            baseRobots[3].breakthrough(football_now_x, football_now_y, adjusted_x, adjusted_y)
                        else:
                            baseRobots[3].breakthrough(football_now_x, football_now_y, futureBallx+1, futureBally)
                    else:
                        # 结合强化学习建议和原有策略
                        robot3_pos = baseRobots[3].get_pos()
                        rl_dist_3 = math.sqrt((rl_target_pos_3[0] - robot3_pos.x)**2 + (rl_target_pos_3[1] - robot3_pos.y)**2)
                        
                        if rl_dist_3 > 15:  # 如果强化学习建议明确
                            baseRobots[3].moveto(rl_target_pos_3[0], rl_target_pos_3[1])
                        else:
                            baseRobots[3].moveto(futureBallx, futureBally)
            # 3号机器人结束

            # 2号机器人
            # 使用预先计算好的强化学习建议位置
            rl_target_pos_2 = baseRobots[2].rl_target_pos if hasattr(baseRobots[2], 'rl_target_pos') and baseRobots[2].rl_target_pos else (0, 0)
            
            # 计算原有策略位置
            if (futureBallx > 70.5 or np.fabs(futureBally) < 40) or (football_now_x > 70 or np.fabs(football_now_y) < 40): #防止进入守门区
                original_x_2 = 60
                original_y_2 = 70
                # 结合强化学习建议和原有策略
                if rl_target_pos_2[0] < 70 and np.fabs(rl_target_pos_2[1]) >= 40:
                    baseRobots[2].moveto(rl_target_pos_2[0], rl_target_pos_2[1])
                else:
                    baseRobots[2].moveto_dis(original_x_2, original_y_2)
            else:
                if football_now_x > baseRobots[2].get_pos().x:
                    # 在突破时考虑强化学习建议的方向
                    robot2_pos = baseRobots[2].get_pos()
                    rl_dir_x = rl_target_pos_2[0] - robot2_pos.x
                    rl_dir_y = rl_target_pos_2[1] - robot2_pos.y
                    rl_dist = math.sqrt(rl_dir_x**2 + rl_dir_y**2)
                    
                    if rl_dist > 10:  # 如果强化学习建议明确
                        # 归一化方向向量
                        rl_dir_x /= rl_dist
                        rl_dir_y /= rl_dist
                        
                        # 调整突破目标点
                        adjusted_x = futureBallx + 1 + rl_dir_x * 5
                        adjusted_y = futureBally + rl_dir_y * 5
                        baseRobots[2].breakthrough(football_now_x, football_now_y, adjusted_x, adjusted_y)
                    else:
                        baseRobots[2].breakthrough(football_now_x, football_now_y, futureBallx+1, futureBally)
                else:
                    # 结合强化学习建议和原有策略
                    robot2_pos = baseRobots[2].get_pos()
                    rl_dist_2 = math.sqrt((rl_target_pos_2[0] - robot2_pos.x)**2 + (rl_target_pos_2[1] - robot2_pos.y)**2)
                    
                    if rl_dist_2 > 15:  # 如果强化学习建议明确
                        baseRobots[2].moveto(rl_target_pos_2[0], rl_target_pos_2[1])
                    else:
                        baseRobots[2].moveto(futureBallx, futureBally)
            # 4号机器人结束

            # 4号机器人
            # 使用预先计算好的强化学习建议位置
            rl_target_pos_4 = baseRobots[4].rl_target_pos if hasattr(baseRobots[4], 'rl_target_pos') and baseRobots[4].rl_target_pos else (0, 0)
            
            # 计算原有策略位置和强化学习建议的距离
            robot4_pos = baseRobots[4].get_pos()
            if futureBallx > robot4_pos.x:
                original_x_4 = 55
                original_y_4 = -futureBally
            else:
                original_x_4 = 55
                original_y_4 = football_now_y*0.8 + futureBally*0.2
            
            rl_dist_4 = math.sqrt((rl_target_pos_4[0] - original_x_4)**2 + (rl_target_pos_4[1] - original_y_4)**2)
            
            # 如果强化学习建议明确且在安全区域内，采纳强化学习建议
            if rl_dist_4 > 15 and rl_target_pos_4[0] < 80 and np.fabs(rl_target_pos_4[1]) >= 30:
                baseRobots[4].moveto(rl_target_pos_4[0], rl_target_pos_4[1])
            else:
                # 使用改进的拦截策略
                if futureBallx > robot4_pos.x:
                    # 计算最佳拦截点
                    robot_speed = 10  # 估计机器人速度
                    intercept_x, intercept_y = calculate_interception_point(robot4_pos, robot_speed)
                    
                    # 限制拦截点在合理范围内
                    intercept_x = min(intercept_x, 80)  # 不要太靠近对方球门
                    intercept_x = max(intercept_x, 30)  # 不要太靠近自己球门
                    
                    # 保持x坐标在55附近，但使用计算的y坐标进行拦截
                    baseRobots[4].move_in_still_x(55, intercept_y)
                else:
                    # 计算最佳拦截点
                    robot_speed = 10  # 估计机器人速度
                    intercept_x, intercept_y = calculate_interception_point(robot4_pos, robot_speed)
                    
                    # 结合原有策略和拦截点
                    target_y = (intercept_y + football_now_y*0.4 + futureBally*0.2) / 1.6  # 加权平均
                    baseRobots[4].move_in_still_x(55, target_y)  # 4号后卫起一个拦截作用
                    
                    dx = football_now_x - robot4_pos.x
                    dy = football_now_y - robot4_pos.y
                    distance = math.sqrt(dx * dx + dy * dy)        
                    if distance < 2 and dx > 0:
                        if football_now_y < 0:
                            baseRobots[4].set_wheel_velocity(125, -125)
                        else:
                            baseRobots[4].set_wheel_velocity(-125, 125)
            
            # 计算奖励并存储经验
            reward = marl_model.calculate_reward(field, current_state)
            
            # 定期从经验回放缓冲区学习
            if GlobalVariable.tick % 100 == 0:  # 每100帧学习一次
                marl_model.learn()
                
                # 每1000帧保存一次模型
                if GlobalVariable.tick % 1000 == 0:
                    marl_model.save_model()
        
        # 守门员专区
        goalkeeper(field)

    #********************************进攻大策略***********************************************************
    if football_now_x <= 5:
        # 强化学习建议位置已经存储在各个机器人对象的rl_target_pos属性中
        
        # 计算球的速度和方向
        ball_vx = football_now_x - (BallPos[GlobalVariable.tick - 1].x if GlobalVariable.tick > 0 else football_now_x)
        ball_vy = football_now_y - (BallPos[GlobalVariable.tick - 1].y if GlobalVariable.tick > 0 else football_now_y)
        ball_speed = math.sqrt(ball_vx**2 + ball_vy**2)
        
        if futureBallx+1 > -75 + 3: #球还没到禁区
            # 结合强化学习建议和原有策略
            robot0_pos = baseRobots[0].get_pos()
            rl_target_pos_0 = baseRobots[0].rl_target_pos if hasattr(baseRobots[0], 'rl_target_pos') and baseRobots[0].rl_target_pos else (0, 0)
            rl_dist_0 = math.sqrt((rl_target_pos_0[0] - robot0_pos.x)**2 + (rl_target_pos_0[1] - robot0_pos.y)**2)
            
            # 如果强化学习建议明确且在合理范围内，采纳强化学习建议
            if rl_dist_0 > 15 and rl_target_pos_0[0] < 0:
                # 使用强化学习建议的位置，但保持射门行为
                baseRobots[0].shoot(rl_target_pos_0[0], rl_target_pos_0[1], 10)
            else:
                # 使用原有策略
                baseRobots[0].shoot(futureBallx+1, futureBally, 10)
            
            # 3号机器人追球，结合强化学习建议和拦截点计算
            robot3_pos = baseRobots[3].get_pos()
            rl_target_pos_3 = baseRobots[3].rl_target_pos if hasattr(baseRobots[3], 'rl_target_pos') and baseRobots[3].rl_target_pos else (0, 0)
            rl_dist_3 = math.sqrt((rl_target_pos_3[0] - robot3_pos.x)**2 + (rl_target_pos_3[1] - robot3_pos.y)**2)
            if rl_dist_3 > 15 and rl_target_pos_3[0] < 0:
                # 使用强化学习建议的位置
                baseRobots[3].moveto(rl_target_pos_3[0], rl_target_pos_3[1])
            else:
                # 计算最佳拦截点
                robot_speed = 12  # 估计机器人速度，3号机器人速度较快
                intercept_x, intercept_y = calculate_interception_point(robot3_pos, robot_speed)
                
                # 结合拦截点和预测位置
                # 如果拦截点在合理范围内，使用拦截点
                if -90 < intercept_x < 90 and -75 < intercept_y < 75:
                    # 使用拦截点，但稍微提前一点以便更好地控制球
                    angle = math.atan2(intercept_y - robot3_pos.y, intercept_x - robot3_pos.x) * 180 / math.pi
                    baseRobots[3].move_with_angle(intercept_x, intercept_y, angle)  # 3号自由人追球
                else:
                    # 使用原有策略
                    baseRobots[3].move_with_angle(futureBallx+1, futureBally, 180)  # 3号自由人追球
        else: #超过守门区了就踢球 防止冲撞守门员
            num_in_goal = 0
            for i in range(0,5): #判断有没有对方守门员
                if oppRobots[i].get_pos().x < -94 and np.fabs(oppRobots[i].get_pos().y) < 30: #如果有  
                    num_in_goal += 1          

            if num_in_goal == 1:  #有守门员
                distance1_ball = math.sqrt((football_now_x - baseRobots[0].get_pos().x) ** 2 + (football_now_y - baseRobots[0].get_pos().y) ** 2)
                if futureBallx < -80 and distance1_ball < 10:  #在球门区而且球在跟前
                    # 在球门区域，需要精确控制，不使用强化学习建议
                    if baseRobots[0].get_pos().x <= football_now_x:  #0号在球的左侧                 
                        baseRobots[0].breakthrough(football_now_x, football_now_y, futureBallx+1, futureBally) 
                    else:
                        if football_now_y < 0:
                            baseRobots[0].set_wheel_velocity(125, -125)
                        else:
                            baseRobots[0].set_wheel_velocity(-125, 125) 
                else:
                    # 结合强化学习建议和原有策略
                    robot0_pos = baseRobots[0].get_pos()
                    rl_target_pos_0 = baseRobots[0].rl_target_pos if hasattr(baseRobots[0], 'rl_target_pos') and baseRobots[0].rl_target_pos else (0, 0)
                    rl_dist_0 = math.sqrt((rl_target_pos_0[0] - robot0_pos.x)**2 + (rl_target_pos_0[1] - robot0_pos.y)**2)
                    
                    if rl_dist_0 > 15 and rl_target_pos_0[0] < -30 and np.fabs(rl_target_pos_0[1]) < 50:
                        # 使用强化学习建议的位置
                        baseRobots[0].move_with_angle(rl_target_pos_0[0], rl_target_pos_0[1], 180)
                    else:
                        # 使用原有策略
                        baseRobots[0].move_with_angle(futureBallx+1, futureBally, 180)
                        if (futureBallx<-90 and np.fabs(futureBally)<=60):
                            baseRobots[0].moveto_dis(-80, futureBally)

                # 3号机器人策略，结合强化学习建议
                robot3_pos = baseRobots[3].get_pos()
                if baseRobots[3].get_pos().x <= football_now_x:  #3号在球的左侧 
                    # 在突破时考虑强化学习建议的方向
                    rl_target_pos_3 = baseRobots[3].rl_target_pos if hasattr(baseRobots[3], 'rl_target_pos') and baseRobots[3].rl_target_pos else (0, 0)
                    rl_dir_x = rl_target_pos_3[0] - robot3_pos.x
                    rl_dir_y = rl_target_pos_3[1] - robot3_pos.y
                    rl_dist = math.sqrt(rl_dir_x**2 + rl_dir_y**2)
                    
                    if rl_dist > 15 and rl_target_pos_3[0] < -30:
                        # 归一化方向向量
                        rl_dir_x /= rl_dist
                        rl_dir_y /= rl_dist
                        
                        # 调整突破目标点
                        adjusted_x = futureBallx + 1 + rl_dir_x * 5
                        adjusted_y = futureBally + rl_dir_y * 5
                        baseRobots[3].breakthrough(football_now_x, football_now_y, adjusted_x, adjusted_y)
                    else:
                        baseRobots[3].breakthrough(football_now_x, football_now_y, futureBallx+1, futureBally)
                else:
                    # 结合强化学习建议和原有策略
                    rl_target_pos_3 = baseRobots[3].rl_target_pos if hasattr(baseRobots[3], 'rl_target_pos') and baseRobots[3].rl_target_pos else (0, 0)
                    rl_dist_3 = math.sqrt((rl_target_pos_3[0] - robot3_pos.x)**2 + (rl_target_pos_3[1] - robot3_pos.y)**2)
                    
                    if rl_dist_3 > 15 and rl_target_pos_3[0] < -30 and np.fabs(rl_target_pos_3[1]) < 50:
                        # 使用强化学习建议的位置
                        baseRobots[3].move_with_angle(rl_target_pos_3[0], rl_target_pos_3[1], 180)
                    else:
                        # 使用原有策略
                        baseRobots[3].move_with_angle(futureBallx+1, futureBally, 180)
                
                distance3_ball = math.sqrt((futureBallx - baseRobots[3].get_pos().x) ** 2 + (futureBally - baseRobots[3].get_pos().y) ** 2)
                if (baseRobots[3].get_pos().x > futureBallx and distance3_ball<10):
                    if football_now_y < 0:
                        baseRobots[3].set_wheel_velocity(125, -125)
                    else:
                        baseRobots[3].set_wheel_velocity(-125, 125) 
                if (futureBallx<-90 and np.fabs(futureBally)<=50):
                    baseRobots[3].moveto_dis(-75, futureBally)             
            else:  #没有守门员
                # 结合强化学习建议和原有策略
                robot0_pos = baseRobots[0].get_pos()
                rl_target_pos_0 = baseRobots[0].rl_target_pos if hasattr(baseRobots[0], 'rl_target_pos') and baseRobots[0].rl_target_pos else (0, 0)
                rl_dist_0 = math.sqrt((rl_target_pos_0[0] - robot0_pos.x)**2 + (rl_target_pos_0[1] - robot0_pos.y)**2)
                
                # 如果强化学习建议明确且指向球门方向，采纳强化学习建议
                if rl_dist_0 > 15 and rl_target_pos_0[0] < -90 and np.fabs(rl_target_pos_0[1]) < 30:
                    # 使用强化学习建议的位置，但保持射门行为
                    baseRobots[0].shoot(rl_target_pos_0[0], rl_target_pos_0[1], 10)
                else:
                    # 使用原有策略
                    baseRobots[0].shoot(futureBallx+1, futureBally, 10)

        # 中场后卫
        # 4号机器人策略，结合强化学习建议
        distance2_ball = math.sqrt((football_now_x - baseRobots[4].get_pos().x) ** 2 + (football_now_y - baseRobots[4].get_pos().y) ** 2)
        if distance2_ball < 5 and (baseRobots[4].get_pos().x - football_now_x) > 0: #球到眼前了就中场射门
            if football_now_y < 0:
                baseRobots[4].set_wheel_velocity(125, -125)
            else:
                baseRobots[4].set_wheel_velocity(-125, 125)            
        else:
            # 结合强化学习建议和原有策略
            robot4_pos = baseRobots[4].get_pos()
            rl_target_pos_4 = baseRobots[4].rl_target_pos if hasattr(baseRobots[4], 'rl_target_pos') and baseRobots[4].rl_target_pos else (0, 0)
            rl_dist_4 = math.sqrt((rl_target_pos_4[0] - robot4_pos.x)**2 + (rl_target_pos_4[1] - robot4_pos.y)**2)
            
            # 如果强化学习建议明确且在合理范围内，采纳强化学习建议
            if rl_dist_4 > 15 and rl_target_pos_4[0] > 0 and rl_target_pos_4[0] < 60:
                baseRobots[4].moveto(rl_target_pos_4[0], rl_target_pos_4[1])
            else:
                # 使用原有策略
                baseRobots[4].move_in_still_x(45, football_now_y*0.8 + futureBally*0.2)
                        
        # 2号机器人策略，结合强化学习建议
        distance4_ball = math.sqrt((football_now_x - baseRobots[2].get_pos().x) ** 2 + (football_now_y - baseRobots[2].get_pos().y) ** 2)
        if distance4_ball < 5 and (baseRobots[2].get_pos().x - football_now_x) > 0: #球到眼前了就中场射门
            if football_now_y < 0:
                baseRobots[2].set_wheel_velocity(125, -125)
            else:
                baseRobots[2].set_wheel_velocity(-125, 125)            
        else:
            # 结合强化学习建议和原有策略
            robot2_pos = baseRobots[2].get_pos()
            rl_target_pos_2 = baseRobots[2].rl_target_pos if hasattr(baseRobots[2], 'rl_target_pos') and baseRobots[2].rl_target_pos else (0, 0)
            rl_dist_2 = math.sqrt((rl_target_pos_2[0] - robot2_pos.x)**2 + (rl_target_pos_2[1] - robot2_pos.y)**2)
            
            # 如果强化学习建议明确且在合理范围内，采纳强化学习建议
            if rl_dist_2 > 15 and rl_target_pos_2[0] > -20 and rl_target_pos_2[0] < 20:
                baseRobots[2].moveto(rl_target_pos_2[0], rl_target_pos_2[1])
            else:
                # 使用原有策略
                # 使用拦截点计算功能优化2号机器人的防守位置
                robot2_pos = baseRobots[2].get_pos()
                robot_speed = 10  # 2号机器人速度估计
                intercept_x, intercept_y = calculate_interception_point(robot2_pos, robot_speed)
                
                # 限制2号机器人在中场区域
                intercept_x = min(max(intercept_x, -20), 20)  # 限制在中场区域
                
                # 结合拦截点和预测位置
                target_y = (intercept_y * 0.7 + futureBally * 0.3)  # 加权平均
                
                # 使用固定的x坐标，但优化的y坐标
                baseRobots[2].move_in_still_x(0, target_y)
        
        # 计算奖励并存储经验
        reward = marl_model.calculate_reward(field, current_state)
        
        # 存储经验（使用预先计算的动作）
        if GlobalVariable.tick > 0:
            for i, robot_idx in enumerate([0, 2, 3, 4]):
                # 使用预先存储的动作
                if hasattr(baseRobots[robot_idx], 'rl_action') and baseRobots[robot_idx].rl_action is not None:
                    action = baseRobots[robot_idx].rl_action
                    marl_model.store_experience(i, prev_state, action, reward[i], current_state, False)
        
        # 保存当前状态作为下一帧的前一状态
        prev_state = current_state
        
        # 定期从经验回放缓冲区学习
        if GlobalVariable.tick % 100 == 0:  # 每100帧学习一次
            marl_model.learn()
            
            # 每1000帧保存一次模型
            if GlobalVariable.tick % 1000 == 0:
                marl_model.save_model()
        
        # 守门员专区
        goalkeeper(field)


def strategy_penalty(field): # Penalty-kick strategy
    """Penalty-kick strategy.

    When we take the penalty (race_state_trigger == Team.Self) one of
    several scripted routines is run, selected by the global ``penalty``.
    Each routine is tick-timed; when it runs out without scoring,
    ``flag_penalty`` is set (so get_placement rotates to the next routine)
    and play falls back to strategy_common().  When the opponent takes the
    penalty we hold a fixed defensive layout for 60 ticks.
    """
    football_now_x = field.ball.position.x
    football_now_y = field.ball.position.y
    
    # Collect ball-trajectory samples for training the neural-network predictor
    collect_ball_trajectory_data(field.ball.position, BallPos[:GlobalVariable.tick])
    
    # Compute the ball velocity (defined before the prediction to avoid scoping issues)
    # Bounds check so we never index BallPos before the first tick
    if GlobalVariable.tick > 0:
        ball_velocity_x = football_now_x - BallPos[GlobalVariable.tick - 1].x
        ball_velocity_y = football_now_y - BallPos[GlobalVariable.tick - 1].y
    else:
        ball_velocity_x = 0
        ball_velocity_y = 0
    
    # Use the neural network to predict the ball trajectory when available
    if ball_predictor.is_trained and GlobalVariable.tick > 5:
        # Model is trained and enough history exists: use the NN prediction
        futureBallx, futureBally = predict_ball_trajectory_nn(field.ball.position, BallPos[:GlobalVariable.tick])
    else:
        # Otherwise fall back to an improved linear extrapolation whose
        # look-ahead factor scales with the current ball speed
        prediction_factor = min(12, 8 + 4 * math.sqrt(ball_velocity_x**2 + ball_velocity_y**2) / 10)
        
        # Bounds check so we never index BallPos before the first tick
        if GlobalVariable.tick > 0:
            futureBallx = prediction_factor * football_now_x - (prediction_factor - 1) * BallPos[GlobalVariable.tick - 1].x
            futureBally = prediction_factor * football_now_y - (prediction_factor - 1) * BallPos[GlobalVariable.tick - 1].y
        else:
            futureBallx = football_now_x
            futureBally = football_now_y
    global tickBeginPenalty
    global race_state_trigger
    global flag_penalty,penalty
    goalkeeper_num = 0  # NOTE(review): assigned but never used in this function
   
    if race_state_trigger == Team.Self:
        for i in range(0, 5):
            baseRobots[i].set_wheel_velocity(0, 0)
        # print(np.fabs(baseRobots[1].get_pos().x)) 
        if  penalty == 2 :   # Penalty routine 1: goalkeeper shoots from a fixed point
            if GlobalVariable.tick - tickBeginPenalty <=30:        
              if   np.fabs(baseRobots[0].get_pos().x) < 64.5001:
                baseRobots[0].set_wheel_velocity(-125,-125)
              else:
               baseRobots[0].set_wheel_velocity(125,-125)
            else:
                flag_penalty = 1 # No goal within 30 ticks: the penalty missed, switch to routine 2
                strategy_common(field)
        elif penalty == 1:       # Penalty routine 2: goalkeeper drives a C-shaped arc
            if GlobalVariable.tick - tickBeginPenalty <= 8:
                baseRobots[0].set_wheel_velocity(-110, -77)
            elif GlobalVariable.tick - tickBeginPenalty <= 19:
                baseRobots[0].set_wheel_velocity(-90, -120)
            elif GlobalVariable.tick - tickBeginPenalty <= 28:
                baseRobots[0].set_wheel_velocity(-10, -125)
            elif GlobalVariable.tick - tickBeginPenalty <= 68:
                baseRobots[0].shoot(futureBallx, futureBally,10)
            else:
                flag_penalty = 1
                strategy_common(field)
        elif penalty == 4:    # Tactic No.3: teammate assists the shot
          if GlobalVariable.tick - tickBeginPenalty <= 65:
            baseRobots[3].set_wheel_velocity(80, 80)
            if  8 > np.fabs(baseRobots[4].get_pos().x) :
                baseRobots[0].set_wheel_velocity(80, 80)
                baseRobots[4].set_wheel_velocity(125,125)
            elif 8 < np.fabs(baseRobots[4].get_pos().x) < 31:
                baseRobots[4].set_wheel_velocity(105, 125)
                if 12 < np.fabs(baseRobots[4].get_pos().x) < 31:
                    baseRobots[0].set_wheel_velocity(40,20)  
            elif  50 > np.fabs(baseRobots[4].get_pos().x) > 31:
                baseRobots[0].set_wheel_velocity(65,45)
                baseRobots[4].set_wheel_velocity(125,125)
            elif  np.fabs(baseRobots[4].get_pos().x) > 50:
                baseRobots[4].set_wheel_velocity(125,-125)
          else:
                flag_penalty = 1 
                strategy_common(field)
        elif penalty == 3:    # Tactic No.4: teammate assists the shot
          if GlobalVariable.tick - tickBeginPenalty <= 55:
            if  10 > np.fabs(baseRobots[3].get_pos().x) :
                baseRobots[0].set_wheel_velocity(90, 90)
                baseRobots[3].set_wheel_velocity(125,125)
            elif 10 < np.fabs(baseRobots[3].get_pos().x) < 35:
                baseRobots[3].set_wheel_velocity(102.7, 125)
                if 12 < np.fabs(baseRobots[3].get_pos().x) < 35:
                    baseRobots[0].set_wheel_velocity(40,20)  
            elif 60 > np.fabs(baseRobots[3].get_pos().x) > 35:
                baseRobots[0].set_wheel_velocity(65,45)
                baseRobots[3].set_wheel_velocity(125,125)
          else:
                flag_penalty = 1 
                strategy_common(field)
        elif penalty == 5:    # Penalty routine 5: simple C-arc
          if GlobalVariable.tick - tickBeginPenalty <=55:
             if  np.fabs(baseRobots[0].get_pos().x) < 69:
                baseRobots[0].set_wheel_velocity(91,95)
             elif  74 > np.fabs(baseRobots[0].get_pos().x) > 69:
                baseRobots[0].set_wheel_velocity(60,95)
             elif  np.fabs(baseRobots[0].get_pos().x) > 74:
                baseRobots[0].set_wheel_velocity(125,-125)
          else:
                flag_penalty = 1 
                strategy_common(field)
          
    if race_state_trigger == Team.Opponent:
        # Defensive layout while the opponent takes the penalty
        if GlobalVariable.tick - tickBeginPenalty <= 60 :
            baseRobots[0].moveto_dis(50, -15)
            baseRobots[2].moveto_dis(50, 15)
            baseRobots[3].moveto_dis(70, -65)
            baseRobots[4].moveto_dis(70, 65)
            goalkeeper (field)
        else:
            strategy_common(field) 

def strategy_goalkick(field):  # Goal-kick strategy
    """Goal-kick strategy.

    When we take the goal kick, a scripted routine selected by the global
    ``goal`` runs (foul-baiting, normal, or direct-kick); when the
    opponent takes it, a defensive layout selected by ``guard_goal`` is
    used.  Timed routines fall back to strategy_common().
    """
    global tickBeginGoalKick
    global race_state_trigger
    global goal,guard_goal

    football_now_x = field.ball.position.x
    football_now_y = field.ball.position.y
    
    # Collect ball-trajectory samples for training the neural-network predictor
    collect_ball_trajectory_data(field.ball.position, BallPos[:GlobalVariable.tick])
    
    # Compute the ball velocity (defined before the prediction to avoid scoping issues)
    # Bounds check so we never index BallPos before the first tick
    if GlobalVariable.tick > 0:
        ball_velocity_x = football_now_x - BallPos[GlobalVariable.tick - 1].x
        ball_velocity_y = football_now_y - BallPos[GlobalVariable.tick - 1].y
    else:
        ball_velocity_x = 0
        ball_velocity_y = 0
    
    # Use the neural network to predict the ball trajectory when available
    if ball_predictor.is_trained and GlobalVariable.tick > 5:
        # Model is trained and enough history exists: use the NN prediction
        futureBallx, futureBally = predict_ball_trajectory_nn(field.ball.position, BallPos[:GlobalVariable.tick])
    else:
        # Otherwise fall back to an improved linear extrapolation whose
        # look-ahead factor scales with the current ball speed
        prediction_factor = min(12, 8 + 4 * math.sqrt(ball_velocity_x**2 + ball_velocity_y**2) / 10)
        
        # Bounds check so we never index BallPos before the first tick
        if GlobalVariable.tick > 0:
            futureBallx = prediction_factor * football_now_x - (prediction_factor - 1) * BallPos[GlobalVariable.tick - 1].x
            futureBally = prediction_factor * football_now_y - (prediction_factor - 1) * BallPos[GlobalVariable.tick - 1].y
        else:
            futureBallx = football_now_x
            futureBally = football_now_y

    if race_state_trigger == Team.Self:
        if goal == 1:  # Foul-baiting routine
            for i in range(2, 5):
                baseRobots[i].set_wheel_velocity(125, -125)
            baseRobots[0].moveto_dis(-50, 30)              
            baseRobots[1].set_wheel_velocity(0, 0)  # goalkeeper 
        
        elif goal == 2:  # Normal routine
            if GlobalVariable.tick - tickBeginGoalKick <= 75:
                for i in range(0, 5):
                    baseRobots[i].set_wheel_velocity(0, 0)
                baseRobots[1].set_wheel_velocity(125, 125)
            else:
                strategy_common(field)
        elif goal == 3: # Direct-kick opening routine
            for i in range(0, 5):
                baseRobots[i].set_wheel_velocity(0, 0)
            if GlobalVariable.tick - tickBeginGoalKick <= 30:
                 baseRobots[1].set_wheel_velocity(125, 125) 
            elif GlobalVariable.tick - tickBeginGoalKick <= 45:
                 baseRobots[1].set_wheel_velocity(125, -125)                  
            else:
                 strategy_common(field)            

    if race_state_trigger == Team.Opponent:
        if guard_goal == 1:
            if GlobalVariable.tick - tickBeginGoalKick <= 80:
                baseRobots[0].shoot (futureBallx,futureBally,10)               
                baseRobots[2].move_in_still_x  (65,futureBally)  # defender No.2 acts as an interceptor
                baseRobots[3].moveto(25, futureBally) 
                baseRobots[4].moveto(50, futureBally)
                goalkeeper (field)               
            else:               
                strategy_common(field)
        elif guard_goal == 2:  # Guard against the opponent baiting fouls
            goalkeeper (field)
            distance1_ball = math.sqrt((football_now_x - baseRobots[0].get_pos().x) ** 2 + (football_now_y - baseRobots[0].get_pos().y) ** 2)
            if futureBallx < -80 and distance1_ball < 10:  # Inside the goal area with the ball right in front
                if baseRobots[0].get_pos ().x <= football_now_x and np.fabs(baseRobots[0].get_pos ().x-football_now_y)<1:  # robot 0 is on the left of the ball; NOTE(review): compares x against football_now_y — possible typo, confirm
                    baseRobots[0].breakthrough (football_now_x,football_now_y,futureBallx+1,futureBally)  
                else :
                    # Spin toward the ball side to sweep it away
                    if football_now_y < 0:
                        baseRobots[0].set_wheel_velocity(125, -125)
                    else:
                        baseRobots[0].set_wheel_velocity(-125, 125) 
            else :
                baseRobots[0].shoot (futureBallx,futureBally,10)
                if (futureBallx<-95 and np.fabs(futureBally)<25):
                    baseRobots[0].moveto_dis (-80,futureBally) 
            baseRobots[2].move_in_still_x  (65,futureBally*0.9)  # defender No.2 acts as an interceptor
            baseRobots[3].moveto(25, football_now_y) 
            baseRobots[4].moveto(50, football_now_y)                                                  

def strategy_PlaceKick (field): # Place-kick (kick-off) strategy
    """Place-kick strategy.

    When we kick off: a short scripted forward push, then fall back to
    strategy_common().  When the opponent kicks off: run strategy_common()
    and then override robots 2, 3 and 4 with a zone defence built around
    the predicted ball position for the first 135 ticks.
    """
    global tickBeginPlaceKick
    global race_state_trigger

    football_now_x = field.ball.position.x
    football_now_y = field.ball.position.y
    
    # Collect ball-trajectory samples for training the neural-network predictor
    collect_ball_trajectory_data(field.ball.position, BallPos[:GlobalVariable.tick])
    
    # Compute the ball velocity (defined before the prediction to avoid scoping issues)
    # Bounds check so we never index BallPos before the first tick
    if GlobalVariable.tick > 0:
        ball_velocity_x = football_now_x - BallPos[GlobalVariable.tick - 1].x
        ball_velocity_y = football_now_y - BallPos[GlobalVariable.tick - 1].y
    else:
        ball_velocity_x = 0
        ball_velocity_y = 0
    
    # Use the neural network to predict the ball trajectory when available
    if ball_predictor.is_trained and GlobalVariable.tick > 5:
        # Model is trained and enough history exists: use the NN prediction
        futureBallx, futureBally = predict_ball_trajectory_nn(field.ball.position, BallPos[:GlobalVariable.tick])
        # Extra, further-extrapolated y prediction used in special cases below
        futureBally2 = futureBally + (futureBally - BallPos[GlobalVariable.tick - 1].y) * 0.5
    else:
        # Otherwise fall back to an improved linear extrapolation whose
        # look-ahead factor scales with the current ball speed
        prediction_factor = min(12, 8 + 4 * math.sqrt(ball_velocity_x**2 + ball_velocity_y**2) / 10)
        
        # Bounds check so we never index BallPos before the first tick
        if GlobalVariable.tick > 0:
            futureBallx = prediction_factor * football_now_x - (prediction_factor - 1) * BallPos[GlobalVariable.tick - 1].x
            futureBally = prediction_factor * football_now_y - (prediction_factor - 1) * BallPos[GlobalVariable.tick - 1].y
        else:
            futureBallx = football_now_x
            futureBally = football_now_y
        
        # Bounds check so we never index BallPos before the first tick
        if GlobalVariable.tick > 0:
            futureBally2 = 9 * football_now_y - 8 * BallPos[GlobalVariable.tick - 1].y
        else:
            futureBally2 = football_now_y


    if race_state_trigger == Team.Self:
        # NOTE(review): the timed branch below sits INSIDE this loop, so it
        # executes three times per tick (and may call strategy_common three
        # times) — confirm whether it was meant to run after the loop.
        for i in range(2, 5):
            baseRobots[i].set_wheel_velocity(0, 0)
            if GlobalVariable.tick - tickBeginPlaceKick <= 100:
                if GlobalVariable.tick - tickBeginPlaceKick <= 27:
                    # Opening push: robots 0 and 3 drive straight at full speed
                    baseRobots[0].set_wheel_velocity (125,125)
                    baseRobots[3].set_wheel_velocity (125,125)
                    goalkeeper (field) 
                else:
                    strategy_common(field)                    
            else:
                    strategy_common(field)
    if race_state_trigger == Team.Opponent:
            if GlobalVariable.tick - tickBeginPlaceKick <= 135:
                strategy_common(field)      

                # Robot 4 holds near x=79, with y clamped toward the centre line
                if futureBally > 35 :
                    baseRobots[4].moveto_dis(79,8)     
                elif futureBally < -35  :
                    baseRobots[4].moveto_dis(79,-8)  
                else:
                    baseRobots[4].moveto_dis(79,futureBally2)

                # Robots 2 and 3 shadow the predicted ball x, clamped to the pitch
                if  futureBallx > 55:
                    baseRobots[2].moveto (55,futureBally)                 
                    baseRobots[3].moveto (60,futureBally) 
                elif futureBallx < -65:
                    baseRobots[2].moveto (-60,futureBally)                 
                    baseRobots[3].moveto (-65,futureBally) 
                else:
                     if futureBallx < 0 :
                        baseRobots[2].moveto (futureBallx*1.8,30)                 
                        baseRobots[3].moveto (futureBallx*1.8,-30)  
                     else:
                        baseRobots[2].moveto (futureBallx,30)                 
                        baseRobots[3].moveto (futureBallx,-30)                         

            else:            
                strategy_common(field)


# Predict opponent robot positions and score how threatening each one is
def predict_opponent_positions():
    """Update per-opponent position history, extrapolate each opponent's
    future position from a multi-frame averaged velocity, and compute a
    threat score combining goal distance, heading and speed.

    Writes into the globals ``OppRobotPos`` (10-slot history per robot,
    newest at index 0), ``OppRobotPredictPos`` and ``OppRobotThreat``.
    """
    global OppRobotPos, OppRobotPredictPos, OppRobotThreat
    
    # Predict the position of every opponent robot
    for i in range(5):
        # Current position
        current_pos = oppRobots[i].get_pos()
        
        # Shift the history buffer down by one slot (newest at index 0)
        for j in range(9, 0, -1):
            OppRobotPos[i][j] = OppRobotPos[i][j-1]
        OppRobotPos[i][0] = current_pos
        
        # With enough history, compute a velocity vector and extrapolate the future position
        if GlobalVariable.tick > 5:
            # Velocity averaged over the last 3 frame deltas to reduce noise
            velocity_x = 0
            velocity_y = 0
            for j in range(3):
                if j < len(OppRobotPos[i])-1:
                    velocity_x += (OppRobotPos[i][j].x - OppRobotPos[i][j+1].x)
                    velocity_y += (OppRobotPos[i][j].y - OppRobotPos[i][j+1].y)
            velocity_x /= 3
            velocity_y /= 3
            
            # Look-ahead factor (tune against actual robot speeds as needed)
            prediction_factor = 5
            
            # Extrapolated future position
            future_x = current_pos.x + prediction_factor * velocity_x
            future_y = current_pos.y + prediction_factor * velocity_y
            
            # Clamp the prediction to the field bounds
            future_x = max(-110, min(110, future_x))
            future_y = max(-75, min(75, future_y))
            
            OppRobotPredictPos[i] = Vector2(future_x, future_y)
        else:
            # Not enough history yet: fall back to the current position
            OppRobotPredictPos[i] = current_pos
        
        # Threat score: distance to (110, 0) — presumably our goal centre
        # (TODO confirm side convention), plus heading and speed factors
        distance_to_goal = math.sqrt((110 - current_pos.x)**2 + (0 - current_pos.y)**2)
        
        # Is the robot predicted to move toward increasing x (our goal)?
        moving_toward_goal = False
        if GlobalVariable.tick > 5:
            if OppRobotPredictPos[i].x > current_pos.x:
                moving_toward_goal = True
        
        # Speed magnitude over the most recent frame delta
        speed = 0
        if GlobalVariable.tick > 5:
            dx = OppRobotPos[i][0].x - OppRobotPos[i][1].x
            dy = OppRobotPos[i][0].y - OppRobotPos[i][1].y
            speed = math.sqrt(dx**2 + dy**2)
        
        # Closer to the goal, heading toward it, and moving faster => higher threat
        threat = (1000 / (distance_to_goal + 1)) * (1.5 if moving_toward_goal else 0.5) * (1 + speed/10)
        
        OppRobotThreat[i] = threat

@unbox_field
def get_instruction(field: Field):  # strategy entry point, called every tick
    """Per-tick strategy entry point.

    Updates the robot wrappers and ball history, runs opponent-position
    prediction and the simplified Q-learning layer, dispatches to the
    situational strategy (penalty / goal kick / place kick / common), and
    returns the wheel velocities chosen for the five robots.

    Returns:
        (velocities, reset): a list of five (left_speed, right_speed)
        tuples and a reset flag (0 = no reset, 1 = request reset).

    Fixes vs. the previous revision: the global robot lists are no longer
    appended to on every tick (unbounded growth), the no-op ball-position
    self-assignments were removed, and the per-tick deepcopy of ``field``
    was dropped — only the truthiness of ``prev_field`` was ever used.
    """
    # python start.py 20000    print(field.tick)  # tick starts at 2
    GlobalVariable.tick = field.tick
    global resetHistoryRecord
    global newMatch

    # Remember whether a previous frame exists; prev_field is only ever
    # tested against None below, so a plain reference is sufficient.
    prev_field = field if GlobalVariable.tick > 2 else None

    for i in range(0, 5):  # 0 1 2 3 4
        # Create the wrapper objects lazily, only while the lists are short.
        # (Appending unconditionally every tick grew the global lists without
        # bound even though only indices 0-4 are ever used.)
        if len(baseRobots) <= i:
            baseRobots.append(BaseRobot())
        if len(oppRobots) <= i:
            oppRobots.append(BaseRobot())
        baseRobots[i].update(field.self_robots[i], resetHistoryRecord)
        oppRobots[i].update(field.opponent_robots[i], resetHistoryRecord)
        if field.tick == 2:  # newMatch is True:
            for j in range(0, 8):
                baseRobots[i].HistoryInformation[j] = field.self_robots[i].copy()   # seed history data on the first tick
                baseRobots[i].PredictInformation[j] = field.self_robots[i].copy()   # seed prediction data on the first tick
            newMatch = False
        baseRobots[i].PredictRobotInformation(GlobalVariable.tick_delay)

    football_now_x = field.ball.position.x   # assuming we are the blue side
    football_now_y = field.ball.position.y

    global BallPos
    BallPos[GlobalVariable.tick] = Vector2(football_now_x, football_now_y)
    if resetHistoryRecord is True:
        # After a reset, backfill the last ~10 ticks of ball history with the
        # current position; end_tick is clamped so we never index below 0.
        start_tick = GlobalVariable.tick
        end_tick = max(0, GlobalVariable.tick - 10)
        for i in range(start_tick, end_tick - 1, -1):
            BallPos[i] = Vector2(football_now_x, football_now_y)

    # Predict opponent robot positions and threat scores
    predict_opponent_positions()

    # Simplified Q-learning positioning layer; only active during normal
    # play (not penalty kicks, goal kicks or place kicks).
    if race_state != JudgeResultEvent.ResultType.PenaltyKick and \
       race_state != JudgeResultEvent.ResultType.GoalKick and \
       race_state != JudgeResultEvent.ResultType.PlaceKick:

        # Current RL state vector
        current_state = marl_model.get_state(field)

        # Select an action per agent
        actions = []
        for i in range(marl_model.num_agents):
            # The goalkeeper (index 1) keeps the scripted strategy
            if i == 1:
                actions.append(0)  # stay still
                continue

            # Other players pick an action via the simplified Q-learning model
            action = marl_model.select_action(current_state, i)
            actions.append(action)

        # Convert actions into target positions
        target_positions = []
        for i in range(marl_model.num_agents):
            if i == 1:  # goalkeeper: placeholder, never used
                target_positions.append((0, 0))
            else:
                target_pos = marl_model.action_to_position(actions[i], field.self_robots[i].position)
                target_positions.append(target_pos)

        # Reward / experience bookkeeping (only once a previous frame exists)
        if prev_field is not None:
            rewards = marl_model.calculate_reward(field, current_state)

            next_state = current_state  # current state doubles as next state for this frame
            done = False  # match not over

            # Store an experience tuple for each non-goalkeeper player.
            # NOTE(review): agent slot i (0-3) is paired with actions[i] /
            # rewards[i], not actions[robot_idx] — confirm this matches
            # marl_model's indexing convention.
            for i, robot_idx in enumerate([0, 2, 3, 4]):
                if i < len(actions):
                    marl_model.store_experience(i, current_state, actions[i], rewards[i], next_state, done)

            # Learn from the replay buffer every 10 ticks
            if GlobalVariable.tick % 10 == 0:
                marl_model.learn()

            # Persist the model every 1000 ticks
            if GlobalVariable.tick % 1000 == 0:
                marl_model.save_model()

    #print (race_state)
    # Dispatch to the situational strategy
    if race_state == JudgeResultEvent.ResultType.PenaltyKick:
        strategy_penalty(field)   # penalty strategy 2
    elif race_state == JudgeResultEvent.ResultType.GoalKick:
        strategy_goalkick(field)  # goal-kick strategy 1
    elif race_state == JudgeResultEvent.ResultType.PlaceKick:
        strategy_PlaceKick(field) # place-kick strategy 0
    else:
        strategy_common(field)

    for i in range(0, 5):
        baseRobots[i].save_last_information(football_now_x, football_now_y)
    data_loader.set_tick_state(GlobalVariable.tick, race_state)
    resetHistoryRecord = False

    # Collect the wheel velocities chosen by whichever strategy ran above
    velocity_to_set = []
    for i in range(0, 5):
        velocity_to_set.append((baseRobots[i].robot.wheel.left_speed, baseRobots[i].robot.wheel.right_speed))

    return velocity_to_set, 0    # second element is the reset switch (1 = request reset)


@unbox_field
def get_placement(field: Field) -> List[Tuple[float, float, float]]:
    """Return the auto-placement coordinates for the 5 robots and the ball.

    Called by the platform every time an automatic placement is required
    (place kick, penalty, goal kick, free-kick/face-off).  The return value
    is a list of six ``(x, y, rotation)`` tuples: robots 0-4 followed by the
    ball (the ball's rotation component is ignored by the platform).

    Side effects: advances the module-level penalty/goal-kick decision-tree
    counters (``penalty``, ``goal``, ``goal_time``, ``time``, ``guard_goal``,
    ``defend_flag``, ``flag_penalty``, ``flag_goal``) and records
    ``last_race_state`` / ``last_race_state_trigger`` so the next placement
    can detect repeated judge results.
    """
    set_pos = []  # six [x, y, rotation] rows: robots 0-4, then the ball
    global resetHistoryRecord
    resetHistoryRecord = True
    global flag_penalty, penalty
    global flag_goal, goal
    global last_race_state, last_race_state_trigger
    global guard_goal
    global defend_flag, time

    defend_flag = 0
    global goal_time

    # ---- penalty-kick decision tree ----------------------------------
    # If the previous placement was one of our penalty attempts, rotate to
    # the next of the 5 penalty strategies (wraps 5 -> 1).
    if flag_penalty == 1:
        penalty = penalty + 1
        if penalty > 5:
            penalty = 1
    flag_penalty = -1
    # ---- end penalty-kick decision tree ------------------------------

    # ---- goal-kick decision tree -------------------------------------
    if flag_goal == 1:  # set to 1 when we last entered a goal-kick placement
        # Keep the foul-baiting strategy when either:
        #  1. baiting succeeded (same judge result repeats), or
        #  2. the opponent has fouled several times in a row.
        if last_race_state == race_state or (race_state == JudgeResultEvent.ResultType.PlaceKick and race_state_trigger != Team.Self and goal_time >= 3):
            goal = 1
            goal_time = goal_time + 1
            if goal_time >= 3:
                goal_time = 0
        # Otherwise: opponent did not foul / baiting failed.
        else:  # whichever goal-kick strategy scored keeps being used
            if race_state == JudgeResultEvent.ResultType.PlaceKick and race_state_trigger != Team.Self:  # previous goal-kick strategy scored
                flag_goal = 0
            else:
                goal = goal + 1
                if goal >= 4:
                    if time >= 3:
                        goal = 2
                    else:
                        goal = 1
                time = time + 1

    flag_goal = 0

    # Two consecutive opponent goal kicks suggest the opponent is baiting
    # fouls, so switch the keeper to the more cautious mode (guard_goal=2).
    # BUGFIX: the second trigger check used `race_state` (a ResultType)
    # instead of `race_state_trigger` (a Team), making it meaningless.
    # NOTE(review): the literal 1 is assumed to be the GoalKick result code
    # (see the strategy dispatch comments in get_instruction) — confirm.
    if (last_race_state == 1 and race_state == 1) and (last_race_state_trigger != Team.Self and race_state_trigger != Team.Self):
        guard_goal = 2
    else:
        guard_goal = 1
    # ---- end goal-kick decision tree ---------------------------------

    if race_state == JudgeResultEvent.ResultType.PlaceKick:
        global tickBeginPlaceKick
        tickBeginPlaceKick = field.tick
        if race_state_trigger == Team.Self:
            print("开球进攻摆位")
            set_pos = [
                [-7, -7.5, 55],
                [GlobalVariable.goalkepper_X, 0, 90],  # goalkeeper
                [5, 32, 180],
                [55.8, 61.5, -130],
                [50, 0, 180],
                [0.0, 0.0, 0.0]]
        else:   # race_state_trigger == Team.Opponent
            print("开球防守摆位")
            set_pos = [
                [30, 0, 180],
                [GlobalVariable.goalkepper_X, 0, 90],
                [7, 30, 180],
                [7, -30, 180],
                [79, 0, 180],
                [0.0, 0.0, 0.0]]
    elif race_state == JudgeResultEvent.ResultType.PenaltyKick:
        global tickBeginPenalty
        tickBeginPenalty = field.tick
        if race_state_trigger == Team.Self:
            print("点球进攻摆位")
            if penalty == 2:  # direct-shot strategy
                set_pos = [
                       [-64.5, 6, 35],
                       [GlobalVariable.goalkepper_X, 0, 90],
                       [0, 50, 180],
                       [10, -50, 160],
                       [0, 0, -180],
                       [-5, -10, 0.0]]
            elif penalty == 1:  # elaborate C-shape strategy
                set_pos = [
                       [-64, -1.2, -8.6],
                       [GlobalVariable.goalkepper_X, 0, 90],
                       [10, -70, 14],
                       [10, 60, -30],
                       [10, -60, 14],
                       [-5, 10, 0.0]]
            elif penalty == 4:  # teammate-assisted shot strategy
                set_pos = [
                       [-76.5, -9, 70],
                       [GlobalVariable.goalkepper_X, 0, 90],
                       [5, 55, -180],
                       [5, 75, -165],
                       [5, 85, -165],
                       [-5, -10, 0.0]]
            elif penalty == 3:  # teammate-assisted shot strategy
                set_pos = [
                       [-76.5, -9, 70],
                       [GlobalVariable.goalkepper_X, 0, 90],
                       [0, 50, 180],
                       [5, 60, -170],
                       [0, 0, -180],
                       [-5, -10, 0.0]]
            elif penalty == 5:  # simple C-shape strategy
                set_pos = [
                       [-67.5, -8, 90],
                       [GlobalVariable.goalkepper_X, 0, 90],
                       [0, 50, 180],
                       [10, -50, 160],
                       [0, 0, -180],
                       [-5, -10, 0.0]]
        else:   # race_state_trigger == Team.Opponent
            defend_flag = 1
            print("点球防守摆位")
            set_pos = [
                       [-10, 50, -40],
                       [GlobalVariable.goalkepper_X, 0, 180],
                       [-10, -40, 40],
                       [-7, -65, -15],
                       [-7, 65, 15],
                       [0, 0.0, 0.0]]
    elif race_state == JudgeResultEvent.ResultType.GoalKick:
        global tickBeginGoalKick
        tickBeginGoalKick = field.tick
        if race_state_trigger == Team.Self:
            flag_goal = 1

            print("门球进攻摆位")
            if goal == 1:
                set_pos = [
                        [5, 85, -90],
                        [100, 9.05, 90],
                        [5, -85, 90],
                        [30, 85, -90],
                        [30, -85, 90],
                        [105, 9.05, 0.0]]
            elif goal == 2:
                set_pos = [
                        [50, -20, 90],
                        [102, 5.05, 90],
                        [50, -40, 90],
                        [30, 0, 90],
                        [50, 0, 90],
                        [104.5, 9.05, 0.0]]
            elif goal == 3:
                set_pos = [
                        [5, 85, 90],
                        [106, -4, 180],
                        [5, -80, 90],
                        [30, 80, 90],
                        [30, -80, 90],
                        [98, -4, 0.0]]
        else:   # race_state_trigger == Team.Opponent
            print("门球防守摆位")
            set_pos = [
                       [5, 0, 180],
                       [GlobalVariable.goalkepper_X, 0, 90],
                       [65, -40, 180],
                       [25, 40, 180],
                       [50, 0, 180],
                       [0.0, 0.0, 0.0]]
    elif (race_state == JudgeResultEvent.ResultType.FreeKickLeftTop
          or race_state == JudgeResultEvent.ResultType.FreeKickRightTop
          or race_state == JudgeResultEvent.ResultType.FreeKickRightBot
          or race_state == JudgeResultEvent.ResultType.FreeKickLeftBot):
        if race_state_trigger == Team.Self:
            print("争球进攻摆位")
            set_pos = [
                       [30, 0, 0],
                       [GlobalVariable.goalkepper_X, 0, 90],
                       [-3, -10, 0],
                       [-3, 10, 0],
                       [-3, 0, 0],
                       [0.0, 0.0, 0.0]]
        else:   # race_state_trigger == Team.Opponent
            print("争球防守摆位")
            set_pos = [
                       [30, 0, 0],
                       [GlobalVariable.goalkepper_X, 0, 90],
                       [10, -10, 0],
                       [10, 10, 0],
                       [10, 0, 0],
                       [0.0, 0.0, 0.0]]
    else:
        print("race_state = " + str(race_state))

    # BUGFIX: an unrecognized race_state used to leave set_pos empty and
    # crash with IndexError below; fall back to a neutral defensive layout.
    if not set_pos:
        set_pos = [
                   [30, 0, 0],
                   [GlobalVariable.goalkepper_X, 0, 90],
                   [10, -10, 0],
                   [10, 10, 0],
                   [10, 0, 0],
                   [0.0, 0.0, 0.0]]

    final_set_pos = [(pos[0], pos[1], pos[2]) for pos in set_pos]

    last_race_state = race_state  # remember this judge result for next time
    last_race_state_trigger = race_state_trigger

    print(final_set_pos)
    return final_set_pos  # last entry is the ball (x, y, angle); angle unused