import torch.nn as nn
import torch.optim as optim
import numpy as np
from util import *
import copy
import pickle
import pymetis
from pymetis import Options 
import scipy.sparse as sp
from itertools import cycle
import random
import hashlib
import torch
import json
import os





class OurMethod:
    """Microservice deployment and task-scheduling strategy.

    Combines three mechanisms:
      * initial placement via METIS k-way community partitioning of the
        microservice-kind call graph (``init_deploy`` / ``k_way_cut``),
      * periodic per-kind scaling decided by a stateless RL agent
        (``deploy`` / ``StatelessRLAgent``), plus heuristic migration,
      * deadline/reliability-aware task scheduling (``schedule`` and helpers)
        driven by a pairwise instance call-count matrix (``ms_call_matrix``).

    NOTE(review): this class uses ``math`` without an explicit import in this
    file — it is assumed to arrive via ``from util import *``; confirm util
    imports math.
    """

    def __init__(self,cloud_system, agent_para_path):
        """Store the simulation facade, initialize bookkeeping, create the agent.

        Args:
            cloud_system: simulation facade exposing nodes, call graphs,
                the simpy env, and metric queries.
            agent_para_path: file path used to persist/restore RL agent weights.
        """
        self.cloud_system=cloud_system
        self.neural_network=None
        ####################################
        # call info: symmetric matrix of observed call counts between the
        # currently deployed microservice instances (row/col order matches
        # ms_id_list_exist)
        self.ms_call_matrix=None
        ####################################
        # ids of currently deployed microservice instances
        self.ms_id_list_exist=None
        # kind (type name) of each deployed instance, parallel to the id list
        self.ms_kind_list_exist=None
        # cluster (bare-metal node) index of each instance, parallel to the id list
        self.ms_cluster_index_exist=None
        ####################################
        # current and target instance counts per microservice kind
        # (indexed like cloud_system.ms_kind_list)
        self.ms_num_for_ms_kind_exist=None
        self.ms_num_for_ms_kind_aim=None
        ####################################
        # True once deployed counts came from the agent's own output, which
        # enables reward-based updates on the following interval
        self.network_deploy_result=False
        self.agent_para_path=agent_para_path
        self.init_agent()
        return
        
    def init_agent(self):
        """Create the RL agent and restore saved parameters when available."""
        state_dim = 14  # length of deploy_get_state(): 10-dim name hash + 4 metrics
        action_dim = 1  # NOTE(review): with a single action, argmax is always 0 — confirm this is intended
        self.agent = StatelessRLAgent(state_dim, action_dim, learning_rate=0.001, exploration_rate=0.2)
        if os.path.exists(self.agent_para_path):
            self.agent.load_model(self.agent_para_path)
        
    
    def add_ms_update_record(self, ms_id, ms_kind, ms_cluster_index):
        """Register a newly deployed instance in all parallel bookkeeping lists
        and grow the call matrix by one zero row and one zero column."""
        # append the new instance id
        self.ms_id_list_exist.append(ms_id)
        # record its kind
        self.ms_kind_list_exist.append(ms_kind)
        # record its community (node) index
        self.ms_cluster_index_exist.append(ms_cluster_index)
        # bump the per-kind instance counter
        self.ms_num_for_ms_kind_exist[self.cloud_system.ms_kind_list.index(ms_kind)]+=1
        # extend the call matrix: zero row first, then zero column
        new_row=np.zeros((1,self.ms_call_matrix.shape[1]))
        self.ms_call_matrix=np.vstack((self.ms_call_matrix, new_row))
        new_col=np.zeros((self.ms_call_matrix.shape[0],1))
        self.ms_call_matrix=np.hstack((self.ms_call_matrix, new_col))
        
    def rm_ms_update_record(self, ms_id, ms_kind):
        """Remove an instance from all bookkeeping lists and shrink the call
        matrix by deleting its row and column."""
        # locate and remove the instance id
        ms_id_index=self.ms_id_list_exist.index(ms_id)
        self.ms_id_list_exist.pop(ms_id_index)
        # drop its kind entry
        self.ms_kind_list_exist.pop(ms_id_index)
        # drop its community index entry
        self.ms_cluster_index_exist.pop(ms_id_index)
        # decrement the per-kind instance counter
        self.ms_num_for_ms_kind_exist[self.cloud_system.ms_kind_list.index(ms_kind)]-=1
        # shrink the call matrix (row then column at the same index)
        self.ms_call_matrix=np.delete(self.ms_call_matrix,ms_id_index,0)
        self.ms_call_matrix=np.delete(self.ms_call_matrix,ms_id_index,1)
        
        
    
    def get_adj_matrix_update_ms_kind_adj(self,task, ms_kind_adj, ms_kind_list):
        """Recursively accumulate, into ``ms_kind_adj``, the (symmetric)
        call counts between a task's kind and each sub-task's kind.

        Note: the weight added per edge is the parent ``task.call_num``.
        """
        for sub_task in task.sub_task_list:
            task_index=ms_kind_list.index(task.ms_kind)
            sub_task_index=ms_kind_list.index(sub_task.ms_kind)
            call_num=task.call_num
            ms_kind_adj[task_index][sub_task_index]+=call_num
            ms_kind_adj[sub_task_index][task_index]+=call_num
            self.get_adj_matrix_update_ms_kind_adj(sub_task, ms_kind_adj, ms_kind_list)
    
    def get_adj_matrix(self, each_ms_kind_deploy_number):
        """Build the planned per-instance call adjacency matrix.

        First aggregates call counts per microservice *kind* over all service
        call graphs, then expands each kind into
        ``each_ms_kind_deploy_number`` planned instances, every instance pair
        inheriting the kind-level call weight.

        Returns:
            (ms_adj, ms_list): dense symmetric weight matrix and the parallel
            list of kinds (with repetitions, one entry per planned instance).
        """
        # per-kind adjacency accumulated from every service's call graph
        ms_kind_list=self.cloud_system.ms_kind_list
        ms_kind_adj=np.zeros((len(ms_kind_list), len(ms_kind_list)))
        for service_name in self.cloud_system.service_to_callgraph_dict.keys():
            request_t=self.cloud_system.service_to_callgraph_dict[service_name]
            for task_t in request_t.start_task_list:
                self.get_adj_matrix_update_ms_kind_adj(task_t, ms_kind_adj,ms_kind_list)
        # expand the kind-level adjacency into the planned instance-level one
        ms_list=[]
        for ms_kind in ms_kind_list:
            for order in range(each_ms_kind_deploy_number):
                ms_list.append(ms_kind)
        ms_adj=np.zeros((len(ms_list), len(ms_list)))
        for i in range(len(ms_list)):
            for j in range(0, i):
                # map instance index back to its kind index
                pre_index_i=math.floor(i/each_ms_kind_deploy_number)
                pre_index_j=math.floor(j/each_ms_kind_deploy_number)
                call_num=ms_kind_adj[pre_index_i][pre_index_j]
                ms_adj[i][j]=call_num
                ms_adj[j][i]=call_num
                
        return ms_adj, ms_list
                
     
    
    def k_way_cut(self,adj_matrix, partition_num, imbalance=1.2):
        """Partition the weighted call graph into ``partition_num`` parts via METIS.

        METIS balance semantics: ideal = total vertices / k;
        upper bound per part = ideal * (1 + ufactor/1000).

        Args:
            adj_matrix: dense (symmetric) edge-weight matrix.
            partition_num: number of parts (one per bare-metal node).
            imbalance: allowed load-imbalance factor (1.2 -> ufactor 200).

        Returns:
            (parts, objval): per-vertex partition ids and the edge-cut value.
        """
        adj=sp.csr_matrix(adj_matrix)
        # 1) CSR form; re-symmetrize defensively (element-wise max)
        csr = adj.tocsr()
        if (csr - csr.T).nnz:
            csr = csr.maximum(csr.T)

        xadj     = csr.indptr.astype(np.int32)
        adjncy   = csr.indices.astype(np.int32)
        eweights = csr.data.astype(np.int32)
        # 2) imbalance mapping: ufactor = (imbalance - 1) * 1000  (1.05 -> 50)
        ufactor  = int(round((imbalance - 1) * 1000))

        # 3) build Options — pymetis requires the Options class, not a dict
        opts = Options(ufactor=ufactor, numbering=0)    # numbering=0 means C-style 0…n-1 vertex ids

        # 4) call part_graph (no ubvec / adjwgt arguments)
        objval, parts = pymetis.part_graph(
            partition_num,
            xadj=xadj,
            adjncy=adjncy,
            eweights=eweights,
            options=opts,
        )
        return np.asarray(parts), objval
    
    
    def init_deploy_according_to_community_division(self, ms_kind_list, node_index_list):
        """Deploy one instance per (kind, node) pair and reset all bookkeeping.

        Unlike ``cloud_system.ms_kind_list``, the ``ms_kind_list`` given here
        may contain repeated kinds (one entry per planned instance);
        ``node_index_list`` is parallel to it.
        Exits the process if a node cannot host its assigned instances.
        """
        self.ms_id_list_exist=[]
        self.ms_kind_list_exist=[]
        self.ms_cluster_index_exist=[]
        self.ms_num_for_ms_kind_exist=[0]*len(self.cloud_system.ms_kind_list)
        self.ms_num_for_ms_kind_aim=[0]*len(self.cloud_system.ms_kind_list)
        for index in range(len(ms_kind_list)):
            ms_kind=ms_kind_list[index]
            node_index=node_index_list[index]
            result, ms_id, node_id= self.cloud_system.bare_metal_node_list[node_index].deploy_ms(ms_kind)
            if result==False:
                print("The initial number of microservices was too large, resulting in the\
                    inability of a single node to accommodate the number of microservices after community division.")
                exit(-1)

            self.ms_id_list_exist.append(ms_id)
            self.ms_kind_list_exist.append(ms_kind)
            self.ms_cluster_index_exist.append(node_index)
            index_new=self.cloud_system.ms_kind_list.index(ms_kind)
            self.ms_num_for_ms_kind_exist[index_new]+=1
        
        # fresh call matrix, sized to the deployed instance count
        self.ms_call_matrix=np.zeros((len(self.ms_id_list_exist), len(self.ms_id_list_exist)))
        
        
    def get_init_deploy_strategy(self,each_ms_kind_deploy_number,init_deploy_strategy_file_path):
        """Compute the initial placement (adjacency -> METIS cut -> deploy)
        and cache the resulting strategy to ``init_deploy_strategy_file_path``
        via pickle for reuse across runs."""
        if self.cloud_system.print_level>1:
            print("get init deploy strategy...")
        adj_matrix, ms_kind_list=self.get_adj_matrix(each_ms_kind_deploy_number)
        
        node_index_list, cut=self.k_way_cut(adj_matrix, self.cloud_system.args.bare_metal_node_num)
        self.init_deploy_according_to_community_division(ms_kind_list, node_index_list)
        
        with open(init_deploy_strategy_file_path, "wb") as file:
            storage_data={
                "node_num":self.cloud_system.args.bare_metal_node_num,
                "each_ms_kind_deploy_number":each_ms_kind_deploy_number,
                "ms_kind_list":ms_kind_list,
                "node_index_list":node_index_list,
            }
            pickle.dump(storage_data, file)
            
        return
        
    
    def init_deploy(self, interval, each_ms_kind_deploy_number):
        """Perform the initial placement, reusing the pickled strategy when it
        exists and matches the current node count / per-kind deploy number;
        otherwise recompute it from scratch."""
        init_deploy_strategy_file_path=self.cloud_system.args.dealed_history_var_file+".init_deploy_strategy"
        
        if os.path.exists(init_deploy_strategy_file_path):
            with open(init_deploy_strategy_file_path, "rb") as file:
                storage_data=pickle.load(file)
                file_node_num=storage_data["node_num"]
                file_each_ms_kind_deploy_number=storage_data["each_ms_kind_deploy_number"]
                if file_node_num != self.cloud_system.args.bare_metal_node_num or file_each_ms_kind_deploy_number!=each_ms_kind_deploy_number:
                    # cached strategy was built for a different configuration
                    self.get_init_deploy_strategy(each_ms_kind_deploy_number,init_deploy_strategy_file_path)
                else:
                    ms_kind_list=storage_data["ms_kind_list"]
                    node_index_list=storage_data["node_index_list"]
                    self.init_deploy_according_to_community_division(ms_kind_list, node_index_list)
        else:
            self.get_init_deploy_strategy(each_ms_kind_deploy_number,init_deploy_strategy_file_path)
            
        return
        
    def deploy_adjust_ms_obj_num(self, ms_kind, change_num):
        """Add (``change_num`` > 0) or remove (``change_num`` < 0) instances of
        one microservice kind.

        Additions go preferentially to the nodes with the largest cross-node
        call volume involving this kind (to reduce cross-node traffic);
        removals pick the least-called instances first.
        """
        if change_num==0:
            return
        elif change_num>0:
            # demand score per node (community) for this microservice kind
            cluster_acc_cross_dict={}
            for cluster_index in range(self.cloud_system.args.bare_metal_node_num):
                cluster_acc_cross_dict[cluster_index]=0
            # when adding, the goal is to reduce cross-node calls:
            # accumulate each node's cross-node call volume for this kind
            for row_index in range(len(self.ms_id_list_exist)):
                ms_kind_spec=self.ms_kind_list_exist[row_index]
                if ms_kind_spec != ms_kind:
                    continue
                cluster_index=self.ms_cluster_index_exist[row_index]
                for col_index in range(len(self.ms_id_list_exist)):
                    cluster_index_2=self.ms_cluster_index_exist[col_index]
                    if cluster_index_2 != cluster_index:
                        # different cluster means this is a cross-community call
                        cluster_acc_cross_dict[cluster_index_2]+=self.ms_call_matrix[row_index][col_index]
            # sort node indices by demand, descending
            sorted_keys = sorted(cluster_acc_cross_dict, key=lambda k: cluster_acc_cross_dict[k], reverse=True)
            add_ms_num=0
            # NOTE(review): if every node keeps refusing deploy_ms, this
            # cycle() never terminates — confirm capacity eventually frees up
            for node_index in cycle(sorted_keys):
                flage, ms_id, node_id=self.cloud_system.bare_metal_node_list[node_index].deploy_ms(ms_kind)
                if flage==True:
                    add_ms_num+=1
                    self.add_ms_update_record( ms_id, ms_kind, node_index)
                
                if add_ms_num==change_num:
                    break
        
        # when removing, drop the instances that receive the fewest calls
        elif change_num<0:
            reduce_num=-change_num
            # total calls per instance (regardless of caller's node)
            cluster_acc_call_dict={}
            row_sum=np.sum(self.ms_call_matrix, axis=1)
            for row_index in range(len(self.ms_id_list_exist)):
                ms_id=self.ms_id_list_exist[row_index]
                ms_kind_spec=self.ms_kind_list_exist[row_index]
                if ms_kind_spec != ms_kind:
                    continue
                cluster_acc_call_dict[ms_id]=row_sum[row_index]
            # sort instance ids by call volume, ascending
            sorted_keys = sorted(cluster_acc_call_dict, key=lambda k: cluster_acc_call_dict[k])
            rm_ms_num=0
            # NOTE(review): if reduce_num exceeds len(sorted_keys), cycle()
            # revisits already-removed ids and .index() raises ValueError
            for ms_id_t in cycle(sorted_keys):
                index=self.ms_id_list_exist.index(ms_id_t)
                cluster_index=self.ms_cluster_index_exist[index]
                self.cloud_system.bare_metal_node_list[cluster_index].remove_ms(ms_id_t)
                self.rm_ms_update_record(ms_id_t,ms_kind)
                rm_ms_num+=1
                if rm_ms_num==reduce_num:
                    break
        else:
            # unreachable: the three branches above cover every integer
            print("change num wrong")
            exit(-1)
            
        return
    
    
    def deploy_migration_ms(self, ms_number):
        """Randomly select ``ms_number`` instances; migrate each one to the
        community that generates the most calls to it. If the current
        community already dominates, the migration is skipped."""
        selected_ms_id_list=random.sample(self.ms_id_list_exist, ms_number)
        for ms_id in selected_ms_id_list:
            row_index=self.ms_id_list_exist.index(ms_id)
            ms_kind=self.ms_kind_list_exist[row_index]
            node_index_now=self.ms_cluster_index_exist[row_index]
            # call volume towards this instance, grouped by caller's node
            cluster_acc_call_dict={}
            for col_index in range(len(self.ms_id_list_exist)):
                if col_index != row_index:
                    node_index_t=self.ms_cluster_index_exist[col_index]
                    if node_index_t not in cluster_acc_call_dict.keys():
                        cluster_acc_call_dict[node_index_t]=self.ms_call_matrix[row_index][col_index]
                    else:
                        cluster_acc_call_dict[node_index_t]+=self.ms_call_matrix[row_index][col_index]
            # sort candidate nodes by call volume, descending
            sorted_keys = sorted(cluster_acc_call_dict, key=lambda k: cluster_acc_call_dict[k], reverse=True)
            for node_index_aim in sorted_keys:
                if node_index_aim == node_index_now:
                    # current node dominates -> keep the instance where it is
                    break
                # migrate: deploy the replacement first, then remove the original
                flage_t, ms_id_new, node_id=self.cloud_system.bare_metal_node_list[node_index_aim].deploy_ms(ms_kind)
                if flage_t==True:
                    self.add_ms_update_record( ms_id_new, ms_kind, node_index_aim)
                    self.cloud_system.bare_metal_node_list[node_index_now].remove_ms(ms_id)
                    self.rm_ms_update_record(ms_id, ms_kind)
                    break
                
        return 
    
    def deploy_get_state(self, ms_kind):
        """Build the 14-dim agent state for one kind: a 10-value hash encoding
        of the kind name plus instance count, average waiting time, average
        completion time, and the sub-deadline violation ratio."""
        ms_kind_code=self.string_to_fixed_list(ms_kind, list_length=10, min_val=0, max_val=100)
        ms_num_of_this_ms_kind,ave_waiting_time, ave_complete_time, ratio_of_deadline_violation=self.cloud_system.get_key_info(ms_kind)
        out_put=ms_kind_code
        out_put.extend([ms_num_of_this_ms_kind,ave_waiting_time,ave_complete_time,ratio_of_deadline_violation])
        return    out_put
        
    def deploy_get_reward(self, ms_kind):
        """Reward = -(sub-deadline violation ratio) - (ms idle ratio);
        both terms are penalties, so higher (closer to 0) is better."""
        ratio_of_deadline_violation, ratio_of_ms_idle=self.cloud_system.get_mean_ratio_of_deadline_violation_ms_idle(ms_kind)
        
        return -ratio_of_deadline_violation-ratio_of_ms_idle
    
    def deploy(self, interval, each_ms_kind_deploy_number=0):
        """Simpy process: periodic deployment step.

        At t=0 performs the initial community-based placement; afterwards it
        (1) updates the agent with last interval's rewards (only once the
        previous counts came from the agent), (2) queries the agent for new
        per-kind target counts, (3) scales each kind, (4) migrates a fixed
        number of instances, then halves the call matrix to age out history
        and re-schedules itself every ``interval`` time units.
        """
        print(f" {self.cloud_system.env.now} start deploy ...")
        if self.cloud_system.env.now==0:
            # initial placement
            self.init_deploy(interval=interval, each_ms_kind_deploy_number=each_ms_kind_deploy_number)
        else:  
            # incremental deployment
            # parameter update first; it needs data from the previous interval.
            # skipped on the first incremental round because those counts did
            # not come from the current network's output
            if self.network_deploy_result==True:
                # learning pass: reward for the state/action recorded last round
                for index in range(len(self.cloud_system.ms_kind_list)):
                    ms_kind=self.cloud_system.ms_kind_list[index]
                    reward=self.deploy_get_reward(ms_kind)
                    state=self.state_action_record[ms_kind]["state"]
                    action=self.state_action_record[ms_kind]["action"]
                    self.agent.update(state, action, reward)
                    
                    
            # forward pass: decide the target instance count per kind
            self.state_action_record={}   # records state, action (reward is computed next round)
            self.network_deploy_result=True
            for index in range(len(self.cloud_system.ms_kind_list)): 
                ms_kind=self.cloud_system.ms_kind_list[index]
                state=self.deploy_get_state(ms_kind)
                action=self.agent.get_action(state)
                temp_dict={}
                temp_dict["state"]=state
                temp_dict["action"]=action
                self.state_action_record[ms_kind]=temp_dict
                self.ms_num_for_ms_kind_aim[index]=action
            # at this point the target count per kind is known
            
            # scale each kind up/down (keeping at least one instance per kind)
            for index in range(len(self.cloud_system.ms_kind_list)):
                ms_kind=self.cloud_system.ms_kind_list[index]
                ms_kind_change=max(self.ms_num_for_ms_kind_aim[index],1)-self.ms_num_for_ms_kind_exist[index]
                self.deploy_adjust_ms_obj_num(ms_kind, ms_kind_change)

            # then migrate a fixed number of instances this round
            self.deploy_migration_ms(self.cloud_system.args.our_method_ms_migration_num)
        # halve the matrix to decay the influence of historical call data
        self.ms_call_matrix=self.ms_call_matrix/2
        print("end deploy ...")
        
        yield self.cloud_system.env.timeout(interval)
        if not self.cloud_system.system_end:
            # re-arm; each_ms_kind_deploy_number defaults to 0 and is only
            # used on the t=0 branch, so omitting it here is safe
            self.cloud_system.env.process(self.deploy(interval))
        else:
            self.agent.save_model(self.agent_para_path)
        return
   
   
    def schedule(self):
        """Scheduling pass over the pending task queue.

        Lazily attaches a deep-copied base call graph per request, refreshes
        upward ranks / sub-deadlines / time windows when the graph changed,
        computes the replica count, and dispatches each task to the earliest
        available instances. Tasks that cannot be placed stay queued.
        """
        not_scheduled_tasks=[]
        for task in self.cloud_system.pending_task_queue:
            need_update_flage=False
            if task.request.request_base==None:
                # first task of this request: attach a private copy of the call graph
                base_request=self.cloud_system.service_to_callgraph_dict[task.request.service_name]
                task.request.set_deepcopy_request_base(copy.deepcopy(base_request))
                
                # refresh start_task_list on the copy
                task.request.request_base.update_base_start_task_list(task)
                task.request.request_base.reset_for_our_method(task.request.reliability, task.request.deadline)
                need_update_flage=True
            
            if len(task.sub_task_list)<len(task.request.request_base.task_id_to_obj[task.task_id].sub_task_list):
                # some sub-tasks were dropped -> the base info is stale
                need_update_flage=True   
            task.update_prohibit_ms_id_list()
            spec_request_base=task.request.request_base
            # after sub-task removal, refresh the base request's derived data
            if need_update_flage==True:
                task.request.request_base.update_base_info(task)
                # recompute uprank and sub_deadline for every task in the base request
                self.schedule_get_upward_rank(spec_request_base)
                self.schedule_get_sub_deadline(spec_request_base)
                self.schedule_get_early_start_latest_end_time(spec_request_base)
            # NOTE(review): leftover debugging anchor for one specific task id
            if task.task_id=="|T_0-MS_70930|":
                a=1
            replica_num=self.schedule_get_replica(spec_request_base,spec_request_base.task_id_to_obj[task.task_id])
            result=self.schedule_to_early_available_ms(task, replica_num)
            # False means scheduling failed for now; retry next pass
            if result==False:
                not_scheduled_tasks.append(task)
            else:
                self.schedule_update_matrix(task)
        # replace the queue with the tasks that remain unscheduled
        self.cloud_system.pending_task_queue=not_scheduled_tasks
        return 
    
    def schedule_to_early_available_ms(self, task, replica_num):
        """Dispatch ``replica_num`` copies of ``task`` to the instances with the
        earliest estimated start time (transfer time + queue backlog).

        Returns False when no eligible instance exists (all prohibited or none
        running), True after all replicas are enqueued.
        """
        # all running instances of the task's kind
        ms_obj_list=self.cloud_system.get_available_ms_obj_with_ms_kind(task.ms_kind)
        # estimate each instance's earliest start time, then sort ascending
        ms_obj_with_early_start_time_list=[]
        for ms_obj in ms_obj_list:
            # skip instances this task is prohibited from using
            if ms_obj.ms_id in task.prohibit_ms_id_list:
                continue
            queue_len=ms_obj.get_queue_len()
            TT=0
            if task.parent_task!=None:
                # parent may have several replicas: average the transfer times
                for parent_ms_obj in task.parent_task.aim_ms_obj_list:
                    TT+=self.cloud_system.get_TT_time(task.parent_task,parent_ms_obj ,task, ms_obj)
                TT=TT/len(task.parent_task.aim_ms_obj_list)
            early_start_time=TT+queue_len*task.deal_time
            ms_obj_with_early_start_time_list.append([early_start_time, ms_obj])
        if len(ms_obj_with_early_start_time_list)==0:
            return False
        ms_obj_with_early_start_time_list.sort(key=lambda x:x[0])
        # enqueue each replica on the currently-earliest instance, then re-sort
        for _ in range(replica_num):
            aim_ms=ms_obj_with_early_start_time_list[0][1]
            aim_ms.put_task_to_queue(task)
            ms_obj_with_early_start_time_list[0][0]+=task.deal_time
            ms_obj_with_early_start_time_list.sort(key=lambda x:x[0])
            task.aim_ms_obj_list.append(aim_ms)
        return True
    
    def schedule_update_matrix(self, task):
        """Record parent->child call counts in the symmetric call matrix.

        The first task of a request has no parent, so nothing is recorded;
        instances no longer tracked (e.g. removed replicas) are skipped.
        """
        if task.parent_task==None:
            return
        for ms_obj in task.aim_ms_obj_list:
            if ms_obj.ms_id not in self.ms_id_list_exist:
                continue
            ms_id_index=self.ms_id_list_exist.index(ms_obj.ms_id)
            for ms_obj_parent_task in task.parent_task.aim_ms_obj_list:
                if ms_obj_parent_task.ms_id not in self.ms_id_list_exist:
                    continue
                ms_id_parent_index=self.ms_id_list_exist.index(ms_obj_parent_task.ms_id)
                self.ms_call_matrix[ms_id_index][ms_id_parent_index]+=1
                self.ms_call_matrix[ms_id_parent_index][ms_id_index]+=1
        
        
        
    def schedule_get_replica(self, request_base,task_t):
        """Compute (and cache on the task) the replica count needed to meet the
        request's reliability target, updating the request's accumulated
        reliability as each task is processed."""
        if task_t.got_replica_flage==True:
            return task_t.replica_num
        # per-task reliability share if spread evenly over all tasks
        single_up_relia=math.pow(request_base.reliability, 1/request_base.all_task_num)
        # reliability this task must achieve given what is already accumulated
        # NOTE(review): bare except leaves this_req undefined on failure —
        # the use below would then raise NameError; confirm the intent
        try:
            this_req=request_base.reliability/(request_base.already_reliability*math.pow(single_up_relia, request_base.all_task_num-request_base.already_num-1))
        except:
            a=1
        # failure-rate parameter of this task's microservice kind
        ms_lambda=self.cloud_system.ms_kind_to_transient_lambda[task_t.ms_kind]
        # probability a single replica runs without failure in its time window
        single_reliability=get_reliability_without_failure(task_t.latest_end_time-task_t.early_start_time,ms_lambda)
        # replicas needed to reach this_req
        replica_num=get_aim_replica_num(this_req, single_reliability)
        # update and cache replica/reliability bookkeeping on the request
        request_base.already_reliability=request_base.already_reliability*(1-math.pow(1-single_reliability, replica_num))
        request_base.already_num+=1
        task_t.got_replica_flage=True
        task_t.replica_num=replica_num
        return replica_num
    
    
    def schedule_get_upward_rank(self, request_base):
        """Compute each task's upward rank bottom-up (leaves first).

        uprank = execution time + max over children of (transfer time to the
        child's sampled instance + child's uprank). Transfer times are
        estimated against one randomly chosen instance per task.
        """
        waiting_deal_task_list=[]
        dealed_task_list=[]
        ms_record_for_task={} # task_id -> sampled ms_obj used for TT estimates
        for task in request_base.task_id_to_obj.values():
            # seed the worklist with the leaf tasks
            if len(task.sub_task_list)==0:
                waiting_deal_task_list.append(task)
        while len(waiting_deal_task_list)>0:
            task_t=waiting_deal_task_list.pop(0) 
            if len(task_t.sub_task_list)==0 or list_is_sub_of_another_list(task_t.sub_task_list,dealed_task_list):
                # all children done (or leaf): this task can be ranked now
                ms_obj_list=self.cloud_system.get_available_ms_obj_with_ms_kind(task_t.ms_kind)
                aim_ms_obj=self.cloud_system.random_generator.choice(ms_obj_list)
                ET=task_t.deal_time
                max_succ_uprank=0
                for succ_task in task_t.sub_task_list:
                    TT=self.cloud_system.get_TT_time(task_t, aim_ms_obj, succ_task, ms_record_for_task[succ_task.task_id])
                    succ_uprank=succ_task.uprank
                    max_succ_uprank=max(max_succ_uprank, TT+succ_uprank)
                task_t.uprank=max_succ_uprank+ET
                dealed_task_list.append(task_t)
                ms_record_for_task[task_t.task_id]=aim_ms_obj
                if task_t.parent_task != None and task_t.parent_task not in waiting_deal_task_list:
                    waiting_deal_task_list.append(task_t.parent_task)
            else:
                # children pending: push to the back and retry later
                waiting_deal_task_list.append(task_t)
        return
    
    def schedule_get_sub_deadline(self, request_base):
        """Distribute the service deadline over tasks proportionally to their
        upward-rank progress (entry task's uprank = critical-path length)."""
        uprank_entry=max(request_base.start_task_list, key=lambda x:x.uprank).uprank
        for task in request_base.task_id_to_obj.values():
            deadline=self.cloud_system.service_to_deadline[request_base.service_name]
            if uprank_entry==0:
                task.sub_deadline=deadline
            else:
                task.sub_deadline=deadline*(uprank_entry-task.uprank+task.deal_time)/uprank_entry
        return
    
    def schedule_get_early_start_latest_end_time(self, request_base):
        """Set each start task's time window: earliest start from predecessor
        time, latest end from the request deadline minus successor time."""
        print(f"start schedule_get_early_start_latest_end_time ... ")
        for task_t in request_base.start_task_list:
            early_start_time=task_t.pre_time
            latest_end_time=request_base.deadline-task_t.suc_time
            task_t.set_early_start_latest_end_time(early_start_time,latest_end_time)
        print(f"end schedule_get_early_start_latest_end_time ... ")
        
    def string_to_fixed_list(self, input_str, list_length=32, min_val=0, max_val=255):
        """
        Encode a string as a fixed-length list of integers.

        The string is hashed with SHA-256; the digest bytes are truncated or
        cyclically extended to ``list_length`` and scaled into
        ``[min_val, max_val]``. Deterministic for a given input.

        Args:
            input_str: input string (non-strings are converted via str()).
            list_length: length of the output list (default 32).
            max_val / min_val: inclusive bounds of the output values.

        Returns:
            list[int]: fixed-length integer encoding of the input.
        """
        # coerce non-string inputs
        if not isinstance(input_str, str):
            input_str = str(input_str)
        
        # SHA-256 gives a fixed-length, deterministic digest
        hash_object = hashlib.sha256(input_str.encode('utf-8'))
        hash_bytes = hash_object.digest()
        
        # digest bytes as a list of ints in 0..255
        hash_ints = list(hash_bytes)
        
        # truncate when the requested length fits within the digest
        if list_length <= len(hash_ints):
            result = hash_ints[:list_length]
        else:
            # otherwise tile the digest and truncate to the requested length
            extend_times = (list_length // len(hash_ints)) + 1
            extended = hash_ints * extend_times
            result = extended[:list_length]
        
        # rescale from 0..255 into [min_val, max_val] when needed
        if min_val != 0 or max_val != 255:
            result = [min_val + (x * (max_val - min_val) // 255) for x in result]
        
        return result
        
    
    



class QNetwork(nn.Module):
    """Three-layer fully connected Q-value estimator.

    Maps a state vector to one Q-value per discrete action. The layer
    attribute names (fc1/fc2/fc3) and their creation order are kept stable so
    previously saved network and optimizer state_dicts remain loadable.
    """

    def __init__(self, state_dim, action_dim, hidden_dim=64):
        """
        Args:
            state_dim: number of input state features.
            action_dim: number of discrete actions (output width).
            hidden_dim: width of both hidden layers.
        """
        super(QNetwork, self).__init__()
        self.fc1 = nn.Linear(state_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, action_dim)
        self.relu = nn.ReLU()

    def forward(self, state):
        """Return the Q-values for every action given ``state``."""
        hidden = self.relu(self.fc2(self.relu(self.fc1(state))))
        return self.fc3(hidden)

class StatelessRLAgent:
    """DQN-style agent for a contextual-bandit ("stateless") setting.

    The learning target is the immediate reward only (no bootstrapped
    next-state term), so ``discount_factor`` is stored for API compatibility
    but not used by ``learn``. Experiences are kept in a fixed-size ring
    buffer and sampled uniformly for mini-batch updates; a target network is
    maintained via soft updates (currently unused by the loss, kept for
    compatibility with the saved/loaded state).
    """

    def __init__(self, state_dim, action_dim, learning_rate=0.001, discount_factor=0.9, exploration_rate=0.1, 
                 hidden_dim=64, batch_size=64, memory_size=10000):
        """
        Initialize the agent for a continuous state space.

        Args:
            state_dim: dimensionality of the state vector.
            action_dim: number of discrete actions.
            learning_rate: Adam learning rate.
            discount_factor: kept for compatibility; unused (immediate-reward target).
            exploration_rate: epsilon for epsilon-greedy action selection.
            hidden_dim: hidden-layer width of the Q network.
            batch_size: mini-batch size used by ``learn``.
            memory_size: capacity of the experience replay ring buffer.
        """
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.learning_rate = learning_rate
        self.discount_factor = discount_factor
        self.exploration_rate = exploration_rate
        self.batch_size = batch_size
        
        # Q network and its (softly tracked) target copy
        self.q_network = QNetwork(state_dim, action_dim, hidden_dim)
        self.target_network = QNetwork(state_dim, action_dim, hidden_dim)
        self.target_network.load_state_dict(self.q_network.state_dict())
        self.target_network.eval()
        
        # optimizer
        self.optimizer = optim.Adam(self.q_network.parameters(), lr=learning_rate)
        
        # experience replay ring buffer
        self.memory = []
        self.memory_size = memory_size
        self.memory_ptr = 0
        
        # last observed state and chosen action (for external bookkeeping)
        self.last_state = None
        self.last_action = None
        
        # loss function
        self.criterion = nn.MSELoss()

    def get_action(self, state, use_exploration=True):
        """
        Select an action for ``state`` via epsilon-greedy.

        Args:
            state: current environment state (sequence/ndarray of floats).
            use_exploration: when True, explore with prob. ``exploration_rate``.

        Returns:
            int: the chosen action index in [0, action_dim).
        """
        # remember the state for later updates
        self.last_state = state
        
        # epsilon-greedy exploration branch
        if use_exploration and np.random.uniform(0, 1) < self.exploration_rate:
            action = np.random.randint(0, self.action_dim)
            self.last_action = action
            return action
        
        # exploitation: greedy action w.r.t. the current Q estimates
        state_tensor = torch.FloatTensor(state).unsqueeze(0)
        with torch.no_grad():
            q_values = self.q_network(state_tensor)
            action = torch.argmax(q_values, dim=1).item()
        
        # remember the chosen action for later updates
        self.last_action = action
        return action

    def update(self, state, action, reward):
        """
        Store one (state, action, reward) experience and run a learning step.

        Args:
            state: the state the action was taken in.
            action: the action that was executed.
            reward: the reward received for executing ``action`` in ``state``.
        """
        self.store_experience(state, action, reward)
        self.learn()

    def store_experience(self, state, action, reward):
        """
        Append an experience, overwriting the oldest entry once the ring
        buffer reaches ``memory_size``.

        Args:
            state: observed state.
            action: executed action.
            reward: received reward.
        """
        if len(self.memory) < self.memory_size:
            self.memory.append((state, action, reward))
        else:
            # buffer full: overwrite in FIFO order via the ring pointer
            self.memory[self.memory_ptr] = (state, action, reward)
            self.memory_ptr = (self.memory_ptr + 1) % self.memory_size

    def learn(self):
        """Sample a mini-batch from replay memory and fit Q(s,a) to the
        immediate reward; then softly update the target network."""
        # wait until enough samples are available
        if len(self.memory) < self.batch_size:
            return
        
        # uniform sampling without replacement
        indices = np.random.choice(len(self.memory), self.batch_size, replace=False)
        batch = [self.memory[i] for i in indices]
        
        # batch -> contiguous numpy arrays (faster tensor construction)
        states = np.array([exp[0] for exp in batch], dtype=np.float32)
        actions = np.array([exp[1] for exp in batch], dtype=np.int64)
        rewards = np.array([exp[2] for exp in batch], dtype=np.float32)
        
        # numpy -> tensors; actions/rewards get a trailing singleton dim
        states_tensor = torch.as_tensor(states, dtype=torch.float32)
        actions_tensor = torch.as_tensor(actions, dtype=torch.long).unsqueeze(1)
        rewards_tensor = torch.as_tensor(rewards, dtype=torch.float32).unsqueeze(1)
        
        # Q-values of the actions actually taken
        current_q_values = self.q_network(states_tensor).gather(1, actions_tensor)
        
        # stateless target: the immediate reward (no bootstrapping)
        target_q_values = rewards_tensor
        
        # MSE loss between predicted Q and reward
        loss = self.criterion(current_q_values, target_q_values)
        
        # gradient step
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        
        # softly track the online network with the target network
        self.soft_update_target_network(tau=0.001)

    def soft_update_target_network(self, tau):
        """
        Polyak-average the online network into the target network.

        Args:
            tau: interpolation coefficient (target <- tau*online + (1-tau)*target).
        """
        for target_param, param in zip(self.target_network.parameters(), self.q_network.parameters()):
            target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)

    def save_model(self, file_path):
        """
        Persist hyperparameters, Q-network weights, and optimizer state.

        Args:
            file_path: destination file path for torch.save.
        """
        model_data = {
            'state_dim': self.state_dim,
            'action_dim': self.action_dim,
            'learning_rate': self.learning_rate,
            'discount_factor': self.discount_factor,
            'exploration_rate': self.exploration_rate,
            'q_network_state_dict': self.q_network.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict()
        }
        
        torch.save(model_data, file_path)

    def load_model(self, file_path):
        """
        Restore hyperparameters and network/optimizer state from a checkpoint.

        Both the online and target networks receive the saved Q-network
        weights. Raises FileNotFoundError when the file is missing.

        Args:
            file_path: checkpoint path previously written by ``save_model``.
        """
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"模型文件 {file_path} 不存在")
        
        # map_location="cpu" so GPU-saved checkpoints load on CPU-only hosts
        # (the networks in this module are never moved off the CPU)
        model_data = torch.load(file_path, map_location="cpu")
        
        # restore hyperparameters
        self.state_dim = model_data['state_dim']
        self.action_dim = model_data['action_dim']
        self.learning_rate = model_data['learning_rate']
        self.discount_factor = model_data['discount_factor']
        self.exploration_rate = model_data['exploration_rate']
        
        # restore network weights (online + target) and optimizer state
        self.q_network.load_state_dict(model_data['q_network_state_dict'])
        self.target_network.load_state_dict(model_data['q_network_state_dict'])
        self.optimizer.load_state_dict(model_data['optimizer_state_dict'])