import numpy as np
import pandas as pd
# import modin.pandas as pd
from choose_action import choose_action
from greedy_policy import epsilon_decrease
import time
from numba import jit
time_start = time.time()  # wall-clock start; total runtime is reported at the end of the script
flight_state = pd.read_pickle(r'C:\Users\HP\Desktop\adaRLAMRP\case_pkl\case8.8.pkl')#load flight state information (one row per flight)
aircraft_action =pd.read_pickle(r'C:\Users\HP\Desktop\adaRLAMRP\case_pkl\飞机8.8.pkl')#load selectable aircraft action information (one row per aircraft)
base_inf = pd.read_pickle(r'C:\Users\HP\Desktop\adaRLAMRP\case_pkl\基地8.pkl')#load base information, used for the base parking-capacity constraint

# flight_state = pd.read_csv(r'C:\Users\HP\Desktop\adaRLAMRP\case\case8.8.csv',encoding='gbk')#读取航班状态信息
# aircraft_action = pd.read_csv(r'C:\Users\HP\Desktop\adaRLAMRP\case\飞机8.8.csv',encoding='gbk')#读取飞机行为可选信息
# base_inf = pd.read_csv(r'C:\Users\HP\Desktop\adaRLAMRP\case\基地8.csv',encoding='gbk')#读取基地信息，用于基地容

E1 = 200#maximum number of episodes in the exploration process
E2 = 120#episode after which convergence is accelerated (Q-table reset, baseline subtraction); NOTE(review): author flags this value as needing tuning
Ge_list  = [0] * E1 #cumulative return stored for every episode, filled in order
goal = [0] * E1  # objective value (profit) recorded for every episode
C_profit = 500 #profit earned per successful flight connection
# W_profit = 0 #cost incurred by maintenance
qvalue = np.zeros((len(flight_state), len(aircraft_action))) #Q-value table: rows = flight states, columns = aircraft actions
epsilon = 0.5 #initial epsilon for the epsilon-greedy exploration policy
#主程序正式开始，以遍历可进行的所有回合数，直至episode=E1-1（因为索引值，所以是小1的情况

def _episode_alpha(episode, Ge_list):
    """Adaptive learning rate for the Q-update.

    Returns 0.2 on the very first episode, otherwise 0.3 when this
    episode's return improved on the previous one, else 0.1.
    (Fixes an off-by-one: the original tested `episode == 1`, but
    episodes start at 0, so episode 0 compared against Ge_list[-1].)
    """
    if episode == 0:
        return 0.2
    return 0.3 if Ge_list[episode] > Ge_list[episode - 1] else 0.1


def _update_qvalues(qvalue, action_result, Ge_list, episode):
    """In-place update Q(s, a) += alpha * (G - Q(s, a)) for the action
    taken in every flight state of this episode.

    This logic was duplicated verbatim in both the exploration and the
    post-exploration branches of the original loop.
    """
    alpha = _episode_alpha(episode, Ge_list)  # invariant across states, hoisted out of the loop
    G = Ge_list[episode]
    for k in range(qvalue.shape[0]):
        a = action_result[k]
        qvalue[k, a] += alpha * (G - qvalue[k, a])


# Main training loop over every episode (indices 0 .. E1-1).
for episode in range(E1):
    S = 0  # current flight-state index; every episode starts at flight 0
    reward_sample = [0] * len(flight_state)      # reward collected per flight this episode
    action_result = [10000] * len(flight_state)  # aircraft index chosen per flight (10000 = not yet assigned)
    states = list(range(0, len(flight_state)))   # flights not yet scheduled; one state per flight
    NIU = list(range(0, len(aircraft_action)))   # not-in-use aircraft; initially every aircraft index
    IU = []   # in-use aircraft
    MTN = []  # aircraft currently in maintenance
    base_cap = [0] * len(base_inf)  # live parking occupancy per base (for the capacity constraint)
    base = base_inf.loc[:, "base_name"].to_list()
    all_cap = base_inf.loc[:, 'maxparkingaircraft_number'].to_list()
    aircraft_time = [0] * len(aircraft_action)             # cumulative flying time per aircraft
    aircraft_flight_innumber = [0] * len(aircraft_action)  # take-off/landing cycle count per aircraft
    aircraft_maintain = [0] * len(aircraft_action)         # maintenance status per aircraft -- presumably updated by choose_action; TODO confirm
    final_arr_time = [0] * len(aircraft_action)            # arrival time at the airport when an aircraft flies a maintenance flight
    aircraft = [[] for _ in range(len(aircraft_action))]   # flight string assigned to each aircraft
    # Visit every flight state to build a complete schedule; constraint
    # violations are penalised through the reward and learned away.
    while not len(states) == 0:
        states = choose_action(S, IU, NIU, MTN, flight_state, aircraft_maintain, base_cap,
                               base, aircraft_action, aircraft, aircraft_time,
                               aircraft_flight_innumber, states, final_arr_time, all_cap,
                               reward_sample, epsilon, qvalue, action_result)  # returns the remaining states
        if len(states) >= 1:
            S = states[0]  # next state is the first remaining flight
        else:
            print("所有航班状态遍历完毕，此回合结束", episode)
            break

    # Episode finished: record the cumulative return and update the Q-table.
    if episode <= E2:
        Ge_list[episode] = np.sum(reward_sample)
    else:
        if episode == E2 + 1:
            # Re-initialise the Q-table once the exploration phase ends, to speed up convergence.
            qvalue = np.zeros((len(flight_state), len(aircraft_action)))
        # Baseline-corrected return: subtract the mean exploration-phase return.
        Ge_list[episode] = np.sum(reward_sample) - np.mean(Ge_list[:E2 + 1])
    _update_qvalues(qvalue, action_result, Ge_list, episode)

    # Objective: 500 profit per connected flight (reward == 10) minus a 500
    # penalty for every aircraft that was used or sent to maintenance.
    # Equivalent to the original np.sum(list(filter(...)) * 500) / 10, which
    # replicated the filtered list 500 times before summing.
    goal[episode] = reward_sample.count(10) * 500.0 - 500 * (len(IU) + len(MTN))

    # Decay epsilon once per episode: roughly constant early on, then
    # decreasing towards 0 so the policy converges.
    epsilon = epsilon_decrease(epsilon, Ge_list, episode, reward_sample)
    # epsilon_list.append(epsilon)

time_end = time.time()
time_sum1 = time_end - time_start  # total wall-clock runtime of the run
print("运行总时间", time_sum1)
print("目标收益", goal)
# print('最后一此回合的选择结果',action_result)
# Shift flight indices from 0-based to 1-based flight numbers for display.
aircraft_final_last = [[flight + 1 for flight in schedule] for schedule in aircraft]
print('最后一个回合各个飞机执飞航班串的结果', aircraft_final_last)
import matplotlib.pyplot as plt
x_axis_data = list(range(1, E1 + 1))  # episode numbers 1..E1 for the x axis
plt.plot(x_axis_data, goal, 'bo-', alpha=0.5, linewidth=1, label='profit')
# plt.plot(x_axis_data, epsilon_list, 'ro-', alpha=0.5, linewidth=1, label='epsilon')
plt.legend()  # show the 'profit' label
plt.xlabel('episode')  # x_label
plt.ylabel('result')  # y_label
plt.show()
# Mean objective value across all episodes.
print('最终计算平均结果为：', sum(goal) / len(goal))

#
# final_action = []
# for i in range(len(flight_state)):
#     final_action.append(0)
#所有回合结束后，对Q价值表进行贪婪策略，以判断最终的价值选择
#------------------对于Q表进行更新的操作-----------------------------，不知道是不是要保留，先存着吧------------------------------------
# for i in range(len(flight_state)):
#     action_final = qvalue[i, :].argmax()  # 用于查找Q表中价值最大的位置对应的飞机号，即为所应该选择的动作
#     final_action[i] = action_final+1
# print('依据最终Q值表的最终结果',final_action)
#
# aircraft_final_Q = []
# for k in range(1,len(aircraft_action)+1):
#     index = [i for i, val in enumerate(final_action) if val == k]
#     index1 = []
#     for i in index:
#         i = i+1
#         index1.append(i)
#     aircraft_final_Q.append(index1)
# print('Q值表下各个飞机执飞航班串的结果',aircraft_final_Q)
# action_result_final = []
# for i in action_result:
#     i = i+1
#     action_result_final.append(i)
# print('最后一此回合的选择结果',action_result_final)


