import torch.optim

from Model import Model
from Model import device
from Network import Network
import numpy as np
import torch.nn as nn
import multiprocessing as mp
EXPLORE_FACTOR=0.1
GAMMA=1
LR=0.0001
# Keep consistent with K in Network
K=5
def choose_action(model, state_next, adj, eval_flag):
    """Epsilon-greedy action selection over the legal actions, excluding the
    trailing reject slot.

    Args:
        model: scorer called as model(state, adj), returning a torch tensor.
        state_next: per-action candidate states; a slot is illegal when its
            [0][0] entry is -1.
        adj: adjacency structure forwarded to the model.
        eval_flag: when True, exploration is disabled (always greedy).

    Returns:
        The chosen action index, or K (the reject action) when only the
        reject slot remains legal.
    """
    # Legal slots are those not flagged with -1.
    legal = [i for i in range(len(state_next)) if state_next[i][0][0] != -1]
    if len(legal) == 1:
        # Only the reject slot is left; K denotes the reject action.
        return K
    # Drop the trailing reject slot so it is never picked below.
    legal.pop()
    # Draw r unconditionally (as the original did) so the RNG stream is stable.
    r = np.random.uniform()
    if r < EXPLORE_FACTOR and not eval_flag:
        # Exploration: uniformly sample one legal action.
        action = np.random.choice(np.array(legal), 1, replace=False)[0]
    else:
        # Exploitation: score every legal candidate and take the argmax.
        scores = [model(state_next[i], adj).cpu().detach().numpy() for i in legal]
        action = legal[np.argmax(np.array(scores))]
    return action
def choose_action_with_reject(model, state_next, adj, eval_flag):
    """Epsilon-greedy action selection over all legal actions, with the
    reject slot kept as a regular candidate.

    A slot is legal unless its [0][0] entry equals -1. During evaluation
    (eval_flag=True) the greedy branch is always taken.
    """
    # Collect the indices of all legal candidate slots.
    legal = [idx for idx, candidate in enumerate(state_next) if candidate[0][0] != -1]
    r = np.random.uniform()
    if r < EXPLORE_FACTOR and not eval_flag:
        # Exploration branch: uniformly sample one legal index.
        return np.random.choice(np.array(legal), 1, replace=False)[0]
    # Greedy branch: score each legal candidate with the model, take the best.
    scores = [model(state_next[idx], adj).cpu().detach().numpy() for idx in legal]
    return legal[int(np.argmax(np.array(scores)))]

def learn(model, adj, state_list, reward_list, optimizer, loss_func):
    """Fit the model's state values to the discounted returns of one episode.

    Gradients from every (state, return) pair are accumulated via repeated
    backward() calls, then applied in a single optimizer step.

    Args:
        model: value network called as model(state, adj).
        adj: adjacency structure forwarded to the model.
        state_list: the states visited during the episode, in order.
        reward_list: the per-step rewards, aligned with state_list.
        optimizer: torch optimizer over the model's parameters.
        loss_func: loss between predicted value and discounted return (MSE).
    """
    # Discounted returns back-to-front: R_t = r_t + GAMMA * R_{t+1}.
    R = 0
    discount_reward = []
    for r in reward_list[::-1]:
        R = r + GAMMA * R
        discount_reward.insert(0, R)
    optimizer.zero_grad()
    for state, target in zip(state_list, discount_reward):
        predict = model(state, adj)
        rewards = torch.FloatTensor([target]).to(device)
        loss = loss_func(predict, rewards)
        # backward() without zero_grad() in between accumulates gradients
        # across all samples before the single step() below.
        loss.backward()
    optimizer.step()

def eval(model, env):
    """Play one fully greedy episode (no exploration) and return the total reward.

    NOTE: shadows the builtin eval(); kept because callers in this file use
    this name. The environment is reset before and after the episode.
    """
    env.reset()
    adj = env.link_adj
    reward_sum = 0
    while True:
        state_next = env.observe()
        # eval_flag=True disables the epsilon-greedy exploration branch.
        action = choose_action(model, state_next, adj, True)
        reward, done = env.step(action)
        reward_sum += reward
        if done:
            break
    env.reset()
    return reward_sum
def KFF(env):
    """First-fit baseline: at each step take the first legal slot.

    A slot is legal unless its [0][0] entry is -1. Falls back to -1 when no
    slot is legal (in practice the trailing reject slot is always legal, so
    this should not occur — TODO confirm against the environment).
    Returns the episode's total reward.
    """
    env.reset()
    reward_sum = 0
    while True:
        state_next = env.observe()
        # First legal slot wins.
        action = next(
            (i for i in range(len(state_next)) if state_next[i][0][0] != -1),
            -1,
        )
        reward, done = env.step(action)
        reward_sum += reward
        if done:
            break
    env.reset()
    return reward_sum
def DFF(env):
    """First-slot baseline: always try slot 0; reject (action K) when slot 0
    is illegal. Returns the episode's total reward."""
    env.reset()
    total = 0
    done = False
    while not done:
        observation = env.observe()
        # Slot 0 is illegal when flagged with -1 -> take the reject action K.
        action = 0 if observation[0][0][0] != -1 else K
        reward, done = env.step(action)
        total += reward
    env.reset()
    return total
def Random(env):
    """Random baseline: sample a slot uniformly from [0, K); reject (action K)
    when the sampled slot is illegal. Returns the episode's total reward."""
    env.reset()
    total = 0
    done = False
    while not done:
        observation = env.observe()
        candidate = np.random.randint(K)
        # An illegal slot is flagged with -1 -> fall back to the reject action.
        action = candidate if observation[candidate][0][0] != -1 else K
        reward, done = env.step(action)
        total += reward
    env.reset()
    return total
def get_least_load(state):
    """Return the heaviest per-channel load of *state*.

    Each channel's load is 0 when the channel is all zeros; otherwise it is
    the largest index (flattened over all axes returned by np.where) at which
    a 1 occurs. The result is the maximum load across channels.
    """
    loads = []
    for channel in state:
        if np.all(channel == 0):
            loads.append(0)
        else:
            # np.where returns a tuple of index arrays; np.max flattens it.
            loads.append(np.max(np.where(channel == 1)))
    return np.max(np.array(loads))
def LFF(env):
    """Least-load-first baseline: among the legal slots (excluding the
    trailing reject slot) pick the one with the smallest load as computed by
    get_least_load; reject (action K) when only the reject slot is legal.
    Returns the episode's total reward."""
    env.reset()
    total = 0
    done = False
    while not done:
        observation = env.observe()
        # Legal slots are those not flagged with -1.
        legal = [idx for idx, candidate in enumerate(observation) if candidate[0][0] != -1]
        if len(legal) == 1:
            # Only the reject slot remains.
            action = K
        else:
            legal.pop()  # drop the trailing reject slot from the candidates
            loads = [get_least_load(observation[idx]) for idx in legal]
            action = legal[int(np.argmin(np.array(loads)))]
        reward, done = env.step(action)
        total += reward
    env.reset()
    return total
#用于遗传算法
# Fitness evaluation for the genetic algorithm
def get_score(env, gene):
    """Play one episode following the action sequence encoded in *gene* and
    return the total reward (the gene's fitness).

    Bug fix: the step index was initialized to 0 but never advanced, so every
    step replayed gene[0]; it now moves to the next gene entry each step.
    Assumes gene holds at least as many entries as the episode has steps —
    TODO confirm against the GA's gene length.
    """
    reward_sum = 0
    i = 0
    env.reset()
    while True:
        state_next = env.observe()
        r = int(gene[i])
        i += 1  # consume the next gene entry on the following step
        if state_next[r][0][0] == -1:
            # The encoded slot is illegal -> take the reject action K.
            action = K
        else:
            action = r
        reward, done = env.step(action)
        reward_sum += reward
        if done:
            break
    env.reset()
    return reward_sum

def one_play(EPOCH, task_load):
    """Train a fresh model on one Network environment and return the best
    greedy evaluation reward observed.

    Each epoch first evaluates the current model greedily, then rolls out one
    exploratory episode and updates the model on it.

    Args:
        EPOCH: number of training epochs.
        task_load: workload size passed to the Network environment.
    """
    env = Network(task_load)
    model = Model(env.link_size).to(device)
    loss_func = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=LR)
    eval_list = []
    for _ in range(EPOCH):
        # Greedy evaluation before this epoch's training rollout.
        eval_list.append(eval(model, env))
        state_list = []
        reward_list = []
        while True:
            state_next = env.observe()
            # state_next[-1] is the state stored for training — presumably the
            # reject/aggregate view; TODO confirm against Network.observe().
            state_list.append(state_next[-1])
            action = choose_action(model, state_next, env.link_adj, False)
            reward, done = env.step(action)
            reward_list.append(reward)
            if done:
                break
        learn(model, env.link_adj, state_list, reward_list, optimizer, loss_func)
    return np.max(np.array(eval_list))

if __name__ == '__main__':
    # Train for 500 epochs on a 100-task workload.
    one_play(EPOCH=500, task_load=100)


