"""
===============================================
* filename: DNQ_recommender.py
* author:   zhangbing
* describe: reinforcement learning based recommendation, following Morvan's blog

* modifier:
* time:
* content:
===============================================
"""
import setproctitle
setproctitle.setproctitle("zb-rl-dqn")
import os
os.environ["CUDA_VISIBLE_DEVICES"] = '0'

import torch
import torch.nn as nn
import torch.nn.functional as F
torch.set_num_threads(3)

import numpy as np
# import gym
from load_data import *
from environment import *
import random
import time

import matplotlib.pyplot as plt
import networkx as nx


USE_GPU = torch.cuda.is_available()
# USE_GPU = False
# if USE_GPU == 1:
#     os.environ["CUDA_VISIBLE_DEVICES"] = '6'
USE_WORD2VECTOR = False #whether to encode states with word2vec embeddings
USE_LSTM = False  # whether Net inserts an LSTM stage between fc2 and the output head


# Hyperparameters
MAX_EOPCH = 1000  # number of training episodes ("epoch" misspelled; name kept for compatibility)

LR = 0.01                   # learning rate
EPSILON = 0.95               # probability of choosing the greedy (best) action

EPSILON_MIN = 0.05               # lower bound of the greedy-action probability
EPSILON_MAX = 0.9               # upper bound of the greedy-action probability

GAMMA = 0.0                 # reward discount factor (0 => targets reduce to the immediate reward)
TARGET_REPLACE_ITER = 20   # how often (in learn() calls) the target net copies the eval net
MEMORY_CAPACITY = 2000      # replay-memory size (number of stored transitions)
BATCH_SIZE = 1800
RECOMMEND_TOPK_MAX = 1  # actions recommended per step during training
MODEL_SAVE_FREQUENCE = 10  # checkpoint the networks every N episodes


# env = gym.make('CartPole-v0')   # original gym cart-pole example, kept for reference
# env = env.unwrapped
# N_ACTIONS = env.action_space.n  # number of available actions
# N_STATES = env.observation_space.shape[0]   # dimension of the observation
env = Environment()
N_ACTIONS = env.action_num  # number of recommendable actions

# State-vector width depends on the configured encoding.
if USE_WORD2VECTOR:
    N_STATES = env.states_dimension*VECTOR_DIM   # each state slot is a VECTOR_DIM embedding
else:
    if USE_ONE_HOT:
        N_STATES = N_ACTIONS  # multi-hot vector over the action vocabulary
    else:
        N_STATES = env.states_dimension  # raw (normalized) id sequence

ENV_A_SHAPE = 0  # 0 => actions are plain ints (see DQN.choose_action)
MAX_STATE_VALUE = max(data_processer.dict_obj_to_num.values())  # largest raw id; used to scale states into [0, 1]


PATH_EVAL_MODEL = './data/model/model_eval_net'
PATH_TARGET_MODEL = './data/model/model_target_net'

pass
from torch.autograd import Variable
# Module-level LSTM hidden state shared across Net.forward() calls (used only when USE_LSTM).
# NOTE(review): shape (1, 1, 200) does not match the LSTM's hidden_size=1000 declared in
# Net.__init__ — confirm before enabling USE_LSTM.
if USE_GPU:
    hidden = (torch.zeros([1, 1,200], dtype=torch.float).cuda(), torch.zeros([1, 1,200], dtype=torch.float).cuda() )
else:
    hidden = (torch.zeros([1, 1,200], dtype=torch.float), torch.zeros([1, 1,200], dtype=torch.float))
class Net(nn.Module):
    """Q-network mapping a state vector to one Q-value per action.

    Architecture: two ReLU fully-connected layers of width 1000, an
    optional recurrent stage (only built when the module-level USE_LSTM
    flag is set), and a linear output head of size N_ACTIONS.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Weights of the dense layers are initialised from N(0, 0.1).
        self.fc1 = nn.Linear(N_STATES, 1000)
        self.fc1.weight.data.normal_(0, 0.1)

        self.fc2 = nn.Linear(1000, 1000)
        self.fc2.weight.data.normal_(0, 0.1)

        if USE_LSTM:
            # Recurrent stage plus a projection layer applied after it.
            self.lstm = nn.LSTM(input_size=1000, hidden_size=1000, num_layers=1)
            self.fc3 = nn.Linear(1000, 1000)

        self.out = nn.Linear(1000, N_ACTIONS)
        self.out.weight.data.normal_(0, 0.1)

    def forward(self, x):
        """Return per-action Q-values for a batch of state vectors."""
        features = F.relu(self.fc2(F.relu(self.fc1(x))))

        if USE_LSTM:
            # The recurrent state lives in the module-level `hidden` tuple so
            # that it persists across successive forward() calls.
            global hidden
            seq = features.unsqueeze(1)
            seq, hidden = self.lstm(seq, hidden)
            features = F.relu(self.fc3(seq.squeeze(1)))

        return self.out(features)


class DQN(object):
    """Deep-Q-Network agent: eval/target networks, replay memory, learning step.

    The eval net is trained on every learn() call; the target net is a copy
    that is re-synchronised every TARGET_REPLACE_ITER learning steps.
    """

    def __init__(self,max_state_value,load_model=False):
        # max_state_value: largest raw object id; states are divided by it to land in [0, 1].
        # load_model: when True and a checkpoint exists at the bare model path,
        #   restore that network from disk instead of building a fresh one.
        if USE_GPU:
            if load_model and os.path.exists(PATH_EVAL_MODEL) :
                self.eval_net = torch.load(PATH_EVAL_MODEL)
            else:
                self.eval_net = Net().cuda()
                # self.eval_net = nn.DataParallel(Net())
                # self.eval_net = self.eval_net.cuda()


            if load_model and os.path.exists(PATH_TARGET_MODEL):
                self.target_net = torch.load(PATH_TARGET_MODEL)
            else:
                self.target_net =  Net().cuda()
                # self.target_net = nn.DataParallel(Net())
                # self.target_net = self.target_net.cuda()

            self.loss_func = nn.MSELoss().cuda()  # loss function
        else:
            if load_model and os.path.exists(PATH_EVAL_MODEL):
                self.eval_net = torch.load(PATH_EVAL_MODEL)
            else:
                self.eval_net = Net()

            if load_model and os.path.exists(PATH_TARGET_MODEL):
                self.target_net = torch.load(PATH_TARGET_MODEL)
            else:
                self.target_net = Net()

            self.loss_func = nn.MSELoss()  # loss function
        self.max_state_value = max_state_value
        self.learn_step_counter = 0     # counts learn() calls; drives target-net sync
        self.memory_counter = 0         # how many transitions have been stored so far
        self.memory = np.zeros((MEMORY_CAPACITY, N_STATES * 2 + 2))     # replay memory; each row is (s, a, r, s_)
        self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=LR)    # optimizer for the eval net only

        # Int sentinel: train() checks `type(loss_value) != int` to detect
        # whether at least one learn() call has happened.
        self.loss_value = 0xFFFF
        pass

    def choose_action_topk(self, x,action_range,top_k,epsilon):
        """Return `top_k` action ids chosen from `action_range`.

        With probability `epsilon` pick the actions with the highest
        Q-values (restricted to `action_range`); otherwise sample `top_k`
        actions uniformly from `action_range`.

        Args:
            x: state vector (already encoded; normalized here unless word2vec).
            action_range: list of candidate action ids.
            top_k: number of actions to return.
            epsilon: greedy-selection probability.
        """
        if USE_WORD2VECTOR:
            pass
        else:
            # Scale raw ids into [0, 1] to match what the net was trained on.
            x = x / self.max_state_value
        if USE_GPU:
            x = torch.unsqueeze(torch.FloatTensor(x), 0).cuda()
        else:
            x = torch.unsqueeze(torch.FloatTensor(x), 0)
        # input only one sample
        if np.random.uniform() < epsilon:   # greedy

            actions_value = self.eval_net.forward(x)
            # print(actions_value.shape)
            # if USE_LSTM :
            #     actions_value = torch.reshape(actions_value,(1,N_ACTIONS))
            # print(actions_value.shape)

            # Keep only the Q-values of the candidate actions, then take top-k.
            if USE_GPU:
                action_select = torch.index_select(actions_value,1,torch.tensor(action_range).cuda())
            else:
                action_select = torch.index_select(actions_value, 1, torch.tensor(action_range))
            vals, indices = action_select.topk(k=top_k, dim=1, largest=True, sorted=True)
            if USE_GPU:
                indices = indices.cpu()
                vals = vals.cpu()

            indices = indices.data.numpy()[0].tolist()
            vals = vals.data.numpy()[0].tolist()

            # Positive-value filtering is disabled: every top-k index is kept.
            useful_index = []
            for j in range(len(vals)):
                # if vals[j] > 0:
                #     useful_index.append(indices[j])
                useful_index.append(indices[j])
            if len(useful_index) <= 0:
                useful_index.append(indices[0])

            # Map positions inside action_range back to global action ids.
            top_k_action = [action_range[i] for i in useful_index]
            return top_k_action

        # else random
        random_k_action = random.sample(action_range, top_k)
        return random_k_action


    def choose_action(self, x):
        """Epsilon-greedy single-action selection over all N_ACTIONS.

        Legacy path from the original cart-pole example; the training loop
        in this file uses choose_action_topk instead.
        """
        if USE_WORD2VECTOR:
            pass
        else:
            x = x / self.max_state_value

        if USE_GPU:
            x = torch.unsqueeze(torch.FloatTensor(x).cuda(), 0).cuda()
        else:
            x = torch.unsqueeze(torch.FloatTensor(x), 0)

        # input only one sample
        if np.random.uniform() < EPSILON:   # greedy
            actions_value = self.eval_net.forward(x)
            if USE_GPU:
                action = torch.max(actions_value.cpu(), 1)[1].data.numpy()
            else:
                action = torch.max(actions_value, 1)[1].data.numpy()
            action = action[0] if ENV_A_SHAPE == 0 else action.reshape(ENV_A_SHAPE)  # return the argmax index
        else:   # random
            action = np.random.randint(0, N_ACTIONS)
            action = action if ENV_A_SHAPE == 0 else action.reshape(ENV_A_SHAPE)
        return action


    def store_transition(self, s, a, r, s_):
        """Store one (state, action, reward, next_state) row in the replay memory."""
        if USE_WORD2VECTOR:
            pass
        else:
            # Normalize states the same way choose_action_topk does.
            s = s / self.max_state_value
            s_ = s_ / self.max_state_value
        transition = np.hstack((s, [a, r], s_))
        # Overwrite the oldest data once the memory is full (ring buffer).
        index = self.memory_counter % MEMORY_CAPACITY
        self.memory[index, :] = transition
        self.memory_counter += 1

    def learn(self):
        """Run one gradient step on a random mini-batch from the replay memory."""
        # target parameter update
        if self.learn_step_counter % TARGET_REPLACE_ITER == 0:
            self.target_net.load_state_dict(self.eval_net.state_dict())
        self.learn_step_counter += 1

        # sample batch transitions (with replacement)
        sample_index = np.random.choice(MEMORY_CAPACITY, BATCH_SIZE)
        b_memory = self.memory[sample_index, :]
        if USE_GPU:
            b_s = torch.FloatTensor(b_memory[:, :N_STATES]).cuda()
            b_a = torch.LongTensor(b_memory[:, N_STATES:N_STATES+1].astype(int)).cuda()
            b_r = torch.FloatTensor(b_memory[:, N_STATES+1:N_STATES+2]).cuda()
            b_s_ = torch.FloatTensor(b_memory[:, -N_STATES:]).cuda()
        else:
            b_s = torch.FloatTensor(b_memory[:, :N_STATES])
            b_a = torch.LongTensor(b_memory[:, N_STATES:N_STATES+1].astype(int))
            b_r = torch.FloatTensor(b_memory[:, N_STATES+1:N_STATES+2])
            b_s_ = torch.FloatTensor(b_memory[:, -N_STATES:])

        # q_eval w.r.t the action in experience
        # if USE_LSTM:
        #     # b_s = torch.reshape(b_s,b_a.shape)
        #     q_eval = self.eval_net(b_s).gather(0, b_a)
        q_eval = self.eval_net(b_s).gather(1, b_a)  # shape (batch, 1)
        q_next = self.target_net(b_s_).detach()     # detach from graph, don't backpropagate
        # q_target = b_r + GAMMA * q_next.max(1)[0].view(BATCH_SIZE, 1)   # shape (batch, 1)
        # With GAMMA = 0.0 the TD target reduces to the immediate reward,
        # which is what the line below implements directly.
        q_target = b_r
        loss = self.loss_func(q_eval, q_target)

        self.optimizer.zero_grad()
        # loss.backward
        if USE_LSTM:
            loss.backward(retain_graph=True)
        else:
            loss.backward()
        self.optimizer.step()
        # NOTE(review): this keeps the loss tensor (and its graph reference)
        # alive between calls; train() reads `.data` from it, so the tensor —
        # not a float — must be stored here.
        self.loss_value = loss
        # print(loss)

def get_one_hot_state(state, num_actions=None):
    """Encode *state* (an iterable of object ids) as a multi-hot vector.

    Each non-zero id in *state* sets position ``int(id)`` of the output to 1;
    id 0 is treated as padding and skipped.

    Args:
        state: iterable of numeric object ids (0 = padding).
        num_actions: length of the returned vector. Defaults to the
            module-level ``N_ACTIONS`` (backward-compatible generalization;
            passing it explicitly makes the function usable standalone).

    Returns:
        numpy array of shape ``(num_actions,)`` containing 0.0/1.0 entries.
    """
    if num_actions is None:
        num_actions = N_ACTIONS
    state_one_hot = np.zeros((num_actions,))

    for obj in state:
        if obj == 0:
            continue  # 0 is the padding id, not a real object
        state_one_hot[int(obj)] = 1

    return state_one_hot

def train():
    """Train the DQN recommender for MAX_EOPCH episodes on dataset_train.

    Transitions are stored in the replay memory; learning starts once the
    memory has been filled. After each episode the top-1..top-10 hit rates
    on the test set are printed, and every MODEL_SAVE_FREQUENCE episodes
    both networks are checkpointed with a timestamp suffix.
    """
    dqn = DQN(max_state_value=MAX_STATE_VALUE)  # build the DQN agent
    print('\nCollecting experience...')
    for i_episode in range(MAX_EOPCH):
        env.data_list = dataset_train
        s = env.reset()

        # print(str(i_episode) + ': ' + str(dqn.loss_value))
        ep_r = 0

        # Epsilon schedule: the annealing line is commented out, so the first
        # 30% of episodes simply pin EPSILON to EPSILON_MAX; afterwards the
        # last assigned value persists (the module default otherwise).
        if i_episode < 0.3 * MAX_EOPCH:
            global EPSILON
            # EPSILON = EPSILON_MIN + (EPSILON_MAX - EPSILON_MIN) * i_episode/(0.3 * MAX_EOPCH)
            EPSILON = EPSILON_MAX
        # print(EPSILON)

        while env.finish != 1:

            while True:
                action_range = data_processer.get_action_range(state=s)
                topk_action = []
                # Single-pass `while True ... break` used as an if/elif chain
                # over the configured state encoding.
                while True:
                    if USE_WORD2VECTOR:
                        topk_action.extend(dqn.choose_action_topk(env.get_state_vector(state=s), action_range, top_k = RECOMMEND_TOPK_MAX,epsilon=EPSILON))
                        break

                    if USE_ONE_HOT:
                        topk_action.extend(dqn.choose_action_topk(get_one_hot_state(state=s), action_range,top_k=RECOMMEND_TOPK_MAX,epsilon=EPSILON))
                        break

                    topk_action.extend(dqn.choose_action_topk(s, action_range, top_k=RECOMMEND_TOPK_MAX,epsilon=EPSILON))
                    break

                s_, reward_list, done = env.exec_action(action_list = topk_action)

                # Store one transition per recommended action, using the same
                # state encoding that produced the recommendation.
                for i in range(0,len(topk_action)):
                    ep_r += reward_list[i]

                    while True:
                        if USE_WORD2VECTOR:
                            vector_s = env.get_state_vector(state=s)
                            vector_s_ = env.get_state_vector(state=s_)
                            dqn.store_transition(vector_s, topk_action[i], reward_list[i], vector_s_)
                            break
                        if USE_ONE_HOT:
                            vector_s = get_one_hot_state(state=s)
                            vector_s_ = get_one_hot_state(state=s_)
                            dqn.store_transition(vector_s, topk_action[i], reward_list[i], vector_s_)
                            break

                        dqn.store_transition(s, topk_action[i], reward_list[i], s_)
                        break



                # a = dqn.choose_action(s)
                # # take action
                # s_, r, done = env.step(a)
                # dqn.store_transition(s, a, r, s_)
                # ep_r += r


                # Learn only once the replay memory has filled up completely.
                if dqn.memory_counter > MEMORY_CAPACITY:
                    # for i in range(2):
                    dqn.learn()
                    # pass

                s = s_
                if done:
                    break


        localtime = time.asctime(time.localtime(time.time()))

        # loss_value stays the int sentinel until the first learn() call.
        if type(dqn.loss_value) != int :
            print('Epoch: ', i_episode,'train | r: ', round(ep_r, 2),'top_acc: ',
                  testTop(1, dqn),
                  testTop(2, dqn),
                  testTop(3, dqn),
                  testTop(4, dqn),
                  testTop(5, dqn),
                  testTop(6, dqn),
                  testTop(7, dqn),
                  testTop(8, dqn),
                  testTop(9, dqn),
                  testTop(10, dqn),
                  'loss: {:.3f}'.format(dqn.loss_value.data), localtime)
        import datetime  # local import kept as in original; re-executed per episode (cheap, cached)
        if i_episode % MODEL_SAVE_FREQUENCE == 0:
            timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
            eval_model_path = PATH_EVAL_MODEL+'{}.pt'.format(timestamp)
            target_model_path = PATH_TARGET_MODEL+'{}.pt'.format(timestamp)
            torch.save(dqn.eval_net, eval_model_path)
            torch.save(dqn.target_net, target_model_path)
        pass

def testTop(topK, dqn=None,load_model = False):
    """Evaluate the top-`topK` hit rate on dataset_test.

    For every step of every test flow the agent recommends its topK actions
    greedily (epsilon=1.0); a step counts as a hit when any recommended
    action earns reward 1.0.

    Args:
        topK: number of actions recommended per step.
        dqn: trained agent to evaluate; a fresh DQN is built when None.
        load_model: when building a fresh DQN, restore saved weights if present.

    Returns:
        Hit rate in [0, 1]: hits / evaluated steps.
    """
    if not dqn:
        dqn = DQN(max_state_value=MAX_STATE_VALUE,load_model=load_model)  # build the DQN agent
    env.data_list = dataset_test
    s = env.reset()

    ep_r = 0  # accumulated reward; kept for debugging, not returned
    epsilon=1.0  # always exploit during evaluation
    step_count = 0  # renamed from `sum`, which shadowed the builtin
    right_num = 0

    while env.finish != 1:

        while True:
            step_count += 1
            action_range = data_processer.get_action_range(state=s)
            topk_action = []
            # Dispatch on the configured state encoding.
            if USE_WORD2VECTOR:
                topk_action.extend(dqn.choose_action_topk(env.get_state_vector(state=s), action_range, top_k=topK,epsilon=epsilon))
            elif USE_ONE_HOT:
                topk_action.extend(dqn.choose_action_topk(get_one_hot_state(state=s), action_range, top_k=topK,epsilon=epsilon))
            else:
                topk_action.extend(dqn.choose_action_topk(s, action_range, top_k=topK,epsilon=epsilon))

            s_, reward_list, done = env.exec_action(action_list=topk_action)

            if 1.0 in reward_list:
                right_num += 1  # at least one recommended action was correct

            for i in range(0,len(topk_action)):
                ep_r += reward_list[i]

            s = s_
            if done:
                break

    return right_num/step_count

def assemble(obj_list):
    """Wrap every object in its own singleton group, then fuse groups that
    share an entity (delegates the fusing to merge())."""
    singleton_groups = [[obj] for obj in obj_list]
    return merge(singleton_groups)

# [
#     [[1,0,2],[2,0,2]],
#     [[4,0,2]]
# ]
def merge(obj_list):
    """Fuse groups of triples that share an entity, repeating until stable.

    Mutates *obj_list* in place (groups are extended and deleted) and
    returns the merged list. Two groups are fused when
    judge_has_same_entity reports a shared head/tail entity.
    """
    if len(obj_list) <= 1:
        return obj_list

    fused = False
    # Walk pairs from the back so that deleting index `hi` never disturbs
    # indices still to be visited.
    for hi in range(len(obj_list) - 1, -1, -1):
        for lo in range(hi - 1, -1, -1):
            if not judge_has_same_entity(obj_list[hi], obj_list[lo]):
                continue
            # Fold the later group into the earlier one and drop it.
            obj_list[lo].extend(obj_list[hi])
            del obj_list[hi]
            fused = True
            break

    # A fusion may create new overlaps, so run another full pass if needed.
    return merge(obj_list) if fused else obj_list
def judge_has_same_entity(obj_list_a, obj_list_b):
    """Return True when any triple in *obj_list_b* shares an entity with *obj_list_a*.

    A triple is ``[head, relation, tail]``; only positions 0 (head) and
    2 (tail) count as entities.
    """
    entities = []
    for triple in obj_list_a:
        entities.append(triple[0])
        entities.append(triple[2])

    return any(t[0] in entities or t[2] in entities for t in obj_list_b)
def trans_obj_list_to_edge_list(obj_list):
    """Convert ``[head, relation, tail]`` triples into ``(head, tail)`` edge tuples.

    Args:
        obj_list: iterable of triples.

    Returns:
        List of ``(head, tail)`` tuples, one per triple, in input order.
    """
    # Fixed: removed the unreachable `pass` that followed the return statement.
    return [(obj[0], obj[2]) for obj in obj_list]

def show_recommend_result(full_flow,step_flow,recommend_list):
    """Draw the ground-truth flow and the recommended triples as one graph.

    The edge `weight` attribute is used purely as a category tag for styling:
      0.7 -> already-executed ground-truth edge (red, solid)
      0.8 -> remaining ground-truth edge (blue, solid)
      1.0 -> recommended edge (green, dashed)

    Args:
        full_flow: full ground-truth flow as [head, relation, tail] triples.
        step_flow: prefix of full_flow executed so far (only its length is used).
        recommend_list: list of groups of recommended triples (see assemble()).
    """
    G = nx.MultiDiGraph()

    step = len(step_flow)
    # dotted_line_edge_list = [] # dashed edges
    # solid_line_edge_list = [] # solid edges
    for i in range(len(full_flow)):
        if i < step:
            G.add_edge(str(full_flow[i][0]), str(full_flow[i][2]), weight=0.7,name=str(i))# solid: already executed
            continue
        G.add_edge(str(full_flow[i][0]), str(full_flow[i][2]), weight=0.8,name=str(i)) # solid: still to come

    # Flatten the recommendation groups into one list of triples.
    recommend = []
    for r in recommend_list:
        recommend.extend(r)

    for i in range(0,len(recommend)):
        G.add_edge(str(recommend[i][0]), str(recommend[i][2]), weight=1.0,name='p'+str(i))  # dashed: recommended

    # Partition edges by the weight tag assigned above.
    solid_line_edge_list_red = [(u, v) for (u, v, d) in G.edges(data=True) if d['weight'] <= 0.75]
    solid_line_edge_list_blue = [(u, v) for (u, v, d) in G.edges(data=True) if d['weight'] > 0.75 and d['weight'] < 0.9]
    dotted_line_list = [(u, v) for (u, v, d) in G.edges(data=True) if d['weight'] > 0.9]

    pos = nx.shell_layout(G,scale=3)  # positions for all nodes

    # nodes
    nx.draw_networkx_nodes(G, pos, node_size=300)

    # edges

    nx.draw_networkx_edges(G, pos, connectionstyle='arc3, rad = 0.3',edgelist=solid_line_edge_list_blue,edge_color='blue',width=2)# solid
    nx.draw_networkx_edges(G, pos,connectionstyle='arc3, rad = 0.5', edgelist=solid_line_edge_list_red,edge_color='red', width=2)# solid
    nx.draw_networkx_edges(G, pos, connectionstyle='arc3, rad = 0.8', edgelist=dotted_line_list,edge_color='green', style='dashed',width=2)  # dashed

    edge_labels = nx.get_edge_attributes(G, 'name')
    # nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels,font_size=7,label_pos=0.8)

    # labels
    nx.draw_networkx_labels(G, pos, font_size=10, font_family='sans-serif')

    plt.axis('off')
    plt.show()
    pass

RECOMMEND_STEP = 3  # how many future steps to unroll when visualising recommendations
def test_and_show():
    """Visually inspect recommendations on the test set.

    For every step of every test flow: recommend the top actions, unroll
    RECOMMEND_STEP further greedy steps from the current state to build a
    tree of imagined states, decode and group the predicted triples into
    connected sub-flows, and draw them next to the ground-truth flow via
    show_recommend_result.

    Returns:
        Accumulated reward over the whole test set.

    Bug fixed: the three choose_action_topk calls for the current state
    previously omitted the required `epsilon` argument (a TypeError at
    runtime); they now pass epsilon=EPSILON.
    """
    dqn = DQN(max_state_value=MAX_STATE_VALUE)  # build the DQN agent
    env.data_list = dataset_test
    s = env.reset()

    ep_r = 0
    global EPSILON
    EPSILON = 0.99  # near-greedy selection while visualising

    while env.finish != 1:

        while True:

            action_range = data_processer.get_action_range(state=s)
            # Dispatch on the configured state encoding.
            if USE_WORD2VECTOR:
                topk_action = dqn.choose_action_topk(env.get_state_vector(state=s), action_range, top_k=RECOMMEND_TOPK_MAX, epsilon=EPSILON)
            elif USE_ONE_HOT:
                topk_action = dqn.choose_action_topk(get_one_hot_state(state=s), action_range, top_k=RECOMMEND_TOPK_MAX, epsilon=EPSILON)
            else:
                topk_action = dqn.choose_action_topk(s, action_range, top_k=RECOMMEND_TOPK_MAX, epsilon=EPSILON)

            action_pre_list = []
            state_dim = len(s)

            # Breadth-first unroll: step_image_state_list[k] holds every
            # imagined state reachable after k recommended actions.
            step_image_state_list = [[s.tolist()]]

            for step in range(0, RECOMMEND_STEP):
                image_next_state_list = step_image_state_list[step]
                step_image_state_list.append([])
                for image_state_now in image_next_state_list:
                    action_range = data_processer.get_action_range(state=np.array(image_state_now))

                    # Fully greedy (epsilon=1.0) prediction for the imagined state.
                    if USE_WORD2VECTOR:
                        pre_obj_list = dqn.choose_action_topk(env.get_state_vector(np.array(image_state_now)), action_range, top_k=RECOMMEND_TOPK_MAX, epsilon=1.0)
                    elif USE_ONE_HOT:
                        pre_obj_list = dqn.choose_action_topk(get_one_hot_state(np.array(image_state_now)),
                                                              action_range,
                                                              top_k=RECOMMEND_TOPK_MAX,
                                                              epsilon=1.0)
                    else:
                        pre_obj_list = dqn.choose_action_topk(np.array(image_state_now), action_range, top_k=RECOMMEND_TOPK_MAX, epsilon=1.0)

                    action_pre_list.extend(pre_obj_list)
                    for pre_obj in pre_obj_list:
                        # Slide the state window: drop the oldest slot, append the prediction.
                        image_state = image_state_now[1:state_dim]
                        image_state.append(pre_obj)
                        step_image_state_list[step + 1].append(image_state)

            # Decode predictions into triples, dropping duplicates, padding
            # entities (id 0) and self-loops.
            obj_list = []
            for o in action_pre_list:
                temp_obj = data_processer.num_to_obj(o)
                if temp_obj in obj_list:
                    continue
                if temp_obj[0] == 0 or temp_obj[2] == 0:  # dataset-specific filtering of padding entities
                    continue
                if temp_obj[0] == temp_obj[2]:
                    continue
                obj_list.append(temp_obj)
            recommend_list = assemble(obj_list)

            # Plot ground truth plus the grouped recommendations.
            current_step_flow = env.get_current_step_flow()
            full_flow = env.get_current_full_flow()
            show_recommend_result(full_flow=full_flow, step_flow=current_step_flow, recommend_list=recommend_list)

            s_, reward_list, done = env.exec_action(action_list=topk_action)

            for i in range(0, len(topk_action)):
                ep_r += reward_list[i]
                print(reward_list)

            s = s_
            if done:
                break


    return ep_r

if __name__ == '__main__':
    # Train from scratch, then rebuild fresh agents and report top-1..top-9
    # hit rates with load_model=True.
    # NOTE(review): train() saves checkpoints with a timestamp suffix, while
    # DQN(load_model=True) looks for the bare PATH_*_MODEL paths — confirm the
    # intended checkpoint file exists at the bare path before relying on this.
    train()
    # top_k_list = [1,2,3,4,5]
    # for topk in top_k_list:
    #     print(test(topk))
    # test_and_show()
    for i in range(1,10):
        acc = testTop(i,load_model=True)
        print(acc)