"""
===============================================
* filename: environment.py
* author:   zhangbing
* describe: environment of the recommender system

* modifier:
* time:
* content:
===============================================
"""
from load_data import *
import numpy as np
import math
import gensim
from gensim.models import FastText

USE_ONE_HOT = True
REWARD_DISCOUNT = 0.7
VECTOR_DIM = 8
NEXT_K = 6


class Environment():
    def __init__(self):
        self.action_num = data_processer.dict_action_to_num.__len__()
        self.states_dimension = 15
        self.state_now = list(np.zeros((self.states_dimension,)))
        self.exec_list = 0 #record exec which list
        self.exec_step = 0 #record exec which step
        self.done = 0
        self.record = {}
        self.data_list = dataset_train

        str_data_set = []
        for data_i in self.data_list:
            str_data_set.append([str(x) for x in data_i])
        # self.word2vector_model = gensim.models.Word2Vec(str_data_set, min_count=1, size=VECTOR_DIM,sg=1)
        # self.word2vector_model = FastText(str_data_set, size=VECTOR_DIM, window=4, min_count=1, iter=10, min_n=3, max_n=6, word_ngrams=0)
        # aa = self.word2vector_model.most_similar('99999')
        pass
    def word_to_vector(self,word):
        return self.word2vector_model[word]

    def reset(self):
        self.state_now = list(np.zeros((self.states_dimension,)))
        self.exec_list = 0 #record exec which list
        self.exec_step = 0 #record exec which step

        # give a init state
        obj_new = self.data_list[self.exec_list][self.exec_step]
        state_next = self.state_now[1:self.states_dimension]
        state_next.append(obj_new)
        self.state_now = state_next
        self.done = 0
        self.finish = 0

        return self.get_state_now()
        # return self.get_state_vector(state=self.state_now)


    def step_to_next(self):
        self.exec_step += 1
        self.done = 0
        self.finish = 0

        if self.exec_step >= len(self.data_list[self.exec_list]):
            self.exec_step = 0
            self.exec_list += 1
            self.state_now = list(np.zeros((self.states_dimension,))) #init the state
            self.done = 1

        if self.exec_list >= len(self.data_list):
                self.exec_list = 0
                self.finish = 1

        obj_new = self.data_list[self.exec_list][self.exec_step]
        state_next = self.state_now[1:self.states_dimension]
        state_next.append(obj_new)
        self.state_now = state_next


        return np.array(state_next)

    def get_state_now(self):
        return np.array(self.state_now)

    #simulate train
    def step(self,action_num):
        # s_, r, done, info = env.step(a)
        reward = -1

        # print(self.state_now, action_num)
        state_next = self.step_to_next()
        obj_true = self.state_now[-1]

        if action_num == obj_true:
            reward = 1
        # print(reward)
        return state_next,reward,self.done

    #simulate train

    def exec_action(self,action_list):
        reward_list = []

        for a in action_list:
            reward_list.append(self.calculate_reward(action=a))

        state_next = self.step_to_next()

        return state_next,reward_list,self.done
    def get_current_full_flow(self):
        obj_list = self.data_list[self.exec_list]
        ret = []
        for o in obj_list:
            temp_obj = data_processer.num_to_obj(o)
            ret.append(temp_obj)
        return ret

    def get_current_step_flow(self):
        obj_list = self.data_list[self.exec_list][0:self.exec_step + 1]
        ret = []
        for o in obj_list:
            temp_obj = data_processer.num_to_obj(o)
            ret.append(temp_obj)
        return ret

    def get_state_vector(self,state):
        state_vector = []

        for obj in state:
            if(obj == 0.0):
                vector = np.zeros((VECTOR_DIM)).tolist()
            else:
                vector = self.word_to_vector(word = str(int(obj))).tolist()
            state_vector.extend(vector)

        return np.array(state_vector)

    def calculate_reward(self,action):
        true_obj_list = self.get_next_k_obj(k=NEXT_K)
        if len(true_obj_list) <= 0:
            return -1.0
        if action not in true_obj_list:
            return -1.0
        index = true_obj_list.index(action)
        # return math.pow(REWARD_DISCOUNT,index)
        return 1
    def get_next_k_obj(self,k):
        obj_list = []
        for i in range(self.exec_step + 1, len(self.data_list[self.exec_list])):
            obj_list.append(self.data_list[self.exec_list][i])

        return obj_list
        pass


if __name__ == '__main__':
    env = Environment()
    env.step(action='[0,1,16]')

