import torch
import torch.nn as nn
import numpy as np
import torch.optim as optim
from model import *
from KNN import KNN
from NN import NN
from GRU import GRU
from dqn import DQN
from RNN import RNN
from LSTM import LSTM
from prepare_data import *

class Base_model(nn.Module):
    """Baseline recommender that suggests practice problems ("pbs") to agents.

    Wraps one of several interchangeable baseline policies (KNN / NN / GRU /
    DQN / RNN / LSTM, selected by ``opt.model_name``) together with a frozen
    aesthetic-ability estimator (``AesModel``).  ``fit`` trains the policy to
    imitate the greedy reward-maximising action; ``evaluate`` rolls out the
    learned policy and appends the mean normalised ability gain to
    ``result_<model_name>.txt``.
    """

    def __init__(self, opt, embedding_dim):
        """Build the selected baseline policy and its optimizer.

        Args:
            opt: namespace of hyper-parameters (model_name, pb_path, lr,
                gamma, path_len, D, a, ... — see usages below).
            embedding_dim: embedding size forwarded to ``AesModel``.
        """
        super(Base_model, self).__init__()
        self.opt = opt
        self.visit_count = 0
        # Problem-bank metadata: per-problem feature vectors, index -> id map,
        # and the ability dimensions each problem updates.
        self.pb_info, self.pb_info_idx, self.pb_nd_update = get_all_pbs_dimension(opt.pb_path)
        self.paint_to_course, self.course_to_dimension, self.dimension_to_idx = get_all_reflect_relation()
        self.model_name = opt.model_name

        # Instantiate the requested baseline policy.  77 is the
        # ability-vector dimensionality used throughout this file.
        if self.model_name == 'KNN':
            self.baseline_model = KNN(k=10).cuda()
        elif self.model_name == 'NN':
            self.baseline_model = NN(input_dim=77, hidden_dim=32, output_dim=len(self.pb_info)).cuda()
        elif self.model_name == 'GRU':
            self.baseline_model = GRU(77, 128, 3, len(self.pb_info)).cuda()
        elif self.model_name == 'DQN':
            self.baseline_model = DQN(opt.state_nums, opt.hidden_dim, len(self.pb_info), opt.lr, opt.gamma,
                                      opt.epsilon, opt.target_update_nums, self.pb_info, self.pb_info_idx,
                                      opt.least_score, opt.ReplayBuffer_capacity, opt.batch_size).cuda()
        elif self.model_name == 'RNN':
            self.baseline_model = RNN(77, 128, len(self.pb_info), 3).cuda()
        elif self.model_name == 'LSTM':
            self.baseline_model = LSTM(77, 128, len(self.pb_info), 3).cuda()

        # KNN is non-parametric, so only the other models get an optimizer.
        if self.model_name != 'KNN':
            self.optimizer = optim.Adam(self.baseline_model.parameters(), lr=opt.lr)

        self.aes = AesModel(embedding_dim, opt)
        self.train_agents, self.test_agents = {}, {}
        self.ReplayBuffer_capacity = opt.ReplayBuffer_capacity
        self.loss_func = nn.CrossEntropyLoss()

    def cal_new_ability(self, action, old_ability):
        """Return the ability vector after practising problem ``action``.

        Only the dimensions listed in ``self.pb_nd_update[action]`` change
        (logistic update driven by the gap between the current ability and
        the problem's difficulty); every other dimension is carried over.

        Args:
            action: problem id, a key of ``pb_info`` / ``pb_nd_update``.
            old_ability: (1, D) tensor; only row 0 is read.

        Returns:
            Flat (D,) CUDA tensor (callers re-add the batch dim).
        """
        old_ability = old_ability[0]
        updated_dims = self.pb_nd_update[action]  # hoisted: constant for this action
        new_ability = []
        for d in range(len(old_ability)):
            if d in updated_dims:
                gap = abs(old_ability[d] - (1 - self.pb_info[action][d]))
                new_ability.append(1 / (1 + torch.exp(-self.opt.D * self.opt.a * gap)))
            else:
                new_ability.append(old_ability[d])
        return torch.tensor(new_ability).cuda()

    def cal_reward(self, old_ability, new_ability, discount):
        """Mean normalised ability gain, scaled by ``discount``.

        Each dimension's gain is normalised by its remaining headroom
        ``1 - old``; assumes every old ability value is strictly below 1.
        """
        gains = [(float(new_ability[d]) - float(old_ability[d])) / (1 - float(old_ability[d]))
                 for d in range(len(new_ability))]
        return discount * sum(gains) / len(new_ability)

    def _fit_knn(self):
        """Fit the non-parametric KNN baseline on the problem bank itself.

        Each problem's feature vector is labelled with the index of the
        problem that maximises the (undiscounted) reward from that vector.
        """
        X_train = torch.tensor([va[0:77] for _, va in self.pb_info.items()]).cuda()
        Y_train = []
        for _, va in self.pb_info.items():
            # BUG FIX: the inner loop previously shadowed the outer ``ke``.
            rewards = [self.cal_reward(va[0:77], other[0:77], 1) for _, other in self.pb_info.items()]
            Y_train.append(rewards.index(max(rewards)))
        self.baseline_model.fit(X_train, torch.tensor(Y_train).cuda())

    def _init_agent(self, agents, agent_name, picture):
        """Register ``agent_name`` in ``agents`` with its initial aesthetic
        ability predicted by the (frozen) AesModel from ``picture``."""
        agents[agent_name] = {}
        one_picture = picture.unsqueeze(0).cuda()
        aes_ability, _ = self.aes(one_picture)
        agents[agent_name]['cur_ability'] = torch.tensor(aes_ability).cuda()
        agents[agent_name]['init_ability'] = aes_ability
        agents[agent_name]['discount'] = self.opt.gamma
        agents[agent_name]['iter_count'] = 0
        agents[agent_name]['pbs'] = []

    def _policy_scores(self, ability):
        """Run the baseline policy; returns (1, |pb_info|) per-problem scores."""
        if self.model_name in ('GRU', 'RNN', 'LSTM'):
            # Recurrent baselines expect an extra sequence dimension.
            return self.baseline_model(ability.unsqueeze(0))
        return self.baseline_model(ability)

    def _pick_unseen(self, score, seen):
        """Highest-scored problem not already practised; "0" if all seen."""
        for idx in torch.argsort(score, descending=True)[0].tolist():
            if self.pb_info_idx[idx] not in seen:
                return self.pb_info_idx[idx]
        return "0"

    def fit(self, train_loader):
        """Train the baseline policy by imitating the greedy best action.

        For each agent (painting) a path of ``opt.path_len`` problems is
        rolled out; at every step the supervised label is the problem that
        maximises the one-step discounted reward from the current ability.
        """
        if self.model_name == 'KNN':
            self._fit_knn()

        self.aes.load_state_dict(torch.load('./aes_model_weight.pth'))
        num_visits = 0
        tl_visits = int(train_loader.batch_size) * len(train_loader) * self.opt.path_len
        self.train_agents = {}
        while num_visits < tl_visits:
            for img, name in train_loader:
                for i in range(len(img)):
                    if name[i] not in self.train_agents:
                        self._init_agent(self.train_agents, name[i], img[i])
                    agent = self.train_agents[name[i]]
                    if agent['iter_count'] >= self.opt.path_len:
                        continue

                    # Greedy supervision target: the reward-maximising problem.
                    rewards = [self.cal_reward(agent['cur_ability'][0].tolist(), va[0:77], agent['discount'])
                               for _, va in self.pb_info.items()]
                    action_label = torch.tensor([rewards.index(max(rewards))]).cuda()

                    if self.model_name == 'KNN':
                        action = self.baseline_model(agent['cur_ability'], self.pb_info_idx, agent['pbs'])
                    else:
                        score = self._policy_scores(agent['cur_ability'])
                        loss = self.loss_func(score, action_label)
                        # BUG FIX: gradients used to accumulate forever
                        # because zero_grad() was never called.
                        self.optimizer.zero_grad()
                        loss.backward()
                        self.optimizer.step()
                        action = self._pick_unseen(score, agent['pbs'])

                    new_ability = self.cal_new_ability(action, agent['cur_ability'])
                    agent['cur_ability'] = new_ability.unsqueeze(0)
                    agent['discount'] *= self.opt.gamma
                    agent['iter_count'] += 1
                    agent['pbs'].append(action)
                    num_visits += 1

    def evaluate(self, test_loader):
        """Roll out the learned policy on ``test_loader`` and report the
        mean normalised ability gain per agent (printed and appended to
        ``result_<model_name>.txt``)."""
        self.aes.load_state_dict(torch.load('./aes_model_weight.pth'))
        num_visits = 0
        tl_visits = int(test_loader.batch_size) * len(test_loader) * self.opt.path_len
        self.test_agents = {}
        # Inference only: no parameter updates happen here.
        with torch.no_grad():
            while num_visits < tl_visits:
                for img, name in test_loader:
                    for i in range(len(img)):
                        if name[i] not in self.test_agents:
                            self._init_agent(self.test_agents, name[i], img[i])
                        agent = self.test_agents[name[i]]
                        if agent['iter_count'] >= self.opt.path_len:
                            continue

                        if self.model_name == 'KNN':
                            action = self.baseline_model(agent['cur_ability'], self.pb_info_idx, agent['pbs'])
                        else:
                            score = self._policy_scores(agent['cur_ability'])
                            action = self._pick_unseen(score, agent['pbs'])

                        new_ability = self.cal_new_ability(action, agent['cur_ability'])
                        agent['cur_ability'] = new_ability.unsqueeze(0)
                        agent['discount'] *= self.opt.gamma
                        agent['iter_count'] += 1
                        agent['pbs'].append(action)
                        num_visits += 1

        # Score: average over agents of the summed normalised per-dimension gain.
        test_score, tl = 0, 0
        for _, va in self.test_agents.items():
            tl += 1
            cur = va['cur_ability'].squeeze(0).tolist()
            init = va['init_ability'].squeeze(0).tolist()
            test_score += sum((c - o) / (1 - o) for c, o in zip(cur, init))
        test_score /= tl
        print(test_score)
        with open(f"result_{self.model_name}.txt", mode='a') as file:
            file.write(str(test_score) + '\n')

