import torch
import torch.nn as nn
import numpy as np
from model import *
from dqn import DQN
from prepare_data import *

class MCS_model(nn.Module):
    """Couples a pre-trained aesthetic-perception model (AesModel) with an
    actor-critic agent that recommends "pb" actions (indices into the problem
    bank) to raise a learner's per-dimension ability scores.

    Each image gets a lazily-created "agent" dict tracking:
    ``cur_ability`` (1, n_dims) tensor, ``init_ability``, a geometrically
    decaying reward ``discount`` (train only), ``iter_count`` actions taken,
    and the list of chosen ``pbs``.
    """

    def __init__(self, opt, embedding_dim):
        super(MCS_model, self).__init__()
        self.opt = opt
        self.visit_count = 0
        # Static problem-bank metadata: per-pb difficulty vectors, their index
        # map, and which ability dimensions each pb updates.
        self.pb_info, self.pb_info_idx, self.pb_nd_update = get_all_pbs_dimension(opt.pb_path)
        # Mappings: painting -> course -> dimension names -> dimension indices.
        self.paint_to_course, self.course_to_dimension, self.dimension_to_idx = get_all_reflect_relation()
        self.actor_critic = ActorCritic(
            opt.state_nums, opt.hidden_dim, len(self.pb_info), opt.lr, opt.gamma,
            opt.epsilon, opt.target_update_nums, self.pb_info, self.pb_info_idx,
            opt.least_score, opt.ReplayBuffer_capacity, opt.batch_size)
        self.aes = AesModel(embedding_dim, opt)
        self.train_agents, self.test_agents = {}, {}
        self.ReplayBuffer_capacity = opt.ReplayBuffer_capacity

    def cal_new_ability(self, action, old_ability):
        """Return the ability vector after practicing pb ``action``.

        Only the dimensions listed in ``self.pb_nd_update[action]`` change;
        each follows a logistic (IRT-style) growth curve driven by the gap
        between the old ability and the pb difficulty ``1 - pb_info[action]``.
        Any regression (new < old on some dimension) is appended to
        ``reason_new.txt`` for offline inspection.

        old_ability: tensor of shape (1, n_dims) — assumed on CUDA, TODO confirm.
        Returns a 1-D CUDA tensor of the same length.
        """
        old_ability = old_ability[0]
        touched = self.pb_nd_update[action]
        new_ability = []
        for dim in range(len(old_ability)):
            if dim in touched:
                gap = abs(old_ability[dim] - (1 - self.pb_info[action][dim]))
                new_ability.append(1 / (1 + torch.exp(-self.opt.D * self.opt.a * gap)))
            else:
                new_ability.append(old_ability[dim])
        # Log any dimension that got worse, to debug the update rule.
        new_vals = [v.item() for v in new_ability]
        old_vals = old_ability.tolist()
        if any(n < o for n, o in zip(new_vals, old_vals)):
            with open("reason_new.txt", mode='a') as file:
                file.write('chosen pb: ' + str(self.pb_info[action]) + '\n'
                           + 'old ability : ' + str(old_vals) + '\n'
                           + 'new ability : ' + str(new_vals) + '\n\n')
        # torch.stack keeps dtype/device of the element tensors; the previous
        # torch.tensor(list-of-tensors) form is deprecated and warns.
        return torch.stack(new_ability).cuda()

    def cal_reward(self, old_ability, new_ability, discount):
        """Discounted mean normalized gain over all dimensions.

        ``old_ability`` is a single scalar baseline applied to every dimension
        (unlike the per-dimension variant inlined in ``fit``). Each term is
        (new - old) / (1 - old), i.e. the fraction of remaining headroom
        gained. Raises ZeroDivisionError if old_ability == 1.
        """
        old = float(old_ability)
        gains = [(float(v.item()) - old) / (1 - old) for v in new_ability]
        return discount * (sum(gains) / len(gains))

    def fit(self, train_loader):
        """Train the actor-critic over ``train_loader``.

        Each image's agent starts from the frozen aes model's prediction,
        min-max normalized into [0.2, 0.8], then takes up to ``opt.path_len``
        actions; every step produces a discounted per-dimension normalized
        gain used as the reward for ``actor_critic.update``.
        """
        # NOTE(review): hard-coded absolute checkpoint path — consider making
        # this an opt field. Raw string avoids the invalid \H, \A escapes.
        self.aes.load_state_dict(torch.load(r'H:\HJL\Aes3\AestheticPerception_v2\aes_model_weight.pth'))
        num_visits = 0
        tl_visits = int(train_loader.batch_size) * len(train_loader) * self.opt.path_len
        self.train_agents = {}
        while num_visits < tl_visits:
            made_progress = False
            for img, name in train_loader:
                for i in range(len(img)):
                    key = name[i]
                    if key not in self.train_agents:
                        one_picture = img[i].unsqueeze(0).cuda()
                        aes_ability, _ = self.aes(one_picture)
                        # Min-max normalize initial abilities into [0.2, 0.8].
                        # NOTE(review): a constant prediction (max == min)
                        # would divide by zero here — confirm upstream.
                        min_val, max_val = aes_ability.min(), aes_ability.max()
                        aes_ability = (aes_ability - min_val) * (0.8 - 0.2) / (max_val - min_val) + 0.2
                        self.train_agents[key] = {
                            # clone().detach() replaces the deprecated
                            # torch.tensor(tensor) pattern.
                            'cur_ability': aes_ability.clone().detach().cuda(),
                            'init_ability': aes_ability,
                            'discount': self.opt.gamma,
                            'iter_count': 0,
                            'pbs': [],
                        }
                    agent = self.train_agents[key]
                    if agent['iter_count'] >= self.opt.path_len:
                        continue
                    # Restrict candidate actions to the dimensions relevant to
                    # this painting's course.
                    dims = [self.dimension_to_idx[d]
                            for d in self.course_to_dimension[self.paint_to_course[key]]]
                    action = self.actor_critic.choose_action(dims, agent['pbs'], agent['cur_ability'])
                    new_ability = self.cal_new_ability(action, agent['cur_ability'])
                    # Per-dimension normalized gain, averaged, then discounted.
                    old_vals = agent['cur_ability'].squeeze(0).tolist()
                    rsum = sum((new_ability[k].item() - old_vals[k]) / (1 - old_vals[k])
                               for k in range(len(new_ability))) / len(new_ability)
                    reward = agent['discount'] * rsum
                    self.actor_critic.update(agent['cur_ability'], action, reward, new_ability)
                    agent['cur_ability'] = new_ability.unsqueeze(0)
                    agent['discount'] *= self.opt.gamma
                    agent['iter_count'] += 1
                    agent['pbs'].append(action)
                    # BUG FIX: num_visits was never incremented, so the outer
                    # while-loop could never terminate.
                    num_visits += 1
                    made_progress = True
            if not made_progress:
                # All agents exhausted their paths. tl_visits can overcount
                # when the last batch is smaller than batch_size, so without
                # this guard the while-loop would spin forever.
                break

    def evaluate(self, test_loader):
        """Roll out ``opt.path_len`` actions per test image (no updates),
        then print/log per-dimension ability improvements and print the mean
        normalized gain summed over dimensions, averaged over agents.
        """
        num_visits = 0
        tl_visits = int(test_loader.batch_size) * len(test_loader) * self.opt.path_len
        while num_visits < tl_visits:
            made_progress = False
            for img, name in test_loader:
                for i in range(len(img)):
                    key = name[i]
                    if key not in self.test_agents:
                        one_picture = img[i].unsqueeze(0).cuda()
                        aes_ability, _ = self.aes(one_picture)
                        # Min-max normalize initial abilities into [0.2, 0.8].
                        min_val, max_val = aes_ability.min(), aes_ability.max()
                        aes_ability = (aes_ability - min_val) * (0.8 - 0.2) / (max_val - min_val) + 0.2
                        self.test_agents[key] = {
                            'cur_ability': aes_ability,
                            'init_ability': aes_ability,
                            'iter_count': 0,
                            'pbs': [],
                        }
                    agent = self.test_agents[key]
                    if agent['iter_count'] >= self.opt.path_len:
                        continue
                    dims = [self.dimension_to_idx[d]
                            for d in self.course_to_dimension[self.paint_to_course[key]]]
                    action = self.actor_critic.choose_action(dims, agent['pbs'], agent['cur_ability'])
                    new_ability = self.cal_new_ability(action, agent['cur_ability'])
                    agent['cur_ability'] = new_ability.unsqueeze(0)
                    agent['iter_count'] += 1
                    agent['pbs'].append(action)
                    num_visits += 1
                    made_progress = True
            if not made_progress:
                break  # guards against tl_visits overcounting a short last batch
        # Report phase: log agents that regressed on any dimension and
        # accumulate the normalized gain for every dimension of every agent.
        test_score, tl = 0, 0
        with open("reason.txt", mode='a') as file:
            for ke, va in self.test_agents.items():
                tl += 1
                pbs = va['pbs']
                print(f'{ke} {pbs}')
                ans = ke + str(' ') + str(pbs) + str(' : ')
                cur_vals = va['cur_ability'].squeeze(0).tolist()
                init_vals = va['init_ability'].squeeze(0).tolist()
                for now, old in zip(cur_vals, init_vals):
                    if now - old < 0:
                        ans = ans + str('now : ') + str(now) + str(' ') + str('old : ') + str(old) + str(' ')
                    print(f'{now} {old}')
                    test_score += (now - old) / (1 - old)
                file.write(ans + str('\n'))
            # Guard the degenerate empty-loader case instead of dividing by 0.
            test_score = test_score / tl if tl else 0.0
        print(test_score)



