from asyncio import FastChildWatcher
from cgi import test
from code import interact
from collections import deque
from gc import collect
from json import load
import os
import time
import numpy as np
from isaacgym import gymutil
from isaacgym import gymapi
#from isaacgym import gymtorch
from math import sqrt
import math
from sympy import false
import cv2
from draw import *
from pcgworker.PCGWorker import *

from wfc_vecenv_stable_baselines import PCGVecEnvStableBaselines
from wfc_env import *

# Number of generation record files (performance_records_gen_0 .. _10) to analyse.
total_generations = 11
# Fitness formula selector: 1 = Euclidean distance to the ideal point,
# 2 = Manhattan distance (see compute_performance).
finess_function = 2
# Generation index used only by the commented-out debug section at the bottom.
test_trail = 14

# load performance.json and convert to a float list
def load_performance(pth):
    """Read a performance-records file and return it as a numpy array.

    The file is expected to end with a line holding a (nested) list
    literal; only the last line is parsed, matching the original behaviour.

    Raises ValueError if the file is empty (previously this crashed with an
    opaque TypeError inside eval(None)).
    """
    import ast  # stdlib; local import keeps the top-of-file imports untouched

    last_line = None
    with open(pth, "r") as f:
        for line in f:
            last_line = line
    if last_line is None:
        raise ValueError("empty performance file: " + pth)
    # ast.literal_eval replaces the original eval(): it parses the same list
    # literals but cannot execute arbitrary code embedded in the file.
    return np.array(ast.literal_eval(last_line))

# compute performance
def compute_performance(performance_records, verbose=True, fitness_function=2,
                        old_sr_threshold=0.53, new_sr_threshold=0.65):
    """Score every candidate in *performance_records* and rank them.

    Each record is a pair ``(larva_eval, adult_eval)`` of success-rate
    sequences measured before/after training; the LAST entry of each
    sequence refers to the new map, all preceding entries to old maps.

    Per candidate we derive:
      * sr_drop_max -- worst success-rate drop on any old map,
      * sr_gain     -- success-rate gain on the new map,
      * sr_new      -- final success rate on the new map.
    Candidates whose worst retained old-map success rate is below
    ``old_sr_threshold`` or whose new-map rate is below
    ``new_sr_threshold`` are excluded from the ranking (they still appear
    in ``evaluations``).

    The thresholds and fitness selector used to be hard-coded inside the
    loop bodies (the thresholds only reached the second loop by variable
    leakage); the defaults reproduce the original behaviour.
    fitness_function: 1 = Euclidean distance to the ideal point (-1, 1, 1),
    2 = Manhattan distance.

    Returns ``(scores, evaluations)``; ``scores`` is a list of
    ``(record_index, score)`` sorted ascending (lower is better).
    """
    if verbose:
        print("---------------------")

    evaluations = []
    for i in range(len(performance_records)):

        larva_eval = performance_records[i][0]
        adult_eval = performance_records[i][1]

        # 3.1. maximum performance drop on old maps; -1000 is the original
        # sentinel used when there are no old-map entries.
        drops = [larva_eval[j] - adult_eval[j] for j in range(len(larva_eval) - 1)]
        sr_drop_max = max([-1000] + drops)

        # 3.2. performance gain on new maps
        sr_gain = adult_eval[-1] - larva_eval[-1]

        # 3.3. success rate on new maps
        sr_new = adult_eval[-1]

        if verbose:
            print("-------------")
            print("sr_initial: ", larva_eval[-1], " i : ", i)
            print("sr_final: ", sr_new, " i : ", i)

        evaluations.append([sr_drop_max, sr_gain, sr_new])

    scores = []
    for i in range(len(evaluations)):

        adult_eval = performance_records[i][1]
        sr_new = adult_eval[-1]

        # Worst success rate retained on the old maps (10000 sentinel as in
        # the original implementation).
        min_old_sr = min([10000] + [adult_eval[j] for j in range(len(adult_eval) - 1)])

        # Gate out candidates that forgot old maps or never mastered the new one.
        if min_old_sr < old_sr_threshold or sr_new < new_sr_threshold:
            if verbose:
                print("min_old_sr : ", min_old_sr, " sr_new : ", sr_new)
            continue

        drop, gain, new_sr = evaluations[i]
        if fitness_function == 1:
            score = sqrt((drop + 1) ** 2 + (gain - 1) ** 2 + (new_sr - 1) ** 2)
        elif fitness_function == 2:
            score = abs(drop + 1) + abs(gain - 1) + abs(new_sr - 1)
        else:
            # Previously an unknown selector surfaced as a NameError on `score`.
            raise ValueError("unknown fitness_function: %r" % (fitness_function,))
        scores.append((i, score))

    scores.sort(key=lambda x: x[1])

    return scores, evaluations


def switch_element(evaluations,index1,index2,item):
    """Return a deep copy of *evaluations* in which the *item*-th entry of
    the rows at *index1* and *index2* is exchanged; the input is untouched."""
    swapped = copy.deepcopy(evaluations)
    swapped[index1][item], swapped[index2][item] = (
        swapped[index2][item],
        swapped[index1][item],
    )
    return swapped

def construct_performance_records_gen_path(gen):
    """Build the path of the performance-records JSON for generation *gen*,
    e.g. ``./performance_records_gen_3.json``."""
    # return "./trial2/performance_records_gen_" + str(gen) + ".json"
    return f"./performance_records_gen_{gen}.json"

# For every generation, test which of the three fitness terms actually
# decided the winner: swap each term between the top-2 candidates and see
# whether the original winner keeps rank 0.
influences_of_g = []

for g in range(total_generations):

    pr_pth = construct_performance_records_gen_path(g)

    performance_records = load_performance(pr_pth)

    # Term indices that changed the outcome in this generation
    # (0 = knowledge retention, 1 = knowledge gain, 2 = final success rate).
    influence_of_g = []

    scores, evaluations = compute_performance(performance_records)

    for i in scores:
        print(i)

    if len(scores) >= 2:

        # The two best candidates; swapping one term between them isolates
        # that term's effect on the ranking.
        idx1 = scores[0][0]
        idx2 = scores[1][0]

        rank0 = [s[0] for s in scores]

        for test_item in range(3):

            evaluations_ = switch_element(evaluations, idx1, idx2, test_item)

            # Re-score with the swapped term, applying the same gating as
            # compute_performance (duplicated here so a swap cannot
            # resurrect a gated-out candidate).
            scores_ = []
            old_sr_threshold = 0.53
            new_sr_threshold = 0.65
            for i in range(len(evaluations_)):
                adult_eval = performance_records[i][1]
                sr_new = adult_eval[-1]
                min_old_sr = 10000
                for j in range(len(adult_eval)-1):
                    if adult_eval[j] < min_old_sr:
                        min_old_sr = adult_eval[j]
                if min_old_sr < old_sr_threshold or sr_new < new_sr_threshold:
                    continue

                if finess_function == 1:
                    score = sqrt((evaluations_[i][0]+1)**2 + (evaluations_[i][1]-1)**2 + (evaluations_[i][2]-1)**2)
                elif finess_function == 2:
                    score = abs(evaluations_[i][0]+1) + abs(evaluations_[i][1]-1) + abs(evaluations_[i][2]-1)

                scores_.append((i, score))

            scores_ = sorted(scores_, key=lambda x: x[1])

            rank1 = [s[0] for s in scores_]

            # Locate the original winner in the re-ranked list.
            # BUGFIX: rank_of_old_winner was previously unbound on the first
            # iteration (NameError) or stale from a prior iteration whenever
            # the winner vanished from the ranking; -1 makes a vanished
            # winner count as an influenced outcome.
            rank_of_old_winner = -1
            for i in range(len(rank1)):
                if rank1[i] == rank0[0]:
                    rank_of_old_winner = i

            # The term influenced the outcome if the winner is no longer first.
            if rank_of_old_winner != 0:
                influence_of_g.append(test_item)

    influences_of_g.append(influence_of_g)


# Summarise the analysis: per-generation report lines, a paste-ready list of
# winning decision files, and totals of how often each fitness term mattered.
inf_1 = 0
inf_2 = 0
inf_3 = 0
winner_list = []

for g in range(total_generations):

    pr_pth = construct_performance_records_gen_path(g)
    performance_records = load_performance(pr_pth)
    scores, evaluations = compute_performance(performance_records, verbose=False)

    influence = influences_of_g[g]
    shown = influence if len(influence) > 0 else "[-]"
    print("generation : ", g, " influence_of_g : ", shown, " scores : ", len(scores), "winner : ", scores[0][0])

    # construct winner list
    winner_list.append("gen_" + str(g) + "_dec_" + str(scores[0][0]) + ".json")

    for item in influence:
        if item == 0:
            inf_1 += 1
        elif item == 1:
            inf_2 += 1
        elif item == 2:
            inf_3 += 1

print("inf_1 : ", inf_1)
print("inf_2 : ", inf_2)
print("inf_3 : ", inf_3)

for winner in winner_list:
    # wrap in double quotes and append a trailing comma (paste-ready entry)
    print("\"" + winner + "\"" + ",")

# Flatten the per-generation influence lists into three parallel 0/1
# indicator sequences, one per fitness term.  Note: one entry is emitted per
# influence *event*, not per generation.
inf_0 = []
inf_1 = []
inf_2 = []
for g in range(total_generations):
    for influence in influences_of_g[g]:
        inf_0.append(1 if influence == 0 else 0)
        inf_1.append(1 if influence == 1 else 0)
        inf_2.append(1 if influence == 2 else 0)

# Sliding-window sums of the three indicator sequences, then a trend plot.
wd_sz = 13  # window length: number of consecutive entries summed per point
inf_0_ = []  # windowed "knowledge retention" counts
inf_1_ = []  # windowed "knowledge increasement" counts
inf_2_ = []  # windowed "final success rate" counts
for g in range(total_generations):
    # NOTE(review): with total_generations = 11 and wd_sz = 13 the value
    # total_generations-1-wd_sz is -3, so wind_start is negative for every g
    # and the indices below wrap around to the END of the inf_* lists via
    # Python negative indexing.  Also, inf_0/1/2 hold one entry per influence
    # event (not per generation), so wind_end can exceed their length and
    # raise IndexError -- confirm both points are intended.
    wind_start = min(total_generations-1-wd_sz, g)
    wind_end = wind_start + wd_sz

    inf_0_n = 0
    inf_1_n = 0
    inf_2_n = 0
    for i in range(wind_start, wind_end):
        inf_0_n += inf_0[i]
        inf_1_n += inf_1[i]
        inf_2_n += inf_2[i]

    inf_0_.append(inf_0_n)
    inf_1_.append(inf_1_n)
    inf_2_.append(inf_2_n)

# draw a line plot of influence of each item using matplotlib
# (plt is presumably brought in by the star import of `draw` -- verify)
plt.plot(inf_0_, label="knowledge retention")
plt.plot(inf_1_, label="knowledge increasement")
plt.plot(inf_2_, label="final success rate")
plt.legend()
plt.show()


# pr_pth = construct_performance_records_gen_path(test_trail)
# performance_records = load_performance(pr_pth)
# scores, evaluations = compute_performance(performance_records)
# for e in evaluations:
#     print(e)
# for i in scores:
#     print(i)

# print(performance_records)

