import torch
from IQA_pytorch import SSIM, GMSD, LPIPSvgg, DISTS
import os
import numpy as np
import xlwt
import xlrd
import xml.dom.minidom
import copy
import cv2
# Box IoU (Jaccard index) helpers from the project's layers package
from layers.box_utils import jaccard, center_size, mask_iou
#keyword = ['person','bicycle','car','motorcycle','bus','truck','traffic light','fire hydrant','stop sign','parking meter','bench','dog','cat','sheep','cow']
keyword = ['car']
# Get the directory names under the screenshot root
def get_screenShot_Path(filepath):
    #for dirpath, dirnames, filenames in os.walk('F:/autodrive_data/meta/ScreenShot'):\
    screenShot_path = False
    for dirpath, dirnames, filenames in os.walk(filepath):
        screenShot_path = dirnames
        print(screenShot_path)
        #print(screenShot_path)
        break
        # print('dirpaath',dirpath)
        # print('dirnames')
        # print(dirnames)
    if screenShot_path:
        return screenShot_path

# Get the paths of the concrete scenes generated from a mutated abstract scene
def get_trans_Path(filepath):
    trans_path = False
    for dirpath, dirnames, filenames in os.walk(filepath):
        trans_path = dirnames
        #print(trans_path)
        break
        # print('dirpaath',dirpath)
        # print('dirnames')
        # print(dirnames)
    if trans_path:
        return trans_path
def get_result(filepath):
    screenShot_path = get_screenShot_Path(filepath)
    print(screenShot_path)
    if screenShot_path:
        for i in screenShot_path:
            #for dirpath, dirnames, filenames in os.walk('F:/autodrive_data/original/ScreenShot/'+i):
            for dirpath, dirnames, filenames in os.walk(filepath+'/' + i):
                print(filepath+'/' + i)
                for j in filenames:
                    if 'screenshot' in j:
                        print(1)
                        #获取输出图来手工验证
                        os.system(
                            'D:\Anaconda\envs\yolact-env\python.exe F:/PycharmProjects/yolact/eval.py --trained_model=F:/PycharmProjects/yolact/weights/yolact_base_54_800000.pth --score_threshold=0.1 --top_k=15 --image=' + filepath + '/' + i + '/' + j+'+' + filepath + '/' + i + '/output_image.png')
                        #修改概率阈值
                        # os.system(
                        #     'D:\Anaconda\envs\yolact-env\python.exe F:/PycharmProjects/yolact/eval.py --trained_model=F:/PycharmProjects/yolact/weights/yolact_base_54_800000.pth --score_threshold=0.10 --top_k=15 --image='+filepath+'/' + i + '/' + j)
                        # #os.system('D:\Anaconda\envs\yolact-env\python.exe F:/PycharmProjects/yolact/eval.py --trained_model=F:/PycharmProjects/yolact/weights/yolact_base_54_800000.pth --score_threshold=0.15 --top_k=15 --image=F:/autodrive_data/meta/ScreenShot/'+i+'/'+j)
# Computes IoU via the project's jaccard; unused here because the data structures differ
def _bbox_iou(bbox1, bbox2, iscrowd=False):
    ret = jaccard(bbox1, bbox2, iscrowd)
    return ret.cpu()


def bb_intersection_over_union(boxA, boxB):
    boxA = [int(x) for x in boxA]
    boxB = [int(x) for x in boxB]

    xA = max(boxA[0], boxB[0])
    yA = max(boxA[1], boxB[1])
    xB = min(boxA[2], boxB[2])
    yB = min(boxA[3], boxB[3])

    interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)

    boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)

    iou = interArea / float(boxAArea + boxBArea - interArea)

    return iou
# Save prediction results in Excel format to compare runs and pick a suitable threshold.
# Also computes IoU against the groundtruth boxes.
def get_result_excel(filepath):
    #用来计算pr
    pr_dict = {}
    screenShot_path = get_screenShot_Path(filepath)
    print(screenShot_path)
    for i in screenShot_path:
        # for dirpath, dirnames, filenames in os.walk('F:/autodrive_data/original/ScreenShot/'+i):
        for dirpath, dirnames, filenames in os.walk(filepath + '/' + i):
            for j in filenames:
                if 'groundtruth' in j:
                    # 打开xml文档
                    truth_dict = {}
                    dom = xml.dom.minidom.parse(filepath + '/' + i + '/' + j)
                    # 得到文档元素对象
                    root = dom.documentElement
                    sub_obj = root.getElementsByTagName('object')
                    #print(sub_obj)
                    #记录tp fp的值，坐标分别对应每个物体的选择框
                    # tp = np.zeros(len(sub_obj))
                    # fp = np.zeros(len(sub_obj))
                    count = 0
                    #获得groundtruth的物体数量
                    #count_groundtruth = len(sub_obj)
                    for num in range(len(sub_obj)):
                        truth_list = []
                        name = sub_obj[num].getElementsByTagName('name')[0]
                        print(name.childNodes[0].data)
                        truth_list.append(name.childNodes[0].data)
                        #print(obj1.childNodes[0].data)
                        # print(obj1.firstChild.data)
                        bndbox = sub_obj[num].getElementsByTagName('bndbox')[0]
                        xmin = bndbox.getElementsByTagName('xmin')[0]
                        ymin = bndbox.getElementsByTagName('ymin')[0]
                        xmax = bndbox.getElementsByTagName('xmax')[0]
                        ymax = bndbox.getElementsByTagName('ymax')[0]
                        truth_list.append(xmin.childNodes[0].data)
                        truth_list.append(ymin.childNodes[0].data)
                        truth_list.append(xmax.childNodes[0].data)
                        truth_list.append(ymax.childNodes[0].data)
                        truth_dict[count]  = truth_list
                        count += 1
                        #print(obj3.childNodes[0].data)
            for j in filenames:
                if 'result_dict' in j:
                    result_dict = np.load(filepath + '/' + i + '/' + j, allow_pickle=True).item()
                    del_list = []
                    for k, value in enumerate(result_dict['classes_name']):
                        # 判断是否为重要元素
                        if value in keyword:
                            for k1, value1 in enumerate(result_dict['classes_name']):
                                if k1!=k:
                                    iou = bb_intersection_over_union(result_dict['boxes'][k],
                                                                     result_dict['boxes'][k1])
                                    if iou>0.5:
                                        if result_dict['scores'][k]>result_dict['scores'][k1]:
                                            print(type(result_dict['scores']))
                                            if k1 not in del_list:
                                                del_list.append(k1)
                                            # del result_dict['scores'][k1]
                                            # del result_dict['boxes'][k1]
                                            # del result_dict['classesname'][k1]
                                        else:
                                            if k not in del_list:
                                                del_list.append(k)
                                            # del result_dict['scores'][k]
                                            # del result_dict['boxes'][k]
                                            # del result_dict['classesname'][k]
                    print(del_list)
                    # result_dict['scores'] = np.delete(result_dict['scores'],del_list)
                    # result_dict['boxes'] = np.delete(result_dict['boxes'], del_list)
                    # result_dict['classes_name'] = np.delete(result_dict['classes_name'], del_list)
                    # 设置excel表格格式
                    wb = xlwt.Workbook()
                    ws = wb.add_sheet('sheet')
                    style = xlwt.XFStyle()
                    style.num_format_str = '0%'
                    count = 0  # 记录excel表格的行数
                    ws.write(count, 0, '物体id')
                    ws.write(count, 1, '物体名字')
                    ws.write(count, 2, '置信概率')
                    ws.write(count, 3, 'tp')
                    ws.write(count, 4, 'fp')
                    #np = len(result_dict['classes_name'])
                    count += 1
                    #判断物体有没有被检测到
                    flag_list = [0 for x in range(0,len(result_dict['classes_name']))]
                    result_dict['flag'] = flag_list
                    for k, value in enumerate(result_dict['classes_name']):
                        # 判断是否为重要元素
                        if value in keyword and k not in del_list:
                            if value not in pr_dict.keys():
                                pr_list = []
                                for p in range(11):
                                    pr_list.append([0,0,0])
                                pr_dict[value] = copy.deepcopy(pr_list)
                            iou_max = 0
                            #truth_value 0 物体标签 1 2 3 4 分别为四个点的位置
                            for truth_k,truth_value in truth_dict.items():
                                print(result_dict['boxes'][k])
                                iou = bb_intersection_over_union(result_dict['boxes'][k],[truth_value[1],truth_value[2],truth_value[3],truth_value[4]])
                                if iou_max< iou:
                                    iou_max = iou
                                    #获取对应的标签名字
                                    truth_value_max = truth_value[0]
                            #计算置信概率的的区间
                            res_score = int(result_dict['scores'][k]/0.1)
                            if iou_max > 0.5 and value == truth_value_max:
                                if result_dict['flag'][k] == 0:
                                    ws.write(count, 3,1)
                                    result_dict['flag'][k] = 1
                                    #大于某个置信概率后，都应该被添加
                                    for r in range(res_score+1):
                                        pr_dict[value][r][0]+=1
                                else:
                                    for r in range(res_score + 1):
                                        pr_dict[value][r][1] += 1
                                    ws.write(count, 4, 1)
                            else:
                                for r in range(res_score + 1):
                                    pr_dict[value][r][1] += 1
                                ws.write(count, 4, 1)
                            ws.write(count, 0, int(result_dict['classes'][k]))
                            ws.write(count, 1, value)
                            ws.write(count, 2, float(result_dict['scores'][k]))
                            count += 1
                            # print(j)
                            wb.save(filepath + '/' + i + '/result_excel.xls')
                    for truth_k, truth_value in truth_dict.items():
                        # 非空验证，之前标注的标签比现在的keyword要多
                        if truth_value[0] in keyword:
                            for m in range(len(pr_dict[truth_value[0]])):
                                pr_dict[truth_value[0]][m][2]+=1
    print('*******************************')
    print(pr_dict)
    wb = xlwt.Workbook()
    ws = wb.add_sheet('sheet')
    style = xlwt.XFStyle()
    style.num_format_str = '0%'
    count = 0  # 记录excel表格的行数
    ws.write(count, 0, '物体名字')
    ws.write(count, 1, '置信概率阈值')
    ws.write(count, 2, 'precision')
    ws.write(count, 3, 'recall')
    ws.write(count, 4, 'f1')
    count+=1
    for k,v in pr_dict.items():
        #print(type(v))
        for x,y in enumerate(v):
            print('***')
            print(k)
            print(x)
            print('***')
            ws.write(count, 0, k)
            ws.write(count, 1, x*0.1, style)
            #print(type(v[x]))
            if int(v[x][0]+v[x][1]) == 0:
                print('precision', 0)
                precision = 0
            else:
                print('precision',v[x][0]/(v[x][0]+v[x][1]))
                precision = v[x][0]/(v[x][0]+v[x][1])
            ws.write(count, 2, precision)
            if int(v[x][2]) == 0:
                print('recall',0)
                recall = 0
            else:
                print('recall',v[x][0]/v[x][2])
                recall = v[x][0]/v[x][2]
            ws.write(count, 3, recall)
            if precision + recall == 0:
                f1 = 0
            else:
                f1 = 2*precision*recall/(precision + recall)
            ws.write(count, 4, f1)
            count +=1
    #print(filepath)
    wb.save(filepath.replace('screenShot','')+'pr_excel.xls')
def get_result_deeptest(filepath):
    #获取当前文件夹的下一层文件夹
    screenShot_path = get_screenShot_Path(filepath)
    #不同的变异类型
    print(screenShot_path)
    # for c in range(1):
    #     i = 'scale'
    # screenShot_path = ['scale']
    for i in screenShot_path:
        screenShot_path2 = get_screenShot_Path(filepath+'/' + i)
        for j in screenShot_path2:
            #for dirpath, dirnames, filenames in os.walk('F:/autodrive_data/original/ScreenShot/'+i):
            for dirpath, dirnames, filenames in os.walk(filepath+'/' + i + '/' +j):
                for k in filenames:
                    if 'mutate' in k:
                        print(1)
                        #获取输出图来手工验证
                        os.system(
                            'D:\Anaconda\envs\yolact-env\python.exe F:/PycharmProjects/yolact/eval.py --trained_model=F:/PycharmProjects/yolact/weights/yolact_base_54_800000.pth --score_threshold=0.1 --top_k=15 --image=' + filepath + '/' + i + '/' + j+'/'+k +'+' + filepath + '/' + i +'/' + j+ '/output_image.png')
                        #修改概率阈值
                        # os.system(
                        #     'D:\Anaconda\envs\yolact-env\python.exe F:/PycharmProjects/yolact/eval.py --trained_model=F:/PycharmProjects/yolact/weights/yolact_base_54_800000.pth --score_threshold=0.10 --top_k=15 --image='+filepath+'/' + i + '/' + j)
                        # #os.system('D:\Anaconda\envs\yolact-env\python.exe F:/PycharmProjects/yolact/eval.py --trained_model=F:/PycharmProjects/yolact/weights/yolact_base_54_800000.pth --score_threshold=0.15 --top_k=15 --image=F:/autodrive_data/meta/ScreenShot/'+i+'/'+j)

# For metamorphic (transformed) scenes, obtain recognition results (also works for objects placed at designated positions)
def get_result_all(filepath):
    trans_Path = get_trans_Path(filepath)
    if trans_Path:
        for i in trans_Path:
            print(filepath + '/' + i)
            get_result(filepath + '/' + i+'/screenShot')


# Check, pairwise, whether the concrete scenes in the same folder satisfy the metamorphic relation
def cal_result(filepath):
    all_count = 0
    fit_count = 0
    #图片所在路径
    threshold = 0.8
    screenShot_path = get_screenShot_Path(filepath)
    list_fit = []
    list_fit_2 = []
    #for i in screenShot_path:
    length = len(screenShot_path)
    for m in range(0, length):
        compare_dict_m = {}
        compare_dict_n = {}
        for n in range(m + 1, length):
            if screenShot_path[m] != screenShot_path[n]:
                #for dirpath, dirnames, filenames in os.walk('F:/autodrive_data/meta/ScreenShot/'+screenShot_path[m]):
                for dirpath, dirnames, filenames in os.walk(filepath+'/' + screenShot_path[m]):
                    for j in filenames:
                        if 'result_dict' in j:
                            
                            result_dict_m = np.load(filepath+'/'+screenShot_path[m]+'/'+j, allow_pickle=True).item()
                            print(screenShot_path[m])
                            print(result_dict_m)
                for dirpath, dirnames, filenames in os.walk(filepath+'/' + screenShot_path[n]):
                    for j in filenames:
                        if 'result_dict' in j:
                            result_dict_n = np.load(
                                filepath+'/' + screenShot_path[n] + '/' + j, allow_pickle=True).item()
                            print(screenShot_path[n])
                            print(result_dict_n)
                for i,value in enumerate(result_dict_m['classes_name']):
                    #判断是否为重要元素
                    if value in keyword:
                        if result_dict_m['scores'][i] >= threshold:
                            if value not in compare_dict_m.keys():
                                compare_dict_m[value] = 1
                            else:
                                compare_dict_m[value] = compare_dict_m[value]+1
                for i, value in enumerate(result_dict_n['classes_name']):
                    if value in keyword:
                        if result_dict_n['scores'][i] >= threshold:
                            if value not in compare_dict_n.keys():
                                compare_dict_n[value] = 1
                            else:
                                compare_dict_n[value] = compare_dict_n[value] + 1
                if compare_dict_m == compare_dict_n :
                    fit_count = fit_count + 1
                    print(screenShot_path[m] + '与' + screenShot_path[n] + '符合蜕变关系')
                    list_fit.append(screenShot_path[m])
                    list_fit.append(screenShot_path[n])
                    list_fit_2.append([screenShot_path[m],screenShot_path[n]])
                else:
                    print(screenShot_path[m] + '与' + screenShot_path[n] + '不符合蜕变关系')
                all_count = all_count + 1
                print(screenShot_path[m]+'中所识别的对象')
                print(compare_dict_m)
                print(screenShot_path[n] + '中所识别的对象')
                print(compare_dict_n)
    list_fit = list(set(list_fit))
    print("*******************************************************************************************")
    print(fit_count,all_count)
    return list_fit,list_fit_2
def cal_result_trans_coverage(filepath1, filepath2):
    k1 = 1000
    k2 = 10000
    if not os.path.exists(filepath2 + '/output_coverage_' + str(k1) + '.npy'):  # 若不存在路径则创建
        output_coverage_k1 = {}
        np.save(filepath2 + '/output_coverage_' + str(k1) + '.npy', output_coverage_k1)
    output_coverage_k1 = np.load(filepath2 + '/output_coverage_' + str(k1) + '.npy', allow_pickle=True).item()
    if not os.path.exists(filepath2 + '/output_coverage_' + str(k2) + '.npy'):  # 若不存在路径则创建
        output_coverage_k2 = {}
        np.save(filepath2 + '/output_coverage_' + str(k2) + '.npy', output_coverage_k2)
    output_coverage_k2 = np.load(filepath2 + '/output_coverage_' + str(k2) + '.npy', allow_pickle=True).item()
    trans_path = get_trans_Path(filepath2)
    if trans_path:
        print('****')
        print(trans_path)
        for i in trans_path:
            # 路径都取到screenShot
            print(filepath2 + '/' + i)
            screenShot_path2 = get_screenShot_Path(filepath2 + '/' + i)
            if screenShot_path2:
                for n in screenShot_path2:
                    for dirpath1, dirnames1, filenames1 in os.walk(filepath2 + '/' + i + '/' + n):
                        for j in filenames1:
                            if 'result_dict' in j:
                                result_dict_n = np.load(filepath2 + '/' + i + '/' + n + '/' + j,
                                                        allow_pickle=True).item()
                                for k, value in enumerate(result_dict_n['all_classes_name']):
                                    # 判断是否为重要元素
                                    if value in keyword:
                                        output_coverage_k1[int(result_dict_n['all_scores'][k] * k1)] = 1
                                        output_coverage_k2[int(result_dict_n['all_scores'][k] * k2)] = 1
        np.save(filepath2 + '/output_coverage_' + str(k1) + '.npy', output_coverage_k1)
        np.save(filepath2 + '/output_coverage_' + str(k2) + '.npy', output_coverage_k2)

        # output_coverage_k1 = np.load(filepath + '/output_coverage_1000.npy', allow_pickle=True).item()
        # output_coverage_k2 = np.load(filepath + '/output_coverage_10000.npy', allow_pickle=True).item()
        wb = xlwt.Workbook()
        ws = wb.add_sheet('sheet')
        count_k1 = sum(output_coverage_k1.values())
        count_k2 = sum(output_coverage_k2.values())
        # for k,v in output_coverage_k1.items():

        count = 0  # 记录excel表格的行数
        ws.write(count, 0, k1)
        ws.write(count, 1, count_k1 / k1)
        ws.write(count, 2, k2)
        ws.write(count, 3, count_k2 / k2)
        wb.save(filepath2 + '/output_coverage.xls')
# Revised version (2.10): compare one folder's detections against another folder's
def cal_result_trans(filepath1, filepath2,threshold):
    #相似度计算
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    list_xiangsi = []
    L = LPIPSvgg(channels=3).to(device)

    all_count = 0
    fit_count = 0
    open(filepath2 + '/info.txt', "w")
    file = open(filepath2 + '/info.txt', "r")
    newlines = file.readlines()
    # newlines = []
    trans_path = get_trans_Path(filepath2)

    # cal_result_trans_1by1_new(filepath1,filepath2+'/'+i + '/screenShot',0.8)

    # 用于控制多少置信度被认定为一个物体
    # threshold = 0.7
    screenShot_path1 = get_screenShot_Path(filepath1)
    # screenShot_path2 = get_screenShot_Path(filepath2)
    # print(screenShot_path2)
    # list_fit = []
    # list_fit_trans = []

    compare_dict_m = {}

    for m in screenShot_path1:
        for dirpath, dirnames, filenames in os.walk(filepath1 + '/' + m):
            for j in filenames:
                if 'screenshot' in j:
                    x = cv2.imread(filepath1 + '/' + m + '/' + j)
                    x = np.array(cv2.split(x))
                    x = torch.from_numpy(x).to(device)
                    x = x.to(torch.float32).to(device)
                    print(x.shape)
                if 'result_dict' in j:
                    result_dict_m = np.load(filepath1 + '/' + m + '/' + j, allow_pickle=True).item()
                    del_list_m = []
                    for k, value in enumerate(result_dict_m['classes_name']):
                        # 判断是否为重要元素
                        if value in keyword:
                            for k1, value1 in enumerate(result_dict_m['classes_name']):
                                if k1 != k:
                                    iou = bb_intersection_over_union(result_dict_m['boxes'][k],
                                                                     result_dict_m['boxes'][k1])
                                    if iou > 0.5:
                                        if result_dict_m['scores'][k] > result_dict_m['scores'][k1]:
                                            print(type(result_dict_m['scores']))
                                            if k1 not in del_list_m:
                                                del_list_m.append(k1)
                                            # del result_dict['scores'][k1]
                                            # del result_dict['boxes'][k1]
                                            # del result_dict['classesname'][k1]
                                        else:
                                            if k not in del_list_m:
                                                del_list_m.append(k)
                                            # del result_dict['scores'][k]
                                            # del result_dict['boxes'][k]
                                            # del result_dict['classesname'][k]
                    # print(result_dict_m)

    for k, value in enumerate(result_dict_m['classes_name']):
        # 判断是否为重要元素
        if value in keyword and k not in del_list_m:
            if result_dict_m['scores'][k] > threshold:
                if value not in compare_dict_m.keys():
                    compare_dict_m[value] = 1
                else:
                    compare_dict_m[value] = compare_dict_m[value] + 1
    if trans_path:
        print('****')
        print(trans_path)
        for i in trans_path:
            # 路径都取到screenShot
            print(filepath2 + '/' + i)
            screenShot_path2 = get_screenShot_Path(filepath2 + '/' + i)
            if screenShot_path2:
                for n in screenShot_path2:
                    compare_dict_n = {}
                    for dirpath1, dirnames1, filenames1 in os.walk(filepath2 + '/' + i + '/' + n):
                        for j in filenames1:
                            if 'screenshot' in j:
                                y = cv2.imread(filepath2 + '/'+ i+'/' + n + '/' + j)
                                y = np.array(cv2.split(y))
                                y = torch.from_numpy(y).to(device)
                                y = y.to(torch.float32).to(device)
                                print(y.shape)
                                print(L(x,y,as_loss=False))
                                print(L(x,y,as_loss=False).cpu().numpy()[0])
                                LPIPS = float(L(x,y,as_loss=False).cpu().numpy()[0])
                                list_xiangsi.append([n,LPIPS])
                            if 'result_dict' in j:
                                result_dict_n = np.load(filepath2 + '/' + i + '/' + n + '/' + j,
                                                        allow_pickle=True).item()
                                # print(result_dict_n)
                                del_list_n = []
                                for k, value in enumerate(result_dict_n['classes_name']):
                                    # 判断是否为重要元素
                                    if value in keyword:
                                        for k1, value1 in enumerate(result_dict_n['classes_name']):
                                            if k1 != k:
                                                iou = bb_intersection_over_union(result_dict_n['boxes'][k],
                                                                                 result_dict_n['boxes'][k1])
                                                if iou > 0.5:
                                                    if result_dict_n['scores'][k] > result_dict_n['scores'][k1]:
                                                        print(type(result_dict_n['scores']))
                                                        if k1 not in del_list_n:
                                                            del_list_n.append(k1)
                                                        # del result_dict['scores'][k1]
                                                        # del result_dict['boxes'][k1]
                                                        # del result_dict['classesname'][k1]
                                                    else:
                                                        if k not in del_list_n:
                                                            del_list_n.append(k)
                                                        # del result_dict['scores'][k]
                                                        # del result_dict['boxes'][k]
                                                        # del result_dict['classesname'][k]
                                for k, value in enumerate(result_dict_n['classes_name']):
                                    if value in keyword and k not in del_list_n:
                                        if result_dict_n['scores'][k] > threshold:
                                            if value not in compare_dict_n.keys():
                                                compare_dict_n[value] = 1
                                            else:
                                                compare_dict_n[value] = compare_dict_n[value] + 1
                        # print(compare_dict_m,compare_dict_n)
                        if compare_dict_m == compare_dict_n:
                            fit_count = fit_count + 1
                            print(filepath1 + '与' + filepath2 + '符合蜕变关系')
                            # list_fit.append(m)
                            # list_fit.append(n)
                            # list_fit_trans.append([m, n])
                        else:
                            if 'car' in compare_dict_n.keys():
                                newlines.append(
                                    filepath2 + '/' + i + '/' + n + 'car:' + str(compare_dict_n['car']) + '\r\n')
                            else:
                                newlines.append(filepath2 + '/' + i + '/' + n + 'car:0' + '\r\n')
                            print(filepath1 + '与' + filepath2 + '不符合蜕变关系')
                        all_count = all_count + 1
    # 存放字典文件的位置,若出现问题请考虑将filepath2与output_path调换
    output_path = filepath2.rsplit('/', 1)[0]
    if not os.path.exists(filepath2 + '/output_result.npy'):  # 若不存在路径则创建
        output_result = {}
        # 路径如果出现问题请查看这里
        # np.save(output_path + '/output_result.npy', output_result)
        np.save(filepath2 + '/output_result.npy', output_result)
    file.close()
    file = open(filepath2 + '/info.txt', "w")
    print(newlines)
    file.writelines(newlines)
    file.close()
    output_result = np.load(filepath2 + '/output_result.npy', allow_pickle=True).item()
    list = []
    list.append(threshold)
    list.append(fit_count)
    list.append(all_count)
    output_result[filepath2 + '/' + n + '/' + i + '+threshold=' + str(threshold)] = list
    np.save(filepath2 + '/output_result.npy', output_result)
    print(
        "*****************************************************************************************************************")
    print(output_result)
    print(fit_count, all_count)
    wb = xlwt.Workbook()
    ws = wb.add_sheet('sheet')
    style = xlwt.XFStyle()
    style.num_format_str = '0%'
    count = 0  # 记录excel表格的行数
    ws.write(count, 0, '变异图片')
    ws.write(count, 1, '相似度')
    count += 1
    for i in list_xiangsi:
        ws.write(count, 0, i[0])
        ws.write(count, 1, i[1])
        count += 1
    wb.save(filepath2 + '/xiangsi.xls')
# From the original abstract scene, first generate one concrete scene, then generate n concrete scenes to compare against it
def cal_result_itself(filepath1,filepath2):
    all_count = 0
    fit_count = 0
    threshold = 0.8
    # 图片所在路径
    screenShot_path1 = get_screenShot_Path(filepath1)
    screenShot_path2 = get_screenShot_Path(filepath2)



    for m in screenShot_path1:
        compare_dict_m = {}
        for dirpath, dirnames, filenames in os.walk(filepath1 + '/' + m):
            for j in filenames:
                if 'result_dict' in j:
                    result_dict_m = np.load(filepath1 + '/' + m + '/' + j, allow_pickle=True).item()
                    # print(result_dict_m)
        for k, value in enumerate(result_dict_m['classes_name']):
            # 判断是否为重要元素
            if value in keyword:
                if result_dict_m['scores'][k] > threshold:
                    if value not in compare_dict_m.keys():
                        compare_dict_m[value] = 1
                    else:
                        compare_dict_m[value] = compare_dict_m[value] + 1
        for n in screenShot_path2:
            compare_dict_n = {}
            for dirpath1, dirnames1, filenames1 in os.walk(filepath2 + '/' + n):
                for i in filenames1:
                    if 'result_dict' in i:
                        result_dict_n = np.load(filepath2 + '/' + n + '/' + i, allow_pickle=True).item()

            for k, value in enumerate(result_dict_n['classes_name']):
                if value in keyword:
                    if result_dict_n['scores'][k] > threshold:
                        if value not in compare_dict_n.keys():
                            compare_dict_n[value] = 1
                        else:
                            compare_dict_n[value] = compare_dict_n[value] + 1
            if compare_dict_m == compare_dict_n:
                fit_count = fit_count + 1
                print(filepath1 + '与' + filepath2 + n + '符合蜕变关系')
            # list_fit.append(m)
            # list_fit.append(n)
            # list_fit_trans.append([m, n])
            else:
                print(filepath1 + '与' + filepath2 + n + '不符合蜕变关系')
            all_count = all_count + 1
            print(filepath1 + '中所识别的对象')
            print(compare_dict_m)
            print(filepath2 +n+ '中所识别的对象')
            print(compare_dict_n)
        print(
        "*****************************************************************************************************************")
    print(fit_count, all_count)
def cal_result_trans_1by1(filepath1, filepath2):
    """Compare the detection results of two concrete scenes one-to-one.

    Loads the last ``result_dict*.npy`` found under the screenshot folders of
    *filepath1* and *filepath2*, counts keyword objects whose confidence
    exceeds the threshold in each, and reports whether the two scenes satisfy
    the metamorphic relation (identical per-class object counts).

    Args:
        filepath1: root folder of the original concrete scene screenshots.
        filepath2: root folder of the derived (mutated) concrete scene.
    """
    all_count = 0
    fit_count = 0
    # Confidence level above which a detection is counted as a real object.
    threshold = 0.7
    screenShot_path1 = get_screenShot_Path(filepath1)
    screenShot_path2 = get_screenShot_Path(filepath2)

    compare_dict_m = {}
    compare_dict_n = {}
    # NOTE: as in the original code, only the result_dict of the LAST
    # directory visited in each walk is kept for the comparison.
    result_dict_m = None
    result_dict_n = None
    for m in screenShot_path1:
        for dirpath, dirnames, filenames in os.walk(filepath1 + '/' + m):
            for j in filenames:
                if 'result_dict' in j:
                    result_dict_m = np.load(filepath1 + '/' + m + '/' + j, allow_pickle=True).item()
    for n in screenShot_path2:
        for dirpath1, dirnames1, filenames1 in os.walk(filepath2 + '/' + n):
            for i in filenames1:
                if 'result_dict' in i:
                    result_dict_n = np.load(filepath2 + '/' + n + '/' + i, allow_pickle=True).item()
    if result_dict_m is None or result_dict_n is None:
        # Nothing to compare; bail out instead of crashing with the
        # NameError the original code would have raised.
        print('result_dict not found under', filepath1, 'or', filepath2)
        return
    for k, value in enumerate(result_dict_m['classes_name']):
        # Count only important (keyword) objects above the threshold.
        if value in keyword and result_dict_m['scores'][k] > threshold:
            compare_dict_m[value] = compare_dict_m.get(value, 0) + 1
    for k, value in enumerate(result_dict_n['classes_name']):
        if value in keyword and result_dict_n['scores'][k] > threshold:
            compare_dict_n[value] = compare_dict_n.get(value, 0) + 1
    if compare_dict_m == compare_dict_n:
        fit_count = fit_count + 1
        print(filepath1 + '与' + filepath2 + '符合蜕变关系')
    else:
        print(filepath1 + '与' + filepath2 + '不符合蜕变关系')
    all_count = all_count + 1
    print(filepath1 + '中所识别的对象')
    print(compare_dict_m)
    print(filepath2 + '中所识别的对象')
    print(compare_dict_n)
    print(
        "*****************************************************************************************************************")
    print(fit_count, all_count)
    # BUG FIX: the original ended with ``cal_coverage_deeptest()``.  That
    # function requires two positional arguments, so the call always raised
    # TypeError; coverage is computed separately by the callers.
def cal_coverage_trans(filepath1, filepath2):
    """Accumulate output-coverage buckets over all derived scenes.

    Each keyword detection score s is bucketed as int(s * 1000) and
    int(s * 10000); the bucket dicts are persisted as .npy files directly
    under *filepath2* and extended on every call.
    """
    k1 = 1000
    k2 = 10000

    trans_path = get_trans_Path(filepath2)

    cov_path_k1 = filepath2 + '/output_coverage_' + str(k1) + '.npy'
    cov_path_k2 = filepath2 + '/output_coverage_' + str(k2) + '.npy'
    # Create an empty coverage file on first use, then load it back.
    if not os.path.exists(cov_path_k1):
        np.save(cov_path_k1, {})
    output_coverage_k1 = np.load(cov_path_k1, allow_pickle=True).item()
    if not os.path.exists(cov_path_k2):
        np.save(cov_path_k2, {})
    output_coverage_k2 = np.load(cov_path_k2, allow_pickle=True).item()

    if trans_path:
        for sub in trans_path:
            # Both paths are taken down to the screenShot level.
            print(filepath2 + '/' + sub)
            print(filepath1)
            shot_dirs = get_screenShot_Path(filepath2 + '/' + sub + '/screenShot')
            if shot_dirs:
                for shot in shot_dirs:
                    shot_root = filepath2 + '/' + sub + '/screenShot' + '/' + shot
                    for _dirpath, _dirnames, names in os.walk(shot_root):
                        for fname in names:
                            if 'result_dict' in fname:
                                result_dict_n = np.load(shot_root + '/' + fname,
                                                        allow_pickle=True).item()
                                for idx, cls in enumerate(result_dict_n['all_classes_name']):
                                    # Only keyword classes contribute to coverage.
                                    if cls in keyword:
                                        output_coverage_k1[int(result_dict_n['all_scores'][idx] * k1)] = 1
                                        output_coverage_k2[int(result_dict_n['all_scores'][idx] * k2)] = 1
    np.save(cov_path_k1, output_coverage_k1)
    np.save(cov_path_k2, output_coverage_k2)
 # For the new experiment
def cal_result_trans_1by1_new(filepath1, filepath2,threshold):
    """Compare one original scene against every derived scene under filepath2.

    For each derived scene's screenshots this computes the LPIPS perceptual
    similarity to the original screenshot, suppresses overlapping duplicate
    detections (IoU > 0.5 keeps the higher score), counts keyword objects
    above ``threshold`` and checks the metamorphic relation (equal per-class
    counts).  Violations are appended to ``filepath2/info.txt``; the counts
    go to ``output_result.npy`` and the similarity table to ``xiangsi.xls``.

    Args:
        filepath1: root folder of the original concrete scene screenshots.
        filepath2: root folder holding one sub-folder per derived scene.
        threshold: confidence above which a detection counts as an object.
    """
    # Similarity computation (LPIPS perceptual distance between screenshots).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    list_xiangsi = []  # rows of [screenshot folder, LPIPS score]
    L = LPIPSvgg(channels=3).to(device)

    all_count = 0
    fit_count = 0
    file = open(filepath2+ '/info.txt', "r")
    newlines = file.readlines()
    #newlines = []
    trans_path = get_trans_Path(filepath2)

        #cal_result_trans_1by1_new(filepath1,filepath2+'/'+i + '/screenShot',0.8)


    # Confidence level above which a detection is counted as one object.
    #threshold = 0.7
    screenShot_path1 = get_screenShot_Path(filepath1)
    #screenShot_path2 = get_screenShot_Path(filepath2)
    #print(screenShot_path2)
    # list_fit = []
    # list_fit_trans = []

    compare_dict_m = {}

    # NOTE(review): only the result_dict_m / del_list_m of the LAST walked
    # directory survive this loop, and ``x`` is only bound when a
    # 'screenshot' file is found -- confirm each folder holds exactly one.
    for m in screenShot_path1:
        for dirpath, dirnames, filenames in os.walk(filepath1 + '/' + m):
            for j in filenames:
                if 'screenshot' in j:
                    x = cv2.imread(filepath1 + '/' + m + '/' + j)
                    x = np.array(cv2.split(x))
                    x = torch.from_numpy(x).to(device)
                    x = x.to(torch.float32).to(device)
                    print(x.shape)
                if 'result_dict' in j:
                    result_dict_m = np.load(filepath1 + '/' + m + '/'+ j, allow_pickle=True).item()
                    del_list_m = []
                    for k, value in enumerate(result_dict_m['classes_name']):
                        # Check whether this is an important (keyword) element.
                        if value in keyword:
                            # Suppress duplicates of the same box: keep the
                            # higher-scoring detection when IoU > 0.5.
                            for k1, value1 in enumerate(result_dict_m['classes_name']):
                                if k1 != k:
                                    iou = bb_intersection_over_union(result_dict_m['boxes'][k],
                                                                     result_dict_m['boxes'][k1])
                                    if iou > 0.5:
                                        if result_dict_m['scores'][k] > result_dict_m['scores'][k1]:
                                            print(type(result_dict_m['scores']))
                                            if k1 not in del_list_m:
                                                del_list_m.append(k1)
                                            # del result_dict['scores'][k1]
                                            # del result_dict['boxes'][k1]
                                            # del result_dict['classesname'][k1]
                                        else:
                                            if k not in del_list_m:
                                                del_list_m.append(k)
                                            # del result_dict['scores'][k]
                                            # del result_dict['boxes'][k]
                                            # del result_dict['classesname'][k]
                    # print(result_dict_m)

    for k, value in enumerate(result_dict_m['classes_name']):
        # Check whether this is an important element not suppressed above.
        if value in keyword and k not in del_list_m:
            if result_dict_m['scores'][k] > threshold:
                if value not in compare_dict_m.keys():
                    compare_dict_m[value] = 1
                else:
                    compare_dict_m[value] = compare_dict_m[value] + 1
    if trans_path:
        for i in trans_path:
            # Both paths are taken down to the screenShot level.
            print(filepath2+'/'+i)
            print(filepath1)
            screenShot_path2 = get_screenShot_Path(filepath2+'/'+i + '/screenShot')
            if screenShot_path2:
                for n in screenShot_path2:
                    compare_dict_n = {}
                    for dirpath1, dirnames1, filenames1 in os.walk(filepath2 +'/'+i + '/screenShot'+'/' + n):
                        for j in filenames1:
                            if 'screenshot' in j:
                                y = cv2.imread(filepath2 +'/'+i + '/screenShot'+'/' + n+'/'+j)
                                y = np.array(cv2.split(y))
                                y = torch.from_numpy(y).to(device)
                                y = y.to(torch.float32).to(device)
                                print(y.shape)
                                print(L(x, y, as_loss=False))
                                print(L(x, y, as_loss=False).cpu().numpy()[0])
                                LPIPS = float(L(x, y, as_loss=False).cpu().numpy()[0])
                                list_xiangsi.append([n, LPIPS])
                            if 'result_dict' in j:
                                result_dict_n = np.load(filepath2 +'/'+i + '/screenShot'+'/' + n + '/' + j, allow_pickle=True).item()
                                #print(result_dict_n)
                                del_list_n = []
                                for k, value in enumerate(result_dict_n['classes_name']):
                                    # Check whether this is an important element.
                                    if value in keyword:
                                        for k1, value1 in enumerate(result_dict_n['classes_name']):
                                            if k1 != k:
                                                iou = bb_intersection_over_union(result_dict_n['boxes'][k],
                                                                                 result_dict_n['boxes'][k1])
                                                if iou > 0.5:
                                                    if result_dict_n['scores'][k] > result_dict_n['scores'][k1]:
                                                        print(type(result_dict_n['scores']))
                                                        if k1 not in del_list_n:
                                                            del_list_n.append(k1)
                                                        # del result_dict['scores'][k1]
                                                        # del result_dict['boxes'][k1]
                                                        # del result_dict['classesname'][k1]
                                                    else:
                                                        if k not in del_list_n:
                                                            del_list_n.append(k)
                                                        # del result_dict['scores'][k]
                                                        # del result_dict['boxes'][k]
                                                        # del result_dict['classesname'][k]
                                for k, value in enumerate(result_dict_n['classes_name']):
                                    if value in keyword and k not in del_list_n:
                                        if result_dict_n['scores'][k] > threshold:
                                            if value not in compare_dict_n.keys():
                                                compare_dict_n[value] = 1
                                            else:
                                                compare_dict_n[value] = compare_dict_n[value] + 1
                        #print(compare_dict_m,compare_dict_n)
                        # NOTE(review): this comparison sits inside the
                        # os.walk loop, so it runs once per walked directory
                        # rather than once per screenshot -- confirm intended.
                        if compare_dict_m == compare_dict_n:
                            fit_count = fit_count + 1
                            print(filepath1 + '与' + filepath2 + '符合蜕变关系')
                            # list_fit.append(m)
                            # list_fit.append(n)
                            # list_fit_trans.append([m, n])
                        else:
                            if 'car' in compare_dict_n.keys():
                                newlines.append(filepath2+'/'+i+'/'+n+'car:'+str(compare_dict_n['car'])+'\r\n')
                            else:
                                newlines.append(filepath2 + '/'+i+'/'+n + 'car:0'+ '\r\n')
                            print(filepath1  + '与' + filepath2 + '不符合蜕变关系')
                        all_count = all_count + 1
    # Location of the result-dictionary file; if problems occur consider
    # swapping filepath2 and output_path.
    output_path = filepath2.rsplit('/', 1)[0]
    if not os.path.exists(filepath2 + '/output_result.npy'):  # create the file if it does not exist yet
        output_result = {}
        # Check here if the path causes problems.
        #np.save(output_path + '/output_result.npy', output_result)
        np.save(filepath2 + '/output_result.npy', output_result)
    file.close()
    file = open(filepath2 + '/info.txt', "w")
    print(newlines)
    file.writelines(newlines)
    file.close()
    output_result = np.load(filepath2 + '/output_result.npy', allow_pickle=True).item()
    # NOTE(review): ``list`` shadows the builtin, and ``n`` / ``i`` below
    # carry last-iteration values (unbound if trans_path was empty) --
    # confirm the result key is meant to reference the last scene only.
    list = []
    list.append(threshold)
    list.append(fit_count)
    list.append(all_count)
    output_result[filepath2 + '/' + n + '/' + i + '+threshold=' + str(threshold)] = list
    np.save(filepath2 + '/output_result.npy', output_result)
    print(
        "*****************************************************************************************************************")
    print(output_result)
    print(fit_count, all_count)
    wb = xlwt.Workbook()
    ws = wb.add_sheet('sheet')
    style = xlwt.XFStyle()
    style.num_format_str = '0%'
    count = 0  # current Excel row index
    ws.write(count, 0, '变异图片')
    ws.write(count, 1, '相似度')
    count += 1
    for i in list_xiangsi:
        ws.write(count, 0, i[0])
        ws.write(count, 1, i[1])
        count += 1
    wb.save(filepath2 + '/xiangsi.xls')
def cal_coverage_deeptest(filepath1, filepath2):
    """Accumulate score-bucket output coverage for one DeepTest mutation folder.

    Every keyword detection score s is bucketed as int(s*1000) and
    int(s*10000).  The buckets are merged into:
      * per-parent coverage dicts saved next to the mutation folder,
      * global per-mutation-class dicts under F:/paper_result/deeptest_class,
      * a per-folder summary written to filepath2/output_coverage.xls.

    Args:
        filepath1: original scene root (listed only for its diagnostic print).
        filepath2: mutation folder; its last path component names the
            mutation type (blur, brightness, contrast, ...).
    """
    one_coverage_1000 = {}
    one_coverage_10000 = {}
    k1 = 1000
    k2 = 10000
    output_path = filepath2.rsplit('/', 1)[0]
    print(output_path)
    # The last path component identifies the mutation type.
    deeptest_class = filepath2.rsplit('/', 1)[1]
    screenShot_path1 = get_screenShot_Path(filepath1)  # kept for its diagnostic print
    screenShot_path2 = get_screenShot_Path(filepath2)
    print('*********************')
    print(screenShot_path2)

    cov_k1_path = output_path + '/output_coverage_' + str(k1) + '.npy'
    cov_k2_path = output_path + '/output_coverage_' + str(k2) + '.npy'
    if not os.path.exists(cov_k1_path):  # create on first use
        np.save(cov_k1_path, {})
    output_coverage_k1 = np.load(cov_k1_path, allow_pickle=True).item()
    if not os.path.exists(cov_k2_path):  # create on first use
        np.save(cov_k2_path, {})
    output_coverage_k2 = np.load(cov_k2_path, allow_pickle=True).item()

    class_dir = 'F:/paper_result/deeptest_class'
    class_k1_path = class_dir + '/output_coverage_' + str(k1) + '_' + deeptest_class + '.npy'
    class_k2_path = class_dir + '/output_coverage_' + str(k2) + '_' + deeptest_class + '.npy'
    if not os.path.exists(class_k1_path):  # create on first use
        np.save(class_k1_path, {})
    if not os.path.exists(class_k2_path):  # create on first use
        np.save(class_k2_path, {})
    all_deeptest_class_coverage_k1 = np.load(class_k1_path, allow_pickle=True).item()
    all_deeptest_class_coverage_k2 = np.load(class_k2_path, allow_pickle=True).item()

    # ROBUSTNESS FIX: get_screenShot_Path returns None when nothing is found,
    # which made the original ``for n in screenShot_path2`` raise TypeError.
    if screenShot_path2:
        for n in screenShot_path2:
            for dirpath1, dirnames1, filenames1 in os.walk(filepath2 + '/' + n):
                for i in filenames1:
                    if 'result_dict' in i:
                        result_dict_n = np.load(filepath2 + '/' + n + '/' + i, allow_pickle=True).item()
                        print(result_dict_n)
                        for k, value in enumerate(result_dict_n['all_classes_name']):
                            # Only important (keyword) elements contribute.
                            if value in keyword:
                                b1 = int(result_dict_n['all_scores'][k] * k1)
                                b2 = int(result_dict_n['all_scores'][k] * k2)
                                output_coverage_k1[b1] = 1
                                output_coverage_k2[b2] = 1
                                # Per-folder statistics for the summary sheet.
                                one_coverage_1000[b1] = 1
                                one_coverage_10000[b2] = 1
                                all_deeptest_class_coverage_k1[b1] = 1
                                all_deeptest_class_coverage_k2[b2] = 1
    # PERF FIX: the original re-saved the class-coverage dicts inside the
    # innermost loop; one save after the loop produces identical final files.
    np.save(class_k1_path, all_deeptest_class_coverage_k1)
    np.save(class_k2_path, all_deeptest_class_coverage_k2)

    count = 0  # Excel row index
    wb = xlwt.Workbook()
    ws = wb.add_sheet('sheet')
    ws.write(count, 0, '1000')
    ws.write(count, 1, sum(one_coverage_1000.values()) / 1000)
    ws.write(count, 2, '10000')
    ws.write(count, 3, sum(one_coverage_10000.values()) / 10000)
    wb.save(filepath2+'/output_coverage.xls')

    np.save(cov_k1_path, output_coverage_k1)
    np.save(cov_k2_path, output_coverage_k2)


def cal_result_deeptest_1by1(filepath1, filepath2, threshold,):
    """Compare an original scene with one DeepTest mutation folder.

    Computes LPIPS similarity between the original screenshot and each
    'mutate' image, suppresses overlapping duplicate detections
    (IoU > 0.5 keeps the higher score), counts keyword objects above
    ``threshold`` in both scenes and checks the metamorphic relation
    (equal per-class counts).  Violations are logged to ``info.txt`` in the
    parent of *filepath2*; counts go to ``output_result.npy`` and the
    similarity table to ``filepath2/xiangsi.xls``.

    Args:
        filepath1: root folder of the original scene screenshots.
        filepath2: one mutation folder (e.g. .../blur) with screenshots.
        threshold: confidence above which a detection counts as an object.
    """
    # Similarity computation (LPIPS perceptual distance).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    list_xiangsi = []  # rows of [screenshot folder, LPIPS score]
    L = LPIPSvgg(channels=3).to(device)

    output_path = filepath2.rsplit('/', 1)[0]
    print(output_path)
    all_count = 0
    fit_count = 0
    file = open(output_path+'/info.txt', "r")
    newlines = file.readlines()
    # Confidence level above which a detection is counted as one object.
    #threshold = 0.7
    screenShot_path1 = get_screenShot_Path(filepath1)
    screenShot_path2 = get_screenShot_Path(filepath2)
    print('*********************')
    print(screenShot_path2)
    # list_fit = []
    # list_fit_trans = []


    # NOTE(review): ``x`` is only bound when a 'screenshot' file is found,
    # and the whole comparison below re-runs for every walked directory of
    # every original folder m -- confirm each folder holds one screenshot.
    for m in screenShot_path1:
        for dirpath, dirnames, filenames in os.walk(filepath1 + '/' + m):
            for j in filenames:
                if 'screenshot' in j:
                    x = cv2.imread(filepath1 + '/' + m + '/' + j)
                    x = np.array(cv2.split(x))
                    x = torch.from_numpy(x).to(device)
                    x = x.to(torch.float32).to(device)
                    print(x.shape)
                if 'result_dict' in j:
                    result_dict_m = np.load(filepath1 + '/' + m + '/'+ j, allow_pickle=True).item()
                    print(result_dict_m)
                    # Decide whether detections sharing one box must be dropped.
                    del_list_m = []
                    for k, value in enumerate(result_dict_m['classes_name']):
                        # Check whether this is an important (keyword) element.
                        if value in keyword:
                            for k1, value1 in enumerate(result_dict_m['classes_name']):
                                if k1 != k:
                                    iou = bb_intersection_over_union(result_dict_m['boxes'][k],
                                                                     result_dict_m['boxes'][k1])
                                    if iou > 0.5:
                                        # Keep the higher-scoring overlap.
                                        if result_dict_m['scores'][k] > result_dict_m['scores'][k1]:
                                            print(type(result_dict_m['scores']))
                                            if k1 not in del_list_m:
                                                del_list_m.append(k1)
                                            # del result_dict['scores'][k1]
                                            # del result_dict['boxes'][k1]
                                            # del result_dict['classesname'][k1]
                                        else:
                                            if k not in del_list_m:
                                                del_list_m.append(k)
                                            # del result_dict['scores'][k]
                                            # del result_dict['boxes'][k]
                                            # del result_dict['classesname'][k]
                    # print(result_dict_m)

            for n in screenShot_path2:
                for dirpath1, dirnames1, filenames1 in os.walk(filepath2 + '/' + n):
                    for i in filenames1:
                        if 'mutate' in i:
                            y = cv2.imread(filepath2 + '/' + n + '/' + i)
                            # Resize so LPIPS sees matching dimensions;
                            # assumes originals are 1280x720 -- TODO confirm.
                            if x.shape!=y.shape:
                                y = cv2.resize(y,(1280,720))
                            y = np.array(cv2.split(y))
                            y = torch.from_numpy(y).to(device)
                            y = y.to(torch.float32).to(device)
                            print(y.shape)
                            print(L(x, y, as_loss=False))
                            print(L(x, y, as_loss=False).cpu().numpy()[0])
                            LPIPS = float(L(x, y, as_loss=False).cpu().numpy()[0])
                            list_xiangsi.append([n, LPIPS])
                        if 'result_dict' in i:
                            result_dict_n = np.load(filepath2 + '/' + n + '/' + i, allow_pickle=True).item()
                            del_list_n = []
                            for k, value in enumerate(result_dict_n['classes_name']):
                                # Check whether this is an important element.
                                if value in keyword:
                                    for k1, value1 in enumerate(result_dict_n['classes_name']):
                                        if k1 != k:
                                            iou = bb_intersection_over_union(result_dict_n['boxes'][k],
                                                                             result_dict_n['boxes'][k1])
                                            if iou > 0.5:
                                                if result_dict_n['scores'][k] > result_dict_n['scores'][k1]:
                                                    print(type(result_dict_n['scores']))
                                                    if k1 not in del_list_n:
                                                        del_list_n.append(k1)
                                                    # del result_dict['scores'][k1]
                                                    # del result_dict['boxes'][k1]
                                                    # del result_dict['classesname'][k1]
                                                else:
                                                    if k not in del_list_n:
                                                        del_list_n.append(k)
                                                    # del result_dict['scores'][k]
                                                    # del result_dict['boxes'][k]
                                                    # del result_dict['classesname'][k]
                            # If something breaks it may be this indentation.
                            compare_dict_n = {}
                            compare_dict_m = {}

                            for k, value in enumerate(result_dict_m['classes_name']):
                                # Check whether this is an important, kept element.
                                if value in keyword and k not in del_list_m:
                                    if result_dict_m['scores'][k] > threshold:
                                        if value not in compare_dict_m.keys():
                                            compare_dict_m[value] = 1
                                        else:
                                            compare_dict_m[value] = compare_dict_m[value] + 1
                            for k, value in enumerate(result_dict_n['classes_name']):
                                if value in keyword and k not in del_list_n:
                                    if result_dict_n['scores'][k] > threshold:
                                        if value not in compare_dict_n.keys():
                                            compare_dict_n[value] = 1
                                        else:
                                            compare_dict_n[value] = compare_dict_n[value] + 1
                            if compare_dict_m == compare_dict_n:
                                fit_count = fit_count + 1
                                print(filepath1 + '与' + filepath2 +'/'+n+'符合蜕变关系')
                                # list_fit.append(m)
                                # list_fit.append(n)
                                # list_fit_trans.append([m, n])
                            else:
                                print(filepath1  + '与' + filepath2 + '/'+n+'不符合蜕变关系')
                                if 'car' in compare_dict_n.keys():
                                    newlines.append(filepath2+'/'+n+'car:'+str(compare_dict_n['car'])+'\r\n')
                                else:
                                    newlines.append(filepath2 + '/' + n + 'car:0' + '\r\n')
                            all_count = all_count + 1
                            print(filepath1 + '中所识别的对象')
                            print(compare_dict_m)
                            print(filepath2 + '/'+n+ '中所识别的对象')
                            print(compare_dict_n)
            # Where the result dictionary file is stored.

            file.close()
            file = open(output_path + '/info.txt', "w")
            file.writelines(newlines)
            file.close()
            if not os.path.exists(output_path + '/output_result.npy'):  # create the file if it does not exist yet
                output_result = {}
                np.save(output_path + '/output_result.npy', output_result)
            output_result = np.load(output_path + '/output_result.npy',allow_pickle=True).item()
            # NOTE(review): ``list`` shadows the builtin, and ``n`` / ``i``
            # hold last-iteration values here -- confirm the result key is
            # meant to reference only the last screenshot folder.
            list = []
            list.append(threshold)
            list.append(fit_count)
            list.append(all_count)
            output_result[filepath2 + '/' + n + '/' + i + '+threshold=' + str(threshold)] = list
            np.save(output_path + '/output_result.npy', output_result)
            print(
                "*****************************************************************************************************************")
            print(fit_count, all_count)
            wb = xlwt.Workbook()
            ws = wb.add_sheet('sheet')
            style = xlwt.XFStyle()
            style.num_format_str = '0%'
            count = 0  # current Excel row index
            ws.write(count, 0, '变异图片')
            ws.write(count, 1, '相似度')
            count += 1
            for i in list_xiangsi:
                ws.write(count, 0, i[0])
                ws.write(count, 1, i[1])
                count += 1
            wb.save(filepath2 + '/xiangsi.xls')
# filepath1: path of the original concrete scene; filepath2: set of derived concrete scene paths
def cal_result_trans_all(filepath1,filepath2):
    """Run the one-to-one metamorphic comparison for every derived scene.

    *filepath1* is the original concrete-scene root; *filepath2* holds one
    sub-folder per mutated abstract scene, each with its own screenShot dir.
    """
    derived_scenes = get_trans_Path(filepath2)
    for scene in derived_scenes:
        # Both paths are taken down to the screenShot level.
        print(filepath2+'/'+scene)
        print(filepath1)
        cal_result_trans_1by1(filepath1, filepath2+'/'+scene + '/screenShot')
def cal_result_trans_all_new(filepath1,filepath2,threhold=0.4):
    """Reset info.txt, then run the new one-to-one comparison plus coverage.

    Args:
        filepath1: original concrete scene root.
        filepath2: derived concrete scene root.
        threhold: confidence threshold forwarded to the comparison
            (parameter name kept as-is for backward compatibility).
    """
    # BUG FIX: the original unlinked the file and then leaked the handle
    # returned by open(); opening in "w" mode already truncates (or creates)
    # the file and the context manager closes the handle.
    with open(filepath2 + '/info.txt', "w"):
        pass
    cal_result_trans_1by1_new(filepath1, filepath2, threhold)
    cal_coverage_trans(filepath1, filepath2)
def cal_result_trans_all_deeptest(filepath1,filepath2):
    """Run the DeepTest comparison and coverage for every mutation folder.

    Args:
        filepath1: original concrete scene root.
        filepath2: root holding one sub-folder per mutation type.
    """
    # BUG FIX: the original unlinked the file and then leaked the handle
    # returned by open(); "w" mode already truncates (or creates) the file
    # and the context manager closes the handle.
    with open(filepath2 + '/info.txt', "w"):
        pass
    trans_path = get_trans_Path(filepath2)
    # One sub-folder per mutation type at the next directory level.
    for i in trans_path:
        print(filepath1,filepath2+'/'+i)
        print(filepath1)
        # 0.4 is the confidence threshold used for every mutation type.
        cal_result_deeptest_1by1(filepath1, filepath2+'/'+i, 0.4)
        cal_coverage_deeptest(filepath1, filepath2+'/'+i)
# Collect box-plot data (qrs)
def get_box_values(filepath, output_xls='F:/autodrive_data/qrs_result/box.xls'):
    """Collect box-plot data: keyword-object counts per confidence threshold.

    Walks every derived scene under *filepath*, loads each screenshot's
    ``result_dict.npy`` and, for thresholds 0.1 .. 1.0, counts keyword
    detections whose score reaches the threshold.  One (threshold, count)
    row per screenshot/threshold is written to an Excel workbook.

    Args:
        filepath: root folder containing one sub-folder per derived scene.
        output_xls: where to save the workbook.  The default preserves the
            original hard-coded path, so existing callers are unaffected.
    """
    # Set up the Excel sheet format.
    wb = xlwt.Workbook()
    ws = wb.add_sheet('sheet')
    style = xlwt.XFStyle()
    style.num_format_str = '0%'
    count = 0  # current Excel row index
    ws.write(count, 0, '置信度')
    ws.write(count, 1, '物体个数')
    count += 1

    trans_path = get_trans_Path(filepath)
    for i in trans_path:
        screenShot_Path = get_screenShot_Path(filepath+'/'+i)
        for j in screenShot_Path:
            # Screenshot data lives one directory level deeper.
            screenShot_Path1 = get_screenShot_Path(filepath+'/'+i+'/'+j)
            for k in screenShot_Path1:
                # Prediction results for one screenshot.
                result_dict = np.load(filepath+'/'+i+'/'+j+'/'+ k + '/result_dict.npy',
                                      allow_pickle=True).item()
                obj_num = {}
                for threshold in range(1, 11):
                    t = threshold / 10
                    for m, value in enumerate(result_dict['classes_name']):
                        # Only important (keyword) elements are counted.
                        if value in keyword and result_dict['scores'][m] >= t:
                            obj_num[t] = obj_num.get(t, 0) + 1
                    # Record an explicit zero when nothing reached this threshold.
                    if t not in obj_num.keys():
                        obj_num[t] = 0
                for m, value in obj_num.items():
                    ws.write(count, 0, m, style)
                    ws.write(count, 1, value)
                    count += 1
    wb.save(output_xls)
# Collect DeepTest experiment data
def get_deeptest_values(filepath):
    """Export deeptest experiment results to Excel.

    Reads ``filepath/output_result.npy`` (dict: mutation key -> [threshold,
    satisfied_count, total_count]) and writes one row per recognized mutation
    type to ``filepath/output_result.xls``.  Then reads the two coverage
    dicts (k=1000 and k=10000) and writes a coverage-ratio summary to
    ``filepath/output_coverage.xls``.

    The original implementation had seven copy-pasted ``elif`` branches that
    differed only in the type label; they are consolidated into one ordered
    first-match lookup with identical semantics.
    """
    # Order matters: mirrors the original if/elif chain, first match wins.
    mutation_types = ('blur', 'brightness', 'contrast', 'rotation',
                      'shear', 'translation', 'scale')
    wb = xlwt.Workbook()
    ws = wb.add_sheet('sheet')
    style = xlwt.XFStyle()
    style.num_format_str = '0%'  # defect ratio rendered as a percentage
    count = 0  # current Excel row
    for col, title in enumerate(('变异类型', '违反变异关系对比数', '总对比数',
                                 '缺陷比例', '置信概率阈值')):
        ws.write(count, col, title)
    count += 1
    result_dict = np.load(filepath + '/output_result.npy', allow_pickle=True).item()
    for k, v in result_dict.items():
        mtype = next((t for t in mutation_types if t in k), None)
        if mtype is None:
            # keys without a recognized mutation type are skipped, as before
            continue
        violated = v[2] - v[1]  # comparisons that violated the relation
        ws.write(count, 0, mtype)
        ws.write(count, 1, violated)
        ws.write(count, 2, v[2])
        ws.write(count, 3, violated / v[2], style)
        ws.write(count, 4, v[0])
        count += 1
    wb.save(filepath + '/output_result.xls')

    # Coverage summary: fraction of the k buckets that were covered.
    k1 = 1000
    k2 = 10000
    output_coverage_k1 = np.load(filepath + '/output_coverage_1000.npy', allow_pickle=True).item()
    output_coverage_k2 = np.load(filepath + '/output_coverage_10000.npy', allow_pickle=True).item()
    wb = xlwt.Workbook()
    ws = wb.add_sheet('sheet')
    count_k1 = sum(output_coverage_k1.values())
    count_k2 = sum(output_coverage_k2.values())
    count = 0
    ws.write(count, 0, k1)
    ws.write(count, 1, count_k1 / k1)
    ws.write(count, 2, k2)
    ws.write(count, 3, count_k2 / k2)
    wb.save(filepath + '/output_coverage.xls')
# Export metamorphic-testing experiment data to Excel
def get_meta_values(filepath):
    """Export metamorphic-testing results to Excel.

    Writes one row per entry of ``filepath/output_result.npy`` (dict:
    mutation key -> [threshold, satisfied_count, total_count]) to
    ``filepath/output_result.xls``, then a coverage-ratio summary for
    k=1000 and k=10000 to ``filepath/output_coverage.xls``.
    """
    book = xlwt.Workbook()
    sheet = book.add_sheet('sheet')
    pct = xlwt.XFStyle()
    pct.num_format_str = '0%'  # defect ratio rendered as a percentage
    sheet.write(0, 0, '变异类型')
    sheet.write(0, 1, '违反变异关系对比数')
    sheet.write(0, 2, '总对比数')
    sheet.write(0, 3, '缺陷比例')
    sheet.write(0, 4, '置信概率阈值')
    results = np.load(filepath + '/output_result.npy', allow_pickle=True).item()
    row = 1  # row 0 holds the header
    for name, stats in results.items():
        violated = stats[2] - stats[1]  # comparisons violating the relation
        sheet.write(row, 0, name)
        sheet.write(row, 1, violated)
        sheet.write(row, 2, stats[2])
        sheet.write(row, 3, violated / stats[2], pct)
        sheet.write(row, 4, stats[0])
        row += 1
    book.save(filepath + '/output_result.xls')

    # Coverage summary: fraction of the k buckets that were covered.
    k1 = 1000
    k2 = 10000
    cov_k1 = np.load(filepath + '/output_coverage_1000.npy', allow_pickle=True).item()
    cov_k2 = np.load(filepath + '/output_coverage_10000.npy', allow_pickle=True).item()
    book = xlwt.Workbook()
    sheet = book.add_sheet('sheet')
    sheet.write(0, 0, k1)
    sheet.write(0, 1, sum(cov_k1.values()) / k1)
    sheet.write(0, 2, k2)
    sheet.write(0, 3, sum(cov_k2.values()) / k2)
    book.save(filepath + '/output_coverage.xls')
def get_all_mr_coverage():
    """Load the mr1-mutation output-coverage dicts for every scene.

    The previous implementation loaded the arrays into local variables and
    discarded every one of them (the function had no observable effect), and
    its last four lines redundantly re-loaded the drive_scene_5 files twice —
    an apparent copy-paste error.  Each scene's two coverage dicts are now
    loaded exactly once and returned so callers can actually use them.

    Returns:
        dict mapping scene label ('5', '6', '7') to a
        (coverage_k1000, coverage_k10000) tuple of dicts.
    """
    scene_dirs = {
        '5': 'F:\drive_scene_5\mutants_mr1',
        '6': 'F:\drive_scene\mutants_mr1',
        '7': 'F:\drive_scene_7\mutants_mr1',
    }
    coverage = {}
    for label, base in scene_dirs.items():
        cov_k1 = np.load(base + '/output_coverage_1000.npy', allow_pickle=True).item()
        cov_k2 = np.load(base + '/output_coverage_10000.npy', allow_pickle=True).item()
        coverage[label] = (cov_k1, cov_k2)
    return coverage


if __name__ == '__main__':
    # DeepTest experiment: for each scene, compare recognition results on the
    # original screenshots against the deeptest-mutated ones, then export the
    # per-scene results and coverage figures to Excel.
    #
    # NOTE(review): this block previously contained ~200 lines of commented-out
    # one-off experiment invocations (qrs, ppt, object_compare, mutants_*,
    # mutants_mr1, ...).  They were dead code and have been removed; recover
    # them from version control if an old experiment needs re-running.  The
    # five active call pairs, which differed only in their paths, are now
    # driven by a single table (same call order as before).
    deeptest_experiments = [
        ('F:/drive_scene/original', 'F:/drive_scene/deep_test/1511038'),
        ('F:/drive_scene_5/original', 'F:/drive_scene_5/deep_test/1450380'),
        ('F:/drive_scene_7/original', 'F:/drive_scene_7/deep_test/926176'),
        ('F:/drive_scene_ajust/original', 'F:/drive_scene_ajust/deep_test/730684'),
        ('F:/drive_scene_car/original', 'F:/drive_scene_car/deep_test/331140'),
    ]
    for original_path, deeptest_path in deeptest_experiments:
        cal_result_trans_all_deeptest(original_path, deeptest_path)
        get_deeptest_values(deeptest_path)



