from collections import Counter
from itertools import chain
import sys
import json
import os
import random
import torch.utils.data as torchdata
import torch


sys.path.append(r'./')

from codesecurity.nn.api import train_model,eval_model
from codesecurity.tasks.malicious_code_detect.objects import MaliciousDataset
from codesecurity.data.api import list_all_file_in_dir,ShareMemory
from codesecurity.tasks.malicious_code_detect.preprocessing import AstImageBuilder, MCD_Feature, MCD_Features, MCDFeatureBuilder,MCD_BigFeatures
from codesecurity.tasks.malicious_code_detect.meta import SuperParameter_MCD
from codesecurity.tasks.malicious_code_detect.model import get_model



def get_ast_caches():
    """Assemble a mixed good/bad JS AST experiment dataset.

    Loads several malicious and benign AST cache files, reports each cache's
    size, randomly picks a subset of indices from each class, and writes the
    selected samples (good first, then bad) to
    'model_caches/mcd_caches/exp_dataset.pt'; the per-class counts are stored
    in the cache's addon dict.
    """
    bad_paths = [
        'data/js/jstap_train/caches/bad_ast_caches.pt',
        'model_caches/data_caches/bazaar_malicious_ast_caches.pt',
        'data/js/jstap_test/caches/bad_ast_caches.pt',
        'model_caches/data_caches/hynek_dataset_ast_caches.pt',
    ]

    bad_memory = []
    bad_total = 0
    for bad_cache in bad_paths:
        mem = ShareMemory.from_file(bad_cache)
        bad_total += len(mem)
        bad_memory.append(mem)
        print(f'bad_cache:{bad_cache},len:{len(mem)}')
    print(f'bad_total:{bad_total}')

    good_paths = [
        'data/js/jstap_train/caches/good_ast_caches.pt',
        'data/js/jstap_test/caches/good_ast_caches.pt',
        'model_caches/data_caches/_ast_caches.pt',
    ]

    good_memory = []
    good_total = 0
    for good_cache in good_paths:
        mem = ShareMemory.from_file(good_cache)
        good_total += len(mem)
        good_memory.append(mem)
        print(f'good_cache:{good_cache},len:{len(mem)}')
    print(f'good_total:{good_total}')

    train_size = 5000
    test_size = 1000
    inbalance_ratio = 9

    # Sample good indices first, then bad, preserving the original RNG
    # consumption order.
    good_choose = set(random.sample(list(range(good_total)), train_size + test_size * inbalance_ratio))
    bad_choose = set(random.sample(list(range(bad_total)), train_size + test_size))

    # Iterate every cache in order and keep only the sampled indices.
    good_objs = [obj for i, obj in enumerate(chain(*good_memory)) if i in good_choose]
    bad_objs = [obj for i, obj in enumerate(chain(*bad_memory)) if i in bad_choose]

    exp_dataset = ShareMemory('model_caches/mcd_caches/exp_dataset.pt',
                              addon={'good': len(good_objs), 'bad': len(bad_objs)})
    for obj in good_objs:
        exp_dataset.add(obj)
    for obj in bad_objs:
        exp_dataset.add(obj)

    exp_dataset.save()

    print(f'finish job! good_objs:{len(good_objs)},bad_objs:{len(bad_objs)}')

def get_sample_images(compare=False):
    """Preprocess the sampled experiment dataset into feature images.

    Resumes from an existing image cache (already-processed indices are
    skipped). When `compare` is True the jsreaver-style features are built
    and written to the jsreaver-named cache instead.
    """
    asts = ShareMemory.from_file('model_caches/mcd_caches/exp_dataset.pt')

    if compare:
        out_template = 'model_caches/mcd_caches/exp_dataset_preprocess_jsreaver_{}path.pt'
    else:
        out_template = 'model_caches/mcd_caches/exp_dataset_preprocess_{}path.pt'

    for sp in [SuperParameter_MCD.SeGra(path_number=256)]:
        images = ShareMemory.from_file(out_template.format(sp.path_number), True, addon=asts.addon)
        builder = MCDFeatureBuilder(sp)

        # Resume support: skip every index the image cache already holds.
        done = len(images)

        for i, ast in enumerate(asts):
            if i < done:
                print(f'pass {i} for {images.caches_path}')
                continue
            if compare:
                feature = builder.build_jsreaver(ast)
            else:
                feature = builder.build(ast)
            images.add(feature)

        images.save()

def get_sample_images_jstap(path_number, compare=False):
    """Build feature images for the full (clean) jstap train/test splits.

    Args:
        path_number: path count fed to the miniSeGra super-parameter and
            embedded in the output cache filename.
        compare: when True, build plain AST images (`build_ast_image`) and
            write to the non-dfg cache path; otherwise build the DFG-based
            features (`build`).
    """
    dataset_asts_path = ['data/js/jstap_train/caches/good_ast_caches.pt',
                         'data/js/jstap_test/caches/good_ast_caches.pt',
                         'data/js/jstap_train/caches/bad_ast_caches.pt',
                         'data/js/jstap_test/caches/bad_ast_caches.pt']

    dataset_asts = [ShareMemory.from_file(e) for e in dataset_asts_path]

    image_asts_path = 'model_caches/mcd_caches/jstap_preprocess_dfg{}path.pt' if not compare else 'model_caches/mcd_caches/jstap_preprocess_{}path.pt'

    sps = [SuperParameter_MCD.miniSeGra(path_number=path_number)]

    # Fix: the progress log hard-coded '4000' as the denominator; compute the
    # real dataset size instead (consistent with get_sample_images_jstap_obs).
    total = sum(len(e) for e in dataset_asts)

    for sp in sps:
        images = ShareMemory(image_asts_path.format(sp.path_number))
        builder = MCDFeatureBuilder(sp)

        count = 0
        for memory in dataset_asts:
            for e in memory:
                image = builder.build(e) if not compare else builder.build_ast_image(e)
                images.add(image)
                count += 1
                print(f'{count}/{total}: add {e.path} to {images.caches_path}')

        images.save()

def get_sample_images_jstap_obs(path_number, level, compare=False):
    """Build feature images for the obfuscated jstap split at `level`.

    `compare=True` builds plain AST images into the non-dfg cache; otherwise
    the DFG-based features are built.
    """
    cache_paths = [f'data/js/obfucscate/{level}/jstap_train/caches/good_ast_caches.pt',
                   f'data/js/obfucscate/{level}/jstap_train/caches/bad_ast_caches.pt']

    memories = [ShareMemory.from_file(p) for p in cache_paths]

    if compare:
        out_template = 'model_caches/mcd_caches/jstap_preprocess_{}_{}path.pt'
    else:
        out_template = 'model_caches/mcd_caches/jstap_preprocess_{}_dfg{}path.pt'

    # Total sample count, used only for the progress log.
    total = sum(len(m) for m in memories)

    for sp in [SuperParameter_MCD.miniSeGra(path_number=path_number)]:
        images = ShareMemory(out_template.format(level, sp.path_number))
        builder = MCDFeatureBuilder(sp)

        count = 0
        for memory in memories:
            for e in memory:
                if compare:
                    image = builder.build_ast_image(e)
                else:
                    image = builder.build(e)
                images.add(image)
                count += 1
                print(f'{count}/{total}: add {e.path} to {images.caches_path}')

        images.save()

def validate_images():
    """Sanity check: load both caches and print the first image's shape."""
    images = ShareMemory.from_file('model_caches/mcd_caches/exp_dataset_preprocess_256path.pt')
    # Loaded to verify the AST cache is readable; its contents are unused here.
    ast_memory = ShareMemory.from_file('model_caches/mcd_caches/exp_dataset.pt')

    print(images.group(0)[0].shape)

def test_model(model_name, path_number, need_dfg=True, need_ast=True):
    """Train `model_name` on the clean jstap features and save the weights.

    The 4000-sample feature caches are laid out as 2000 benign followed by
    2000 malicious; the first/last 1000 of each class form the train/test
    splits respectively.
    """
    sp = SuperParameter_MCD.miniSeGra(path_number=path_number)
    model = get_model(sp, model_name, device='cuda:1')

    # Indices 0..1999 are benign (label 0), 2000..3999 malicious (label 1).
    label_mapping = {i: (0 if i < 2000 else 1) for i in range(4000)}
    train_indexes = list(range(1000)) + [i + 2000 for i in range(1000)]
    test_indexes = [i + 1000 for i in train_indexes]

    feature_memories = []
    if need_dfg:
        feature_memories.append(ShareMemory.from_file(f'model_caches/mcd_caches/jstap_preprocess_dfg{path_number}path.pt'))
    if need_ast:
        feature_memories.append(ShareMemory.from_file(f'model_caches/mcd_caches/jstap_preprocess_{path_number}path.pt'))

    big_features = MCD_BigFeatures(feature_memories, label_mapping=label_mapping)
    big_dataset = MaliciousDataset(big_features)

    training_dataset = torchdata.Subset(big_dataset, train_indexes)
    test_dataset = torchdata.Subset(big_dataset, test_indexes)
    print(len(training_dataset), len(test_dataset))

    training_loader = torchdata.DataLoader(training_dataset, batch_size=64)
    eval_loader = torchdata.DataLoader(test_dataset, batch_size=64)

    train_model(model, training_loader, eval_loader, device='cuda:1', epoch_number=500,
                model_call_handle=lambda model, x: model(*x), lr=0.0001,
                out_file=f'model_caches/common_caches/mini{model_name}_{path_number}.pt')

def test_SeGra(path_number):
    """Fine-tune the fused SeGra model from pre-trained extractors.

    Loads the separately trained AST and DFG extractor weights, freezes both
    extractors, and trains only the remaining (fusion) parameters on the
    clean jstap split, saving to the 'mini segra' checkpoint.
    """
    sp = SuperParameter_MCD.miniSeGra(path_number=path_number)

    ast_model = 'segra_ast'
    dfg_model = 'segra_dfg'
    segra = 'segra'

    model = get_model(sp, 'segra', device='cuda:1')
    model.ast_extractor.load_state_dict(torch.load(f'model_caches/common_caches/mini{ast_model}_{path_number}.pt'), strict=False)
    model.dfg_extractor.load_state_dict(torch.load(f'model_caches/common_caches/mini{dfg_model}_{path_number}.pt'), strict=False)

    # Freeze the extractors; only the fusion layers receive gradients.
    model.ast_extractor.requires_grad_(False)
    model.dfg_extractor.requires_grad_(False)

    feature_memories = [
        ShareMemory.from_file(f'model_caches/mcd_caches/jstap_preprocess_dfg{path_number}path.pt'),
        ShareMemory.from_file(f'model_caches/mcd_caches/jstap_preprocess_{path_number}path.pt'),
    ]

    # Indices 0..1999 are benign (label 0), 2000..3999 malicious (label 1).
    label_mapping = {i: (0 if i < 2000 else 1) for i in range(4000)}
    train_indexes = list(range(1000)) + [i + 2000 for i in range(1000)]
    test_indexes = [i + 1000 for i in train_indexes]

    big_features = MCD_BigFeatures(feature_memories, label_mapping=label_mapping)
    big_dataset = MaliciousDataset(big_features)

    training_dataset = torchdata.Subset(big_dataset, train_indexes)
    test_dataset = torchdata.Subset(big_dataset, test_indexes)

    training_loader = torchdata.DataLoader(training_dataset, batch_size=64)
    eval_loader = torchdata.DataLoader(test_dataset, batch_size=64)

    train_model(model, training_loader, eval_loader, device='cuda:1', epoch_number=100,
                model_call_handle=lambda model, x: model(*x), lr=0.0001,
                out_file=f'model_caches/common_caches/mini{segra}_{path_number}.pt')

def test_jstap_sample():
    """Smoke-test the feature builder on a single cached AST sample.

    Loads the obfuscated ('medium') jstap good-AST cache, takes the sample at
    index `peek_number`, builds its feature image and prints the image shape.
    """
    origin_features = ShareMemory.from_file('data/js/obfucscate/medium/jstap_train/caches/good_ast_caches.pt')

    # Index of the sample to inspect; 0 = first entry.
    peek_number = 0

    builder = MCDFeatureBuilder(SuperParameter_MCD.miniSeGra(path_number=256))

    obj = None
    for i, e in enumerate(origin_features):
        # Fix: the log label said 'dfg_features' (copy-paste leftover) while
        # this loop iterates the origin AST cache.
        print(f'origin_features:{i}')
        obj = e
        if i >= peek_number:
            break

    obj = builder.build(obj)
    print(obj.shape)

def eval_model_obs(model_name, level, path_number, need_dfg=True, need_ast=True):
    """Evaluate a saved model on the obfuscated jstap features for `level`.

    Loads the 'mini<model_name>_<path_number>' checkpoint, builds the dataset
    from the requested feature caches (DFG and/or AST) and runs evaluation on
    the whole obfuscated split.
    """
    sp = SuperParameter_MCD.miniSeGra(path_number=path_number)
    model = get_model(sp, model_name, device='cuda:1')
    model.load_state_dict(torch.load(f'model_caches/common_caches/mini{model_name}_{path_number}.pt'))

    feature_memories = []
    if need_dfg:
        feature_memories.append(ShareMemory.from_file(f'model_caches/mcd_caches/jstap_preprocess_{level}_dfg{path_number}path.pt'))
    if need_ast:
        feature_memories.append(ShareMemory.from_file(f'model_caches/mcd_caches/jstap_preprocess_{level}_{path_number}path.pt'))

    # 1995 samples: first 1000 labeled benign (0), the rest malicious (1).
    # NOTE(review): 1995 looks hand-counted from the obfuscated caches —
    # confirm it still matches if those datasets are regenerated.
    label_mapping = {i: (0 if i < 1000 else 1) for i in range(1995)}

    big_features = MCD_BigFeatures(feature_memories, label_mapping=label_mapping)
    big_dataset = MaliciousDataset(big_features)

    eval_loader = torchdata.DataLoader(big_dataset, batch_size=64)

    eval_model(model, eval_loader, device='cuda:1', model_call_handle=lambda model, x: model(*x))

def statistic_word():
    """Measure how well character-count "bags" distinguish identifiers.

    For every AST in the obfuscated 'medium' jstap caches, collects the set
    of identifier names and the set of their per-character count strings; the
    running ratio len(bags)/len(names) is 1.0 when no two distinct
    identifiers share the same character multiset. Prints a running total.
    """
    legal_identifier_chars = list('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_')
    level = 'medium'
    dataset_asts_path = [f'data/js/obfucscate/{level}/jstap_train/caches/good_ast_caches.pt',
                         f'data/js/obfucscate/{level}/jstap_train/caches/bad_ast_caches.pt']

    check_succcess_number = 0
    total = 0

    dataset_asts = [ShareMemory.from_file(e) for e in dataset_asts_path]
    for memory in dataset_asts:
        # Fix: the sample variable was `e`, which the character loop below
        # also used as its loop variable, shadowing the sample mid-iteration.
        for sample in memory:
            total += 1
            variables = set()
            bag_variables = set()
            for node in sample.search_nodes('identifier'):
                variables.add(node.ast_value)
                counter = Counter(node.ast_value)
                # Canonical bag string, e.g. 'a2b0c1...': one count per legal char.
                bag_str = ''.join(ch + str(counter.get(ch, 0)) for ch in legal_identifier_chars)
                bag_variables.add(bag_str)
            # Fix: a sample with no identifiers used to raise ZeroDivisionError;
            # treat it as a perfect (1.0) match instead.
            if variables:
                check_succcess_number += len(bag_variables) / len(variables)
            else:
                check_succcess_number += 1
            print(f'total:{total},check_succcess_number:{check_succcess_number},rate:{check_succcess_number/total}')
# One-off maintenance commands kept for reference (cache renames).
#ShareMemory.from_file(f'model_caches/mcd_caches/jstap_preprocess_medium_256path.pt').rename('model_caches/mcd_caches/jstap_preprocess_light_256path.pt')
#ShareMemory.from_file(f'model_caches/mcd_caches/jstap_preprocess_medium_dfg256path.pt').rename('model_caches/mcd_caches/jstap_preprocess_light_dfg256path.pt')

# Experiment driver: shared defaults for the obfuscation level and path
# count, followed by the pipeline steps. Choose which step to run by
# (un)commenting exactly one call below.
level='medium'
path_number=256

# Preprocessing steps (build feature-image caches).
#get_sample_images_jstap_obs(256,'medium',True)
#get_sample_images_jstap_obs(256,'medium')
#get_sample_images_jstap_obs(512,'medium',True)
#get_sample_images_jstap_obs(512,'medium')
#get_sample_images_jstap_obs(512,'light',True)
#get_sample_images_jstap_obs(512,'light')
#get_sample_images_jstap(512)
#get_sample_images_jstap(512,True)
# Training steps (extractors first, then the fused model via test_SeGra).
#test_model('segra_ast',512,False,True)
#test_model('segra_dfg',512,True,False)    
#validate_images()
#get_ast_caches()
#get_sample_images()
#get_sample_images(True)
#test_jstap_sample()
#test_SeGra(path_number=512)
# Evaluation on obfuscated data.
#eval_model_obs('segra_ast',level,path_number,False,True)
#eval_model_obs('segra_dfg',level,path_number,True,False)
#eval_model_obs('segra',level,path_number,True,True)

# Currently active step: identifier bag-of-characters collision statistics.
statistic_word()
