from codesecurity.feature.objects import CommonFeatureSet,Ast
from codesecurity.data.api import GroupPipe,MutilevelCaches,HistoryFile,ShareMemory,list_file_in_dir,pickle_load
from codesecurity.tasks.malicious_code_detect.meta import ModelCachesMeta
from codesecurity.tasks.malicious_code_detect.preprocessing import MCDFeatureBuilder,MCD_Feature,MCD_Features
from codesecurity.tasks.malicious_code_detect.objects import MaliciousDataset
from codesecurity.nn.api import train_model,eval_model,model_flatten_call,run_model
from codesecurity.tasks.common.torch_env import default_device
from codesecurity.data.parallel import do_parallel_lazy
from codesecurity.utils.data_reinforce import ImageReinforcer

import codesecurity.tasks.malicious_code_detect.model as model_enum
import torch.utils.data as torchdata
import torch
import numpy as np


import os
import time

def build_ast_caches_without_labels(target_dir,caches_file,parallel=False):
    """Parse every ``.js`` file under target_dir into an AST feature set and
    persist the results into a ShareMemory cache file.

    Args:
        target_dir: directory scanned (recursively via list_file_in_dir) for .js files.
        caches_file: path of the ShareMemory cache to create/extend and save.
        parallel: when True, parse with a worker pool of 10; otherwise serially.

    Note: in the serial path, files that fail to parse (falsy feature set) are
    collected and printed at the end; the parallel path does not track failures.
    """
    def handle(path):
        print(f'building ast for {path}')
        return CommonFeatureSet.from_file(path,None)

    pipe=ShareMemory.from_file(caches_file)
    js_files=[p for p in list_file_in_dir(target_dir) if p.endswith('.js')]

    errors=[]

    print(f'find {len(js_files)} number file in {target_dir}.')

    if parallel:
        delegates=[(handle,[p]) for p in js_files]

        for result in do_parallel_lazy(delegates,10,True):
            pipe.add_list(result)

    else:
        total=len(js_files)
        for index,path in enumerate(js_files):
            print(f'processing {index+1}-th file: {path}')
            feature=CommonFeatureSet.from_file(path,None)
            if feature:
                pipe.add(feature)
            else:
                errors.append(path)

            # periodic progress report, plus a final one on the last file
            if index%100==0 or index==total-1:
                print(f'processing : {index+1}/{total}')

    pipe.save()
    print(f'build finish. total {len(js_files)} files.')
    if len(errors)>0:
        print(f'error files: {len(errors)}')
        for bad_path in errors:
            print(bad_path)

def build_ast_caches_without_labels_with_history(target_dir,caches_file,history_file=None):
    """Resumable variant of build_ast_caches_without_labels.

    Uses a HistoryFile alongside the ShareMemory cache so an interrupted run
    can restart without re-parsing files that were already processed (or that
    previously failed to parse).

    Args:
        target_dir: directory scanned for .js files.
        caches_file: path of the ShareMemory cache to create/extend and save.
        history_file: optional path of the per-file status log; defaults to
            caches_file with its extension replaced by '_history.txt'.
    """
    # Per-file statuses: 'corrupt' is written before parsing (so a crash mid-parse
    # leaves the file marked bad), 'pass' overwrites it on success.
    status_enum=['corrupt','pass']

    if history_file is None:
        ext=os.path.splitext(caches_file)[1]
        # strip the extension and append the history suffix
        history_file=caches_file[:len(caches_file)-len(ext)]+'_history.txt'

    history=HistoryFile(history_file,auto_update=True)

    share_memory=ShareMemory.from_file(caches_file)
    all_files=list_file_in_dir(target_dir)
    all_files=[e for e in all_files if e.endswith('.js')]

    # Entries already in the cache were handled by a previous run.
    restore_start=len(share_memory)

    # If the history log is shorter than the cache (e.g. the cache was built
    # before history tracking existed), backfill it with 'pass' entries.
    # NOTE(review): assumes list_file_in_dir returns files in the same order
    # as the previous run -- TODO confirm.
    if len(history.history)<restore_start:
        new_history=[(e,status_enum[1]) for e in all_files[:restore_start]]
        history.history=new_history
        history.update()

    all_files=all_files[restore_start:]

    errors=[]

    for i,path in enumerate(all_files):
        if i+restore_start<len(history):
            status=history.get_status(i+restore_start)
            # Previously-failed files are skipped, not retried.
            if status==status_enum[0]:
                continue
        else:
            # Pre-mark as 'corrupt' so a crash during parsing is recorded.
            history.add_item(path,status_enum[0])
        common_feature=CommonFeatureSet.from_file(path,None)
        if common_feature:
            history.set_status(i+restore_start,status_enum[1])
            share_memory.add(common_feature)
        else:
            errors.append(path)

        # periodic progress report, plus a final one on the last file
        if i%100==0 or i==len(all_files)-1:
            print(f'processing : {i+1}/{len(all_files)}')


    history.update()

    share_memory.save()
    print(f'build finish. total {len(all_files)} files.')
    if len(errors)>0:
        print(f'error files: {len(errors)}')
        for error in errors:
            print(error)

    #history.delete()
  
def build_source_image_without_labels(ast_caches,sp,image_caches,parallel=False):
    """Render cached AST feature sets into source-image features and save them.

    Args:
        ast_caches: path to a pickled pipe of CommonFeatureSet groups.
        sp: resource handed to MCDFeatureBuilder (e.g. tokenizer/model config).
        image_caches: output GroupPipe file (groups of 250) for the images.
        parallel: when True, build with a worker pool of 10.
    """
    def handle(common_feature,feature_builder):
        print("build image for "+common_feature.abstract())
        ret=feature_builder.build(common_feature)
        return ret

    # status[0]: images built; status[1]: "empty" images (pixel sum < 1).
    status=[0,0]

    builder=MCDFeatureBuilder(sp)

    start_time=time.time()
    pipe=GroupPipe(image_caches,250)
    ast_pipe=pickle_load(ast_caches)

    if parallel:
        delegates=[]
        for group in ast_pipe.iter_group():
            for common_feature in group:
                delegates.append((handle,[common_feature,builder]))

        for e in do_parallel_lazy(delegates,10,False):
            for image in e:
                status[0]+=1
                validate_number=np.sum(image)
                if validate_number<1:
                    status[1]+=1
                pipe.add_group(image)

            # Guard: an empty batch would otherwise divide by zero.
            if status[0]:
                print(f'process: {status[0]} files. Avg time per sample: {(time.time()-start_time)/(status[0])}s')

    else:
        for i,group in enumerate(ast_pipe.iter_group()):
            for common_feature in group:
                print(common_feature.abstract())
                image=builder.build(common_feature)

                status[0]+=1
                validate_number=np.sum(image)
                if validate_number<1:
                    status[1]+=1

                pipe.add_group(image)
            print(f'processing {i+1}-th group sample')

            # Guard: an empty first group would otherwise divide by zero.
            if status[0]:
                print(f'avg time per sample: {(time.time()-start_time)/(status[0])}s')
    pipe.save()
    print(f"build finish. empty ast image number: {status[1]}/{status[0]}")
    
def build_ast_image_without_labels(ast_caches,sp,image_caches,parallel=False):
    """Render cached AST feature sets into AST-image features and save them.

    Same flow as build_source_image_without_labels, but calls
    MCDFeatureBuilder.build_ast_image instead of .build.

    Args:
        ast_caches: path to a pickled pipe of CommonFeatureSet groups.
        sp: resource handed to MCDFeatureBuilder.
        image_caches: output GroupPipe file (groups of 250) for the images.
        parallel: when True, build with a worker pool of 10.
    """
    def handle(common_feature,feature_builder:MCDFeatureBuilder):
        print("build ast image for "+common_feature.abstract())
        ret=feature_builder.build_ast_image(common_feature)
        return ret

    # status[0]: images built; status[1]: "empty" images (pixel sum < 1).
    status=[0,0]

    builder=MCDFeatureBuilder(sp)

    start_time=time.time()
    pipe=GroupPipe(image_caches,250)
    ast_pipe=pickle_load(ast_caches)

    if parallel:
        delegates=[]
        for group in ast_pipe.iter_group():
            for common_feature in group:
                delegates.append((handle,[common_feature,builder]))

        for e in do_parallel_lazy(delegates,10,False):
            for image in e:
                status[0]+=1
                validate_number=np.sum(image)
                if validate_number<1:
                    status[1]+=1
                pipe.add_group(image)

            # Guard: an empty batch would otherwise divide by zero.
            if status[0]:
                print(f'process: {status[0]} files. Avg time per sample: {(time.time()-start_time)/(status[0])}s')

    else:
        for i,group in enumerate(ast_pipe.iter_group()):
            for common_feature in group:
                print(common_feature.abstract())
                image=builder.build_ast_image(common_feature)

                status[0]+=1
                validate_number=np.sum(image)
                if validate_number<1:
                    status[1]+=1

                pipe.add_group(image)
            print(f'processing {i+1}-th group sample')

            # Guard: an empty first group would otherwise divide by zero.
            if status[0]:
                print(f'avg time per sample: {(time.time()-start_time)/(status[0])}s')
    pipe.save()
    print(f"build finish. empty ast image number: {status[1]}/{status[0]}")
    
def build_ast_caches(dataset_dir,good_prefix,bad_prefix,use_parallel=False):
    """Build AST caches for both the benign and malicious halves of a dataset.

    Args:
        dataset_dir: root directory of the dataset.
        good_prefix: sub-directory name holding benign .js samples.
        bad_prefix: sub-directory name holding malicious .js samples.
        use_parallel: forwarded to build_ast_caches_without_labels.
    """
    good_dir=os.path.join(dataset_dir,good_prefix)
    bad_dir=os.path.join(dataset_dir,bad_prefix)

    good_caches_dir,good_caches_file=ModelCachesMeta.local_ast_caches(good_dir)
    bad_caches_dir,bad_caches_file=ModelCachesMeta.local_ast_caches(bad_dir)

    # exist_ok avoids the race between an existence check and the creation.
    os.makedirs(good_caches_dir,exist_ok=True)
    os.makedirs(bad_caches_dir,exist_ok=True)

    build_ast_caches_without_labels(good_dir,good_caches_file,use_parallel)
    build_ast_caches_without_labels(bad_dir,bad_caches_file,use_parallel)
    
def build_source_image(dataset_dir,sp,good_prefix,bad_prefix,use_parallel=False):
    """Build source-image features for the good and bad halves of a dataset.

    Reads the AST caches produced by build_ast_caches and writes the rendered
    images into the per-directory image cache files.

    Args:
        dataset_dir: root directory containing the good/bad sub-directories.
        sp: resource handed to MCDFeatureBuilder.
        good_prefix: sub-directory name holding benign samples.
        bad_prefix: sub-directory name holding malicious samples.
        use_parallel: when True, build with a worker pool of 10.
    """
    # status[0]: images built; status[1]: "empty" images (pixel sum < 1).
    # Shared by both _build_source_image calls so the final summary covers both.
    status=[0,0]

    def _build_source_image(builder,ast_file,caches_file,parallel=False):
        # Render every cached AST in ast_file and stream results into caches_file.
        def handle(common_feature,feature_builder):
            print("build image for "+common_feature.abstract())
            ret=feature_builder.build(common_feature)
            return ret
        start_time=time.time()
        pipe=GroupPipe(caches_file,250)
        ast_pipe=pickle_load(ast_file)

        if parallel:
            delegates=[]
            for group in ast_pipe.iter_group():
                for common_feature in group:
                    delegates.append((handle,[common_feature,builder]))

            for e in do_parallel_lazy(delegates,10,False):
                for image in e:
                    status[0]+=1
                    validate_number=np.sum(image)
                    if validate_number<1:
                        status[1]+=1
                    pipe.add_group(image)

                # Guard: an empty batch would otherwise divide by zero.
                if status[0]:
                    print(f'process: {status[0]} files. Avg time per sample: {(time.time()-start_time)/(status[0])}s')

        else:
            for i,group in enumerate(ast_pipe.iter_group()):
                for common_feature in group:
                    print(common_feature.abstract())
                    image=builder.build(common_feature)

                    status[0]+=1
                    validate_number=np.sum(image)
                    if validate_number<1:
                        status[1]+=1

                    pipe.add_group(image)
                print(f'processing {i+1}-th group sample')

                # Guard: an empty first group would otherwise divide by zero.
                if status[0]:
                    print(f'avg time per sample: {(time.time()-start_time)/(status[0])}s')
        pipe.save()

    feature_builder=MCDFeatureBuilder(sp)

    good_dir=os.path.join(dataset_dir,good_prefix)
    bad_dir=os.path.join(dataset_dir,bad_prefix)

    _,good_cache_file=ModelCachesMeta.local_ast_caches(good_dir)
    _,bad_cache_file=ModelCachesMeta.local_ast_caches(bad_dir)

    _,good_image_file=ModelCachesMeta.local_image_caches(good_dir)
    _,bad_image_file=ModelCachesMeta.local_image_caches(bad_dir)

    print("building image for good sample.")
    _build_source_image(feature_builder,good_cache_file,good_image_file,use_parallel)

    print("building image for bad sample.")
    _build_source_image(feature_builder,bad_cache_file,bad_image_file,use_parallel)

    print(f"build finish. empty image number: {status[1]}/{status[0]}")

def build_ast_image(dataset_dir,sp,good_prefix,bad_prefix,use_parallel=False):
    """Build AST-image features for the good and bad halves of a dataset.

    Same flow as build_source_image, but renders with
    MCDFeatureBuilder.build_ast_image and writes to the AST-image caches.

    Args:
        dataset_dir: root directory containing the good/bad sub-directories.
        sp: resource handed to MCDFeatureBuilder.
        good_prefix: sub-directory name holding benign samples.
        bad_prefix: sub-directory name holding malicious samples.
        use_parallel: when True, build with a worker pool of 10.
    """
    # status[0]: images built; status[1]: "empty" images (pixel sum < 1).
    # Shared by both calls so the final summary covers good and bad samples.
    status=[0,0]

    def _build_source_ast_image(builder,ast_file,caches_file,parallel=False):
        # Render every cached AST in ast_file and stream results into caches_file.
        def handle(common_feature,feature_builder:MCDFeatureBuilder):
            print("build ast image for "+common_feature.abstract())
            ret=feature_builder.build_ast_image(common_feature)
            return ret
        start_time=time.time()
        pipe=GroupPipe(caches_file,250)
        ast_pipe=pickle_load(ast_file)

        if parallel:
            delegates=[]
            for group in ast_pipe.iter_group():
                for common_feature in group:
                    delegates.append((handle,[common_feature,builder]))

            for e in do_parallel_lazy(delegates,10,False):
                for image in e:
                    status[0]+=1
                    validate_number=np.sum(image)
                    if validate_number<1:
                        status[1]+=1
                    pipe.add_group(image)

                # Guard: an empty batch would otherwise divide by zero.
                if status[0]:
                    print(f'process: {status[0]} files. Avg time per sample: {(time.time()-start_time)/(status[0])}s')

        else:
            for i,group in enumerate(ast_pipe.iter_group()):
                for common_feature in group:
                    print(common_feature.abstract())
                    image=builder.build_ast_image(common_feature)

                    status[0]+=1
                    validate_number=np.sum(image)
                    if validate_number<1:
                        status[1]+=1

                    pipe.add_group(image)
                print(f'processing {i+1}-th group sample')

                # Guard: an empty first group would otherwise divide by zero.
                if status[0]:
                    print(f'avg time per sample: {(time.time()-start_time)/(status[0])}s')
        pipe.save()

    feature_builder=MCDFeatureBuilder(sp)

    good_dir=os.path.join(dataset_dir,good_prefix)
    bad_dir=os.path.join(dataset_dir,bad_prefix)

    _,good_cache_file=ModelCachesMeta.local_ast_caches(good_dir)
    _,bad_cache_file=ModelCachesMeta.local_ast_caches(bad_dir)

    _,good_image_file=ModelCachesMeta.local_ast_image_caches(good_dir)
    _,bad_image_file=ModelCachesMeta.local_ast_image_caches(bad_dir)

    print("building ast image for good sample.")
    _build_source_ast_image(feature_builder,good_cache_file,good_image_file,use_parallel)

    print("building ast image for bad sample.")
    _build_source_ast_image(feature_builder,bad_cache_file,bad_image_file,use_parallel)

    print(f"build finish. empty ast image number: {status[1]}/{status[0]}")
    
def build_dataset(dataset_dirs,good_prefix,bad_prefix,include_features=None):
    """Assemble a labelled MaliciousDataset from pre-built image caches.

    Args:
        dataset_dirs: one dataset root or a list of roots.
        good_prefix: sub-directory name holding benign samples.
        bad_prefix: sub-directory name holding malicious samples.
        include_features: feature names to load per sample; each must be
            'dfg' (source image caches) or 'ast' (AST image caches).
            Defaults to ['dfg'].

    Returns:
        MaliciousDataset with one MCD_Feature per sample, labelled
        "good" or "bad".

    Raises:
        ValueError: on an unknown feature name.
    """
    # Sentinel default avoids the mutable-default-argument pitfall.
    if include_features is None:
        include_features=['dfg']
    if isinstance(dataset_dirs,str):
        dataset_dirs=[dataset_dirs]

    good_features={e:[] for e in include_features}
    bad_features={e:[] for e in include_features}

    for dataset_dir in dataset_dirs:

        good_dir=os.path.join(dataset_dir,good_prefix)
        bad_dir=os.path.join(dataset_dir,bad_prefix)

        for feature_name in include_features:

            if feature_name=='dfg':
                _,good_image_file=ModelCachesMeta.local_image_caches(good_dir)
                _,bad_image_file=ModelCachesMeta.local_image_caches(bad_dir)
            elif feature_name=='ast':
                _,good_image_file=ModelCachesMeta.local_ast_image_caches(good_dir)
                _,bad_image_file=ModelCachesMeta.local_ast_image_caches(bad_dir)
            else:
                raise ValueError(f'unknown feature name: {feature_name}')

            good_image_pipe=pickle_load(good_image_file)
            bad_image_pipe=pickle_load(bad_image_file)

            for group in good_image_pipe.iter_group():
                for image in group:
                    good_features[feature_name].append(image)

            for group in bad_image_pipe.iter_group():
                for image in group:
                    bad_features[feature_name].append(image)

    feature_objs=[]

    # zip aligns the per-feature lists sample-by-sample; assumes every feature
    # cache stores samples in the same order -- TODO confirm.
    good_seq=zip(*[good_features[e] for e in include_features])
    bad_seq=zip(*[bad_features[e] for e in include_features])

    for feature in good_seq:
        feature_objs.append(MCD_Feature(feature,"good"))
    for feature in bad_seq:
        feature_objs.append(MCD_Feature(feature,"bad"))

    return MaliciousDataset(MCD_Features(feature_objs))

def build_dataset_without_labels(caches_file_pairs,include_features=None,include_origin=False):
    """Assemble an unlabelled MaliciousDataset from image cache files.

    Args:
        caches_file_pairs: iterable of tuples of cache file paths; each tuple
            is aligned position-by-position with include_features.
        include_features: feature names, one per cache file in each pair.
            Defaults to ['dfg'].
        include_origin: when True, the first feature column is treated as the
            sample's origin payload and the remaining columns as its features.

    Returns:
        MaliciousDataset whose MCD_Feature entries carry an empty label.
    """
    # Sentinel default avoids the mutable-default-argument pitfall.
    if include_features is None:
        include_features=['dfg']

    feature_map={e:[] for e in include_features}

    for caches_file_pair in caches_file_pairs:
        for feature_name,caches_file in zip(include_features,caches_file_pair):

            image_pipe=pickle_load(caches_file)

            for group in image_pipe.iter_group():
                for image in group:
                    feature_map[feature_name].append(image)

    feature_objs=[]

    # Align per-feature lists sample-by-sample; assumes every cache stores
    # samples in the same order -- TODO confirm.
    seq=zip(*[feature_map[e] for e in include_features])

    if include_origin:
        for feature in seq:
            # feature[0] is the origin payload, the rest are the inputs.
            feature_objs.append(MCD_Feature(feature[1:],'',feature[0]))
    else:
        for feature in seq:
            feature_objs.append(MCD_Feature(feature,''))

    return MaliciousDataset(MCD_Features(feature_objs))

def training_mcd_model(dataset:MaliciousDataset,sp,out_file=None,model_name=None,k=10,seed=42):
    """Shuffle the dataset, hold out roughly 1/k for evaluation, and train.

    Args:
        dataset: dataset to split (shuffled in place with `seed`).
        sp: resource forwarded to model construction.
        out_file: optional checkpoint path forwarded to train_model.
        model_name: optional model selector forwarded to get_model.
        k: holdout denominator; ~len(dataset)/k samples are used for eval.
        seed: shuffle seed, for reproducible splits.

    Returns:
        Whatever training_eval_mcd_model / train_model returns.
    """
    dataset.shuffle(seed)

    # Hold out at least one sample even for tiny datasets.
    eval_number=max(1,len(dataset)//k)
    training_number=len(dataset)-eval_number

    training_set=torchdata.Subset(dataset,range(training_number))
    eval_set=torchdata.Subset(dataset,range(training_number,len(dataset)))

    return training_eval_mcd_model(training_set,eval_set,sp,out_file,model_name)

def training_eval_mcd_model(training_set,eval_set,sp,out_file=None,model_name=None):
    """Train a MCD model on training_set, evaluating against eval_set.

    Args:
        training_set: torch-style dataset for training.
        eval_set: torch-style dataset for evaluation.
        sp: resource forwarded to get_model.
        out_file: optional checkpoint path forwarded to train_model.
        model_name: optional model selector forwarded to get_model.

    Returns:
        The result of train_model.
    """
    device=default_device()
    # Data augmentation is currently disabled: samples pass through unchanged.
    # (An ImageReinforcer was previously constructed here but never used.)
    reinforce_handle=lambda x:x

    model=model_enum.get_model(sp,model_name,device)

    training_loader=torchdata.DataLoader(training_set,batch_size=64)
    eval_loader=torchdata.DataLoader(eval_set,batch_size=64)

    return train_model(model,training_loader,eval_loader,device,epoch_number=150,out_file=out_file,reinforce=reinforce_handle,model_call_handle=model_flatten_call)

def eval_mcd_model(dataset:torchdata.Dataset,sp,model_file,model_name=None):
    """Load a saved MCD model checkpoint and evaluate it on `dataset`.

    Args:
        dataset: torch-style dataset to evaluate on.
        sp: resource forwarded to get_model.
        model_file: path to a state_dict checkpoint saved with torch.save.
        model_name: optional model selector forwarded to get_model.

    Returns:
        The result of eval_model.
    """
    device=default_device()
    model=model_enum.get_model(sp,model_name,device)
    # map_location lets a GPU-saved checkpoint load on a CPU-only machine.
    model_params=torch.load(model_file,map_location=device)

    model.load_state_dict(model_params)

    data_loader=torchdata.DataLoader(dataset,batch_size=64)

    return eval_model(model,data_loader,device)

def execute_mcd_model(model_file,sp,dataset:torchdata.Dataset,model_name=None):
    """Run a saved MCD model over `dataset`, yielding per-sample results.

    Args:
        model_file: path to a state_dict checkpoint saved with torch.save.
        sp: resource forwarded to get_model.
        dataset: torch-style dataset to run inference on.
        model_name: optional model selector forwarded to get_model.

    Yields:
        (input_x, y, y_hat) per sample: the input feature tuple, the last
        batch element (treated as the label), and the model prediction.
    """
    device=default_device()
    model=model_enum.get_model(sp,model_name,device)
    # map_location lets a GPU-saved checkpoint load on a CPU-only machine.
    model_params=torch.load(model_file,map_location=device)

    model.load_state_dict(model_params)

    data_loader=torchdata.DataLoader(dataset,batch_size=64)

    for batch in run_model(model,data_loader):
        x,y_hat=batch
        # The last tensor in the batch tuple is the label; the rest are inputs.
        x,y=x[:-1],x[-1]
        for i,input_x in enumerate(zip(*x)):
            yield input_x,y[i],y_hat[i]