from collections import Counter
import sys
import json
import itertools
import os
import math

sys.path.append(r'./')

from codesecurity.feature.ast_image import AstImageBuilder
from codesecurity.feature.objects import CommonFeatureSet
from codesecurity.tasks.common.intelligence_data import *
from codesecurity.utils.pretty_print import printlist
from codesecurity.tasks.common.code_api import get_api_list,get_api_common_words,feature_update_api_list,unuse_memory_release,export_feature,default_feature_file_name,export_mini_feature
from codesecurity.feature.property_graph import JsValueDependency
from codesecurity.data.api import GroupPipe,caches_load
from codesecurity.tasks.malicious_code_detect.meta import ModelCachesMeta
import codesecurity.tasks.common.api_name_word2vec as api_word2vec
from codesecurity.tasks.common.features import mini_feature
from codesecurity.utils.pretty_print import lazy_rprint

from codesecurity.data.api import pickle_load,pickle_save
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier

import numpy as np

# Module-level setup: load the hot-API word list once and build the shared
# JS intelligence object used by the experiment functions below.
init_intelligence_data_js('exp_result/hot_api_all_2024424_list.txt')

js_intelligence=commonAPIs_Js()

def read_dataset():
    """Load the cached benign ("good") and malicious ("bad") ASTs from disk.

    Returns:
        tuple: ``(good_asts, bad_asts)`` as deserialized by ``pickle_load``.
    """
    # Removed unused local ``dataset_dir`` from the original.
    bad_caches='model_caches/data_caches/bazaar_malicious_ast_caches.pt'
    good_caches='model_caches/data_caches/20231201_20231219_ast_caches.pt'

    good_asts=pickle_load(good_caches)
    bad_asts=pickle_load(bad_caches)

    return good_asts,bad_asts

def generate_word_pn(good_asts,bad_asts):
    """Count API common-word frequencies in benign vs. malicious ASTs and
    dump them to exp_result/word_pn.txt as ``word<TAB>good<TAB>bad`` lines.

    Note: only words seen in the benign corpus are written; bad-only words
    are not emitted (same as the original behavior).
    """
    def _count_words(asts):
        # One Counter per corpus, updated with each AST's common API words.
        counter=Counter()
        for ast in asts:
            api_list=JsValueDependency.api_call_table(ast.ast_object,AstImageBuilder(ast.ast_object))
            counter.update(get_api_common_words(api_list))
        return counter

    good_counter=_count_words(good_asts)
    bad_counter=_count_words(bad_asts)

    with open('exp_result/word_pn.txt','w') as f:
        for word,good_count in good_counter.items():
            f.write(f'{word}\t{good_count}\t{bad_counter.get(word,0)}\n')

def test_word_pn():
    """Train a RandomForest on word positive/negative-ratio features and
    report a confusion matrix / precision on the held-out set.

    For each known word the feature value is the squared bad/good frequency
    ratio (loaded from exp_result/word_pn.txt). Samples whose feature vector
    is (near) all-zero are skipped and counted as "unavailable".
    """
    # NOTE(review): the jstap_train directory feeds the *test* caches and
    # jstap_test feeds the *train* caches below — this swap is preserved
    # from the original; confirm it is intentional.
    dataset_dir='data/js/jstap_train'
    good_dir=os.path.join(dataset_dir,'good')
    bad_dir=os.path.join(dataset_dir,'bad')

    good_caches_dir,good_test_caches_file=ModelCachesMeta.local_ast_caches(good_dir)
    bad_caches_dir,bad_test_caches_file=ModelCachesMeta.local_ast_caches(bad_dir)

    dataset_dir='data/js/jstap_test'
    good_dir=os.path.join(dataset_dir,'good')
    bad_dir=os.path.join(dataset_dir,'bad')

    good_caches_dir,good_train_caches_file=ModelCachesMeta.local_ast_caches(good_dir)
    bad_caches_dir,bad_train_caches_file=ModelCachesMeta.local_ast_caches(bad_dir)

    property_names=[CommonFeatureSet.Addon_Api_List,CommonFeatureSet.Addon_Common_Api_Word]
    export_property_train_good=default_feature_file_name(good_train_caches_file,property_names)
    export_property_train_bad=default_feature_file_name(bad_train_caches_file,property_names)

    export_property_test_good=default_feature_file_name(good_test_caches_file,property_names)
    export_property_test_bad=default_feature_file_name(bad_test_caches_file,property_names)

    train_good_asts=caches_load(export_property_train_good)
    train_bad_asts=caches_load(export_property_train_bad)

    test_good_asts=caches_load(export_property_test_good)
    test_bad_asts=caches_load(export_property_test_bad)

    # BUG FIX: the original used ``len(test_good_asts)`` as the label split
    # in the *train* loop as well, mislabeling train samples whenever the
    # train and test good-set sizes differ. Use the matching size per loop.
    train_good_number=len(train_good_asts)
    test_good_number=len(test_good_asts)

    word_pns=js_intelligence.load_word_stand('exp_result/word_pn.txt')
    key_mapping={k:i for i,k in enumerate(word_pns)}

    def _build_inputs(examples,good_number):
        """Vectorize a good+bad chain; returns (inputs, labels, skipped)."""
        inputs=[]
        labels=[]
        unavailable=0
        for i,e in enumerate(examples):
            words=e[2]
            x_input=np.zeros(len(word_pns),dtype=np.float32)
            for w in words:
                if w in key_mapping:
                    word_p,word_n=word_pns[w]
                    # Clamp to avoid division by zero on unseen counts.
                    word_p=max(0.1,word_p)
                    word_n=max(0.1,word_n)
                    pn_ratio=word_n/word_p
                    x_input[key_mapping[w]]=pn_ratio**2
            if sum(x_input)<0.1:
                # No known word contributed a feature: sample is unusable.
                unavailable+=1
                continue
            labels.append(1 if i<good_number else 0)
            inputs.append(x_input)
        return inputs,labels,unavailable

    train_inputs,train_true_labels,unavailable_number_train=_build_inputs(
        itertools.chain(train_good_asts,train_bad_asts),train_good_number)
    print(f'build train data done, unavailable_number: {unavailable_number_train}')

    test_inputs,test_true_labels,unavailable_number_test=_build_inputs(
        itertools.chain(test_good_asts,test_bad_asts),test_good_number)
    print(f'build test data done, unavailable_number: {unavailable_number_test}')

    classifier=RandomForestClassifier()
    classifier.fit(train_inputs,train_true_labels)
    pred_labels=classifier.predict(test_inputs)

    # Rows/cols ordered [good(1), bad(0)]; precision is for the good class.
    confusion_m=confusion_matrix(test_true_labels,pred_labels,labels=[1,0])
    print(confusion_m)
    print("precision: ",confusion_m[0][0]/(confusion_m[0][0]+confusion_m[1][0]))
    print(f'unavailable_number: {unavailable_number_test}')
    
def update_common_word():
    """Refresh the API-list feature for the npm wild AST caches.

    Removed dead assignments from the original: ``bad_caches`` was unused
    and the first ``good_caches`` was immediately overwritten. Earlier cache
    paths are kept as comments for quick switching between experiments.
    """
    #bad_caches='model_caches/data_caches/bazaar_malicious_ast_caches.pt'
    #good_caches='model_caches/data_caches/20231201_20231219_ast_caches.pt'
    good_caches='model_caches/data_caches/npm_wild_ast_caches.pt'

    feature_update_api_list(good_caches)
    #feature_update_api_list(bad_caches)

def release_memory():
    """Release unused memory held by the benign and malicious AST caches."""
    caches_files=(
        'model_caches/data_caches/20231201_20231219_ast_caches.pt',  # benign
        'model_caches/data_caches/bazaar_malicious_ast_caches.pt',   # malicious
    )
    for caches_file in caches_files:
        unuse_memory_release(caches_file)
    
def export():
    """Export the mini feature set for the npm wild AST caches.

    Removed dead assignments from the original (``bad_caches``, the first
    ``good_caches``, and ``property_names`` were only referenced by
    commented-out code); earlier full-feature export variants are kept as
    comments for reference.
    """
    #property_names=[CommonFeatureSet.Addon_Api_List,CommonFeatureSet.Addon_Common_Api_Word]
    #bad_caches='model_caches/data_caches/bazaar_malicious_ast_caches.pt'
    good_caches='model_caches/data_caches/npm_wild_ast_caches.pt'

    export_mini_feature(good_caches)
    #export_feature(bad_caches,property_names)

def fix_caches():
    """Reload the malicious AST caches and rebuild their metadata."""
    caches_object=caches_load('model_caches/data_caches/bazaar_malicious_ast_caches.pt')
    caches_object.update_meta()

def test_share_memory():
    """Print the addon properties of one cached entry as a shared-memory sanity check."""
    caches=caches_load('model_caches/data_caches/20231201_20231219_ast_caches.pt')

    group_buffer=caches.group(0)
    sample=group_buffer[3]

    printlist(sample.addon_properties.values())
    printlist(sample.addon_properties.keys())

def test_common_word_extract():
    """Train the api-name word2vec model incrementally, one cache group at a time."""
    caches_file='model_caches/data_caches/_ast_cachesapi_list_common_api_word_feature.pt'

    caches=caches_load(caches_file)
    for i,group in enumerate(caches.iter_group()):
        # Entry index 2 holds the common-word sequence for each example.
        sentences=[list(entry[2]) for entry in group]
        api_word2vec.train(sentences)
        print(f'group {i} done. vocab size: {len(api_word2vec.model().wv)}')

    api_word2vec.save()

def test_word_simliar():
    """Print nearest word2vec neighbours for the 50 most bad-leaning words.

    Words are ranked by clamped good/bad frequency ratio ascending, so the
    most bad-dominated words come first. Words missing from the vocabulary
    are collected and printed at the end.
    """
    word_pns=js_intelligence.load_word_stand('exp_result/word_pn.txt')
    keys_pn=[]
    for k in word_pns:
        word_p,word_n=word_pns[k]
        # Clamp to avoid division by zero on unseen counts.
        word_p=max(0.1,word_p)
        word_n=max(0.1,word_n)
        keys_pn.append((k,word_p/word_n))
    keys_pn.sort(key=lambda e:e[1],reverse=False)

    keys_pn=keys_pn[:50]
    not_exist_list=[]

    # Hoisted: the original re-fetched api_word2vec.model().wv up to three
    # times per word inside the loop.
    wv=api_word2vec.model().wv

    print(f'vocab size: {len(wv)}')

    for k,v in keys_pn:
        print(k,v)
        if k in wv:
            printlist(wv.most_similar(k))
        else:
            not_exist_list.append(k)
        print('-----------------')
    printlist(not_exist_list)

def test_vocab():
    """Build a word2vec vocabulary over the npm wild caches, then train it.

    Sentences are accumulated into a buffer that is flushed to the model
    once it exceeds ``threshold`` entries (or on the final group).
    """
    import gensim

    test_model=gensim.models.Word2Vec(vector_size=256,min_count=2)

    caches='model_caches/data_caches/npm_wild_ast_cachesapi_list_common_api_word_feature.pt'
    caches_file='model_caches/common_caches/api_name_word2vec.pt'
    obj=caches_load(caches)

    buffer=[]
    threshold=5000
    update_group=False

    for i,group in enumerate(obj.iter_group()):
        buffer.extend([list(e[2]) for e in group])

        if len(buffer)>threshold or i==obj.group_number-1:
            # First flush creates the vocab; later flushes update it.
            test_model.build_vocab(buffer,update=update_group)
            buffer=[]
            if not update_group:
                update_group=True

        print(f'group {i} done. vocab size: {len(test_model.wv)}')

    print('start training...')

    # BUG FIX: the original rebuilt ``buffer`` from scratch for every group
    # in this loop, so any group smaller than ``threshold`` (except the
    # last) was silently dropped from training. Accumulate across groups,
    # matching the vocabulary-building loop above.
    buffer=[]
    for i,group in enumerate(obj.iter_group()):
        buffer.extend([list(e[2]) for e in group])
        if len(buffer)>threshold or i==obj.group_number-1:
            test_model.train(buffer,total_examples=test_model.corpus_count,epochs=test_model.epochs)
            buffer=[]
        print(f'group {i} done. vocab size: {len(test_model.wv)}')

    print(f'vocab size: {len(test_model.wv)}')
    test_model.save(caches_file)
    
def test_malicious_detect():
    """Score npm wild examples by how many of the top-80 bad-leaning words
    they contain; write the ranked hits to exp_result/malicious_example.txt.
    """
    target_caches='model_caches/data_caches/npm_wild_ast_cachesapi_list_common_api_word_feature.pt'

    malicious_file='exp_result/malicious_example.txt'
    malicious_examples=[]

    word_pns=js_intelligence.load_word_stand('exp_result/word_pn.txt')
    keys_pn=[]

    for k in word_pns:
        word_p,word_n=word_pns[k]
        # Clamp to avoid division by zero on unseen counts.
        word_p=max(0.1,word_p)
        word_n=max(0.1,word_n)
        keys_pn.append((k,word_p/word_n))

    # Ascending ratio: most bad-dominated words first.
    keys_pn.sort(key=lambda e:e[1],reverse=False)
    keys_pn=keys_pn[:80]

    keys=[e[0] for e in keys_pn]
    all_examples=caches_load(target_caches)

    for example in all_examples:
        # Build the set once so each key membership test is O(1); the
        # original scanned the word sequence once per key.
        word_set=set(example[2])

        score=sum(1 for k in keys if k in word_set)

        if score>5:
            print(example[0])

        if score>0:
            malicious_examples.append((example[0],score))

    malicious_examples.sort(key=lambda e:e[1],reverse=True)

    with open(malicious_file,'w') as f:
        for e in malicious_examples:
            f.write(f'{e[0]}\t{e[1]}'+'\n')

def test_hidden_api_call():
    """Rank npm wild examples by the fraction of their API words missing
    from the word2vec vocabulary, print distribution statistics, and write
    the 50 lowest-scoring entries to exp_result/malicious_example.txt.

    Examples whose every word is in-vocabulary get a negative score,
    ``-log10(word count)``, so richer fully-known examples sort first.
    """
    target_caches='model_caches/data_caches/npm_wild_ast_cachesapi_list_common_api_word_feature.pt'

    malicious_file='exp_result/malicious_example.txt'

    all_examples=caches_load(target_caches)
    hidden_status=[]

    # Hoisted: the original called api_word2vec.model() once per word of
    # every example.
    wv=api_word2vec.model().wv

    for example in all_examples:
        words=example[2]
        if len(words)==0:
            # No words: skip, a ratio would be undefined.
            continue

        score=sum(1 for k in words if k not in wv)

        if score==0:
            hidden_status.append((example[0],-math.log10(len(words))))
        else:
            hidden_status.append((example[0],score/len(words)))

    hidden_status.sort(key=lambda e:e[1],reverse=False)

    scores=[e[1] for e in hidden_status]
    total=len(hidden_status)
    print('size:',total)
    print('average:',sum(scores)/total)
    print('min:',hidden_status[0])
    print('max:',hidden_status[-1])
    print('median:',hidden_status[total//2])
    print('75%:',hidden_status[total*3//4])
    print('90%:',hidden_status[total*9//10])
    print('95%:',hidden_status[total*19//20])
    print('99%:',hidden_status[total*99//100])
    print('deviation:',np.std(scores))

    printlist(hidden_status[:10])

    with open(malicious_file,'w') as f:
        for e in hidden_status[:50]:
            f.write(f'{e[0]}\t{e[1]}'+'\n')

def test_stat():
    """Print a histogram of AST sizes bucketed by floor(log2(size))."""
    target_caches='model_caches/data_caches/npm_wild_ast_caches.pt'
    obj=caches_load(target_caches)

    size_dist=[len(e) for e in obj]

    # Release the (potentially large) caches object before aggregating.
    del obj

    # Counter replaces the hand-rolled dict-of-counts; insertion order (and
    # thus printed order) matches the original.
    size_detail=Counter(int(math.log(size,2)) for size in size_dist)

    for bucket,count in size_detail.items():
        print(f'{bucket}: {count}')

def test_mini_features():
    """Compact the npm wild mini-feature caches and print the entry count.

    Removed from the original: the unused local ``origin_caches`` and a
    large block of commented-out migration code (one-off mini-feature
    conversion that had already been run).
    """
    target_caches='model_caches/data_caches/npm_wild_ast_caches_mini_feature.pt'

    obj=caches_load(target_caches)

    obj.compact()

    print(len(obj))
    
# Script entry point: only test_mini_features() is currently enabled; the
# commented calls below are alternate experiments toggled by hand.
test_mini_features()
#test_stat()   
#test_malicious_detect()
#test_hidden_api_call()
#test_common_word_extract()
#test_vocab()
#test_word_simliar()
#target_file=r'test_data/sample.js'
#target_dir=r'data'
#export()
#test_word_pn()
#fix_caches()
#update_common_word()
#test_word_pn()
#test_share_memory()
#good_asts,bad_asts=read_dataset()




# good_keys=set()
# for e in good_counter.most_common(300):
#     good_keys.add(e[0])

# for e in bad_counter.most_common(100):
#     if e[0] not in good_keys:
#         print(e)

# printlist(good_counter.most_common(100))
# print('-------------------------')
# printlist(bad_counter.most_common(100))
#printlist(counter.most_common(5000))