from codesecurity.data.api import pickle_load
from codesecurity.data.objects import ProgramLanguage
from codesecurity.data.package_extract import *
from codesecurity.feature.objects import CommonFeatureSet
from codesecurity.feature.tfidf import TfidfModule
from codesecurity.tasks.code_authorship_attribution import ForseeCachesMetatadata,caa_forsee,ForseeSuperParameter
from codesecurity.tasks.code_authorship_attribution.prepare_torch import DLCAISDataset
from codesecurity.tasks.code_authorship_attribution.preprocessing import DLCAISSuperParameter, ForseeFeatures
from codesecurity.tasks.code_perspective_project.code_see import data_migration,read_author_set,read_core_author_set,group_by_language
from codesecurity.tasks.code_perspective_project import AuthorObjectBuilder
from codesecurity.tasks.code_perspective_project.objects import Author

import torch
import torch.nn.functional
import os

def case_select(dataset, index):
    """Fetch a single sample from `dataset` as a batch of size one.

    Each dataset entry is expected to be a sequence whose last element is
    the label and whose preceding elements are feature arrays.

    Args:
        dataset: indexable collection; each entry is
            `[feat_0, ..., feat_k, label]`.
        index: position of the sample to fetch.

    Returns:
        A pair `(tensors, label)` where `tensors` is a list of tensors,
        each with a leading batch dimension of size 1, and `label` is the
        raw label taken from the entry.
    """
    entry = dataset[index]
    sample, label = entry[:-1], entry[-1]

    # Generalized from the original hard-coded 3-way unpack: works for any
    # number of feature components and produces identical results for 3.
    # torch.tensor(c).unsqueeze(0) == torch.stack([torch.tensor(c)]).
    batched = [torch.tensor(component).unsqueeze(0) for component in sample]

    return batched, label

def group_select(dataset, label, group_number=10):
    """Gather up to `group_number` samples carrying `label` into one batch.

    Scans the dataset in order via `case_select` and stops early once
    enough matching samples are found.

    Returns:
        `(features, labels)` — `features` is a list of three tensors, each
        concatenated along the batch dimension; `labels` repeats `label`
        once per collected sample.
    """
    collected = []
    for idx in range(len(dataset)):
        features, sample_label = case_select(dataset, idx)
        if sample_label == label:
            collected.append(features)
            if len(collected) == group_number:
                break

    xs, ys, zs = zip(*collected)
    return [torch.concat(xs), torch.concat(ys), torch.concat(zs)], [label] * len(collected)

def test_for_model():
    """Smoke-test the Forsee model: embed two label groups and compare them.

    Loads the cached gcj_cpp training dataset, builds the model on GPU 0
    and prints the cosine similarity between the embeddings of the first
    two classes. Requires a CUDA device and the on-disk caches.
    """
    meta = ForseeCachesMetatadata.auto('data/gcj_cpp')
    sp = ForseeSuperParameter.gcj()

    device = torch.device('cuda:0')

    dataset = torch.load(meta.training_default_task_data_file)

    model = caa_forsee(meta, sp, dataset.class_number, device)

    # The group labels are not needed for the similarity check; a dead
    # `case_select(dataset, 0)` call with unused results was also removed.
    group1, _ = group_select(dataset, 0)
    group2, _ = group_select(dataset, 1)

    print(torch.nn.functional.cosine_similarity(model.embeding(*group1), model.embeding(*group2)))

def test_author_project():
    """Exercise AuthorObjectBuilder against a single on-disk author directory."""
    author_dir = '/mnt/XiaoweiGuo/data/MaliciousRepo/authors1/00xZ'
    builder = AuthorObjectBuilder(author_dir)

    print(builder.get_author_name())
    #print(builder.get_all_repo_info())
    print(builder.get_author_info())
    print(builder.get_all_repo_path())

    built = builder.build()
    print(built.author_info)
    print(built.repos[0].sources[0].path)

def test_features():
    """Print the first syntactic and lexical feature rows of the java40 caches."""
    cache_meta = ForseeCachesMetatadata.auto('data/java40')
    hyper = ForseeSuperParameter.github_java()

    loaded = torch.load(cache_meta.training_default_task_data_file)

    print(loaded.features.syntactic[0])
    print(loaded.features.lexical[0])

def test_data_migrate():
    """Copy the 500 core authors into a fresh dataset directory."""
    source_dir = '/home/codedataset/gxw/data/MaliciousRefine'
    target_dir = '/home/codedataset/gxw/data/MaliciousCore500'

    core_authors = read_core_author_set(source_dir, 500)
    data_migration(core_authors, target_dir)

def bug_fix():
    # swap repo.json and author.json
    # One-off repair script: for each author directory, swap the contents of
    # the repo-metadata file (Author.RepoMeta) and the author-metadata file
    # ('<author>.json') via a three-step rename through 'temp.json'.
    path='/home/codedataset/gxw/data/MaliciousRefine'
    
    authors=os.listdir(path)
    
    # Pre-compute both file paths for every author, index-aligned with `authors`.
    repo_names=[os.path.join(path,author,Author.RepoMeta) for author in authors]
    author_names=[os.path.join(path,e,f'{e}.json') for e in authors]
    
    flag=False

    for i in range(len(repo_names)):
        repo_name=repo_names[i]
        author_name=author_names[i]
        
        temp_name=os.path.join(path,authors[i],'temp.json')
        
        # NOTE(review): `flag` is never reset inside the loop, so the FIRST
        # author directory containing a 'temp' entry causes this and ALL
        # subsequent authors to be skipped, not just that one. If the intent
        # was a per-author "already/partially processed" guard, this should
        # be a plain `continue` without the sticky flag — confirm the
        # intended semantics before reusing this script. Also note the guard
        # checks 'temp' while the scratch file used below is 'temp.json'.
        if os.path.exists(os.path.join(path,authors[i],'temp')):
            flag=True

        if flag:
            continue
        


        # Three-way rename: repo -> temp, author -> repo, temp -> author.
        os.rename(repo_name,temp_name)
        os.rename(author_name,repo_name)
        os.rename(temp_name,author_name)

def test_group_by_language():
    """Regroup the MaliciousCore500 dataset by programming language.

    Reads every author under `origin_dir` and writes language-partitioned
    copies into `new_dir`.
    """
    origin_dir = '/mnt/XiaoweiGuo/data/MaliciousCore500'
    new_dir = '/mnt/XiaoweiGuo/data/Malicious500Group'
    authors = read_author_set(origin_dir)
    # Fixed: ProgramLanguage.Java was duplicated in the original list.
    languages = [
        ProgramLanguage.C,
        ProgramLanguage.Cpp,
        ProgramLanguage.Java,
        ProgramLanguage.JavaScript,
        ProgramLanguage.Python,
    ]
    group_by_language(authors, languages, new_dir)

def test_mongo_insert():
    """Rebuild the 'repo_vector' collection from a JSON dump of repo embeddings.

    Drops any existing collection, bulk-inserts the documents, then prints
    the first 10 stored records as a sanity check.
    """
    from pymongo import MongoClient
    import json

    host = '10.12.180.129'
    authSource = 'admin'

    with open('temp/mongo_output/repo_embeding.json', 'r') as f:
        data = json.load(f)

    # SECURITY: credentials are hard-coded in this URI; move them to
    # environment variables / a secrets store before sharing this code.
    # The URI now reuses `host`/`authSource` instead of silently
    # duplicating their values (they were unused locals before).
    connector = MongoClient(f'mongodb://ewds:Hust5146279@{host}/ewds?authSource={authSource}')
    db = connector.get_database('ewds')
    db.drop_collection('repo_vector')
    repo_vector_collection = db.get_collection('repo_vector')
    result_id = repo_vector_collection.insert_many(data)
    result = repo_vector_collection.find().limit(10)
    for e in result:
        print(e)

def test_mongo_connect():
    """Connect to the 'repo_vector' collection and print every stored URL."""
    from pymongo import MongoClient

    host = '10.12.180.129'
    authSource = 'admin'

    # SECURITY: credentials are hard-coded in this URI; relocate to env
    # vars / a secrets store. The URI now reuses `host`/`authSource`
    # (previously unused locals duplicating the literal string).
    connector = MongoClient(f'mongodb://ewds:Hust5146279@{host}/ewds?authSource={authSource}')
    db = connector.get_database('ewds')
    #db.drop_collection('repo_vector')
    repo_vector_collection = db.get_collection('repo_vector')

    # Project only the 'url' field of every document; a comprehension
    # replaces the original manual append loop.
    rows = list(repo_vector_collection.find({}, {'url': 1}))
    urls = [row['url'] for row in rows]
    print(urls)
    
def test_jstap():
    """Run the JStap PDG builder on a bundled sample."""
    from codesecurity.interface.jStap import build_pdg

    sample = 'third_party/JStap/samples/Bad-validate'
    build_pdg(sample, True)


def test_model_param():
    """Load a saved layout-extractor checkpoint and print its contents.

    The redundant function-local `import torch` was removed; torch is
    already imported at module level.
    """
    path = '/home/passwd123/XiaoweiGuo/vscode/code-security/data_refine/working_cdc6506260d2bc95dbde09e206958dea/layout_extractor.pt'
    checkpoint = torch.load(path)
    print(checkpoint)

def test_tfidf():
    """Feed tokenized source files into TfidfModule in two incremental batches."""
    path = 'data/gcj_cpp'
    example_dataset = '/home/passwd123/XiaoweiGuo/vscode/code-security/data/gcj_cpp'

    # Keep only the first author directory of the dataset.
    example_dirs = [
        os.path.join(example_dataset, name)
        for name in os.listdir(example_dataset)
        if os.path.isdir(os.path.join(example_dataset, name))
    ][:1]

    example_paths = []
    for directory in example_dirs:
        example_paths.extend([os.path.join(directory, entry) for entry in os.listdir(directory)])

    meta = ForseeCachesMetatadata.auto(path)
    features = [CommonFeatureSet.from_file(example_path) for example_path in example_paths]

    module: TfidfModule = TfidfModule()
    docs = [feature.tokens for feature in features]

    # Exercise incremental vocabulary building: add the corpus in two batches.
    first_batch = docs[:5]
    second_batch = docs[5:]

    module.add_documents(first_batch)
    module.add_documents(second_batch)

def test_DLCAIS_dataset():
    """Inspect the lexical features of the first refined gcj_cpp sample."""
    cache_path = 'data/gcj_cpp'
    meta = ForseeCachesMetatadata.auto(cache_path)
    features: ForseeFeatures = pickle_load(meta.training_refine_data_file)

    print(features.samples[0].lexical)

def test_random_forest():
    """Train and score a RandomForest on DLCAIS features with an 80/20 split."""
    from sklearn.ensemble import RandomForestClassifier
    import random

    cache_path = 'data/gcj_cpp'
    meta = ForseeCachesMetatadata.auto(cache_path)
    features: ForseeFeatures = pickle_load(meta.training_refine_data_file)
    dataset = DLCAISDataset(features, DLCAISSuperParameter.default())

    indexes = list(range(len(dataset)))
    #random.shuffle(indexes)

    split = int(len(indexes) * 0.8)
    train_idx = indexes[:split]
    test_idx = indexes[split:]

    x_train, y_train = dataset.get_feature_matrix()[train_idx], dataset.get_labels()[train_idx]
    x_test, y_test = dataset.get_feature_matrix()[test_idx], dataset.get_labels()[test_idx]

    clf = RandomForestClassifier()
    clf.fit(x_train, y_train)

    print(clf.score(x_test, y_test))

if __name__ == '__main__':
    # Guarded entry point: previously test_tfidf() ran unconditionally at
    # import time; importing this module no longer triggers a test run.
    test_tfidf()
    #test_random_forest()
    #test_model_param()
    #bug_fix()
    #test_features()
    #test_data_migrate()
    #test_group_by_language()
    #test_mongo_insert()
    #test_mongo_connect()
    # sample0,label0=case_select(dataset,0)
    # sample1,label1=case_select(dataset,1)

    # print(model(*sample0),label0)
    # print(model(*sample1),label1)
    # for e in list_npm('/mnt/XiaoweiGuo/data/test/package'):
    #     print(e)