from __future__ import annotations
import datetime
import json
import os
import pickle

import fire
import torch
import torch.utils.data as torchdata
import numpy as np
from codesecurity.tasks.code_authorship_attribution.caa import caa_build_external_data, caa_forsee, label_sample_with_author_repo
from codesecurity.tasks.code_authorship_attribution.caches_manager import ForseeCachesMetatadata
from codesecurity.tasks.code_authorship_attribution.prepare_torch import prepare_training_set,prepare_single_data

from codesecurity.tasks.code_authorship_attribution.preprocessing import ForseeSuperParameter
from codesecurity.tasks.code_authorship_attribution.training_model import embeding_model
from codesecurity.tasks.code_perspective_project.code_see import read_author_set,AuthorObjectBuilder
from codesecurity.tasks.code_perspective_project.objects import Author
from main import get_class_number

from pymongo import MongoClient
import json

def deduce_url(absolute_path,lang='cpp'):
    """Derive an '<author>/<repo>' identifier from an absolute file path.

    Takes the two path segments immediately following the first '<lang>/'
    component, e.g. '/data/cpp/alice/tool/src/a.c' -> 'alice/tool'.

    Assumes the path contains '<lang>/' followed by at least two segments
    (author and repository) — TODO confirm against the dataset layout.

    Fixes vs. the original:
    - removed a dead first computation of segment1 that was immediately
      overwritten;
    - segment2 is now taken relative to the '<lang>/' marker instead of
      `find(segment1)`, which matched the FIRST occurrence of the author
      name anywhere in the path (wrong result when the author name also
      appeared in an earlier directory);
    - no longer drops the last character when segment2 is the final path
      component (the old code sliced with end_index == -1).
    """
    marker = f"{lang}/"
    start_index = absolute_path.find(marker) + len(marker)
    # Extract author and repository: the first two segments after the marker.
    parts = absolute_path[start_index:].split("/", 2)
    segment1, segment2 = parts[0], parts[1]

    return f'{segment1}/{segment2}'


# Default compute devices; the embedding functions below select one of these
# based on their `device` argument ('gpu' selects cuda:0, anything else CPU).
DEVICE_GPU=torch.device('cuda:0')
DEVICE_CPU=torch.device('cpu')

def get_repo_embeding(meta_dir,input_dir,lang,caches_file,sp,device,out_dir=None,mode='repo',label="",min_number=1,max_number=2**32-1):
    """Compute one summed embedding vector per repository and dump them to JSON.

    File-level embeddings are produced by the Forsee model, grouped by the
    repository URL deduced from each sample's path, summed per repository,
    L2-normalized, and written to ``<out_dir>/repo_embeding<label>.json``.

    Args:
        meta_dir: directory holding the Forsee caches metadata / training set.
        input_dir: directory with the code to embed; when equal to meta_dir,
            the cached training set itself is embedded.
        lang: language directory name used by `deduce_url` (e.g. 'cpp').
        caches_file: caches file passed to `caa_build_external_data`.
        sp: super-parameter name resolved via `ForseeSuperParameter.instance`.
        device: 'gpu' for cuda:0, anything else for CPU.
        out_dir: output directory; nothing is written when None/empty.
        mode: 'repo'/'test' label samples by (author, repo); 'test' also caps
            the number of processed files at 5000 and pretty-prints the JSON.
        label: suffix appended to the output file name.
        min_number/max_number: sample-count bounds forwarded to data building.
    """
    meta=ForseeCachesMetatadata.auto(meta_dir)
    sp=ForseeSuperParameter.instance(sp)

    device = DEVICE_GPU if device == 'gpu' else DEVICE_CPU

    # Only repo/test modes label samples by (author, repo).
    list_author_handle=None
    if mode=='repo' or mode=='test':
        list_author_handle=label_sample_with_author_repo

    class_number=get_class_number(meta_dir)

    if meta_dir==input_dir:
        external_data=prepare_training_set(meta_dir,None,None,meta,sp)
    else:
        external_data=caa_build_external_data(input_dir,caches_file,meta,sp,list_author_handle=list_author_handle,min_number=min_number,max_number=max_number)

    model=caa_forsee(meta,sp,class_number,device)

    # BUG FIX: the original called os.path.exists(out_dir) unconditionally,
    # which raises TypeError when out_dir is None (its default). makedirs
    # also handles nested paths where mkdir would fail.
    if out_dir and not os.path.exists(out_dir):
        os.makedirs(out_dir, exist_ok=True)

    # 'test' mode caps work for a quick sanity run.
    max_file=5000 if mode=='test' else 1000000
    i=0

    repos={}

    lay_dim = sp.lay_hidden_dim
    lex_end = lay_dim + sp.lex_hidden_dim

    for embeding,labels in embeding_model(model,external_data):
        create_time=datetime.datetime.now()
        update_time=create_time
        for one_embeding in embeding:
            # i indexes samples in the same order the model consumes them —
            # presumably guaranteed by embeding_model; verify if batching changes.
            url=external_data.features.samples[i].origin_paths

            repo_url=deduce_url(url,lang)
            if repo_url not in repos:
                repos[repo_url]={
                    'url':repo_url,
                    'create_time':str(create_time),
                    'update_time':str(update_time),
                    'vector':one_embeding,
                    # The embedding is the concatenation [layout | lexical | syntactic].
                    'layout_vec':one_embeding[:lay_dim],
                    'lexical_vec':one_embeding[lay_dim:lex_end],
                    'syntactic_vec':one_embeding[lex_end:],
                    'type':'repo'
                }
            else:
                # Accumulate; vectors are normalized to unit length before dumping.
                repos[repo_url]['vector']+=one_embeding
                repos[repo_url]['layout_vec']+=one_embeding[:lay_dim]
                repos[repo_url]['lexical_vec']+=one_embeding[lay_dim:lex_end]
                repos[repo_url]['syntactic_vec']+=one_embeding[lex_end:]

            i+=1

        if i>=max_file:
            break

    if out_dir:
        json_path=os.path.join(out_dir,f'repo_embeding{label}.json')
        # L2-normalize every stored vector and convert to plain lists for JSON.
        for entry in repos.values():
            for key in ('vector','lexical_vec','layout_vec','syntactic_vec'):
                v=entry[key]
                entry[key]=(v/np.linalg.norm(v)).tolist()

        repos=list(repos.values())

        with open(json_path,'w') as f:
            json.dump(repos,f,indent=4 if mode=='test' else None)


def persp_repos(input_dir,caches_file,device,out_dir,label=""):
    """Embed all repositories under *input_dir* against the fixed cpp caches.

    Thin CLI wrapper around `get_repo_embeding` using the
    Malicious500Group/cpp dataset and the 'persp_cpp' super-parameters.
    """
    dataset_dir='/mnt/XiaoweiGuo/data/Malicious500Group/cpp'
    sp=ForseeSuperParameter.instance('persp_cpp')

    min_number=1
    max_number=1200

    # BUG FIX: the original call omitted the `lang` positional argument of
    # get_repo_embeding(meta_dir, input_dir, lang, caches_file, sp, device, ...),
    # shifting caches_file into lang, sp into caches_file, and device into sp.
    get_repo_embeding(dataset_dir,input_dir,'cpp',caches_file,sp,device,out_dir=out_dir,mode='repo',label=label,min_number=min_number,max_number=max_number)

def persp_repo(repo_dir,repo_url,device,out_file):
    """Embed a single repository and write its normalized vectors to *out_file*.

    The model is built from the fixed Malicious500Group/cpp caches with the
    'persp_cpp' super-parameters; every file-level embedding of the repo is
    summed under *repo_url*, L2-normalized, and dumped as a one-element JSON
    list. Nothing is written when *out_file* is falsy.
    """
    dataset_dir='/mnt/XiaoweiGuo/data/Malicious500Group/cpp'
    sp=ForseeSuperParameter.instance('persp_cpp')
    meta=ForseeCachesMetatadata.auto(dataset_dir)

    min_number=1
    max_number=1000

    class_number=get_class_number(dataset_dir)

    device = DEVICE_GPU if device=='gpu' else DEVICE_CPU

    external_data=prepare_single_data(meta,[repo_dir],[""],sp,min_number,max_number)

    model=caa_forsee(meta,sp,class_number,device)

    # The embedding is the concatenation [layout | lexical | syntactic].
    lay_dim=sp.lay_hidden_dim
    lex_end=lay_dim+sp.lex_hidden_dim

    repos={}
    for batch_vectors,_labels in embeding_model(model,external_data):
        now=datetime.datetime.now()
        for vec in batch_vectors:
            entry=repos.get(repo_url)
            if entry is None:
                repos[repo_url]={
                    'url':repo_url,
                    'create_time':str(now),
                    'update_time':str(now),
                    'vector':vec,
                    'layout_vec':vec[:lay_dim],
                    'lexical_vec':vec[lay_dim:lex_end],
                    'syntactic_vec':vec[lex_end:],
                    'type':'repo'
                }
            else:
                entry['vector']+=vec
                entry['layout_vec']+=vec[:lay_dim]
                entry['lexical_vec']+=vec[lay_dim:lex_end]
                entry['syntactic_vec']+=vec[lex_end:]

    if out_file:
        # L2-normalize every stored vector and convert to plain lists for JSON.
        for entry in repos.values():
            for key in ('vector','lexical_vec','layout_vec','syntactic_vec'):
                v=entry[key]
                entry[key]=(v/np.linalg.norm(v)).tolist()

        with open(out_file,'w') as fh:
            json.dump(list(repos.values()),fh)

def persp_many_repos(repo_dirs,repo_urls,device,out_file,lang=None):
    """Embed several repositories and dump their normalized vectors to one JSON file.

    *repo_dirs* and *repo_urls* are parallel lists; each repo's file-level
    embeddings are summed under its URL, L2-normalized, and written to
    *out_file* as a JSON list. Nothing is written when *out_file* is falsy.
    """
    dataset_dir='/mnt/XiaoweiGuo/data/Malicious500Group/cpp'
    sp=ForseeSuperParameter.instance('persp_cpp')
    meta=ForseeCachesMetatadata.auto(dataset_dir)

    min_number=1
    max_number=1000

    class_number=get_class_number(dataset_dir)

    device = DEVICE_GPU if device=='gpu' else DEVICE_CPU

    model=caa_forsee(meta,sp,class_number,device)

    # The embedding is the concatenation [layout | lexical | syntactic].
    lay_dim=sp.lay_hidden_dim
    lex_end=lay_dim+sp.lex_hidden_dim

    repos={}
    for repo_dir,repo_url in zip(repo_dirs,repo_urls):
        external_data=prepare_single_data(meta,[repo_dir],[""],sp,lang,min_number,max_number)

        for batch_vectors,_labels in embeding_model(model,external_data):
            now=datetime.datetime.now()
            for vec in batch_vectors:
                entry=repos.get(repo_url)
                if entry is None:
                    repos[repo_url]={
                        'url':repo_url,
                        'create_time':str(now),
                        'update_time':str(now),
                        'vector':vec,
                        'layout_vec':vec[:lay_dim],
                        'lexical_vec':vec[lay_dim:lex_end],
                        'syntactic_vec':vec[lex_end:],
                        'type':'repo'
                    }
                else:
                    entry['vector']+=vec
                    entry['layout_vec']+=vec[:lay_dim]
                    entry['lexical_vec']+=vec[lay_dim:lex_end]
                    entry['syntactic_vec']+=vec[lex_end:]

    if out_file:
        # L2-normalize every stored vector and convert to plain lists for JSON.
        for entry in repos.values():
            for key in ('vector','lexical_vec','layout_vec','syntactic_vec'):
                v=entry[key]
                entry[key]=(v/np.linalg.norm(v)).tolist()

        with open(out_file,'w') as fh:
            json.dump(list(repos.values()),fh)

def get_batch_file(input_dir,out_file,level='dataset'):
    """Write a batch file listing '<local_dir> <full_name>' for every repo.

    With level='dataset', authors come from `read_author_set(input_dir)`;
    otherwise a single author is built from *input_dir*. Authors that fail to
    build (None) are skipped. Nothing is written when *out_file* is falsy.
    """
    if level=='dataset':
        authors=read_author_set(input_dir)
    else:
        authors=[AuthorObjectBuilder(input_dir).build()]

    # Collect (local_dir, full_name) pairs across all authors.
    pairs=[]
    for author in authors:
        if author is None:
            continue
        for repo in author.repos:
            pairs.append((repo.local_dir,repo.repo_info.full_name))

    if out_file:
        with open(out_file,'w') as fh:
            fh.writelines(f'{repo_dir} {repo_url}\n' for repo_dir,repo_url in pairs)

def persp_batch_repos(batch_file,device,out_file):
    repo_dirs=[]
    repo_urls=[]
    with open(batch_file,'r') as f:
        lines=f.readlines()
        for line in lines:
            repo_dir,repo_url=line.split()
            repo_dirs.append(repo_dir)
            repo_urls.append(repo_url)

    persp_many_repos(repo_dirs,repo_urls,device,out_file)

def insert_to_mongo(json_file,collection_name='repo_vector'):
    """Insert repo-embedding records from *json_file* into MongoDB.

    Records whose 'url' already exists in the target collection are skipped,
    so re-running on the same file is idempotent.
    """
    with open(json_file,'r') as f:
        data=json.load(f)

    # SECURITY NOTE(review): credentials are hardcoded in the connection
    # string — move them to environment variables or a config file.
    connector=MongoClient('mongodb://ewds:Hust5146279@10.12.180.129/ewds?authSource=admin')
    db=connector.get_database('ewds')
    repo_vector_collection=db.get_collection(collection_name)

    # Use a set for O(1) duplicate checks instead of a list scan per record.
    existing_urls={row['url'] for row in repo_vector_collection.find({},{'url':1})}

    data=[d for d in data if d['url'] not in existing_urls]

    # BUG FIX: insert_many raises InvalidOperation on an empty list, so only
    # insert when there is at least one new record.
    if data:
        repo_vector_collection.insert_many(data)

    for d in data:
        print(f'repo for url {d["url"]} is insert')

    print(f'insert complete. now collection {collection_name} has {len(existing_urls)+len(data)} row')
# Expose every module-level function as a CLI command via python-fire,
# e.g. `python <this_file>.py persp_batch_repos <batch_file> gpu out.json`.
if __name__=="__main__":
    fire.Fire()
    
#/home/codedataset/gxw/data/MaliciousRefine
#sshfs -p 22244 codedataset@222.20.94.23:/home/codedataset/gxw/data/MaliciousRefine /home/passwd123/XiaoweiGuo/data/remote_mapping/malicious_code