import os
import sys
import logging


# Make the project's src/ packages importable no matter which working
# directory the script is launched from.
dir_path=os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0,os.path.join(dir_path,'src'))
sys.path.insert(0,os.path.join(dir_path,'src/codeql_adapter'))
sys.path.insert(0,os.path.join(dir_path,'src/dataset_adapter'))

# These imports depend on the sys.path entries added above; the wildcard
# import supplies (at least) Example and load_as_json used below.
from collections import Counter
from src.codeql_tools.preprocess import *

# Maximum number of entries kept in the module-level LRU cache.
_CACHE_CAPACITY = 600


def get_caches(index):
    """Return the cached object for *index*, or None on a miss.

    Lazily initializes the module-level cache state (`_caches` maps
    index -> object, `_caches_list` holds indices in most-recently-used
    first order) and refreshes the LRU position of *index* on a hit.
    """
    global _caches, _caches_list
    if "_caches" not in globals():
        _caches = {}
    if "_caches_list" not in globals():
        _caches_list = []

    if index not in _caches:
        return None

    # Move the hit entry to the front (most-recently-used position).
    _caches_list.remove(index)
    _caches_list.insert(0, index)

    return _caches[index]


def save_caches(index, obj):
    """Store *obj* under *index*, evicting the least-recently-used entry
    once the cache would exceed _CACHE_CAPACITY entries.
    """
    global _caches, _caches_list
    if "_caches" not in globals():
        _caches = {}
    if "_caches_list" not in globals():
        _caches_list = []

    # Fix: re-saving an existing key must refresh its LRU position;
    # the original only inserted absent keys, so a key that was saved
    # repeatedly kept its old position and could be evicted as "stale".
    if index in _caches_list:
        _caches_list.remove(index)
    _caches_list.insert(0, index)

    if len(_caches_list) > _CACHE_CAPACITY:
        # Drop the least-recently-used entry (tail of the list).
        _caches.pop(_caches_list[-1])
        _caches_list = _caches_list[:-1]

    _caches[index] = obj

def example_id(example:Example):
    """Order- and multiplicity-independent fingerprint of an example.

    Sums the hashes of the distinct tokens, so two examples with the
    same token *set* receive the same id regardless of token order or
    repetition.
    """
    return sum(hash(token) for token in set(example.tokens))

def get_all_example_id(dirs):
    """Tally example-id occurrences across every file found in *dirs*.

    Returns a (Counter, dict) pair: the Counter maps example id to its
    occurrence count; the dict is kept for interface compatibility but
    is currently never populated.
    """
    id_counter = Counter()
    id_example = {}

    all_files = []
    for directory in dirs:
        all_files.extend(os.path.join(directory, name) for name in os.listdir(directory))

    for i, filepath in enumerate(all_files):
        for raw_example in load_as_json(filepath):
            id_counter[example_id(Example(raw_example))] += 1
        # Progress indicator, rewritten in place on the same console line.
        print(f"process: {i}/{len(all_files)}     ", end='\r')

    return id_counter, id_example

def get_example_id_freq(id_counter:Counter):
    """Histogram of duplication counts.

    Maps each occurrence count in *id_counter* to the number of distinct
    ids that occur exactly that many times.
    """
    return Counter(id_counter.values())

def percent(number):
    """Format a fraction (e.g. 0.25) as a percentage string like '25.0%'."""
    value = round(number * 100, 3)
    return f"{value}%"

def duplicate_summary(dirs, dataset_name=""):
    """Log the duplicate ratio and the top-5 duplication buckets for one split.

    Returns the (id Counter, id->example dict) pair produced by
    get_all_example_id so callers can reuse it.
    """

    def topk_duplicate(all_ids, k=3):
        # For the k most frequent duplication counts, yield
        # (duplication count, fraction of all examples it accounts for).
        number_freq = get_example_id_freq(all_ids)
        total = sum(all_ids[e] for e in all_ids)
        for key, number in number_freq.most_common(k):
            yield key, number * key / total

    ids, examples = get_all_example_id(dirs)

    # Terminate the '\r' progress line printed by get_all_example_id.
    print()

    total = sum(ids[e] for e in ids)

    logging.info(f"{dataset_name} duplicate ratio : {percent(1-len(ids)/total)}")

    logging.info('duplicate_number\tpercent:')
    for key, p in topk_duplicate(ids, 5):
        logging.info(f'{key}\t{percent(p)}')

    return ids, examples

def cross_duplicate(origin_dirs, target_dirs, task_name=""):
    """Log the worst-case fraction of examples shared between two splits."""
    origin_ids, _ = get_all_example_id(origin_dirs)
    target_ids, _ = get_all_example_id(target_dirs)

    # Ids that appear in both splits.
    duplicate_ids = set(origin_ids).intersection(target_ids)

    origin_total = sum(origin_ids.values())
    target_total = sum(target_ids.values())

    origin_duplicate_number = sum(origin_ids[dup] for dup in duplicate_ids)
    target_duplicate_number = sum(target_ids[dup] for dup in duplicate_ids)

    # Report whichever split has the larger overlap ratio.
    cross_duplicate_percent = percent(
        max(origin_duplicate_number / origin_total,
            target_duplicate_number / target_total))

    logging.info(f'{task_name} cross duplicate ratio:{cross_duplicate_percent}')


# Send all summary output to summary.log, overwriting any previous run.
logging.basicConfig(filename='summary.log',
                    filemode='w',
                    format="%(asctime)s %(message)s",
                    level=logging.DEBUG)

# Dataset split locations.  NOTE(review): machine-specific absolute
# paths — adjust before running on another host.
train_dirs=['/mnt/XiaoweiGuo/data/py150.varmisuse/train']
eval_dirs=['/mnt/XiaoweiGuo/data/py150.varmisuse/eval']
dev_dirs=['/mnt/XiaoweiGuo/data/py150.varmisuse/dev']

# Per-split duplicate statistics.
duplicate_summary(train_dirs,'train')
duplicate_summary(eval_dirs,'eval')
duplicate_summary(dev_dirs,'dev')

# Cross-split leakage: overlap of eval/dev examples with train.
cross_duplicate(eval_dirs,train_dirs,'train-eval')
cross_duplicate(train_dirs,dev_dirs,'train-dev')

# train_ids,train_examples=get_all_example_id(train_dirs)
# eval_ids,eval_examples=get_all_example_id(eval_dirs)


# train_ids_freq=get_example_id_freq(train_ids)
# eval_ids_freq=get_example_id_freq(eval_ids)        


# print(train_ids_freq)
# print(eval_ids_freq)

# train_unique_ids=set(train_ids)
# eval_unique_ids=set(eval_ids)

# six_example_in_train_number=6*train_ids_freq[6]
# six_example_in_eval_number=6*eval_ids_freq[6]

# train_total=sum([train_ids[e] for e in train_ids])
# eval_total=sum([eval_ids[e] for e in eval_ids])

# print(six_example_in_train_number/train_total)
# print(six_example_in_eval_number/eval_total)

# overlap_ids=train_unique_ids.intersection(eval_unique_ids)

# overlap_number=sum([eval_ids[e] for e in overlap_ids])

# print(overlap_number/eval_total)

# overlap_ids_list=list(overlap_ids)

# train_example=train_examples[overlap_ids_list[0]]
# eval_example=eval_examples[overlap_ids_list[0]]


# print(train_example.get_source())
# print(eval_example.get_source())