from copy import copy
from itertools import groupby
import json
import os
import io
import sys
import re
import shutil

import src.codeql_tools.config as config

# Default (module-level) configuration; the *_config() functions below rebind
# these globals for a specific dataset split.
target_path=r'D:\Code\SortByIDE\VSCode\CodeComplete\testdata\train__VARIABLE_MISUSE__SStuB.txt-00000-of-00300'  # local sample shard used by the test helpers
root_dir='data/py150ksource/train'  # where reconstructed source files are written
dataset_dirs=['/mnt/XiaoweiGuo/data/py150.varmisuse/train']  # directories holding raw JSON-lines dataset shards
index_path='indexes.json'  # where IndexFile.save writes the JSON index

def train_config():
    """Point the module-level paths at the training split."""
    global dataset_dirs, index_path, root_dir
    root_dir = 'data/py150ksource/train'
    index_path = "indexes_train.json"
    dataset_dirs = ['/mnt/XiaoweiGuo/data/py150.varmisuse/train']
    
def dev_config():
    """Point the module-level paths at the dev split."""
    global dataset_dirs, index_path, root_dir
    root_dir = 'data/py150ksource/dev'
    index_path = "indexes_dev.json"
    dataset_dirs = ['/mnt/XiaoweiGuo/data/py150.varmisuse/dev']
    
def eval_config():
    """Point the module-level paths at the eval split."""
    global dataset_dirs, index_path, root_dir
    root_dir = 'data/py150ksource/eval'
    index_path = "indexes_eval.json"
    dataset_dirs = ['/mnt/XiaoweiGuo/data/py150.varmisuse/eval']
    
def test_config():
    """Point the module-level paths at the test split."""
    global dataset_dirs, index_path, root_dir
    root_dir = 'data/py150ksource/test'
    index_path = "indexes_test.json"
    dataset_dirs = ['/mnt/XiaoweiGuo/data/py150.varmisuse/test']
    
def default_config():
    """Restore the module-level paths from the project-wide config module."""
    global dataset_dirs, index_path, root_dir
    root_dir = config.code_source
    index_path = config.index_path
    dataset_dirs = config.dataset_dir
    
replace_word='auxx.py'  # substitute output filename ('aux' is a reserved device name on Windows)
remove_name=['def __init__(']  # NOTE(review): appears unused in this file — confirm before removing

MaxBatchNumber=500  # functions per batch file before IndexFile.register_function rolls over
BlockNumber=50  # input dataset files per block directory (see main)

def keyword_protect(function_name: str, count):
    """Disambiguate a dunder method definition by appending `count`.

    'def __init__(' with count 3 becomes 'def __init__3('. Returns None when
    `function_name` is not of the form 'def __name__('.
    """
    prefix = "def __"
    suffix = "__("
    # Bug fix: raw string — '\(' in a plain literal is an invalid escape
    # sequence (SyntaxWarning on Python 3.12+, error in future versions).
    pattern = r"def __(.+?)__\("

    unique_name_pattern = "def __{}__{}("

    if function_name.startswith(prefix) and function_name.endswith(suffix):
        keyword = re.findall(pattern, function_name)[0]
        return unique_name_pattern.format(keyword, count)

    return None

def extract_protect(function_name: str):
    """Strip a numeric disambiguation suffix from a dunder definition.

    Inverse of keyword_protect: 'def __init__3(' -> 'def __init__('.
    Returns None when `function_name` is not a dunder definition.
    """
    prefix = "def __"
    # Bug fix: raw string — '\(' in a plain literal is an invalid escape
    # sequence (SyntaxWarning on Python 3.12+).
    pattern = r"def __(.+?)__[0-9]*\("

    unique_name_pattern = "def __{}__("

    if function_name.startswith(prefix):
        keywords = re.findall(pattern, function_name)
        if len(keywords) > 0:
            return unique_name_pattern.format(keywords[0])

    return None

def keyword_common(function_name: str, count):
    """Disambiguate very common function names by appending `count`.

    'def run(' with count 2 becomes 'def run__2('. Returns None when the
    name cannot be parsed or is not in the common-name list.
    """
    # Bug fix: raw string — '\(' in a plain literal is an invalid escape
    # sequence (SyntaxWarning on Python 3.12+).
    pattern = r"def (.+?)\("
    common_word = ["create", "run", "get", "update", "render", "delete", "save", "main", "setUp"]

    matches = re.findall(pattern, function_name)
    if not matches:
        return None
    keyword = matches[0]

    unique_name_pattern = "def {}__{}("

    if keyword in common_word:
        return unique_name_pattern.format(keyword, count)

    return None

def extract_common(function_name: str):
    """Strip the numeric disambiguation suffix from a common function name.

    Inverse of keyword_common: 'def run__2(' -> 'def run('. Returns None
    when the name has no suffix or is not in the common-name list.
    """
    # Bug fix: raw string — '\(' in a plain literal is an invalid escape
    # sequence (SyntaxWarning on Python 3.12+).
    pattern = r"def (.+?)__[0-9]+\("
    common_word = ["create", "run", "get", "update", "render", "delete", "save", "main", "setUp"]

    matches = re.findall(pattern, function_name)
    if not matches:
        return None
    keyword = matches[0]

    if keyword in common_word:
        return 'def {}('.format(keyword)

    return None

def unique_keyword(function_name: str, count):
    """Make a function-definition token unique by appending `count`.

    Tries the common-name rule first, then the dunder rule; if neither
    applies, the name is returned unchanged.
    """
    for strategy in (keyword_common, keyword_protect):
        renamed = strategy(function_name, count)
        if renamed is not None:
            return renamed

    return function_name

def extract_keyword(function_name: str):
    """Undo unique_keyword: strip any disambiguation suffix from the name.

    Tries the common-name rule first, then the dunder rule; if neither
    applies, the name is returned unchanged.
    """
    for strategy in (extract_common, extract_protect):
        restored = strategy(function_name)
        if restored is not None:
            return restored

    return function_name

class Example:
    """One variable-misuse example: a tokenized function plus repair metadata.

    Wraps a single JSON record. Token streams use the markers
    #NEWLINE#/#INDENT#/#UNINDENT# for layout (consumed by the module-level
    get_source()). Positions in repair_candidates/repair_targets may be
    token indexes (int) or already-resolved strings.
    """
    def __init__(self,json_data) -> None:
        #print(json_data)
        self.provenances=json_data["provenances"]
        # The first provenance names the original source file.
        self.filepath=self.provenances[0]["datasetProvenance"]["filepath"]
        self.tokens=json_data["source_tokens"]
        self.error_location=json_data["error_location"]
        self.repair_candidates=json_data["repair_candidates"]
        self.has_bug=json_data["has_bug"]
        self.repair_targets=json_data["repair_targets"]
        self.edges=json_data["edges"]
        
    def get_function_name(self,index_file=None):
        """Return the first 'def ...' token, or None if there is none.

        When an IndexFile is supplied, the name is disambiguated with its
        current frequency via unique_keyword().
        """
        for token in self.tokens:
            if token.startswith("def "):
                #print(token)
                if index_file is None:
                    return token
                
                count=index_file.get_function_freq(token)
                return unique_keyword(token,count)
        
        return None
    
    def get_function_head(self):
        """Return (signature text, index just past the first ':' token).

        Collects tokens from the 'def ' token up to — but excluding — the
        first ':' token, each followed by a single space.
        """
        function_head=""
        
        start_flag=False
        end_flag=False
        
        end_index=0
        
        for i,token in enumerate(self.tokens):
            if token.startswith("def "):
                start_flag=True
                
            if token==":":
                end_flag=True
                
            # The ':' itself ends the head and is not appended.
            if end_flag:
                end_index=i
                break
            
            if start_flag:
                function_head+=f'{token} '
                
        return function_head,end_index+1
    
    def get_origin_id(self):
        """Identifier combining the source filepath and the function name."""
        return f'{self.filepath}.{self.get_function_name()}'
    
    def get_repair_info(self):
        """Return repair metadata with token positions resolved to text."""
        repair_info={}
        repair_info["has_bug"]=self.has_bug
        # Entries may be indexes (int) into self.tokens or literal strings.
        repair_info["repair_candidate"]=[self.tokens[e] if isinstance(e,int) else e for e in self.repair_candidates]
        repair_info["repair_target"]=[self.tokens[e] if isinstance(e,int) else e for e in self.repair_targets ]
    
        return repair_info
    
    def get_group_by_info(self):
        """Map each distinct token to the list of positions where it occurs."""
        return Example.format_group_by_token(self.tokens)
    
    def get_validate_info(self):
        """Return ({candidate token texts}, (error token, its occurrence count)).

        Only integer repair candidates (token indexes) are considered.
        """
        validate_array=[]
        error_key=self.tokens[self.error_location]
        for e in self.repair_candidates:
            if isinstance(e,int):
                validate_array.append(e)
                
        #validate_array+=self.repair_targets
        
        validate_dict=set()
        key_occur=0
        for e in validate_array:
            v=self.tokens[e]
            if v==error_key:
                key_occur+=1
            validate_dict.add(v)
            
        
        return validate_dict,(error_key,key_occur)
    
    def get_text(self,locations):
        """Resolve a mixed list of token indexes / literals to token text."""
        return [self.tokens[e] if isinstance(e,int) else e for e in locations]
    
    def get_source(self,new_function_name=None,remove_decorator=False):
        """Rebuild source text from the tokens; optionally rename the function
        and drop tokens before the 'def ' (see module-level get_source)."""
        return get_source(self.tokens,new_function_name,remove_decorator)
    
    def get_source_example(self):
        """Shallow copy of this example whose tokens come from the rebuilt source."""
        new_example=copy(self)
        new_example.tokens=get_tokens(self.get_source())
        
        return new_example
    
    def to_json(self):
        """Serialize back to the dataset's JSON record format (a JSON string)."""
        json_data={}
        json_data["source_tokens"]=self.tokens
        json_data["error_location"]=self.error_location
        json_data["repair_candidates"]=self.repair_candidates
        json_data["has_bug"]=self.has_bug
        json_data["repair_targets"]=self.repair_targets
        json_data["edges"]=self.edges
        json_data["provenances"]=self.provenances
        return json.dumps(json_data)

    def get_identify_name(self):
        """Filepath concatenated with the function name; used as a grouping key."""
        return self.filepath+self.get_function_name()

    @staticmethod
    def format_group_by_token(tokens):
        """Group token positions by token text: {token: [indexes...]}."""
        index_of_tokens=range(len(tokens))
        # groupby only merges adjacent equal keys, so sort by the same key first.
        index_of_tokens=sorted(index_of_tokens,key=lambda x:tokens[x],reverse=False)
        return {k:list(v) for k,v in groupby(index_of_tokens,lambda x:tokens[x])}
    
class IndexFile:
    """Bookkeeping for extraction: group ids, per-group batch counters, and
    an on-disk JSON index recording where each extracted function went."""

    def __init__(self) -> None:
        self.indexes = {}        # group_id -> {output file -> [(function_number, dataset_path, line_number)]}
        self.group = {}          # identify name -> registration count (doubles as group id)
        self.batch_numbers = {}  # group_id -> list of per-batch function counters
        self.function_freq = {}  # raw function name -> times seen

    def register_group(self, identify_name):
        """Count a registration of `identify_name` and return the new count."""
        self.group[identify_name] = self.group.get(identify_name, 0) + 1
        return self.group[identify_name]

    def get_group_number(self, identify_name):
        """Current registration count for `identify_name` (0 if unseen)."""
        return self.group.get(identify_name, 0)

    def get_function_freq(self, function_name):
        """Times `function_name` has been recorded (0 if unseen)."""
        return self.function_freq.get(function_name, 0)

    def add_function_freq(self, function_name):
        """Record one occurrence of `function_name`; return the new count."""
        self.function_freq[function_name] = self.function_freq.get(function_name, 0) + 1
        return self.function_freq[function_name]

    def register_function(self, group_id):
        """Count one function into the current batch for `group_id`.

        Starts a new batch once the current counter exceeds MaxBatchNumber
        (the '>' comparison intentionally matches the original behavior, so a
        batch can hold MaxBatchNumber+1 entries before rolling over).
        Returns (1-based batch id, 1-based function number within the batch).
        """
        if group_id not in self.batch_numbers:
            self.batch_numbers[group_id] = [0]

        if self.batch_numbers[group_id][-1] > MaxBatchNumber:
            self.batch_numbers[group_id].append(0)

        self.batch_numbers[group_id][-1] += 1

        return len(self.batch_numbers[group_id]), self.batch_numbers[group_id][-1]

    def save_index(self, group_id, example_path, function_number, dataset_path, line_number):
        """Record where one function was written, keyed by group and file."""
        self.indexes.setdefault(group_id, {}).setdefault(example_path, []).append(
            (function_number, dataset_path, line_number))

    def save(self, path):
        """Write the index to `path` as pretty-printed JSON.

        Bug fix: the original called os.makedirs('') and raised
        FileNotFoundError when `path` had no directory component (the
        module default index_path='indexes.json' hits this). Only create a
        directory when there is one; exist_ok avoids a check-then-create race.
        """
        directory = os.path.dirname(path)
        if directory:
            os.makedirs(directory, exist_ok=True)

        with open(path, 'w') as f:
            json.dump(self.indexes, f, indent=4)

    def load(self, path):
        """Replace the in-memory index with the JSON stored at `path`."""
        with open(path, 'r') as f:
            self.indexes = json.load(f)

def load_as_json(path, limit=100000):
    """Yield parsed JSON objects from a JSON-lines file, one per line.

    Reads in ~64 KiB chunks of lines and stops after `limit` lines.
    """
    with open(path, 'r') as f:
        emitted = 0
        while True:
            chunk = f.readlines(65536)
            if not chunk:
                return
            for line in chunk:
                emitted += 1
                if emitted > limit:
                    return
                yield json.loads(line)

def count_line(path):
    """Return the number of lines in the text file at `path`."""
    with open(path, 'r') as f:
        return sum(1 for _ in f)

        

def get_source(tokens, new_function_name=None, use_identify=False):
    """Reconstruct source text from a layout-marked token stream.

    #INDENT#/#UNINDENT# adjust the tab depth, #NEWLINE# ends a line; every
    other token is written with a single leading space (or the current
    indentation when it starts a line). When `new_function_name` is given,
    the first 'def ...' token is replaced by it. When `use_identify` is
    true, tokens before the first 'def ' token are dropped (except
    #INDENT#/#NEWLINE# markers) — callers use this to strip decorators.
    """
    out = io.StringIO()

    # Locate the 'def ...' token that `new_function_name` will replace.
    def_index = None
    if new_function_name:
        for idx, tok in enumerate(tokens):
            if tok.startswith("def "):
                def_index = idx
                break

    depth = 0              # current indentation level (tabs)
    at_line_start = False  # True right after a newline was written

    for idx, tok in enumerate(tokens):
        if use_identify:
            if tok.startswith('def '):
                use_identify = False  # reached the def: keep everything from here
            elif tok in ('#INDENT#', '#NEWLINE#'):
                pass                  # layout markers survive the skip
            else:
                continue              # drop pre-def tokens (e.g. decorators)

        if def_index == idx:
            tok = new_function_name

        if tok == "#INDENT#":
            depth += 1
        elif tok == '#UNINDENT#':
            depth -= 1
        elif tok == '#NEWLINE#':
            out.write('\n')
            at_line_start = True
        elif at_line_start:
            out.write('\t' * depth + tok)
            at_line_start = False
        else:
            out.write(' ' + tok)

    out.write('\n')
    return out.getvalue()

def get_tokens(source):
    """Split reconstructed source text into whitespace-delimited tokens.

    NOTE(review): the original built a quote-aware token list (`result`)
    and then discarded it, returning `source.split()`; that dead code had no
    side effects and is removed here — behavior is unchanged.
    """
    return source.split()

def make_dir(path):
    """Ensure `root_dir/<dirname(path)>` exists; return (owner_dir, filename)."""
    owner_dir = '{}/{}'.format(root_dir, os.path.dirname(path))
    _, filename = os.path.split(path)
    # exist_ok avoids the check-then-create race of the original
    # `if not exists: makedirs` sequence.
    os.makedirs(owner_dir, exist_ok=True)
    return owner_dir, filename

def get_data(train_example):
    """Return (filepath, tokens) pulled from one raw dataset record."""
    provenance = train_example["provenances"][0]
    return provenance["datasetProvenance"]["filepath"], train_example["source_tokens"]

def extract_all_data(data, path_exists_flag=None):
    """Write each record's reconstructed source under root_dir, by filepath.

    The first write to a path truncates; later writes to the same path
    append. Bug fixes vs. the original:
    - mutable default argument (one set shared across every call) replaced
      with a fresh per-call set;
    - `get_source(tokens, True)` passed True as `new_function_name`, which
      makes get_source write a bool and raise TypeError whenever the tokens
      contain a 'def ' token. The boolean belongs to the third parameter
      (the decorator-stripping flag) — NOTE(review): confirm intent.
    """
    if path_exists_flag is None:
        path_exists_flag = set()

    for d in data:
        filepath, tokens = get_data(d)
        source = get_source(tokens, use_identify=True)

        target_dir, _ = make_dir(filepath)
        _, filename = os.path.split(filepath)

        # 'aux' is rewritten — presumably because it is a reserved device
        # name on Windows; verify against the extraction target platform.
        if filename == 'aux.py':
            filename = replace_word

        user_path = '{}/{}'.format(target_dir, filename)

        mode = 'a' if user_path in path_exists_flag else 'w'
        path_exists_flag.add(user_path)
        with open(user_path, mode, errors='ignore') as f:
            f.write(source + '\n')

def extract_data(example: Example, path_exists_flag):
    """Write `example`'s rebuilt source to its per-file output under root_dir.

    The first write to a path truncates; later writes append.
    `path_exists_flag` is the caller-owned set of already-written paths.
    """
    filepath = example.filepath
    source = example.get_source()

    target_dir, _ = make_dir(filepath)
    _, filename = os.path.split(filepath)

    # 'aux' is rewritten — presumably because it is a reserved device name
    # on Windows; verify against the extraction target platform.
    if filename == 'aux.py':
        filename = replace_word

    user_path = '{}/{}'.format(target_dir, filename)

    mode = 'a' if user_path in path_exists_flag else 'w'
    path_exists_flag.add(user_path)
    with open(user_path, mode, errors='ignore') as f:
        f.write(source + '\n')

def test_source_and_tokens():
    """Smoke test: round-trip the first 10 examples through source rebuild
    and print original vs. rebuilt token lists for manual comparison."""
    for raw in load_as_json(target_path, 10):
        original = Example(raw)
        rebuilt = original.get_source_example()

        print(original.tokens)
        print(rebuilt.tokens)

def test():
    """Smoke test: print function names and index the first 10 examples."""
    examples = load_as_json(target_path, 10)
    index_file = IndexFile()

    for i, example in enumerate(examples):
        obj = Example(example)
        print(obj.get_function_name())
        # Bug fix: save_index takes (group_id, example_path, function_number,
        # dataset_path, line_number); the original 3-argument call raised
        # TypeError. NOTE(review): the argument mapping here is inferred from
        # extract_data_by_group — confirm the intended grouping key.
        index_file.save_index(obj.filepath, target_path, i, target_path, i)

def extract_data_by_group(buffer, filename_construct_handle, dataset_path, start_line_number, index_file: IndexFile, block_id, max_examples=10000):
    """Write up to `max_examples` examples from `buffer` into per-group batch files.

    Output paths come from filename_construct_handle(block_id, group_id,
    batch_id); every written function is recorded in `index_file`. Returns
    the number of examples consumed so the caller can advance its line cursor.
    """
    data = [Example(e) for e in buffer[:min(len(buffer), max_examples)]]

    file_content = {}
    for i, e in enumerate(data):
        # Record the raw name's frequency first: get_function_name(index_file)
        # below reads it to build the disambiguated (unique) name.
        index_file.add_function_freq(e.get_function_name())
        group_id = index_file.register_group(e.get_function_name(index_file))
        batch_id, function_number = index_file.register_function(group_id)
        filename = filename_construct_handle(block_id, group_id, batch_id)
        source = e.get_source(e.get_function_name(index_file), remove_decorator=config.remove_decorator)

        file_content.setdefault(filename, "")
        file_content[filename] += source + '\n'

        # function_number is 1-based; the stored index is 0-based.
        index_file.save_index(group_id, filename, function_number - 1, dataset_path, start_line_number + i)

    for key, content in file_content.items():
        # exist_ok avoids the check-then-create race of the original code.
        os.makedirs(os.path.dirname(key), exist_ok=True)
        with open(key, 'a', errors='ignore') as f:
            f.write(content)

    return len(data)

def main():
    """Extract every dataset file into grouped batch files and save the index.

    Clears root_dir, walks all files under dataset_dirs, and streams each
    through extract_data_by_group in chunks of `max_examples`.
    """
    def construct_filename(block_id, group_id, batch_number):
        # Output layout: <root>/<block>/<group>/batch_<n>.py
        return '{}/{}/{}/batch_{}.py'.format(root_dir, block_id, group_id, batch_number)

    # Bug fix: shutil.rmtree raised FileNotFoundError on a fresh checkout
    # when root_dir did not exist yet; ignore_errors makes the wipe best-effort.
    shutil.rmtree(root_dir, ignore_errors=True)
    os.makedirs(root_dir)

    filenames = []
    for directory in dataset_dirs:
        for entry in os.listdir(directory):
            filenames.append('{}/{}'.format(directory, entry))

    index_file = IndexFile()
    max_examples = 10000

    now_block = 0

    for process, filename in enumerate(filenames):
        # Start a new block directory every BlockNumber input files.
        if process % BlockNumber == 0:
            now_block += 1
        start_line_number = 0
        examples = list(load_as_json(filename, 100000))
        total_number = len(examples)
        while len(examples) > 0:
            start_line_number += extract_data_by_group(examples, construct_filename, filename, start_line_number, index_file, now_block, max_examples)
            examples = examples[min(len(examples), max_examples):]
            print("process line: {}/{}".format(start_line_number, total_number))

        print("Done File: {}. Process: {}/{}.".format(filename, process + 1, len(filenames)))

    index_file.save(index_path)

def count_total_number():
    """Print per-file line counts and the grand total across dataset_dirs."""
    filenames = []
    for directory in dataset_dirs:
        for entry in os.listdir(directory):
            filenames.append('{}/{}'.format(directory, entry))

    total = 0
    for filename in filenames:
        number = count_line(filename)
        total += number
        print("adding number: {}".format(number))
    print("total line number: {}".format(total))
    
if __name__=="__main__":
    # CLI: no argument runs with the module defaults; one argument selects a
    # dataset split (or the local Windows test fixture for "wintest").
    if len(sys.argv)==1:
        main()
    elif len(sys.argv)==2:
        split_configs={
            "test":test_config,
            "run":train_config,
            "run_dev":dev_config,
            "run_eval":eval_config,
        }
        mode=sys.argv[1]
        if mode in split_configs:
            split_configs[mode]()
            main()
        elif mode=="wintest":
            dataset_dirs=[r"D:\Code\SortByIDE\VSCode\CodeComplete\testdata"]

            main()
        else:
            print("input error.")
#test_source_and_tokens()