from __future__ import annotations

import json
import os

from codesecurity.data.package_extract import iter_dir
from codesecurity.tasks.code_perspective_project.objects import (
    Author, AuthorInfo, ProgramLanguage, Repository, RepositoryInfo, Source,
    SourceInfo)


class AuthorObjectBuilder:
    """Builds an ``Author`` object from an on-disk author directory.

    Expected layout::

        <author>/<author>.json        author metadata
        <author>/<Author.RepoMeta>    repository metadata (json or jsonl)
        <author>/<repo-name>/...      one sub-directory per cloned repository
    """

    def __init__(self, path: str) -> None:
        self.path = path
        # Normalize away a single trailing separator so os.path.split
        # yields the author directory name, not an empty string.
        if self.path.endswith('/') or self.path.endswith('\\'):
            self.path = self.path[:-1]

    def get_author_name(self) -> str:
        """Return the author name, i.e. the last path component."""
        author_name = os.path.split(self.path)[-1]
        return author_name

    def get_all_repo_path(self) -> list[str]:
        """Return absolute paths of all sub-directories (candidate repos)."""
        entries = [os.path.join(self.path, e) for e in os.listdir(self.path)]
        return [e for e in entries if os.path.isdir(e)]

    def get_author_info(self):
        """Load ``<author>/<author>.json``; None if missing or not a dict."""
        author_info_path = os.path.join(self.path, f'{self.get_author_name()}.json')
        if not os.path.exists(author_info_path):
            return None
        with open(author_info_path, 'rb') as f:
            data = json.load(f)
        if not isinstance(data, dict):
            return None
        return AuthorInfo(**data)

    def get_all_repo_info(self):
        """Load repo metadata as a list of RepositoryInfo; None on failure.

        Falls back to the ``.jsonl`` variant when the plain file is absent.
        """
        repos_info_path = os.path.join(self.path, Author.RepoMeta)
        if not os.path.exists(repos_info_path):
            # Some datasets store the metadata with a trailing 'l' (.jsonl).
            repos_info_path = repos_info_path + 'l'
            if not os.path.exists(repos_info_path):
                return None

        try:
            with open(repos_info_path, 'r', encoding='utf-8') as f:
                infos = [info for info in json.load(f) if isinstance(info, dict)]
            return [RepositoryInfo(**info) for info in infos]
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit) to the failures this read can
        # realistically produce: I/O errors, bad JSON, bad field names.
        except (OSError, ValueError, TypeError):
            return None

    def build(self):
        """Assemble the Author object; None when metadata is unavailable."""
        author_info = self.get_author_info()
        all_repo_info = self.get_all_repo_info()

        if author_info is None or all_repo_info is None:
            return None

        repos = []
        for repo_info in all_repo_info:
            repo_path = os.path.join(self.path, repo_info.name)
            # Metadata may list repos that were never cloned; skip those.
            if os.path.exists(repo_path):
                repo_builder = RepoObjectBuilder(repo_path, repo_info)
                repos.append(repo_builder.build())

        return Author(repos, author_info, self.path)

        
class RepoObjectBuilder:
    """Builds a ``Repository`` object from a cloned repository directory."""

    def __init__(self, path: str, repo_info: RepositoryInfo) -> None:
        self.path = path
        self.repo_info = repo_info
        # Normalize away a single trailing separator.
        if self.path.endswith('/') or self.path.endswith('\\'):
            self.path = self.path[:-1]

        # Only files with these extensions are collected as sources.
        self.filter = ('.c', '.cpp', '.java', '.py', '.js')

    def build(self):
        """Walk the repository and wrap every matching source file."""
        sources = []
        for path in iter_dir(self.path):
            if not path.endswith(self.filter):
                continue

            if os.path.isfile(path):
                source_obj = self.build_source_obj(path)
                sources.append(source_obj)

        repo_obj = Repository(sources, self.repo_info, self.path)

        return repo_obj

    def build_source_obj(self, path):
        """Create a Source for one file (size, language, origin path)."""
        file_size = os.path.getsize(path)
        lang = ProgramLanguage.match(path)
        # Path relative to the author directory, i.e. "<repo-name>/...".
        # The repo root is self.path, so the offset is known exactly; the
        # previous `self.path.find(repo name)` mis-sliced when the repo
        # name also occurred earlier in the path (or returned -1).
        origin_path = self.repo_info.name + path[len(self.path):]

        source_info = SourceInfo(origin_path=origin_path, size=file_size, lang=lang)
        return Source(path, source_info)

def read_author_set(path):
    """Load every author directory under *path* into Author objects.

    Entries whose metadata is missing or malformed are dropped.
    """
    authors = [os.path.join(path, entry) for entry in os.listdir(path)]

    built = (AuthorObjectBuilder(p).build() for p in authors)
    author_objs = [obj for obj in built if obj is not None]

    print(f"Loading Complete. Legal Author: {len(author_objs)}/{len(authors)}")

    return author_objs

def read_core_author_set(path, k=1000):
    """Return up to *k* Author objects ranked by a popularity score.

    Score = 10 * followers + sum over non-fork repos of
    (watchers_count + 2 * forks_count). Authors scoring below 500 are
    discarded before ranking.
    """
    def core_eval(author_builder: AuthorObjectBuilder):
        # Score an author from metadata only (no repo contents are read).
        author_info = author_builder.get_author_info()
        repo_infos = author_builder.get_all_repo_info()
        if author_info is None or repo_infos is None:
            return 0

        score = 0
        score += 10 * author_info.followers

        for repo_info in repo_infos:
            if not repo_info.fork:
                score += repo_info.watchers_count
                score += 2 * repo_info.forks_count

        return score

    authors = [os.path.join(path, e) for e in os.listdir(path)]

    print(f"find {len(authors)} author in dataset.")

    # Progress is reported every ~10% of the scan. Guard with max(1, ...):
    # the original `int(len(authors)*0.1)` is 0 for fewer than 10 authors,
    # which made `i % ...` raise ZeroDivisionError.
    report_every = max(1, int(len(authors) * 0.1))

    author_builders = []
    high_score = 0
    total_score = 0
    for i, e in enumerate(authors, start=1):
        builder = AuthorObjectBuilder(e)
        score = core_eval(builder)

        if score > high_score:
            high_score = score

        total_score += score

        if i % report_every == 0:
            print(f'Potential core author number: {len(author_builders)}.\nProcess: {i}/{len(authors)}')
            print(f'Highest/Avg score:{high_score}/{total_score//i}')

        if score < 500:
            continue
        author_builders.append([builder, score])

    author_builders.sort(key=lambda item: item[1], reverse=True)

    # Keep a small surplus so builds that fail (return None) can be
    # absorbed while still leaving k usable authors.
    author_builders = author_builders[:k + 20]

    core_authors = [builder.build() for builder, score in author_builders]
    core_authors = [e for e in core_authors if e]
    # Fixed: the original computed this slice but discarded the result,
    # so more than k authors could be returned.
    core_authors = core_authors[:k]

    print(f"Loading Complete. Find {len(author_builders)} core authors in {len(authors)} authors.")
    print(f'The top-{min(len(core_authors),k)} will be used.')

    return core_authors

def data_migration(authors: list[Author], new_dir):
    """Migrate each author's on-disk data into *new_dir*, logging progress."""
    total_size_GB = round(sum(a.size for a in authors) / (1024 ** 3), 4)
    print(f'migrate data size: {total_size_GB}GB')

    for author in authors:
        file_number = sum(len(repo.sources) for repo in author.repos)
        print(f'find {file_number} file and {len(author.repos)} repo in {author.author_info.login}.')
        author.migrate(new_dir)
        print(f'{author.author_info.login}: migrate {len(author.repos)} repo to {new_dir}.')
   
def group_by_language(authors: list[Author], langs: list[ProgramLanguage], new_dir):
    """Split authors per language and migrate each slice to its own folder."""
    os.makedirs(new_dir, exist_ok=True)

    for lang in langs:
        target_dir = os.path.join(new_dir, lang.value)
        data_migration([author.filter(lang) for author in authors], target_dir)
    
def make_dataset(authors: list[Author], new_dir):
    """Prepare *new_dir* for a dataset build (currently only creates it)."""
    os.makedirs(new_dir, exist_ok=True)
    

    
    
    
# def extract(dataset_dir,filter):
#     pass