import os
import numpy as np
import json

from dataclasses import dataclass

from .info_enum import ScoreEnum
from .metadata import *
from .property_selector import default_property_selector,default_label_selector,default_extend_property_selector,author_label

def default_parse(json_data):
    """Parse a person feature JSON document into a PersonSample.

    Handles two document shapes:
      * dict with "FileFeatures": the standard Java/C/C#/Python export.
      * list: a legacy C++ export where each element describes one file.
    Returns None for an empty list-shaped document.
    """
    def extract_freq_info(obj: dict):
        # Frequency-table features for one file (standard schema).
        return FileFrequencyFeatureInfo(
            comment_type=obj["CommentTypeFrequency"],
            word=obj["wordFrequency"],
            word_number_of_line=obj["WordNumberOfLineFrequency"],
            line_length=obj["LineLengthFrequency"],
            keyword=obj["keywordFrequency"],
            ast_leaves=obj["ASTLeavesFrequency"],
            identifier_length=obj["IndentifierLengthFrequency"],  # key typo exists in the data format
            access_control=obj["AccessControlFrequency"]
        )

    def extract_scalar_info(obj: dict):
        # Scalar (count-style) features for one file (standard schema).
        return FileScalarFeatureInfo(
            new_usage_number=obj["NewUsageNumber"],
            old_usage_number=obj["OldUseageNumber"],  # key typo exists in the data format
            safety_usage_number=obj["SafetyUsageNumber"],
            comment_number=obj["CommentNumber"],
            word_number=obj["WordNumber"],
            ternary_operator_number=obj["TernaryOperatorNumber"],
            control_struct_number=obj["ControlStructNumber"],
            literal_number=obj["LiteralNumber"],
            blank_line_number=obj["BlankLineNumberNumber"],
            tab_indent_number=obj["TabIndentNumber"],
            new_line_before_open_brance_number=obj["NewLineBeforeOpenBranceNumber"],
            on_line_before_open_brance_number=obj["OnLineBeforeOpenBranceNumber"]
        )

    def extract_file_info(obj: dict):
        # Build one FileInfo from a standard (dict-shaped) file entry.
        file_features = FileFeatureInfo(
            extract_scalar_info(obj["CodeFeatures"]),
            extract_freq_info(obj["CodeFeatures"]))

        return FileInfo(
            file_name=obj["FileName"],
            file_path=obj["FilePath"],
            file_length=obj["FileLength"],
            file_line_number=obj["FileLineNumber"],
            file_features=file_features
        )

    def extract_file_info_for_cpp(obj: dict):
        # Legacy C++ entries carry only frequency features; real file length
        # and line counts are unavailable, so fixed placeholders are used.
        file_features = FileFeatureInfo(frequencys=extract_freq_info_for_cpp(obj["FunctionInfo"]))

        return FileInfo(
            file_name=obj["PersonPath"],
            file_path=obj["PersonPath"],
            file_length=100,
            file_line_number=100,
            file_features=file_features
        )

    def extract_freq_info_for_cpp(obj):
        # Only the first FunctionInfo entry is used; guard against an empty
        # list (previously an IndexError), a None entry, or a missing
        # word-frequency table.
        if not obj or obj[0] is None or "wordTF" not in obj[0]:
            return FileFrequencyFeatureInfo()
        word_tf = obj[0]["wordTF"]
        # Scale term frequencies to percentages; missing values count as 0.
        return FileFrequencyFeatureInfo(
            word={k: (word_tf[k] * 100 if word_tf[k] is not None else 0) for k in word_tf}
        )

    obj = json.loads(json_data)
    if isinstance(obj, list):
        # Legacy C++ export: a list of per-file entries.
        if len(obj) == 0:
            return None
        person_info = PersonInfo(obj[0]["PersonName"], obj[0]["PersonPath"])
        file_infos = [extract_file_info_for_cpp(e) for e in obj]
    else:
        person_info = PersonInfo(obj["PersonName"], obj["PersonPath"])
        file_infos = [extract_file_info(e) for e in obj["FileFeatures"]]

    return PersonSample(person_info, file_infos)

def parse_cpp(json_data):
    """Parse the C++ feature JSON (a list of per-file entries) into a PersonSample.

    Entries that fail extraction are reported and dropped rather than
    aborting the whole person. Returns None for an empty document.
    """
    def extract_freq_info(obj: dict):
        # Frequency-table features for one file (C++ schema).
        return FileFrequencyFeatureInfo(
            comment_type=obj["CommentTypeNumber"],
            word=obj["wordTypeNumber"],
            word_number_of_line=obj["WordNumberOfLineFrequency"],
            line_length=obj["LineLengthDict"],
            keyword=obj["keywordFrequency"],
            # ast_leaves is not available in the C++ extractor output.
            identifier_length=obj["IndentifierLengthFrequency"],  # key typo exists in the data format
            access_control=obj["AccessControlFrequency"],
            function_length=obj["FunctionLength"],
            function_parameter=obj["Parameters"],
            )

    def extract_scalar_info(obj: dict):
        # Scalar (count-style) features for one file (C++ schema).
        return FileScalarFeatureInfo(
            new_usage_number=obj["NewUsageNumber"],
            old_usage_number=obj["OldUsageNumber"],
            safety_usage_number=obj["SafetyUsageNumber"],
            comment_number=obj["CommentNumber"],
            word_number=obj["TokenNumber"],
            ternary_operator_number=obj["TernaryOperatorNumber"],
            control_struct_number=obj["ControlStructNumber"],
            literal_number=obj["LiteralSumNumber"],
            blank_line_number=obj["BlankLineNumberNumber"],
            tab_indent_number=obj["TabIndentNumber"],
            new_line_before_open_brance_number=obj["NewLineBeforeOpenBranceNumber"],
            on_line_before_open_brance_number=obj["OnLineBeforeOpenBranceNumber"],
            english_number=obj["EnglishNumber"],
            camel_case_number=obj["cammelIdentifierNumber"],
            snake_ase_number=obj["underScoreIdentifierNumber"],
            function_number=obj["FunctionNumber"],
            tab_number=obj["tab_num"],
            white_space_number=obj["white_space_num"],
            white_space_indent_line_number=obj["SpaceIndentNumber"],
            anonymous_function_number=obj["anonymousFunctionNumber"],
            try_catch_block_number=obj["abnormalNumber"]
            )

    def extract_file_info(obj: dict):
        # Build one FileInfo from a C++ entry. Returns None when any expected
        # key is missing so the caller can filter the broken entry out.
        try:
            file_features = FileFeatureInfo(
                extract_scalar_info(obj["FileFeatures"]),
                extract_freq_info(obj["FileFeatures"])
            )
            return FileInfo(
                file_name=obj["PersonName"],
                file_path=obj["PersonPath"],
                # File length / line counts are taken from the JSON itself.
                file_length=obj["FileFeatures"]["FileLength"],
                file_line_number=obj["FileFeatures"]["FileLineNumber"],
                file_features=file_features
                )
        except Exception as e:
            print(f"extract happened error: {e}")
            return None

    obj = json.loads(json_data)
    if len(obj) == 0:
        return None
    person_info = PersonInfo(obj[0]["PersonName"], obj[0]["PersonPath"])
    file_infos = [extract_file_info(e) for e in obj]
    file_infos = [e for e in file_infos if e]
    return PersonSample(person_info, file_infos)

# Dispatch table: language tag -> parser for that language's feature JSON.
# Only C++ uses a dedicated schema; all other languages share default_parse.
language_mapping={
    'cpp':parse_cpp,
    'java':default_parse,
    'c':default_parse,
    'csharp':default_parse,
    'python':default_parse,
    'mix':default_parse
}

def person_extend(person_sample: PersonSample, dict_obj: dict):
    """Combine a PersonSample with its questionnaire feedback.

    ``dict_obj`` must hold the keyword arguments of QuestionnaireFeedback.
    Returns a PersonExtendSample bundling the sample with social,
    psychological and coding info extracted from the questionnaire.
    """
    def extract_social_info(questionaire: QuestionnaireFeedback):
        # Answer positions follow the questionnaire layout; index 2 is
        # skipped — presumably a question not needed here (TODO confirm).
        answers = questionaire.person_info_answers
        religious_belief = answers[0]
        sex = answers[1]
        ranking = answers[3]
        # Birth date is encoded like "1998/5/21".
        birth_date = datetime.strptime(answers[4], "%Y/%m/%d")
        in_love_time = answers[5]
        birth_province = answers[6]
        exp_province = answers[7]
        cet6 = answers[8]

        return PersonSocialInfo(
            birth_date=birth_date,
            sex=PNEnum.from_digit_str(sex),
            birth_province=get_province_id(birth_province),
            exp_province=get_province_id(exp_province),
            religious_belief=PNEnum.from_digit_str(religious_belief),
            in_love_time=ChoiceEnum.from_digit_str(in_love_time),
            ranking=ChoiceEnum.from_digit_str(ranking),
            cet6=ScoreEnum.from_digit_str(cet6)
        )

    def extract_coding_info(questionaire: QuestionnaireFeedback):
        # No coding-habit answers are extracted yet; placeholder object.
        return PersonCodingInfo()

    def extract_pysco_info(questionaire: QuestionnaireFeedback):
        # Personality scores live at answer positions 5..9.
        psyco = questionaire.psy_info_answers
        freshness, responsibility, optimistic, kindness, emotional = map(
            ScoreEnum.from_digit_str, psyco[5:10])

        return PersonPsycoInfo(
            freshness=freshness,
            responsibility=responsibility,
            optimistic=optimistic,
            kindness=kindness,
            emotional=emotional
        )

    questionaire = QuestionnaireFeedback(**dict_obj)
    social_info = extract_social_info(questionaire)
    coding_info = extract_coding_info(questionaire)
    psyco_info = extract_pysco_info(questionaire)

    return PersonExtendSample(person_sample, social_info, psyco_info, coding_info)


def person(path, lang) -> PersonSample:
    """Load and parse one person's feature file.

    Returns None when ``lang`` has no registered parser, when parsing
    produces no sample, or when the sample has no file infos.
    """
    parser = language_mapping.get(lang)
    if parser is None:
        # BUG FIX: an unregistered language used to crash with
        # "'NoneType' object is not callable"; treat it as "no sample".
        return None

    with open(path, 'r', encoding='utf-8') as f:
        instance = parser(f.read())

    # Reject empty results: no sample, missing file list, or zero files.
    if instance is None or not instance.file_infos:
        return None
    return instance

def is_exclude_author(person: PersonSample):
    """Return True when ``person`` must be excluded from the dataset.

    A missing sample is excluded, as is any author whose name contains
    a blocked word (currently only "Demo" accounts).
    """
    if person is None:
        return True
    blocked_words = ["Demo"]
    author_name = person.person_info.person_name
    return any(word in author_name for word in blocked_words)

def create_person_from_dataset(dir, lang='java', suffix=('.json',)):
    """Yield a parsed PersonSample for every matching file in ``dir``.

    ``suffix`` lists the accepted file extensions (default changed from a
    mutable list to an equivalent tuple). Samples whose author is excluded
    (see is_exclude_author) are skipped.
    """
    def suffix_check(file_name):
        # Accept the file only when its extension is in ``suffix``.
        _, ext = os.path.splitext(file_name)
        return ext in suffix

    # NOTE: parameter ``dir`` shadows the builtin; kept for caller compatibility.
    for file_name in os.listdir(dir):
        if not suffix_check(file_name):
            continue
        instance = person(os.path.join(dir, file_name), lang)
        if not is_exclude_author(instance):
            yield instance

def create_person_extends_from_dataset(source_dataset_dirs: list[str], author_extend_data_paths: list[str], lang="java", suffix=('.json',)):
    """Yield PersonExtendSample objects: parsed code features joined with
    questionnaire data keyed by author name.

    ``source_dataset_dirs`` may be a single directory string or a list.
    The same author appearing several times has the file infos merged.
    Only authors present in the questionnaire files are yielded.
    """
    if isinstance(source_dataset_dirs, str):
        source_dataset_dirs = [source_dataset_dirs]

    authors = []
    for source_dataset_dir in source_dataset_dirs:
        authors += list(create_person_from_dataset(source_dataset_dir, lang, suffix))

    author2feature = {}
    for author in authors:
        person_name = author.person_info.person_name
        # BUG FIX: the test used to be ``person_name not in authors`` — a
        # string compared against a list of PersonSample objects, which is
        # always True, so duplicate authors were overwritten instead of merged.
        if person_name not in author2feature:
            author2feature[person_name] = author
        else:
            author2feature[person_name].file_infos += author.file_infos

    # Questionnaire data: one JSON object per file, merged by author name.
    author_extend_infos = {}
    for path in author_extend_data_paths:
        with open(path, 'r', encoding='utf-8') as f:
            author_extend_infos.update(json.loads(f.read()))

    for author in author_extend_infos:
        person_sample = author2feature.get(author)
        if person_sample:
            yield person_extend(person_sample, author_extend_infos[author])

def make_author_attribution_dataset(source_dataset_dirs: list[str], lang='java', suffix=('.json',), min_number=5, property_selector=default_property_selector, extend_property_selector=default_extend_property_selector):
    """Build an (x, y, x_extend) authorship-attribution dataset.

    x: float32 feature matrix, one row per source file.
    y: float32 author labels aligned with x.
    x_extend: per-row explanatory details; not used for classification.
    Authors with fewer than ``min_number`` files are dropped.
    """
    if isinstance(source_dataset_dirs, str):
        source_dataset_dirs = [source_dataset_dirs]

    persons = []
    for source_dataset_dir in source_dataset_dirs:
        persons += list(create_person_from_dataset(source_dataset_dir, lang, suffix))

    x = []         # feature rows
    y = []         # labels
    x_extend = []  # per-row details that explain x

    for person in persons:
        # BUG FIX: the threshold was hard-coded to 5, ignoring ``min_number``.
        if len(person.file_infos) < min_number:
            continue
        # The label is constant per author; hoisted out of the file loop.
        person_label = author_label(person)
        for file_info in person.file_infos:
            x.append(property_selector(file_info))
            x_extend.append(extend_property_selector(file_info, person))
            y.append(person_label)

    return np.array(x, dtype=np.float32), np.array(y, dtype=np.float32), x_extend

def make_author_perspertive_dataset(source_dataset_dirs:list[str],author_extend_data_paths:list[str],lang="java",suffix=['.json'],min_number=5,property_selector=default_property_selector,label_selector=default_label_selector,extend_property_selector=default_extend_property_selector):
    """Build an (x, y, x_extend) dataset from questionnaire-linked authors.

    Each row of x holds one file's features; y holds the value produced by
    ``label_selector`` for the file's author; x_extend carries per-row
    details. Authors with fewer than ``min_number`` files are skipped.
    """
    samples = create_person_extends_from_dataset(
        source_dataset_dirs, author_extend_data_paths, lang, suffix)

    features, labels, details = [], [], []

    for sample in samples:
        file_infos = sample.person_sample.file_infos
        if len(file_infos) < min_number:
            continue
        for file_info in file_infos:
            row = property_selector(file_info)
            detail = extend_property_selector(file_info, sample)
            label = label_selector(sample)

            features.append(row)
            labels.append(label)
            details.append(detail)

    return np.array(features, dtype=np.float32), np.array(labels, dtype=np.float32), details

def make_dataset_summary(source_dataset_dirs: list[str], author_extend_data_paths: list[str] = (), lang='java', min_number=5, property_selector=default_property_selector):
    """Summarize a dataset as a DatasetProperty: author/file counts, the
    questionnaire-linked subsets, and one example feature vector.

    An author is "valid" when it has at least ``min_number`` files.
    (The mutable ``[]`` default was replaced by an equivalent tuple.)
    """
    # Merge persons found across all dirs by author name.
    persons = {}
    for source_dir in source_dataset_dirs:
        for p in create_person_from_dataset(source_dir, lang):
            if p.person_info.person_name not in persons:
                persons[p.person_info.person_name] = p
            else:
                persons[p.person_info.person_name].file_infos += p.file_infos

    # Take the first available file as the example feature vector.
    # (Previously the search kept scanning every remaining person/file
    # even after the sample had been found.)
    feature_vector_sample = ""
    for p in persons.values():
        if p.file_infos:
            feature_vector_sample = str(property_selector(p.file_infos[0]))
            break

    source_file_number = sum(len(p.file_infos) for p in persons.values())
    valid_source_file_number = sum(
        len(p.file_infos) for p in persons.values() if len(p.file_infos) >= min_number)
    author_number = len(persons)
    valid_author_number = sum(
        1 for p in persons.values() if len(p.file_infos) >= min_number)

    link_author_number = 0
    link_source_number = 0
    valid_link_author_number = 0
    valid_link_source_number = 0

    if len(author_extend_data_paths) > 0:
        link_persons = list(create_person_extends_from_dataset(
            source_dataset_dirs, author_extend_data_paths, lang))
        link_author_number = len(link_persons)
        link_source_number = sum(len(p.person_sample.file_infos) for p in link_persons)
        valid_link_source_number = sum(
            len(p.person_sample.file_infos) for p in link_persons
            if len(p.person_sample.file_infos) >= min_number)
        valid_link_author_number = sum(
            1 for p in link_persons if len(p.person_sample.file_infos) >= min_number)

    return DatasetProperty(
        author_number=author_number,
        link_author_number=link_author_number,
        link_source_file_number=link_source_number,
        feature_vector_example=feature_vector_sample,
        source_file_number=source_file_number,
        valid_link_author_number=valid_link_author_number,
        valid_author_number=valid_author_number,
        valid_link_source_file_number=valid_link_source_number,
        valid_source_file_number=valid_source_file_number)


@dataclass
class DatasetProperty:
    # Summary statistics describing a parsed dataset (built by make_dataset_summary).
    # "valid" means the author has at least ``min_number`` files; "link" means
    # the author also has questionnaire (extend) data.
    author_number:int=0  # distinct authors found in the source dirs
    link_author_number:int=0  # authors that also have questionnaire data
    valid_author_number:int=0  # authors with enough files
    valid_link_author_number:int=0  # linked authors with enough files
    source_file_number:int=0  # total source files across all authors
    valid_source_file_number:int=0  # files belonging to valid authors
    valid_link_source_file_number:int=0  # files belonging to valid linked authors
    link_source_file_number:int=0  # files belonging to linked authors
    feature_vector_example:str=""  # stringified sample feature vector, for display