import random
import numpy as np

from collections import Counter
from sklearn.ensemble import RandomForestClassifier
from src.feature_extract import default_property_selector,default_label_selector,author_attribution_specify_extend_property_selector,make_author_attribution_dataset,make_author_perspertive_dataset,sex_label



def train_valid_split(x, y, detail, random_seed=None, train_ratio=0.8):
    """Shuffle and split a dataset into train/validation partitions.

    Args:
        x: 2-D feature matrix, indexable with a row-index list (e.g. numpy).
        y: labels, 1-D or 2-D, indexed the same way as ``x``.
        detail: per-sample metadata list carried along with the split.
        random_seed: optional seed for a reproducible shuffle. ``None``
            means "do not seed"; 0 is honored as a valid seed.
        train_ratio: fraction of samples assigned to the training split
            (previously hard-coded to 0.8; default keeps old behavior).

    Returns:
        ((train_x, train_y, train_detail), (val_x, val_y, val_detail))
    """
    # Bug fix: `if random_seed:` silently skipped seeding for seed 0,
    # even though callers pass 0 expecting determinism. Compare to None.
    if random_seed is not None:
        random.seed(random_seed)

    sample_number = len(x)

    # A full-length sample without replacement is a random permutation.
    shuffle_indexes = random.sample(list(range(sample_number)), sample_number)

    train_number = int(sample_number * train_ratio)
    train_idx = shuffle_indexes[:train_number]
    val_idx = shuffle_indexes[train_number:]

    def _take(indexes):
        # y may be 1-D or 2-D; only slice the column axis when it exists.
        sub_y = y[indexes, :] if len(np.shape(y)) > 1 else y[indexes]
        return x[indexes, :], sub_y, [detail[i] for i in indexes]

    return _take(train_idx), _take(val_idx)

def data_balance(data: tuple, author_map, hash_func, group_number=3):
    """Oversample minority label groups until every label group is as
    large as the biggest one.

    Each synthetic row is the average of up to ``group_number`` randomly
    sampled feature rows belonging to the same author as a randomly
    chosen existing sample of that label.

    Args:
        data: ``(x, y, detail)`` triple of features, labels and metadata.
        author_map: maps one ``detail`` entry to its author key.
        hash_func: maps one label to a hashable group key.
        group_number: max number of author rows averaged per synthetic row.

    Returns:
        (balanced_x, balanced_y, balanced_detail) with equal-sized
        label groups; arrays for x/y, a plain list for detail.
    """
    x, y, detail = data

    # Feature rows grouped by author key, and (detail, label, features)
    # triples grouped by label hash.
    rows_by_author = {}
    samples_by_label = {}
    for index, row in enumerate(x):
        rows_by_author.setdefault(author_map(detail[index]), []).append(x[index])
        samples_by_label.setdefault(hash_func(y[index]), []).append(
            (detail[index], y[index], row)
        )

    group_sizes = [len(group) for group in samples_by_label.values()]
    assert min(group_sizes) >= 3, "数据平衡要求每一类型的个数至少为3"

    target_size = max(group_sizes)

    balance_x = []
    balance_y = []
    balance_detail = []

    for samples in samples_by_label.values():
        pool = list(samples)
        while len(pool) < target_size:
            seed_detail, seed_label, _ = random.choice(pool)
            author_rows = rows_by_author[author_map(seed_detail)]
            k = min(len(author_rows), group_number)
            # Synthetic features: mean of k same-author rows.
            synthetic = sum(random.sample(author_rows, k)) / k
            pool.append((seed_detail, seed_label, synthetic))

        for entry_detail, entry_label, entry_features in pool:
            balance_x.append(entry_features)
            balance_y.append(entry_label)
            balance_detail.append(entry_detail)

    return np.array(balance_x), np.array(balance_y), balance_detail

def data_reinforce(data: tuple, hash_func, group_number=3, random_seed=None):
    """Augment a dataset by replacing each sample with the average of
    itself and ``group_number - 1`` random same-label feature rows.

    The output has ``len(x) * group_number`` samples, cycling over the
    originals. The averaging group is capped by the size of the smallest
    label group so sampling without replacement never fails.

    Args:
        data: ``(x, y, detail)`` triple; ``detail`` is unused here.
        hash_func: maps one label to a hashable group key.
        group_number: rows averaged per output sample, and the dataset
            size multiplier (the multiplier uses the requested value
            even when the averaging group is capped).
        random_seed: optional seed. ``None`` means "do not seed"; 0 is
            honored as a valid seed.

    Returns:
        (reinforced_x, reinforced_y) lists.
    """
    x, y, _detail = data

    # Output size uses the requested multiplier, computed before the cap.
    number = len(x) * group_number

    # Bug fix: `if random_seed:` silently skipped seeding for seed 0,
    # even though callers pass 0 expecting determinism. Compare to None.
    if random_seed is not None:
        random.seed(random_seed)

    # Feature rows grouped by label hash.
    x_group = {}
    for i, row in enumerate(x):
        x_group.setdefault(hash_func(y[i]), []).append(row)

    min_group_size = min(len(rows) for rows in x_group.values())
    group_number = min(min_group_size, group_number)

    reinforce_x = []
    reinforce_y = []

    for i in range(number):
        target_x = x[i % len(x)]
        target_y = y[i % len(y)]
        target_group = x_group[hash_func(target_y)]

        class_indexes = list(range(len(target_group)))
        sample_indexes = (
            random.sample(class_indexes, group_number - 1)
            if group_number >= 2
            else []
        )

        sample_rows = [target_group[index] for index in sample_indexes]
        sample_rows.append(target_x)

        # Mean of the group (the target itself may be drawn again).
        reinforce_x.append(sum(sample_rows) / group_number)
        reinforce_y.append(target_y)

    return reinforce_x, reinforce_y

def run_author_attribution_task(source_dir):
    """Train a random-forest author-attribution model on the Java sources
    under ``source_dir`` and print its accuracy on the training set."""
    x, y, detail = make_author_attribution_dataset(
        source_dir, 'java', property_selector=default_property_selector
    )

    model = RandomForestClassifier(random_state=0)
    model.fit(x, y)

    print(f"train: {model.score(x,y)}")
    
def run_author_perspective_task(source_dir, extend_data_paths):
    """Full pipeline for the author-perspective (sex) task: build the
    dataset, balance it per label, split train/validation, reinforce both
    splits, then train and evaluate a random forest."""

    def label_hash(label_vector):
        # Order-insensitive group key: hash the sum of the element hashes.
        return hash(sum(hash(component) for component in label_vector))

    x, y, detail = make_author_perspertive_dataset(
        source_dir,
        extend_data_paths,
        "cpp",
        label_selector=sex_label,
        extend_property_selector=author_attribution_specify_extend_property_selector,
    )

    x, y, detail = data_balance((x, y, detail), lambda d: d[0], label_hash)

    train_data, vali_data = train_valid_split(x, y, detail, 0)

    train_x, train_y = data_reinforce(train_data, label_hash, 1, 0)
    val_x, val_y = data_reinforce(vali_data, label_hash, 1, 0)

    # Label distributions, for the report printed below.
    train_label_counter = Counter()
    for labels in train_y:
        train_label_counter.update(labels)

    val_label_counter = Counter()
    for labels in val_y:
        val_label_counter.update(labels)

    model = RandomForestClassifier(random_state=0)
    model.fit(train_x, train_y)

    print(f"train: {model.score(train_x,train_y)}")
    print(f"val: {model.score(val_x,val_y)}")

    print(f"train distribute: {train_label_counter}")
    print(f"val distribute: {val_label_counter}")
    
# Guard the script entry point so importing this module for its helper
# functions does not trigger a full training run as a side effect.
if __name__ == "__main__":
    source_dir = 'data/cpp/experiment'
    extend_data_paths = ["data/psychology/psy.json"]

    run_author_perspective_task(source_dir, extend_data_paths)