import random
import numpy as np

from sklearn.ensemble import RandomForestClassifier


from src.feature_extract import make_author_attribution_dataset,default_property_selector,default_extend_property_selector
from src.tasks.util import train_valid_split,log
from src.feature_extract.dataset_maker import make_dataset_summary

# Module-level feature-selection hooks. Rebinding these swaps which code
# properties feed the attribution dataset everywhere in this module.
use_property_selector=default_property_selector
use_extend_property_selector=default_extend_property_selector

class AuthorAttributionModelAdapter:
    """Adapter that wraps arbitrary train/score callables behind a
    uniform model interface.

    Any pair of callables with signature ``(x, y) -> Any`` can be
    plugged in; ``default_model`` supplies a random-forest-backed pair.
    """

    def __init__(self, train_handle, score_handle) -> None:
        # Both handles take (x, y); their return values are passed
        # through unchanged by train()/score().
        self.train_handle = train_handle
        self.score_handle = score_handle

    def train(self, x, y):
        """Delegate fitting to the wrapped train callable."""
        return self.train_handle(x, y)

    def score(self, x, y):
        """Delegate evaluation to the wrapped score callable."""
        return self.score_handle(x, y)

    @staticmethod
    def default_model():
        """Return an adapter around a RandomForestClassifier with a
        fixed random_state for reproducibility."""
        forest = RandomForestClassifier(random_state=0)

        def fit_handle(x, y):
            return forest.fit(x, y)

        def score_handle(x, y):
            return forest.score(x, y)

        return AuthorAttributionModelAdapter(fit_handle, score_handle)

def author_attribution_data_reinforce(data,group_number=3,random_seed=None):
    """Augment an author-attribution dataset with synthetic samples.

    Each synthetic sample blends ``group_number`` feature vectors drawn
    from the same class: their mean, jittered by uniform noise scaled to
    the per-feature spread of the chosen rows.

    Args:
        data: tuple ``(x, y, detail)`` — feature vectors, labels (each
            label is indexable; ``label[0]`` identifies the class), and
            extra detail (unused here).
        group_number: rows blended per synthetic sample; clamped to the
            size of the smallest class.
        random_seed: optional seed for the ``random`` module (controls
            row sampling). NOTE: the jitter uses ``np.random`` and is
            not seeded here.

    Returns:
        ``(reinforce_x, reinforce_y)`` — lists of synthesized feature
        vectors and their labels, ``len(x) * group_number`` items long
        (the *requested* group_number, even if clamped below).
    """
    def label_hash(_y):
        return _y[0]

    x,y,_=data

    # Output size uses the requested group_number, before clamping.
    number=len(x)*group_number

    # BUG FIX: the original tested ``if random_seed:``, which silently
    # skipped seeding for seed 0 (falsy) — and callers do pass 0.
    if random_seed is not None:
        random.seed(random_seed)

    # Bucket feature vectors by class label.
    x_group={}
    for i,e in enumerate(x):
        x_group.setdefault(label_hash(y[i]),[]).append(e)

    # Cannot blend more rows than the smallest class provides.
    min_number_of_group=min(len(rows) for rows in x_group.values())
    group_number=min(min_number_of_group,group_number)

    reinforce_x=[]
    reinforce_y=[]

    for i in range(number):
        # Cycle through the originals so every row seeds some output.
        target_x=x[i%len(x)]
        target_y=y[i%len(y)]
        target_group=x_group[label_hash(target_y)]

        # Draw group_number-1 companions from the same class (the
        # target itself may be re-drawn), then include the target.
        class_index=list(range(len(target_group)))
        sample_indexes=random.sample(class_index,group_number-1) if group_number>=2 else []
        sample_rows=[target_group[index] for index in sample_indexes]
        sample_rows.append(target_x)

        # Per-feature spread of the chosen rows scales the jitter.
        dev=np.max(sample_rows,0)-np.min(sample_rows,0)

        reinforce_x.append(sum(sample_rows)/group_number+dev*np.random.uniform(-0.5,0.5,size=dev.shape))
        reinforce_y.append(target_y)

    return reinforce_x,reinforce_y


def dataset_summary(source_dirs:list[str],lang,min_number=5):
    """Build a dataset summary and log its headline statistics."""
    summary=make_dataset_summary(source_dirs,[],lang,min_number,use_property_selector)

    messages=(
        f"author/valid author: {summary.author_number}/{summary.valid_author_number}",
        f"source/valid source: {summary.source_file_number}/{summary.valid_source_file_number}",
        f"feature vector: {summary.feature_vector_example}",
    )
    for message in messages:
        log(message)

def data_preprocess(source_dirs:list[str],lang,min_number,property_selector=None):
    """Extract features, split into train/valid, and reinforce each split.

    Falls back to the module-level ``use_property_selector`` when no
    selector is supplied. Returns ``((train_x, train_y), (val_x, val_y))``.
    """
    selector=use_property_selector if property_selector is None else property_selector

    x,y,detail=make_author_attribution_dataset(source_dirs,lang,min_number=min_number,property_selector=selector,extend_property_selector=use_extend_property_selector)
    train,val=train_valid_split(x,y,detail)

    # group_number=1: each row is blended only with itself, seed 0.
    train_x,train_y=author_attribution_data_reinforce(train,1,0)
    val_x,val_y=author_attribution_data_reinforce(val,1,0)

    return (train_x,train_y),(val_x,val_y)

def run_model(x,y,model_adapter:AuthorAttributionModelAdapter):
    """Fit the adapter on x, with labels flattened to a 1-D array."""
    labels=np.ravel(y)
    model_adapter.train(x,labels)

def run_author_attribution_task(source_dirs:list[str],model_adapter:AuthorAttributionModelAdapter,lang:str,property_selector=None,min_number=5,enable_summary=True,task_name="author attribution"):
    """Run the end-to-end attribution task: optional dataset summary,
    preprocessing, model training, and a train/val score report."""
    if enable_summary:
        dataset_summary(source_dirs,lang,min_number)

    (train_x,train_y),(val_x,val_y)=data_preprocess(source_dirs,lang,min_number,property_selector)

    run_model(train_x,train_y,model_adapter)

    train_score=model_adapter.score(train_x,train_y)
    val_score=model_adapter.score(val_x,val_y)
    log(f"{task_name} train/val: {train_score}/{val_score}")


