import random
import numpy as np

from sklearn.ensemble import RandomForestClassifier

from src.feature_extract.dataset_maker import make_author_perspertive_dataset,make_dataset_summary
from src.feature_extract.property_selector import default_extend_property_selector, default_property_selector,author_label
from src.tasks.util import train_valid_split,log
from src.feature_extract.metadata import PersonExtendSample

use_property_selector=default_property_selector
use_extend_property_selector=default_extend_property_selector

class AuthorPerspertiveModelAdapter:
    """Thin adapter wrapping a (train, score) pair of callables.

    Decouples the task driver from any concrete estimator: ``train_handle``
    fits on ``(x, y)`` and ``score_handle`` evaluates on ``(x, y)``.  Any
    model state lives inside the callables themselves.
    """

    def __init__(self,train_handle,score_handle) -> None:
        self.train_handle=train_handle
        self.score_handle=score_handle

    def train(self,x,y):
        """Delegate fitting to the wrapped train callable."""
        return self.train_handle(x,y)

    def score(self,x,y):
        """Delegate evaluation to the wrapped score callable."""
        return self.score_handle(x,y)

    @staticmethod
    def default_model():
        """Return an adapter backed by a fixed-seed random forest."""
        forest=RandomForestClassifier(random_state=0)
        # Bound methods replace the original pass-through lambdas; the
        # call signatures and return values are identical.
        return AuthorPerspertiveModelAdapter(forest.fit,forest.score)

def author_perspertive_data_balance(data,hash_func,group_number=3,random_seed=None):
    """Oversample minority label classes until every class is equally sized.

    Synthetic samples for a class are produced by averaging up to
    ``group_number`` feature vectors of one randomly chosen author and
    jittering the mean within the per-feature spread of the averaged group.

    Args:
        data: tuple ``(x, y, detail)``; ``y`` is a 2-D array whose last
            column is the author id and whose remaining columns are the label.
        hash_func: maps one label row to a hashable class key.
        group_number: max number of same-author vectors averaged per
            synthetic sample.
        random_seed: seed for both ``random`` and ``numpy.random``; ``None``
            leaves the global RNG state untouched.

    Returns:
        ``(balance_x, balance_y)`` numpy arrays with equal counts per class.
    """
    # BUG FIX: the old truthiness test (`if random_seed:`) silently skipped
    # seeding for seed 0 (which data_preprocess actually passes), and
    # numpy's RNG — used for the jitter below — was never seeded at all,
    # so balanced output was not reproducible.
    if random_seed is not None:
        random.seed(random_seed)
        np.random.seed(random_seed)

    x,y,_=data

    # Split the author id (last column) off the label columns.
    y,x_author=y[:,:-1],y[:,-1]

    # All feature vectors grouped by author id.
    author2x={}
    for author_key,features in zip(x_author,x):
        author2x.setdefault(author_key,[]).append(features)

    # (author, label, features) triples grouped by label class.
    label2x={}
    for author_key,label_row,features in zip(x_author,y,x):
        label2x.setdefault(hash_func(label_row),[]).append((author_key,label_row,features))

    assert min(len(v) for v in label2x.values())>=3,"数据平衡要求每一类型的个数至少为3"

    balance_x=[]
    balance_y=[]

    max_number=max(len(v) for v in label2x.values())
    for class_samples in label2x.values():
        samples=list(class_samples)
        while len(samples)<max_number:
            # Pick a (possibly already synthetic) sample to borrow the
            # author and label from; renamed from `author_label`, which
            # shadowed the imported author_label function.
            author_key,label_row,_=random.choice(samples)
            author_data=author2x[author_key]
            author_group_number=min(len(author_data),group_number)

            # Mean of a random same-author group, jittered within the
            # group's per-feature range to avoid exact duplicates.
            new_data=np.stack(random.sample(author_data,author_group_number))
            dev=np.max(new_data,axis=0)-np.min(new_data,axis=0)

            samples.append(
                (author_key,
                 label_row,
                 np.sum(new_data,0)/author_group_number+np.random.uniform(-0.5,0.5,size=dev.shape)*dev
                )
            )

        balance_x+=[e for _,_,e in samples]
        balance_y+=[e for _,e,_ in samples]

    return np.array(balance_x),np.array(balance_y)



# def data_reinfore(train_data,val_data):
#     def label_hash(y):
#         result=0
#         i=0
#         for e in y:
#             result+=256**(i)*e
#             i+=1
#         return result
    
#     train_x,train_y=author_perspertive_data_balance(train_data,label_hash,1,0)
#     val_x,val_y,_=val_data
#     val_y=val_y[:,:-1]

#     return (train_x,train_y),(val_x,val_y)

# def prepare_data(source_dirs:list[str],extend_data_paths:list[str],lang,label_selector):
#     def extend_label(person_sample:PersonExtendSample):
#         return label_selector(person_sample)+author_label(person_sample.person_sample)

#     x,y,detail=make_author_perspertive_dataset(source_dirs,extend_data_paths,lang,property_selector=use_property_selector,extend_property_selector=use_extend_property_selector,label_selector=extend_label)
#     train,val=train_valid_split(x,y,detail)


#     return train,val

def data_preprocess(source_dirs:list[str],extend_data_paths:list[str],lang,label_selector,min_number):
    """Build a balanced training set and a validation split for the task.

    The task label is extended with the author label so that class
    balancing can regroup samples per author; the trailing author column
    is stripped from the validation labels before returning.
    """
    def extend_label(person_sample:PersonExtendSample):
        # Task label followed by the author label (consumed by balancing).
        return label_selector(person_sample)+author_label(person_sample.person_sample)

    def label_hash(y):
        # Encode the label vector as one base-256 integer class key.
        return sum(256**i*e for i,e in enumerate(y))

    x,y,detail=make_author_perspertive_dataset(source_dirs,extend_data_paths,lang,min_number=min_number,property_selector=use_property_selector,extend_property_selector=use_extend_property_selector,label_selector=extend_label)
    train_data,val_data=train_valid_split(x,y,detail)

    # Balance with per-author group size 1 and a fixed seed of 0.
    train_x,train_y=author_perspertive_data_balance(train_data,label_hash,1,0)

    val_x,val_y,_=val_data
    val_y=val_y[:,:-1]  # drop the trailing author column

    return (train_x,train_y),(val_x,val_y)

def dataset_summary(source_dirs:list[str],extend_data_paths:list[str],lang,min_number=5):
    """Log headline counts for the dataset built from the given sources."""
    summary=make_dataset_summary(source_dirs,extend_data_paths,lang,min_number,use_property_selector)

    messages=[
        f"author/link author/valid author: {summary.author_number}/{summary.link_author_number}/{summary.valid_link_author_number}",
        f"source/link source/valid source: {summary.source_file_number}/{summary.link_source_file_number}/{summary.valid_link_source_file_number}",
        f"feature vector: {summary.feature_vector_example}",
    ]
    for message in messages:
        log(message)

    
def run_model(x,y,model_adapter:AuthorPerspertiveModelAdapter):
    """Fit the adapter's model on x against the flattened label vector."""
    flat_labels=np.ravel(y)
    model_adapter.train(x,flat_labels)


def run_author_perspertive_task(source_dirs:list[str],extend_data_paths:list[str],model_adapter:AuthorPerspertiveModelAdapter,lang:str,label_selector,min_number=5,enable_summary=True,enable_random_test=False,task_name="author perspertive"):
    """End-to-end task driver: optionally summarize the dataset, then
    preprocess, train the adapter's model and log train/validation scores.

    When ``enable_random_test`` is set, an additional score on uniformly
    random features of the validation shape is logged as a chance baseline.
    """
    if enable_summary:
        dataset_summary(source_dirs,extend_data_paths,lang,min_number)

    train_split,val_split=data_preprocess(source_dirs,extend_data_paths,lang,label_selector,min_number)
    train_x,train_y=train_split
    val_x,val_y=val_split

    run_model(train_x,train_y,model_adapter)

    if not enable_random_test:
        log(f"{task_name} train/val: {model_adapter.score(train_x,train_y)}/{model_adapter.score(val_x,val_y)}")
    else:
        log(f"{task_name} train/val/test: {model_adapter.score(train_x,train_y)}/{model_adapter.score(val_x,val_y)}/{model_adapter.score(np.random.random(np.shape(val_x)),val_y)}")