import sys, os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import numpy as np
from protocol.decorators import edp_service, instant
from protocol.prelude import now
from ds import CBDataset, create_balanced_sampler, from_npz
import torch
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
DEVICE = torch.device('cuda')
SAVE_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'deep-models')
FIG_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'figs')
os.makedirs(SAVE_DIR, exist_ok=True)
os.makedirs(FIG_DIR, exist_ok=True)
from train import train
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from protocol.decorators import edp_service, instant

def split_indice(dt, indices):
    """Partition *dt* by position: rows listed in *indices* vs. the rest.

    Returns ``(selected, remaining)``; both parts preserve the original
    row order of *dt*, regardless of the order of *indices*.
    """
    chosen = np.zeros(len(dt), dtype=bool)
    chosen[indices] = True
    return dt[chosen], dt[~chosen]

def get_model(modelpath: str):
    """Load the Python file at *modelpath* and return the model it builds.

    The file must define a top-level ``get_model()`` factory; whatever it
    returns is passed through unchanged.
    """
    from importlib import util as importlib_util

    spec = importlib_util.spec_from_file_location("model_module", modelpath)
    module = importlib_util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module.get_model()

@edp_service
class FeatureExtract:
    """EDP service exposing dataset splitting, training and plotting tasks.

    A "DSData" value is a numpy array of samples; the stimulus amplitude of
    a sample is ``max(sample[:5, 3:])``.
    NOTE(review): stimulus values are assumed to lie in [0, 60] — the seven
    histogram/partition buckets below depend on this; confirm with the data
    producer.
    """

    name = "feature extract"
    desc = ''
    category = "feature"

    # Task signatures published to the EDP protocol.
    tasks = [
        'go_train: (List<Str>,Str,Json)->(Str,Str)',
        # 'train_val_test_split_npz: List<Str>->(List<Str>,List<Str>,List<Str>)',
        'train_val_test_split_dt: (DSData, Float, Float)->(DSData,DSData,DSData)',
        'split_by_ratio_stim: (DSData, Int, Int)->(DSData,DSData,DSData)',
        'get_npz: Str->List<Str>',
        'data_dist: DSData->List<Int>',
        'bar_chart: List<Float>->Str',
        'ds_stim_dist: DSData->List<DSData>',
        'filter_stim: DSData->DSData',
        'filter_out_zstim: DSData->DSData',
    ]

    def go_train(self, inputs):
        """Train a model on the given npz files.

        inputs: ``(npz_paths, model_file_path, config_dict)``.
        Returns ``(model_file_path, model_save_dir, fig_save_dir)``.
        """
        npz, modelpath, conf = inputs
        ds = CBDataset(npz).load()
        if conf.get('no_zstim', False):
            # Optionally drop all zero-stimulus samples before splitting.
            ds.data = self.filter_out_zstim(ds.data)
        # Hold out 50 validation + 50 test samples per stimulus bucket.
        train_dt, val_dt, test_dt = self.split_by_ratio_stim((ds.data, 50, 50))

        # NOTE(review): 'dropout' is only consumed by the commented-out LSTM
        # path below; the dynamically loaded model controls its own dropout.
        dropout = conf.get('dropout', 0.2)  # default 0.2
        # model = LSTM(dropout=dropout).to(DEVICE)
        model = get_model(modelpath).to(DEVICE)

        epochs = conf.get('epochs', 500)
        lr = conf.get('lr', 0.001)
        batch_size = conf.get('batch_size', 8)
        weight_decay = conf.get('weight_decay', 1e-5)
        momentum = conf.get('momentum', 0.9)  # only used by the SGD path below
        scheduler = conf.get('scheduler')
        min_lr = conf.get('min_lr')
        warmup_epochs = conf.get('warmup_epochs')
        patience = conf.get('patience')
        clip_grad_norm = conf.get('clip_grad_norm')
        zstim_number = conf.get('zstim_number', 1000)

        train_ds = CBDataset(data=train_dt).to_tensor(DEVICE)
        val_ds = CBDataset(data=val_dt).to_tensor(DEVICE)
        test_ds = CBDataset(data=test_dt).to_tensor(DEVICE)
        train_sampler = create_balanced_sampler(train_ds, zstim_number)
        # sampler and shuffle=True are mutually exclusive in DataLoader.
        train_loader = torch.utils.data.DataLoader(
            train_ds,
            batch_size=batch_size,
            sampler=train_sampler,
            shuffle=False,
        )
        val_loader = torch.utils.data.DataLoader(val_ds, batch_size=batch_size, shuffle=False)
        test_loader = torch.utils.data.DataLoader(test_ds, batch_size=batch_size, shuffle=False)
        # optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay)
        optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
        # Extra training options are smuggled to train() via the param group.
        # NOTE(review): Adam itself ignores these keys; presumably train()
        # reads them back from optimizer.param_groups[0] — confirm.
        for key, value in (
            ('scheduler', scheduler),
            ('min_lr', min_lr),
            ('warmup_epochs', warmup_epochs),
            ('clip_grad_norm', clip_grad_norm),
        ):
            if value:
                optimizer.param_groups[0][key] = value
        criterion = torch.nn.MSELoss()
        # BUGFIX: call now() once so the model and figure directories share
        # the same timestamp (two calls could straddle a tick and diverge).
        stamp = now()
        model_save_dir = os.path.join(SAVE_DIR, f'lstm-{stamp}')
        os.makedirs(model_save_dir, exist_ok=True)
        fig_save_dir = os.path.join(FIG_DIR, f'lstm-{stamp}')
        os.makedirs(fig_save_dir, exist_ok=True)
        print(f"train: {len(train_dt)}, val: {len(val_dt)}, test: {len(test_dt)}")
        _, _, train_info = train(model, DEVICE, train_loader, val_loader, test_loader,
                                 criterion, optimizer, epochs, patience,
                                 model_save_dir, fig_save_dir)
        print(train_info)
        return modelpath, model_save_dir, fig_save_dir

    def train_val_test_split_dt(self, inputs):
        """Random ratio split.

        inputs: ``(dt, train_ratio, val_ratio)`` with ratios summing to < 1;
        the remainder becomes the test set.
        Returns ``(train, val, test)``.
        """
        dt, train_ratio, val_ratio = inputs
        assert train_ratio + val_ratio < 1
        train_size = int(len(dt) * train_ratio)
        val_size = int(len(dt) * val_ratio)
        # Shuffle before slicing so the split is random.
        dt = dt[np.random.permutation(len(dt))]
        return (dt[:train_size],
                dt[train_size:train_size + val_size],
                dt[train_size + val_size:])

    def split_by_ratio_stim(self, inputs):
        """Stratified split holding out fixed counts per stimulus bucket.

        inputs: ``(dt, val_number, test_number)``. From every non-empty
        stimulus bucket, *val_number* samples go to the validation set and
        *test_number* to the test set (drawn at random without replacement);
        the remainder forms the training set.
        Returns ``(train, val, test)`` as stacked arrays.
        """
        dt, val_number, test_number = inputs
        train_parts, val_parts, test_parts = [], [], []
        for bucket in self.ds_stim_dist(dt):
            if len(bucket) == 0:
                continue
            indices = np.random.choice(len(bucket), val_number + test_number, replace=False)
            holdout, rest = split_indice(bucket, indices)
            train_parts.append(rest)
            # BUGFIX: the first val_number held-out samples belong to the
            # validation set and the remaining test_number to the test set;
            # previously the two slices were swapped, mislabelling the
            # splits whenever val_number != test_number.
            val_parts.append(holdout[:val_number])
            test_parts.append(holdout[val_number:])
        # Return a concrete tuple (the previous generator also unpacked into
        # three values, but a tuple is reusable and clearer).
        return tuple(np.vstack(parts) for parts in (train_parts, val_parts, test_parts))

    def get_npz(self, inputs):
        """Recursively collect all ``*.npz`` files under directory *inputs*."""
        import glob
        # BUGFIX: '**' only matches across directory levels when
        # recursive=True; without it the pattern matched exactly one level.
        return glob.glob(f'{inputs}/**/*.npz', recursive=True)

    def data_dist(self, inputs):
        """Histogram of stimulus amplitudes over 7 buckets.

        Bucket 0 counts zero-stimulus samples; bucket i (1..6) counts
        samples whose max stimulus lies in ``(10*(i-1), 10*i]``, matching
        ds_stim_dist's partition.
        """
        stims = np.zeros(7)
        for sample in inputs:
            stim = np.max(sample[:5, 3:])
            # BUGFIX: ceil makes the buckets agree with ds_stim_dist's
            # (lo, hi] intervals; previously a stim of exactly 50 landed in
            # the 60 bucket and every exact multiple of 10 was shifted one
            # bucket high.
            stims[int(np.ceil(stim / 10))] += 1
        return stims

    def bar_chart(self, inputs):
        """Render *inputs* as a bar chart; return the saved PNG path."""
        plt.figure(figsize=(10, 6))
        ax = sns.barplot(x=list(range(len(inputs))), y=inputs)
        for i, v in enumerate(inputs):
            # Label each bar with its integer count.
            ax.text(i, v, str(int(v)), ha='center', va='bottom')
        plt.title('data distribution')
        plt.xlabel('category')
        plt.ylabel('count')
        fig_path = os.path.join(FIG_DIR, f'barchart_{now()}.png')
        plt.savefig(fig_path)
        plt.close()
        return fig_path

    def ds_stim_dist(self, inputs):
        """Partition samples into 7 stimulus buckets: {0}, (0,10], ... (50,60]."""
        dt = inputs
        # Per-sample stimulus amplitude.
        max_stim = np.max(dt[:, :5, 3:], axis=(1, 2))
        buckets = [dt[max_stim == 0]]
        for lo in range(0, 60, 10):
            buckets.append(dt[(max_stim > lo) & (max_stim <= lo + 10)])
        return buckets

    def filter_out_zstim(self, inputs):
        """Return *inputs* with all zero-stimulus samples removed."""
        return np.vstack(self.ds_stim_dist(inputs)[1:])

    def filter_stim(self, inputs):
        """Cap the number of samples per stimulus bucket.

        inputs: ``(dt, numbers)`` where ``numbers[i]`` is the maximum sample
        count to keep in bucket i; overfull buckets are subsampled at random
        without replacement. Returns the stacked, capped dataset.
        """
        dt, numbers = inputs
        buckets = self.ds_stim_dist(dt)
        for i, (bucket, cap) in enumerate(zip(buckets, numbers)):
            if len(bucket) > cap:
                keep = np.random.choice(len(bucket), cap, replace=False)
                buckets[i] = bucket[keep]
        return np.vstack(buckets)

if __name__ == '__main__':
    # Run the EDP service as a standalone blocking web app.
    # NOTE(review): port is passed as the string "12345" — confirm the
    # underlying app.run accepts strings; Flask-style APIs expect an int.
    FeatureExtract.app.run(port="12345", debug=True)
    # Offline usage example (kept for reference; bypasses the service):
    # nd = r"C:\Users\songy\Desktop\data-0518"
    # modelpath = r"E:\25spring\FYP\pymodules\feature_extract\model_v3.py"
    # o = FeatureExtract()
    # npz = o.get_npz(nd)
    # # print(from_npz(npz[0]))
    # o.go_train((npz, modelpath, {
    #     'epochs': 500,
    #     'batch_size': 16,
    #     'lr': 0.0005,
    #     # 'weight_decay': 1e-4,
    #     # 'momentum': 0.95,
    #     # 'scheduler': 'cosine',
    #     'min_lr': 1e-7,
    #     # 'warmup_epochs': 20,
    #     'patience': 500,
    #     'zstim_number': 1500,
    #     'no_zstim': False,
    #     # 'clip_grad_norm': 1.0,
    #     # 'dropout': 0.25,
    # }))
