#%%
import numpy as np
import torch
import torch.nn as nn
import h5py
import os
import os.path as osp
from torch.utils.data import DataLoader, TensorDataset, SubsetRandomSampler
import tqdm
from openlabcluster.training_utils.ssl.SeqModel import SemiSeq2Seq, seq2seq,DecoderRNN
from openlabcluster.training_utils.ssl.data_loader import SupDataset, pad_collate_iter
from openlabcluster.utils import auxiliaryfunctions
import lilab.OpenLabCluster_train.model
from torch import optim
from openlabcluster.training_utils.ssl.utilities import load_model
from openlabcluster.training_utils.ssl.seq_train import training
from pathlib import Path
import sys
import argparse

from lilab.OpenLabCluster_train.model import seq2seq, SemiSeq2Seq

#%%

#%%
# OpenLabCluster iteration directory containing config.yaml.
project = '/DATA/taoxianming/rat/data/Mix_analysis/SexAgeDay55andzzcWTinAUT_MMFF/result32/olc-iter4-2024-05-27'
# model_path0 = '/DATA/taoxianming/rat/data/Mix_analysis/SexAgeDay55andzzcWTinAUT_MMFF/result32/olc-iter1-2024-05-23/models/OPCA0.00_P100_en3_hid30_epoch14'
model_path0 = '/DATA/taoxianming/rat/data/Mix_analysis/SexAgeDay55andzzcWTinAUT_MMFF/result32/olc-iter1-2024-05-23/models/FWPCA0.00_P100_en3_hid30_epoch2'

# Minimal stand-in object so snippets copied from the GUI/trainer code can
# keep reading attributes off `self`.
class FAKE: pass

cfg = osp.join(project, 'config.yaml')
cfg_data = auxiliaryfunctions.read_config(cfg)

self = FAKE()
self.cfg = cfg
self.model_name = model_path0
self.cfg_data = cfg_data

# Hyper-parameters pulled from config.yaml (cfg_data is the same dict as
# self.cfg_data, just accessed directly here).
num_class = cfg_data['num_class'][0]
root_path = cfg_data["project_path"]
batch_size, feature_length = cfg_data['batch_size'], cfg_data['feature_length']
hidden_size, cla_dim = cfg_data['hidden_size'], cfg_data['cla_dim']
en_num_layers, de_num_layers = cfg_data['en_num_layers'], cfg_data['de_num_layers']
cla_num_layers = cfg_data['cla_num_layers']
fix_state, fix_weight = cfg_data['fix_state'], cfg_data['fix_weight']
teacher_force = cfg_data['teacher_force']
device = 'cuda:0'



# Labels are optional: fall back to None (unlabeled mode) when label.npy
# does not exist under the configured label directory.
label_path = os.path.join(self.cfg_data['label_path'],'label.npy')
if not os.path.exists(label_path):
    label_path = None

dataset_traintest = SupDataset(root_path, self.cfg_data['data_path'], self.cfg_data['train'] , label_path)
nsample_traintest = len(dataset_traintest)
nsample_train = int(nsample_traintest * 0.9)  # 90/10 train/validation split

# Deterministic shuffle of the sample indices before splitting, so the
# train/validation partition is reproducible across runs.
random_seed = 11111
np.random.seed(random_seed)
indices_shuffle = np.arange(nsample_traintest)
np.random.shuffle(indices_shuffle)
indices_train, indices_test = indices_shuffle[:nsample_train], indices_shuffle[nsample_train:]

# One sampler per index subset; both loaders share the same underlying
# dataset and padding collate function.
train_sampler = SubsetRandomSampler(indices_train)
test_sampler = SubsetRandomSampler(indices_test)
train_loader = DataLoader(dataset_traintest, batch_size=batch_size,
                          sampler=train_sampler, collate_fn=pad_collate_iter)
test_loader = DataLoader(dataset_traintest, batch_size=batch_size,
                         sampler=test_sampler, collate_fn=pad_collate_iter)

# Peek a single collated batch (first element of the collate output) for
# the smoke-test forward pass below.
databatch = next(iter(train_loader))[0]


# Build the semi-supervised seq2seq model (encoder/decoder plus classifier
# head) and move it to the target device.
model: nn.Module = SemiSeq2Seq(feature_length, hidden_size, feature_length, batch_size,
                               cla_dim, en_num_layers, de_num_layers, cla_num_layers,
                               fix_state, fix_weight, teacher_force, device).to(device)

# Re-initialize every 2-D parameter (weight matrices; biases are 1-D and
# skipped) with a small uniform distribution.
with torch.no_grad():
    for submodule in model.children():
        print(submodule)
        for weight in submodule.parameters():
            if weight.dim() == 2:
                # nn.init.xavier_uniform_(weight)
                nn.init.uniform_(weight, a=-0.05, b=0.05)

# Warm-start the seq2seq backbone from a pretrained checkpoint when given;
# printing load_state_dict's result surfaces any missing/unexpected keys.
if model_path0 is not None:
    model_dict = torch.load(model_path0)
    print('load from seq2seq model')
    print(model.seq.load_state_dict(model_dict['model_state_dict']))

X = databatch.to(device)

# Smoke-test forward pass.
# NOTE(review): 24 (length) and 64 (batch) are hard-coded — presumably they
# should track databatch's actual dimensions; confirm against forward_test.
inter, deout, pred, deout_seq, encoder_hidden = model.forward_test(X, [24]*64)

