#%%
import pickle
import h5py
import numpy as np
import torch
import torch.nn as nn
import os
import os.path as osp
import tqdm
from openlabcluster.training_utils.ssl.data_loader import SupDataset, pad_collate_iter
from openlabcluster.utils import auxiliaryfunctions
from lilab.OpenLabCluster_train.model import SemiSeq2Seq, SemiSeq2SeqLite
from lilab.openlabcluster_postprocess.s1_merge_3_file import get_assert_1_file
from lilab.openlabcluster_postprocess.s1a_clipNames_inplace_parse import parse_name
import matplotlib.pyplot as plt
import argparse



# --- Hard-coded experiment paths (edit these per run) ---
# Project directory of the semi-supervised seq2seq iteration being evaluated.
project = '/DATA/taoxianming/rat/data/Mix_analysis/SexAgeDay55andzzcWTinAUT_MMFF/result32_k36/semiseq2seq_iter1/'
# Trained checkpoint to load (name encodes training hyper-parameters).
model_path = osp.join(project, 'models/FWPC_iter3A0.10_P100_en3_hid30_epoch4')
# HDF5 feature file of the "far" (non-social) clip subset to classify.
data_path_h5 = '/DATA/taoxianming/rat/data/Mix_analysis/SexAgeDay55andzzcWTinAUT_MMFF/result32_k36/data_far.h5'
# Representative clippredpkl from a previous run; its cluster names / nK
# parameters are reused for the new predictions below.
clippredpkl_repr = get_assert_1_file(osp.join('/mnt/liying.cibr.ac.cn_Data_Temp/multiview_9/chenxf/00_BehaviorAnalysis-seq2seq/SexAge/Day55_Mix_analysis/SexAgeDay55andzzcWTinAUT_MMFF/result32/representitive_k36_filt_perc65', '*.clippredpkl'))
output_dir = osp.join(project, 'output/far_ns_recluster_k36')
output_clippredpkl = osp.join(output_dir, 'olc-2024-05-23-semiseq2seq-iter3-epoch5_pca12_farns.clippredpkl')

# Clip names live next to the h5 file; parse_name() splits them into a
# DataFrame (video / frame fields — see s1a_clipNames_inplace_parse).
clipnames_txt = osp.join(osp.dirname(data_path_h5), 'clipNames_far.txt')
clipnames_l = np.loadtxt(clipnames_txt, dtype=str)
df_clipnames = parse_name(clipnames_l)

#%%
# Prepare the output directory and sanity-check the inputs.
os.makedirs(output_dir, exist_ok=True)

# Toggle for the lite model variant (replaces the former dead `if True:`).
USE_LITE_MODEL = True
if USE_LITE_MODEL:
    SemiSeq2Seq = SemiSeq2SeqLite

assert osp.isfile(model_path) and osp.isfile(data_path_h5)
with h5py.File(data_path_h5, 'r') as f:
    label = f['label'][:]
    # This subset is expected to be entirely unlabeled (all zeros).
    assert np.all(label == 0)

# SupDataset needs a label file; write a placeholder (0..n-1 indices).
# Saved inside output_dir rather than the CWD so the script does not
# pollute the working directory or collide with concurrent runs.
label_path = osp.join(output_dir, 'label_temp.npy')
np.save(label_path, np.arange(len(label)))


# Minimal namespace object standing in for the OpenLabCluster trainer
# instance, so config fields can hang off ``self`` as the upstream code expects.
class FAKE:
    pass

cfg = osp.join(project, 'config.yaml')
cfg_data = auxiliaryfunctions.read_config(cfg)

self = FAKE()
self.cfg, self.cfg_data = cfg, cfg_data

# Short alias for the parsed config dict.
_cd = self.cfg_data

num_class = _cd['num_class'][0]
root_path = _cd["project_path"]
batch_size = _cd['batch_size']
feature_length = _cd['feature_length']
hidden_size = _cd['hidden_size']
cla_dim = _cd['cla_dim']
en_num_layers = _cd['en_num_layers']
de_num_layers = _cd['de_num_layers']
cla_num_layers = _cd['cla_num_layers']
fix_state = _cd['fix_state']
fix_weight = _cd['fix_weight']
teacher_force = _cd['teacher_force']
device = 'cuda:0'

# Only the plain seq2seq training mode is supported by this script.
assert _cd['tr_modelType'] == 'seq2seq'

self.model_name = model_path
assert osp.exists(self.model_name)
print('load model: %s' % self.model_name)


# Dataset over the full clip set; the placeholder label file supplies
# per-sample indices as "semi labels" (see label_path above).
dataset_traintest = SupDataset(root_path, self.cfg_data['data_path'], data_path_h5, label_path)
traintest_loader = torch.utils.data.DataLoader(dataset_traintest, batch_size=batch_size)

# Build the model and restore checkpoint weights.
model: nn.Module = SemiSeq2Seq(feature_length, hidden_size, feature_length, batch_size,
                               cla_dim, en_num_layers, de_num_layers, cla_num_layers,
                               fix_state, fix_weight, teacher_force, device).to(device)
# map_location makes loading robust when the checkpoint was saved on a
# different device (another GPU index, or CPU).
model_dict = torch.load(self.model_name, map_location=device)
model.load_state_dict(model_dict['model_state_dict'])

# All clips share one fixed sequence length; read it off the first sample.
seq_len = dataset_traintest[0][0].shape[0]

def testing():
    """Run the trained classifier over the whole loader.

    Returns:
        tuple[np.ndarray, np.ndarray]: ``(pred_label_l, semi_label_l)`` —
        per clip, the argmax class prediction and ``semi_label - 1``
        (here the 0-based sample index, since the label file is arange).
    """
    model.eval()  # inference mode: freeze dropout / batch-norm behavior
    pred_label_l = []
    semi_label_l = []
    for data, _, semi_label, _ in tqdm.tqdm(traintest_loader):
        input_tensor = data.to(device)
        # as_tensor avoids the copy-construct warning that
        # torch.tensor(existing_tensor) emits, and handles ndarray input too.
        semi_label = torch.as_tensor(semi_label, dtype=torch.long, device=device)
        with torch.no_grad():
            # encoder hidden state and decoder output are not needed here
            _en_hi, _de_out, cla_pre = model(input_tensor, [seq_len] * len(input_tensor))
            pred_label_l.extend(cla_pre.argmax(1).tolist())
            semi_label_l.extend((semi_label - 1).tolist())

    return np.array(pred_label_l), np.array(semi_label_l)

# Run inference. Since the semi labels are the 0-based sample indices
# (the label file is np.arange), they must come back strictly consecutive —
# this guards against shuffling / dropped samples in the loader.
pred_label_l, semi_label_l = testing()
assert np.all(np.diff(semi_label_l)==1)

#%%
nsample = len(pred_label_l)

# Reuse cluster names from the representative clippredpkl.
# NOTE: pickle on untrusted files is unsafe; this path is project-internal.
with open(clippredpkl_repr, 'rb') as f:
    clippreddata_repr = pickle.load(f)
# Names are stored as "(idx) name"; keep only the part after ') '.
cluster_names = [s.split(') ')[-1] for s in clippreddata_repr['cluster_names']]

# Horizontal histogram of predicted cluster counts (right half of the figure).
plt.figure(figsize=(10,10))
plt.subplot(1,2,2)
u, c = np.unique(pred_label_l, return_counts=True)
plt.barh(u, c)
for ui, ci in zip(u, c): #align center
    # NOTE(review): ci*0.9+200 is a magic offset that places the count label
    # near the bar tip for this data's typical count range — tune if counts change.
    plt.text(ci*0.9+200, ui, f'{ci}', fontsize=8, ha='center', va='center')
plt.yticks(np.arange(len(cluster_names)), [f'[{i+1}] {c}' for i, c in enumerate(cluster_names)], fontsize=8)
plt.ylim(len(cluster_names), -1)  # inverted y-axis: cluster 0 at the top
plt.title('predicted')
plt.savefig(osp.join(output_dir, 'histcount.jpg'))

#%%
clippreddata_newiter = {
    "ncluster": clippreddata_repr['ncluster'].max(),
    "embedding": np.zeros((nsample, clippreddata_repr['embedding'].shape[1]))+np.nan,
    "embedding_d2": np.zeros((nsample, clippreddata_repr['embedding_d2'].shape[1]))+np.nan,
    "cluster_labels": pred_label_l + 1, #start from 1
    "cluster_names": cluster_names,
    "ntwin": 24,
    "clipNames": clipnames_l,
    'df_clipNames': df_clipnames,
    'nK_mutual': clippreddata_repr['nK_mutual'],
    'nK_mirror_half': clippreddata_repr['nK_mirror_half']
}

pickle.dump(clippreddata_newiter, open(output_clippredpkl, 'wb'))
print('save to   ', output_clippredpkl)


# Post-filter step reusing the representative run's nK parameters.
# NOTE(review): presumably flags mirror/mutual duplicate clips in-place on the
# DataFrame — confirm against filt_mirror_df's definition. `start=1` matches
# the 1-based cluster_labels written above.
from lilab.OpenLabCluster_train.a1_mirror_mutual_filt_clippredpkl import filt_mirror_df
df_clipnames['cluster_labels'] = clippreddata_newiter['cluster_labels']
filt_mirror_df(df_clipnames, clippreddata_repr['nK_mutual'], clippreddata_repr['nK_mirror_half'], start=1)

