# conda activate OpenLabCluster
# python -m lilab.OpenLabCluster_train.a5_export_latent_iter_repr $PROJECT_REPR $PROJECT_SEMISEQ2SEQ --epoch 3 --iter 0
from sklearn.decomposition import PCA
import pickle
import numpy as np

# NOTE(review): warm-up PCA on throwaway random data BEFORE torch/umap are
# imported — presumably a workaround for an OpenMP/MKL import-order conflict
# between sklearn and torch; the result is never used (latent_save shadows
# the name with a local). TODO confirm this is still needed.
feat_latent_pca = PCA().fit_transform(np.random.random((66666, 60)))


import torch
import torch.nn as nn
import h5py
import os
import os.path as osp
from torch.utils.data import DataLoader, TensorDataset
import tqdm
# from openlabcluster.training_utils.ssl.SeqModel import SemiSeq2Seq
from lilab.OpenLabCluster_train.model import SemiSeq2Seq, SemiSeq2SeqLite
import umap
from lilab.openlabcluster_postprocess.s1a_clipNames_inplace_parse import parse_name
import matplotlib.pyplot as plt
import argparse
from lilab.openlabcluster_postprocess.s1_merge_3_file import get_assert_1_file
from lilab.OpenLabCluster_train.a1_mirror_mutual_filt_clippredpkl import factory_label_mirror_start0
from openlabcluster.utils import auxiliaryfunctions
import multiprocessing as mp


# --- CLI arguments ---------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("dir_representitive", type=str)  # project dir holding the reference *.clippredpkl
parser.add_argument("dir_semiseq2seq", type=str)  # project dir with config.yaml, models/, datasets/, videos/
parser.add_argument("--iter", type=int, default=0)  # iteration tag embedded in the checkpoint filename
parser.add_argument("--epoch", type=int, default=3)  # 1-based; checkpoint filename uses epoch-1
parser.add_argument('--modeltype', type=str, default='SemiSeq2Seq')
args = parser.parse_args()

dir_representitive = args.dir_representitive
dir_semiseq2seq = project = args.dir_semiseq2seq  # 'project' is the alias used throughout below
iteri = args.iter
epoch = args.epoch

# Rebind the model class so load_model() transparently builds the lite variant.
if args.modeltype == 'SemiSeq2SeqLite':
    SemiSeq2Seq = SemiSeq2SeqLite

# Load the representative-iteration clip predictions; they supply the cluster
# bookkeeping (mutual/mirror class counts, representative clip indices) that
# the new iteration is checked against below.
clippredpkl_repr = get_assert_1_file(osp.join(dir_representitive,'*.clippredpkl'))
clippreddata_repr = pickle.load(open(clippredpkl_repr,'rb'))
nK_mutual = clippreddata_repr['nK_mutual']  # number of mutual-behavior classes
nK_mirror_half = clippreddata_repr['nK_mirror_half']  # half the count of mirrored class pairs
ind_rawclip = clippreddata_repr['ind_rawclip']  # indices of the representative clips in the full set
_, fun_label_mirror = factory_label_mirror_start0(nK_mutual, nK_mirror_half)


def load_model():
    """Build a SemiSeq2Seq model from the project config and load its
    checkpoint weights for (iteri, epoch-1).

    Reads module globals: project, dir_semiseq2seq, iteri, epoch, SemiSeq2Seq.

    Returns:
        (device, model): the CUDA device string and the model in eval mode.
    """
    c = auxiliaryfunctions.read_config(project+'/config.yaml')
    device = 'cuda:0'

    model:nn.Module = SemiSeq2Seq(c['feature_length'], c['hidden_size'], c['feature_length'], c['batch_size'],
                                c['cla_dim'], c['en_num_layers'], c['de_num_layers'], c['cla_num_layers'],
                                c['fix_state'], c['fix_weight'], c['teacher_force'], device).to(device)

    # Checkpoint filenames embed epoch-1 (0-based) while the CLI --epoch is 1-based.
    model_path = get_assert_1_file(dir_semiseq2seq + f'/models/FWPC*iter{iteri}*_P*_en*_hid*_epoch{epoch-1}')
    # map_location makes loading robust when the checkpoint was saved on a
    # different device than the one we run on.
    model_dict = torch.load(model_path, map_location=device)
    model.load_state_dict(model_dict['model_state_dict'])
    model.eval()
    return device, model

def create_dataLoader():
    """Read clip names, labels and per-clip raw features from the project
    dataset and wrap them in a deterministic (non-shuffled) DataLoader.

    Reads module global: project.

    Returns:
        clipNames: list[str] of clip basenames, in dataset order.
        label: np.ndarray of integer labels (0 appears to mean "unlabeled").
        dataLoader: DataLoader of (features, label) batches, batch_size=64.
    """
    clipNames_file = osp.join(project, 'videos/clipNames.txt')
    # Use context managers so file handles are closed deterministically
    # (the original relied on the GC to close clipNames_file).
    with open(clipNames_file, 'r') as f:
        clipNames = [osp.basename(line.strip()) for line in f]
    data_h5 = osp.join(project, 'datasets/data.h5')
    with h5py.File(data_h5, 'r') as hf:
        label = np.array(hf['label'])
        # Dataset named str(i) holds the raw feature matrix for clip i.
        data_rawfeature = np.array([np.array(hf[f'{i}'], dtype=np.float32)
                                    for i in range(len(label))])
    dataset = TensorDataset(torch.from_numpy(data_rawfeature).float(), torch.from_numpy(label).long())
    dataLoader = DataLoader(dataset, batch_size=64, shuffle=False)
    return clipNames, label, dataLoader

# Build the model and data pipeline once at module scope; the functions below
# read these as globals.
device, model = load_model()
clipNames, label, dataLoader = create_dataLoader()

#%% model prediction
def model_prediction():
    """Run the trained model over the full dataset without gradients.

    Reads module globals: dataLoader, device, model.

    Returns:
        label_pred: np.ndarray of 1-based predicted class ids, one per clip.
        deout_seq_l: decoder output sequences flattened to shape (N, -1).
        encoder_l: encoder hidden states flattened to shape (N, -1).
    """
    pred_labels = []
    decoder_chunks = []
    encoder_chunks = []
    with torch.no_grad():
        for batch_feat, _ in tqdm.tqdm(dataLoader):
            batch_feat = batch_feat.to(device)
            seq_lens = [24] * len(batch_feat)
            inter, deout, pred, deout_seq, encoder_hidden = model.forward_test(batch_feat, seq_lens)
            # +1 shifts argmax output to 1-based class ids.
            batch_pred = torch.argmax(pred, dim=1) + 1
            pred_labels.extend(batch_pred.cpu().numpy().ravel().tolist())
            decoder_chunks.append(deout_seq.cpu().numpy())
            encoder_chunks.append(encoder_hidden[0].cpu().numpy())

    deout_flat = np.concatenate(decoder_chunks, axis=0)
    deout_flat = deout_flat.reshape(len(deout_flat), -1)
    enc_flat = np.concatenate(encoder_chunks, axis=0)
    enc_flat = enc_flat.reshape(len(enc_flat), -1)

    return np.array(pred_labels), deout_flat, enc_flat

label_pred, deout_seq_l, encoder_l = model_prediction()  # globals consumed by latent_save()

#%% latent space save to new clippredpkl and calculate accurancy
def latent_save(useEnc=True):
    """Project latent features (PCA -> UMAP), save a new .clippredpkl plus the
    raw latents, report mirror-consistency and accuracy metrics, and save a
    scatter plot of the 2-D embedding.

    Args:
        useEnc: if True use the encoder hidden state as the latent feature,
            otherwise use the flattened decoder output sequence. Output
            directory name reflects the choice.

    Reads module globals: encoder_l, deout_seq_l, label_pred, label,
    clipNames, clippreddata_repr, nK_mutual, nK_mirror_half, ind_rawclip,
    fun_label_mirror, project, iteri, epoch.
    """
    # import sklearn
    # import importlib
    # importlib.reload(sklearn)
    # from sklearn.decomposition import PCA
    pca = PCA()
    reducer = umap.UMAP(random_state=1000)  # fixed seed for a reproducible 2-D embedding
    if useEnc:
        # use encoder
        output_dir = osp.join(project, f'output/semisupervise-enc-iter{iteri}-epoch{epoch}')
        feat_latent = encoder_l
    else:
        # use decoder seqence
        output_dir = osp.join(project, f'output/semisupervise-decSeq-iter{iteri}-epoch{epoch}')
        feat_latent = deout_seq_l

    feat_latent_pca = pca.fit_transform(feat_latent)
    explained_var = pca.explained_variance_ratio_
    # NOTE(review): PC count hard-coded to 12; the commented expression is the
    # data-driven "PCs covering 90% variance" rule it replaced — confirm intent.
    pc_n_90 = 12 #np.sum(explained_var.cumsum() < 0.9)
    print('Done PCA')
    feat_latent_pca = feat_latent_pca[:, :pc_n_90]
    embedding_d2 = reducer.fit_transform(feat_latent_pca)
    df_clipNames_newiter = parse_name(clipNames)
    print('Done UMAP')
    # save data
    # Predicted labels are 1-based and must span every class exactly once.
    assert label_pred.max() == nK_mutual + nK_mirror_half*2
    # Strip the "(k) " numbering prefix from the representative cluster names.
    cluster_names = [s.split(') ')[-1] for s in clippreddata_repr['cluster_names']]
    clippreddata_newiter = {
        "ncluster": label_pred.max(),
        "embedding": feat_latent_pca,
        "embedding_d2": embedding_d2.astype(np.float64),
        "cluster_labels": label_pred,
        "cluster_names": cluster_names,
        "ntwin": 24,
        "clipNames": np.array(clipNames),
        'df_clipNames': df_clipNames_newiter,
        'nK_mutual': nK_mutual,
        'nK_mirror_half': nK_mirror_half
    }
    os.makedirs(output_dir, exist_ok=True)
    outdata_path = osp.join(output_dir, f'olc-2024-05-23-semiseq2seq-iter{iteri}-epoch{epoch}_pca{pc_n_90}.clippredpkl')
    pickle.dump(clippreddata_newiter, open(outdata_path, 'wb'))
    np.save(osp.join(output_dir, 'deout_seq.npy'), deout_seq_l)
    np.save(osp.join(output_dir, 'encoder.npy'), encoder_l)

    # calculate accuracy
    # After sorting by (video, start frame, isBlack) the rows presumably
    # alternate white/black rat clips of the same moment — TODO confirm.
    df_sort = df_clipNames_newiter.sort_values(by=['vnake', 'startFrame', 'isBlack'])
    ind_sort = df_sort.index.values
    new_label_sort = clippreddata_newiter['cluster_labels'][ind_sort] - 1 # shift to 0-based labels
    new_label_W = new_label_sort[::2]
    new_label_B = new_label_sort[1::2]
    # Mirror-map the black-rat labels so a consistent prediction matches the
    # white-rat label exactly.
    new_label_B_mirror = fun_label_mirror(new_label_B)
    repr_new_iter_perc = np.mean(new_label_W == new_label_B_mirror)
    print(f'mean mirror of {len(df_sort)} samples', repr_new_iter_perc)

    df_newiter_sub = df_clipNames_newiter.loc[ind_rawclip]
    # assert np.all(clippreddata_repr['df_clipNames']['startFrame'].values == df_newiter_sub['startFrame'].values) # sanity check when training and prediction use the same dataset

    # Agreement with the representative iteration on its own clips.
    repr_acc = np.mean(clippreddata_newiter['cluster_labels'][ind_rawclip] == clippreddata_repr['cluster_labels'])
    print(f'acc class of {len(ind_rawclip)} representive samples', repr_acc)

    # plot figure (every second point: one rat per clip pair)
    plt.figure(figsize=(12, 10))
    plt.scatter(embedding_d2[::2, 0], embedding_d2[::2, 1], c=label_pred[::2], s=1, cmap='hsv') #
    plt.colorbar()
    plt.title((f'Repr. Proportion {repr_new_iter_perc*100:.2f} / {len(label_pred)}(all) \n'
               f'Accuracy traintest {np.sum((label_pred == label) & (label>0)) / np.sum(label>0) :.2f} / {np.sum(label>0)} (iter_pre) \n'
               f'Repr. Accuracy {repr_acc*100:.2f} / {len(ind_rawclip)} (original)'))
    plt.xticks([])
    plt.yticks([])
    plt.savefig(osp.join(output_dir, f'olc-2024-05-23-semiseq2seq-iter{iteri}-epoch{epoch}_pca{pc_n_90}.png'))

latent_save(False)  # decoder-sequence latents; flip to True for encoder latents
# latent_save(True)


#%% print result: labeled/unlabeled counts and agreement with training labels
print('All sample: ', len(label))
print('Labeled sample: ', np.sum(label>0))
print('Unlabeled sample: ', np.sum(label==0))
correct_N = np.sum((label_pred == label) & (label>0))  # matches counted over labeled samples only
print('Correct labeled sample: ', correct_N)
print('Correct probability labeled sample: %.2f' % (correct_N / np.sum(label>0)))
