#%%
# conda activate OpenLabCluster
# python -m lilab.OpenLabCluster_train.a4_semiseqseq_train dir_semiseq2seq --epoch 3 --iter-now 0
import numpy as np
import torch
import torch.nn as nn
import os
import os.path as osp
from torch.utils.data import SubsetRandomSampler
import tqdm
# from openlabcluster.training_utils.ssl.SeqModel import SemiSeq2Seq
from openlabcluster.training_utils.ssl.data_loader import SupDataset, pad_collate_iter
from openlabcluster.utils import auxiliaryfunctions
from lilab.OpenLabCluster_train.model import SemiSeq2Seq, SemiSeq2SeqLite
from torch import optim
from openlabcluster.training_utils.ssl.utilities import load_model
from openlabcluster.training_utils.ssl.seq_train import training
from pathlib import Path
import sys
import argparse


# --- Command-line interface ----------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('dir_semiseq2seq', type=str)        # project dir containing config.yaml
parser.add_argument('--epoch', type=int, default=3)     # number of training epochs
parser.add_argument('--iter-now', type=int, default=0)  # active-learning iteration index (0 = first pass)
parser.add_argument('--modeltype', type=str, default='SemiSeq2Seq')
args = parser.parse_args()

project = args.dir_semiseq2seq
epoch = args.epoch # 2-3 epochs is usually enough (translated from original note)
iteri = args.iter_now

if args.modeltype == 'SemiSeq2SeqLite':
    # Rebind the name so the construction further below picks the lite variant.
    SemiSeq2Seq = SemiSeq2SeqLite

# model_path0 = '/DATA/taoxianming/rat/data/Mix_analysis/SexAgeDay55andzzcWTinAUT_MMFF/result32/olc-iter1-2024-05-23/models/OPCA0.00_P100_en3_hid30_epoch14'
# model_path0 = '/DATA/taoxianming/rat/data/Mix_analysis/SexAgeDay55andzzcWTinAUT_MMFF/result32/olc-iter1-2024-05-23/models/FWPCA0.00_P100_en3_hid30_epoch2' 
class FAKE: pass  # bare namespace emulating the `self` of the OpenLabCluster GUI class
cfg = osp.join(project, 'config.yaml')
cfg_data = auxiliaryfunctions.read_config(cfg)
self = FAKE()
self.cfg = cfg
self.cfg_data = cfg_data

# --- Unpack configuration values -----------------------------------------
num_class = self.cfg_data['num_class'][0]
root_path = self.cfg_data["project_path"]
batch_size = self.cfg_data['batch_size']
feature_length = self.cfg_data['feature_length']
hidden_size = self.cfg_data['hidden_size']
cla_dim = self.cfg_data['cla_dim']
en_num_layers = self.cfg_data['en_num_layers']
de_num_layers = self.cfg_data['de_num_layers']
cla_num_layers = self.cfg_data['cla_num_layers']
fix_state = self.cfg_data['fix_state']
fix_weight = self.cfg_data['fix_weight']
teacher_force = self.cfg_data['teacher_force']
device = 'cuda:0'
# Warm start requires that the config points at a pretrained seq2seq checkpoint.
assert self.cfg_data['tr_modelType'] == 'seq2seq'
self.model_name = self.cfg_data['tr_modelName']
assert osp.exists(self.model_name)
print('load model: %s' % self.model_name)

# Iteration 0 uses the base label/data files; later active-learning
# iterations use suffixed copies ('label-iter{i}.npy', '*-iter{i}.h5').
if iteri==0:
    label_path = os.path.join(self.cfg_data['label_path'], 'label.npy')
    data_path_h5 = osp.join(self.cfg_data['data_path'], self.cfg_data['train'])
else:
    label_path = os.path.join(self.cfg_data['label_path'], f'label-iter{iteri}.npy')
    data_path_h5 = osp.join(self.cfg_data['data_path'], self.cfg_data['train'].replace('.h5', f'-iter{iteri}.h5'))
assert osp.exists(label_path) and osp.exists(data_path_h5)
dataset_traintest = SupDataset(root_path, self.cfg_data['data_path'], data_path_h5, label_path)
nsample_traintest = len(dataset_traintest)
nsample_train = int(nsample_traintest * 0.9)  # 90/10 train/test split

# random sample nsample_train from dataset_traintest (fixed seed => reproducible split)
indices_shuffle =np.arange(nsample_traintest)
random_seed = 11111
np.random.seed(random_seed)
np.random.shuffle(indices_shuffle)
indices_train = indices_shuffle[:nsample_train]
indices_test = indices_shuffle[nsample_train:]


# separate train and test loaders over the same dataset via index samplers
train_sampler = SubsetRandomSampler(indices_train)
train_loader = torch.utils.data.DataLoader(dataset_traintest, batch_size=batch_size,
                                            sampler=train_sampler, collate_fn=pad_collate_iter)
test_sampler = SubsetRandomSampler(indices_test)
test_loader = torch.utils.data.DataLoader(dataset_traintest, batch_size=batch_size,
                                            sampler=test_sampler, collate_fn=pad_collate_iter)

print("training data length: %d, train_loader: %d" % (len(indices_train), len(train_loader)))
print("testing data length: %d, test_loader: %d" % (len(indices_test), len(test_loader)))

phase = 'PC'
# NOTE(review): hard-coded True overrides the fix_weight value read from the
# config above; it is reset to False again before the model is built, so here
# it only forces the 'FW' prefix in the network name — confirm intentional.
fix_weight = True

# Network-name prefix encodes the freezing mode and the iteration index.
if fix_weight:
    network = 'FW' + phase + f'_iter{iteri}'

if fix_state:
    # Overrides 'FW' when both flags are set.
    network = 'FS' + phase + f'_iter{iteri}'

if not fix_state and not fix_weight:
    network = 'O' + phase + f'_iter{iteri}'

# hyperparameter
learning_rate = self.cfg_data['learning_rate']

# NOTE(review): resets the True assigned above (and the config value) before
# the model is constructed, so weights are NOT frozen in the built model.
fix_weight = False
model:nn.Module = SemiSeq2Seq(feature_length, hidden_size, feature_length, batch_size,
                                cla_dim, en_num_layers, de_num_layers, cla_num_layers, fix_state, fix_weight, teacher_force, device).to(device)
# Re-initialize all 2-D parameters (weight matrices) with a small uniform
# range; 1-D parameters (biases) keep their default initialization.
with torch.no_grad():
    for child in list(model.children()):
        print(child)
        for param in list(child.parameters()):
            if param.dim() == 2:
                # nn.init.xavier_uniform_(param)
                nn.init.uniform_(param, a=-0.05, b=0.05)

if self.model_name is not None:
    # Warm start: the pretraining checkpoint stores only the seq2seq
    # (encoder/decoder) weights, so load them into the `seq` submodule; the
    # classifier head keeps its fresh initialization.
    # (The original `if True: ... else: model.load_state_dict(...)` dead
    # branch was removed — the else arm was unreachable.)
    print('load from seq2seq model', self.model_name)
    model_dict = torch.load(self.model_name)
    model.seq.load_state_dict(model_dict['model_state_dict'])



k = 2  # top-k accuracy reported during training
# for classification
percentage = 1  # fraction of labels used (1 = 100%); only appears in output file names
few_knn = False
# global variable
cla_dim = self.cfg_data['cla_dim']  # 0 non labeled class
print('network fix state=', fix_state)

# Optimize only trainable parameters (some may be frozen when the
# fix_weight / fix_state modes are used).
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=learning_rate)

# (removed) dead `if False:` branch that re-loaded the checkpoint via
# load_model — the warm start above already restores the seq2seq weights.

criterion_seq = nn.L1Loss(reduction='none')           # per-element reconstruction loss; masked/averaged manually
criterion_cla = nn.CrossEntropyLoss(reduction='sum')  # summed over labeled samples in a batch

alpha = 0.1  # weight of the classification loss in the combined objective

# File names encode network type, alpha, label percentage, encoder depth and
# hidden size (e.g. 'FWPC_iter0A0.10_P100_en3_hid30.txt').
file_output = open(os.path.join(root_path, self.cfg_data['output_path'], '%sA%.2f_P%d_en%d_hid%d.txt' % (
    network, alpha, percentage * 100, en_num_layers, hidden_size)), 'w')
file_test_output = open(os.path.join(root_path, self.cfg_data['output_path'], '%sA%.2f_P%d_en%d_hid%d_test.txt' % (
    network, alpha, percentage * 100, en_num_layers, hidden_size)), 'w')
model_prefix = os.path.join(root_path, self.cfg_data['model_path'], '%sA%.2f_P%d_en%d_hid%d' % (
    network, alpha, percentage * 100, en_num_layers, hidden_size))
model_path = Path(model_prefix).parent
pre = Path(model_prefix).name
lambda1 = lambda ith_epoch: 0.95 ** (ith_epoch)  # exponential LR decay per epoch
model_scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda1)
past_loss = sys.float_info.max  # running best loss; passed to and updated by training()
# Mirror locals onto the FAKE `self` to match the OpenLabCluster training API.
self.train_loader = train_loader
self.hidden_size = hidden_size
self.num_class = num_class
self.alpha = alpha
self.few_knn = few_knn
self.device = device
print_every = 1  # passed to training(); presumably a print frequency — TODO confirm
self.canvas = None  # no GUI canvas in this headless script


def testing(file_test_output):
    """Evaluate the model on the held-out split.

    Runs one no-grad pass over the module-level ``test_loader``, accumulating
    the masked sequence-reconstruction loss and the classification loss, and
    writes "<seq_loss> <cla_loss> <accuracy>" to *file_test_output*.

    Labels follow the OpenLabCluster convention seen in the loss computation:
    ``semi_label == 0`` means unlabeled; labeled samples carry class index
    ``semi_label - 1``.

    Returns the classification accuracy over labeled samples (NaN when the
    split contains no labeled samples).
    """
    pred_label_l = []
    semi_label_l = []
    cla_loss_l = []
    seq_loss_l = []
    for data, seq_len, _, semi_label, _ in tqdm.tqdm(test_loader):
        input_tensor = data.to(device)
        # as_tensor avoids a copy/warning if the loader already yields a tensor
        semi_label = torch.as_tensor(semi_label, dtype=torch.long).to(device)
        with torch.no_grad():
            en_hi, de_out, cla_pre = model(input_tensor, seq_len)
            pred_label_l.extend(cla_pre.argmax(1).tolist())
            semi_label_l.extend((semi_label - 1).tolist())
            label = semi_label
            if sum(label != 0) != 0:
                cla_loss = criterion_cla(cla_pre[label != 0], label[label != 0] - 1)
            else:
                # BUG FIX: was the plain int 0, which crashed on `.item()`
                # below for batches with no labeled samples.
                cla_loss = torch.zeros((), device=device)

            # Mask out padded time steps: after summing, mask[i] == seq_len[i].
            mask = torch.zeros([len(seq_len), max(seq_len)]).to(device)
            for ith_batch in range(len(seq_len)):
                mask[ith_batch, 0:seq_len[ith_batch]] = 1
            mask = torch.sum(mask, 1)

            # Per-sample reconstruction loss averaged over valid time steps.
            seq_loss = torch.sum(criterion_seq(de_out, input_tensor), 2)
            seq_loss = torch.mean(torch.sum(seq_loss, 1) / mask)
            cla_loss_l.append(cla_loss.item())
            seq_loss_l.append(seq_loss.item())

    seq = np.mean(seq_loss_l)
    cla = np.mean(cla_loss_l)

    pred_label_l = np.array(pred_label_l)
    semi_label_l = np.array(semi_label_l)
    # Accuracy over labeled samples only: unlabeled entries become -1 after
    # the shift and can never match a non-negative predicted class.
    n_labeled = int(np.sum(semi_label_l >= 0))
    n_correct = int(np.sum(pred_label_l == semi_label_l))
    acc_test = n_correct / n_labeled if n_labeled else float('nan')

    file_test_output.write(f"{seq:.3f} {cla:.3f} {acc_test:.3f}\n")
    print(f"Test clas loss: {cla:.3f} seq_loss:{seq:.3f} acc:{acc_test:.3f}")
    return acc_test



# --- Main loop: alternate supervised training and held-out evaluation -----
for ith_epoch in range(epoch):
    past_loss, model_name, self.acc = training(ith_epoch, epoch, train_loader, print_every, self.canvas,
            model, optimizer, criterion_seq, criterion_cla, alpha, k, file_output, past_loss,model_path, pre,
            hidden_size, model_prefix, num_class,
            few_knn, device)
    acc_test = testing(file_test_output)
    model_scheduler.step()  # apply the 0.95**epoch LR decay
    file_output.flush()  # make per-epoch metrics visible on disk immediately
    file_test_output.flush()
