# conda activate OpenLabCluster
# python -m lilab.OpenLabCluster_train.a3_semiseqseq_init_train_seqseq DIR_semiseq2seq --epoch 3
import numpy as np
import torch
import torch.nn as nn
import os
import os.path as osp
from torch.utils.data import SubsetRandomSampler
import tqdm
from openlabcluster.training_utils.ssl.SeqModel import SemiSeq2Seq, seq2seq
from openlabcluster.training_utils.ssl.data_loader import SupDataset, pad_collate_iter
from openlabcluster.utils import auxiliaryfunctions
from torch import optim
from openlabcluster.training_utils.ssl.utilities import load_model
from openlabcluster.training_utils.ssl.seq_train import training
from pathlib import Path
import sys
import argparse
# project = '/DATA/taoxianming/rat/data/Mix_analysis/SexAgeDay55andzzcWTinAUT_MMFF/result32/olc-iter1-2024-05-23'

# CLI: positional project directory (must contain config.yaml) and an
# optional number of training epochs.
parser = argparse.ArgumentParser()
parser.add_argument('dir_semiseq2seq', type=str)
parser.add_argument('--epoch', type=int, default=3)
args = parser.parse_args()

project = args.dir_semiseq2seq
epoch = args.epoch # 2-3 epochs of training is usually sufficient
assert osp.exists(project), 'project {} not exists'.format(project)
# No pretrained checkpoint by default; set to a path to resume training.
model_path = None

# Lightweight stand-in for the GUI object the openlabcluster training code
# normally hangs its state off of; attributes are attached ad hoc below.
class FAKE: pass
cfg = osp.join(project, 'config.yaml')
cfg_data = auxiliaryfunctions.read_config(cfg)
self = FAKE()
self.cfg = cfg                 # path to the project's config.yaml
self.model_name = model_path   # optional checkpoint to resume from (None here)
self.cfg_data = cfg_data       # parsed config dictionary

# Unpack model/training hyperparameters from the project config.
num_class = self.cfg_data['num_class'][0]
root_path = self.cfg_data["project_path"]
batch_size = self.cfg_data['batch_size']
feature_length = self.cfg_data['feature_length']  # per-timestep input size fed to seq2seq
hidden_size = self.cfg_data['hidden_size']
cla_dim = self.cfg_data['cla_dim']
en_num_layers = self.cfg_data['en_num_layers']   # encoder layers
de_num_layers = self.cfg_data['de_num_layers']   # decoder layers
cla_num_layers = self.cfg_data['cla_num_layers']
fix_state = self.cfg_data['fix_state']
fix_weight = self.cfg_data['fix_weight']
teacher_force = self.cfg_data['teacher_force']
device = 'cuda:0'  # NOTE(review): hard-coded GPU device; no CPU fallback

# Labels are optional: if label.npy is absent, train without supervision.
label_path = os.path.join(self.cfg_data['label_path'],'label.npy')
if not os.path.exists(label_path):
    label_path = None
dataset_traintest = SupDataset(root_path, self.cfg_data['data_path'], self.cfg_data['train'] , label_path)
nsample_traintest = len(dataset_traintest)
nsample_train = int(nsample_traintest * 0.9)  # 90/10 train/validation split


# Deterministically shuffle all sample indices, then carve off the first
# 90% for training and leave the remainder for validation.
random_seed = 11111
np.random.seed(random_seed)
# np.random.permutation(n) draws identically to arange(n) + shuffle.
indices_shuffle = np.random.permutation(nsample_traintest)
indices_train = indices_shuffle[:nsample_train]
indices_test = indices_shuffle[nsample_train:]

# Both loaders share one dataset; only the index sampler differs.
train_sampler = SubsetRandomSampler(indices_train)
test_sampler = SubsetRandomSampler(indices_test)
train_loader = torch.utils.data.DataLoader(
    dataset_traintest, batch_size=batch_size,
    sampler=train_sampler, collate_fn=pad_collate_iter)
test_loader = torch.utils.data.DataLoader(
    dataset_traintest, batch_size=batch_size,
    sampler=test_sampler, collate_fn=pad_collate_iter)

print("training data length: %d, train_loader: %d" % (len(indices_train), len(train_loader)))
print("testing data length: %d, test_loader: %d" % (len(indices_test), len(test_loader)))



phase = 'PC'
# Force fixed-weight decoding regardless of the config value read earlier.
fix_weight = True

# Tag the run by which seq2seq variant is active.  fix_state takes
# precedence over fix_weight; 'O' marks the unconstrained model.
if fix_state:
    network = 'FS' + phase
elif fix_weight:
    network = 'FW' + phase
else:
    network = 'O' + phase

# hyperparameter
learning_rate = self.cfg_data['learning_rate']

# Build the plain (unsupervised) seq2seq autoencoder; input and output
# feature sizes are equal since it reconstructs its own input.
model:nn.Module = seq2seq(feature_length, hidden_size, feature_length, batch_size,
                        en_num_layers, de_num_layers, fix_state, fix_weight, teacher_force, device).to(device)
# Re-initialize every 2-D (weight-matrix) parameter with a small uniform
# distribution; 1-D parameters (biases etc.) keep their defaults.
with torch.no_grad():
    for child in list(model.children()):
        print(child)
        for param in list(child.parameters()):
            if param.dim() == 2:
                # nn.init.xavier_uniform_(param)
                nn.init.uniform_(param, a=-0.05, b=0.05)


k = 2  # top k accuracy
percentage = 1  # presumably the labeled-data fraction; used here only in file names
few_knn = False
# global variable
cla_dim = self.cfg_data['cla_dim']  # 0 non labeled class
print('network fix state=', fix_state)

# Optimize only trainable parameters (some may be frozen by fix_weight).
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=learning_rate)
if self.model_name is not None and os.path.exists(self.model_name):
    print('Load a <seq2seq> model.')
    model, optimizer = load_model(self.model_name, model, optimizer, device)

# Per-element L1 for reconstruction (masked/normalized later in testing);
# summed cross-entropy for the classification branch.
criterion_seq = nn.L1Loss(reduction='none')
criterion_cla = nn.CrossEntropyLoss(reduction='sum')

alpha = 0  # passed to training(); presumably classification-loss weight — TODO confirm

# Report files and checkpoint prefix encode the network variant and main
# hyperparameters in their names.
# NOTE(review): these files stay open for the life of the script and are
# never explicitly closed; they are flushed once per epoch instead.
file_output = open(os.path.join(root_path, self.cfg_data['output_path'], '%sA%.2f_P%d_en%d_hid%d.txt' % (
    network, alpha, percentage * 100, en_num_layers, hidden_size)), 'w')
file_test_output = open(os.path.join(root_path, self.cfg_data['output_path'], '%sA%.2f_P%d_en%d_hid%d_test.txt' % (
    network, alpha, percentage * 100, en_num_layers, hidden_size)), 'w')
model_prefix = os.path.join(root_path, self.cfg_data['model_path'], '%sA%.2f_P%d_en%d_hid%d' % (
    network, alpha, percentage * 100, en_num_layers, hidden_size))
model_path = Path(model_prefix).parent  # directory receiving checkpoints (rebinds model_path)
pre = Path(model_prefix).name           # checkpoint file-name stem
# Step-decay schedule: multiply the base LR by 0.95 every 5 epochs.
lambda1 = lambda ith_epoch: 0.95 ** (ith_epoch // 5)
model_scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda1)
past_loss = sys.float_info.max  # best loss so far, threaded through training()
# Mirror state onto the FAKE instance the training utilities expect.
self.train_loader = train_loader
self.hidden_size = hidden_size
self.num_class = num_class
self.alpha = alpha
self.few_knn = few_knn
self.device = device
print_every = 1
self.canvas = None  # no GUI canvas in headless (CLI) training


def testing(file_test_output):
    """Evaluate the current model on the held-out split.

    Computes the length-normalized L1 reconstruction loss over
    ``test_loader`` and writes ``"<seq> <cla>"`` to *file_test_output*.
    The classification branch is a placeholder in this phase (no
    classifier is attached), so class loss and predictions are constant
    zeros.

    Parameters:
        file_test_output: open writable text file for the per-epoch line.

    Returns:
        None (the caller ignores the return value).
    """
    pred_label_l = []
    semi_label_l = []
    cla_loss_l = []
    seq_loss_l = []
    for data, seq_len, _, semi_label, _ in tqdm.tqdm(test_loader, ncols=100):
        input_tensor = data.to(device)
        # as_tensor avoids the copy warning when semi_label is already a tensor.
        semi_label = torch.as_tensor(semi_label, dtype=torch.long).to(device)
        with torch.no_grad():
            en_hi, de_out = model(input_tensor, seq_len)
            # Placeholder classifier output: argmax of [[0]] is always 0.
            cla_pre = torch.Tensor([[0]])
            pred_label_l.extend(cla_pre.argmax(1).tolist())
            # semi_label uses 1-based class ids (0 = unlabeled) — TODO confirm.
            semi_label_l.extend((semi_label - 1).tolist())
            cla_loss = torch.Tensor([0])

            # Per-sample valid length.  Replaces the original
            # O(batch * max_len) Python mask-building loop: the row sums of
            # that 0/1 mask are exactly seq_len.
            mask = torch.as_tensor(seq_len, dtype=torch.float32, device=device)

            # Sum |error| over features, then over time, then normalize by
            # each sample's true length before averaging over the batch.
            seq_loss = torch.sum(criterion_seq(de_out, input_tensor), 2)
            seq_loss = torch.mean(torch.sum(seq_loss, 1) / mask)
            cla_loss_l.append(cla_loss.item())
            seq_loss_l.append(seq_loss.item())

    seq = np.mean(seq_loss_l)
    cla = np.mean(cla_loss_l)

    file_test_output.write(f"{seq:.3f} {cla:.3f}\n")
    print(f"Test clas loss: {cla:.3f} seq_loss:{seq:.3f}")
    return None


# Main loop: one training pass per epoch, then an evaluation pass on the
# held-out split, then bookkeeping (config update, LR decay, log flush).
for ith_epoch in range(epoch):
    past_loss, model_name = training(ith_epoch, epoch, train_loader, print_every, self.canvas,
            model, optimizer, criterion_seq, criterion_cla, alpha, k, file_output, past_loss,model_path, pre,
            hidden_size, model_prefix, num_class,
            few_knn, device)
    # NOTE(review): testing() always returns None, so acc_test is unused.
    acc_test = testing(file_test_output)
    # Record the latest checkpoint (when one was saved this epoch) in
    # config.yaml so downstream pipeline steps can find it.
    if model_name:
        auxiliaryfunctions.edit_config(self.cfg, {'tr_modelName':model_name, 'tr_modelType': 'seq2seq'})
    else:
        auxiliaryfunctions.edit_config(self.cfg, {'tr_modelType': 'seq2seq'})
    model_scheduler.step()  # apply the 0.95-every-5-epochs LR decay
    file_output.flush()
    file_test_output.flush()