from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import sys
if '../../../embeddings' not in sys.path:
    sys.path.append('../../../embeddings')

from seq2tensor import s2t
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (
    Dense, Bidirectional, concatenate, multiply, LeakyReLU,
    Conv1D, MaxPooling1D, GlobalAveragePooling1D, Input, GRU
)
from tensorflow.keras.optimizers import Adam, RMSprop
from tensorflow.keras.callbacks import TensorBoard
from datetime import datetime

import os
# Use CUDA's asynchronous memory allocator to reduce GPU memory fragmentation.
os.environ['TF_GPU_ALLOCATOR'] = 'cuda_malloc_async'

# Enable on-demand GPU memory growth so TensorFlow does not reserve all
# VRAM up front (important when several jobs share the same GPU).
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
    except RuntimeError as e:
        # Memory growth must be set before the GPUs have been initialized.
        print(e)

import numpy as np
from tqdm import tqdm
import yaml
import csv
import argparse

def parse_args(argv=None):
    """Parse command-line options for the PPI training script.

    Args:
        argv: Optional list of argument strings. Defaults to ``None``,
            which makes argparse read ``sys.argv[1:]`` — existing callers
            are unaffected, but tests can now pass an explicit list.

    Returns:
        argparse.Namespace with all configuration options.
    """
    parser = argparse.ArgumentParser(description="Process some arguments.")
    parser.add_argument("--id2seq_file", type=str, default='../../../sun/preprocessed/SEQ-Supp-ABCD.tsv', help="Path to the id2seq file")
    parser.add_argument("--ds_file", type=str, default='../../../sun/preprocessed/Supp-AB.tsv', help="Path to the dataset file")
    parser.add_argument("--label_index", type=int, default=2, help="Index of the label in the dataset file. default: 2")
    parser.add_argument("--output", type=str, default='results/15k_onehot_cnn.txt', help="Path to the results file")
    parser.add_argument("--use_emb", type=int, default=3, help="Index of the embedding file to use. default: 3")
    parser.add_argument("--hidden_dim", type=int, default=25, help="Hidden dimension for the model. default: 25")
    parser.add_argument("--n_epochs", type=int, default=50, help="Number of epochs for training. default: 50")
    parser.add_argument("--batch_size", type=int, default=512, help="Batch size for training. default: 512")
    parser.add_argument("--max_data", type=int, default=-1, help="Maximum number of data to use, -1 for no limit. default: -1")
    parser.add_argument("--seq_size", type=int, default=600, help="Sequence size for the model. default: 600")
    parser.add_argument("--model_format", type=str, default='h5', help="Model format (h5, keras). default: h5")
    return parser.parse_args(argv)

args = parse_args()

# --- Load the protein-id -> amino-acid-sequence table ---------------------
id2seq_file = args.id2seq_file
id2index = {} # dict[protein_id: str] = index: int
seqs = [] # list[index] = seq: str
index = 0
with open(id2seq_file, 'r') as file:
    for line in file:
        # Each line is: <protein_id>\t<amino-acid sequence>
        line = line.strip().split('\t')
        id2index[line[0]] = index 
        seqs.append(line[1]) # list[index] = seq
        index += 1

# Candidate amino-acid embedding tables, selected by --use_emb.
emb_files = ['../../../embeddings/default_onehot.txt',
             '../../../embeddings/string_vec5.txt',
             '../../../embeddings/CTCoding_onehot.txt',
             '../../../embeddings/vec7_CTC.txt',
             '../../../embeddings/vec7_CTC_onehot.txt'] # only index 1-3 are available
use_emb = args.use_emb
seq2t = s2t(emb_files[use_emb])  # sequence-to-tensor embedder (see seq2tensor)

# Frequently used CLI settings pulled into module-level names.
hidden_dim = args.hidden_dim
n_epochs = args.n_epochs

ds_file = args.ds_file
label_index = args.label_index
output = args.output
batch_size = args.batch_size

def process_dataset(ds_file, id2index, seqs, max_data = -1, sid1_index = 0, sid2_index = 1, skip_head = True):
    """Load PPI pairs from a TSV file, keeping only pairs with known sequences.

    Each kept row has its two protein-id columns replaced in place by
    integer indices into the returned ``seq_array``.

    Args:
        ds_file: Path to the tab-separated dataset file.
        id2index: Mapping protein_id -> index into ``seqs``.
        seqs: List of amino-acid sequences, parallel to ``id2index`` values.
        max_data: Stop after this many matched pairs; <= 0 means no limit.
        sid1_index: Column of the first interactor id.
        sid2_index: Column of the second interactor id.
        skip_head: If True, skip the first (header) line.

    Returns:
        (seq_array, raw_data): unique sequences in first-seen order, and the
        matched rows with ids replaced by ``seq_array`` indices.
    """
    seq_array: list[str] = []        # unique sequences, in first-seen order
    raw_data = []                    # matched rows, ids rewritten to indices
    id2_aid: dict[str, int] = {}     # protein_id -> index into seq_array

    limit_data = max_data > 0
    sid = 0    # next index to assign in seq_array
    count = 0  # matched pairs kept so far

    # Fix: open the file in a `with` block — the original leaked the handle.
    with open(ds_file) as handle:
        for line in handle:
            if skip_head:
                skip_head = False
                continue
            line = line.rstrip('\n').rstrip('\r').split('\t')
            interactor_a_id = line[sid1_index]
            interactor_b_id = line[sid2_index]
            # Skip pairs for which either sequence is unknown.
            if id2index.get(interactor_a_id) is None or id2index.get(interactor_b_id) is None:
                continue

            # Register both interactors (deduplicated original twin branches).
            for pid in (interactor_a_id, interactor_b_id):
                if pid not in id2_aid:
                    id2_aid[pid] = sid
                    sid += 1
                    seq_array.append(seqs[id2index[pid]])

            line[sid1_index] = id2_aid[interactor_a_id]
            line[sid2_index] = id2_aid[interactor_b_id]

            raw_data.append(line)
            if limit_data:
                count += 1
                if count >= max_data:
                    break
    print(f'loaded {len(raw_data)} matched PPI-pairs in ds_file')
    return seq_array, raw_data

seq_array, raw_data = process_dataset(ds_file, id2index, seqs, max_data = args.max_data)

seq_size = args.seq_size

# Report amino-acid sequence length statistics for the matched proteins.
len_m_seq = np.array([len(line) for line in seq_array])
avg_m_seq = int(np.average(len_m_seq))
max_m_seq = max(len_m_seq)
print(f'avg aa seq length: {avg_m_seq}, max aa seq length: {max_m_seq}')

print("Embedding unique AA sequences...")
# Shape (n_proteins, seq_size, dim); presumably embed_normalized pads or
# truncates each sequence to seq_size — TODO confirm against seq2tensor.s2t.
seq_tensor = np.array([seq2t.embed_normalized(line, seq_size) for line in tqdm(seq_array)], dtype=np.float32)

# transform index to np.array
seq_index1 = np.array([line[0] for line in raw_data])
seq_index2 = np.array([line[1] for line in raw_data])

# Text label -> one-hot column: label '1' fills column 0, label '0' column 1.
class_map = {'0':1,'1':0}
class_labels = np.zeros((len(raw_data), 2))
# n_pairs x 2 matrix, [i,0] = 1 if i-th pair is positive, [i,1] = 1 if i-th pair is negative
for i in range(len(raw_data)):
    class_labels[i][class_map[raw_data[i][label_index]]] = 1.

dim = seq2t.dim # dimension of amino acid embedding

def build_model():
    """Build the siamese RCNN used for PPI prediction.

    Two inputs of shape (seq_size, dim) pass through a single shared tower
    of five Conv1D -> MaxPooling1D -> Bidirectional GRU blocks (each GRU
    output is concatenated back onto its own input), then a final Conv1D
    and global average pooling.  The two tower outputs are merged by
    element-wise multiplication and classified by a small dense head with
    a 2-way softmax.

    Uses the module-level globals ``seq_size``, ``dim`` and ``hidden_dim``.

    Returns:
        An uncompiled tf.keras Model mapping [seq1, seq2] to a (batch, 2)
        softmax output.
    """
    seq_input1 = Input(shape=(seq_size, dim), name='seq1')
    seq_input2 = Input(shape=(seq_size, dim), name='seq2')

    # Shared (siamese) layers: six convolutions, five BiGRUs, and the
    # pooling size applied after each of the first five convolutions.
    # (Originally written out twice per input; deduplicated into one tower.)
    convs = [Conv1D(hidden_dim, 3) for _ in range(6)]
    grus = [Bidirectional(GRU(hidden_dim, return_sequences=True)) for _ in range(5)]
    pool_sizes = [3, 3, 2, 2, 2]

    def _tower(x):
        # One block: conv -> pool -> BiGRU, concatenating the GRU output
        # onto its own input (residual-style skip connection).
        for conv, gru, pool in zip(convs[:5], grus, pool_sizes):
            x = MaxPooling1D(pool)(conv(x))
            x = concatenate([gru(x), x])
        return GlobalAveragePooling1D()(convs[5](x))

    s1 = _tower(seq_input1)
    s2 = _tower(seq_input2)

    # Merge the two protein representations and classify.
    merge_text = multiply([s1, s2])
    x = Dense(hidden_dim, activation='linear')(merge_text)
    x = LeakyReLU(alpha=0.3)(x)
    x = Dense(int((hidden_dim + 7) / 2), activation='linear')(x)
    x = LeakyReLU(alpha=0.3)(x)
    main_output = Dense(2, activation='softmax')(x)
    return Model(inputs=[seq_input1, seq_input2], outputs=[main_output])

# TensorBoard log directory, timestamped so runs do not overwrite each other.
log_dir = os.path.join("logs", "fit", datetime.now().strftime("%Y%m%d-%H%M%S"))
tensorboard_callback = TensorBoard(
    log_dir=log_dir,
    write_graph=True,          # record the computation graph
    write_images=True,         # visualize model weights as images
    update_freq='epoch'       # log metrics once per epoch
)

def reset_gpu():
    """Release TensorFlow GPU memory between cross-validation folds.

    Clears the Keras session, then re-enables on-demand memory growth on
    every visible GPU — consistent with the start-up GPU configuration
    (the original only touched ``gpus[0]`` and had no RuntimeError guard).
    """
    tf.keras.backend.clear_session()
    for gpu in tf.config.experimental.list_physical_devices('GPU'):
        try:
            tf.config.experimental.set_memory_growth(gpu, True)
        except RuntimeError as e:
            # Memory growth can only be set before a GPU is initialized.
            print(e)

from sklearn.model_selection import KFold
# 10-way split, but only the first `tries` reasonably class-balanced folds
# are actually used for cross-validation.
kf = KFold(n_splits=10, shuffle=True)
tries = 3
cur = 0
train_test = []
for train, test in kf.split(class_labels):
    # Reject folds whose training split is more than 80% or less than 20%
    # positive (column 0 of class_labels counts the positive class).
    if np.sum(class_labels[train], 0)[0] > 0.8 * len(train) or np.sum(class_labels[train], 0)[0] < 0.2 * len(train):
        continue
    train_test.append((train, test))
    cur += 1
    if cur >= tries:
        break

print(f'Setup {len(train_test)} cross-validation folds in total')

set_cv = 1

# Prepare to collect metrics for all folds
all_metrics = []

# Input pipeline: serve (seq1, seq2) pairs and labels via tf.data.
def make_tf_dataset(seq_tensor, seq_index1, seq_index2, class_labels, indices, batch_size, shuffle=True):
    """Build a batched, prefetched tf.data pipeline for one CV fold.

    Gathers the embedded sequences for both interactors of every pair in
    ``indices``, pairs them with their one-hot labels, optionally shuffles
    (full-dataset buffer), then batches and prefetches.
    """
    left = seq_tensor[seq_index1[indices]]
    right = seq_tensor[seq_index2[indices]]
    labels = class_labels[indices]

    dataset = tf.data.Dataset.from_tensor_slices(((left, right), labels))
    if shuffle:
        dataset = dataset.shuffle(buffer_size=len(indices))
    return dataset.batch(batch_size).prefetch(tf.data.AUTOTUNE)

for train, test in train_test:
    # Confusion-matrix counters for this fold.
    num_hit = 0.        # correctly classified pairs
    num_total = 0.      # evaluated pairs
    num_pos = 0.        # ground-truth positives
    num_true_pos = 0.
    num_false_pos = 0.
    num_true_neg = 0.
    num_false_neg = 0.
    print(f'Training on fold {set_cv}...')
    rms = RMSprop(learning_rate=0.001)
    merge_model = build_model()
    merge_model.compile(optimizer=rms, loss='categorical_crossentropy', metrics=['accuracy'])

    # Use tf.data for training and validation
    train_ds = make_tf_dataset(seq_tensor, seq_index1, seq_index2, class_labels, train, batch_size=batch_size, shuffle=True)
    test_ds = make_tf_dataset(seq_tensor, seq_index1, seq_index2, class_labels, test, batch_size=32, shuffle=False)

    merge_model.fit(train_ds, epochs=n_epochs, callbacks=[tensorboard_callback])
    merge_model.save(f'{output}_model_weights_cv{set_cv}.{args.model_format}')
    print(f'Evaluating on fold {set_cv}...')
    pred = merge_model.predict(test_ds)
    reset_gpu()
    # Convert predictions to numpy if needed
    pred = np.array(pred)
    y_true = class_labels[test]
    # Column 0 is the positive class (see class_map above).
    for i in range(len(y_true)):
        num_total += 1
        if np.argmax(y_true[i]) == np.argmax(pred[i]):
            num_hit += 1
        if y_true[i][0] > 0.:
            num_pos += 1.
            if pred[i][0] > pred[i][1]:
                num_true_pos += 1
            else:
                num_false_neg += 1
        else:
            if pred[i][0] > pred[i][1]:
                num_false_pos += 1
            else:
                num_true_neg += 1
    accuracy = num_hit / num_total
    prec = num_true_pos / (num_true_pos + num_false_pos) if (num_true_pos + num_false_pos) > 0 else 0.0
    recall = num_true_pos / num_pos if num_pos > 0 else 0.0
    # Fix: specificity is TN / (TN + FP); the original divided by (TN + FN).
    spec = num_true_neg / (num_true_neg + num_false_pos) if (num_true_neg + num_false_pos) > 0 else 0.0
    f1 = 2. * prec * recall / (prec + recall) if (prec + recall) > 0 else 0.0
    # Fix: MCC denominator is (TP+FP)(TP+FN)(TN+FP)(TN+FN); the original
    # used (TP+TN) and (FP+FN) as the first and last factors.
    mcc_den = ((num_true_pos + num_false_pos) * (num_true_pos + num_false_neg) * (num_true_neg + num_false_pos) * (num_true_neg + num_false_neg))
    mcc = (num_true_pos * num_true_neg - num_false_pos * num_false_neg) / (mcc_den ** 0.5) if mcc_den > 0 else 0.0
    print(f'accuracy: {accuracy:.4f}, prec: {prec:.4f}, recall: {recall:.4f}, spec: {spec:.4f}, f1: {f1:.4f}, mcc: {mcc:.4f}')
    all_metrics.append([set_cv, accuracy, prec, recall, spec, f1, mcc])
    set_cv += 1

# Persist the per-fold metrics so runs can be compared offline.
csv_path = f'{output}_metrics.csv'
with open(csv_path, 'w', newline='') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(['fold', 'accuracy', 'precision', 'recall', 'specificity', 'f1', 'mcc'])
    writer.writerows(all_metrics)

# Report the mean of every metric column (skipping the fold number).
if all_metrics:
    avg_metrics = np.mean(np.array(all_metrics)[:, 1:], axis=0)
    print('Average metrics across all folds:')
    metric_names = ('accuracy', 'prec', 'recall', 'spec', 'f1', 'mcc')
    print(', '.join(f'{label}: {value:.4f}' for label, value in zip(metric_names, avg_metrics)))

# Record the full run configuration next to the results for reproducibility.
hyperparams = {
    'id2seq_file': id2seq_file,
    'ds_file': ds_file,
    'label_index': label_index,
    'output': output,
    'use_emb': use_emb,
    'hidden_dim': hidden_dim,
    'n_epochs': n_epochs,
    'batch_size': batch_size,
    'max_data': args.max_data,
    'seq_size': args.seq_size,
    'emb_file': emb_files[use_emb],
    'cross_validation_folds': len(train_test),
    'timestamp': datetime.now().strftime("%Y%m%d-%H%M%S"),
}

# Save to YAML file
yaml_path = f'{output}_hyperparams.yaml'
with open(yaml_path, 'w') as f:
    yaml.dump(hyperparams, f)
print(f'Hyperparameters saved to {yaml_path}')