import torch
import torch.nn as nn
import pandas as pd
import numpy as np
from sklearn.model_selection import KFold
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
from collections import Counter
import random
import os
from datetime import datetime
import json
import pickle
import warnings
import re
import threading
import queue

# Suppress the prototype-stage nested-tensor warning emitted by PyTorch
warnings.filterwarnings("ignore", message="The PyTorch API of nested tensors is in prototype stage")

# GUI-related imports
import tkinter as tk
from tkinter import ttk, scrolledtext, messagebox, filedialog
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg

# Configure matplotlib fonts so Chinese (CJK) glyphs render correctly
plt.rcParams['font.sans-serif'] = ['SimHei', 'Arial Unicode MS', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False

# Seed all RNGs for reproducibility
torch.manual_seed(42)
np.random.seed(42)
random.seed(42)

# Physicochemical property table for the 20 standard amino acids
AMINO_ACID_PROPERTIES = {
    # Format: [molecular weight, hydrophobicity, isoelectric point, volume, polarity, aromaticity, charge]
    'A': [89.09, 1.8, 6.00, 88.6, 8.1, 0, 0],  # Alanine
    'R': [174.20, -4.5, 10.76, 173.4, 105.0, 0, 1],  # Arginine
    'N': [132.12, -3.5, 5.41, 114.1, 116.0, 0, 0],  # Asparagine
    'D': [133.10, -3.5, 2.77, 111.1, 118.0, 0, -1],  # Aspartic acid
    'C': [121.16, 2.5, 5.07, 108.5, 55.0, 0, 0],  # Cysteine
    'E': [147.13, -3.5, 3.22, 138.4, 120.0, 0, -1],  # Glutamic acid
    'Q': [146.15, -3.5, 5.65, 143.9, 117.0, 0, 0],  # Glutamine
    'G': [75.07, -0.4, 5.97, 60.1, 60.0, 0, 0],  # Glycine
    'H': [155.16, -3.2, 7.59, 153.2, 79.0, 1, 1],  # Histidine
    'I': [131.17, 4.5, 6.02, 166.7, 0.0, 0, 0],  # Isoleucine
    'L': [131.17, 3.8, 5.98, 166.7, 0.0, 0, 0],  # Leucine
    'K': [146.19, -3.9, 9.74, 168.6, 100.0, 0, 1],  # Lysine
    'M': [149.21, 1.9, 5.74, 162.9, 5.0, 0, 0],  # Methionine
    'F': [165.19, 2.8, 5.48, 189.9, 0.0, 1, 0],  # Phenylalanine
    'P': [115.13, -1.6, 6.30, 112.7, 61.0, 0, 0],  # Proline
    'S': [105.09, -0.8, 5.68, 89.0, 95.0, 0, 0],  # Serine
    'T': [119.12, -0.7, 5.87, 116.1, 89.0, 0, 0],  # Threonine
    'W': [204.23, -0.9, 5.89, 227.8, 0.0, 1, 0],  # Tryptophan
    'Y': [181.19, -1.3, 5.66, 193.6, 63.0, 1, 0],  # Tyrosine
    'V': [117.15, 4.2, 5.96, 140.0, 0.0, 0, 0]  # Valine
}

# Secondary-structure propensity per amino acid
# NOTE(review): values resemble Chou-Fasman propensities — confirm source
SECONDARY_STRUCTURE_PROPENSITY = {
    # [alpha-helix propensity, beta-sheet propensity, turn propensity]
    'A': [1.42, 0.83, 0.66],
    'R': [0.98, 0.93, 0.95],
    'N': [0.67, 0.89, 1.56],
    'D': [1.01, 0.54, 1.46],
    'C': [0.70, 1.19, 1.19],
    'E': [1.51, 0.37, 0.74],
    'Q': [1.11, 1.10, 0.98],
    'G': [0.57, 0.75, 1.56],
    'H': [1.00, 0.87, 0.95],
    'I': [1.08, 1.60, 0.47],
    'L': [1.21, 1.30, 0.59],
    'K': [1.17, 0.74, 1.01],
    'M': [1.45, 1.05, 0.60],
    'F': [1.13, 1.38, 0.59],
    'P': [0.57, 0.55, 1.52],
    'S': [0.77, 0.75, 1.43],
    'T': [0.83, 1.19, 0.96],
    'W': [1.08, 1.37, 0.96],
    'Y': [0.69, 1.47, 1.14],
    'V': [1.06, 1.70, 0.50]
}


# Check CUDA availability and handle compatibility issues
def setup_device():
    """Pick the compute device, falling back to CPU on any CUDA failure."""
    try:
        if not torch.cuda.is_available():
            print("CUDA not available. Using CPU.")
            return torch.device('cpu')
        device = torch.device('cuda')
        print(f"CUDA is available. Using device: {device}")
        print(f"CUDA device: {torch.cuda.get_device_name(0)}")
        # Enable cuDNN autotuning for fixed-shape workloads
        torch.backends.cudnn.benchmark = True
        torch.backends.cudnn.enabled = True
        return device
    except Exception as e:
        print(f"CUDA test failed: {e}")
        print("Falling back to CPU.")
        return torch.device('cpu')


# Module-level compute device shared by the training / evaluation / prediction helpers
device = setup_device()


class PeptideDataset(Dataset):
    """Dataset of peptide sequences with per-residue physicochemical features.

    Each item is a dict with the integer-encoded sequence, a 10-dim feature
    matrix (7 physicochemical + 3 secondary-structure values per residue),
    the multi-label target vector, and the raw sequence string. Sequences are
    truncated/zero-padded to ``max_length``.
    """

    def __init__(self, sequences, labels, max_length=100):
        self.sequences = sequences
        self.labels = labels
        self.max_length = max_length
        self.aa_to_idx = self._create_aa_mapping()

    def _create_aa_mapping(self):
        """Map the 20 standard residues to indices 1..20; 0 is the PAD token."""
        alphabet = 'ACDEFGHIKLMNPQRSTVWY'
        mapping = {letter: pos for pos, letter in enumerate(alphabet, start=1)}
        mapping['<PAD>'] = 0
        return mapping

    def _get_aa_features(self, aa):
        """Return the 10-dim feature row for one residue (zeros if unknown)."""
        key = aa.upper()
        props = AMINO_ACID_PROPERTIES.get(key)
        if props is None:
            return [0] * 10  # 7 physicochemical + 3 structural slots
        return props + SECONDARY_STRUCTURE_PROPENSITY.get(key, [0, 0, 0])

    def _encode_sequence_with_features(self, sequence):
        """Encode one sequence into index ids and feature rows, padded to max_length."""
        ids, rows = [], []
        for residue in sequence[:self.max_length]:
            ids.append(self.aa_to_idx.get(residue.upper(), 0))
            rows.append(self._get_aa_features(residue))

        # Zero-pad up to the fixed length
        pad = self.max_length - len(ids)
        if pad > 0:
            ids.extend([0] * pad)
            rows.extend([0] * 10 for _ in range(pad))

        return ids, rows

    def __len__(self):
        """Number of sequences."""
        return len(self.sequences)

    def __getitem__(self, idx):
        """Return the encoded tensors plus the original sequence string."""
        seq = self.sequences[idx]
        ids, rows = self._encode_sequence_with_features(seq)
        return {
            'sequence': torch.tensor(ids, dtype=torch.long),
            'features': torch.tensor(rows, dtype=torch.float32),
            'label': torch.tensor(self.labels[idx], dtype=torch.float32),
            'original_sequence': seq,
        }


class EnhancedTransformerPeptideModel(nn.Module):
    """Transformer classifier over peptide sequences plus physicochemical features.

    Residue ids and per-residue feature vectors are each embedded into
    d_model//2 dims, concatenated, augmented with sinusoidal positional
    encodings, run through a Transformer encoder, masked-mean-pooled over
    non-pad positions, and classified by an MLP head. The head emits raw
    logits (one per class) intended for BCEWithLogitsLoss / sigmoid.
    """

    def __init__(self, vocab_size, feature_dim=10, d_model=128, nhead=8, num_layers=3, max_length=100, num_classes=5,
                 dropout=0.1):
        super(EnhancedTransformerPeptideModel, self).__init__()

        self.d_model = d_model
        self.feature_dim = feature_dim
        self.max_length = max_length

        # Residue-id embedding; index 0 is the padding token
        self.sequence_embedding = nn.Embedding(vocab_size, d_model // 2, padding_idx=0)

        # Linear projection of the raw physicochemical feature vector
        self.feature_embedding = nn.Linear(feature_dim, d_model // 2)

        # Fixed sinusoidal positional table, shape [1, max_length, d_model].
        # Kept as a plain attribute (not a buffer) so existing checkpoints stay
        # compatible; it is moved onto the input's device in forward().
        self.pos_encoding = self._create_positional_encoding(max_length, d_model)

        # Transformer encoder stack
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=d_model,
            nhead=nhead,
            dim_feedforward=512,
            dropout=dropout,
            batch_first=True,
            activation='gelu'
        )
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers)

        # Unused (pooling is done manually in forward); kept so any external
        # code referencing model.global_pool continues to work
        self.global_pool = nn.AdaptiveAvgPool1d(1)

        # MLP classification head
        self.classifier = nn.Sequential(
            nn.LayerNorm(d_model),
            nn.Linear(d_model, 256),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(256, 128),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(128, 64),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(64, num_classes)
        )

    def _create_positional_encoding(self, max_len, d_model):
        """Build the standard sin/cos positional-encoding table [1, max_len, d_model]."""
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-np.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        return pe.unsqueeze(0)

    def forward(self, sequence_ids, features):
        """Classify a batch of peptides.

        Args:
            sequence_ids: [batch, seq_len] long tensor of residue indices (0 = pad).
            features: [batch, seq_len, feature_dim] float tensor of per-residue features.

        Returns:
            [batch, num_classes] raw logits.
        """
        seq_embed = self.sequence_embedding(sequence_ids)  # [batch, seq_len, d_model//2]
        feat_embed = self.feature_embedding(features)  # [batch, seq_len, d_model//2]
        combined_embed = torch.cat([seq_embed, feat_embed], dim=-1)  # [batch, seq_len, d_model]

        # Bug fix: the cap was hard-coded to 100 instead of the positional-table
        # length, and when the input was truncated the padding mask kept the
        # original length, crashing the encoder on a shape mismatch. Derive the
        # cap from the table and truncate the mask consistently.
        max_len = self.pos_encoding.size(1)
        seq_len = min(combined_embed.size(1), max_len)
        pos_enc = self.pos_encoding[:, :seq_len, :].to(combined_embed.device)
        x = combined_embed[:, :seq_len, :] + pos_enc

        # Padding mask [batch, seq_len]: True where the token is PAD (0)
        mask = (sequence_ids[:, :seq_len] == 0)

        x = self.transformer_encoder(x, src_key_padding_mask=mask)

        # Masked mean pooling over non-pad positions
        mask_expanded = mask.unsqueeze(-1).expand_as(x)  # [batch, seq_len, d_model]
        x_masked = x.masked_fill(mask_expanded, 0)
        lengths = (~mask).sum(dim=1, keepdim=True).float()  # [batch, 1]
        x_pooled = x_masked.sum(dim=1) / lengths.clamp(min=1)  # [batch, d_model]

        return self.classifier(x_pooled)


def parse_function_labels(function_string, all_functions):
    """Convert a free-form function string into a multi-hot vector over all_functions.

    Comma-separated strings are split on commas; otherwise whitespace and the
    delimiters / ; | are used. Tokens not present in all_functions are ignored.
    Missing/empty input yields an all-zero vector.
    """
    if pd.isna(function_string) or not function_string:
        return [0.0] * len(all_functions)

    text = str(function_string).strip()

    if ',' in text:
        tokens = [part.strip() for part in text.split(',')]
    else:
        tokens = [part.strip() for part in re.split(r'[\s/;|]+', text) if part.strip()]

    labels = [0.0] * len(all_functions)
    for token in tokens:
        name = token.strip()
        if name in all_functions:
            labels[all_functions.index(name)] = 1.0

    return labels


def load_csv_data(file_path):
    """Load peptide sequences and multi-label function annotations from a CSV file.

    Two layouts are handled:
      * single-column files where the sequence and its function keywords are
        fused into one string (split at the first recognized keyword);
      * multi-column files with the sequence in the first column and labels in
        a column whose name suggests functions/activity.

    On any loading error a small built-in sample dataset is returned instead.

    Returns:
        (sequences, labels, function_names) where labels is a list of
        multi-hot float vectors aligned with function_names.
    """
    try:
        try:
            df = pd.read_csv(file_path)
        except Exception:
            # Fix: was a bare `except:` (also swallowed KeyboardInterrupt/SystemExit).
            # Retry with the python engine and delimiter sniffing for non-comma files.
            df = pd.read_csv(file_path, sep=None, engine='python')

        print(f"Loaded {len(df)} rows from {file_path}")

        sequences = []
        labels_data = []

        if len(df.columns) == 1:
            # Single column: split each row into sequence + function keywords
            column_name = df.columns[0]
            for idx, row in df.iterrows():
                line = str(row[column_name]).strip()
                if 'Antimicrobial' in line or 'Antibacterial' in line:
                    func_keywords = ['Antimicrobial', 'Antibacterial', 'Anti-Gram+', 'Anti-Gram-', 'Antifungal']
                    # Find the earliest keyword occurrence; everything before it is sequence
                    func_start = len(line)
                    for keyword in func_keywords:
                        pos = line.find(keyword)
                        if pos != -1:
                            func_start = min(func_start, pos)

                    if func_start < len(line):
                        sequence = line[:func_start].strip()
                        functions_str = line[func_start:].strip()
                    else:
                        sequence = line
                        functions_str = ""
                else:
                    sequence = line
                    functions_str = ""

                # Keep only plausible sequences (length > 3 with at least one letter)
                if len(sequence) > 3 and any(c.isalpha() for c in sequence):
                    sequences.append(sequence)
                    labels_data.append(functions_str)
        else:
            # Multi-column: first column holds sequences
            sequence_column = df.columns[0]
            sequences = df[sequence_column].astype(str).tolist()

            # Look for a label column by name
            function_column = None
            for col in df.columns[1:]:
                if any(keyword.lower() in col.lower() for keyword in
                       ['function', 'activity', 'label', 'antimicrobial', 'antibacterial']):
                    function_column = col
                    break

            if function_column:
                labels_data = df[function_column].astype(str).tolist()
            else:
                # Fall back to the second column, or no labels at all
                if len(df.columns) > 1:
                    labels_data = df[df.columns[1]].astype(str).tolist()
                else:
                    labels_data = [""] * len(sequences)

        standard_functions = ['Antimicrobial', 'Antibacterial', 'Anti-Gram+', 'Anti-Gram-', 'Antifungal']

        # Collect which of the standard function names actually occur in the data
        all_functions_found = set()
        for func_str in labels_data:
            if pd.notna(func_str) and func_str and func_str.lower() not in ['nan', 'none', '']:
                funcs = re.split(r'[,;/|]+', str(func_str))
                for func in funcs:
                    func_clean = func.strip()
                    if func_clean:
                        for std_func in standard_functions:
                            if std_func.lower() in func_clean.lower():
                                all_functions_found.add(std_func)

        if all_functions_found:
            final_functions = list(all_functions_found)
        else:
            final_functions = standard_functions

        # Convert each annotation string into a multi-hot vector
        labels = []
        for func_str in labels_data:
            label_vector = parse_function_labels(func_str, final_functions)
            labels.append(label_vector)

        print(f"Sequences: {len(sequences)}, Labels: {len(labels)}")
        print(f"Function names: {final_functions}")

        if labels:
            labels_array = np.array(labels)
            print(f"\nLabel statistics:")
            for i, func_name in enumerate(final_functions):
                if i < labels_array.shape[1]:
                    positive_count = np.sum(labels_array[:, i] > 0.5)
                    print(f"  {func_name}: {positive_count}/{len(labels)} ({positive_count / len(labels) * 100:.1f}%)")

        return sequences, labels, final_functions

    except Exception as e:
        # Best-effort fallback: report the error and return a built-in sample set
        print(f"Error loading CSV file: {e}")
        import traceback
        traceback.print_exc()
        sequences = [
            "GLFDIVKKVVGALGSL", "INLKALAAALKKLL", "RLARIVVIRVAR", "GLFDIVKKVVGAIGQV",
            "INLKALAALLKKLL", "RLARIVVIRVAG", "GLFDIVKKVVGAIGQL", "INLKALAAALKKLLG",
            "RLARIVVIRWAR", "GLFDIVKKVVGAIGQLL", "INLKALAALLKKLLG", "RLARIVVIRVARG",
            "GLFDIVKKVVGAIGQLLG", "INLKALAAALKKLLGG", "RLARIVVIRVARGS", "GLFDIVKKVVGAIGQLLGS",
            "INLKALAALLKKLLGG", "RLARIVVIRVARGSS", "GLFDIVKKVVGAIGQLLGST", "INLKALAAALKKLLGGT"
        ]

        labels = []
        function_names = ['Antimicrobial', 'Antibacterial', 'Anti-Gram+', 'Anti-Gram-', 'Antifungal']

        # Assign plausible label patterns in blocks of five sequences
        for i, seq in enumerate(sequences):
            if i < 5:
                label_vector = [1.0, 1.0, 0.0, 0.0, 1.0]
            elif i < 10:
                label_vector = [1.0, 1.0, 0.0, 0.0, 0.0]
            elif i < 15:
                label_vector = [1.0, 1.0, 1.0, 1.0, 1.0]
            else:
                label_vector = [1.0, 1.0, 1.0, 1.0, 0.0]
            labels.append(label_vector)

        print(f"Created {len(sequences)} sample sequences with realistic labels")
        return sequences, labels, function_names


def train_single_fold(model, train_loader, val_loader, fold_num, num_epochs=100, learning_rate=0.001):
    """Train one cross-validation fold with optional AMP, OneCycle LR, and early stopping.

    The best checkpoint (lowest validation loss) is written to
    'best_peptide_model_fold_{fold_num}.pth' and reloaded before returning.
    Uses the module-level `device`.

    Args:
        model: network to train; moved onto `device`.
        train_loader / val_loader: DataLoaders yielding dicts with 'sequence',
            'features' and 'label' tensors.
        fold_num: 1-based fold index for logging and checkpoint naming.
        num_epochs: maximum epochs.
        learning_rate: peak LR for the OneCycle schedule.

    Returns:
        (model, best_val_loss, train_losses, val_losses)
    """
    criterion = nn.BCEWithLogitsLoss()
    # NOTE(review): AdamW already applies weight decay; the explicit L2 term
    # added to the loss below regularizes a second time — confirm intended.
    optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate, weight_decay=1e-4)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(
        optimizer,
        max_lr=learning_rate,
        epochs=num_epochs,
        steps_per_epoch=len(train_loader),
        pct_start=0.3,
        anneal_strategy='cos'
    )

    model.to(device)

    best_val_loss = float('inf')
    patience_counter = 0
    patience_limit = 15  # epochs without improvement before early stopping
    train_losses = []
    val_losses = []

    print(f"\n=== Training Fold {fold_num} ===")

    # Mixed precision only when CUDA is present and the AMP scaler API exists
    use_amp = torch.cuda.is_available() and hasattr(torch.cuda.amp, 'GradScaler')
    scaler = torch.cuda.amp.GradScaler() if use_amp else None

    for epoch in range(num_epochs):
        model.train()
        train_loss = 0.0
        train_batches = 0
        total_samples = 0

        for batch_idx, batch in enumerate(train_loader):
            sequence_ids = batch['sequence'].to(device, non_blocking=True)
            features = batch['features'].to(device, non_blocking=True)
            labels = batch['label'].to(device, non_blocking=True)

            optimizer.zero_grad()

            try:
                if use_amp and scaler is not None:
                    with torch.cuda.amp.autocast():
                        outputs = model(sequence_ids, features)
                        loss = criterion(outputs, labels)

                        # Explicit L2 penalty over all parameters (see NOTE above)
                        l2_reg = torch.tensor(0., device=device)
                        for param in model.parameters():
                            l2_reg += torch.norm(param)
                        loss += 1e-4 * l2_reg

                    scaler.scale(loss).backward()
                    # Unscale before clipping so the norm is computed on true grads
                    scaler.unscale_(optimizer)
                    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
                    scaler.step(optimizer)
                    scaler.update()
                else:
                    outputs = model(sequence_ids, features)
                    loss = criterion(outputs, labels)

                    l2_reg = torch.tensor(0., device=device)
                    for param in model.parameters():
                        l2_reg += torch.norm(param)
                    loss += 1e-4 * l2_reg

                    loss.backward()
                    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
                    optimizer.step()

                # OneCycle is stepped per batch, not per epoch
                scheduler.step()

                train_loss += loss.item()
                train_batches += 1
                total_samples += sequence_ids.size(0)

            except Exception as e:
                # Best-effort training: skip a failing batch rather than abort the fold
                print(f"Training error in fold {fold_num}, epoch {epoch}, batch {batch_idx}: {e}")
                continue

        # --- validation pass ---
        model.eval()
        val_loss = 0.0
        val_batches = 0
        val_samples = 0

        with torch.no_grad():
            for batch in val_loader:
                sequence_ids = batch['sequence'].to(device, non_blocking=True)
                features = batch['features'].to(device, non_blocking=True)
                labels = batch['label'].to(device, non_blocking=True)

                try:
                    outputs = model(sequence_ids, features)
                    loss = criterion(outputs, labels)
                    val_loss += loss.item()
                    val_batches += 1
                    val_samples += sequence_ids.size(0)
                except Exception as e:
                    print(f"Validation error in fold {fold_num}, epoch {epoch}: {e}")
                    continue

        avg_train_loss = train_loss / train_batches if train_batches > 0 else float('inf')
        avg_val_loss = val_loss / val_batches if val_batches > 0 else float('inf')

        train_losses.append(avg_train_loss)
        val_losses.append(avg_val_loss)

        # Log every 5 epochs and on the final epoch
        if epoch % 5 == 0 or epoch == num_epochs - 1:
            current_lr = scheduler.get_last_lr()[0] if hasattr(scheduler, 'get_last_lr') else learning_rate
            print(f'Fold {fold_num} - Epoch [{epoch + 1}/{num_epochs}]')
            print(f'  Train Loss: {avg_train_loss:.4f} | Val Loss: {avg_val_loss:.4f} | LR: {current_lr:.6f}')

        # Checkpoint on improvement; otherwise count towards early stopping
        if avg_val_loss < best_val_loss:
            best_val_loss = avg_val_loss
            patience_counter = 0
            torch.save(model.state_dict(), f'best_peptide_model_fold_{fold_num}.pth')
        else:
            patience_counter += 1
            if patience_counter >= patience_limit:
                print(f"Early stopping at epoch {epoch + 1} for fold {fold_num}")
                break

    # Restore the best checkpoint; fall back to the last state if loading fails
    try:
        model.load_state_dict(torch.load(f'best_peptide_model_fold_{fold_num}.pth', map_location=device))
        print(f"Loaded best model for fold {fold_num} with val loss: {best_val_loss:.4f}")
    except Exception as e:
        print(f"Warning: Could not load best model for fold {fold_num}: {e}")

    return model, best_val_loss, train_losses, val_losses


def cross_validation_training(sequences, labels, function_names, n_splits=5, batch_size=32):
    """Run K-fold cross-validation over the dataset and train one model per fold.

    Folds that fail (empty split or training error) are skipped. Per-fold models
    and tokenizer info are collected; shared model metadata is pickled to
    'model_info.pkl'.

    Args:
        sequences: list of peptide strings.
        labels: list of multi-hot label vectors aligned with `sequences`.
        function_names: label names for reporting and the classifier head size.
        n_splits: requested number of folds (reduced if data is too small).
        batch_size: upper bound on DataLoader batch size.

    Returns:
        (all_fold_results, fold_models) — per-fold metric dicts, and
        (model, aa_to_idx, function_names) tuples for the trained folds.
    """
    # Shrink the fold count when there are too few samples
    if len(sequences) < n_splits:
        n_splits = max(2, len(sequences) // 2)

    kf = KFold(n_splits=n_splits, shuffle=True, random_state=42)

    all_fold_results = []
    fold_models = []

    print(f"Starting {n_splits}-fold cross-validation...")
    print(f"Total samples: {len(sequences)}")
    print(f"Function names: {function_names}")
    print(f"Number of functions: {len(function_names)}")

    if labels:
        labels_array = np.array(labels)
        print("Label distribution:")
        for i, func_name in enumerate(function_names):
            if i < labels_array.shape[1]:
                positive_count = np.sum(labels_array[:, i] > 0.5)
                print(f"  {func_name}: {positive_count}/{len(labels)} ({positive_count / len(labels) * 100:.1f}%)")

    successful_folds = 0

    for fold, (train_idx, val_idx) in enumerate(kf.split(sequences)):
        print(f"\n{'=' * 50}")
        print(f"Processing Fold {fold + 1}/{n_splits}")
        print(f"{'=' * 50}")

        train_sequences = [sequences[i] for i in train_idx]
        train_labels = [labels[i] for i in train_idx]
        val_sequences = [sequences[i] for i in val_idx]
        val_labels = [labels[i] for i in val_idx]

        print(f"Train set size: {len(train_sequences)}, Validation set size: {len(val_sequences)}")

        if len(val_sequences) == 0:
            print(f"Skipping fold {fold + 1} due to empty validation set")
            continue

        train_dataset = PeptideDataset(train_sequences, train_labels, max_length=100)
        val_dataset = PeptideDataset(val_sequences, val_labels, max_length=100)

        # Clamp batch sizes to the split sizes so DataLoader never gets 0/oversized batches
        train_batch_size = min(batch_size, len(train_dataset))
        val_batch_size = min(batch_size, len(val_dataset))

        if train_batch_size == 0 or val_batch_size == 0:
            print(f"Skipping fold {fold + 1} due to insufficient data")
            continue

        train_loader = DataLoader(
            train_dataset,
            batch_size=train_batch_size,
            shuffle=True,
            num_workers=0,
            pin_memory=True,
            persistent_workers=False
        )
        val_loader = DataLoader(
            val_dataset,
            batch_size=val_batch_size,
            shuffle=False,
            num_workers=0,
            pin_memory=True,
            persistent_workers=False
        )

        # A fresh model per fold
        vocab_size = len(train_dataset.aa_to_idx)
        model = EnhancedTransformerPeptideModel(
            vocab_size=vocab_size,
            num_classes=len(function_names),
            d_model=128,
            nhead=8,
            num_layers=3,
            feature_dim=10  # 7 physicochemical properties + 3 structural propensities
        )

        try:
            trained_model, best_loss, train_losses, val_losses = train_single_fold(
                model, train_loader, val_loader, fold + 1, num_epochs=80
            )

            val_metrics = evaluate_model(trained_model, val_loader, function_names)

            fold_result = {
                'fold': fold + 1,
                'val_loss': best_loss,
                'metrics': val_metrics,
                'train_losses': train_losses,
                'val_losses': val_losses
            }

            all_fold_results.append(fold_result)
            fold_models.append((trained_model, train_dataset.aa_to_idx, function_names))
            successful_folds += 1

            print(f"\nFold {fold + 1} Results:")
            print(f"Best Validation Loss: {best_loss:.4f}")
            print_metrics(val_metrics)

        except Exception as e:
            # A failed fold is logged and skipped; remaining folds still run
            print(f"Error in fold {fold + 1}: {e}")
            import traceback
            traceback.print_exc()
            continue

    if successful_folds == 0:
        print("Error: No folds completed successfully!")
        return [], []

    print(f"\nSuccessfully completed {successful_folds}/{n_splits} folds")

    # Aggregate statistics across the completed folds
    if all_fold_results:
        avg_loss = np.mean([result['val_loss'] for result in all_fold_results])
        std_loss = np.std([result['val_loss'] for result in all_fold_results])

        print(f"Average Validation Loss: {avg_loss:.4f} +/- {std_loss:.4f}")

        metric_names = ['accuracy', 'precision', 'recall', 'f1_score']
        for metric in metric_names:
            values = [result['metrics'][metric] for result in all_fold_results]
            avg_value = np.mean(values)
            std_value = np.std(values)
            print(f"Average {metric.capitalize()}: {avg_value:.4f} +/- {std_value:.4f}")

    # Persist tokenizer + label metadata for later inference
    if fold_models:
        model_info = {
            'aa_to_idx': fold_models[0][1],
            'function_names': function_names,
            'device': str(device)
        }

        with open('model_info.pkl', 'wb') as f:
            pickle.dump(model_info, f)
        print(f"Model info saved to model_info.pkl")

    return all_fold_results, fold_models


def evaluate_model(model, data_loader, function_names):
    """Collect sigmoid probabilities over data_loader and compute multi-label metrics.

    Accuracy is exact-match (subset) accuracy; precision/recall/F1 are
    macro-averaged over the per-function scores at a 0.5 threshold.
    Uses the module-level `device`.
    """
    model.eval()
    model.to(device)

    collected_probs = []
    collected_targets = []

    with torch.no_grad():
        for batch in data_loader:
            seq_ids = batch['sequence'].to(device, non_blocking=True)
            feats = batch['features'].to(device, non_blocking=True)
            targets = batch['label'].to(device, non_blocking=True)

            try:
                probs = torch.sigmoid(model(seq_ids, feats))
            except Exception as e:
                # Skip a failing batch rather than abort the evaluation
                print(f"Evaluation error: {e}")
                continue
            collected_probs.extend(probs.cpu().numpy())
            collected_targets.extend(targets.cpu().numpy())

    if not collected_probs or not collected_targets:
        # Nothing evaluated: return zeroed metrics with empty per-class lists
        return {
            'accuracy': 0.0,
            'precision': 0.0,
            'recall': 0.0,
            'f1_score': 0.0,
            'individual_metrics': {
                'precision': [],
                'recall': [],
                'f1': []
            }
        }

    probs_arr = np.array(collected_probs)
    targets_arr = np.array(collected_targets)

    binary = (probs_arr > 0.5).astype(int)

    # Subset accuracy: a sample counts only when every label matches
    accuracy = np.mean(np.all(binary == targets_arr, axis=1))

    per_precision = []
    per_recall = []
    per_f1 = []

    for col in range(min(len(function_names), probs_arr.shape[1])):
        tp = np.sum((binary[:, col] == 1) & (targets_arr[:, col] == 1))
        fp = np.sum((binary[:, col] == 1) & (targets_arr[:, col] == 0))
        fn = np.sum((binary[:, col] == 0) & (targets_arr[:, col] == 1))

        p = tp / (tp + fp) if (tp + fp) > 0 else 0
        r = tp / (tp + fn) if (tp + fn) > 0 else 0
        f = 2 * p * r / (p + r) if (p + r) > 0 else 0

        per_precision.append(p)
        per_recall.append(r)
        per_f1.append(f)

    return {
        'accuracy': accuracy,
        'precision': np.mean(per_precision) if per_precision else 0,
        'recall': np.mean(per_recall) if per_recall else 0,
        'f1_score': np.mean(per_f1) if per_f1 else 0,
        'individual_metrics': {
            'precision': per_precision,
            'recall': per_recall,
            'f1': per_f1
        }
    }


def print_metrics(metrics):
    """Print the four summary metrics, each indented two spaces, 4 decimals."""
    for title, key in (('Accuracy', 'accuracy'),
                       ('Precision', 'precision'),
                       ('Recall', 'recall'),
                       ('F1-Score', 'f1_score')):
        print(f"  {title}: {metrics[key]:.4f}")


def predict_function(models_info, sequence, function_names, max_length=100):
    """Ensemble prediction: average the sigmoid outputs of all fold models for one sequence.

    Each entry of models_info is (model, aa_to_idx, function_names). The
    sequence is encoded the same way as in PeptideDataset (ids + 10-dim
    feature rows, zero-padded to max_length). Returns a numpy vector of
    per-function probabilities; zeros when models_info is empty.
    """
    if not models_info:
        return np.array([0.0] * len(function_names))

    per_model_probs = []

    for model, aa_to_idx, _ in models_info:
        model.eval()
        model.to(device)

        # Encode the sequence into index ids and feature rows
        ids = []
        feat_rows = []
        for residue in sequence[:max_length]:
            upper = residue.upper()
            ids.append(aa_to_idx.get(upper, 0))

            if upper in AMINO_ACID_PROPERTIES:
                row = AMINO_ACID_PROPERTIES[upper] + SECONDARY_STRUCTURE_PROPENSITY.get(upper, [0, 0, 0])
            else:
                row = [0] * 10  # unknown residue → zero features
            feat_rows.append(row)

        # Zero-pad to the fixed length
        pad = max_length - len(ids)
        if pad > 0:
            ids += [0] * pad
            feat_rows += [[0] * 10 for _ in range(pad)]

        with torch.no_grad():
            seq_tensor = torch.tensor([ids], dtype=torch.long).to(device, non_blocking=True)
            feat_tensor = torch.tensor([feat_rows], dtype=torch.float32).to(device, non_blocking=True)

            try:
                probs = torch.sigmoid(model(seq_tensor, feat_tensor))
                per_model_probs.append(probs.cpu().numpy()[0])
            except Exception as e:
                # A failing model contributes a zero vector to the ensemble
                print(f"Prediction error: {e}")
                per_model_probs.append(np.array([0.0] * len(function_names)))

    if not per_model_probs:
        return np.array([0.0] * len(function_names))
    return np.mean(per_model_probs, axis=0)


def generate_peptide_sequence(models_info, target_length, function_names, num_attempts=200):
    """Randomly sample peptides of target_length and keep the highest-scoring one.

    Score = mean over models of the mean predicted function probability.
    Returns (best_sequence, best_score); ("", 0.0) when no models are given.
    """
    if not models_info:
        return "", 0.0

    print(f"Generating {target_length}-mer peptide...")

    # Candidate alphabet: the 20 standard residues
    standard_aas = list(AMINO_ACID_PROPERTIES.keys())
    print(f"Valid amino acids: {standard_aas}")

    best_sequence = ""
    best_score = 0

    for attempt in range(num_attempts):
        candidate = ''.join(random.choices(standard_aas, k=target_length))

        # Score the candidate with each fold model independently
        model_scores = []
        for idx, entry in enumerate(models_info):
            try:
                probs = predict_function([entry], candidate, function_names)
                model_scores.append(np.mean(probs))
            except Exception as e:
                print(f"Error in model {idx}: {e}")
                model_scores.append(0.0)

        score = np.mean(model_scores) if model_scores else 0.0

        if score > best_score:
            best_score = score
            best_sequence = candidate
            # Only improvements landing on a 50-attempt boundary are logged
            if attempt % 50 == 0:
                print(f"  New best (attempt {attempt + 1}): '{candidate[:10]}...' score {score:.4f}")

    print(f"Final generated sequence: '{best_sequence}' with score {best_score:.4f}")
    return best_sequence, best_score


def save_model_summary(results, function_names, filename="cross_validation_results.txt"):
    """Write a plain-text report of the cross-validation results.

    Args:
        results: list of per-fold dicts, each with 'fold', 'val_loss' and a
            'metrics' dict containing accuracy/precision/recall/f1_score.
        function_names: names of the predicted function classes.
        filename: output path for the report.
    """
    with open(filename, 'w', encoding='utf-8') as f:
        f.write("PEPTIDE FUNCTION PREDICTION - CROSS VALIDATION RESULTS\n")
        f.write("=" * 60 + "\n")
        f.write(f"Generated on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n")

        f.write("FUNCTION NAMES:\n")
        for i, name in enumerate(function_names):
            f.write(f"  {i + 1}. {name}\n")
        f.write("\n")

        f.write("FOLD RESULTS:\n")
        for result in results:
            f.write(f"\nFold {result['fold']}:\n")
            f.write(f"  Validation Loss: {result['val_loss']:.4f}\n")
            f.write(f"  Accuracy: {result['metrics']['accuracy']:.4f}\n")
            f.write(f"  Precision: {result['metrics']['precision']:.4f}\n")
            f.write(f"  Recall: {result['metrics']['recall']:.4f}\n")
            f.write(f"  F1-Score: {result['metrics']['f1_score']:.4f}\n")

        if results:
            # Macro-average each metric across the completed folds.
            avg_loss = np.mean([r['val_loss'] for r in results])
            avg_accuracy = np.mean([r['metrics']['accuracy'] for r in results])
            avg_precision = np.mean([r['metrics']['precision'] for r in results])
            avg_recall = np.mean([r['metrics']['recall'] for r in results])
            avg_f1 = np.mean([r['metrics']['f1_score'] for r in results])

            f.write(f"\nOVERALL AVERAGES:\n")
            f.write(f"  Validation Loss: {avg_loss:.4f}\n")
            f.write(f"  Accuracy: {avg_accuracy:.4f}\n")
            f.write(f"  Precision: {avg_precision:.4f}\n")
            f.write(f"  Recall: {avg_recall:.4f}\n")
            f.write(f"  F1-Score: {avg_f1:.4f}\n")

    # BUG FIX: the original printed the literal text "(unknown)" instead of
    # interpolating the output path into the message.
    print(f"Results saved to {filename}")


def load_models_from_files():
    """Rebuild the saved model ensemble from disk.

    Reads 'model_info.pkl' for the vocabulary and function names, then tries
    to restore a checkpoint for each of the five folds; folds with missing or
    corrupt checkpoints are reported and skipped.

    Returns:
        (models_info, function_names) where models_info is a list of
        (model, aa_to_idx, function_names) tuples, or (None, []) when the
        config pickle cannot be read.
    """
    try:
        with open('model_info.pkl', 'rb') as f:
            config = pickle.load(f)

        function_names = config['function_names']
        aa_to_idx = config['aa_to_idx']

        models_info = []
        for fold in range(1, 6):
            try:
                net = EnhancedTransformerPeptideModel(
                    vocab_size=len(aa_to_idx),
                    num_classes=len(function_names),
                    d_model=128,
                    nhead=8,
                    num_layers=3,
                    feature_dim=10
                )
                checkpoint = torch.load(f'best_peptide_model_fold_{fold}.pth', map_location=device)
                net.load_state_dict(checkpoint)
                net.eval()
                net.to(device)
                models_info.append((net, aa_to_idx, function_names))
            except Exception as e:
                print(f"Could not load model for fold {fold}: {e}")

        print(f"Loaded {len(models_info)} models")
        return models_info, function_names
    except Exception as e:
        print(f"Error loading models: {e}")
        import traceback
        traceback.print_exc()
        return None, []


class PeptideGUI:
    def __init__(self, root):
        """Initialize window geometry, the worker-thread queue, and state.

        Args:
            root: the Tk root window this GUI attaches to.
        """
        self.root = root
        self.root.title("Peptide Function Prediction System")
        self.root.geometry("1200x800")

        # Queue used by worker threads to hand updates to the Tk main loop.
        self.message_queue = queue.Queue()

        # Per-fold loss history, keyed by fold number.
        self.fold_train_losses = {}  # {fold_num: [losses]}
        self.fold_val_losses = {}  # {fold_num: [losses]}
        self.fold_epochs = {}  # {fold_num: [epochs]}
        self.current_fold = 0

        # Loaded model ensemble (set after training or an explicit load).
        self.models_info = None
        self.function_names = None

        self.setup_ui()

    def setup_ui(self):
        """Assemble the notebook with training, prediction and generation tabs."""
        container = ttk.Frame(self.root)
        container.pack(fill=tk.BOTH, expand=True, padx=10, pady=10)

        tabs = ttk.Notebook(container)
        tabs.pack(fill=tk.BOTH, expand=True)

        # Builder order determines on-screen tab order.
        for build_tab in (self.setup_training_tab,
                          self.setup_prediction_tab,
                          self.setup_generation_tab):
            build_tab(tabs)

        # Kick off the periodic message-queue draining loop.
        self.process_messages()

    def setup_training_tab(self, notebook):
        """Build the "Model Training" tab: status, progress, chart, log, controls."""
        training_frame = ttk.Frame(notebook)
        notebook.add(training_frame, text="Model Training")

        # Tab title
        title_label = ttk.Label(training_frame, text="Peptide Function Prediction Training Monitor",
                                font=("Arial", 16, "bold"))
        title_label.pack(pady=10)

        # Status panel
        status_frame = ttk.LabelFrame(training_frame, text="Training Status", padding=10)
        status_frame.pack(fill=tk.X, pady=5)

        self.status_label = ttk.Label(status_frame, text="Ready to start training...",
                                      font=("Arial", 12))
        self.status_label.pack()

        # Progress bar (0-100, driven through self.progress_var)
        self.progress_var = tk.DoubleVar()
        self.progress_bar = ttk.Progressbar(status_frame, variable=self.progress_var,
                                            maximum=100, length=400)
        self.progress_bar.pack(pady=5)

        # Fold / epoch indicators
        fold_frame = ttk.Frame(status_frame)
        fold_frame.pack(fill=tk.X, pady=5)

        self.fold_label = ttk.Label(fold_frame, text="Current Fold: -/-")
        self.fold_label.pack(side=tk.LEFT)

        self.epoch_label = ttk.Label(fold_frame, text="Current Epoch: -/-")
        self.epoch_label.pack(side=tk.RIGHT)

        # Live loss chart (matplotlib embedded via FigureCanvasTkAgg)
        chart_frame = ttk.LabelFrame(training_frame, text="Training Loss Chart", padding=10)
        chart_frame.pack(fill=tk.BOTH, expand=True, pady=5)

        self.fig, self.ax = plt.subplots(figsize=(10, 4))
        self.canvas = FigureCanvasTkAgg(self.fig, chart_frame)
        self.canvas.get_tk_widget().pack(fill=tk.BOTH, expand=True)

        # Initial (empty) chart axes
        self.ax.set_xlabel('Epoch')
        self.ax.set_ylabel('Loss')
        self.ax.set_title('Training Process Loss Variation')
        self.ax.grid(True, alpha=0.3)

        # Scrolling training log
        log_frame = ttk.LabelFrame(training_frame, text="Training Log", padding=10)
        log_frame.pack(fill=tk.BOTH, expand=True, pady=5)

        self.log_text = scrolledtext.ScrolledText(log_frame, height=15, width=80)
        self.log_text.pack(fill=tk.BOTH, expand=True)

        # Start/stop control buttons
        button_frame = ttk.Frame(training_frame)
        button_frame.pack(fill=tk.X, pady=10)

        self.start_button = ttk.Button(button_frame, text="Start Training",
                                       command=self.start_training)
        self.start_button.pack(side=tk.LEFT, padx=5)

        self.stop_button = ttk.Button(button_frame, text="Stop Training",
                                      command=self.stop_training, state=tk.DISABLED)
        self.stop_button.pack(side=tk.LEFT, padx=5)

        # Final results panel
        result_frame = ttk.LabelFrame(training_frame, text="Training Results", padding=10)
        result_frame.pack(fill=tk.X, pady=5)

        self.result_text = scrolledtext.ScrolledText(result_frame, height=8, width=80)
        self.result_text.pack(fill=tk.BOTH, expand=True)

    def setup_prediction_tab(self, notebook):
        """Build the "Function Prediction" tab: sequence input, results, model loading."""
        prediction_frame = ttk.Frame(notebook)
        notebook.add(prediction_frame, text="Function Prediction")

        # Tab title
        title_label = ttk.Label(prediction_frame, text="Peptide Function Prediction",
                                font=("Arial", 16, "bold"))
        title_label.pack(pady=10)

        # Sequence input area (pre-filled with an example peptide)
        input_frame = ttk.LabelFrame(prediction_frame, text="Input Sequence", padding=10)
        input_frame.pack(fill=tk.X, pady=5)

        ttk.Label(input_frame, text="Enter peptide sequence:").pack(anchor=tk.W)
        self.sequence_entry = ttk.Entry(input_frame, width=80)
        self.sequence_entry.pack(fill=tk.X, pady=5)
        self.sequence_entry.insert(0, "GLFDIVKKVVGALGSL")

        # Trigger prediction of the entered sequence
        predict_button = ttk.Button(input_frame, text="Predict Function",
                                    command=self.predict_sequence)
        predict_button.pack(pady=5)

        # Results display area
        result_frame = ttk.LabelFrame(prediction_frame, text="Prediction Results", padding=10)
        result_frame.pack(fill=tk.BOTH, expand=True, pady=5)

        self.prediction_result_text = scrolledtext.ScrolledText(result_frame, height=20, width=80)
        self.prediction_result_text.pack(fill=tk.BOTH, expand=True)

        # Manual model loading
        load_model_button = ttk.Button(prediction_frame, text="Load Trained Models",
                                       command=self.load_trained_models)
        load_model_button.pack(pady=5)

        # Model status indicator (red until models load successfully)
        self.model_status_label = ttk.Label(prediction_frame, text="Models not loaded",
                                            foreground="red")
        self.model_status_label.pack(pady=5)

    def setup_generation_tab(self, notebook):
        """Build the "Peptide Generation" tab: length parameter, results, model loading."""
        generation_frame = ttk.Frame(notebook)
        notebook.add(generation_frame, text="Peptide Generation")

        # Tab title
        title_label = ttk.Label(generation_frame, text="Peptide Sequence Generation",
                                font=("Arial", 16, "bold"))
        title_label.pack(pady=10)

        # Generation parameters
        param_frame = ttk.LabelFrame(generation_frame, text="Generation Parameters", padding=10)
        param_frame.pack(fill=tk.X, pady=5)

        ttk.Label(param_frame, text="Peptide length (10-50):").pack(anchor=tk.W)
        self.length_var = tk.StringVar(value="15")
        length_entry = ttk.Entry(param_frame, textvariable=self.length_var, width=20)
        length_entry.pack(pady=5)

        # Trigger sequence generation
        generate_button = ttk.Button(param_frame, text="Generate Peptide",
                                     command=self.generate_peptide)
        generate_button.pack(pady=5)

        # Results display area
        result_frame = ttk.LabelFrame(generation_frame, text="Generated Peptide", padding=10)
        result_frame.pack(fill=tk.BOTH, expand=True, pady=5)

        self.generation_result_text = scrolledtext.ScrolledText(result_frame, height=20, width=80)
        self.generation_result_text.pack(fill=tk.BOTH, expand=True)

        # Manual model loading
        load_model_button = ttk.Button(generation_frame, text="Load Trained Models",
                                       command=self.load_trained_models)
        load_model_button.pack(pady=5)

        # Model status indicator (red until models load successfully)
        self.gen_model_status_label = ttk.Label(generation_frame, text="Models not loaded",
                                                foreground="red")
        self.gen_model_status_label.pack(pady=5)

    def log_message(self, message):
        """Append a timestamped line to the training log widget and scroll to it."""
        stamp = datetime.now().strftime("%H:%M:%S")
        self.log_text.insert(tk.END, f"[{stamp}] {message}\n")
        self.log_text.see(tk.END)
        self.root.update_idletasks()

    def update_status(self, message):
        """Show *message* in the training status label and refresh the UI."""
        self.status_label.config(text=message)
        self.root.update_idletasks()

    def update_progress(self, value):
        """Set the progress bar to *value* (expected range 0-100)."""
        self.progress_var.set(value)
        self.root.update_idletasks()

    def update_fold_info(self, fold_info, epoch_info):
        """Update the fold and epoch indicator labels with pre-formatted text."""
        self.fold_label.config(text=fold_info)
        self.epoch_label.config(text=epoch_info)
        self.root.update_idletasks()

    def update_chart(self, train_loss, val_loss, epoch):
        """Record one epoch's losses for the current fold and redraw all fold curves."""
        fold = self.current_fold
        # Lazily create the history lists the first time a fold reports.
        if fold not in self.fold_train_losses:
            self.fold_train_losses[fold] = []
            self.fold_val_losses[fold] = []
            self.fold_epochs[fold] = []

        self.fold_train_losses[fold].append(train_loss)
        self.fold_val_losses[fold].append(val_loss)
        self.fold_epochs[fold].append(epoch)

        self.ax.clear()

        # One viridis color per fold so its train/val pair shares a hue.
        palette = plt.cm.viridis(np.linspace(0, 1, max(1, len(self.fold_train_losses))))

        for idx, (fold_num, fold_train) in enumerate(self.fold_train_losses.items()):
            hue = palette[idx] if idx < len(palette) else palette[-1]
            xs = self.fold_epochs[fold_num]

            # Training loss drawn solid, validation loss dashed.
            self.ax.plot(xs, fold_train, '-', color=hue,
                         label=f'Fold {fold_num} Train', linewidth=2, alpha=0.8)
            self.ax.plot(xs, self.fold_val_losses[fold_num], '--', color=hue,
                         label=f'Fold {fold_num} Val', linewidth=2, alpha=0.8)

        self.ax.set_xlabel('Epoch')
        self.ax.set_ylabel('Loss')
        self.ax.set_title('Training Process Loss Variation')
        self.ax.legend()
        self.ax.grid(True, alpha=0.3)

        self.canvas.draw()
        self.root.update_idletasks()

    def show_results(self, results):
        """Render the cross-validation summary into the results text box."""
        self.result_text.delete(1.0, tk.END)

        # Assemble the report as a list of chunks, then insert once.
        chunks = ["Training completed!\n", "=" * 50 + "\n"]

        if results:
            def _avg(pick):
                """Macro-average a per-fold value across all folds."""
                return np.mean([pick(r) for r in results])

            chunks.append(f"Average Validation Loss: {_avg(lambda r: r['val_loss']):.4f}\n")
            chunks.append(f"Average Accuracy: {_avg(lambda r: r['metrics']['accuracy']):.4f}\n")
            chunks.append(f"Average Precision: {_avg(lambda r: r['metrics']['precision']):.4f}\n")
            chunks.append(f"Average Recall: {_avg(lambda r: r['metrics']['recall']):.4f}\n")
            chunks.append(f"Average F1 Score: {_avg(lambda r: r['metrics']['f1_score']):.4f}\n")

            chunks.append("\nDetailed results for each fold:\n")
            for r in results:
                m = r['metrics']
                chunks.append(f"\nFold {r['fold']}:\n")
                chunks.append(f"  Validation Loss: {r['val_loss']:.4f}\n")
                chunks.append(f"  Accuracy: {m['accuracy']:.4f}\n")
                chunks.append(f"  Precision: {m['precision']:.4f}\n")
                chunks.append(f"  Recall: {m['recall']:.4f}\n")
                chunks.append(f"  F1 Score: {m['f1_score']:.4f}\n")

        self.result_text.insert(tk.END, "".join(chunks))

    def start_training(self):
        """Reset UI state and launch the training routine on a daemon thread."""
        self.log_message("Start Training button clicked")
        self.start_button.config(state=tk.DISABLED)
        self.stop_button.config(state=tk.NORMAL)
        self.log_text.delete(1.0, tk.END)

        # Drop any loss history left over from a previous run.
        for store in (self.fold_train_losses, self.fold_val_losses, self.fold_epochs):
            store.clear()
        self.current_fold = 0

        # Daemon thread keeps the Tk main loop responsive during training.
        worker = threading.Thread(target=self.run_training, daemon=True)
        worker.start()

    def stop_training(self):
        """Handle the Stop button: flip button states and report the stop.

        NOTE(review): this only updates the UI — no stop flag is visible here
        that would actually signal the background training thread; confirm
        whether the worker keeps running after this is clicked.
        """
        self.log_message("Training interrupted by user")
        self.start_button.config(state=tk.NORMAL)
        self.stop_button.config(state=tk.DISABLED)
        self.update_status("Training stopped")

    def run_training(self):
        """Worker-thread entry point: load data, run CV training, reload models.

        Runs off the Tk main thread (started by start_training); every UI
        update is posted through self.message_queue instead of touching
        widgets directly.
        """
        try:
            self.message_queue.put(("log", "Loading data..."))

            csv_file_path = "peptide_library.csv"

            if os.path.exists(csv_file_path):
                self.message_queue.put(("log", f"Loading data from {csv_file_path}..."))
                try:
                    sequences, labels, function_names = load_csv_data(csv_file_path)
                except Exception as e:
                    # Fall back to a generated sample dataset if the file is unreadable.
                    self.message_queue.put(("log", f"Error loading data: {e}"))
                    self.message_queue.put(("log", "Creating sample data..."))
                    create_sample_csv()
                    sequences, labels, function_names = load_csv_data(csv_file_path)
            else:
                self.message_queue.put(("log", f"CSV file {csv_file_path} not found."))
                create_sample_csv()
                sequences, labels, function_names = load_csv_data(csv_file_path)

            if not sequences or not labels or not function_names:
                self.message_queue.put(("log", "Error: Invalid data loaded"))
                self.message_queue.put(("enable_start", None))
                return

            self.message_queue.put(("log", f"Data Summary:"))
            self.message_queue.put(("log", f"  Total sequences: {len(sequences)}"))
            self.message_queue.put(("log", f"  Function types: {len(function_names)}"))
            self.message_queue.put(("log", f"  Function names: {function_names}"))

            # Cap at 5 folds and require at least ~3 samples per fold.
            n_splits = min(5, max(2, len(sequences) // 3))
            self.message_queue.put(("log", f"Using {n_splits}-fold cross-validation"))

            self.message_queue.put(("status", "Starting model training..."))
            self.message_queue.put(("progress", 0))

            # Cross-validation variant that streams progress into the GUI.
            cv_results, trained_models = self.cross_validation_training_with_gui(
                sequences, labels, function_names, n_splits=n_splits, batch_size=32
            )

            if not cv_results:
                self.message_queue.put(("log", "Training failed"))
                self.message_queue.put(("enable_start", None))
                return

            save_model_summary(cv_results, function_names)
            self.message_queue.put(("results", cv_results))
            self.message_queue.put(("status", "Training completed!"))
            self.message_queue.put(("progress", 100))
            self.message_queue.put(("enable_start", None))

            # Automatically load the freshly trained models for the other tabs.
            self.message_queue.put(("log", "Loading trained models..."))
            models_info, function_names = load_models_from_files()
            if models_info:
                self.models_info = models_info
                self.function_names = function_names
                self.message_queue.put(("update_model_status", "Models loaded successfully"))
                self.message_queue.put(("log", "Models loaded successfully for prediction and generation"))
            else:
                self.message_queue.put(("update_model_status", "Failed to load models"))
                self.message_queue.put(("log", "Failed to load models"))

        except Exception as e:
            # Last-resort handler: report the error and re-enable the Start button.
            self.message_queue.put(("log", f"Error during training: {e}"))
            import traceback
            traceback.print_exc()
            self.message_queue.put(("enable_start", None))

    def cross_validation_training_with_gui(self, sequences, labels, function_names, n_splits=5, batch_size=32):
        """Run K-fold cross-validation while streaming progress to the GUI queue.

        For each fold: split the data, build datasets/loaders, train a fresh
        EnhancedTransformerPeptideModel via train_single_fold_with_gui, and
        evaluate it. Folds that fail (empty split, training error) are logged
        and skipped rather than aborting the whole run.

        Returns:
            (all_fold_results, fold_models) — both empty when no fold succeeds.
        """
        # Shrink the fold count when there are too few samples to split.
        if len(sequences) < n_splits:
            n_splits = max(2, len(sequences) // 2)

        kf = KFold(n_splits=n_splits, shuffle=True, random_state=42)

        all_fold_results = []
        fold_models = []

        self.message_queue.put(("log", f"Starting {n_splits}-fold cross-validation..."))
        self.message_queue.put(("log", f"Total samples: {len(sequences)}"))

        successful_folds = 0
        total_folds = n_splits

        for fold, (train_idx, val_idx) in enumerate(kf.split(sequences)):
            fold_info = f"Current Fold: {fold + 1}/{n_splits}"
            self.message_queue.put(("fold_info", (fold_info, "")))
            self.message_queue.put(("log", f"{'=' * 50}"))
            self.message_queue.put(("log", f"Processing Fold {fold + 1}/{n_splits}"))
            self.message_queue.put(("log", f"{'=' * 50}"))

            # Record the active fold so chart updates land in the right series.
            self.current_fold = fold + 1

            train_sequences = [sequences[i] for i in train_idx]
            train_labels = [labels[i] for i in train_idx]
            val_sequences = [sequences[i] for i in val_idx]
            val_labels = [labels[i] for i in val_idx]

            self.message_queue.put(
                ("log", f"Train set size: {len(train_sequences)}, Validation set size: {len(val_sequences)}"))

            if len(val_sequences) == 0:
                self.message_queue.put(("log", f"Skipping fold {fold + 1} due to empty validation set"))
                continue

            train_dataset = PeptideDataset(train_sequences, train_labels, max_length=100)
            val_dataset = PeptideDataset(val_sequences, val_labels, max_length=100)

            # Never request a batch larger than the dataset itself.
            train_batch_size = min(batch_size, len(train_dataset))
            val_batch_size = min(batch_size, len(val_dataset))

            if train_batch_size == 0 or val_batch_size == 0:
                self.message_queue.put(("log", f"Skipping fold {fold + 1} due to insufficient data"))
                continue

            train_loader = DataLoader(
                train_dataset,
                batch_size=train_batch_size,
                shuffle=True,
                num_workers=0,
                pin_memory=True,
                persistent_workers=False
            )
            val_loader = DataLoader(
                val_dataset,
                batch_size=val_batch_size,
                shuffle=False,
                num_workers=0,
                pin_memory=True,
                persistent_workers=False
            )

            # A fresh, untrained model for every fold.
            vocab_size = len(train_dataset.aa_to_idx)
            model = EnhancedTransformerPeptideModel(
                vocab_size=vocab_size,
                num_classes=len(function_names),
                d_model=128,
                nhead=8,
                num_layers=3,
                feature_dim=10
            )

            try:
                trained_model, best_loss, train_losses, val_losses = self.train_single_fold_with_gui(
                    model, train_loader, val_loader, fold + 1, num_epochs=80
                )

                val_metrics = evaluate_model(trained_model, val_loader, function_names)

                fold_result = {
                    'fold': fold + 1,
                    'val_loss': best_loss,
                    'metrics': val_metrics,
                    'train_losses': train_losses,
                    'val_losses': val_losses
                }

                all_fold_results.append(fold_result)
                fold_models.append((trained_model, train_dataset.aa_to_idx, function_names))
                successful_folds += 1

                self.message_queue.put(("log", f"Fold {fold + 1} Results:"))
                self.message_queue.put(("log", f"Best Validation Loss: {best_loss:.4f}"))
                self.print_metrics_to_gui(val_metrics)

            except Exception as e:
                # A failed fold is logged and skipped; remaining folds continue.
                self.message_queue.put(("log", f"Error in fold {fold + 1}: {e}"))
                import traceback
                traceback.print_exc()
                continue

        if successful_folds == 0:
            self.message_queue.put(("log", "Error: No folds completed successfully!"))
            return [], []

        self.message_queue.put(("log", f"Successfully completed {successful_folds}/{n_splits} folds"))

        return all_fold_results, fold_models

    def train_single_fold_with_gui(self, model, train_loader, val_loader, fold_num, num_epochs=100,
                                   learning_rate=0.001):
        """Train one cross-validation fold with live GUI updates.

        Uses BCEWithLogitsLoss + AdamW + OneCycleLR, optional CUDA mixed
        precision, gradient clipping, a manual L2 penalty, and early stopping
        (patience 15 epochs). The best checkpoint is saved per fold and
        reloaded before returning.

        Returns:
            (model, best_val_loss, train_losses, val_losses)
        """
        criterion = nn.BCEWithLogitsLoss()
        optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate, weight_decay=1e-4)
        scheduler = torch.optim.lr_scheduler.OneCycleLR(
            optimizer,
            max_lr=learning_rate,
            epochs=num_epochs,
            steps_per_epoch=len(train_loader),
            pct_start=0.3,
            anneal_strategy='cos'
        )

        model.to(device)

        best_val_loss = float('inf')
        patience_counter = 0
        patience_limit = 15
        train_losses = []
        val_losses = []

        self.message_queue.put(("log", f"=== Training Fold {fold_num} ==="))

        # Mixed precision only when CUDA (and the AMP GradScaler API) exists.
        use_amp = torch.cuda.is_available() and hasattr(torch.cuda.amp, 'GradScaler')
        scaler = torch.cuda.amp.GradScaler() if use_amp else None

        for epoch in range(num_epochs):
            # NOTE(review): progress math hard-codes 5 folds; with fewer folds
            # the bar is only approximate (it is clamped to 100 below).
            progress = (fold_num - 1) / 5 * 100 + (epoch + 1) / num_epochs * 20
            self.message_queue.put(("progress", min(progress, 100)))
            epoch_info = f"Current Epoch: {epoch + 1}/{num_epochs}"
            self.message_queue.put(("fold_info", (f"Current Fold: {fold_num}/5", epoch_info)))

            model.train()
            train_loss = 0.0
            train_batches = 0
            total_samples = 0

            for batch_idx, batch in enumerate(train_loader):
                sequence_ids = batch['sequence'].to(device, non_blocking=True)
                features = batch['features'].to(device, non_blocking=True)
                labels = batch['label'].to(device, non_blocking=True)

                optimizer.zero_grad()

                try:
                    if use_amp and scaler is not None:
                        with torch.cuda.amp.autocast():
                            outputs = model(sequence_ids, features)
                            loss = criterion(outputs, labels)

                            # Manual L2 penalty on top of AdamW's weight decay.
                            l2_reg = torch.tensor(0., device=device)
                            for param in model.parameters():
                                l2_reg += torch.norm(param)
                            loss += 1e-4 * l2_reg

                        scaler.scale(loss).backward()
                        # Unscale first so clipping sees true gradient norms.
                        scaler.unscale_(optimizer)
                        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
                        scaler.step(optimizer)
                        scaler.update()
                    else:
                        outputs = model(sequence_ids, features)
                        loss = criterion(outputs, labels)

                        # Manual L2 penalty on top of AdamW's weight decay.
                        l2_reg = torch.tensor(0., device=device)
                        for param in model.parameters():
                            l2_reg += torch.norm(param)
                        loss += 1e-4 * l2_reg

                        loss.backward()
                        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
                        optimizer.step()

                    # OneCycleLR steps per batch, not per epoch.
                    scheduler.step()

                    train_loss += loss.item()
                    train_batches += 1
                    total_samples += sequence_ids.size(0)

                except Exception as e:
                    # A failing batch is reported and skipped.
                    self.message_queue.put(
                        ("log", f"Training error in fold {fold_num}, epoch {epoch}, batch {batch_idx}: {e}"))
                    continue

            model.eval()
            val_loss = 0.0
            val_batches = 0
            val_samples = 0

            with torch.no_grad():
                for batch in val_loader:
                    sequence_ids = batch['sequence'].to(device, non_blocking=True)
                    features = batch['features'].to(device, non_blocking=True)
                    labels = batch['label'].to(device, non_blocking=True)

                    try:
                        outputs = model(sequence_ids, features)
                        loss = criterion(outputs, labels)
                        val_loss += loss.item()
                        val_batches += 1
                        val_samples += sequence_ids.size(0)
                    except Exception as e:
                        self.message_queue.put(("log", f"Validation error in fold {fold_num}, epoch {epoch}: {e}"))
                        continue

            # Guard against all batches failing in an epoch.
            avg_train_loss = train_loss / train_batches if train_batches > 0 else float('inf')
            avg_val_loss = val_loss / val_batches if val_batches > 0 else float('inf')

            train_losses.append(avg_train_loss)
            val_losses.append(avg_val_loss)

            # Push this epoch's losses to the live chart.
            self.message_queue.put(("chart_update", (avg_train_loss, avg_val_loss, epoch + 1)))

            if epoch % 5 == 0 or epoch == num_epochs - 1:
                current_lr = scheduler.get_last_lr()[0] if hasattr(scheduler, 'get_last_lr') else learning_rate
                self.message_queue.put(("log", f'Fold {fold_num} - Epoch [{epoch + 1}/{num_epochs}]'))
                self.message_queue.put(("log",
                                        f'  Train Loss: {avg_train_loss:.4f} | Val Loss: {avg_val_loss:.4f} | LR: {current_lr:.6f}'))

            # Early stopping on validation loss; checkpoint only improvements.
            if avg_val_loss < best_val_loss:
                best_val_loss = avg_val_loss
                patience_counter = 0
                torch.save(model.state_dict(), f'best_peptide_model_fold_{fold_num}.pth')
            else:
                patience_counter += 1
                if patience_counter >= patience_limit:
                    self.message_queue.put(("log", f"Early stopping at epoch {epoch + 1} for fold {fold_num}"))
                    break

        # Restore the best checkpoint of this fold before evaluation.
        try:
            model.load_state_dict(torch.load(f'best_peptide_model_fold_{fold_num}.pth', map_location=device))
            self.message_queue.put(("log", f"Loaded best model for fold {fold_num} with val loss: {best_val_loss:.4f}"))
        except Exception as e:
            self.message_queue.put(("log", f"Warning: Could not load best model for fold {fold_num}: {e}"))

        return model, best_val_loss, train_losses, val_losses

    def print_metrics_to_gui(self, metrics):
        """向GUI打印评估指标"""
        self.message_queue.put(("log", f"  Accuracy: {metrics['accuracy']:.4f}"))
        self.message_queue.put(("log", f"  Precision: {metrics['precision']:.4f}"))
        self.message_queue.put(("log", f"  Recall: {metrics['recall']:.4f}"))
        self.message_queue.put(("log", f"  F1 Score: {metrics['f1_score']:.4f}"))

    def load_trained_models(self):
        """Load saved fold models from disk and reflect the outcome in both tabs."""
        try:
            models_info, function_names = load_models_from_files()
            loaded = bool(models_info)
            if loaded:
                self.models_info = models_info
                self.function_names = function_names
            outcome = "Models loaded successfully" if loaded else "Failed to load models"
            color = "green" if loaded else "red"
            # Both the prediction tab and the generation tab show model status.
            for label in (self.model_status_label, self.gen_model_status_label):
                label.config(text=outcome, foreground=color)
            self.log_message(outcome)
        except Exception as e:
            msg = f"Error loading models: {e}"
            for label in (self.model_status_label, self.gen_model_status_label):
                label.config(text=msg, foreground="red")
            self.log_message(msg)

    def predict_sequence(self):
        """Validate user input, then run prediction on a background thread."""
        # Guard clauses: models present, sequence present, sequence long enough.
        if not self.models_info or not self.function_names:
            messagebox.showerror("Error", "Please load trained models first!")
            return

        sequence = self.sequence_entry.get().strip()
        if not sequence:
            messagebox.showerror("Error", "Please enter a peptide sequence!")
            return
        if len(sequence) < 5:
            messagebox.showerror("Error", "Sequence length must be at least 5 amino acids!")
            return

        try:
            # Daemon thread keeps the GUI responsive during inference.
            worker = threading.Thread(target=self.run_prediction, args=(sequence,), daemon=True)
            worker.start()
        except Exception as e:
            messagebox.showerror("Error", f"Prediction error: {e}")

    def run_prediction(self, sequence):
        """Worker-thread body: predict the functions of `sequence` and post a
        formatted report back to the GUI through the message queue.

        Emits "prediction_status" messages around the work and one
        "prediction_result" message with the full report text. Any failure is
        reported as a status message and traced to stderr instead of raising
        into the thread.
        """
        try:
            self.message_queue.put(("prediction_status", "Analyzing sequence function..."))

            predictions = predict_function(self.models_info, sequence, self.function_names)

            result_text = f"Sequence Analysis Results\n{'=' * 50}\n"
            result_text += f"Input Sequence: {sequence}\n\n"
            result_text += "Predicted Functions:\n"

            if len(predictions) > 0:
                active_count = 0
                # zip, not enumerate: the positional index was never used.
                for func_name, prob in zip(self.function_names, predictions):
                    percentage = prob * 100
                    # Thresholds: >70% high, >40% moderate, else low.
                    if percentage > 70:
                        status = "HIGH PROBABILITY"
                        active_count += 1
                    elif percentage > 40:
                        status = "MODERATE PROBABILITY"
                    else:
                        status = "LOW PROBABILITY"

                    result_text += f"  {func_name}: {percentage:.1f}% ({status})\n"

                if active_count == 0:
                    result_text += "\nNo high-probability function predictions found.\n"
            else:
                result_text += "No prediction results available.\n"

            self.message_queue.put(("prediction_result", result_text))
            self.message_queue.put(("prediction_status", "Analysis completed!"))

        except Exception as e:
            self.message_queue.put(("prediction_status", f"Error: {e}"))
            import traceback
            traceback.print_exc()

    def generate_peptide(self):
        """Validate the requested peptide length and, if acceptable, run the
        sequence generation in a background daemon thread. Shows an error
        dialog for missing models or an invalid length.
        """
        # Models must be loaded before generation can run.
        if not self.models_info or not self.function_names:
            messagebox.showerror("Error", "Please load trained models first!")
            return

        # Parse first, range-check second: each failure has its own message.
        try:
            length = int(self.length_var.get())
        except ValueError:
            messagebox.showerror("Error", "Please enter a valid number for length!")
            return

        if not 10 <= length <= 50:
            messagebox.showerror("Error", "Length should be between 10-50!")
            return

        try:
            worker = threading.Thread(
                target=self.run_generation, args=(length,), daemon=True
            )
            worker.start()
        except Exception as e:
            messagebox.showerror("Error", f"Generation error: {e}")

    def run_generation(self, length):
        """Worker-thread body: design a peptide of `length` residues, predict
        the functions of the result and post a formatted report to the GUI
        through the message queue.

        Emits "generation_status" messages around the work and one
        "generation_result" message with the full report text. Any failure is
        reported as a status message and traced to stderr instead of raising
        into the thread.
        """
        try:
            self.message_queue.put(("generation_status", "Designing peptide sequence with optimized functions..."))

            generated_seq, score = generate_peptide_sequence(self.models_info, length, self.function_names,
                                                             num_attempts=200)

            if not generated_seq:
                self.message_queue.put(("generation_status", "Sequence generation failed"))
                return

            # Score the generated sequence with the same predictor the
            # prediction tab uses.
            predictions = predict_function(self.models_info, generated_seq, self.function_names)

            result_text = f"Generated {length}-mer Peptide Sequence\n{'=' * 50}\n"
            result_text += f"Sequence: {generated_seq}\n"
            result_text += f"Comprehensive Score: {score:.4f}\n\n"
            result_text += "Predicted Functions:\n"

            if len(predictions) > 0:
                active_count = 0
                # zip, not enumerate: the positional index was never used.
                for func_name, prob in zip(self.function_names, predictions):
                    percentage = prob * 100
                    # Looser thresholds than the prediction tab: >60% high,
                    # >30% moderate, else low.
                    if percentage > 60:
                        status = "HIGH ACTIVITY"
                        active_count += 1
                    elif percentage > 30:
                        status = "MODERATE ACTIVITY"
                    else:
                        status = "LOW ACTIVITY"

                    result_text += f"  {func_name}: {percentage:.1f}% ({status})\n"

                if active_count == 0:
                    result_text += "\nNo significant functional activity detected.\n"
            else:
                result_text += "No prediction results available.\n"

            self.message_queue.put(("generation_result", result_text))
            self.message_queue.put(("generation_status", "Generation completed!"))

        except Exception as e:
            self.message_queue.put(("generation_status", f"Error: {e}"))
            import traceback
            traceback.print_exc()

    def process_messages(self):
        """Drain the cross-thread message queue and dispatch each message to
        the matching GUI handler, then reschedule itself on the Tk event loop.

        Worker threads never touch widgets directly; they enqueue
        (kind, payload) tuples that this pump applies on the GUI thread.
        """
        try:
            while True:
                kind, payload = self.message_queue.get_nowait()

                if kind == "log":
                    self.log_message(payload)
                elif kind == "status":
                    self.update_status(payload)
                elif kind == "progress":
                    self.update_progress(payload)
                elif kind == "fold_info":
                    self.update_fold_info(payload[0], payload[1])
                elif kind == "chart_update":
                    self.update_chart(payload[0], payload[1], payload[2])
                elif kind == "results":
                    self.show_results(payload)
                elif kind == "enable_start":
                    self.start_button.config(state=tk.NORMAL)
                    self.stop_button.config(state=tk.DISABLED)
                elif kind == "update_model_status":
                    # Green for a success message, red otherwise; both tabs'
                    # labels stay in sync.
                    color = "green" if "successfully" in payload else "red"
                    self.model_status_label.config(text=payload, foreground=color)
                    self.gen_model_status_label.config(text=payload, foreground=color)
                elif kind == "prediction_status":
                    # Placeholder: a prediction status label could be updated here.
                    pass
                elif kind == "prediction_result":
                    self.prediction_result_text.delete(1.0, tk.END)
                    self.prediction_result_text.insert(tk.END, payload)
                elif kind == "generation_status":
                    # Placeholder: a generation status label could be updated here.
                    pass
                elif kind == "generation_result":
                    self.generation_result_text.delete(1.0, tk.END)
                    self.generation_result_text.insert(tk.END, payload)

        except queue.Empty:
            # Queue drained for now; fall through and poll again shortly.
            pass

        # Re-arm the pump: poll every 100 ms on the Tk event loop.
        self.root.after(100, self.process_messages)


def create_sample_csv(path='peptide_library.csv'):
    """Write a small sample peptide library as a CSV file.

    The file has two columns, 'sequence' and 'functions', with one row per
    example peptide.

    Args:
        path: Destination CSV path. Defaults to 'peptide_library.csv' in the
            current working directory (the original hard-coded behavior).
    """
    sample_data = [
        ["GLFDIVKKVVGALGSL", "Antimicrobial, Antibacterial"],
        ["INLKALAAALKKLL", "Antimicrobial, Antibacterial, Antifungal"],
        ["RLARIVVIRVAR", "Antimicrobial, Antibacterial, Anti-Gram+"],
        ["GLFDIVKKVVGAIGQV", "Antimicrobial, Antibacterial, Anti-Gram+"],
        ["INLKALAALLKKLL", "Antimicrobial, Antibacterial, Anti-Gram-, Antifungal"],
        ["RLARIVVIRVAG", "Antimicrobial, Antibacterial"],
        ["GLFDIVKKVVGAIGQL", "Antimicrobial, Antibacterial, Anti-Gram+"],
        ["INLKALAAALKKLLG", "Antimicrobial, Antibacterial, Antifungal"],
        ["RLARIVVIRWAR", "Antimicrobial, Antibacterial, Anti-Gram+"],
        ["GLFDIVKKVVGAIGQLL", "Antimicrobial, Antibacterial, Anti-Gram+"],
        ["INLKALAALLKKLLG", "Antimicrobial, Antibacterial, Anti-Gram-, Antifungal"],
        ["RLARIVVIRVARG", "Antimicrobial, Antibacterial, Anti-Gram+"],
        ["YSSGYTRPLPKPSRPIFIRPIGCDVCYGIPSSTARLCCFRYGDCCHR", "Antimicrobial, Antibacterial, Antifungal"],
        ["GKPRPYLPRPTSHPRPIRV", "Antimicrobial, Antibacterial"],
        ["RPDKPRPYLPRPRPPRPVR", "Antimicrobial, Antibacterial"]
    ]

    df = pd.DataFrame(sample_data, columns=['sequence', 'functions'])
    df.to_csv(path, index=False)
    # Derive the count from the data so the message cannot go stale.
    print(f"Created sample {path} with {len(sample_data)} sequences")


def main():
    """Entry point: print environment/device information, then start the
    Tkinter GUI application and block in its main loop."""
    print("PEPTIDE FUNCTION PREDICTION USING ENHANCED TRANSFORMER MODEL")
    print("=" * 70)
    print("Enhanced with amino acid physicochemical properties and structure features")
    print("=" * 70)
    print(f"Device: {device}")
    print(f"CUDA available: {torch.cuda.is_available()}")
    if torch.cuda.is_available():
        try:
            print(f"CUDA device: {torch.cuda.get_device_name(0)}")
            if hasattr(torch.cuda, 'get_device_properties'):
                props = torch.cuda.get_device_properties(0)
                print(f"GPU Memory: {props.total_memory / 1024 ** 3:.1f} GB")
        except Exception:
            # Bare except replaced: only device-query failures are expected
            # here; never swallow SystemExit/KeyboardInterrupt.
            print("CUDA device: Unknown")
    print()

    # Build and run the GUI application.
    root = tk.Tk()
    app = PeptideGUI(root)
    root.mainloop()


# Launch the GUI only when executed as a script, not on import.
if __name__ == "__main__":
    main()
