import os
import json
import logging
import torch
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
from sklearn.model_selection import KFold

# Configuration: paths, synthetic-data generation, model and training hyperparameters.
config = {
    'data_dir': '/mnt/sdb/zq/software/chromToolKit/simulated_data',  # absolute path to the simulated dataset
    'data_generation': {
        'genome_size': 1000000,  # 1Mb
        'n_chromosomes': 5,
        'n_enhancers': 1000,
        'n_promoters': 1000,
        'n_genes': 2000,
        'enhancer_length': {
            'min': 100,
            'max': 1000
        },
        'promoter_length': {
            'min': 100,
            'max': 500
        },
        'histone_modification': {
            'h3k27ac': {
                'base_level': 0.4,
                'noise_level': 0.15,
                'peak_height': {
                    'min': 0.8,
                    'max': 2.0
                }
            },
            'h3k4me3': {
                'base_level': 0.6,
                'noise_level': 0.25,
                'peak_height': {
                    'min': 0.8,
                    'max': 2.0
                }
            }
        }
    },
    'data_params': {
        'window_size': 2000,
        'batch_size': 64,
        'test_size': 0.2,
        'random_state': 42,
        'use_cache': True
    },
    'model_params': {
        'seq_input_dim': 4,  # A, C, G, T
        'hidden_dim': 256,
        # 9 chromatin states: active_promoter, weak_promoter, strong_enhancer,
        # weak_enhancer, transcribed, polycomb_repressed, heterochromatin,
        # insulator, bivalent_promoter
        'num_classes': 9,
        'dropout': 0.1
    },
    'training_params': {
        'num_epochs': 50,
        'learning_rate': 1e-4,
        'weight_decay': 1e-5,
        'patience': 10,
        'min_delta': 1e-4,
        'batch_size': 64
    },
    'prediction_params': {
        'batch_size': 64,
        'output_format': 'csv'
    }
}

# Configure module-wide logging: timestamped INFO-level messages.
_LOG_FORMAT = '%(asctime)s - %(levelname)s - %(message)s'
logging.basicConfig(format=_LOG_FORMAT, level=logging.INFO)
logger = logging.getLogger(__name__)

class EarlyStopping:
    """Stop training once validation loss has not improved for `patience` calls.

    A loss counts as an improvement when it drops to at most
    ``best_loss - min_delta``; anything above that increments the counter.
    """

    def __init__(self, patience=7, min_delta=0, verbose=True):
        self.patience = patience    # tolerated consecutive non-improving epochs
        self.min_delta = min_delta  # minimum decrease that counts as improvement
        self.verbose = verbose
        self.counter = 0
        self.best_loss = None
        self.early_stop = False

    def __call__(self, val_loss):
        """Record one validation loss; return True once training should stop."""
        if self.best_loss is None:
            # First observation simply becomes the baseline.
            self.best_loss = val_loss
            return self.early_stop

        improved = val_loss <= self.best_loss - self.min_delta
        if improved:
            self.best_loss = val_loss
            self.counter = 0
        else:
            self.counter += 1
            if self.verbose:
                logger.info(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        return self.early_stop

class TrainingLogger:
    """Accumulate per-epoch losses/metrics, persist them to a timestamped JSON
    file after every epoch, and render loss/metric plots as PNG files."""

    def __init__(self, log_dir='logs'):
        self.log_dir = log_dir
        os.makedirs(log_dir, exist_ok=True)
        self.train_losses = []
        self.val_losses = []
        self.metrics = {}           # metric name -> list of per-epoch values
        self.start_time = datetime.now()

    def _stamped_path(self, prefix, ext):
        # Build "<log_dir>/<prefix>_<YYYYmmdd_HHMMSS>.<ext>" using the run start time.
        return os.path.join(
            self.log_dir, f'{prefix}_{self.start_time:%Y%m%d_%H%M%S}.{ext}'
        )

    def log_epoch(self, epoch, train_loss, val_loss, metrics=None):
        """Append one epoch's losses/metrics and rewrite the JSON log on disk."""
        self.train_losses.append(train_loss)
        self.val_losses.append(val_loss)
        for name, value in (metrics or {}).items():
            self.metrics.setdefault(name, []).append(value)

        snapshot = {
            'train_losses': self.train_losses,
            'val_losses': self.val_losses,
            'metrics': self.metrics
        }
        with open(self._stamped_path('training_log', 'json'), 'w') as fh:
            json.dump(snapshot, fh, indent=4)

    def plot_losses(self):
        """Plot training vs. validation loss curves, save as PNG, and show."""
        plt.figure(figsize=(10, 5))
        plt.plot(self.train_losses, label='Training Loss')
        plt.plot(self.val_losses, label='Validation Loss')
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.title('Training and Validation Losses')
        plt.legend()
        plt.savefig(self._stamped_path('losses', 'png'))
        plt.show()

    def plot_metrics(self):
        """Plot each tracked metric in its own subplot, save as PNG, and show."""
        if not self.metrics:
            return

        n_metrics = len(self.metrics)
        fig, axes = plt.subplots(1, n_metrics, figsize=(5 * n_metrics, 4))
        if n_metrics == 1:
            # subplots() returns a bare Axes for a single panel; normalize to a list.
            axes = [axes]

        for ax, (name, values) in zip(axes, self.metrics.items()):
            ax.plot(values)
            ax.set_title(name)
            ax.set_xlabel('Epoch')
            ax.set_ylabel(name)

        plt.tight_layout()
        plt.savefig(self._stamped_path('metrics', 'png'))
        plt.show()

class ModelCheckpoint:
    """Save and restore model/optimizer checkpoints, optionally keeping only
    checkpoints that improve on the best (lowest) loss seen so far."""

    def __init__(self, save_dir='checkpoints', save_best_only=True):
        self.save_dir = save_dir
        os.makedirs(save_dir, exist_ok=True)
        self.save_best_only = save_best_only
        self.best_loss = float('inf')  # lowest loss observed so far

    def save_checkpoint(self, model, epoch, loss, optimizer=None):
        """Persist model (and optionally optimizer) state for this epoch.

        When ``save_best_only`` is True, checkpoints whose loss does not
        improve on the current best are silently skipped.
        """
        if self.save_best_only and loss >= self.best_loss:
            return

        self.best_loss = loss
        checkpoint = {
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'loss': loss
        }
        if optimizer:
            checkpoint['optimizer_state_dict'] = optimizer.state_dict()

        filename = f'model_checkpoint_epoch_{epoch}_{loss:.4f}.pth'
        torch.save(checkpoint, os.path.join(self.save_dir, filename))
        # Bug fix: the original logged the literal placeholder "(unknown)"
        # instead of the checkpoint filename.
        logging.getLogger(__name__).info(f'Saved checkpoint: {filename}')

    def load_checkpoint(self, model, filename, optimizer=None, map_location=None):
        """Load a checkpoint from ``save_dir`` into ``model`` (and ``optimizer``).

        map_location: optional device mapping forwarded to ``torch.load`` so
            checkpoints saved on a GPU can be restored on a CPU-only machine
            (the original call omitted it and would fail in that case).

        Returns:
            (epoch, loss) stored in the checkpoint.
        """
        checkpoint = torch.load(
            os.path.join(self.save_dir, filename), map_location=map_location
        )
        model.load_state_dict(checkpoint['model_state_dict'])
        if optimizer and 'optimizer_state_dict' in checkpoint:
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        return checkpoint['epoch'], checkpoint['loss']

class CrossValidator:
    """Bookkeeping for K-fold cross-validation built on sklearn's KFold."""

    def __init__(self, n_splits=5, shuffle=True, random_state=42):
        self.kf = KFold(n_splits=n_splits, shuffle=shuffle, random_state=random_state)
        self.current_fold = 0
        self.results = []       # one metrics dict per completed fold

    def split(self, dataset):
        """Yield (train_indices, val_indices) pairs over the dataset's index range."""
        return self.kf.split(np.arange(len(dataset)))

    def log_fold_results(self, metrics):
        """Record one fold's metrics and log them.

        After the final fold, also logs mean ± std across all folds.
        Returns the aggregate {metric: {'mean': ..., 'std': ...}} summary.
        """
        self.current_fold += 1
        self.results.append(metrics)

        # Mean/std over all folds seen so far, keyed by metric name.
        summary = {
            name: {
                'mean': np.mean([fold[name] for fold in self.results]),
                'std': np.std([fold[name] for fold in self.results])
            }
            for name in self.results[0]
        }

        logger.info(f"\nFold {self.current_fold} Results:")
        for name, value in metrics.items():
            logger.info(f"{name}: {value:.4f}")

        if self.current_fold == self.kf.n_splits:
            logger.info("\nOverall Results:")
            for name, stats in summary.items():
                logger.info(f"{name}: {stats['mean']:.4f} ± {stats['std']:.4f}")

        return summary

def setup_device():
    """Select the compute device: the first CUDA GPU when available, else CPU."""
    cuda_ok = torch.cuda.is_available()
    device = torch.device("cuda" if cuda_ok else "cpu")
    if cuda_ok:
        logger.info(f"Using GPU: {torch.cuda.get_device_name(0)}")
    else:
        logger.info("Using CPU")
    return device

def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters in a model."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total

def set_seed(seed):
    """Seed numpy and torch (CPU + every GPU) for reproducible runs."""
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Trade cuDNN autotuning speed for deterministic kernel selection.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True