import os
import torch
import torch.nn as nn
import numpy as np
import segmentation_models_pytorch as smp
from torch.utils.data import DataLoader
from datasets import SegmentationDataset, dataset_info
from main import parse_args, prepare_hybrid_training_dataset
from utils.image import visualize_result
import argparse
import cv2
import time

def _save_batch_visualization(images, masks, outputs, loss, batch, epoch,
                              batch_id, num_batches, args, imgs_res_folder):
    """Render (input / ground-truth / prediction) for one sample and write it
    to ``imgs_res_folder``/results.png.

    The file is intentionally overwritten on every call so it always shows
    the most recent training state (same convention as main.py).
    """
    # Middle sample of the batch (clamps to 0 when batch size is 1).
    sample_idx = min(images.size(0) - 1, images.size(0) // 2)

    # Only move the selected sample to the CPU instead of the whole batch.
    res_data = [
        images[sample_idx].cpu().numpy(),                           # 1. input image
        masks[sample_idx].cpu().numpy(),                            # 2. ground-truth mask
        torch.sigmoid(outputs[sample_idx]).detach().cpu().numpy(),  # 3. prediction (sigmoid turns logits into probabilities)
    ]

    vis_imgs = visualize_result(res_data, arg=args)
    vis_imgs = np.array(vis_imgs, dtype=np.uint8)

    # Shrink slightly for display (same 0.8 factor used in main.py).
    h, w = vis_imgs.shape[:2]
    vis_imgs = cv2.resize(vis_imgs, (int(w * 0.8), int(h * 0.8)))

    img_test = 'Epoch: {0} Sample {1}/{2} Loss: {3:.4f}'.format(
        epoch, batch_id, num_batches, loss.item())
    file_path = batch['file_names'][sample_idx]

    # (0, 0, 255) is red in OpenCV's BGR channel order (the original
    # constant was misleadingly named BLACK).
    font_color = (0, 0, 255)
    font = cv2.FONT_HERSHEY_SIMPLEX
    x, y = 30, 30
    vis_imgs = cv2.putText(vis_imgs, img_test, (x, y),
                           font, 1.1, font_color, 2, cv2.LINE_AA)
    # Source file path on a second, smaller line below the stats line.
    vis_imgs = cv2.putText(vis_imgs, file_path, (x, y + 40),
                           font, 0.6, font_color, 1, cv2.LINE_AA)

    cv2.imwrite(os.path.join(imgs_res_folder, 'results.png'), vis_imgs)


def train_one_epoch(model, loader, optimizer, loss_fn, device, epoch, args, tb_writer=None):
    """Run one training epoch and return the mean per-batch loss.

    Args:
        model: segmentation network producing logits for ``loss_fn``.
        loader: yields dicts with 'images', 'labels' and 'file_names' keys.
        optimizer: torch optimizer over ``model``'s parameters.
        loss_fn: callable(logits, masks) -> scalar loss tensor.
        device: torch device the batches are moved to.
        epoch: zero-based epoch index (used for logging only).
        args: parsed CLI namespace; reads output_dir, epochs and
            log_interval_vis here.
        tb_writer: optional tensorboard SummaryWriter for batch-level scalars.

    Returns:
        float: sum of batch losses divided by the number of batches.
    """
    model.train()
    epoch_loss = 0

    # Rolling visualization folder; results.png inside is overwritten.
    imgs_res_folder = os.path.join(args.output_dir, 'current_res')
    os.makedirs(imgs_res_folder, exist_ok=True)

    for batch_id, batch in enumerate(loader):
        images = batch['images'].to(device)
        masks = batch['labels'].to(device)

        optimizer.zero_grad()
        outputs = model(images)
        loss = loss_fn(outputs, masks)
        loss.backward()
        optimizer.step()

        epoch_loss += loss.item()

        # Console / tensorboard progress every 10 batches.
        if batch_id % 10 == 0:
            current_lr = optimizer.param_groups[0]['lr']
            print(f"Epoch [{epoch+1}/{args.epochs}], Step [{batch_id}/{len(loader)}], Loss: {loss.item():.4f}, LR: {current_lr:.6f}")

            if tb_writer is not None:
                global_step = epoch * len(loader) + batch_id
                tb_writer.add_scalar('train/batch_loss', loss.item(), global_step)
                tb_writer.add_scalar('train/learning_rate', current_lr, global_step)

        # Visualization logic similar to main.py.
        # NOTE(review): assumes args.log_interval_vis > 0; 0 would raise
        # ZeroDivisionError here — confirm the argparse default.
        if batch_id % args.log_interval_vis == 0:
            _save_batch_visualization(images, masks, outputs, loss, batch, epoch,
                                      batch_id, len(loader), args, imgs_res_folder)

    return epoch_loss / len(loader)

def validate(model, loader, loss_fn, device, args, output_dir=None, tb_writer=None, epoch=None, stage='val'):
    """Evaluate ``model`` on ``loader`` and compute binary pixel metrics.

    Predictions and labels are both binarised at 0.5 (sigmoid is applied to
    the model's logits first). If ``output_dir`` is given, each predicted
    mask is saved there as a PNG named after its source file.

    Args:
        model: segmentation network producing logits.
        loader: yields dicts with 'images', 'labels' and 'file_names'.
        loss_fn: callable(logits, masks) -> scalar loss tensor.
        device: torch device for the batches.
        args: parsed CLI namespace (unused here, kept for signature parity).
        output_dir: optional directory for per-image prediction PNGs.
        tb_writer: optional SummaryWriter; scalars logged when epoch is set.
        epoch: epoch index for tensorboard logging.
        stage: tag prefix for tensorboard scalars (e.g. 'val', 'val_preview').

    Returns:
        tuple: (mean loss, precision, recall, f1) as floats.
    """
    model.eval()
    running_loss = 0.0
    tp = fp = fn = 0
    eps = 1e-8  # guards divisions when a class never occurs

    with torch.no_grad():
        for batch in loader:
            imgs = batch['images'].to(device)
            gts = batch['labels'].to(device)
            names = batch['file_names']

            logits = model(imgs)
            running_loss += loss_fn(logits, gts).item()

            # Binarise prediction (via sigmoid) and target at 0.5.
            pred = (torch.sigmoid(logits) >= 0.5).long()
            target = (gts >= 0.5).long()

            tp += ((pred == 1) & (target == 1)).sum().item()
            fp += ((pred == 1) & (target == 0)).sum().item()
            fn += ((pred == 0) & (target == 1)).sum().item()

            # Dump predicted masks as 8-bit PNGs when requested.
            if output_dir:
                pred_np = pred.cpu().numpy()
                for idx, name in enumerate(names):
                    save_path = os.path.join(output_dir, os.path.splitext(name)[0] + '.png')
                    cv2.imwrite(save_path, (pred_np[idx, 0] * 255).astype(np.uint8))

    precision = tp / (tp + fp + eps)
    recall = tp / (tp + fn + eps)
    f1_score = 2 * precision * recall / (precision + recall + eps)
    val_loss = running_loss / len(loader)

    if tb_writer is not None and epoch is not None:
        for tag, value in (('loss', val_loss), ('precision', precision),
                           ('recall', recall), ('f1', f1_score)):
            tb_writer.add_scalar(f'{stage}/{tag}', value, epoch)

    return val_loss, precision, recall, f1_score

def main():
    """Entry point: build datasets and a U-Net, then run the train/eval loop.

    Configuration comes entirely from ``parse_args()`` (shared with main.py).
    Per-epoch outputs (validation masks, checkpoints) are written under
    ``args.output_dir/<epoch>/``; tensorboard logs under
    ``args.output_dir/logs`` when ``args.tensorboard`` is set.
    """
    args = parse_args()
    
    # Handle hybrid data: merges extra data into the training list first.
    # NOTE(review): prepare_hybrid_training_dataset presumably rewrites
    # args.input_dir — confirm against main.py.
    if hasattr(args, 'hy_data_dir') and args.hy_data_dir:
        prepare_hybrid_training_dataset(args)
        # Force train_data to BSDS-like behavior (list based) if it was BIPED
        if args.train_data.lower() == 'biped':
            print("Switching train_data to 'BSDS' mode to support hybrid dataset list.")
            args.train_data = 'BSDS'

    # Define model parameters and get preprocessing function
    encoder_name = "resnet34"
    encoder_weights = "imagenet"
    
    # Grayscale input cannot use the 3-channel ImageNet normalization,
    # so preprocessing is left to the dataset (mean subtraction).
    if args.grayscale:
        print("Grayscale mode enabled. Disabling SMP preprocessing and using mean subtraction.")
        preprocessing_fn = None
        in_channels = 1
    else:
        preprocessing_fn = smp.encoders.get_preprocessing_fn(encoder_name, encoder_weights)
        print(f"Using smp preprocessing for {encoder_name} with {encoder_weights} weights")
        in_channels = 3

    # 1. Dataset
    # Config from dataset_info (e.g., BSDS, BIPED)
    # data_config = dataset_info(args.train_data) 
    
    # Use args.input_dir which is updated by prepare_hybrid_training_dataset if used
    # or defaults to the one from parse_args
    
    train_dataset = SegmentationDataset(
        data_root=args.input_dir,
        img_height=args.img_height, # Use training height
        img_width=args.img_width,   # Use training width
        mean_bgr=args.mean_pixel_values,
        train_mode='train',
        crop_img=args.crop_img,
        arg=args,
        preprocessing=preprocessing_fn
    )
    
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.workers)
    
    # Validation Dataset: test-sized images, no cropping, deterministic order.
    val_dataset = SegmentationDataset(
        data_root=args.input_val_dir,
        img_height=args.test_img_height,
        img_width=args.test_img_width,
        mean_bgr=args.mean_pixel_values,
        train_mode='test',
        crop_img=False,
        arg=args,
        preprocessing=preprocessing_fn
    )
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)

    print(f"Training on {len(train_dataset)} images. Validation on {len(val_dataset)} images.")

    # 2. Model (U-Net with ResNet34)
    # Use ImageNet pre-trained weights for the encoder; single output
    # channel because this is binary segmentation (logits, no activation).
    model = smp.Unet(
        encoder_name=encoder_name,        
        encoder_weights=encoder_weights,     
        in_channels=in_channels,                  
        classes=1                       
    )
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.to(device)

    # Enable DataParallel for multi-GPU
    if device.type == 'cuda' and torch.cuda.device_count() > 1:
        print(f"Using DataParallel on {torch.cuda.device_count()} GPUs")
        model = nn.DataParallel(model)

    # 3. Loss & Optimizer
    # Dice loss computed directly from logits (from_logits=True applies the
    # sigmoid internally). NOTE: despite the earlier plan, no BCE term is
    # combined here — Dice alone is used.
    loss_fn = smp.losses.DiceLoss(mode='binary', from_logits=True)
    optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr)

    # Tensorboard (import deferred so the dependency is optional).
    tb_writer = None
    if args.tensorboard:
        from torch.utils.tensorboard import SummaryWriter
        log_dir = os.path.join(args.output_dir, 'logs')
        os.makedirs(log_dir, exist_ok=True)
        tb_writer = SummaryWriter(log_dir=log_dir)

    # 4. Training Loop
    output_dir = args.output_dir
    os.makedirs(output_dir, exist_ok=True)
    
    for epoch in range(args.epochs):
        # Create output directories for this epoch
        output_dir_epoch = os.path.join(args.output_dir, str(epoch))
        img_test_dir = os.path.join(output_dir_epoch, 'val_res')
        os.makedirs(output_dir_epoch, exist_ok=True)
        os.makedirs(img_test_dir, exist_ok=True)

        # Validate before training (Preview): logs pre-update metrics under
        # the 'val_preview' tensorboard tag and writes prediction masks.
        print(f"Epoch {epoch+1}: Validating before training...")
        validate(model, val_loader, loss_fn, device, args, output_dir=img_test_dir, tb_writer=tb_writer, epoch=epoch, stage='val_preview')

        loss = train_one_epoch(model, train_loader, optimizer, loss_fn, device, epoch, args, tb_writer)
        
        # Validate after training (Metrics) — overwrites the preview masks
        # in img_test_dir with post-update predictions.
        val_loss, precision, recall, f1 = validate(model, val_loader, loss_fn, device, args, output_dir=img_test_dir, tb_writer=tb_writer, epoch=epoch, stage='val')
        
        if tb_writer is not None:
            tb_writer.add_scalar('train/epoch_loss', loss, epoch)
        
        print(f"Epoch {epoch+1}/{args.epochs}, Train Loss: {loss:.4f}, Val Loss: {val_loss:.4f}")
        print(f"Validation Metrics - Precision: {precision:.4f}, Recall: {recall:.4f}, F1: {f1:.4f}")
        
        # Save checkpoint every args.save_interval epochs.
        if (epoch + 1) % args.save_interval == 0:
            save_path = os.path.join(output_dir_epoch, f"segmentation_epoch_{epoch+1}.pth")
            # Handle DataParallel model saving: unwrap so the checkpoint keys
            # have no 'module.' prefix and load into a plain model.
            state_dict = model.module.state_dict() if isinstance(model, nn.DataParallel) else model.state_dict()
            torch.save(state_dict, save_path)
            print(f"Saved checkpoint to {save_path}")

if __name__ == '__main__':
    main()
