import torch
import torchvision
import cv2
import numpy as np
from models.yolo import YOLOv8
import argparse
import os
from tqdm import tqdm
import time
from pathlib import Path


class YOLOPredictor:
    """Single-image YOLO inference helper.

    Loads a trained YOLOv8 checkpoint and exposes :meth:`predict`, which runs
    letterbox preprocessing, raw-head decoding, confidence filtering and
    per-layer NMS, and optionally writes an annotated copy of the input image.
    """

    def __init__(self, model_path, num_classes, conf_thres=0.25, iou_thres=0.45, device='cuda'):
        """
        Args:
            model_path: path to a checkpoint saved by the training loop
                (a dict holding 'model_state_dict') or a bare state_dict.
            num_classes: number of object classes the model was trained on.
            conf_thres: minimum (objectness * best class score) to keep a box.
            iou_thres: IoU threshold passed to NMS.
            device: torch device string ('cuda' or 'cpu').
        """
        self.conf_threshold = conf_thres
        self.iou_threshold = iou_thres
        self.device = device

        # Load model
        self.model = YOLOv8(num_classes=num_classes)
        checkpoint = torch.load(model_path, map_location=device)
        # Accept both a full training checkpoint and a bare state_dict, so
        # weights exported without the wrapper dict still load.
        if isinstance(checkpoint, dict) and 'model_state_dict' in checkpoint:
            state_dict = checkpoint['model_state_dict']
        else:
            state_dict = checkpoint
        self.model.load_state_dict(state_dict)
        self.model = self.model.to(device)
        self.model.eval()

        print(f"Model loaded from {model_path}")

    def preprocess_image(self, img_path, img_size=640):
        """Letterbox an image file into a square `img_size` x `img_size` tensor.

        Returns:
            (img_tensor, (orig_height, orig_width), (pad_left, pad_top, ratio))
            where img_tensor is float32, shape (1, 3, img_size, img_size),
            values in [0, 1], BGR channel order (cv2 convention — presumably
            the model was trained on BGR input; confirm against training code).

        Raises:
            ValueError: if cv2 cannot read the file.
        """
        img = cv2.imread(img_path)
        if img is None:
            raise ValueError(f"Cannot read image: {img_path}")

        orig_height, orig_width = img.shape[:2]

        # Scale so the longer side becomes img_size, preserving aspect ratio.
        ratio = img_size / max(orig_height, orig_width)
        new_height, new_width = int(orig_height * ratio), int(orig_width * ratio)
        img_resized = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_LINEAR)

        # Center the resized image on a gray (114, 114, 114) canvas — the
        # conventional YOLO letterbox fill value.
        pad_height = img_size - new_height
        pad_width = img_size - new_width
        pad_top = pad_height // 2
        pad_bottom = pad_height - pad_top
        pad_left = pad_width // 2
        pad_right = pad_width - pad_left
        img_padded = cv2.copyMakeBorder(img_resized, pad_top, pad_bottom, pad_left, pad_right,
                                        cv2.BORDER_CONSTANT, value=(114, 114, 114))

        # HWC uint8 -> CHW float32 in [0, 1], with a leading batch dimension.
        img_normalized = img_padded.astype(np.float32) / 255.0
        img_normalized = img_normalized.transpose(2, 0, 1)
        img_tensor = torch.from_numpy(img_normalized).unsqueeze(0)

        return img_tensor, (orig_height, orig_width), (pad_left, pad_top, ratio)

    def postprocess(self, predictions, orig_size, pad_info):
        """Decode raw head outputs into filtered (boxes, scores, labels).

        Each element of `predictions` is expected to be a raw head map of
        shape (batch, num_anchors * (5 + num_classes), H, W) with 3 anchors
        and per-anchor channel layout (x, y, w, h, objectness, class scores).
        Boxes are returned in original-image pixel coordinates (x1, y1, x2, y2).
        """
        num_anchors = 3  # must match the head layout assumed by the view() below
        pad_left, pad_top, ratio = pad_info
        orig_height, orig_width = orig_size

        # Process all prediction layers
        results = []
        for pred in predictions:
            # (B, A*(5+C), H, W) -> (B, A*H*W, 5+C), anchor-major order.
            batch_size, _, height, width = pred.shape
            pred = pred.view(batch_size, num_anchors, -1, height, width)
            pred = pred.permute(0, 1, 3, 4, 2).contiguous()
            pred = pred.view(batch_size, -1, pred.shape[-1])

            # Sigmoid for box coordinates, objectness and class scores.
            box_xy = pred[..., :2].sigmoid()
            box_wh = pred[..., 2:4].sigmoid()
            conf = pred[..., 4].sigmoid()
            cls = pred[..., 5:].sigmoid()

            # Cell-offset grid flattened in the same row-major (y, x) order as
            # the prediction tensor, then repeated once per anchor so it lines
            # up with the A*H*W axis. (Without the repeat, the grid has only
            # H*W rows and the addition below fails with a broadcast error.)
            grid_y, grid_x = torch.meshgrid([torch.arange(height), torch.arange(width)], indexing='ij')
            grid = torch.stack((grid_x, grid_y), 2).to(pred.device)
            grid = grid.view(1, -1, 2).repeat(1, num_anchors, 1)

            # NOTE(review): scales by orig_width/width for both axes rather
            # than the usual stride (img_size/width) — presumably matches how
            # this model's targets were encoded; confirm against the training
            # decode before trusting absolute box positions.
            box_xy = (box_xy + grid) * (orig_width / width)
            box_wh = box_wh * orig_width

            # Center/size -> corner coordinates.
            box_x1y1 = box_xy - box_wh / 2
            box_x2y2 = box_xy + box_wh / 2
            boxes = torch.cat((box_x1y1, box_x2y2), dim=-1)

            # Undo the letterbox: remove padding offset, then the resize ratio.
            boxes[..., [0, 2]] -= pad_left
            boxes[..., [1, 3]] -= pad_top
            boxes /= ratio

            # Clip boxes to the original image bounds.
            boxes[..., [0, 2]] = boxes[..., [0, 2]].clamp(0, orig_width)
            boxes[..., [1, 3]] = boxes[..., [1, 3]].clamp(0, orig_height)

            # Final score = best class probability * objectness.
            scores, labels = cls.max(dim=-1)
            scores *= conf

            # Filter by confidence.
            keep = scores > self.conf_threshold
            boxes = boxes[keep]
            scores = scores[keep]
            labels = labels[keep]

            if len(boxes):
                # Apply NMS within this prediction layer.
                keep = torchvision.ops.nms(boxes, scores, self.iou_threshold)
                results.append((boxes[keep], scores[keep], labels[keep]))
            else:
                # Empty placeholders on the prediction's device with dtypes
                # matching the non-empty branch (labels are int64), so the
                # torch.cat below never mixes devices or dtypes.
                results.append((torch.zeros(0, 4, device=pred.device),
                                torch.zeros(0, device=pred.device),
                                torch.zeros(0, dtype=torch.long, device=pred.device)))

        # Combine results from all prediction layers.
        # NOTE(review): NMS runs per layer only — overlapping boxes from
        # different scales are not suppressed against each other.
        boxes = torch.cat([x[0] for x in results])
        scores = torch.cat([x[1] for x in results])
        labels = torch.cat([x[2] for x in results])

        return boxes, scores, labels

    def draw_boxes(self, img, boxes, scores, labels, class_names=None):
        """Draw green boxes with '<label> <score>' captions on `img` (mutated in place and returned)."""
        for box, score, label in zip(boxes, scores, labels):
            x1, y1, x2, y2 = map(int, box.tolist())

            color = (0, 255, 0)  # BGR green
            cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)

            # Fall back to the numeric class id when no name list is given.
            label_text = f"{class_names[int(label)] if class_names else int(label)} {score:.2f}"
            cv2.putText(img, label_text, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

        return img

    @torch.no_grad()
    def predict(self, img_path, class_names=None, save_img=True):
        """Run detection on one image file.

        Returns:
            (boxes, scores, labels) in original-image pixel coordinates.
            When save_img is True, also writes '<stem>_pred<suffix>' next to
            the input file.
        """
        img_tensor, orig_size, pad_info = self.preprocess_image(img_path)
        img_tensor = img_tensor.to(self.device)

        predictions = self.model(img_tensor)

        boxes, scores, labels = self.postprocess(predictions, orig_size, pad_info)

        # Only re-read and annotate the image when it will actually be saved
        # (previously the annotation was also computed — and discarded — when
        # class_names was set with save_img=False).
        if save_img:
            img = cv2.imread(img_path)
            img_with_boxes = self.draw_boxes(img.copy(), boxes, scores, labels, class_names)

            output_path = str(Path(img_path).with_name(f"{Path(img_path).stem}_pred{Path(img_path).suffix}"))
            cv2.imwrite(output_path, img_with_boxes)
            print(f"Saved prediction to {output_path}")

        return boxes, scores, labels


def main():
    """CLI entry point: parse arguments, build the predictor, and run it on a
    single image or every image directly inside a directory."""
    parser = argparse.ArgumentParser(description='YOLOv8 Inference')
    parser.add_argument('--model-path', type=str, required=True, help='path to model checkpoint')
    parser.add_argument('--image-path', type=str, required=True, help='path to image or directory')
    parser.add_argument('--num-classes', type=int, default=80, help='number of classes')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold')
    parser.add_argument('--class-names', type=str, default=None, help='path to class names file')
    parser.add_argument('--device', type=str, default='cuda', help='device to use (cuda or cpu)')

    args = parser.parse_args()

    # Fall back to CPU instead of crashing when CUDA was requested (the
    # default) but is not available on this machine.
    device = args.device
    if device.startswith('cuda') and not torch.cuda.is_available():
        print("CUDA not available, falling back to CPU")
        device = 'cpu'

    # Load class names if provided: one name per line, blank lines ignored.
    class_names = None
    if args.class_names:
        with open(args.class_names, encoding='utf-8') as f:
            class_names = [line.strip() for line in f if line.strip()]

    # Initialize predictor
    predictor = YOLOPredictor(
        model_path=args.model_path,
        num_classes=args.num_classes,
        conf_thres=args.conf_thres,
        iou_thres=args.iou_thres,
        device=device,
    )

    # A single file, or every image directly inside the directory (sorted for
    # a deterministic processing order).
    if os.path.isfile(args.image_path):
        image_paths = [args.image_path]
    else:
        image_paths = sorted(
            os.path.join(args.image_path, f)
            for f in os.listdir(args.image_path)
            if f.lower().endswith(('.png', '.jpg', '.jpeg'))
        )

    # Process images
    for img_path in tqdm(image_paths, desc="Processing images"):
        try:
            boxes, scores, labels = predictor.predict(img_path, class_names)

            # Print results
            print(f"\nResults for {img_path}:")
            for box, score, label in zip(boxes, scores, labels):
                label_name = class_names[int(label)] if class_names else int(label)
                print(f"{label_name}: {score:.2f} at {[round(x, 2) for x in box.tolist()]}")

        # Best-effort batch processing: report the failure and continue with
        # the remaining images rather than aborting the whole run.
        except Exception as e:
            print(f"Error processing {img_path}: {str(e)}")


if __name__ == '__main__':
    main()
