import json
import os
import numpy as np
from collections import defaultdict
from scipy.spatial.distance import cdist
from sklearn.metrics import average_precision_score
import xml.etree.ElementTree as ET

# Load ground truth from XML files in Annotations folder
annotations_dir = 'Annotations'

# Category-name -> id mapping.  Hoisted to module level: the original dict
# literal was rebuilt once per <object>, inside the innermost loop.
CATEGORY_MAP = {
    'noBrick_Outer_Vertical': 0,
    'noBrick_Outer_Parallel': 1,
    'noBrick_Outer_Slant': 2,
    'noBrick_Outer_Vertical_Occupied': 3,
    'noBrick_Outer_Parallel_Occupied': 4,
    'noBrick_Outer_Slant_Occupied': 5
}


def _parse_point_list(container, xml_file):
    """Return a flat [x, y, 2, ...] keypoint list from <point> children of *container*.

    Malformed or empty points are skipped with a warning.  Visibility is
    always 2 (visible) because the XML carries no visibility flag.
    This deduplicates the identical child/points and polyline parsing loops.
    """
    keypoints = []
    for point in container.findall('point'):
        x_tag = point.find('x')
        y_tag = point.find('y')
        if x_tag is None or y_tag is None or not x_tag.text or not y_tag.text:
            print(f"Warning: Missing or empty 'x' or 'y' in point of {xml_file}, skipping point.")
            continue
        try:
            x = float(x_tag.text)
            y = float(y_tag.text)
        except (ValueError, TypeError):
            print(f"Warning: Invalid 'x' or 'y' in point of {xml_file}, skipping point.")
            continue
        keypoints.extend([x, y, 2])  # Assume all keypoints are visible (v=2)
    return keypoints


# Create dictionaries for GT images and annotations
gt_images = {}                 # image_id -> {'file_name', 'width', 'height'}
gt_anns = defaultdict(list)    # image_id -> list of annotation dicts

for xml_file in os.listdir(annotations_dir):
    if not xml_file.endswith('.xml'):
        continue
    # try-body narrowed to the one call that raises ET.ParseError.
    try:
        tree = ET.parse(os.path.join(annotations_dir, xml_file))
    except ET.ParseError as e:
        print(f"Error: Failed to parse XML in {xml_file}: {e}")
        continue
    root = tree.getroot()
    # Debug: Print root tag and children to inspect structure
    print(f"Processing {xml_file}, root tag: {root.tag}, children: {[child.tag for child in root]}")

    # Extract image info with error handling
    filename_tag = root.find('filename')
    if filename_tag is None or not filename_tag.text:
        print(f"Warning: No or empty 'filename' tag in {xml_file}, skipping.")
        continue
    file_name = filename_tag.text
    size_tag = root.find('size')
    if size_tag is None:
        print(f"Warning: Missing 'size' tag in {xml_file}, skipping.")
        continue
    width_tag = size_tag.find('width')
    height_tag = size_tag.find('height')
    if width_tag is None or height_tag is None:
        print(f"Warning: Missing 'width' or 'height' in {xml_file}, skipping.")
        continue
    try:
        width = float(width_tag.text)
        height = float(height_tag.text)
    except (ValueError, TypeError):
        print(f"Warning: Invalid 'width' or 'height' in {xml_file}, skipping.")
        continue
    image_id = xml_file.replace('.xml', '')  # Use filename (without .xml) as image_id
    gt_images[image_id] = {'file_name': file_name, 'width': width, 'height': height}

    # Extract annotations
    for obj in root.findall('object'):
        name_tag = obj.find('name')
        if name_tag is None or not name_tag.text:
            print(f"Warning: Missing or empty 'name' in object of {xml_file}, skipping.")
            continue
        category_name = name_tag.text
        category_id = CATEGORY_MAP.get(category_name, -1)
        if category_id == -1:
            print(f"Warning: Unknown category {category_name} in {xml_file}, skipping.")
            continue
        # Extract bounding box; missing individual coords default to 0.
        bndbox = obj.find('bndbox')
        if bndbox is None:
            print(f"Warning: Missing 'bndbox' in object of {xml_file}, skipping.")
            continue
        try:
            bbox = [
                float(bndbox.find('xmin').text) if bndbox.find('xmin') is not None else 0,
                float(bndbox.find('ymin').text) if bndbox.find('ymin') is not None else 0,
                float(bndbox.find('xmax').text) if bndbox.find('xmax') is not None else 0,
                float(bndbox.find('ymax').text) if bndbox.find('ymax') is not None else 0
            ]
        except (ValueError, TypeError, AttributeError):
            print(f"Warning: Invalid 'bndbox' coordinates in {xml_file}, skipping.")
            continue
        # Keypoints come from child/points, falling back to polyline for
        # compatibility with the older annotation layout.
        child = obj.find('child')
        if child is not None:
            points = child.find('points')
            keypoints = _parse_point_list(points, xml_file) if points is not None else []
        else:
            polyline = obj.find('polyline')
            keypoints = _parse_point_list(polyline, xml_file) if polyline is not None else []
        if len(keypoints) != 12:  # Expect exactly 4 keypoints x (x, y, v)
            print(f"Warning: Invalid keypoints count in {xml_file}, expected 12, got {len(keypoints)}, skipping.")
            continue
        gt_anns[image_id].append({
            'image_id': image_id,
            'category_id': category_id,
            'bbox': bbox,
            'keypoints': keypoints,
            'iscrowd': 0
        })

# Load predicted annotations from pred folder.  Each <name>.json must hold a
# dict with an 'objects' list and correspond to a GT image_id of <name>.
pred_dir = 'pred'
pred_anns = defaultdict(list)  # image_id -> list of prediction dicts
for pred_file in os.listdir(pred_dir):
    if not pred_file.endswith('.json'):
        continue
    with open(os.path.join(pred_dir, pred_file), 'r') as f:
        try:
            pred_img_anns = json.load(f)
        except json.JSONDecodeError as e:
            print(f"Error: Failed to parse JSON in {pred_file}: {e}")
            continue
    if not isinstance(pred_img_anns, dict) or 'objects' not in pred_img_anns:
        print(f"Warning: {pred_file} does not contain 'objects' key, skipping.")
        continue
    image_id = pred_file.replace('.json', '')  # JSON basename matches GT image_id
    if image_id not in gt_images:
        print(f"Warning: No matching image_id for {image_id}, skipping.")
        continue
    for ann in pred_img_anns['objects']:
        if not isinstance(ann, dict):
            print(f"Warning: Annotation in {pred_file} is not a dictionary, skipping: {ann}")
            continue
        coco_keypoints = []
        try:
            for kp, score in zip(ann['keypoints'], ann['keypoint_scores']):
                # Score-threshold the visibility flag: confident -> visible (2).
                coco_keypoints.extend([kp[0], kp[1], 2 if score > 0.5 else 0])
            # Bug fix: 'labels' was previously read OUTSIDE this try block,
            # so a prediction without it crashed the script with KeyError
            # instead of being warned about and skipped.
            category_id = ann['labels']
        except (KeyError, TypeError) as e:
            print(f"Warning: Invalid keypoints or scores in {pred_file}, skipping: {e}")
            continue
        pred_anns[image_id].append({
            'image_id': image_id,
            'category_id': category_id,
            'keypoints': coco_keypoints,
            'score': ann.get('bbox_score', 1.0)  # default score when absent
        })

# Thresholds for mAP calculation
dist_thresholds = np.arange(5, 55, 5)  # distance thresholds 5, 10, ..., 50 (pixels)

# Function to extract keypoints from ann
def get_visible_keypoints(ann, center, max_dist):
    """Return the (x, y) coords of keypoints in ann['keypoints'] that are
    marked visible (v > 0) and lie within max_dist of center."""
    pts = np.array(ann['keypoints']).reshape(-1, 3)  # rows of (x, y, v)
    cx, cy = center[0], center[1]
    kept = [
        p[:2]
        for p in pts
        if p[2] > 0 and np.sqrt((p[0] - cx) ** 2 + (p[1] - cy) ** 2) <= max_dist
    ]
    return np.array(kept)

# Function to compute deviations
def compute_deviations(gt_kps, pred_kps):
    """Match every GT keypoint to its nearest predicted keypoint and return
    three parallel lists: |dx|, |dy| and the Euclidean deviation per GT point.

    Returns ([], [], []) when either side is empty.
    """
    if len(gt_kps) == 0 or len(pred_kps) == 0:
        return [], [], []
    # Index of the nearest prediction for each GT keypoint.
    nearest = np.argmin(cdist(gt_kps, pred_kps), axis=1)
    diffs = np.abs(np.asarray(gt_kps) - np.asarray(pred_kps)[nearest])
    dx = list(diffs[:, 0])
    dy = list(diffs[:, 1])
    euclid = list(np.sqrt(diffs[:, 0] ** 2 + diffs[:, 1] ** 2))
    return dx, dy, euclid

# Function to compute TP, FP, FN, Precision, Recall
def compute_pr(gt_kps, pred_kps_with_scores, T):
    """Greedily match predictions to their nearest unmatched GT keypoint.

    A prediction counts as TP when its nearest GT keypoint is closer than
    T pixels and that GT keypoint has not already been claimed.
    NOTE(review): matching is greedy in prediction order — a prediction whose
    nearest GT is taken counts as FP even if another free GT lies within T;
    confirm this is the intended metric.

    Returns (TP, FP, FN, precision, recall).
    """
    if len(gt_kps) == 0:
        # No GT: every prediction is a false positive.
        return 0, len(pred_kps_with_scores), 0, 0.0, 0.0
    if len(pred_kps_with_scores) == 0:
        # No predictions: every GT keypoint is missed.  Handled explicitly;
        # the original relied on cdist raising ValueError on an empty array
        # and printed a spurious format error for this normal input.
        return 0, 0, len(gt_kps), 0.0, 0.0
    try:
        dist_matrix = cdist(gt_kps, np.array([p[:2] for p in pred_kps_with_scores]))
    except ValueError as e:
        print(f"Error in compute_pr: Invalid pred_kps_with_scores format: {e}")
        return 0, len(pred_kps_with_scores), len(gt_kps), 0.0, 0.0
    matched_gt = set()
    for p_idx in range(len(pred_kps_with_scores)):
        nearest_gt = np.argmin(dist_matrix[:, p_idx])
        if dist_matrix[nearest_gt, p_idx] < T and nearest_gt not in matched_gt:
            matched_gt.add(nearest_gt)
    TP = len(matched_gt)
    FP = len(pred_kps_with_scores) - TP
    FN = len(gt_kps) - TP
    precision = TP / (TP + FP) if TP + FP > 0 else 0.0
    recall = TP / (TP + FN) if TP + FN > 0 else 0.0
    return TP, FP, FN, precision, recall

# Function to compute AP
def compute_ap(gt_kps, pred_kps_with_scores, T):
    """Average precision over predictions, labelling a prediction positive
    when its nearest GT keypoint lies within T pixels.

    NOTE(review): labels are per-prediction with no one-to-one matching, so
    several predictions near one GT keypoint all count positive — confirm.
    """
    if len(gt_kps) == 0 or len(pred_kps_with_scores) == 0:
        return 0.0  # Return 0 if no ground truth or predicted keypoints
    ranked = sorted(pred_kps_with_scores, key=lambda p: p[2], reverse=True)
    scores = [p[2] for p in ranked]
    labels = [int(np.min(cdist(gt_kps, np.array([p[:2]]))) < T) for p in ranked]
    if not any(labels):
        return 0.0  # AP undefined without a single positive; report 0
    return average_precision_score(labels, scores)

# Main computation: for each GT image, evaluate predictions inside three
# concentric circular ranges around the image centre, accumulating both
# per-image rows (for CSV) and per-range pools (for the overall table).
results = defaultdict(dict)
per_image_results = []  # Store per-image results for CSV
for image_id, gt_img in gt_images.items():
    # Dynamic ranges based on image width
    ranges = [gt_img['width'] * 0.3, gt_img['width'] * 0.6, gt_img['width'] * 0.9]
    print(f"Image {image_id}: Dynamic ranges={ranges}")
    
    # Circle centre = image centre.
    center = np.array([gt_img['width']/2, gt_img['height']/2])
    
    for r in ranges:
        # Collect every visible GT keypoint that falls within radius r.
        gt_kps_in_range = []
        for ann in gt_anns.get(image_id, []):
            gt_kps = get_visible_keypoints(ann, center, r)
            if len(gt_kps) > 0:
                print(f"Image {image_id}, Range {r:.1f}: GT keypoints={gt_kps}")
            gt_kps_in_range.extend(gt_kps)
        gt_kps_in_range = np.array(gt_kps_in_range) if gt_kps_in_range else np.empty((0, 2))
        
        # Collect predicted keypoints within radius r, each carried as
        # [x, y, score] using the detection's overall score.
        pred_kps_in_range_with_scores = []
        for ann in pred_anns.get(image_id, []):
            score = ann.get('score', 1.0)
            try:
                kps = np.array(ann['keypoints']).reshape(-1, 3)
                for kp in kps:
                    if kp[2] > 0:  # only keypoints flagged visible (v > 0)
                        dist = np.sqrt((kp[0] - center[0])**2 + (kp[1] - center[1])**2)
                        if dist <= r:
                            pred_kps_in_range_with_scores.append(list(kp[:2]) + [score])
            except (ValueError, KeyError) as e:
                print(f"Error processing keypoints for image {image_id}: {e}")
                continue
        if len(pred_kps_in_range_with_scores) > 0:
            print(f"Image {image_id}, Range {r:.1f}: Pred keypoints={pred_kps_in_range_with_scores}")
        
        print(f"Image {image_id}, Range {r:.1f}: GT keypoints count={len(gt_kps_in_range)}, Pred keypoints count={len(pred_kps_in_range_with_scores)}")
        
        # Nearest-neighbour deviations between GT and predicted keypoints.
        dx, dy, euclid = compute_deviations(gt_kps_in_range, np.array([p[:2] for p in pred_kps_in_range_with_scores] if pred_kps_in_range_with_scores else []))
        
        # AP at every distance threshold for this image/range.
        aps = {}
        for T in dist_thresholds:
            ap = compute_ap(gt_kps_in_range, pred_kps_in_range_with_scores, T)
            aps[T] = ap
        
        # PR counts are reported at a single fixed 50-pixel threshold.
        fixed_T = 50
        tp, fp, fn, prec, rec = compute_pr(gt_kps_in_range, pred_kps_in_range_with_scores, fixed_T)
        
        # Store per-image results
        mean_dx_img = np.mean(dx) if dx else 0.0
        mean_dy_img = np.mean(dy) if dy else 0.0
        mean_euclid_img = np.mean(euclid) if euclid else 0.0
        per_image_results.append({
            'ImageID': image_id,
            'Range': r,
            'mean_dx': mean_dx_img,
            'mean_dy': mean_dy_img,
            'mean_euclid': mean_euclid_img,
            'TP': tp,
            'FP': fp,
            'FN': fn,
            'Precision': prec,
            'Recall': rec
        })
        
        # Aggregate results for overall metrics.  Pools are keyed by the
        # absolute radius r, so images sharing a width pool together.
        # results is a defaultdict, but membership is checked explicitly so
        # no empty entry is created here.
        if r not in results:
            results[r] = {
                'dx': [], 'dy': [], 'euclid': [],
                'tp': [], 'fp': [], 'fn': [],
                'precision': [], 'recall': [],
                'aps': {t: [] for t in dist_thresholds}
            }
        results[r]['dx'].extend(dx)
        results[r]['dy'].extend(dy)
        results[r]['euclid'].extend(euclid)
        results[r]['tp'].append(tp)
        results[r]['fp'].append(fp)
        results[r]['fn'].append(fn)
        results[r]['precision'].append(prec)
        results[r]['recall'].append(rec)
        for T in dist_thresholds:
            results[r]['aps'][T].append(aps[T])

# Compute overall results: collapse each range's per-image accumulators into
# one summary row (mean deviations, pooled TP/FP/FN, micro P/R, mAPs).
for r in results:
    acc = results[r]
    all_dx = acc['dx']
    all_dy = acc['dy']
    all_euclid = acc['euclid']
    aps = acc['aps']

    mean_dx = np.mean(all_dx) if all_dx else 0
    mean_dy = np.mean(all_dy) if all_dy else 0
    mean_euclid = np.mean(all_euclid) if all_euclid else 0
    total_tp = sum(acc['tp'])
    total_fp = sum(acc['fp'])
    total_fn = sum(acc['fn'])
    # Micro-averaged precision/recall over the pooled counts.
    overall_precision = total_tp / (total_tp + total_fp) if total_tp + total_fp > 0 else 0
    overall_recall = total_tp / (total_tp + total_fn) if total_tp + total_fn > 0 else 0

    map50 = np.mean(aps[50]) if 50 in aps else 0
    # Consistency fix: guard like map50 above.  The original
    # np.mean(aps.get(25, [])) evaluates to NaN (with a RuntimeWarning)
    # whenever threshold 25 is absent instead of falling back to 0.
    # NOTE(review): the "mAP75" column is computed at distance threshold 25,
    # i.e. the stricter half of the fixed 50px threshold — confirm naming.
    map75 = np.mean(aps[25]) if 25 in aps else 0
    map5095 = np.mean([np.mean(aps[t]) for t in dist_thresholds])

    # Replace the accumulator dict with the finished summary row.
    results[r] = {
        'mean_dx': mean_dx,
        'mean_dy': mean_dy,
        'mean_euclid': mean_euclid,
        'map50': map50,
        'map75': map75,
        'map50-95': map5095,
        'TP': total_tp,
        'FP': total_fp,
        'FN': total_fn,
        'Precision': overall_precision,
        'Recall': overall_recall
    }

# Output as Markdown table (header + one row per range, ascending radius).
_table_header = (
    "\n"
    "| Range | mean_dx | mean_dy | mean_euclid | mAP50 | mAP75 | mAP50-95 | TP | FP | FN | Precision | Recall |\n"
    "|-------|---------|---------|-------------|-------|-------|----------|----|----|----|-----------|--------|\n"
)
_table_rows = [
    f"| {r:.1f} | {results[r]['mean_dx']:.2f} | {results[r]['mean_dy']:.2f} | {results[r]['mean_euclid']:.2f} | {results[r]['map50']:.3f} | {results[r]['map75']:.3f} | {results[r]['map50-95']:.3f} | {results[r]['TP']} | {results[r]['FP']} | {results[r]['FN']} | {results[r]['Precision']:.3f} | {results[r]['Recall']:.3f} |\n"
    for r in sorted(results.keys())
]
markdown_table = _table_header + "".join(_table_rows)

print("### Evaluation Results")
print(markdown_table)

# Save overall (per-range) results to CSV.
csv_file = 'results.csv'
with open(csv_file, 'w') as f:
    lines = ["Range,mean_dx,mean_dy,mean_euclid,mAP50,mAP75,mAP50-95,TP,FP,FN,Precision,Recall\n"]
    for r in sorted(results.keys()):
        res = results[r]
        lines.append(f"{r:.1f},{res['mean_dx']:.2f},{res['mean_dy']:.2f},{res['mean_euclid']:.2f},{res['map50']:.3f},{res['map75']:.3f},{res['map50-95']:.3f},{res['TP']},{res['FP']},{res['FN']},{res['Precision']:.3f},{res['Recall']:.3f}\n")
    f.writelines(lines)
print(f"Overall results saved to {csv_file}")

# Save per-image results (one row per image/range pair) to CSV.
per_image_csv_file = 'per_image_results.csv'
with open(per_image_csv_file, 'w') as f:
    f.write("ImageID,Range,mean_dx,mean_dy,mean_euclid,TP,FP,FN,Precision,Recall\n")
    f.writelines(
        f"{res['ImageID']},{res['Range']:.1f},{res['mean_dx']:.2f},{res['mean_dy']:.2f},{res['mean_euclid']:.2f},{res['TP']},{res['FP']},{res['FN']},{res['Precision']:.3f},{res['Recall']:.3f}\n"
        for res in per_image_results
    )
print(f"Per-image results saved to {per_image_csv_file}")