# Export utilities for dwpose-editor
import json
import numpy as np
from PIL import Image, ImageDraw
import io
import base64
from datetime import datetime
from .notifications import notify_success, notify_error
def _detect_source_resolution_from_data(pose_data):
"""推定的にデータ座標系の解像度を検出(最大x/yから推定)"""
try:
max_x = 0.0
max_y = 0.0
def scan_points(arr):
nonlocal max_x, max_y
if not arr: return
for i in range(0, len(arr), 3):
if i + 2 < len(arr):
x, y, conf = arr[i], arr[i+1], arr[i+2]
if conf is None or conf <= 0:
continue
                    # Whether the values are normalized is decided separately, so just track the raw maxima here
if isinstance(x, (int, float)) and isinstance(y, (int, float)):
max_x = max(max_x, float(x))
max_y = max(max_y, float(y))
if isinstance(pose_data, dict) and 'people' in pose_data and pose_data['people']:
person = pose_data['people'][0]
scan_points(person.get('pose_keypoints_2d', []))
scan_points(person.get('hand_left_keypoints_2d', []))
scan_points(person.get('hand_right_keypoints_2d', []))
scan_points(person.get('face_keypoints_2d', []))
else:
            # Compatibility with the bodies/hands/faces format
if 'bodies' in pose_data and pose_data['bodies'] and 'candidate' in pose_data['bodies']:
cands = pose_data['bodies']['candidate'] or []
for c in cands:
if c and len(c) >= 2:
max_x = max(max_x, float(c[0]))
max_y = max(max_y, float(c[1]))
for hand in (pose_data.get('hands') or []):
scan_points(hand)
for face in (pose_data.get('faces') or []):
scan_points(face)
        # If the values look normalized (<= 1), return None
if max_x <= 1.01 and max_y <= 1.01:
return None
        # Zero or negative maxima are invalid
if max_x <= 0 or max_y <= 0:
return None
        # Round to integers rather than keeping fractional values
return (int(round(max_x)), int(round(max_y)))
except Exception:
return None
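# Example behaviour of the helper above (a minimal sketch; keypoint arrays are
# truncated for brevity). Pixel-space keypoints yield their rounded maxima, while
# coordinates that all look normalized (<= 1) yield None so the caller falls back
# to a declared resolution:
#   _detect_source_resolution_from_data(
#       {"people": [{"pose_keypoints_2d": [320.4, 240.7, 0.9, 636.2, 478.6, 0.8]}]}
#   )  # -> (636, 479)
#   _detect_source_resolution_from_data(
#       {"people": [{"pose_keypoints_2d": [0.5, 0.4, 0.9]}]}
#   )  # -> None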
def get_timestamp_filename(prefix, extension):
"""
タイムスタンプ付きファイル名を生成
Args:
prefix: ファイル名の前置詞
extension: ファイル拡張子(ドットなし)
Returns:
str: タイムスタンプ付きファイル名
"""
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
return f"{prefix}_{timestamp}.{extension}"
def export_pose_as_image(pose_data, canvas_size=(640, 640), background_color=(0, 0, 0), enable_hands=True, enable_face=True):
"""
ポーズデータを画像として出力
Args:
pose_data: DWPoseデータ
canvas_size: 出力画像サイズ
background_color: 背景色 (R, G, B)
enable_hands: 手を描画するかどうか
enable_face: 顔を描画するかどうか
Returns:
PIL.Image: ポーズ画像
"""
try:
print(f"[DEBUG] 🎨 export_pose_as_image開始 - データ: {bool(pose_data)}")
if not pose_data:
print(f"[DEBUG] ❌ export_pose_as_image: ポーズデータなし")
notify_error("ポーズデータがありません")
return None
print(f"[DEBUG] 🎨 ポーズデータ構造: {list(pose_data.keys()) if isinstance(pose_data, dict) else type(pose_data)}")
        # Create a new image
image = Image.new('RGB', canvas_size, background_color)
draw = ImageDraw.Draw(image)
print(f"[DEBUG] 🎨 背景画像作成完了: {canvas_size}")
        # Resolution of the source data coordinate system
src_w, src_h = canvas_size
if isinstance(pose_data, dict):
if 'resolution' in pose_data and isinstance(pose_data['resolution'], (list, tuple)) and len(pose_data['resolution']) >= 2:
src_w, src_h = int(pose_data['resolution'][0] or canvas_size[0]), int(pose_data['resolution'][1] or canvas_size[1])
elif 'metadata' in pose_data and isinstance(pose_data['metadata'], dict) and 'resolution' in pose_data['metadata']:
res = pose_data['metadata'].get('resolution', canvas_size)
if isinstance(res, (list, tuple)) and len(res) >= 2:
src_w, src_h = int(res[0] or canvas_size[0]), int(res[1] or canvas_size[1])
        # Only fall back to the resolution estimated from the data when none is declared (prevents excessive upscaling from a false detection)
if (not isinstance(pose_data, dict)) or (
('resolution' not in pose_data or not pose_data.get('resolution')) and
(pose_data.get('metadata') is None or not pose_data['metadata'].get('resolution'))
):
detected = _detect_source_resolution_from_data(pose_data)
if detected is not None:
src_w, src_h = detected
        # Draw the body (matches the refs implementation)
print(f"[DEBUG] 🧭 Export scale info: src_res=({src_w},{src_h}) -> out=({canvas_size[0]},{canvas_size[1]})")
if 'people' in pose_data and pose_data['people']:
print(f"[DEBUG] 🎨 ボディ描画開始(refs準拠)")
draw_body_on_image(draw, pose_data, canvas_size, (src_w, src_h))
print(f"[DEBUG] 🎨 ボディ描画完了")
else:
print(f"[DEBUG] ⚠️ ボディデータなし - people: {'people' in pose_data}, count: {len(pose_data.get('people', []))}")
        # 💖 Draw hands (supports both the people format and the hands format)
if enable_hands:
hands_data = None
if 'people' in pose_data and pose_data['people'] and len(pose_data['people']) > 0:
person = pose_data['people'][0]
left_hand = person.get('hand_left_keypoints_2d', [])
right_hand = person.get('hand_right_keypoints_2d', [])
if left_hand or right_hand:
hands_data = [left_hand, right_hand]
print(f"[DEBUG] 🎨 手描画開始(people形式)- 左: {len(left_hand)}, 右: {len(right_hand)}")
elif 'hands' in pose_data and pose_data['hands']:
hands_data = pose_data['hands']
print(f"[DEBUG] 🎨 手描画開始(hands形式)")
if hands_data:
draw_hands_on_image(draw, hands_data, canvas_size, (src_w, src_h))
print(f"[DEBUG] 🎨 手描画完了")
else:
print(f"[DEBUG] ⚠️ 手描画スキップ - 手データなし")
        # 💖 Draw the face (supports both the people format and the faces format)
if enable_face:
face_data = None
if 'people' in pose_data and pose_data['people'] and len(pose_data['people']) > 0:
person = pose_data['people'][0]
face_keypoints = person.get('face_keypoints_2d', [])
if face_keypoints:
face_data = [face_keypoints]
print(f"[DEBUG] 🎨 顔描画開始(people形式)- キーポイント: {len(face_keypoints)}")
elif 'faces' in pose_data and pose_data['faces']:
face_data = pose_data['faces']
print(f"[DEBUG] 🎨 顔描画開始(faces形式)")
if face_data:
draw_faces_on_image(draw, face_data, canvas_size, (src_w, src_h))
print(f"[DEBUG] 🎨 顔描画完了")
else:
print(f"[DEBUG] ⚠️ 顔描画スキップ - 顔データなし")
print(f"[DEBUG] 🎨 export_pose_as_image成功!")
        # Notifications are handled in app.py (avoids duplicates)
return image
except Exception as e:
print(f"[DEBUG] ❌ export_pose_as_image例外: {e}")
notify_error(f"ポーズ画像エクスポートに失敗しました: {str(e)}")
return None
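# Resolution lookup order used by export_pose_as_image, illustrated with a
# hypothetical payload (a sketch): an explicit pose_data["resolution"] wins,
# then metadata["resolution"], then the value detected from the keypoints, and
# finally canvas_size itself. For example, with pose_data["resolution"] ==
# [1280, 720] and canvas_size == (640, 360), pixel keypoints are scaled by
# 0.5 on both axes before drawing.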
def draw_body_on_image(draw, pose_data, canvas_size, source_resolution=None):
"""画像にボディを描画(refs準拠)"""
try:
print(f"[DEBUG] 🎨 draw_body_on_image開始(refs準拠)")
        # Per refs: get pose_keypoints_2d from people
people = pose_data.get("people", [])
if not people:
print(f"[DEBUG] ⚠️ people が空のため描画スキップ")
return
        # Per refs: connection definitions (issue_042 revised version - unified with the JavaScript side)
connections = [
[1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9],
[9, 10], [1, 11], [11, 12], [12, 13], [1, 0], [0, 14], [14, 16],
            [0, 15], [15, 17], [13, 18], [10, 19]  # Fix: right ankle -> right toe, left ankle -> left toe
]
        # Per refs: color definitions (BGR -> RGB conversion)
skeleton_colors = [
(0, 0, 255), (0, 85, 255), (0, 170, 255), (0, 255, 255), (0, 255, 170), (0, 255, 85), (0, 255, 0),
(85, 255, 0), (170, 255, 0), (255, 255, 0), (170, 255, 0), (85, 255, 0), (255, 0, 0), (255, 0, 85),
(255, 0, 170), (255, 0, 255), (170, 0, 255), (85, 0, 255), (255, 255, 170), (170, 255, 255)
]
W, H = canvas_size
srcW, srcH = (source_resolution or canvas_size)
if srcW <= 0 or srcH <= 0:
srcW, srcH = W, H
detection_threshold = 0.3
for person in people:
keypoints_flat = person.get("pose_keypoints_2d", [])
print(f"[DEBUG] 🎨 keypoints_flat length: {len(keypoints_flat)}")
            # Per refs: split into groups of 3 to build the keypoint list
keypoints = []
for i in range(0, len(keypoints_flat), 3):
if i + 2 < len(keypoints_flat):
x, y, confidence = keypoints_flat[i:i+3]
keypoints.append([x, y, confidence])
print(f"[DEBUG] 🎨 keypoints count: {len(keypoints)}")
            # Coordinate normalization / resolution adjustment (0..1 normalized, or pixels scaled to the output resolution)
is_normalized = len(keypoints) > 0 and all(0 <= kp[0] <= 1 and 0 <= kp[1] <= 1 for kp in keypoints if kp[2] > 0)
if is_normalized:
for kp in keypoints:
if kp[2] > 0:
kp[0] *= W
kp[1] *= H
else:
                # Pixel coordinates -> scale to the output size (source resolution -> output resolution)
sx = W / float(srcW)
sy = H / float(srcH)
for kp in keypoints:
if kp[2] > 0:
kp[0] *= sx
kp[1] *= sy
            # Per refs: draw the connection lines
for i, connection in enumerate(connections):
if i < len(skeleton_colors):
color = skeleton_colors[i]
else:
color = skeleton_colors[i % len(skeleton_colors)]
idx1, idx2 = connection
if 0 <= idx1 < len(keypoints) and 0 <= idx2 < len(keypoints):
kp1 = keypoints[idx1]
kp2 = keypoints[idx2]
if kp1[2] > detection_threshold and kp2[2] > detection_threshold:
                        # Per refs: draw a thick line (PIL version)
draw.line([
(int(kp1[0]), int(kp1[1])),
(int(kp2[0]), int(kp2[1]))
], fill=color, width=4)
            # Per refs: draw the keypoints
for i, kp in enumerate(keypoints):
x, y, confidence = kp
if confidence > detection_threshold:
if i < len(skeleton_colors):
color = skeleton_colors[i]
else:
color = skeleton_colors[i % len(skeleton_colors)]
draw.ellipse([int(x)-4, int(y)-4, int(x)+4, int(y)+4], fill=color)
print(f"[DEBUG] 🎨 draw_body_on_image完了")
except Exception as e:
print(f"[DEBUG] ❌ draw_body_on_image例外: {e}")
import traceback
traceback.print_exc()
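# Minimal direct-use sketch for the renderer above (hypothetical data; with only
# two keypoints, every connection that references a higher index is skipped):
#   _img = Image.new('RGB', (640, 640), (0, 0, 0))
#   draw_body_on_image(
#       ImageDraw.Draw(_img),
#       {"people": [{"pose_keypoints_2d": [0.50, 0.30, 0.9, 0.50, 0.45, 0.9]}]},
#       (640, 640),
#   )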
def draw_hands_on_image(draw, hands_data, canvas_size, source_resolution=None):
"""💖 画像に手を描画(座標変換対応)"""
W, H = canvas_size
srcW, srcH = (source_resolution or canvas_size)
if srcW <= 0 or srcH <= 0:
srcW, srcH = W, H
for hand in hands_data:
if hand and len(hand) > 0:
for i in range(0, len(hand), 3):
if i + 2 < len(hand):
x, y, conf = hand[i], hand[i+1], hand[i+2]
if conf > 0.3:
                        # 💖 Coordinate normalization / pixel scaling
if 0 <= x <= 1 and 0 <= y <= 1:
x = x * W
y = y * H
else:
x = x * (W / float(srcW))
y = y * (H / float(srcH))
                        # Per refs: OpenCV (255, 0, 0) BGR -> PIL (0, 0, 255) RGB = blue
draw.ellipse([int(x)-3, int(y)-3, int(x)+3, int(y)+3], fill=(0, 0, 255))
def draw_faces_on_image(draw, faces_data, canvas_size, source_resolution=None):
"""💖 画像に顔を描画(座標変換対応)"""
W, H = canvas_size
srcW, srcH = (source_resolution or canvas_size)
if srcW <= 0 or srcH <= 0:
srcW, srcH = W, H
for face in faces_data:
if face and len(face) > 0:
for i in range(0, len(face), 3):
if i + 2 < len(face):
x, y, conf = face[i], face[i+1], face[i+2]
if conf > 0.3:
                        # 💖 Coordinate normalization / pixel scaling
if 0 <= x <= 1 and 0 <= y <= 1:
x = x * W
y = y * H
else:
x = x * (W / float(srcW))
y = y * (H / float(srcH))
                        # Per refs: OpenCV (255, 255, 255) BGR -> PIL (255, 255, 255) RGB = white
draw.ellipse([int(x)-2, int(y)-2, int(x)+2, int(y)+2], fill=(255, 255, 255))
def export_pose_as_json(pose_data, include_metadata=False):
"""
ポーズデータをpeople形式のJSONとして出力
Args:
pose_data: DWPoseデータ(people形式またはbodies形式)
include_metadata: メタデータを含めるかどうか(デフォルト: False)
Returns:
str: people形式のJSON文字列
"""
try:
if not pose_data:
notify_error("ポーズデータがありません")
return None
        # Build the people-format output data structure
export_data = []
        # Default resolution
canvas_width = 512
canvas_height = 512
        # Get resolution info from pose_data
if 'resolution' in pose_data and pose_data['resolution']:
resolution = pose_data['resolution']
if isinstance(resolution, list) and len(resolution) >= 2:
canvas_width = int(resolution[0])
canvas_height = int(resolution[1])
elif 'metadata' in pose_data and 'resolution' in pose_data['metadata']:
resolution = pose_data['metadata']['resolution']
if isinstance(resolution, list) and len(resolution) >= 2:
canvas_width = int(resolution[0])
canvas_height = int(resolution[1])
        # Build the people-format data
person_data = {
"pose_keypoints_2d": [],
"face_keypoints_2d": [],
"hand_left_keypoints_2d": [],
"hand_right_keypoints_2d": []
}
        # If the people format already exists, use it as-is
if 'people' in pose_data and pose_data['people']:
person_data = pose_data['people'][0].copy()
            # 🦶✨ DWPose 25-keypoint support: verify padding for the people format as well
if "pose_keypoints_2d" in person_data:
keypoint_count = len(person_data["pose_keypoints_2d"]) // 3
if keypoint_count < 25:
padding_needed = 25 - keypoint_count
for _ in range(padding_needed):
person_data["pose_keypoints_2d"].extend([0, 0, 0])
else:
            # Convert from the bodies format to the people format
if 'bodies' in pose_data and 'candidate' in pose_data['bodies']:
candidates = pose_data['bodies']['candidate']
for candidate in candidates:
if candidate and len(candidate) >= 2:
person_data["pose_keypoints_2d"].extend([
candidate[0],
candidate[1],
candidate[2] if len(candidate) > 2 else 1.0
])
            # 🦶✨ DWPose 25-keypoint support: pad with zeros when fewer than 25 keypoints
keypoint_count = len(person_data["pose_keypoints_2d"]) // 3
if keypoint_count < 25:
padding_needed = 25 - keypoint_count
for _ in range(padding_needed):
person_data["pose_keypoints_2d"].extend([0, 0, 0])
            # Hand data
if 'hands' in pose_data and pose_data['hands']:
hands = pose_data['hands']
if len(hands) > 0:
person_data["hand_left_keypoints_2d"] = hands[0] if hands[0] else []
if len(hands) > 1:
person_data["hand_right_keypoints_2d"] = hands[1] if hands[1] else []
            # Face data
if 'faces' in pose_data and pose_data['faces']:
faces = pose_data['faces']
if len(faces) > 0:
person_data["face_keypoints_2d"] = faces[0] if faces[0] else []
        # Build the frame data
frame_data = {
"people": [person_data],
"canvas_width": canvas_width,
"canvas_height": canvas_height
}
export_data.append(frame_data)
json_str = json.dumps(export_data, indent=2, ensure_ascii=False)
        # Notifications are handled in app.py (avoids duplicates)
return json_str
except Exception as e:
notify_error(f"JSONエクスポートに失敗しました: {str(e)}")
return None
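# Shape of the JSON string produced above (a sketch with placeholder values;
# pose_keypoints_2d is padded with zeros up to 25 keypoints when shorter):
#   [
#     {
#       "people": [
#         {
#           "pose_keypoints_2d": [...],
#           "face_keypoints_2d": [...],
#           "hand_left_keypoints_2d": [...],
#           "hand_right_keypoints_2d": [...]
#         }
#       ],
#       "canvas_width": 512,
#       "canvas_height": 512
#     }
#   ]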
def create_download_link(content, filename, content_type="text/plain"):
"""
ダウンロードリンク用のデータURLを作成
Args:
content: ファイル内容(文字列またはバイト)
filename: ファイル名
content_type: MIMEタイプ
Returns:
str: データURL
"""
try:
if isinstance(content, str):
content = content.encode('utf-8')
b64_content = base64.b64encode(content).decode()
return f"data:{content_type};base64,{b64_content}"
except Exception as e:
print(f"Download link creation error: {e}")
return None
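
# Minimal end-to-end sketch of the helpers in this module. This is an illustrative
# example with fabricated data, not part of the application flow; it assumes the
# module is run in its package context (e.g. `python -m <package>.export`) so that
# the relative import of .notifications at the top of the file resolves.
if __name__ == "__main__":
    _demo_pose = {
        "people": [{
            "pose_keypoints_2d": [0.5, 0.5, 0.9],  # a single normalized keypoint
            "face_keypoints_2d": [],
            "hand_left_keypoints_2d": [],
            "hand_right_keypoints_2d": [],
        }],
        "resolution": [640, 640],
    }
    _img = export_pose_as_image(_demo_pose, canvas_size=(640, 640))
    if _img is not None:
        _img.save(get_timestamp_filename("pose_demo", "png"))
    _json_str = export_pose_as_json(_demo_pose)
    if _json_str is not None:
        _url = create_download_link(_json_str, get_timestamp_filename("pose_demo", "json"),
                                    content_type="application/json")
        print(_url[:80] if _url else "download link creation failed")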