"""
全自动条纹间距测量脚本
0. 输入图片
1. 自动检测条纹密集区域
2. FFT 提取最亮对称频点并逆变换增强
3. 对逆变换结果进行比例尺识别、角度矫正、水平投影测距
"""
import re
import cv2
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pytesseract
from PIL import Image
from skimage import feature, exposure, transform, morphology
from scipy.signal import find_peaks
from scipy.spatial.distance import cdist

matplotlib.rc('font', family='Microsoft YaHei')     # CJK-capable font so the Chinese figure titles render
# ---------- 1. 自动 ROI ----------
def auto_dense_stripe_roi(img,
                          win_size=32,
                          stride=16,
                          thres_percentile=95,
                          margin=5):
    """Locate the densest stripe region in a grayscale image.

    Slides a `win_size` window with step `stride`, scores each window by
    mean * std of the Sobel gradient magnitude (dense stripes are both
    strong and varied), keeps windows above the `thres_percentile` score,
    and returns their bounding box expanded by `margin` pixels.

    Parameters:
        img: 2-D grayscale image (uint8 expected by the visualization).
        win_size / stride: sliding-window geometry in pixels.
        thres_percentile: score percentile a window must reach to count.
        margin: padding added around the detected box.

    Returns:
        (x, y, w, h) ROI tuple, or None when no dense region is found.
        Side effect: shows a matplotlib figure with the detected box.
    """
    h, w = img.shape[:2]
    gx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=3)
    gy = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=3)
    grad_mag = np.sqrt(gx**2 + gy**2)

    idx_y, idx_x = np.mgrid[0:h-win_size:stride, 0:w-win_size:stride]
    idx_y, idx_x = idx_y.ravel(), idx_x.ravel()
    score = np.array([np.mean(grad_mag[y0:y0+win_size, x0:x0+win_size]) *
                      np.std(grad_mag[y0:y0+win_size, x0:x0+win_size])
                      for y0, x0 in zip(idx_y, idx_x)])

    # Guard: an image smaller than win_size produces no candidate windows;
    # np.percentile would raise on the empty score array.
    if score.size == 0:
        print("未检测到明显的条纹密集区域")
        return None

    th = np.percentile(score, thres_percentile)
    mask = score >= th
    if not np.any(mask):
        print("未检测到明显的条纹密集区域")
        return None

    # Bounding box over the centers of all qualifying windows, padded by
    # `margin` and clamped to the image bounds.
    centers = np.column_stack((idx_x[mask] + win_size//2,
                               idx_y[mask] + win_size//2))
    x_min, y_min = centers.min(axis=0)
    x_max, y_max = centers.max(axis=0)
    x, y = max(0, int(x_min - margin)), max(0, int(y_min - margin))
    roi_w = min(w - x, int(x_max - x_min + 2*margin))
    roi_h = min(h - y, int(y_max - y_min + 2*margin))
    roi = (x, y, roi_w, roi_h)

    # Visualization of the detected box
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.rectangle(vis, (x, y), (x+roi_w, y+roi_h), (0, 0, 255), 2)
    plt.figure(figsize=(5,5))
    plt.title('Auto-detected dense stripe ROI')
    plt.imshow(vis[..., ::-1]); plt.axis('off'); plt.show()
    return roi

# ---------- 2. FFT 最亮对称点对 ----------
def fft_brightest_pair(img, inner_radius=15, outer_frac=0.4, thres_rel=0.2, pixel_tol=2):
    """Isolate the brightest centro-symmetric spot pair in the FFT spectrum
    and inverse-transform only that pair, producing a clean periodic image.

    Parameters:
        img: 2-D grayscale image.
        inner_radius: pixels around the DC term to ignore.
        outer_frac: fraction of the half-size used as the outer band radius.
        thres_rel: relative threshold on the normalized band magnitude.
        pixel_tol: max distance (px) between a spot and the mirror of its partner.

    Returns:
        (filtered_image, (p1, p2)) on success, (None, None) on failure.
    """
    h, w = img.shape
    f = np.fft.fft2(img)
    fshift = np.fft.fftshift(f)
    mag = np.abs(fshift)

    # Annular band mask: skip the DC neighborhood and the far corners.
    cy, cx = h//2, w//2
    Y, X = np.ogrid[:h, :w]
    dist = np.sqrt((X-cx)**2 + (Y-cy)**2)
    outer_radius = min(cx, cy) * outer_frac
    mask = (dist > inner_radius) & (dist < outer_radius)
    mag_band = mag * mask

    # Normalize to [0, 1]; guard against a constant band (ptp == 0 would
    # otherwise divide by zero and produce NaNs).
    ptp = mag_band.max() - mag_band.min()
    mag_norm = (mag_band - mag_band.min()) / (ptp if ptp else 1)
    _, bw = cv2.threshold(mag_norm.astype(np.float32), thres_rel, 1, cv2.THRESH_BINARY)

    bw_u8 = (bw * 255).astype(np.uint8)
    n, labels, stats, centroids = cv2.connectedComponentsWithStats(bw_u8)
    # Label 0 is always the background component: drop it explicitly instead
    # of assuming it has the largest area, then order spots largest-first.
    areas = stats[1:, cv2.CC_STAT_AREA]
    idx = 1 + np.argsort(areas)[::-1]
    pts = centroids[idx]

    # Drop spots too close to the spectrum center (DC leakage)
    dists_to_center = cdist(pts, [[cx, cy]])
    pts = pts[dists_to_center[:, 0] > inner_radius]
    if len(pts) < 2:
        print('过滤中心后未找到足够亮点！'); return None, None

    # Greedy pairing: a spot pairs with the nearest spot to its mirror image
    # through the center, within pixel_tol.
    center = np.array([cx, cy])
    pts_centered = pts - center
    pairs, used = [], set()
    for i, p in enumerate(pts_centered):
        if i in used:
            continue
        sym_dists = cdist([-p], pts_centered)[0]   # computed once, not twice
        j = int(np.argmin(sym_dists))
        if sym_dists[j] < pixel_tol and j not in used:
            pairs.append((pts[i], pts[j]))
            used.update([i, j])
    if not pairs:
        print('未检测到对称亮点！'); return None, None

    # Keep the pair with the highest combined spectral magnitude
    brightest = max(pairs, key=lambda P: mag[int(P[0][1]), int(P[0][0])] + mag[int(P[1][1]), int(P[1][0])])
    p1, p2 = brightest

    # Inverse FFT with only the two selected spots (3-px disks) kept
    mask_spots = np.zeros_like(mag, dtype=np.uint8)
    for (xi, yi) in (p1, p2):
        cv2.circle(mask_spots, (int(round(xi)), int(round(yi))), 3, 1, -1)
    f_masked = fshift * mask_spots
    img_back = np.real(np.fft.ifft2(np.fft.ifftshift(f_masked)))
    return img_back, brightest

# ---------- 3. 脚本2 其余函数 ----------
def detect_scale_bar(gray, scale_region_size=(113, 472), fallback_nm=10.0):
    """Read the scale bar from the bottom-left corner of the image.

    OCR extracts the printed "<number> nm" label; the bar length in pixels is
    the width of the widest dark contour in the region.

    Parameters:
        gray: 2-D grayscale image.
        scale_region_size: (height, width) of the bottom-left crop to search.
        fallback_nm: value used when OCR finds no "<number> nm" text.

    Returns:
        (scale_nm, scale_px); scale_px falls back to 1 so downstream
        divisions never hit zero.
    """
    h, w = gray.shape
    bar_h, bar_w = scale_region_size

    # 1. Crop the bottom-left strip, clamped to the image bounds.
    #    (Fix: the bottom edge was previously capped at a hard-coded 1920 px,
    #    silently truncating the region on images taller than 1920.)
    y0 = max(0, h - bar_h)
    region = gray[y0:h, :bar_w].copy()  # copy avoids stride issues with PIL
    if region.size == 0:       # degenerate / empty image
        print('比例尺区域无法裁剪，使用默认值')
        return fallback_nm, 1

    # 2. Wrap as an 8-bit PIL image for tesseract
    pil_img = Image.fromarray(region)

    # 3. OCR the "<number> nm" label (psm 6: assume a uniform text block)
    ocr_text = pytesseract.image_to_string(pil_img, config='--psm 6').lower()
    m = re.search(r"(\d+(?:\.\d+)?)\s*nm", ocr_text)
    scale_nm = float(m.group(1)) if m else fallback_nm

    # 4. Bar length in pixels = widest dark (<40) contour; default 1 when
    #    no contour is found, keeping later divisions safe.
    dark = (region < 40).astype(np.uint8)
    cnts, _ = cv2.findContours(dark, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    max_w = max([cv2.boundingRect(c)[2] for c in cnts], default=1)
    print(f'比例尺 OCR:{scale_nm} nm, 像素长度:{max_w} px')
    return scale_nm, max_w

def preprocess(img):
    """Denoise, contrast-stretch, and edge-detect an image.

    Returns (enhanced_image, edge_map).  The Canny sigma is chosen
    adaptively: images with a high mean gradient magnitude get stronger
    smoothing (sigma=5), otherwise sigma=3.
    """
    smoothed = cv2.GaussianBlur(img, (3, 3), 0)
    lo, hi = np.percentile(smoothed, (4, 98))
    enh = exposure.rescale_intensity(smoothed, in_range=(lo, hi))

    gx = cv2.Sobel(enh, cv2.CV_64F, 1, 0, 3)
    gy = cv2.Sobel(enh, cv2.CV_64F, 0, 1, 3)
    grad_mean = np.mean(np.sqrt(gx ** 2 + gy ** 2))
    sigma = 5 if grad_mean > 233 else 3

    edges = feature.canny(enh, sigma=sigma)
    return enh, edges

def hough_angle(edges, img_vis):
    """Estimate the dominant stripe angle in degrees via probabilistic Hough.

    Angles are folded into (-90, 90] and combined through the medians of
    their sine/cosine components so wrap-around near ±90° does not skew the
    result.  Returns None when fewer than 3 segments are detected.
    `img_vis` is accepted for interface compatibility but not used here.
    """
    segs = cv2.HoughLinesP((edges * 255).astype(np.uint8), 1, np.pi / 180,
                           threshold=10, minLineLength=15, maxLineGap=3)
    if segs is None or len(segs) < 3:
        return None

    angles = []
    for x1, y1, x2, y2 in segs[:, 0]:
        a = np.degrees(np.arctan2(-(y2 - y1), x2 - x1)) % 180
        angles.append(a - 180 if a > 90 else a)

    rad = np.deg2rad(angles)
    return np.rad2deg(np.arctan2(np.median(np.sin(rad)),
                                 np.median(np.cos(rad))))

def remove_black_lines(img, max_black=10):
    """Drop rows containing a run of consecutive zero pixels longer than max_black.

    Used to strip the black fill wedges introduced by rotation.  Fix: the
    original only inspected the *trailing* zero run (the streak counter was
    reset by any later non-zero pixel), so rows with long black runs at the
    start or middle were wrongly kept.  This version tracks the longest run
    anywhere in the row.

    Parameters:
        img: 2-D array; zero-valued pixels count as "black".
        max_black: longest zero run a kept row may contain.

    Returns:
        Array of the surviving rows, or an empty (0, width) array.
    """
    keep = []
    for row in img:
        streak = 0
        longest = 0
        for p in row:
            if p == 0:
                streak += 1
                if streak > longest:
                    longest = streak
            else:
                streak = 0
        if longest <= max_black:
            keep.append(row)
    return np.array(keep) if keep else np.empty((0, img.shape[1]), dtype=img.dtype)

def measure_spacing(rot_img, scale_nm, scale_px):
    """Measure the mean stripe spacing of a rotation-corrected image.

    Inverts the image, projects it row-wise, finds projection peaks above
    mean + 0.6*std, and converts the mean peak-to-peak distance to nm using
    the scale bar calibration.  Prints the result and shows a diagnostic
    figure; returns None when fewer than 2 peaks are found.
    """
    inverted = 255 - rot_img                    # stripes become bright ridges
    profile = np.sum(inverted, axis=1)          # horizontal (row-wise) projection
    level = np.mean(profile) + 0.6 * np.std(profile)
    peaks, _ = find_peaks(profile, height=level, distance=10, prominence=10)
    if len(peaks) < 2:
        print('投影峰值不足，无法计算间距')
        return

    px_spacing = np.mean(np.diff(peaks))
    spacing_nm = px_spacing * (scale_nm / scale_px)
    print(f'检测到 {len(peaks)} 个条纹，平均间距 = {spacing_nm:.2f} nm')

    plt.figure(figsize=(10, 4))
    plt.subplot(121)
    plt.imshow(inverted, cmap='gray')
    plt.title('旋转后 ROI')
    plt.subplot(122)
    plt.plot(profile)
    plt.axhline(level, color='r')
    plt.plot(peaks, profile[peaks], 'ro')
    plt.title('水平投影')
    plt.tight_layout()
    plt.show()

# ---------- 主流程 ----------
def main(image_path):
    """Run the full pipeline on one image file.

    Steps: load grayscale -> auto ROI -> FFT spot-pair enhancement ->
    scale-bar OCR -> edge/Hough angle estimate -> rotate -> strip black
    fill rows -> measure spacing.  Each stage aborts quietly on failure.
    """
    gray = cv2.imread(image_path, 0)
    if gray is None:
        raise FileNotFoundError(image_path)

    roi = auto_dense_stripe_roi(gray)
    if roi is None:
        return
    rx, ry, rw, rh = roi
    crop = gray[ry:ry + rh, rx:rx + rw]

    # FFT enhancement of the cropped stripe region
    enhanced, _ = fft_brightest_pair(crop)
    if enhanced is None:
        return

    # Scale bar calibration (read from the full image, not the crop)
    nm_value, px_length = detect_scale_bar(gray)

    # Edge detection and dominant stripe angle
    _, edge_map = preprocess(enhanced)
    angle = hough_angle(edge_map, enhanced)
    if angle is None:
        print('霍夫直线不足，退出')
        return

    # Deskew, drop rotation-fill rows, then measure
    deskewed = transform.rotate(enhanced, -angle, mode='constant')
    deskewed = remove_black_lines(deskewed)
    measure_spacing(deskewed, nm_value, px_length)

# ---------------- run ----------------
if __name__ == '__main__':
    # Demo entry point; adjust the input path as needed.
    main(r'..\img\new\0006.tif')