import os
import json
import math
import random

import numpy as np
from numpy.core.numeric import ones_like
import pandas as pd
import cv2
from numpy.core.shape_base import block
from pandas.core import indexing
from scipy import signal
from numpy.lib.function_base import average, gradient

import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.mixture as mix

import ransac

# Root directory for data paths used by the script blocks below;
# switch to the commented value when running on Windows.
# sys_dir = "E:\\"
sys_dir = "/Users/ruideng"

def detectHighLight(img):
    """Binarize the first channel of img with Otsu's method to isolate bright regions.

    Returns the 0/255 binary image.
    """
    # Otsu picks the threshold itself, so the explicit value is ignored —
    # but OpenCV requires a number here, not None (None raises on current cv2).
    thresh, binary_img = cv2.threshold(img[:, :, 0], 0, 255, cv2.THRESH_OTSU)
    return binary_img

def test(img):
    """Experimental: combine an Otsu global threshold with a mean-adaptive threshold.

    Accepts a BGR image or a 2-D grayscale image; returns
    (otsu_binary, adaptive_binary) computed on the first channel.
    """
    # The original checked img.shape[2], which crashes on 2-D grayscale input.
    if img.ndim == 3:
        img = img[:, :, 0]
    # Otsu ignores the supplied threshold; OpenCV still needs a number, not None.
    threshOTSU, binary_img = cv2.threshold(img, 0, 255, cv2.THRESH_OTSU)
    # Mean gray level of the pixels Otsu marked as foreground.
    # NOTE(review): divides by the foreground pixel count — an all-background
    # image would divide by zero; confirm inputs always have foreground.
    num_ = (binary_img // 255).sum()
    sum_ = ((binary_img // 255) * img).sum()
    mean_ = sum_ // num_
    # The offset C ties the adaptive threshold to the Otsu foreground statistics.
    adbin_img = cv2.adaptiveThreshold(
            img, 
            255, 
            cv2.ADAPTIVE_THRESH_MEAN_C, 
            cv2.THRESH_BINARY, 
            15, 
            mean_ - threshOTSU - 20
        )
    return binary_img, adbin_img

def getCmpImg(org_img, cmp_img):
    """Overlay cmp_img as a red channel on top of org_img for visual comparison."""
    base = org_img.astype(np.uint16)
    blank = np.zeros(base.shape[:2], dtype=np.uint16)
    # place the comparison mask in the red channel (BGR order)
    overlay = cv2.merge((blank, blank, cmp_img.astype(np.uint16)))
    return cv2.addWeighted(base, 256 * 0.8, overlay, 256 * 0.2, 0)

def getBoxFilter(W, enhance=1):
    """Build a (2W x W) vertical-edge kernel: a +1 block stacked over a -1 block,
    normalized by the kernel area and scaled by `enhance`."""
    top = np.ones((W, W), dtype=np.int32)
    kernel = np.vstack((top, -top))
    return kernel / (2 * W * W) * enhance

def precut(img, **kwargs):
    """Crop the central region of an image.

    Keyword args (fractions of the image dimensions):
        up_per:     top cut position (default 0.2).
        down_per:   bottom cut position (default 0.9).
        column_per: fraction of columns kept, centered (default 0.6).

    Returns the cropped view of img.
    """
    # kwargs is always a dict, so the original `if kwargs != None` was a no-op;
    # kwargs.get expresses the defaults directly.
    up_per = kwargs.get('up_per', 0.2)
    down_per = kwargs.get('down_per', 0.9)
    column_per = kwargs.get('column_per', 0.6)

    rows, cols = img.shape[0], img.shape[1]
    row_slice = slice(round(up_per * rows), round(down_per * rows + 1))
    col_slice = slice(round((0.5 - 0.5 * column_per) * cols + 1),
                      round((0.5 + 0.5 * column_per) * cols))

    return img[row_slice, col_slice]

def segImg(img, blocks=7):
    """Return the starting row index of each of `blocks` equal-height horizontal bands."""
    band_height = img.shape[0] / blocks
    return [round(k * band_height) for k in range(blocks)]

def imgEntropy(img, img_slice=None):
    """Return (mean gray level, Shannon entropy) of img, optionally restricted
    to img[img_slice]."""
    if img_slice:
        img = img[img_slice]
    hist = cv2.calcHist([img], [0], None, [256], [0, 256])
    probs = hist / hist.sum()
    mean = 0
    entropy = 0
    for level, p in enumerate(probs):
        mean += level * p
        if p != 0:
            entropy -= p * np.log2(p)
    # hist rows are 1-element arrays, so unwrap the accumulated scalars
    return mean[0], entropy[0]
    
# The last element of the returned blocks list is the image height.
def blockEntropy(img, block_num=7):
    """Compute per-band mean gray level and entropy over `block_num` horizontal bands."""
    blocks = segImg(img, block_num) + [img.shape[0]]
    mean = []
    entropy = []
    for start, stop in zip(blocks[:-1], blocks[1:]):
        band_mean, band_entropy = imgEntropy(img, slice(start, stop))
        mean.append(band_mean)
        entropy.append(band_entropy)
    return mean, entropy, blocks

# Visualize the entropy of each image band; blocks and entropy are lists.
def visualBlockEntropy(img, blocks, entropy):
    """Append a bar column to img whose per-band width encodes each band's entropy rank."""
    visual = np.zeros((img.shape[0], len(blocks)))

    # rank[i] = position of entropy[i] in ascending order (argsort of argsort)
    order = sorted(range(len(entropy)), key=lambda k: entropy[k])
    rank = [0] * len(entropy)
    for pos, idx in enumerate(order):
        rank[idx] = pos

    for i, r in enumerate(rank):
        band = slice(blocks[i], blocks[i + 1])
        visual[band, :r + 1] = 255

    return np.hstack((img, visual))

# Compute gray-level histogram statistics.
def getHistInfo(Hist, **kwargs):
    """Summarize a 256-bin histogram over its occupied (non-zero) gray-level section.

    Keyword args:
        top_num:  list of bin counts N — stats over the brightest N bins each.
        top_rate: list of percentages — stats over the brightest percent of bins each.

    Returns (results, thresh1, thresh2): results is a nested list (flattened
    later with listRavel) holding per-top_num ratios and counts-per-bin, then
    per-top_rate ratios and counts-per-bin plus the section length, and finally
    the average count per occupied bin. thresh1/thresh2 hold the corresponding
    gray-level thresholds (None when the option was not requested).
    """
    top_num_key = False
    top_rate_key = False
    thresh1, thresh2 = None, None
    if "top_num" in kwargs:
        top_num_key = True
        top_num = kwargs["top_num"]
        thresh1 = []
    if "top_rate" in kwargs:
        top_rate_key = True
        top_rate = kwargs["top_rate"]
        thresh2 = []
    
    section_start = None
    section_end = None
    # locate the occupied gray-level section (first and last non-empty bin)
    for i, j in enumerate(Hist):
        if j and section_start == None:
            section_start = i
            break
    for i in range(len(Hist)-1, -1, -1):
        if Hist[i]:
            section_end = i
            break
    section_len=  section_end - section_start + 1
    sum = Hist[section_start: section_end+1].sum()  # NOTE: shadows the builtin sum

    results = []

    if top_num_key:
        num_n_l = []
        num_ratio = []
        for i in top_num:
            if i <= section_len:
                # sum of the brightest i bins (reverse slice from section_end)
                tmp = Hist[section_end: section_end-i: -1].sum()
                num_ratio.append(tmp/sum)
                num_n_l.append(tmp/i)
                thresh1.append(section_end-i)
            else:
                # requested more bins than the section holds: no stats,
                # but a (possibly negative) threshold is still recorded
                num_ratio.append(None)
                num_n_l.append(None)
                thresh1.append(section_end-i)
        
        results.append(num_ratio)
        results.append(num_n_l)
            
    if top_rate_key:
        hist_section = section_len
        rate_n_l = []
        rate_ratio = []
        for i in top_rate:
            # convert the percentage into a bin count (at least 1)
            top_ = round(i * section_len / 100)
            if top_ == 0:
                top_ = 1

            tmp = Hist[section_end: section_end-top_: -1].sum()
            rate_ratio.append(tmp/sum)
            rate_n_l.append(tmp/top_)
            thresh2.append(section_end-top_)

        results.append(rate_ratio)
        results.append(rate_n_l)
        results.append(hist_section)
        
    # average count per occupied bin
    results.append(sum/section_len)

    return results, thresh1, thresh2

# Flatten nested lists.
def listRavel(ls, ptr=None):
    """Recursively flatten arbitrarily nested lists into one flat list.

    ptr is an internal accumulator ([target_list]) used by recursive calls;
    external callers omit it and receive the flattened list. Recursive calls
    return None, matching the original contract.
    """
    if ptr is None:  # `is None`, not `== None`
        results = []
        listRavel(ls, [results])
        return results
    for item in ls:
        if isinstance(item, list):
            listRavel(item, ptr)
        else:
            ptr[0].append(item)
# Randomly sample image sub-blocks. `region` restricts sampling: length 2 is a
# row interval, length 4 is (row_min, row_max, col_min, col_max), all inclusive.
# Each returned row has the form (x-left, x-right, y-left, y-right).
def imgSubblock(shape, size, num=10, region=None):
    """Sample `num` random sub-block bounding boxes of `size` inside `shape`.

    Returns a float (num, 4) array of (x_left, x_right, y_top, y_bottom),
    inclusive coordinates.
    """
    if isinstance(shape, int):
        shape = (shape, shape)
    if isinstance(size, int):
        size = (size, size)

    sample_area = [0, shape[0] - 1, 0, shape[1] - 1]
    # `is not None`: `!= None` misbehaves on array-like regions
    if region is not None and len(region) in (2, 4):
        sample_area[:len(region)] = region

    # shrink so a full block anchored at the sampled corner stays inside
    sample_area[1] -= size[0]
    sample_area[3] -= size[1]

    y = np.random.randint(sample_area[0], sample_area[1] + 1, size=num)
    x = np.random.randint(sample_area[2], sample_area[3] + 1, size=num)

    # build all boxes at once instead of np.vstack per box (O(n^2) copying)
    bbox = np.stack((x, x + size[1] - 1, y, y + size[0] - 1), axis=1).astype(np.float64)

    # leftover sanity check from debugging: warn if a box exceeds the image width
    for row in bbox:
        if row[:2].max() > shape[1]:
            print('shape: {}, region: {}, fatal wrong, bbox: {}'.format(shape, region, row))

    return bbox

# Zigzag-unroll the top-left 6 coefficients of a DCT image of at least 3x3.
def zigzag6(dct_img):
    """Return the first 6 DCT coefficients of dct_img in zigzag order.

    Raises ValueError when dct_img is smaller than 3x3.
    """
    if dct_img.shape[0] < 3 or dct_img.shape[1] < 3:
        # fixed wording of the original garbled message
        raise ValueError('the shape of dct_img should be at least 3*3')
    # zigzag path: (0,0) (0,1) (1,0) (2,0) (1,1) (0,2)
    rows = [0, 0, 1, 2, 1, 0]
    cols = [0, 1, 0, 0, 1, 2]
    return np.asarray(dct_img)[rows, cols]

# Coarsely tile the image into sub-blocks (settings as in imgSubblock).
# Tiling is anchored at the bottom so the lowest rows are always covered;
# the topmost rows may be left out when the height is not a multiple of size.
def imgCoarseSubblock(shape, size):
    """Tile `shape` with non-overlapping `size` blocks anchored bottom-right.

    Returns an int64 (N, 4) array of (x_left, x_right, y_top, y_bottom),
    ordered top-to-bottom, left-to-right.
    """
    if isinstance(shape, int):
        shape = (shape, shape)
    if isinstance(size, int):
        size = (size, size)

    num_seg = (math.floor(shape[0] / size[0]), math.floor(shape[1] / size[1]))
    # bottom-right anchor of the tiled area
    start = (shape[0] - 1, num_seg[1] * size[1] - 1)

    # collect rows in a list first: np.concatenate per block was O(N^2)
    boxes = []
    for row_ids in range(1, num_seg[0] + 1):
        for col_ids in range(1, num_seg[1] + 1):
            boxes.append([start[1] - size[1] * col_ids + 1,
                          start[1] - size[1] * (col_ids - 1),
                          start[0] - size[0] * row_ids + 1,
                          start[0] - size[0] * (row_ids - 1)])

    # np.int64: the bare np.int alias was removed in NumPy 1.24
    bbox = np.array(boxes[::-1], dtype=np.int64).reshape(-1, 4)

    return bbox

# Number of sub-blocks produced by the coarse tiling of an image.
def imgCoarseSubblockNum(shape, size):
    """Return (rows, cols) of whole `size` blocks that fit inside `shape`."""
    if isinstance(shape, int):
        shape = (shape, shape)
    if isinstance(size, int):
        size = (size, size)
    # floor division of positive ints is exactly math.floor(a / b)
    return (shape[0] // size[0], shape[1] // size[1])

# Determine the "definite" regions of a binary mask.
def segDefinite(img, value1=1, value2=0):
    """Find the definitely-value1 (top) and definitely-value2 (bottom) row ranges.

    Returns a (2, 2) int64 array [[top_first, top_last], [bottom_first, bottom_last]],
    rows inclusive.
    """
    thresh = 0.4   # a row is "uncertain" once this fraction of its pixels mismatches
    step_back = 3  # retreat from the first uncertain row by this many rows

    def_area = np.empty((2, 2), dtype=np.int64)
    def_area[0, 0] = 0
    # default: whole image definite — the original left these two cells
    # uninitialized (np.empty) when no row crossed the threshold
    def_area[0, 1] = img.shape[0] - 1
    for ids, row in enumerate(img):
        if (row != value1).sum() / img.shape[1] > thresh:
            def_area[0, 1] = max(ids - step_back, 0)
            break

    def_area[1, 1] = img.shape[0] - 1
    def_area[1, 0] = 0
    for ids, row in enumerate(img[-1::-1]):
        if (row != value2).sum() / img.shape[1] > thresh:
            def_area[1, 0] = min(img.shape[0] - ids - 1 + step_back, img.shape[0] - 1)
            break
    
    # keep the two ranges from touching or overlapping
    if def_area[0, 1]+1 >= def_area[1, 0]:
        def_area[0, 1] = def_area[0, 1] - 1 if def_area[0, 0] != def_area[0, 1] else def_area[0, 1]
        def_area[1, 0] = def_area[1, 0] + 1 if def_area[1, 0] != def_area[1, 1] else def_area[1, 0]

    return def_area

# Finely tile the image into sub-blocks (stride 1, every position).
# Each returned row has the form (x-left, x-right, y-left, y-right);
# the block count is returned as (row_positions, col_positions).
def imgFineSubblock(shape, size):
    """Enumerate every `size` block position inside `shape` at stride 1.

    Returns (bbox, bbox_num): bbox is an int64 (N, 4) array of
    (x_left, x_right, y_top, y_bottom); bbox_num is (rows, cols) of positions.
    """
    if isinstance(shape, int):
        shape = (shape, shape)
    if isinstance(size, int):
        size = (size, size)

    last_row = shape[0] - size[0]
    last_col = shape[1] - size[1]
    # build in one pass: np.concatenate inside the double loop was O(N^2)
    boxes = [[j, j + size[1] - 1, i, i + size[0] - 1]
             for i in range(last_row + 1)
             for j in range(last_col + 1)]
    bbox = np.array(boxes, dtype=np.int64).reshape(-1, 4)
    bbox_num = (last_row + 1, last_col + 1)

    return bbox, bbox_num

# Build a sea/sky classification mask from the definite regions and the
# fine-subdivision classification results.
# input: image shape, block size,
#        def_region = [sky_first, sky_last, sea_first, sea_last] (inclusive rows),
#        cls_mask = per-block dct classification result (1 = sky),
#        fine_blocks = fine sub-blocks aligned with cls_mask.
def getSSMask(shape, size, def_region=None, cls_mask=None, fine_blocks=None):
    """Accumulate sky (1) / sea (0) votes into an int64 mask of `shape`."""
    # cls_mask and fine_blocks must be supplied together; the original raised
    # when BOTH were None, contradicting its own error message.
    if (cls_mask is None) != (fine_blocks is None):
        raise TypeError('cls_mask and fine_blocks should be both None or both value')

    if isinstance(shape, int):
        shape = (shape, shape)
    if isinstance(size, int):
        size = (size, size)
    
    mask_img = np.zeros(shape, dtype=np.int64)
    sea_value = 0
    sky_value = 1

    # `is not None`: `!= None` is elementwise (ambiguous) on ndarray regions
    if def_region is not None:
        mask_img[def_region[0]: def_region[1]+1] += sky_value
        mask_img[def_region[2]: def_region[3]+1] += sea_value
    
    if cls_mask is not None:
        left_col = fine_blocks[:, 0].min()
        for cls_iter, block_iter in zip(cls_mask, fine_blocks):
            if block_iter[0] == left_col:
                # first block of its row: paint the whole bottom edge
                mask_img[block_iter[3], block_iter[0]:block_iter[1]+1] += cls_iter
            else:
                # later blocks add only their new rightmost pixel
                mask_img[block_iter[3], block_iter[1]] += cls_iter
    
    return mask_img

def fit_line_by_ransac(point_list, sigma, iters = 1000, P = 0.99):
    """Fit y = a*x + b to point_list with RANSAC.

    sigma: maximum residual for a point to count as an inlier.
    iters: initial iteration cap, shrunk adaptively from the inlier ratio.
    P: desired probability of finding the correct model.
    Returns (a, b); (0, 0) when no valid sample pair was found.
    """
    best_a = 0   # best slope so far
    best_b = 0   # best intercept so far
    n_total = 0  # best inlier count so far

    # The original `for i in range(iters)` froze the iteration count at entry,
    # so its in-loop update of `iters` had no effect; a while-loop makes the
    # adaptive termination actually work.
    i = 0
    while i < iters:
        i += 1
        # sample two points to define a candidate line
        sample_index = random.sample(range(len(point_list)), 2)
        x_1 = point_list[sample_index[0]][0]
        y_1 = point_list[sample_index[0]][1]
        x_2 = point_list[sample_index[1]][0]
        y_2 = point_list[sample_index[1]][1]
        if x_2 == x_1:
            continue  # a vertical pair cannot define y = ax + b

        a = (y_2 - y_1) / (x_2 - x_1)
        b = y_1 - a * x_1

        # count inliers of the candidate line
        total_inlier = 0
        for index in range(len(point_list)):
            y_estimate = a * point_list[index][0] + b
            if abs(y_estimate - point_list[index][1]) < sigma:
                total_inlier += 1

        if total_inlier > n_total:
            # adaptive iteration count; eps guards log(0) when every point fits
            eps = 1e-8
            w = total_inlier / len(point_list)
            iters = min(iters, math.log(1 - P) / math.log(1 - w * w + eps))
            n_total = total_inlier
            best_a = a
            best_b = b

        # early exit once a majority of points agree with the model
        if total_inlier > len(point_list)//2:
            break

    return best_a, best_b

# Filtering of the mask image.
def filteMask(mask_img):
    """Scan down with a 3-row window and return the first row index where the
    window is mostly (>60%) equal to tgt_value; the last row index when none is.
    """
    filte_win = 3
    thresh = 0.6
    tgt_value = 0
    # default must be the last ROW index; the original used shape[1] (columns)
    last_row = mask_img.shape[0] - 1

    filte_iter = mask_img.shape[0] - filte_win + 1
    for i in range(filte_iter):
        hit_count = (mask_img[i: i+filte_win] == tgt_value).sum()
        if hit_count / filte_win / mask_img.shape[1] > thresh:
            last_row = i
            break

    return last_row

# Compute distance-based weights.
def exWeight(x, y, center):
    """Normalized weight map that decays with the sqrt-distance from
    center = (row, col); weights sum to 1."""
    if x.shape != y.shape:
        raise ValueError('inconsistent shape between x and y')
    # per-axis absolute distance, normalized to [0, 1]
    dx = np.abs(x - center[1])
    dy = np.abs(y - center[0])
    dx = dx / dx.max()
    dy = dy / dy.max()
    # combined sqrt distance, renormalized so the farthest point gets 0 weight
    dist = np.sqrt(dx) + np.sqrt(dy)
    weight = 1 - dist / dist.max()
    return weight / weight.sum()

# Compute fill values for the connected regions of an image.
def fillRegion(org_img, mask_img):
    """Inpaint masked pixels of org_img with a weighted mean of nearby valid pixels.

    mask_img: 0 marks valid pixels; values 1..max label the regions to fill.
    Mutates mask_img in place (filled pixels are reset to 0) and returns the
    filled image as uint8.
    """
    if org_img.shape != mask_img.shape:
        raise ValueError('inconsistent shape of org_img and mask_img')

    # window size used to gather neighbors for the mean
    ksize = (9, 9)
    deltaSize = (2, 2)  # growth step when the window holds too few valid pixels
    validPixel = 30 # minimum number of valid pixels needed to compute the mean
    
    shape = org_img.shape
    x_, y_ = np.meshgrid(range(shape[1]), range(shape[0]))
    x_ = x_.astype(np.int32)
    y_ = y_.astype(np.int32)
    max_value = mask_img.max()
    invalid_num = 999  # sentinel for not-yet-filled pixels (outside uint8 range)
    valid_img = np.ones_like(org_img, dtype=np.int32) * invalid_num
    valid_img[mask_img==0] = org_img[mask_img==0]

    for value in range(1, max_value+1):
        regoin_mask = (mask_img == value)
        x_mask = x_[regoin_mask]
        y_mask = y_[regoin_mask]
        for x, y in zip(x_mask, y_mask):
            # window as [x_left, y_top, x_right, y_bottom]
            kernel = [
                round(x-ksize[1]//2), round(y-ksize[0]//2), 
                round(x+ksize[1]//2), round(y+ksize[0]//2), 
            ]
            # grow the window (clamped to the image) until it contains enough
            # valid (mask==0) pixels to average over
            while 1:
                if kernel[0] < 0:
                    kernel[0] = 0
                if kernel[1] < 0:
                    kernel[1] = 0
                if kernel[2] >= shape[1]:
                    kernel[2] = shape[1] - 1
                if kernel[3] >= shape[0]:
                    kernel[3] = shape[0] - 1
                if (
                    mask_img[kernel[1]:kernel[3]+1, kernel[0]:kernel[2]+1] == 0
                ).sum() < validPixel:
                    kernel[0] -= deltaSize[1]
                    kernel[2] += deltaSize[1]
                    kernel[1] -= deltaSize[0]
                    kernel[3] += deltaSize[0]
                else:
                    break
            y_coord, x_coord = np.meshgrid(
                range(kernel[1], kernel[3]+1), 
                range(kernel[0], kernel[2]+1), indexing='ij'
            )
            weight = exWeight(x_coord, y_coord, [x, y])
            neibor = valid_img[kernel[1]:kernel[3]+1, kernel[0]:kernel[2]+1]
            # only already-valid neighbors contribute; renormalize their weights
            neibor_mask = (neibor != invalid_num)
            weight = weight[neibor_mask] / weight[neibor_mask].sum()
            cur_pixel = round(
                (neibor[neibor_mask] * weight).sum()
            )
            # write the result and mark this pixel as valid
            valid_img[y, x] = cur_pixel
            mask_img[y, x] = 0

    valid_img = valid_img.astype(np.uint8)
    
    return valid_img


if __name__ == '__main__0':
    # NOTE(review): '__main__0' never equals '__main__', so this block is a
    # deliberately disabled experiment; change the guard to '__main__' to run it.
    # Experiment: median blur + Otsu binarization + adaptive threshold per image,
    # then save an overlay comparison image.
    # img_path = "E:\\lightBand\\medium\\3\\00113.bmp"
    # img_path = "E:\\a\\00113.bmp"

    # src_dir = 'E:\\LBOS_img\\origin'
    # dst_dir = "E:\\LBOS_img\\process"
    src_dir = '/Volumes/DrWD/LBOS_img/origin'
    dst_dir = '/Volumes/DrWD/LBOS_img/process'

    # org_img = cv2.imread(img_path) # all channels hold the same values
    # print(org_img.max())
    # cv2.namedWindow('origin', cv2.WINDOW_AUTOSIZE)
    # cv2.namedWindow('binary', cv2.WINDOW_AUTOSIZE)

    # img_path_list = os.listdir(src_dir)
    # for path in 
    # bin_img = detect(org_img)
    img_path_list = os.listdir(src_dir)
    for i in img_path_list:
        name_key = ""
        img_path = os.path.join(src_dir, i)
        org_img = cv2.imread(img_path)
        filter_img = cv2.medianBlur(org_img, 3)
        # cv2.imwrite(os.path.join(dst_dir, 'median_'+i), filter_img)
        # NOTE(review): cv2.threshold expects a numeric thresh; None may raise
        # on newer OpenCV builds — confirm before re-enabling this block.
        _, bin_img = cv2.threshold(filter_img[:, :, 0], None, 255, cv2.THRESH_OTSU)
        # cv2.imwrite(os.path.join(dst_dir, 'binary_'+i), bin_img)
        adbin_img = cv2.adaptiveThreshold(
            filter_img[:, :, 0], 
            255, 
            cv2.ADAPTIVE_THRESH_MEAN_C, 
            cv2.THRESH_BINARY, 
            35, 
            -10
        )
        # bin_img, adbin_img = test(filter_img)
        # cv2.imwrite(os.path.join(dst_dir, 'adbin_35_10_'+i), adbin_img)
        and_img = cv2.bitwise_and(adbin_img, bin_img)
        # cv2.imwrite(os.path.join(dst_dir, 'adbin_35_10_and_'+i), and_img)
        # cv2.addWeighted(img, and_img)
        mg_img = getCmpImg(org_img, bin_img)
        cv2.imwrite(os.path.join(dst_dir, 'mg_'+name_key+i[:i.find('.bmp')]+'.PNG'), mg_img)

    # cv2.imshow('origin', org_img)
    # cv2.imshow('binary', bin_img)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    
if __name__ == '__main__1':
    # NOTE(review): guard never matches '__main__' — disabled experiment.
    # Experiment: histogram equalization + median blur, then Laplacian gradient.
    src_dir = '/Volumes/DrWD/LBOS_img/origin'
    dst_dir = '/Volumes/DrWD/LBOS_img/process'

    kernel_size = 10
    kernel = getBoxFilter(kernel_size, 5)
    bias = 128

    img_path_list = os.listdir(src_dir)
    for i in img_path_list:
        name_key = str(kernel_size) + '_'
        img_path = os.path.join(src_dir, i)
        org_img = cv2.imread(img_path)

        equal_img = cv2.equalizeHist(org_img[:, :, 0])
        cv2.imwrite(os.path.join(dst_dir, 'equalize', i), equal_img)
        
        filter_img = cv2.medianBlur(equal_img, 3)

        # grad_img = cv2.filter2D(filter_img[:, :, 0], -1, kernel)
        # grad_img = np.abs(grad_img)
        # grad_img = cv2.equalizeHist(grad_img)

        grad_img = cv2.Laplacian(filter_img, -1, ksize=3)

        cv2.imwrite(os.path.join(dst_dir, 'gradient', name_key+i), grad_img)
        
if __name__ == '__main__2':
    # NOTE(review): guard never matches '__main__' — disabled experiment.
    # Experiment: save the precut (central crop) of each image.
    src_dir = '/Volumes/DrWD/LBOS_img/origin'
    dst_dir = '/Volumes/DrWD/LBOS_img/process'

    img_path_list = os.listdir(src_dir)
    for i in img_path_list:
        name_key = ''
        img_path = os.path.join(src_dir, i)
        org_img = cv2.imread(img_path)

        cuted_img = precut(org_img[:, :, 0])

        cv2.imwrite(os.path.join(dst_dir, 'precut', i), cuted_img)
        
if __name__ == '__main__3':
    # NOTE(review): guard never matches '__main__' — disabled experiment.
    # Experiment: box-filter gradient via scipy convolution, saved per image.
    # src_dir = '/Volumes/DrWD/LBOS_img/process/precut'
    src_dir = '/Volumes/DrWD/LBOS_img/origin'
    dst_dir = '/Volumes/DrWD/LBOS_img/process'

    kernel_size = 3
    kernel = getBoxFilter(kernel_size, 5)

    img_path_list = os.listdir(src_dir)
    for i in img_path_list:
        name_key = 'grad'
        img_path = os.path.join(src_dir, i)
        org_img = cv2.imread(img_path)

        equal_img = cv2.equalizeHist(org_img[:, :, 0])
        # cv2.imwrite(os.path.join(dst_dir, 'equalize', 'precut'+i), equal_img)
        
        filter_img = cv2.medianBlur(org_img[:, :, 0], 3)

        grad_img = signal.convolve2d(filter_img, kernel, mode='same', boundary='symm')
        grad_img = np.abs(grad_img)
        grad_img = cv2.add(grad_img, np.zeros_like(grad_img))
        # grad_img = cv2.equalizeHist(grad_img)

        # grad_img = cv2.Laplacian(filter_img, -1, ksize=3)

        cv2.imwrite(os.path.join(dst_dir, 'gradient', name_key+i), grad_img)
        
# Test image entropy.
if __name__ == '__main__4':
    # NOTE(review): guard never matches '__main__' — disabled experiment.
    # Experiment: per-band entropy visualization appended to each image.
    # src_dir = '/Volumes/DrWD/LBOS_img/process/precut'
    src_dir = '/Volumes/DrWD/LBOS_img/origin'
    dst_dir = '/Volumes/DrWD/LBOS_img/process'

    kernel_size = 3
    kernel = getBoxFilter(kernel_size, 5)

    img_path_list = os.listdir(src_dir)
    for i in img_path_list:
        name_key = 'grad'
        img_path = os.path.join(src_dir, i)
        org_img = cv2.imread(img_path)

        # equal_img = cv2.equalizeHist(org_img[:, :, 0])
        
        filter_img = cv2.medianBlur(org_img[:, :, 0], 3)

        mean, entropy, blocks = blockEntropy(org_img)

        block_img = visualBlockEntropy(filter_img, blocks, entropy)
        cv2.imwrite(os.path.join(dst_dir, 'entropy', i), block_img)
                
# Binarize gradient images (comment said "test image entropy" but this block thresholds).
if __name__ == '__main__5':
    # NOTE(review): guard never matches '__main__' — disabled experiment.
    # src_dir = '/Volumes/DrWD/LBOS_img/process/precut'
    # src_dir = '/Volumes/DrWD/LBOS_img/origin'
    # dst_dir = '/Volumes/DrWD/LBOS_img/process'
    src_dir = '/Users/ruideng/LBOS_img/process/gradient'
    dst_dir = '/Users/ruideng/LBOS_img/process'

    img_path_list = os.listdir(src_dir)
    for i in img_path_list:
        if not 'grad' in i:
            continue
        name_key = ''
        img_path = os.path.join(src_dir, i)
        org_img = cv2.imread(img_path)
        
        filter_img = cv2.medianBlur(org_img[:, :, 0], 3)
        _, binary_img = cv2.threshold(filter_img, 15, 255, cv2.THRESH_BINARY)

        cv2.imwrite(os.path.join(dst_dir, 'grad_binary', i), binary_img)

# Labeling: write the hand-annotated sea-sky-line rows to label.json.
if __name__ == '__main__6':
    # NOTE(review): guard never matches '__main__' — disabled experiment.
    src_dir = '/Users/ruideng/LBOS_img/origin'
    dst_dir = '/Users/ruideng/LBOS_img/process'

    # per-image row index of the sea-sky line ('ssline_right')
    label_dict = {
    'lh00021' : {'ssline_right' : 82}, 
    'lh00070' : {'ssline_right' : 84}, 
    'lh00098' : {'ssline_right' : 84}, 
    'll00001' : {'ssline_right' : 80}, 
    'll00048' : {'ssline_right' : 83}, 
    'll00099' : {'ssline_right' : 83}, 
    'lw00011' : {'ssline_right' : 85}, 
    'lw00093' : {'ssline_right' : 81}, 
    'lw00185' : {'ssline_right' : 82}, 
    'lw00252' : {'ssline_right' : 83}, 
    'lw00397' : {'ssline_right' : 82}, 
    'm300013' : {'ssline_right' : 138}, 
    'm300066' : {'ssline_right' : 149}, 
    'm300128' : {'ssline_right' : 162}, 
    'm300195' : {'ssline_right' : 206}, 
    'm700013' : {'ssline_right' : 77}, 
    'm700098' : {'ssline_right' : 78}, 
    'm700184' : {'ssline_right' : 75}, 
    'm800015' : {'ssline_right' : 157}, 
    'm800117' : {'ssline_right' : 112}, 
    'm800218' : {'ssline_right' : 137}, 
    'm800300' : {'ssline_right' : 151}, 
    'm918001' : {'ssline_right' : 286}, 
    'm918002' : {'ssline_right' : 283}, 
    'm918003' : {'ssline_right' : 285},
    }

    with open(os.path.join(src_dir, "label.json"), 'w') as fp:
        fp.write(json.dumps(label_dict, indent=4))

    # NOTE(review): redundant — the with-statement already closed fp
    fp.close()

# Threshold experiments on the sea region using the sea-sky-line labels.
if __name__ == '__main__7':
    # NOTE(review): guard never matches '__main__' — disabled experiment.
    src_dir = '/Users/ruideng/LBOS_img/origin'
    dst_dir = '/Users/ruideng/LBOS_img/process'
    inf_dir = '/Users/ruideng/LBOS_img'

    labels = json.load(open(os.path.join(src_dir, 'label.json'), 'r'))

    # histogram-statistics settings fed to getHistInfo
    top_num = list(range(7, 12))
    top_rate = [1, 5, 10, 15, 20]
    columns = []
    for i in top_num:
        tmp = "top_{}ratio".format(i)
        columns.append(tmp)
    for i in top_num:
        tmp = "top_{}n/l".format(i)
        columns.append(tmp)
    for i in top_rate:
        tmp = "top_{}%_ratio".format(i)
        columns.append(tmp)
    for i in top_rate:
        tmp = "top_{}%_n/l".format(i)
        columns.append(tmp)
    columns.append('sec_len')
    columns.append('avg_n/l')
    df = pd.DataFrame(data=None, columns=columns)
    df_labels = []

    img_path_list = os.listdir(src_dir)
    for i in img_path_list:
        if '.json' in i:
            continue
        name_key = "hlmask20p_"
        img_path = os.path.join(src_dir, i)
        org_img = cv2.imread(img_path)

        # restrict to the sea region below the labeled sea-sky line
        file_name = i[0: i.find('.')]
        ROI_img = org_img[labels[file_name]['ssline_right']:]

        filter_img = cv2.medianBlur(ROI_img, 3)
        # NOTE(review): cv2.threshold with None may raise on newer OpenCV — confirm.
        thresh, bin_img = cv2.threshold(filter_img[:, :, 0], None, 255, cv2.THRESH_OTSU)
        thresh = round(thresh)
        adbin_img = cv2.adaptiveThreshold(
            filter_img[:, :, 0], 
            255, 
            cv2.ADAPTIVE_THRESH_MEAN_C, 
            cv2.THRESH_BINARY, 
            35, 
            -10
        )
        and_img = cv2.bitwise_and(adbin_img, bin_img)
        # compute the gray-level histogram of the sea region
        sea_hist = cv2.calcHist([filter_img], [0], None, [256], [0, 256])
        # plt.figure()
        # plt.hist(filter_img.ravel(), 255-thresh, [thresh+1, 256]) # this function takes an image
        # plt.savefig(os.path.join(dst_dir, 'sstest', name_key+i[:i.find('.bmp')]+'.png'))

        # collect histogram statistics into the dataframe
        hist_info, thresh1, thresh2 = getHistInfo(sea_hist, top_num=top_num, top_rate=top_rate)
        hist_info = listRavel(hist_info)
        df_new = pd.DataFrame(np.array([hist_info]), columns=columns)
        # NOTE(review): DataFrame.append was removed in pandas 2.x; use
        # pd.concat when re-enabling this block.
        df = df.append(df_new, ignore_index=True)

        # hand label: 0 = images known to contain highlights, 1 = the rest
        if i[:-4] in ['m800117', 'm800218', 'm800300', 'm300013', 'm300066', 'll00099', 'lw00397']:
            df_labels.append(0)
        else:
            df_labels.append(1)

        # compute the highlight mask (top-20% gray levels) when the top bins
        # hold enough mass; otherwise an empty mask via an impossible threshold
        if hist_info[11] >= 0.02:
            _, lmask = cv2.threshold(filter_img[:, :, 0], thresh2[4], 255, cv2.THRESH_BINARY)
        else:
            _, lmask = cv2.threshold(filter_img[:, :, 0], 255, 255, cv2.THRESH_BINARY)

        or_img = cv2.bitwise_or(and_img, lmask)

        mg_img = getCmpImg(ROI_img, or_img)
        cv2.imwrite(os.path.join(dst_dir, 'sstest', name_key+i[:i.find('.bmp')]+'.PNG'), mg_img)

    
    df['label'] = df_labels

    # df.to_excel(os.path.join(inf_dir, 'info', 'ssline.xlsx'))

# DCT transform (disabled experiment block below)
if __name__ == '__main__8':
    # 存储设置
    src_dir = os.path.join(sys_dir, 'LBOS_img', 'origin')
    dst_dir = os.path.join(sys_dir, 'LBOS_img', 'process')

    labels = json.load(open(os.path.join(src_dir, 'label.json'), 'r'))
    img_path_list = os.listdir(src_dir)
    img_path_list.pop(img_path_list.index('label.json'))
    img_path_list.sort()

    # 子块采样相关设置
    bbox_generate_flag = False # 是否生成bbox
    # 采样子块方法:'rand','coarse'
    bbox_sample_type = 'coarse'
    bbox_visualize_flag = False # 是否可视化bbox
    bbox_num_per = 40
    if bbox_sample_type == 'rand':
        bbox_total = np.empty((0, 2, bbox_num_per, 4), dtype=np.int)
    bbox_save_dir = os.path.join(sys_dir, 'LBOS_img')
    bbox_file = 'bbox_carse.npy' # flag为真则保存至此路径，flag为假则读取则此路径

    # zigzag设置
    draw_zigzag_flag = False

    # 记录zigzag展开的协方差数据等
    mean_file = 'mean.npy'
    cov_file = 'cov.npy'
    sea_cov_total = np.empty((0, 5, 5))
    sky_cov_total = np.empty((0, 5, 5))
    sea_mean_total = np.empty((0, 5))
    sky_mean_total = np.empty((0, 5))
    compute_cov_flag = True # 是否计算平均值和协方差
    save_cov_flag = False if compute_cov_flag else False # 是否存储平均值和协方差

    # 高斯模型估计
    check_calc_flag = True if compute_cov_flag else False # 是否验算平均值和协方差
    # 海天线存在区域占比，此区域之外便肯定是海或者天
    sky_per = 0.2
    sea_per = 0.9

    if not bbox_generate_flag and bbox_sample_type == 'rand':
        bbox_total = np.load(os.path.join(bbox_save_dir, bbox_file))

    for ids, i in enumerate(img_path_list):
        if not '.bmp' in i:
            continue
        name_key = ''
        img_path = os.path.join(src_dir, i)
        org_img = cv2.imread(img_path)
        
        filter_img = cv2.medianBlur(org_img[:, :, 0], 3)

        file_name = i[0: i.find('.')]
        
        # 产生bbox
        if bbox_generate_flag:
            if bbox_sample_type == 'rand':
                bbox1 = imgSubblock(
                    filter_img.shape, 8, num=bbox_num_per, 
                    region=[0, labels[file_name]['ssline_right']-15]
                )
                bbox1 = bbox1.astype(np.int64)
                if bbox_visualize_flag:
                    for j in bbox1:
                        org_img = cv2.rectangle(org_img, [j[0], j[2]], [j[1], j[3]], [255, 0, 0])

                bbox2 = imgSubblock(
                    filter_img.shape, 8, num=bbox_num_per, 
                    region=[labels[file_name]['ssline_right']+15, filter_img.shape[0]-1]
                )
                bbox2 = bbox2.astype(np.int64)
                if bbox_visualize_flag:
                    for j in bbox2:
                        org_img = cv2.rectangle(org_img, (j[0], j[2]), (j[1], j[3]), (255, 0, 0))    

                bbox = np.stack((bbox1, bbox2), axis=0)
                bbox = bbox.reshape((1, bbox.shape[0], bbox.shape[1], bbox.shape[2]))
                bbox_total = np.concatenate((bbox_total, bbox), axis=0) 
            elif bbox_sample_type == 'coarse':
                # Coarse 8x8 subdivision of the filtered image into block
                # bounding boxes; each box is [x_left, x_right, y_top, y_bottom]
                # (see the cv2.rectangle corner usage below).
                bbox = imgCoarseSubblock(filter_img.shape, 8)

                if bbox_visualize_flag:
                    for j in bbox:
                        org_img = cv2.rectangle(org_img, (j[0], j[2]), (j[1], j[3]), (255, 0, 0))

                # Cache the generated boxes per image so later runs can reload them.
                np.save(os.path.join(bbox_save_dir, bbox_file[:bbox_file.find('.npy')]+'_'+file_name+'.npy'), bbox)


            if bbox_visualize_flag:
                cv2.imwrite(os.path.join(dst_dir, 'tmp2', i), org_img)

        else:

            if bbox_visualize_flag:
                bbox = bbox_total[ids].reshape(-1, 4)
                for j in bbox:
                    org_img = cv2.rectangle(org_img, (j[0], j[2]), (j[1], j[3]), (255, 0, 0))

                    # NOTE(review): this imwrite is inside the per-box loop, so the
                    # same file is rewritten once per box - likely meant to be
                    # dedented outside the loop. TODO confirm.
                    cv2.imwrite(os.path.join(dst_dir, 'tmp1', i), org_img)

            if bbox_sample_type == 'rand':
                # DCT transform and zigzag expansion of each sampled block.
                # bbox_total[ids] holds two groups of boxes; group 0 is treated
                # as sky samples and group 1 as sea samples (ss_label below).
                bbox = bbox_total[ids]
                sky_distrib = np.empty((0, 6))
                sea_distrib = np.empty((0, 6))
                ptr_list = [sky_distrib, sea_distrib]  # NOTE(review): unused in the visible code
                for ss_label, j in enumerate(bbox):
                    for l in j:
                        # l = [x1, x2, y1, y2]; the crop is inclusive at both ends.
                        dct_img = cv2.dct(filter_img[l[2]:l[3]+1, l[0]:l[1]+1] / 255.0)
                        # zigzag6 presumably returns the first 6 zigzag-ordered DCT
                        # coefficients (distrib rows are length 6) - TODO confirm.
                        dct_feat = zigzag6(dct_img)
                        if ss_label == 0:
                            sky_distrib = np.concatenate((sky_distrib, dct_feat.reshape(1, -1)), axis=0)
                        if ss_label == 1:
                            sea_distrib = np.concatenate((sea_distrib, dct_feat.reshape(1, -1)), axis=0)

                # Plot KDE probability distributions of the zigzag features
                # (the DC coefficient, column 0, is dropped).
                if draw_zigzag_flag:
                    df_kde = pd.DataFrame( sky_distrib[:, 1:] )
                    df_kde.columns = ['dct1', 'dct2', 'dct3', 'dct4', 'dct5']
                    plt.figure()
                    sns.kdeplot(data=df_kde)
                    # NOTE(review): 'sky_kde' lacks the '_' separator that the
                    # 'sea_kde_' filename below has - inconsistent naming.
                    plt.savefig(os.path.join(dst_dir, 'sky_kde'+file_name+'.png'))
                    plt.close()
                    df_kde = pd.DataFrame( sea_distrib[:, 1:] )
                    df_kde.columns = ['dct1', 'dct2', 'dct3', 'dct4', 'dct5']
                    plt.figure()
                    sns.kdeplot(data=df_kde)
                    plt.savefig(os.path.join(dst_dir, 'sea_kde_'+file_name+'.png'))
                    plt.close()

                # Sample covariance and mean of the AC coefficients, accumulated
                # across images into the *_total arrays.
                if compute_cov_flag:
                    sky_mean = np.expand_dims(sky_distrib[:, 1:].mean(axis=0), axis=0)
                    sky_cov = np.expand_dims(np.cov(sky_distrib[:, 1:].T), axis=0)
                    sea_mean = np.expand_dims(sea_distrib[:, 1:].mean(axis=0), axis=0)
                    sea_cov = np.expand_dims(np.cov(sea_distrib[:, 1:].T), axis=0)

                    sky_cov_total = np.concatenate((sky_cov_total, sky_cov), axis=0)
                    sea_cov_total = np.concatenate((sea_cov_total, sea_cov), axis=0)
                    sky_mean_total = np.concatenate((sky_mean_total, sky_mean))
                    sea_mean_total = np.concatenate((sea_mean_total, sea_mean))

                # Fit single-component Gaussian models to the sky/sea features.
                Gm_sea = mix.GaussianMixture(n_components=1, covariance_type='full')
                Gm_sky = mix.GaussianMixture(n_components=1, covariance_type='full')
                Gm_sea.fit(sea_distrib[:, 1:])
                Gm_sky.fit(sky_distrib[:, 1:])

                if check_calc_flag:
                    # Sanity-check: hand-computed mean/cov vs GaussianMixture fit.
                    # NOTE(review): sea_mean/sky_mean/sea_cov/sky_cov only exist when
                    # compute_cov_flag is also set - NameError otherwise. TODO confirm.
                    error_mean_sea = (sea_mean - Gm_sea.means_).mean()
                    error_mean_sky = (sky_mean - Gm_sky.means_).mean()
                    error_cov_sea = (sea_cov - Gm_sea.covariances_).mean()
                    error_cov_sky = (sky_cov - Gm_sky.covariances_).mean()
                    print([error_mean_sea, error_mean_sky, error_cov_sea, error_cov_sky])

            elif bbox_sample_type == 'coarse':
                # Reload the cached coarse boxes for this image.
                bbox = np.load(os.path.join(bbox_save_dir, bbox_file[:bbox_file.find('.npy')]+'_'+file_name+'.npy'))
                # Blocks whose bottom row (bbox[:, 3]) lies in the assumed sky band
                # (top sky_per of the image) / sea band (from sea_per down).
                sky_bbox = bbox[bbox[:, 3] <= round(sky_per*filter_img.shape[0])-1]
                sea_bbox = bbox[bbox[:, 3] >= round(sea_per*filter_img.shape[0])-1]
                sky_distrib = np.empty((0, 6))
                sea_distrib = np.empty((0, 6))
                dct_total = np.empty((0, 6))

                # DCT + zigzag features for sky blocks, sea blocks, and all blocks.
                for j in sky_bbox:
                    dct_img = cv2.dct(filter_img[j[2]:j[3]+1, j[0]:j[1]+1] / 255.0)
                    dct_feat = zigzag6(dct_img)
                    sky_distrib = np.concatenate((sky_distrib, dct_feat.reshape(1, -1)), axis=0)
                for j in sea_bbox:
                    dct_img = cv2.dct(filter_img[j[2]:j[3]+1, j[0]:j[1]+1] / 255.0)
                    dct_feat = zigzag6(dct_img)
                    sea_distrib = np.concatenate((sea_distrib, dct_feat.reshape(1, -1)), axis=0)
                for j in bbox:
                    dct_img = cv2.dct(filter_img[j[2]:j[3]+1, j[0]:j[1]+1] / 255.0)
                    dct_feat = zigzag6(dct_img)
                    dct_total = np.concatenate((dct_total, dct_feat.reshape(1, -1)), axis=0)

                # Fit Gaussian models on the definite bands (DC column dropped).
                Gm_sea = mix.GaussianMixture(n_components=1, covariance_type='full')
                Gm_sky = mix.GaussianMixture(n_components=1, covariance_type='full')
                Gm_sea.fit(sea_distrib[:, 1:])
                Gm_sky.fit(sky_distrib[:, 1:])

                # Per-block log-likelihood under each model.
                sea_prob = Gm_sea.score_samples(dct_total[:, 1:])
                sky_prob = Gm_sky.score_samples(dct_total[:, 1:])

                # sky_mask = np.bitwise_and((sky_prob >= 0), (sky_prob > sea_prob))
                # A block is labelled sky when the sky model is more likely.
                sky_mask = sky_prob > sea_prob
                # sky_mask = sky_prob > sea_prob

                # sky_mask_img = np.zeros_like(filter_img)
                # for j, l in zip(bbox, sky_mask):
                #     if l:
                #         sky_mask_img = cv2.rectangle(sky_mask_img, (j[0], j[2]), (j[1], j[3]), 255, thickness=cv2.FILLED)
                
                # cmp_img = getCmpImg(org_img, sky_mask_img)
                # cv2.imwrite(os.path.join(dst_dir, 'sea_mask', 'sky_mask_'+i[:i.find('.bmp')]+'.PNG'), cmp_img)

                # Split the image into definite sky / definite sea bands.
                CoarseSubblock_num = imgCoarseSubblockNum(filter_img.shape, 8)
                def_block_seg = segDefinite(sky_mask.reshape(CoarseSubblock_num))
                # Definite regions in pixel rows: [0, def_seg[1]] is sky,
                # [def_seg[2], bottom] is sea.
                def_seg = [
                    0, 
                    bbox.reshape((CoarseSubblock_num[0], CoarseSubblock_num[1], 4))[def_block_seg[0, 1], 0, 3], 
                    bbox.reshape((CoarseSubblock_num[0], CoarseSubblock_num[1], 4))[def_block_seg[1, 0], 0, 2], 
                    filter_img.shape[0]-1
                    ]
                # sky_mask_img = np.zeros_like(filter_img)
                # sky_mask_img = cv2.rectangle(sky_mask_img, (0, def_seg[0]), (filter_img.shape[1]-1, def_seg[1]), 255, thickness=cv2.FILLED)
                # sky_mask_img = cv2.rectangle(sky_mask_img, (0, def_seg[2]), (filter_img.shape[1]-1, def_seg[3]), 255, thickness=cv2.FILLED)
                # cmp_img = getCmpImg(org_img, sky_mask_img)
                # cv2.imwrite(os.path.join(dst_dir, 'def_area', 'def_area_'+i[:i.find('.bmp')]+'.PNG'), cmp_img)

                # Fine subdivision of the ambiguous band between the two
                # definite regions.
                indef_region = np.empty(2)
                indef_region[0] = def_seg[1] + 1
                indef_region[1] = def_seg[2] - 1
                fine_bbox, fine_bbox_num = imgFineSubblock(filter_img.shape, 8)

                # Assign fine blocks to sky / sea / ambiguous by their bottom row.
                tmp_mask = np.bitwise_and((fine_bbox[:, 3] >= indef_region[0]), (fine_bbox[:, 3] <= indef_region[1]))
                indef_fine_bbox = fine_bbox[tmp_mask]
                tmp_mask = fine_bbox[:, 3] < indef_region[0]
                sky_fine_bbox = fine_bbox[tmp_mask]
                tmp_mask = fine_bbox[:, 3] > indef_region[1]
                sea_fine_bbox = fine_bbox[tmp_mask]
                indef_blocks_num = indef_fine_bbox.reshape((-1, fine_bbox_num[1], 4)).shape[:2]  # NOTE(review): unused in the visible code

                # Balance both definite classes to the smaller sample count,
                num_sky = sky_fine_bbox.shape[0]
                num_sea = sea_fine_bbox.shape[0]
                if num_sky <= num_sea:
                    num_sea = num_sky
                else:
                    num_sky = num_sea

                # randomly subsampling the larger class.
                if num_sky < sky_fine_bbox.shape[0]:
                    def_area_samples = random.sample(range(sky_fine_bbox.shape[0]), num_sky)
                    sky_fine_bbox = sky_fine_bbox[def_area_samples]
                if num_sea < sea_fine_bbox.shape[0]:
                    def_area_samples = random.sample(range(sea_fine_bbox.shape[0]), num_sea)
                    sea_fine_bbox = sea_fine_bbox[def_area_samples]

                # DCT features for the fine blocks.
                sky_fine_distrib = np.empty((0, 6))
                sea_fine_distrib = np.empty((0, 6))
                dct_fine_total = np.empty((0, 6))

                for j in sky_fine_bbox:
                    dct_img = cv2.dct(filter_img[j[2]:j[3]+1, j[0]:j[1]+1] / 255.0)
                    dct_feat = zigzag6(dct_img)
                    sky_fine_distrib = np.concatenate((sky_fine_distrib, dct_feat.reshape(1, -1)), axis=0)
                for j in sea_fine_bbox:
                    dct_img = cv2.dct(filter_img[j[2]:j[3]+1, j[0]:j[1]+1] / 255.0)
                    dct_feat = zigzag6(dct_img)
                    sea_fine_distrib = np.concatenate((sea_fine_distrib, dct_feat.reshape(1, -1)), axis=0)
                for j in indef_fine_bbox:
                    dct_img = cv2.dct(filter_img[j[2]:j[3]+1, j[0]:j[1]+1] / 255.0)
                    dct_feat = zigzag6(dct_img)
                    dct_fine_total = np.concatenate((dct_fine_total, dct_feat.reshape(1, -1)), axis=0)

                # Fit Gaussian models on the fine definite samples and classify
                # the ambiguous fine blocks by likelihood comparison.
                Gm_fine_sea = mix.GaussianMixture(n_components=1, covariance_type='full')
                Gm_fine_sky = mix.GaussianMixture(n_components=1, covariance_type='full')
                Gm_fine_sea.fit(sea_fine_distrib[:, 1:])
                Gm_fine_sky.fit(sky_fine_distrib[:, 1:])

                sea_fine_prob = Gm_fine_sea.score_samples(dct_fine_total[:, 1:])
                sky_fine_prob = Gm_fine_sky.score_samples(dct_fine_total[:, 1:])

                # sky_mask = np.bitwise_and((sky_prob >= 0), (sky_prob > sea_prob))
                sky_fine_mask = sky_fine_prob > sea_fine_prob
                # sky_mask = sky_prob > sea_prob

                # for m, l in zip(sky_fine_mask, indef_fine_bbox):
                #     if m == 0:
                #         org_img = cv2.circle(org_img, (l[1], l[3]), 1, (255, 0, 0,))
                # cv2.imwrite(os.path.join(dst_dir, 'tmp5', 'sky_mask_'+i[:i.find('.bmp')]+'.PNG'), org_img)

                # Final binary mask: definite regions plus classified fine blocks.
                mask_img = getSSMask(filter_img.shape, 8, def_region=def_seg, cls_mask=sky_fine_mask, fine_blocks=indef_fine_bbox)
                # cmp_img = getCmpImg(org_img, mask_img*255)
                # cv2.imwrite(os.path.join(dst_dir, 'fine_mask', 'sky_mask_'+i[:i.find('.bmp')]+'.PNG'), cmp_img)
                cv2.imwrite(os.path.join(dst_dir, 'fine_mask', 'sky_mask_'+i), mask_img*255)




    if save_cov_flag:
        # Persist the accumulated per-image means/covariances.
        np.save(os.path.join(bbox_save_dir, 'sea_'+mean_file), sea_mean_total)
        np.save(os.path.join(bbox_save_dir, 'sky_'+mean_file), sky_mean_total)
        np.save(os.path.join(bbox_save_dir, 'sea_'+cov_file), sea_cov_total)
        np.save(os.path.join(bbox_save_dir, 'sky_'+cov_file), sky_cov_total)


    if bbox_generate_flag and bbox_sample_type == 'rand':
        np.save(os.path.join(bbox_save_dir, bbox_file), bbox_total)
    
# Fit the sea-sky line from the fine masks.
# NOTE(review): the '9' suffix makes the condition always false - presumably a
# manual toggle used to enable/disable individual script sections.
if __name__ == '__main__9':
    # Storage settings
    src_dir = os.path.join(sys_dir, 'LBOS_img', 'process', 'fine_mask')
    dst_dir = os.path.join(sys_dir, 'LBOS_img', 'process')
    org_dir = os.path.join(sys_dir, 'LBOS_img', 'origin')

    org_dir_list = os.listdir(org_dir)
    org_dir_list.pop(org_dir_list.index('label.json'))
    org_dir_list.sort()
    img_path_list = os.listdir(src_dir)
    img_path_list.sort()

    # One (slope, intercept)-style row per image (see fit_line_by_ransac below).
    line_param = np.empty((0, 2))

    # NOTE(review): pairing relies on the two sorted listings corresponding
    # 1:1 by position - TODO confirm both directories stay in sync.
    for i, m in zip(img_path_list, org_dir_list):
        mask_img = cv2.imread(os.path.join(src_dir, i))
        org_img = cv2.imread(os.path.join(org_dir, m))

        # Preprocessing
        # mask_img = cv2.morphologyEx(
        #     mask_img, cv2.MORPH_OPEN, 
        #     cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5)), 
        #     iterations=2
        # )
        # cv2.imwrite(os.path.join(dst_dir, 'mask_filter', i), mask_img)
        # Zero out everything from the row filteMask selects downwards.
        last_row = filteMask(mask_img[:, :, 0])
        mask_img[last_row:] = np.zeros_like(mask_img[last_row:])
        # cv2.imwrite(os.path.join(dst_dir, 'mask_filter', 'win_filte_'+i), mask_img)
        
        # Only the sky-to-sea transitions are meant to count as sea-sky
        # junction points; the selected points belong to the sky side.
        # NOTE(review): uint8 subtraction wraps modulo 256, so 0-255 == 1 and
        # both transition directions end up > 0; 'dif_img > 0' therefore flags
        # any change of mask value, not only sky->sea - TODO confirm intended.
        dif_img = mask_img[:-1, :, 0] - mask_img[1:, :, 0]

        # cmp_img = getCmpImg(mask_img, 3*np.concatenate((dif_img, np.zeros_like(dif_img[-1, :]).reshape(1, -1)), axis=0))
        # cv2.imwrite(os.path.join(dst_dir, 'dif_point', i[:i.find('.bmp')]+'.PNG'), cmp_img)
        
        # shape = org_img.shape[:2]
        # coord_x, coord_y = np.meshgrid(range(shape[1]), range(shape[0]))
        # a_mask = np.concatenate((dif_img, np.zeros_like(dif_img[-1, :]).reshape(1, -1)), axis=0)
        # coord_x = coord_x[a_mask>0].astype(np.int32)
        # coord_y = coord_y[a_mask>0].astype(np.int32)
        # coord_ = np.stack((coord_x, coord_y), axis=1)
        # for ll in coord_:
        #     org_img = cv2.circle(org_img, ll, 0, (0, 0, 255))
        # cv2.imwrite(os.path.join(dst_dir, 'dif_point', i), org_img)
        # continue

        # Collect the (x, y) coordinates of the transition points.
        point_x, point_y = np.meshgrid(range(mask_img.shape[1]), range(mask_img.shape[0]-1))
        x_ = point_x[dif_img > 0]
        y_ = point_y[dif_img > 0]
        coord = np.stack((x_, y_), axis=1)
        # print(coord)

        # Line fitting
        # param = cv2.fitLine(coord, cv2.DIST_L2, 0, 0.01, 0.01)
        # param = param.squeeze()
        # vx, vy, x0, y0 = param[0], param[1], param[2], param[3]
        # x_left, x_right = 0, mask_img.shape[1] - 1
        # y_left = round(vy * (x_left - x0) / vx +y0)
        # y_right = round(vy * (x_right - x0) / vx +y0)
        # mask_img = cv2.line(mask_img, (x_left, y_left), (x_right, y_right), (0, 255, 0))
        # cv2.imwrite(os.path.join(dst_dir, 'ssline', i), mask_img)

        # RANSAC line fit; param is apparently (slope, intercept) - see the
        # commented visualization code below. TODO confirm against the helper.
        param = fit_line_by_ransac(coord, 2)
        # x_left, x_right = 0, mask_img.shape[1] - 1
        # y_left = round(param[0] * x_left + param[1])
        # y_right = round(param[0] * x_right + param[1])
        # org_img = cv2.line(org_img, (x_left, y_left), (x_right, y_right), (0, 255, 0))
        # cv2.imwrite(os.path.join(dst_dir, 'ssline', 'org_line_'+i), org_img)

        line_param = np.concatenate((line_param, np.array(param).reshape(1, -1)), axis=0)

    np.save(os.path.join(sys_dir, 'LBOS_img', 'line_param'), line_param)

# Apply the threshold method guided by the fitted sea-sky line.
# Experiments with thresholding using the sea-sky line results.
# NOTE(review): the '10' suffix makes the condition always false - presumably
# a manual toggle used to enable/disable individual script sections.
if __name__ == '__main__10':
    src_dir = '/Users/ruideng/LBOS_img/origin'
    dst_dir = '/Users/ruideng/LBOS_img/process'
    param_dir = '/Users/ruideng/LBOS_img'

    img_path_list = os.listdir(src_dir)
    img_path_list.pop(img_path_list.index('label.json'))
    img_path_list.sort()

    # One (slope, intercept) row per image (y = j[0]*x + j[1] below).
    line_param = np.load(os.path.join(param_dir, 'line_param.npy'))

    for i, j in zip(img_path_list, line_param):
        name_key = "hlmask20p_"
        img_path = os.path.join(src_dir, i)
        org_img = cv2.imread(img_path)

        file_name = i[0: i.find('.')]

        filter_img = cv2.medianBlur(org_img, 3)

        # Compute the region to process according to the sea-sky line.
        img_shape = filter_img.shape[0: 2]
        y_ = []
        for x_ in range(img_shape[1]):
            y_.append(round(j[0]*x_+j[1]))
        y_ = np.array(y_, dtype=np.int64)
        last_line = y_.min() # topmost row touched by the sea-sky line
        # Boolean mask of pixels strictly below the line at each column.
        sea_mask = np.meshgrid(
            range(img_shape[0]), range(img_shape[1]), indexing='ij'
        )[0] > y_.reshape(1, -1)
        rgn_img = filter_img[last_line:, :, 0]
        sea_hist = filter_img[:, :, 0][sea_mask]
        sea_mask = sea_mask[last_line:]

        # Otsu threshold computed from the sea-side grayscale pixels only.
        # NOTE(review): thresh=None relies on OpenCV ignoring the threshold
        # argument when THRESH_OTSU is set - TODO confirm across cv2 versions.
        thresh, _ = cv2.threshold(
            sea_hist, None, 255, cv2.THRESH_OTSU
        )
        thresh = round(thresh)
        _, Otsu_mask = cv2.threshold(
            rgn_img, thresh, 255, cv2.THRESH_BINARY
        )

        # Adaptive (local mean) thresholding over the same region.
        Ada_mask = cv2.adaptiveThreshold(
            rgn_img, 
            255, 
            cv2.ADAPTIVE_THRESH_MEAN_C, 
            cv2.THRESH_BINARY, 
            21, 
            -10
        )

        # Grayscale histogram and derived statistics.
        # hist_info[0] appears to be a pixel-fraction measure and thresh2[0]
        # the top-20% brightness cut - TODO confirm against getHistInfo.
        sea_hist = cv2.calcHist([filter_img], [0], None, [256], [0, 256])
        hist_info, _, thresh2 = getHistInfo(sea_hist, top_rate=[20])
        hist_info = listRavel(hist_info)

        # Highlight mask: only applied when enough bright pixels exist.
        if hist_info[0] >= 0.05:
            _, light_mask = cv2.threshold(rgn_img, thresh2[0], 255, cv2.THRESH_BINARY)
        else:
            # Threshold 255 with THRESH_BINARY yields an all-zero mask for
            # uint8 input (no value exceeds 255), i.e. highlights disabled.
            _, light_mask = cv2.threshold(rgn_img, 255, 255, cv2.THRESH_BINARY)

        # Combine the three masks: (Otsu AND adaptive) OR highlight,
        # restricted to the sea side of the line.
        lb_mask = cv2.bitwise_and(Otsu_mask, Ada_mask)
        lb_mask = cv2.bitwise_or(lb_mask, light_mask)
        lb_mask[sea_mask == 0] = 0

        # Convert the sea-region mask into a full-image mask by padding the
        # rows above the line with zeros.
        img_mask = np.vstack(
            (np.zeros((last_line, img_shape[1])), 
             lb_mask),
        )

        # mg_img = getCmpImg(filter_img, img_mask)
        # cv2.imwrite(os.path.join(dst_dir, 't_mask_img', name_key+i[:i.find('.bmp')]+'.PNG'), mg_img)
        # cv2.imwrite(os.path.join(dst_dir, 't_mask', i), img_mask*255)

        # Morphological processing of the mask (disabled).
        # img_mask_filter = cv2.morphologyEx(img_mask, cv2.MORPH_CLOSE, np.ones((3, 3)))
        # cv2.imwrite(os.path.join(dst_dir, 't_mask_pro', i), img_mask_filter*255)

        # Connected-component analysis of the mask.
        # NOTE(review): np.int8 maps to CV_8S while connectedComponentsWithStats
        # expects CV_8UC1, and 255 wraps to -1 under int8 - np.uint8 looks like
        # the intended dtype. TODO confirm.
        _, labels, stats, centroids = cv2.connectedComponentsWithStats(
            img_mask.astype(np.int8), connectivity=8)

        # Visualize connected-component info (filtering by area and aspect
        # ratio; currently disabled).
        img_connect = img_mask * 255
        # img_connect = img_connect.astype(np.int8)
        area_thresh_max = 260
        area_thresh = 60
        ratio_thresh_min = 2.0
        ratio_thresh_max = 5.8
        # for l in stats:
        #     if (l[0], l[1]) == (0, 0):
        #         continue
        #     if (l[4] > area_thresh and 
        #     l[4] <= area_thresh_max and 
        #     l[2]/l[3] > ratio_thresh_min and 
        #     l[2]/l[3] < ratio_thresh_max):
        #         continue
        #     img_connect = cv2.rectangle(
        #         img_connect, (l[0], l[1]), (l[0]+l[2]-1, l[1]+l[3]-1), 127 
        #     )
        # cv2.imwrite(os.path.join(dst_dir, 't_mask_connect', i), img_connect)

        # cv2.imwrite(os.path.join(dst_dir, 'tmp7', i), labels)

        fill_img = fillRegion(filter_img[:, :, 0], labels)
        cv2.imwrite(os.path.join(dst_dir, 'tmp9', i), fill_img)