import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
import ecg_display as display
import pan_tompkin as pt


# Signal processing with the Pan-Tompkins algorithm
def scg(data, threshold):
    """Detect SCG heartbeat peaks with the Pan-Tompkins pipeline and repair the result.

    :param data: raw SCG signal sampled at 200 Hz
    :param threshold: expected peak-to-peak distance from a previous run (0 = unknown)
    :return: (repaired peak indices, number of corrections made, updated threshold)
    """
    filtered = pt.bandpass_filter_scg(data, 200, 5, 15)  # band-pass filter
    # derivative -> square -> moving-window integration
    windowed = pt.moving_window_average(pt.square(pt.derivative(filtered)))
    peaks, locs = pt.findpeaks(windowed, int(0.2 * 200 + 0.5))  # baseline peak candidates
    _, _, amp_flt, idx_flt, _, _, _, _ = pt.judge_rule(filtered, windowed, peaks, locs, 200)

    # drop redundant detections, then fill in missing ones
    idx_flt, amp_flt, threshold, removed = redundant_data(idx_flt, amp_flt, threshold)
    idx_flt, amp_flt, threshold, inserted = missing_data(idx_flt, amp_flt, filtered, threshold)

    return idx_flt, len(removed) + len(inserted), threshold


def ppg(data, threshold):
    """Detect PPG pulse peaks with the Pan-Tompkins pipeline and repair the result.

    :param data: raw PPG signal sampled at 200 Hz
    :param threshold: expected peak-to-peak distance from a previous run (0 = unknown)
    :return: (repaired peak indices, number of corrections made, updated threshold)
    """
    filtered = pt.bandpass_filter_scg(data, 200, 0.5, 10)  # band-pass filter
    # derivative -> square -> moving-window integration
    windowed = pt.moving_window_average(pt.square(pt.derivative(filtered)))
    peaks, locs = pt.findpeaks(windowed, int(0.2 * 200 + 0.5))  # baseline peak candidates
    _, _, amp_flt, idx_flt, _, _, _, _ = pt.judge_rule(filtered, windowed, peaks, locs, 200)

    # drop redundant detections, then fill in missing ones
    idx_flt, amp_flt, threshold, removed = redundant_data(idx_flt, amp_flt, threshold)
    idx_flt, amp_flt, threshold, inserted = missing_data(idx_flt, amp_flt, filtered, threshold)

    return idx_flt, len(removed) + len(inserted), threshold


def scg_analysis(data, threshold, pic_name):
    """Run the SCG detection pipeline and additionally plot the results.

    :param data: raw SCG signal sampled at 200 Hz
    :param threshold: expected peak-to-peak distance from a previous run (0 = unknown)
    :param pic_name: file path for the generated plot
    :return: (repaired peak indices, number of corrections made, updated threshold)
    """
    filtered = pt.bandpass_filter_scg(data, 200, 5, 15)  # band-pass filter
    # derivative -> square -> moving-window integration
    windowed = pt.moving_window_average(pt.square(pt.derivative(filtered)))
    peaks, locs = pt.findpeaks(windowed, int(0.2 * 200 + 0.5))  # baseline peak candidates
    amp_win, idx_win, amp_flt, idx_flt, thrs_win1, thrs_win2, thrs_flt1, thrs_flt2 = \
        pt.judge_rule(filtered, windowed, peaks, locs, 200)

    # drop redundant detections, then fill in missing ones
    idx_flt, amp_flt, threshold, removed = redundant_data(idx_flt, amp_flt, threshold)
    idx_flt, amp_flt, threshold, inserted = missing_data(idx_flt, amp_flt, filtered, threshold)

    display.plot_peak_sig_and_noise_for_win_and_filter(
        windowed, filtered, idx_win, amp_win, idx_flt, amp_flt, locs,
        thrs_win1, thrs_win2, thrs_flt1, thrs_flt2,
        title1='scg信号经过Pan-Tompkins算法处理后的结果',
        title2='scg信号经过滤波修复处理后的结果', path=pic_name)

    return idx_flt, len(removed) + len(inserted), threshold


def ppg_analysis(data, threshold, pic_name):
    """Run the PPG detection pipeline and additionally plot the results.

    :param data: raw PPG signal sampled at 200 Hz
    :param threshold: expected peak-to-peak distance from a previous run (0 = unknown)
    :param pic_name: file path for the generated plot
    :return: (repaired peak indices, number of corrections made, updated threshold)
    """
    filtered = pt.bandpass_filter_scg(data, 200, 0.5, 10)  # band-pass filter
    # derivative -> square -> moving-window integration
    windowed = pt.moving_window_average(pt.square(pt.derivative(filtered)))
    peaks, locs = pt.findpeaks(windowed, int(0.2 * 200 + 0.5))  # baseline peak candidates
    amp_win, idx_win, amp_flt, idx_flt, thrs_win1, thrs_win2, thrs_flt1, thrs_flt2 = \
        pt.judge_rule(filtered, windowed, peaks, locs, 200)

    # drop redundant detections, then fill in missing ones
    idx_flt, amp_flt, threshold, removed = redundant_data(idx_flt, amp_flt, threshold)
    idx_flt, amp_flt, threshold, inserted = missing_data(idx_flt, amp_flt, filtered, threshold)

    display.plot_peak_sig_and_noise_for_win_and_filter(
        windowed, filtered, idx_win, amp_win, idx_flt, amp_flt, locs,
        thrs_win1, thrs_win2, thrs_flt1, thrs_flt2,
        title1='ppg信号经过Pan-Tompkins算法处理后的处理结果',
        title2='ppg信号经过滤波修复处理后的结果', path=pic_name)

    return idx_flt, len(removed) + len(inserted), threshold


# Heart-rate calculation
def heart_rate_cal(time, label):
    """Average instantaneous heart rate (bpm) over consecutive detected peaks.

    :param time: sequence of sample timestamps in seconds
    :param label: indices into ``time`` of the detected peaks, in order
    :return: mean of 60 / interval over all non-zero peak-to-peak intervals,
             or 0 when no valid interval exists
    """
    total = 0.0
    num = 0
    for a, b in zip(label, label[1:]):
        interval = time[b] - time[a]
        if interval == 0:  # duplicate timestamps would divide by zero
            continue
        total += 60 / interval
        num += 1
    # fix: the original raised ZeroDivisionError when label had fewer than
    # two entries or every interval was zero
    return total / num if num else 0


def calculate_ptt(ppg_index, scg_index, ppg_time, scg_time):
    """Estimate pulse transit time from paired SCG and PPG peaks.

    Pairs every SCG peak with each PPG peak that follows it by less than
    0.5 s, clusters the time differences, and returns the mean of the most
    populated cluster (assumed to be the true transit times).

    :param ppg_index: PPG peak indices into ``ppg_time``
    :param scg_index: SCG peak indices into ``scg_time``
    :param ppg_time: PPG timestamps in seconds
    :param scg_time: SCG timestamps in seconds
    :return: mean transit time of the dominant cluster (negative seconds),
             or 1 when no valid pairing was found
    """
    diffs = []
    # collect SCG->PPG phase differences inside the plausible window
    for si in scg_index:
        for pi in ppg_index:
            d = scg_time[si] - ppg_time[pi]
            if -0.5 < d < 0:
                diffs.append(d)
    if not diffs:
        return 1

    clusters = classify_data(diffs)
    # fix: the original shadowed the builtin `max` with a counter variable;
    # max(..., key=len) keeps the same first-largest-cluster tie behavior
    dominant = max(clusters, key=len)
    return sum(dominant) / len(dominant)


def linear_least_squares_fit(x, y):
    """Fit ``y = slope * x + intercept`` by ordinary least squares.

    :param x: sequence of x values
    :param y: sequence of y values, same length as ``x``
    :return: (slope, intercept)
    :raises ValueError: if all x values are identical (slope undefined)
    """
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)

    x_mean = np.mean(x)
    y_mean = np.mean(y)

    numerator = np.sum((x - x_mean) * (y - y_mean))
    denominator = np.sum((x - x_mean) ** 2)
    # fix: the original silently produced nan (with a RuntimeWarning) when all
    # x were equal; it also declared an unused local `n = len(x)`
    if denominator == 0:
        raise ValueError("cannot fit a line: all x values are identical")

    slope = numerator / denominator
    intercept = y_mean - slope * x_mean

    return slope, intercept


# --------------------------------------------------#
# --------------------------------------------------#
# --------------------------------------------------#
# --------------------------------------------------#

def find_best_k(data, max_k=10):
    """
    Pick the number of clusters k (2..max_k) with the best silhouette score.

    :param data: list of numbers
    :param max_k: maximum number of clusters to try
    :return: best k, or 1 when no clustering could be scored (too few samples)
    """
    data = np.array(data).reshape(-1, 1)
    silhouette_scores = []

    # fix: the original also collected kmeans.inertia_ into a `distortions`
    # list that was never read; that dead code is removed here
    for k in range(2, max_k + 1):
        if k > len(data):  # cannot have more clusters than samples
            break

        kmeans = KMeans(n_clusters=k, random_state=0).fit(data)
        try:
            score = silhouette_score(data, kmeans.labels_)
        except ValueError:
            score = -1  # invalid clustering gets a low score
        silhouette_scores.append(score)

    if not silhouette_scores:
        return 1  # not enough samples for any k >= 2

    # +2 because the candidate ks start at 2
    return silhouette_scores.index(max(silhouette_scores)) + 2


def classify_data(data, max_k=10):
    """
    Group numbers by value using k-means with an automatically chosen k.

    :param data: list of numbers
    :param max_k: maximum number of clusters to consider
    :return: list of clusters, each a list of the values assigned to it
    """
    k = find_best_k(data, max_k)
    points = np.array(data).reshape(-1, 1)
    model = KMeans(n_clusters=k, random_state=0).fit(points)
    buckets = [[] for _ in range(k)]
    for cluster_id, point in zip(model.labels_, points):
        buckets[cluster_id].append(point[0])
    return buckets


def find_max_in_range(data, start, end):
    """
    Find the absolute index of the maximum value within data[start:end + 1].

    :param data: list of numbers
    :param start: first index of the range (inclusive)
    :param end: last index of the range (inclusive)
    :return: absolute index of the maximum value (first occurrence on ties)
    :raises ValueError: if the range falls outside the list or start > end
    """
    if start < 0 or end >= len(data) or start > end:
        raise ValueError("无效的范围")

    # fix: the original sliced the list twice (two O(n) copies, one for max()
    # and one for .index()); a single keyed max over the index range avoids both
    return max(range(start, end + 1), key=data.__getitem__)


# Threshold calculation
def calculate_threshold(distance, threshold):
    """
    Derive the expected peak-to-peak distance from the dominant cluster.

    :param distance: list of distances between consecutive peaks
    :param threshold: previous threshold, used as a fallback
    :return: mean of the largest cluster when it covers at least half the
             samples; otherwise the previous threshold, or 0 when there is
             no previous threshold either (signal too noisy)
    """
    clusters = classify_data(distance)
    # fix: the original shadowed the builtin `max` with a counter variable;
    # max(..., key=len) keeps the same first-largest-cluster tie behavior
    dominant = max(clusters, key=len)
    # enough samples agree: trust our own estimate
    if len(dominant) >= 0.5 * len(distance):
        return sum(dominant) / len(dominant)
    # not enough samples and no previous threshold to fall back on
    if threshold == 0:
        return 0
    return threshold


def redundant_data(index, value, threshold):
    """
    Remove spurious peaks that sit too close to a neighbor.

    When two peaks are closer than half the expected distance, the one with
    the smaller amplitude is treated as noise and removed.

    :param index: peak positions (list of sample indices)
    :param value: peak amplitudes matching ``index``
    :param threshold: expected peak-to-peak distance (0 = unknown)
    :return: (filtered index list, filtered value list, updated threshold,
              removed positions sorted in descending order)
    """
    # distances between consecutive peaks
    gaps = [b - a for a, b in zip(index, index[1:])]
    threshold = calculate_threshold(gaps, threshold)
    half_threshold = threshold / 2  # redundancy cutoff

    doomed = set()
    for pos, gap in enumerate(gaps):
        if gap < half_threshold:
            # of the two peaks forming the short gap, drop the weaker one
            doomed.add(pos + 1 if value[pos] > value[pos + 1] else pos)

    delete_indices = sorted(doomed, reverse=True)
    kept_index = [v for i, v in enumerate(index) if i not in doomed]
    kept_value = [v for i, v in enumerate(value) if i not in doomed]

    return kept_index, kept_value, threshold, delete_indices


def missing_data(index, value, original_data, threshold):
    """
    Insert peaks that the detector missed.

    A gap between consecutive peaks that is well above the expected distance
    (``threshold``) is assumed to hide one or more missed peaks; each missed
    peak is recovered as the local maximum of ``original_data`` around its
    expected position.

    :param index: peak positions (list of sample indices)
    :param value: peak amplitudes matching ``index``
    :param original_data: filtered signal; assumed to be a numpy array since
        ``.tolist()`` is called on it — TODO confirm against callers
    :param threshold: expected peak-to-peak distance (0 = unknown)
    :return: (index with insertions, value with insertions, updated threshold,
        list of positions at which entries were inserted into the peak list)
    """
    # distances between consecutive peaks
    distance = [index[i + 1] - index[i] for i in range(len(index) - 1)]
    # work on copies so the caller's lists stay untouched
    value_backup = value.copy()
    index_backup = index.copy()
    threshold = calculate_threshold(distance, threshold)
    insert_indices = []
    insert_values_indices = []
    insert_values = []
    num = 0
    # a gap much larger than the expected distance means peaks are missing
    for i in range(len(distance)):
        # count how many peaks fit into this gap (smallest j with
        # distance[i] < (j + 1.75) * threshold); 0 when threshold is 0
        loss = 0
        for j in range(len(original_data)):
            if distance[i] < (j + 1.75) * threshold:
                loss = j
                break

        # recover each missing peak
        for k in range(loss):
            # search the k-th expected window after index[i] for a local maximum
            idx = find_max_in_range(original_data.tolist(), int(index[i] + (k + 0.75) * threshold),
                                    int(index[i] + (k + 1.25) * threshold))
            # position to insert into the peak list (num accounts for earlier insertions)
            insert_indices.append(i + k + 1 + num)
            # position of the recovered peak within the signal
            insert_values_indices.append(idx)
            # amplitude of the recovered peak
            insert_values.append(original_data[idx])
        num += loss

    # splice the recovered positions into the index list
    for idx, val1 in zip(insert_indices, insert_values_indices):
        index_backup.insert(idx, val1)

    # splice the recovered amplitudes into the value list
    for idx, val2 in zip(insert_indices, insert_values):
        value_backup.insert(idx, val2)

    return index_backup, value_backup, threshold, insert_indices
