import numpy as np
import pandas as pd
import re
from scipy.fft import fft
from scipy import signal
import pywt
import csv
import os
def read_file(file_path):
    """
    Read a vibration-acceleration signal from a text file.

    Each non-empty line is parsed as one float sample.

    Parameters
    ----------
    file_path : str
        Path to a text file with one numeric value per line.

    Returns
    -------
    list[float]
        The samples in file order.
    """
    with open(file_path, 'r') as file:
        # Skip blank lines (e.g. a trailing newline) so float() never
        # receives an empty string — the original crashed on those.
        return [float(line) for line in file if line.strip()]

def get_fft(data, sample_rate):
    """
    Transform a time-domain signal to the frequency domain and return the
    normalized magnitude spectrum.

    Only strictly positive frequencies below the Nyquist frequency are
    kept (the DC bin and the negative/mirror half are excluded), and the
    magnitudes are scaled so the largest bin equals 1.

    Parameters
    ----------
    data : array_like
        Time-domain samples.
    sample_rate : float
        Sampling rate in Hz.

    Returns
    -------
    numpy.ndarray
        Normalized FFT magnitudes for the positive-frequency bins.
    """
    n = len(data)
    freqs = np.fft.fftfreq(n, 1 / sample_rate)
    # Keep 0 < f < Nyquist; drops DC and the negative-frequency mirror.
    mask = (freqs > 0) & (freqs < sample_rate / 2)
    fft_abs = np.abs(fft(data)[mask])
    peak = np.max(fft_abs)
    if peak == 0:
        # All-zero signal: the original divided 0/0 and returned NaNs.
        return fft_abs
    return fft_abs / peak

def remove_low_freq(data, fs=10000, lowcut=10):
    """
    High-pass filter a signal, removing components below ``lowcut`` Hz.

    Uses a 4th-order Butterworth filter applied forward and backward
    (``filtfilt``) for zero phase distortion.

    Parameters
    ----------
    data : array_like
        Time-domain samples.
    fs : float, optional
        Sampling rate in Hz (default 10000).
    lowcut : float, optional
        Cutoff frequency in Hz; content below this is attenuated.

    Returns
    -------
    numpy.ndarray
        The filtered signal, same length as ``data``.
    """
    nyquist = 0.5 * fs
    normalized_cut = lowcut / nyquist
    # BUG FIX: the original used btype='low', building a low-pass that
    # *kept* the low frequencies instead of removing them as the function
    # name and docstring state.
    b, a = signal.butter(4, normalized_cut, btype='high')
    return signal.filtfilt(b, a, data)

def cosine_similarity(vec1, vec2):
    """Return the cosine similarity (dot product over norm product) of two vectors."""
    norms_product = np.linalg.norm(vec1) * np.linalg.norm(vec2)
    return np.dot(vec1, vec2) / norms_product


def get_similarity(file_path1, file_path2, num_freqs=17000, time_weight=0.4):
    """
    Compute the similarity of the vibration-acceleration signals stored in
    two text files.

    Both signals are filtered, decomposed with a single-level 'db4'
    wavelet transform, and compared with cosine similarity in the time
    domain (approximation coefficients) and the frequency domain (FFT of
    the detail coefficients). The result is a weighted blend of the two.

    Parameters
    ----------
    file_path1, file_path2 : str
        Paths to the two signal files (one sample per line). The signals
        are assumed to have equal length — cosine similarity requires
        equally sized coefficient vectors.
    num_freqs : int, optional
        Unused; retained for backward compatibility with existing callers.
    time_weight : float, optional
        Weight of the time-domain similarity in [0, 1]; the
        frequency-domain similarity receives ``1 - time_weight``.

    Returns
    -------
    float
        Weighted cosine similarity of the two signals.
    """
    # Read and filter both signals.
    filtered1 = remove_low_freq(read_file(file_path1))
    filtered2 = remove_low_freq(read_file(file_path2))
    # Single-level wavelet decomposition: cA = approximation (trend),
    # cD = detail (high-frequency content).
    cA1, cD1 = pywt.dwt(filtered1, 'db4')
    cA2, cD2 = pywt.dwt(filtered2, 'db4')
    # Frequency-domain representation of the detail coefficients.
    fft_norm1 = get_fft(cD1, 10000)
    fft_norm2 = get_fft(cD2, 10000)
    # NOTE(review): the original also computed a Euclidean distance, a
    # Pearson correlation of the top-``num_freqs`` amplitudes and a
    # frequency-domain error, none of which contributed to the returned
    # value; that dead work is removed here.
    time_similarity = cosine_similarity(cA1, cA2)
    freq_similarity = cosine_similarity(fft_norm1, fft_norm2)
    return time_weight * time_similarity + (1 - time_weight) * freq_similarity

def tryint(s):
    """Return ``int(s)`` when it parses as an integer, otherwise ``s`` unchanged."""
    try:
        converted = int(s)
    except ValueError:
        return s
    return converted

def alphanum_key(s):
    """
    Split a string into alternating text and integer chunks for natural
    sorting, e.g. "z23a" -> ["z", 23, "a"].
    """
    chunks = re.split(r'([0-9]+)', s)
    return list(map(tryint, chunks))

def calculate_similarity_and_save(directory, output_csv):
    """
    Slide a 3-file window over the .txt files in ``directory`` (in natural
    sort order), score each consecutive triplet, and save the results as CSV.

    Parameters
    ----------
    directory : str
        Directory containing the signal .txt files.
    output_csv : str
        Path of the CSV file to write the results to.
    """
    file_paths = sorted(
        [os.path.join(directory, file) for file in os.listdir(directory) if file.endswith(".txt")],
        key=alphanum_key,
    )
    num_files = len(file_paths)
    if num_files < 3:
        print(f"Less than three text files in directory {directory}. No output will be generated.")
        return

    results = []
    for i in range(num_files - 2):
        triplet_files = [file_paths[i], file_paths[i + 1], file_paths[i + 2]]
        similarities = np.zeros((3, 3))
        # NOTE(review): only the adjacent pairs (0,1) and (1,2) are scored;
        # pair (0,2) is never computed, although the divisor below
        # (3*(3-1)/2 == 3 pairs) suggests it may have been intended.
        for j in range(2):
            similarity = get_similarity(triplet_files[j], triplet_files[j + 1])
            similarities[j, j + 1] = similarity
            similarities[j + 1, j] = similarity

        # RMS-style aggregate over the symmetric matrix; each computed
        # pair appears twice, so the sum of squares double-counts it.
        weighted_sum = np.sum(np.square(similarities))
        mean_similarity = np.sqrt(weighted_sum / (3 * (3 - 1) / 2))

        results.append([triplet_files[0], triplet_files[1], triplet_files[2], mean_similarity])

    # BUG FIX: the original ignored ``output_csv``, wrote to a hard-coded
    # 'similarity_results5.csv', and then printed yet another name ('out1.csv').
    with open(output_csv, 'w', newline='') as file:
        writer = csv.writer(file)
        writer.writerow(["File 1", "File 2", "File 3", "Mean similarity"])
        writer.writerows(results)

    print(f"Similarity results saved to '{output_csv}'.")


# Run the sample analysis only when executed as a script, not on import.
if __name__ == "__main__":
    calculate_similarity_and_save("F:\\code\\pythonProject\\L1", "output1.csv")
