import struct
import numpy as np
import os
import re
import glob
from collections import *
from ctypes import *
import scipy.io as sio
import pandas as pd
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
# Allow Chinese characters in matplotlib labels to render correctly.
plt.rcParams['font.sans-serif']=['SimHei']
# Render the minus sign correctly (otherwise it shows as a missing glyph).
plt.rcParams['axes.unicode_minus']=False


def loaddata(folder_denoise):  # folder_denoise is the path to a Raw8 file
    """Read one Raw8 spectrum file and return (wavelengths, intensities).

    Raw8 layout used here:
      * bytes 89-92: two uint16 values — first and last pixel index of the
        spectrum (inclusive), so ``n_pixels = last - first + 1``.
      * bytes 328 onward: ``2 * n_pixels`` float32 values — the wavelength
        axis first, then the intensity values.

    Parameters
    ----------
    folder_denoise : str
        Path to a Raw8 file.

    Returns
    -------
    tuple[list[float], list[float]]
        ``(wave, specture)`` — wavelength axis and spectrum intensities.
    """
    # Open once and close deterministically (the original leaked two handles).
    with open(folder_denoise, 'rb') as binfile:
        binfile.seek(89)
        first_pix, last_pix = struct.unpack('2H', binfile.read(4))
        n_pixels = last_pix - first_pix + 1  # avoid shadowing builtin len()
        binfile.seek(328)
        payload = struct.unpack('%df' % (n_pixels * 2),
                                binfile.read(4 * 2 * n_pixels))
    values = list(payload)
    wave = values[:n_pixels]
    specture = values[n_pixels:]
    return wave, specture

def fileload(folder_denoise, savefolder_denoise, pattern):
    """Decode every matching Raw8 file under ``folder_denoise`` into CSV tables.

    For each sub-folder of ``folder_denoise`` (except one named "解密"), all
    files matching ``pattern`` are decoded with :func:`loaddata`, grouped by
    the filename prefix before the first ``'_'`` (one group per spectrometer
    channel), sorted by the sum of each channel's wavelength axis, truncated
    to the smallest spectrum count seen for any prefix, and written as
    ``Times_0.csv`` (column 0 = stacked wavelength axes, remaining columns =
    spectra) into the matching sub-folder of ``savefolder_denoise``.
    """
    if os.path.exists(savefolder_denoise):
        print('解密文件夹存在！')
    else:
        os.makedirs(savefolder_denoise)
    for filefolder_denoise in os.listdir(folder_denoise):
        # Skip the output folder if it lives inside the input folder.
        if filefolder_denoise == "解密":
            print("解密")
        else:
            print(filefolder_denoise)
            filefolder_denoisepath = os.path.join(folder_denoise, filefolder_denoise)
            savefile = os.path.join(savefolder_denoise, filefolder_denoise)
            if os.path.exists(savefile):
                print('文件夹存在！')
            else:
                os.makedirs(savefile)
            data = {}
            waves, sums, datas = [], [], []
            #print('------',filefolder_denoisepath)
            for file in os.listdir(filefolder_denoisepath):
                print(file)
                mes = re.match(pattern, file)
                if mes:
                    wave, specture = loaddata(os.path.join(filefolder_denoisepath, file))
                    name = file.split('_')[0]
                    # Keep only one copy of each distinct wavelength axis
                    # (one axis per channel) plus its sum as a sort key.
                    if wave in waves:
                        pass
                    else:
                        waves.append(wave)
                        sums.append(sum(wave))
                    # Group spectra by channel prefix (text before first '_').
                    if name in data:
                        data[name].append(specture)
                    else:
                        data[name] = []
                        data[name].append(specture)
                else:
                    print('文件不匹配！')
            print("++++++++++++%s" % len(waves))
            # One (n_points, n_spectra) matrix per channel prefix.
            # NOTE(review): assumes the dict insertion order of `data`
            # matches the order channels were appended to waves/sums — confirm.
            for key, value in data.items():
                datas.append(np.vstack(value).T)
            # Sort channels by the sum of their wavelength axis so the
            # stacked output is in ascending wavelength order.
            lis = np.array(sums)
            sorted_index = np.argsort(lis)
            print("------------------------%s" % sorted_index)
            sumss, wavess, datass = [], [], []
            for i in sorted_index:
                sumss.append(sums[i])
                wavess.append(waves[i])
                print(datas)
                print(i)
                datass.append(datas[i])
            print("------------------------%s"%datass)
            # Find the smallest number of spectra recorded for any prefix,
            # so every channel can be truncated to the same column count.
            folder_denoise_name = os.listdir(filefolder_denoisepath)
            list1 = []
            for i in range(len(folder_denoise_name)):
                list1.append(folder_denoise_name[i].split('_')[0])
            minzhi = min(Counter(list1).values())
            # (the block above was hand-written)
            print(minzhi)
            wavess = np.hstack(wavess).reshape(-1,1)
            #lines = []
            datass2 = []
            for i in datass:
                print('--------',minzhi)
                datass2.append(i[:,:minzhi])  # keep only the first `minzhi` spectra per channel
            datass = np.vstack(datass2)
            print(wavess.shape)
            print(datass.shape)
            # Final table: wavelength column followed by the spectra columns.
            datasss = np.hstack((wavess, datass))
            savefiledata = os.path.join(savefile, 'Times_0.csv')
            print('数据保存至：', savefiledata)
            np.savetxt(savefiledata, datasss, fmt='%.04f',delimiter=',')
            del datass
            del wavess

# Baseline (background) removal
def remove_baseline2(wave, csvdata, passage):
    """Remove the baseline from each spectrum of one channel via an external DLL.

    Parameters
    ----------
    wave : numpy.ndarray
        Wavelength axis of the channel, shape ``(n_points,)``.
    csvdata : numpy.ndarray
        Spectra of the channel, shape ``(n_spectra, n_points)``.
    passage : int
        Channel width passed to the DLL (number of points in this channel).

    Returns
    -------
    numpy.ndarray
        Baseline-corrected spectra, shape ``(n_spectra, n_points)``,
        rounded to 4 decimal places.

    NOTE(review): requires ``removebase_python.dll`` next to this file; the
    DLL's algorithm and exact buffer layout expectations are not visible here.
    """
    basedatas = []
    # Column 0 is the wavelength axis; remaining columns are the spectra,
    # so datas has shape (n_points, n_spectra + 1).
    datas = np.hstack((wave.reshape(-1,1), csvdata.T))
    row = c_int(datas.shape[1])      # number of columns fed to the DLL
    column = c_int(datas.shape[0])   # number of points per column
    dpassage = c_double(passage)
    n1 = datas.shape[1] * datas.shape[0]        # total input values (incl. wavelength column)
    n2 = (datas.shape[1]-1) * datas.shape[0]    # total output values (spectra only)
    # Flatten column-by-column: the first n_points values are the wavelengths.
    data = datas.T.flatten()
    hist = (c_double*n1)()
    Xvec = (c_double*n2)()
    # Copy the input into a ctypes array for the DLL call.
    for i in range(n1):
        hist[i] = data[i]
    curpath = os.path.dirname(__file__)
    dllfolder_denoise = os.path.join(curpath, 'removebase_python.dll')
    dll = cdll.LoadLibrary(dllfolder_denoise)
    dll.main_removebases(hist, row, column, Xvec, dpassage)
    # Read the corrected spectra back out of the output buffer.
    for j in range(n2):
        basedatas.append(Xvec[j])
    basedatas = np.array(basedatas).reshape(datas.shape[1]-1, datas.shape[0])
    basedatas = np.around(basedatas, decimals=4)
    return basedatas

def passageborder(wave):
    """Locate spectrometer-channel boundaries along the wavelength axis.

    A jump of more than 0.8 between consecutive wavelength samples marks
    the start of a new channel.  The returned list holds the index of the
    first sample of each new channel and ends with ``wave.size``, so the
    entries can be used directly as exclusive slice upper bounds.

    Parameters
    ----------
    wave : numpy.ndarray
        Wavelength axis, shape ``(n_points,)``.

    Returns
    -------
    list[int]
        Channel boundary indices, terminated by ``wave.size``.
    """
    jump_positions = np.flatnonzero(np.abs(np.diff(wave)) > 0.8)
    passaged = [int(pos) + 1 for pos in jump_positions]
    passaged.append(wave.size)
    return passaged

def back_data3(savefolder_denoise, savefile):
    """Baseline-correct every decoded CSV under ``savefolder_denoise``.

    For each sub-folder (except the output folder itself), reads its
    ``Times_0.csv``, splits the table into spectrometer channels via
    :func:`passageborder`, runs :func:`remove_baseline2` channel by channel,
    and writes the corrected table plus a ``judge.mat`` file into the
    matching sub-folder of ``savefile``.

    NOTE(review): paths are built with literal backslashes, so this is
    Windows-only as written.
    """
    if os.path.exists(savefile):
        print('扣背底文件夹存在！')
    else:
        os.makedirs(savefile)
    for filefolder_denoise in os.listdir(savefolder_denoise):
        # Skip the output folder when it lives inside the input folder.
        if filefolder_denoise == "扣背底":
            print(filefolder_denoise)
        else:
            print(filefolder_denoise)
            filefolder_denoisepath = os.path.join(savefolder_denoise, filefolder_denoise)
            savefile123 = os.path.join(savefile, filefolder_denoise)
            if os.path.exists(savefile123):
                print('文件夹存在！')
            else:
                os.makedirs(savefile123)
            csvdata = np.loadtxt(r"%s\Times_0.csv"%filefolder_denoisepath, delimiter=',')
            print(csvdata)
            value = csvdata[:, 1:].T   # spectra, shape (n_spectra, n_points)
            wave = csvdata[:, 0]       # wavelength axis
            a = 0
            csvdatas = []
            passaged = passageborder(wave)
            # Baseline-correct one channel [a, i) at a time.
            for i in passaged:
                # print(wave[a:i])
                # print(value[:, a:i])
                # print(i - a)
                csvdata = remove_baseline2(wave[a:i], value[:, a:i], i - a)
                # print(csvdata)
                csvdatas.append(csvdata.T)
                a = i
            csvdata5 = np.vstack(csvdatas)
            #csvdata5 = back_data_screening(csvdata5)
            data = pd.DataFrame(csvdata5, index=wave)
            data.to_csv(r"%s\Times_0.csv"%savefile123, header=None)
            # NOTE(review): saves an empty .mat — presumably a completion
            # marker consumed elsewhere; confirm against the downstream tool.
            sio.savemat(os.path.join(savefile123, 'judge.mat'), {})

def peak_area123(num1, num2, k, frame=None):
    """Trapezoidal area of one peak, using absolute intensities.

    Sums the trapezoids between consecutive samples over rows
    ``num1 .. num2-1`` (the last trapezoid's right edge is row ``num2-1``),
    with column 0 as the wavelength axis.

    Parameters
    ----------
    num1, num2 : int
        Left / right row boundaries of the peak window.
    k : int
        Column index of the spectrum to integrate (column 0 is wavelength).
    frame : pandas.DataFrame, optional
        Table to read from.  Defaults to the module-level global ``data``
        set up by the ``__main__`` script, for backward compatibility —
        the original implementation read that global unconditionally and
        raised NameError when the function was used on its own.

    Returns
    -------
    float
        Peak area with intensities taken as absolute values.
    """
    table = data if frame is None else frame
    wl = table.iloc[num1:num2, 0].to_numpy(dtype=float)
    inten = np.abs(table.iloc[num1:num2, k].to_numpy(dtype=float))
    # Trapezoid rule on |intensity| over the wavelength axis.
    return float(np.sum((inten[:-1] + inten[1:]) * np.diff(wl) / 2.0))

def raw_peak_area123(num1, num2, k, frame=None):
    """Trapezoidal area of one peak, using signed (raw) intensities.

    Same computation as :func:`peak_area123` but without taking absolute
    values, so negative intensities subtract from the area.

    Parameters
    ----------
    num1, num2 : int
        Left / right row boundaries of the peak window (trapezoids span
        rows ``num1 .. num2-1``).
    k : int
        Column index of the spectrum to integrate (column 0 is wavelength).
    frame : pandas.DataFrame, optional
        Table to read from.  Defaults to the module-level global ``data``
        for backward compatibility — the original read that global
        unconditionally and raised NameError when used on its own.

    Returns
    -------
    float
        Signed peak area.
    """
    table = data if frame is None else frame
    wl = table.iloc[num1:num2, 0].to_numpy(dtype=float)
    inten = table.iloc[num1:num2, k].to_numpy(dtype=float)
    # Trapezoid rule on the signed intensities over the wavelength axis.
    return float(np.sum((inten[:-1] + inten[1:]) * np.diff(wl) / 2.0))

# def noise(num1,num2,k):
#     wavelength=data.iloc[:, 0:1].T.values[0]   #选取噪声的波长范围
#     Intensity = data.iloc[:, k:k+1].T.values[0]   #噪声的光谱强度
#     if num2 >=len(Intensity)-1:
#         raise IndexError("num2 insex out of range")
#     trapezoid_Intensity= (abs(Intensity[num1]) + abs(Intensity[num2 + 1])) * (wavelength[num2 + 1] - wavelength[num1]) / 2
#     return trapezoid_Intensity

# def Lorentz(x, y0, A, xc, w):  # 洛伦兹函数
#     y = y0 + (2 * A / np.pi) * (w / (4 * (x - xc) ** 2 + w ** 2))
#     return y
#
# def peak_area234(num1,num2,k):  # 洛伦兹拟合
#     df_bc = data.iloc[num1:num2, 0:1].T.values[0]
#     df = data.iloc[num1:num2, k:k+1].T.values[0]
#     p0 = ([0, 0 * (df_bc[-1] - df_bc[0]) * max(df), df_bc[list(df).index(max(df))] - 0.15, 0],[500, 1 * (df_bc[-1] - df_bc[0]) * max(df), df_bc[list(df).index(max(df))] + 0.15, 1])
#     p, c = curve_fit(Lorentz, df_bc, df, bounds=p0, absolute_sigma=True)  # 赋初值对拟合结果影响较大
#     y0, A, xc, w = p  # A--峰面积；xO--峰值位置横坐标；w--半宽高；
#     xfit = np.linspace(df_bc.min(), df_bc.max(), 100)  # 在间隔start和stop之间返回num个均匀间隔的数据
#     yfit = Lorentz(xfit, y0, A, xc, w)
#     return df_bc,df,A,xfit,yfit


if __name__ == '__main__':
    # Pipeline: decode Raw8 -> remove baselines -> integrate element peaks.
    folder_denoise = r'C:\Users\Administrator\Desktop\新建文件夹 (2)\新建文件夹'  # top-level directory holding the raw sub-folders
    savefolder_denoise = r'%s\解密'%folder_denoise  # folder where decoded files are written
    pattern = '731.*' #  filename pattern selecting the channel files
    fileload(folder_denoise, savefolder_denoise, pattern)  # decode the Raw8 files
    savefile = r'%s\扣背底'%savefolder_denoise  # baseline-removal output folder
    ###  baseline removal
    back_data3(savefolder_denoise, savefile)
    # Hard-coded element-peak row windows (row indices into the CSV's
    # wavelength axis as written by fileload).
    # Na peak 1, 588.995 nm
    num1, num2 = [8355, 8367]
    # Na peak 2, 589.592 nm
    num3, num4 = [8366, 8376]
    # H peak 3, 656.271 nm
    num5, num6 = [9401, 9498]
    # Collect the per-sample folders to process.
    folder_denoise = os.listdir(savefile)
    Raw_data_folder=os.listdir(savefolder_denoise)
    print(folder_denoise, Raw_data_folder)
    # Pass 1: integrate peaks in the baseline-corrected spectra.
    for i in folder_denoise:
        # `data` is also read as a global by peak_area123/raw_peak_area123.
        data = pd.read_csv(r"%s\%s\Times_0.csv" % (savefile, i), header=None)
        list_peakarea1, list_peakarea2, list_peakarea3 = [], [], []  # direct calculation of peaks 1, 2, 3
        list_noise_Intensity1, list_noise_Intensity2, list_noise_Intensity3 = [], [], []   # noise of the three peaks
        # list_peakarea11, list_peakarea22, list_peakarea33 = [], [], []  # 拟合峰1,2,3------------拟合峰
        for j in range(data.shape[1] - 1):
            peak_area1 = peak_area123(num1, num2, j + 1)
            peak_area2 = peak_area123(num3, num4, j + 1)
            peak_area3 = peak_area123(num5, num6, j + 1)

            list_peakarea1.append(peak_area1)
            list_peakarea2.append(peak_area2)
            list_peakarea3.append(peak_area3)

            # trapezoid_Intensity1 = noise(num1, num2, j + 1)
            # trapezoid_Intensity2 = noise(num3, num4, j + 1)
            # trapezoid_Intensity3 = noise(num5, num6, j + 1)
            #
            # list_noise_Intensity1.append(trapezoid_Intensity1)
            # list_noise_Intensity2.append(trapezoid_Intensity2)
            # list_noise_Intensity3.append(trapezoid_Intensity3)

            # df_bc1, df1, A1, xfit1, yfit1 = peak_area234(num1, num2, j + 1)     # ------------拟合峰
            # df_bc2, df2, A2, xfit2, yfit2 = peak_area234(num3, num4, j + 1)     # ------------拟合峰
            # df_bc3, df3, A3, xfit3, yfit3 = peak_area234(num5, num6, j + 1)     # ------------拟合峰
            # list_peakarea11.append(A1)
            # list_peakarea22.append(A2)
            # list_peakarea33.append(A3)
        # df_zong = pd.concat([pd.DataFrame(list_peakarea1), pd.DataFrame(list_peakarea2), pd.DataFrame(list_peakarea3),pd.DataFrame(list_peakarea11), pd.DataFrame(list_peakarea22),pd.DataFrame(list_peakarea33)], axis=1)   # ------------拟合峰
        # df_zong.columns = ["峰1计算", "峰2计算", "峰3计算", "峰1拟合计算", "峰2拟合计算", "峰3拟合计算"]   # ------------拟合峰
        df_zong = pd.concat([pd.DataFrame(list_peakarea1), pd.DataFrame(list_peakarea2), pd.DataFrame(list_peakarea3)],axis=1)
        df_zong.columns = ["峰1计算", "峰2计算", "峰3计算"]
        df_zong.to_csv(r"%s\%s\峰面积.csv" % (savefile, i))
    # Pass 2: integrate peaks in the raw (un-corrected) spectra.
    for i in Raw_data_folder:
        # base_path is the root directory searched for csv files
        base_path=savefolder_denoise
        # Find all folders whose name starts with 'log_'
        log_folders=[f for f in os.listdir(base_path) if os.path.isdir(os.path.join(base_path, f)) and f.startswith('log_')]
        # Iterate over each 'log_' folder
        for folder in log_folders:
            folder_path = os.path.join(base_path, folder)
            # Find all CSV files in that folder
            csv_files = glob.glob(os.path.join(folder_path, '*.csv'))
            # Read each CSV file
            for csv_file in csv_files:
                # NOTE(review): each read overwrites `data`, so only the LAST
                # CSV found survives, and the loop variable `i` is never used
                # below — confirm this is the intended behavior.
                data = pd.read_csv(csv_file, header=None)
        raw_list_peakarea1, raw_list_peakarea2, raw_list_peakarea3 = [], [], []  # direct calculation of peaks 1, 2, 3
        raw_list_noise_Intensity1, raw_list_noise_Intensity2, raw_list_noise_Intensity3 = [], [], []  # noise of the three peaks
        # list_peakarea11, list_peakarea22, list_peakarea33 = [], [], []  # 拟合峰1,2,3------------拟合峰
        for j in range(data.shape[1] - 1):
            raw_peak_area1 = raw_peak_area123(num1, num2, j + 1)
            raw_peak_area2 = raw_peak_area123(num3, num4, j + 1)
            raw_peak_area3 = raw_peak_area123(num5, num6, j + 1)

            raw_list_peakarea1.append(raw_peak_area1)
            raw_list_peakarea2.append(raw_peak_area2)
            raw_list_peakarea3.append(raw_peak_area3)

        df_zong = pd.concat(
            [pd.DataFrame(raw_list_peakarea1), pd.DataFrame(raw_list_peakarea2), pd.DataFrame(raw_list_peakarea3)], axis=1)
        df_zong.columns = ["峰1计算", "峰2计算", "峰3计算"]
        # NOTE(review): base_path is a DIRECTORY — to_csv on it will raise;
        # a file path (e.g. os.path.join(base_path, i, '峰面积.csv')) was
        # probably intended, and each iteration would otherwise overwrite it.
        df_zong.to_csv(base_path, index=False)


