from influxdb_client import InfluxDBClient, Point, WritePrecision
from influxdb_client.client.write_api import SYNCHRONOUS
import numpy as np
import pandas as pd
from collections import deque
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from datetime import datetime
import threading
import time
import pytz
from datetime import datetime
import os

#def share_last_data_time():
    #return last_data_time

# InfluxDB configuration
# NOTE(review): the client connects at import time; importing this module with
# the server down will fail at the query calls further below.
url = "http://127.0.0.1:8086/"  # "http://localhost:8086"  # InfluxDB server address
token = "zQVf2aXlUxQzVQS6THc-9RpZzOnpQqLimoKSmuZIPgkxxQLVyoRo-khhAgKbTFmxkWRs_yAoLa2sLWGHIuo2ag=="  # InfluxDB token — NOTE(review): credential committed in source; move to env var/config
org = "太原理工大学"  # organization name
bucket = "dataone"  # bucket name

# Connect to InfluxDB: module-level client and API handles shared by every
# function below (writes are synchronous).
client = InfluxDBClient(url=url, token=token, org=org)
write_api = client.write_api(write_options=SYNCHRONOUS)
query_api = client.query_api()
delete_api = client.delete_api()

# 自定义读写锁
# Custom readers-writer lock
class ReadWriteLock:
    """Readers-preference read/write lock.

    Any number of readers may hold the lock concurrently; a writer gets
    exclusive access. The first reader to arrive takes ``write_lock`` so
    writers block until the last reader leaves. Not reentrant: a thread must
    not acquire read while holding write (or vice versa), or it deadlocks.
    """

    def __init__(self):
        self.readers = 0  # count of threads currently holding the read side
        self.read_lock = threading.Lock()  # guards the reader counter
        self.write_lock = threading.Lock()  # held while any reader or a writer is active

    def acquire_read(self):
        """Acquire the read lock."""
        with self.read_lock:
            self.readers += 1
            # First reader locks writers out; later readers piggyback on it.
            if self.readers == 1:
                self.write_lock.acquire()

    def release_read(self):
        """Release the read lock."""
        with self.read_lock:
            self.readers -= 1
            # Last reader out lets writers proceed again.
            if self.readers == 0:
                self.write_lock.release()

    def acquire_write(self):
        """Acquire the write lock (exclusive)."""
        self.write_lock.acquire()

    def release_write(self):
        """Release the write lock."""
        self.write_lock.release()

# Global read/write lock serializing InfluxDB writes (data_written) against
# reads (query_data).
rw_lock = ReadWriteLock()

# 缓冲池
# 实时数据一级缓冲池
# Buffer pools
# Level-1 buffer pool for realtime data
class DataBufferOtherPool:
    """Level-1 FIFO buffer pool for realtime ("other") samples.

    Every sample is appended to two parallel bounded deques:

    * ``data_buffer_other``      — working queue read by the alignment logic;
    * ``data_buffer_other_info`` — bookkeeping queue, trimmed after rows have
      been promoted to the level-2 pool.

    Fix: ``update_buffer``/``delete_buffer`` previously mutated the deques
    without taking ``self.lock`` while the read accessors did take it; all
    accessors now lock consistently (the pool is shared across threads).
    """

    def __init__(self, max_len=50):
        self.max_len = max_len  # maximum buffer length
        self.data_buffer_other = deque(maxlen=self.max_len)
        self.data_buffer_other_info = deque(maxlen=self.max_len)
        self.lock = threading.Lock()  # guards cross-thread access

    def update_buffer(self, data_other):
        """Append one sample to both queues (oldest entries drop automatically)."""
        with self.lock:  # fix: writes were previously unlocked
            self.data_buffer_other.append(data_other)
            self.data_buffer_other_info.append(data_other)

    # Remove the common (already-promoted) prefix: drop the first n entries
    def delete_buffer(self, n, logger):
        """Pop the first ``n`` entries from the info queue, logging each pop."""
        with self.lock:  # fix: consistent locking with the read accessors
            for _ in range(n):
                if self.data_buffer_other_info:  # queue may run out early
                    self.data_buffer_other_info.popleft()
                    logger.info("删除成功")
                else:
                    logger.info("队列为空")

    def get_data_buffer_other(self):
        """Return the live info deque (a reference, not a copy)."""
        return self.data_buffer_other_info

    def get_data_buffer_other_dui(self):
        """Return the live working deque (a reference, not a copy)."""
        return self.data_buffer_other

    def get_first_n_data_other(self, n=50):
        """Return the oldest ``n`` samples from the working queue."""
        with self.lock:
            return list(self.data_buffer_other)[:n]

    # Latest-first access; duplicate timestamps may need deduplication (see _muti)
    def get_latest_data_other(self, n=1):
        """Return the newest ``n`` samples from the working queue."""
        with self.lock:
            return list(self.data_buffer_other)[-n:]

    def get_latest_data_other_muti(self, n=50):
        """Return ``(latest_one, latest_n, enough_his)``.

        Rows are deduplicated by timestamp (keeping the last occurrence) and
        sorted; ``latest_one`` is the newest row. Rows whose second element is
        0 are then dropped, and if at least ``n`` rows remain, ``latest_n`` is
        the newest ``n`` of them with ``enough_his`` True; otherwise
        ``latest_n`` is False.
        """
        enough_his = False
        with self.lock:
            data_list = list(self.data_buffer_other)
            # Frame the samples by their leading timestamp for dedup/sort
            timestamps = [item[0] for item in data_list]
            data = [item for item in data_list]
            df = pd.DataFrame({
                'timestamp': timestamps,
                'data': data
            })
            # Deduplicate by timestamp, keeping the last occurrence
            df = df.drop_duplicates(subset='timestamp', keep='last')
            # Chronological order
            df = df.sort_values(by='timestamp')
            latest_one = df['data'].iloc[-1:].tolist()
            # Drop rows whose second element is 0 (treated as invalid samples)
            df = df[df['data'].apply(lambda x: x[1] != 0)]
            if len(df) >= n:
                enough_his = True
                latest_n = df['data'].iloc[-n:].tolist()
            else:
                latest_n = False

            return latest_one, latest_n, enough_his


class DataBufferModelPool:
    """Level-1 FIFO buffer pool for model-output samples.

    Structural mirror of ``DataBufferOtherPool``: a working deque consumed by
    the alignment logic and an info deque trimmed after promotion.

    Fix: ``update_buffer``/``delete_buffer`` previously mutated the deques
    without taking ``self.lock`` while the read accessors did take it; all
    accessors now lock consistently (the pool is shared across threads).
    """

    def __init__(self, max_len=50):
        self.max_len = max_len  # maximum buffer length
        self.data_buffer_model = deque(maxlen=self.max_len)
        self.data_buffer_model_info = deque(maxlen=self.max_len)
        self.lock = threading.Lock()  # guards cross-thread access

    def update_buffer(self, data_model):
        """Append one sample to both queues (oldest entries drop automatically)."""
        with self.lock:  # fix: writes were previously unlocked
            self.data_buffer_model.append(data_model)
            self.data_buffer_model_info.append(data_model)

    def delete_buffer(self, n, logger):
        """Pop the first ``n`` entries from the info queue, logging each pop."""
        with self.lock:  # fix: consistent locking with the read accessors
            for _ in range(n):
                if self.data_buffer_model_info:  # queue may run out early
                    self.data_buffer_model_info.popleft()
                    logger.info("删除成功")
                else:
                    logger.info("队列为空")

    def get_data_buffer_model(self):
        """Return the live info deque (a reference, not a copy)."""
        return self.data_buffer_model_info

    def get_data_buffer_model_dui(self):
        """Return the live working deque (a reference, not a copy)."""
        return self.data_buffer_model

    def get_first_n_data_model(self, n=50):
        """Return the oldest ``n`` samples from the working queue."""
        with self.lock:
            return list(self.data_buffer_model)[:n]

    def get_latest_data_model(self, n=1):
        """Return the newest ``n`` samples from the working queue."""
        with self.lock:
            return list(self.data_buffer_model)[-n:]

class SecondDataBufferModelPool:
    """Level-2 buffer pool of time-aligned rows (realtime + model output).

    Entries are dicts ``{'value': ndarray (rows x 34), 'is_stored': bool}``.
    ``update_buffer`` regenerates comparison plots on every accepted entry and,
    once more than 50 entries have accumulated since the last flush, writes all
    not-yet-stored rows to InfluxDB via the module-level ``data_written``.

    NOTE(review): ``update_buffer`` mutates the deque without taking
    ``self.lock`` (only the read helpers lock) — confirm a single writer thread.
    """

    def __init__(self, max_len=50):
        self.max_len = max_len  # maximum buffer length
        self.second_data_buffer_model = deque(maxlen=self.max_len)
        self.number = 0  # entries accepted since the last DB flush
        self.lock = threading.Lock()  # thread lock for the read helpers

    def update_buffer(self, data_model, logger):
        """Accept one aligned entry; plot comparisons; flush to DB after >50 entries."""
        # Reject entries whose array is empty or lacks the expected 34 columns
        value = data_model.get('value')
        if value.shape[0] == 0 or value.shape[1] != 34:
            logger.warning('保存二级缓冲池时，数组为空或列数不符合预期')
            return  # array empty or column count unexpected
        self.second_data_buffer_model.append(data_model)
        # Log how many entries have arrived since the last flush
        logger.info(f"距上一次保存数据库已向二级缓冲池保存{self.number+1}条数据")
        self.get_opt_pc_compare()
        #self.flag = len(self.second_data_buffer_model)
        self.number += 1
        # Flush once the 51st entry since the last flush has been accepted
        if self.number > 50:
            # Only rows not yet persisted to the database
            data_to_store = [data for data in self.second_data_buffer_model if not data.get('is_stored')]
            # Data preparation
            print(data_to_store)
            values = []
            for d in data_to_store:
                if 'value' in d:
                    values.append(d['value'])
            #print(values)
            # Stack every pending row-block into one matrix
            date_to_DB = data_to_store[0]['value']
            for i in range(len(values)-1):
                list1 = data_to_store[i+1]['value']
                date_to_DB = np.concatenate((date_to_DB, list1), axis=0)
                #combined_array_new = np.concatenate((date_to_DB, list1), axis=0)
            print(f"准备写入数据库的数据：{date_to_DB}")
            # # 提取时间戳列
            # timestamps = pd.Series(date_to_DB[:, 0])
            # # 初始化保留标记列表
            # keep = [True]  # 第一个数据必然保留
            # # 遍历时间戳，标记需要保留的时间点
            # last_kept_index = 0  # 上一个被保留的索引
            # for i in range(1, len(timestamps)):
            #     # 当前时间与上一个被保留时间的间隔
            #     time_diff = (timestamps.iloc[i] - timestamps.iloc[last_kept_index]).total_seconds()
            #     if time_diff >= 5:
            #         keep.append(True)  # 间隔大于等于5秒，保留当前行
            #         last_kept_index = i  # 更新上一个保留行索引
            #     else:
            #         keep.append(False)  # 间隔小于5秒，删除当前行
            # # 筛选符合条件的数据
            # date_to_DB = date_to_DB[keep]
            #numpy = np.array(values)=
            # End of data preparation
            #print(numpy)
            #need_other=[0, 12, 8, 20, 21, 1, 19, 3, 7, 13, 15, 14, 23, 24]
            # Column layout: 0 timestamp, 1-24 realtime signals,
            # 25-26 predictions, 27-29 optima, 30-33 assessment metrics
            need_other = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]
            need_predict = [0, 25, 26]
            need_best = [0, 27, 28, 29]
            need_result = [0, 30, 31, 32, 33]
            numpy_other = date_to_DB[:, need_other]
            nmupy_predict = date_to_DB[:, need_predict]
            nmupy_best = date_to_DB[:, need_best]
            nmupy_result = date_to_DB[:, need_result]
            data_written(numpy_other, nmupy_predict, nmupy_best, nmupy_result, logger)
            # Mark the flushed entries so the next flush skips them
            for data in data_to_store:
                data['is_stored'] = True
            self.number = 0
        return


    def get_data_buffer_model(self):
        """Return the live deque (a reference, not a copy)."""
        return self.second_data_buffer_model

    def get_first_n_data_model(self, n=50):
        """Return the oldest ``n`` entries."""
        with self.lock:
            return list(self.second_data_buffer_model)[:n]

    def get_latest_data_model(self, n=1):
        """Return the newest ``n`` entries."""
        with self.lock:
            return list(self.second_data_buffer_model)[-n:]

    def get_opt_pc_compare(self):
        """Regenerate before/after-optimization comparison plots and save arrays.

        Stacks all buffered row-blocks, extracts actual vs. optimal back
        pressure and net power, writes two PNGs under ``pics/`` and the raw
        series to ``ba_arrays.npz``.
        NOTE(review): plot labels/titles are Chinese; without a CJK font
        configured in matplotlib they may render as boxes — confirm font setup.
        """
        data_to_store = [data for data in self.second_data_buffer_model]
        # Data preparation
        print(data_to_store)
        values = []
        for d in data_to_store:
            if 'value' in d:
                values.append(d['value'])
        # print(values)
        # Stack every buffered row-block into one matrix
        data_show = data_to_store[0]['value']
        for i in range(len(values) - 1):
            list1 = data_to_store[i + 1]['value']
            data_show = np.concatenate((data_show, list1), axis=0)
        print(f"准备展示的数据：{data_show}")
        # Columns: timestamp, actual fan speed, actual back pressure, actual net
        # power, optimal fan speed, optimal back pressure, max net power
        need_point = [0, 14, 18, 24, 27, 28, 29]
        # numpy_need = data_show[:, need_point].astype(float)
        numpy_need = data_show[:, need_point]
        numpy_need =numpy_need[:, 1:].astype(float)
        # Actual back pressure
        real_pc = numpy_need[:, 1]
        # Optimal back pressure
        opt_pc = numpy_need[:, 4]
        # Actual net power
        real_netpower = numpy_need[:, 2]
        # Optimal net power
        opt_netpower = numpy_need[:, 5]
        # Plot back-pressure comparison before/after optimization
        save_path = "pics/"
        os.makedirs(save_path, exist_ok=True)
        plt.figure(figsize=(10, 5))
        plt.plot(opt_pc, label="最优背压", color="g")
        plt.plot(real_pc, label="实际背压", color="r", linestyle='--')
        plt.xlabel("时间步", fontsize=10.5)
        plt.ylabel("背压 (KPa)", fontsize=10.5)
        plt.title(f"优化前后背压对比", fontsize=12)
        plt.legend()
        plt.savefig(f"{save_path}/优化前后背压比较.png", dpi=300)
        plt.close()

        plt.figure(figsize=(10, 5))
        plt.plot(opt_netpower, label="最优净功率", color="g")
        plt.plot(real_netpower, label="实际净功率", color="r", linestyle='--')
        plt.xlabel("时间步", fontsize=10.5)
        plt.ylabel("净功率(MW)", fontsize=10.5)
        plt.title(f"优化前后净功率对比", fontsize=12)
        plt.legend()
        plt.savefig(f"{save_path}/优化前后净功率比较.png", dpi=300)
        plt.close()
        np.savez('ba_arrays.npz', opt_pc=opt_pc, real_pc=real_pc, opt_netpower=opt_netpower, real_netpower=real_netpower)
        return

# Level-2 realtime buffer (data alignment)
def Secondary_buffer_pool_for_real_time_data():
    """Return a fresh 20x28 zero matrix for aligning realtime data.

    Fix: the original built the matrix and then discarded it (no return),
    so every call produced None.
    """
    buffer_matrix = np.zeros((20, 28))  # 20 rows x 28 columns, all zeros
    return buffer_matrix

# NumPy 矩阵
#data = np.load("Alldatatest.npy", allow_pickle=True)
#data = np.load("Alldatatestnew.npy", allow_pickle=True)
# 数据预处理---计算风机功耗与净功率
def calculate_power_generation(data):
    """Append fan-power-consumption and net-power columns to the sample matrix.

    Parameters
    ----------
    data : ndarray
        Sample rows. Column 1 is generated power, column 14 fan speed,
        column 16 a per-row multiplier (presumably the number of running
        fans or frequency — TODO confirm against the data source).

    Returns
    -------
    ndarray
        ``data`` with two extra columns appended: fan power consumption and
        net power (generated power minus fan power).

    Fixes: the original rebuilt ``np.array(fan)`` inside the loop on every row
    (O(n^2)) and raised NameError on empty input; this version is vectorized
    and handles zero rows.
    """
    num_rows = data.shape[0]
    Nf0 = 115 / 1000  # rated fan power with unit conversion (as in the original)
    fan0 = 110        # rated fan speed
    # Fan power follows the cube law on the speed ratio, scaled by column 16
    fan_power = ((data[:, 14] / fan0) ** 3 * Nf0 * data[:, 16]).reshape(num_rows, 1)
    with_fan = np.concatenate((data, fan_power), axis=1)
    # Net power = generated power - fan power consumption
    net_power = (data[:, 1] - with_fan[:, -1]).reshape(num_rows, 1)
    return np.array(np.concatenate((with_fan, net_power), axis=1))

last_data_time = np.zeros((1, 1), dtype=np.float32)

def data_written(data, wmodel_outout_data, xmodel_outout_data, ymodel_outout_data, logger):
    """Write one batch of aligned rows to InfluxDB under the global write lock.

    Parameters
    ----------
    data : ndarray
        Realtime rows; column 0 is a local-time timestamp string, columns 1..
        are numeric (cast to float here).
    wmodel_outout_data : ndarray
        Prediction rows (timestamp, predicted back pressure, predicted net power).
    xmodel_outout_data : ndarray
        Optimum rows (timestamp, optimal fan speed / back pressure / net power).
    ymodel_outout_data : ndarray
        Assessment rows (timestamp, MSE, RMSE, MDA, R-squared).
    logger : logger used for progress messages.

    Side effects: rebinds the module-level ``last_data_time`` to the batch's
    first raw timestamp, and writes five measurements point-by-point while
    holding ``rw_lock``'s write side.
    """
    global last_data_time
    # Convert the timestamp column (assumed Asia/Shanghai local time) to UTC
    times = pd.to_datetime(data[:, 0])  # to pandas timestamps
    local_timezone = pytz.timezone("Asia/Shanghai")  # China timezone
    times_utc = times.tz_localize(local_timezone).tz_convert("UTC")  # to UTC

    # Convert to epoch nanoseconds for InfluxDB
    timestamps_ns = (times_utc - pd.Timestamp("1970-01-01", tz="UTC")) // pd.Timedelta("1ns")
    last_data_time = np.full((1,1),data[0, 0])

    # Numeric payload (everything after the timestamp column)
    other_data = data[:, 1:].astype(float)
    rw_lock.acquire_write()  # block readers while writing
    try:
        # Build and write the data points
        # Turbine-system measurement
        for i, timestamp in enumerate(timestamps_ns):
            point = (
                Point("TurbineSystem")  # replace with the actual measurement name
                .tag("host", "server01")
                .field("APowerGeneration", other_data[i, 0])
                .field("BMainSteamFlow", other_data[i, 1])
                .field("CMainSteamTemperature", other_data[i, 2])
                .field("DMainSteamPressure", other_data[i, 3])
                .field("EControlValveOpening", other_data[i, 4])
                .field("FReheatSteamPressure", other_data[i, 5])
                .field("GReheatSteamTemperature", other_data[i, 6])
                .field("HExhaustTemperature", other_data[i, 7])
                .field("IExhaustFlowRate", other_data[i, 8])
                .field("JExhaustEnthalpy", other_data[i, 9])
                .field("KMainFeedwaterFlow", other_data[i, 10])
                # .field("LFanPowerConsumption", other_data[i, 29]) # 旧
                # .field("MNetPower", other_data[i, 30]) # 旧
                .field("LFanPowerConsumption", other_data[i, 22])  # updated column index
                .field("MNetPower", other_data[i, 23])  # updated column index
                .time(timestamp, WritePrecision.NS)
            )
            #print(f'otherdata:{timestamp}-{other_data}')
            #print(f'otherdatasing:{timestamp}-{other_data[i, 0]}')
            write_api.write(bucket=bucket, org=org, record=point)
        logger.info("TurbineSystem Data written to InfluxDB.")
        # Cold-end system measurement
        for i, timestamp in enumerate(timestamps_ns):
            point = (
                Point("ColdSystem")  # replace with the actual measurement name
                .tag("host", "server01")
                .field("ACondensateFlowRate", other_data[i, 11])
                .field("BCondensateTemperature", other_data[i, 12])
                .field("CFanSpeed", other_data[i, 13])
                .field("DFanCurrent", other_data[i, 14])
                .field("EFanNum", other_data[i, 15])
                .field("FFanShaftTemperature", other_data[i, 16])
                .field("GBackPressure", other_data[i, 17])
                .time(timestamp, WritePrecision.NS)
            )
            write_api.write(bucket=bucket, org=org, record=point)
        logger.info("CoolSystem Data written to InfluxDB.")
        # Environmental-conditions measurement
        for i, timestamp in enumerate(timestamps_ns):
            point = (
                Point("EnvironmentalConditions")  # replace with the actual measurement name
                .tag("host", "server01")
                .field("AAmbientTemperature", other_data[i, 18])
                .field("BAmbientWindSpeed", other_data[i, 19])
                .field("CAmbientWindDirection", other_data[i, 20])
                .field("DAtmosphericPressure", other_data[i, 21])
                .time(timestamp, WritePrecision.NS)
            )
            write_api.write(bucket=bucket, org=org, record=point)
        logger.info("EnvironmentalConditions Data written to InfluxDB.")
        # Prediction payload
        other_data = wmodel_outout_data[:, 1:].astype(float)
        # Model prediction measurement
        for i, timestamp in enumerate(timestamps_ns):
            point = (
                Point("WModelAlgorithmOutput")  # replace with the actual measurement name
                .tag("host", "server01")
                .field("APredictedBackPressure", other_data[i, 0])
                .field("BPredictedNetPower", other_data[i, 1])
                .time(timestamp, WritePrecision.NS)
            )
            # print(f'modeloutput:{timestamp}-{other_data[i, 0]}')
            write_api.write(bucket=bucket, org=org, record=point)
        logger.info("ModelAlgorithmOutput Predict Data written to InfluxDB.")
        # Optimum payload
        other_data = xmodel_outout_data[:, 1:].astype(float)
        # Model optimum measurement
        for i, timestamp in enumerate(timestamps_ns):
            point = (
                Point("XModelAlgorithmOutput")  # replace with the actual measurement name
                .tag("host", "server01")
                .field("AOptimalFanSpeed", other_data[i, 0])
                .field("BOptimalBackPressure", other_data[i, 1])
                .field("COptimalNetPower", other_data[i, 2])
                .time(timestamp, WritePrecision.NS)
            )
            write_api.write(bucket=bucket, org=org, record=point)
        logger.info("ModelAlgorithmOutput Best Data written to InfluxDB.")
        # Assessment payload
        other_data = ymodel_outout_data[:, 1:].astype(float)
        # Model assessment measurement
        for i, timestamp in enumerate(timestamps_ns):
            point = (
                Point("YModelAlgorithmOutput")  # replace with the actual measurement name
                .tag("host", "server01")
                .field("AMSE", other_data[i, 0])
                .field("BRMSE", other_data[i, 1])
                .field("CMDA", other_data[i, 2])
                .field("DRSquree", other_data[i, 3])
                .time(timestamp, WritePrecision.NS)
            )
            write_api.write(bucket=bucket, org=org, record=point)
        logger.info("ModelAlgorithmOutput Result Assess Data written to InfluxDB.")
    finally:
        rw_lock.release_write()  # always release the write lock

# Write model-algorithm output data to InfluxDB
def Model_Outout_predict_data(model_outout_data):
    """Write prediction rows (back pressure, net power) to InfluxDB.

    Column 0 of ``model_outout_data`` holds timestamps; the remaining columns
    are cast to float and written point-by-point to "WModelAlgorithmOutput".
    """
    print("model_output_predict_data.shape", model_outout_data.shape)
    # Timestamps -> epoch nanoseconds
    times = pd.to_datetime(model_outout_data[:, 0])
    #print(f"modeltimes{times}")
    timestamps_ns = (times - pd.Timestamp("1970-01-01")) // pd.Timedelta("1ns")
    payload = model_outout_data[:, 1:].astype(float)
    for row, ts in zip(payload, timestamps_ns):
        point = (
            Point("WModelAlgorithmOutput")  # replace with the actual measurement name
            .tag("host", "server01")
            .field("APredictedBackPressure", row[0])
            .field("BPredictedNetPower", row[1])
            .time(ts, WritePrecision.NS)
        )
        write_api.write(bucket=bucket, org=org, record=point)
    print("ModelAlgorithmOutput Predict Data written to InfluxDB.")
def Model_Outout_best_data(model_outout_data):
    """Write optimum rows (fan speed, back pressure, net power) to InfluxDB.

    Column 0 of ``model_outout_data`` holds timestamps; the remaining columns
    are cast to float and written point-by-point to "XModelAlgorithmOutput".
    """
    print("model_output_best_data.shape", model_outout_data.shape)
    # Timestamps -> epoch nanoseconds
    times = pd.to_datetime(model_outout_data[:, 0])
    timestamps_ns = (times - pd.Timestamp("1970-01-01")) // pd.Timedelta("1ns")
    payload = model_outout_data[:, 1:].astype(float)
    for row, ts in zip(payload, timestamps_ns):
        point = (
            Point("XModelAlgorithmOutput")  # replace with the actual measurement name
            .tag("host", "server01")
            .field("AOptimalFanSpeed", row[0])
            .field("BOptimalBackPressure", row[1])
            .field("COptimalNetPower", row[2])
            .time(ts, WritePrecision.NS)
        )
        write_api.write(bucket=bucket, org=org, record=point)
    print("ModelAlgorithmOutput Best Data written to InfluxDB.")
def Model_Outout_result_assess_data(model_outout_data):
    """Write model-assessment rows (MSE, RMSE, MDA, R²) to InfluxDB.

    Column 0 of ``model_outout_data`` holds timestamps; the remaining columns
    are cast to float and written point-by-point to "YModelAlgorithmOutput".
    """
    print("model_output_result_assess_data.shape", model_outout_data.shape)
    # Timestamps -> epoch nanoseconds
    times = pd.to_datetime(model_outout_data[:, 0])
    timestamps_ns = (times - pd.Timestamp("1970-01-01")) // pd.Timedelta("1ns")
    payload = model_outout_data[:, 1:].astype(float)
    for row, ts in zip(payload, timestamps_ns):
        point = (
            Point("YModelAlgorithmOutput")  # replace with the actual measurement name
            .tag("host", "server01")
            .field("AMSE", row[0])
            .field("BRMSE", row[1])
            .field("CMDA", row[2])
            .field("DRSquree", row[3])
            .time(ts, WritePrecision.NS)
        )
        write_api.write(bucket=bucket, org=org, record=point)
    print("ModelAlgorithmOutput Result Assess Data written to InfluxDB.")
# Query data (24h lookback per measurement)
# NOTE(review): these four queries execute at import time and their results are
# never read elsewhere in this module — confirm they are still needed.
# NOTE(review): "ModelAlgorithmOutput" does not match the W/X/Y measurement
# names written above — this filter likely returns nothing.
flux_turbine_query = f'from(bucket:"{bucket}") |> range(start:-24h) |> filter(fn: (r) => r["_measurement"] == "TurbineSystem")'
flux_cold_query = f'from(bucket:"{bucket}") |> range(start:-24h) |> filter(fn: (r) => r["_measurement"] == "ColdSystem")'
flux_environment_query = f'from(bucket:"{bucket}") |> range(start:-24h) |> filter(fn: (r) => r["_measurement"] == "EnvironmentalConditions")'
flux_model_query = f'from(bucket:"{bucket}") |> range(start:-24h) |> filter(fn: (r) => r["_measurement"] == "ModelAlgorithmOutput")'
turbine_results = query_api.query(org=org, query=flux_turbine_query)
cold_results = query_api.query(org=org, query=flux_cold_query)
environment_results = query_api.query(org=org, query=flux_environment_query)
model_results = query_api.query(org=org, query=flux_model_query)

# On-demand query (new version)
def _format_query_rows(rows, need):
    """Cast query rows to an ndarray; keep column 0 (timestamp) untouched when
    requested in ``need``, cast all other values to float."""
    if 0 in need:
        out_matrix = [
            [row[0]] + [float(value) for value in row[1:]]
            for row in rows
        ]
    else:
        out_matrix = [[float(value) for value in row] for row in rows]
    return np.array(out_matrix)


def query_data(need, query_time):
    """Query all measurements in ``bucket`` since ``query_time`` and return the
    columns listed in ``need``.

    Parameters
    ----------
    need : list[int]
        Column indices to keep; index 0 is the timestamp column (transposed
        layout: one Flux table per value column).
    query_time : str
        Flux range start, e.g. "-72h".

    Returns
    -------
    ndarray with the selected columns; a single all-zero row when the query
    returns nothing, so callers always get a stable shape.

    Fixes: the float-casting logic was duplicated verbatim in two branches
    (extracted to _format_query_rows) and the local name ``time`` shadowed the
    imported ``time`` module.
    """
    start_range = query_time  # e.g. "-72h"
    flux_turbine_query = f'from(bucket:"{bucket}") |> range(start:{start_range})'
    rw_lock.acquire_read()  # block writers while reading
    try:
        turbine_result = query_api.query(org=org, query=flux_turbine_query)
    finally:
        rw_lock.release_read()
    # One inner list per Flux table: [iso_time, value] per record
    results = []
    for table in turbine_result:
        table1 = []
        for record in table.records:
            table1.append([record.get_time().isoformat(), record.get_value()])
        results.append(table1)
    result = np.array(results)
    if result.shape[0] == 0:
        # No data in range: synthesize one all-zero row
        return _format_query_rows(np.zeros((1, len(need))), need)
    # result has shape (tables, records, 2): split values and one shared time row,
    # then transpose so rows are records and columns are [time, v1, v2, ...]
    result_value = result[:, :, 1]
    result_time = result[0:1, :, 0]
    selected_columns = np.concatenate((result_time, result_value), axis=0)
    ssdata = np.transpose(selected_columns)
    print('ssdata的维度')
    print(ssdata.shape)
    sssdata = ssdata[:, need]
    # Column meanings (1-based, after the timestamp):
    # 1 condensate flow, 2 condensate temp, 3 fan speed, 4 fan current,
    # 5 fan frequency, 6 fan shaft temp, 7 back pressure,
    # 8 ambient temp, 9 wind speed, 10 wind direction, 11 pressure,
    # 12 power generation, 13 main steam flow, 14 main steam temp,
    # 15 main steam pressure, 16 valve opening, 17 reheat pressure,
    # 18 reheat temp, 19 exhaust temp, 20 exhaust flow, 21 exhaust enthalpy,
    # 22 feedwater flow, 23 fan power consumption, 24 net power
    return _format_query_rows(sssdata, need)

# On-demand query: one (measurement, field) pair per row of query_data
def query_need_data(query_data, query_time):
    """Query each (measurement, field) pair and return the series side by side.

    Parameters
    ----------
    query_data : ndarray
        Rows of [measurement, field] pairs.
    query_time : str
        Flux range start, e.g. "-72h".

    Returns
    -------
    object ndarray whose column 0 is the ISO timestamp of the first series and
    whose remaining columns are the float values of each queried series.
    NOTE(review): assumes every series returns the same number of records —
    confirm; mismatched lengths make the concatenate fail.

    Fix: the original rebuilt ``np.array(...).reshape(...)`` inside the
    innermost record loop on every record (O(n^2)); the arrays are now built
    once per series.
    """
    selected_columns = []
    num_rows = query_data.shape[0]
    start_range = query_time  # avoid shadowing the imported `time` module
    for i in range(num_rows):
        num = query_data[i, :]
        MeasureMent = num[0]
        Field = num[1]
        flux_query = f'from(bucket:"{bucket}") |> range(start:{start_range}) |> filter(fn: (r) => r["_measurement"] == "{MeasureMent}")|> filter(fn: (r) => r["_field"] == "{Field}") '
        query_result = query_api.query(org=org, query=flux_query)
        results = []
        results_time = []
        for table in query_result:
            for record in table.records:
                results_time.append(record.get_time().isoformat())
                results.append(record.get_value())
        # Build the column vectors once after collecting all records
        data_time = np.array(results_time).reshape(-1, 1)
        data = np.array(results).reshape(-1, 1)
        if i == 0:
            # First series carries the timestamp column
            selected_columns = np.concatenate((data_time, data), axis=1)
        else:
            selected_columns = np.concatenate((selected_columns, data), axis=1)
    converted_data = selected_columns.astype(object)
    converted_data[:, 1:] = converted_data[:, 1:].astype(float)
    return converted_data

# Delete data
# On-demand deletion
def delete_need_data(query_data, delete_start_time,delete_stop_time):
    """Delete TurbineSystem points for host server01 in [start, stop).

    NOTE(review): the ``query_data`` parameter is ignored and the
    measurement/host predicate is hard-coded — confirm whether per-measurement
    deletion was intended.
    """
    start = delete_start_time  # e.g. "2024-09-09T00:00:00Z"
    stop = delete_stop_time  # e.g. "2024-09-10T10:20:00Z"
    delete_api.delete(start, stop, '_measurement="TurbineSystem" AND host="server01"', bucket=bucket, org=org)

def Give_Model_Every(query_data):
    """Print the most recent (last) row of the given data matrix."""
    print('最新实时数据')
    last_index = query_data.shape[0] - 1
    print(query_data[last_index, :])
    print("-" * 40)  # visual separator for readability

# Instantiate the module-wide buffer pools (shared across producer threads and
# the data_buffer alignment loop below)
data_buffer_other_pool = DataBufferOtherPool(max_len=1500)
data_buffer_model_pool = DataBufferModelPool(max_len=1500)
data_buffer_pool = SecondDataBufferModelPool(max_len=1500)

def written_data_buffer(data_other):
    """Append one realtime sample to the level-1 'other' buffer pool."""
    data_buffer_other_pool.update_buffer(data_other)

def written_model_data_buffer(data_model):
    """Append one model-output sample to the level-1 model buffer pool."""
    data_buffer_model_pool.update_buffer(data_model)


def data_buffer(logger):
    """Alignment loop (runs forever; intended for a dedicated thread).

    Every 5 seconds: read both level-1 info queues, deduplicate each by
    timestamp keeping the last occurrence, intersect their timestamps, join
    the matching rows side by side, push the joined block into the level-2
    pool, and trim the promoted rows from both level-1 info queues.
    The first two iterations only warm up (``numbuffer > 1`` guard).
    """
    numbuffer = 0
    while True:
        time.sleep(5)
        # time.sleep(60)
        data_buffer_other_buffer = data_buffer_other_pool.get_data_buffer_other()   # read the info queue
        data_buffer_model_buffer = data_buffer_model_pool.get_data_buffer_model()   # read the info queue
        data_buffer_other_new = np.array(data_buffer_other_buffer)
        data_buffer_model_new = np.array(data_buffer_model_buffer)
        logger.info(f"buffer_other数据{data_buffer_other_new}")
        logger.info(f"buffer_model数据{data_buffer_model_new}")
        if numbuffer > 1:
            # Skip the cycle entirely when either side is still empty
            if data_buffer_model_new.shape[0] == 0 or data_buffer_other_new.shape[0] == 0:
                # logger.info(f"data_buffer_model_new.shape[0]{data_buffer_model_new.shape[0]}")
                continue
            # Column 0 of each matrix is the timestamp
            timestamps_other = data_buffer_other_new[:, 0]
            timestamps_model = data_buffer_model_new[:, 0]
            logger.info(f"timestamps_other{timestamps_other}")
            logger.info(f"timestamps_model{timestamps_model}")
            # Handle duplicated timestamps (pre-processing):
            # np.unique yields the unique timestamps, their first index, and counts
            unique_timestamps_other, unique_indices_other, counts_other = np.unique(timestamps_other, return_index=True, return_counts=True)
            unique_timestamps_model, unique_indices_model, counts_model = np.unique(timestamps_model, return_index=True, return_counts=True)
            # Timestamps that occur more than once
            repeated_timestamps_other = unique_timestamps_other[counts_other > 1]
            repeated_timestamps_model = unique_timestamps_model[counts_model > 1]
            # Boolean masks marking the rows to keep
            keep_mask_other = np.zeros(data_buffer_other_new.shape[0], dtype=bool)
            keep_mask_model = np.zeros(data_buffer_model_new.shape[0], dtype=bool)
            # For each repeated timestamp keep only its last row
            for ts in repeated_timestamps_other:
                # All row indices carrying this timestamp
                indices = np.where(timestamps_other == ts)[0]
                # Keep only the last one
                keep_mask_other[indices[-1]] = True
            for ts in repeated_timestamps_model:
                # All row indices carrying this timestamp
                indices = np.where(timestamps_model == ts)[0]
                # Keep only the last one
                keep_mask_model[indices[-1]] = True
            # Unduplicated timestamps are kept as-is
            for i in range(len(unique_timestamps_other)):
                if counts_other[i] == 1:
                    keep_mask_other[unique_indices_other[i]] = True
            for i in range(len(unique_timestamps_model)):
                if counts_model[i] == 1:
                    keep_mask_model[unique_indices_model[i]] = True
            # Apply the masks
            data_buffer_other_bool = data_buffer_other_new[keep_mask_other]
            data_buffer_model_bool = data_buffer_model_new[keep_mask_model]
            timestamps_other_bool = data_buffer_other_bool[:, 0]
            timestamps_model_bool = data_buffer_model_bool[:, 0]
            # Timestamps present on both sides
            common_timestamps = np.intersect1d(timestamps_other_bool, timestamps_model_bool)  # intersection of the two timestamp sets
            logger.info(f"common_timestamps{common_timestamps}")
            # Keep only rows whose timestamp is common to both matrices
            aligned_other = data_buffer_other_bool[np.isin(timestamps_other_bool, common_timestamps)]  # rows matching the common timestamps
            aligned_model = data_buffer_model_bool[np.isin(timestamps_model_bool, common_timestamps)]  # rows matching the common timestamps
            logger.info(f"aligned_other{aligned_other}")
            logger.info(f"aligned_model{aligned_model}")
            # Join side by side (model side drops its timestamp column)
            combined_matrix = np.hstack((aligned_other, aligned_model[:, 1:]))
            delete_number = combined_matrix.shape[0]
            #print("combined_matrix", combined_matrix)
            my_dict = {
                'value': combined_matrix,
                'is_stored': False
            }
            data_buffer_pool.update_buffer(my_dict, logger)
            #if numbuffer==50:
            # 保存到文本文件
            # with open(fr"logs/data{numbuffer}.txt", "w", encoding="utf-8") as file:
            #     for item in data_buffer_pool.second_data_buffer_model:
            #         file.write(f"{item}\n")
            logger.info("写入二级缓存成功")
            data_buffer_other_pool.delete_buffer(delete_number, logger)
            data_buffer_model_pool.delete_buffer(delete_number, logger)
            logger.info("从一级缓冲池中删除已写入数据")
            #time.sleep(1)
        numbuffer = numbuffer+1

def data_time():
    """Return the current Asia/Shanghai time converted to naive UTC as a
    pandas Timestamp, truncated to whole seconds."""
    # Current local time without sub-second precision
    now_local = datetime.now().replace(microsecond=0)
    shanghai = pytz.timezone("Asia/Shanghai")
    aware_local = shanghai.localize(now_local)
    as_utc = aware_local.astimezone(pytz.utc)
    # Strip the timezone so the result is naive UTC
    print(as_utc.tzinfo)
    naive_utc = as_utc.replace(tzinfo=None)
    return pd.Timestamp(naive_utc)
def Real_time_N_data(data_num_n):
    """Fetch the latest ``data_num_n`` rows from the second-level buffer and
    return them stacked into one matrix for the frontend.

    If the buffer is empty, a single placeholder row (current UTC timestamp
    followed by 33 zero columns) is fabricated so callers always receive a
    well-formed matrix.

    Parameters
    ----------
    data_num_n : int
        Number of most-recent entries to fetch from ``data_buffer_pool``.

    Returns
    -------
    numpy.ndarray
        All fetched row matrices concatenated along axis 0.
    """
    entries = data_buffer_pool.get_latest_data_model(data_num_n)
    if not entries:
        # Buffer empty: build one zero row stamped with the current time.
        formatted_time = data_time()
        time_cell = np.full((1, 1), formatted_time)
        zero_row = np.zeros((33,))[np.newaxis, :]
        placeholder = np.concatenate((np.transpose(time_cell), zero_row), axis=1)
        entries = [{'value': np.array(placeholder, dtype=object), 'is_stored': False}]

    values = [d['value'] for d in entries if 'value' in d]

    # BUG FIX: the original returned from INSIDE the loop (so only the first
    # two entries were ever combined) and fell off the end, returning None,
    # whenever exactly one entry was present.
    date_to_vue = values[0]
    for extra in values[1:]:
        date_to_vue = np.concatenate((date_to_vue, extra), axis=0)
    return date_to_vue

def Real_time_one_data():
    """Return the newest row matrix from the second-level buffer.

    Falls back to a zero-filled ``(1, 34)`` float32 matrix when the buffer
    holds no entries yet, so callers always receive a matrix of the same
    width (time column plus 33 value columns).
    """
    newest = data_buffer_pool.get_latest_data_model(n=1)
    if newest:
        # Buffer has data: hand back the stored matrix unchanged.
        return newest[0]['value']
    # No data yet -> placeholder row of zeros.
    return np.zeros((1, 34)).astype(np.float32)

def Real_time_from_other_buffer_to_model_for_predict_model(logger):
    """Pull the newest row from the first-level buffer and slice out the
    feature columns consumed by the prediction model.

    Parameters
    ----------
    logger : logging.Logger
        Used to trace the raw row that was fetched.

    Returns
    -------
    tuple
        ``(need_data_numpy, need_time, real_time_all_data)`` — the model's
        feature columns, the timestamp column, and the full row matrix.
    """
    row = np.array(data_buffer_other_pool.get_latest_data_other(n=1))
    logger.info(f"最新的实时数据：{row}")
    # Promote a flat vector to a single-row matrix.
    if row.ndim == 1:
        row = row[np.newaxis, :]
    if row.size == 0:
        # Buffer empty: substitute a zero row stamped with the current time.
        stamp = np.full((1, 1), data_time())
        padding = np.zeros((24,))[np.newaxis, :]
        row = np.concatenate((np.transpose(stamp), padding), axis=1)
    timestamps = row[:, 0]
    # Hand-picked column order expected by the prediction model.
    feature_cols = [0, 12, 8, 20, 21, 1, 19, 3, 7, 13, 15, 14, 23, 24]
    return row[:, feature_cols], timestamps, row


def Real_time_from_other_buffer_to_alg(logger):
    """Fetch the newest first-level-buffer row for the optimisation algorithm.

    Parameters
    ----------
    logger : logging.Logger
        Used to trace the raw row that was fetched.

    Returns
    -------
    tuple
        ``(need_time, row)`` — the timestamp column and the full row matrix.
    """
    fetched = data_buffer_other_pool.get_latest_data_other(n=1)
    row = np.array(fetched)
    logger.info(f"最新的实时数据：{row}")
    # Promote a flat vector to a single-row matrix.
    if row.ndim == 1:
        row = row[np.newaxis, :]
    if row.size == 0:
        # No data available yet: build a zero row carrying the current time.
        stamp = np.full((1, 1), data_time())
        padding = np.zeros((24,))[np.newaxis, :]
        row = np.concatenate((np.transpose(stamp), padding), axis=1)
    return row[:, 0], row


def History_from_other_buffer_to_alg(logger, n=50):
    """Fetch the newest row plus up to ``n`` historical rows for the algorithm.

    Parameters
    ----------
    logger : logging.Logger
        Present for interface symmetry with the other fetch helpers.
    n : int, optional
        Number of historical rows requested (default 50).

    Returns
    -------
    tuple
        ``(need_time, latest_row, history, enough_his)``; ``history`` is the
        sentinel ``False`` when the buffer does not yet hold ``n`` rows.
    """
    latest_one, latest_n, enough_his = data_buffer_other_pool.get_latest_data_other_muti(n=n)
    latest_row = np.array(latest_one)
    history = np.array(latest_n)
    # Promote a flat vector to a single-row matrix.
    if latest_row.ndim == 1:
        latest_row = latest_row[np.newaxis, :]
    if latest_row.size == 0:
        # Empty buffer: substitute a zero row stamped with the current time.
        stamp = np.full((1, 1), data_time())
        padding = np.zeros((24,))[np.newaxis, :]
        latest_row = np.concatenate((np.transpose(stamp), padding), axis=1)
    if not enough_his:
        # Not enough history accumulated yet; signal via sentinel value.
        history = False
    # print(f'返回给opt2的历史数据{history}')
    return latest_row[:, 0], latest_row, history, enough_his

# Close the InfluxDB client.
# NOTE(review): this statement runs at module IMPORT time, right after the
# definitions above, closing the shared `client` (and the write/query/delete
# APIs created from it in the file header). If the helper functions in this
# module are meant to be called after import, they would then operate on a
# closed client — confirm this is intentional; otherwise move the close to
# application shutdown.
client.close()