#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
import seaborn as sns
import logging as log
from matplotlib import pyplot as plt
from statsmodels.tsa.seasonal import seasonal_decompose as sd

import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential,load_model
from tensorflow.keras.layers import LSTM, Dense

from service import DevDataPorcessingService as ddpSer
from utils.ConfigInfo import ConfigLoader

# Load the application configuration once at import time; the resulting `conf`
# dict is shared by every function below (file paths for test data and models).
ConfigLoader.load_config()
conf = ConfigLoader.get_config()

def proc_dev_data(devs=None):
    """Run the per-device data-processing pipeline for each device id.

    Args:
        devs: iterable of device ids to process. Defaults to [267], the
            value previously hard-coded here (other historical batches,
            e.g. 68-198, can now be passed in explicitly).
    """
    if devs is None:
        devs = [267]
    ddp_ser = ddpSer.DevDataProcessingService()
    for dev_id in devs:
        ddp_ser.pro_one_dev_data(dev_id)
def matrix():
    """Print feature pairs whose Pearson correlation magnitude is >= 0.8.

    Loads the multi-dimension device data, computes the Pearson correlation
    matrix over the environment features plus every pressure sensor
    (ya_li_68 .. ya_li_198), and prints each strongly-correlated pair from
    the upper triangle as a dict {'var1', 'var2', 'correlation'}.
    """
    # Load the data
    ddp_ser = ddpSer.DevDataProcessingService()
    mult_dev_data = ddp_ser.query_mult_dimension_dev_data()
    columns = ddp_ser.get_mult_dev_columns()
    data = pd.DataFrame(mult_dev_data, columns=columns)

    # Convert the 'data_time' column to datetime (not used by .corr() itself,
    # kept for parity with the other loaders in this file).
    data['data_time'] = pd.to_datetime(data['data_time'])

    # Environment features plus the contiguous pressure columns ya_li_68..198.
    feature_cols = (['jia_wan', 'wen_du', 'yi_yang', 'feng_su', 'su_du']
                    + [f'ya_li_{i}' for i in range(68, 199)])

    # Pearson correlation coefficient matrix
    correlation_matrix = data[feature_cols].corr()

    # Collect upper-triangle pairs with |r| >= threshold (matrix is symmetric,
    # so the lower triangle and the diagonal are skipped).
    threshold = 0.8
    strong_correlations = []
    size = len(correlation_matrix.columns)
    for i in range(size):
        for j in range(i + 1, size):
            value = correlation_matrix.iloc[i, j]
            if abs(value) >= threshold:
                strong_correlations.append({
                    'var1': correlation_matrix.index[i],
                    'var2': correlation_matrix.columns[j],
                    'correlation': float(round(value, 2)),
                })

    # Print the result
    for item in strong_correlations:
        print(item)

def matrix_day():
    """Print strongly-correlated feature pairs (|Pearson r| >= 0.8) per day.

    Groups the multi-dimension device data by calendar date and computes the
    correlation matrix for EACH day's rows separately, printing the strong
    pairs between the begin/end markers for that date.
    """
    # Load the data
    ddp_ser = ddpSer.DevDataProcessingService()
    mult_dev_data = ddp_ser.query_mult_dimension_dev_data()
    columns = ddp_ser.get_mult_dev_columns()
    data = pd.DataFrame(mult_dev_data, columns=columns)

    # Convert the 'data_time' column to datetime
    data['data_time'] = pd.to_datetime(data['data_time'])

    # Environment features plus the contiguous pressure columns ya_li_68..198.
    feature_cols = (['jia_wan', 'wen_du', 'yi_yang', 'feng_su', 'su_du']
                    + [f'ya_li_{i}' for i in range(68, 199)])

    # Group by calendar date
    grouped = data.groupby(data['data_time'].dt.date)

    # Process each day's data
    for date, group_data in grouped:
        print(f"Processing data for date: {date},begin")

        # BUG FIX: the correlation must be computed on this day's rows
        # (group_data), not on the full `data` frame — the original printed
        # identical whole-dataset results for every date.
        correlation_matrix = group_data[feature_cols].corr()

        # Collect upper-triangle pairs with |r| >= threshold.
        threshold = 0.8
        strong_correlations = []
        size = len(correlation_matrix.columns)
        for i in range(size):
            for j in range(i + 1, size):
                value = correlation_matrix.iloc[i, j]
                if abs(value) >= threshold:
                    strong_correlations.append({
                        'var1': correlation_matrix.index[i],
                        'var2': correlation_matrix.columns[j],
                        'correlation': float(round(value, 2)),
                    })

        # Print the result
        for item in strong_correlations:
            print(item)

        print(f"Processing data for date: {date},end")

def plot_data():
    """Plot min-max-normalized line charts of selected device series.

    Loads the multi-dimension device data, restricts it to a fixed time
    window, normalizes each selected column to [0, 1] independently, and
    renders one line per column with Seaborn.
    """
    # Load the data
    ddp_ser = ddpSer.DevDataProcessingService()
    mult_dev_data = ddp_ser.query_mult_dimension_dev_data()
    columns = ddp_ser.get_mult_dev_columns()
    data = pd.DataFrame(mult_dev_data, columns=columns)

    # Chinese font support and correct minus-sign rendering in matplotlib.
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False

    # Columns (dimensions) to plot.
    columns_to_plot = ['ya_li_70', 'xing_cheng_267']

    # Restrict to the analysis window. Parse timestamps first so the range
    # filter compares datetimes rather than raw strings.
    data['data_time'] = pd.to_datetime(data['data_time'])
    start_time = '2024-11-09 00:00:00'
    end_time = '2024-12-11 23:59:59'
    filtered_data = data[(data['data_time'] >= start_time) & (data['data_time'] <= end_time)]

    # Step between displayed x-axis ticks (in unique timestamps).
    n = 12 * 3  # adjust as needed

    # Reshape to long format for Seaborn (one row per timestamp/variable pair).
    data_melted = filtered_data[['data_time'] + columns_to_plot].melt(
        id_vars='data_time', value_vars=columns_to_plot,
        var_name='variable', value_name='value')

    # Per-variable min-max scaling; a constant series would divide by zero in
    # the naive formula, so map it to 0.0 instead.
    def min_max_normalize(group):
        lo = group['value'].min()
        span = group['value'].max() - lo
        if span == 0:
            group['value_normalized'] = 0.0
        else:
            group['value_normalized'] = (group['value'] - lo) / span
        return group

    data_normalized = data_melted.groupby('variable', group_keys=False).apply(min_max_normalize)

    # Draw one line per variable.
    plt.figure(figsize=(12, 6))
    sns.lineplot(data=data_normalized, x='data_time', y='value_normalized', hue='variable')

    # Tick every n-th UNIQUE timestamp — the melted frame repeats each
    # timestamp once per variable, which previously produced duplicate ticks.
    unique_times = data_normalized['data_time'].drop_duplicates().sort_values()
    xticks_values = unique_times[::n]
    xticks_labels = xticks_values.dt.strftime('%Y-%m-%d %H:%M')
    plt.xticks(ticks=xticks_values, labels=xticks_labels, rotation=45)

    plt.xlabel('时间/样本点')
    plt.ylabel('值')
    plt.title('多维数据折线图')
    plt.legend()
    plt.grid(True)
    plt.tight_layout()
    plt.show()

def time_stl():
    """Decompose one pressure series into trend/seasonal/residual and plot it.

    Reads the test CSV, restricts it to a fixed date range, runs a
    multiplicative seasonal decomposition on 'ya_li_70', writes the four
    components to a CSV, and shows them in a 4-row figure.
    """
    # NOTE(review): the '.cvs' extension looks like a typo for '.csv', but it
    # must match the actual file on disk — confirm before renaming.
    data = pd.read_csv(rf'{conf["path"]["test_data"]}\测试数据250708.01.cvs')

    # Parse timestamps BEFORE filtering so the range comparison is on
    # datetimes instead of raw strings.
    data['data_time'] = pd.to_datetime(data['data_time'])

    start_time = '2024-12-23 00:00:00'
    end_time = '2024-12-26 23:59:59'
    mask = (data['data_time'] >= start_time) & (data['data_time'] <= end_time)
    # .copy() so the set_index below operates on an independent frame and
    # cannot raise SettingWithCopyWarning.
    filtered_data = data[mask].copy()
    filtered_data.set_index('data_time', inplace=True)

    # Target column to decompose.
    target_column = 'ya_li_70'
    time_series = filtered_data[target_column]

    # Multiplicative decomposition with a period of 3 samples.
    # NOTE(review): a daily cycle at hourly sampling would need period=24 —
    # confirm the intended seasonality.
    result = sd(time_series, model='multiplicative', period=3)

    # Collect the components into one frame for export.
    decomposed_data = pd.DataFrame({
        'Observed': result.observed,
        'Trend': result.trend,
        'Seasonal': result.seasonal,
        'Residual': result.resid
    })

    # Save to CSV
    output_path = conf['path']['test_data'] + '/time_series_decomposed_2024_12_23_26.csv'
    decomposed_data.to_csv(output_path, index=True)

    print(f"分解结果已保存至: {output_path}")

    # Plot manually (instead of result.plot()) to keep the time index.
    plt.figure(figsize=(12, 8))
    panels = [
        (result.observed, 'Observed'),
        (result.trend, 'Trend'),
        (result.seasonal, 'Seasonal'),
        (result.resid, 'Residual'),
    ]
    for idx, (series, title) in enumerate(panels, start=1):
        plt.subplot(4, 1, idx)
        plt.plot(series)
        plt.title(title)
        plt.grid(True)

    plt.tight_layout()
    plt.show()

def __opt_data(window_size=108, threshold=0.5):
    """Build labelled sliding windows from the decomposed residuals and split them.

    A window is labelled anomalous (1) when the standard deviation of its
    residuals exceeds `threshold`, otherwise normal (0).

    Args:
        window_size: number of consecutive samples per input window
            (default 108, the value previously hard-coded here).
        threshold: std-dev cutoff for the anomaly label.

    Returns:
        (x_train, x_test, y_train, y_test) — chronological 80/20 split;
        X has shape (samples, window_size, 1).
    """
    # Load the decomposition output produced by time_stl().
    data = pd.read_csv(conf['path']['test_data'] + 'time_series_decomposed_2024_12_21_25.csv')
    data['data_time'] = pd.to_datetime(data['data_time'])
    data.set_index('data_time', inplace=True)

    # Only the Residual component is used as a feature.
    residual = data['Residual'].values

    x, y = [], []
    for i in range(len(residual) - window_size):
        window = residual[i:i + window_size]
        x.append(window.reshape(-1, 1))  # (window_size, 1): one feature
        # A volatile residual window is treated as anomalous.
        y.append(1 if np.std(window) > threshold else 0)
    x = np.array(x)
    y = np.array(y)

    # shuffle=False keeps the split chronological for time-series data.
    return train_test_split(x, y, test_size=0.2, shuffle=False)

def create_lstm_model():
    """Train a binary LSTM anomaly classifier on residual windows and save it.

    Fits a single-layer LSTM with a sigmoid output on the windows produced by
    __opt_data(), saves the model to the configured models directory, and runs
    one sanity-check prediction on the first test window.
    """
    # Must match the window length used inside __opt_data().
    window_size = 108
    x_train, x_test, y_train, y_test = __opt_data()

    # One LSTM layer feeding a sigmoid unit → binary normal/anomaly output.
    model = Sequential()
    model.add(LSTM(64, input_shape=(window_size, 1)))  # single feature: Residual
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

    # Train and persist for later use by use_lstm_model().
    model.fit(x_train, y_train, epochs=10, batch_size=32, validation_data=(x_test, y_test))
    model.save(rf'{conf["path"]["models"]}/my_lstm_model.h5')

    # Sanity-check inference on the first held-out window.
    sample = x_test[0].reshape(1, window_size, 1)
    prediction = model.predict(sample)
    verdict = "异常" if prediction[0] > 0.5 else "正常"
    print("预测结果:", verdict)

def use_lstm_model():
    """Load the saved LSTM classifier and predict on one held-out window."""
    # Must match the window length used inside __opt_data().
    window_size = 108
    x_train, x_test, y_train, y_test = __opt_data()

    # Restore the model persisted by create_lstm_model().
    model = load_model(conf['path']['models'] + '/my_lstm_model.h5')

    # Run inference on the first held-out window.
    sample = x_test[0].reshape(1, window_size, 1)
    print(rf'数据:{{{sample}}}')
    prediction = model.predict(sample)
    verdict = "异常" if prediction[0] > 0.5 else "正常"
    print("预测结果:", verdict)


if __name__ == "__main__":
    # Program entry point.
    # Author: kindey, 2025/6/19
    # Quieten matplotlib's verbose DEBUG/INFO logging.
    log.getLogger('matplotlib').setLevel(log.WARNING)
    plot_data()
