import pymysql
import csv
import numpy as np
from scipy.stats import skew, kurtosis
import random
from datetime import datetime
import pandas as pd
import ast  # 用于安全解析字符串格式的数组

# Database connection settings (passed straight to pymysql.connect(**DB_CONFIG)).
DB_CONFIG = {
    "host": "127.0.0.1",
    "port": 3306,
    "user": "root",
    "password": "Lixy050826.",  # NOTE(review): hard-coded credential — consider loading from env/secret store
    "db": "IndustrialVibrationMonitor",
    "charset": "utf8mb4"
}

import numpy as np
from scipy.signal import lfilter


def simple_audio_filter(audio_data, fs=16000, cutoff=100):
    """
    极简音频滤波方案（适合实时处理）
    功能：去除直流分量 + 一阶高通滤波
    参数：
        audio_data : 输入音频数组
        fs : 采样率 (Hz)
        cutoff : 截止频率 (Hz)
    返回：
        滤波后的数据（长度不变）
    """
    # 转换为float32防止溢出
    audio_data = np.array(audio_data, dtype=np.float32)

    # 1. 去除直流分量
    audio_data = audio_data - np.mean(audio_data)

    # 2. 一阶IIR高通滤波（计算量极低）
    alpha = np.exp(-2 * np.pi * cutoff / fs)  # 滤波系数计算
    b = [1, -1]  # 分子系数
    a = [1, -alpha]  # 分母系数

    # 应用滤波（保持零相位特性）
    filtered_data = lfilter(b, a, audio_data)

    return filtered_data


def extract_time_features(audio_data):
    """Compute 11 core time-domain features from an audio signal.

    The signal is first DC-removed and high-pass filtered via
    simple_audio_filter; all statistics are taken over the filtered samples.

    Args:
        audio_data: raw audio samples (array-like).

    Returns:
        dict mapping feature name -> float value.
    """
    signal = simple_audio_filter(audio_data)
    squared = np.square(signal)  # reused by rms / energy / entropy

    return {
        'mean': float(np.mean(signal)),
        'std': float(np.std(signal)),
        'max': float(np.max(signal)),
        'min': float(np.min(signal)),
        'peak_to_peak': float(np.ptp(signal)),
        'rms': float(np.sqrt(np.mean(squared))),
        'mean_abs': float(np.mean(np.abs(signal))),
        'skewness': float(skew(signal)),
        'kurtosis': float(kurtosis(signal)),
        'energy': float(np.sum(squared)),
        # Energy-weighted log measure; the 1e-10 term guards against log(0).
        'entropy': float(-np.sum(squared * np.log(squared + 1e-10))),
    }


def fetch_and_save_data(start_time, end_time, filename, label):
    """Export joined vibration + audio rows from MySQL into ``{filename}.csv``.

    Each output row holds six RMS sensor columns, eleven time-domain audio
    features, and the class label. Rows whose audio payload cannot be parsed
    are skipped with a warning.

    Args:
        start_time: window start, "HH:MM:SS" (date fixed to 2025-04-24).
        end_time: window end, "HH:MM:SS" (date fixed to 2025-04-24).
        filename: output CSV basename; the file is written as f"{filename}.csv".
        label: class label stored in every row (e.g. 0 = normal, 1 = faulty).
    """
    conn = None
    try:
        conn = pymysql.connect(**DB_CONFIG)

        # Bind the time bounds as query parameters instead of interpolating
        # them into the SQL string (avoids injection / quoting issues).
        sql = """
        SELECT 
            v.x_acc_rms, v.y_acc_rms, v.z_acc_rms,
            v.x_v_rms, v.y_v_rms, v.z_v_rms,
            a.audio_samples
        FROM app01_vibrationdata AS v
        INNER JOIN app01_vibrationaudio AS a 
        ON v.id = a.vibration_data_id
        WHERE v.timestamp BETWEEN %s AND %s
        """
        with conn.cursor() as cursor:
            cursor.execute(
                sql,
                (f"2025-04-24 {start_time}", f"2025-04-24 {end_time}"),
            )
            results = cursor.fetchall()

        headers = [
            'x_acc_rms', 'y_acc_rms', 'z_acc_rms',
            'x_v_rms', 'y_v_rms', 'z_v_rms',
            'mean', 'std', 'max', 'min',
            'peak_to_peak', 'rms', 'mean_abs', 'skewness',
            'kurtosis', 'energy', 'entropy', 'label'
        ]

        saved = 0
        # Was hard-coded to '(unknown).csv'; the filename parameter was
        # silently ignored, so downstream combine_and_shuffle() never found
        # 'bad.csv' / 'normal.csv'. Fixed to honor the parameter.
        with open(f'{filename}.csv', 'w', newline='', encoding='utf-8') as f:
            writer = csv.DictWriter(f, fieldnames=headers)
            writer.writeheader()

            for row in results:
                sensor_data = {
                    'x_acc_rms': row[0],
                    'y_acc_rms': row[1],
                    'z_acc_rms': row[2],
                    'x_v_rms': row[3],
                    'y_v_rms': row[4],
                    'z_v_rms': row[5],
                }

                # audio_samples is stored as a stringified Python list;
                # literal_eval parses it without executing arbitrary code.
                try:
                    audio_samples = ast.literal_eval(row[6])
                    audio_features = extract_time_features(np.array(audio_samples))
                except (ValueError, SyntaxError, TypeError):
                    print(f"音频数据解析失败: {row[6]}")
                    continue

                writer.writerow({**sensor_data, **audio_features, 'label': label})
                saved += 1

        # Report the rows actually written, not the raw fetch count
        # (some rows may have been skipped above).
        print(f"成功保存 {saved} 条数据到 {filename}.csv")

    except Exception as e:
        print(f"操作失败: {str(e)}")
    finally:
        # Only close a connection that was actually opened.
        if conn is not None and conn.open:
            conn.close()


def combine_and_shuffle(file1, file2, output_file):
    """Concatenate two CSV datasets, shuffle the rows, and save the result.

    Args:
        file1: path to the first input CSV.
        file2: path to the second input CSV.
        output_file: path where the merged, shuffled CSV is written.
    """
    try:
        # Load both datasets and stack them vertically.
        frames = [pd.read_csv(path) for path in (file1, file2)]
        merged = pd.concat(frames, axis=0)

        # Deterministic shuffle (fixed seed) with a fresh 0..N-1 index.
        merged = merged.sample(frac=1, random_state=42).reset_index(drop=True)

        merged.to_csv(output_file, index=False)
        print(f"合并完成，总样本数：{len(merged)}，保存为 {output_file}")

    except Exception as e:
        print(f"合并失败: {str(e)}")


if __name__ == "__main__":
    # Export each labeled time window, then merge everything into one
    # shuffled training dataset.
    export_jobs = [
        ("13:13:57", "13:17:09", "bad", 1),     # faulty-state samples (label 1)
        ("13:17:17", "13:20:20", "normal", 0),  # normal-state samples (label 0)
    ]
    for start, end, name, tag in export_jobs:
        fetch_and_save_data(
            start_time=start,
            end_time=end,
            filename=name,
            label=tag
        )

    combine_and_shuffle(
        'bad.csv',
        'normal.csv',
        'combined_dataset.csv'
    )

