import pymysql
import csv
import numpy as np
from scipy.stats import skew, kurtosis
import pandas as pd
import ast  # 用于安全解析字符串格式的数组

# Connection settings for the local MySQL instance holding the monitor data.
DB_CONFIG = {
    "host": "127.0.0.1",
    "port": 3306,
    "user": "root",
    "password": "Lixy050826.",  # NOTE(review): hardcoded credential — move to an env var / secrets store
    "db": "IndustrialVibrationMonitor",
    "charset": "utf8mb4"  # full UTF-8 so multilingual text round-trips safely
}


def fetch_all_bearing_data():
    """Export joined vibration + audio feature rows for every bearing to CSV.

    Reads every row of app01_vibrationdata joined with app01_vibrationaudio,
    computes time-domain statistics over each row's audio samples, labels the
    row (bearing_id 1 and 3 -> 1 = faulty, others -> 0 = normal), and writes
    the combined feature vector to 'all_bearings_data1.csv'.

    Rows whose audio payload cannot be parsed, or is empty, are reported and
    skipped. Errors are printed rather than raised (best-effort script style).
    """
    conn = None
    try:
        conn = pymysql.connect(**DB_CONFIG)

        # Join vibration RMS readings with their recorded audio samples.
        sql = """
        SELECT 
            v.bearing_id,
            v.x_acc_rms, v.y_acc_rms, v.z_acc_rms,
            v.x_v_rms, v.y_v_rms, v.z_v_rms,
            a.audio_samples
        FROM app01_vibrationdata AS v
        INNER JOIN app01_vibrationaudio AS a 
        ON v.id = a.vibration_data_id
        """

        with conn.cursor() as cursor:
            cursor.execute(sql)
            results = cursor.fetchall()

        # CSV header; bearing_id is deliberately excluded as a feature.
        headers = [
            'x_acc_rms', 'y_acc_rms', 'z_acc_rms',
            'x_v_rms', 'y_v_rms', 'z_v_rms',
            'mean', 'std', 'max', 'min',
            'peak_to_peak', 'rms', 'mean_abs', 'skewness',
            'kurtosis', 'energy', 'entropy', 'label'
        ]

        written = 0
        with open('all_bearings_data1.csv', 'w', newline='', encoding='utf-8') as f:
            writer = csv.DictWriter(f, fieldnames=headers)
            writer.writeheader()

            for row in results:
                bearing_id = row[0]
                # Label: bearings 1 and 3 are known-faulty (1), others normal (0).
                label = 1 if bearing_id in (1, 3) else 0

                record = {
                    'x_acc_rms': row[1],
                    'y_acc_rms': row[2],
                    'z_acc_rms': row[3],
                    'x_v_rms': row[4],
                    'y_v_rms': row[5],
                    'z_v_rms': row[6],
                    'label': label,
                }

                try:
                    # Audio samples are stored as a string-encoded Python list;
                    # literal_eval parses it without executing arbitrary code.
                    audio_samples = ast.literal_eval(row[7])
                except (ValueError, SyntaxError, TypeError) as e:
                    print(f"音频数据解析失败: {row[7]}, 错误: {str(e)}")
                    continue

                if not audio_samples:
                    continue  # no audio -> feature row would be incomplete

                record.update(_audio_features(audio_samples))
                writer.writerow(record)
                written += 1

        # Report rows actually written: parse failures and empty-audio rows
        # are skipped, so this can be smaller than len(results).
        print(f"成功保存 {written} 条数据到 all_bearings_data1.csv")

    except Exception as e:
        print(f"操作失败: {str(e)}")
    finally:
        # BUG FIX: the original declared `global conn` and then checked
        # "'conn' in locals()", which is never true for a global — the
        # connection was never closed. Track it locally and close it here.
        if conn is not None and conn.open:
            conn.close()


def _audio_features(samples):
    """Return a dict of time-domain statistics for one audio sample sequence."""
    arr = np.asarray(samples, dtype=float)
    squared = np.square(arr)
    return {
        'mean': np.mean(arr),
        'std': np.std(arr),
        'max': np.max(arr),
        'min': np.min(arr),
        'peak_to_peak': np.ptp(arr),
        'rms': np.sqrt(np.mean(squared)),
        'mean_abs': np.mean(np.abs(arr)),
        'skewness': skew(arr),
        'kurtosis': kurtosis(arr),
        'energy': np.sum(squared),
        # Energy-weighted entropy; the 1e-10 offset avoids log(0).
        'entropy': -np.sum(squared * np.log(squared + 1e-10)),
    }


def shuffle_dataset(input_file, output_file):
    """Randomly reorder the rows of *input_file* and save to *output_file*.

    The shuffle uses a fixed seed (42), so the output order is reproducible
    across runs. Failures are printed rather than raised.
    """
    try:
        frame = pd.read_csv(input_file)

        # Sampling the full frame (frac=1) with a fixed seed performs the
        # shuffle; the old index is dropped so rows renumber from zero.
        mixed = frame.sample(frac=1, random_state=42)
        mixed = mixed.reset_index(drop=True)

        mixed.to_csv(output_file, index=False)
        print(f"数据集已打乱，总样本数：{len(mixed)}，保存为 {output_file}")

    except Exception as e:
        print(f"打乱数据集失败: {str(e)}")


if __name__ == "__main__":
    # 获取所有bearing_id的数据
    fetch_all_bearing_data()

    # 打乱数据集
    shuffle_dataset(
        'all_bearings_data1.csv',
        'shuffled_combined_dataset1.csv'
    )