import sys
import pymysql
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
import diagnosis_pca as pca
import joblib

def query_data(sql_query, params=None):
    """Run a query against the ``visual_sys`` database and return a DataFrame.

    Parameters
    ----------
    sql_query : str
        SQL statement to execute. May contain ``%s`` placeholders.
    params : sequence, optional
        Values bound to the placeholders by the driver (preferred over
        string-building the query, which risks SQL injection).

    Returns
    -------
    pandas.DataFrame
        Query result with the ``sensor_data_id`` and ``sensor_insert_time``
        columns dropped (they are bookkeeping fields, not features).
    """
    # NOTE(review): credentials are hard-coded in plaintext — move to a
    # config file or environment variables.
    conn = pymysql.connect(
        host='192.168.254.132',
        port=3306,
        user='root',
        password='root',
        db='visual_sys',
        charset='utf8')
    try:
        # `with` guarantees the cursor is closed even on error; the previous
        # version could raise NameError in `finally` if cursor creation failed.
        with conn.cursor() as cursor:
            cursor.execute(sql_query, params)
            result = cursor.fetchall()
            # Column names come from the cursor's result-set description.
            columns = [column[0] for column in cursor.description]
        df = pd.DataFrame(result, columns=columns)
        df = df.drop(['sensor_data_id', 'sensor_insert_time'], axis=1)
        return df
    finally:
        # Always release the connection, even if the query fails.
        conn.close()


if __name__ == "__main__":
    # CLI usage: <script> <row_count> <row_offset> <work_dir>
    word_path = sys.argv[3]

    # KPCA hyper-parameters: variance ratio kept and confidence level
    # for the control limits.
    my_ratio = 0.85
    my_confidence = 0.99

    # Coerce CLI arguments to int BEFORE building the SQL statement: this
    # both validates the input and prevents SQL injection via argv (the
    # original code concatenated the raw strings into the query).
    db_data_size = int(sys.argv[1])
    db_data_index = int(sys.argv[2])
    sql_query = "SELECT * FROM train_data LIMIT %d OFFSET %d" % (
        db_data_size, db_data_index)
    db_data = query_data(sql_query)

    # Fit the standardizer on the training data and persist it so the
    # same scaling can be applied at inference time.
    scaler = StandardScaler().fit(db_data)
    joblib.dump(scaler, word_path + 'model/scaler.joblib')
    # Standardized training matrix.
    Xtrain_nor = scaler.transform(db_data)

    # T^2 / SPE control limits plus the projection matrices of the KPCA model.
    t2_limit, spe_limit, model_p, model_v = pca.kpca_control_limit(
        Xtrain_nor, ratio=my_ratio, confidence=my_confidence)

    # Persist the model weights and the two control limits.
    np.save(word_path + "model/model_p", model_p)
    np.save(word_path + "model/model_v", model_v)
    with open(word_path + 'model/value.txt', 'w') as file:
        # str() works for both numpy scalars and plain Python floats;
        # the previous `.astype(str)` broke on non-numpy values.
        file.write(str(t2_limit) + "\n")
        file.write(str(spe_limit))
