import requests
import psycopg2
import threading
import time
import datetime
import logging
from config import *

# Configure root logging: timestamp, level, and OS thread id on every record
# (thread id identifies which scheduled collection cycle emitted the line).
logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)s] [Thread-%(thread)d] %(message)s')
logger = logging.getLogger()

'''
CREATE TABLE public.dcmi_power_consumption_watts (
	"instance" varchar(64) NULL,
	rack varchar(64) NULL,
	power_value int4 NULL,
	"time" int8 NULL,
	event_time timestamp NULL,
	department varchar(128) NULL
);
CREATE INDEX dcmi_power_consumption_watts_event_time_idx ON public.dcmi_power_consumption_watts USING btree (event_time);
CREATE INDEX dcmi_power_consumption_watts_time_idx ON public.dcmi_power_consumption_watts USING btree (time);

CREATE TABLE public.instance_info (
    id serial4 NOT NULL,
	ip varchar(64) NULL,
    rack varchar(64) NULL,
    department varchar(128) NULL,
    architecture varchar(64) NULL,
    distribution varchar(64) NULL,
    description varchar(256) NULL,
    status int4 NULL
);

CREATE TABLE public.rack_static_metrics_consumption (
    id serial4 NOT NULL,
    rack varchar(64) NULL,
    power_value int4 NULL
);

dcmi_power_consumption_watts 表按机架聚合，并记录每个机架的功率值，最后结果合并成一行
'''

# PostgreSQL connection settings, sourced from the project-level config module.
conn_params = dict(
    dbname=db_name,
    user=db_user,
    password=db_passwd,
    host=db_host,
    port=db_port,
)

def request_data(url):
    """Run a Prometheus instant query and return its result list.

    Sends a GET to *url* and extracts the ``data.result`` list from the
    JSON body.  On any transport error, non-200 status, or parse failure
    the problem is logged and an empty **list** is returned.

    Returning a list (not ``{}`` as before) is essential: the caller
    concatenates the results of two calls with ``+``, which raises
    ``TypeError`` if either side is a dict.
    """
    try:
        # Bounded timeout so a stalled Prometheus endpoint cannot hang
        # the collection thread indefinitely.
        response = requests.get(url, timeout=10)
    except requests.RequestException as e:
        logger.error(f"请求失败: {e}")
        return []

    if response.status_code != 200:
        logger.error(f"请求失败，状态码: {response.status_code}")
        return []

    try:
        data = response.json()
    except ValueError as e:
        # Body was not valid JSON.
        logger.error(f"数据解析失败: {e}")
        return []

    # Prometheus wraps the samples in data.result.
    if 'data' in data and 'result' in data['data']:
        return data['data']['result']

    logger.error("数据解析失败")
    return []

def fetch_and_store_data(ip_port):
    """Collect power metrics from Prometheus at *ip_port* and insert them.

    Queries two metric families (ipmi_dcmi_power_consumption_watts and
    ipmi_power_watts{name="Power"}), stamps every sample with a single
    shared collection time, and bulk-inserts the rows into
    ``dcmi_power_consumption_watts``.  All failures are logged, never
    raised — this runs on a fire-and-forget worker thread.
    """
    url1 = f"http://{ip_port}/api/v1/query?query=ipmi_dcmi_power_consumption_watts"
    url2 = f'http://{ip_port}/api/v1/query?query=ipmi_power_watts' + '{name="Power"}'
    # One timestamp for the whole cycle so both metric families share
    # identical time/event_time values in the table.
    now = datetime.datetime.now()
    timestamp = now.timestamp()
    formatted_time = now.strftime('%Y-%m-%d %H:%M:%S.%f')

    result_dcmi = request_data(url1)
    logger.info(f"dcmi 方式获取到 {len(result_dcmi)} 条数据")

    result_ipmi = request_data(url2)
    logger.info(f"ipmi 方式获取到 {len(result_ipmi)} 条数据")

    results = result_dcmi + result_ipmi
    if not results:
        # Nothing to persist; skip the database round-trip entirely.
        return

    # conn starts as None so the finally block is safe even when
    # psycopg2.connect itself fails (the original code hit NameError
    # on `cursor`/`conn` in that case).
    conn = None
    try:
        conn = psycopg2.connect(**conn_params)
        # `with conn` commits on success and rolls back on exception;
        # `with ... cursor()` closes the cursor either way.
        with conn:
            with conn.cursor() as cursor:
                # Row layout must match the INSERT column list below.
                values = [
                    (
                        result['metric']['instance'],
                        result['metric']['rack'],
                        int(result['value'][1]),
                        int(timestamp),
                        formatted_time,
                        result['metric']['department'],
                    )
                    for result in results
                ]

                insert_query = """
                INSERT INTO dcmi_power_consumption_watts (instance, rack, power_value, time, event_time, department)
                VALUES (%s, %s, %s, %s, %s, %s)
                """
                cursor.executemany(insert_query, values)
        logger.info(f"成功插入 {len(values)} 条数据")
    except Exception as e:
        # Covers connect failures, malformed samples (missing metric
        # labels), and insert errors — same catch-all as before.
        logger.error(f"数据库操作失败: {e}")
    finally:
        if conn is not None:
            conn.close()

def timer_task():
    """Launch a metric-collection worker every 30 seconds, forever.

    Each cycle runs fetch_and_store_data on its own thread, so a slow
    fetch or insert never delays the next scheduling tick.
    """
    interval_seconds = 30
    while True:
        worker = threading.Thread(
            target=fetch_and_store_data,
            args=(prometheus_url,),
        )
        worker.start()
        # Fixed-rate tick: sleep, then schedule the next cycle.
        time.sleep(interval_seconds)


if __name__ == "__main__":
    # Run the scheduler on a daemon thread so it dies automatically
    # when the main program exits.
    scheduler = threading.Thread(target=timer_task, daemon=True)
    scheduler.start()

    # Park the main thread; the daemon scheduler does the real work.
    while True:
        time.sleep(1)
