import os
import logging
import logging.handlers
import configparser
import time
from datetime import datetime, timedelta
import psutil
import subprocess
import schedule
import mysql.connector
import base64
from mysql.connector import Error


# Read configuration (path resolved relative to the current working directory)
config = configparser.ConfigParser()
config.read('config/config.ini')

# Log destination taken from the [logs] section; raises NoSectionError if missing
log_path = os.path.abspath(config.get('logs', 'log_path'))
log_name = config.get('logs', 'log_name')

# Ensure the log directory exists
os.makedirs(log_path, exist_ok=True)

# Record format shared by all handlers attached below
log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
formatter = logging.Formatter(log_format)

# Custom TimedRotatingFileHandler that stamps rotated files with the date
class CustomTimedRotatingFileHandler(logging.handlers.TimedRotatingFileHandler):
    """TimedRotatingFileHandler that renames the rotated file to ``<name>_YYYY-MM-DD.log``.

    The stock handler appends the date *after* the ``.log`` suffix; this class
    moves the date stamp before it.
    """

    def doRollover(self):
        # Close the stream before renaming: renaming an open file fails on
        # Windows, and the original renamed while the stream was still open.
        if self.stream:
            self.stream.close()
            self.stream = None
        # Date of the interval that just ended (rolloverAt still refers to it here).
        formatted_time = time.strftime('%Y-%m-%d', time.localtime(self.rolloverAt))
        new_filename = self.baseFilename.replace('.log', f'_{formatted_time}.log')
        # Guard: the base file may not exist if nothing was logged this interval,
        # and we must not clobber an already-rotated file.
        if os.path.exists(self.baseFilename) and not os.path.exists(new_filename):
            os.rename(self.baseFilename, new_filename)
        # Base class skips its own rename (file gone), reopens the stream and
        # computes the next rolloverAt.
        super().doRollover()

# Create the module-level logger used by the functions below
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

# Rotate the log file at midnight, keeping 7 dated backups
log_file = os.path.join(log_path, log_name)
handler = CustomTimedRotatingFileHandler(log_file, when='midnight', interval=1, backupCount=7, encoding='utf-8')
handler.setFormatter(formatter)

# Attach the rotating handler to the logger
logger.addHandler(handler)

def decode_password(encoded_password):
    """Return the plaintext password decoded from a Base64-encoded string."""
    raw_bytes = base64.b64decode(encoded_password)
    return raw_bytes.decode('utf-8')


def read_config(config_path='./config/config.ini'):
    """Load database, logging, service and schedule settings from an INI file.

    Returns a 5-tuple: (db_config, log_config, services, cron_expression, interval).
    """
    parser = configparser.ConfigParser()
    # Keep option keys case-sensitive (the default lower-cases them).
    parser.optionxform = str
    parser.read(config_path)

    database = parser['database']
    db_config = {
        'host': database['host'],
        'port': int(database['port']),
        'db_name': database['db_name'],
        'user': database['user'],
        'password': decode_password(database['password']),
        'reconnect_interval': int(database['reconnect_interval']),
    }

    log_config = {key: parser['logs'][key] for key in ('log_path', 'log_name')}

    # [services] maps service name -> log file path.
    services = dict(parser.items('services'))
    cron_expression = parser.get('task_schedule', 'cron_expression', fallback='* * * * 1-5')
    interval = parser.getint('task_schedule', 'interval', fallback=1)

    return db_config, log_config, services, cron_expression, interval


# Connect to the database
def connect_to_database(db_config, logger):
    """Open a MySQL connection using *db_config*.

    Returns (connection, 0) on success or (None, 1) on failure. The original
    fell off the end (returning bare None) when is_connected() was False,
    which broke tuple-unpacking at the call site.
    """
    try:
        connection = mysql.connector.connect(
            host=db_config['host'],
            port=db_config['port'],
            database=db_config['db_name'],
            user=db_config['user'],
            password=db_config['password']
        )

        if connection.is_connected():
            logger.info("数据库连接成功,Connected to the database successfully.")
            return connection, 0  # success

        # Handle the (rare) case where connect() returned but the link is
        # not active: close it and report failure like any other error.
        connection.close()
        logger.error("数据库连接失败,Connected to the database failed: connection not active")
        return None, 1
    except Error as e:
        logger.error(f"数据库连接失败,Connected to the database failed: {e}")
        return None, 1  # failure
        
# Write one monitoring sample to the database
def write_to_database(connection, previous_time, service_name, pid, service_status, cpu_usage, memory_usage, log_file, error_message):
    """Insert one service-monitor row.

    Fixes in this version: the original SQL referenced undefined names
    (cpu_use/memory_use/logs_message -> NameError) and interpolated raw log
    text into the statement (SQL injection); the query is now parameterized.

    NOTE(review): column order follows the original VALUES order, which put
    log_file and the error text in the last two slots — mapping uptime <-
    log_file is a best guess; confirm against the service_monitor schema.
    """
    sql = (
        "INSERT INTO service_monitor "
        "(EnvID, ctime, service_name, pid, service_status, cpu_use, memory_use, uptime, logs_message) "
        "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)"
    )
    params = (2, previous_time, service_name, pid, service_status,
              cpu_usage, memory_usage, log_file, error_message)
    try:
        with connection.cursor() as cursor:
            cursor.execute(sql, params)
            connection.commit()
            logging.info("Inserted data for service %s into database.", service_name)
    except Exception as e:
        logging.error("Error writing to database: %s", e)

# Keep-alive loop for the database connection
def monitor_process():
    """Maintain a live database connection, reconnecting after failures.

    Fixes in this version: read_config() returns five values but was unpacked
    into two (ValueError), and init_logging() does not exist — the
    module-level `logger` configured at import time is used instead.
    Runs forever; never returns.
    """
    db_config, log_config, services, cron_expression, interval = read_config()

    connection = None
    mysqlConnectedStatus = 1  # 1 = not connected, 0 = connected

    while True:
        if connection is None:
            # Attempt to (re)connect.
            connection, mysqlConnectedStatus = connect_to_database(db_config, logger)

        if mysqlConnectedStatus == 1:
            logger.error(f"数据库连接失败,Retrying to connect in {db_config['reconnect_interval']} seconds...")
            time.sleep(db_config['reconnect_interval'])
            continue

        # Connection established: run a cheap liveness probe each cycle.
        try:
            cursor = connection.cursor(buffered=True)
            cursor.execute("SELECT 1")
            cursor.close()
        except Error as e:
            # Connection lost — drop it and force a reconnect next iteration.
            logger.error(f"数据库连接失败,Database operation error: {e}")
            connection.close()
            connection = None
            mysqlConnectedStatus = 1
            logger.error(f"数据库连接失败,尝试重新连接:Retrying to connect in {db_config['reconnect_interval']} seconds...")

        # Pause before the next probe (the original slept twice on error).
        time.sleep(db_config['reconnect_interval'])
# 执行服务监控任务
def monitor_service_task(services, connection):
    for service_name, log_file in services.items():
        previous_time = get_previous_second_time()  # 获取当前时间的前一秒，用于日志匹配
        command = f"ps aux | grep '{service_name}' | grep -vE 'grep|vim|vi|cat|tail|more|less'"
        logs_command = f"iconv -f GBK -t UTF-8 -c {log_file} | grep '{previous_time}' | grep -E ' \[E\] | ERROR '"
        
        # 执行进程监控命令
        result = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
        logs_result = subprocess.run(logs_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)

        # 检查ps命令的返回码
        if result.returncode != 0:  # 如果进程不存在
            pid = 0
            cpu_usage = 0
            memory_usage = 0
            logging.info(f"{service_name} is offline. PID: 0, CPU: {cpu_usage}%, Memory: {memory_usage}%")
            logging.info(f'monitorinfo message: "{previous_time} {log_file} null"')
        else:
            lines = result.stdout.strip().splitlines()  # 获取进程信息
            error_message = logs_result.stdout.strip()  # 获取符合条件的日志报文

            # 如果有错误日志，则记录该日志
            if error_message:
                logging.info(f'monitorinfo message: "{log_file} {error_message}"')

            for line in lines:
                fields = line.split()  # 按空格分割每一行
                pid = fields[1]
                cpu_usage = fields[2]
                memory_usage = fields[3]

                # 插入监控信息到数据库
                write_to_database(connection, previous_time, service_name, pid, cpu_usage, memory_usage, log_file, error_message)
                logging.info(f"{service_name} is online. PID: {pid}, CPU: {cpu_usage}%, Memory: {memory_usage}%")
# Periodic task driver
def schedule_task(services, interval, connection=None):
    """Run monitor_service_task every *interval* seconds. Never returns.

    connection: forwarded to monitor_service_task (new keyword parameter with
    a default, so existing callers still work). The original scheduled the
    job without it, so every invocation raised TypeError.
    """
    schedule.every(interval).seconds.do(monitor_service_task,
                                        services=services, connection=connection)

    while True:
        schedule.run_pending()
        time.sleep(1)

def main():
    """Entry point: load configuration and start the periodic monitor loop.

    Fixes in this version: read_config() returns five values but was unpacked
    into three (ValueError), and setup_logging() does not exist — logging is
    already configured at module import time, so that call is dropped.
    """
    db_config, log_config, services, cron_expression, interval = read_config()

    logging.info("Monitor process started.")
    schedule_task(services, interval)

if __name__ == '__main__':
    main()
