import logging
import os
import re
import urllib.parse
from pathlib import Path

import click
# pip3 install python-daemon
import daemon
import pandas as pd
from apscheduler.schedulers.blocking import BlockingScheduler
from daemon.pidfile import TimeoutPIDLockFile
from sqlalchemy import create_engine, text

# Absolute directory containing this script (avoids depending on the current working directory)
SCRIPT_DIR = Path(__file__).resolve().parent  # __file__ is the path of the running script

# Default log / PID file paths.
# NOTE(review): these two constants are not referenced anywhere below — the click
# option defaults rebuild the same paths from SCRIPT_DIR. Kept in case external
# code imports them; confirm before removing.
LOG_PATH = SCRIPT_DIR/"scheduler.log"
PID_PATH = SCRIPT_DIR/"scheduler.pid"

class Gbase():
    """Thin wrapper around an openGauss/GBase SQLAlchemy engine used to inspect
    `pg_stat_activity` and cancel/terminate backend sessions."""

    def __init__(self, user, password, port, host):
        """Store connection parameters and build a lazy engine.

        No connection is opened here; SQLAlchemy connects on first use.
        """
        self.user = user
        self.password = password
        self.port = port
        self.host = host
        # URL-encode the password so special characters survive the DSN
        encoded_password = urllib.parse.quote_plus(self.password)
        # Force the session timezone to Asia/Shanghai so now() and xact_start
        # are comparable to local expectations
        self.engine = create_engine(
            f"opengauss+psycopg2://{self.user}:{encoded_password}@{self.host}:{self.port}/postgres?options=-c%20timezone=Asia/Shanghai"
        )

    def cancel_backend(self, sessionid):
        """Cancel the currently running query of session `sessionid`
        (pg_cancel_backend) — the session itself survives."""
        with self.engine.connect() as conn:
            stmt = text("SELECT pg_cancel_backend(:sessionid)")
            conn.execute(stmt, {"sessionid": sessionid})
            conn.commit()
            logging.info(f"取消连接{sessionid}成功")

    def terminate_backend(self, sessionid):
        """Terminate session `sessionid` entirely (pg_terminate_backend)."""
        with self.engine.connect() as conn:
            stmt = text("SELECT pg_terminate_backend(:sessionid)")
            conn.execute(stmt, {"sessionid": sessionid})
            conn.commit()
            logging.info(f"终止连接{sessionid}成功")

    def get_backend_info(self):
        """Return one row per backend from pg_stat_activity, plus the server-side
        now() as `current_time` so durations use the server clock."""
        df = pd.read_sql(
            "SELECT now() as current_time,* FROM pg_stat_activity",
            con=self.engine,
            parse_dates={"current_time": {"utc": False}, "xact_start": {"utc": False}},
        )
        return df

    def get_active_time(self, df: pd.DataFrame, busy_time: int) -> pd.DataFrame:
        """Return the non-idle rows whose current transaction has been running
        for more than `busy_time` seconds.

        FIX: the original row-wise apply produced an object-dtype column mixing
        Timedelta and None, whose `>` comparison is fragile when xact_start is
        missing.  Vectorized datetime subtraction yields timedelta64 with NaT
        for missing starts, and NaT comparisons are simply False.
        """
        df['active_time'] = df['current_time'] - df['xact_start']
        return df[(df['active_time'] > pd.Timedelta(seconds=busy_time)) & (df['state'] != 'idle')]






def get_match_user(df: pd.DataFrame, match_user: str) -> pd.DataFrame:
    """Keep only rows whose `usename` appears in the '|'-separated user list.

    '*' keeps every row unchanged.
    """
    if match_user == '*':
        return df
    wanted = set()
    for raw in match_user.split('|'):
        name = raw.strip()
        if name:
            wanted.add(name)
    return df[df['usename'].isin(wanted)]
        
def get_match_host(df: pd.DataFrame, match_host: str) -> pd.DataFrame:
    """Keep only rows whose `client_addr` appears in the '|'-separated host list.

    '*' keeps every row unchanged.
    """
    if match_host == '*':
        return df
    # strip each token, then drop empties left by stray separators
    hosts = {h.strip() for h in match_host.split('|')} - {''}
    return df[df['client_addr'].isin(hosts)]

def get_match_command(df: pd.DataFrame, match_command: str) -> pd.DataFrame:
    """Keep only rows whose `query` contains any of the '|'-separated commands
    (case-insensitive substring match); '*' keeps every row.

    FIX: each command is now `re.escape`d before being joined into the regex —
    the original passed raw user input as a pattern, so a command containing a
    regex metacharacter (e.g. '(') raised or silently mismatched.
    NaN queries never match (na=False).
    """
    if match_command == '*':
        return df
    commands = [c.strip() for c in match_command.split('|') if c.strip()]
    pattern = '|'.join(re.escape(c) for c in commands)
    return df[df['query'].str.contains(pattern, case=False, na=False)]

def get_match_state(df: pd.DataFrame, match_state: str) -> pd.DataFrame:
    """Keep only rows whose `state` appears in the '|'-separated state list.

    '*' keeps every row unchanged.
    """
    if match_state == '*':
        return df
    tokens = (s.strip() for s in match_state.split('|'))
    states = {s for s in tokens if s}
    return df[df['state'].isin(states)]

def get_match_db(df: pd.DataFrame, match_db: str) -> pd.DataFrame:
    """Keep only rows whose `datname` appears in the '|'-separated database list.

    '*' keeps every row unchanged.
    """
    if match_db != '*':
        names = [part.strip() for part in match_db.split('|') if part.strip()]
        df = df[df['datname'].isin(set(names))]
    return df

def get_match_uesr(df: pd.DataFrame, match_user: str) -> pd.DataFrame:
    """Deprecated misspelled alias of :func:`get_match_user`.

    FIX: this was a byte-for-byte duplicate of get_match_user under a typo'd
    name; it is kept (job() in start_daemon calls this spelling) but now
    delegates so there is a single implementation to maintain.
    """
    return get_match_user(df, match_user)

def get_match_info(df: pd.DataFrame, match_info: str) -> pd.DataFrame:
    """Keep only rows whose `query` contains `match_info` as a case-insensitive
    literal substring; '*' keeps every row.

    FIX: the original passed `match_info` to str.contains as a regex, so user
    input containing a metacharacter (e.g. 'count(') raised.  The docstring
    intent is keyword matching, so `regex=False` matches the text literally.
    NaN queries never match (na=False); a frame without a `query` column is
    returned unchanged.
    """
    if match_info == '*':
        return df
    if 'query' in df.columns:
        return df[df['query'].str.contains(match_info, case=False, na=False, regex=False)]
    return df





@click.command()
# connection options
@click.option('--user',type=str,default='gbase',help='数据库用户名,默认gbase')
@click.option('--password',type=str,help='数据库密码')
@click.option('--port',type=int,default=15400,help='数据库端口,默认15400')
@click.option('--host',type=str,default='localhost',help='数据库主机,默认localhost')
# match/filter options
@click.option('--match-command',type=str,default='*',help='匹配状态,默认*')
@click.option('--match-info',type=str,default='*',help='匹配信息,默认*')
@click.option('--match-state',type=str,default='*',help='匹配声明,默认*')
@click.option('--match-host',type=str,default='*',help='匹配主机,默认*')
@click.option('--match-db',type=str,default='*',help='匹配数据库,默认*')
@click.option('--match-user',type=str,default='*',help='匹配用户,默认*')
@click.option('--ignore-host',type=str,default='localhost',help='忽略主机,默认localhost')
@click.option('--ignore-db',type=str,default='*',help='忽略数据库,默认*')
@click.option('--ignore-user',type=str,default='*',help='忽略用户,默认*')
# action options (mutually exclusive)
@click.option('--kill',is_flag=True,help='杀掉连接并且退出')
@click.option('--kill-query',is_flag=True,help='只杀掉连接执行的语句，但是线程不会被终止')
# other important options
@click.option('--busy-time',type=int,help='SQL运行时间的线程')
@click.option('--idle-time',type=int,help='sleep时间的连接线程,必须在--match-command sleep时才有效')
@click.option('--interval',type=int,default=5,help='任务间隔(秒),默认5秒')
@click.option('--daemonize',is_flag=True,help='是否放到后台执行')
@click.option('--log',type=str,default=str(SCRIPT_DIR / "scheduler.log"),help='日志文件路径,默认scheduler.log')
@click.option('--pid',type=str,default=str(SCRIPT_DIR / "scheduler.pid"),help='PID文件路径,默认scheduler.pid')
def start_daemon(user, password, port, host, match_command, match_info, match_state, match_host, match_db, match_user,
                ignore_host, ignore_db, ignore_user, kill, kill_query, busy_time, idle_time, interval,
                daemonize, log, pid):
    """Periodically inspect pg_stat_activity and cancel (--kill-query) or
    terminate (--kill) the sessions that match the configured filters.

    NOTE(review): ignore_host / ignore_db / ignore_user / idle_time are
    accepted but not applied anywhere in this function — TODO implement or
    drop them.
    """
    global gbase
    gbase = Gbase(user=user, password=password, port=port, host=host)

    pid_path = Path(pid)
    log_path = Path(log)

    # Make sure the log directory exists before attaching a FileHandler
    log_path.parent.mkdir(parents=True, exist_ok=True)

    # Configure a module logger with a file handler.
    # FIX: the original also logged via the root logger (logging.info) before
    # any handler existed, so that message was lost.
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    file_handler = logging.FileHandler(log_path)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    logger.addHandler(file_handler)

    logger.info(f"已连接到数据库: {host}:{port} 用户名: {user}")

    # FIX: validate the mutually exclusive flags once, up front.  The original
    # checked inside every scheduler tick and ran `raise os._exit(1)`, which
    # kills the whole process mid-job without any cleanup (and the `raise` is
    # dead code since os._exit never returns).
    if kill and kill_query:
        logger.warning("kill和kill_query参数互斥,请只选择其中一个")
        raise SystemExit(1)

    def job():
        """One scheduler tick: fetch activity, apply filters, then act."""
        info = gbase.get_backend_info()
        logger.info(f"获取到的原始数据库连接信息数量: {len(info)}")

        # Apply each '|'-separated filter in turn ('*' means no filtering).
        # FIX: these helpers are module-level functions, not Gbase methods —
        # the original `gbase.get_match_*` calls raised AttributeError on
        # every filtered run (swallowed by APScheduler's job error handling).
        if match_host != '*':
            info = get_match_host(info, match_host)
            logger.info(f"按主机过滤后连接数: {len(info)}")
        if match_db != '*':
            info = get_match_db(info, match_db)
            logger.info(f"按数据库过滤后连接数: {len(info)}")
        if match_state != '*':
            info = get_match_state(info, match_state)
            logger.info(f"按状态过滤后连接数: {len(info)}")
        if match_info != '*':
            info = get_match_info(info, match_info)
            logger.info(f"按信息过滤后连接数: {len(info)}")
        # FIX: --match-command was accepted but never applied by the original job
        if match_command != '*':
            info = get_match_command(info, match_command)
            logger.info(f"按命令过滤后连接数: {len(info)}")
        if match_user != '*':
            info = get_match_user(info, match_user)
            logger.info(f"按用户过滤后连接数: {len(info)}")

        # Keep only transactions running longer than busy_time seconds
        if busy_time is not None:
            info = gbase.get_active_time(info, busy_time)
            logger.info(f"按运行时间过滤后连接数: {len(info)}")

        if info.empty:
            return

        def act(action, ok_msg, err_msg):
            # Apply `action` (terminate/cancel) to every surviving session.
            for _, row in info.iterrows():
                sessionid = row.get('sessionid')
                query = row.get('query')
                if pd.notna(sessionid):
                    try:
                        action(sessionid)
                        logger.info(f"{ok_msg},sessionid: {sessionid},查询语句：{query}")
                    except Exception as e:
                        logger.error(f"{err_msg},sessionid: {sessionid}, 错误: {str(e)}")

        if kill:
            logger.info(f"执行终止连接操作，共{len(info)}个连接")
            act(gbase.terminate_backend, "已终止连接", "终止连接失败")
        elif kill_query:
            logger.info(f"执行取消查询操作，共{len(info)}个连接")
            act(gbase.cancel_backend, "已取消查询", "取消查询失败")

    def run_scheduler():
        # Blocking loop: run job() every `interval` seconds until interrupted.
        scheduler = BlockingScheduler()
        scheduler.add_job(job, 'interval', seconds=interval)
        try:
            scheduler.start()
        except (KeyboardInterrupt, SystemExit):
            logger.info("调度器被用户中断")
        except Exception as e:
            logger.error(f"调度器错误：{str(e)}")
        finally:
            logger.info("调度器关闭")
            if not daemonize and pid_path.exists():
                pid_path.unlink()  # remove the foreground-mode PID file

    if daemonize:
        # NOTE(review): the SQLAlchemy engine is created pre-fork; connections
        # are opened lazily so this normally works — consider
        # gbase.engine.dispose() here if post-fork connection issues appear.
        with daemon.DaemonContext(
            working_directory='.',
            umask=0o022,
            pidfile=TimeoutPIDLockFile(str(pid_path)),
            # FIX: preserve the FileHandler's own descriptor.  The original
            # preserved only a separately opened handle, so DaemonContext
            # closed the handler's fd and all post-fork log records were lost.
            files_preserve=[file_handler.stream.fileno()],
        ):
            run_scheduler()
    else:
        # Foreground mode: record our own PID so operators can signal us
        pid_path.write_text(str(os.getpid()))
        run_scheduler()

if __name__ == '__main__':
    # Entry point: click parses the command-line options and invokes start_daemon.
    start_daemon()