import clickhouse_driver
import argparse
import datetime
import subprocess
import os
import gzip
import shutil
from file_processor import FileProcessor  # file-processor class implemented earlier
# NOTE(review): FileProcessor appears unused in this file — confirm before removing.
from calc_time_range import TimeRangeCalculator
from custom_logger import PrefixDateLogger

# Create the module-level logger for this script.
logger = PrefixDateLogger("process_missing_data")

class ClickHouseFileProcessor:
    """Repair incomplete log-collection hours recorded in ClickHouse.

    Rows in ``t_monitor_log_collect_compare`` whose ``completeness_ratio`` is
    below 0.995 identify (ip, app_id, event_id, date, hour) log files that
    were not fully ingested.  Depending on the requested business type the
    processor either re-downloads the raw ``.txt.gz`` files from OSS and
    extracts them into ``target_dir`` (``supplement``), or deletes and
    recomputes the comparison rows from the base tables (``check``).
    """

    def __init__(self, host='172.29.151.181', port=9000, user='default',
                 password='Keepgo123@cys', database='db_cfx', target_dir='/home/cys/www/data_vector_supplement'):
        """
        Initialise the ClickHouse file processor (no connection is opened yet).

        :param host: ClickHouse host address
        :param port: ClickHouse native-protocol port
        :param user: ClickHouse user name
        :param password: ClickHouse password
        :param database: ClickHouse database name
        :param target_dir: directory the OSS files are copied/extracted into
        """
        # SECURITY NOTE(review): hard-coded default credentials should be
        # moved to the environment or a config file.
        self.host = host
        self.port = port
        self.user = user
        self.password = password
        self.database = database
        self.target_dir = target_dir
        self.connection = None
        # ip/add_id/event_id/date/hour keys already handled during this run,
        # so the same file is never downloaded twice.
        self.processed_pairs = set()

        # OSS configuration used by the ossutil CLI.
        self.oss_config_file = "/home/cys/www/script/ch_recollect_log/ossutils_eu.config"
        self.oss_bucket = "oss://log-bdp-eu"

    def calculate_date_hour_ranges(self, hours):
        """
        Split the window [now - hours, now] into per-date hour ranges.

        :param hours: number of hours to look back (1-72)
        :return: list of dicts with keys ``date`` ("%Y-%m-%d" string) and
                 ``start_hour`` / ``end_hour`` (inclusive, 0-23)
        :raises ValueError: if ``hours`` is not an int in [1, 72]
        """
        if not isinstance(hours, int) or hours < 1 or hours > 72:
            raise ValueError("小时数必须是1到72之间的整数")

        # Window boundaries: "now" and "now minus <hours>".
        end_time = datetime.datetime.now()
        start_time = end_time - datetime.timedelta(hours=hours)

        # Enumerate the calendar dates from start to end, inclusive.
        # BUG FIX: the previous implementation stepped a *datetime* in 1-day
        # increments from start_time, which skipped the final date whenever
        # the window crossed midnight at a wall-clock time later than
        # end_time's (e.g. a 2-hour window 23:00 -> 01:00 lost the second
        # day).  Stepping over dates directly cannot miss one.
        sorted_dates = []
        day = start_time.date()
        while day <= end_time.date():
            sorted_dates.append(day)
            day += datetime.timedelta(days=1)

        result = []
        for date in sorted_dates:
            # Clamp the overall window to this calendar date to obtain the
            # date-local hour range.
            date_start = datetime.datetime.combine(date, datetime.time.min)
            date_end = datetime.datetime.combine(date, datetime.time.max)
            start_hour = max(start_time, date_start).hour
            end_hour = min(end_time, date_end).hour
            result.append({
                "date": date.strftime("%Y-%m-%d"),  # date as string
                "start_hour": start_hour,           # first hour on this date
                "end_hour": end_hour,               # last hour on this date
            })

        return result

    def connect(self):
        """Open a connection to ClickHouse; return True on success."""
        logger.info(self.host)
        try:
            self.connection = clickhouse_driver.connect(
                host=self.host,
                port=self.port,
                user=self.user,
                password=self.password,
                database=self.database
            )
            logger.info("成功连接到ClickHouse")
            return True
        except Exception as e:
            logger.info(f"连接ClickHouse失败: {str(e)}")
            self.connection = None
            return False

    def disconnect(self):
        """Close the ClickHouse connection if one is open."""
        if self.connection:
            self.connection.close()
            self.connection = None
            logger.info("关闭ClickHouse连接")

    def query_monitor_data(self, date, hour_start, hour_end):
        """
        Fetch incomplete-collection rows from t_monitor_log_collect_compare.

        :param date: date string such as '2023-10-01'
        :param hour_start: first hour (inclusive), e.g. 8
        :param hour_end: last hour (inclusive), e.g. 10
        :return: list of (ip, app_id, date, hour, event_id) tuples
        :raises ValueError: if connect() has not been called
        """
        if not self.connection:
            raise ValueError("ClickHouse连接未初始化，请先调用connect()方法")

        # Parameterised query.  The values were previously interpolated with
        # an f-string, which is injection-prone for externally supplied input;
        # clickhouse-driver substitutes %(name)s placeholders safely.
        query = """
        SELECT ip, app_id, date, hour, event_id
        FROM t_monitor_log_collect_compare
        WHERE date = %(date)s
          AND hour BETWEEN %(hour_start)s AND %(hour_end)s
          AND completeness_ratio < 0.995
        """
        params = {'date': date, 'hour_start': hour_start, 'hour_end': hour_end}

        try:
            cursor = self.connection.cursor()
            try:
                cursor.execute(query, params)
                result = cursor.fetchall()
            finally:
                cursor.close()
            logger.info(f"查询到 {len(result)} 条符合条件的数据")
            return result
        except Exception as e:
            logger.info(f"查询ClickHouse失败: {str(e)}")
            raise

    def download_and_extract_file_from_oss(self, ip, add_id, event_id, record_date, record_hour):
        """
        Download one gzipped log file from OSS and extract it locally.

        The remote object is expected at
        ``{bucket}/{ip}/{add_id}/{event_id}.{date}_{hour}.0.txt.gz`` and is
        extracted into ``{target_dir}/{ip}/`` with the ``.gz`` suffix removed.

        :param ip: source IP address (also used as the local directory name)
        :param add_id: application id
        :param event_id: event id
        :param record_date: date ('%Y-%m-%d' string, or an object whose str()
                            renders that way, e.g. datetime.date)
        :param record_hour: hour of day (0-23)
        :return: True if the file was downloaded and extracted, else False
        """
        try:
            # Normalise date + hour into the timestamp used in the file name.
            dt = datetime.datetime.strptime(f"{record_date} {record_hour}", "%Y-%m-%d %H")
            formatted = dt.strftime("%Y-%m-%d_%H")
            file_name = f"{event_id}.{formatted}.0.txt.gz"
            oss_path = f"{self.oss_bucket}/{ip}/{add_id}/{file_name}"

            # Ensure the per-IP target directory exists.
            target_ip_dir = os.path.join(self.target_dir, ip)
            os.makedirs(target_ip_dir, exist_ok=True)

            # Local compressed path and the extraction target next to it.
            local_gz_file = os.path.join(target_ip_dir, file_name)
            local_txt_file = local_gz_file[:-3]  # strip the ".gz" suffix

            logger.info(f"  从OSS下载文件: {oss_path}")
            logger.info(f"  本地保存路径: {local_gz_file}")

            # ossutil is invoked without a shell (argument list), so the path
            # values cannot be interpreted as shell syntax.
            cmd = [
                "ossutil",
                "--config-file", self.oss_config_file,
                "cp",
                oss_path,
                local_gz_file
            ]
            result = subprocess.run(cmd, capture_output=True, text=True)

            if result.returncode != 0:
                logger.info(f"  下载失败: {result.stderr}")
                return False

            if not os.path.exists(local_gz_file):
                logger.info(f"  下载文件不存在: {local_gz_file}")
                return False

            # Extract the gzip archive, then keep only the text file.
            logger.info(f"  解压文件: {local_gz_file}")
            try:
                with gzip.open(local_gz_file, 'rb') as f_in:
                    with open(local_txt_file, 'wb') as f_out:
                        shutil.copyfileobj(f_in, f_out)

                os.remove(local_gz_file)
                logger.info(f"  解压成功: {local_txt_file}")
                return True

            except Exception as e:
                logger.info(f"  解压失败: {str(e)}")
                # Drop a partially written output so it is not mistaken for a
                # complete file on a later run.
                if os.path.exists(local_txt_file):
                    os.remove(local_txt_file)
                return False

        except Exception as e:
            logger.info(f"  处理OSS文件失败: {str(e)}")
            return False

    def delete_monitor_data(self, ip, app_id, event_id, date, hour):
        """
        Delete the matching rows from t_monitor_log_collect_compare.

        NOTE: ``ALTER TABLE ... DELETE`` is an asynchronous ClickHouse
        mutation and returns no rows, so the logged "deleted" count is the
        (empty) result-set length, not the number of rows removed.

        :raises ValueError: if connect() has not been called
        """
        if not self.connection:
            raise ValueError("ClickHouse连接未初始化，请先调用connect()方法")

        # Parameterised mutation (values were previously f-string interpolated).
        query = """
        ALTER TABLE db_cfx.t_monitor_log_collect_compare DELETE
        WHERE ip = %(ip)s
          and app_id = %(app_id)s
          and event_id = %(event_id)s
          and date = %(date)s
          AND hour = %(hour)s
        """
        params = {'ip': ip, 'app_id': app_id, 'event_id': event_id,
                  'date': date, 'hour': hour}
        logger.info("delete_monitor_data:" + query + " params:" + str(params))
        try:
            cursor = self.connection.cursor()
            try:
                cursor.execute(query, params)
                result = cursor.fetchall()
            finally:
                cursor.close()
            logger.info(f"删除 {len(result)} 条符合条件的数据")
            return result
        except Exception as e:
            logger.info(f"delete_monitor_data失败: {str(e)}")
            raise

    def reinsert_monitor_data(self, ip, app_id, event_id, date, hour):
        """
        Recompute one comparison row and insert it into
        t_monitor_log_collect_compare.

        Joins the hourly line count recorded from the source files
        (``t_mysql_file_line_counts``) with the row count actually present in
        ``t_log_base_data_standard_active`` for the same key.

        Failures are logged but deliberately not re-raised (returns None),
        so one bad key does not abort the rest of the check batch.

        :raises ValueError: if connect() has not been called
        """
        if not self.connection:
            raise ValueError("ClickHouse连接未初始化，请先调用connect()方法")

        # Parameterised insert-select (values were previously f-string
        # interpolated).  Named placeholders may repeat.
        query = """
            insert into db_cfx.t_monitor_log_collect_compare
            with
                ch_count as (
                    select log_source_ip,base_app_id,base_k,base_create_date ,base_create_hour as hour,count() as cnt from db_cfx.t_log_base_data_standard_active
                    where base_create_date=%(date)s and base_create_hour=%(hour)s and base_k =%(event_id)s and log_source_ip =%(ip)s and base_app_id=%(app_id)s
                    group by log_source_ip,base_app_id,base_k,base_create_date ,base_create_hour
                ),
                mysql_count as (
                    select ip,app_id,event_id,date,hour,line_count as log_cnt
                    from db_cfx.t_mysql_file_line_counts as a
                    where toDate(date)=%(date)s and hour = %(hour)s and event_id =%(event_id)s and ip =%(ip)s and app_id=%(app_id)s
                )
            select a.*,ch.cnt as ch_cnt,(a.log_cnt -ch.cnt) as diff,ch.cnt/a.log_cnt as completeness_ratio,'t_log_base_data_standard_active' as table_name
            from mysql_count as a
            left join ch_count as ch
            on ch.log_source_ip=a.ip and ch.base_app_id=a.app_id and ch.base_k = a.event_id and ch.base_create_date=toDate(a.date) and ch.hour=a.hour
        """
        params = {'ip': ip, 'app_id': app_id, 'event_id': event_id,
                  'date': date, 'hour': hour}
        # BUG FIX: the original passed the query as a stray second positional
        # argument to logger.info(); build a single message instead.
        logger.info("reinsert_monitor_data:" + query)
        try:
            cursor = self.connection.cursor()
            try:
                cursor.execute(query, params)
                result = cursor.fetchall()
            finally:
                cursor.close()
            logger.info(f"重新插入 {len(result)} 条符合条件的数据")
            return result
        except Exception as e:
            # Intentionally swallowed (matches original behaviour) so the
            # caller's loop continues with the remaining rows.
            logger.info(f"reinsert_monitor_data失败: {str(e)}")

    def merge_monitor_data(self):
        """
        Run OPTIMIZE ... FINAL on t_monitor_log_collect_compare so the
        rewritten comparison rows are compacted immediately.

        :raises ValueError: if connect() has not been called
        """
        if not self.connection:
            raise ValueError("ClickHouse连接未初始化，请先调用connect()方法")

        # Trailing ';' removed: clickhouse-driver does not support statement
        # terminators / multi-statement queries over the native protocol.
        query = "OPTIMIZE TABLE db_cfx.t_monitor_log_collect_compare FINAL"

        try:
            cursor = self.connection.cursor()
            try:
                cursor.execute(query)
            finally:
                cursor.close()
        except Exception as e:
            logger.info(f"merge_monitor_data失败: {str(e)}")
            raise

    def process_files(self, date, hour_start, hour_end, type):
        """
        Query the incomplete hours and run the requested repair flow.

        :param date: date string such as '2023-10-01'
        :param hour_start: first hour (inclusive), e.g. 8
        :param hour_end: last hour (inclusive), e.g. 10
        :param type: 'supplement' to re-download files from OSS, 'check' to
                     recompute comparison rows.  (The name shadows the
                     builtin but is kept for caller compatibility.)
        """
        # Lazily (re)connect so the method also works stand-alone.
        if not self.connection:
            logger.info("未连接到ClickHouse，尝试自动连接...")
            if not self.connect():
                logger.info("连接失败，无法继续处理")
                return

        try:
            data = self.query_monitor_data(date, hour_start, hour_end)
            logger.info(f"查询到 {len(data)} 条符合条件的数据")
            logger.info(data)
            if not data:
                logger.info("没有符合条件的数据，无需处理文件")
                return
            if type == 'supplement':
                logger.info("补充数据处理")
                self._supplement_rows(data)
            elif type == 'check':
                logger.info("核对数据处理")
                self._check_rows(data)

        except Exception as e:
            logger.info(f"处理过程中发生错误: {str(e)}")

    def _supplement_rows(self, data):
        """Download and extract the OSS file behind every incomplete row."""
        for row in data:
            ip, add_id, record_date, record_hour, event_id = row

            if ip is None or ip == "":
                logger.info(f"跳过无效的ip: {row}")
                continue

            # Skip keys already handled during this run.
            pair_key = f"{ip}_{add_id}_{event_id}_{record_date}_{record_hour}"
            if pair_key in self.processed_pairs:
                logger.info(f"已处理过 {pair_key}，跳过重复处理")
                continue
            self.processed_pairs.add(pair_key)

            logger.info(f"\n开始从OSS下载文件: IP={ip}, AppID={add_id}, EventID={event_id}, Date={record_date}, Hour={record_hour}")
            success = self.download_and_extract_file_from_oss(ip, add_id, event_id, record_date, record_hour)

            if success:
                logger.info(f"  文件处理成功: {ip}/{add_id}/{event_id}.{record_date}_{record_hour}.0.txt")
            else:
                logger.info(f"  文件处理失败: {ip}/{add_id}/{event_id}.{record_date}_{record_hour}.0.txt")

    def _check_rows(self, data):
        """Delete and recompute the comparison row for every result row."""
        for row in data:
            ip, add_id, record_date, record_hour, event_id = row

            if ip is None or ip == "":
                logger.info(f"跳过无效的ip: {row}")
                continue

            logger.info("开始数据处理" + str(row) + " record_date:" + str(record_date) + " record_hour:" + str(record_hour))
            # Drop the stale row, then recompute and reinsert it.
            self.delete_monitor_data(ip, add_id, event_id, record_date, record_hour)
            self.reinsert_monitor_data(ip, add_id, event_id, record_date, record_hour)
        # Compact the table once after all rows were rewritten.
        self.merge_monitor_data()

    def run(self, date, hour_start, hour_end, type):
        """
        Full workflow: connect -> process -> disconnect.

        :param date: date string such as '2023-10-01'
        :param hour_start: first hour (inclusive), e.g. 8
        :param hour_end: last hour (inclusive), e.g. 10
        :param type: business type, 'supplement' or 'check'
        """
        try:
            if not self.connect():
                return
            self.process_files(date, hour_start, hour_end, type)
        finally:
            # Always release the connection, even after errors.
            self.disconnect()

if __name__ == "__main__":
    # 解析命令行参数
    parser = argparse.ArgumentParser(description='从ClickHouse读取数据并处理文件拷贝')
    
    # ClickHouse连接参数
    parser.add_argument('--ch-host', default='172.29.151.181', help='ClickHouse主机地址')
    parser.add_argument('--ch-port', type=int, default=9000, help='ClickHouse端口')
    parser.add_argument('--ch-user', default='default', help='ClickHouse用户名')
    parser.add_argument('--ch-password', default='Keepgo123@cys', help='ClickHouse密码')
    parser.add_argument('--ch-db', default='db_cfx', help='ClickHouse数据库名')
    
    # 查询参数
    parser.add_argument('--date', required=True, help='查询日期，格式如2023-10-01')
    parser.add_argument('--hour-start', type=int, default=0, help='开始小时，如8')
    parser.add_argument('--hour-end', type=int, default=23, help='结束小时，如10')
    parser.add_argument('--hour-num', type=int, default=0, help='处理最近N小时，1-72之间，优先级高于date/hour-start/hour-end')
    parser.add_argument('--type', required=True,  help='业务类型， supplement--补充数据，check--核对数据')
    
    # 目标目录
    parser.add_argument('--target-dir', default='/home/cys/www/data_vector_supplement', help='文件拷贝目标目录')
    
    args = parser.parse_args()
    
    # 创建处理器实例并执行
    processor = ClickHouseFileProcessor(
        host=args.ch_host,
        port=args.ch_port,
        user=args.ch_user,
        password=args.ch_password,
        database=args.ch_db,
        target_dir=args.target_dir
    )
    if args.hour_num>0:
        logger.info(f"从当前时间往前推{args.hour_num}小时，包含以下日期及对应小时范围：")
        #ranges = processor.calculate_date_hour_ranges(args.hour_num)
        calculator = TimeRangeCalculator()
        ranges = calculator.get_time_ranges(args.hour_num)
        for item in ranges:
            logger.info(f"处理->日期：{item['date']}，开始小时：{item['start_hour']}，结束小时：{item['end_hour']}")
            processor.run(item['date'], item['start_hour'], item['end_hour'],args.type)
    else:
        processor.run(args.date, args.hour_start, args.hour_end,args.type)

