# -*- coding: utf-8 -*-
"""
@Time ： 2025/10/10 11:13
@Auth ： zfw

"""
import os

import pandas as pd
import numpy as np
from clickhouse_driver import Client
from clickhouse_driver import errors
import logging
from typing import Dict, List, Tuple, Any
import concurrent.futures
from datetime import datetime

# 配置日志
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class TGICalculator:
    def __init__(self):
        """Create a ClickHouse client from GC_CLICK_* environment variables and
        verify connectivity with a probe query.

        Raises:
            ConnectionError: if the client cannot be created or the probe fails;
                the original driver error is chained as the cause.
        """
        try:
            self.client = Client(
                host=os.getenv('GC_CLICK_HOST', '21.66.15.112'),
                # Convert the port eagerly so a malformed env value fails here
                # with a clear error instead of deep inside the driver.
                port=int(os.getenv('GC_CLICK_PORT', '9000')),
                user=os.getenv('GC_CLICK_USER', 'dlipck'),
                password=os.getenv('GC_CLICK_PASSWORD', ''),
                database=os.getenv('GC_CLICK_DATABASE', 'clickhouse'),
                settings={'use_numpy': True}
            )
            # The driver connects lazily; force a round-trip to validate
            # host/port/credentials up front.
            self.client.execute("select 1")
            logger.info("ck 数据库连接成功")
        except Exception as e:
            # Fixed: details and suggestion previously ran together on one
            # line; also the suggestion was an f-string with no placeholders.
            error_details = (
                f" CLICKHOUSE 连接失败\n"
                f" ERROR: {e}\n"
            )
            suggestions = "建议检查用户凭证， 尤其检查端口号，测试的时候出现过问题"
            raise ConnectionError(error_details + suggestions) from e

    def get_tag_metadata(self) -> List[Dict]:
        """Fetch tag metadata (column name, ClickHouse type) for the wide table.

        Reads system.columns for table 'dla_s_indv_lbl_dlip_d_all', skipping the
        bookkeeping columns ``data_dt`` and ``cust_id``.

        NOTE(review): the driver actually returns a list of (name, type) tuples,
        not dicts — annotation kept as-is for interface compatibility.
        """
        metadata_sql = """
        SELECT name as tag_name, type as tag_type
        FROM system.columns 
        WHERE  table = 'dla_s_indv_lbl_dlip_d_all' AND name NOT IN ('data_dt', 'cust_id')  
        """
        return self.client.execute(metadata_sql)

    def execute_query(self, query: str, params: Dict = None) -> List:
        """Run *query* (with optional *params*) against ClickHouse.

        Driver-level errors are logged and re-raised unchanged so callers can
        decide how to recover.
        """
        try:
            result = self.client.execute(query, params)
        except errors.Error as e:
            logger.error(f"Query execution failed: {e}")
            raise
        return result

    def _classify_tag_type(self, clickhouse_type: str) -> str:
        """Map a ClickHouse column type string to a coarse tag category.

        Returns one of '数值型' (numeric), '日期型' (date/datetime),
        '代码型' (string/code) or '其他' (other). Matching is case-insensitive
        substring search, checked in priority order: numeric beats date beats
        string — mirroring the original elif chain (so 'DateTime' is 日期型,
        'Int32' is 数值型 even inside Nullable(...)).
        """
        lowered = clickhouse_type.lower()
        rules = (
            ('数值型', ('int', 'float', 'decimal')),
            ('日期型', ('date',)),
            ('代码型', ('string',)),
        )
        for category, keywords in rules:
            if any(keyword in lowered for keyword in keywords):
                return category
        return '其他'

    def _calculate_bucket_ratios(self, tag_name: str, tag_type: str, buckets: List[str], group_sql: str, total_sql: str,
                                 bucket_type: str) -> List[Dict]:
        """Compute each bucket's share in the target group vs. the total
        population and the resulting TGI (= group_ratio / total_ratio * 100).

        Args:
            tag_name: column being bucketed (sourced from system.columns, so
                treated as trusted for SQL interpolation).
            tag_type: '日期型' selects the date-interval path; otherwise numeric
                parsing is used when bucket_type == 'numeric'.
            buckets: 'low~high' interval strings (numeric/date) or literal
                values (categorical). The LAST interval is closed on both ends
                so the maximum value is included.
            group_sql / total_sql: sub-queries selecting the two populations.
            bucket_type: 'numeric' or 'categorical'.

        Returns:
            One result dict per bucket with data in both populations; buckets
            that fail to parse or whose query fails are logged and skipped.
        """
        results = []

        # Denominators: total row counts of each population.
        group_total_query = f"SELECT count(*) FROM ({group_sql})"
        total_total_query = f"SELECT count(*) FROM ({total_sql})"

        group_total = self.execute_query(group_total_query)[0][0]
        total_total = self.execute_query(total_total_query)[0][0]

        for idx, bucket in enumerate(buckets):
            try:
                if bucket_type == 'numeric':
                    parts = bucket.split('~')
                    if len(parts) != 2:
                        continue  # malformed interval label

                    low_str, high_str = parts
                    is_last = idx == len(buckets) - 1

                    if tag_type == "日期型":
                        low_date = pd.to_datetime(low_str)
                        high_date = pd.to_datetime(high_str)

                        if low_date == high_date:
                            # Degenerate interval: exact match.
                            condition = f"{tag_name} = toDateTime('{low_str}')"
                        elif is_last:
                            # Closed last interval so the max value is counted.
                            condition = f"{tag_name} >= toDateTime('{low_str}') AND {tag_name} <= toDateTime('{high_str}')"
                        else:
                            condition = f"{tag_name} >= toDateTime('{low_str}') AND {tag_name} < toDateTime('{high_str}')"
                    else:
                        # Numeric bounds. (Fixed: was a bare `except:`.)
                        try:
                            low = float(low_str)
                            high = float(high_str)
                        except ValueError:
                            # Bounds are neither dates nor numbers — skip bucket.
                            continue

                        if abs(low - high) < 1e-9:  # float tolerance: degenerate interval
                            condition = f"{tag_name} = {low}"
                        elif is_last:
                            condition = f"{tag_name} >= {low} AND {tag_name} <= {high}"
                        else:
                            condition = f"{tag_name} >= {low} AND {tag_name} < {high}"
                else:
                    # Categorical: match the literal value. Escape single quotes
                    # so values containing ' cannot break (or inject into) the
                    # generated SQL. (Fixed: values were interpolated raw.)
                    safe_bucket = bucket.replace("'", "\\'")
                    condition = f"{tag_name} = '{safe_bucket}'"

                # Bucket size within the target group.
                group_count_query = f"""
                SELECT count(*) 
                FROM ({group_sql}) 
                WHERE {condition}
                """
                group_count = self.execute_query(group_count_query)[0][0]

                # Bucket size within the total population.
                total_count_query = f"""
                SELECT count(*) 
                FROM ({total_sql}) 
                WHERE {condition}
                """
                total_count = self.execute_query(total_count_query)[0][0]

                # Guard all denominators before dividing.
                if group_total > 0 and total_total > 0 and total_count > 0:
                    group_ratio = group_count / group_total
                    total_ratio = total_count / total_total
                    tgi = (group_ratio / total_ratio) * 100

                    results.append({
                        'tag_name': tag_name,
                        'bucket': bucket,
                        'tgi': tgi,
                        'group_ratio': group_ratio,
                        'total_ratio': total_ratio,
                        'group_count': group_count,
                        'total_count': total_count
                    })

            except Exception as e:
                logger.warning(f"计算分层 {bucket} 失败: {e}")
                continue

        return results

    def _create_numeric_buckets(self, min_val, q_0125, q_025, q_0375, q_05, q_0625, q_075, q_0875, max_val) -> \
            List[str]:
        """创建数值分桶，处理相邻分位数相同的情况"""
        # 将所有边界值按顺序放入列表
        boundaries = [
            min_val,
            q_0125,
            q_025,
            q_0375,
            q_05,
            q_0625,
            q_075,
            q_0875,
            max_val
        ]

        # 去除相邻重复值
        unique_boundaries = []
        prev = None
        for val in boundaries:
            if prev is None or val != prev:
                unique_boundaries.append(val)
                prev = val

        # 如果去重后只剩1个值（所有值都相同）
        if len(unique_boundaries) == 1:
            # 根据类型选择格式化方式
            if isinstance(unique_boundaries[0], np.datetime64):
                date_str = pd.to_datetime(unique_boundaries[0]).strftime('%Y-%m-%d')
                return [f"{date_str}~{date_str}"]
            else:
                return [f"{unique_boundaries[0]:.2f}~{unique_boundaries[0]:.2f}"]

        # 创建区间
        buckets = []
        for i in range(len(unique_boundaries) - 1):
            lower = unique_boundaries[i]
            upper = unique_boundaries[i + 1]

            # 跳过无效区间（下界>=上界）
            if lower >= upper:
                continue

            # 根据类型选择格式化方式
            if isinstance(lower, np.datetime64) and isinstance(upper, np.datetime64):
                # 日期时间类型
                lower_str = pd.to_datetime(lower).strftime('%Y-%m-%d')
                upper_str = pd.to_datetime(upper).strftime('%Y-%m-%d')
                buckets.append(f"{lower_str}~{upper_str}")
            else:
                # 数值类型
                buckets.append(f"{lower:.2f}~{upper:.2f}")

        return buckets

    def process_numeric_tag(self, tag_name: str, tag_type: str, group_sql: str, total_sql: str) -> List[Dict]:
        """Process a numeric or date tag: octile (8-quantile) bucketing + TGI.

        Computes min/max and the 7 inner octile boundaries of *tag_name* in a
        single scan, builds interval buckets from them, then delegates to
        _calculate_bucket_ratios for the per-bucket TGI rows.

        NOTE(review): the original comment claimed boundaries come from the
        total population, but the query runs over group_sql — confirm which
        population is intended.
        """
        logger.info(f"处理数值型标签: {tag_name}")

        # 1. Min/max + octiles in one pass. (Fixed: the outer SELECT list
        # previously ended with a trailing comma before FROM, which older
        # ClickHouse versions reject as a syntax error.)
        quantile_query = f"""
        SELECT 
            min_val,
            quantiles[1] AS q_0125,  -- 0.125 分位数
            quantiles[2] AS q_025,   -- 0.25 分位数
            quantiles[3] AS q_0375,
            quantiles[4] AS q_05,    -- 中位数 (0.5)
            quantiles[5] AS q_0625,
            quantiles[6] AS q_075,
            quantiles[7] AS q_0875,
            max_val
        FROM (
            SELECT 
                min({tag_name}) AS min_val,
                quantiles(0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875)({tag_name}) AS quantiles,
                max({tag_name}) AS max_val
            FROM ({group_sql})
            WHERE {tag_name} IS NOT NULL
        )
        """
        try:
            quantile_result = self.execute_query(quantile_query)
            if not quantile_result:
                return []
        except Exception as e:
            # Fixed: previously logged a bare "异常跳过" with no context.
            logger.error(f"标签 {tag_name} 分位数查询异常，跳过: {e}")
            return []

        # Renamed q025 → q_025 for consistency with the other boundary names.
        min_val, q_0125, q_025, q_0375, q_05, q_0625, q_075, q_0875, max_val = quantile_result[0]

        # 2. Build interval buckets (duplicate boundaries collapse inside).
        buckets = self._create_numeric_buckets(min_val, q_0125, q_025, q_0375, q_05, q_0625, q_075, q_0875, max_val)
        logger.info(f" {tag_name} 分桶成功")

        # 3. Per-bucket ratios and TGI across group vs. total population.
        return self._calculate_bucket_ratios(tag_name, tag_type, buckets, group_sql, total_sql, 'numeric')

    def get_global_top5_tgi(self, all_results: List[Dict]) -> List[Dict]:
        """Return the 5 entries with the highest 'tgi' across all tags.

        Fewer than 5 entries are returned when less data is available; an
        empty input yields an empty list. (Fixed: the original indexed
        top5[0] in the log line and raised IndexError on empty input.)
        """
        # Guard: nothing to rank.
        if not all_results:
            logger.warning("没有可用的TGI结果，返回空的TOP5")
            return []

        # Sort by TGI descending and keep the top 5.
        sorted_results = sorted(all_results, key=lambda x: x['tgi'], reverse=True)
        top5 = sorted_results[:5]

        logger.info(f"全局TOP5 TGI范围: {top5[0]['tgi']:.2f} - {top5[-1]['tgi']:.2f}")

        return top5

    def parallel_calculate_tgi(self, sql_params: Dict, max_workers: int = 10) -> Dict[str, Any]:
        """Parallel TGI computation — global TOP5 mode.

        Fans the per-tag work out over a thread pool, merges every bucket row
        and reduces them to the 5 highest-TGI layers across all tags.
        """
        tags = self.get_tag_metadata()
        group_sql = sql_params['sql_info']['group_sql']
        total_sql = sql_params['sql_info']['total_sql']

        def handle_one(tag_row):
            # tag_row is a (column name, ClickHouse type string) pair.
            name, raw_type = tag_row
            category = self._classify_tag_type(raw_type)
            try:
                if category in ('数值型', '日期型'):
                    return self.process_numeric_tag(name, category, group_sql, total_sql)
                # Categorical tags are currently skipped in this code path.
                logger.info("暂不处理非数值日期类标签")
                return []
            except Exception as e:
                # A failing tag must not sink the whole batch.
                logger.warning(f"并行处理标签 {name} 失败: {e}")
                return []

        with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as pool:
            per_tag = list(pool.map(handle_one, tags))

        # Flatten the per-tag row lists into one result list.
        flat_results = []
        for rows in per_tag:
            flat_results.extend(rows)

        global_top5 = self.get_global_top5_tgi(flat_results)

        return {
            'tag_names': list({row['tag_name'] for row in global_top5}),
            'global_top5': global_top5,
            'calculation_time': datetime.now().isoformat(),
            'total_buckets_processed': len(flat_results)
        }

    def calculate_tgi(self, sql_params: Dict) -> Dict[str, Any]:
        """Main sequential calculation — global TOP5 mode.

        Walks every tag of the wide table, buckets it according to its type,
        collects all bucket/TGI rows and reduces them to the 5 highest-TGI
        layers across all tags.
        """
        logger.info("开始TGI计算流程 - 全局TOP5模式")

        # 1. Unpack the two population sub-queries.
        group_sql = sql_params['sql_info']['group_sql']
        total_sql = sql_params['sql_info']['total_sql']

        # 2. Tag list from the wide table's column metadata.
        tags = self.get_tag_metadata()
        logger.info(f"获取到 {len(tags)} 个标签")

        # 3. Per-tag bucketing; a failing tag is logged and skipped.
        all_results = []
        for name, raw_type in tags:
            category = self._classify_tag_type(raw_type)
            try:
                if category in ('数值型', '日期型'):
                    rows = self.process_numeric_tag(name, category, group_sql, total_sql)
                else:  # code-type and flag-type tags
                    rows = self.process_categorical_tag(name, category, group_sql, total_sql)
            except Exception as e:
                logger.warning(f"标签 {name} 处理失败: {e}")
                continue
            if rows:
                all_results.extend(rows)

        # 4. Rank every layer of every tag by TGI, keep the global top 5.
        global_top5 = self.get_global_top5_tgi(all_results)

        # 5. Deduplicated tag names behind the top-5 layers.
        unique_tags = list({row['tag_name'] for row in global_top5})

        logger.info(f"计算完成，共处理 {len(all_results)} 个分层，返回全局TOP5")

        return {
            'tag_names': unique_tags,
            'global_top5': global_top5,
            'calculation_time': datetime.now().isoformat(),
            'total_buckets_processed': len(all_results)
        }

    def process_categorical_tag(self, tag_name: str, tag_type: str, group_sql: str, total_sql: str) -> List[Dict]:
        """Process code-type / flag-type tags by enumerating distinct values.

        Enumerates the distinct values of *tag_name* in the total population
        (capped at 100 to bound the fan-out) and computes one TGI row per
        value via _calculate_bucket_ratios.
        """
        logger.info(f"处理分类标签: {tag_name}")

        # Distinct enumeration values, capped to avoid pathological cardinality.
        distinct_query = f"""
        SELECT DISTINCT {tag_name}
        FROM ({total_sql})
        WHERE {tag_name} IS NOT NULL
        LIMIT 100  -- 限制枚举值数量
        """

        distinct_results = self.execute_query(distinct_query)
        buckets = [str(row[0]) for row in distinct_results]

        if not buckets:
            return []

        # Fixed: the original call omitted the required tag_type argument,
        # raising TypeError on every categorical tag.
        return self._calculate_bucket_ratios(tag_name, tag_type, buckets, group_sql, total_sql, 'categorical')


if __name__ == "__main__":
    t = TGICalculator()

    # Fixed: calculate_tgi / parallel_calculate_tgi read
    # sql_params['sql_info']['group_sql' / 'total_sql']; the original flat
    # {"groupSql": ..., "totalSql": ...} shape raised KeyError('sql_info').
    # TODO: supply real population sub-queries — empty strings still yield
    # invalid SQL like "SELECT count(*) FROM ()".
    sql_params = {
        "sql_info": {
            "group_sql": "",
            "total_sql": ""
        }
    }

    results = t.parallel_calculate_tgi(sql_params)
