#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
从 Hive 数据库过滤数据
将 Hive 表中的数据转换为 hive_filter_data 中的目标 TXT 文件
生成 user.txt、call.txt、tv.txt（与 filter_data.py 输出格式相同）
"""

import csv
import os
import subprocess
import tempfile
from typing import Dict, List, Tuple

# ============================================
# 配置参数
# ============================================

class HiveFilterConfig:
    """Configuration for filtering data out of Hive."""

    # --- Output location ---
    BASE_OUTPUT_DIR = "./hive_filter_data"  # base directory for generated files
    USER_COUNT = 100                        # user count (used as sub-directory name)

    # --- Hive connection ---
    USE_DOCKER = True                     # run beeline inside a Docker container?
    DOCKER_CONTAINER = "hive_cmcc"        # Hive Docker container name (or container ID)
    HIVE_URL = "jdbc:hive2://localhost:10000"  # Hive JDBC connection URL
    HIVE_DATABASE = "antifraud_test"      # database name

    # --- Output file names ---
    OUTPUT_USER_FILE = "user.txt"
    OUTPUT_CALL_FILE = "call.txt"
    OUTPUT_TV_FILE = "tv.txt"

    # --- Output format (kept identical to filter_data.py) ---
    OUTPUT_DELIMITER = '€€'         # field separator
    OUTPUT_WITH_HEADER = False      # emit a header row?

    @property
    def OUTPUT_DIR(self):
        """Full output directory: BASE_OUTPUT_DIR/<USER_COUNT>."""
        return os.path.join(self.BASE_OUTPUT_DIR, str(self.USER_COUNT))


# ============================================
# 工具函数
# ============================================

def safe_get(row: List, index: int, default: str = '') -> str:
    """Fetch ``row[index]`` as a stripped string, falling back to *default*.

    Args:
        row: data row (list of cells); negative indices are honored
        index: position to read
        default: value returned when the cell is missing or None

    Returns:
        The stripped string form of the cell, or *default*.
    """
    try:
        raw = row[index]
    except (IndexError, ValueError):
        return default
    return default if raw is None else str(raw).strip()


# Beeline/Hadoop noise lines to drop from query output.
BEELINE_LOG_PREFIXES = (
    'INFO', 'WARN', 'SLF4J', '0: jdbc:',
    'Connecting to', 'Connected to', '[WARN]', '[INFO]',
)
BEELINE_LOG_MARKERS = (
    'No such file or directory',
    'Unable to load native-hadoop',
    'Class path contains multiple',
)


def parse_beeline_csv(stdout: str) -> List[List[str]]:
    """Parse beeline ``csv2`` output into rows, skipping log/noise lines.

    Uses the csv module so quoted cells containing commas or quotes
    (csv2 quotes such fields) are split correctly — a plain
    ``line.split(',')`` would break them apart.

    Args:
        stdout: raw beeline standard output.

    Returns:
        Parsed data rows; blank lines, log lines, and all-empty rows
        are dropped.
    """
    data: List[List[str]] = []
    for line in stdout.strip().split('\n'):
        line = line.strip()
        if not line:
            continue
        # Skip beeline log output and prompts
        if line.startswith(BEELINE_LOG_PREFIXES):
            continue
        if any(marker in line for marker in BEELINE_LOG_MARKERS):
            continue
        row = next(csv.reader([line]))
        # Skip rows with no non-blank cells
        if row and any(cell.strip() for cell in row):
            data.append(row)
    return data


def execute_hive_query(sql: str, config: HiveFilterConfig) -> List[List[str]]:
    """Execute a Hive SQL query through beeline and return the parsed rows.

    Two modes are supported:
    1. USE_DOCKER=True: run beeline inside the configured Docker container,
       passing the SQL inline via ``-e`` (avoids file-permission issues).
    2. USE_DOCKER=False: run the host's beeline with a temporary SQL file.

    Args:
        sql: SQL statement to execute (``USE <db>`` is prepended).
        config: connection/output configuration.

    Returns:
        Query result rows (lists of string cells); empty list on any
        failure (non-zero exit, timeout, or unexpected error).
    """
    beeline_opts = [
        '-u', config.HIVE_URL,
        '--silent=true',
        '--outputformat=csv2',
        '--showHeader=false',
    ]

    sql_file = None
    try:
        if config.USE_DOCKER:
            # Docker mode: pass the full SQL (including USE DATABASE) inline.
            full_sql = f"USE {config.HIVE_DATABASE}; {sql}"
            cmd = (
                ['docker', 'exec', config.DOCKER_CONTAINER, 'beeline']
                + beeline_opts
                + ['-e', full_sql]
            )
        else:
            # Local mode: the host's beeline reads the SQL from a temp file.
            with tempfile.NamedTemporaryFile(mode='w', suffix='.sql', delete=False) as f:
                f.write(f"USE {config.HIVE_DATABASE};\n")
                f.write(sql)
                sql_file = f.name
            cmd = ['beeline'] + beeline_opts + ['-f', sql_file]

        result = subprocess.run(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True,
            timeout=300  # 5-minute timeout
        )

        if result.returncode != 0:
            print(f"错误: Hive 查询失败")
            print(f"错误信息: {result.stderr}")
            return []

        return parse_beeline_csv(result.stdout)

    except subprocess.TimeoutExpired:
        print(f"错误: 查询超时（超过 5 分钟）")
        return []
    except Exception as e:
        # Best-effort: report and return empty rather than crash the pipeline.
        print(f"错误: {e}")
        return []
    finally:
        # Remove the local temp file (only created in local mode)
        if sql_file and os.path.exists(sql_file):
            os.unlink(sql_file)


# ============================================
# 数据过滤类
# ============================================

class HiveDataFilter:
    """Filter data out of Hive into flat TXT files (user.txt, call.txt, tv.txt)."""

    def __init__(self, config: HiveFilterConfig):
        # Accept either the config class itself or an instance; normalize to
        # an instance so the OUTPUT_DIR property resolves correctly.
        if isinstance(config, type):
            self.config = config()
        else:
            self.config = config

        # Intermediate lookup tables keyed by MSISDN, filled by the _load_*
        # steps and joined into tv.txt rows later.
        self.flux_data: Dict[str, List] = {}  # MSISDN -> traffic (flux) row
        self.package_data: Dict[str, List] = {}  # MSISDN -> package row

    def run(self):
        """Run the full pipeline: check connection, load lookups, write files."""
        # Create the output directory
        os.makedirs(self.config.OUTPUT_DIR, exist_ok=True)

        # Bail out early if Hive is unreachable
        if not self._check_hive_connection():
            return

        # Load auxiliary data (traffic and package tables) into memory
        print("步骤1: 从 Hive 加载辅助数据...")
        self._load_flux_data()
        self._load_package_data()

        # Generate user.txt
        print("步骤2: 生成 user.txt...")
        self._generate_user_file()

        # Generate call.txt
        print("步骤3: 生成 call.txt...")
        self._generate_call_file()

        # Generate tv.txt
        print("步骤4: 生成 tv.txt...")
        self._generate_tv_file()

        print()
        print("=" * 60)
        print("数据过滤完成！")
        print(f"输出目录: {self.config.OUTPUT_DIR}")
        print("=" * 60)

    def _check_hive_connection(self) -> bool:
        """Check the Hive connection by listing tables; print hints on failure."""
        print("检查 Hive 连接...")
        sql = "SHOW TABLES;"
        result = execute_hive_query(sql, self.config)

        if not result:
            print(f"错误: 无法连接到 Hive")
            print(f"请确保：")
            if self.config.USE_DOCKER:
                print(f"  1. Docker 容器 '{self.config.DOCKER_CONTAINER}' 正在运行")
                print(f"     检查: docker ps | grep {self.config.DOCKER_CONTAINER}")
                print(f"  2. HiveServer2 在容器中运行")
                print(f"  3. 数据库 {self.config.HIVE_DATABASE} 已创建")
                print(f"  4. 外部表已创建")
            else:
                print(f"  1. HiveServer2 正在运行 ({self.config.HIVE_URL})")
                print(f"  2. 数据库 {self.config.HIVE_DATABASE} 已创建")
                print(f"  3. 外部表已创建")
            return False

        print(f"  ✓ Hive 连接正常，找到 {len(result)} 个表")
        return True

    def _load_flux_data(self):
        """Load per-MSISDN traffic (flux) rows into self.flux_data.

        NOTE(review): the query selects FLUX_FE while the tv.txt header
        names this field FLUX_FEE — confirm the Hive column is really
        FLUX_FE and not a typo for FLUX_FEE.
        """
        sql = """
        SELECT
            MSISDN,
            ONNET_ALL_FLUX,
            WDAY_ONNET_FLUX,
            NWDAY_ONNET_FLUX,
            ONNET_FLUX_3G,
            ONNET_FLUX_4G,
            TOT_FLUX_5G,
            FLUX_FE,
            FLUX_FEE_4G,
            FLUX_TOT_FEE_5G
        FROM tv_m_cust_single_flux_used;
        """

        data = execute_hive_query(sql, self.config)

        for row in data:
            # Only keep complete rows (all 10 selected columns present)
            if len(row) >= 10:
                msisdn = safe_get(row, 0)
                if msisdn:
                    self.flux_data[msisdn] = row

        print(f"  ✓ 加载流量数据: {len(self.flux_data):,} 条")

    def _load_package_data(self):
        """Load package rows into self.package_data (first package per user wins)."""
        sql = """
        SELECT
            MSISDN,
            CHARGE_PACKAGE_UNIFY_CODE,
            CHARGE_PACKAGE_TYP,
            PACK_MON,
            PACKAGE_5G_ID
        FROM tw_d_is_pack_users_new_used;
        """

        data = execute_hive_query(sql, self.config)

        for row in data:
            if len(row) >= 5:
                msisdn = safe_get(row, 0)
                # Keep only the first package seen for each user
                if msisdn and msisdn not in self.package_data:
                    self.package_data[msisdn] = row

        print(f"  ✓ 加载套餐数据: {len(self.package_data):,} 条")

    def _generate_user_file(self):
        """Generate user.txt.

        Field order:
        MSISDN, USER_ID, NEW_RCN_ID, RCN_DURA, IDTY_NBR, STATIS_YMD, PROV_ID
        """
        output_path = os.path.join(self.config.OUTPUT_DIR, self.config.OUTPUT_USER_FILE)

        sql = """
        SELECT
            MSISDN,
            USER_ID,
            NEW_RCN_ID,
            RCN_DURA,
            IDTY_CODE,
            STATIS_YMD,
            PROV_ID
        FROM ty_m_unreal_person_user_number_data_filter;
        """

        data = execute_hive_query(sql, self.config)
        row_count = 0

        with open(output_path, 'w', encoding='utf-8') as outfile:
            # Write the header row if configured
            if self.config.OUTPUT_WITH_HEADER:
                headers = ['MSISDN', 'USER_ID', 'NEW_RCN_ID', 'RCN_DURA', 'IDTY_NBR', 'STATIS_YMD', 'PROV_ID']
                outfile.write(self.config.OUTPUT_DELIMITER.join(headers) + '\n')

            # Process the data rows
            for row in data:
                if len(row) >= 7:
                    msisdn = safe_get(row, 0)
                    user_id = safe_get(row, 1)
                    new_rcn_id = safe_get(row, 2)
                    rcn_dura = safe_get(row, 3)
                    idty_code = safe_get(row, 4)  # IDTY_CODE is emitted as the IDTY_NBR field
                    statis_ymd = safe_get(row, 5)
                    prov_id = safe_get(row, 6)

                    output_row = [
                        msisdn,
                        user_id,
                        new_rcn_id,
                        rcn_dura,
                        idty_code,
                        statis_ymd,
                        prov_id
                    ]

                    outfile.write(self.config.OUTPUT_DELIMITER.join(output_row) + '\n')
                    row_count += 1

        print(f"  ✓ 生成 {self.config.OUTPUT_USER_FILE}: {row_count:,} 条记录")

    def _generate_call_file(self):
        """Generate call.txt.

        Field order:
        MSISDN, OPP_MSISDN, STATIS_YMD, PROV_ID
        """
        output_path = os.path.join(self.config.OUTPUT_DIR, self.config.OUTPUT_CALL_FILE)

        sql = """
        SELECT
            MSISDN,
            OPP_MSISDN,
            STATIS_YMD,
            PROV_ID
        FROM ty_m_unreal_person_call_data_filter;
        """

        data = execute_hive_query(sql, self.config)
        row_count = 0

        with open(output_path, 'w', encoding='utf-8') as outfile:
            # Write the header row if configured
            if self.config.OUTPUT_WITH_HEADER:
                headers = ['MSISDN', 'OPP_MSISDN', 'STATIS_YMD', 'PROV_ID']
                outfile.write(self.config.OUTPUT_DELIMITER.join(headers) + '\n')

            # Process rows with de-duplication
            seen_calls = set()

            for row in data:
                if len(row) >= 4:
                    msisdn = safe_get(row, 0)
                    opp_msisdn = safe_get(row, 1)
                    statis_ymd = safe_get(row, 2)
                    prov_id = safe_get(row, 3)

                    # De-dup: keep only one record per (caller, callee, day) triple
                    call_key = (msisdn, opp_msisdn, statis_ymd)
                    if call_key in seen_calls:
                        continue

                    seen_calls.add(call_key)

                    output_row = [msisdn, opp_msisdn, statis_ymd, prov_id]
                    outfile.write(self.config.OUTPUT_DELIMITER.join(output_row) + '\n')
                    row_count += 1

        print(f"  ✓ 生成 {self.config.OUTPUT_CALL_FILE}: {row_count:,} 条记录（已去重）")

    def _generate_tv_file(self):
        """Generate tv.txt by joining user base info with flux/package lookups.

        Field order (34 fields):
        USER_ID, MSISDN,
        IS_PRETTY_NUM, PRETTY_NUM_TYP, VIP_CUST_ID, VIP_LVL, AGE_LVL, SEX,
        OCPN_CODE, EDUCAT_DEGREE_CODE, BRAND_ID, RCN_CHNL_TYP,
        IS_CAMP_USER, IS_CAMP_AREA_USER, IS_GROUP_USER, MEMB_TYP, IS_GROUP_KEY_INDV,
        IS_GSM_USER, GSM_USER_LVL, INNET_DURA_LVL_CODE, USER_AREA_BELO,
        ONNET_ALL_FLUX, WDAY_ONNET_FLUX, NWDAY_ONNET_FLUX,
        ONNET_FLUX_3G, ONNET_FLUX_4G, TOT_FLUX_5G,
        FLUX_FEE, FLUX_FEE_4G, FLUX_TOT_FEE_5G,
        CHARGE_PACKAGE_UNIFY_CODE, CHARGE_PACKAGE_TYP, PACK_MON, PACKAGE_5G_ID
        """
        output_path = os.path.join(self.config.OUTPUT_DIR, self.config.OUTPUT_TV_FILE)

        sql = """
        SELECT
            USER_ID,
            MSISDN,
            IS_PRETTY_NUM,
            PRETTY_NUM_TYP,
            VIP_CUST_ID,
            VIP_LVL,
            AGE_LVL,
            SEX,
            OCPN_CODE,
            EDUCAT_DEGREE_CODE,
            BRAND_ID,
            RCN_CHNL_TYP,
            IS_CAMP_USER,
            IS_CAMP_AREA_USER,
            IS_GROUP_USER,
            MEMB_TYP,
            IS_GROUP_KEY_INDV,
            IS_GSM_USER,
            GSM_USER_LVL,
            INNET_DURA_LVL_CODE,
            USER_AREA_BELO
        FROM ty_m_unreal_person_user_number_data_filter;
        """

        data = execute_hive_query(sql, self.config)
        row_count = 0

        with open(output_path, 'w', encoding='utf-8') as outfile:
            # Write the header row if configured
            if self.config.OUTPUT_WITH_HEADER:
                headers = [
                    'USER_ID', 'MSISDN',
                    'IS_PRETTY_NUM', 'PRETTY_NUM_TYP', 'VIP_CUST_ID', 'VIP_LVL',
                    'AGE_LVL', 'SEX', 'OCPN_CODE', 'EDUCAT_DEGREE_CODE',
                    'BRAND_ID', 'RCN_CHNL_TYP', 'IS_CAMP_USER', 'IS_CAMP_AREA_USER',
                    'IS_GROUP_USER', 'MEMB_TYP', 'IS_GROUP_KEY_INDV',
                    'IS_GSM_USER', 'GSM_USER_LVL', 'INNET_DURA_LVL_CODE', 'USER_AREA_BELO',
                    'ONNET_ALL_FLUX', 'WDAY_ONNET_FLUX', 'NWDAY_ONNET_FLUX',
                    'ONNET_FLUX_3G', 'ONNET_FLUX_4G', 'TOT_FLUX_5G',
                    'FLUX_FEE', 'FLUX_FEE_4G', 'FLUX_TOT_FEE_5G',
                    'CHARGE_PACKAGE_UNIFY_CODE', 'CHARGE_PACKAGE_TYP', 'PACK_MON', 'PACKAGE_5G_ID'
                ]
                outfile.write(self.config.OUTPUT_DELIMITER.join(headers) + '\n')

            # Process the data rows
            for row in data:
                if len(row) >= 21:
                    msisdn = safe_get(row, 1)

                    # Fields from the user base-info table (first 21 columns)
                    user_id = safe_get(row, 0)
                    is_pretty_num = safe_get(row, 2)
                    pretty_num_typ = safe_get(row, 3)
                    vip_cust_id = safe_get(row, 4)
                    vip_lvl = safe_get(row, 5)
                    age_lvl = safe_get(row, 6)
                    sex = safe_get(row, 7)
                    ocpn_code = safe_get(row, 8)
                    educat_degree_code = safe_get(row, 9)
                    brand_id = safe_get(row, 10)
                    rcn_chnl_typ = safe_get(row, 11)
                    is_camp_user = safe_get(row, 12)
                    is_camp_area_user = safe_get(row, 13)
                    is_group_user = safe_get(row, 14)
                    memb_typ = safe_get(row, 15)
                    is_group_key_indv = safe_get(row, 16)
                    is_gsm_user = safe_get(row, 17)
                    gsm_user_lvl = safe_get(row, 18)
                    innet_dura_lvl_code = safe_get(row, 19)
                    user_area_belo = safe_get(row, 20)

                    # Fields joined from the traffic (flux) table; '0' when missing
                    flux_row = self.flux_data.get(msisdn, [])
                    onnet_all_flux = safe_get(flux_row, 1, '0')
                    wday_onnet_flux = safe_get(flux_row, 2, '0')
                    nwday_onnet_flux = safe_get(flux_row, 3, '0')
                    onnet_flux_3g = safe_get(flux_row, 4, '0')
                    onnet_flux_4g = safe_get(flux_row, 5, '0')
                    tot_flux_5g = safe_get(flux_row, 6, '0')
                    flux_fee = safe_get(flux_row, 7, '0')
                    flux_fee_4g = safe_get(flux_row, 8, '0')
                    flux_tot_fee_5g = safe_get(flux_row, 9, '0')

                    # Fields joined from the package table; empty when missing
                    package_row = self.package_data.get(msisdn, [])
                    charge_package_unify_code = safe_get(package_row, 1, '')
                    charge_package_typ = safe_get(package_row, 2, '')
                    pack_mon = safe_get(package_row, 3, '')
                    package_5g_id = safe_get(package_row, 4, '')

                    # Write the output row
                    output_row = [
                        user_id, msisdn,
                        is_pretty_num, pretty_num_typ, vip_cust_id, vip_lvl,
                        age_lvl, sex, ocpn_code, educat_degree_code,
                        brand_id, rcn_chnl_typ, is_camp_user, is_camp_area_user,
                        is_group_user, memb_typ, is_group_key_indv,
                        is_gsm_user, gsm_user_lvl, innet_dura_lvl_code, user_area_belo,
                        onnet_all_flux, wday_onnet_flux, nwday_onnet_flux,
                        onnet_flux_3g, onnet_flux_4g, tot_flux_5g,
                        flux_fee, flux_fee_4g, flux_tot_fee_5g,
                        charge_package_unify_code, charge_package_typ, pack_mon, package_5g_id
                    ]

                    outfile.write(self.config.OUTPUT_DELIMITER.join(output_row) + '\n')
                    row_count += 1

        print(f"  ✓ 生成 {self.config.OUTPUT_TV_FILE}: {row_count:,} 条记录")


# ============================================
# 主函数
# ============================================

def main():
    """Script entry point: print a banner, then run the Hive data filter."""
    banner = "=" * 60
    print(banner)
    print("从 Hive 过滤数据")
    print(banner)
    print()

    HiveDataFilter(HiveFilterConfig).run()


if __name__ == "__main__":
    main()
