import subprocess
import logging
import json
import time
from typing import Dict, List, Optional, Tuple
from dataclasses import dataclass, asdict
from pathlib import Path
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
import threading
import re

# Environment variable setup.
# NOTE(review): PGGSSENCMODE presumably disables libpq GSSAPI encryption for any
# Postgres clients in this process — confirm against the deployment environment.
os.environ['PGGSSENCMODE'] = 'disable'
os.environ['HADOOP_CONF_DIR'] = '/opt/TDP/spark-3.2.2-bin-spark-3.2.2-hadoop-3.3.2/conf'

# Configure root logging: timestamped INFO-level messages to the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler()
    ]
)

# Thread lock serializing writes to the shared error-output file across workers.
file_lock = threading.Lock()


@dataclass
class SparkConfig:
    """Spark launch configuration for running spark-sql as a subprocess."""
    spark_home: str = "/usr/hdp/3.1.5.0-152/spark3/"
    master: str = "yarn"
    num_executors: int = 4
    executor_cores: int = 2
    executor_memory: str = "5G"
    driver_cores: int = 1
    database: str = "dtsw_pro"
    max_result_size: str = "2g"

    def get_spark_command(self, sql: str) -> List[str]:
        """Build the spark-sql command line that executes ``sql``.

        Args:
            sql: SQL statement to run (passed via ``-e``).

        Returns:
            Argument list suitable for ``subprocess.run`` (shell=False).
        """
        return [
            f"{self.spark_home}/bin/spark-sql",
            "--master", self.master,
            # FIX: these two were f-strings with no placeholders (ruff F541);
            # plain literals are equivalent.
            "--conf", "spark.driver.extraJavaOptions=-XX:+UseG1GC",
            "--conf", "spark.executor.extraJavaOptions=-XX:+UseG1GC",
            "--conf", f"spark.driver.maxResultSize={self.max_result_size}",
            "--num-executors", str(self.num_executors),
            "--executor-cores", str(self.executor_cores),
            "--executor-memory", self.executor_memory,
            "--driver-cores", str(self.driver_cores),
            "--database", self.database,
            # Print a header row so query output can be parsed into dicts.
            "--hiveconf", "hive.cli.print.header=true",
            "-e", sql
        ]


@dataclass
class AnalysisConfig:
    """Settings for one analysis run: input/output paths, filter and retry policy."""
    table_file: str = "/app/zhaoxc/python/ConnectAndExec/nulltablename.txt"  # one table name per line
    condition: str = "p_date='2025-07-13'"  # WHERE-clause filter applied to every query
    output_file: str = "/app/zhaoxc/python/ConnectAndExec/hive_data_quality_analysis.txt"  # report destination
    error_output_file: str = "/app/zhaoxc/python/ConnectAndExec/query_errors.txt"  # failed-SQL log
    max_retries: int = 3  # retry attempts for a failed or timed-out query
    retry_delay: float = 2.0  # seconds to sleep between retries
    batch_size: int = 10  # number of columns per batched query (NOTE(review): not referenced in this file)


class HiveDataAnalyzer:
    """Hive table data-quality analyzer.

    Runs SQL through the spark-sql CLI (command built by ``SparkConfig``) and
    aggregates per-table quality metrics: row count, per-column null ratios,
    min/max/distinct statistics, and a 0-100 quality score derived from the
    average null ratio.
    """

    def __init__(self, spark_config: "SparkConfig", analysis_config: "AnalysisConfig"):
        """
        Args:
            spark_config: Settings used to build spark-sql command lines.
            analysis_config: Run settings (paths, retry policy, filter condition).
        """
        self.spark_config = spark_config
        self.config = analysis_config
        self.logger = logging.getLogger(__name__)
        # Patterns recognizing Spark/Hive log lines that leak into stdout,
        # so they can be filtered out of query results.
        self.log_patterns = [
            # Matches format: 25/08/21 16:05:57 WARN Utils: ...
            re.compile(r'^\d{2}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} (INFO|WARN|ERROR|DEBUG|TRACE)'),
            # Matches format: 2025-08-21T16:06:28,519 INFO [main] ...
            re.compile(r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2},\d{3} (INFO|WARN|ERROR|DEBUG|TRACE)')
        ]

    def is_log_line(self, line: str) -> bool:
        """Return True if ``line`` looks like a Spark/Hive log line, not data.

        FIX: this method was called by execute_spark_sql_query but never
        defined, so every query raised AttributeError.
        """
        return any(pattern.match(line) for pattern in self.log_patterns)

    @staticmethod
    def _safe_int(value, default: int = 0) -> int:
        """Convert a spark-sql output cell to int; 'NULL'/None/garbage -> default.

        FIX: bare ``int(...)`` raised ValueError when an aggregate returned the
        literal string 'NULL'.
        """
        try:
            return int(value)
        except (TypeError, ValueError):
            return default

    def _parse_query_output(self, stdout: str) -> List[Dict]:
        """Parse tab-separated spark-sql stdout into a list of row dicts.

        After dropping empty and log lines, the first remaining line is the
        header row (emitted because hive.cli.print.header=true); every later
        line becomes a dict keyed by those headers.
        """
        rows = [
            line.strip()
            for line in stdout.strip().split('\n')
            if line.strip() and not self.is_log_line(line)
        ]
        if len(rows) < 2:  # need at least a header plus one data row
            return []
        headers = rows[0].split('\t')
        # FIX: the original zipped each data line with itself, producing
        # {value: value} dicts so later key lookups always missed; the keys
        # must come from the header row.
        return [dict(zip(headers, row.split('\t'))) for row in rows[1:]]

    def execute_spark_sql_query(self, sql: str, retries: Optional[int] = None) -> List[Dict]:
        """Execute a Spark SQL query with retry support.

        Args:
            sql: SQL statement to execute.
            retries: Retry count; defaults to ``config.max_retries``.

        Returns:
            Query result rows as a list of dicts (empty list on failure).
        """
        if retries is None:
            retries = self.config.max_retries

        for attempt in range(retries + 1):
            try:
                self.logger.debug(f"执行Spark SQL查询 (尝试 {attempt + 1}/{retries + 1}): {sql[:100]}...")

                spark_command = self.spark_config.get_spark_command(sql)
                result = subprocess.run(
                    spark_command,
                    capture_output=True,
                    text=True,
                    check=True,
                    timeout=300  # 5-minute timeout per attempt
                )
                return self._parse_query_output(result.stdout)

            except subprocess.TimeoutExpired:
                self.logger.warning(f"查询超时 (尝试 {attempt + 1}/{retries + 1})")
                if attempt < retries:
                    time.sleep(self.config.retry_delay)
                    continue
                self._write_error_to_file(sql, "查询超时")
                return []

            except subprocess.CalledProcessError as e:
                self.logger.error(f"Spark SQL查询失败 (尝试 {attempt + 1}/{retries + 1}): {e.stderr}")
                if attempt < retries:
                    time.sleep(self.config.retry_delay)
                    continue
                self._write_error_to_file(sql, e.stderr)
                return []

            except Exception as e:
                # Unexpected errors are not retried: record once and give up.
                self.logger.error(f"Spark SQL执行过程中发生意外错误: {e}")
                self._write_error_to_file(sql, str(e))
                return []

        return []

    def _write_error_to_file(self, sql: str, error_msg: str):
        """Append a failed SQL + error message to the error file (thread-safe)."""
        try:
            with file_lock:
                with open(self.config.error_output_file, "a", encoding="utf-8") as f:
                    f.write(f"-执行SQL出错: {sql}\n错误信息: {error_msg}\n\n")
        except Exception as write_error:
            self.logger.error(f"写入错误信息到文件失败: {write_error}")

    def get_table_columns(self, table: str) -> List[str]:
        """Return the column names of a Hive table via DESCRIBE.

        Skips '#'-prefixed metadata/partition-section rows and any repeated
        'col_name' header rows that DESCRIBE emits alongside real columns.
        """
        desc_query = f"DESCRIBE {table}"
        hive_desc = self.execute_spark_sql_query(desc_query)

        hive_columns = []
        for row in hive_desc:
            raw_line = '\t'.join([str(v) for v in row.values()])
            if raw_line.strip().startswith('#') or raw_line.strip().startswith('col_name'):
                continue
            parts = raw_line.strip().split('\t')
            if parts and parts[0].strip():
                hive_columns.append(parts[0].strip())

        return hive_columns

    def get_table_row_count(self, table: str, condition: str) -> int:
        """Return COUNT(*) for ``table`` under ``condition`` (0 on failure)."""
        count_query = f"SELECT COUNT(*) as total_count FROM {table} WHERE {condition}"
        count_results = self.execute_spark_sql_query(count_query)

        if not count_results:
            return 0

        return self._safe_int(count_results[0].get('total_count', 0))

    def get_null_ratios_batch(self, table: str, columns: List[str], condition: str, total_rows: int) -> Dict[str, float]:
        """Compute every column's NULL ratio with a single batched query.

        Args:
            table: Table name.
            columns: Columns to inspect.
            condition: WHERE-clause filter.
            total_rows: Pre-computed row count used as the denominator.

        Returns:
            Mapping of column name -> null ratio in [0, 1].
        """
        if not columns or total_rows == 0:
            # FIX: use 0.0 so the value type matches the declared Dict[str, float].
            return {column: 0.0 for column in columns}

        # One SELECT with a SUM(CASE ...) per column instead of a query per column.
        null_count_queries = [
            f"SUM(CASE WHEN {column} IS NULL THEN 1 ELSE 0 END) AS {column}_null_count"
            for column in columns
        ]

        null_query = f"SELECT {', '.join(null_count_queries)} FROM {table} WHERE {condition}"
        null_results = self.execute_spark_sql_query(null_query)

        null_ratios = {}
        if null_results:
            for column in columns:
                null_count = self._safe_int(null_results[0].get(f"{column}_null_count", 0))
                null_ratios[column] = null_count / total_rows

        return null_ratios

    def get_column_stats_batch(self, table: str, columns: List[str], condition: str) -> Dict[str, Dict]:
        """Fetch MIN/MAX/COUNT(DISTINCT) for every column in one batched query.

        Returns:
            Mapping of column name -> {'min', 'max', 'distinct_count'}.
        """
        if not columns:
            return {}

        stats_queries = []
        for column in columns:
            stats_queries.extend([
                f"MIN({column}) AS {column}_min",
                f"MAX({column}) AS {column}_max",
                f"COUNT(DISTINCT {column}) AS {column}_distinct_count"
            ])

        stats_query = f"SELECT {', '.join(stats_queries)} FROM {table} WHERE {condition}"
        stats_results = self.execute_spark_sql_query(stats_query)

        column_stats = {}
        if stats_results:
            for column in columns:
                column_stats[column] = {
                    'min': stats_results[0].get(f"{column}_min"),
                    'max': stats_results[0].get(f"{column}_max"),
                    'distinct_count': self._safe_int(stats_results[0].get(f"{column}_distinct_count", 0))
                }

        return column_stats

    def analyze_table_data_quality(self, table: str, condition: Optional[str] = None) -> Dict:
        """Analyze the data quality of one table.

        Args:
            table: Table name.
            condition: WHERE-clause filter; defaults to ``config.condition``.

        Returns:
            Dict with row count, columns, null ratios, column stats, high-null
            columns, a 0-100 quality score, and elapsed analysis time (seconds).
        """
        if condition is None:
            condition = self.config.condition

        start_time = time.time()
        result = {
            'table': table,
            'condition': condition,
            'total_rows': 0,
            'columns': [],
            'null_ratios': {},
            'column_stats': {},
            'high_null_columns': [],
            'quality_score': 0.0,
            'analysis_time': 0.0
        }

        try:
            columns = self.get_table_columns(table)
            if not columns:
                self.logger.warning(f"表 {table} 没有找到有效字段")
                return result

            result['columns'] = columns

            total_rows = self.get_table_row_count(table, condition)
            result['total_rows'] = total_rows

            if total_rows == 0:
                self.logger.info(f"表 {table} 在条件 {condition} 下没有数据")
                return result

            null_ratios = self.get_null_ratios_batch(table, columns, condition, total_rows)
            result['null_ratios'] = null_ratios

            result['column_stats'] = self.get_column_stats_batch(table, columns, condition)

            # Flag columns whose null ratio exceeds 50%.
            result['high_null_columns'] = [col for col, ratio in null_ratios.items() if ratio > 0.5]

            # Quality score: 100 * (1 - average null ratio), floored at 0.
            if null_ratios:
                avg_null_ratio = sum(null_ratios.values()) / len(null_ratios)
                result['quality_score'] = max(0, 1 - avg_null_ratio) * 100

            return result

        except Exception as e:
            self.logger.error(f"分析表 {table} 数据质量时发生错误: {e}")
            self._write_error_to_file(f"分析表 {table} 数据质量", str(e))
            return result
        finally:
            # FIX: the original left an absolute epoch timestamp in
            # 'analysis_time' on early-return/error paths; always record the
            # elapsed seconds instead (mutating the dict being returned).
            result['analysis_time'] = time.time() - start_time

    def analyze_multiple_tables(self, tables: List[str], max_workers: int = 3) -> List[Dict]:
        """Analyze several tables in parallel with a thread pool.

        Args:
            tables: Table names to analyze.
            max_workers: Thread-pool size.

        Returns:
            Per-table result dicts, in completion order.
        """
        all_results = []

        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            # Submit one analysis task per table.
            future_to_table = {
                executor.submit(self.analyze_table_data_quality, table): table
                for table in tables
            }

            # Collect and log results as they finish.
            for future in as_completed(future_to_table):
                table = future_to_table[future]
                try:
                    result = future.result()
                    all_results.append(result)

                    self.logger.info(f"表 {table} 分析完成:")
                    self.logger.info(f"  总行数: {result['total_rows']:,}")
                    self.logger.info(f"  字段数量: {len(result['columns'])}")
                    self.logger.info(f"  数据质量分数: {result['quality_score']:.2f}%")
                    self.logger.info(f"  分析耗时: {result.get('analysis_time', 0):.2f}秒")

                    if result['high_null_columns']:
                        self.logger.warning(f"  高空值率字段: {', '.join(result['high_null_columns'])}")

                except Exception as e:
                    self.logger.error(f"分析表 {table} 时发生错误: {e}")

        return all_results

    def write_analysis_results(self, results: List[Dict], output_file: Optional[str] = None):
        """Write the analysis results as a text report plus a JSON sidecar.

        Args:
            results: Per-table result dicts from analyze_table_data_quality.
            output_file: Report path; defaults to ``config.output_file``. The
                JSON copy is written next to it with a .json extension.
        """
        if output_file is None:
            output_file = self.config.output_file

        try:
            with open(output_file, "w", encoding="utf-8") as f:
                f.write("=== Hive表数据质量分析报告 ===\n\n")

                # Also dump the full results as JSON for downstream tooling.
                json_file = output_file.replace('.txt', '.json')
                with open(json_file, "w", encoding="utf-8") as json_f:
                    json.dump(results, json_f, ensure_ascii=False, indent=2)

                for result in results:
                    f.write(f"表名: {result['table']}\n")
                    f.write(f"查询条件: {result['condition']}\n")
                    f.write(f"总行数: {result['total_rows']:,}\n")
                    f.write(f"字段数量: {len(result['columns'])}\n")
                    f.write(f"数据质量分数: {result['quality_score']:.2f}%\n")
                    f.write(f"分析耗时: {result.get('analysis_time', 0):.2f}秒\n")

                    if result['high_null_columns']:
                        f.write(f"高空值率字段 (>50%): {', '.join(result['high_null_columns'])}\n")

                    f.write("\n字段空值率详情:\n")
                    for column, ratio in result['null_ratios'].items():
                        f.write(f"  {column}: {ratio * 100:.2f}%\n")

                    f.write("\n字段统计信息详情:\n")
                    for column, stats in result['column_stats'].items():
                        f.write(f"  {column}:\n")
                        f.write(f"    最小值: {stats['min']}\n")
                        f.write(f"    最大值: {stats['max']}\n")
                        f.write(f"    唯一值数量: {stats['distinct_count']:,}\n")

                    f.write("\n" + "="*50 + "\n\n")

        except Exception as e:
            self.logger.error(f"写入分析结果到文件时出错: {e}")


def main():
    """Script entry point: read table names, run the analysis, write the report."""
    spark_config = SparkConfig()
    analysis_config = AnalysisConfig()

    analyzer = HiveDataAnalyzer(spark_config, analysis_config)

    try:
        # Load the list of tables to analyze (one name per line, blanks ignored).
        table_path = Path(analysis_config.table_file)
        if not table_path.exists():
            analyzer.logger.error(f"表名文件不存在: {table_path}")
            return

        table_names = []
        with open(table_path, "r", encoding="utf-8") as handle:
            for raw_line in handle:
                name = raw_line.strip()
                if name:
                    table_names.append(name)

        if not table_names:
            analyzer.logger.warning("没有找到要分析的表名")
            return

        analyzer.logger.info(f"开始分析 {len(table_names)} 个表的数据质量...")

        # Analyze every table in parallel, then persist the detailed report.
        results = analyzer.analyze_multiple_tables(table_names, max_workers=3)
        analyzer.write_analysis_results(results)
        analyzer.logger.info(f"分析完成，结果已写入 {analysis_config.output_file}")

        # Emit a short summary to the log.
        table_count = len(results)
        row_total = sum(item['total_rows'] for item in results)
        if table_count > 0:
            mean_score = sum(item['quality_score'] for item in results) / table_count
        else:
            mean_score = 0

        analyzer.logger.info("\n=== 分析汇总 ===")
        analyzer.logger.info(f"分析表数量: {table_count}")
        analyzer.logger.info(f"总数据行数: {row_total:,}")
        analyzer.logger.info(f"平均数据质量分数: {mean_score:.2f}%")

    except Exception as e:
        analyzer.logger.error(f"程序运行出错: {e}")


# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()