#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
精简版模拟数据生成脚本 - 为GaussDB表生成测试数据

由于表字段过多导致行大小超限，此版本只插入核心字段

功能:
1. 解析DDL文件获取表结构
2. 只选择核心字段生成模拟数据
3. 连接GaussDB数据库并插入数据
4. 支持批量插入以提高性能

作者: AI Assistant
创建时间: 2024
"""

import psycopg2
import random
import string
import re
from datetime import datetime, timedelta
from typing import List, Dict, Tuple, Any
import logging
from decimal import Decimal

# Configure logging: INFO and above go both to a UTF-8 log file and to the
# console, with a timestamped format shared by all handlers.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('mock_data_generation_minimal.log', encoding='utf-8'),
        logging.StreamHandler()
    ]
)
# Module-level logger used throughout this script.
logger = logging.getLogger(__name__)

class MinimalMockDataGenerator:
    """Generate mock rows for a GaussDB/PostgreSQL table and bulk-insert them.

    Only a whitelisted subset of "core" columns is populated, because the
    full table has so many columns that a complete row would exceed the
    server's row-size limit.  Workflow: parse the DDL file to discover the
    table structure, generate random values matching each selected column's
    type, then insert them via psycopg2 in sub-batches.
    """

    def __init__(self, ddl_file_path: str, db_config: Dict[str, str]):
        """Store configuration only; no parsing or I/O happens here.

        Args:
            ddl_file_path: Path to the CREATE TABLE script to parse.
            db_config: psycopg2 connection parameters with keys
                'host', 'port', 'database', 'user', 'password'.
        """
        self.ddl_file_path = ddl_file_path
        self.db_config = db_config
        self.table_name = None            # set by parse_ddl_file()
        self.all_columns = []             # every column found in the DDL
        self.selected_columns = []        # subset restricted to core_fields
        self.connection = None            # live psycopg2 connection, if any

        # Canned value pools used by the name-based generation rules.
        self.cities = ['北京', '上海', '广州', '深圳', '杭州']
        self.genders = ['M', 'F']
        self.yes_no_values = ['Y', 'N']

        # Core columns to insert (kept small to stay under the row-size limit).
        self.core_fields = [
            'cdhd_usr_id', 'age', 'birth_city', 'gender', 'birth_date',
            'abrd_stdy_cump_prefe_scre', 'abrd_stud', 'acct_opn_time',
            'age_grp', 'alpay_bind_card_cust_acct_cur_dep_mth_day_aver_bal',
            'alpay_bind_card_cust_acct_cur_dep_mth_end_bal', 'alpay_bind_card_cust_acct_term_dep_mth_day_aver_bal',
            'alpay_bind_card_cust_acct_term_dep_mth_end_bal', 'alpay_bind_card_cust_acct_tot_aum_mth_day_aver',
            'alpay_bind_card_cust_acct_tot_aum_mth_end', 'alpay_bind_card_cust_acct_tot_dep_mth_day_aver_bal',
            'alpay_bind_card_cust_acct_tot_dep_mth_end_bal', 'alpay_bind_card_cust_acct_tot_fncg_prod_mkt_vlu_mth_day_aver',
            'alpay_bind_card_cust_acct_tot_fncg_prod_mkt_vlu_mth_end', 'alpay_bind_card_cust_acct_tot_fud_mkt_vlu_mth_day_aver'
        ]
        # Lower-cased set for O(1) membership checks during DDL parsing
        # (the per-column list rebuild was O(fields * columns)).
        self._core_fields_lower = {f.lower() for f in self.core_fields}

    def parse_ddl_file(self) -> None:
        """Parse the DDL file, filling table_name / all_columns / selected_columns.

        Raises:
            ValueError: if no CREATE TABLE statement is found.
        """
        logger.info(f"开始解析DDL文件: {self.ddl_file_path}")

        try:
            with open(self.ddl_file_path, 'r', encoding='utf-8') as file:
                content = file.read()

            # Table name comes from the CREATE TABLE header.
            table_match = re.search(r'CREATE TABLE\s+([\w\.]+)\s*\(', content, re.IGNORECASE)
            if table_match:
                self.table_name = table_match.group(1)
                logger.info(f"找到表名: {self.table_name}")
            else:
                raise ValueError("无法从DDL文件中提取表名")

            # Loose column matcher: a (possibly quoted) name followed by a
            # type that may carry a length/precision and trailing qualifiers.
            column_pattern = r'\s*([\w"]+)\s+(\w+(?:\([^)]+\))?(?:\s+\w+)*)(?:\s+NULL|\s+NOT\s+NULL)?'

            # Table-level clauses that must not be mistaken for columns.
            constraint_keywords = {'PRIMARY', 'UNIQUE', 'CONSTRAINT', 'FOREIGN', 'KEY', 'CHECK'}

            in_table_definition = False

            for line in content.split('\n'):
                line = line.strip()

                # The CREATE TABLE line opens the column list.
                if 'CREATE TABLE' in line.upper():
                    in_table_definition = True
                    continue

                # A WITH (...) clause or a closing parenthesis ends the list.
                if in_table_definition and ('WITH (' in line.upper() or line.startswith(')')):
                    break

                # Parse one column definition (skip blanks and -- comments).
                if in_table_definition and line and not line.startswith('--'):
                    line = line.rstrip(',')  # drop the column separator

                    # Skip table-level constraints that the loose regex above
                    # would otherwise parse as a column named e.g. "PRIMARY".
                    if line.split(None, 1)[0].upper() in constraint_keywords:
                        continue

                    match = re.match(column_pattern, line)
                    if match:
                        column_name = match.group(1).strip('"')
                        column_type = match.group(2).strip()

                        data_type = self._parse_data_type(column_type)

                        column_info = {
                            'name': column_name,
                            'type': data_type['type'],
                            'length': data_type.get('length'),
                            'precision': data_type.get('precision'),
                            'scale': data_type.get('scale')
                        }

                        self.all_columns.append(column_info)

                        # Keep only whitelisted core columns for insertion.
                        if column_name.lower() in self._core_fields_lower:
                            self.selected_columns.append(column_info)

            logger.info(f"成功解析 {len(self.all_columns)} 个字段")
            logger.info(f"选择了 {len(self.selected_columns)} 个核心字段进行插入")

        except Exception as e:
            logger.error(f"解析DDL文件失败: {str(e)}")
            raise

    def _parse_data_type(self, type_str: str) -> Dict[str, Any]:
        """Map a raw DDL type string to a normalized type descriptor.

        Returns a dict with key 'type' plus, where relevant, 'length' or
        'precision'/'scale'.  Unrecognized types fall back to varchar(500).
        """
        type_str = type_str.lower().strip()

        # Strip a trailing NULL / NOT NULL qualifier in a single pass.
        # BUG FIX: stripping "null" before "not null" left a dangling "not"
        # behind, so e.g. "int4 not null" fell through to the varchar default.
        type_str = re.sub(r'\s+(?:not\s+)?null\s*$', '', type_str).strip()

        # Character types.
        if 'varchar' in type_str:
            match = re.search(r'varchar\((\d+)\)', type_str)
            length = int(match.group(1)) if match else 500
            return {'type': 'varchar', 'length': length}

        # Integer types.
        elif type_str in ['int4', 'integer']:
            return {'type': 'int4'}
        elif type_str in ['int8', 'bigint'] or type_str.startswith('int8'):
            return {'type': 'int8'}

        # Fixed-point types; default to NUMERIC(10,2) when unparameterized.
        elif 'decimal' in type_str or 'numeric' in type_str:
            match = re.search(r'(?:decimal|numeric)\((\d+)(?:,(\d+))?\)', type_str)
            if match:
                precision = int(match.group(1))
                scale = int(match.group(2)) if match.group(2) else 0
                return {'type': 'decimal', 'precision': precision, 'scale': scale}
            return {'type': 'decimal', 'precision': 10, 'scale': 2}

        # Date/time types ('timestamp' must be tested before 'date').
        elif 'timestamp' in type_str:
            return {'type': 'timestamp'}
        elif 'date' in type_str:
            return {'type': 'date'}

        # Anything else: treat as a generic string column.
        else:
            return {'type': 'varchar', 'length': 500}

    def generate_mock_value(self, column: Dict[str, Any]) -> Any:
        """Produce one random value matching the column's type and name hints."""
        column_name = column['name'].lower()
        column_type = column['type']

        if column_type in ['int4', 'int8']:
            # Name hints pick a realistic range for integer columns.
            if 'age' in column_name:
                return random.randint(18, 80)
            elif 'amt' in column_name or 'amount' in column_name or 'bal' in column_name:
                return random.randint(0, 1000000)
            elif 'scre' in column_name or 'score' in column_name:
                return random.randint(300, 850)
            else:
                return random.randint(0, 100000)

        elif column_type == 'varchar':
            # Name hints pick an appropriate value pool for string columns.
            if column_name.endswith('_id'):
                return self._generate_id()
            elif 'gender' in column_name:
                return random.choice(self.genders)
            elif 'city' in column_name:
                return random.choice(self.cities)
            elif 'name' in column_name:
                return self._generate_name()
            elif column_name.startswith('yn_') or '_yn_' in column_name or column_name.endswith('_ind'):
                return random.choice(self.yes_no_values)
            elif 'date' in column_name or 'time' in column_name:
                return self._generate_date_string()
            elif 'grp' in column_name:
                return random.choice(['A', 'B', 'C'])
            else:
                return self._generate_random_string(5)

        elif column_type == 'decimal':
            precision = column.get('precision', 10)
            scale = column.get('scale', 2)
            # BUG FIX: the previous bound 10**(precision-scale) let the
            # rounded value gain one integer digit too many and overflow
            # NUMERIC(p,s) on insert; -1 keeps every value in range.
            max_val = 10 ** (precision - scale) - 1
            return round(random.uniform(0, max_val), scale)
        elif column_type == 'timestamp':
            return self._generate_timestamp()
        elif column_type == 'date':
            return self._generate_date()
        else:
            return None

    def _generate_id(self) -> str:
        """Return an 8-character uppercase alphanumeric identifier."""
        return ''.join(random.choices(string.ascii_uppercase + string.digits, k=8))

    def _generate_name(self) -> str:
        """Return a random placeholder person name."""
        names = ['张三', '李四', '王五', '赵六', '钱七']
        return random.choice(names)

    def _generate_date_string(self) -> str:
        """Return a random 'YYYY-MM-DD' string in [2020-01-01, 2024-12-31]."""
        return self._generate_date().strftime('%Y-%m-%d')

    def _generate_date(self) -> datetime:
        """Return a random midnight datetime in [2020-01-01, 2024-12-31]."""
        start_date = datetime(2020, 1, 1)
        end_date = datetime(2024, 12, 31)
        return start_date + timedelta(
            days=random.randint(0, (end_date - start_date).days)
        )

    def _generate_timestamp(self) -> datetime:
        """Return a random datetime with a random time-of-day, same date range."""
        return self._generate_date() + timedelta(
            hours=random.randint(0, 23),
            minutes=random.randint(0, 59),
            seconds=random.randint(0, 59)
        )

    def _generate_random_string(self, length: int) -> str:
        """Return a random alphanumeric string of min(length, 10) characters.

        Non-positive lengths yield the empty string; the 10-character cap
        keeps generated values short regardless of the declared column width.
        """
        if length <= 0:
            return ''
        chars = string.ascii_letters + string.digits
        return ''.join(random.choices(chars, k=min(length, 10)))

    def generate_batch_data(self, batch_size: int = 2000) -> List[Tuple]:
        """Generate ``batch_size`` rows of mock values for the selected columns.

        Returns:
            A list of tuples, one value per selected column, in column order.
        """
        logger.info(f"开始生成 {batch_size} 条模拟数据")

        batch_data = []
        for i in range(batch_size):
            batch_data.append(
                tuple(self.generate_mock_value(column) for column in self.selected_columns)
            )
            # Progress heartbeat every 500 rows.
            if (i + 1) % 500 == 0:
                logger.info(f"已生成 {i + 1} 条数据")

        logger.info(f"成功生成 {len(batch_data)} 条模拟数据")
        return batch_data

    def connect_database(self) -> None:
        """Open a psycopg2 connection using self.db_config.

        Raises:
            psycopg2.Error (or subclass): on connection failure — logged
            and re-raised for the caller.
        """
        try:
            self.connection = psycopg2.connect(
                host=self.db_config['host'],
                port=self.db_config['port'],
                database=self.db_config['database'],
                user=self.db_config['user'],
                password=self.db_config['password']
            )
            logger.info("数据库连接成功")
        except Exception as e:
            logger.error(f"数据库连接失败: {str(e)}")
            raise

    def insert_batch_data(self, batch_data: List[Tuple]) -> None:
        """Insert the rows in sub-batches of 100, committing after each.

        Raises:
            ValueError: if connect_database() has not been called.
        """
        if not self.connection:
            raise ValueError("数据库未连接")

        try:
            # Quote column names so reserved words survive in the INSERT.
            column_names = [f'"{col["name"]}"' for col in self.selected_columns]
            placeholders = ','.join(['%s'] * len(self.selected_columns))

            insert_sql = f"""
                INSERT INTO {self.table_name} ({','.join(column_names)})
                VALUES ({placeholders})
            """

            logger.info(f"开始批量插入 {len(batch_data)} 条数据")
            logger.info(f"插入字段: {[col['name'] for col in self.selected_columns]}")

            # Sub-batch to bound memory; commit after each so partial
            # progress survives a mid-run failure.
            batch_size = 100
            total_inserted = 0

            # BUG FIX: the cursor is now a context manager, so it is closed
            # even when a batch raises (it leaked on the error path before).
            with self.connection.cursor() as cursor:
                for i in range(0, len(batch_data), batch_size):
                    batch = batch_data[i:i + batch_size]
                    cursor.executemany(insert_sql, batch)
                    self.connection.commit()
                    total_inserted += len(batch)
                    logger.info(f"已插入 {total_inserted} 条数据")

            logger.info(f"成功插入 {total_inserted} 条数据")

        except Exception as e:
            logger.error(f"数据插入失败: {str(e)}")
            if self.connection:
                self.connection.rollback()
            raise

    def close_connection(self) -> None:
        """Close the database connection if one is open; safe to call twice."""
        if self.connection:
            self.connection.close()
            self.connection = None  # make repeated calls a no-op
            logger.info("数据库连接已关闭")

    def run(self, record_count: int = 2000) -> None:
        """Execute the full pipeline: parse DDL, generate, connect, insert.

        The connection is always closed, even on failure.
        """
        try:
            # 1. Parse the DDL to learn the table structure.
            self.parse_ddl_file()

            # 2. Generate the mock rows (no DB needed yet).
            batch_data = self.generate_batch_data(record_count)

            # 3. Connect only after generation succeeded.
            self.connect_database()

            # 4. Bulk-insert the rows.
            self.insert_batch_data(batch_data)

            logger.info("数据生成和插入完成！")

        except Exception as e:
            logger.error(f"执行失败: {str(e)}")
            raise
        finally:
            # 5. Always release the connection.
            self.close_connection()

def main():
    """Entry point: configure paths/credentials and run the generator."""
    # DDL script that defines the target table.
    ddl_file_path = r'd:\Project\Python\pyTools\scrapt\ddl.sql'

    # Database connection parameters (originally parsed from a JDBC URL).
    # NOTE(review): credentials are hard-coded; move them to environment
    # variables or a config file for anything beyond local testing.
    db_config = {
        'host': '192.168.4.148',
        'port': '5432',
        'database': 'market',
        'user': 'market',
        'password': 'market@123'
    }

    # Single source of truth for the row count — previously the literal
    # 2000 was duplicated in run() and in the summary print below.
    record_count = 2000

    generator = MinimalMockDataGenerator(ddl_file_path, db_config)

    try:
        # Run the full pipeline, then print a summary of what was inserted.
        generator.run(record_count=record_count)
        print("\n=== 数据生成完成 ===")
        print(f"表名: {generator.table_name}")
        print(f"总字段数量: {len(generator.all_columns)}")
        print(f"插入字段数量: {len(generator.selected_columns)}")
        print(f"插入字段: {[col['name'] for col in generator.selected_columns]}")
        print(f"生成记录数: {record_count}")
        print("详细日志请查看: mock_data_generation_minimal.log")

    except Exception as e:
        # Failures are summarized here; full traceback is in the log file.
        print("\n=== 执行失败 ===")
        print(f"错误信息: {str(e)}")
        print("详细日志请查看: mock_data_generation_minimal.log")

if __name__ == '__main__':
    main()