# import pandas as pd
# import cx_Oracle
# import os
# import logging
# from datetime import datetime
# from tqdm import tqdm
# import time
# import sys
#
# # ========================
# # 配置参数（请根据实际情况修改）
# # ========================
# # 1. 数据库连接参数
# DB_USER = "your_username"
# DB_PASSWORD = "your_password"
# DB_DSN = "your_host:port/service_name"  # 例如: "localhost:1521/orcl"
#
# # 2. 源文件路径（Oracle导出的文本文件）
# SOURCE_FILE_PATH = "path/to/oracle_export.csv"  # 例如: "oracle_data_export.csv"
#
# # 3. 目标宽表（数据库表名）
# TARGET_TABLE = "wide_table"
#
# # 4. 列映射关系（源列名 -> 目标列名）
# COLUMN_MAPPING = {
#     'source_column1': 'target_column1',
#     'source_column2': 'target_column2',
#     'source_column3': 'target_column3',
#     # 添加更多映射关系...
# }
#
# # 5. 数据类型映射（源列名 -> Python数据类型）
# DATA_TYPE_MAPPING = {
#     'source_column1': 'str',
#     'source_column2': 'float',
#     'source_column3': 'datetime',
#     # 添加更多类型映射...
# }
#
# # 6. 日期格式（如果需要处理日期列）
# DATE_FORMAT = '%Y-%m-%d'
#
# # 7. 批量插入大小（每批插入的行数）
# BATCH_SIZE = 1000
#
#
# # ========================
# # 日志配置
# # ========================
# def setup_logging():
#     log_file = f"db_import_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log"
#     logging.basicConfig(
#         level=logging.INFO,
#         format='%(asctime)s - %(levelname)s - %(message)s',
#         handlers=[
#             logging.FileHandler(log_file),
#             logging.StreamHandler()
#         ]
#     )
#     return logging.getLogger(__name__)
#
#
# logger = setup_logging()
#
#
# # ========================
# # 数据库连接函数
# # ========================
# def connect_to_db():
#     """建立数据库连接"""
#     try:
#         logger.info("正在连接数据库...")
#         connection = cx_Oracle.connect(
#             user=DB_USER,
#             password=DB_PASSWORD,
#             dsn=DB_DSN
#         )
#         logger.info("数据库连接成功")
#         return connection
#     except cx_Oracle.Error as e:
#         logger.error(f"数据库连接失败: {e}")
#         raise
#
#
# # ========================
# # 读取源文件函数
# # ========================
# def read_source_file(file_path):
#     """读取源文件（CSV/TSV）"""
#     if not os.path.exists(file_path):
#         logger.error(f"源文件不存在: {file_path}")
#         raise FileNotFoundError(f"源文件不存在: {file_path}")
#
#     # 检测文件扩展名
#     ext = os.path.splitext(file_path)[1].lower()
#
#     if ext == '.csv':
#         df = pd.read_csv(file_path)
#     elif ext == '.tsv':
#         df = pd.read_csv(file_path, sep='\t')
#     else:
#         logger.error(f"不支持的文件格式: {ext}. 请使用CSV或TSV文件.")
#         raise ValueError(f"不支持的文件格式: {ext}. 请使用CSV或TSV文件.")
#
#     # 检查必要列是否存在
#     missing_columns = [col for col in COLUMN_MAPPING.keys() if col not in df.columns]
#     if missing_columns:
#         logger.error(f"源文件缺少必要列: {', '.join(missing_columns)}")
#         raise ValueError(f"源文件缺少必要列: {', '.join(missing_columns)}")
#
#     logger.info(f"成功读取源文件: {file_path}")
#     logger.info(f"文件包含 {len(df)} 行, {len(df.columns)} 列")
#     logger.info(f"源文件列: {list(df.columns)}")
#
#     return df
#
#
# # ========================
# # 数据映射和转换函数
# # ========================
# def map_and_convert_data(df):
#     """根据映射关系重命名列并转换数据类型"""
#     # 1. 重命名列
#     df = df.rename(columns=COLUMN_MAPPING)
#
#     # 2. 转换数据类型
#     for col, data_type in DATA_TYPE_MAPPING.items():
#         if data_type == 'str':
#             df[col] = df[col].astype(str)
#         elif data_type == 'float':
#             df[col] = pd.to_numeric(df[col], errors='coerce')
#         elif data_type == 'datetime':
#             df[col] = pd.to_datetime(df[col], errors='coerce', format=DATE_FORMAT)
#         else:
#             logger.warning(f"未处理的数据类型: {data_type} for column {col}")
#
#     # 3. 处理缺失值
#     df = df.fillna({col: '' for col in df.columns})
#
#     logger.info("数据映射和转换完成")
#     return df
#
#
# # ========================
# # 数据库插入函数
# # ========================
# def insert_into_database(connection, df):
#     """将数据插入到目标数据库表"""
#     cursor = connection.cursor()
#
#     # 获取目标表的列名
#     cursor.execute(f"SELECT column_name FROM all_tab_columns WHERE table_name = '{TARGET_TABLE}'")
#     target_columns = [col[0].lower() for col in cursor.fetchall()]
#
#     # 检查目标列是否匹配
#     missing_columns = [col for col in df.columns if col.lower() not in target_columns]
#     if missing_columns:
#         logger.error(f"目标表缺少列: {', '.join(missing_columns)}")
#         raise ValueError(f"目标表缺少列: {', '.join(missing_columns)}")
#
#     # 准备插入语句
#     columns = ', '.join(df.columns)
#     placeholders = ', '.join([':' + str(i + 1) for i in range(len(df.columns))])
#     insert_sql = f"INSERT INTO {TARGET_TABLE} ({columns}) VALUES ({placeholders})"
#
#     # 批量插入
#     total_rows = len(df)
#     inserted_rows = 0
#     start_time = time.time()
#
#     logger.info(f"开始插入数据到数据库表: {TARGET_TABLE} (总行数: {total_rows})")
#
#     # 使用tqdm显示进度
#     with tqdm(total=total_rows, desc="插入进度", unit="行") as pbar:
#         for i in range(0, total_rows, BATCH_SIZE):
#             batch = df.iloc[i:i + BATCH_SIZE]
#             batch_data = batch.values.tolist()
#
#             # 执行批量插入
#             cursor.executemany(insert_sql, batch_data)
#             connection.commit()
#
#             # 更新进度
#             inserted_rows += len(batch)
#             elapsed = time.time() - start_time
#             speed = inserted_rows / elapsed if elapsed > 0 else 0
#
#             # 更新进度条显示
#             pbar.update(len(batch))
#             pbar.set_postfix({
#                 "已插入": f"{inserted_rows}/{total_rows}",
#                 "速度": f"{speed:.2f}行/秒",
#                 "时间": f"{elapsed:.2f}s"
#             })
#
#     logger.info(f"数据插入完成! 共插入 {inserted_rows} 行")
#     cursor.close()
#
#
# # ========================
# # 主处理函数
# # ========================
# def main():
#     """主处理流程"""
#     try:
#         # 1. 读取源文件
#         df = read_source_file(SOURCE_FILE_PATH)
#
#         # 2. 数据映射和转换
#         df = map_and_convert_data(df)
#
#         # 3. 连接数据库
#         connection = connect_to_db()
#
#         # 4. 插入数据
#         insert_into_database(connection, df)
#
#         # 5. 生成处理报告
#         generate_report(df)
#
#         logger.info("数据处理流程成功完成!")
#         return True
#
#     except Exception as e:
#         logger.exception("处理过程中发生错误")
#         return False
#     finally:
#         # 确保关闭数据库连接
#         if 'connection' in locals():
#             connection.close()
#             logger.info("数据库连接已关闭")
#
#
# # ========================
# # 生成处理报告函数
# # ========================
# def generate_report(df):
#     """生成详细处理报告"""
#     report_path = f"import_report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.txt"
#     with open(report_path, 'w') as f:
#         f.write(f"数据导入报告 - {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
#         f.write("=" * 50 + "\n\n")
#         f.write(f"源文件: {SOURCE_FILE_PATH}\n")
#         f.write(f"目标表: {TARGET_TABLE}\n")
#         f.write(f"处理行数: {len(df)}\n")
#         f.write(f"列数: {len(df.columns)}\n\n")
#
#         f.write("列映射关系:\n")
#         for src, tgt in COLUMN_MAPPING.items():
#             f.write(f"  {src} -> {tgt}\n")
#
#         f.write("\n数据预览 (前5行):\n")
#         f.write(df.head().to_string(index=False))
#
#     logger.info(f"处理报告已生成: {report_path}")
#
#
# # ========================
# # 执行脚本
# # ========================
# if __name__ == "__main__":
#     logger.info("=" * 50)
#     logger.info("开始执行数据导入脚本")
#     logger.info("=" * 50)
#
#     success = main()
#
#     if success:
#         logger.info("数据导入成功!")
#         print("\n" + "=" * 50)
#         print("数据导入成功! 请查看生成的报告文件")
#         print("=" * 50)
#     else:
#         logger.error("数据导入失败!")
#         print("\n" + "=" * 50)
#         print("数据导入失败! 请查看日志文件获取详细信息")
#         print("=" * 50)
#         sys.exit(1)


import cx_Oracle
import os
from datetime import datetime

# Force the Oracle client into a UTF-8 Chinese character set so Chinese
# text in the data file round-trips without mojibake.
os.environ["NLS_LANG"] = "SIMPLIFIED CHINESE_CHINA.UTF8"

# ========================
# 1. Configuration (edit as needed)
# ========================
ORACLE_CONFIG = {
    "username": "scott",  # ← replace with your Oracle username
    "password": "123456",  # ← replace with your Oracle password
    "host": "localhost",  # ← database host
    "port": "1521",  # ← listener port
    "service_name": "orcl"  # ← service name (or SID)
}

# Raw string: the path contains backslashes followed by non-escape
# characters ("\S", "\测"); a plain literal only works by accident and
# emits invalid-escape warnings on modern Python.
TEXT_FILE_PATH = r"E:\测试数据\SBS3.ACTVHH-test-b.p02"  # ← path to your text file
TABLE_NAME = "ACTVHH"  # ← target table name

# ========================
# 2. Table column definitions (30 columns, including RECSTS)
# ========================
# NOTE: the order here must match the field order in the exported text
# file — insert_records() zips positional placeholders against this list.
TABLE_COLUMNS = [
    "ORGIDT", "TLRNUM", "VCHSET", "SETSEQ", "ERYDAT",
    "OURREF", "ORGID3", "CUSIDT", "APCODE", "CURCDE",
    "TXNAMT", "BOKBAL", "VALDAT", "ANACDE", "FURINF",
    "FXRATE", "VCHATT", "THRREF", "VCHAUT", "VCHANO",
    "DEPNUM", "REGNUM", "TXNBAK", "ACTBAK", "CLRBAK",
    "ORGID4", "ERYTYP", "VCHDAT", "VCHTIM", "RECSTS"
]  # 30 columns in total (including RECSTS)


# ========================
# 3. Parse the text file (32 fields → drop leading sequence no. and ^ENDROW → 30 fields)
# ========================
def parse_text_file(file_path):
    """Parse the pipe-delimited export file into a list of 30-field records.

    A valid line looks like ``seq|field1|...|field30|^ENDROW``. Lines
    without the ``^ENDROW`` terminator or with fewer than 32 fields are
    reported and skipped. Blank fields become None (→ NULL on insert).

    BUG FIX: the original pre-filter skipped every line ending with
    '^ENDROW', so the terminator check below rejected all remaining
    lines and the function could never yield a single record.
    """
    records = []
    with open(file_path, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if not line:  # skip blank lines only
                continue

            fields = line.split('|')

            # Every data line must end with the ^ENDROW marker.
            if fields[-1] != '^ENDROW':
                print(f"⚠️ 警告：行未以^ENDROW结尾: {line}")
                continue

            # Expect seq + 30 data fields + ^ENDROW = 32 fields.
            if len(fields) < 32:
                print(f"⚠️ 警告：字段不足（期望32，实际{len(fields)}）: {line}")
                continue

            # Keep indexes 1..30: drop the leading sequence number and the marker.
            data_fields = fields[1:31]

            # Whitespace-only fields collapse to None so Oracle stores NULL.
            # (Renamed loop variable: the original shadowed the file handle `f`.)
            records.append([value.strip() or None for value in data_fields])
    return records


# ========================
# 4. Convert date strings (important!)
# ========================
def convert_date_str(date_str):
    """Convert a timestamp string like '20060801000000000' to a datetime.

    Only the leading YYYYMMDD portion is used; the trailing time digits
    are ignored. Returns None (→ NULL) for empty, too-short, or
    unparseable input.
    """
    if not date_str or len(date_str) < 8:
        return None
    try:
        return datetime.strptime(date_str[:8], "%Y%m%d")
    except ValueError:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and hid real bugs.
        return None


# ========================
# 5. Create the Oracle table (skipped when it already exists)
# ========================
def create_table_if_not_exists(conn):
    """Issue CREATE TABLE for TABLE_NAME, tolerating ORA-00955 (exists)."""
    column_defs = ", ".join(
        f"{name} {get_column_type(name)}" for name in TABLE_COLUMNS
    )
    ddl = f"CREATE TABLE {TABLE_NAME} ({column_defs})"
    cursor = conn.cursor()
    try:
        cursor.execute(ddl)
        conn.commit()
        print(f"✅ 表 {TABLE_NAME} 创建成功")
    except cx_Oracle.DatabaseError as e:
        error, = e.args
        if "ORA-00955" not in str(error):
            # Any error other than "name already used" is fatal.
            print(f"❌ 创建表失败: {error.message}")
            raise
        print(f"ℹ️ 表 {TABLE_NAME} 已存在，跳过创建")
    finally:
        cursor.close()


def get_column_type(column_name):
    """Return the Oracle DDL type for *column_name*.

    Unknown columns fall back to VARCHAR2(255).
    """
    # Columns grouped by Oracle type family for readability.
    date_columns = {"ERYDAT", "VALDAT", "VCHDAT"}
    number_columns = {
        "VCHSET": "NUMBER(4,0)",
        "SETSEQ": "NUMBER(2,0)",
        "TXNAMT": "NUMBER(17,2)",
        "BOKBAL": "NUMBER(17,2)",
        "FXRATE": "NUMBER(12,6)",
        "VCHATT": "NUMBER(3,0)",
    }
    char_widths = {
        "ORGIDT": 4, "TLRNUM": 4, "OURREF": 16, "ORGID3": 4,
        "CUSIDT": 7, "APCODE": 4, "CURCDE": 3, "ANACDE": 4,
        "FURINF": 32, "THRREF": 16, "VCHAUT": 4, "VCHANO": 6,
        "DEPNUM": 2, "REGNUM": 20, "TXNBAK": 7, "ACTBAK": 7,
        "CLRBAK": 7, "ORGID4": 3, "ERYTYP": 1, "VCHTIM": 8,
        "RECSTS": 1,  # note: RECSTS is CHAR(1)
    }
    if column_name in date_columns:
        return "DATE"
    if column_name in number_columns:
        return number_columns[column_name]
    if column_name in char_widths:
        return f"CHAR({char_widths[column_name]})"
    return "VARCHAR2(255)"


# ========================
# 6. Transform records (parse the date columns)
# ========================
def transform_records(raw_records):
    """Return records ready for insertion, with date columns converted.

    Records that do not carry exactly 30 fields are reported and dropped.
    """
    # Positions of the DATE columns within a record:
    # 4 = ERYDAT, 12 = VALDAT, 26 = VCHDAT.
    date_positions = {4, 12, 26}
    transformed = []
    for record in raw_records:
        if len(record) != 30:
            print(f"⚠️ 警告：记录字段数不符（期望30，实际{len(record)}）: {record}")
            continue
        transformed.append([
            convert_date_str(value) if position in date_positions else value
            for position, value in enumerate(record)
        ])
    return transformed


# ========================
# 7. Bulk-insert the data (all 30 columns)
# ========================
def insert_records(conn, records):
    """Insert all records into TABLE_NAME with a single executemany call.

    BUG FIX: ``cursor.getbatcherrors()`` only returns row-level errors
    when ``executemany`` is called with ``batcherrors=True``; without it
    the original per-row error report was dead code. With batch errors
    enabled, a failing row no longer raises — so we check the error list
    explicitly and keep the original all-or-nothing semantics by rolling
    back the whole batch when any row failed.
    """
    cursor = conn.cursor()
    # Derive the placeholder count from TABLE_COLUMNS instead of a
    # hard-coded 30, so the SQL stays in sync with the column list.
    placeholders = ", ".join(f":{i + 1}" for i in range(len(TABLE_COLUMNS)))
    sql = f"INSERT INTO {TABLE_NAME} ({', '.join(TABLE_COLUMNS)}) VALUES ({placeholders})"

    try:
        cursor.executemany(sql, records, batcherrors=True)
        batch_errors = cursor.getbatcherrors()
        if batch_errors:
            conn.rollback()
            print(f"❌ 插入失败: {len(batch_errors)} 条记录出错")
            for error in batch_errors:
                print(f"  错误行 {error.offset}: {error.message}")
        else:
            conn.commit()
            print(f"✅ 成功插入 {len(records)} 条记录到 {TABLE_NAME}")
    except cx_Oracle.DatabaseError as e:
        # Statement-level failure (bad SQL, lost connection, ...).
        conn.rollback()
        print(f"❌ 插入失败: {e}")
    finally:
        cursor.close()


# ========================
# 8. Main program
# ========================
def main():
    """End-to-end pipeline: parse the text file, fix dates, load Oracle."""
    # Step 1: parse the source text file.
    print("🔍 正在解析文本文件...")
    parsed = parse_text_file(TEXT_FILE_PATH)
    if not parsed:
        print("❌ 未读取到有效数据，请检查文件路径和格式")
        return

    print(f"📄 共解析 {len(parsed)} 条记录")

    # Step 2: convert the date columns.
    print("🔄 正在转换日期字段...")
    rows = transform_records(parsed)

    # Step 3: connect to Oracle.
    print("🔌 正在连接 Oracle 数据库...")
    dsn = cx_Oracle.makedsn(
        ORACLE_CONFIG["host"],
        ORACLE_CONFIG["port"],
        service_name=ORACLE_CONFIG["service_name"],
    )
    try:
        connection = cx_Oracle.connect(
            user=ORACLE_CONFIG["username"],
            password=ORACLE_CONFIG["password"],
            dsn=dsn,
        )
    except Exception as e:
        print(f"❌ 数据库连接失败: {e}")
        return
    print("✅ 数据库连接成功")

    try:
        # Step 4: create the table (no-op when it already exists).
        create_table_if_not_exists(connection)

        # Step 5: insert the data (all 30 columns).
        insert_records(connection, rows)
    finally:
        connection.close()
        print("👋 数据库连接已关闭")


# Entry point: only run the pipeline when executed as a script, not on import.
if __name__ == "__main__":
    main()