from csvPeekField import mainColsDict, hilistColsDict, mainColsNeeded, hilistColsNeeded, mainColsMust, hilistColsMust
import json
import time
import os
from concurrent.futures import ThreadPoolExecutor
import threading
import gc

def formatTimeSpan(seconds):
    """Format a duration in seconds as a short human-readable string.

    Durations above 4000 s render as "H h M m S.SS s", durations above
    100 s as "M m S.SS s", and anything else as "S.SS s".
    """
    secs = round(seconds, 2)  # two decimal places before bucketing
    hours, rem = divmod(secs, 3600)
    if secs > 4000:
        return f"{int(hours)} h {int(rem // 60)} m {secs % 60:.2f} s"
    if secs > 100:
        return f"{int(secs // 60)} m {secs % 60:.2f} s"
    return f"{secs:.2f} s"

def process_chunk(chunk_data):
    """Validate one batch of raw CSV lines and measure per-column widths.

    chunk_data is a tuple of:
        lines          -- raw lines as read (may carry the trailing newline)
        headers        -- column names, positionally matched to fields
        csvColsNum     -- expected number of columns (len(headers))
        delimiter      -- field separator
        encoding       -- source encoding; drives the wide-char multiplier
        start_line_num -- 1-based file line number of lines[0], for reports

    Returns (errors, lengths): a list of error dicts for rows with too many
    columns, and a dict mapping each header to the largest estimated byte
    length seen for that column in this chunk.
    """
    lines, headers, csvColsNum, delimiter, encoding, start_line_num = chunk_data

    chunk_errors = []
    # Start every column at 0 so the caller can merge chunks with max().
    chunk_lengths = {col: 0 for col in headers}

    # Precompute the byte multiplier for non-ASCII text: UTF-8 CJK chars take
    # up to 4 bytes, legacy double-byte encodings (e.g. gbk) take 2.
    is_utf8 = encoding.lower() == 'utf-8'
    chinese_multiplier = 4 if is_utf8 else 2

    for i, line in enumerate(lines):
        if not line.strip():  # fast skip for blank lines
            continue

        # BUG FIX: drop the line terminator before splitting; otherwise the
        # last field keeps '\n' and its measured length is inflated (by up to
        # the multiplier when the field starts with a non-ASCII character),
        # and the newline leaks into the saved error snippet.
        line = line.rstrip('\r\n')

        row = line.split(delimiter)
        row_len = len(row)

        # Column-count check. NOTE(review): only *extra* columns are flagged;
        # rows with too few columns pass silently -- confirm this is intended.
        if row_len > csvColsNum:
            lineNum = start_line_num + i
            chunk_errors.append({
                'lineNum': lineNum,
                'msg': f"行 {lineNum} 列数异常: {row_len} vs 期望 {csvColsNum}",
                'line': line[:50] + '...' if len(line) > 50 else line,  # truncated snippet
            })

        # Width estimate: only columns that map onto a known header.
        max_cols = min(row_len, len(headers))
        for coli in range(max_cols):
            value = row[coli]
            if value:  # skip empty fields
                # Heuristic: a non-ASCII first character marks the whole
                # field as wide text and applies the encoding multiplier.
                if ord(value[0]) > 127:
                    length = len(value) * chinese_multiplier
                else:
                    length = len(value)

                col = headers[coli]
                if length > chunk_lengths[col]:
                    chunk_lengths[col] = length

    return chunk_errors, chunk_lengths

def csvPeekOptimized(csvFile, csvFileType=None, encoding='gbk', delimiter=',', newline='\n', chunk_size=5000, max_workers=2):
    """Scan a delimited text file and report structural problems.

    Checks that the required columns (from csvPeekField) are present, flags
    rows with too many fields, measures the maximum estimated byte length per
    column, and writes JSON error logs plus a CREATE TABLE draft into an
    'errorlog' directory next to the input file.

    Parameters
    ----------
    csvFile     : path to the input file (Windows-style paths expected)
    csvFileType : 'main' or 'hilist'; auto-detected from the name when None
    encoding    : text encoding used to read the file
    delimiter   : field separator
    newline     : newline argument passed to open()
    chunk_size  : upper bound on lines held in memory per batch
    max_workers : thread count for per-batch field scanning
    """
    # Normalize to backslash paths so the name/path split below behaves the
    # same for inputs written with either separator.
    csvFile = csvFile.replace('/', '\\')
    csvFileName = csvFile.split('\\')[-1].replace('.csv', '')

    print('============ %s BEGIN (内存优化版本) ============' % csvFileName)
    print(f"内存控制设置：块大小={chunk_size:,}行，最大线程数={max_workers}")

    csvFilePath = '\\'.join(csvFile.split('\\')[:-1])
    # Infer the schema family from the file name; anything that is not a
    # "明细" (detail) file is treated as 'main'.
    if csvFileType is None:
        csvFileType = 'hilist' if '明细' in csvFileName else 'main'

    # All reports go into <input dir>/errorlog.
    errorlog_dir = os.path.join(csvFilePath, 'errorlog')
    if not os.path.exists(errorlog_dir):
        os.makedirs(errorlog_dir)

    # Fast line count: iterate the raw byte stream, no decoding.
    print(f"正在计算文件 {csvFileName} 的行数...")
    with open(csvFile, 'rb') as f:
        lineCount = sum(1 for _ in f)
    print(f"文件 {csvFileName} 的总行数为: {lineCount:,} 行")

    # Read the header row up front. BUG FIX: the fallback line-size estimate
    # below references `headers`, which was previously defined only *after*
    # the sampling step, raising NameError on an empty file.
    with open(csvFile, 'r', encoding=encoding, newline=newline, errors='replace') as f:
        headers = f.readline().strip().split(delimiter)

    # Sample up to the first 500 lines to estimate the average line size in
    # bytes; this drives the memory-safe batch size computed further down.
    print(f"正在分析前500行以估算行大小...")
    sample_lines = []
    with open(csvFile, 'r', encoding=encoding, newline=newline, errors='replace') as f:
        for i, line in enumerate(f):
            if i >= 500:
                break
            sample_lines.append(line)

    if sample_lines:
        total_bytes = sum(len(line.encode(encoding, errors='replace')) for line in sample_lines)
        avg_line_size = total_bytes / len(sample_lines)
        print(f"基于前{len(sample_lines)}行的分析，平均行大小: {avg_line_size:.1f} 字节")
    else:
        avg_line_size = len(headers) * 20  # crude fallback: ~20 bytes per column
        print(f"无法读取样本行，使用默认估算: {avg_line_size} 字节")

    # One metadata entry per physical column found in the file.
    csvColsDict = {}
    for coli, col in enumerate(headers):
        csvColsDict[col] = {'colid': coli, 'field': col, 'description': '', 'type': '', 'length': 0}

    # Compare the file's columns against the expected schema.
    colsNeed = mainColsNeeded if csvFileType == 'main' else hilistColsNeeded
    colsMust = mainColsMust if csvFileType == 'main' else hilistColsMust
    colsDict = mainColsDict if csvFileType == 'main' else hilistColsDict
    colsErrorData = []

    for col in colsNeed:
        # NOTE(review): relies on the insertion order of colsDict[col] being
        # (ord, field, description, type) -- confirm against csvPeekField.
        # (Renamed from `ord`/`type` to stop shadowing the builtins.)
        col_ord, field, description, col_type = colsDict[col].values()
        if col not in headers:
            if col in colsMust:
                msg = f"第 {col_ord} 列 '{col}' （{description}, {col_type}）在csv文件中不存在，且是必须的"
            else:
                msg = f"第 {col_ord} 列 '{col}' （{description}, {col_type}）在csv文件中不存在，但不是必须的"
            colsErrorData.append({
                'ord': col_ord,
                'field': field,
                'description': description,
                'type': col_type,
                'msg': msg,
            })
        else:
            csvColsDict[col]['description'] = description
            csvColsDict[col]['type'] = col_type

    # Persist the column-level findings.
    with open(os.path.join(errorlog_dir, f'csvColsError_{csvFileName}.json'), 'w', encoding='utf-8') as error_file:
        json.dump(colsErrorData, error_file, ensure_ascii=False, indent=4)

    csvColsNum = len(headers)
    print(f"CSV文件的列数为: {csvColsNum}")
    print('=========== %s 检查列 FINISH ============' % csvFileName)

    # ---- memory-bounded streaming row scan ----
    print('=========== %s 开始内存控制的批量检查行 ============' % csvFileName)

    all_errors = []
    t0 = time.time()
    processed_lines = 0

    # Cap the batch size so one batch never exceeds ~1 GB of raw line data.
    # max(1, ...) guards against a zero estimate (division below).
    estimated_line_size = max(1, int(avg_line_size))
    max_memory_per_chunk = 1024 * 1024 * 1024  # 1 GB per batch
    safe_chunk_size = min(chunk_size, max_memory_per_chunk // estimated_line_size)

    print(f"内存安全配置：每块最大 {safe_chunk_size:,} 行")
    print(f"预计最大内存使用：{(safe_chunk_size * estimated_line_size * max_workers / 1024 / 1024 / 1024):.1f} GB")
    print("="*60)

    # Stream the file batch by batch instead of loading it whole.
    with open(csvFile, 'r', encoding=encoding, newline=newline, errors='replace') as f:
        f.readline()  # skip the header row

        current_line_num = 2  # data starts on physical line 2
        batch_num = 0

        while True:
            # Pull the next batch of raw lines.
            current_chunk = []
            for _ in range(safe_chunk_size):
                line = f.readline()
                if not line:  # EOF
                    break
                current_chunk.append(line)

            if not current_chunk:  # no more data
                break

            batch_num += 1

            # Split the batch into one sub-chunk per worker; each sub-chunk
            # carries the file line number of its first line so error reports
            # stay accurate.
            chunks = []
            mini_chunk_size = max(1, safe_chunk_size // max_workers)
            for i in range(0, len(current_chunk), mini_chunk_size):
                chunk_data = current_chunk[i:i + mini_chunk_size]
                if chunk_data:
                    chunks.append((chunk_data, headers, csvColsNum, delimiter, encoding, current_line_num + i))

            # Threaded scan of the current batch.
            batch_errors = []
            with ThreadPoolExecutor(max_workers=max_workers) as executor:
                futures = [executor.submit(process_chunk, chunk) for chunk in chunks]

                for i, future in enumerate(futures):
                    chunk_errors, chunk_lengths = future.result()
                    batch_errors.extend(chunk_errors)

                    # Merge per-chunk maxima into the global column widths.
                    for col, length in chunk_lengths.items():
                        if col in csvColsDict:
                            csvColsDict[col]['length'] = max(csvColsDict[col]['length'], length)

                    if len(chunks) > 1:
                        print(f"  子块 {i+1}/{len(chunks)} 完成                 ", end='\r')

            all_errors.extend(batch_errors)
            processed_lines += len(current_chunk)
            current_line_num += len(current_chunk)

            # Progress / ETA line, overwritten in place via '\r'.
            total_time = time.time() - t0
            remaining_lines = lineCount - processed_lines
            avg_speed = processed_lines / total_time if total_time > 0 else 0
            eta = remaining_lines / avg_speed if avg_speed > 0 else 0

            print(f"批次 {batch_num} 完成: {len(current_chunk):,} 行 | "
                  f"累计: {processed_lines:,}/{lineCount:,} ({processed_lines/lineCount:.1%}) | "
                  f"错误: {len(all_errors):,} | "
                  f"速度: {avg_speed:,.0f} 行/秒 | "
                  f"预计剩余: {formatTimeSpan(eta)}                    ", end='\r')

            # Release the batch's line buffers before reading the next one.
            gc.collect()

    print(f'\n{"="*60}')
    print(f'内存控制处理完成！')
    print(f'总处理行数: {processed_lines:,} 行')
    print(f'总错误数: {len(all_errors):,} 个')
    print(f'总处理时间: {formatTimeSpan(time.time() - t0)}')
    print(f'平均速度: {processed_lines/(time.time() - t0):,.0f} 行/秒')

    # Persist row-level errors.
    with open(os.path.join(errorlog_dir, f'csvRowsError_{csvFileName}.json'), 'w', encoding='utf-8') as error_file:
        json.dump(all_errors, error_file, ensure_ascii=False, indent=4)

    print('=========== %s 检查行 FINISH ============' % csvFileName)

    # Persist the column metadata (including measured max lengths).
    with open(os.path.join(errorlog_dir, f'csvColsDict_{csvFileName}.json'), 'w', encoding='utf-8') as error_file:
        json.dump(csvColsDict, error_file, ensure_ascii=False, indent=4)

    # Sanitize the table name: each character a SQL identifier would reject
    # becomes '_' (one translate pass replaces the former .replace() chain).
    bad_chars = ' -.()/!@#$%^&*+={}[]|<>?'
    tableName = csvFileName.translate(str.maketrans(bad_chars, '_' * len(bad_chars)))

    # Draft DDL: every column as VARCHAR2 with 50 bytes of headroom over the
    # widest value observed.
    createTableSql = f"CREATE TABLE tbldirty_{tableName} (\n"
    for col, info in csvColsDict.items():
        if not col and info['length'] == 0:  # skip a nameless, empty column
            continue
        createTableSql += f"    {col} VARCHAR2({info['length'] + 50}) ,\n"
    createTableSql = createTableSql.rstrip(',\n') + "\n);"

    with open(os.path.join(errorlog_dir, f'createTable_{tableName}.sql'), 'w', encoding='utf-8') as sql_file:
        sql_file.write(createTableSql)

    print('============ %s COMPLETE ============' % csvFileName)

def csvPeek(csvFile, csvFileType=None, encoding='gbk', delimiter=',', newline='\n', chunk_size=5000, max_workers=2):
    """Public entry point kept for backward compatibility.

    All the work happens in csvPeekOptimized, which streams the file in
    memory-bounded batches; this wrapper just forwards every argument.
    """
    return csvPeekOptimized(
        csvFile,
        csvFileType,
        encoding,
        delimiter,
        newline,
        chunk_size,
        max_workers,
    )

if __name__ == '__main__':
    # Input files to scan (despite the .txt extension these are delimited
    # text files handled by csvPeek).
    csvFiles = [
        'other_mx.txt',
        'wyzyy_jz.txt',
        'wyzyy_mx_a.txt',
        'wyzyy_mx_b.txt',
        'wyzyy_dx.txt',
        'wyzyy_js.txt',
        'other_zd.txt',
    ]
    innerPath = r'./dataSource/'
    
    # Memory-safety tuning (intended to stay within ~40 GB).
    # NOTE(review): 3,000,000 rows per batch and 30 threads are large for a
    # "memory saving" setting -- confirm these match the target machine.
    CHUNK_SIZE = 3000000       # rows per batch
    MAX_WORKERS = 30           # worker thread count
    
    print(f"内存安全配置：块大小={CHUNK_SIZE:,}行，最大线程数={MAX_WORKERS}")
    print(f"预计最大内存使用：约 {CHUNK_SIZE * MAX_WORKERS * 200 / 1024 / 1024:.1f} MB per batch")
    print("="*60)
    
    # Fail fast if any listed input file is missing.
    for csvFile in csvFiles:
        if not os.path.exists(innerPath + csvFile):
            raise FileNotFoundError(f"文件 {csvFile} 不存在，请检查路径 {innerPath}")    
    
    for csvFilei, csvFile in enumerate(csvFiles):
        print('\n\n第 %d 个文件：%s'%(csvFilei+1, csvFile))
        
        # Hard filter: only other_zd.txt is processed on this run; every
        # other file in the list is skipped.
        if csvFile != 'other_zd.txt':
            # special-casing for other_zd.txt
            continue

        # Per-file delimiter: '@&' for most files, '^' for other_zd.txt.
        # NOTE(review): with the filter above, only the '^' branch is live.
        delimiter = '@&' if csvFile!='other_zd.txt' else '^'
        
        try:
            csvPeek(innerPath + csvFile, 
                    encoding='gbk', 
                    delimiter=delimiter, 
                    newline='\n',
                    chunk_size=CHUNK_SIZE,
                    max_workers=MAX_WORKERS)
        except MemoryError:
            # Retry once with half the batch size and a single worker.
            print(f"文件 {csvFile} 内存不足，尝试更小的块大小...")
            csvPeek(innerPath + csvFile, 
                    encoding='gbk', 
                    delimiter=delimiter, 
                    newline='\n',
                    chunk_size=CHUNK_SIZE//2,
                    max_workers=1)
        except Exception as e:
            # Log the failure and continue with the remaining files.
            print(f"处理文件 {csvFile} 时出错: {e}")
            continue