from csvPeekField import mainColsDict, hilistColsDict, mainColsNeeded, hilistColsNeeded, mainColsMust, hilistColsMust
import csv
import json
import time
import os

def formatTimeSpan(seconds):
    """Format a duration in seconds as "Z s", "Y m Z s" or "X h Y m Z s".

    The value is rounded to two decimals first; durations above 4000 s use
    the hour form, above 100 s the minute form, otherwise plain seconds.
    """
    seconds = round(seconds, 2)
    if seconds <= 100:
        return f"{seconds:.2f} s"
    mins, secs = divmod(seconds, 60)
    if seconds <= 4000:
        return f"{int(mins)} m {secs:.2f} s"
    hours, mins = divmod(mins, 60)
    return f"{int(hours)} h {int(mins)} m {secs:.2f} s"

def _countLines(csvFile, csvFileName, encoding, newline, use_optimization):
    """Count the lines of csvFile; binary mode is much faster when allowed."""
    print(f"正在计算文件 {csvFileName} 的行数...")
    if use_optimization:
        # Binary iteration avoids decoding and is significantly faster.
        with open(csvFile, 'rb') as f:
            lineCount = sum(1 for _ in f)
    else:
        # Text-mode fallback with a progress indicator.
        with open(csvFile, 'r', encoding=encoding, newline=newline, errors='replace') as f:
            lineCount = 0
            for _ in f:
                lineCount += 1
                # Print progress only every million lines to limit console I/O.
                if lineCount % 1000000 == 0:
                    print(f"已扫描 {lineCount} 行...", end='\r')
    print(f"文件 {csvFileName} 的总行数为: {lineCount} 行")
    return lineCount


def _sanitizeTableName(csvFileName):
    """Replace characters that are not valid in an Oracle table name with '_'."""
    return csvFileName.translate(
        str.maketrans({c: '_' for c in ' -.()/!@#$%^&*+={}[]|<>?'}))


def csvPeek(csvFile, csvFileType=None, encoding='gbk', delimiter=',', newline='\n', use_optimization=True):
    """Validate one CSV file against the expected schema and emit reports.

    Checks the header against the required/optional column lists imported
    from csvPeekField, scans every data row for extra columns and tracks the
    maximum value length per column, then writes three artifacts into an
    ``errorlog`` subdirectory next to the file: a column-error JSON, a
    row-error JSON, and an Oracle CREATE TABLE script sized from the
    observed lengths.

    Parameters:
        csvFile: path to the CSV file (forward or backward slashes).
        csvFileType: 'main' or 'hilist'; inferred from the file name when None.
        encoding: text encoding used to read the file.
        newline: newline convention, used only by the non-optimized line counter.
        use_optimization: count lines in binary mode when True (faster).

    NOTE: path handling is Windows-specific ('/' is rewritten to '\\').
    """
    csvFile = csvFile.replace('/', '\\')  # normalize to Windows separators
    csvFileName = csvFile.split('\\')[-1].replace('.csv', '')

    print('============ %s BEGIN ============' % csvFileName)
    # Directory part of the path (may be '' when the file is in the CWD).
    csvFilePath = '\\'.join(csvFile.split('\\')[:-1])
    if csvFileType is None:
        # Detail files ("明细") use the hilist schema; everything else
        # (including "主单") uses the main schema.
        csvFileType = 'hilist' if '明细' in csvFileName else 'main'

    # FIX: the error-log directory was never created, so every write below
    # crashed on a fresh run. Create it up front (no-op when it exists).
    os.makedirs(csvFilePath + '\\errorlog', exist_ok=True)

    lineCount = _countLines(csvFile, csvFileName, encoding, newline, use_optimization)

    rowsErrorJson = []  # accumulated row-level error records
    print('=========== 读取列 START ============')
    with open(csvFile, 'r', encoding=encoding, newline='', errors='replace') as f:
        reader = csv.reader(f, delimiter=delimiter)
        try:
            headers = next(reader)
        except StopIteration:
            headers = []  # completely empty file

        # Strip whitespace and surrounding quotes from header names.
        headers = [col.strip().strip('"') for col in headers]

        # Column name -> metadata (0-based ordinal, description, type,
        # maximum value length observed so far).
        csvColsDict = {}
        for coli, col in enumerate(headers):
            csvColsDict[col] = {'colid': coli, 'field': col, 'description': '', 'type': '', 'length': 0}

        # Pick the expected schema for this file type.
        colsNeed = mainColsNeeded if csvFileType == 'main' else hilistColsNeeded
        colsMust = mainColsMust if csvFileType == 'main' else hilistColsMust
        colsDict = mainColsDict if csvFileType == 'main' else hilistColsDict
        colsErrorData = []
        for col in colsNeed:
            # NOTE(review): relies on colsDict[col] holding exactly these four
            # values in insertion order — confirm against csvPeekField.
            colOrd, field, description, colType = colsDict[col].values()
            if col not in headers:
                # Record the missing column; the message notes whether it
                # is mandatory or merely expected.
                suffix = '且是必须的' if col in colsMust else '但不是必须的'
                colsErrorData.append({
                    'ord': colOrd,
                    'field': field,
                    'description': description,
                    'type': colType,
                    'msg': f"第 {colOrd} 列 '{col}' （{description}, {colType}）在csv文件中不存在，{suffix}"
                })
            else:
                csvColsDict[col]['description'] = description
                csvColsDict[col]['type'] = colType
        # Dump column-level errors as JSON.
        with open(csvFilePath + r'\errorlog\csvColsError_%s.json' % csvFileName, 'w', encoding='utf-8') as error_file:
            json.dump(colsErrorData, error_file, ensure_ascii=False, indent=4)

        csvColsNum = len(headers)
        print(f"CSV文件的列数为: {csvColsNum}")
        print('=========== %s 检查列 FINISH ============' % csvFileName)

        # ============ row checks ============
        processedRowNum = 0
        firstRow = None           # first non-blank data row, kept as a sample
        tStart = time.time()      # overall start, for the rate estimate
        tLast = tStart            # last progress print, for throttling
        is_utf8 = encoding.lower() == 'utf-8'

        for row in reader:
            processedRowNum += 1

            # csv.reader already handles quoting/embedded newlines;
            # skip rows that are entirely blank.
            if not any(cell.strip() for cell in row):
                continue

            # The first data row is only kept as a reference sample and is
            # itself excluded from the checks below.
            if firstRow is None:
                firstRow = row
                continue

            # Flag rows carrying non-empty data beyond the declared columns.
            if len(row) > csvColsNum and any(value.strip() for value in row[csvColsNum:]):
                rowMatchData = [
                    {
                        'col': headers[coli] if coli < len(headers) else '###超出范围###',
                        'value': value,
                        'valueFirst': firstRow[coli] if firstRow and len(firstRow) > coli else '###超出范围###',
                    } for coli, value in enumerate(row)
                ]

                rowsErrorJson.append({
                    'lineNum': processedRowNum,
                    'msg': f"第 {processedRowNum} 行的列数为 {len(row)}，与CSV文件的列数 {csvColsNum} 不一致",
                    'line': delimiter.join(row),
                    'rowMatchData': rowMatchData,
                    'headers': headers,
                })

            # Track the maximum byte-length estimate per column. If a value
            # contains any CJK character, every character is counted as
            # 4 bytes (UTF-8) or 2 bytes (other encodings) — a deliberate
            # over-estimate so the generated VARCHAR2 sizes are safe.
            for coli, value in enumerate(row):
                if coli >= len(headers) or not value:
                    continue

                col = headers[coli]
                if any('\u4e00' <= char <= '\u9fff' for char in value):
                    length = 4 * len(value) if is_utf8 else 2 * len(value)
                else:
                    length = len(value)

                if col in csvColsDict:
                    csvColsDict[col]['length'] = max(length, csvColsDict[col]['length'])

            # Progress: every 1000 rows for small files; at most every ~2 s
            # for big files. FIX: the original reset its time reference on
            # each print while still dividing by the total processed rows,
            # which made the remaining-time estimate collapse toward zero;
            # the rate is now computed from the total elapsed time.
            if processedRowNum % 1000 == 0:
                t = time.time()
                if lineCount < 1000000 or t - tLast > 2:
                    secondsRemain = (lineCount - processedRowNum) * ((t - tStart) / processedRowNum) if processedRowNum > 0 else 0
                    strRemain = formatTimeSpan(secondsRemain)
                    print(f"正在检查第 {processedRowNum} 行，共 {lineCount} 行，进度: {processedRowNum/lineCount:.2%}，错误：{len(rowsErrorJson)}，剩余：{strRemain}                   ", end='\r')
                    tLast = t

    # Dump row-level errors as JSON.
    with open(csvFilePath + r'\errorlog\csvRowsError_%s.json' % csvFileName, 'w', encoding='utf-8') as error_file:
        json.dump(rowsErrorJson, error_file, ensure_ascii=False, indent=4)
    print('\n=========== %s 检查行 FINISH ============' % csvFileName)

    # Persist the per-column metadata (including observed max lengths).
    with open(csvFilePath + r'/errorlog/csvColsDict_%s.json' % csvFileName, 'w', encoding='utf-8') as error_file:
        json.dump(csvColsDict, error_file, ensure_ascii=False, indent=4)

    # Build the Oracle DDL: every column is VARCHAR2 sized to the observed
    # maximum length plus a 50-character safety margin.
    tableName = _sanitizeTableName(csvFileName)
    createTableSql = f"CREATE TABLE tbldirty_{tableName} (\n"
    for col, info in csvColsDict.items():
        # Skip nameless columns that never held any data.
        if not col and info['length'] == 0:
            continue
        createTableSql += f"    {col} VARCHAR2({info['length'] + 50}) ,\n"
    createTableSql = createTableSql.rstrip(',\n') + "\n);"
    with open(csvFilePath + r'/errorlog/createTable_%s.sql' % tableName, 'w', encoding='utf-8') as sql_file:
        sql_file.write(createTableSql)

    print('============ %s COMPLETE ============' % csvFileName)

if __name__ == '__main__':
    # Batch driver: validate every CSV listed below under ./dataSource/.
    csvFiles = (
        '440703_H44070300295_江门蓬江孚昌门诊部_结算表.csv', '440703_H44070300295_江门蓬江孚昌门诊部_就诊表.csv', 
        '440703_H44070300295_江门蓬江孚昌门诊部_明细表.csv', '440703_H44070300295_江门蓬江孚昌门诊部_诊断表.csv', 
        '440703_H44070300297_江门康城血液透析中心_结算表.csv', '440703_H44070300297_江门康城血液透析中心_就诊表.csv', 
        '440703_H44070300297_江门康城血液透析中心_明细表.csv', '440703_H44070300297_江门康城血液透析中心_诊断表.csv', 
        '440703_H44070300331_江门蓬江宏仁中西医结合诊所_结算表.csv', '440703_H44070300331_江门蓬江宏仁中西医结合诊所_就诊表.csv', 
        '440703_H44070300331_江门蓬江宏仁中西医结合诊所_明细表.csv', '440703_H44070300331_江门蓬江宏仁中西医结合诊所_诊断表.csv', 
        '440784_H44078400312_鹤山市第三人民医院_结算表.csv', '440784_H44078400312_鹤山市第三人民医院_就诊表.csv', 
        '440784_H44078400312_鹤山市第三人民医院_明细表.csv', '440784_H44078400312_鹤山市第三人民医院_诊断表.csv', 
        '440785_H44078500035_恩平市恩城街道办事处江南社区卫生服务中心_结算表.csv', '440785_H44078500035_恩平市恩城街道办事处江南社区卫生服务中心_就诊表.csv', 
        '440785_H44078500035_恩平市恩城街道办事处江南社区卫生服务中心_明细表.csv', '440785_H44078500035_恩平市恩城街道办事处江南社区卫生服务中心_诊断表.csv', 
        '440785_H44078500065_恩平市沙湖镇中心卫生院（恩平市第二人民医院）_结算表.csv', '440785_H44078500065_恩平市沙湖镇中心卫生院（恩平市第二人民医院）_就诊表.csv', 
        '440785_H44078500065_恩平市沙湖镇中心卫生院（恩平市第二人民医院）_明细表.csv', '440785_H44078500065_恩平市沙湖镇中心卫生院（恩平市第二人民医院）_诊断表.csv', 
        '440785_H44078500113_恩平市大槐镇中心卫生院_结算表.csv', '440785_H44078500113_恩平市大槐镇中心卫生院_就诊表.csv', 
        '440785_H44078500113_恩平市大槐镇中心卫生院_明细表.csv', '440785_H44078500113_恩平市大槐镇中心卫生院_诊断表.csv', 
        '440785_H44078500128_恩平市横陂镇中心卫生院_结算表.csv', '440785_H44078500128_恩平市横陂镇中心卫生院_就诊表.csv', 
        '440785_H44078500128_恩平市横陂镇中心卫生院_明细表.csv', '440785_H44078500128_恩平市横陂镇中心卫生院_诊断表.csv', 
        '440785_H44078500147_恩平爱尔新希望眼耳鼻喉医院_结算表.csv', '440785_H44078500147_恩平爱尔新希望眼耳鼻喉医院_就诊表.csv', 
        '440785_H44078500147_恩平爱尔新希望眼耳鼻喉医院_明细表.csv', '440785_H44078500147_恩平爱尔新希望眼耳鼻喉医院_诊断表.csv'
    )

    innerPath = r'./dataSource/'
    # Fail fast: raise on the first listed file that does not exist.
    missing = next((name for name in csvFiles if not os.path.exists(innerPath + name)), None)
    if missing is not None:
        raise FileNotFoundError(f"文件 {missing} 不存在，请检查路径 {innerPath}")

    for fileIndex, csvFile in enumerate(csvFiles, start=1):
        print(f'\n\n第 {fileIndex} 个文件：{csvFile}')
        csvPeek(innerPath + csvFile, encoding='utf-8', delimiter=',', newline='\n')