# -*- coding: utf-8 -*-
import csv
import time
from multiprocessing import Process, Manager
from makeLabel import makeLabel
from industryClassLib import allIndustryClass3
from mysqlIndustryClass import mysqlIndustryClass
from tools import industryClass3To2
from tools import industryClass3To1
import json
import queue
from contextlib import contextmanager

# Configuration
labelList = allIndustryClass3  # dataset of level-3 industry classes used for labeling
num_processes = 4 # number of parallel worker processes
version = 0.9 # label version written with each result row
skipHistory = False # if True, skip companies already fully labeled at this version; False recomputes and overwrites
calcCompanyList = "makeLabelBatchOnline.txt"  # input file: one company name per line
DB_CONNECT_TIMEOUT = 20  # database connection timeout (seconds)
BATCH_SIZE = 100  # number of rows per batched database insert

# Database connection "pool" — NOTE(review): DB_POOL is never read or written
# anywhere in this file; confirm it is unused before removing.
DB_POOL = queue.Queue()
MAX_RETRIES = 3
RETRY_DELAY = 5  # delay between connection retries, in seconds

@contextmanager
def get_db_connection():
    """Context manager that yields a validated database connection.

    Retries up to MAX_RETRIES times (sleeping RETRY_DELAY seconds between
    attempts); each successful attempt is ping-validated and has its MySQL
    session timeouts raised to 5 minutes so long-running batches are not
    cut off. The connection is always closed on exit.

    Yields:
        mysqlIndustryClass: a live, validated database wrapper.

    Raises:
        Exception: when no connection could be established after MAX_RETRIES.
    """
    db = None
    try:
        for attempt in range(MAX_RETRIES):
            try:
                db = mysqlIndustryClass()
                # Validate the connection before handing it out.
                db.connection.ping(reconnect=True)
                cursor = db.connection.cursor()
                try:
                    cursor.execute("SET SESSION wait_timeout=300")  # 5-minute timeout
                    cursor.execute("SET SESSION interactive_timeout=300")
                finally:
                    cursor.close()  # fix: cursor was previously never closed
                break
            except Exception as e:
                print(f"数据库连接尝试 {attempt + 1}/{MAX_RETRIES} 失败: {str(e)}")
                # fix: close a half-opened connection before retrying —
                # the original leaked it on every non-final failed attempt.
                if db:
                    try:
                        db.connection.close()
                    except Exception:
                        pass
                    db = None
                if attempt < MAX_RETRIES - 1:
                    time.sleep(RETRY_DELAY)
                else:
                    raise Exception(f"在 {MAX_RETRIES} 次尝试后无法建立数据库连接: {str(e)}")

        if db is None:
            raise Exception("无法建立数据库连接")

        yield db
    finally:
        # Always release the connection, even if the caller's body raised.
        if db:
            try:
                db.connection.close()
            except Exception as e:
                print(f"关闭数据库连接时出错: {str(e)}")

def _flush_batch(db, batch_data, company_name):
    """Persist batch_data to the database, preferring the batch API.

    Falls back to row-by-row inserts when batchSetIndustryClass returns a
    falsy value, so one bad row cannot silently discard the whole batch.
    Re-raises any failure wrapped in a JSON error payload.

    :param db: open mysqlIndustryClass wrapper
    :param batch_data: list of parameter tuples for setIndustryClassByCompanyName
    :param company_name: company being processed when the flush triggered (error context)
    """
    try:
        print(f"尝试批量插入 {len(batch_data)} 条数据")
        print(f"准备插入数据: {batch_data}")
        if db.batchSetIndustryClass(batch_data):
            print(f"批量插入成功")
        else:
            # Batch API reported failure — retry each row individually.
            print(f"批量插入失败，回退到单条插入模式")
            for data in batch_data:
                try:
                    db.setIndustryClassByCompanyName(*data)
                    print(f"数据库保存成功: {data[0]}")
                except Exception as db_error:
                    error_msg = {
                        "error": str(db_error),
                        "params": data,
                        "param_count": len(data)
                    }
                    print('错误')
                    raise Exception(f"数据库保存错误: {json.dumps(error_msg, ensure_ascii=False, indent=2)}")
    except Exception as db_error:
        error_msg = {
            "error": str(db_error),
            "batch_size": len(batch_data),
            "last_company": company_name
        }
        print('批量插入错误')
        raise Exception(f"数据库批量保存错误: {json.dumps(error_msg, ensure_ascii=False, indent=2)}")


def _record_error(processIdx, company_name, e, logs, errorCompanyList):
    """Append a failure record to the shared error list and error_detail.log."""
    error_msg = f"【ERROR】程序运行错误 {company_name}: {str(e)}"
    errorCompanyList.append({
        "company": company_name,
        "error": str(e),
        "time": time.strftime("%Y-%m-%d %H:%M:%S"),
        "process_id": processIdx,
        "logs": logs
    })
    with open("error_detail.log", "a", encoding="utf-8") as f:
        f.write(f"\n{'-'*20} 错误记录 {'-'*20}\n")
        f.write(f"时间: {time.strftime('%Y-%m-%d %H:%M:%S')}\n")
        f.write(f"公司名称: {company_name}\n")
        f.write(f"错误类型: {type(e).__name__}\n")
        f.write(f"错误信息: {str(e)}\n")
        f.write(f"进程ID: {processIdx}\n")
        if logs:
            f.write(f"处理日志:\n{logs}\n")
        f.write(f"{'-'*50}\n")
    print(error_msg)


def process_chunk(processIdx, chunk, errorCompanyList):
    """
    Process one chunk of company names: compute an industry label for each
    company and persist the results in batches of BATCH_SIZE.

    :param processIdx: index of this worker process (for logging)
    :param chunk: list of company names assigned to this worker
    :param errorCompanyList: shared (Manager) list collecting failure records
    """
    agentMakeLabel = makeLabel()
    batch_data = []  # pending rows awaiting a batched insert

    for relative_index, company_name in enumerate(chunk):
        # fix: initialize logs up front — it was previously unbound in the
        # except handler whenever the failure happened before the label
        # computation ran (e.g. a connection error), raising NameError and
        # masking the real exception.
        logs = ""
        try:
            with get_db_connection() as db:
                r = db.getIndustryClassByCompanyName(company_name)
                if skipHistory and r[0] and r[1] and r[2] and r[3] and r[5] == version:
                    pass  # already fully labeled at this version — skip recompute
                else:
                    # 1. Compute the label for this company.
                    certainty, r, explain, logs = agentMakeLabel.progressByBackCompanyName(labelList, company_name, QAFlag=True)

                    # 2. Validate the result shape.
                    if not isinstance(r, dict):
                        raise ValueError(f"结果格式错误，应为字典类型: {json.dumps({'result': r}, ensure_ascii=False)}")

                    # 3. Keep only valid industry classifications.
                    valid_results = {k: v for k, v in r.items() if v in ['强', '弱']}
                    if not valid_results:
                        raise ValueError(f"没有有效的行业分类结果: {json.dumps({'result': r}, ensure_ascii=False)}")

                    # 4. Serialize and derive the level-1/level-2 classes.
                    result_str = str(valid_results).replace('"', "'").replace(" ","")
                    industry_class1 = industryClass3To1(valid_results, True)
                    industry_class2 = industryClass3To2(valid_results, True)

                    # 5. Validate the conversions.
                    if not industry_class1 or not industry_class2:
                        raise ValueError(f"行业分类转换失败: class1={industry_class1}, class2={industry_class2}")

                    batch_data.append((
                        str(company_name),
                        str(industry_class1),
                        str(industry_class2),
                        str(result_str),
                        str(explain) if explain else "",
                        str(certainty),
                        str(version)
                    ))

                    # Flush once the batch is full; any remainder is flushed
                    # after the loop.
                    if len(batch_data) >= BATCH_SIZE:
                        _flush_batch(db, batch_data, company_name)
                        batch_data = []

        except Exception as e:
            _record_error(processIdx, company_name, e, logs, errorCompanyList)
            continue  # no progress line for failed companies

        print(f"进程{processIdx}({relative_index+1}/{len(chunk)})，{company_name}")

    # fix: flush any leftover rows. The original only flushed on the last
    # iteration's success path, so the tail batch was silently lost whenever
    # the final company was skipped (skipHistory) or raised.
    if batch_data:
        try:
            with get_db_connection() as db:
                _flush_batch(db, batch_data, batch_data[-1][0])
        except Exception as e:
            # One error record stands in for the whole unflushed batch.
            _record_error(processIdx, batch_data[-1][0], e, "", errorCompanyList)

def main():
    """Read the company list, fan the labeling work out over worker
    processes, and print aggregate success/failure statistics at the end."""
    t1 = time.time()
    # Read the input file, dropping blank lines.
    with open(calcCompanyList, "r", encoding="utf-8") as f:
        lines = [line.strip() for line in f if line.strip()]

    total_companies = len(lines)
    if total_companies == 0:
        # fix: the original raised ZeroDivisionError in the success-rate
        # print when the input file was empty.
        print("输入文件为空，无需计算。")
        return

    # Shared list so worker processes can report failures back to the parent.
    manager = Manager()
    errorCompanyList = manager.list()

    # fix: ceil-division instead of `total // n + 1`, which under-utilized
    # workers when total divided evenly (e.g. 8 companies / 4 processes gave
    # chunks of 3,3,2 on only 3 processes instead of 2,2,2,2 on 4).
    chunk_size = -(-total_companies // num_processes)
    chunks = [lines[i:i + chunk_size] for i in range(0, total_companies, chunk_size)]
    print("总企业数：%s，拆分成%s进程并行计算，每个进程约%s个。"%(total_companies, num_processes, chunk_size))

    # Spawn one worker per non-empty chunk.
    processes = []
    for i, chunk in enumerate(chunks):
        if not chunk:
            continue
        p = Process(target=process_chunk, args=(i, chunk, errorCompanyList))
        processes.append(p)
        p.start()

    # Wait for all workers to finish.
    for p in processes:
        p.join()

    # Tally results: every company not reported as failed counts as success.
    failed_companies = len(errorCompanyList)
    success_companies = total_companies - failed_companies

    if failed_companies > 0:
        print("\n【计算错误企业】：")
        for error in errorCompanyList:
            print(f"- {error['company']}: {error['error']}")

    # Persist the failure records (overwrites any previous run's file).
    with open("makeLabelBatchOnlineError.txt", 'w', encoding='utf-8') as file:
        for item in errorCompanyList:
            file.write(str(item) + '\n')

    current_time = time.strftime("%Y-%m-%d %H:%M:%S")

    # Print the run summary.
    print("\n" + "="*50)
    print("运行统计信息：")
    print(f"当前时间: {current_time}")
    print(f"总企业数: {total_companies}")
    print(f"成功处理: {success_companies}")
    print(f"失败数量: {failed_companies}")
    print(f"成功率: {(success_companies/total_companies*100):.2f}%")
    print(f"总用时: {round(time.time() - t1, 2)}秒")
    print("="*50)

    if failed_companies > 0:
        print("\n请查看 makeLabelBatchOnlineError.txt 和 error_detail.log 获取详细错误信息")

    print("\nEND")

if __name__ == "__main__":
    main()
