import base64
import hashlib
import json
import os
import secrets
import tempfile
import threading
import time
from datetime import datetime
from io import BytesIO

import numpy as np
import psutil
import pyodbc
from docx import Document
from docx.enum.table import WD_ALIGN_VERTICAL
from docx.enum.text import WD_ALIGN_PARAGRAPH
from docx.oxml.ns import nsdecls
from docx.shared import Inches, Pt, Cm, RGBColor
from flask import Flask, jsonify, request, send_file, session
from flask_cors import CORS

# SQL Server connection settings (Windows trusted authentication).
# server = 'lbj\\SQLEXPRESS01'  # change to your own machine's database address
server = 'localhost'  # change to your own machine's database address
database = 'NEUBG'
driver = '{ODBC Driver 17 for SQL Server}'

# NOTE(review): this Flask instance is shadowed by a second
# `app = Flask(__name__)` further down the file; routes register on the
# second instance, so this credentialed CORS setup may never take effect.
app = Flask(__name__)
CORS(app, resources={r"/*": {"origins": "*"}}, supports_credentials=True)

connection_string = f'DRIVER={driver};SERVER={server};DATABASE={database};Trusted_Connection=yes;'


# conn = pyodbc.connect(connection_string)  # A single global connection was
# abandoned in favour of the connection pool defined below.
class ConnectionPool:
    """Fixed-size pool of pyodbc connections.

    ``pool_size`` connections are opened eagerly in ``__init__``; callers
    borrow one with ``get_connection()`` and must hand it back with
    ``release_connection()``.  All list bookkeeping is guarded by a lock
    because Flask serves requests from multiple threads and the original
    unsynchronized version could hand the same connection out twice.
    """

    def __init__(self, connection_string, pool_size=5):
        self.connection_string = connection_string
        self.pool_size = pool_size
        self.connections = []  # every connection owned by the pool
        self.available_connections = []  # subset currently not lent out
        # Serializes get/release, which mutate the two lists above from
        # concurrent request threads.
        self._lock = threading.Lock()

        # Eagerly fill the pool; a failed connect is logged but not fatal,
        # leaving a smaller pool that get_connection() can top up later.
        for _ in range(pool_size):
            try:
                connection = pyodbc.connect(connection_string)
                self.connections.append(connection)
                self.available_connections.append(connection)
            except Exception as e:
                print(f"初始化连接池时出错: {e}")

    def get_connection(self):
        """Borrow a connection from the pool.

        Returns an idle connection if one exists, otherwise opens a new one
        while the pool is below ``pool_size``.  Raises ``Exception`` when the
        pool is exhausted (fail fast instead of blocking the request thread).
        """
        with self._lock:
            print(f"连接池状态: 总共{len(self.connections)}个连接, 可用{len(self.available_connections)}个")

            if self.available_connections:
                print("返回现有连接")
                return self.available_connections.pop()

            if len(self.connections) < self.pool_size:
                # Pool not yet at capacity: open a fresh connection.
                try:
                    connection = pyodbc.connect(self.connection_string)
                    self.connections.append(connection)
                    print("池未满，创建新连接")
                    return connection
                except Exception as e:
                    print(f"创建新连接失败: {e}")
                    raise

            # Every connection is in use and the pool is full.
            print("连接池已满，等待连接释放")
            raise Exception("连接池已满，无法获取连接")

    def release_connection(self, connection):
        """Return a borrowed connection, replacing it if it has gone bad."""
        with self._lock:
            print(f"释放  前   连接池状态: 总共{len(self.connections)}个连接, 可用{len(self.available_connections)}个")

            try:
                # Cheap liveness probe before putting it back in rotation.
                cursor = connection.cursor()
                cursor.execute("SELECT 1")
                cursor.close()

                # Healthy: hand it back to the pool (guard against double
                # release and against foreign connections).
                if connection in self.connections and connection not in self.available_connections:
                    self.available_connections.append(connection)
                    print("连接有效， 已归还到池")
                else:
                    print("连接不在池中或已在可用列表中")
            except Exception as e:
                print(f"连接已损坏，不归还到池: {e}")
                # Bug fix: only open a replacement when the broken connection
                # actually belonged to the pool.  The old code created a
                # replacement unconditionally, so releasing a foreign broken
                # connection grew the pool past pool_size.
                if connection in self.connections:
                    self.connections.remove(connection)
                    try:
                        new_connection = pyodbc.connect(self.connection_string)
                        self.connections.append(new_connection)
                        self.available_connections.append(new_connection)
                        print("已创建新连接替代损坏连接")
                    except Exception as e:
                        print(f"创建替代连接失败: {e}")

            print(f"释放  后   连接池状态: 总共{len(self.connections)}个连接, 可用{len(self.available_connections)}个")


# Create the shared connection pool used by every request handler below.
connection_pool = ConnectionPool(connection_string, pool_size=10)

# username = 'your_username'
# password = 'your_password'

# NOTE(review): this creates a SECOND Flask app, shadowing the instance
# built near the top of the file (whose CORS setup allowed credentials).
# All routes below register on this instance — confirm which CORS
# configuration is actually intended.
app = Flask(__name__)
CORS(app)  # allow cross-origin requests from any domain
app.config['ENV'] = 'development'
os.environ['FLASK_ENV'] = 'development'

# Molar mass (g/mol) of each element handled by the converter.
element_molar_mass = {
    'Ca': 40.08, 'Ce': 140.12, 'Mg': 24.31, 'Al': 26.98, 'Ti': 47.87, 'Li': 6.94,
    'Si': 28.09, 'B': 10.81, 'V': 50.94, 'Mn': 54.94, 'Nb': 92.91, 'Cr': 52.00,
    'Fe': 55.85, 'Na': 23, 'K': 39.1, 'C': 12.01, 'N': 14.01, 'O': 16.00, 'F': 19.00,
    'S': 32.06, 'Cl': 35.45
}

# Compound definitions: stoichiometric element counts plus 'M', the
# compound's molar mass in g/mol.
fluorides = {
    'CaF2': {'Ca': 1, 'F': 2, 'M': 78.08},
}

oxides = {
    'CaO': {'Ca': 1, 'O': 1, 'M': 56.08},
    'CeO': {'Ce': 1, 'O': 1, 'M': 156.12},
    'MgO': {'Mg': 1, 'O': 1, 'M': 40.31},
    'Al2O3': {'Al': 2, 'O': 3, 'M': 101.96},
    'Ti2O3': {'Ti': 2, 'O': 3, 'M': 143.74},
    'Li2O': {'Li': 2, 'O': 1, 'M': 29.88},
    'SiO2': {'Si': 1, 'O': 2, 'M': 60.09},
    'B2O3': {'B': 2, 'O': 3, 'M': 69.62},
    'V2O3': {'V': 2, 'O': 3, 'M': 149.88},
    'MnO': {'Mn': 1, 'O': 1, 'M': 70.94},
    'NbO2': {'Nb': 1, 'O': 2, 'M': 124.91},
    'Cr2O3': {'Cr': 2, 'O': 3, 'M': 152},
    'FeO': {'Fe': 1, 'O': 1, 'M': 71.85},
    'Na2O': {'Na': 2, 'O': 1, 'M': 62},
    'K2O': {'K': 2, 'O': 1, 'M': 79.8}
}

nitrides = {
    'TiN': {'Ti': 1, 'N': 1, 'M': 61.88},
    'AlN': {'Al': 1, 'N': 1, 'M': 40.99}
}

sulfides = {
    'CeS': {'Ce': 1, 'S': 1, 'M': 172.18},
    # NOTE(review): 'TiS' lists two S atoms and M=119.93, which matches
    # neither TiS (79.93) nor TiS2 (111.99) — confirm intended stoichiometry.
    'TiS': {'Ti': 1, 'S': 2, 'M': 119.93},
    'MnS': {'Mn': 1, 'S': 1, 'M': 87},
}

# Column order of the converter's output rows.
column_headers = [
    'fileName', 'index', 'CaF2', 'CaO', 'CeO', 'MgO', 'Al2O3', 'Ti2O3', 'Li2O', 'SiO2',
    'B2O3', 'V2O3', 'MnO', 'NbO2', 'Cr2O3', 'FeO', 'Na2O', 'K2O', 'TiN', 'AlN',
    'CeS', 'TiS', 'MnS']


# Compound generation calculator  /////////////////////////////////////////////////////////////////////////////////
def calculate_compound_amount(compounds, element_molar_numbers, element_molar_mass):
    """Greedily convert available element moles into compound masses.

    Walks *compounds* in insertion order; for each compound whose required
    elements are all present above a 1e-10 threshold, forms as much of it as
    the scarcest element allows, records the produced mass, and deducts the
    consumed moles from *element_molar_numbers* (mutated in place).

    ``element_molar_mass`` is kept for interface compatibility but is not
    consulted: each compound entry carries its own molar mass under 'M'.

    Returns a dict mapping compound name -> generated mass.
    """
    generated = {}
    for name, spec in compounds.items():
        # Stoichiometry without the special molar-mass entry.
        stoich = {el: cnt for el, cnt in spec.items() if el != 'M'}
        if not stoich:
            continue

        # Skip the compound when any required element is missing or
        # effectively exhausted.
        if any(element_molar_numbers.get(el, 0) <= 1e-10 for el in stoich):
            continue

        # The scarcest element (relative to its coefficient) limits output.
        limiting = min(element_molar_numbers[el] / cnt for el, cnt in stoich.items())

        generated[name] = limiting * spec['M']

        # Consume the moles that went into this compound.
        for el, cnt in stoich.items():
            element_molar_numbers[el] -= limiting * cnt

    return generated


@app.route('/')
def hello_world():
    """Trivial liveness endpoint for the service root."""
    greeting = 'Hello World!'
    return greeting


#  Composition conversion algorithm  ////////////////////////////////////////////////////////////////////////////////////////
@app.route('/converter', methods=['POST'])
def post_data():
    """Convert a row of elemental mass fractions into compound masses.

    Request JSON: optional 'fileName' and 'index' keys plus element-symbol
    keys mapping to mass values (numbers or numeric strings).  Responds with
    the original payload and a 'converted_row' keyed by compound name, or an
    HTTP 400 with an 'error' message for invalid input.
    """
    content = request.json
    print("入参")
    print(content)

    # Fall back to a placeholder when the client omits 'fileName'.
    file_name = content.get('fileName', 'Converted Data')

    # One output slot per known compound column, all initialised to 0.
    converted_row = {header: 0 for header in
                     ['fileName', 'index', 'CaF2',
                      'CaO', 'CeO', 'MgO', 'Al2O3', 'Ti2O3', 'Li2O', 'SiO2', 'B2O3', 'V2O3', 'MnO', 'NbO2', 'Cr2O3',
                      'FeO', 'Na2O', 'K2O',
                      'TiN', 'AlN',
                      'CeS', 'TiS', 'MnS', ]
                     }

    converted_row['fileName'] = file_name
    converted_row['index'] = content.get('index', 0)

    # Everything except the metadata keys is treated as an element symbol.
    element_data = {k: v for k, v in content.items() if k not in ['fileName', 'index']}

    # Validate and coerce the per-element masses to numbers.
    element_mass_fractions = {}
    for element, mass in element_data.items():
        if not isinstance(mass, (int, float)):
            try:
                mass = float(mass)
            except (TypeError, ValueError):
                # Bug fix: float(None) raises TypeError, which previously
                # escaped this handler as an HTTP 500 instead of this 400.
                return jsonify({"error": f"Invalid mass value for element '{element}': {mass}"}), 400
        if element not in element_molar_mass:
            # Bug fix: an unknown element symbol previously caused a KeyError
            # (HTTP 500) during the mole conversion below.
            return jsonify({"error": f"Unknown element '{element}'"}), 400
        element_mass_fractions[element] = mass

    total_mass = sum(element_mass_fractions.values())
    if total_mass == 0:
        return jsonify({"error": "Total mass cannot be zero"}), 400

    # Convert mass fractions to molar amounts.
    element_molar_numbers = {}
    for elem, mass in element_mass_fractions.items():
        if mass == 0:
            element_molar_numbers[elem] = 0
        else:
            element_molar_numbers[elem] = mass / element_molar_mass[elem]

    print("元素摩尔数:")
    for elem, moles in element_molar_numbers.items():
        print(f"{elem}: {moles}")

    def safe_calculate_compound_amount(compounds, element_molar_numbers_copy):
        """Greedy mole allocation; mutates the dict copy it is given.

        Unlike the module-level calculate_compound_amount, the exhaustion
        threshold here scales with the stoichiometric count (count * 1e-10).
        """
        results = {}
        for compound, comp in compounds.items():
            limiting_factors = []
            can_generate = True

            for element, count in comp.items():
                if element == 'M':  # skip the molar-mass entry
                    continue

                # The compound cannot form if a required element is missing
                # or effectively exhausted.
                if (element not in element_molar_numbers_copy or
                        element_molar_numbers_copy[element] < count * 1e-10):
                    can_generate = False
                    break

                limiting_factors.append(element_molar_numbers_copy[element] / count)

            if not can_generate or not limiting_factors:
                continue

            # The scarcest element bounds how much of the compound forms.
            limiting = min(limiting_factors)

            mass = limiting * comp['M']
            results[compound] = mass

            # Deduct the consumed moles.
            for element, count in comp.items():
                if element == 'M':
                    continue
                if element in element_molar_numbers_copy:
                    element_molar_numbers_copy[element] -= limiting * count

        return results

    # Each compound family works on its own copy of the mole budget, so
    # e.g. oxides do not see the moles consumed by fluorides.
    fluoride_results = safe_calculate_compound_amount(fluorides, element_molar_numbers.copy())
    oxide_results = safe_calculate_compound_amount(oxides, element_molar_numbers.copy())
    nitride_results = safe_calculate_compound_amount(nitrides, element_molar_numbers.copy())
    sulfide_results = safe_calculate_compound_amount(sulfides, element_molar_numbers.copy())

    print("氟化物结果:", fluoride_results)
    print("氧化物结果:", oxide_results)
    print("氮化物结果:", nitride_results)
    print("硫化物结果:", sulfide_results)

    # Merge the per-family masses into the output row (4 decimal places).
    for results in [fluoride_results, oxide_results, nitride_results, sulfide_results]:
        for compound, mass in results.items():
            if compound in converted_row:
                converted_row[compound] = round(mass, 4)

    print("请求成功，请求结果：")
    print(converted_row)

    response = {
        'received': content,
        'converted_row': converted_row
    }
    return jsonify(response)


#  Provenance rule: cosine-similarity matching  /////////////////////////////////////////////////////////////////////////////////////////
@app.route('/suyuanguize', methods=['POST'])
def suyuanguize():
    """Identify the most likely source of a slag sample.

    Standardizes the 8-element composition (O, F, Na, Mg, Al, Si, Ca, Ti)
    using mean/std computed from the built-in reference samples, then ranks
    every reference by cosine similarity to the standardized input and
    returns the ranking plus the best match.
    """
    try:
        # JSON payload from the front end
        content = request.json

        file_name = content.get('fileName', '')
        index = content.get('index', 0)

        # Element values used for the comparison; `or 0` also maps
        # None / empty-string inputs to 0 before the float() conversion.
        element_values = {
            'O': float(content.get('O', 0) or 0),
            'F': float(content.get('F', 0) or 0),
            'Na': float(content.get('Na', 0) or 0),
            'Mg': float(content.get('Mg', 0) or 0),
            'Al': float(content.get('Al', 0) or 0),
            'Si': float(content.get('Si', 0) or 0),
            'Ca': float(content.get('Ca', 0) or 0),
            'Ti': float(content.get('Ti', 0) or 0)
        }

        # Flatten to a list with a fixed element order.
        new_sample_list = [
            element_values['O'], element_values['F'], element_values['Na'],
            element_values['Mg'], element_values['Al'], element_values['Si'],
            element_values['Ca'], element_values['Ti']
        ]

        # Reference compositions (same element order as above).
        reference_samples_dict = {
            "处理前钢包渣": [70.52, 8.34, 2.59, 4.31, 11.58, 1.11, 4.35, 0],
            "处理后钢包渣": [73.67, 6.34, 0, 2.25, 15.54, 1.62, 2.71, 0],
            "中间包覆盖剂熔渣": [60.97, 8.69, 0.59, 2.86, 18.83, 1.04, 6.88, 0],
            "中间包覆盖剂原渣1": [55.97, 0, 0.26, 7.98, 16.75, 0.35, 18.99, 0],
            "中间包覆盖剂原渣2": [53.68, 2.26, 1.56, 12.18, 13.21, 0.35, 18.84, 0],
            "3st保护渣熔渣": [45.45, 23.65, 0.54, 1.39, 3.76, 12.65, 10.79, 0],
            "4st保护渣熔渣": [45.53, 24.69, 0.4, 1.43, 3.39, 12.33, 10.36, 0],
            "3st浸入式水口结瘤": [48.25, 0, 0.92, 0.64, 42.75, 0.5, 0.36, 0],
            "4st浸入式水口结瘤": [52.31, 0, 0, 1.7, 41.39, 0.35, 0, 0],
            "3st保护渣原渣": [40.75, 8.43, 5.03, 2.9, 1.31, 11.67, 18.08, 0],
            "4st保护渣原渣": [43.79, 10.72, 0.7, 1.19, 1.86, 14.34, 27.74, 0.72]
        }

        # --- Feature scaling (standardization) begins ---
        ref_names = list(reference_samples_dict.keys())
        # Reference values as a numpy array for vectorized statistics.
        ref_values_np = np.array(list(reference_samples_dict.values()), dtype=float)

        # Per-feature (per-element) mean and standard deviation, computed
        # from the reference samples only.
        means = np.mean(ref_values_np, axis=0)
        stds = np.std(ref_values_np, axis=0)

        # Guard against division by zero: std == 0 means the feature is
        # constant across all references.  Substituting 1 makes the scaled
        # value (value - mean) / 1 = value - mean.
        stds_safe = np.where(stds == 0, 1.0, stds)

        # Standardize the reference samples.
        scaled_ref_values_np = (ref_values_np - means) / stds_safe

        # Dict of standardized reference samples.
        scaled_reference_samples_dict = {
            name: list(scaled_data) for name, scaled_data in zip(ref_names, scaled_ref_values_np)
        }

        # Standardize the new sample with the reference-derived statistics.
        new_sample_np = np.array(new_sample_list, dtype=float)
        scaled_new_sample_list = list((new_sample_np - means) / stds_safe)

        # --- Feature scaling (standardization) ends ---

        # Cosine similarity of two vectors (numpy-based).
        def cosine_similarity(a, b):
            vec_a = np.array(a, dtype=float)  # coerce to float numpy arrays
            vec_b = np.array(b, dtype=float)

            dot_product = np.dot(vec_a, vec_b)
            norm_a = np.linalg.norm(vec_a)  # Euclidean norms
            norm_b = np.linalg.norm(vec_b)

            # Avoid division by zero for all-zero vectors.
            if norm_a == 0 or norm_b == 0:
                return 0.0  # return a float

            similarity = dot_product / (norm_a * norm_b)
            return similarity

        # Similarity of the scaled input against every scaled reference.
        similarities = {}
        for name, scaled_ref_sample_data in scaled_reference_samples_dict.items():
            sim = cosine_similarity(scaled_new_sample_list, scaled_ref_sample_data)
            similarities[name] = round(sim, 4)  # keep 4 decimal places

        # Rank by similarity, best match first.
        sorted_similarities = dict(sorted(similarities.items(), key=lambda item: item[1], reverse=True))

        # Pick the best-matching source.
        most_similar = ""
        most_similar_value = 0.0
        if similarities:  # guard against an empty dict
            most_similar = max(similarities, key=similarities.get)
            most_similar_value = similarities[most_similar]

        return jsonify({
            'success': True,
            'result': {
                'similarities': sorted_similarities,
                'most_similar': most_similar,
                'similarity_value': most_similar_value,
                'message': f"匹配结果: {most_similar} (相似度: {most_similar_value})",
                'fileName': file_name,
                'index': index
            }
        })

    except Exception as e:
        return jsonify({
            'success': False,
            'message': f"处理数据时出错: {str(e)}"
        }), 500


# //////////////////////////////////////////////////////////////////////////////////////////   Above: provenance section.  Below: intelligent analysis of non-metallic inclusions.


# The login-related endpoints below return hard-coded mock data — do not modify ----- begin
@app.route('/getInfo', methods=['GET'])
def get_data():
    """Mock of the RuoYi-style '/getInfo' endpoint.

    Returns a hard-coded admin user with wildcard permissions so the front
    end can operate without a real user service.  All values below are
    static fixture data (including the bcrypt hash).
    """
    return jsonify({
        "msg": "操作成功",
        "code": 200,
        "permissions": [
            "*:*:*"
        ],
        "roles": [
            "admin"
        ],
        "user": {
            "createBy": "admin",
            "createTime": "2024-12-16 14:25:49",
            "updateBy": None,
            "updateTime": None,
            "remark": "管理员",
            "params": {
                "@type": "java.util.HashMap"
            },
            "userId": 1,
            "deptId": 103,
            "userName": "admin",
            "nickName": "若依",
            "email": "ry@163.com",
            "phonenumber": "15888888888",
            "sex": "1",
            "avatar": None,
            "password": "$2a$10$7JB720yubVSZvUI0rEqK/.VqGOZTH.ulu33dHOiBE8ByOhJIrdAu2",
            "status": "0",
            "delFlag": "0",
            "loginIp": "127.0.0.1",
            "loginDate": "2024-12-17T11:54:00.000+08:00",
            "dept": {
                "createBy": None,
                "createTime": None,
                "updateBy": None,
                "updateTime": None,
                "remark": None,
                "params": {
                    "@type": "java.util.HashMap"
                },
                "deptId": 103,
                "parentId": 101,
                "ancestors": "0,100,101",
                "deptName": "研发部门",
                "orderNum": 1,
                "leader": "若依",
                "phone": None,
                "email": None,
                "status": "0",
                "delFlag": None,
                "parentName": None,
                "children": []
            },
            "roles": [
                {
                    "createBy": None,
                    "createTime": None,
                    "updateBy": None,
                    "updateTime": None,
                    "remark": None,
                    "params": {
                        "@type": "java.util.HashMap"
                    },
                    "roleId": 1,
                    "roleName": "超级管理员",
                    "roleKey": "admin",
                    "roleSort": 1,
                    "dataScope": "1",
                    "menuCheckStrictly": False,
                    "deptCheckStrictly": False,
                    "status": "0",
                    "delFlag": None,
                    "flag": False,
                    "menuIds": None,
                    "deptIds": None,
                    "permissions": None,
                    "admin": True
                }
            ],
            "roleIds": None,
            "postIds": None,
            "roleId": None,
            "admin": True
        }
    })


@app.route('/getRouters', methods=['GET'])
def get_routers():
    """Return the static front-end routing table (mock data)."""
    inclusion_route = {
        "name": "Fjs_source",
        "path": "fjs_source",
        "hidden": False,
        "component": "bg/jzw",
        "meta": {
            "title": "非金属夹杂物溯源",
            "icon": "example",
            "noCache": False,
            "link": None
        }
    }
    layout_route = {
        "path": "/",
        "hidden": False,
        "component": "Layout",
        "children": [inclusion_route]
    }
    return jsonify({
        "msg": "操作成功",
        "code": 200,
        "data": [layout_route]
    })


@app.route('/captchaImage', methods=['GET'])
def captchaImage():
    """Mock captcha endpoint.

    Always returns the same base64-encoded JPEG and a fixed uuid, so captcha
    verification is effectively a no-op for the front end.
    """
    return jsonify({
        "msg": "操作成功",
        "img": "/9j/4AAQSkZJRgABAgAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAA8AKADASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDtrW1ga1hZoIySikkoOeKsCztv+feL/vgU2z/484P+ua/yqyKiMY8q0IjGPKtCIWdr/wA+0P8A3wKeLK1/59of+/YqUU4U+WPYfLHsRCytP+fWH/v2KcLG0/59YP8Av2KkaRY0LOwCgZJPYVl6L4p0jX2nTTrsSvAcSIVKke/PUe4rSNByi5xjdLd20V+4csexpiws/wDn1g/79inCws/+fSD/AL9iqx1rT11dNJ+0qb5ozJ5IBJCjuT0H41ohhiplS5bXW4csexENPsv+fS3/AO/Y/wAKcNOsv+fO3/79L/hWbB4q0SfVn0uPUYDeKceXu6n0B6E+3WttSDTnQdNpTja+uqBRi+hCNOsf+fO3/wC/S/4U4abY/wDPlb/9+l/wqwKcKjlj2Dlj2K40yw/58rb/AL9L/hTxplh/z423/fpf8KnLBRknAFVrTVbC+Li0vIJyhwwjcMR9aappq6QcsexINL0//nxtv+/K/wCFPGlaf/z4Wv8A35X/AAqO21Wwurl7e3vIJZo+WRJASv1Aq+KHTUdGg5Y9isNK07/nwtf+/K/4U4aTp3/QPtf+/K/4VaFPFLlj2Dlj2Ko0nTf+gfaf9+V/wqtqel6fHpF66WNqrrA5VhCoIO08jitYVV1b/kC3/wD17yf+gmlKMeV6ClGPK9DkrP8A484P+ua/yqyKr2f/AB5wf9c1/lVkU4/Chx+FDhTugpBSkcVRRwvxH16bT9G+x2yv596fJVgOAO/Prj+vpXmnhbVZfCfiVhMQElQxMe3PKn8wP1r2DX9MN6mCgbByMjOD615n4h8MyOhIXEq8of6GvfyzMqFOl9TrRtCd+Z9bvZ/Lt8zKcG3zLdEOr6/rx8RwakkMVrftCUAhO8lR35713/hbxVf6porrfJ+/UHZcp9yQf0YdCPavM/D863d7HZ3i4vIvliZupA/h+or0DS/DbJqCXcFxLbhjmeFfuS8dSOx6cit80q04x+qVaajKK0kk7Pe1nduzW176790oJ/EmcHJ4RlivW868JLEnzU6q2epB6/nXR+CPFOt6Z4ti0TUrp7q3lBUbzu28Egg+ldreeHFkQuBzXA3/AIYuI9Ze8tb6W2uNvyEc4I/p7VNHOHi1Ojj2uVxaTttLo9Fdf8AHT5dYns93ruma
bHvvr63thtLDzZAuQPTPX8K5KD4xeGpdVNoftUcGdq3bx4Q++M7gPcj64riptet/EFkNA8UReXf27gpMuF3n1B7ZHXsfbtg3uraXBNJpdzoEcNqh2h1/1g9Gz1P5/nSwuVU03SrQlKe/uuKXL/NG/wAKIPdM9o1zXLLV9IvbHS9VtXuZbdtphmDlQeM8Hpz1rwoeGb+21H7M9wERv+W0ZJU/y5+tX7HUfDvh1zd2Aub+9YFUDMVVAfXgZ/I/hXfaHb2XiXThf23DA7ZYj96N+4Nae0xeUQlLDxfs5dZRs7+m/pfRhaNTfc88uEfwbqlhqel3sjzKxLh+N2MZBx1B5Br6F8KeJLXxLosF/bHhxh0PVGHVT/AJ6Yr5+8f6bNaanHJgmDbtHsa7L4LSPBDe5YlJJFwPTAOa6MxpxxWUU8bVneotL97vZ+n9bkwfLUcVse4Cnio4zlQalFfIHQOFVdW/5Al/8A9e0n/oJq2Kq6v/yBL/8A69pP/QTUy+Fky+FnJWf/AB5Qf9c1/lVkVXsv+PKD/rmv8qsiiPwoI/ChwpwFIKeKooY0SuORWJrGkJPESF5roBQ8YdSCKAPA/Fegzw3KXtlHJ9ojYZ8sHdx0YY7j/PSvTfAmrJr+hxXMihbmMmKcYxhx1/MEH8at6pogmfeq81Z0PTVsndliVGkOXKrgsfU+tehUx7rYWOHqK7h8L627enbsQo2ldG5LGBCRivHfGPh7Uhrb6pp1+28dInbG0egPTHsa9oZN6YrntW0T7QCQOaxwmLqYWpz07Po01dNDlFSVmfP+uam+pGJbm28q+h+RyP4hXd22kTX+kQLdReYxjXzAwzzj+daM3hASX6yvArMp4YrzXe6Po6x26q69q78fmkMRRpUqUOTku976vt2X9dCYwabbZ5to3hSLT7vzoLXEmeGbLEfTPSvRdJ0xEla58oLO6BHccFwOmfXHbPTJ9TW5HpcKnIQVcjt1QcCvLqV6tWTnUk231buWklsec+LPDn2sMdmQevFcd4L1m38O+Ll0SSxaF7iYRvIJSV3Y+UhSOM5HevcL60WaIjFcnb6BDDrJvVtYhcH5TNsG7Hpmt8LiKdOM4VU2pLo2tejfe3mKSbs0d5bkGMVOKq2akRAGrYrjKHCqur/8gS//AOvaT/0E1bFVdX/5Al//ANe0n/oJqZfCyZfCzkrL/jyt/wDrmv8AKrIrmYtauYokjVIiEUKMg9vxqT+37r/nnD/3yf8AGso1o2RnGrGyOlFOFcz/AMJDd/8APOD/AL5P+NL/AMJFd/8APOD/AL5P+NV7aI/bROoFPFcr/wAJJef88oP++T/jS/8ACS3n/PKD/vk/40e2iHtonVGMN1FKkSr0Fcr/AMJPe/8APK3/AO+W/wAaX/hKL3/nlb/98t/jR7aIe2ideBSlAw5Fch/wlV9/zyt/++W/xpf+Ervv+eVt/wB8t/jR7aIe2idX9kjLZ2irEcYQYArjf+Etv/8Anjbf98t/jS/8JfqH/PG2/wC+W/8AiqPbRD20TthTxXD/APCYah/zxtf++W/+Kpf+Ey1H/nja/wDfLf8AxVHtoh7aJ3O0EUwWybs4FcV/wmeo/wDPG1/74b/4ql/4TXUv+eFp/wB8N/8AFUe2iHtoneIoAqQVwH/Cbal/zwtP++G/+Kpf+E41P/nhaf8AfDf/ABVHtoh7aJ6CKq6v/wAgPUP+vaT/ANBNcV/wnOp/88LT/vhv/iqjufGeo3VrNbvDahJUZGKq2QCMcfNUyrRsxSqxsz//2Q==",
        "code": 200,
        "captchaEnabled": True,
        "uuid": "a7f567812e7d41ec83a6c728c8000c90"
    })


@app.route('/system/user/profile/updatePwd', methods=['PUT'])
def updatePwd():
    """Change the password of the user identified by the Authorization token.

    Reads oldPassword/newPassword from the query string, verifies the old
    password, and stores the new hash.  Responds 401 for a missing/invalid
    token, 400 for bad parameters or a wrong old password, 500 otherwise.

    NOTE(review): passwords travel as URL query parameters here (they may
    end up in server/proxy logs) — confirm this matches the front end and
    consider moving them into the request body.
    """
    try:
        # Token from the Authorization header
        token = request.headers.get('Authorization')
        if not token:
            return jsonify({
                'success': False,
                'message': '未提供认证信息'
            }), 401

        # Strip the optional 'Bearer ' prefix
        if token.startswith('Bearer '):
            token = token[7:]

        # Parameters arrive in the query string (matching the front end)
        old_password = request.args.get('oldPassword')
        new_password = request.args.get('newPassword')

        # Parameter validation
        if not old_password or not new_password:
            return jsonify({
                'success': False,
                'message': '旧密码和新密码不能为空'
            }), 400

        # Password strength check (optional)
        if len(new_password) < 6:
            return jsonify({
                'success': False,
                'message': '新密码长度不能少于6位'
            }), 400

        # Validate the token and load the user
        conn = connection_pool.get_connection()
        cursor = conn.cursor()

        # Look the user up by session token
        cursor.execute("""
            SELECT id, username, password 
            FROM account_info 
            WHERE token = ?
        """, token)

        user = cursor.fetchone()
        if not user:
            cursor.close()
            connection_pool.release_connection(conn)
            return jsonify({
                'success': False,
                'message': '无效的认证信息或会话已过期'
            }), 401

        user_id = user.id
        username = user.username
        stored_password = user.password

        # Verify the old password.  verify_password/hash_password are
        # defined elsewhere in this file (not visible in this chunk).
        if not verify_password(stored_password, username, old_password):
            cursor.close()
            connection_pool.release_connection(conn)
            return jsonify({
                'success': False,
                'message': '旧密码错误'
            }), 400

        # Hash the new password
        new_pwdhash, _ = hash_password(username, new_password)

        # Persist the new password hash
        cursor.execute("""
            UPDATE account_info 
            SET password = ?, update_time = GETDATE() 
            WHERE id = ?
        """, (new_pwdhash, user_id))

        conn.commit()
        cursor.close()
        connection_pool.release_connection(conn)

        return jsonify({
            'success': True,
            'message': '密码修改成功'
        })

    except Exception as e:
        # Make sure the borrowed connection goes back to the pool
        if 'conn' in locals() and conn:
            connection_pool.release_connection(conn)

        return jsonify({
            'success': False,
            'message': f'修改密码失败: {str(e)}'
        }), 500

@app.route('/login', methods=['POST'])
def login():
    """Authenticate a user and issue a fresh session token.

    Expects JSON {username, password}; on success stores a new random token
    (secrets.token_hex) in account_info and returns it with the user record.
    Responds 400 for missing fields, 401 for bad credentials or a disabled
    account, 500 otherwise.
    """
    try:
        content = request.json
        username = content.get('username')
        password = content.get('password')

        # Parameter validation
        if not username or not password:
            return jsonify({
                'message': '用户名和密码不能为空',
                'code': 400
            }), 400

        # Look the user up
        conn = connection_pool.get_connection()
        cursor = conn.cursor()

        cursor.execute("""
            SELECT id, username, password, email, phone, nickname, role_level, status 
            FROM account_info
            WHERE username = ?
        """, username)

        user = cursor.fetchone()
        cursor.close()

        if not user:
            connection_pool.release_connection(conn)
            return jsonify({
                'message': '用户名或密码错误',
                'code': 401
            }), 401

        # Verify the password (verify_password is defined elsewhere in this
        # file, outside this chunk)
        stored_password = user.password
        if not verify_password(stored_password, username, password):
            connection_pool.release_connection(conn)
            return jsonify({
                'message': '用户名或密码错误',
                'code': 401
            }), 401

        # Check account status (status == 1 means disabled here)
        if user.status == 1:
            connection_pool.release_connection(conn)
            return jsonify({
                'message': '账户已被禁用',
                'code': 401
            }), 401

        # Generate a cryptographically random session token
        token = secrets.token_hex(32)

        # Persist the token and update the login timestamp
        cursor = conn.cursor()
        cursor.execute("""
            UPDATE account_info 
            SET token = ?, login_time = GETDATE()
            WHERE id = ?
        """, (token, user.id))
        conn.commit()
        cursor.close()
        connection_pool.release_connection(conn)

        return jsonify({
            'message': '登录成功',
            'code': 200,
            'token': token,
            'user': {
                'id': user.id,
                'username': user.username,
                'email': user.email,
                'phone': user.phone,
                'nickname': user.nickname,
                'role_level': user.role_level
            }
        })

    except Exception as e:
        # Make sure the borrowed connection goes back to the pool
        if 'conn' in locals() and conn:
            connection_pool.release_connection(conn)
        return jsonify({
            'message': f'登录失败: {str(e)}',
            'code': 500
        }), 500


@app.route('/system/user/profile', methods=['GET'])
def get_user_profile():
    """Return the profile of the user identified by the Authorization token.

    Responds 401 when the token is missing or not found in account_info,
    500 on unexpected errors.
    """
    try:
        # Token from the Authorization header, optionally 'Bearer '-prefixed.
        token = request.headers.get('Authorization')
        if not token:
            return jsonify({
                'success': False,
                'message': '未提供认证信息'
            }), 401

        if token.startswith('Bearer '):
            token = token[7:]

        # Look the user up by session token.
        conn = connection_pool.get_connection()
        cursor = conn.cursor()

        cursor.execute("""
            SELECT id, username, email, phone, nickname, role_level, create_time,role_level,status
            FROM account_info 
            WHERE token = ?
        """, token)

        user = cursor.fetchone()
        cursor.close()
        connection_pool.release_connection(conn)

        if not user:
            return jsonify({
                'success': False,
                'message': '无效的认证信息或会话已过期'
            }), 401

        return jsonify({
            'success': True,
            'data': {
                'user': {
                    'id': user.id,
                    'username': user.username,
                    'email': user.email,
                    'phone': user.phone,
                    'nickname': user.nickname,
                    'role_level': user.role_level,
                    'status': user.status,
                    # Bug fix: a NULL create_time used to raise AttributeError
                    # (HTTP 500); it now degrades to null in the response.
                    'createTime': user.create_time.strftime('%Y-%m-%d') if user.create_time else None,
                }
            }
        })

    except Exception as e:
        # Make sure a borrowed connection always goes back to the pool.
        if 'conn' in locals() and conn:
            connection_pool.release_connection(conn)

        return jsonify({
            'success': False,
            'message': f'获取用户信息失败: {str(e)}'
        }), 500


# Update the caller's own profile
@app.route('/system/user/profile', methods=['PUT'])
def update_user_profile():
    """Update the authenticated user's own profile fields.

    Accepts JSON with any of: ``username``, ``phone``, ``email``, ``nickname``.
    ``phone``/``email`` may be empty strings (clears the stored value);
    ``username``/``nickname`` are ignored when empty.

    Returns:
        200 with the refreshed profile, 400 on validation failure,
        401 on a missing/invalid token, 500 on unexpected errors.
    """
    conn = None
    try:
        token = request.headers.get('Authorization')
        if not token:
            return jsonify({
                'success': False,
                'message': '未提供认证信息'
            }), 401

        # Strip the "Bearer " scheme prefix if present.
        if token.startswith('Bearer '):
            token = token[7:]

        content = request.json
        if not content:
            return jsonify({
                'success': False,
                'message': '未提供更新数据'
            }), 400

        username = content.get('username')
        phone = content.get('phone')
        email = content.get('email')
        nickname = content.get('nickname')

        # Mirror the update rules below: phone/email count as "provided" even
        # when they are empty strings (the old any() check wrongly rejected a
        # request that only cleared phone or email).
        if not (username or nickname or phone is not None or email is not None):
            return jsonify({
                'success': False,
                'message': '至少需要提供一个可更新的字段'
            }), 400

        import re  # imported once here (the file has no top-level re import)

        # Validate email format when a non-empty email is supplied.
        if email:
            email_pattern = r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$'
            if not re.match(email_pattern, email):
                return jsonify({
                    'success': False,
                    'message': '邮箱格式不正确'
                }), 400

        # Validate phone format when a non-empty phone is supplied
        # (mainland-China mobile numbers: 11 digits starting 13-19).
        if phone:
            phone_pattern = r'^1[3-9]\d{9}$'
            if not re.match(phone_pattern, phone):
                return jsonify({
                    'success': False,
                    'message': '手机号格式不正确'
                }), 400

        conn = connection_pool.get_connection()
        cursor = conn.cursor()

        # Resolve the caller from the token.
        cursor.execute("""
            SELECT id, username 
            FROM account_info 
            WHERE token = ?
        """, token)

        user = cursor.fetchone()
        if not user:
            cursor.close()
            return jsonify({
                'success': False,
                'message': '无效的认证信息或会话已过期'
            }), 401

        user_id = user.id
        current_username = user.username

        # A changed username must stay unique across other accounts.
        if username and username != current_username:
            cursor.execute("""
                SELECT COUNT(1) 
                FROM account_info 
                WHERE username = ? AND id != ?
            """, (username, user_id))

            if cursor.fetchone()[0] > 0:
                cursor.close()
                return jsonify({
                    'success': False,
                    'message': '用户名已被其他用户使用'
                }), 400

        # Build the SET clause dynamically from the provided fields.
        update_fields = []
        update_params = []

        if username:
            update_fields.append("username = ?")
            update_params.append(username)
        if phone is not None:  # empty string clears the phone
            update_fields.append("phone = ?")
            update_params.append(phone)
        if email is not None:  # empty string clears the email
            update_fields.append("email = ?")
            update_params.append(email)
        if nickname:
            update_fields.append("nickname = ?")
            update_params.append(nickname)

        # Always stamp the modification time.
        update_fields.append("update_time = GETDATE()")
        update_params.append(user_id)

        # Column names come from the hard-coded list above and values are bound
        # parameters, so this f-string cannot inject SQL.
        query = f"UPDATE account_info SET {', '.join(update_fields)} WHERE id = ?"
        cursor.execute(query, update_params)
        conn.commit()

        # Re-read the row so the response reflects exactly what was stored.
        cursor.execute("""
            SELECT id, username, email, phone, nickname, role_level, create_time, status
            FROM account_info 
            WHERE id = ?
        """, user_id)

        updated_user = cursor.fetchone()
        cursor.close()

        if not updated_user:
            return jsonify({
                'success': False,
                'message': '更新失败，用户不存在'
            }), 500

        return jsonify({
            'success': True,
            'message': '用户信息更新成功',
            'data': {
                'user': {
                    'id': updated_user.id,
                    'username': updated_user.username,
                    'email': updated_user.email,
                    'phone': updated_user.phone,
                    'nickname': updated_user.nickname,
                    'role_level': updated_user.role_level,
                    'status': updated_user.status,
                    'createTime': updated_user.create_time.strftime('%Y-%m-%d') if updated_user.create_time else None,
                }
            }
        })

    except Exception as e:
        return jsonify({
            'success': False,
            'message': f'更新用户信息失败: {str(e)}'
        }), 500
    finally:
        # Single release point avoids the old double-release possibility.
        if conn:
            connection_pool.release_connection(conn)


# Derive a password hash for storage (never store plain-text passwords).
def hash_password(username, password, iterations=100000):
    """Derive a PBKDF2-HMAC-SHA256 hex digest of *password* salted with *username*.

    Args:
        username: account name, used as the salt (also returned unchanged).
        password: plain-text password to hash.
        iterations: PBKDF2 round count (default 100000, the historical value).

    Returns:
        tuple[str, str]: (hex digest, salt) where salt is the username itself.
    """
    pwdhash = hashlib.pbkdf2_hmac(
        'sha256',
        password.encode('utf-8'),
        # utf-8 (was ascii) so non-ASCII usernames no longer raise
        # UnicodeEncodeError; byte-identical for pure-ASCII usernames, so
        # existing stored hashes keep verifying.
        username.encode('utf-8'),
        iterations,
    )
    return pwdhash.hex(), username


# Check a candidate password against a stored hash.
def verify_password(stored_password, salt, provided_password):
    """Return True when *provided_password* hashes to *stored_password*.

    The salt is the username that was in effect when the hash was created.
    """
    candidate_digest = hash_password(salt, provided_password)[0]
    return candidate_digest == stored_password


@app.route('/register', methods=['POST'])
def register():
    """Create a new regular-user account.

    Expects JSON: ``username`` and ``password`` (required), plus optional
    ``email``, ``phone``, ``nickname`` (nickname defaults to the username).
    The password is stored as a PBKDF2 hash, never in plain text.

    Returns:
        200 on success, 400 for missing/duplicate username, 500 otherwise.
    """
    conn = None
    try:
        content = request.json
        username = content.get('username')
        password = content.get('password')
        email = content.get('email')
        phone = content.get('phone')
        nickname = content.get('nickname', username)

        if not username or not password:
            return jsonify({
                'success': False,
                'message': '用户名和密码不能为空'
            }), 400

        conn = connection_pool.get_connection()
        cursor = conn.cursor()

        # Reject duplicate usernames up front.
        cursor.execute("SELECT COUNT(1) FROM account_info WHERE username = ?", username)
        if cursor.fetchone()[0] > 0:
            cursor.close()
            return jsonify({
                'success': False,
                'message': '用户名已存在'
            }), 400

        # Hash before storing -- the database must never hold raw passwords.
        pwdhash, salt = hash_password(username, password)

        # role_level 2 = regular user; status 0 = enabled (not banned).
        cursor.execute("""
            INSERT INTO account_info (username, password, email, phone, nickname, role_level, status) 
            VALUES (?, ?, ?, ?, ?, ?, ?)
        """, (username, pwdhash, email, phone, nickname, 2, 0))

        conn.commit()
        cursor.close()

        return jsonify({
            'success': True,
            'message': '注册成功'
        })

    except Exception as e:
        return jsonify({
            'success': False,
            'message': f'注册失败: {str(e)}'
        }), 500
    finally:
        # Single release point; the old except branch could release a
        # connection that had already been returned to the pool.
        if conn:
            connection_pool.release_connection(conn)


# Reset a user's password (admin only)
@app.route('/system/user/subInfoResetPwd', methods=['POST'])
def subInfoResetPwd():
    """Reset another user's password to the default value (admin only).

    JSON body: ``{"id": <target user id>}``. The caller is identified by the
    Authorization token and must have role_level 1; resetting one's own
    password is rejected.

    Returns:
        200 on success, 400/401/403/404 for the respective failures,
        500 on unexpected errors.
    """
    conn = None
    try:
        token = request.headers.get('Authorization')
        if not token:
            return jsonify({
                'success': False,
                'message': '未提供认证信息'
            }), 401

        # Strip the "Bearer " scheme prefix if present.
        if token.startswith('Bearer '):
            token = token[7:]

        content = request.json
        user_id = content.get('id')

        if not user_id:
            return jsonify({
                'success': False,
                'message': '用户ID不能为空'
            }), 400

        conn = connection_pool.get_connection()
        cursor = conn.cursor()

        # One query yields both the operator's id and role (the old code
        # looked the same row up twice by token).
        cursor.execute("""
            SELECT id, role_level 
            FROM account_info 
            WHERE token = ?
        """, token)

        operator = cursor.fetchone()
        if not operator:
            cursor.close()
            return jsonify({
                'success': False,
                'message': '无效的认证信息或会话已过期'
            }), 401

        if operator.role_level != 1:
            cursor.close()
            return jsonify({
                'success': False,
                'message': '权限不足，只有管理员可以重置用户密码'
            }), 403

        # The target account must exist.
        cursor.execute("""
            SELECT id, username
            FROM account_info 
            WHERE id = ?
        """, user_id)

        user = cursor.fetchone()
        if not user:
            cursor.close()
            return jsonify({
                'success': False,
                'message': '用户不存在'
            }), 404

        # Admins may not reset their own password through this endpoint.
        # NOTE(review): assumes the JSON id and the DB id compare equal (both
        # ints) -- a string id from the client would bypass this check; confirm.
        if operator.id == user_id:
            cursor.close()
            return jsonify({
                'success': False,
                'message': '不能重置自己的密码'
            }), 400

        # Hash of the well-known default password "123456", salted with the
        # target's username (same scheme as registration).
        default_password = "123456"
        new_pwdhash, _ = hash_password(user.username, default_password)

        cursor.execute("""
            UPDATE account_info 
            SET password = ?, update_time = GETDATE()
            WHERE id = ?
        """, (new_pwdhash, user_id))

        conn.commit()
        cursor.close()

        return jsonify({
            'success': True,
            'message': f'用户 {user.username} 的密码已重置为默认密码'
        })

    except Exception as e:
        return jsonify({
            'success': False,
            'message': f'重置密码失败: {str(e)}'
        }), 500
    finally:
        if conn:
            connection_pool.release_connection(conn)


# Enable / disable a user account (admin only)
@app.route('/system/user/editSubInfoStatus', methods=['POST'])
def editSubInfoStatus():
    """Toggle a user's enabled/disabled status (admin only).

    JSON body: ``{"id": <target user id>}``. status 0 = enabled, 1 = disabled;
    the endpoint flips whichever value is current. Admins cannot change their
    own status.

    Returns:
        200 on success, 400/401/403/404 for the respective failures,
        500 on unexpected errors.
    """
    conn = None
    try:
        token = request.headers.get('Authorization')
        if not token:
            return jsonify({
                'success': False,
                'message': '未提供认证信息'
            }), 401

        # Strip the "Bearer " scheme prefix if present.
        if token.startswith('Bearer '):
            token = token[7:]

        content = request.json
        user_id = content.get('id')

        if not user_id:
            return jsonify({
                'success': False,
                'message': '用户ID不能为空'
            }), 400

        conn = connection_pool.get_connection()
        cursor = conn.cursor()

        # One query yields both the operator's id and role (the old code
        # looked the same row up twice by token).
        cursor.execute("""
            SELECT id, role_level 
            FROM account_info 
            WHERE token = ?
        """, token)

        operator = cursor.fetchone()
        if not operator:
            cursor.close()
            return jsonify({
                'success': False,
                'message': '无效的认证信息或会话已过期'
            }), 401

        if operator.role_level != 1:
            cursor.close()
            return jsonify({
                'success': False,
                'message': '权限不足，只有管理员可以操作用户状态'
            }), 403

        # The target account must exist.
        cursor.execute("""
            SELECT id, status, username
            FROM account_info 
            WHERE id = ?
        """, user_id)

        user = cursor.fetchone()
        if not user:
            cursor.close()
            return jsonify({
                'success': False,
                'message': '用户不存在'
            }), 404

        # Admins may not toggle their own status.
        # NOTE(review): assumes the JSON id and the DB id compare equal (both
        # ints) -- a string id from the client would bypass this check; confirm.
        if operator.id == user_id:
            cursor.close()
            return jsonify({
                'success': False,
                'message': '不能修改自己的状态'
            }), 400

        # Flip the flag (0 -> 1, 1 -> 0).
        new_status = 0 if user.status == 1 else 1

        cursor.execute("""
            UPDATE account_info 
            SET status = ?, update_time = GETDATE()
            WHERE id = ?
        """, (new_status, user_id))

        conn.commit()
        cursor.close()

        status_text = "启用" if new_status == 0 else "禁用"

        return jsonify({
            'success': True,
            'message': f'用户 {user.username} 已{status_text}'
        })

    except Exception as e:
        return jsonify({
            'success': False,
            'message': f'操作失败: {str(e)}'
        }), 500
    finally:
        if conn:
            connection_pool.release_connection(conn)


@app.route('/logout', methods=['POST'])
def logout():
    """Log the caller out by NULL-ing their token in account_info.

    Always answers 200 when no database error occurs, even if no token was
    supplied (logging out an already-logged-out client is not an error).
    """
    conn = None
    try:
        raw_header = request.headers.get('Authorization')
        token = raw_header[7:] if raw_header and raw_header.startswith('Bearer ') else raw_header

        if token:
            # Invalidate the session server-side.
            conn = connection_pool.get_connection()
            cursor = conn.cursor()
            cursor.execute("""
                UPDATE account_info 
                SET token = NULL 
                WHERE token = ?
            """, token)
            conn.commit()
            cursor.close()
            connection_pool.release_connection(conn)
            conn = None

        return jsonify({
            'message': '退出登录成功',
            'code': 200
        })

    except Exception as e:
        if conn:
            connection_pool.release_connection(conn)
        print(f"退出登录时出错: {str(e)}")
        return jsonify({
            'message': '退出登录失败',
            'code': 500
        }), 500


# 以下是登陆相关接口，写的假数据，勿动-----结束

# 获取所有普通用户权限
@app.route('/system/user/profile/getSubUserInfoList', methods=['POST'])
def get_sub_user_info_list():
    """List all regular users (role_level 2); caller must be an admin.

    Fixed: the except branch previously returned nothing (its 500 response
    had been stranded inside the next function as dead code), which made
    Flask raise "view did not return a response" on any failure.

    Returns:
        200 with the user list, 401/403 for auth failures, 500 on errors.
    """
    conn = None
    try:
        token = request.headers.get('Authorization')
        if not token:
            return jsonify({
                'success': False,
                'message': '未提供认证信息'
            }), 401

        # Strip the "Bearer " scheme prefix if present.
        if token.startswith('Bearer '):
            token = token[7:]

        conn = connection_pool.get_connection()
        cursor = conn.cursor()

        # Only admins (role_level 1) may list accounts.
        cursor.execute("""
            SELECT role_level 
            FROM account_info 
            WHERE token = ?
        """, token)

        requester = cursor.fetchone()
        if not requester:
            cursor.close()
            return jsonify({
                'success': False,
                'message': '无效的认证信息或会话已过期'
            }), 401

        if requester.role_level != 1:
            cursor.close()
            return jsonify({
                'success': False,
                'message': '权限不足，只有管理员可以访问此接口'
            }), 403

        # Fetch every regular user, newest first.
        cursor.execute("""
            SELECT id, username, email, phone, nickname, create_time, status,login_time
            FROM account_info 
            WHERE role_level = 2
            ORDER BY create_time DESC
        """)

        rows = cursor.fetchall()
        cursor.close()

        user_list = [{
            'id': row.id,
            'username': row.username,
            'email': row.email,
            'phone': row.phone,
            'nickname': row.nickname,
            'status': row.status,
            'createTime': row.create_time.strftime('%Y-%m-%d') if row.create_time else None,
            'loginTime': row.login_time.strftime('%Y-%m-%d %H:%M:%S') if row.login_time else None
        } for row in rows]

        return jsonify({
            'success': True,
            'data': user_list,
            'total': len(user_list)
        })

    except Exception as e:
        # Fixed: this branch used to fall through with no return value.
        return jsonify({
            'success': False,
            'message': f'获取用户列表失败: {str(e)}'
        }), 500
    finally:
        if conn:
            connection_pool.release_connection(conn)


# Look up export history by account id
@app.route('/exportRecord/<int:accid>', methods=['GET'])
def get_export_records(accid):
    """Return all export_record rows for account *accid*, newest first.

    Fixed: removed an unreachable second ``return`` after the error response
    (it was dead code that actually belonged to get_sub_user_info_list).
    """
    conn = None
    try:
        conn = connection_pool.get_connection()
        cursor = conn.cursor()

        cursor.execute("""
            SELECT id, username, accid, export_time, syNumbers, dateOption, fileNames
            FROM export_record 
            WHERE accid = ?
            ORDER BY export_time DESC
        """, accid)

        records = cursor.fetchall()
        cursor.close()

        record_list = [{
            'id': record.id,
            'username': record.username,
            'accid': record.accid,
            'export_time': record.export_time.strftime('%Y-%m-%d %H:%M:%S') if record.export_time else None,
            'syNumbers': record.syNumbers,
            'dateOption': record.dateOption,
            'fileNames': record.fileNames
        } for record in records]

        return jsonify({
            'success': True,
            'data': record_list,
            'total': len(record_list)
        })

    except Exception as e:
        return jsonify({
            'success': False,
            'message': f'查询导出记录失败: {str(e)}'
        }), 500
    finally:
        if conn:
            connection_pool.release_connection(conn)


# File-name listing + memory optimisation section  ########################################################################################################
# Module-level state shared by the query endpoints below.

query_counter = 0  # counts API calls; drives the periodic memory-cleanup policy
first_run = True  # NOTE(review): never read in the visible code -- confirm before removing


# Helper: current process memory usage
def get_memory_usage():
    """Return this process's resident set size (RSS) in megabytes.

    Returns:
        float: current memory usage of the running process, in MB.
    """
    rss_bytes = psutil.Process().memory_info().rss
    return rss_bytes / (1024 * 1024)


# Helper: portable, best-effort memory cleanup
def safe_memory_cleanup():
    """Run a safe memory cleanup using only Python built-ins.

    No system-level calls are made, so this works on every platform.

    Returns:
        float: amount of memory released (MB); 0 when cleanup itself fails.
    """
    try:
        mem_before = get_memory_usage()

        # Early-warning thresholds: warn above 200MB, note above 150MB.
        if mem_before > 200:
            print(f"[内存预警] ⚠️ 内存使用量过高: {mem_before:.2f}MB")
        elif mem_before > 150:
            print(f"[内存注意] 📊 内存使用量较高: {mem_before:.2f}MB")

        # Only the built-in garbage collector is used.
        import gc
        reclaimed_objects = gc.collect()

        mem_after = get_memory_usage()
        released = mem_before - mem_after

        print(f"[内存优化] ✅ 垃圾回收完成: 释放 {released:.2f}MB, 回收对象: {reclaimed_objects}个")
        print(f"[内存状态] 📈 优化前: {mem_before:.2f}MB → 优化后: {mem_after:.2f}MB")

        return released
    except Exception as e:
        print(f"[内存优化] ❌ 清理时出错: {str(e)}")
        return 0


# NOTE: the triple-quoted string below holds disabled legacy endpoints
# (getFileList / getSyNumbers), kept as dead code for reference only.
# It is evaluated as an unused string literal and never executed.
'''
# 获取文件名列表 - 主要API接口
@app.route('/getFileList', methods=['POST', 'OPTIONS'])
def getFileList():
    """
    获取Excel文件列表的API接口
    支持时间范围筛选和全量查询

    请求参数:
        query (optional): 时间范围数组 [start_date, end_date]

    返回:
        JSON: {nameList: [{label: filename, value: index}, ...]}
    """

    # 🔧 处理跨域预检请求
    if request.method == 'OPTIONS':
        return '', 200

    # 🔢 访问全局变量，跟踪查询次数
    global query_counter
    query_counter += 1

    # 📊 记录请求开始时间和内存状态
    start_time = time.time()
    start_memory = get_memory_usage()
    human_time = datetime.fromtimestamp(start_time).strftime('%H:%M:%S')

    print("\n" + "="*60)
    print(f"[查询开始] 🚀 第 {query_counter} 次调用 getFileList")
    print(f"[时间记录] ⏰ {human_time} - 开始处理请求")
    print(f"[内存监控] 📊 开始时内存: {start_memory:.2f}MB")

    # 🔗 数据库连接变量
    conn = None
    cursor = None

    try:
        print("[数据库] 🔌 正在获取数据库连接...")

        # 🔥 优化的连接池检查逻辑
        try:
            # 直接尝试获取连接，避免复杂的存在性检查
            conn = connection_pool.get_connection()
            print("[数据库] ✅ 连接获取成功")
        except NameError:
            print("[数据库] ❌ connection_pool 变量未定义")
            raise Exception("数据库连接池变量未定义，请检查连接池初始化代码")
        except AttributeError as e:
            print(f"[数据库] ❌ connection_pool 对象问题: {e}")
            raise Exception(f"数据库连接池对象错误: {e}")
        except Exception as e:
            print(f"[数据库] ❌ 获取连接失败: {e}")
            raise Exception(f"无法获取数据库连接: {e}")

        # ⏱️ 设置合理的超时时间，防止长时间等待
        if hasattr(conn, 'timeout'):
            conn.timeout = 30  # 30秒超时

        # 📥 获取和解析请求数据
        content = request.json
        print(f"[请求数据] 📝 {content}")

        # 🎯 创建数据库游标并设置锁超时
        cursor = conn.cursor()
        cursor.execute("SET LOCK_TIMEOUT 8000")  # 8秒数据库锁超时

        # 🔍 构建SQL查询语句
        if 'query' in content and content['query']:
            # 📅 时间范围查询
            dateOption = content['query']
            start_date = datetime.fromisoformat(dateOption[0].replace('Z', '+00:00')).strftime('%Y-%m-%d %H:%M:%S')
            end_date = datetime.fromisoformat(dateOption[1].replace('Z', '+00:00')).strftime('%Y-%m-%d %H:%M:%S')
            print(f"[查询范围] 📅 时间筛选: {start_date} 至 {end_date}")

            # 🔥 修复SQL语法：DISTINCT TOP 而不是 TOP DISTINCT
            query = f"SELECT DISTINCT TOP 1000 excel_name FROM TXEX1 WHERE DT BETWEEN '{start_date}' AND '{end_date}'"
        else:
            # 🌐 全量查询
            print("[查询范围] 🌐 全量查询（限制前1000条）")
            query = "SELECT DISTINCT TOP 1000 excel_name FROM TXEX1"

        print(f"[查询SQL] 🔍 {query}")

        # 💾 执行数据库查询
        try:
            print("[数据库] 🔄 执行查询...")
            cursor.execute(query)
        except Exception as query_error:
            print(f"[数据库] ❌ 查询执行失败: {query_error}")
            cursor.close()
            raise query_error

        # 📤 获取查询结果
        rows = cursor.fetchall()
        row_count = len(rows)
        print(f"[数据处理] 📊 查询返回了 {row_count} 行数据")

        # 🔧 处理查询结果，转换为前端需要的格式
        resList = []
        for index, row in enumerate(rows):
            try:
                # 兼容不同的行数据格式
                excel_name = row.excel_name if hasattr(row, 'excel_name') else row[0]
                res = {'label': excel_name, 'value': index}
                resList.append(res)
            except Exception as row_error:
                print(f"[数据处理] ⚠️ 处理第{index}行数据失败: {row_error}")
                continue

        # 🗂️ 关闭游标
        cursor.close()
        cursor = None

        # 📦 构造响应数据
        response = {
            'nameList': resList,
        }

        # ⏱️ 计算处理时间和内存变化
        end_time = time.time()
        duration = end_time - start_time
        end_memory = get_memory_usage()
        memory_used = end_memory - start_memory
        human_end_time = datetime.fromtimestamp(end_time).strftime('%H:%M:%S')

        print(f"[执行完成] ✅ {human_end_time} - 处理完成")
        print(f"[性能统计] ⏱️ 耗时: {duration:.2f}秒, 返回: {len(resList)} 条记录")
        print(f"[内存监控] 📈 内存变化: {memory_used:+.2f}MB, 当前内存: {end_memory:.2f}MB")

        # 🧹 智能内存优化策略
        should_cleanup = False
        cleanup_reason = ""

        # 策略1: 每10次查询必须清理
        if query_counter % 10 == 0:
            should_cleanup = True
            cleanup_reason = f"第{query_counter}次查询，执行例行清理"

        # 策略2: 查询时间过长时清理（超过3秒）
        elif duration > 3.0:
            should_cleanup = True
            cleanup_reason = f"查询耗时{duration:.2f}秒，执行性能优化清理"

        # 策略3: 返回数据量大时清理（超过500条）
        elif len(resList) > 500:
            should_cleanup = True
            cleanup_reason = f"返回数据量{len(resList)}条，执行大数据清理"

        # 策略4: 内存使用过高时清理（超过100MB）
        elif end_memory > 100:
            should_cleanup = True
            cleanup_reason = f"内存使用{end_memory:.2f}MB，执行内存优化清理"

        # 策略5: 内存增长过多时清理（单次增长超过10MB）
        elif memory_used > 10:
            should_cleanup = True
            cleanup_reason = f"内存增长{memory_used:.2f}MB，执行增长控制清理"

        # 🔥 执行内存清理
        if should_cleanup:
            print(f"[内存优化] 🧹 触发清理: {cleanup_reason}")
            safe_memory_cleanup()
        else:
            print("[内存优化] ✅ 无需清理，内存状态良好")

        print("="*60 + "\n")
        return jsonify(response)

    except Exception as e:
        print(f"[错误处理] ❌ 处理请求错误: {str(e)}")
        print(f"[错误类型] 🔍 异常类型: {type(e)}")

        # 🔍 打印详细错误堆栈，便于调试
        import traceback
        print("[错误详情] 📋 详细错误信息:")
        traceback.print_exc()

        # 🧹 错误后执行完整的内存清理
        try:
            print("[内存优化] 🆘 错误后执行完整清理")
            safe_memory_cleanup()
        except Exception as cleanup_error:
            print(f"[内存优化] ⚠️ 清理失败: {cleanup_error}")
            # 如果safe_memory_cleanup失败，至少执行基本清理
            try:
                import gc
                collected = gc.collect()
                print(f"[内存优化] 🔧 基本清理完成，回收对象: {collected}个")
            except:
                print("[内存优化] ❌ 基本清理也失败")

        print("="*60 + "\n")
        return jsonify({"error": "获取数据失败", "details": str(e)}), 500

    finally:
        # 🔗 确保游标和连接的正确释放
        try:
            # 关闭游标（如果还没关闭）
            if cursor:
                cursor.close()
                print("[数据库] 🔧 游标已关闭")
        except Exception as cursor_error:
            print(f"[数据库] ⚠️ 关闭游标失败: {cursor_error}")

        # 归还连接到连接池
        if conn:
            try:
                connection_pool.release_connection(conn)
                print("[数据库] ♻️ 连接已释放回连接池")
            except Exception as release_error:
                print(f"[数据库] ❌ 释放连接失败: {release_error}")


#试样名查询  #########################################################################################################################
@app.route('/getSyNumbers', methods=['POST', 'OPTIONS'])  # 添加OPTIONS方法  
def getSyNumbers():  
    if request.method == 'OPTIONS':  
        return '', 200  

    print(f"开始处理getSyNumbers请求，方法：{request.method}")  # 调试信息  

    # 从连接池获取连接  
    conn = None  
    try:  
        conn = connection_pool.get_connection()  
        content = request.json  
        dateOption = content.get('dateOption', [])  
        fileNames = content.get('fileNames', [])  
        if dateOption :
            start_date = datetime.fromisoformat(dateOption[0].replace('Z', '+00:00')).strftime('%Y-%m-%d %H:%M:%S')
            end_date = datetime.fromisoformat(dateOption[1].replace('Z', '+00:00')).strftime('%Y-%m-%d %H:%M:%S')

        cursor = conn.cursor()
        # 执行查询  日期在start_date和end_date之间并且excel_name in fileNames 的数据

        if(dateOption): query = "SELECT distinct section FROM TXEX1 WHERE DT BETWEEN '"+start_date+"' AND '"+end_date+"' AND excel_name in ("+','.join(["'"+name+"'" for name in fileNames])+")"
        else : query = "SELECT distinct section FROM TXEX1 WHERE excel_name in ("+','.join(["'"+name+"'" for name in fileNames])+")"
        cursor.execute(query)

        # 获取结果
        resList = []
        rows = cursor.fetchall()
        for index, row in enumerate(rows):
            res = {'label': row.section, 'value': index}
            resList.append(res)  # 改用append
            #resList.insert(0,res)
        # 关闭游标和连接
        #print(rows)
        cursor.close()
        response = {
            'nameList': resList,
        }
        return jsonify(response)
    except Exception as e:  
        print(f"getSyNumbers错误: {str(e)}")  
        return jsonify({"error": "获取试样名失败", "details": str(e)}), 500  
    finally:  
        if conn:  
            connection_pool.release_connection(conn)
'''


# Advanced filtering: unified option-list endpoint
@app.route('/getAdvancedFilterOptions', methods=['POST', 'OPTIONS'])
def getAdvancedFilterOptions():
    """
    Return the distinct values of one filter field, optionally narrowed by
    every *other* already-selected filter condition.

    Request JSON:
        field:      which column to fetch options for (must be whitelisted below)
        conditions: the other currently-selected filter values

    Response: for 'excel_name'/'section' a {'nameList': [{label, value}, ...]}
    payload (legacy shape), otherwise {'data': [labels...]}.
    """

    # CORS preflight short-circuit.
    if request.method == 'OPTIONS':
        return '', 200

    # Count calls; drives the periodic memory-cleanup policy below.
    global query_counter
    query_counter += 1

    start_time = time.time()
    start_memory = get_memory_usage()

    conn = None
    cursor = None

    try:
        conn = connection_pool.get_connection()
        cursor = conn.cursor()
        cursor.execute("SET LOCK_TIMEOUT 8000")

        content = request.json
        field = content.get('field')
        conditions = content.get('conditions', {})

        print(f"\n[统一筛选] 🔍 查询字段: {field}")
        print(f"[统一筛选] 📝 请求条件: {conditions}")

        # Whitelist of requestable fields -> actual column names; this also
        # prevents SQL injection through the interpolated {db_field} below.
        field_mapping = {
            'excel_name': 'excel_name',
            'section': 'section',
            'sample_type': 'sample_type',
            'sampling_location': 'sampling_location',
            'sampling_process': 'sampling_process',
            'chart_name': 'chart_name',
            'casting_mark': 'casting_mark',
            'sampling_time': 'sampling_time',
            'sample_sender': 'sample_sender'
        }

        if field not in field_mapping:
            return jsonify({"error": f"不支持的字段: {field}"}), 400

        db_field = field_mapping[field]

        # Build the query incrementally; all values go through ? placeholders.
        query = f"SELECT DISTINCT TOP 1000 {db_field} FROM TXEX1"
        params = []
        where_conditions = []

        has_other_conditions = False

        # 1. Date-range condition (ISO strings from the frontend, 'Z' -> +00:00).
        if conditions.get('dateOption') and len(conditions['dateOption']) == 2:
            has_other_conditions = True
            start_date = datetime.fromisoformat(conditions['dateOption'][0].replace('Z', '+00:00')).strftime(
                '%Y-%m-%d %H:%M:%S')
            end_date = datetime.fromisoformat(conditions['dateOption'][1].replace('Z', '+00:00')).strftime(
                '%Y-%m-%d %H:%M:%S')
            where_conditions.append("DT BETWEEN ? AND ?")
            params.extend([start_date, end_date])

        # 2-10. Multi-select conditions: (field name, request key, DB column).
        condition_mappings = [
            ('excel_name', 'fileNames', 'excel_name'),
            ('section', 'syNumbers', 'section'),
            ('sample_type', 'sampleTypes', 'sample_type'),
            ('sampling_location', 'samplingPositions', 'sampling_location'),
            ('sampling_process', 'samplingProcesses', 'sampling_process'),
            ('chart_name', 'chartNames', 'chart_name'),
            ('casting_mark', 'steelMarks', 'casting_mark'),
            ('sampling_time', 'samplingTimes', 'sampling_time'),
            ('sample_sender', 'submitters', 'sample_sender')
        ]

        # Skip the condition that targets the field being queried itself, so a
        # field's own current selection never restricts its option list.
        for field_name, condition_key, db_column in condition_mappings:
            if field != field_name and conditions.get(condition_key) and len(conditions[condition_key]) > 0:
                has_other_conditions = True
                placeholders = ', '.join(['?'] * len(conditions[condition_key]))
                where_conditions.append(f"{db_column} IN ({placeholders})")
                params.extend(conditions[condition_key])

        # 11. Remarks free-text condition (substring LIKE match).
        if conditions.get('remarks') and conditions['remarks'].strip():
            has_other_conditions = True
            where_conditions.append("Remarks LIKE ?")
            params.append(f"%{conditions['remarks'].strip()}%")

        # Assemble the WHERE clause from the collected conditions.
        if has_other_conditions:
            query += " WHERE " + " AND ".join(where_conditions)
            query_type = "基于其他条件的筛选查询"
        else:
            query_type = "全量查询"

        # Filter out NULL/empty/whitespace-only values and sort the options.
        # NOTE(review): TRIM() requires SQL Server 2017+ -- confirm the target
        # server version (older servers need LTRIM(RTRIM(...))).
        where_prefix = " WHERE " if not has_other_conditions else " AND "
        query += f"{where_prefix}{db_field} IS NOT NULL AND {db_field} != '' AND LEN(TRIM({db_field})) > 0 ORDER BY {db_field}"

        print(f"[统一筛选] 🔍 查询类型: {query_type}")
        print(f"[统一筛选] 📊 是否有其他条件: {has_other_conditions}")

        # Run the parameterized query.
        cursor.execute(query, params)
        rows = cursor.fetchall()

        # De-duplicate (preserving first occurrence) and build label/value pairs.
        result = []
        seen_values = set()

        for index, row in enumerate(rows):
            try:
                value = getattr(row, db_field) if hasattr(row, db_field) else row[0]
                if value and str(value).strip() and str(value).strip() not in seen_values:
                    clean_value = str(value).strip()
                    seen_values.add(clean_value)
                    result.append({
                        'label': clean_value,
                        'value': len(result)
                    })
            except Exception as row_error:
                print(f"[统一筛选] ⚠️ 处理第{index}行数据失败: {row_error}")
                continue

        cursor.close()
        cursor = None  # prevent a second close in finally

        # Legacy response shape for the two original fields; plain list otherwise.
        if field in ['excel_name', 'section']:
            response = {
                'nameList': result,
                'field': field,
                'query_type': query_type,
                'has_other_conditions': has_other_conditions,
                'count': len(result)
            }
        else:
            response = {
                'data': [item['label'] for item in result],
                'field': field,
                'query_type': query_type,
                'has_other_conditions': has_other_conditions,
                'count': len(result)
            }

        # Performance bookkeeping for the log output.
        end_time = time.time()
        duration = end_time - start_time
        end_memory = get_memory_usage()
        memory_used = end_memory - start_memory

        print(f"[统一筛选] ✅ 查询完成，耗时: {duration:.2f}秒，返回: {len(result)} 条记录")
        print(f"[统一筛选] 📈 内存变化: {memory_used:+.2f}MB")

        # Periodic / heavy-query memory cleanup.
        if query_counter % 10 == 0 or duration > 3.0 or len(result) > 500:
            safe_memory_cleanup()

        return jsonify(response)

    except Exception as e:
        print(f"[统一筛选] ❌ 错误: {str(e)}")
        return jsonify({"error": "获取筛选选项失败", "details": str(e)}), 500

    finally:
        # Always close the cursor (if still open) and return the connection.
        if cursor:
            cursor.close()
        if conn:
            connection_pool.release_connection(conn)


#################################################################################### 查询列表 ################################
@app.route('/selectTableData', methods=['POST', 'OPTIONS'])
def selectTableData():
    """分页查询 TXEX1 主列表，支持基础条件与高级筛选条件。

    请求 JSON 字段:
        syNumbers / dateOption / fileNames   基础查询条件
        advancedFilters                      高级筛选（IN 列表、取样时间范围/列表、备注模糊匹配）
        currentPage / pageSize               分页参数
    返回 JSON: nameList（当前页数据）、total、currentPage、pageSize、totalPages；
    出错时返回 500 与错误详情。
    """
    if request.method == 'OPTIONS':
        return '', 200

    # 性能监控
    start_time = time.time()
    start_memory = get_memory_usage()

    def _num(value):
        # 修复：数值列可能为 NULL。原实现 `float(x) if x != 0 else 0` 在 x 为 None 时
        # 抛 TypeError（None != 0 为真），并被逐行 except 吞掉、整行数据被悄悄丢弃。
        # 这里把 NULL/0 统一归一为 0。
        return float(value) if value else 0

    # 成分列的元素符号，对应列名 <El>_wt_percent（与数据库列一一对应）
    elements = ('Li', 'B', 'C', 'N', 'O', 'F', 'Na', 'Mg', 'Al', 'Si', 'P',
                'S', 'Cl', 'K', 'Ca', 'Ti', 'V', 'Cr', 'Mn', 'Fe', 'Ni',
                'Cu', 'As', 'Zr', 'Nb', 'Mo', 'Ru', 'Rh', 'Pd', 'Cd', 'Sn',
                'Sb', 'Te', 'La', 'Ce', 'Pm', 'Sm', 'Eu', 'Dy', 'Tm', 'Yb',
                'Ta', 'Tl', 'Ra', 'Ac', 'Pa')
    # 原样透传的文本/时间列
    plain_fields = ('SNUMB', 'excel_name', 'IDNO', 'section', 'inclusion_type',
                    'FOV', 'excel2_name', 'PONO', 'sample_type',
                    'sampling_location', 'sampling_process', 'analysis_area',
                    'chart_name', 'casting_mark', 'sampling_time',
                    'sample_sender', 'Remarks')
    # 需要转 float 的几何/统计列
    numeric_fields = ('ECD', 'area', 'shape', 'length', 'perimeter', 'aspect',
                      'X_axis', 'Y_axis')

    conn = connection_pool.get_connection()
    cursor = None
    try:
        cursor = conn.cursor()
        cursor.execute("SET LOCK_TIMEOUT 8000")  # 设置锁超时，避免长时间等待锁

        content = request.json
        print(f"\n[表格搜索] 📝 搜索参数: {content}")

        # 解析请求参数
        syNumbers = content.get('syNumbers', [])
        dateOption = content.get('dateOption', [])
        fileNames = content.get('fileNames', [])
        currentPage = content.get('currentPage', 1)
        pageSize = content.get('pageSize', 10)
        advancedFilters = content.get('advancedFilters', {})

        # 构建查询（条件值全部走 ? 占位符，避免 SQL 注入）
        query = "SELECT * FROM TXEX1"
        count_query = "SELECT COUNT(1) FROM TXEX1"
        params = []
        conditions = []

        def _add_in_condition(column, values, label):
            # 追加一个 `column IN (?, ?, ...)` 条件并记录日志
            placeholders = ', '.join(['?'] * len(values))
            conditions.append(f"{column} IN ({placeholders})")
            params.extend(values)
            print(f"[表格搜索] 添加{label}条件: {len(values)} 个")

        # 基础条件
        if syNumbers:
            _add_in_condition('section', syNumbers, '试样号')

        if dateOption and len(dateOption) == 2:
            start_date = datetime.fromisoformat(dateOption[0].replace('Z', '+00:00')).strftime('%Y-%m-%d %H:%M:%S')
            end_date = datetime.fromisoformat(dateOption[1].replace('Z', '+00:00')).strftime('%Y-%m-%d %H:%M:%S')
            conditions.append("DT BETWEEN ? AND ?")
            params.extend([start_date, end_date])
            print(f"[表格搜索] 添加日期条件: {start_date} ~ {end_date}")

        if fileNames:
            _add_in_condition('excel_name', fileNames, '文件名')

        # 高级筛选条件：IN 列表类统一处理（key -> 数据库列 -> 日志名）
        for key, column, label in (
                ('sampleTypes', 'sample_type', '试样类型'),
                ('samplingPositions', 'sampling_location', '取样位置'),
                ('samplingProcesses', 'sampling_process', '取样工序'),
                ('chartNames', 'chart_name', '作图名称'),
                ('steelMarks', 'casting_mark', '出钢记号')):
            values = advancedFilters.get(key)
            if values:
                _add_in_condition(column, values, label)

        # 取样时间：优先按时间范围，其次按具体时间值列表（两种前端传参方式）
        if advancedFilters.get('samplingTimeRange') and len(advancedFilters['samplingTimeRange']) == 2:
            time_range = advancedFilters['samplingTimeRange']
            start_time_str = datetime.fromisoformat(
                time_range[0].replace('Z', '+00:00')).strftime('%Y-%m-%d %H:%M:%S')
            end_time_str = datetime.fromisoformat(
                time_range[1].replace('Z', '+00:00')).strftime('%Y-%m-%d %H:%M:%S')
            conditions.append("sampling_time BETWEEN ? AND ?")
            params.extend([start_time_str, end_time_str])
            print(f"[表格搜索] 添加取样时间范围条件: {start_time_str} ~ {end_time_str}")
        elif advancedFilters.get('samplingTimes'):
            _add_in_condition('sampling_time', advancedFilters['samplingTimes'], '取样时间')

        if advancedFilters.get('submitters'):
            _add_in_condition('sample_sender', advancedFilters['submitters'], '送样人')

        if advancedFilters.get('remarks') and advancedFilters['remarks'].strip():
            conditions.append("Remarks LIKE ?")
            params.append(f"%{advancedFilters['remarks'].strip()}%")
            print(f"[表格搜索] 添加备注条件: {advancedFilters['remarks'].strip()}")

        # 拼接 WHERE（数据查询与计数查询共用同一组条件和参数）
        if conditions:
            where_clause = " WHERE " + " AND ".join(conditions)
            query += where_clause
            count_query += where_clause

        # 分页：SNUMB 为字符串列，按数值排序需 CAST
        offset = (currentPage - 1) * pageSize
        query += f" ORDER BY CAST(SNUMB AS INT) ASC OFFSET {offset} ROWS FETCH NEXT {pageSize} ROWS ONLY"

        print(f'[表格搜索] 🔍 生成的查询: {query}')
        print(f'[表格搜索] 📝 查询参数数量: {len(params)}')

        # 先获取总数
        cursor.execute(count_query, params)
        total_count = cursor.fetchone()[0]
        print(f'[表格搜索] 📊 总记录数: {total_count}')

        # 再获取分页数据
        cursor.execute(query, params)
        rows = cursor.fetchall()

        # 行 -> 字典（出错的行记录日志后跳过，不影响其余行）
        resList = []
        for index, row in enumerate(rows):
            try:
                res = {'DT': row.DT.strftime('%Y-%m-%d %H:%M:%S') if row.DT else None}
                for field in plain_fields:
                    res[field] = getattr(row, field)
                for field in numeric_fields:
                    res[field] = _num(getattr(row, field))
                for element in elements:
                    column = f'{element}_wt_percent'
                    res[column] = _num(getattr(row, column))
                resList.append(res)
            except Exception as row_error:
                # 修复：用真实行号 index 而非 len(resList)，后者在有行被跳过后不再准确
                print(f"[表格搜索] ⚠️ 处理第{index}行数据失败: {row_error}")
                continue

        # 构建响应
        response = {
            'nameList': resList,
            'total': total_count,
            'currentPage': currentPage,
            'pageSize': pageSize,
            'totalPages': (total_count + pageSize - 1) // pageSize
        }

        # 性能统计
        duration = time.time() - start_time
        memory_used = get_memory_usage() - start_memory

        print(f"[表格搜索] ✅ 搜索完成，耗时: {duration:.2f}秒")
        print(f"[表格搜索] 📊 返回: {len(resList)} 条记录，总计: {total_count} 条")
        print(f"[表格搜索] 📈 内存变化: {memory_used:+.2f}MB")

        # 慢查询/大结果/内存增长明显时主动清理内存
        if duration > 3.0 or len(resList) > 500 or memory_used > 50:
            print(f"[表格搜索] 🧹 执行内存清理")
            safe_memory_cleanup()

        return jsonify(response)

    except Exception as e:
        print(f"[表格搜索] ❌ selectTableData错误: {str(e)}")
        return jsonify({"error": "查询失败", "details": str(e)}), 500
    finally:
        # 修复：原实现只在成功路径关闭游标，异常路径会泄漏
        if cursor:
            try:
                cursor.close()
            except Exception:
                pass
        if conn:
            connection_pool.release_connection(conn)


##  添加导出excel功能，导出表1和表2           ###############################################################################################
@app.route('/exportExcel', methods=['POST', 'OPTIONS'])
def exportExcel():
    """导出查询结果（TXEX1 全量、不分页），并向 export_record 写入导出日志。

    请求 JSON: syNumbers / dateOption / fileNames 查询条件；
    可选 Authorization: Bearer <token> 用于解析导出人。
    返回 JSON: {'resList': [...]}；出错时返回 500 与错误详情。
    """
    if request.method == 'OPTIONS':
        return '', 200

    def _num(value):
        # 修复：数值列可能为 NULL。原实现 `float(x) if x != 0 else 0` 在 x 为 None 时
        # 抛 TypeError，且本函数没有逐行 except，单个 NULL 单元格会让整个导出失败。
        # 这里把 NULL/0 统一归一为 0。
        return float(value) if value else 0

    # 成分列的元素符号，对应列名 <El>_wt_percent
    elements = ('Li', 'B', 'C', 'N', 'O', 'F', 'Na', 'Mg', 'Al', 'Si', 'P',
                'S', 'Cl', 'K', 'Ca', 'Ti', 'V', 'Cr', 'Mn', 'Fe', 'Ni',
                'Cu', 'As', 'Zr', 'Nb', 'Mo', 'Ru', 'Rh', 'Pd', 'Cd', 'Sn',
                'Sb', 'Te', 'La', 'Ce', 'Pm', 'Sm', 'Eu', 'Dy', 'Tm', 'Yb',
                'Ta', 'Tl', 'Ra', 'Ac', 'Pa')
    # 原样透传的文本/时间列
    plain_fields = ('SNUMB', 'excel_name', 'IDNO', 'section', 'inclusion_type',
                    'FOV', 'excel2_name', 'PONO', 'sample_type',
                    'sampling_location', 'sampling_process', 'analysis_area',
                    'chart_name', 'casting_mark', 'sampling_time',
                    'sample_sender', 'Remarks')
    # 需要转 float 的几何/统计列
    numeric_fields = ('ECD', 'area', 'shape', 'length', 'perimeter', 'aspect',
                      'X_axis', 'Y_axis')

    # 从连接池获取连接替代全局conn
    conn = connection_pool.get_connection()
    cursor = None
    try:
        # NOTE(review): 历史遗留的模块级 total_count，响应中并未返回；
        # 保留赋值以兼容可能读取它的外部代码 —— 建议后续确认后移除。
        global total_count
        cursor = conn.cursor()
        content = request.json
        print('请求参数:', content)  # 打印请求参数

        # 解析请求参数
        syNumbers = content.get('syNumbers', [])
        dateOption = content.get('dateOption', [])
        fileNames = content.get('fileNames', [])

        # 构建查询（条件值全部走 ? 占位符，避免 SQL 注入）
        query = "SELECT * FROM TXEX1"
        count_query = "SELECT COUNT(1) FROM TXEX1"
        params = []
        conditions = []

        if syNumbers:
            placeholders = ', '.join(['?'] * len(syNumbers))
            conditions.append(f"section IN ({placeholders})")
            params.extend(syNumbers)

        if dateOption and len(dateOption) == 2:
            start_date = datetime.fromisoformat(dateOption[0].replace('Z', '+00:00')).strftime('%Y-%m-%d %H:%M:%S')
            end_date = datetime.fromisoformat(dateOption[1].replace('Z', '+00:00')).strftime('%Y-%m-%d %H:%M:%S')
            conditions.append("DT BETWEEN ? AND ?")
            params.extend([start_date, end_date])

        if fileNames:
            placeholders = ', '.join(['?'] * len(fileNames))
            conditions.append(f"excel_name IN ({placeholders})")
            params.extend(fileNames)

        if conditions:
            where_clause = " WHERE " + " AND ".join(conditions)
            query += where_clause
            count_query += where_clause
        # SNUMB 为字符串列，按数值排序需 CAST
        query += " ORDER BY CAST(SNUMB AS INT) ASC"

        print('生成的查询:', query)  # 打印生成的查询
        print('查询参数:', params)  # 打印查询参数

        # 执行查询
        cursor.execute(query, params)
        rows = cursor.fetchall()

        # 行 -> 字典
        resList = []
        for row in rows:
            res = {'DT': row.DT.strftime('%Y-%m-%d %H:%M:%S') if row.DT else None}
            for field in plain_fields:
                res[field] = getattr(row, field)
            for field in numeric_fields:
                res[field] = _num(getattr(row, field))
            for element in elements:
                column = f'{element}_wt_percent'
                res[column] = _num(getattr(row, column))
            resList.append(res)

        # 获取总记录数（仅写入模块级 total_count，响应中不返回）
        cursor.execute(count_query, params)
        total_count = cursor.fetchone()[0]

        # 记录导出日志：若带了 Bearer token，则尝试解析导出用户
        username = "Unknown"
        accid = 0
        auth_header = request.headers.get('Authorization')
        if auth_header and auth_header.startswith('Bearer '):
            token = auth_header[7:]
            try:
                cursor.execute("SELECT id, username FROM account_info WHERE token = ?", token)
                user = cursor.fetchone()
                if user:
                    username = user.username
                    accid = user.id
            except Exception as e:
                # 解析失败不影响导出本身，按匿名记录
                print(f"获取用户信息失败: {str(e)}")

        # 查询条件转为逗号分隔字符串存档
        syNumbers_str = ','.join(map(str, syNumbers)) if syNumbers else ""
        dateOption_str = ','.join(map(str, dateOption)) if dateOption else ""
        fileNames_str = ','.join(map(str, fileNames)) if fileNames else ""

        # 插入导出记录（失败仅记录日志并回滚，不阻断导出响应）
        try:
            cursor.execute("""
                INSERT INTO export_record (username, accid, syNumbers, dateOption, fileNames) 
                VALUES (?, ?, ?, ?, ?)
            """, (username, accid, syNumbers_str, dateOption_str, fileNames_str))
            conn.commit()
        except Exception as e:
            print(f"记录导出日志失败: {str(e)}")
            conn.rollback()

        return jsonify({'resList': resList})
    except Exception as e:
        print(f"处理exportExcel请求错误: {str(e)}")
        return jsonify({"error": "导出数据失败", "details": str(e)}), 500
    finally:
        # 修复：异常路径上也关闭游标，再归还连接到池
        if cursor:
            try:
                cursor.close()
            except Exception:
                pass
        if conn:
            connection_pool.release_connection(conn)


# -》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》
# 钙处理铝镇静钢,虽然叫filterLowC，原来定的名叫超低碳
# 钙处理铝镇静钢,虽然叫filterLowC，原来定的名叫超低碳
@app.route('/filterLowC', methods=['POST', 'OPTIONS'])
def filterLowC():
    """钙处理铝镇静钢筛选（历史接口名 filterLowC，原定名“超低碳”）。

    先按条件查询 TXEX2，再在内存中过滤：剔除 Al/S/N/Ca/Ce/La/Ti/Mg
    八个成分同时为 0 的行，其余行返回。
    请求 JSON: syNumber / date / fileName 查询条件。
    返回 JSON: {'data': [...]}；出错时返回 500 与错误详情。
    """
    if request.method == 'OPTIONS':
        return '', 200

    def _num(value):
        # 修复：数值列可能为 NULL。原实现 `float(x) if x != 0 else 0` 在 x 为 None 时
        # 抛 TypeError，本函数没有 except，单个 NULL 会让整个请求 500。
        # 这里把 NULL/0 统一归一为 0（NULL 成分按 0 参与筛选）。
        return float(value) if value else 0

    # 成分列的元素符号，对应列名 <El>_wt_percent
    elements = ('Li', 'B', 'C', 'N', 'O', 'F', 'Na', 'Mg', 'Al', 'Si', 'P',
                'S', 'Cl', 'K', 'Ca', 'Ti', 'V', 'Cr', 'Mn', 'Fe', 'Ni',
                'Cu', 'As', 'Zr', 'Nb', 'Mo', 'Ru', 'Rh', 'Pd', 'Cd', 'Sn',
                'Sb', 'Te', 'La', 'Ce', 'Pm', 'Sm', 'Eu', 'Dy', 'Tm', 'Yb',
                'Ta', 'Tl', 'Ra', 'Ac', 'Pa')
    # 原样透传的文本/时间列
    plain_fields = ('SNUMB', 'excel_name', 'IDNO', 'section', 'inclusion_type',
                    'FOV', 'excel2_name', 'PONO', 'sample_type',
                    'sampling_location', 'sampling_process', 'analysis_area',
                    'chart_name', 'casting_mark', 'sampling_time',
                    'sample_sender', 'Remarks')
    # 需要转 float 的几何/统计列
    numeric_fields = ('ECD', 'area', 'shape', 'length', 'perimeter', 'aspect',
                      'X_axis', 'Y_axis')
    # 剔除规则涉及的成分列：八列同时为 0 的行被剔除
    rule_columns = ('Al_wt_percent', 'S_wt_percent', 'N_wt_percent',
                    'Ca_wt_percent', 'Ce_wt_percent', 'La_wt_percent',
                    'Ti_wt_percent', 'Mg_wt_percent')

    # 从连接池获取连接
    conn = connection_pool.get_connection()
    cursor = None
    try:
        cursor = conn.cursor()
        content = request.json
        syNumbers = content.get('syNumber', [])  # 试样号查询条件
        dateOption = content.get('date', [])  # 日期查询条件
        fileNames = content.get('fileName', [])  # 文件名称查询条件

        # 查询主列表（带着查询条件；原代码还拼接了从未执行的 COUNT 查询，已移除）
        query = "SELECT * FROM TXEX2"
        params = []
        conditions = []
        if syNumbers:
            placeholders = ', '.join(['?'] * len(syNumbers))
            conditions.append(f"section IN ({placeholders})")
            params.extend(syNumbers)

        if dateOption and len(dateOption) == 2:
            # 确保日期格式正确
            start_date = datetime.fromisoformat(dateOption[0].replace('Z', '+00:00')).strftime('%Y-%m-%d %H:%M:%S')
            end_date = datetime.fromisoformat(dateOption[1].replace('Z', '+00:00')).strftime('%Y-%m-%d %H:%M:%S')
            conditions.append("DT BETWEEN ? AND ?")
            params.extend([start_date, end_date])

        if fileNames:
            placeholders = ', '.join(['?'] * len(fileNames))
            conditions.append(f"excel_name IN ({placeholders})")
            params.extend(fileNames)

        if conditions:
            query += " WHERE " + " AND ".join(conditions)
        # SNUMB 为字符串列，按数值排序需 CAST
        query += " ORDER BY CAST(SNUMB AS INT) ASC"

        cursor.execute(query, params)
        rows = cursor.fetchall()

        resList = []
        for row in rows:
            # 行 -> 字典
            res = {'DT': row.DT.strftime('%Y-%m-%d %H:%M:%S') if row.DT else None}
            for field in plain_fields:
                res[field] = getattr(row, field)
            for field in numeric_fields:
                res[field] = _num(getattr(row, field))
            for element in elements:
                column = f'{element}_wt_percent'
                res[column] = _num(getattr(row, column))

            # 筛选：八个关键成分同时为 0 的行剔除，其余保留
            if not all(res[column] == 0 for column in rule_columns):
                resList.append(res)

        return jsonify({'data': resList})
    except Exception as e:
        # 与其它路由保持一致的 JSON 错误响应（原实现无 except，返回 Flask 默认 500 页面）
        print(f"filterLowC错误: {str(e)}")
        return jsonify({"error": "筛选失败", "details": str(e)}), 500
    finally:
        # 修复：关闭游标（原实现从不关闭），再归还连接到池
        if cursor:
            try:
                cursor.close()
            except Exception:
                pass
        connection_pool.release_connection(conn)

    # -》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》


# 铝镇静钢，原来叫硅钢，filterSi
"""""
@app.route('/filterSi', methods=['POST', 'OPTIONS'])
def filterSi():
    if request.method == 'OPTIONS':  
        return '', 200  
    # 从连接池获取连接  
    conn = connection_pool.get_connection()  
    try:
        cursor = conn.cursor()
        content = request.json
        #print('前端请求时附带的查询条件', content)
        syNumbers = content.get('syNumber', [])  # 试样号查询条件
        dateOption = content.get('date', [])  # 日期查询条件
        fileNames = content.get('fileName', [])  # 文件名称查询条件
        # 查询主列表（带着查询条件）
        query = "SELECT * FROM TXEX2"
        count_query = "SELECT COUNT(1) FROM TXEX2"
        params = []
        conditions = []
        if syNumbers:
            placeholders = ', '.join(['?'] * len(syNumbers))
            conditions.append(f"section IN ({placeholders})")
            params.extend(syNumbers)

        if dateOption and len(dateOption) == 2:
            # 确保日期格式正确
            start_date = datetime.fromisoformat(dateOption[0].replace('Z', '+00:00')).strftime('%Y-%m-%d %H:%M:%S')
            end_date = datetime.fromisoformat(dateOption[1].replace('Z', '+00:00')).strftime('%Y-%m-%d %H:%M:%S')
            conditions.append("DT BETWEEN ? AND ?")
            params.extend([start_date, end_date])

        if fileNames:
            placeholders = ', '.join(['?'] * len(fileNames))
            conditions.append(f"excel_name IN ({placeholders})")
            params.extend(fileNames)

        if conditions:
            query += " WHERE " + " AND ".join(conditions)
            count_query += " WHERE " + " AND ".join(conditions)

            query += f" ORDER BY CAST(SNUMB AS INT) ASC"

        #print('生成的查询', query)
        #print('查询参数', params)
        cursor.execute(query, params)
        # 获取结果
        resList = []
        rows = cursor.fetchall()
        resList = []

        # 添加筛选逻辑
        for row in rows:
            # 将当前行的数据映射为字典
            res = {
                'DT': row.DT.strftime('%Y-%m-%d %H:%M:%S') if row.DT else None,  # 第1列
                'SNUMB': row.SNUMB,  # 第2列
                'excel_name': row.excel_name,  # 第3列
                'IDNO': row.IDNO,  # 第4列
                'section': row.section,  # 第5列
                'inclusion_type': row.inclusion_type,  # 第6列
                'FOV': row.FOV,  # 第7列
                'ECD': float(row.ECD) if row.ECD != 0 else 0,  # 第8列
                'area': float(row.area) if row.area != 0 else 0,  # 第9列
                'shape': float(row.shape) if row.shape != 0 else 0,  # 第10列
                'length': float(row.length) if row.length != 0 else 0,  # 第11列
                'perimeter': float(row.perimeter) if row.perimeter != 0 else 0,  # 第12列
                'aspect': float(row.aspect) if row.aspect != 0 else 0,  # 第13列
                'X_axis': float(row.X_axis) if row.X_axis != 0 else 0,  # 第14列
                'Y_axis': float(row.Y_axis) if row.Y_axis != 0 else 0,  # 第15列

                'excel2_name': row.excel2_name,
                'PONO': row.PONO,  
                'sample_type': row.sample_type, 
                'sampling_location': row.sampling_location, 
                'sampling_process': row.sampling_process, 
                'analysis_area': row.analysis_area,  
                'chart_name': row.chart_name,  
                'casting_mark': row.casting_mark, 
                'sampling_time': row.sampling_time, 
                'sample_sender': row.sample_sender, 
                'Remarks': row.Remarks, 

                'Li_wt_percent': float(row.Li_wt_percent) if row.Li_wt_percent != 0 else 0,  # 第16列
                'B_wt_percent': float(row.B_wt_percent) if row.B_wt_percent != 0 else 0,  # 第17列
                'C_wt_percent': float(row.C_wt_percent) if row.C_wt_percent != 0 else 0,  # 第18列
                'N_wt_percent': float(row.N_wt_percent) if row.N_wt_percent != 0 else 0,  # 第19列
                'O_wt_percent': float(row.O_wt_percent) if row.O_wt_percent != 0 else 0,  # 第20列
                'F_wt_percent': float(row.F_wt_percent) if row.F_wt_percent != 0 else 0,  # 第21列
                'Na_wt_percent': float(row.Na_wt_percent) if row.Na_wt_percent != 0 else 0,  # 第22列
                'Mg_wt_percent': float(row.Mg_wt_percent) if row.Mg_wt_percent != 0 else 0,  # 第23列
                'Al_wt_percent': float(row.Al_wt_percent) if row.Al_wt_percent != 0 else 0,  # 第24列
                'Si_wt_percent': float(row.Si_wt_percent) if row.Si_wt_percent != 0 else 0,  # 第25列
                'P_wt_percent': float(row.P_wt_percent) if row.P_wt_percent != 0 else 0,  # 第26列
                'S_wt_percent': float(row.S_wt_percent) if row.S_wt_percent != 0 else 0,  # 第27列
                'Cl_wt_percent': float(row.Cl_wt_percent) if row.Cl_wt_percent != 0 else 0,  # 第28列
                'K_wt_percent': float(row.K_wt_percent) if row.K_wt_percent != 0 else 0,  # 第29列
                'Ca_wt_percent': float(row.Ca_wt_percent) if row.Ca_wt_percent != 0 else 0,  # 第30列
                'Ti_wt_percent': float(row.Ti_wt_percent) if row.Ti_wt_percent != 0 else 0,  # 第31列
                'V_wt_percent': float(row.V_wt_percent) if row.V_wt_percent != 0 else 0,  # 第32列
                'Cr_wt_percent': float(row.Cr_wt_percent) if row.Cr_wt_percent != 0 else 0,  # 第33列
                'Mn_wt_percent': float(row.Mn_wt_percent) if row.Mn_wt_percent != 0 else 0,  # 第34列
                'Fe_wt_percent': float(row.Fe_wt_percent) if row.Fe_wt_percent != 0 else 0,  # 第35列
                'Ni_wt_percent': float(row.Ni_wt_percent) if row.Ni_wt_percent != 0 else 0,  # 第36列
                'Cu_wt_percent': float(row.Cu_wt_percent) if row.Cu_wt_percent != 0 else 0,  # 第37列
                'As_wt_percent': float(row.As_wt_percent) if row.As_wt_percent != 0 else 0,  # 第38列
                'Zr_wt_percent': float(row.Zr_wt_percent) if row.Zr_wt_percent != 0 else 0,  # 第39列
                'Nb_wt_percent': float(row.Nb_wt_percent) if row.Nb_wt_percent != 0 else 0,  # 第40列
                'Mo_wt_percent': float(row.Mo_wt_percent) if row.Mo_wt_percent != 0 else 0,  # 第41列
                'Ru_wt_percent': float(row.Ru_wt_percent) if row.Ru_wt_percent != 0 else 0,  # 第42列
                'Rh_wt_percent': float(row.Rh_wt_percent) if row.Rh_wt_percent != 0 else 0,  # 第43列
                'Pd_wt_percent': float(row.Pd_wt_percent) if row.Pd_wt_percent != 0 else 0,  # 第44列
                'Cd_wt_percent': float(row.Cd_wt_percent) if row.Cd_wt_percent != 0 else 0,  # 第45列
                'Sn_wt_percent': float(row.Sn_wt_percent) if row.Sn_wt_percent != 0 else 0,  # 第46列
                'Sb_wt_percent': float(row.Sb_wt_percent) if row.Sb_wt_percent != 0 else 0,  # 第47列
                'Te_wt_percent': float(row.Te_wt_percent) if row.Te_wt_percent != 0 else 0,  # 第48列
                'La_wt_percent': float(row.La_wt_percent) if row.La_wt_percent != 0 else 0,  # 第49列
                'Ce_wt_percent': float(row.Ce_wt_percent) if row.Ce_wt_percent != 0 else 0,  # 第50列
                'Pm_wt_percent': float(row.Pm_wt_percent) if row.Pm_wt_percent != 0 else 0,  # 第51列
                'Sm_wt_percent': float(row.Sm_wt_percent) if row.Sm_wt_percent != 0 else 0,  # 第52列
                'Eu_wt_percent': float(row.Eu_wt_percent) if row.Eu_wt_percent != 0 else 0,  # 第53列
                'Dy_wt_percent': float(row.Dy_wt_percent) if row.Dy_wt_percent != 0 else 0,  # 第54列
                'Tm_wt_percent': float(row.Tm_wt_percent) if row.Tm_wt_percent != 0 else 0,  # 第55列
                'Yb_wt_percent': float(row.Yb_wt_percent) if row.Yb_wt_percent != 0 else 0,  # 第56列
                'Ta_wt_percent': float(row.Ta_wt_percent) if row.Ta_wt_percent != 0 else 0,  # 第57列
                'Tl_wt_percent': float(row.Tl_wt_percent) if row.Tl_wt_percent != 0 else 0,  # 第58列
                'Ra_wt_percent': float(row.Ra_wt_percent) if row.Ra_wt_percent != 0 else 0,  # 第59列
                'Ac_wt_percent': float(row.Ac_wt_percent) if row.Ac_wt_percent != 0 else 0,  # 第60列
                'Pa_wt_percent': float(row.Pa_wt_percent) if row.Pa_wt_percent != 0 else 0,  # 第61列
            }

            # 筛选条件检查（铝镇静钢）
            conditions = [
                # 第一个规则：满足，l，S，N，Ce，La，Ti，Mg同时=0
                all(res[element] == 0 for element in ['Al_wt_percent', 'S_wt_percent', 'N_wt_percent', 'Ce_wt_percent', 'La_wt_percent',
                                                    'Ti_wt_percent', 'Mg_wt_percent']),
            ]

            # 如果没满足任何一个条件，将当前行数据添加到结果中
            if not any(conditions):
                resList.append(res)

        response = {
            'data': resList,
        }
        return jsonify(response)
    except Exception as e:  
        print(f"处理请求错误: {str(e)}")  
        return jsonify({"error": "获取数据失败", "details": str(e)}), 500  
    finally:  
        # 归还连接到池  
        connection_pool.release_connection(conn) 

# -》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》
# 铝镇静钢和硅钢，原来叫低合金钢filterLowMaterial
@app.route('/filterLowMaterial', methods=['POST', 'OPTIONS'])
def filterLowMaterial():
    if request.method == 'OPTIONS':  
        return '', 200  

    # 从连接池获取连接  
    conn = connection_pool.get_connection()  
    try:  
        cursor = conn.cursor()
        content = request.json
        #print('前端请求时附带的查询条件', content)
        syNumbers = content.get('syNumber', [])  # 试样号查询条件
        dateOption = content.get('date', [])  # 日期查询条件
        fileNames = content.get('fileName', [])  # 文件名称查询条件
        # 查询主列表（带着查询条件）
        query = "SELECT * FROM TXEX2"
        count_query = "SELECT COUNT(1) FROM TXEX2"
        params = []
        conditions = []
        if syNumbers:
            placeholders = ', '.join(['?'] * len(syNumbers))
            conditions.append(f"section IN ({placeholders})")
            params.extend(syNumbers)

        if dateOption and len(dateOption) == 2:
            # 确保日期格式正确
            start_date = datetime.fromisoformat(dateOption[0].replace('Z', '+00:00')).strftime('%Y-%m-%d %H:%M:%S')
            end_date = datetime.fromisoformat(dateOption[1].replace('Z', '+00:00')).strftime('%Y-%m-%d %H:%M:%S')
            conditions.append("DT BETWEEN ? AND ?")
            params.extend([start_date, end_date])

        if fileNames:
            placeholders = ', '.join(['?'] * len(fileNames))
            conditions.append(f"excel_name IN ({placeholders})")
            params.extend(fileNames)

        if conditions:
            query += " WHERE " + " AND ".join(conditions)
            count_query += " WHERE " + " AND ".join(conditions)

            query += f" ORDER BY CAST(SNUMB AS INT) ASC"

        #print('生成的查询', query)
        #print('查询参数', params)
        cursor.execute(query, params)
        # 获取结果
        resList = []
        rows = cursor.fetchall()
        resList = []

        # 添加筛选逻辑
        for row in rows:
            # 将当前行的数据映射为字典
            res = {
                'DT': row.DT.strftime('%Y-%m-%d %H:%M:%S') if row.DT else None,  # 第1列
                'SNUMB': row.SNUMB,  # 第2列
                'excel_name': row.excel_name,  # 第3列
                'IDNO': row.IDNO,  # 第4列
                'section': row.section,  # 第5列
                'inclusion_type': row.inclusion_type,  # 第6列
                'FOV': row.FOV,  # 第7列
                'ECD': float(row.ECD) if row.ECD != 0 else 0,  # 第8列
                'area': float(row.area) if row.area != 0 else 0,  # 第9列
                'shape': float(row.shape) if row.shape != 0 else 0,  # 第10列
                'length': float(row.length) if row.length != 0 else 0,  # 第11列
                'perimeter': float(row.perimeter) if row.perimeter != 0 else 0,  # 第12列
                'aspect': float(row.aspect) if row.aspect != 0 else 0,  # 第13列
                'X_axis': float(row.X_axis) if row.X_axis != 0 else 0,  # 第14列
                'Y_axis': float(row.Y_axis) if row.Y_axis != 0 else 0,  # 第15列

                'excel2_name': row.excel2_name,
                'PONO': row.PONO,  
                'sample_type': row.sample_type, 
                'sampling_location': row.sampling_location, 
                'sampling_process': row.sampling_process, 
                'analysis_area': row.analysis_area,  
                'chart_name': row.chart_name,  
                'casting_mark': row.casting_mark, 
                'sampling_time': row.sampling_time, 
                'sample_sender': row.sample_sender, 
                'Remarks': row.Remarks, 

                'Li_wt_percent': float(row.Li_wt_percent) if row.Li_wt_percent != 0 else 0,  # 第16列
                'B_wt_percent': float(row.B_wt_percent) if row.B_wt_percent != 0 else 0,  # 第17列
                'C_wt_percent': float(row.C_wt_percent) if row.C_wt_percent != 0 else 0,  # 第18列
                'N_wt_percent': float(row.N_wt_percent) if row.N_wt_percent != 0 else 0,  # 第19列
                'O_wt_percent': float(row.O_wt_percent) if row.O_wt_percent != 0 else 0,  # 第20列
                'F_wt_percent': float(row.F_wt_percent) if row.F_wt_percent != 0 else 0,  # 第21列
                'Na_wt_percent': float(row.Na_wt_percent) if row.Na_wt_percent != 0 else 0,  # 第22列
                'Mg_wt_percent': float(row.Mg_wt_percent) if row.Mg_wt_percent != 0 else 0,  # 第23列
                'Al_wt_percent': float(row.Al_wt_percent) if row.Al_wt_percent != 0 else 0,  # 第24列
                'Si_wt_percent': float(row.Si_wt_percent) if row.Si_wt_percent != 0 else 0,  # 第25列
                'P_wt_percent': float(row.P_wt_percent) if row.P_wt_percent != 0 else 0,  # 第26列
                'S_wt_percent': float(row.S_wt_percent) if row.S_wt_percent != 0 else 0,  # 第27列
                'Cl_wt_percent': float(row.Cl_wt_percent) if row.Cl_wt_percent != 0 else 0,  # 第28列
                'K_wt_percent': float(row.K_wt_percent) if row.K_wt_percent != 0 else 0,  # 第29列
                'Ca_wt_percent': float(row.Ca_wt_percent) if row.Ca_wt_percent != 0 else 0,  # 第30列
                'Ti_wt_percent': float(row.Ti_wt_percent) if row.Ti_wt_percent != 0 else 0,  # 第31列
                'V_wt_percent': float(row.V_wt_percent) if row.V_wt_percent != 0 else 0,  # 第32列
                'Cr_wt_percent': float(row.Cr_wt_percent) if row.Cr_wt_percent != 0 else 0,  # 第33列
                'Mn_wt_percent': float(row.Mn_wt_percent) if row.Mn_wt_percent != 0 else 0,  # 第34列
                'Fe_wt_percent': float(row.Fe_wt_percent) if row.Fe_wt_percent != 0 else 0,  # 第35列
                'Ni_wt_percent': float(row.Ni_wt_percent) if row.Ni_wt_percent != 0 else 0,  # 第36列
                'Cu_wt_percent': float(row.Cu_wt_percent) if row.Cu_wt_percent != 0 else 0,  # 第37列
                'As_wt_percent': float(row.As_wt_percent) if row.As_wt_percent != 0 else 0,  # 第38列
                'Zr_wt_percent': float(row.Zr_wt_percent) if row.Zr_wt_percent != 0 else 0,  # 第39列
                'Nb_wt_percent': float(row.Nb_wt_percent) if row.Nb_wt_percent != 0 else 0,  # 第40列
                'Mo_wt_percent': float(row.Mo_wt_percent) if row.Mo_wt_percent != 0 else 0,  # 第41列
                'Ru_wt_percent': float(row.Ru_wt_percent) if row.Ru_wt_percent != 0 else 0,  # 第42列
                'Rh_wt_percent': float(row.Rh_wt_percent) if row.Rh_wt_percent != 0 else 0,  # 第43列
                'Pd_wt_percent': float(row.Pd_wt_percent) if row.Pd_wt_percent != 0 else 0,  # 第44列
                'Cd_wt_percent': float(row.Cd_wt_percent) if row.Cd_wt_percent != 0 else 0,  # 第45列
                'Sn_wt_percent': float(row.Sn_wt_percent) if row.Sn_wt_percent != 0 else 0,  # 第46列
                'Sb_wt_percent': float(row.Sb_wt_percent) if row.Sb_wt_percent != 0 else 0,  # 第47列
                'Te_wt_percent': float(row.Te_wt_percent) if row.Te_wt_percent != 0 else 0,  # 第48列
                'La_wt_percent': float(row.La_wt_percent) if row.La_wt_percent != 0 else 0,  # 第49列
                'Ce_wt_percent': float(row.Ce_wt_percent) if row.Ce_wt_percent != 0 else 0,  # 第50列
                'Pm_wt_percent': float(row.Pm_wt_percent) if row.Pm_wt_percent != 0 else 0,  # 第51列
                'Sm_wt_percent': float(row.Sm_wt_percent) if row.Sm_wt_percent != 0 else 0,  # 第52列
                'Eu_wt_percent': float(row.Eu_wt_percent) if row.Eu_wt_percent != 0 else 0,  # 第53列
                'Dy_wt_percent': float(row.Dy_wt_percent) if row.Dy_wt_percent != 0 else 0,  # 第54列
                'Tm_wt_percent': float(row.Tm_wt_percent) if row.Tm_wt_percent != 0 else 0,  # 第55列
                'Yb_wt_percent': float(row.Yb_wt_percent) if row.Yb_wt_percent != 0 else 0,  # 第56列
                'Ta_wt_percent': float(row.Ta_wt_percent) if row.Ta_wt_percent != 0 else 0,  # 第57列
                'Tl_wt_percent': float(row.Tl_wt_percent) if row.Tl_wt_percent != 0 else 0,  # 第58列
                'Ra_wt_percent': float(row.Ra_wt_percent) if row.Ra_wt_percent != 0 else 0,  # 第59列
                'Ac_wt_percent': float(row.Ac_wt_percent) if row.Ac_wt_percent != 0 else 0,  # 第60列
                'Pa_wt_percent': float(row.Pa_wt_percent) if row.Pa_wt_percent != 0 else 0,  # 第61列
            }

            # 筛选条件检查（铝镇静钢和硅钢）
            conditions = [
                # 第一个规则：满足，Al，S，N，Si，Ce，La，Ti，Mg同时=0
                all(res[element] == 0 for element in ['Al_wt_percent', 'S_wt_percent', 'N_wt_percent',
                                                    'Si_wt_percent', 'Ce_wt_percent', 'La_wt_percent',
                                                    'Ti_wt_percent', 'Mg_wt_percent']),
            ]

            # 如果没满足任何一个条件，将当前行数据添加到结果中
            if not any(conditions):
                resList.append(res)

        response = {
            'data': resList,
        }
        return jsonify(response)
    except Exception as e:  
        print(f"处理请求错误: {str(e)}")  
        return jsonify({"error": "获取数据失败", "details": str(e)}), 500  
    finally:  
        # 归还连接到池  
        connection_pool.release_connection(conn)  
"""""

# Inclusion custom classification -- popup statistics rules ----------------------------------------------------------------------------------------------
# Column names for the inclusion-classification popup table.
columns_jzw = [
    'jzwelement',  # inclusion element category
    'jzwcount',  # occurrence count
    'jzwType'  # custom inclusion type (left empty until the user assigns one)
]
# Template file configuration: templates live in a directory next to this module.
TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), 'templates')
LAST_TEMPLATE_FILE = 'last_template.json'

# Ensure the template directory exists. exist_ok=True makes this a single
# atomic-ish call and avoids the check-then-create race of the original
# two-step `if not exists: makedirs`.
os.makedirs(TEMPLATE_DIR, exist_ok=True)


# Inclusion-classification endpoint; auto-applies the last-used template.
@app.route('/api/jzwfenlei', methods=['POST'])
def get_jzw_fenlei():
    """Count rows per ``inclusion_type`` and overlay the saved template.

    Builds one result row per inclusion category (sorted by descending
    count).  If a "last used" template file exists, its saved
    ``jzwType``/``checked`` settings are copied onto matching categories
    (matched case-insensitively).
    """
    payload = request.get_json()

    if not payload:
        print("No data received")
        return jsonify([])

    # Tally occurrences of each inclusion type.
    type_counts = {}
    for record in payload:
        category = record.get('inclusion_type', '未分类')
        type_counts[category] = type_counts.get(category, 0) + 1

    # One row per category, most frequent first.
    results = [
        {
            'jzwelement': category,
            'jzwcount': total,
            'jzwType': '',
            'checked': False,
        }
        for category, total in sorted(
            type_counts.items(), key=lambda pair: pair[1], reverse=True
        )
    ]

    # Overlay the most recently used template, if one was saved.
    last_template_path = os.path.join(TEMPLATE_DIR, LAST_TEMPLATE_FILE)
    if os.path.exists(last_template_path):
        try:
            with open(last_template_path, 'r', encoding='utf-8') as fh:
                saved_items = json.load(fh)

            # Case-insensitive lookup from element name to saved settings.
            lookup = {entry['jzwelement'].lower(): entry for entry in saved_items}

            for row in results:
                match = lookup.get(row['jzwelement'].lower())
                if match is not None:
                    row['jzwType'] = match['jzwType']
                    row['checked'] = match['checked']

            print(f"[模板系统] 自动加载上次使用的模板")
        except Exception as e:
            print(f"[模板系统] 自动加载模板失败: {e}")

    return jsonify(results)


# Save-template endpoint
@app.route('/api/jzw/template/save', methods=['POST'])
def save_jzw_template():
    """Persist the current classification configuration as a named template.

    Only rows that carry a custom type or a checked flag are stored.  The
    data is written both under the requested file name and as the
    "last used" template so /api/jzwfenlei can auto-load it later.
    """
    # get_json() may return None for a missing/non-JSON body; fall back to
    # an empty dict so the empty-name check below reports a clean error
    # instead of raising AttributeError.
    data = request.get_json() or {}
    template_name = data.get('templateName', '').strip()
    jzw_table_data = data.get('jzwTableData', [])

    if not template_name:
        return jsonify({'success': False, 'message': '模板名称不能为空'})

    # SECURITY: the name comes from the client; strip any directory
    # components so it cannot escape TEMPLATE_DIR (e.g. "../../evil.json").
    template_name = os.path.basename(template_name)

    # Ensure the file name ends with .json
    if not template_name.endswith('.json'):
        template_name += '.json'

    # Make sure the template directory exists (idempotent, race-free).
    os.makedirs(TEMPLATE_DIR, exist_ok=True)

    # Keep only rows with a custom type or a checked flag.
    template_data = []
    for item in jzw_table_data:
        if item.get('jzwType') or item.get('checked'):
            template_data.append({
                'jzwelement': item['jzwelement'],
                'jzwType': item.get('jzwType', ''),
                'checked': item.get('checked', False)
            })

    # Write the template file.
    template_path = os.path.join(TEMPLATE_DIR, template_name)
    try:
        with open(template_path, 'w', encoding='utf-8') as f:
            json.dump(template_data, f, ensure_ascii=False, indent=2)

        # Also record it as the most recently used template.
        last_template_path = os.path.join(TEMPLATE_DIR, LAST_TEMPLATE_FILE)
        with open(last_template_path, 'w', encoding='utf-8') as f:
            json.dump(template_data, f, ensure_ascii=False, indent=2)

        # Report absolute paths back to the client for display.
        abs_template_path = os.path.abspath(template_path)
        abs_template_dir = os.path.abspath(TEMPLATE_DIR)

        print(f"[模板系统] 保存模板成功: {template_name}")
        print(f"[模板系统] 模板路径: {abs_template_path}")
        print(f"[模板系统] 模板目录: {abs_template_dir}")

        return jsonify({
            'success': True,
            'message': f'模板 "{template_name}" 保存成功',
            'templatePath': abs_template_path,
            'templateDir': abs_template_dir,
            'templateName': template_name
        })

    except Exception as e:
        error_msg = f"保存失败: {str(e)}"
        print(f"[模板系统] 保存模板失败: {e}")
        return jsonify({'success': False, 'message': error_msg})


# 🔥 Template-list endpoint
@app.route('/api/jzw/template/list', methods=['GET'])
def get_jzw_template_list():
    """Return the names of all saved template files.

    The internal "last used" marker file is excluded from the listing.
    On any filesystem error an empty list is returned with success=False.
    """
    try:
        names = [
            entry for entry in os.listdir(TEMPLATE_DIR)
            if entry.endswith('.json') and entry != LAST_TEMPLATE_FILE
        ]
        return jsonify({'success': True, 'templates': names})
    except Exception as e:
        print(f"[模板系统] 获取模板列表失败: {e}")
        return jsonify({'success': False, 'templates': []})


# 🔥 Load-template endpoint
@app.route('/api/jzw/template/load', methods=['POST'])
def load_jzw_template():
    """Load a named template and remember it as the last-used one."""
    # Guard against a missing/non-JSON body (get_json() returning None).
    data = request.get_json() or {}
    template_name = data.get('templateName', '').strip()

    if not template_name:
        return jsonify({'success': False, 'message': '模板名称不能为空'})

    # SECURITY: drop any path components supplied by the client so the
    # lookup cannot read files outside TEMPLATE_DIR (path traversal).
    template_name = os.path.basename(template_name)

    template_path = os.path.join(TEMPLATE_DIR, template_name)
    try:
        if os.path.exists(template_path):
            with open(template_path, 'r', encoding='utf-8') as f:
                template_data = json.load(f)

            # Remember this template as the most recently used one.
            last_template_path = os.path.join(TEMPLATE_DIR, LAST_TEMPLATE_FILE)
            with open(last_template_path, 'w', encoding='utf-8') as f:
                json.dump(template_data, f, ensure_ascii=False, indent=2)

            return jsonify({'success': True, 'message': '模板加载成功', 'templateData': template_data})
        else:
            return jsonify({'success': False, 'message': '模板文件不存在'})
    except Exception as e:
        print(f"[模板系统] 加载模板失败: {e}")
        return jsonify({'success': False, 'message': f'加载失败: {str(e)}'})


# Insert endpoint for the inclusion classification (original logic kept)
@app.route('/api/jzwfenleicharu', methods=['POST'])
def jzw_fenlei_charu():
    """Filter ``tableData2`` down to the checked inclusion types.

    Checked rows of ``jzwTableData`` define, case-insensitively, both the
    set of type names to keep (original and custom names) and a mapping
    from original name to the custom display name.
    """
    payload = request.json
    selected_types = payload.get('selectedTypes', [])
    table_data2 = payload.get('tableData2', [])
    jzw_table_data = payload.get('jzwTableData', [])

    print("收到的夹杂物类型:", selected_types)
    print("收到的tableData2的长度:", len(table_data2))

    wanted = set()
    rename_map = {}

    for entry in jzw_table_data:
        if not entry.get('checked'):
            continue
        original_name = entry['jzwelement']
        original_key = original_name.strip().lower()
        custom_name = entry.get('jzwType', '').strip()
        # A non-empty custom type replaces the original name on output.
        rename_map[original_key] = custom_name if custom_name else original_name
        wanted.add(original_key)
        if custom_name:
            wanted.add(custom_name.lower())

    if not wanted:
        print("没有有效的夹杂物类型")
        return jsonify({'data': []})

    kept_rows = []
    for record in table_data2:
        record_key = record.get('inclusion_type', '').strip().lower()
        if record_key in wanted:
            record['inclusion_type'] = rename_map.get(record_key, record['inclusion_type'])
            kept_rows.append(record)

    print("筛选后的数据数量:", len(kept_rows))
    return jsonify({'data': kept_rows})


# Statistics endpoint ---------------------------------------------------------------------------- first chart

@app.route('/statistic', methods=['POST'])
def statistic():
    """Per-sample inclusion density statistics, bucketed by ECD size.

    Groups the posted rows by (excel_name, section), counts inclusions of
    every observed type in four ECD buckets (suffixes: _2 for <=2, _5 for
    2-5, _10 for 5-10, _11 for >10) and normalises each count by the
    sample's analysis area.

    Returns JSON with ``data`` (one dict per sample), ``columns`` (the
    dynamically generated column order the client relies on) and
    ``inclusion_types``.  Responds 400 if any row lacks a usable
    ``analysis_area``.
    """
    data = request.get_json()
    stats = {}

    # Collect every inclusion type that appears anywhere in the payload.
    all_inclusion_types = set()
    for row in data:
        if 'inclusion_type' in row and row['inclusion_type']:
            all_inclusion_types.add(str(row['inclusion_type']))

    # Initialise per-sample counters and tally the rows.
    for row in data:
        # A sample without a valid analysis area cannot be normalised.
        if 'analysis_area' not in row or not row['analysis_area'] or row['analysis_area'] == 0:
            return jsonify({"error": "⚠️⚠️⚠️该试样缺少分析面积数据⚠️⚠️⚠️"}), 400

        # One statistics bucket per (excel file, section) pair.
        key = (row['excel_name'], row['section'])

        if key not in stats:
            stats[key] = {
                'analysis_area': row['analysis_area'],
            }
            # Zero counters for every type x ECD-bucket combination.
            for inclusion_type in all_inclusion_types:
                for suffix in ['_2', '_5', '_10', '_11']:
                    stats[key][f'{inclusion_type}{suffix}'] = 0

        try:
            inclusion_type = str(row['inclusion_type'])
            ecd = float(row['ECD']) if row['ECD'] else 0

            # Map the ECD value to its size-bucket suffix.
            if ecd <= 2:
                suffix = '_2'
            elif 2 < ecd <= 5:
                suffix = '_5'
            elif 5 < ecd <= 10:
                suffix = '_10'
            elif ecd > 10:
                suffix = '_11'
            else:
                continue

            # dict.get with default replaces the original in/else branches.
            stat_key = f'{inclusion_type}{suffix}'
            stats[key][stat_key] = stats[key].get(stat_key, 0) + 1

        except (ValueError, TypeError) as e:
            print(f"处理数据时出错: {str(e)}")
            continue

    # Column order: base columns, then one column per bucket/type pair.
    base_columns = ['DT', 'SNUMB', 'excel_name', 'section', 'analysis_area']
    dynamic_columns = [
        f'{inclusion_type}{suffix}'
        for suffix in ['_2', '_5', '_10', '_11']
        for inclusion_type in sorted(all_inclusion_types)
    ]
    columns1 = base_columns + dynamic_columns

    # Build one result row per sample.
    current_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    seq = 1
    res = []

    for (excel_name, section), counts in stats.items():
        analysis_area = float(counts['analysis_area'])
        # analysis_area is guaranteed non-zero by the 400-check above.

        values = [current_time, seq, excel_name, section, analysis_area]

        # Area-normalised counts, rounded to 3 decimals; anything below
        # 0.001 collapses to 0 so the chart stays clean.
        processed_values = []
        for suffix in ['_2', '_5', '_10', '_11']:
            for inclusion_type in sorted(all_inclusion_types):
                key = f'{inclusion_type}{suffix}'
                raw_value = counts.get(key, 0) / analysis_area
                rounded_value = round(raw_value, 3)
                processed_values.append(0 if rounded_value < 0.001 else rounded_value)

        values.extend(processed_values)
        res.append(values)
        seq += 1

    # Pair each row's values with the column names.
    resList = [dict(zip(columns1, row_values)) for row_values in res]

    response = {
        'data': resList,
        'columns': columns1,  # key: the client uses this column order
        'inclusion_types': sorted(list(all_inclusion_types))
    }
    return jsonify(response)


@app.route('/statisticBar1', methods=['POST'])
def statisticBar1():
    """Build ECharts stacked-bar series from one statistics row.

    Every key of the posted dict that looks like ``<type>_<suffix>`` with
    a positive value is assigned to one of four ECD size categories; the
    result is a list of bar-series objects plus the category labels.
    """
    data = request.get_json()

    # Per-category {base type: value} buckets.
    buckets = {
        '<2μm': {},
        '2-5μm': {},
        '5-10μm': {},
        '>10μm': {}
    }
    # Dispatch table replaces the original suffix if/elif chain.
    suffix_to_bucket = {'2': '<2μm', '5': '2-5μm', '10': '5-10μm', '11': '>10μm'}

    # All base inclusion-type names seen in the payload.
    base_types = set()

    for field, amount in data.items():
        # Skip metadata / non-numeric fields.
        if isinstance(amount, str) or amount == '' or field in ['DT', 'SNUMB', 'Excel_name', 'section', 'analysis_area']:
            continue
        # Only positive values contribute to the chart.
        if amount <= 0:
            continue

        pieces = field.split('_')
        if len(pieces) < 2:
            continue
        base_name = '_'.join(pieces[:-1])
        base_types.add(base_name)

        bucket = suffix_to_bucket.get(pieces[-1])
        if bucket is not None:
            buckets[bucket][base_name] = amount

    # Assemble the series list, sorted for a stable order.
    series = []
    for base_name in sorted(base_types):
        bar_values = [
            buckets['<2μm'].get(base_name, 0),
            buckets['2-5μm'].get(base_name, 0),
            buckets['5-10μm'].get(base_name, 0),
            buckets['>10μm'].get(base_name, 0)
        ]

        # Omit types whose four buckets are all zero.
        if any(val > 0 for val in bar_values):
            series.append({
                'name': base_name,
                'barWidth': 65,
                'type': 'bar',
                'stack': 'Search Engine',
                'emphasis': {
                    'focus': 'series'
                },
                'data': bar_values
            })

    res = {
        'data': series,
        'categories': ['<2μm', '2-5μm', '5-10μm', '>10μm'],  # X-axis labels
        'inclusion_types': sorted(list(base_types))  # all inclusion types
    }

    return jsonify(res)


# Statistics endpoint ---------------------------------------------------------------------------- second chart
# Column order returned by /statistic2: basic sample info plus the two
# computed statistics (average inclusion size, inclusions per unit area).
columns2 = [
    # basic information columns
    'DT', 'SNUMB', 'excel_name', 'section', 'analysis_area', 'inclusions_avg_size', 'inclusions_num'
]


@app.route('/statistic2', methods=['POST'])
def statistic2():
    """Aggregate average inclusion size and number density per sample.

    Expects a JSON array of inclusion rows, each carrying at least
    'excel_name', 'section', 'analysis_area' and 'ECD'.  Rows are grouped
    by (excel_name, section); for each group the mean ECD and the number
    of inclusions per unit analysis area are computed.

    Returns: {'data': [{DT, SNUMB, excel_name, section, analysis_area,
              inclusions_avg_size, inclusions_num}, ...]} (keys per columns2).
    """
    data = request.get_json()

    stats = {}
    for row in data:
        # Group rows by (excel file, sample section).
        key = (row['excel_name'], row['section'])

        if key not in stats:
            stats[key] = {
                'analysis_area': row['analysis_area'],  # analysed area of the sample
                'inclusions_avg_size': 0,  # mean ECD, filled in after accumulation
                'inclusions_num': 0,       # count of valid inclusions
                'total_ecd': 0,            # running ECD sum used for the mean
            }

        try:
            ecd = float(row['ECD']) if row['ECD'] else 0
        except (KeyError, ValueError, TypeError):
            # Missing or malformed ECD: skip the row instead of failing the
            # whole request (the previous version raised KeyError on a
            # missing 'ECD' key because only ValueError/TypeError were caught).
            continue

        if ecd > 0:  # only count rows with a valid, positive ECD
            stats[key]['total_ecd'] += ecd
            stats[key]['inclusions_num'] += 1

    # Compute the averages once, after all rows have been accumulated
    # (the previous version recomputed this on every row with the same result).
    for entry in stats.values():
        if entry['inclusions_num'] > 0:
            entry['inclusions_avg_size'] = entry['total_ecd'] / entry['inclusions_num']

    current_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    resList = []
    for seq, ((excel_name, section), counts) in enumerate(stats.items(), start=1):
        analysis_area = float(counts['analysis_area'])
        if analysis_area == 0:  # guard against division by zero
            analysis_area = 1

        # One row per sample: timestamp, sequence number, names, aggregates.
        values = [current_time, seq, excel_name, section, analysis_area,
                  round(counts['inclusions_avg_size'], 2),  # 2-decimal mean ECD
                  round(counts['inclusions_num'] / analysis_area, 2)]
        resList.append(dict(zip(columns2, values)))

    return jsonify({'data': resList})


# Statistics endpoint ---------------------------------------------------------------------------------------------- chart 3
@app.route('/statistic3', methods=['POST'])
def statistic3():
    """Per-sample number density of each inclusion type (only ECD <= 10)."""
    try:
        rows = request.get_json()

        # First pass: every distinct inclusion type present in the payload.
        type_names = {str(r['inclusion_type']) for r in rows}

        # Fixed metadata column names (kept for reference; not used below).
        fixed_columns = ['DT', 'SNUMB', 'excel_name', 'section', 'analysis_area']

        grouped = {}
        for r in rows:
            # Analysis area with a divide-by-zero guard.
            area = float(r.get('analysis_area', 0))
            if area <= 0:
                area = 1

            group_key = (r['excel_name'], r['section'])
            if group_key not in grouped:
                bucket = {'analysis_area': area}
                for name in type_names:
                    bucket[name] = 0
                grouped[group_key] = bucket

            try:
                type_name = str(r['inclusion_type'])
                diameter = float(r['ECD']) if r['ECD'] else 0
                # Only inclusions with ECD <= 10 are counted here.
                if diameter <= 10:
                    grouped[group_key][type_name] += 1
            except (ValueError, TypeError) as err:
                print(f"处理数据时出错: {str(err)}")
                continue

        stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        records = []
        for index, ((file_name, sample), bucket) in enumerate(grouped.items(), start=1):
            record = {
                'DT': stamp,
                'SNUMB': index,
                'excel_name': file_name,
                'section': sample,
                'analysis_area': bucket.get('analysis_area', 1)
            }

            area = bucket.get('analysis_area', 1)
            for name in type_names:
                # Density = count / analysed area, 3 decimals, tiny values zeroed.
                density = round(bucket.get(name, 0) / area, 3)
                record[name] = density if density >= 0.001 else 0

            records.append(record)

        # Dynamic column list so the frontend knows which types were seen.
        return jsonify({
            'data': records,
            'columns': list(type_names)
        })

    except Exception as e:
        print(f"处理请求时出错: {str(e)}")
        return jsonify({'error': str(e), 'data': []}), 500


# 统计接口  ----------------------------------------------------------------------------------------------第四个图
columns6 = [
    # 基本信息列
    'DT', 'SNUMB', 'excel_name', 'section', 'analysis_area',
    # 元素含量平均值的统计列
    'N_wt_percent', 'O_wt_percent', 'F_wt_percent',
    'S_wt_percent', 'Ce_wt_percent', 'Ca_wt_percent',
    'Mg_wt_percent', 'Al_wt_percent', 'Ti_wt_percent',
    'Si_wt_percent', 'Mn_wt_percent',
]


@app.route('/statistic6', methods=['POST'])
def statistic6():
    """Average element content (wt%) per (excel_name, section) sample."""
    rows = request.get_json()

    if not rows:
        return jsonify({'data': [], 'message': '没有接收到数据'})

    # Elements whose mean content is reported.
    element_symbols = ['N', 'O', 'F', 'S', 'Ce', 'Ca', 'Mg', 'Al', 'Ti', 'Si', 'Mn']

    grouped = {}
    for record in rows:
        group_key = (record['excel_name'], record['section'])

        bucket = grouped.get(group_key)
        if bucket is None:
            # First row of this sample: seed area, counter and per-element sums.
            bucket = {'analysis_area': record['analysis_area'], 'count': 0}
            for symbol in element_symbols:
                bucket[f'{symbol}_total'] = 0
                bucket[f'{symbol}_wt_percent'] = 0
            grouped[group_key] = bucket

        try:
            bucket['count'] += 1

            for symbol in element_symbols:
                field = f'{symbol}_wt_percent'
                if record.get(field) is None:
                    continue  # field absent or explicitly null
                try:
                    bucket[f'{symbol}_total'] += float(record[field])
                except (ValueError, TypeError):
                    continue  # non-numeric content: skip this element

        except Exception:
            continue

    # Turn the accumulated sums into per-sample means (2 decimals).
    for bucket in grouped.values():
        if bucket['count'] > 0:
            for symbol in element_symbols:
                bucket[f'{symbol}_wt_percent'] = round(
                    bucket[f'{symbol}_total'] / bucket['count'], 2)

    stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    resList = []
    for index, ((file_name, sample), bucket) in enumerate(grouped.items(), start=1):
        row_values = [stamp, index, file_name, sample, bucket['analysis_area']]
        row_values.extend(bucket[f'{symbol}_wt_percent'] for symbol in element_symbols)
        # Column names follow the module-level columns6 definition.
        resList.append(dict(zip(columns6, row_values)))

    return jsonify({'data': resList})


# 统计接口   ----------------------------------------------------------------------------------------------第五个图
columns7 = [
    # 基本信息列
    'DT', 'SNUMB', 'excel_name', 'section', 'analysis_area',
    'inclusions_size_0-2μm', 'inclusions_size_2-5μm',
    'inclusions_size_5-10μm', 'inclusions_size_10+μm',
]


@app.route('/statistic7', methods=['POST'])
def statistic7():
    """Number density of inclusions in four ECD size bins per sample."""
    rows = request.get_json()

    if not rows:
        return jsonify({'message': '没有接收到数据'}), 400

    # Bin counter names, in ascending size order (matching columns7).
    bins = ('inclusions_size_0-2μm', 'inclusions_size_2-5μm',
            'inclusions_size_5-10μm', 'inclusions_size_10+μm')

    grouped = {}
    for record in rows:
        group_key = (record['excel_name'], record['section'])

        if group_key not in grouped:
            counters = {name: 0 for name in bins}
            counters['analysis_area'] = record['analysis_area']  # real analysed area
            grouped[group_key] = counters

        try:
            diameter = float(record['ECD']) if record['ECD'] else 0
        except (KeyError, ValueError, TypeError) as err:
            print(f"处理数据时出错: {str(err)}，数据行: {record}")
            continue

        # Classify by equivalent circle diameter.
        if diameter <= 2:
            grouped[group_key][bins[0]] += 1
        elif diameter <= 5:
            grouped[group_key][bins[1]] += 1
        elif diameter <= 10:
            grouped[group_key][bins[2]] += 1
        else:
            grouped[group_key][bins[3]] += 1

    stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    resList = []
    for index, ((file_name, sample), counters) in enumerate(grouped.items(), start=1):
        area = float(counters['analysis_area'])
        if area == 0:
            area = 1  # avoid division by zero

        # Timestamp, sequence, names, area, then per-unit-area bin counts.
        row_values = [stamp, index, file_name, sample, area]
        row_values.extend(round(counters[name] / area, 2) for name in bins)

        # Column names come from the module-level columns7 definition.
        resList.append(dict(zip(columns7, row_values)))

    return jsonify({'data': resList})


# Statistics endpoint ---------------------------------------------------------------------------------------------- chart 5: inclusion size distribution (multiple binning schemes)
@app.route('/statistic7_chart', methods=['POST'])
def statistic7_chart():
    """
    Chart-switching endpoint for chart 5.

    Supports 5 binning schemes: scheme 0 reuses the per-bin densities
    already aggregated by /statistic7; schemes 1-4 re-bin the raw
    inclusion rows supplied in the request's `raw_data` field.
    """
    data = request.get_json()

    selected_data = data.get('data', [])
    raw_data = data.get('raw_data', [])
    chart_type = data.get('chart_type', 0)
    use_raw_data = data.get('use_raw_data', False)  # NOTE(review): accepted but never used below

    # The 5 binning schemes; each range is (min, max] with its X-axis label.
    schemes = {
        0: {
            'name': '原始方案 (0-2, 2-5, 5-10, ≥10μm)',
            'ranges': [
                {'min': 0, 'max': 2, 'label': '0-2μm'},
                {'min': 2, 'max': 5, 'label': '2-5μm'},
                {'min': 5, 'max': 10, 'label': '5-10μm'},
                {'min': 10, 'max': float('inf'), 'label': '≥10μm'}
            ]
        },
        1: {
            'name': '方案1 (0.5-1, 1-3, 3-5, 5-10, ≥10μm)',
            'ranges': [
                {'min': 0.5, 'max': 1, 'label': '0.5-1μm'},
                {'min': 1, 'max': 3, 'label': '1-3μm'},
                {'min': 3, 'max': 5, 'label': '3-5μm'},
                {'min': 5, 'max': 10, 'label': '5-10μm'},
                {'min': 10, 'max': float('inf'), 'label': '≥10μm'}
            ]
        },
        2: {
            'name': '方案2 (1-3, 3-5, 5-10, ≥10μm)',
            'ranges': [
                {'min': 1, 'max': 3, 'label': '1-3μm'},
                {'min': 3, 'max': 5, 'label': '3-5μm'},
                {'min': 5, 'max': 10, 'label': '5-10μm'},
                {'min': 10, 'max': float('inf'), 'label': '≥10μm'}
            ]
        },
        3: {
            'name': '方案3 (2-4, 4-5, 5-10, ≥10μm)',
            'ranges': [
                {'min': 2, 'max': 4, 'label': '2-4μm'},
                {'min': 4, 'max': 5, 'label': '4-5μm'},
                {'min': 5, 'max': 10, 'label': '5-10μm'},
                {'min': 10, 'max': float('inf'), 'label': '≥10μm'}
            ]
        },
        4: {
            'name': '方案4 (5-7, 7-10, ≥10μm)',
            'ranges': [
                {'min': 5, 'max': 7, 'label': '5-7μm'},
                {'min': 7, 'max': 10, 'label': '7-10μm'},
                {'min': 10, 'max': float('inf'), 'label': '≥10μm'}
            ]
        }
    }

    scheme = schemes.get(chart_type)
    if not scheme:
        return jsonify({'error': f'无效的图表类型: {chart_type}'}), 400

    result = []

    # Scheme 0: read the pre-aggregated statistics row directly.
    if chart_type == 0:
        for row in selected_data:
            section = row.get('section') or row.get('Section')
            excel_name = row.get('excel_name') or row.get('Excel_name')

            ranges_data = []
            for range_info in scheme['ranges']:
                label = range_info['label']

                # Map each bin label onto the matching statistics field.
                if label == '0-2μm':
                    density = row.get('inclusions_size_0-2μm', 0)
                elif label == '2-5μm':
                    density = row.get('inclusions_size_2-5μm', 0)
                elif label == '5-10μm':
                    density = row.get('inclusions_size_5-10μm', 0)
                elif label == '≥10μm':
                    density = row.get('inclusions_size_10+μm', 0)
                else:
                    density = 0

                ranges_data.append({
                    'label': label,
                    'density': round(float(density), 3)
                })

            result.append({
                'section': section,
                'excel_name': excel_name,
                'ranges': ranges_data
            })

    # Schemes 1-4: re-aggregate from the raw inclusion rows.
    else:
        if not raw_data:
            return jsonify({'error': '缺少原始数据'}), 400

        for row in selected_data:
            section = row.get('section') or row.get('Section')
            excel_name = row.get('excel_name') or row.get('Excel_name')
            analysis_area = float(row.get('analysis_area') or row.get('Analysis_area') or 1)

            if analysis_area == 0:
                analysis_area = 1

            # Select the raw rows belonging to this sample (case-tolerant keys).
            sample_raw_data = [
                item for item in raw_data
                if (item.get('excel_name') or item.get('Excel_name') or '').strip() == (excel_name or '').strip()
                and (item.get('section') or item.get('Section') or '').strip() == (section or '').strip()
            ]

            # Count inclusions falling into each size range.
            ranges_data = []
            for range_info in scheme['ranges']:
                min_val = range_info['min']
                max_val = range_info['max']
                label = range_info['label']

                # Count ECD values with min < ecd <= max.
                count = 0
                for item in sample_raw_data:
                    ecd_value = item.get('ECD') or item.get('ecd') or item.get('Ecd')

                    if ecd_value is not None:
                        try:
                            ecd = float(ecd_value)
                            if min_val < ecd <= max_val:
                                count += 1
                        except (ValueError, TypeError):
                            continue

                # Density in inclusions per mm² — presumably; verify unit with frontend.
                density = count / analysis_area

                ranges_data.append({
                    'label': label,
                    'density': round(density, 3)
                })

            result.append({
                'section': section,
                'excel_name': excel_name,
                'ranges': ranges_data
            })

    # X-axis labels for the selected scheme.
    categories = [r['label'] for r in scheme['ranges']]

    response = {
        'data': result,
        'scheme_name': scheme['name'],
        'ranges': categories,
        'chart_type': chart_type
    }

    return jsonify(response)

# Statistics endpoint ---------------------------------------------------------------------------------------------- chart 6: dual-Y-axis line chart
@app.route('/statistic8', methods=['POST'])
def statistic8():
    """Min/max ECD per (excel_name, section) group of the submitted rows."""
    rows = request.get_json()  # receives jzwTableData2 from the frontend

    groups = {}
    for record in rows:
        file_name = record.get('excel_name')
        sample = record.get('section')

        # Only rows carrying both grouping fields are considered.
        if not (file_name and sample):
            continue

        group_key = (str(file_name), str(sample))
        bucket = groups.setdefault(
            group_key, {'original_data': record, 'ecd_values': []})

        # Collect the numeric ECD values supplied by the frontend.
        diameter = record.get('ECD')
        if diameter is None:
            continue
        try:
            bucket['ecd_values'].append(float(diameter))
        except (ValueError, TypeError):
            continue

    # Compute MIN/MAX from the (already filtered) frontend data.
    resList = []
    for bucket in groups.values():
        diameters = bucket['ecd_values']
        if not diameters:
            continue  # no usable ECD values for this sample

        summary = bucket['original_data'].copy()
        summary['min_diameter'] = round(min(diameters), 2)
        summary['max_diameter'] = round(max(diameters), 2)
        resList.append(summary)

    return jsonify({'data': resList})

# Statistics endpoint ---------------------------------------------------------------------------------------------- chart 7: ternary phase diagrams
@app.route('/statistic9', methods=['POST'])
def statistic9():
    """
    Build ternary-diagram data for every sample.

    Returns one row per sample containing all of its data points:
    chart 1 is Al2O3-MgO-CaO, chart 2 is Al2O3-TiO2-CaO.

    Fix: samples are grouped by (excel_name, section) instead of section
    alone, matching every other statistic endpoint — previously two Excel
    files containing the same section name were silently merged.
    """
    data = request.get_json()  # all inclusion rows from the frontend

    # Group rows per sample.
    sample_data = {}

    for item in data:
        section = item.get('section', '')
        excel_name = item.get('excel_name', '')
        key = (excel_name, section)

        if key not in sample_data:
            sample_data[key] = {
                'section': section,
                'excel_name': excel_name,
                'DT': item.get('DT', ''),  # timestamp carried through to the output
                'chart1_points': [],
                'chart2_points': []
            }

        # Element weight percentages ('' / None coerced to 0).
        al_percent = float(item.get('Al_wt_percent', 0) or 0)
        mg_percent = float(item.get('Mg_wt_percent', 0) or 0)
        ca_percent = float(item.get('Ca_wt_percent', 0) or 0)
        ti_percent = float(item.get('Ti_wt_percent', 0) or 0)

        # Chart 1: Al2O3-MgO-CaO (all three elements non-zero).
        if al_percent != 0 and mg_percent != 0 and ca_percent != 0:
            total = al_percent + mg_percent + ca_percent
            if total > 0:
                sample_data[key]['chart1_points'].append({
                    'SNUMB': item.get('SNUMB', ''),
                    'x': round((al_percent / total) * 100, 2),
                    'y': round((mg_percent / total) * 100, 2),
                    'z': round((ca_percent / total) * 100, 2)
                })

        # Chart 2: Al2O3-TiO2-CaO (all three elements non-zero).
        if al_percent != 0 and ti_percent != 0 and ca_percent != 0:
            total = al_percent + ti_percent + ca_percent
            if total > 0:
                sample_data[key]['chart2_points'].append({
                    'SNUMB': item.get('SNUMB', ''),
                    'x': round((al_percent / total) * 100, 2),
                    'y': round((ti_percent / total) * 100, 2),
                    'z': round((ca_percent / total) * 100, 2)
                })

    # Build the response: one row per sample, points keyed 1-1, 1-2, ... / 2-1, ...
    result = []
    for seq, data_obj in enumerate(sample_data.values(), start=1):
        row = {
            'SNUMB': seq,
            'section': data_obj['section'],
            'excel_name': data_obj['excel_name'],
            'DT': data_obj['DT'],
            'chart1_count': len(data_obj['chart1_points']),
            'chart2_count': len(data_obj['chart2_points'])
        }

        for idx, point in enumerate(data_obj['chart1_points'], start=1):
            row[f'1-{idx}'] = point

        for idx, point in enumerate(data_obj['chart2_points'], start=1):
            row[f'2-{idx}'] = point

        result.append(row)

    return jsonify({
        'data': result
    })

# Statistics endpoint ---------------------------------------------------------------------------------------------- chart 8: bubble colour-mapped chart
@app.route('/statistic10', methods=['POST'])
def statistic10():
    """
    Bubble statistics endpoint.

    Groups inclusions by sample (section), counts them per ECD size range
    and returns the detailed bubble coordinates for plotting.
    """
    data = request.get_json()

    if not data:
        return jsonify({'data': []})

    # Per-sample accumulators keyed by section name.
    samples_dict = {}

    for row in data:
        # Skip rows missing any required field (note: this also drops rows
        # whose ECD/X_axis/Y_axis is 0 or an empty string — falsy values).
        if not row.get('ECD') or not row.get('X_axis') or not row.get('Y_axis'):
            continue

        section = row.get('section', '')
        if not section:
            continue

        if section not in samples_dict:
            samples_dict[section] = {
                'DT': row.get('DT', ''),
                'SNUMB': row.get('SNUMB', ''),
                'excel_name': row.get('excel_name', ''),
                'section': section,
                'bubbles': [],
                'bubble_lt2': 0,
                'bubble_2to5': 0,
                'bubble_5to10': 0,
                'bubble_gt10': 0
            }

        try:
            ecd = float(row['ECD'])
            x_axis = float(row['X_axis'])
            y_axis = float(row['Y_axis'])
        except (ValueError, TypeError):
            # Was a bare `except:`, which would also swallow SystemExit and
            # KeyboardInterrupt; only conversion failures should skip the row.
            continue

        # Classify the bubble into its ECD size range and count it.
        if ecd < 2:
            size_range = 'lt2'
            samples_dict[section]['bubble_lt2'] += 1
        elif ecd < 5:
            size_range = '2to5'
            samples_dict[section]['bubble_2to5'] += 1
        elif ecd < 10:
            size_range = '5to10'
            samples_dict[section]['bubble_5to10'] += 1
        else:
            size_range = 'gt10'
            samples_dict[section]['bubble_gt10'] += 1

        # Detailed bubble coordinates for the scatter plot.
        samples_dict[section]['bubbles'].append({
            'ecd': round(ecd, 3),
            'x': round(x_axis, 2),
            'y': round(y_axis, 2),
            'size_range': size_range
        })

    # Flatten into the response rows.
    resList = []
    for sample_data in samples_dict.values():
        resList.append({
            'DT': sample_data['DT'],
            'SNUMB': sample_data['SNUMB'],
            'excel_name': sample_data['excel_name'],
            'section': sample_data['section'],
            'bubble_lt2': sample_data['bubble_lt2'],
            'bubble_2to5': sample_data['bubble_2to5'],
            'bubble_5to10': sample_data['bubble_5to10'],
            'bubble_gt10': sample_data['bubble_gt10'],
            'bubble_total': len(sample_data['bubbles']),
            'bubbles': sample_data['bubbles']
        })

    # Newest first — DT is a 'YYYY-MM-DD HH:MM:SS' string elsewhere in this
    # file, so lexicographic order matches chronological order.
    resList.sort(key=lambda x: x['DT'], reverse=True)

    if app.debug:
        print(f"[statistic10] 处理 {len(data)} 条数据，返回 {len(resList)} 个样品")

    return jsonify({'data': resList})

#####################################################################################生成word###########################


@app.route('/genWord', methods=['POST'])
def genWord():
    # 获取前端数据
    data = request.json

    # 调试：打印接收到的数据
    print("=== 调试信息 ===")
    for i in range(1, 6):
        table_key = f'htTableData{i}'
        columns_key = f'htTableColumns{i}'
        table_data = data.get(table_key, [])
        columns_order = data.get(columns_key, [])

        print(f"\n{table_key}:")
        print(f"  数据行数: {len(table_data)}")
        if table_data:
            print(f"  数据中的键顺序: {list(table_data[0].keys())}")
        print(f"  前端发送的列顺序: {columns_order}")
    print("=== 调试信息结束 ===\n")

    # 创建Word文档
    doc = Document()

    # 设置页面布局
    section = doc.sections[0]
    section.page_width = Inches(8.5)
    section.page_height = Inches(11)
    section.left_margin = Inches(1)
    section.right_margin = Inches(1)
    section.top_margin = Inches(1)
    section.bottom_margin = Inches(1)

    # 设置页脚（只显示页码）
    footer = section.footer
    footer_para = footer.paragraphs[0]
    footer_para.alignment = WD_ALIGN_PARAGRAPH.CENTER
    footer_para.text = ""  # 清空默认文本

    # 添加页码
    from docx.oxml.shared import qn
    from docx.oxml import parse_xml

    # 创建页码字段
    fldChar1 = parse_xml(
        r'<w:fldChar w:fldCharType="begin" xmlns:w="http://schemas.openxmlformats.org/wordprocessingml/2006/main"/>')
    instrText = parse_xml(
        r'<w:instrText xml:space="preserve" xmlns:w="http://schemas.openxmlformats.org/wordprocessingml/2006/main"> PAGE </w:instrText>')
    fldChar2 = parse_xml(
        r'<w:fldChar w:fldCharType="end" xmlns:w="http://schemas.openxmlformats.org/wordprocessingml/2006/main"/>')

    footer_run = footer_para.add_run()
    footer_run.font.size = Pt(10)
    footer_run.font.name = '宋体'
    footer_run._element.rPr.rFonts.set(qn('w:eastAsia'), '宋体')
    footer_run._element.rPr.rFonts.set(qn('w:ascii'), 'Times New Roman')
    footer_run._element.rPr.rFonts.set(qn('w:hAnsi'), 'Times New Roman')
    footer_run.font.color.rgb = RGBColor(128, 128, 128)

    footer_run._element.append(fldChar1)
    footer_run._element.append(instrText)
    footer_run._element.append(fldChar2)

    # 设置文档默认样式
    style = doc.styles['Normal']
    style.font.name = '宋体'
    style._element.rPr.rFonts.set(qn('w:eastAsia'), '宋体')
    style._element.rPr.rFonts.set(qn('w:ascii'), 'Times New Roman')
    style._element.rPr.rFonts.set(qn('w:hAnsi'), 'Times New Roman')
    style.font.size = Pt(12)

    # 添加封面标题
    title = doc.add_heading('夹杂物分析报告', level=1)
    title.alignment = WD_ALIGN_PARAGRAPH.CENTER
    # 设置标题字体和颜色
    for run in title.runs:
        run.font.name = '黑体'
        run._element.rPr.rFonts.set(qn('w:eastAsia'), '黑体')
        run._element.rPr.rFonts.set(qn('w:ascii'), '黑体')
        run._element.rPr.rFonts.set(qn('w:hAnsi'), '黑体')
        run.font.size = Pt(24)
        run.font.color.rgb = RGBColor(68, 84, 106)
        run.font.bold = True

    # 添加装饰线
    decoration_para = doc.add_paragraph()
    decoration_para.alignment = WD_ALIGN_PARAGRAPH.CENTER
    decoration_run = decoration_para.add_run('═' * 40)
    decoration_run.font.size = Pt(14)
    decoration_run.font.color.rgb = RGBColor(68, 84, 106)

    # 添加钢号信息（无论是否为空都显示）
    steel_num = data.get('htSteelNum', '')
    steel_para = doc.add_paragraph()
    steel_para.alignment = WD_ALIGN_PARAGRAPH.CENTER
    steel_para.paragraph_format.space_before = Pt(20)
    steel_para.paragraph_format.space_after = Pt(20)

    # 添加钢号信息
    if steel_num:
        steel_text = f'钢号：{steel_num}'
    else:
        steel_text = '钢号：未指定'

    steel_run = steel_para.add_run(steel_text)
    steel_run.font.size = Pt(16)
    steel_run.font.bold = True
    steel_run.font.name = '宋体'
    steel_run._element.rPr.rFonts.set(qn('w:eastAsia'), '宋体')
    steel_run._element.rPr.rFonts.set(qn('w:ascii'), 'Times New Roman')
    steel_run._element.rPr.rFonts.set(qn('w:hAnsi'), 'Times New Roman')
    steel_run.font.color.rgb = RGBColor(68, 84, 106)

    # 添加报告概述
    summary_para = doc.add_paragraph()
    summary_para.alignment = WD_ALIGN_PARAGRAPH.CENTER
    summary_para.paragraph_format.space_before = Pt(30)
    summary_para.paragraph_format.space_after = Pt(20)

    summary_run = summary_para.add_run('本报告包含夹杂物分析的图表和数据统计结果')
    summary_run.font.size = Pt(14)
    summary_run.font.name = '宋体'
    summary_run._element.rPr.rFonts.set(qn('w:eastAsia'), '宋体')
    summary_run._element.rPr.rFonts.set(qn('w:ascii'), 'Times New Roman')
    summary_run._element.rPr.rFonts.set(qn('w:hAnsi'), 'Times New Roman')
    summary_run.font.color.rgb = RGBColor(68, 84, 106)

    # 添加分析内容目录
    content_title = doc.add_paragraph()
    content_title.alignment = WD_ALIGN_PARAGRAPH.CENTER
    content_title.paragraph_format.space_before = Pt(40)
    content_title.paragraph_format.space_after = Pt(15)

    content_title_run = content_title.add_run('分析内容')
    content_title_run.font.size = Pt(16)
    content_title_run.font.bold = True
    content_title_run.font.name = '黑体'
    content_title_run._element.rPr.rFonts.set(qn('w:eastAsia'), '黑体')
    content_title_run._element.rPr.rFonts.set(qn('w:ascii'), '黑体')
    content_title_run._element.rPr.rFonts.set(qn('w:hAnsi'), '黑体')
    content_title_run.font.color.rgb = RGBColor(68, 84, 106)

    # 添加内容列表
    content_list = [
        '1. 夹杂物尺寸类型分布分析',
        '2. 夹杂物尺寸数密度对比分析',
        '3. 夹杂物类型数密度统计分析',
        '4. 夹杂物平均成分对比分析',
        '5. 夹杂物尺寸分布对比分析'
    ]

    for item in content_list:
        item_para = doc.add_paragraph()
        item_para.alignment = WD_ALIGN_PARAGRAPH.LEFT
        item_para.paragraph_format.left_indent = Inches(2)  # 左缩进
        item_para.paragraph_format.space_after = Pt(8)

        item_run = item_para.add_run(item)
        item_run.font.size = Pt(12)
        item_run.font.name = '宋体'
        item_run._element.rPr.rFonts.set(qn('w:eastAsia'), '宋体')
        item_run._element.rPr.rFonts.set(qn('w:ascii'), 'Times New Roman')
        item_run._element.rPr.rFonts.set(qn('w:hAnsi'), 'Times New Roman')
        item_run.font.color.rgb = RGBColor(51, 51, 51)

    # 添加生成时间（在第一页底部）
    time_para = doc.add_paragraph()
    time_para.alignment = WD_ALIGN_PARAGRAPH.CENTER
    time_para.paragraph_format.space_before = Pt(60)  # 更大的间距，推到页面底部
    time_run = time_para.add_run(f'生成时间：{datetime.now().strftime("%Y年%m月%d日 %H:%M:%S")}')
    time_run.font.size = Pt(12)
    time_run.font.name = '宋体'
    time_run._element.rPr.rFonts.set(qn('w:eastAsia'), '宋体')
    time_run._element.rPr.rFonts.set(qn('w:ascii'), 'Times New Roman')
    time_run._element.rPr.rFonts.set(qn('w:hAnsi'), 'Times New Roman')
    time_run.font.color.rgb = RGBColor(128, 128, 128)

    # Start the charts/tables section on a fresh page.
    doc.add_page_break()

    # Chart images are expected under keys ht1..ht5 as base64 data URLs.
    image_titles = {
        'ht1': '夹杂物尺寸类型分布图',
        'ht2': '夹杂物尺寸数密度对比图',
        'ht3': '夹杂物类型数密度图',
        'ht4': '夹杂物平均成分对比图',
        'ht5': '夹杂物尺寸分布对比图'
    }

    for i in range(1, 6):
        img_key = f'ht{i}'
        img_data = data.get(img_key, '')
        # Only embed well-formed data URLs; missing/other values are skipped.
        if img_data and img_data.startswith('data:image'):
            try:
                # Centered level-2 heading above the chart.
                img_title = doc.add_heading(image_titles.get(img_key, f'图表{i}'), level=2)
                img_title.alignment = WD_ALIGN_PARAGRAPH.CENTER
                img_title.paragraph_format.space_before = Pt(20)
                img_title.paragraph_format.space_after = Pt(10)

                # Heading font: 黑体 for all scripts, bold, dark slate blue.
                for run in img_title.runs:
                    run.font.name = '黑体'
                    run._element.rPr.rFonts.set(qn('w:eastAsia'), '黑体')
                    run._element.rPr.rFonts.set(qn('w:ascii'), '黑体')
                    run._element.rPr.rFonts.set(qn('w:hAnsi'), '黑体')
                    run.font.size = Pt(14)
                    run.font.color.rgb = RGBColor(68, 84, 106)
                    run.font.bold = True

                # Strip the "data:image/...;base64," header, then decode.
                header, encoded = img_data.split(',', 1)
                image_data = base64.b64decode(encoded)

                # Centered paragraph hosting the picture run.
                img_paragraph = doc.add_paragraph()
                img_paragraph.alignment = WD_ALIGN_PARAGRAPH.CENTER
                img_paragraph.paragraph_format.space_before = Pt(10)
                img_paragraph.paragraph_format.space_after = Pt(10)

                # Embed the decoded image at a fixed 6-inch width.
                image_stream = BytesIO(image_data)
                img_run = img_paragraph.add_run()
                img_run.add_picture(image_stream, width=Inches(6))

                doc.add_paragraph()  # blank line after the chart
            except Exception as e:
                # Best-effort: a bad image is logged and skipped; the report continues.
                print(f"添加图片{img_key}失败: {str(e)}")

    # Table datasets arrive under keys htTableData1..htTableData5.
    table_titles = {
        'htTableData1': '夹杂物尺寸类型分布表',
        'htTableData2': '夹杂物尺寸数密度对比表',
        'htTableData3': '夹杂物类型数密度表',
        'htTableData4': '夹杂物平均成分对比表',
        'htTableData5': '夹杂物尺寸分布对比表'
    }

    def set_cell_font(cell, text, is_header=False):
        """Write *text* into *cell*, centered, with the report fonts applied.

        Header cells are bold with white text; body cells use dark grey.
        East-Asian glyphs render in 宋体, Latin glyphs in Times New Roman.
        """
        cell.text = str(text)
        para = cell.paragraphs[0]
        para.alignment = WD_ALIGN_PARAGRAPH.CENTER

        # Pick the text color once instead of branching per run.
        text_color = RGBColor(255, 255, 255) if is_header else RGBColor(51, 51, 51)

        for cell_run in para.runs:
            cell_run.font.size = Pt(10)
            cell_run.font.name = '宋体'
            fonts = cell_run._element.rPr.rFonts
            fonts.set(qn('w:eastAsia'), '宋体')
            fonts.set(qn('w:ascii'), 'Times New Roman')
            fonts.set(qn('w:hAnsi'), 'Times New Roman')
            if is_header:
                cell_run.font.bold = True
            cell_run.font.color.rgb = text_color

    def style_table(table, is_transpose=False):
        """Apply the report look to *table*: grid borders, horizontal
        centering, fixed row height, and header/zebra shading.

        With is_transpose=True the first COLUMN is treated as the header
        band; otherwise the first ROW is.
        """
        table.style = 'Table Grid'

        # Center the table on the page via the <w:jc> child of tblPr,
        # creating the element when the table does not yet have one.
        tblPr = table._tbl.tblPr
        jc = tblPr.find(qn('w:jc'))
        if jc is None:
            jc = parse_xml(r'<w:jc {} w:val="center"/>'.format(nsdecls('w')))
            tblPr.append(jc)
        else:
            jc.set(qn('w:val'), 'center')

        # Let Word size the columns to their content.
        table.autofit = True

        header_fill = '4F5663'
        for row_pos, row in enumerate(table.rows):
            row.height = Cm(0.8)
            for col_pos, cell in enumerate(row.cells):
                cell.vertical_alignment = WD_ALIGN_VERTICAL.CENTER

                try:
                    # Decide the fill color, then apply one <w:shd> element.
                    if is_transpose:
                        # Transposed layout: column 0 is the header band.
                        if col_pos == 0:
                            fill = header_fill
                        else:
                            fill = 'F8F9FA' if row_pos % 2 == 0 else 'FFFFFF'
                    else:
                        # Normal layout: row 0 is the header band.
                        if row_pos == 0:
                            fill = header_fill
                        else:
                            fill = 'F8F9FA' if row_pos % 2 == 1 else 'FFFFFF'
                    shading = parse_xml(r'<w:shd {} w:fill="{}"/>'.format(nsdecls('w'), fill))
                    cell._tc.get_or_add_tcPr().append(shading)
                except Exception as e:
                    # Shading is cosmetic — log and keep styling the rest.
                    print(f"设置单元格背景色失败: {str(e)}")

    def get_table_headers(table_data, columns_order):
        """Decide the column order for a report table.

        Prefer the frontend-supplied *columns_order* — dropping a synthetic
        'index' column when the data rows lack that key — and fall back to
        the key order of the first data row whenever the requested columns
        are not all present in the data.
        """
        if not table_data:
            return []

        data_keys = list(table_data[0].keys())

        if columns_order:
            print(f"前端提供的列顺序: {columns_order}")

            if 'index' in columns_order and 'index' not in data_keys:
                # The frontend asked for an index column the data never had.
                filtered_columns = [col for col in columns_order if col != 'index']
                print(f"数据中没有index字段，过滤后的列顺序: {filtered_columns}")
            else:
                filtered_columns = columns_order
                print(f"保持原有列顺序: {filtered_columns}")

            # Accept the frontend order only when every column exists in the data.
            missing_columns = set(filtered_columns) - set(data_keys)
            if not missing_columns:
                print(f"使用前端列顺序: {filtered_columns}")
                return filtered_columns

            print(f"前端列顺序有问题，缺少的列: {missing_columns}")
            print(f"数据中额外的列: {set(data_keys) - set(filtered_columns)}")

        # Fallback: natural key order of the first data row.
        print(f"使用数据原始顺序: {data_keys}")
        return data_keys

    # Render each dataset htTableData1..5 as a styled Word table, honoring
    # the frontend-declared column order (htTableColumns{i}) when consistent.
    for i in range(1, 6):
        table_key = f'htTableData{i}'
        columns_key = f'htTableColumns{i}'
        table_data = data.get(table_key, [])
        columns_order = data.get(columns_key, [])

        if table_data:
            print(f"\n处理{table_key}:")

            # Centered level-2 heading above the table.
            table_title = doc.add_heading(table_titles.get(table_key, f'数据表{i}'), level=2)
            table_title.alignment = WD_ALIGN_PARAGRAPH.CENTER
            table_title.paragraph_format.space_before = Pt(20)
            table_title.paragraph_format.space_after = Pt(10)

            # Heading font: 黑体 for all scripts, bold, dark slate blue.
            for run in table_title.runs:
                run.font.name = '黑体'
                run._element.rPr.rFonts.set(qn('w:eastAsia'), '黑体')
                run._element.rPr.rFonts.set(qn('w:ascii'), '黑体')
                run._element.rPr.rFonts.set(qn('w:hAnsi'), '黑体')
                run.font.size = Pt(14)
                run.font.color.rgb = RGBColor(68, 84, 106)
                run.font.bold = True

            # Resolve the final column order (frontend order when valid,
            # otherwise the data's own key order).
            headers = get_table_headers(table_data, columns_order)

            print(f"最终使用的列顺序: {headers}")

            # Few rows (<=10): transpose so field names form the first
            # column and the table stays narrow on the page.
            if len(table_data) <= 10:
                # Transposed layout: rows = fields, cols = records + 1 (name column).

                table = doc.add_table(rows=len(headers), cols=len(table_data) + 1)

                # First column carries the field names, styled as headers.
                for row_idx, header in enumerate(headers):
                    cell = table.cell(row_idx, 0)
                    set_cell_font(cell, header, is_header=True)

                # Each remaining column carries one data record.
                for col_idx, row_data in enumerate(table_data):
                    for row_idx, header in enumerate(headers):
                        cell = table.cell(row_idx, col_idx + 1)
                        value = row_data.get(header, '')

                        # Trim float noise to 4 decimal places for display.
                        if isinstance(value, float):
                            value = round(value, 4)

                        set_cell_font(cell, str(value))

                # Apply grid/shading with first-COLUMN header treatment.
                style_table(table, is_transpose=True)

            else:
                # Many rows: chunk into groups of 15 and emit one table per chunk.
                rows_per_page = 15
                total_pages = (len(table_data) + rows_per_page - 1) // rows_per_page

                for page in range(total_pages):
                    if page > 0:
                        # Continuation heading from the second chunk onward.
                        page_title = doc.add_heading(f'{table_titles.get(table_key, f"数据表{i}")}（第{page + 1}页）',
                                                     level=3)
                        page_title.alignment = WD_ALIGN_PARAGRAPH.CENTER
                        page_title.paragraph_format.space_before = Pt(15)
                        page_title.paragraph_format.space_after = Pt(8)

                        # Continuation-heading font (smaller than the main title).
                        for run in page_title.runs:
                            run.font.name = '黑体'
                            run._element.rPr.rFonts.set(qn('w:eastAsia'), '黑体')
                            run._element.rPr.rFonts.set(qn('w:ascii'), '黑体')
                            run._element.rPr.rFonts.set(qn('w:hAnsi'), '黑体')
                            run.font.size = Pt(12)
                            run.font.color.rgb = RGBColor(68, 84, 106)

                    # Slice this chunk's row range out of the dataset.
                    start_row = page * rows_per_page
                    end_row = min((page + 1) * rows_per_page, len(table_data))
                    current_data = table_data[start_row:end_row]

                    # Normal layout: first row is the header row.
                    table = doc.add_table(rows=1, cols=len(headers))

                    # Header row.
                    hdr_cells = table.rows[0].cells
                    for idx, header in enumerate(headers):
                        set_cell_font(hdr_cells[idx], header, is_header=True)

                    # Data rows.
                    for row in current_data:
                        row_cells = table.add_row().cells
                        for idx, key in enumerate(headers):
                            value = row.get(key, '')
                            # Trim float noise to 4 decimal places for display.
                            if isinstance(value, float):
                                value = round(value, 4)
                            set_cell_font(row_cells[idx], str(value))

                    # Apply grid/shading with first-ROW header treatment.
                    style_table(table, is_transpose=False)

            doc.add_paragraph()  # blank line after each table

    # Closing decoration line marking the end of the report.
    end_decoration = doc.add_paragraph()
    end_decoration.alignment = WD_ALIGN_PARAGRAPH.CENTER
    end_decoration.paragraph_format.space_before = Pt(30)
    end_decoration_run = end_decoration.add_run('━━━━━━━━━━━━━━━━━ 报告结束 ━━━━━━━━━━━━━━━━━')
    end_decoration_run.font.size = Pt(12)
    end_decoration_run.font.color.rgb = RGBColor(68, 84, 106)
    end_decoration_run.font.bold = True

    # Persist to a named temp file so Flask can stream it back.
    # NOTE(review): delete=False means this .docx is never removed, so
    # generated files accumulate in the temp dir — consider cleanup after send.
    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.docx')
    doc.save(temp_file.name)
    temp_file.close()

    # Stream the generated Word document as a download.
    return send_file(
        temp_file.name,
        as_attachment=True,
        download_name='夹杂物分析报告.docx',
        mimetype='application/vnd.openxmlformats-officedocument.wordprocessingml.document'
    )


# Program entry point.
if __name__ == '__main__':
    # NOTE(review): debug=True combined with host='0.0.0.0' exposes the
    # Werkzeug debugger to the whole network — disable debug (or bind to
    # 127.0.0.1) for any non-local deployment.
    app.run(host='0.0.0.0', port=5000, debug=True)
