
import json
import traceback
import pandas as pd
from tqdm import tqdm
from django.db import IntegrityError, transaction
from control.models import User
from literature.models import *
from basic_component_related_management.models import *
from dataset.models import *
from unit_management.models import *
import glob
import os

def batch_convert():
    """Convert every DECHEMA pure-component Excel table to JSON and import it.

    Scans ../dechema_tables/ for PureComp_File_*.xlsx workbooks, sorts them by
    the numeric token in the file name, converts each one to the shared JSON
    intermediate, and loads it into the database.
    """
    folder_path = "../dechema_tables/"
    # Sort by the number embedded in the file name (second-to-last "_" token);
    # assumes names like PureComp_File_<n>_....xlsx -- TODO confirm pattern.
    file_paths = sorted(
        glob.glob(os.path.join(folder_path, "PureComp_File_*.xlsx")),
        key=lambda path: int(path.split("_")[-2].split(".")[0]),
    )
    print(file_paths)
    super_admin_entity = User.objects.filter(user_account="superadmin").first()

    # Cache the database lookup tables keyed by their natural identifiers.
    unit_dict = {
        unit.sei_id: unit
        for unit in Unit.objects.filter(valid_flag=True).all()
    }
    property_dict = {
        prop.abbreviation: prop
        for prop in Property.objects.filter(valid_flag=True).all()
    }
    phase_dict = {
        phase.abbreviation: phase
        for phase in Phase.objects.filter(valid_flag=True).all()
    }
    variable_dict = {
        var.symbol: var
        for var in PropertyVariable.objects.filter(valid_flag=True).all()
    }

    # Fetch -- or lazily create -- the required fallback entities.
    data_present_unknow_entity = DataPresentWay.objects.filter(
        name="Unknown").first()
    if data_present_unknow_entity is None:
        data_present_unknow_entity = DataPresentWay.objects.create(
            name="Unknown",
            creator=super_admin_entity,
            last_editor=super_admin_entity
        )

    purpose_others_entity = ExpPurpose.objects.filter(name="others").first()
    if purpose_others_entity is None:
        purpose_others_entity = ExpPurpose.objects.create(
            name="others",
            creator=super_admin_entity,
            last_editor=super_admin_entity,
        )

    # The nDSIID attribute must already exist; abort the batch otherwise.
    DSIID_attribute_entity = Attribute.objects.filter(name_en="nDSIID").first()
    if DSIID_attribute_entity is None:
        print("nDSIID attribute not exist")
        return

    for file in file_paths:
        print(f"process file {file}")
        convert_file_to_json(file)
        init_dechema_pure_data(unit_dict, property_dict,
                               phase_dict, variable_dict,
                               data_present_unknow_entity,
                               purpose_others_entity,
                               DSIID_attribute_entity)
def convert_file_to_json(file, json_path="../dechema_tables/dechema.json"):
    """Convert one DECHEMA pure-component Excel sheet to a JSON records file.

    Args:
        file: Path to a PureComp_File_*.xlsx workbook (data on "Sheet1").
        json_path: Destination for the JSON output. Defaults to the shared
            intermediate file consumed by init_dechema_pure_data, so existing
            callers are unaffected; pass another path to avoid clobbering it.

    Raises:
        ValueError: If the sheet does not expose exactly 37 columns.
    """
    print(f"开始转化{file}为json文件")

    # Read everything as strings and only the first 37 columns to cut memory;
    # blank cells become "" so downstream truthiness checks work uniformly.
    df = pd.read_excel(
        file,
        sheet_name="Sheet1",
        header=0,
        dtype=str,
        usecols=range(37)
    ).fillna("")

    # Canonical column names expected by the importer, in sheet order.
    new_columns = [
        'DSIID', 'nPCdataCompID', 'Compound', 'Property', 'sPhaseSymbol',
        'sPhaseSymbol2', 'sPhaseSymbol3', 'Variable1Symbol',
        'sVariableSymbol_var1', 'Variable1PropSubType', 'Variable1Value',
        'Variable1Error', 'Variable1Unit', 'nPCdataUnitID_var1',
        'SignificantDigit1', 'Flag1', 'Variable2Symbol', 'sVariableSymbol_var2',
        'Variable2PropSubType', 'Variable2Value', 'Variable2Error',
        'Variable2Unit', 'nPCdataUnitID_var2', 'SignificantDigit2', 'Flag2',
        'ePropAbbreviation', 'ePropAbbreviation_old', 'PropSubType',
        'PropValue', 'PropUnit', 'nPCdataUnitID_prop', 'SignificantDigitProp',
        'FlagProp', 'Error', 'Note', 'Ref', 'nPCdataCitID'
    ]

    if len(df.columns) != len(new_columns):
        raise ValueError("列数不匹配")

    df.columns = new_columns

    # Let pandas serialize the records directly -- one C-level pass.
    df.to_json(
        json_path,
        orient="records",
        indent=4,
        force_ascii=False
    )


def init_dechema_pure_data(unit_dict, property_dict, 
                         phase_dict, variable_dict, 
                         data_present_unknow_entity, 
                         purpose_others_entity,
                         DSIID_attribute_entity):
    """Import pure-component datasets from ../dechema_tables/dechema.json.

    Rows are grouped by (DSIID, component, variable symbols, property
    abbreviation, reference) into one ExpDataset per group; datasets are then
    bulk-created inside a single transaction together with their phase,
    component, property, variable, and data-point relations.

    Args:
        unit_dict: {sei_id: Unit} lookup of valid units.
        property_dict: {abbreviation: Property} lookup of valid properties.
        phase_dict: {abbreviation: Phase} lookup of valid phases.
        variable_dict: {symbol: PropertyVariable} lookup of valid variables.
        data_present_unknow_entity: fallback DataPresentWay ("Unknown").
        purpose_others_entity: fallback ExpPurpose ("others").
        DSIID_attribute_entity: the nDSIID Attribute (not used in this
            function's visible body; kept for interface compatibility).
    """
    # Read the JSON intermediate produced by convert_file_to_json.
    super_admin_entity = User.objects.filter(user_account="superadmin").first()
    with open("../dechema_tables/dechema.json", "r", encoding="utf-8") as json_file:
        data = json.load(json_file)

    # Preload all required component and literature rows in two queries.
    component_ids = {item["nPCdataCompID"] for item in data}
    component_attr_map = {
        attr.sei_id: attr
        for attr in ComponentIdentifier.objects.filter(
            sei_id__in=component_ids
        )
    }

    # NOTE(review): rows with Note == "E" are excluded from the preload, yet
    # the lookup below does not re-check Note -- such groups silently get a
    # None literature instead of a DB hit. Confirm this is intended.
    literature_ids = {str(item["nPCdataCitID"]) for item in data if item.get("nPCdataCitID") and item.get("Note") != "E"}
    literature_map = {
        lit.liter_code: lit 
        for lit in Literature.objects.filter(liter_code__in=literature_ids)
    }

    # Group the raw rows into datasets.
    grouped_data = {}
    for item in data:
        group_key = (item["DSIID"], item["nPCdataCompID"],
                    item["Variable1Symbol"], item["Variable2Symbol"], 
                    item["ePropAbbreviation"], item["Ref"])

        if group_key not in grouped_data:
            grouped_data[group_key] = {
                "info": {
                    "nPCdataCompID": item["nPCdataCompID"],
                    "Compound": item["Compound"],
                    "Property": item["Property"],
                    "PropSubType": item["PropSubType"],
                    "PropUnit": item["PropUnit"],
                    "Variable1Symbol": item["Variable1Symbol"],
                    "sVariableSymbol_var1": item["sVariableSymbol_var1"],
                    "Variable1PropSubType": item["Variable1PropSubType"],
                    "Variable2Symbol": item["Variable2Symbol"],
                    "nPCdataUnitID_var1": item["nPCdataUnitID_var1"],
                    "sVariableSymbol_var2": item["sVariableSymbol_var2"],
                    "Variable2PropSubType": item["Variable2PropSubType"],
                    "nPCdataUnitID_var2": item["nPCdataUnitID_var2"],
                    "ePropAbbreviation": item["ePropAbbreviation"],
                    "ePropAbbreviation_old": item["ePropAbbreviation_old"],
                    "nPCdataUnitID_prop": item["nPCdataUnitID_prop"],
                    "nPCdataCitID": item["nPCdataCitID"],
                },
                "phases": set(),
                "flags": set(),
                "data_points": []
            }

        # Collect the non-empty phase symbols from the three phase columns.
        phases = [item[f"sPhaseSymbol{i}"] for i in ("", "2", "3") if item.get(f"sPhaseSymbol{i}")]
        grouped_data[group_key]["phases"].update(phases)

        if item["FlagProp"]:
            grouped_data[group_key]["flags"].update(item["FlagProp"].split(";"))

        grouped_data[group_key]["data_points"].append({
            "Variable1Value": item["Variable1Value"],
            "Variable1Error": item["Variable1Error"],
            "Variable2Value": item["Variable2Value"],
            "Variable2Error": item["Variable2Error"],
            "PropValue": item["PropValue"],
            "Error": item["Error"],
            "SignificantDigitProp": item["SignificantDigitProp"],
            "SignificantDigit1": item["SignificantDigit1"],
            "SignificantDigit2": item["SignificantDigit2"],
        })

    # Resolve each group's entities and stage the datasets for bulk creation.
    datasets_to_create = []
    databank_entity = DataBank.objects.filter(name="基础物性数据集").first()
    for group_key, group_data in tqdm(grouped_data.items(), desc="Preparing dechema pure data"):
        try:
            info = group_data["info"]

            # Use the preloaded component map.
            component_entity = component_attr_map.get(info["nPCdataCompID"])
            if not component_entity:
                print(f"Error: Component with nPCdataCompID {info['nPCdataCompID']} not found")
                continue

            # Use the preloaded literature map (may legitimately be None).
            dechema_reference_entity = literature_map.get(str(info["nPCdataCitID"])) if info["nPCdataCitID"] else None

            # Property and its unit are mandatory for every dataset.
            property_entity = property_dict.get(info["ePropAbbreviation"])
            property_unit_entity = unit_dict.get(info["nPCdataUnitID_prop"])
            if not property_entity or not property_unit_entity:
                print(f"Error: Property {info['ePropAbbreviation']} or unit {info['nPCdataUnitID_prop']} not found")
                # Fix: skip this group. ExpDatasetPropertyMap is created
                # unconditionally later; a None property/unit would raise
                # inside transaction.atomic() and abort the whole import.
                continue

            # Variable 1 (optional; creation of its map is guarded later).
            variable_entity1 = variable_dict.get(info['sVariableSymbol_var1']) if info["Variable1Symbol"] else None
            variable_unit_entity1 = unit_dict.get(info["nPCdataUnitID_var1"]) if info["Variable1Symbol"] else None
            if info["Variable1Symbol"] and (not variable_entity1 or not variable_unit_entity1):
                print(f"Error: Variable1 {info['sVariableSymbol_var1']} or unit {info['nPCdataUnitID_var1']} not found")

            # Variable 2 (optional; creation of its map is guarded later).
            variable_entity2 = variable_dict.get(info['sVariableSymbol_var2']) if info["Variable2Symbol"] else None
            variable_unit_entity2 = unit_dict.get(info["nPCdataUnitID_var2"]) if info["Variable2Symbol"] else None
            if info["Variable2Symbol"] and (not variable_entity2 or not variable_unit_entity2):
                print(f"Error: Variable2 {info['sVariableSymbol_var2']} or unit {info['nPCdataUnitID_var2']} not found")

            # Phases: silently drop symbols not present in the lookup.
            phase_entity_list = [phase_dict[phase] for phase in group_data["phases"] if phase in phase_dict]

            # Build the (unsaved) dataset object.
            dataset_name = f"{info['Compound']}--{info['Property']}--{info['Variable1Symbol']};{info['Variable2Symbol']}"
            dataset = ExpDataset(
                name=dataset_name,
                zh_name=dataset_name,
                description="",
                comment=";".join(group_data["flags"]),
                databank = databank_entity,
                source_liter=dechema_reference_entity,
                purpose=purpose_others_entity,
                data_present_way=data_present_unknow_entity,
                review_status=1,
                creator=super_admin_entity,
                last_editor=super_admin_entity,
            )

            datasets_to_create.append({
                'dataset': dataset,
                'component': component_entity,
                'property': property_entity,
                'property_unit': property_unit_entity,
                'variable1': variable_entity1,
                'variable1_unit': variable_unit_entity1,
                'variable2': variable_entity2,
                'variable2_unit': variable_unit_entity2,
                'phases': phase_entity_list,
                'data_points': group_data["data_points"]
            })

        except Exception as e:
            print(f"Error preparing group {group_key}: {str(e)}")
            traceback.print_exc()
            continue

    # Persist everything inside one transaction.
    with transaction.atomic():
        # Bulk-create all datasets first.
        created_datasets = ExpDataset.objects.bulk_create(
            [d['dataset'] for d in datasets_to_create])

        # Then wire up relations per dataset.
        for dataset, dataset_info in tqdm(zip(created_datasets, datasets_to_create), 
                                   total=len(datasets_to_create),
                                   desc="Processing datasets relations"):
            # Phases.
            if dataset_info['phases']:
                dataset.phases.set(dataset_info['phases'])

            # Component.
            ExpDatasetComponentMap.objects.create(
                exp_dataset=dataset,
                component=dataset_info['component']
            )

            # Property map (entities guaranteed non-None by the skip above).
            property_map = ExpDatasetPropertyMap.objects.create(
                exp_dataset=dataset,
                property=dataset_info['property'],
                unit=dataset_info['property_unit'],
                measurement_error_percent=False,
            )
            property_map.prop_component.set([dataset_info['component']])

            # Variable 1 map, only if resolved.
            variable_map1 = None
            if dataset_info['variable1']:
                variable_map1 = ExpDatasetVariableMap.objects.create(
                    exp_dataset=dataset,
                    variable=dataset_info['variable1'],
                    unit=dataset_info['variable1_unit'],
                    measurement_error_percent=False,
                )

            # Variable 2 map, only if resolved.
            variable_map2 = None
            if dataset_info['variable2']:
                variable_map2 = ExpDatasetVariableMap.objects.create(
                    exp_dataset=dataset,
                    variable=dataset_info['variable2'],
                    unit=dataset_info['variable2_unit'],
                    measurement_error_percent=False,
                )

            # Build data points plus their variable/property value rows.
            datapoint_entity_list = []
            datapoint_property_list = []
            datapoint_variable_list = []

            for point_item in dataset_info['data_points']:
                datapoint_entity = DataPoint()
                datapoint_entity_list.append(datapoint_entity)

                # Variable 1 value for this point.
                if point_item["Variable1Value"] and variable_map1:
                    datapoint_variable_entity1 = DataPointExpDatasetVariableMap(
                        data_point=datapoint_entity,
                        exp_dataset_variable_map=variable_map1,
                        value=point_item["Variable1Value"],
                        measurement_error_value=point_item["Variable1Error"] if point_item["Variable1Error"] else None,
                    )
                    datapoint_variable_list.append(datapoint_variable_entity1)

                # Variable 2 value for this point.
                if point_item["Variable2Value"] and variable_map2:
                    datapoint_variable_entity2 = DataPointExpDatasetVariableMap(
                        data_point=datapoint_entity,
                        exp_dataset_variable_map=variable_map2,
                        value=point_item["Variable2Value"],
                        measurement_error_value=point_item["Variable2Error"] if point_item["Variable2Error"] else None,
                    )
                    datapoint_variable_list.append(datapoint_variable_entity2)

                # Property value for this point.
                if point_item["PropValue"]:
                    datapoint_property_entity = DataPointExpDatasetPropertyMap(
                        data_point=datapoint_entity,
                        exp_dataset_property_map=property_map,
                        value=point_item["PropValue"],
                        measurement_error_value=point_item["Error"] if point_item["Error"] else None,
                    )
                    datapoint_property_list.append(datapoint_property_entity)

            # Bulk-create data points, then their value rows.
            DataPoint.objects.bulk_create(datapoint_entity_list)

            if datapoint_variable_list:
                DataPointExpDatasetVariableMap.objects.bulk_create(
                    datapoint_variable_list)

            if datapoint_property_list:
                DataPointExpDatasetPropertyMap.objects.bulk_create(
                    datapoint_property_list)

            # Attach the data points to the dataset.
            dataset.data_points.set(datapoint_entity_list)

    print("Dechema pure data import finished")


def convert_file_2_to_json():
    """Merge the DECHEMA binary (phase-equilibrium) Excel sheets into one JSON file.

    Reads both unit-aligned sheets from every Comp2_File_1_40000*.xlsx
    workbook under ../dechema_tables/, normalizes the first 51 columns to
    canonical names, and writes the combined records to
    ../dechema_tables/dechema_2.json.

    Raises:
        ValueError: If the combined table does not expose exactly 51 columns.
    """
    # Units are pre-aligned in these sheets.
    print("开始转化 dechema相平衡_2 为json文件")
    folder_path = "../dechema_tables/"
    file_paths = glob.glob(os.path.join(
        folder_path, "Comp2_File_1_40000*.xlsx"))
    dfs = []
    sheet_names = ["相平衡数据入库对齐版1", "相平衡数据入库对齐版2"]
    for file in file_paths:
        for sheet_name in sheet_names:
            # Read everything as strings to keep codes/IDs intact.
            df = pd.read_excel(
                file,
                sheet_name=sheet_name,
                header=0,
                dtype=str
            )
            dfs.append(df)

    combined_df = pd.concat(dfs, ignore_index=True)
    dechema_select = combined_df.iloc[:, :51].fillna("")
    # Canonical column names expected by the importer, in sheet order.
    new_columns = [
        'DSIID1', 'nPCdataCompID_ID1', 'DSIID2', 'nPCdataCompID_ID2',
        'Compound1', 'Compound2', 'Property', 'sPhaseSymbol1', 'sPhaseSymbol2',
        'sPhaseSymbol3', 'sPhaseSymbol4', 'sFlag', 'Variable1Symbol',
        'sVariableSymbol_var1', 'Variable1PropSubType', 'Variable1Value',
        'Variable1Error', 'nPCdataUnitID_var1', 'SignificantDigit_var1', 'Flag_var1',
        'Variable2Symbol', 'sVariableSymbol_var2', 'Variable2PropSubType',
        'Variable2Value', 'Variable2Error', 'nPCdataUnitID_var2',
        'SignificantDigit_var2', 'Flag_var2', 'Prop1Symbol', 'ePropAbbreviation_ID1',
        'Prop1SubType', 'Prop1Phase', 'Prop1Value', 'nPCdataUnitID_prop1',
        'Error_prop1', 'SignificantDigit_Prop1', 'Flag_Prop1', 'Prop2Symbol',
        'ePropAbbreviation_ID2', 'Prop2SubType', 'Prop2Phase', 'Prop2Value',
        'nPCdataUnitID_prop2', 'Error_prop2', 'SignificantDigit_Prop2',
        'Flag_Prop2', 'Note', 'Ref', 'nPCdataCitID', 'DataType', 'Date'
    ]
    if len(dechema_select.columns) != len(new_columns):
        raise ValueError("列数不匹配")
    dechema_select.columns = new_columns

    # One C-level conversion replaces the previous 50-line iterrows() loop:
    # to_dict("records") yields one dict per row with keys in column order,
    # exactly matching the hand-built records it supersedes.
    dechema_json = dechema_select.to_dict(orient="records")

    with open("../dechema_tables/dechema_2.json", "w", encoding="utf-8") as json_file:
        json.dump(dechema_json, json_file, indent=4)

def init_dechema_2_data():
    # 读取JSON文件
    super_admin_entity = User.objects.filter(user_account="superadmin").first()
    print("read dechema_2.json file")
    with open("../dechema_tables/dechema_2.json", "r", encoding="utf-8") as json_file:
        data = json.load(json_file)

    # 获取数据库里的对应关系
    unit_dict = {}
    unit_entity_list = Unit.objects.filter(valid_flag=True).all()
    for unit_entity in unit_entity_list:
        unit_dict[unit_entity.sei_id] = unit_entity

    property_dict = {}
    property_entity_list = Property.objects.filter(valid_flag=True).all()
    for property_entity in property_entity_list:
        property_dict[property_entity.abbreviation] = property_entity

    phase_dict = {}
    phase_entity_list = Phase.objects.filter(valid_flag=True).all()
    for phase_entity in phase_entity_list:
        phase_dict[phase_entity.abbreviation] = phase_entity

    variable_dict = {}
    variable_entity_list = PropertyVariable.objects.filter(valid_flag=True).all()
    for variable_entity in variable_entity_list:
        variable_dict[variable_entity.symbol] = variable_entity

    # 获取或创建必要的基础实体
    data_present_unknow_entity = DataPresentWay.objects.filter(name="Unknown").first()
    if data_present_unknow_entity is None:
        data_present_unknow_entity = DataPresentWay.objects.create(
            name="Unknown",
            creator=super_admin_entity,
            last_editor=super_admin_entity
        )

    purpose_others_entity = ExpPurpose.objects.filter(name="others").first()
    if purpose_others_entity is None:
        purpose_others_entity = ExpPurpose.objects.create(
            name="others",
            creator=super_admin_entity,
            last_editor=super_admin_entity,
        )

    # DSIID属性
    DSIID_attribute_entity = Attribute.objects.filter(name_en="nDSIID").first()
    if DSIID_attribute_entity is None:
        print("nDSIID attribute not exist")
        return
    # 按照组分、物性和变量组合进行分组
    grouped_data = {}
    for item in data:

        group_key = (
            item["DSIID1"], item["nPCdataCompID_ID1"],
            item["DSIID2"], item["nPCdataCompID_ID2"],
            item["Variable1Symbol"], item["Variable2Symbol"],
            item["Prop1Symbol"], item["Prop2Symbol"],
            item["Ref"]
        )

        if group_key not in grouped_data:
            grouped_data[group_key] = {
                "info": {
                    "nPCdataCompID_ID1": item["nPCdataCompID_ID1"],
                    "nPCdataCompID_ID2": item["nPCdataCompID_ID2"],
                    "Compound1": item["Compound1"],
                    "Compound2": item["Compound2"],
                    "Property": item["Property"],
                    "Prop1SubType": item["Prop1SubType"],
                    "Prop2SubType": item["Prop2SubType"],
                    "Variable1Symbol": item["Variable1Symbol"],
                    "sVariableSymbol_var1": item["sVariableSymbol_var1"],
                    "Variable1PropSubType": item["Variable1PropSubType"],
                    "nPCdataUnitID_var1": item["nPCdataUnitID_var1"],
                    "Variable2Symbol": item["Variable2Symbol"],
                    "sVariableSymbol_var2": item["sVariableSymbol_var2"],
                    "Variable2PropSubType": item["Variable2PropSubType"],
                    "nPCdataUnitID_var2": item["nPCdataUnitID_var2"],
                    "ePropAbbreviation_ID1": item["ePropAbbreviation_ID1"],
                    "ePropAbbreviation_ID2": item["ePropAbbreviation_ID2"],
                    "nPCdataUnitID_prop1": item["nPCdataUnitID_prop1"],
                    "nPCdataUnitID_prop2": item["nPCdataUnitID_prop2"],
                    "nPCdataCitID": item["nPCdataCitID"],
                    "DataType": item["DataType"]
                },
                "phases": set(),
                "flags": set(),
                "data_points": []
            }

        # 收集所有相态
        for phase_field in ["sPhaseSymbol1", "sPhaseSymbol2", "sPhaseSymbol3", "sPhaseSymbol4"]:
            if item[phase_field]:
                grouped_data[group_key]["phases"].add(item[phase_field])

        # 收集所有标志
        for flag_field in ["Flag_var1", "Flag_var2", "Flag_Prop1", "Flag_Prop2", "sFlag"]:
            if item[flag_field]:
                grouped_data[group_key]["flags"].update(item[flag_field].split(";"))

        # 准备数据点
        data_point = {
            "Variable1Value": item["Variable1Value"],
            "Variable1Error": item["Variable1Error"],
            "Variable2Value": item["Variable2Value"],
            "Variable2Error": item["Variable2Error"],
            "Prop1Value": item["Prop1Value"],
            "Error_prop1": item["Error_prop1"],
            "Prop2Value": item["Prop2Value"],
            "Error_prop2": item["Error_prop2"],
            "SignificantDigit_var1": item["SignificantDigit_var1"],
            "SignificantDigit_var2": item["SignificantDigit_var2"],
            "SignificantDigit_Prop1": item["SignificantDigit_Prop1"],
            "SignificantDigit_Prop2": item["SignificantDigit_Prop2"],
        }
        grouped_data[group_key]["data_points"].append(data_point)

    # 准备批量创建的数据
    datasets_to_create = []
    error_count = 0
    databank_entity = DataBank.objects.filter(name="相平衡数据集").first()
    for group_key, group_data in tqdm(grouped_data.items(), desc="Processing dechema binary data"):
        try:
            info = group_data["info"]

            # 1. 获取组分1
            component_entity1 = ComponentIdentifier.objects.filter(
                sei_id=info["nPCdataCompID_ID1"],
            ).first()
            if component_entity1 is None:
                print(f"Error: Component1 with ID {info['nPCdataCompID_ID1']} not found")
                error_count += 1
                continue

            # 2. 获取组分2
            component_entity2 = ComponentIdentifier.objects.filter(
                sei_id=info["nPCdataCompID_ID2"],
            ).first()
            if component_entity2 is None:
                print(f"Error: Component2 with ID {info['nPCdataCompID_ID2']} not found")
                error_count += 1
                continue

            # 3. 获取文献
            dechema_reference_entity = None
            if info["nPCdataCitID"]:
                dechema_reference_entity = Literature.objects.filter(
                    liter_code=str(info["nPCdataCitID"])).first()

            # 4. 获取物性1 - 只有当ePropAbbreviation_ID1存在时才处理
            property_entity1 = None
            property_unit_entity1 = None
            if info["ePropAbbreviation_ID1"]:
                property_entity1 = property_dict.get(info["ePropAbbreviation_ID1"])
                if property_entity1 is None:
                    print(f"Warning: Property1 {info['ePropAbbreviation_ID1']} not found, skipping property1")
                else:
                    property_unit_entity1 = unit_dict.get(info["nPCdataUnitID_prop1"])
                    if property_unit_entity1 is None:
                        print(f"Warning: Property1 unit {info['nPCdataUnitID_prop1']} not found, skipping property1")
                        property_entity1 = None

            # 5. 获取物性2 - 只有当ePropAbbreviation_ID2存在时才处理
            property_entity2 = None
            property_unit_entity2 = None
            if info["ePropAbbreviation_ID2"]:
                property_entity2 = property_dict.get(info["ePropAbbreviation_ID2"])
                if property_entity2 is None:
                    print(f"Warning: Property2 {info['ePropAbbreviation_ID2']} not found, skipping property2")
                else:
                    property_unit_entity2 = unit_dict.get(info["nPCdataUnitID_prop2"])
                    if property_unit_entity2 is None:
                        print(f"Warning: Property2 unit {info['nPCdataUnitID_prop2']} not found, skipping property2")
                        property_entity2 = None

            # 6. 处理变量1 - 只有当sVariableSymbol_var1存在时才处理
            variable_entity1 = None
            variable_unit_entity1 = None
            if info["sVariableSymbol_var1"]:
                variable_entity1 = variable_dict.get(info['sVariableSymbol_var1'])
                if variable_entity1 is None and info["sVariableSymbol_var1"] != "":
                    print(f"Warning: Variable1 {info['sVariableSymbol_var1']} not found, skipping variable1")
                else:
                    variable_unit_entity1 = unit_dict.get(info["nPCdataUnitID_var1"])
                    if variable_unit_entity1 is None and info["nPCdataUnitID_var1"] != "":
                        print(f"Warning: Variable1 unit {info['nPCdataUnitID_var1']} not found, skipping variable1")
                        variable_entity1 = None

            # 7. 处理变量2 - 只有当sVariableSymbol_var2存在时才处理
            variable_entity2 = None
            variable_unit_entity2 = None
            if info["sVariableSymbol_var2"]:
                variable_entity2 = variable_dict.get(info['sVariableSymbol_var2'])
                if variable_entity2 is None and info["sVariableSymbol_var2"] != "":
                    print(f"Warning: Variable2 {info['sVariableSymbol_var2']} not found, skipping variable2")
                else:
                    variable_unit_entity2 = unit_dict.get(info["nPCdataUnitID_var2"])
                    if variable_unit_entity2 is None and info["nPCdataUnitID_var2"] != "":
                        print(f"Warning: Variable2 unit {info['nPCdataUnitID_var2']} not found, skipping variable2")
                        variable_entity2 = None

            # 8. 处理相态
            phase_entity_list = []
            for phase_symbol in group_data["phases"]:
                phase_entity = phase_dict.get(phase_symbol)
                if phase_entity:
                    phase_entity_list.append(phase_entity)

            # 准备数据集名称
            # Human-readable dataset name: the two compounds plus the property,
            # optionally suffixed with the variable symbols when any are set.
            dataset_name = f"{info['Compound1']}--{info['Compound2']}--{info['Property']}"
            if info["Variable1Symbol"] or info["Variable2Symbol"]:
                dataset_name += f"--{info['Variable1Symbol']};{info['Variable2Symbol']}"

            # Prepare the (unsaved) dataset object; all rows are persisted in
            # one bulk_create below instead of one INSERT per group.
            dataset = ExpDataset(
                name=dataset_name,
                zh_name=dataset_name,
                description="",
                comment=";".join(group_data["flags"]),
                databank = databank_entity,
                source_liter=dechema_reference_entity,
                purpose=purpose_others_entity,
                data_present_way=data_present_unknow_entity,
                review_status=1,
                creator=super_admin_entity,
                last_editor=super_admin_entity,
            )

            # Stash every related entity next to the unsaved dataset so the
            # FK/M2M relations can be wired up after bulk_create assigns PKs.
            dataset_info = {
                'dataset': dataset,
                'component1': component_entity1,
                'component2': component_entity2,
                'property1': property_entity1,
                'property1_unit': property_unit_entity1,
                'property2': property_entity2,
                'property2_unit': property_unit_entity2,
                'variable1': variable_entity1,
                'variable1_unit': variable_unit_entity1,
                'variable2': variable_entity2,
                'variable2_unit': variable_unit_entity2,
                'phases': phase_entity_list,
                'data_points': group_data["data_points"]
            }

            datasets_to_create.append(dataset_info)

        except Exception as e:
            # Best-effort import: report the failing group and keep going so
            # one bad group does not abort the whole batch.
            print(f"Error preparing group {group_key}: {str(e)}")
            traceback.print_exc()
            error_count += 1
            continue

    # Persist everything in one transaction: either the whole batch of
    # datasets (with relations and data points) lands, or none of it does.
    with transaction.atomic():
        # Bulk-insert all dataset rows in a single query.
        # NOTE(review): the relation wiring below relies on bulk_create
        # populating each object's primary key. That is guaranteed only on
        # backends that return inserted IDs (e.g. PostgreSQL) — confirm for
        # the deployment's database backend.
        created_datasets = ExpDataset.objects.bulk_create(
            [d['dataset'] for d in datasets_to_create])

        # Wire up relations for each freshly created dataset.
        for dataset, dataset_info in tqdm(zip(created_datasets, datasets_to_create), 
                                   total=len(datasets_to_create),
                                   desc="Processing datasets"):
            # Attach phases (many-to-many).
            if dataset_info['phases']:
                dataset.phases.set(dataset_info['phases'])

            # Link component 1 and component 2 through the mapping table.
            ExpDatasetComponentMap.objects.create(
                exp_dataset=dataset,
                component=dataset_info['component1']
            )
            ExpDatasetComponentMap.objects.create(
                exp_dataset=dataset,
                component=dataset_info['component2']
            )

            # Property-1 map — created only when both the property and its
            # unit were resolved for this group; stays None otherwise so the
            # data-point loop below can skip its readings.
            property_map1 = None
            if dataset_info['property1'] and dataset_info['property1_unit']:
                property_map1 = ExpDatasetPropertyMap.objects.create(
                    exp_dataset=dataset,
                    property=dataset_info['property1'],
                    unit=dataset_info['property1_unit'],
                    measurement_error_percent=False,
                )
                property_map1.prop_component.set([dataset_info['component1'], dataset_info['component2']])

            # Property-2 map — same pattern as property 1.
            property_map2 = None
            if dataset_info['property2'] and dataset_info['property2_unit']:
                property_map2 = ExpDatasetPropertyMap.objects.create(
                    exp_dataset=dataset,
                    property=dataset_info['property2'],
                    unit=dataset_info['property2_unit'],
                    measurement_error_percent=False,
                )
                property_map2.prop_component.set([dataset_info['component1'], dataset_info['component2']])

            # Variable-1 map — created only when variable and unit both exist.
            variable_map1 = None
            if dataset_info['variable1'] and dataset_info['variable1_unit']:
                variable_map1 = ExpDatasetVariableMap.objects.create(
                    exp_dataset=dataset,
                    variable=dataset_info['variable1'],
                    unit=dataset_info['variable1_unit'],
                    measurement_error_percent=False,
                )

            # Variable-2 map — same pattern as variable 1.
            variable_map2 = None
            if dataset_info['variable2'] and dataset_info['variable2_unit']:
                variable_map2 = ExpDatasetVariableMap.objects.create(
                    exp_dataset=dataset,
                    variable=dataset_info['variable2'],
                    unit=dataset_info['variable2_unit'],
                    measurement_error_percent=False,
                )

            # Build the data points and their per-point variable/property
            # readings in memory; each list is bulk-inserted after the loop.
            datapoint_entity_list = []
            datapoint_property_list = []
            datapoint_variable_list = []

            for point_item in dataset_info['data_points']:
                datapoint_entity = DataPoint()
                datapoint_entity_list.append(datapoint_entity)

                # Variable-1 reading — recorded only when the map exists and
                # the value is truthy.
                # NOTE(review): the truthiness tests here (and in the three
                # branches below) also skip legitimate zero values (0 / 0.0)
                # and coerce a zero measurement error to None — confirm
                # whether zeros can occur in the source tables.
                if point_item["Variable1Value"] and variable_map1:
                    datapoint_variable_entity1 = DataPointExpDatasetVariableMap(
                        data_point=datapoint_entity,
                        exp_dataset_variable_map=variable_map1,
                        value=point_item["Variable1Value"],
                        measurement_error_value=point_item["Variable1Error"] if point_item["Variable1Error"] else None,
                    )
                    datapoint_variable_list.append(datapoint_variable_entity1)

                # Variable-2 reading — same pattern as variable 1.
                if point_item["Variable2Value"] and variable_map2:
                    datapoint_variable_entity2 = DataPointExpDatasetVariableMap(
                        data_point=datapoint_entity,
                        exp_dataset_variable_map=variable_map2,
                        value=point_item["Variable2Value"],
                        measurement_error_value=point_item["Variable2Error"] if point_item["Variable2Error"] else None,
                    )
                    datapoint_variable_list.append(datapoint_variable_entity2)

                # Property-1 reading — only when the map exists and value is truthy.
                if point_item["Prop1Value"] and property_map1:
                    datapoint_property_entity1 = DataPointExpDatasetPropertyMap(
                        data_point=datapoint_entity,
                        exp_dataset_property_map=property_map1,
                        value=point_item["Prop1Value"],
                        measurement_error_value=point_item["Error_prop1"] if point_item["Error_prop1"] else None,
                    )
                    datapoint_property_list.append(datapoint_property_entity1)

                # Property-2 reading — same pattern as property 1.
                if point_item["Prop2Value"] and property_map2:
                    datapoint_property_entity2 = DataPointExpDatasetPropertyMap(
                        data_point=datapoint_entity,
                        exp_dataset_property_map=property_map2,
                        value=point_item["Prop2Value"],
                        measurement_error_value=point_item["Error_prop2"] if point_item["Error_prop2"] else None,
                    )
                    datapoint_property_list.append(datapoint_property_entity2)

            # Bulk-insert the points first (their PKs are needed by the
            # reading rows that reference them), then the readings.
            DataPoint.objects.bulk_create(datapoint_entity_list)

            if datapoint_variable_list:
                DataPointExpDatasetVariableMap.objects.bulk_create(datapoint_variable_list)

            if datapoint_property_list:
                DataPointExpDatasetPropertyMap.objects.bulk_create(datapoint_property_list)

            # Attach the points to the dataset (many-to-many).
            dataset.data_points.set(datapoint_entity_list)

    print(f"Dechema binary data import finished. Total errors: {error_count}")
def init_dechema_content():
    """Run the full Dechema import pipeline in order.

    Executes the file-1 batch conversion (``batch_convert``), then the
    file-2 conversion and load steps (``convert_file_2_to_json`` and
    ``init_dechema_2_data`` — presumably defined elsewhere in this module;
    confirm they exist before calling). Prints a completion message
    ("dechema data import finished") when done.
    """
    batch_convert()
    convert_file_2_to_json()
    init_dechema_2_data()
    print("dechema数据入库完成")
