from pyNastran.op2.op2 import read_op2, OP2
import psycopg2
from psycopg2 import sql
import json
import hashlib
import os
from io import StringIO
import csv
import time

# Result components whose shell elements report values for two layers
# per node (tagged 'z1'/'z2' during persistence); every other component
# is treated as single-layer (nlayers = 1).
shell_element_result = ['stress.ctria3_stress', 'stress.cquad4_stress', 'strain.ctria3_strain', 'strain.cquad4_strain']

# Module-level PostgreSQL connection shared by every function below.
# NOTE(review): credentials are hard-coded in source — move them to
# environment variables or a secrets store before wider distribution.
conn = psycopg2.connect(
    host='osdev.xuelangyun.com',
    port=30105,
    dbname='sh_sim_structure',
    user='postgres',
    password='Postgres@xuelang'
)


def calculate_md5(file_path):
    """Return the hex MD5 digest of the file at *file_path*.

    The file is read in 4 KiB chunks so arbitrarily large files can be
    fingerprinted without loading them fully into memory.
    """
    digest = hashlib.md5()
    with open(file_path, 'rb') as stream:
        while True:
            chunk = stream.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()


def check_op2_existed(op2_file_hash):
    """Return True when an op2 file with this MD5 fingerprint is already stored.

    Looks up ``op2_file_resource`` by the ``fp`` column; on a hit the
    stored row id is printed for traceability.
    """
    query_sql = 'select * from "op2_file_resource" where fp = %s'
    with conn.cursor() as cur:
        cur.execute(query_sql, (op2_file_hash,))
        row = cur.fetchone()
        if row is not None:
            print("op2 file is existed in db, id = ", row[0])
            return True
    return False


def insert_op2_file_resource(op2_file, op2_file_hash):
    """Parse *op2_file* and register its metadata in ``op2_file_resource``.

    Stores the file name, its MD5 fingerprint (``fp``), a JSON summary of
    subcases / element types / result types, and a JSON map describing the
    per-component result array shapes.

    Parameters
    ----------
    op2_file : str
        Path to the Nastran .op2 file.
    op2_file_hash : str
        MD5 hex digest of the file, used as the ``fp`` column value.
    """
    op2_file_name = os.path.basename(op2_file)
    print("op2 file name: ", op2_file_name)
    op2_reader = read_op2(op2_file)
    subcase_keys = [int(key) for key in op2_reader.subcase_key.keys()]
    print("subcase keys:", subcase_keys)

    result_names = op2_reader.result_names
    print(result_names)
    element_types = set()
    result_types = set()
    for result_name in result_names:
        print(result_name)
        # Dotted names look like "<result_type>.<element>_<suffix>",
        # e.g. "stress.ctria3_stress".
        if "." in result_name:
            parts = result_name.split(".")
            element_types.add(parts[1].split("_")[0])
            result_types.add(parts[0])
        else:
            result_types.add(result_name)
    print("element types:", element_types)
    print("result types:", result_types)
    op2_attrs_str = json.dumps({
        "subcases": subcase_keys,
        "element_types": list(element_types),
        "result_types": list(result_types)
    })

    element_results = {}
    # Fix: get_result() used to be called once per (subcase, component)
    # pair even though the dict entry is keyed by component only and is
    # simply overwritten each subcase; hoist the lookup so it runs once
    # per component. Final dict contents are unchanged (last subcase wins).
    for component in result_names:
        results_by_subcase = op2_reader.get_result(component)
        for subcase_key in subcase_keys:
            result = results_by_subcase[subcase_key]
            entry = {
                "data.shape": result.data.shape
            }
            if hasattr(result, "element_node"):
                entry["element_node.shape"] = result.element_node.shape
            if hasattr(result, "nnodes"):
                entry["nnodes"] = int(result.nnodes)
            # Shell components carry two layers (z1/z2) per node.
            entry["nlayers"] = 2 if component in shell_element_result else 1
            if hasattr(result, "nelements"):
                entry["nelements"] = int(result.nelements)
            if hasattr(result, "nnodes_per_element"):
                entry["nnodes_per_element"] = int(result.nnodes_per_element)
                # The nelements value reported by pyNastran is unreliable;
                # recompute it from the data shape when nnodes_per_element
                # is available.
                entry["nelements"] = int(
                    entry["data.shape"][1] / entry["nnodes_per_element"] /
                    entry["nlayers"])
            element_results[component] = entry
    print("element results:", element_results)
    insert_sql = 'insert into "op2_file_resource" (file_name, fp, attrs, element_result_count) values (%s, %s, %s, %s)'
    with conn.cursor() as cur:
        cur.execute(insert_sql, (op2_file_name, op2_file_hash, op2_attrs_str, json.dumps(element_results)))
    conn.commit()


def check_result_table(table_name):
    """Create the per-op2 result table *table_name* unless it already exists.

    The table holds one row per element: ``eid`` (bigint),
    ``element_type`` (varchar) and a ``data`` jsonb document with the
    per-node result values.
    """
    with conn.cursor() as cur:
        # Probe information_schema for an existing table with this name.
        cur.execute("""
            SELECT EXISTS (
                SELECT 1
                FROM information_schema.tables
                WHERE table_schema = 'public'
                AND table_name = %s
            )
        """, (table_name,))
        if cur.fetchone()[0]:
            print(f"表 {table_name} 已存在。")
            return
        # The table name is built from our own hash/type naming scheme,
        # but it is still injected safely via psycopg2 identifier
        # composition rather than string formatting.
        create_table_query = sql.SQL("""
                CREATE TABLE {}
                (
                    eid bigint,
                    element_type VARCHAR,
                    data jsonb
                )
            """).format(sql.Identifier(table_name))
        cur.execute(create_table_query)
        conn.commit()
        print(f"表 {table_name} 已创建。")


def insert_op2_stress_strain(op2_file, op2_file_hash, include_results=None, batch=10000):
    """Persist per-element stress/strain node data into per-type tables.

    For each result component the rows of the FIRST subcase drive the
    iteration; the remaining subcases are read at the same row index.
    Every ``nnodes_per_element * nlayers`` rows are grouped into one
    element and written as a jsonb document to ``op2_<hash>_<type>``.

    Parameters
    ----------
    op2_file : str
        Path to the Nastran .op2 file.
    op2_file_hash : str
        MD5 hex digest of the file; part of the target table names.
    include_results : list[str] | None
        Result families to load (default ``["stress", "strain"]``).
        Default changed from a mutable list literal to ``None``.
    batch : int
        Number of element rows buffered before each COPY flush.
    """
    if include_results is None:
        include_results = ["stress", "strain"]

    def result_attrs(res):
        # These flags are properties of the result object, invariant over
        # the element loop — build the dict once per result object.
        return {
            "is_von_mises": res.is_von_mises,
            "is_max_shear": res.is_max_shear,
            "is_fiber_distance": res.is_fiber_distance,
            "is_curvature": res.is_curvature
        }

    for result in include_results:
        check_result_table(f'op2_{op2_file_hash}_{result}')
    op2_reader = read_op2(op2_file, include_results=include_results)
    result_names = op2_reader.result_names
    print("result_names:", result_names)
    subcase_keys = list(op2_reader.subcase_key.keys())
    print("subcase keys:", subcase_keys)
    insert_datas = []

    for result_name in result_names:
        start_time = time.time()
        result_type = result_name[:result_name.index(".")]
        element_type = result_name[result_name.index(".") + 1:result_name.index("_")]
        results = op2_reader.get_result(result_name)
        # The first subcase supplies the element/node layout.
        first_key = subcase_keys[0]
        result = results[first_key]
        print(f'element type: {element_type}, result shape: {result.data.shape}')
        # Guard moved before the nnodes_per_element access so components
        # without element_node are skipped instead of raising.
        if not hasattr(result, "element_node"):
            print(f'{result_name} has no element_node')
            continue
        nnodes_per_element = result.nnodes_per_element
        is_shell = result_name in shell_element_result
        nlayers = 2 if is_shell else 1
        group_size = nnodes_per_element * nlayers
        first_attrs = result_attrs(result)
        # Resolve the remaining subcases once, not once per element row.
        other_subcases = [
            (key, results[key], results[key].element_node, result_attrs(results[key]))
            for key in subcase_keys[1:]
        ]

        node_datas = []
        for index, item in enumerate(result.element_node):
            node_data = {
                'subcase': int(first_key),
                'element_node': int(item[1]),
                'data': result.data[0][index].tolist(),
                'attrs': first_attrs}
            if is_shell:
                node_data['location'] = 'z1' if index % 2 == 0 else 'z2'
            node_datas.append(node_data)
            # Append the same row index from every other subcase.
            for other_key, other_result, other_element_node, other_attrs in other_subcases:
                other_node_data = {
                    'subcase': int(other_key),
                    # Bug fix: cast to int — numpy integers are not JSON
                    # serializable and made json.dumps raise below.
                    'element_node': int(other_element_node[index][1]),
                    'data': other_result.data[0][index].tolist(),
                    'attrs': other_attrs
                }
                if is_shell:
                    other_node_data['location'] = 'z1' if index % 2 == 0 else 'z2'
                node_datas.append(other_node_data)
            # A full element's worth of node rows collected → emit one row.
            if (index + 1) % group_size == 0:
                insert_datas.append((int(item[0]), element_type, json.dumps(node_datas)))
                node_datas.clear()

                if len(insert_datas) >= batch:
                    batch_insert(insert_datas, f"op2_{op2_file_hash}_{result_type}")
                    insert_datas.clear()
        if len(insert_datas) > 0:
            batch_insert(insert_datas, f"op2_{op2_file_hash}_{result_type}")
            insert_datas.clear()

        elapsed_time = time.time() - start_time
        print(f'{result_name}持久化运行时间: {elapsed_time:.6f} 秒')


def escape_copy_field(value):
    """Escape one field for PostgreSQL COPY ``text`` format.

    COPY text rows are tab-separated and newline-terminated, so literal
    backslashes, tabs, newlines and carriage returns inside a field must
    be backslash-escaped or the row is corrupted / rejected by the server.
    """
    return (str(value)
            .replace("\\", "\\\\")
            .replace("\t", "\\t")
            .replace("\n", "\\n")
            .replace("\r", "\\r"))


def batch_insert(data, table):
    """Bulk-load rows into *table* using COPY FROM STDIN.

    Parameters
    ----------
    data : list[tuple]
        Rows of ``(eid, element_type, json_data)``.
    table : str
        Target table name (created beforehand by check_result_table).

    Bug fix: the previous implementation serialized rows with csv.writer
    (quotechar="'", QUOTE_MINIMAL), whose quoting rules do not match
    PostgreSQL's COPY text format — JSON payloads containing tabs,
    newlines or backslashes would corrupt the stream. Fields are now
    escaped explicitly per the COPY text-format rules.
    """
    buffer = StringIO()
    for row in data:
        buffer.write("\t".join(escape_copy_field(field) for field in row))
        buffer.write("\n")
    buffer.seek(0)
    with conn.cursor() as cur:
        cur.copy_from(buffer, table, columns=('eid', 'element_type', 'data'))
    conn.commit()


def op2_file_structure(op2_file):
    """End-to-end structuring pipeline for a single .op2 file.

    Fingerprints the file with MD5, skips it when it is already stored,
    otherwise registers its metadata and persists its strain results.
    """
    # Fingerprint the file so duplicates are detected by content.
    file_hash = calculate_md5(op2_file)
    print("op2 file hash: ", file_hash)
    # Bail out early when this file was already structured into the DB.
    if check_op2_existed(file_hash):
        print("op2 file is existed")
        return
    insert_op2_file_resource(op2_file, file_hash)
    insert_op2_stress_strain(op2_file, file_hash, include_results=["strain"])


if __name__ == '__main__':
    # Raw string: the original literal contained invalid escape sequences
    # ('\o', '\A') that trigger SyntaxWarning on modern Python and will
    # become errors; the raw string has the identical value.
    op2_file = r'E:\op\ARJ21_SE_1130442.op2'
    op2_file_structure(op2_file)