{
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "-38edKrF8ylR"
      },
      "source": [
        "# 官方数据集下载\n",
        "（所有外部数据集需通过wget在本模块中导入，在其他部分导入可能导致失去成绩）"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 1,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "F5N6wkdR75nJ",
        "outputId": "1df85a0d-5958-4a1d-9a20-a5acd16745da"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "--2024-09-05 17:39:53--  https://drive.usercontent.google.com/download?id=1Y0ejShdcx6YiaE3jJ0yxcOTKxirR-MAh&export=download&authuser=0&confirm=t&uuid=90e67168-c423-4396-a7ac-4cfcb1f2cc01&at=APZUnTXTP3YsMMcYVNsvI5-B_UtN:1720979862404\n",
            "Resolving drive.usercontent.google.com (drive.usercontent.google.com)... 172.217.203.132, 2607:f8b0:400c:c07::84\n",
            "Connecting to drive.usercontent.google.com (drive.usercontent.google.com)|172.217.203.132|:443... connected.\n",
            "HTTP request sent, awaiting response... 200 OK\n",
            "Length: 1089154958 (1.0G) [application/octet-stream]\n",
            "Saving to: ‘CIKM_Training.zip’\n",
            "\n",
            "CIKM_Training.zip   100%[===================>]   1.01G  67.4MB/s    in 20s     \n",
            "\n",
            "2024-09-05 17:40:15 (52.2 MB/s) - ‘CIKM_Training.zip’ saved [1089154958/1089154958]\n",
            "\n"
          ]
        }
      ],
      "source": [
        "# SECURITY: the original cell embedded live Google-account session cookies\n",
        "# (SID / __Secure-*PSID / SIDCC, ...) inside a wget command — a credential\n",
        "# leak that also corrupted the notebook JSON with a raw newline. The download\n",
        "# is rewritten cookie-free via gdown (preinstalled on Colab).\n",
        "!gdown 'https://drive.google.com/uc?id=1Y0ejShdcx6YiaE3jJ0yxcOTKxirR-MAh' -O CIKM_Training.zip"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 2,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "_T7GT7nZZYHG",
        "outputId": "0bc413db-01f2-4b6a-c641-c382b16bbe1b"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "--2024-09-05 17:40:15--  https://drive.usercontent.google.com/download?id=17M0nVsBdeiZxMnFVhKMuq89APc9pbA91&export=download&authuser=0&confirm=t&uuid=de518810-0b86-44da-9924-b3044aedae65&at=APZUnTWx_R5StqP3iGT6U7FBhIm-:1720979898499\n",
            "Resolving drive.usercontent.google.com (drive.usercontent.google.com)... 172.217.203.132, 2607:f8b0:400c:c07::84\n",
            "Connecting to drive.usercontent.google.com (drive.usercontent.google.com)|172.217.203.132|:443... connected.\n",
            "HTTP request sent, awaiting response... 200 OK\n",
            "Length: 94200676 (90M) [application/octet-stream]\n",
            "Saving to: ‘CIKM_Test.zip’\n",
            "\n",
            "CIKM_Test.zip       100%[===================>]  89.84M  36.7MB/s    in 2.4s    \n",
            "\n",
            "2024-09-05 17:40:19 (36.7 MB/s) - ‘CIKM_Test.zip’ saved [94200676/94200676]\n",
            "\n"
          ]
        }
      ],
      "source": [
        "# SECURITY: as with the training-set cell, the original wget command embedded\n",
        "# live Google session cookies (credential leak) and broke the notebook JSON\n",
        "# with a raw newline. Replaced with a cookie-free gdown download.\n",
        "!gdown 'https://drive.google.com/uc?id=17M0nVsBdeiZxMnFVhKMuq89APc9pbA91' -O CIKM_Test.zip"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 3,
      "metadata": {
        "id": "aPTwHZWpaJ-1"
      },
      "outputs": [],
      "source": [
        "# Extract both downloaded archives; -q suppresses unzip's per-file listing.\n",
        "!unzip -q CIKM_Training.zip\n",
        "!unzip -q CIKM_Test.zip"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "XxaSCHvUMm4X"
      },
      "source": [
        "# 常见工具安装与导入\n",
        "（所有外部代码和包需要在本模块导入）"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 4,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "kD437pLQMvOC",
        "outputId": "9ca52c4e-8d1b-49cd-bf30-98a377e70da1"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "Collecting open3d\n",
            "  Downloading open3d-0.18.0-cp310-cp310-manylinux_2_27_x86_64.whl.metadata (4.2 kB)\n",
            "Requirement already satisfied: numpy>=1.18.0 in /usr/local/lib/python3.10/dist-packages (from open3d) (1.26.4)\n",
            "Collecting dash>=2.6.0 (from open3d)\n",
            "  Downloading dash-2.18.0-py3-none-any.whl.metadata (10 kB)\n",
            "Requirement already satisfied: werkzeug>=2.2.3 in /usr/local/lib/python3.10/dist-packages (from open3d) (3.0.4)\n",
            "Requirement already satisfied: nbformat>=5.7.0 in /usr/local/lib/python3.10/dist-packages (from open3d) (5.10.4)\n",
            "Collecting configargparse (from open3d)\n",
            "  Downloading ConfigArgParse-1.7-py3-none-any.whl.metadata (23 kB)\n",
            "Collecting ipywidgets>=8.0.4 (from open3d)\n",
            "  Downloading ipywidgets-8.1.5-py3-none-any.whl.metadata (2.3 kB)\n",
            "Collecting addict (from open3d)\n",
            "  Downloading addict-2.4.0-py3-none-any.whl.metadata (1.0 kB)\n",
            "Requirement already satisfied: pillow>=9.3.0 in /usr/local/lib/python3.10/dist-packages (from open3d) (9.4.0)\n",
            "Requirement already satisfied: matplotlib>=3 in /usr/local/lib/python3.10/dist-packages (from open3d) (3.7.1)\n",
            "Requirement already satisfied: pandas>=1.0 in /usr/local/lib/python3.10/dist-packages (from open3d) (2.1.4)\n",
            "Requirement already satisfied: pyyaml>=5.4.1 in /usr/local/lib/python3.10/dist-packages (from open3d) (6.0.2)\n",
            "Requirement already satisfied: scikit-learn>=0.21 in /usr/local/lib/python3.10/dist-packages (from open3d) (1.3.2)\n",
            "Requirement already satisfied: tqdm in /usr/local/lib/python3.10/dist-packages (from open3d) (4.66.5)\n",
            "Collecting pyquaternion (from open3d)\n",
            "  Downloading pyquaternion-0.9.9-py3-none-any.whl.metadata (1.4 kB)\n",
            "Requirement already satisfied: Flask<3.1,>=1.0.4 in /usr/local/lib/python3.10/dist-packages (from dash>=2.6.0->open3d) (2.2.5)\n",
            "Requirement already satisfied: plotly>=5.0.0 in /usr/local/lib/python3.10/dist-packages (from dash>=2.6.0->open3d) (5.15.0)\n",
            "Collecting dash-html-components==2.0.0 (from dash>=2.6.0->open3d)\n",
            "  Downloading dash_html_components-2.0.0-py3-none-any.whl.metadata (3.8 kB)\n",
            "Collecting dash-core-components==2.0.0 (from dash>=2.6.0->open3d)\n",
            "  Downloading dash_core_components-2.0.0-py3-none-any.whl.metadata (2.9 kB)\n",
            "Collecting dash-table==5.0.0 (from dash>=2.6.0->open3d)\n",
            "  Downloading dash_table-5.0.0-py3-none-any.whl.metadata (2.4 kB)\n",
            "Requirement already satisfied: importlib-metadata in /usr/local/lib/python3.10/dist-packages (from dash>=2.6.0->open3d) (8.4.0)\n",
            "Requirement already satisfied: typing-extensions>=4.1.1 in /usr/local/lib/python3.10/dist-packages (from dash>=2.6.0->open3d) (4.12.2)\n",
            "Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from dash>=2.6.0->open3d) (2.32.3)\n",
            "Collecting retrying (from dash>=2.6.0->open3d)\n",
            "  Downloading retrying-1.3.4-py3-none-any.whl.metadata (6.9 kB)\n",
            "Requirement already satisfied: nest-asyncio in /usr/local/lib/python3.10/dist-packages (from dash>=2.6.0->open3d) (1.6.0)\n",
            "Requirement already satisfied: setuptools in /usr/local/lib/python3.10/dist-packages (from dash>=2.6.0->open3d) (71.0.4)\n",
            "Collecting comm>=0.1.3 (from ipywidgets>=8.0.4->open3d)\n",
            "  Downloading comm-0.2.2-py3-none-any.whl.metadata (3.7 kB)\n",
            "Requirement already satisfied: ipython>=6.1.0 in /usr/local/lib/python3.10/dist-packages (from ipywidgets>=8.0.4->open3d) (7.34.0)\n",
            "Requirement already satisfied: traitlets>=4.3.1 in /usr/local/lib/python3.10/dist-packages (from ipywidgets>=8.0.4->open3d) (5.7.1)\n",
            "Collecting widgetsnbextension~=4.0.12 (from ipywidgets>=8.0.4->open3d)\n",
            "  Downloading widgetsnbextension-4.0.13-py3-none-any.whl.metadata (1.6 kB)\n",
            "Requirement already satisfied: jupyterlab-widgets~=3.0.12 in /usr/local/lib/python3.10/dist-packages (from ipywidgets>=8.0.4->open3d) (3.0.13)\n",
            "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3->open3d) (1.3.0)\n",
            "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3->open3d) (0.12.1)\n",
            "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3->open3d) (4.53.1)\n",
            "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3->open3d) (1.4.5)\n",
            "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3->open3d) (24.1)\n",
            "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3->open3d) (3.1.4)\n",
            "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3->open3d) (2.8.2)\n",
            "Requirement already satisfied: fastjsonschema>=2.15 in /usr/local/lib/python3.10/dist-packages (from nbformat>=5.7.0->open3d) (2.20.0)\n",
            "Requirement already satisfied: jsonschema>=2.6 in /usr/local/lib/python3.10/dist-packages (from nbformat>=5.7.0->open3d) (4.23.0)\n",
            "Requirement already satisfied: jupyter-core!=5.0.*,>=4.12 in /usr/local/lib/python3.10/dist-packages (from nbformat>=5.7.0->open3d) (5.7.2)\n",
            "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas>=1.0->open3d) (2024.1)\n",
            "Requirement already satisfied: tzdata>=2022.1 in /usr/local/lib/python3.10/dist-packages (from pandas>=1.0->open3d) (2024.1)\n",
            "Requirement already satisfied: scipy>=1.5.0 in /usr/local/lib/python3.10/dist-packages (from scikit-learn>=0.21->open3d) (1.13.1)\n",
            "Requirement already satisfied: joblib>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from scikit-learn>=0.21->open3d) (1.4.2)\n",
            "Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from scikit-learn>=0.21->open3d) (3.5.0)\n",
            "Requirement already satisfied: MarkupSafe>=2.1.1 in /usr/local/lib/python3.10/dist-packages (from werkzeug>=2.2.3->open3d) (2.1.5)\n",
            "Requirement already satisfied: Jinja2>=3.0 in /usr/local/lib/python3.10/dist-packages (from Flask<3.1,>=1.0.4->dash>=2.6.0->open3d) (3.1.4)\n",
            "Requirement already satisfied: itsdangerous>=2.0 in /usr/local/lib/python3.10/dist-packages (from Flask<3.1,>=1.0.4->dash>=2.6.0->open3d) (2.2.0)\n",
            "Requirement already satisfied: click>=8.0 in /usr/local/lib/python3.10/dist-packages (from Flask<3.1,>=1.0.4->dash>=2.6.0->open3d) (8.1.7)\n",
            "Collecting jedi>=0.16 (from ipython>=6.1.0->ipywidgets>=8.0.4->open3d)\n",
            "  Using cached jedi-0.19.1-py2.py3-none-any.whl.metadata (22 kB)\n",
            "Requirement already satisfied: decorator in /usr/local/lib/python3.10/dist-packages (from ipython>=6.1.0->ipywidgets>=8.0.4->open3d) (4.4.2)\n",
            "Requirement already satisfied: pickleshare in /usr/local/lib/python3.10/dist-packages (from ipython>=6.1.0->ipywidgets>=8.0.4->open3d) (0.7.5)\n",
            "Requirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from ipython>=6.1.0->ipywidgets>=8.0.4->open3d) (3.0.47)\n",
            "Requirement already satisfied: pygments in /usr/local/lib/python3.10/dist-packages (from ipython>=6.1.0->ipywidgets>=8.0.4->open3d) (2.16.1)\n",
            "Requirement already satisfied: backcall in /usr/local/lib/python3.10/dist-packages (from ipython>=6.1.0->ipywidgets>=8.0.4->open3d) (0.2.0)\n",
            "Requirement already satisfied: matplotlib-inline in /usr/local/lib/python3.10/dist-packages (from ipython>=6.1.0->ipywidgets>=8.0.4->open3d) (0.1.7)\n",
            "Requirement already satisfied: pexpect>4.3 in /usr/local/lib/python3.10/dist-packages (from ipython>=6.1.0->ipywidgets>=8.0.4->open3d) (4.9.0)\n",
            "Requirement already satisfied: attrs>=22.2.0 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=2.6->nbformat>=5.7.0->open3d) (24.2.0)\n",
            "Requirement already satisfied: jsonschema-specifications>=2023.03.6 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=2.6->nbformat>=5.7.0->open3d) (2023.12.1)\n",
            "Requirement already satisfied: referencing>=0.28.4 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=2.6->nbformat>=5.7.0->open3d) (0.35.1)\n",
            "Requirement already satisfied: rpds-py>=0.7.1 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=2.6->nbformat>=5.7.0->open3d) (0.20.0)\n",
            "Requirement already satisfied: platformdirs>=2.5 in /usr/local/lib/python3.10/dist-packages (from jupyter-core!=5.0.*,>=4.12->nbformat>=5.7.0->open3d) (4.2.2)\n",
            "Requirement already satisfied: tenacity>=6.2.0 in /usr/local/lib/python3.10/dist-packages (from plotly>=5.0.0->dash>=2.6.0->open3d) (9.0.0)\n",
            "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.7->matplotlib>=3->open3d) (1.16.0)\n",
            "Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.10/dist-packages (from importlib-metadata->dash>=2.6.0->open3d) (3.20.1)\n",
            "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests->dash>=2.6.0->open3d) (3.3.2)\n",
            "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->dash>=2.6.0->open3d) (3.8)\n",
            "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->dash>=2.6.0->open3d) (2.0.7)\n",
            "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->dash>=2.6.0->open3d) (2024.8.30)\n",
            "Requirement already satisfied: parso<0.9.0,>=0.8.3 in /usr/local/lib/python3.10/dist-packages (from jedi>=0.16->ipython>=6.1.0->ipywidgets>=8.0.4->open3d) (0.8.4)\n",
            "Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.10/dist-packages (from pexpect>4.3->ipython>=6.1.0->ipywidgets>=8.0.4->open3d) (0.7.0)\n",
            "Requirement already satisfied: wcwidth in /usr/local/lib/python3.10/dist-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython>=6.1.0->ipywidgets>=8.0.4->open3d) (0.2.13)\n",
            "Downloading open3d-0.18.0-cp310-cp310-manylinux_2_27_x86_64.whl (399.7 MB)\n",
            "\u001b[2K   \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m399.7/399.7 MB\u001b[0m \u001b[31m1.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hDownloading dash-2.18.0-py3-none-any.whl (7.5 MB)\n",
            "\u001b[2K   \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m7.5/7.5 MB\u001b[0m \u001b[31m61.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hDownloading dash_core_components-2.0.0-py3-none-any.whl (3.8 kB)\n",
            "Downloading dash_html_components-2.0.0-py3-none-any.whl (4.1 kB)\n",
            "Downloading dash_table-5.0.0-py3-none-any.whl (3.9 kB)\n",
            "Downloading ipywidgets-8.1.5-py3-none-any.whl (139 kB)\n",
            "\u001b[2K   \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m139.8/139.8 kB\u001b[0m \u001b[31m9.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hDownloading addict-2.4.0-py3-none-any.whl (3.8 kB)\n",
            "Downloading ConfigArgParse-1.7-py3-none-any.whl (25 kB)\n",
            "Downloading pyquaternion-0.9.9-py3-none-any.whl (14 kB)\n",
            "Downloading comm-0.2.2-py3-none-any.whl (7.2 kB)\n",
            "Downloading widgetsnbextension-4.0.13-py3-none-any.whl (2.3 MB)\n",
            "\u001b[2K   \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.3/2.3 MB\u001b[0m \u001b[31m51.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hDownloading retrying-1.3.4-py3-none-any.whl (11 kB)\n",
            "Using cached jedi-0.19.1-py2.py3-none-any.whl (1.6 MB)\n",
            "Installing collected packages: dash-table, dash-html-components, dash-core-components, addict, widgetsnbextension, retrying, pyquaternion, jedi, configargparse, comm, ipywidgets, dash, open3d\n",
            "  Attempting uninstall: widgetsnbextension\n",
            "    Found existing installation: widgetsnbextension 3.6.8\n",
            "    Uninstalling widgetsnbextension-3.6.8:\n",
            "      Successfully uninstalled widgetsnbextension-3.6.8\n",
            "  Attempting uninstall: ipywidgets\n",
            "    Found existing installation: ipywidgets 7.7.1\n",
            "    Uninstalling ipywidgets-7.7.1:\n",
            "      Successfully uninstalled ipywidgets-7.7.1\n",
            "Successfully installed addict-2.4.0 comm-0.2.2 configargparse-1.7 dash-2.18.0 dash-core-components-2.0.0 dash-html-components-2.0.0 dash-table-5.0.0 ipywidgets-8.1.5 jedi-0.19.1 open3d-0.18.0 pyquaternion-0.9.9 retrying-1.3.4 widgetsnbextension-4.0.13\n",
            "Collecting pywavefront\n",
            "  Downloading PyWavefront-1.3.3-py3-none-any.whl.metadata (8.3 kB)\n",
            "Downloading PyWavefront-1.3.3-py3-none-any.whl (28 kB)\n",
            "Installing collected packages: pywavefront\n",
            "Successfully installed pywavefront-1.3.3\n",
            "Collecting trimesh\n",
            "  Downloading trimesh-4.4.9-py3-none-any.whl.metadata (18 kB)\n",
            "Requirement already satisfied: numpy>=1.20 in /usr/local/lib/python3.10/dist-packages (from trimesh) (1.26.4)\n",
            "Downloading trimesh-4.4.9-py3-none-any.whl (700 kB)\n",
            "\u001b[2K   \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m700.1/700.1 kB\u001b[0m \u001b[31m9.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hInstalling collected packages: trimesh\n",
            "Successfully installed trimesh-4.4.9\n"
          ]
        }
      ],
      "source": [
        "# Install every third-party dependency in this one cell, using %pip (instead\n",
        "# of !pip3) so the packages target the running kernel's environment.\n",
        "# NOTE(review): the recorded output of this cell shows open3d, pywavefront and\n",
        "# trimesh being installed although they were missing from the source — the\n",
        "# source had apparently been edited after the run; they are restored here.\n",
        "%pip install -q tqdm pandas numpy torch_geometric scikit-learn transformers termcolor openmesh vtk open3d pywavefront trimesh\n",
        "%pip install -q flash-attn==2.5.9.post1\n",
        "%pip install -q torch_scatter torch_sparse torch_cluster -f https://data.pyg.org/whl/torch-2.3.0+cu121.html"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "## 下载Google 最优权重"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# Checkpoint / label sources (Google Drive share links):\n",
        "# https://drive.google.com/file/d/1A3s3C6jz7wK5f10DSEbolts7NJ-C0h2q/view?usp=drive_link\n",
        "# https://drive.google.com/file/d/1FiKR0VihTInMp1DLxUpXF8zOssaVDV_c/view?usp=drive_link\n",
        "\n",
        "# https://drive.google.com/file/d/10RFdTjC5vx8zcv7P9VOLunPTflwdLFmY/view?usp=drive_link\n",
        "# Download the checkpoint that is best on the speed metric\n",
        "!gdown https://drive.google.com/uc?id=1A3s3C6jz7wK5f10DSEbolts7NJ-C0h2q\n",
        "# Download the checkpoint that is best on the cd (Chamfer distance) metric\n",
        "!gdown https://drive.google.com/uc?id=1FiKR0VihTInMp1DLxUpXF8zOssaVDV_c\n",
        "# Download the cd labels\n",
        "!gdown https://drive.google.com/uc?id=10RFdTjC5vx8zcv7P9VOLunPTflwdLFmY"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "xgKMibfgYehJ"
      },
      "source": [
        "# 代码运行与数据读取样例\n",
        "（所有训练和推理过程需要在本部分完成）"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "RtWKnq1LYlbo"
      },
      "source": [
        "## Dataset1"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "### 压力模型训练"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "#### 1. 加载数据"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# All imports for the pressure-model pipeline, gathered in one cell.\n",
        "import os\n",
        "import numpy as np\n",
        "import pandas as pd\n",
        "import random\n",
        "from tqdm import tqdm\n",
        "\n",
        "# PyTorch Geometric: transforms, loaders, I/O and message-passing layers.\n",
        "import torch_geometric.transforms as T\n",
        "from torch_geometric import seed_everything\n",
        "from torch_geometric.loader import DataLoader\n",
        "from torch_geometric.io import read_ply\n",
        "from torch_geometric.nn import MessagePassing, radius_graph, Linear\n",
        "from torch_geometric.nn import global_max_pool\n",
        "from torch_geometric.nn import PointNetConv, PointTransformerConv\n",
        "\n",
        "import torch\n",
        "from torch.cuda import amp\n",
        "from torch import Tensor\n",
        "from torch.nn import Sequential, ReLU\n",
        "from torch import nn\n",
        "\n",
        "# HuggingFace BERT is used below as a transformer over per-point embeddings.\n",
        "from transformers import BertModel, BertConfig\n",
        "\n",
        "root = ''  # dataset root prefix (empty: paths are relative to the CWD)\n",
        "seed_everything(2024)  # fix RNG seeds for reproducibility\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# IDs of the watertight training meshes; a fixed subset is held out for validation.\n",
        "train_ids = np.loadtxt(f\"Training/Dataset_1/Feature_File/watertight_meshes.txt\", dtype=str)\n",
        "# Hand-picked validation indices (positions into train_ids).\n",
        "valid_index = np.array([ 15,  21,  44,  71,  80,  85,  98, 111, 120, 126, 133, 143, 147,\n",
        "                        149, 154, 161, 164, 168, 169, 198, 222, 223, 231, 238, 251, 258,\n",
        "                        265, 273, 280, 285, 286, 299, 320, 327, 344, 345, 354, 380, 390,\n",
        "                        407, 408, 420, 427, 440, 456, 460, 479, 484, 495, 496])\n",
        "train_index = np.setdiff1d(np.arange(len(train_ids)), valid_index)\n",
        "valid_ids = train_ids[valid_index]\n",
        "train_ids = train_ids[train_index]\n",
        "# Test IDs are parsed from filenames of the form 'mesh_<id>.ply' -> '<id>'.\n",
        "test_ids = os.listdir(f\"Test/Dataset_1/Feature_File\")\n",
        "test_ids = sorted([i[i.find('_')+1:i.find('.')] for i in test_ids if 'mesh_' in i])"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# Normalization statistics for the pressure labels (first value of each line).\n",
        "# NOTE(review): the file is named *_min_std.txt but the first line is used as a\n",
        "# mean and the second as a std — confirm the file really stores mean/std.\n",
        "with open(f'Training/Dataset_1/Feature_File/train_pressure_min_std.txt') as fp:\n",
        "    mean = fp.readline().split(\" \")\n",
        "    std = fp.readline().split(\" \")\n",
        "    mean = float(mean[0])\n",
        "    std = float(std[0])\n",
        "\n",
        "def read_ply_as_data(ids, training=True):\n",
        "    \"\"\"Load meshes as PyG Data graphs; attach normalized pressure labels when training.\"\"\"\n",
        "    data_list = []\n",
        "    for file_id in tqdm(ids):\n",
        "        press = None  # test meshes carry no label\n",
        "        if training:\n",
        "            ply_path = f\"Training/Dataset_1/Feature_File/mesh_{file_id}.ply\"\n",
        "            press_path = f\"Training/Dataset_1/Label_File/press_{file_id}.npy\"\n",
        "            press = np.load(press_path).reshape(-1)\n",
        "            # Drop label entries 16..111 — NOTE(review): presumably padding or\n",
        "            # non-surface values; confirm against the dataset description.\n",
        "            press = np.concatenate((press[0:16], press[112:]), axis=0).reshape(-1)\n",
        "            press = (press - mean) / std  # standardize with dataset-wide stats\n",
        "            press = torch.tensor(press).to(torch.float)\n",
        "        else:\n",
        "            ply_path = f\"Test/Dataset_1/Feature_File/mesh_{file_id}.ply\"\n",
        "\n",
        "        data = read_ply(ply_path)\n",
        "        data.y = press\n",
        "        # Mesh faces -> graph edges, then edge lengths as edge_attr, then center pos.\n",
        "        data = T.FaceToEdge()(data)\n",
        "        data = T.Distance()(data)\n",
        "        data = T.Center()(data)\n",
        "        data_list.append(data)\n",
        "    return data_list"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# Materialize all three splits in memory (each element is a PyG Data graph).\n",
        "train_data = read_ply_as_data(train_ids)\n",
        "valid_data = read_ply_as_data(valid_ids)\n",
        "test_data = read_ply_as_data(test_ids, training=False)\n",
        "# Vertices per mesh — NOTE(review): assumes every mesh has the same vertex\n",
        "# count as the first training sample; this global is read by the model code.\n",
        "num_points = train_data[0].pos.size(0)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "#### 定义模型"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "class LpLoss(nn.Module):\n",
        "    \"\"\"Relative L2 error: mean over the batch of ||x - y||_2 / ||y||_2.\"\"\"\n",
        "    def __init__(self, dim=None):\n",
        "        super().__init__()\n",
        "        self.dim = dim  # dimension(s) the norms are taken over (None = all)\n",
        "\n",
        "    def forward(self, x, y):\n",
        "        a = torch.norm(x - y, p=2, dim=self.dim)\n",
        "        b = torch.norm(y, p=2, dim=self.dim)\n",
        "        # NOTE(review): no epsilon in the denominator — yields inf/nan if y == 0.\n",
        "        return (a / b).mean()\n",
        "\n",
        "class Bert(nn.Module):\n",
        "    \"\"\"BERT encoder over per-point embeddings: each mesh vertex is one token.\n",
        "\n",
        "    Reads the module-level global `num_points` to reshape the flat point\n",
        "    features into (batch, num_points, hidden) before the transformer.\n",
        "    \"\"\"\n",
        "    def __init__(self):\n",
        "        super().__init__()\n",
        "        # NOTE(review): max_position_embeddings=1 while sequences have\n",
        "        # num_points tokens — safe only if position embeddings are never\n",
        "        # indexed past 0; confirm with the installed transformers version.\n",
        "        # NOTE(review): intermediate_size=2024 — possibly a typo for 2048;\n",
        "        # left unchanged to preserve checkpoint compatibility.\n",
        "        self.config = BertConfig(hidden_dropout_prob=0.,\n",
        "                                attention_probs_dropout_prob=0.,\n",
        "                                hidden_size=512,\n",
        "                                num_attention_heads=4,\n",
        "                                num_hidden_layers=12,\n",
        "                                max_position_embeddings=1,\n",
        "                                intermediate_size=2024,\n",
        "                                vocab_size=1)\n",
        "        self.bert = BertModel(self.config)\n",
        "\n",
        "    def forward(self, x):\n",
        "        # (batch*num_points, hidden) -> (batch, num_points, hidden)\n",
        "        x = x.view(-1, num_points,  self.config.hidden_size)\n",
        "        x = self.bert(inputs_embeds=x)[0]  # last_hidden_state\n",
        "        x = x.view(-1, self.config.hidden_size)  # flatten back per point\n",
        "        return x\n",
        "\n",
        "class PointNetLayer(MessagePassing):\n",
        "    \"\"\"PointNet-style message-passing layer with max aggregation.\n",
        "\n",
        "    Each message concatenates neighbor features, the relative position\n",
        "    (pos_j - pos_i), the receiver's features, the neighbor's absolute\n",
        "    position and the edge attribute; a single Linear maps the aggregated\n",
        "    message to out_channels.\n",
        "    \"\"\"\n",
        "    def __init__(self, in_channels: int, out_channels: int):\n",
        "        super().__init__(aggr='max')  # max-pool messages over neighbors\n",
        "        self.mlp = Linear(in_channels, out_channels)  # in_channels=-1: lazily inferred\n",
        "\n",
        "    def forward(self,\n",
        "        x: Tensor,\n",
        "        pos: Tensor,\n",
        "        edge_index: Tensor,\n",
        "        edge_attr: Tensor,\n",
        "    ) -> Tensor:\n",
        "        # Start propagating messages.\n",
        "        h = self.propagate(edge_index, x=x, pos=pos, edge_attr=edge_attr)\n",
        "        x = self.mlp(h) # linear projection applied after aggregation\n",
        "        return x\n",
        "\n",
        "    def message(self,\n",
        "        x_i: Tensor,\n",
        "        x_j: Tensor,\n",
        "        pos_j: Tensor,\n",
        "        pos_i: Tensor,\n",
        "        edge_attr: Tensor,\n",
        "    ) -> Tensor:\n",
        "        # Per-edge feature: [neighbor feat, relative pos, self feat, neighbor pos, edge attr]\n",
        "        edge_feat = torch.cat([x_j, pos_j - pos_i, x_i, pos_j, edge_attr], dim=-1)\n",
        "        return edge_feat\n",
        "\n",
        "class GNNBert(torch.nn.Module):\n",
        "    # Two-branch per-point regressor:\n",
        "    #   GNN branch:  stack of PointNetLayers over raw positions -> mlp_out (512)\n",
        "    #   BERT branch: mlp_in(pos) embeddings (512) -> Bert encoder\n",
        "    # Both branches end in the shared lazy `classifier` (512 -> 1) and are\n",
        "    # blended 0.4/0.6 into a single per-point prediction.\n",
        "    def __init__(self, num_layers=16):\n",
        "        super().__init__()\n",
        "\n",
        "        hidden = 128\n",
        "        convs = torch.nn.ModuleList()\n",
        "        self.mlp_in = Linear(3, 512)  # xyz -> BERT token embedding\n",
        "\n",
        "        for i in range(num_layers):\n",
        "            conv = PointNetLayer(-1, hidden)  # -1: input width inferred lazily\n",
        "            convs.append(conv)\n",
        "        self.convs = convs\n",
        "        self.mlp_out = Linear(hidden, 512)\n",
        "        self.classifier = Linear(-1, 1)  # shared head for both branches\n",
        "        self.bert = Bert()\n",
        "\n",
        "    def forward(self, data):\n",
        "        pos = data.pos\n",
        "        edge_index = data.edge_index\n",
        "        edge_attr = data.edge_attr\n",
        "        x = self.mlp_in(pos)  # consumed only by the BERT branch below\n",
        "        h = pos  # the GNN branch starts from raw coordinates\n",
        "        for conv in self.convs:\n",
        "            h = conv(x=h, pos=pos, edge_index=edge_index, edge_attr=edge_attr).relu()\n",
        "        out1 = 0.4*self.classifier(self.mlp_out(h))\n",
        "        out2 = 0.6*self.classifier(self.bert(x))\n",
        "        return (out1 + out2).squeeze()"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "#### 模型训练"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# do_train = True if not os.path.exists('model.pt') else False"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# Train the pressure model with mixed precision; keep the checkpoint with\n",
        "# the best (lowest) relative-L2 validation score.\n",
        "do_train = True\n",
        "device = torch.device('cuda')\n",
        "model = GNNBert().to(device)\n",
        "\n",
        "if do_train:\n",
        "    train_loader = DataLoader(train_data, batch_size=1, shuffle=True)\n",
        "    valid_loader = DataLoader(valid_data, batch_size=64, shuffle=False)\n",
        "\n",
        "    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)\n",
        "\n",
        "    evaluator = LpLoss(dim=1)\n",
        "    loss_fn = torch.nn.MSELoss()\n",
        "    scaler = amp.GradScaler()  # mixed-precision gradient scaling\n",
        "    best = 1e6\n",
        "    epochs = 200\n",
        "\n",
        "    for epoch in tqdm(range(epochs)):\n",
        "        model.train()\n",
        "        epoch_loss = 0.0\n",
        "        for data in train_loader:\n",
        "            optimizer.zero_grad()\n",
        "            data = data.to(device)\n",
        "            with amp.autocast():\n",
        "                out = model(data)\n",
        "                loss = loss_fn(out, data.y)\n",
        "            scaler.scale(loss).backward()\n",
        "            scaler.step(optimizer)\n",
        "            scaler.update()\n",
        "            # .item() detaches the scalar; accumulating the tensor itself\n",
        "            # would keep every iteration's autograd graph alive and leak\n",
        "            # GPU memory over the epoch.\n",
        "            epoch_loss += loss.item()\n",
        "        lr = optimizer.state_dict()['param_groups'][0]['lr']\n",
        "\n",
        "        model.eval()\n",
        "        score = []\n",
        "        with torch.no_grad():\n",
        "            val_loss = 0.0\n",
        "            for data in valid_loader:\n",
        "                data = data.to(device)\n",
        "                with amp.autocast():\n",
        "                    out = model(data)\n",
        "                    val_loss += loss_fn(out, data.y).item()\n",
        "                # Undo the normalisation before scoring (mean/std come from\n",
        "                # the data-loading cell).\n",
        "                pred = out * std + mean\n",
        "                label = data.y * std + mean\n",
        "                pred = pred.view(-1, num_points)\n",
        "                label = label.view(-1, num_points)\n",
        "                score.append(evaluator(pred, label).item())\n",
        "        score = np.mean(score)\n",
        "\n",
        "        if score < best:\n",
        "            best = score\n",
        "            torch.save(model.state_dict(), f'model_pre.pt')\n",
        "\n",
        "        print(f\"Valid score: {score:.5f} - lr {lr} - Best: {best:.5f}\")\n",
        "else:\n",
        "    print('Loading model from checkpoints...')\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "#### 模型推理"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# Predict test-set pressures with the best checkpoint and write one\n",
        "# .npy file per sample into submission/.\n",
        "submit_path = f'submission'\n",
        "os.makedirs(submit_path, exist_ok=True)\n",
        "\n",
        "test_loader = DataLoader(test_data, batch_size=1, shuffle=False)\n",
        "model.load_state_dict(torch.load(f'model_pre.pt'))\n",
        "model.eval()\n",
        "with torch.no_grad():\n",
        "    for idx, data in enumerate(test_loader):\n",
        "        data = data.to(device)\n",
        "        with amp.autocast():\n",
        "            out = model(data)\n",
        "        # De-normalise predictions back to physical pressure values.\n",
        "        out = out.float().view(-1).cpu().numpy() * std + mean\n",
        "        file_id = test_ids[idx]\n",
        "        np.save(f'{submit_path}/press_{file_id}.npy', out)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "### ---速度训练----"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "#### 数据加载器vel & cd"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "\"\"\"\n",
        "code: 2024.8.8\n",
        "\"\"\"\n",
        "import os\n",
        "import numpy as np\n",
        "from tqdm import tqdm\n",
        "import torch\n",
        "# import torch_geometric.transforms as T\n",
        "import torch_geometric.transforms as T\n",
        "from torch_geometric import seed_everything\n",
        "from torch_geometric.loader import DataLoader\n",
        "from torch_geometric.io import read_ply, read_obj\n",
        "from torch_geometric.data import Data\n",
        "from torch_geometric.nn import MessagePassing, radius_graph, Linear\n",
        "from torch_geometric.nn import global_max_pool\n",
        "from torch_geometric.nn import PointNetConv, PointTransformerConv\n",
        "import itertools\n",
        "import pandas as pd\n",
        "\n",
        "import vtk\n",
        "from vtk.util.numpy_support import vtk_to_numpy\n",
        "from pathlib import Path\n",
        "\n",
        "\n",
        "def normals(polydata):\n",
        "    # Compute per-cell normals of a vtkPolyData mesh and return them as a\n",
        "    # float32 numpy array. Consistency/auto-orient make the cell\n",
        "    # orientations uniform; FlipNormalsOn then inverts the direction --\n",
        "    # NOTE(review): confirm the intended normal orientation.\n",
        "    normals_filter = vtk.vtkPolyDataNormals()\n",
        "    normals_filter.SetInputData(polydata)\n",
        "    normals_filter.ComputeCellNormalsOn()\n",
        "    normals_filter.ConsistencyOn()\n",
        "    normals_filter.FlipNormalsOn()\n",
        "    normals_filter.AutoOrientNormalsOn()\n",
        "    normals_filter.Update()\n",
        "    numpy_cell_normals = vtk_to_numpy(normals_filter.GetOutput().GetCellData().GetNormals()).astype(np.float32)\n",
        "    return numpy_cell_normals\n",
        "\n",
        "def areas(polydata):\n",
        "    # Per-cell surface area of a vtkPolyData mesh as a float32 numpy array.\n",
        "    cell_size_filter = vtk.vtkCellSizeFilter()\n",
        "    cell_size_filter.SetInputData(polydata)\n",
        "    cell_size_filter.ComputeAreaOn()\n",
        "    cell_size_filter.Update()\n",
        "    numpy_cell_areas = vtk_to_numpy(cell_size_filter.GetOutput().GetCellData().GetArray(\"Area\")).astype(np.float32)\n",
        "    return numpy_cell_areas\n",
        "\n",
        "def centoirds(polydata):\n",
        "    # Per-cell centroid coordinates as a float32 numpy array.\n",
        "    # (The misspelled name 'centoirds' is kept -- callers depend on it.)\n",
        "    cell_centers = vtk.vtkCellCenters()\n",
        "    cell_centers.SetInputData(polydata)\n",
        "    cell_centers.Update()\n",
        "    numpy_cell_centers = vtk_to_numpy(cell_centers.GetOutput().GetPoints().GetData()).astype(np.float32)\n",
        "    return numpy_cell_centers\n",
        "\n",
        "\n",
        "# def read_obj(file_path):\n",
        "#     reader = vtk.vtkOBJReader()\n",
        "#     reader.SetFileName(file_path)\n",
        "#     reader.Update()\n",
        "#     polydata = reader.GetOutput()\n",
        "#     return reader, polydata\n",
        "\n",
        "# --------------velocity部分--------------------- #\n",
        "\n",
        "def read_dir(test=False):\n",
        "    # Scan the dataset-1 feature directory and split files by extension:\n",
        "    # .vtk files carry the velocity fields, .ply files the meshes.\n",
        "    # Returns (vel_path, mesh_path) as lists of pathlib.Path objects.\n",
        "    # The previous version duplicated the whole loop for the test and\n",
        "    # training branches; only the base directory differs.\n",
        "    base = \"Test/Dataset_1/Feature_File/\" if test else \"Training/Dataset_1/Feature_File/\"\n",
        "    vel_path = []\n",
        "    mesh_path = []\n",
        "    for name in os.listdir(base):\n",
        "        p = Path(os.path.join(base, name))\n",
        "        if p.suffix == '.ply':\n",
        "            mesh_path.append(p)\n",
        "        elif p.suffix == '.vtk':\n",
        "            vel_path.append(p)\n",
        "    return vel_path, mesh_path\n",
        "        \n",
        "\n",
        "# 读取vtk数据\n",
        "def read_vtk(file_path):\n",
        "    # Read a legacy .vtk file as an unstructured grid.\n",
        "    # Returns (reader, grid); returning the reader too keeps it alive\n",
        "    # while the grid is in use -- NOTE(review): confirm that is why.\n",
        "    reader = vtk.vtkUnstructuredGridReader()\n",
        "    reader.SetFileName(file_path)\n",
        "    reader.Update()\n",
        "    polydata = reader.GetOutput()\n",
        "    return reader, polydata\n",
        "\n",
        "\n",
        "def get_edges(unstructured_grid_data, points, cell_size=4):\n",
        "    # Extract the symmetric edge set implied by the grid's cell connectivity.\n",
        "    # The VTK cell array is laid out as rows of [n_pts, id_1, ..., id_cell_size],\n",
        "    # hence the reshape to (-1, cell_size + 1) and the 1-based column indices.\n",
        "    # Every ordered pair of vertex ids within a cell is added (the product\n",
        "    # already covers both orders, and j == k yields self-loops). Edges are\n",
        "    # returned as coordinate tuples, not indices:\n",
        "    # [list of source point tuples, list of destination point tuples].\n",
        "    edge_indeces = set()\n",
        "    cells = vtk_to_numpy(unstructured_grid_data.GetCells().GetData()).reshape(-1, cell_size + 1)\n",
        "    for i in range(len(cells)):\n",
        "        for j, k in itertools.product(range(1, cell_size + 1), repeat=2):\n",
        "            edge_indeces.add((cells[i][j], cells[i][k]))\n",
        "            edge_indeces.add((cells[i][k], cells[i][j]))\n",
        "    edges = [[], []]\n",
        "    for u, v in edge_indeces:\n",
        "        edges[0].append(tuple(points[u]))\n",
        "        edges[1].append(tuple(points[v]))\n",
        "    return edges\n",
        "\n",
        "def get_edge_index(pos, edges_velo):\n",
        "    # Convert coordinate-tuple edges (from get_edges) back to vertex indices.\n",
        "    # Builds a coordinate -> index lookup; assumes every point position is\n",
        "    # unique -- TODO confirm (duplicate coordinates would silently collapse).\n",
        "    indices = {tuple(pos[i]): i for i in range(len(pos))}\n",
        "    edges = set()\n",
        "    for i in range(len(edges_velo[0])):\n",
        "        edges.add((indices[edges_velo[0][i]], indices[edges_velo[1][i]]))\n",
        "    # Shape (2, num_edges), PyG's edge_index convention.\n",
        "    edge_index = np.array(list(edges)).T\n",
        "    return edge_index\n",
        "\n",
        "def nodes(polydata):\n",
        "    # Vertex coordinates as float64. NOTE(review): float64 (the commented\n",
        "    # float32 variant was abandoned) is presumably needed so coordinate\n",
        "    # tuples round-trip exactly as dict keys in get_edge_index -- confirm\n",
        "    # before changing the dtype.\n",
        "    points = vtk_to_numpy(polydata.GetPoints().GetData()).astype(np.float64)\n",
        "    return points\n",
        "\n",
        "def velocity(polydata):\n",
        "    # Per-point velocity vectors from the 'point_vectors' array, float32.\n",
        "    vel = vtk_to_numpy(polydata.GetPointData().GetArray(\"point_vectors\")).astype(np.float32)\n",
        "    return vel\n",
        "\n",
        "\n",
        "def load_vel_std_mean():\n",
        "    # Precomputed (mean, std) pairs for the velocity components, in order\n",
        "    # (vx, vy, vz); used to standardise labels and to undo the\n",
        "    # normalisation at evaluation/inference time.\n",
        "    return [(0.002925068091990397, 1.3398395669460297), (-0.026870363617985275, 1.2418244477510452), (17.013628314971925, 6.445426697731018)]\n",
        "    \n",
        "\n",
        "\n",
        "def read_vtk_as_data(vel_path, training=True, preprocess=False, save=True):\n",
        "    # Build a list of PyG Data objects from .vtk velocity files.\n",
        "    #   vel_path:   iterable of paths to the .vtk files\n",
        "    #   training:   if True, also load and standardise per-point velocities\n",
        "    #   preprocess: if True, read cached numpy arrays from preData/ instead\n",
        "    #               of parsing the (slow) raw VTK files\n",
        "    #   save:       if True, cache the parsed arrays under preData/<id>/\n",
        "    (x_mean,x_std), (y_mean,y_std), (z_mean,z_std) = load_vel_std_mean()\n",
        "    data_list = []\n",
        "    for vpath in tqdm(vel_path):\n",
        "        velo = None\n",
        "        # Sample id = last three characters of the stem, e.g. '...012.vtk' -> '012'.\n",
        "        num_ = str(vpath).split('.vtk')[0][-3:]\n",
        "        if preprocess:\n",
        "            vertices = np.load(f\"preData/{num_}/vertices_{num_}.npy\")\n",
        "            edge_attr = np.load(f\"preData/{num_}/edge_attr_{num_}.npy\")\n",
        "            edge_index = np.load(f\"preData/{num_}/edge_index_{num_}.npy\")\n",
        "            if training:\n",
        "                velo = np.load(f\"preData/{num_}/velo_{num_}.npy\")\n",
        "                velo = torch.tensor(velo).to(torch.float)\n",
        "        else:\n",
        "            _, polydata = read_vtk(vpath)\n",
        "            if training:\n",
        "                velo = velocity(polydata)\n",
        "                # Standardise each component with the precomputed statistics.\n",
        "                velo[:,0] = (velo[:,0] - x_mean )/ x_std\n",
        "                velo[:,1] = (velo[:,1] - y_mean )/ y_std\n",
        "                velo[:,2] = (velo[:,2] - z_mean )/ z_std\n",
        "                velo = torch.tensor(velo).to(torch.float)\n",
        "\n",
        "            vertices = nodes(polydata)\n",
        "            edge_attr = get_edges(polydata, vertices, cell_size=8)\n",
        "            edge_index = get_edge_index(vertices, edge_attr)\n",
        "            if save:\n",
        "                # os.makedirs is portable and avoids spawning a shell\n",
        "                # (previously: os.system('mkdir -p ...')).\n",
        "                os.makedirs(f'preData/{num_}', exist_ok=True)\n",
        "                np.save(f\"preData/{num_}/vertices_{num_}.npy\",vertices)\n",
        "                np.save(f\"preData/{num_}/edge_attr_{num_}.npy\", edge_attr)\n",
        "                np.save(f\"preData/{num_}/edge_index_{num_}.npy\", edge_index)\n",
        "            if save and training:\n",
        "                np.save(f\"preData/{num_}/velo_{num_}.npy\", velo)\n",
        "\n",
        "        vertices = torch.tensor(vertices).to(torch.float)\n",
        "        edge_attr = torch.tensor(edge_attr).to(torch.float)\n",
        "        edge_index = torch.tensor(edge_index).to(torch.long)\n",
        "\n",
        "        # get_edges yields a (2, E, 3) structure; only the source-endpoint\n",
        "        # coordinates (edge_attr[0]) are attached as edge features --\n",
        "        # NOTE(review): confirm this is intentional.\n",
        "        data = Data(pos=vertices, y=velo, edge_index=edge_index, edge_attr=edge_attr[0])\n",
        "        data_list.append(data)\n",
        "\n",
        "    return data_list\n",
        "\n",
        "\n",
        "\n",
        "# 根据后缀组成不同的数据\n",
        "def read(file_path):\n",
        "    # Dispatch on file extension and return a dict of raw numpy features.\n",
        "    #   .obj -> geometry features (centroids, vertices, areas, normals)\n",
        "    #   .vtk -> vertices plus the per-point velocity field\n",
        "    # Raises NotImplementedError for any other extension.\n",
        "    if file_path.suffix == \".obj\":\n",
        "        _, polydata = read_obj(file_path)\n",
        "        data_dict = {\n",
        "            \"centroids\":    centoirds(polydata),\n",
        "            \"vertices\":     nodes(polydata),\n",
        "            \"areas\":        areas(polydata),\n",
        "            \"normal\":       normals(polydata),\n",
        "        }\n",
        "    elif file_path.suffix == \".vtk\":\n",
        "        _, polydata = read_vtk(file_path)\n",
        "        data_dict = {\n",
        "            \"vertices\":     nodes(polydata),\n",
        "            \"velocity\":     velocity(polydata),\n",
        "        }\n",
        "    else:\n",
        "        # Bug fix: `raise NotImplemented` raises a TypeError (NotImplemented\n",
        "        # is a sentinel constant, not an exception); NotImplementedError is\n",
        "        # the correct exception type here.\n",
        "        raise NotImplementedError(f\"Unsupported file type: {file_path.suffix}\")\n",
        "\n",
        "    return data_dict\n",
        "\n",
        "\n",
        "def read_test(file_path):\n",
        "    # Test-time variant of read(): loads only the features available at\n",
        "    # inference (vertices). Velocity/geometry targets are absent or unused\n",
        "    # in the test split. Raises NotImplementedError for other extensions.\n",
        "    if file_path.suffix == \".obj\":\n",
        "        _, polydata = read_obj(file_path)\n",
        "        data_dict = {\n",
        "            \"vertices\":     nodes(polydata),\n",
        "        }\n",
        "    elif file_path.suffix == \".vtk\":\n",
        "        _, polydata = read_vtk(file_path)\n",
        "        data_dict = {\n",
        "            # No velocity here: the test set's velocity must be predicted.\n",
        "            \"vertices\":     nodes(polydata),\n",
        "        }\n",
        "    else:\n",
        "        # Bug fix: `raise NotImplemented` raises a TypeError; use the\n",
        "        # proper NotImplementedError exception instead.\n",
        "        raise NotImplementedError(f\"Unsupported file type: {file_path.suffix}\")\n",
        "\n",
        "    return data_dict\n",
        "\n",
        "\n",
        "# --------------------用于训练obj文件，Cd-----------------------\n",
        "\n",
        "def cal_cd_mean_std():\n",
        "    # Mean/std of the drag coefficient from the dataset-2 label CSV\n",
        "    # (rows 0-499 are training; rows 500-549 are the split to predict).\n",
        "    # NOTE(review): statistics are computed over the whole CSV, including\n",
        "    # the test rows -- confirm that is intended.\n",
        "    labels = pd.read_csv(r'dataset2_train_label.csv')\n",
        "    mean = labels['Cd'].mean()\n",
        "    std = labels['Cd'].std()\n",
        "    return mean, std\n",
        "    \n",
        "def read_obj_as_data(files, training=True):\n",
        "    # Build PyG Data samples for the Cd-regression task.\n",
        "    #   files: training -> list of (file_name, cd) pairs; test -> names only\n",
        "    # Each mesh is resampled to exactly 63094 points and gets the\n",
        "    # standardised Cd value as its graph-level label.\n",
        "    mean, std = cal_cd_mean_std()\n",
        "    data_list = []\n",
        "\n",
        "    # TODO: reduce the number of sampled points\n",
        "    \n",
        "    transform = T.SamplePoints(63094,False)\n",
        "    k=0\n",
        "    for file in tqdm(files):\n",
        "        cd = None\n",
        "        data1 = Data()  # NOTE(review): data1 is never used -- candidate for removal\n",
        "        if training:\n",
        "            obj_path = f'Training/Dataset_2/Feature_File/{file[0]}.obj'\n",
        "            cd = file[1]\n",
        "            # Standardise the label with the dataset-wide statistics.\n",
        "            cd = (cd - mean) / std\n",
        "            cd = torch.tensor(cd).to(torch.float)\n",
        "        else:\n",
        "            # For the test split, `file` is just the file name.\n",
        "            obj_path = f'Test/Dataset_2/Feature_File/{file}.obj'\n",
        "        \n",
        "        # TODO: augment the data so sample sizes line up.\n",
        "        # Meshes with more than 63094 vertices are skipped entirely.\n",
        "        data = read_obj(obj_path)\n",
        "        if data.pos.size(0) > 63094: \n",
        "            continue\n",
        "        k = k+1\n",
        "        data = transform(data)  # resample faces to exactly 63094 points\n",
        "        \n",
        "        data.y = cd\n",
        "        \n",
        "        # Derive edges from faces, attach edge lengths, and centre the cloud.\n",
        "        data = T.FaceToEdge()(data)\n",
        "        \n",
        "        data = T.Distance()(data)\n",
        "        data = T.Center()(data)\n",
        "\n",
        "        data_list.append(data)\n",
        "\n",
        "    print(f\"Haved Loaded {k} Sample\")\n",
        "    \n",
        "    return data_list\n",
        "        \n",
        "def load_files(train_rat=0.9):\n" if False else "def load_files(train_ratio=0.9):\n",
        "    # Split the dataset-2 label CSV into train/val/test file lists.\n",
        "    # Rows 0-499 are labelled training data; rows 500-549 are the test split.\n",
        "    df = pd.read_csv(r'dataset2_train_label.csv')\n",
        "    df_train = df.head(500)\n",
        "    # Drop three known-bad samples.\n",
        "    df_train = df_train[df_train['file'] != '1328a95d69cefe32f200a72c9245aee7_aug']\n",
        "    df_train = df_train[df_train['file'] != '22d477830b1bbbded536c1ebda275556'] \n",
        "    df_train = df_train[df_train['file'] != '17ac544cdfbf74b999c8924280047dd9'] \n",
        "    \n",
        "    train_files = []\n",
        "    val_files = []\n",
        "    # First train_ratio of the rows -> training, the remainder -> validation.\n",
        "    for i in range(int(train_ratio * len(df_train))):\n",
        "        # Store (file_name, cd) pairs; CSV columns 1 and 2.\n",
        "        train_files.append((df_train.iloc[i,1], df_train.iloc[i,2]))\n",
        "        \n",
        "    for i in range(int(train_ratio * len(df_train)), len(df_train)):\n",
        "        val_files.append((df_train.iloc[i,1], df_train.iloc[i,2]))\n",
        "\n",
        "    # Test split: file names only (no labels).\n",
        "    test_files = []\n",
        "    df_test = df.iloc[500:550]\n",
        "    for j in range(len(df_test)):\n",
        "        test_files.append(df_test.iloc[j,1])\n",
        "    return train_files, val_files, test_files\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "#### 导入模型"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "\n",
        "from torch_geometric.nn import MessagePassing, radius_graph, Linear\n",
        "import torch\n",
        "from torch.cuda import amp\n",
        "from torch import Tensor\n",
        "from torch.nn import Sequential, ReLU\n",
        "from torch import nn\n",
        "\n",
        "from transformers import BertModel, BertConfig\n",
        "\n",
        "num_points = 29498\n",
        "\n",
        "class LpLoss(nn.Module):\n",
        "    # Relative L2 error: mean over samples of ||x - y||_2 / ||y||_2 along\n",
        "    # `dim`. (Re-definition of the LpLoss from the pressure section.)\n",
        "    def __init__(self, dim=None):\n",
        "        super().__init__()\n",
        "        self.dim = dim  # dimension along which the norms are taken\n",
        "\n",
        "    def forward(self, x, y):\n",
        "        a = torch.norm(x - y, p=2, dim=self.dim)\n",
        "        b = torch.norm(y, p=2, dim=self.dim)\n",
        "        return (a / b).mean()\n",
        "\n",
        "\n",
        "class Bert(nn.Module):\n",
        "    # BERT encoder over point embeddings (duplicate of the pressure-section\n",
        "    # definition): each mesh is one sequence of `num_points` 512-wide tokens.\n",
        "    def __init__(self):\n",
        "        super().__init__()\n",
        "        # Dropout disabled. NOTE(review): max_position_embeddings=1 with\n",
        "        # num_points-long sequences relies on broadcasting of a single\n",
        "        # position embedding -- confirm against the transformers version.\n",
        "        self.config = BertConfig(hidden_dropout_prob=0.,\n",
        "                                attention_probs_dropout_prob=0.,\n",
        "                                hidden_size=512,\n",
        "                                num_attention_heads=4,\n",
        "                                num_hidden_layers=12,\n",
        "                                max_position_embeddings=1,\n",
        "                                intermediate_size=2024,\n",
        "                                vocab_size=1)\n",
        "        self.bert = BertModel(self.config)\n",
        "\n",
        "    def forward(self, x):\n",
        "        # (total_points, 512) -> (batch, num_points, 512) token sequences.\n",
        "        x = x.view(-1, num_points,  self.config.hidden_size)\n",
        "        # Feed embeddings directly; take the last hidden state.\n",
        "        x = self.bert(inputs_embeds=x)[0]\n",
        "        x = x.view(-1, self.config.hidden_size)\n",
        "        return x\n",
        "\n",
        "class PointNetLayer(MessagePassing):\n",
        "    # PointNet-style message-passing layer with max aggregation\n",
        "    # (duplicate of the pressure-section definition).\n",
        "    def __init__(self, in_channels: int, out_channels: int):\n",
        "        super().__init__(aggr='max')\n",
        "        # in_channels=-1 from callers -> lazy width inference by PyG.\n",
        "        self.mlp = Linear(in_channels, out_channels)\n",
        "\n",
        "    def forward(self,\n",
        "        x: Tensor,\n",
        "        pos: Tensor,\n",
        "        edge_index: Tensor,\n",
        "        edge_attr: Tensor,\n",
        "    ) -> Tensor:\n",
        "        # Max-aggregate the raw concatenated messages, then project per node.\n",
        "        h = self.propagate(edge_index, x=x, pos=pos, edge_attr=edge_attr)\n",
        "        x = self.mlp(h)\n",
        "        return x\n",
        "\n",
        "    def message(self,\n",
        "        x_i: Tensor,\n",
        "        x_j: Tensor,\n",
        "        pos_j: Tensor,\n",
        "        pos_i: Tensor,\n",
        "        edge_attr: Tensor,\n",
        "    ) -> Tensor:\n",
        "        # Message = [neighbour feats, relative pos, own feats, neighbour pos,\n",
        "        # edge attributes], concatenated channel-wise.\n",
        "        edge_feat = torch.cat([x_j, pos_j - pos_i, x_i, pos_j, edge_attr], dim=-1)\n",
        "        return edge_feat\n",
        "\n",
        "class GNNBert(torch.nn.Module):\n",
        "    # Velocity variant of the two-branch regressor: identical to the\n",
        "    # pressure model except the shared head maps 512 -> 3 (vx, vy, vz).\n",
        "    def __init__(self, num_layers=16):\n",
        "        super().__init__()\n",
        "\n",
        "        hidden = 128\n",
        "        convs = torch.nn.ModuleList()\n",
        "        self.mlp_in = Linear(3, 512)  # xyz -> BERT token embedding\n",
        "\n",
        "        for i in range(num_layers):\n",
        "            conv = PointNetLayer(-1, hidden)  # -1: input width inferred lazily\n",
        "            convs.append(conv)\n",
        "        self.convs = convs\n",
        "        self.mlp_out = Linear(hidden, 512)\n",
        "        self.classifier = Linear(-1, 3)  # shared head, 3 velocity components\n",
        "        self.bert = Bert()\n",
        "\n",
        "    def forward(self, data):\n",
        "        pos = data.pos\n",
        "        edge_index = data.edge_index\n",
        "        edge_attr = data.edge_attr\n",
        "        x = self.mlp_in(pos)  # consumed only by the BERT branch\n",
        "        h = pos  # the GNN branch starts from raw coordinates\n",
        "\n",
        "        for conv in self.convs:\n",
        "            h = conv(x=h, pos=pos, edge_index=edge_index, edge_attr=edge_attr).relu()\n",
        "        out1 = 0.4*self.classifier(self.mlp_out(h))\n",
        "        out2 = 0.6*self.classifier(self.bert(x))\n",
        "        return (out1 + out2).squeeze()"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "#### 训练模型"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "import os\n",
        "import numpy as np\n",
        "import pandas as pd\n",
        "import random\n",
        "from tqdm import tqdm\n",
        "\n",
        "import torch_geometric.transforms as T\n",
        "from torch_geometric import seed_everything\n",
        "from torch_geometric.loader import DataLoader\n",
        "\n",
        "import torch\n",
        "from torch.cuda import amp\n",
        "from torch import Tensor\n",
        "from torch.nn import Sequential, ReLU\n",
        "from torch import nn\n",
        "\n",
        "from transformers import BertModel, BertConfig\n",
        "\n",
        "\n",
        "# Per-component normalisation statistics for the velocity labels.\n",
        "(x_mean,x_std), (y_mean,y_std), (z_mean,z_std) = load_vel_std_mean()\n",
        "\n",
        "# ----------------------Train-----------------------\n",
        "seed_everything(2024)\n",
        "\n",
        "vel_path, _ = read_dir()\n",
        "train_path = vel_path[:450]\n",
        "valid_path = vel_path[450:]\n",
        "\n",
        "train_data = read_vtk_as_data(train_path, preprocess=False, save=False)\n",
        "valid_data = read_vtk_as_data(valid_path, preprocess=False, save=False)\n",
        "\n",
        "\n",
        "num_points = 29498\n",
        "do_train = True\n",
        "device = torch.device('cuda')\n",
        "model = GNNBert().to(device)\n",
        "\n",
        "train_loader = DataLoader(train_data, batch_size=1, shuffle=True)\n",
        "valid_loader = DataLoader(valid_data, batch_size=1, shuffle=False)\n",
        "print(\"=========loading data finished===========\")\n",
        "optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)\n",
        "\n",
        "evaluator = LpLoss(dim=1)\n",
        "loss_fn = torch.nn.MSELoss()\n",
        "scaler = amp.GradScaler()  # mixed-precision gradient scaling\n",
        "best = 1e6\n",
        "epochs = 200\n",
        "\n",
        "for epoch in range(epochs):\n",
        "    model.train()\n",
        "    epoch_loss = 0.0\n",
        "    for data in tqdm(train_loader):\n",
        "        optimizer.zero_grad()\n",
        "        data = data.to(device)\n",
        "        with amp.autocast():\n",
        "            out = model(data)\n",
        "            loss = loss_fn(out, data.y)\n",
        "        scaler.scale(loss).backward()\n",
        "        scaler.step(optimizer)\n",
        "        scaler.update()\n",
        "        # .item() detaches the scalar; accumulating the tensor itself would\n",
        "        # keep every iteration's autograd graph alive and leak GPU memory.\n",
        "        epoch_loss += loss.item()\n",
        "    train_loss = epoch_loss/len(train_loader)\n",
        "    lr = optimizer.state_dict()['param_groups'][0]['lr']\n",
        "\n",
        "    model.eval()\n",
        "    score = []\n",
        "    with torch.no_grad():\n",
        "        val_loss = 0.0\n",
        "        for data in valid_loader:\n",
        "            data = data.to(device)\n",
        "            with amp.autocast():\n",
        "                out = model(data)\n",
        "                val_loss += loss_fn(out, data.y).item()\n",
        "                # Undo the per-component normalisation before scoring.\n",
        "                out[:,0] = out[:,0] * x_std + x_mean\n",
        "                out[:,1] = out[:,1] * y_std + y_mean \n",
        "                out[:,2] = out[:,2] * z_std + z_mean \n",
        "            pred = out\n",
        "            # NOTE(review): mutates the collated batch's y in place; safe only\n",
        "            # because DataLoader collation copies the tensors -- confirm.\n",
        "            data.y[:, 0] = data.y[: ,0] * x_std + x_mean\n",
        "            data.y[:, 1] = data.y[: ,1] * y_std + y_mean\n",
        "            data.y[:, 2] = data.y[: ,2] * z_std + z_mean\n",
        "            label = data.y\n",
        "            # NOTE(review): view(-1, num_points) on an (N, 3) tensor interleaves\n",
        "            # components; applied identically to pred and label, but confirm\n",
        "            # the intended layout for the relative-L2 score.\n",
        "            pred = pred.view(-1, num_points).T\n",
        "            label = label.view(-1, num_points).T\n",
        "            score.append(evaluator(pred, label).item())\n",
        "    score = np.mean(score)\n",
        "\n",
        "    # Keep the checkpoint with the best (lowest) validation score.\n",
        "    if score < best:\n",
        "        best = score\n",
        "        torch.save(model.state_dict(), f'model_vel.pt')\n",
        "    print(f\"[{epoch}/{epochs}]: Train loss: {train_loss:.5f} Valid score: {score:.5f} - lr {lr} - Best: {best:.5f}\")\n",
        "\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "\n",
        "import torch_geometric.transforms as T\n",
        "from torch_geometric import seed_everything\n",
        "from torch_geometric.loader import DataLoader\n",
        "\n",
        "import torch\n",
        "from torch.cuda import amp\n",
        "from torch import Tensor\n",
        "from torch.nn import Sequential, ReLU\n",
        "from torch import nn\n",
        "\n",
        "from transformers import BertModel, BertConfig\n",
        "\n",
        "\n",
        "# Test-set .vtk files carry geometry only; velocity must be predicted.\n",
        "vel_path, _ = read_dir(test=True)\n",
        "test_data = read_vtk_as_data(vel_path, preprocess=False, training=False,save=False)\n",
        "\n",
        "(x_mean,x_std), (y_mean,y_std), (z_mean,z_std) = load_vel_std_mean()\n",
        "\n",
        "\n",
        "num_points = 29498\n",
        "device = torch.device('cuda')\n",
        "model = GNNBert().to(device)\n",
        "\n",
        "submit_path = f'submission'\n",
        "os.makedirs(submit_path, exist_ok=True)\n",
        "\n",
        "test_loader = DataLoader(test_data, batch_size=1, shuffle=False)\n",
        "# Restore the best checkpoint saved during velocity training.\n",
        "model.load_state_dict(torch.load(f'model_vel.pt'))\n",
        "model.eval()\n",
        "with torch.no_grad():\n",
        "    for idx, data in tqdm(enumerate(test_loader)):\n",
        "        data = data.to(device)\n",
        "        with amp.autocast():\n",
        "            out = model(data)\n",
        "            # De-normalise each velocity component back to physical units.\n",
        "            out[:,0] = out[:,0] * x_std + x_mean\n",
        "            out[:,1] = out[:,1] * y_std + y_mean \n",
        "            out[:,2] = out[:,2] * z_std + z_mean \n",
        "        out = out.float().view(num_points,-1).cpu().numpy()\n",
        "        print(out.shape)\n",
        "        # File id = last three characters of the .vtk stem.\n",
        "        file_id = str(vel_path[idx]).split('.vtk')[0][-3:]\n",
        "        np.save(f'{submit_path}/vel_{file_id}.npy', out)\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "y9uq00gkYq1B"
      },
      "source": [
        "## Dataset2"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 11,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "NjbtsOF5YumS",
        "outputId": "4952429c-6858-481e-df56-be9a0469cd14"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "TriangleMesh with 29230 points and 58896 triangles.\n"
          ]
        }
      ],
      "source": [
        "# Bert 模型\n",
        "import torch_geometric.transforms as T\n",
        "from torch_geometric import seed_everything\n",
        "from torch_geometric.loader import DataLoader\n",
        "from torch_geometric.io import read_ply\n",
        "from torch_geometric.nn import MessagePassing, radius_graph, Linear\n",
        "from torch_geometric.nn import global_max_pool\n",
        "from torch_geometric.nn import PointNetConv, PointTransformerConv\n",
        "\n",
        "import torch\n",
        "from torch.cuda import amp\n",
        "from torch import Tensor\n",
        "from torch.nn import Sequential, ReLU\n",
        "from torch import nn\n",
        "\n",
        "from transformers import BertModel, BertConfig\n",
        "\n",
        "num_points = 63094\n",
        "\n",
        "class Bert(nn.Module):\n",
        "    def __init__(self):\n",
        "        super().__init__()\n",
        "        self.config = BertConfig(hidden_dropout_prob=0.,\n",
        "                                attention_probs_dropout_prob=0.,\n",
        "                                hidden_size=512,\n",
        "                                num_attention_heads=4,\n",
        "                                num_hidden_layers=8,\n",
        "                                max_position_embeddings=1,\n",
        "                                intermediate_size=2024,\n",
        "                                vocab_size=1)\n",
        "        self.bert = BertModel(self.config)\n",
        "\n",
        "    def forward(self, x):\n",
        "        x = x.view(-1, num_points,  self.config.hidden_size)\n",
        "        x = self.bert(inputs_embeds=x)[0]\n",
        "        x = x.view(-1, self.config.hidden_size)\n",
        "        return x\n",
        "\n",
        "class PointNetLayer(MessagePassing):\n",
        "    def __init__(self, in_channels: int, out_channels: int):\n",
        "        super().__init__(aggr='max')\n",
        "        self.mlp = Linear(in_channels, out_channels)\n",
        "\n",
        "    def forward(self,\n",
        "        x: Tensor,\n",
        "        pos: Tensor,\n",
        "        edge_index: Tensor,\n",
        "        edge_attr: Tensor,\n",
        "    ) -> Tensor:\n",
        "        # Start propagating messages.\n",
        "        h = self.propagate(edge_index, x=x, pos=pos, edge_attr=edge_attr)\n",
        "        x = self.mlp(h) # here\n",
        "        return x\n",
        "\n",
        "    def message(self,\n",
        "        x_i: Tensor,\n",
        "        x_j: Tensor,\n",
        "        pos_j: Tensor,\n",
        "        pos_i: Tensor,\n",
        "        edge_attr: Tensor,\n",
        "    ) -> Tensor:\n",
        "        edge_feat = torch.cat([x_j, pos_j - pos_i, x_i, pos_j, edge_attr], dim=-1)\n",
        "        return edge_feat\n",
        "\n",
        "class GNNBert(torch.nn.Module):\n",
        "    def __init__(self, num_layers=16):\n",
        "        super().__init__()\n",
        "\n",
        "        hidden = 128\n",
        "        convs = torch.nn.ModuleList()\n",
        "        self.mlp_in = Linear(3, 512)\n",
        "\n",
        "        for i in range(num_layers):\n",
        "            conv = PointNetLayer(-1, hidden)\n",
        "            convs.append(conv)\n",
        "        self.convs = convs\n",
        "        self.mlp_out = Linear(hidden, 512)\n",
        "        self.classifier = Linear(-1, 1)\n",
        "        self.classifier2 = Linear(63094, 1)\n",
        "        self.bert = Bert()\n",
        "\n",
        "    def forward(self, data):\n",
        "        pos = data.pos\n",
        "        edge_index = data.edge_index\n",
        "        edge_attr = data.edge_attr\n",
        "        x = self.mlp_in(pos)\n",
        "        h = pos\n",
        "\n",
        "        for conv in self.convs:\n",
        "            h = conv(x=h, pos=pos, edge_index=edge_index, edge_attr=edge_attr).relu()\n",
        "            # 加一层\n",
        "        out1 = 0.4*self.classifier2(self.classifier(self.mlp_out(h)).T)\n",
        "        out2 = 0.6*self.classifier2(self.classifier(self.bert(x)).T)\n",
        "        return (out1 + out2).squeeze()\n",
        "\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "#### 训练模型"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "import os\n",
        "import numpy as np\n",
        "import pandas as pd\n",
        "import random\n",
        "from tqdm import tqdm\n",
        "\n",
        "import torch_geometric.transforms as T\n",
        "from torch_geometric import seed_everything\n",
        "from torch_geometric.loader import DataLoader\n",
        "\n",
        "from transformers import BertModel, BertConfig\n",
        "\n",
        "\n",
        "\n",
        "# 读取cd的\n",
        "(mean, std) = cal_cd_mean_std()\n",
        "\n",
        "# ----------------------Train-----------------------\n",
        "\n",
        "seed_everything(2024)\n",
        "\n",
        "\n",
        "# 读取obj数据\n",
        "train_files, val_files, test_files = load_files()\n",
        "train_data = read_obj_as_data(train_files)\n",
        "valid_data = read_obj_as_data(val_files)\n",
        "\n",
        "num_points = 63094\n",
        "\n",
        "do_train = True\n",
        "device = torch.device('cuda')\n",
        "model = GNNBert().to(device)\n",
        "\n",
        "\n",
        "\n",
        "# if do_train:\n",
        "train_loader = DataLoader(train_data, batch_size=1, shuffle=True)\n",
        "valid_loader = DataLoader(valid_data, batch_size=1, shuffle=False)\n",
        "\n",
        "optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)\n",
        "\n",
        "\n",
        "evaluator = LpLoss(dim=0)\n",
        "loss_fn = torch.nn.MSELoss()\n",
        "scaler = amp.GradScaler()\n",
        "best = 1e6\n",
        "epochs = 10\n",
        "\n",
        "for epoch in range(epochs):\n",
        "    model.train()\n",
        "    epoch_loss = 0\n",
        "    for data in tqdm(train_loader):\n",
        "        optimizer.zero_grad()\n",
        "        data = data.to(device)\n",
        "        with amp.autocast():\n",
        "            out = model(data)        \n",
        "            loss = loss_fn(out, data.y)\n",
        "        scaler.scale(loss).backward()\n",
        "        scaler.step(optimizer)\n",
        "        scaler.update()\n",
        "        # scheduler.step()\n",
        "        epoch_loss += loss\n",
        "    train_loss = epoch_loss/len(train_loader)\n",
        "    lr = optimizer.state_dict()['param_groups'][0]['lr']\n",
        "\n",
        "    model.eval()\n",
        "    score = []\n",
        "    with torch.no_grad():\n",
        "        val_loss = 0\n",
        "        for data in valid_loader:\n",
        "            data = data.to(device)\n",
        "            with amp.autocast():\n",
        "                out = model(data) \n",
        "                val_loss += loss_fn(out, data.y)\n",
        "            pred = out * std + mean\n",
        "            # print(pred)\n",
        "            label = data.y * std + mean\n",
        "\n",
        "            score.append(evaluator(pred, label).item())\n",
        "    score = np.mean(score)\n",
        "\n",
        "    if score < best:\n",
        "        best = score\n",
        "        torch.save(model.state_dict(), f'model_cd.pt')\n",
        "    print(f\"[{epoch}/{epochs}]:Valid score: {score:.5f} - lr {lr} - Best: {best:.5f}\")\n",
        "    # print(f\"Valid score: {score:.5f} - lr {lr} - Best: {best:.5f}\")\n",
        "\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "#### 测试模型"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "import os\n",
        "import numpy as np\n",
        "import pandas as pd\n",
        "import random\n",
        "from tqdm import tqdm\n",
        "\n",
        "from transformers import BertModel, BertConfig\n",
        "import csv\n",
        "\n",
        "submit_path = f'submission'\n",
        "ff = open(f'{submit_path}/Answer.csv',mode='w')\n",
        "fc = csv.writer(ff)\n",
        "fc.writerow(['','Cd'])\n",
        "\n",
        "\n",
        "# 读取cd的\n",
        "(mean, std) = cal_cd_mean_std()\n",
        "\n",
        "train_files, val_files, test_files = load_files()\n",
        "\n",
        "test_data = read_obj_as_data(test_files, training=False)\n",
        "\n",
        "num_points = 63094\n",
        "device = torch.device('cuda')\n",
        "model = GNNBert().to(device)\n",
        "\n",
        "\n",
        "\n",
        "os.makedirs(submit_path, exist_ok=True)\n",
        "\n",
        "test_loader = DataLoader(test_data, batch_size=1, shuffle=False)\n",
        "model.load_state_dict(torch.load(f'model_cd.pt'))\n",
        "model.eval()\n",
        "with torch.no_grad():\n",
        "    for idx, data in tqdm(enumerate(test_loader)):\n",
        "        data = data.to(device)\n",
        "        with amp.autocast():\n",
        "            out = model(data)\n",
        "        out = out.float().view(-1).cpu().numpy() * std + mean\n",
        "        file_id = train_files[idx]\n",
        "        fc.writerow([idx,out[0]])\n",
        "        # np.save(f'{submit_path}/press_{file_id}.npy', out)\n",
        "\n",
        "\n",
        "ff.close()\n",
        "print(\"--------测试完成-------------\")\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "7YRUsrbjZ-pm"
      },
      "source": [
        "# 结果保存\n",
        "（结果在此模块下压缩成submission.zip，提交结果不可以通过外部链接下载）"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "zPkn9fEPlg4y"
      },
      "outputs": [],
      "source": [
        "!zip -r submission.zip submission"
      ]
    }
  ],
  "metadata": {
    "colab": {
      "collapsed_sections": [
        "Q0mY71_DZwFx",
        "YQE2mV-JZyCo"
      ],
      "provenance": []
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
