{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# -*- coding: utf-8 -*-\n",
    "\"\"\"\n",
    "Ensemble RMSE preprocessing.\n",
    "\n",
    "y : forecast initialization time\n",
    "x : forecast lead time\n",
    "For each typhoon, compute the RMSE across ensemble member ids and save it\n",
    "as a NetCDF DataArray with dims ['variable', 'start_time', 'lead_time'].\n",
    "\"\"\"\n",
    "\n",
    "import pandas as pd\n",
    "import os,glob,math\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import matplotlib.lines as mlines\n",
    "import matplotlib.dates as mdates\n",
    "import cartopy.crs as ccrs\n",
    "from cartopy.mpl.ticker import LongitudeFormatter,LatitudeFormatter\n",
    "import cartopy.feature as cfeature\n",
    "import shapely.geometry as sgeom\n",
    "from datetime import datetime,timedelta\n",
    "from tqdm import tqdm\n",
    "from global_land_mask import globe\n",
    "from typlot.scripts.gsj_typhoon import tydat,see,count_rapidgrow,tydat_CMA,average_datetime,split_str_id,load_land_polygons,detect_landfall\n",
    "from geopy.distance import geodesic\n",
    "import matplotlib.ticker as ticker\n",
    "import seaborn as sns\n",
    "import xarray as xr\n",
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "from typlot.config.global_config import *\n",
    "\n",
    "ini_time_mode = ['00','12']  # keep only 00Z and 12Z initializations\n",
    "names = ['mojie_28','dusurui_16','gaemi_09','haikui_38','kangni_54','shantuo_44','saola_25','koinu_49']\n",
    "\n",
    "# names = ['dusurui_16']\n",
    "pic_save_dir = ''  # output directory for the NetCDF files ('' = current directory)\n",
    "tynames,tyids = split_str_id(names)\n",
    "draw_obs_opt = True\n",
    "obs_baseline='land'  # 'land' or 'RI'\n",
    "RIstd = 7  # RI parameter passed to tydat\n",
    "tyrmse = {}\n",
    "show = True\n",
    "track_id = np.arange(1,52)\n",
    "# NOTE(review): draw_obs_opt, obs_baseline, tyrmse, show and track_id appear\n",
    "# unused in this cell -- confirm they are needed by later cells before removing.\n",
    "\n",
    "\n",
    "for ty,tyid in zip(tynames,tyids):\n",
    "    # Initialization: locate the ensemble directory and load CMA observations\n",
    "    directory  = os.path.join(global_ensdir,f'{ty}_{tyid}')\n",
    "    dates_name = sorted(os.listdir(directory))\n",
    "    dates_name = [i for i in dates_name if i[-2:] in ini_time_mode ]\n",
    "    obs_path   = os.path.join( global_obsdir, f'{ty}_CMAobs.txt')\n",
    "    tyobs = tydat_CMA(obs_path)\n",
    "\n",
    "    #### Find the longest lead time; it determines the DataArray size\n",
    "    leadtime_list=[]\n",
    "    # Loop over initialization times\n",
    "    for date_name in tqdm(dates_name,total=len(dates_name),desc='Filtering max leadtime'):\n",
    "        dir_date = os.path.join(directory,date_name)\n",
    "        name_ensembles = os.listdir(dir_date)\n",
    "        path_ensembles = [os.path.join(dir_date,i) for i in name_ensembles if i.startswith('TRACK')]\n",
    "        # Loop over ensemble members\n",
    "        for path in path_ensembles:\n",
    "            t_id = int(path.split('_')[-1])\n",
    "            if t_id==0:\n",
    "                continue # ERA5 case (member id 0 is not a forecast)\n",
    "            else:\n",
    "                tyens = tydat(path,RIstd)\n",
    "                ini_time = datetime.strptime(date_name, '%Y%m%d%H')\n",
    "                end_time = tyens.time[-1]\n",
    "                leadtime_list.append(end_time-ini_time)\n",
    "\n",
    "    max_leadtime = max(leadtime_list)\n",
    "    # Number of 6-hourly lead times, including lead time 0 (hence the +1)\n",
    "    n_leadtime = np.ceil(max_leadtime / timedelta(hours=6)).astype(int)+1\n",
    "\n",
    "\n",
    "    #### Build the DataArray, dims=['variable','start_time','lead_time']\n",
    "    lead_time = 6*np.arange(n_leadtime)  # lead time in hours, 6 h per index\n",
    "    variable = ['umax','pmin','dist']\n",
    "    da = xr.DataArray(np.nan,dims=['variable','start_time','lead_time'],\n",
    "                  coords={'variable':variable ,'start_time':dates_name ,'lead_time':lead_time})\n",
    "\n",
    "    #### Fill da element by element\n",
    "    # Loop over start_time\n",
    "    for date_name in tqdm(dates_name,total=len(dates_name),desc='Calcing RMSE on members'):\n",
    "        dir_date = os.path.join(directory,date_name)\n",
    "        name_ensembles = os.listdir(dir_date)\n",
    "        path_ensembles = [os.path.join(dir_date,i) for i in name_ensembles if i.startswith('TRACK')]\n",
    "        # Loop over lead_time\n",
    "        for i in lead_time:\n",
    "            v_ens_list,p_ens_list = [],[]\n",
    "            v_obs_list,p_obs_list = [],[]\n",
    "            lat_ens_list,lat_obs_list = [],[]\n",
    "            lon_ens_list,lon_obs_list = [],[]\n",
    "            leadtime = datetime.strptime(date_name,'%Y%m%d%H') + timedelta(hours=int(i))\n",
    "            # Loop over all ensemble members\n",
    "            for path in path_ensembles:\n",
    "                t_id = int(path.split('_')[-1])\n",
    "                if t_id==0:\n",
    "                    continue # reanalysis case (member id 0)\n",
    "                else:\n",
    "                    tyens = tydat(path,RIstd)\n",
    "                    # Keep only lead times present in both the ensemble and the obs\n",
    "                    if (leadtime in tyobs.time) and (leadtime in tyens.time):\n",
    "                        v_ens_list.append(tyens.umax[tyens.time==leadtime][0])\n",
    "                        v_obs_list.append(tyobs.umax[tyobs.time==leadtime][0])\n",
    "                        p_ens_list.append(tyens.pmin[tyens.time==leadtime][0])\n",
    "                        p_obs_list.append(tyobs.pmin[tyobs.time==leadtime][0])\n",
    "                        lat_ens_list.append(tyens.lat[tyens.time==leadtime][0])\n",
    "                        lat_obs_list.append(tyobs.lat[tyobs.time==leadtime][0])\n",
    "                        lon_ens_list.append(tyens.lon[tyens.time==leadtime][0])\n",
    "                        lon_obs_list.append(tyobs.lon[tyobs.time==leadtime][0])\n",
    "                    else:\n",
    "                        v_ens_list.append(np.nan)\n",
    "                        v_obs_list.append(np.nan)\n",
    "                        p_ens_list.append(np.nan)\n",
    "                        p_obs_list.append(np.nan)\n",
    "                        lat_ens_list.append(np.nan)\n",
    "                        lat_obs_list.append(np.nan)\n",
    "                        lon_ens_list.append(np.nan)\n",
    "                        lon_obs_list.append(np.nan)\n",
    "\n",
    "            v_ens,p_ens,v_obs,p_obs = np.array(v_ens_list),np.array(p_ens_list),np.array(v_obs_list),np.array(p_obs_list)\n",
    "            lat_ens,lat_obs,lon_ens,lon_obs = np.array(lat_ens_list),np.array(lat_obs_list),np.array(lon_ens_list),np.array(lon_obs_list)\n",
    "            # Track error in km; the product is NaN if any coordinate is NaN, so geodesic is skipped\n",
    "            dist_rmse_list = [geodesic((i,j),(k,l)).kilometers if not math.isnan(i*j*k*l) else np.nan for i,j,k,l in zip(lat_ens,lon_ens,lat_obs,lon_obs) ]\n",
    "\n",
    "            #### RMSE at this lead time across members\n",
    "            umax_rmse = np.sqrt(np.nanmean((v_ens-v_obs)**2))\n",
    "            pmin_rmse = np.sqrt(np.nanmean((p_ens-p_obs)**2))\n",
    "            dist_rmse = np.sqrt(np.nanmean((np.array(dist_rmse_list))**2))\n",
    "\n",
    "            #### Write into the DataArray\n",
    "            da.loc['umax',date_name,i] = umax_rmse\n",
    "            da.loc['pmin',date_name,i] = pmin_rmse\n",
    "            da.loc['dist',date_name,i] = dist_rmse\n",
    "\n",
    "    da.to_netcdf(os.path.join(pic_save_dir,f'{ty}_pmin_umax_dist.nc'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_RI_init_time(tyobs, da):\n",
    "    \"\"\"\n",
    "    Map each observed rapid-intensification (RI) moment onto a forecast\n",
    "    initialization time available in ``da``.\n",
    "\n",
    "    Observed RI moments rarely fall exactly on 00/12 UTC initialization\n",
    "    times, so each one is mapped to the nearest start_time at or after it.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    tyobs : observation object exposing ``umax`` and ``time`` attributes\n",
    "    da : xarray.DataArray whose ``start_time`` coord holds '%Y%m%d%H' strings\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    list : start_time strings, one per observed RI moment\n",
    "    \"\"\"\n",
    "    from datetime import datetime\n",
    "    index = count_rapidgrow(7, tyobs.umax, tyobs.time).astype(bool)  # 7 mirrors RIstd in the first cell\n",
    "    obs_RI_moments = tyobs.time[index]\n",
    "    # Convert da.start_time from strings to datetime objects\n",
    "    da_start_times = [datetime.strptime(str(t), \"%Y%m%d%H\") for t in da.start_time.values]\n",
    "    # Find the nearest RI initialization time in da\n",
    "    da_RI_moments = []\n",
    "    for obs_time in obs_RI_moments:\n",
    "        # Signed offset from the observed RI moment to each start time\n",
    "        time_diffs = np.array([(dt - obs_time).total_seconds() for dt in da_start_times])\n",
    "        # Discard start times that precede the RI moment\n",
    "        time_diffs[time_diffs < 0] = np.inf\n",
    "        # Index of the smallest non-negative offset.\n",
    "        # NOTE(review): if the RI moment is later than every start_time, all\n",
    "        # entries are inf and argmin falls back to index 0 -- confirm intended.\n",
    "        min_idx = np.argmin(time_diffs)\n",
    "        # Append the corresponding start_time string\n",
    "        da_RI_moments.append(da.start_time.values[min_idx])\n",
    "    return da_RI_moments\n",
    "\n",
    "def ceil_timedelta(td):\n",
    "    \"\"\"将 Timedelta 向上取整到天\"\"\"\n",
    "    return pd.Timedelta(days=int(np.ceil(td.total_seconds() / 86400)))\n",
    "\n",
    "\n",
    "def gen_RI_scatter(tyobs,da):\n",
    "    \"\"\"\n",
    "    Build scatter-point indices that mark observed RI moments on the heatmap.\n",
    "\n",
    "    For every RI initialization time, walk backwards through earlier\n",
    "    initialization times and record each (lead_time, start_time) pair at\n",
    "    which the RI moment still lies inside the forecast range.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    yindex, xindex : integer index arrays into da.start_time / da.lead_time\n",
    "    \"\"\"\n",
    "    # Time-axis geometry derived from da ('%Y%m%d%H' strings plus '00' minutes)\n",
    "    init_times = [pd.Timestamp(i+'00') for i in da.start_time.values]\n",
    "    dy = init_times[1]-init_times[0]  # spacing between initialization times\n",
    "    # NOTE(review): dx is computed but never used\n",
    "    dx = pd.Timedelta(da.lead_time.values[1]-da.lead_time.values[0],'h')\n",
    "\n",
    "    RI_ini_times = [pd.Timestamp(i+'00') for i in get_RI_init_time(tyobs, da)]\n",
    "    x = []\n",
    "    y = []\n",
    "    for RI_ini_time in RI_ini_times:\n",
    "        ini_time = RI_ini_time\n",
    "        # Walk back while the RI moment is still within the maximum lead time\n",
    "        while ini_time>=init_times[0] and RI_ini_time-ini_time <= pd.Timedelta(da.lead_time.values[-1],'h'):\n",
    "            x.append((RI_ini_time-ini_time))\n",
    "            y.append(ini_time)\n",
    "            ini_time -= dy\n",
    "    # Round the lead-time offsets up to whole days\n",
    "    x = [ceil_timedelta(xx) for xx in x]\n",
    "\n",
    "    # Convert times to integer axis indices\n",
    "    yindex = np.searchsorted(init_times,np.array(y))\n",
    "    lead_time_timedelta = [pd.Timedelta(i,'h') for i in da.lead_time.values]\n",
    "    xindex = np.searchsorted(lead_time_timedelta,np.array(x))\n",
    "    return yindex,xindex\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "show = True\n",
    "# Uses da, variable, ty and tyobs produced by the first cell, and\n",
    "# gen_RI_scatter from the previous cell.\n",
    "for var in variable:\n",
    "    # 1) Load the RMSE matrix for this variable\n",
    "    data = da.loc[var].values.astype(float)      # cast to float to avoid int issues\n",
    "    data = np.where(data > 10000, np.nan, data)  # mask unphysical outliers\n",
    "    data = np.ma.masked_invalid(data)            # NaN -> masked\n",
    "\n",
    "    # 2) Coordinates\n",
    "    lead_hours = da.lead_time.values   # forecast lead time (hours)\n",
    "    times      = da.start_time.values  # initialization time strings\n",
    "\n",
    "    # Convert lead time to days and keep a tick only at whole days\n",
    "    lead_days = lead_hours / 24\n",
    "    day_indices = []\n",
    "    day_labels = []\n",
    "\n",
    "    for i, day in enumerate(lead_days):\n",
    "        if day.is_integer():  # whole days only\n",
    "            day_indices.append(i)\n",
    "            day_labels.append(f'{int(day)}d')\n",
    "\n",
    "    # Fallback: one tick every 24 h. Derive the index step from the actual\n",
    "    # lead-time spacing (a fixed step of 24 would skip 24 *indices*, i.e.\n",
    "    # 144 h at 6-hourly spacing). Forecasts normally contain whole days.\n",
    "    if not day_indices:\n",
    "        dt_hours = lead_hours[1] - lead_hours[0] if len(lead_hours) > 1 else 24\n",
    "        step = max(1, int(round(24 / dt_hours)))  # indices per 24 hours\n",
    "        day_indices = list(range(0, len(lead_hours), step))\n",
    "        day_labels = [f'{int(lead_hours[i]/24)}d' for i in day_indices]\n",
    "\n",
    "    # 3) Plot\n",
    "    fig, ax = plt.subplots(figsize=(12, 8))\n",
    "\n",
    "    # Colour range depends on the variable\n",
    "    if var == 'pmin':\n",
    "        vmin, vmax = 0, 50\n",
    "        cbar_label = 'RMSE (hPa)'\n",
    "    elif var == 'umax':\n",
    "        vmin, vmax = 0, 30\n",
    "        cbar_label = 'RMSE (m/s)'\n",
    "    else:\n",
    "        vmin, vmax = None, None\n",
    "        cbar_label = 'RMSE'\n",
    "\n",
    "    im = ax.imshow(\n",
    "        data,\n",
    "        aspect='auto',\n",
    "        interpolation='none',\n",
    "        cmap='viridis',\n",
    "        origin='lower',\n",
    "        vmin=vmin,\n",
    "        vmax=vmax\n",
    "    )\n",
    "\n",
    "    # Initialization times on the y axis\n",
    "    ax.set_yticks(np.arange(len(times)))\n",
    "    ax.set_yticklabels(times, rotation=0, fontsize=8)\n",
    "\n",
    "    # Whole-day ticks on the x axis\n",
    "    ax.set_xticks(day_indices)\n",
    "    ax.set_xticklabels(day_labels, fontsize=8)\n",
    "\n",
    "    ax.set_title(f'{ty} — {var} RMSE Heatmap', fontsize=16, fontweight='bold')\n",
    "    ax.set_ylabel('Forecast Initialization Time', fontsize=14)\n",
    "    ax.set_xlabel('Forecast Lead Time', fontsize=14)\n",
    "\n",
    "    cbar = fig.colorbar(im, ax=ax)\n",
    "    cbar.set_label(cbar_label, fontsize=12, fontweight='bold')\n",
    "\n",
    "    # Mark observed RI moments on the heatmap\n",
    "    y,x = gen_RI_scatter(tyobs,da)\n",
    "    ax.scatter(x,y,color='red')\n",
    "\n",
    "    plt.tight_layout()\n",
    "\n",
    "    if show:\n",
    "        plt.show()\n",
    "    else:\n",
    "        plt.savefig(\n",
    "            os.path.join(global_picdir,'RMSE' ,f'heatmap_{ty}_{var}_lead_start_days.png'),\n",
    "            dpi=600\n",
    "        )\n",
    "        plt.close()\n",
    "\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "wmq",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
