{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# ENSO Nino3.4 forecasting — data loading (CMIP for training, SODA for validation)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import torch\n",
    "from copy import deepcopy\n",
    "import numpy as np\n",
    "import xarray as xr\n",
    "import pandas as pd\n",
    "import torch.nn as nn\n",
    "import random\n",
    "from tqdm import tqdm\n",
    "from sklearn.model_selection import train_test_split\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "import torchvision.models as models\n",
    "import zipfile\n",
    "\n",
    "def set_seed(seed: int = 123) -> None:\n",
    "    '''Seed python, numpy and torch RNGs so runs are reproducible.'''\n",
    "    random.seed(seed)\n",
    "    np.random.seed(seed)\n",
    "    os.environ['PYTHONHASHSEED'] = str(seed)\n",
    "    torch.manual_seed(seed)\n",
    "    # Also seed CUDA and pin cuDNN; without these, GPU runs are not\n",
    "    # reproducible even though the CPU RNGs are seeded.\n",
    "    torch.cuda.manual_seed_all(seed)  # no-op when CUDA is unavailable\n",
    "    torch.backends.cudnn.deterministic = True\n",
    "    torch.backends.cudnn.benchmark = False"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CMIP数据加载成功\n",
      "Train samples: 46383, Valid samples: 1189\n"
     ]
    }
   ],
   "source": [
    "def rolling_12(sst, t300, ua, va, label, year):\n",
    "    '''Slide 12-month input / 24-month label windows over one model's data.\n",
    "\n",
    "    Args:\n",
    "        sst, t300, ua, va: DataArrays of shape (year, 36, 24, 72).\n",
    "        label: DataArray of shape (year, 36) holding Nino indices.\n",
    "        year: number of years in this slice (kept for the original call\n",
    "            signature; sizes are now inferred from the data itself).\n",
    "\n",
    "    Returns:\n",
    "        float32 arrays: sst/t300/ua/va windows of shape (n, 12, 24, 72) and\n",
    "        labels of shape (n, 24), where n = (year + 2) * 12 - 35.\n",
    "    '''\n",
    "    # Take months 0-11 of every year, then append months 12-35 of the final\n",
    "    # year (reshaped into two pseudo-years) to get one continuous monthly\n",
    "    # series of length (year + 2) * 12 — same construction as the original.\n",
    "    def _to_series(da, tail_shape):\n",
    "        head = da[:, :12].values\n",
    "        tail = da[-1, 12:].values.reshape(tail_shape)\n",
    "        return np.concatenate((head, tail), axis=0).reshape((-1,) + tail_shape[2:])\n",
    "\n",
    "    label_seq = _to_series(label, (2, 12))\n",
    "    field_seqs = [_to_series(f, (2, 12, 24, 72)) for f in (sst, t300, ua, va)]\n",
    "\n",
    "    # Each sample spans 36 months: 12 input months then 24 label months.\n",
    "    n = label_seq.shape[0] - 36 + 1\n",
    "    new_label = np.stack([label_seq[i + 12:i + 36] for i in range(n)])\n",
    "    new_sst, new_t300, new_ua, new_va = (\n",
    "        np.stack([seq[i:i + 12] for i in range(n)]) for seq in field_seqs)\n",
    "\n",
    "    return (new_sst.astype('float32'), new_t300.astype('float32'),\n",
    "            new_ua.astype('float32'), new_va.astype('float32'),\n",
    "            new_label.astype('float32'))\n",
    "\n",
    "def load_data2(path='E:/enso_round1_train_20210201/'):\n",
    "    '''Build the CMIP training set and SODA validation set.\n",
    "\n",
    "    Args:\n",
    "        path: directory containing CMIP_train/CMIP_label/SODA_train/SODA_label\n",
    "            .nc files. The default keeps the original hardcoded location;\n",
    "            pass a different directory on other machines.\n",
    "\n",
    "    Returns:\n",
    "        (train_dataset, valid_dataset) as EarthDataSet instances.\n",
    "    '''\n",
    "    CMIP_train = xr.open_dataset(path + 'CMIP_train.nc')\n",
    "    CMIP_label_ds = xr.open_dataset(path + 'CMIP_label.nc')\n",
    "\n",
    "    def _roll_model(start, year):\n",
    "        # Slice one model's consecutive years and apply the sliding window.\n",
    "        sl = slice(start, start + year)\n",
    "        return rolling_12(CMIP_train['sst'][sl], CMIP_train['t300'][sl],\n",
    "                          CMIP_train['ua'][sl], CMIP_train['va'][sl],\n",
    "                          CMIP_label_ds['nino'][sl], year=year)\n",
    "\n",
    "    # CMIP5: 15 models x 151 years; CMIP6: 17 models x 140 years starting\n",
    "    # at index 2265 (= 15 * 151).\n",
    "    specs = ([(i * 151, 151) for i in range(15)]\n",
    "             + [(2265 + i * 140, 140) for i in range(17)])\n",
    "    keys = ('sst', 't300', 'ua', 'va', 'label')\n",
    "    parts = {k: [] for k in keys}\n",
    "    for start, year in specs:\n",
    "        for key, arr in zip(keys, _roll_model(start, year)):\n",
    "            parts[key].append(arr)\n",
    "\n",
    "    CMIP_sst = np.concatenate(parts['sst'], axis=0)\n",
    "    CMIP_t300 = np.concatenate(parts['t300'], axis=0)\n",
    "    CMIP_ua = np.concatenate(parts['ua'], axis=0)\n",
    "    CMIP_va = np.concatenate(parts['va'], axis=0)\n",
    "    CMIP_nino = np.concatenate(parts['label'], axis=0)\n",
    "    del parts\n",
    "\n",
    "    # Drop samples containing missing values. ua has the most NaNs, so\n",
    "    # filtering on ua alone also removes every bad sample in the other\n",
    "    # fields (same assumption as the original code).\n",
    "    keep = ~np.isnan(CMIP_ua).any(axis=(1, 2, 3))\n",
    "    CMIP_sst, CMIP_t300 = CMIP_sst[keep], CMIP_t300[keep]\n",
    "    CMIP_ua, CMIP_va, CMIP_nino = CMIP_ua[keep], CMIP_va[keep], CMIP_nino[keep]\n",
    "\n",
    "    print('CMIP数据加载成功')\n",
    "\n",
    "    # SODA reanalysis (100 years) is held out as the validation set.\n",
    "    SODA_train = xr.open_dataset(path + 'SODA_train.nc')\n",
    "    SODA_label = xr.open_dataset(path + 'SODA_label.nc')\n",
    "    SODA_sst, SODA_t300, SODA_ua, SODA_va, SODA_nino = rolling_12(\n",
    "        SODA_train['sst'], SODA_train['t300'], SODA_train['ua'],\n",
    "        SODA_train['va'], SODA_label['nino'], year=100)\n",
    "\n",
    "    print('Train samples: {}, Valid samples: {}'.format(len(CMIP_nino), len(SODA_nino)))\n",
    "\n",
    "    dict_train = {'sst': CMIP_sst, 't300': CMIP_t300, 'ua': CMIP_ua,\n",
    "                  'va': CMIP_va, 'label': CMIP_nino}\n",
    "    dict_valid = {'sst': SODA_sst, 't300': SODA_t300, 'ua': SODA_ua,\n",
    "                  'va': SODA_va, 'label': SODA_nino}\n",
    "    return EarthDataSet(dict_train), EarthDataSet(dict_valid)\n",
    "\n",
    "class EarthDataSet(Dataset):\n",
    "    '''Dict-of-arrays Dataset yielding ((sst, t300, ua, va), label) samples.'''\n",
    "\n",
    "    _FEATURE_KEYS = ('sst', 't300', 'ua', 'va')\n",
    "\n",
    "    def __init__(self, data):\n",
    "        # `data` maps 'sst'/'t300'/'ua'/'va'/'label' to equal-length arrays.\n",
    "        self.data = data\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.data['sst'])\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        features = tuple(self.data[k][idx] for k in self._FEATURE_KEYS)\n",
    "        return features, self.data['label'][idx]\n",
    "    \n",
    "# Materialize the datasets: CMIP for training, SODA for validation.\n",
    "train_dataset, valid_dataset = load_data2()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
