{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "70457346",
   "metadata": {},
   "source": [
    "## Import"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e23f723d",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "import matplotlib.pyplot as plt\n",
    "# plt.rcParams['font.family'] = ['sans-serif']\n",
    "# plt.rcParams['font.sans-serif'] = ['SimHei']\n",
    "plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']\n",
    "# plt.rcParams['font.family'] = 'Heiti TC'\n",
    "plt.rcParams['axes.unicode_minus'] = False\n",
    "from mpl_toolkits.mplot3d import Axes3D\n",
    "from matplotlib import rc\n",
    "from matplotlib import dates\n",
    "rc('mathtext', default='regular')\n",
    "# 显示所有变量/最后一个变量\n",
    "from IPython.core.interactiveshell import InteractiveShell\n",
    "InteractiveShell.ast_node_interactivity = 'all'\n",
    "# InteractiveShell.ast_node_interactivity = 'last'\n",
    "import pandas as pd\n",
    "pd.set_option('display.max_rows', None)\n",
    "pd.set_option('display.max_columns', None)\n",
    "import numpy as np\n",
    "# conda install scikit-learn\n",
    "from sklearn.preprocessing import MinMaxScaler\n",
    "from keras.models import Sequential\n",
    "from keras.layers import LSTM, Dense, Activation,Dropout\n",
    "from scipy import stats\n",
    "from pylab import *\n",
    "from sklearn import metrics\n",
    "from sklearn.metrics import silhouette_score, silhouette_samples, r2_score\n",
    "from sklearn.cluster import KMeans\n",
    "from keras.models import load_model\n",
    "from sklearn.ensemble import RandomForestRegressor\n",
    "from xgboost import XGBRegressor\n",
    "from sklearn.linear_model import LinearRegression\n",
    "# from fitter import Fitter\n",
    "from sklearn import model_selection, metrics\n",
    "from sklearn.model_selection import GridSearchCV\n",
    "import xgboost as xgb\n",
    "from sklearn.model_selection import train_test_split\n",
    "from xgboost import XGBRegressor\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "import numpy as np\n",
    "import numpy.random as rd\n",
    "from sklearn import svm\n",
    "import seaborn as sns\n",
    "import datetime\n",
    "from time import time\n",
    "import chinese_calendar\n",
    "import calendar\n",
    "# pip install EMD-signal\n",
    "from PyEMD import EEMD\n",
    "from minepy import MINE\n",
    "from sklearn.neural_network import MLPRegressor"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0aaf737e",
   "metadata": {},
   "source": [
    "# 数据读取\n",
    "## 全局定义"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "97c33aa4",
   "metadata": {},
   "outputs": [],
   "source": [
    "work_dir = os.getcwd()\n",
    "load_mped_ids = [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 21, 22, 23, 24, 25, 26, 27,\n",
    "    28, 29, 30, 31, 32, 34, 37, 38, 39, 40, 44, 46, 48, 49, 50]\n",
    "# load表中\n",
    "pv_mped_ids = [41, 51]\n",
    "# pv表中\n",
    "pv_mpedk_ids = [74, 75, 76, 77, 78]\n",
    "\n",
    "localwea_itemname = ['风速', '风向', '环境温度', '组件温度值', '湿度', '总辐射值']\n",
    "args_localwea = ['year', 'month', 'day', 'hour', 'minutes', 'wind_speed',\n",
    "    'wind_direction', 'temperature', 'equipment_temperature', 'wet', 'sun']\n",
    "\n",
    "# 负荷、天气原始数据表参数\n",
    "args_p = ['year', 'month', 'day', 'hour', 'minutes', 'p']\n",
    "args_wea = ['year', 'month', 'day', 'hour', 'minutes', 'temperature', 'cloud', 'sun', 'uv', 'water', 'wet']\n",
    "\n",
    "# 负荷、光伏数据参数\n",
    "args_train = ['date', 'p', 'temperature', 'cloud', 'sun', 'uv', 'water', 'wet']\n",
    "\n",
    "# time_index = ['2017/1/2', '2017/11/25', '2017/11/20', '2017/11/25', '2017/10/16', '2017/1/2']\n",
    "time_index = ['2018/1/2', '2018/1/20', '2018/2/10',\n",
    "    '2018/3/1', '2018/3/20', '2018/4/10']\n",
    "    \n",
    "time_str1 = '20220101'\n",
    "time_str2 = '20220430'"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "11b10982",
   "metadata": {},
   "source": [
    "## 天气数据读取"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b1cbb55e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 本地气象文件处理\n",
    "def localweafile_handle(file):\n",
    "    data = pd.read_excel(file)\n",
    "    localwea_data = {x:[None for i in range(96)] for x in args_localwea}\n",
    "    for i, row in data.iterrows():\n",
    "        item_value = float(row['item_value'])\n",
    "        time = pd.to_datetime(row['value_time'])\n",
    "        year = time.year\n",
    "        month = time.month\n",
    "        day = time.day\n",
    "        hour = time.hour\n",
    "        minutes = hour * 60 + time.minute\n",
    "        index96 = minutes // 15\n",
    "\n",
    "        localwea_data['year'][index96] = year\n",
    "        localwea_data['month'][index96] = month\n",
    "        localwea_data['day'][index96] = day\n",
    "        localwea_data['hour'][index96] = hour\n",
    "        localwea_data['minutes'][index96] = minutes\n",
    "        for j, item_name in enumerate(localwea_itemname):\n",
    "            if row['item_name'] == item_name and localwea_data[args_localwea[j+5]][index96] == None:\n",
    "                localwea_data[args_localwea[j+5]][index96] = item_value\n",
    "    \n",
    "    # 统计数据缺情况\n",
    "    num = 0\n",
    "    for i, key in enumerate(localwea_data):\n",
    "        for j in range(96):\n",
    "            if localwea_data[key][j] == None:\n",
    "                num += 1\n",
    "    if num != 0:\n",
    "        print(file.split('/')[-1] + '本地气象缺失数据个数：' + str(num))\n",
    "\n",
    "    return localwea_data\n",
    "\n",
    "def get_localweadata(data_dir, time_str1, time_str2):\n",
    "    file_list = os.listdir(data_dir)\n",
    "    # file_list.remove('.DS_Store')\n",
    "    file_list = sorted(file_list, key=lambda x: pd.to_datetime(x.split('.')[0]))\n",
    "    localwea_data = [[] for j in range(len(args_localwea))]\n",
    "    for f in file_list:\n",
    "        time = pd.to_datetime(f.split('.')[0])\n",
    "        time1 = pd.to_datetime(time_str1.split('.')[0])\n",
    "        time2 = pd.to_datetime(time_str2.split('.')[0])\n",
    "        if time >= time1 and time <= time2:\n",
    "            file = data_dir + '/' + f\n",
    "            input_localwea_data = localweafile_handle(file)\n",
    "            for i, key in enumerate(input_localwea_data):\n",
    "                for j, value in enumerate(input_localwea_data[key]):\n",
    "                    localwea_data[i].append(value)\n",
    "    \n",
    "    # 数据清洗\n",
    "    nonenum_afterclean = 0\n",
    "    for i, valuei in enumerate(localwea_data):\n",
    "        for j, valuej in enumerate(valuei):\n",
    "            value_replace = None\n",
    "            for k in range(4):\n",
    "                if j > k and valuei[j-k] != None:\n",
    "                    value_replace = valuei[j-k]\n",
    "                    break\n",
    "                elif j < len(valuei)-k and valuei[j+k] != None:\n",
    "                    value_replace = valuei[j+k]\n",
    "                    break\n",
    "            if valuej == None:\n",
    "                localwea_data[i][j] = value_replace\n",
    "            if valuej == None and value_replace == None:\n",
    "                nonenum_afterclean += 1\n",
    "    if nonenum_afterclean != 0:\n",
    "        print(f'数据清洗后仍存在空数据{nonenum_afterclean}，请调整算法！' )\n",
    "    else:\n",
    "        print('数据清洗已完成，缺失数据已补齐！')\n",
    "    localwea_data = pd.DataFrame({'year':localwea_data[0], 'month':localwea_data[1],\n",
    "        'day':localwea_data[2], 'hour':localwea_data[3], 'minutes':localwea_data[4],\n",
    "        'wind_speed':localwea_data[5], 'wind_direction':localwea_data[6],\n",
    "        'temperature':localwea_data[7], 'equipment_temperature':localwea_data[8],\n",
    "        'wet':localwea_data[9], 'sun':localwea_data[10]})\n",
    "    return localwea_data\n",
    "\n",
    "# 气象数据文件处理\n",
    "def weafile_handle(file):\n",
    "    data = pd.read_excel(file)\n",
    "    date = datetime.datetime.strptime(file.split('/')[-1].split('.')[0], '%Y%m')\n",
    "    wea_data = {x:[] for x in args_wea}\n",
    "    for i in range(len(data)):\n",
    "        time = pd.to_datetime(str(data['日期'][i]) + ' ' + str(data['时间(UTC)'][i]), utc=True)\\\n",
    "            .tz_convert('Asia/Shanghai')\n",
    "        year = time.year\n",
    "        month = time.month\n",
    "        day = time.day\n",
    "        hour = time.hour\n",
    "        temperature = float(data['气温2m(℃)'][i])\n",
    "        cloud = float(data['总云量(tcc)'][i])\n",
    "        sun = float(data['总日照强度(down,J/m2)'][i])\n",
    "        uv = float(data['紫外强度(J/m2)'][i])\n",
    "        water = float(data['降水量(mm)'][i])\n",
    "        wet = float(data['相对湿度(%)'][i])\n",
    "        for j in range(4):\n",
    "            wea_data['year'].append(year)\n",
    "            wea_data['month'].append(month)\n",
    "            wea_data['day'].append(day)\n",
    "            wea_data['hour'].append(hour)\n",
    "            wea_data['minutes'].append(j * 15)\n",
    "            wea_data['temperature'].append(temperature)\n",
    "            wea_data['cloud'].append(cloud)\n",
    "            wea_data['sun'].append(sun)\n",
    "            wea_data['uv'].append(uv)\n",
    "            wea_data['water'].append(water)\n",
    "            wea_data['wet'].append(wet)\n",
    "    return wea_data\n",
    "\n",
    "def get_weadata(data_dir, time_str1, time_str2):\n",
    "    file_list = os.listdir(data_dir)\n",
    "    # file_list.remove('.DS_Store')\n",
    "    file_list = sorted(file_list, key=lambda x: datetime.datetime.strptime(x.split('.')[0], '%Y%m'))\n",
    "    data = [[] for j in range(len(args_wea))]\n",
    "\n",
    "    wea_data = {x:[] for x in args_wea}\n",
    "    for f in file_list:\n",
    "        file = data_dir + '/' + f\n",
    "        temp_wea_data = weafile_handle(file)\n",
    "        for key in temp_wea_data:\n",
    "            wea_data[key] += temp_wea_data[key]\n",
    "\n",
    "    time1 = pd.to_datetime(time_str1.split('.')[0])\n",
    "    time2 = pd.to_datetime(time_str2.split('.')[0])\n",
    "    for i in range(len(wea_data['year'])):\n",
    "        year = wea_data['year'][i]\n",
    "        month = wea_data['month'][i]\n",
    "        day = wea_data['day'][i]\n",
    "        time = pd.to_datetime(str(year) + ' ' + str(month) + ' ' + str(day))\n",
    "        if time >= time1 and time <= time2:\n",
    "            for index, value in enumerate(args_wea):\n",
    "                data[index].append(wea_data[value][i])\n",
    "    data = pd.DataFrame({'year':data[0], 'month':data[1], 'day':data[2], 'hour':data[3], 'minutes':data[4],\n",
    "        'temperature':data[5], 'cloud':data[6], 'sun':data[7], 'uv':data[8], 'water':data[9], 'wet':data[10]})\n",
    "    return data\n",
    "\n",
    "# 读取气象数据\n",
    "localwea_dir = work_dir + '/../Data/气象'\n",
    "localwea_data = get_localweadata(localwea_dir, time_str1, time_str2)\n",
    "localwea_data.head()\n",
    "wea_dir = work_dir + '/../Data/气象（网络）'\n",
    "wea_data = get_weadata(wea_dir, time_str1, time_str2)\n",
    "wea_data.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b8f0d9c6",
   "metadata": {},
   "source": [
    "## 负荷数据读取"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d7bac734",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 负荷数据文件处理\n",
    "def loadfile_handle(file):\n",
    "    data = pd.read_excel(file)\n",
    "    month = pd.to_datetime(file.split('/')[-1].split('.')[0]).month\n",
    "    day = pd.to_datetime(file.split('/')[-1].split('.')[0]).day\n",
    "    # 以15分钟为间隔检查数据是否存在\n",
    "    # 设备数据\n",
    "    load_datas = [[{ x:None for x in args_p } for i in range(96)] for j in range(len(load_mped_ids))]\n",
    "    pv_datas = [[{x:None for x in args_p} for i in range(96)] for j in range(len(pv_mped_ids))]\n",
    "    for i, row in data.iterrows():\n",
    "        p = float(row['item_value'])\n",
    "        time = pd.to_datetime(row['value_time'])\n",
    "        minutes = time.hour * 60 + time.minute\n",
    "        index96 = minutes // 15\n",
    "        for j, value in enumerate(load_mped_ids):\n",
    "            if row['mped_id'] == value and load_datas[j][index96]['p'] == None:\n",
    "                load_datas[j][index96] = {'p':p, 'hour':time.hour, 'minutes':(index96 % 4) * 15,\n",
    "                    'year':2022, 'month':month, 'day':day}\n",
    "        for j, value in enumerate(pv_mped_ids):\n",
    "            if row['mped_id'] == value and pv_datas[j][index96]['p'] == None:\n",
    "                pv_datas[j][index96] = {'p':p, 'hour':time.hour, 'minutes':(index96 % 4) * 15,\n",
    "                    'year':2022, 'month':month, 'day':day}\n",
    "\n",
    "    # 统计数据缺失情况\n",
    "    num = 0\n",
    "    for i, value in enumerate(load_datas):\n",
    "        for j in range(96):\n",
    "            if value[j]['p'] == None:\n",
    "                num += 1\n",
    "                load_datas[i][j] = {'p':None, 'hour':(j + 1) // 4, 'minutes':((j + 1) % 4) * 15,\n",
    "                    'year':2022, 'month':month, 'day':day}\n",
    "    if num != 0:\n",
    "        print(file.split('/')[-1] + '负荷缺失数据个数：' + str(num))\n",
    "    num = 0\n",
    "    for i, value in enumerate(pv_datas):\n",
    "        for j in range(96):\n",
    "            if value[j]['p'] == None:\n",
    "                num += 1\n",
    "                pv_datas[i][j] = {'p':None, 'hour':(i + 1) // 4, 'minutes':((i + 1) % 4) * 15,\n",
    "                    'year':2022, 'month':month, 'day':day}\n",
    "    if num != 0:\n",
    "        print(file.split('/')[-1] + '光伏缺失数据个数：' + str(num))\n",
    "\n",
    "    return load_datas, pv_datas\n",
    "\n",
    "def get_data(data_dir, time_str1, time_str2):\n",
    "    file_list = os.listdir(data_dir)\n",
    "    # file_list.remove('.DS_Store')\n",
    "    file_list = sorted(file_list, key=lambda x: pd.to_datetime(x.split('.')[0]))\n",
    "    load_data = [[[] for j in range(len(args_p))] for i in range(len(load_mped_ids))]\n",
    "    pv_data = [[[] for j in range(len(args_p))] for i in range(len(pv_mped_ids))]\n",
    "    for f in file_list:\n",
    "        time = pd.to_datetime(f.split('.')[0])\n",
    "        time1 = pd.to_datetime(time_str1.split('.')[0])\n",
    "        time2 = pd.to_datetime(time_str2.split('.')[0])\n",
    "        if time >= time1 and time <= time2:\n",
    "            file = data_dir + '/' + f\n",
    "            input_load_datas, input_pv_datas = loadfile_handle(file)\n",
    "            for i, mped_data in enumerate(input_load_datas):\n",
    "                for j, value in enumerate(mped_data):\n",
    "                    load_data[i][0].append(value['year'])\n",
    "                    load_data[i][1].append(value['month'])\n",
    "                    load_data[i][2].append(value['day'])\n",
    "                    load_data[i][3].append(value['hour'])\n",
    "                    load_data[i][4].append(value['minutes'])\n",
    "                    load_data[i][5].append(value['p'])\n",
    "            for i, mped_data in enumerate(input_pv_datas):\n",
    "                for j, value in enumerate(mped_data):\n",
    "                    pv_data[i][0].append(value['year'])\n",
    "                    pv_data[i][1].append(value['month'])\n",
    "                    pv_data[i][2].append(value['day'])\n",
    "                    pv_data[i][3].append(value['hour'])\n",
    "                    pv_data[i][4].append(value['minutes'])\n",
    "                    pv_data[i][5].append(value['p'])\n",
    "    \n",
    "    # 数据清洗\n",
    "    nonenum_afterclean = 0\n",
    "    # 负荷数据清洗\n",
    "    nrows = len(load_data[0][5])\n",
    "    for i in range(nrows):\n",
    "        for j, id in enumerate(load_mped_ids):\n",
    "            # 20220217、20220218缺失数据较多，用前一日数据补齐\n",
    "            if load_data[j][1][i] == 2\\\n",
    "                and (load_data[j][2][i] == 17 or load_data[j][2][i] == 18)\\\n",
    "                and load_data[j][5][i] == None:\n",
    "                load_data[j][5][i] = load_data[j][5][i-96]\n",
    "            # 缺失数据用前后1小时最近的数据进行补全\n",
    "            value_replace = None\n",
    "            for k in range(4):\n",
    "                if i > k and load_data[j][5][i-k] != None:\n",
    "                    value_replace = load_data[j][5][i-k]\n",
    "                    break\n",
    "                elif j < nrows-k and load_data[j][5][i+k] != None:\n",
    "                    value_replace = load_data[j][5][i+k]\n",
    "                    break\n",
    "            if load_data[j][5][i] == None:\n",
    "                load_data[j][5][i] = value_replace\n",
    "            if load_data[j][5][i] == None and value_replace == None:\n",
    "                nonenum_afterclean += 1\n",
    "    # 光伏数据清洗\n",
    "    nrows = len(pv_data[0][5])\n",
    "    for i in range(nrows):\n",
    "        for j, id in enumerate(pv_mped_ids):\n",
    "            # 20220217、20220218缺失数据较多，用前一日数据补齐\n",
    "            if pv_data[j][1][i] == 2\\\n",
    "                and (pv_data[j][2][i] == 17 or pv_data[j][2][i] == 18)\\\n",
    "                and pv_data[j][5][i] == None:\n",
    "                pv_data[j][5][i] = pv_data[j][5][i-96]\n",
    "            # 缺失数据用前后1小时最近的数据进行补全\n",
    "            value_replace = None\n",
    "            for k in range(4):\n",
    "                if i > k and pv_data[j][5][i-k] != None:\n",
    "                    value_replace = pv_data[j][5][i-k]\n",
    "                    break\n",
    "                elif j < nrows-k and pv_data[j][5][i+k] != None:\n",
    "                    value_replace = pv_data[j][5][i+k]\n",
    "                    break\n",
    "            if pv_data[j][5][i] == None:\n",
    "                pv_data[j][5][i] = value_replace\n",
    "            if pv_data[j][5][i] == None and value_replace == None:\n",
    "                nonenum_afterclean += 1\n",
    "    if nonenum_afterclean != 0:\n",
    "        print(f'数据清洗后仍存在空数据{nonenum_afterclean}，请调整算法！' )\n",
    "    else:\n",
    "        print('数据清洗已完成，缺失数据已补齐！')\n",
    "                    \n",
    "    # 返回所有设备总功率\n",
    "    ps = []\n",
    "    for i in range(len(load_data[0][5])):\n",
    "        p = 0\n",
    "        for j in range(len(load_mped_ids)):\n",
    "            p += load_data[j][5][i]\n",
    "        ps.append(p)\n",
    "    load_data = pd.DataFrame({'year':load_data[0][0], 'month':load_data[0][1],\n",
    "        'day':load_data[0][2], 'hour':load_data[0][3], 'minutes':load_data[0][4], 'p':ps})\n",
    "    pv1_data = pd.DataFrame({'year':pv_data[0][0], 'month':pv_data[0][1],\n",
    "        'day':pv_data[0][2], 'hour':pv_data[0][3], 'minutes':pv_data[0][4], 'p':pv_data[0][5]})\n",
    "    pv2_data = pd.DataFrame({'year':pv_data[1][0], 'month':pv_data[1][1],\n",
    "        'day':pv_data[1][2], 'hour':pv_data[1][3], 'minutes':pv_data[1][4], 'p':pv_data[1][5]})\n",
    "    return load_data, pv1_data, pv2_data\n",
    "\n",
    "# 合并两个dataframe，选取有用的列\n",
    "def merge_df(df1, df2, col_names):\n",
    "    data = pd.DataFrame()\n",
    "    for col in col_names:\n",
    "        if col in df1.columns:\n",
    "            data[col] = df1[col]\n",
    "        elif col in df2.columns:\n",
    "            data[col] = df2[col]\n",
    "    return data\n",
    "\n",
    "load_dir = work_dir + '/../Data/负荷'\n",
    "load_data, pv1_data, pv2_data = get_data(load_dir, time_str1, time_str2)\n",
    "date_list = []\n",
    "for i, row in load_data.iterrows():\n",
    "    year = int(row['year'])\n",
    "    month = int(row['month'])\n",
    "    day = int(row['day'])\n",
    "    date_list.append(datetime.datetime(year, month, day).strftime('%Y/%m/%d'))\n",
    "\n",
    "load_data['date'] = pd.DataFrame({'date':date_list})\n",
    "load_data = merge_df(load_data, wea_data, args_train)\n",
    "load_data = load_data.set_index('date')\n",
    "load_data.head()\n",
    "\n",
    "pv1_data['date'] = pd.DataFrame({'date':date_list})\n",
    "pv1_data = merge_df(pv1_data, wea_data, args_train)\n",
    "pv1_data = pv1_data.set_index('date')\n",
    "pv1_data.head()\n",
    "\n",
    "pv2_data['date'] = pd.DataFrame({'date':date_list})\n",
    "pv2_data = merge_df(pv2_data, wea_data, args_train)\n",
    "pv2_data = pv2_data.set_index('date')\n",
    "pv2_data.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e5bbd322",
   "metadata": {},
   "outputs": [],
   "source": [
    "datetime.datetime(2018, 1, 1).strftime('%Y/%m/%d')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1de702f3",
   "metadata": {},
   "source": [
    "## 光伏数据读取"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "35abbb79",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 光伏数据文件处理\n",
    "def pvfile_handle(file):\n",
    "    data = pd.read_excel(file)\n",
    "    month = pd.to_datetime(file.split('/')[-1].split('.')[0]).month\n",
    "    day = pd.to_datetime(file.split('/')[-1].split('.')[0]).day\n",
    "    # 以15分钟为间隔检查数据是否存在\n",
    "    # 设备数据\n",
    "    pv_datas = [[{x:None for x in args_p} for i in range(96)] for j in range(len(pv_mpedk_ids))]\n",
    "    for i, row in data.iterrows():\n",
    "        p = float(row['item_value'])\n",
    "        time = pd.to_datetime(row['value_time'])\n",
    "        minutes = time.hour * 60 + time.minute\n",
    "        index96 = minutes // 15\n",
    "        for j, value in enumerate(pv_mpedk_ids):\n",
    "            if row['mped_id'] == value and pv_datas[j][index96]['p'] == None:\n",
    "                pv_datas[j][index96] = {'p':p, 'hour':time.hour, 'minutes':(index96 % 4) * 15,\n",
    "                    'year':2022, 'month':month, 'day':day}\n",
    "    \n",
    "    # 统计数据缺失情况\n",
    "    num = 0\n",
    "    for i, value in enumerate(pv_datas):\n",
    "        for j in range(96):\n",
    "            if value[j]['p'] == None:\n",
    "                num += 1\n",
    "                pv_datas[i][j] = {'p':None, 'hour':(j + 1) // 4, 'minutes':((j + 1) % 4) * 15,\n",
    "                    'year':2022, 'month':month, 'day':day}\n",
    "    if num != 0:\n",
    "        print(file.split('/')[-1] + '光伏缺失数据个数：' + str(num))\n",
    "        \n",
    "    return pv_datas\n",
    "\n",
    "def get_pvdata(data_dir, time_str1, time_str2):\n",
    "    file_list = os.listdir(data_dir)\n",
    "    # file_list.remove('.DS_Store')\n",
    "    file_list = sorted(file_list, key=lambda x: pd.to_datetime(x.split('.')[0]))\n",
    "    pv_data = [[[] for j in range(len(args_p))] for i in range(len(pv_mpedk_ids))]\n",
    "    for f in file_list:\n",
    "        time = pd.to_datetime(f.split('.')[0])\n",
    "        time1 = pd.to_datetime(time_str1.split('.')[0])\n",
    "        time2 = pd.to_datetime(time_str2.split('.')[0])\n",
    "        if time >= time1 and time <= time2:\n",
    "            file = data_dir + '/' + f\n",
    "            input_pv_datas = pvfile_handle(file)\n",
    "            for i, mped_data in enumerate(input_pv_datas):\n",
    "                for j, value in enumerate(mped_data):\n",
    "                    pv_data[i][0].append(value['year'])\n",
    "                    pv_data[i][1].append(value['month'])\n",
    "                    pv_data[i][2].append(value['day'])\n",
    "                    pv_data[i][3].append(value['hour'])\n",
    "                    pv_data[i][4].append(value['minutes'])\n",
    "                    pv_data[i][5].append(value['p'])\n",
    "    \n",
    "    # 光伏数据清洗\n",
    "    nonenum_afterclean = 0\n",
    "    nrows = len(pv_data[0][5])\n",
    "    for i in range(nrows):\n",
    "        for j, id in enumerate(pv_mpedk_ids):\n",
    "            # 0~4时、20～24时缺失数据用0补全\n",
    "            if (pv_data[j][3][i] <= 4 or pv_data[j][3][i] >= 20) and pv_data[j][5][i] == None:\n",
    "                pv_data[j][5][i] = 0\n",
    "            # 其余缺失数据用前后1小时最近的数据进行补全\n",
    "            value_replace = None\n",
    "            for k in range(4):\n",
    "                if i > k and pv_data[j][5][i-k] != None:\n",
    "                    value_replace = pv_data[j][5][i-k]\n",
    "                    break\n",
    "                elif j < nrows-k and pv_data[j][5][i+k] != None:\n",
    "                    value_replace = pv_data[j][5][i+k]\n",
    "                    break\n",
    "            if pv_data[j][5][i] == None:\n",
    "                pv_data[j][5][i] = value_replace\n",
    "            if pv_data[j][5][i] == None and value_replace == None:\n",
    "                nonenum_afterclean += 1\n",
    "    if nonenum_afterclean != 0:\n",
    "        print(f'数据清洗后仍存在空数据{nonenum_afterclean}，请调整算法！' )\n",
    "    else:\n",
    "        print('数据清洗已完成，缺失数据已补齐！')\n",
    "                    \n",
    "    # 返回设备总功率\n",
    "    p1s = []\n",
    "    p2s = []\n",
    "    for i in range(len(pv_data[0][5])):\n",
    "        p = 0\n",
    "        for j, id in enumerate(pv_mpedk_ids):\n",
    "            if id != 78:\n",
    "                p += pv_data[j][5][i]\n",
    "            else:\n",
    "                p2s.append(pv_data[j][5][i])\n",
    "        p1s.append(p)\n",
    "    pv1_data = pd.DataFrame({'year':pv_data[0][0], 'month':pv_data[0][1],\n",
    "        'day':pv_data[0][2], 'hour':pv_data[0][3], 'minutes':pv_data[0][4], 'p':p1s})\n",
    "    pv2_data = pd.DataFrame({'year':pv_data[0][0], 'month':pv_data[0][1],\n",
    "        'day':pv_data[0][2], 'hour':pv_data[0][3], 'minutes':pv_data[0][4], 'p':p2s})\n",
    "    return pv1_data, pv2_data\n",
    "\n",
    "pv_dir = work_dir + '/../Data/光伏'\n",
    "pv1_data, pv2_data = get_pvdata(pv_dir, time_str1, time_str2)\n",
    "date_list = []\n",
    "for i, row in pv1_data.iterrows():\n",
    "    year = int(row['year'])\n",
    "    month = int(row['month'])\n",
    "    day = int(row['day'])\n",
    "    date_list.append(datetime.datetime(year, month, day).strftime('%Y/%m/%d'))\n",
    "pv1_data['date'] = pd.DataFrame({'date':date_list})\n",
    "pv1_data = merge_df(pv1_data, wea_data, args_train)\n",
    "pv1_data = pv1_data.set_index('date')\n",
    "pv1_data.head()\n",
    "\n",
    "pv2_data['date'] = pd.DataFrame({'date':date_list})\n",
    "pv2_data = merge_df(pv2_data, wea_data, args_train)\n",
    "pv2_data = pv2_data.set_index('date')\n",
    "pv2_data.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b006ad77",
   "metadata": {},
   "source": [
    "## 公开数据读取"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "700356ad",
   "metadata": {},
   "outputs": [],
   "source": [
    "# # 光伏\n",
    "# pv0_data = pd.read_csv(work_dir + '/../Data/pv_2018.csv', encoding='gbk')\n",
    "# data = np.array(pv0_data['实际功率'])\n",
    "# data = data.reshape(int(len(data)/96), 96)\n",
    "# _ = plt.figure(figsize=(16, 9))\n",
    "# for i in range(data.shape[0]):\n",
    "#     _ = plt.plot(range(96), data[i])\n",
    "# _ = plt.show()\n",
    "# # pv0_data[32:72]\n",
    "# # 数据清洗\n",
    "# for r in range(pv0_data.shape[0]):\n",
    "#     if r > 0 and r < pv0_data.shape[0] - 1\\\n",
    "#         and abs(pv0_data['实际功率'][r]) < pv0_data['实际功率'][r-1]\\\n",
    "#         and abs(pv0_data['实际功率'][r]) < pv0_data['实际功率'][r+1]:\n",
    "#         pv0_data['实际功率'][r] = 0.5 * (pv0_data['实际功率'][r-1] + pv0_data['实际功率'][r+1])\n",
    "#     # 删除了异常数据2月11日, 原始数据保存为pv_2018_origin.csv\n",
    "#     if r < pv0_data.shape[0] - 5 and pv0_data['实际功率'][r] > 0\\\n",
    "#         and pv0_data['实际功率'][r] == pv0_data['实际功率'][r+1]\\\n",
    "#         and pv0_data['实际功率'][r] == pv0_data['实际功率'][r+2]\\\n",
    "#         and pv0_data['实际功率'][r] == pv0_data['实际功率'][r+3]\\\n",
    "#         and pv0_data['实际功率'][r] == pv0_data['实际功率'][r+4]:\n",
    "#         print(r)\n",
    "\n",
    "# data = np.array(pv0_data['实际功率'])\n",
    "# data = data.reshape(int(len(data)/96), 96)\n",
    "# _ = plt.figure(figsize=(16, 9))\n",
    "# for i in range(data.shape[0]):\n",
    "#     _ = plt.plot(range(96), data[i])\n",
    "# _ = plt.show()\n",
    "\n",
    "# # 负荷原始数据预处理\n",
    "# df = pd.read_excel(work_dir + '/../Data/load_2018.xlsx', sheet_name='负荷')\n",
    "# df.head()\n",
    "# load = df[df.columns[1:]].values.reshape(-1,1).flatten()\n",
    "# load\n",
    "# time, season, month, quarter, temperature, humidity, wind, water = [], [], [], [], [], [], [], []\n",
    "# time0 = datetime.datetime(2018, 1, 1)\n",
    "# for r in range(load.shape[0]):\n",
    "#     t = time0 + datetime.timedelta(r // 96)\n",
    "#     time.append(str(t.date()))\n",
    "#     if t.month == 3 or t.month == 4 or t.month == 5:\n",
    "#         season.append(1)\n",
    "#     elif t.month == 6 or t.month == 7 or t.month == 8:\n",
    "#         season.append(2)\n",
    "#     elif t.month == 9 or t.month == 10 or t.month == 11:\n",
    "#         season.append(3)\n",
    "#     else:\n",
    "#         season.append(4)\n",
    "#     quarter.append(r % 96)\n",
    "#     month.append(t.month)\n",
    "# sheet_names = ['温度', '湿度', '风速', '降雨']\n",
    "# for s in sheet_names:\n",
    "#     df = pd.read_excel(work_dir + '/../Data/load_2018.xlsx', sheet_name=s)\n",
    "#     data = df[df.columns[1:]].values.reshape(-1,1).flatten()\n",
    "#     if s == '温度':\n",
    "#         temperature = data\n",
    "#     elif s == '湿度':\n",
    "#         humidity = data\n",
    "#     elif s == '风速':\n",
    "#         wind = data\n",
    "#     else:\n",
    "#         water = data\n",
    "# load_data = pd.DataFrame({'时间':time, '时节':season, '月份':month, '时刻':quarter,\n",
    "#     '温度':temperature, '湿度':humidity, '风速':wind, '降雨':water, '实际功率':load})\n",
    "# load_data.head()\n",
    "# load_data.to_csv(work_dir + '/../Data/load_2018.csv', encoding='gbk')\n",
    "\n",
    "# 负荷\n",
    "load0_data = pd.read_csv(work_dir + '/../Data/load_2018.csv', encoding='gbk')\n",
    "load0_data = load0_data.set_index('时间')\n",
    "load0_data.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "840c4b2b",
   "metadata": {},
   "source": [
    "## K-Means"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c9ea6f78",
   "metadata": {},
   "outputs": [],
   "source": [
    "data = np.array(pv0_data['实际功率'])\n",
    "data = data.reshape(int(len(data)/96), 96)\n",
    "shape(data)\n",
    "km = KMeans(n_clusters=3, random_state=9)\n",
    "km.fit(data)\n",
    "for i, l in enumerate(km.labels_[246:]):\n",
    "    print(i)\n",
    "    _ = plt.plot(range(96), data[i], 'b')\n",
    "    _ = plt.xlim(24, 80)\n",
    "    _ = plt.ylim(0, 20)\n",
    "    _ = plt.show()\n",
    "    # if l == 2:\n",
    "    #     _ = plt.plot(range(96), data[i], 'b')\n",
    "    # elif l == 1:\n",
    "    #     _ = plt.plot(range(96), data[i], 'r')\n",
    "    # elif l == 2:\n",
    "    #     _ = plt.plot(range(96), data[i], 'y')\n",
    "    # elif l == 3:\n",
    "    #     _ = plt.plot(range(96), data[i], 'g')\n",
    "    # else:\n",
    "    #     _ = plt.plot(range(96), data[i], 'k')\n",
    "# _ = plt.xlim(24, 80)\n",
    "# _ = plt.ylim(0, 20)\n",
    "# _ = plt.show()\n",
    "km.labels_[246:]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "07d2772c",
   "metadata": {},
   "source": [
    "## 数据统计"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0c8fe8f0",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Helper: min-max scale one column and overlay one curve per day\n",
    "# (96 quarter-hour points per day). Replaces three copy-pasted loops.\n",
    "def plot_daily_curves(df, col):\n",
    "    '''Scale df[col] to [0, 1], plot one 96-point curve per day, and\n",
    "    return the scaled array so callers can re-plot slices of it.'''\n",
    "    scaler = MinMaxScaler(feature_range=(0, 1))\n",
    "    scaled = scaler.fit_transform(df[[col]])\n",
    "    days = int(len(df) / 96)\n",
    "    for d in range(days):\n",
    "        _ = plt.plot(range(96), scaled[(96*d):(96*(d+1))])\n",
    "    _ = plt.show()\n",
    "    return scaled\n",
    "\n",
    "# kept for later cells that reuse a module-level scaler named t\n",
    "t = MinMaxScaler(feature_range=(0, 1))\n",
    "\n",
    "# PV output curves\n",
    "_ = plot_daily_curves(pv1_data, 'p')\n",
    "_ = plot_daily_curves(pv2_data, 'p')\n",
    "\n",
    "# Load curves (keep the scaled values to re-plot the last day)\n",
    "ps = plot_daily_curves(load_data, 'p')\n",
    "_ = plt.plot(range(96), ps[-96:])\n",
    "\n",
    "pv1_data.max()\n",
    "pv2_data.max()\n",
    "load_data.max()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e8e8e526",
   "metadata": {},
   "source": [
    "# 预测模型\n",
    "## RF"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d8409f63",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Random forest baseline regressor.\n",
    "# FIX: max_features='auto' was deprecated and removed in scikit-learn 1.3;\n",
    "# for regressors it meant \"use all features\", i.e. max_features=1.0.\n",
    "forest = RandomForestRegressor(\n",
    "    n_estimators=10,\n",
    "    random_state=1,\n",
    "    n_jobs=-1,\n",
    "    max_features=1.0,\n",
    "    min_samples_leaf=2, min_samples_split=3)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "594ed001",
   "metadata": {},
   "source": [
    "## MLR"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4d8c0b75",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Multiple Linear Regression (MLR) baseline.\n",
    "linear = LinearRegression()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7950ecf8",
   "metadata": {},
   "source": [
    "## XGBoost"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "42a602f4",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Training helper: optionally tunes n_estimators with xgb.cv (early\n",
    "# stopping), then fits and reports training-set error and importances.\n",
    "def modelfit(alg, x, y, useTrainCV=True, cv_folds=7, early_stopping_rounds=66):\n",
    "    if useTrainCV:\n",
    "        xgb_param = alg.get_xgb_params()\n",
    "        xgtrain = xgb.DMatrix(x.values, label=y.values)\n",
    "        cvresult = xgb.cv(xgb_param, xgtrain, num_boost_round=alg.get_params()['n_estimators'], nfold=cv_folds,\n",
    "             early_stopping_rounds=early_stopping_rounds, verbose_eval=False)\n",
    "        # keep only the boosting rounds that survived early stopping\n",
    "        alg.set_params(n_estimators=cvresult.shape[0])\n",
    "    \n",
    "    # Fit the model.\n",
    "    # BUG FIX: eval_metric='auc' is a classification metric and is invalid\n",
    "    # for a regressor; also, fit(eval_metric=...) was removed in xgboost 2.x.\n",
    "    # The regressor's default objective already evaluates with RMSE.\n",
    "    alg.fit(x, y)\n",
    "    # predict on the training set\n",
    "    dtrain_predictions = alg.predict(x)\n",
    "    # report training-set error metrics\n",
    "    print(\"\\n关于现在这个模型\")\n",
    "    print (\"平均绝对误差 : %.2f\" % metrics.mean_absolute_error(y.values, dtrain_predictions))\n",
    "    print (\"平均平方误差 : %.2f\" % metrics.mean_squared_error(y.values, dtrain_predictions))\n",
    "    xgb.plot_importance(alg, title='feature_importance', xlabel='feature', ylabel='importance', grid=False)\n",
    "    plt.show()\n",
    "\n",
    "model_test = XGBRegressor(base_score=0.3,colsample_bylevel=1,colsample_bytree=0.7,\n",
    "                          gamma=0,learning_rate=0.05,max_depth=6,min_child_weight=2,\n",
    "                          n_estimators=1040,reg_alpha=0.1,reg_lambda=0.05,subsample=0.7)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0c695f45",
   "metadata": {},
   "source": [
    "## ANN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a842a9ab",
   "metadata": {},
   "outputs": [],
   "source": [
    "# MLP (a feed-forward ANN) baseline.\n",
    "# FIX: MLPRegressor was never imported in the import cell; import it here\n",
    "# so the notebook survives Restart & Run All.\n",
    "from sklearn.neural_network import MLPRegressor\n",
    "\n",
    "bpnn = MLPRegressor(hidden_layer_sizes=(20,),\n",
    "    batch_size=96,\n",
    "    random_state=10, learning_rate_init=0.001)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "be2566dd",
   "metadata": {},
   "source": [
    "## LSTM"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2207c269",
   "metadata": {},
   "outputs": [],
   "source": [
    "def lstm_fit(x_train, y_train):\n",
    "    '''Build and fit a 2-layer LSTM regressor.\n",
    "\n",
    "    x_train: array shaped (samples, time_steps, features)\n",
    "    y_train: array shaped (samples, 1)\n",
    "    Returns the fitted keras model.\n",
    "    '''\n",
    "    # number of features per time step\n",
    "    dim = x_train.shape[-1]\n",
    "    # number of time steps per sample\n",
    "    length = x_train.shape[1]\n",
    "    model = Sequential()\n",
    "    # return_sequences=True: the first LSTM emits the full sequence so the\n",
    "    # second LSTM can consume it.\n",
    "    # FIX: input_dim/input_length are long-removed Keras aliases; use\n",
    "    # input_shape=(time_steps, features) instead.\n",
    "    model.add(LSTM(units=384, return_sequences=True,\n",
    "        input_shape=(length, dim)))\n",
    "    model.add(LSTM(units=31))\n",
    "    model.add(Dense(1))\n",
    "    model.compile(loss='mean_squared_error', optimizer='adam')\n",
    "    model.fit(x_train, y_train, epochs=2, batch_size=96, verbose=1)\n",
    "    return model"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "dd8ea552",
   "metadata": {},
   "source": [
    "## 灰色关联度"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ac9711a5",
   "metadata": {},
   "outputs": [],
   "source": [
    "def GRA_ONE(DataFrame, m=0):\n",
    "    '''Grey relational grade of every column against reference column m.\n",
    "\n",
    "    Returns a one-column DataFrame; row i is the grey relational grade of\n",
    "    column i with respect to column m (values in (0, 1]).\n",
    "    '''\n",
    "    # min-max normalise each column\n",
    "    gray = DataFrame\n",
    "    gray = (gray - gray.min()) / (gray.max() - gray.min())\n",
    "    # reference series (column m) and comparison matrix (all columns)\n",
    "    std = gray.iloc[:, m]\n",
    "    ce = gray.iloc[:, 0:]\n",
    "    n = ce.shape[0]\n",
    "    m = ce.shape[1]\n",
    "    # absolute deviation of every column from the reference\n",
    "    # FIX: index std positionally (.iloc[j]); label-based std[j] breaks\n",
    "    # when the DataFrame does not have a 0..n-1 integer index.\n",
    "    a = zeros([m, n])\n",
    "    for i in range(m):\n",
    "        for j in range(n):\n",
    "            a[i, j] = abs(ce.iloc[j, i] - std.iloc[j])\n",
    "    # global extrema of the deviation matrix\n",
    "    c = amax(a)\n",
    "    d = amin(a)\n",
    "    # grey relational coefficients (resolution coefficient rho = 0.5)\n",
    "    result = zeros([m, n])\n",
    "    for i in range(m):\n",
    "        for j in range(n):\n",
    "            result[i, j] = (d + 0.5*c) / (a[i, j] + 0.5*c)\n",
    "    # average the coefficients -> grey relational grade per column\n",
    "    result2 = zeros(m)\n",
    "    for i in range(m):\n",
    "        result2[i] = mean(result[i, :])\n",
    "    RT = pd.DataFrame(result2)\n",
    "    return RT\n",
    "\n",
    "def GRA(DataFrame):\n",
    "    '''Pairwise grey-relational-grade matrix of all columns.'''\n",
    "    list_columns = [DataFrame.columns[i] for i in range(len(DataFrame.columns))]\n",
    "    df_local = pd.DataFrame(columns=list_columns)\n",
    "    for i in range(len(DataFrame.columns)):\n",
    "        # FIX: .iloc column assignment into an empty frame does not expand\n",
    "        # it (and raises on modern pandas); assign by column label instead.\n",
    "        df_local[list_columns[i]] = GRA_ONE(DataFrame, m=i)[0]\n",
    "    return df_local"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0667cedd",
   "metadata": {},
   "source": [
    "## GWO-LSSVM"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2defff34",
   "metadata": {},
   "outputs": [],
   "source": [
    "## GWO (Grey Wolf Optimizer) search over the LSSVM hyper-parameters\n",
    "def gwo(x_train, y_train, agents_num, max_iter, dim, lb, ub):\n",
    "    '''\n",
    "    agents_num: number of search agents (wolves)\n",
    "    max_iter: maximum number of iterations\n",
    "    dim: number of parameters to optimise (here 2: C and sigma)\n",
    "    lb: lower bound of the parameter range\n",
    "    ub: upper bound of the parameter range\n",
    "    Returns (bestC, bestsigma) from the alpha wolf's final position.\n",
    "\n",
    "    NOTE(review): rd (presumably numpy.random) is not imported in the\n",
    "    visible import cell -- confirm it is bound elsewhere in the notebook.\n",
    "    '''\n",
    "    # initial positions of the alpha / beta / delta wolves\n",
    "    Alpha_pos=[0,0]\n",
    "    Beta_pos=[0,0]\n",
    "    Delta_pos=[0,0]\n",
    "    # initial objective values: +inf (\"worst\"), because the updates below\n",
    "    # keep the SMALLEST fitness as the leader\n",
    "    Alpha_score = float(\"inf\")\n",
    "    Beta_score = float(\"inf\")\n",
    "    Delta_score = float(\"inf\")\n",
    "    # random initial positions of the whole pack inside [lb, ub]\n",
    "    pos = np.dot(rd.rand(agents_num, dim), (ub - lb)) + lb\n",
    "    iterations = []\n",
    "    accuracy = []\n",
    "    # iteration counter\n",
    "    iter = 0\n",
    "    while iter < max_iter:\n",
    "        # t0 = time()\n",
    "        # evaluate every wolf\n",
    "        for i in range(0, (pos.shape[0])):\n",
    "            # clamp positions that left the search space\n",
    "            for j in range(0, (pos.shape[1])):\n",
    "                Flag4ub = pos[i, j] > ub\n",
    "                Flag4lb = pos[i, j] < lb\n",
    "                # inside [lb, ub]: keep; outside: snap back to the bound\n",
    "                if Flag4ub:\n",
    "                    pos[i, j] = ub\n",
    "                if Flag4lb:\n",
    "                    pos[i, j] = lb\n",
    "            # fit an LSSVM at this (C, sigma) and score it on the train set\n",
    "            sigma, alpha, b = lssvm_fit(x_train, y_train, C=pos[i][0], sigma=pos[i][1])\n",
    "            y_pre = lssvm_predict(sigma, alpha, b, x_train, x_train)\n",
    "            fitness = r2_score(y_train, y_pre) * 100\n",
    "            # NOTE(review): fitness is R^2 * 100 but the comparisons below\n",
    "            # keep the SMALLEST value (scores start at +inf), i.e. they\n",
    "            # minimise R^2. That looks inverted for a goodness-of-fit\n",
    "            # metric -- confirm whether the objective should be an error\n",
    "            # term (e.g. 1 - R^2) or the comparisons flipped.\n",
    "            # new alpha if better (smaller) than the alpha score\n",
    "            if fitness < Alpha_score:\n",
    "                Alpha_score = fitness # update the leader's objective value\n",
    "                Alpha_pos = pos[i] # and the leader's position\n",
    "            # new beta if between the alpha and beta scores\n",
    "            if fitness > Alpha_score and fitness < Beta_score:\n",
    "                Beta_score = fitness # update beta's objective value\n",
    "                Beta_pos = pos[i]\n",
    "            # new delta if between the beta and delta scores\n",
    "            if fitness > Alpha_score and fitness > Beta_score and fitness < Delta_score:\n",
    "                Delta_score = fitness # update delta's objective value\n",
    "                Delta_pos = pos[i]\n",
    "        # exploration factor, decays linearly from 2 to 0\n",
    "        a = 2 - iter * (2 / max_iter)\n",
    "        \n",
    "        # move every wolf\n",
    "        for i in range(0, (pos.shape[0])):\n",
    "            # ...in every dimension\n",
    "            for j in range(0, (pos.shape[1])):\n",
    "                # encircling: step towards the three leading wolves\n",
    "                r1 = rd.random(1)# random number in [0, 1)\n",
    "                r2 = rd.random(1)\n",
    "                A1 = 2 * a * r1 - a # coefficient A\n",
    "                C1 = 2 * r2 # coefficient C\n",
    "                # step towards the alpha wolf\n",
    "                D_alpha = abs(C1 * Alpha_pos[j] - pos[i,j])\n",
    "                X1 = Alpha_pos[j] - A1 * D_alpha\n",
    "                r1 = rd.random(1)\n",
    "                r2 = rd.random(1)\n",
    "                A2 = 2 * a * r1 - a\n",
    "                C2 = 2 * r2\n",
    "                # step towards the beta wolf\n",
    "                D_beta = abs(C2 * Beta_pos[j] - pos[i,j])\n",
    "                X2 = Beta_pos[j] - A2 * D_beta\n",
    "                r1 = rd.random(1)\n",
    "                r2 = rd.random(1)\n",
    "                A3 = 2 * a * r1 - a\n",
    "                C3 = 2 * r2\n",
    "                # step towards the delta wolf\n",
    "                D_delta = abs(C3 * Delta_pos[j] - pos[i,j])\n",
    "                X3 = Delta_pos[j] - A3 * D_delta\n",
    "                # new position: average of the three steps\n",
    "                pos[i,j] = (X1 + X2 + X3) / 3\n",
    "        iter += 1\n",
    "        iterations.append(iter)\n",
    "        accuracy.append((100 - Alpha_score) / 100)\n",
    "        # print(f'iteration: {iter}, {(time() - t0):.0f}s')\n",
    "    # plot the convergence curve\n",
    "    _ = plt.plot(iterations, accuracy)\n",
    "    _ = plt.xlabel('迭代次数', size = 20)\n",
    "    _ = plt.ylabel('精度', size = 20)\n",
    "    _ = plt.title('GWO-LSSVM参数优化')\n",
    "    _ = plt.show()\n",
    "    bestC = Alpha_pos[0]\n",
    "    bestsigma = Alpha_pos[1]\n",
    "    return bestC, bestsigma\n",
    "\n",
    "def lssvm_fit(X, Y, C, sigma):\n",
    "    # Least-Squares SVM training: builds the RBF kernel matrix and solves\n",
    "    # the LSSVM linear system directly for the bias and the multipliers.\n",
    "    # C: regularisation parameter (soft margin, tolerance to errors)\n",
    "    # sigma: RBF kernel width parameter\n",
    "    X = mat(X)\n",
    "    m = shape(X)[0]\n",
    "    alphas = mat(zeros((m, 1)))\n",
    "    b = 0\n",
    "    # kernel matrix: K[i, j] is the RBF kernel value of samples i and j\n",
    "    K = mat(zeros((m, m)))\n",
    "    for i in range(m):\n",
    "        K_ = mat(zeros((m, 1)))\n",
    "        for j in range(m):\n",
    "            deltaRow = X[j] - X[i,:]\n",
    "            K_[j] = deltaRow * deltaRow.T\n",
    "        # kernel values of every sample X[j] against X[i,:]\n",
    "        K[:,i] = exp(K_/(-1 * sigma ** 2))\n",
    "\n",
    "    # assemble the system  [[0, 1^T], [1, K + I/C]] [b; alpha] = [0; Y]\n",
    "    unit = mat(ones((m, 1)))\n",
    "    I = eye(m)\n",
    "    zero = mat(zeros((1, 1)))\n",
    "    upmat = hstack((zero, unit.T))\n",
    "    downmat = hstack((unit, K + I/float(C)))\n",
    "    # left-hand-side matrix of the LSSVM system\n",
    "    completemat = vstack((upmat, downmat))\n",
    "    # right-hand-side vector of the LSSVM system\n",
    "    Y = mat(Y).transpose()\n",
    "    rightmat = vstack((zero, Y))    \n",
    "    b_alpha = completemat.I * rightmat\n",
    "    # extract the bias b and the Lagrange multipliers\n",
    "    b = b_alpha[0, 0]\n",
    "    for i in range(m):\n",
    "        alphas[i, 0] = b_alpha[i+1, 0]\n",
    "    return sigma, alphas, b\n",
    "\n",
    "def lssvm_predict(sigma, alphas, b, X, A):\n",
    "    # Predict with a trained LSSVM: for every query row of A, compute its\n",
    "    # RBF kernel values against the training set X and apply\n",
    "    # y = K^T * alphas + b.\n",
    "    y_pre = []\n",
    "    X = mat(X)\n",
    "    m = shape(X)[0]\n",
    "    K = mat(zeros((m, 1)))\n",
    "    A = mat(A)\n",
    "    n = shape(A)[0]\n",
    "    for i in range(n):\n",
    "        # squared distances of query i to every training sample\n",
    "        for j in range(m):\n",
    "            deltaRow = X[j, :] - A[i, :]\n",
    "            K[j] = deltaRow * deltaRow.T\n",
    "        # RBF kernel values (K is fully overwritten on each outer pass)\n",
    "        K = exp(K/(-1 * sigma ** 2))\n",
    "        y_pre.append(float(K.T * alphas + b))\n",
    "    return y_pre"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6568a233",
   "metadata": {},
   "source": [
    "## EEMD"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b2c37596",
   "metadata": {},
   "outputs": [],
   "source": [
    "def eedm(data, n):\n",
    "    '''Decompose a 1-D series with Ensemble EMD (EEMD).\n",
    "\n",
    "    data: 1-D array-like series to decompose\n",
    "    n: maximum number of IMFs to extract\n",
    "    Returns the IMF array, shape (n_imfs, len(data)).\n",
    "    '''\n",
    "    S = np.array(data)\n",
    "    eemd = EEMD()\n",
    "    eemd.trials = 50\n",
    "    # fixed seed so the noise-assisted decomposition is reproducible\n",
    "    eemd.noise_seed(12345)\n",
    "    # (removed the unused imfNo local from the original)\n",
    "    E_IMFs = eemd.eemd(S, max_imf=n)\n",
    "\n",
    "    return E_IMFs"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ebefa682",
   "metadata": {},
   "source": [
    "## 预测评价指标"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a48c3d23",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_eval_index(y, y_pre, capacity=20):\n",
    "    '''RMSE / MAE / R2 / accuracy of a forecast.\n",
    "\n",
    "    capacity: rated power used to normalise the accuracy term. The\n",
    "        original hard-coded 20; it is now a parameter with the same\n",
    "        default, so existing calls behave identically.\n",
    "    Returns a dict with keys 'rmse', 'mae', 'r2', 'acc'.\n",
    "    '''\n",
    "    rmse = metrics.mean_squared_error(y, y_pre)**0.5\n",
    "    mae = metrics.mean_absolute_error(y, y_pre)\n",
    "    r2 = metrics.r2_score(y, y_pre)\n",
    "    # accuracy: 1 - mean absolute error relative to rated capacity\n",
    "    acc = 1 - np.mean(np.abs((y - y_pre) / capacity))\n",
    "    eval_index = {'rmse':rmse, 'mae':mae, 'r2':r2, 'acc':acc}\n",
    "    return eval_index"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9ab391d0",
   "metadata": {},
   "source": [
    "# 发电预测"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f6ae0c9b",
   "metadata": {},
   "source": [
    "## 特征选取"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c1633dd2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Feature screening for the PV model: overlay normalised power against\n",
    "# each candidate factor, then check lag correlation and model importance.\n",
    "data = pv1_data\n",
    "_ = plt.figure(figsize=(16,9))\n",
    "# mine = MINE(alpha=0.6, c=15, est=\"mic_approx\")\n",
    "for i, time in enumerate(time_index):\n",
    "    # normalise both series to [0, 1] for a visual comparison\n",
    "    t = MinMaxScaler(feature_range=(0, 1))\n",
    "    a = t.fit_transform(data[data.index==time]['p'].values.reshape(-1,1))\n",
    "    b = t.fit_transform(data[data.index==time][data.columns[i+1]].values.reshape(-1,1))\n",
    "    _ = plt.subplot(3, 3, i+1)\n",
    "    _ = plt.plot(a, label='p', color='b')\n",
    "    # BUG FIX: the plotted curve is column i+1, but the label said\n",
    "    # columns[i]; use the same index for curve, label and title.\n",
    "    _ = plt.plot(b, label=data.columns[i+1], color='r')\n",
    "    _ = plt.title('p与' + data.columns[i+1])\n",
    "    # quantitative check (MIC), kept for reference:\n",
    "    # mine.compute_score(a.flatten(), b.flatten())\n",
    "    # print('p与' + str(data.columns[i]) + 'MIC：' + str(mine.mic()))\n",
    "_ = plt.show()\n",
    "\n",
    "# Choose the input window length: correlation between the series and the\n",
    "# same series i days earlier decides how much history to feed the model.\n",
    "rows = len(data)\n",
    "for i in range(14):\n",
    "    a = data['p'][96*14:]\n",
    "    b = data['p'][96*(14-i):(rows-96*i)]\n",
    "    print('前' + str(i) + '天MIC：' + str(metrics.normalized_mutual_info_score(a,b)) + '\\r')\n",
    "# Conclusion: use a 5-day window.\n",
    "\n",
    "t = MinMaxScaler(feature_range=(0, 1))\n",
    "x = pv1_data.drop(['p'], axis=1)\n",
    "y = pv1_data['p']\n",
    "x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.2,random_state=0)\n",
    "\n",
    "modelfit(model_test, x_train, y_train)\n",
    "# Conclusion: temperature, actual irradiance, humidity and the irradiance\n",
    "# difference are used as the model inputs."
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9f234ed6",
   "metadata": {},
   "source": [
    "## 相似日聚类"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "20d3cd9d",
   "metadata": {},
   "outputs": [],
   "source": [
    "def kmeans(data, n, par_name):\n",
    "    '''Cluster days of `data` by the daily curve of one column.\n",
    "\n",
    "    data: DataFrame with 96 rows per day\n",
    "    n: number of clusters\n",
    "    par_name: column to cluster on, e.g. 'sun' (solar irradiance)\n",
    "    Returns (sse, silhouette, data) with a new per-row 'labels' column.\n",
    "    '''\n",
    "    days = int(len(data)/96)\n",
    "    # one row per day, 96 quarter-hour samples per row\n",
    "    _data = np.array(data[par_name]).reshape(days, 96)\n",
    "    _data = pd.DataFrame(_data, columns=range(96))\n",
    "    # fixed random_state -> reproducible clustering\n",
    "    km = KMeans(n_clusters=n, random_state=9)\n",
    "    km.fit(_data)\n",
    "    # within-cluster sum of squared errors (SSE)\n",
    "    sse = km.inertia_\n",
    "    # mean silhouette coefficient\n",
    "    sc = silhouette_score(_data, km.labels_, metric=\"euclidean\")\n",
    "    _data['labels'] = km.labels_\n",
    "    # broadcast the per-day label to every quarter-hour row\n",
    "    # (replaces the original nested append loop; same values)\n",
    "    data['labels'] = np.repeat(km.labels_, 96)\n",
    "    # plot the power curves of each cluster\n",
    "    _ = plt.figure(figsize=(16,9))\n",
    "    _ = plt.ioff()\n",
    "    for i in range(n):\n",
    "        _ = plt.subplot(round(n/2), 2, i+1)\n",
    "        for index, row in _data.iterrows():\n",
    "            if row['labels'] == i:\n",
    "                _ = plt.plot(range(96), data['p'][96*index:(96*(index+1))])\n",
    "        _ = plt.title(f'type: {i}')\n",
    "    \n",
    "    return sse, sc, data\n",
    "\n",
    "sse, sc, pv1_data = kmeans(pv1_data, 3, 'sun')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e5234ae4",
   "metadata": {},
   "source": [
    "## 训练数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8cccb500",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_train_data_pv(df, x_par_names, y_par_name):\n",
    "    '''Build sliding-window samples for the PV LSTM model.\n",
    "\n",
    "    df: DataFrame (already scaled by the caller), 96 rows per day\n",
    "    x_par_names: list of input column names, e.g. ['sun', 'wet']\n",
    "    y_par_name: list with the output column name, e.g. ['p']\n",
    "    Returns x_train, y_train, x_valid, y_valid as numpy arrays; every x\n",
    "    sample is the previous time_size rows of the input columns, y is the\n",
    "    output value at the row right after the window.\n",
    "    '''\n",
    "    \n",
    "    # look-back window: 5 days x 96 quarter-hour points\n",
    "    time_size = 5 * 96\n",
    "    # train : validation split is 4:1 (days = number of training days)\n",
    "    days = int(len(df) // (96 * 5 / 4))\n",
    "    len_train = days * 96\n",
    "    \n",
    "    x_train, y_train, x_valid, y_valid = [], [], [], []\n",
    "\n",
    "    # training set: first len_train rows plus the leading window\n",
    "    train_data = df[0:(len_train+time_size)]\n",
    "    scaled_train_data_x = np.array(train_data[x_par_names])\n",
    "    scaled_train_data_y = np.array(train_data[y_par_name])\n",
    "    for i in range(time_size, len(train_data)):\n",
    "        x_train.append(scaled_train_data_x[(i-time_size):i])\n",
    "        y_train.append(scaled_train_data_y[i])\n",
    "    x_train, y_train = np.array(x_train), np.array(y_train)\n",
    "\n",
    "    # validation set: the remaining rows (its window overlaps the train tail)\n",
    "    valid_data = df[(len_train-time_size):]\n",
    "    scaled_valid_data_x = np.array(valid_data[x_par_names])\n",
    "    scaled_valid_data_y = np.array(valid_data[y_par_name])\n",
    "    for i in range(time_size, len(valid_data)):\n",
    "        x_valid.append(scaled_valid_data_x[i-time_size:i])\n",
    "        y_valid.append(scaled_valid_data_y[i])\n",
    "    x_valid, y_valid = np.array(x_valid), np.array(y_valid)\n",
    "\n",
    "    print('x_train shape' + str(shape(x_train)))\n",
    "    print('x_valid shape' + str(shape(x_valid)))\n",
    "    return x_train, y_train, x_valid, y_valid"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4a211074",
   "metadata": {},
   "source": [
    "## 训练预测"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a9e73b59",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scale the PV features to [0, 1] and train the LSTM forecaster.\n",
    "t = MinMaxScaler(feature_range=(0, 1))\n",
    "# columns: irradiance, temperature, humidity, actual power\n",
    "par_names = ['辐照度', '温度', '湿度', '实际功率']\n",
    "# pv0_data[par_names] = t.fit_transform(pv0_data[par_names])\n",
    "train_df = pd.DataFrame()\n",
    "train_df[par_names] = t.fit_transform(pv0_data[par_names])\n",
    "# inputs: irradiance / temperature / humidity; target: actual power\n",
    "x_train, y_train, x_valid, y_valid = get_train_data_pv(train_df,\n",
    "    ['辐照度', '温度', '湿度'], ['实际功率'])\n",
    "lstm = lstm_fit(x_train, y_train)\n",
    "y_pre = lstm.predict(x_valid)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "51b0a9d5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Invert the power scaling, plot actual vs. LSTM forecast, then decompose\n",
    "# one forecast day with EEMD and plot every IMF.\n",
    "_ = t.fit_transform(pv0_data[['实际功率']])\n",
    "y_valid_inversed = t.inverse_transform(y_valid)\n",
    "y_pre_inversed = t.inverse_transform(y_pre)\n",
    "_ = plt.plot(y_valid_inversed[0:96*5], 'b')\n",
    "_ = plt.plot(y_pre_inversed[0:96*5], 'r')\n",
    "_ = plt.show()\n",
    "y_plot = y_pre_inversed[0:96]\n",
    "imfs = eedm(y_plot.flatten(), 5)\n",
    "\n",
    "# subplot grid big enough for every IMF plus the original signal\n",
    "c = np.floor(np.sqrt(imfs.shape[0]+1))\n",
    "r = np.ceil((imfs.shape[0]+1)/c)\n",
    "_ = plt.figure(figsize=(16,16))\n",
    "_ = plt.ioff()\n",
    "_ = plt.subplot(int(r), int(c), 1)\n",
    "_ = plt.plot(y_plot, 'r')\n",
    "_ = plt.title('预测总功率')\n",
    "\n",
    "for i in range(imfs.shape[0]):\n",
    "    # BUG FIX: subplot() requires integers on modern matplotlib; r and c\n",
    "    # are numpy floats here (the first subplot above already casts them).\n",
    "    _ = plt.subplot(int(r), int(c), i+2)\n",
    "    _ = plt.plot(imfs[i], 'g')\n",
    "    _ = plt.title('Imf'+str(i+1)+'分量功率')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e21ccf00",
   "metadata": {},
   "source": [
    "## 模型对比"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6b9b41f4",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Flatten the (samples, time, features) tensors for the sklearn models.\n",
    "x_train_2D = x_train.reshape(x_train.shape[0],\n",
    "  x_train.shape[1]*x_train.shape[2])\n",
    "x_valid_2D = x_valid.reshape(x_valid.shape[0],\n",
    "  x_valid.shape[1]*x_valid.shape[2])\n",
    "\n",
    "# Fit / predict / score every baseline with one loop instead of three\n",
    "# copy-pasted stanzas; same order and same printed output as before.\n",
    "preds = {}\n",
    "for name, model in [('RF', forest), ('ANN', bpnn), ('MLR', linear)]:\n",
    "    print(f'{name}:')\n",
    "    model.fit(x_train_2D, y_train)\n",
    "    preds[name] = model.predict(x_valid_2D)\n",
    "    print(get_eval_index(y_valid, preds[name]))\n",
    "\n",
    "# keep the per-model names the plotting cell below expects\n",
    "forest_y_pre = preds['RF']\n",
    "bpnn_y_pre = preds['ANN']\n",
    "linear_y_pre = preds['MLR']\n",
    "\n",
    "# model refined with the irradiance-difference feature: omitted here"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1513f6d9",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Day offsets (within the validation window) chosen for visual comparison.\n",
    "ts = [10, 16, 1]\n",
    "forest_y_pre_inversed = t.inverse_transform(forest_y_pre.reshape(-1,1))\n",
    "linear_y_pre_inversed = t.inverse_transform(linear_y_pre.reshape(-1,1))\n",
    "bpnn_y_pre_inversed = t.inverse_transform(bpnn_y_pre.reshape(-1,1))\n",
    "# Overlay the actual power and every model's forecast for the daytime\n",
    "# points (24..80 of 96) of each selected day.\n",
    "for i in range(3):\n",
    "    # _ = plt.figure(figsize=(16, 8))\n",
    "    _ = plt.plot(y_valid_inversed[(96*ts[i]+24):(96*ts[i]+80)], label='实际功率')\n",
    "    _ = plt.plot(forest_y_pre_inversed[(96*ts[i]+24):(96*ts[i]+80)], label='RF')\n",
    "    _ = plt.plot(linear_y_pre_inversed[(96*ts[i]+24):(96*ts[i]+80)], label='MLR')\n",
    "    _ = plt.plot(bpnn_y_pre_inversed[(96*ts[i]+24):(96*ts[i]+80)], label='ANN')\n",
    "    _ = plt.plot(y_pre_inversed[(96*ts[i]+24):(96*ts[i]+80)], label='LSTM')\n",
    "    _ = plt.legend(fontsize=12)\n",
    "    _ = plt.ylim(0, 20)\n",
    "    _ = plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2a28c9a0",
   "metadata": {},
   "source": [
    "## 区间预测"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7130c82d",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_interval(arr, dis, conf_level):\n",
    "    '''Confidence interval of the forecast-error sample.\n",
    "\n",
    "    arr: error array\n",
    "    dis: distribution assumption, 'student' or 'normal'\n",
    "    conf_level: confidence level, e.g. 0.95\n",
    "    Returns the (lower, upper) tuple, or None for an unknown `dis`.\n",
    "    '''\n",
    "    interval = None\n",
    "    mean = arr.mean()\n",
    "    std = arr.std()\n",
    "    if dis == 'student':\n",
    "        interval = stats.t.interval(conf_level, len(arr)-1, mean, std)\n",
    "    elif dis == 'normal':\n",
    "        # sample (ddof=1) standard deviation for the normal assumption\n",
    "        std = arr.std(ddof=1)\n",
    "        interval = stats.norm.interval(conf_level, mean, std)\n",
    "    return interval\n",
    "\n",
    "# forecast error on the validation set\n",
    "pre_error = y_valid - y_pre\n",
    "# prediction intervals at several confidence levels\n",
    "levels = [0.95, 0.90, 0.85, 0.80, 0.75, 0.70]\n",
    "intervals = []\n",
    "for level in levels:\n",
    "    interval = get_interval(pre_error, 'student', level)\n",
    "    print(f'{level}预测下界为: {interval[0]}, {level}预测上界为:{interval[1]}')\n",
    "    intervals.append(interval)\n",
    "\n",
    "# Build the six lower/upper bound series (first forecast day) with\n",
    "# comprehensions instead of 12 copy-pasted lines.\n",
    "day_pre = y_pre[0:96]\n",
    "down_data = [[day_pre[i][0] + intervals[k][0] for i in range(96)] for k in range(6)]\n",
    "up_data = [[day_pre[i][0] + intervals[k][1] for i in range(96)] for k in range(6)]\n",
    "\n",
    "colors = ['#00ffcc', '#00cccc', '#0099cc', '#0066cc', '#0033cc', '#0000cc']\n",
    "_ = plt.figure(figsize=(14, 8))\n",
    "_ = plt.plot(range(96), y_valid[0:96], label='实际功率', color='r')\n",
    "_ = plt.plot(range(96), y_pre[0:96], label='预测值', color='black')\n",
    "# BUG FIX: every band was labelled '95%置信区间' even though the levels\n",
    "# differ; label each band with its own confidence level.\n",
    "for k in range(6):\n",
    "    _ = plt.fill_between(range(96), down_data[k], up_data[k], color=colors[k],\n",
    "                         alpha=0.3, label=f'{int(levels[k]*100)}%置信区间')\n",
    "_ = plt.legend(fontsize=12)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f53676a8",
   "metadata": {},
   "source": [
    "# 负荷预测"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a0f778e3",
   "metadata": {},
   "source": [
    "## 特征选取"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "702ab1be",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Qualitative + quantitative feature screening for the load model.\n",
    "data = load0_data\n",
    "_ = plt.figure(figsize=(16, 12))\n",
    "# mine = MINE(alpha=0.6, c=15, est=\"mic_approx\")\n",
    "for i, time in enumerate(time_index[:4]):\n",
    "    # normalise both series to [0, 1]\n",
    "    # qualitative check: overlay the load and the candidate feature\n",
    "    t = MinMaxScaler(feature_range=(0, 1))\n",
    "    a = t.fit_transform(data[data.index==time]['实际功率'].values.reshape(-1,1))\n",
    "    b = t.fit_transform(data[data.index==time][data.columns[i+3]].values.reshape(-1,1))\n",
    "    _ = plt.subplot(2, 2, i+1)\n",
    "    _ = plt.plot(a, label='实际功率', color='b')\n",
    "    _ = plt.plot(b, label=data.columns[i+3], color='r')\n",
    "    _ = plt.title('负荷与' + data.columns[i+3])\n",
    "    # quantitative check: maximal information coefficient (MIC)\n",
    "    # mine.compute_score(a.flatten(), b.flatten())\n",
    "    # print('p与' + str(data.columns[i]) + 'MIC：' + str(mine.mic()))\n",
    "_ = plt.show()\n",
    "\n",
    "t = MinMaxScaler(feature_range=(0, 1))\n",
    "columns = data.columns\n",
    "index = data.index\n",
    "data = t.fit_transform(data)\n",
    "data = pd.DataFrame(data, columns=columns, index=index)\n",
    "data.head()\n",
    "# MIC between one day's load and the load at the 4 preceding time steps,\n",
    "# 2 to 4 days earlier; the first 4 lags showed MIC > 0.8.\n",
    "# NOTE(review): MINE (minepy) is not imported in the visible import cell --\n",
    "# confirm it is imported elsewhere in the notebook.\n",
    "mine = MINE(alpha=0.6, c=15, est=\"mic_approx\")\n",
    "for i in range(2, 5):\n",
    "    m1 = data[96*20:96*21]['实际功率']\n",
    "    for j in range(4):\n",
    "        m2 = data[(96*(20-i)-j):(96*(21-i)-j)]['实际功率']\n",
    "        mine.compute_score(m1, m2)\n",
    "        print(f'负荷与前{i}天{j}时刻前负荷的MIC：{mine.mic()}')\n",
    "# MIC between the load and each factor within one day\n",
    "data = data[0:96]\n",
    "for col in data.columns:\n",
    "    mine.compute_score(data['实际功率'][-24:], data[col][-24:])\n",
    "    print(f'负荷与{col}的MIC：{mine.mic()}')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f037a39a",
   "metadata": {},
   "source": [
    "## 训练数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e0fb0f4c",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_train_data_load(df, x_par_names, y_par_name):\n",
    "    '''Build a similar-day training set for load forecasting.\n",
    "\n",
    "    df: DataFrame (already scaled), 96 rows per day; the LAST day is the\n",
    "        day to predict and becomes the validation set\n",
    "    x_par_names: list of input column names\n",
    "    y_par_name: list with the output column name\n",
    "    Returns x_train, y_train, x_valid, y_valid; the training rows come\n",
    "    from the 5 historical days whose inputs have the highest grey\n",
    "    relational grade against the target day.\n",
    "    '''\n",
    "    # the day to predict is the last day in df\n",
    "    x_valid = np.array(df[x_par_names][-96:]).flatten()\n",
    "    y_valid = np.array(df[y_par_name][-96:]).flatten()\n",
    "    gra_0 = {'x':x_valid, 'y':y_valid}\n",
    "    days = int(len(df) // 96)\n",
    "    # score each earlier day against the target day by grey relational grade\n",
    "    lis = []\n",
    "    for i in range(days-2):\n",
    "        x = np.array(df[x_par_names][-(i+2)*96:-(i+1)*96]).flatten()\n",
    "        y = np.array(df[y_par_name][-(i+2)*96:-(i+1)*96]).flatten()\n",
    "        gra_1 = {'x':x, 'y':y}\n",
    "        # grey relational grade between the two flattened input vectors\n",
    "        da = pd.DataFrame({'origin':gra_0['x'], 'new':gra_1['x']})\n",
    "        gra = GRA(da).iloc[0,1]\n",
    "        dic = {'gra':gra, 'x':gra_1['x'], 'y':gra_1['y']}\n",
    "        lis.append(dic)\n",
    "    # sort by grade, keep the 5 most similar days as the training data\n",
    "    data = sorted(lis, key=lambda x:x['gra'])[-5:]\n",
    "    \n",
    "    x_train, y_train = [], []\n",
    "    for i in range(len(data)):\n",
    "        for j in range(len(data[i]['x'])):\n",
    "            x_train.append(data[i]['x'][j])\n",
    "        for j in range(len(data[i]['y'])):\n",
    "            y_train.append(data[i]['y'][j])\n",
    "    args_num = len(x_par_names)\n",
    "\n",
    "    # reshape the flattened inputs back to (rows, n_features)\n",
    "    # NOTE(review): y_train is returned as a plain list while the PV\n",
    "    # variant returns arrays -- downstream code appears to accept both;\n",
    "    # confirm before unifying.\n",
    "    x_train = np.array(x_train).reshape(int(len(x_train)/args_num), args_num)\n",
    "    x_valid = np.array(x_valid).reshape(int(len(x_valid)/args_num), args_num)\n",
    "    print(f'x_train shape{shape(x_train)}')\n",
    "    print(f'x_valid shape{shape(x_valid)}')\n",
    "\n",
    "    return x_train, y_train, x_valid, y_valid\n",
    "\n",
    "# Assemble the load-forecast features: drop unused columns, add lagged\n",
    "# load features, scale to [0, 1] and build the similar-day sets.\n",
    "data = load0_data\n",
    "# data = data.drop({'temperature', 'cloud', 'sun', 'uv', 'water', 'wet'}, axis='columns')\n",
    "# drop the season / month / wind speed / rainfall columns\n",
    "data = data.drop({'时节', '月份', '风速', '降雨'}, axis='columns')\n",
    "# Optimal feature set: the load at the 4 time steps before the same time,\n",
    "# 2 to 4 days earlier (column D-{i}T-{j} = load shifted by 96*i - j rows).\n",
    "x_par_names = []\n",
    "for i in range(2, 5):\n",
    "    for j in range(4):\n",
    "        x_par_names.append(f'D-{i}T-{j}')\n",
    "        data[f'D-{i}T-{j}'] = data['实际功率'].shift(96*i-j)\n",
    "# drop the first 5 days, which contain NaNs introduced by the shifts\n",
    "data = data[96*5:]\n",
    "# x_par_names = ['temperature', 'cloud', 'sun', 'uv', 'water', 'wet']\n",
    "\n",
    "index = data.index\n",
    "columns = data.columns\n",
    "t = MinMaxScaler(feature_range=(0,1))\n",
    "df = t.fit_transform(data)\n",
    "df = pd.DataFrame(df, index=index, columns=columns)\n",
    "x_train, y_train, x_valid, y_valid = get_train_data_load(df, x_par_names, ['实际功率'])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "84c852a3",
   "metadata": {},
   "source": [
    "## 训练预测"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2e02afe5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# init C = 1, sigma = 1\n",
    "sigma, alphas, b = lssvm_fit(x_train, y_train, 4.1, 2.8)\n",
    "y_pre = lssvm_predict(2.8, alphas, b, x_train, x_valid)\n",
    "# _ = plt.plot(y_valid, 'b')\n",
    "# _ = plt.plot(y_pre, 'r')\n",
    "\n",
    "# # 模型训练\n",
    "# bestC, bestsigma = gwo(x_train, y_train, 5, 20, 2, 0.01, 10)\n",
    "# print(f'Best C: {bestC}, Best sigma: {bestsigma}')\n",
    "# # Best C: 4.106348545547135, Best sigma: 2.7983026002779603\n",
    "# sigma, alphas, b = lssvm_fit(x_train, y_train, bestC, bestsigma)\n",
    "\n",
    "# # 预测\n",
    "# y_pred = lssvm_predict(sigma, alphas, b, x_train, x_valid)\n",
    "# _ = plt.plot(y_valid, 'b')\n",
    "# _ = plt.plot(y_pred, 'r')\n",
    "\n",
    "# # 预测评价指标\n",
    "# eval_index = get_eval_index(y_valid, y_pre)\n",
    "\n",
    "_ = t.fit_transform(load0_data[['实际功率']])\n",
    "y_valid_inversed = t.inverse_transform(y_valid.reshape(-1,1))\n",
    "y_pre_inversed = t.inverse_transform(np.array(y_pre).reshape(-1,1))\n",
    "_ = plt.plot(y_valid_inversed, 'b')\n",
    "_ = plt.plot(y_pre_inversed, 'r')\n",
    "_ = plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8adc452d",
   "metadata": {},
   "source": [
    "## 模型对比"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8d396154",
   "metadata": {},
   "outputs": [],
   "source": [
    "# RF\n",
    "print('RF:')\n",
    "forest.fit(x_train, y_train)\n",
    "forest_y_pre = forest.predict(x_valid)\n",
    "eval_index = get_eval_index(y_valid, forest_y_pre)\n",
    "print(eval_index)\n",
    "\n",
    "# ANN\n",
    "print('ANN:')\n",
    "bpnn.fit(x_train, y_train)\n",
    "bpnn_y_pre = bpnn.predict(x_valid)\n",
    "eval_index = get_eval_index(y_valid, bpnn_y_pre)\n",
    "print(eval_index)\n",
    "# _ = plt.plot(y[0:800], 'b')\n",
    "# _ = plt.plot(y_valid[0:800], 'r')\n",
    "\n",
    "# XGBoost\n",
    "print('XGBoost:')\n",
    "xgboost = XGBRegressor(base_score=0.3, colsample_bylevel=1,\n",
    "    colsample_bytree=0.7, gamma=0, learning_rate=0.05,\n",
    "    max_depth=6, min_child_weight=2, n_estimators=566,\n",
    "    reg_alpha=0.1, reg_lambda=0.05, subsample=0.7)\n",
    "xgboost.fit(x_train, y_train)\n",
    "xgb_resluts = xgboost.predict(x_valid)\n",
    "\n",
    "# MLR\n",
    "print('MLR:')\n",
    "linear.fit(x_train, y_train)\n",
    "linear_y_pre = linear.predict(x_valid)\n",
    "eval_index = get_eval_index(y_valid, linear_y_pre)\n",
    "print(eval_index)\n",
    "\n",
    "# 绘制对比曲线\n",
    "# _ = plt.figure(figsize=(16, 8))\n",
    "_ = plt.plot(y_valid[0:96], label='实际功率')\n",
    "_ = plt.plot(forest_y_pre[0:96], label='随机森林')\n",
    "_ = plt.plot(linear_y_pre[0:96], label='线性回归')\n",
    "_ = plt.plot(bpnn_y_pre[0:96], label='BPNN')\n",
    "_ = plt.plot(y_pre[0:96], label='LSSVM')\n",
    "_ = plt.legend(fontsize=12)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.7"
  },
  "vscode": {
   "interpreter": {
    "hash": "e1582c2b6d08d091242bbba91b4102c1fdf89e601d9e3a7a12a6df745e337292"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
