ICEEMDAN-Solar_power-forecast/iceemdan-筛选-high-ConvBiGruA...


{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"C:\\Users\\asus\\AppData\\Roaming\\Python\\Python39\\site-packages\\pandas\\core\\computation\\expressions.py:21: UserWarning: Pandas requires version '2.8.4' or newer of 'numexpr' (version '2.8.3' currently installed).\n",
" from pandas.core.computation.check import NUMEXPR_INSTALLED\n",
"C:\\Users\\asus\\AppData\\Roaming\\Python\\Python39\\site-packages\\pandas\\core\\arrays\\masked.py:60: UserWarning: Pandas requires version '1.3.6' or newer of 'bottleneck' (version '1.3.5' currently installed).\n",
" from pandas.core import (\n"
]
}
],
"source": [
"from math import sqrt\n",
"from numpy import concatenate\n",
"from matplotlib import pyplot\n",
"import pandas as pd\n",
"import numpy as np\n",
"from sklearn.preprocessing import MinMaxScaler\n",
"from sklearn.preprocessing import LabelEncoder\n",
"from sklearn.metrics import mean_squared_error\n",
"from tensorflow.keras import Sequential\n",
"\n",
"from tensorflow.keras.layers import Dense\n",
"from tensorflow.keras.layers import LSTM\n",
"from tensorflow.keras.layers import Dropout\n",
"from sklearn.model_selection import train_test_split\n",
"import matplotlib.pyplot as plt"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"这段代码是一个函数 time_series_to_supervised它用于将时间序列数据转换为监督学习问题的数据集。下面是该函数的各个部分的含义\n",
"\n",
"data: 输入的时间序列数据可以是列表或2D NumPy数组。\n",
"n_in: 作为输入的滞后观察数即用多少个时间步的观察值作为输入。默认值为96表示使用前96个时间步的观察值作为输入。\n",
"n_out: 作为输出的观测数量即预测多少个时间步的观察值。默认值为10表示预测未来10个时间步的观察值。\n",
"dropnan: 布尔值表示是否删除具有NaN值的行。默认为True即删除具有NaN值的行。\n",
"函数首先检查输入数据的维度并初始化一些变量。然后它创建一个新的DataFrame对象 df 来存储输入数据,并保存原始的列名。接着,它创建了两个空列表 cols 和 names用于存储新的特征列和列名。\n",
"\n",
"接下来,函数开始构建特征列和对应的列名。首先,它将原始的观察序列添加到 cols 列表中,并将其列名添加到 names 列表中。然后,它依次将滞后的观察序列添加到 cols 列表中,并构建相应的列名,格式为 (原始列名)(t-滞后时间)。这样就创建了输入特征的部分。\n",
"\n",
"接着,函数开始构建输出特征的部分。它依次将未来的观察序列添加到 cols 列表中,并构建相应的列名,格式为 (原始列名)(t+未来时间)。\n",
"\n",
"最后函数将所有的特征列拼接在一起构成一个新的DataFrame对象 agg。如果 dropnan 参数为True则删除具有NaN值的行。最后函数返回处理后的数据集 agg。"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"def time_series_to_supervised(data, n_in=96, n_out=10,dropnan=True):\n",
" \"\"\"\n",
" :param data:作为列表或2D NumPy数组的观察序列。需要。\n",
" :param n_in:作为输入的滞后观察数X。值可以在[1..len数据]之间可选。默认为1。\n",
" :param n_out:作为输出的观测数量y。值可以在[0..len数据]之间。可选的。默认为1。\n",
" :param dropnan:Boolean是否删除具有NaN值的行。可选的。默认为True。\n",
" :return:\n",
" \"\"\"\n",
" n_vars = 1 if type(data) is list else data.shape[1]\n",
" df = pd.DataFrame(data)\n",
" origNames = df.columns\n",
" cols, names = list(), list()\n",
" cols.append(df.shift(0))\n",
" names += [('%s' % origNames[j]) for j in range(n_vars)]\n",
" n_in = max(0, n_in)\n",
" for i in range(n_in, 0, -1):\n",
" time = '(t-%d)' % i\n",
" cols.append(df.shift(i))\n",
" names += [('%s%s' % (origNames[j], time)) for j in range(n_vars)]\n",
" n_out = max(n_out, 0)\n",
" for i in range(1, n_out+1):\n",
" time = '(t+%d)' % i\n",
" cols.append(df.shift(-i))\n",
" names += [('%s%s' % (origNames[j], time)) for j in range(n_vars)]\n",
" agg = pd.concat(cols, axis=1)\n",
" agg.columns = names\n",
" if dropnan:\n",
" agg.dropna(inplace=True)\n",
" return agg"
]
},
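{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal usage sketch (not part of the original pipeline) showing what the helper produces on a toy two-column series; the toy values are made up purely for illustration.\n",
"\n",
"```python\n",
"import numpy as np\n",
"import pandas as pd\n",
"\n",
"# Toy series: 6 time steps, 2 features\n",
"toy = np.arange(12).reshape(6, 2)\n",
"\n",
"# Two lags as input, one step ahead as output\n",
"sup = time_series_to_supervised(toy, n_in=2, n_out=1, dropnan=True)\n",
"\n",
"# Columns: '0', '1' (time t), then '0(t-2)', '1(t-2)', '0(t-1)', '1(t-1)', then '0(t+1)', '1(t+1)'\n",
"print(sup.columns.tolist())\n",
"print(sup.shape)  # (3, 8): only rows 2..4 survive once the NaN edges are dropped\n",
"```"
]
},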
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" Temp Humidity GHI DHI Rainfall Power\n",
"0 19.779453 40.025826 3.232706 1.690531 0.0 0.0\n",
"1 19.714937 39.605961 3.194991 1.576346 0.0 0.0\n",
"2 19.549330 39.608631 3.070866 1.576157 0.0 0.0\n",
"3 19.405870 39.680702 3.038623 1.482489 0.0 0.0\n",
"4 19.387363 39.319881 2.656474 1.134153 0.0 0.0\n",
"(104256, 6)\n"
]
}
],
"source": [
"# 加载数据\n",
"path1 = r\"D:\\project\\小论文1-基于ICEEMDAN分解的时序高维变化的短期光伏功率预测模型\\CEEMAN-PosConv1dbiLSTM-LSTM\\模型代码流程\\data6.csv\"#数据所在路径\n",
"#我的数据是excel表若是csv文件用pandas的read_csv()函数替换即可。\n",
"datas1 = pd.DataFrame(pd.read_csv(path1))\n",
"#我只取了data表里的第3、23、16、17、18、19、20、21、27列如果取全部列的话这一行可以去掉\n",
"# data1 = datas1.iloc[:,np.r_[3,23,16:22,27]]\n",
"data1=datas1.interpolate()\n",
"values1 = data1.values\n",
"print(data1.head())\n",
"print(data1.shape)"
]
},
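{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check (a small sketch, not executed in the original run) that the interpolation above left no missing values before the data is merged and scaled:\n",
"\n",
"```python\n",
"# Remaining missing values per column after interpolate()\n",
"print(data1.isna().sum())\n",
"\n",
"# Note: with default settings, interpolate() cannot fill NaNs at the very start of a column;\n",
"# if any counts above are non-zero, dropping those leading rows would be needed.\n",
"```"
]
},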
{
"cell_type": "code",
"execution_count": 25,
"metadata": {},
"outputs": [],
"source": [
"# data2= data1.drop(['date','Air_P','RH'], axis = 1)"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {},
"outputs": [],
"source": [
"# # 获取重构的原始数据\n",
"# # 获取重构的原始数据\n",
"# # 获取重构的原始数据\n",
"high_re= r\"D:\\project\\小论文1-基于ICEEMDAN分解的时序高维变化的短期光伏功率预测模型\\CEEMAN-PosConv1dbiLSTM-LSTM\\模型代码流程\\完整的模型代码流程\\high_re.csv\"#数据所在路径\n",
"# #我的数据是excel表若是csv文件用pandas的read_csv()函数替换即可。\n",
"high_re = pd.DataFrame(pd.read_csv(high_re))"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" column_name\n",
"0 -1.426824\n",
"1 -1.426819\n",
"2 -1.426815\n",
"3 -1.426812\n",
"4 -1.426810\n",
"... ...\n",
"104251 -1.629381\n",
"104252 -1.629328\n",
"104253 -1.629271\n",
"104254 -1.629213\n",
"104255 -1.629152\n",
"\n",
"[104256 rows x 1 columns]\n"
]
}
],
"source": [
"reconstructed_data_high= high_re\n",
"# # 打印重构的原始数据\n",
"print(reconstructed_data_high)"
]
},
{
"cell_type": "code",
"execution_count": 25,
"metadata": {},
"outputs": [
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAA0wAAAIjCAYAAAAwSJuMAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/TGe4hAAAACXBIWXMAAA9hAAAPYQGoP6dpAACNU0lEQVR4nO3dd5hTVf4G8DfTe2M6DEPvSBUEpClKE8QCFlTAisIidlEXdBXBgru4rrp27K4KqKAUpYqASAcB6R2GNr1Pzu+P87vJhGnp597k/TzPPDdkMpnvXHKT+552TUIIASIiIiIiIqoiQHUBREREREREesXAREREREREVAMGJiIiIiIiohowMBEREREREdWAgYmIiIiIiKgGDExEREREREQ1YGAiIiIiIiKqAQMTERERERFRDRiYiIiIiIiIasDARER2MZlMePbZZ1WXUa1GjRrhmmuuUV2GX+vXrx/69evn1M+OHTsWjRo1cms9F/voo49gMplw6NAhj/4eR+j5mCLXrVixAiaTCStWrHD5ufT4+q2sX79+aNeuXZ2PO3ToEEwmEz766COHf4f2s6+++qoTFRK5hoGJyE779+/HfffdhyZNmiAsLAwxMTHo1asXZs+ejaKiItXlkRsVFhbi2WefdcuJjt4tWLAAgwYNQr169RAWFoYWLVrg0Ucfxblz51SXRn7gxRdfxPz58/2+Bk/6/PPP8a9//Ut1GUSGFqS6ACIjWLhwIUaOHInQ0FDccccdaNeuHUpLS/Hrr7/isccew86dO/HOO++oLtOjioqKEBTkH28ZhYWFeO655wDA6V4TI3j00Ucxa9YsdOjQAU888QQSEhKwadMmvPHGG/jyyy/xyy+/oGXLlnY915IlS5yu491334XZbHb658m4XnzxRdx4440YMWKEX9fgSZ9//jl27NiByZMnqy4FmZmZKCoqQnBwsOpSiBziH2c/RC44ePAgbr75ZmRmZmLZsmVIS0uzfG/ChAnYt28fFi5cqLBCzzGbzSgtLUVYWBjCwsJUl0Nu9MUXX2DWrFm46aab8NlnnyEwMNDyvbFjx6J///4YOXIkNm3aVGtQLiwsREREBEJCQpyuhSdPjikuLkZISAgCAvxrkEhBQQEiIyNVl0EuMJlM/CwhQ/Kvd1siJ7z88svIz8/H+++/bxOWNM2aNcODDz5o+Xd5eTmef/55NG3aFKGhoWjUqBGeeuoplJSU2PycNu9mxYoV6Nq1K8LDw9G+fXvLMLC5c+eiffv2CAsLQ5cuXbB582abnx87diyioqJw4MABDBw4EJGRkUhPT8c//vEPCCFsHvvqq6+iZ8+eqFevHsLDw9GlSxd88803Vf4Wk8mEiRMn4rPPPkPbtm0RGhqKRYsWWb5Xeb5FXl4eJk+ejEaNGiE0NBTJycm46qqrsGnTJpvn/Prrr9GlSxeEh4cjMTERt912G44fP17t33L8+HGMGDECUVFRSEpKwqOPPoqKiooa/meqWrJkCTp27IiwsDC0adMGc+fOrfKY7OxsTJ48GRkZGQgNDUWzZs3w0ksvWXo4Dh06hKSkJADAc889B5PJZPnbv//+e5hMJmzbts3yfN9++y1MJhOuv/56m9/TunVr3HTTTTb3ffrpp5Z9kZCQgJtvvhlHjx6tUuP69esxaNAgxMbGIiIiAn379sWaNWtsHvPss8/CZDJh3759GDt2LOLi4hAbG4tx48ahsLCwzn313HPPIT4+Hu+8845NWAKAbt264YknnsD27dttXifaPIWNGzeiT58+iIiIwFNPPWX53sW9cYcPH8bw4cMRGRmJ5ORkPPTQQ1i8eHGVeR0Xz2GqPFfhnXfesRxLl156KTZs2GDzO7Zt24axY8dahsqmpqbizjvvdHpIob3P58j+LykpwUMPPYSkpCRER0dj+PDhOHbsmF31aPNgvvzySzzzzDOoX78+IiIikJubC8C+1woAHD9+HHfddRfS09MRGhqKxo0b4/7770dpaanlMQcOHMDIkSORkJCAiIgIXHbZZVUag7R6/ve//2H69Olo0KABwsLCcOWVV2Lfvn02j927dy9uuOEGpKamIiwsDA0aNMDNN9+MnJwcAPI9paCgAHPmzLEcZ2PHjrXZv3/++SduvfVWxMfH4/LLLwdQ83y56ubCmc1mzJ492/JempSUhEGDBuGPP/6oswZtv915551ISUlBaGgo2rZtiw8++KDK7z527BhGjBhh81q/+D3fXjt37sQVV1yB8PBwNGjQAC+88EK1PbDfffcdhg4davk/bdq0KZ5//nmb98x+/fph4cKFOHz4sOXv0/ZRaWkppk6dii5duiA2NhaRkZHo3bs3li9f7lTdAPDnn3+if//+iIiIQP369fHyyy/bfL+mOUxff/012rRpg7CwMLRr1w7z5s2rdW5jXe8LRO7GHiaiOvzwww9o0qQJevbsadfj7777bsyZMwc33ngjHnnkEaxfvx4zZszArl27MG/ePJvH7tu3D7feeivuu+8+3HbbbXj11VcxbNgwvP3223jqqafwwAMPAABmzJiBUaNGYc+ePTatyhUVFRg0aBAuu+wyvPzyy1i0aBGmTZuG8vJy/OMf/7A8bvbs2Rg+fDhGjx6N0tJSfPnllxg5ciQWLFiAoUOH2tS0bNky/O9//8PEiRORmJhY4wfW+PHj8c0332DixIlo06YNzp07h19//RW7du1C586dAciJyuPGjcOll16KGTNm4PTp05g9ezbWrFmDzZs3Iy4uzuZvGThwILp3745XX30VP//8M2bNmoWmTZvi/vvvr3O/7927FzfddBPGjx+PMWPG4MMPP8TIkSOxaNEiXHXVVQBkb0jfvn1x/Phx3HfffWjYsCF+++03TJkyBSdPnsS//vUvJCUl4a233sL999+P6667zhKELrnkEjRo0AAmkwmrVq3CJZdcAgBYvXo1AgIC8Ouvv1pqOXPmDHbv3o2JEyda7ps+fTr+/ve/Y9SoUbj77rtx5swZ/Pvf/0afPn1s9sWyZcswePBgdOnSBdOmTUNAQAA+/PBDXHHFFVi9ejW6detm83ePGjUKjRs3xowZM7Bp0ya89957SE5OxksvvVTrvtqzZw/Gjh2LmJiYah9zxx13YNq0aViwYAFuvvlmy/3nzp3D4MGDcfPNN+O2225DSkpKtT9fUFCAK664AidPnsSDDz6I1NRUfP755w6djH3++efIy8vDfffdB5PJhJdffhnXX389Dhw4YOmVWrp0KQ4cOIBx48YhNTXVMjx2586dWLduHUwmk92/z5nns2f/33333fj0009x6623omfPnli2bFmV464uzz//PEJCQvDoo4+ipKQEISEhdr9WTpw4gW7duiE7Oxv33nsvWrVqhePHj+Obb75BYWEhQkJCcPr0afTs2ROFhYWYNGkS6tWrhzlz5mD48OH45ptvcN1119nUM3PmTAQEBODRRx9FTk4OXn75ZYwePRrr168HIE/GBw4ciJKSEvztb39Damoqjh8/jgULFiA7OxuxsbH45JNPcPfdd6Nbt2649957A
QBNmza1+T0jR45E8+bN8eKLL1ZpCLLHXXfdhY8++giDBw/G3XffjfLycqxevRrr1q1D165da63h9OnTuOyyyywNSUlJSfjpp59w1113ITc31zLEraioCFdeeSWOHDmCSZMmIT09HZ988gmWLVvmcL2nTp1C//79UV5ejieffBKRkZF45513EB4eXuWxH330EaKiovDwww8jKioKy5Ytw9SpU5Gbm4tXXnkFAPD0008jJycHx44dwz//+U8AQFRUFAAgNzcX7733Hm655Rbcc889yMvLw/vvv4+BAwfi999/R8eOHR2q/cKFCxg0aBCuv/56jBo1Ct988w2eeOIJtG/fHoMHD67x5xYuXIibbroJ7du3x4wZM3DhwgXcddddqF+/frWPt+d9gcjtBBHVKCcnRwAQ1157rV2P37JliwAg7r77bpv7H330UQFALFu2zHJfZmamACB+++03y32LFy8WAER4eLg4fPiw5f7//ve/AoBYvny55b4xY8YIAOJvf/ub5T6z2SyGDh0qQkJCxJk
"text/plain": [
"<Figure size 1000x600 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"import matplotlib.pyplot as plt\n",
"\n",
"# # 假设你已经有了原始数据和重构数据\n",
"# # 原始数据\n",
"original_data = data1['Power'].values\n",
"\n",
"# # 创建时间序列(假设时间序列与数据对应)\n",
"time = range(len(original_data))\n",
"\n",
"# # 创建画布和子图\n",
"plt.figure(figsize=(10, 6))\n",
"\n",
"# # 绘制原始数据\n",
"# plt.plot(time, original_data, label='Original Data', color='blue')\n",
"\n",
"# # 绘制重构数据\n",
"plt.plot(reconstructed_data_high[200:1000], label='Reconstructed Data', color='red')\n",
"\n",
"# # 添加标题和标签\n",
"plt.title('Comparison between Original and reconstructed_data_high')\n",
"plt.xlabel('Time')\n",
"plt.ylabel('Power')\n",
"plt.legend()\n",
"\n",
"# # 显示图形\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": 26,
"metadata": {},
"outputs": [],
"source": [
"data3=data1.iloc[:,:5]"
]
},
{
"cell_type": "code",
"execution_count": 27,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" Temp Humidity GHI DHI Rainfall column_name\n",
"0 19.779453 40.025826 3.232706 1.690531 0.0 -1.426824\n",
"1 19.714937 39.605961 3.194991 1.576346 0.0 -1.426819\n",
"2 19.549330 39.608631 3.070866 1.576157 0.0 -1.426815\n",
"3 19.405870 39.680702 3.038623 1.482489 0.0 -1.426812\n",
"4 19.387363 39.319881 2.656474 1.134153 0.0 -1.426810\n",
"... ... ... ... ... ... ...\n",
"104251 13.303740 34.212711 1.210789 0.787026 0.0 -1.629381\n",
"104252 13.120920 34.394939 2.142980 1.582670 0.0 -1.629328\n",
"104253 12.879215 35.167400 1.926214 1.545889 0.0 -1.629271\n",
"104254 12.915867 35.359989 1.317695 0.851529 0.0 -1.629213\n",
"104255 13.134816 34.500034 1.043269 0.597816 0.0 -1.629152\n",
"\n",
"[104256 rows x 6 columns]\n"
]
}
],
"source": [
"import pandas as pd\n",
"\n",
"# # 创建data3和imf1_array对应的DataFrame\n",
"data3_df = pd.DataFrame(data3)\n",
"imf1_df = pd.DataFrame(reconstructed_data_high)\n",
"\n",
"# # 合并data3_df和imf1_df\n",
"merged_df = pd.concat([data3_df, imf1_df], axis=1)\n",
"\n",
"merged_df = merged_df.iloc[:104256]\n",
"\n",
"# # 打印合并后的表\n",
"print(merged_df)"
]
},
{
"cell_type": "code",
"execution_count": 28,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(104256, 6)"
]
},
"execution_count": 28,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"merged_df.shape"
]
},
{
"cell_type": "code",
"execution_count": 29,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"(104256, 6)\n"
]
}
],
"source": [
"# 使用MinMaxScaler进行归一化\n",
"scaler = MinMaxScaler(feature_range=(0, 1))\n",
"scaledData1 = scaler.fit_transform(merged_df)\n",
"print(scaledData1.shape)"
]
},
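{
"cell_type": "markdown",
"metadata": {},
"source": [
"Note that the scaler above is fitted on the full dataset, so statistics from the later test period leak into the transform. Below is a sketch of a stricter alternative, assuming the same 80/20 chronological split used further down; the names scaler_strict and scaledData_strict are illustrative only.\n",
"\n",
"```python\n",
"from sklearn.preprocessing import MinMaxScaler\n",
"\n",
"n_train_rows = int(len(merged_df) * 0.8)\n",
"\n",
"scaler_strict = MinMaxScaler(feature_range=(0, 1))\n",
"scaler_strict.fit(merged_df.iloc[:n_train_rows])        # fit on the training portion only\n",
"scaledData_strict = scaler_strict.transform(merged_df)  # apply the same transform to all rows\n",
"\n",
"print(scaledData_strict.shape)\n",
"```"
]
},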
{
"cell_type": "code",
"execution_count": 30,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" 0 1 2 3 4 5 0(t-96) \\\n",
"96 0.555631 0.349673 0.190042 0.040558 0.0 0.245160 0.490360 \n",
"97 0.564819 0.315350 0.211335 0.044613 0.0 0.264683 0.489088 \n",
"98 0.576854 0.288321 0.229657 0.047549 0.0 0.283988 0.485824 \n",
"99 0.581973 0.268243 0.247775 0.053347 0.0 0.303131 0.482997 \n",
"100 0.586026 0.264586 0.266058 0.057351 0.0 0.322308 0.482632 \n",
"\n",
" 1(t-96) 2(t-96) 3(t-96) ... 2(t-1) 3(t-1) 4(t-1) 5(t-1) \\\n",
"96 0.369105 0.002088 0.002013 ... 0.166009 0.036794 0.0 0.225396 \n",
"97 0.364859 0.002061 0.001839 ... 0.190042 0.040558 0.0 0.245160 \n",
"98 0.364886 0.001973 0.001839 ... 0.211335 0.044613 0.0 0.264683 \n",
"99 0.365615 0.001950 0.001697 ... 0.229657 0.047549 0.0 0.283988 \n",
"100 0.361965 0.001679 0.001167 ... 0.247775 0.053347 0.0 0.303131 \n",
"\n",
" 0(t+1) 1(t+1) 2(t+1) 3(t+1) 4(t+1) 5(t+1) \n",
"96 0.564819 0.315350 0.211335 0.044613 0.0 0.264683 \n",
"97 0.576854 0.288321 0.229657 0.047549 0.0 0.283988 \n",
"98 0.581973 0.268243 0.247775 0.053347 0.0 0.303131 \n",
"99 0.586026 0.264586 0.266058 0.057351 0.0 0.322308 \n",
"100 0.590772 0.258790 0.282900 0.060958 0.0 0.340588 \n",
"\n",
"[5 rows x 588 columns]\n"
]
}
],
"source": [
"n_steps_in =96 #历史时间长度\n",
"n_steps_out=1#预测时间长度\n",
"processedData1 = time_series_to_supervised(scaledData1,n_steps_in,n_steps_out)\n",
"print(processedData1.head())"
]
},
{
"cell_type": "code",
"execution_count": 31,
"metadata": {},
"outputs": [],
"source": [
"\n",
"# processedData1.to_csv('processedData1.csv', index=False)"
]
},
{
"cell_type": "code",
"execution_count": 32,
"metadata": {},
"outputs": [],
"source": [
"data_x = processedData1.loc[:,'0(t-96)':'5(t-1)']\n",
"data_y = processedData1.loc[:,'5']"
]
},
{
"cell_type": "code",
"execution_count": 33,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(104159, 576)"
]
},
"execution_count": 33,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"data_x.shape"
]
},
{
"cell_type": "code",
"execution_count": 34,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"96 0.245160\n",
"97 0.264683\n",
"98 0.283988\n",
"99 0.303131\n",
"100 0.322308\n",
" ... \n",
"104250 0.000090\n",
"104251 0.000099\n",
"104252 0.000109\n",
"104253 0.000118\n",
"104254 0.000128\n",
"Name: 5, Length: 104159, dtype: float64"
]
},
"execution_count": 34,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"data_y"
]
},
{
"cell_type": "code",
"execution_count": 35,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(104159,)"
]
},
"execution_count": 35,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"data_y.shape"
]
},
{
"cell_type": "code",
"execution_count": 36,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"(83328, 96, 6) (83328,) (20831, 96, 6) (20831,)\n"
]
}
],
"source": [
"# 7.划分训练集和测试集\n",
"\n",
"test_size = int(len(data_x) * 0.2)\n",
"# 计算训练集和测试集的索引范围\n",
"train_indices = range(len(data_x) - test_size)\n",
"test_indices = range(len(data_x) - test_size, len(data_x))\n",
"\n",
"# 根据索引范围划分数据集\n",
"train_X1 = data_x.iloc[train_indices].values.reshape((-1, n_steps_in, scaledData1.shape[1]))\n",
"test_X1 = data_x.iloc[test_indices].values.reshape((-1, n_steps_in, scaledData1.shape[1]))\n",
"train_y = data_y.iloc[train_indices].values\n",
"test_y = data_y.iloc[test_indices].values\n",
"\n",
"\n",
"# # 多次运行代码时希望得到相同的数据分割,可以设置 random_state 参数为一个固定的整数值\n",
"# train_X1,test_X1, train_y, test_y = train_test_split(data_x.values, data_y.values, test_size=0.2, random_state=343)\n",
"# reshape input to be 3D [samples, timesteps, features]\n",
"train_X = train_X1.reshape((train_X1.shape[0], n_steps_in, scaledData1.shape[1]))\n",
"test_X = test_X1.reshape((test_X1.shape[0], n_steps_in,scaledData1.shape[1]))\n",
"print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)\n",
"# 使用train_test_split函数划分训练集和测试集测试集的比重是40%。\n",
"# 然后将train_X1、test_X1进行一个升维变成三维维数分别是[samples,timesteps,features]。\n",
"# 打印一下他们的shape\\\n"
]
},
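{
"cell_type": "markdown",
"metadata": {},
"source": [
"A small sketch verifying the assumption behind the reshape above: data_x holds 576 lag columns ordered lag by lag, with all 6 features inside each lag, so reshaping to (samples, 96, 6) puts time steps and features on the expected axes.\n",
"\n",
"```python\n",
"n_features = scaledData1.shape[1]\n",
"\n",
"# 576 columns = 96 time steps x 6 features\n",
"assert data_x.shape[1] == n_steps_in * n_features\n",
"\n",
"# The first n_features column names all belong to the oldest lag (t-96)\n",
"print(data_x.columns[:n_features].tolist())  # ['0(t-96)', '1(t-96)', ..., '5(t-96)']\n",
"```"
]
},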
{
"cell_type": "code",
"execution_count": 37,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(83328, 96, 6)"
]
},
"execution_count": 37,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"train_X1.shape"
]
},
{
"cell_type": "code",
"execution_count": 38,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"font-weight: bold\">Model: \"functional_1\"</span>\n",
"</pre>\n"
],
"text/plain": [
"\u001b[1mModel: \"functional_1\"\u001b[0m\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\">┏━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┓\n",
"┃<span style=\"font-weight: bold\"> Layer (type) </span>┃<span style=\"font-weight: bold\"> Output Shape </span>┃<span style=\"font-weight: bold\"> Param # </span>┃<span style=\"font-weight: bold\"> Connected to </span>┃\n",
"┡━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━┩\n",
"│ input_layer_1 │ (<span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>, <span style=\"color: #00af00; text-decoration-color: #00af00\">96</span>, <span style=\"color: #00af00; text-decoration-color: #00af00\">6</span>) │ <span style=\"color: #00af00; text-decoration-color: #00af00\">0</span> │ - │\n",
"│ (<span style=\"color: #0087ff; text-decoration-color: #0087ff\">InputLayer</span>) │ │ │ │\n",
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
"│ conv1d_1 (<span style=\"color: #0087ff; text-decoration-color: #0087ff\">Conv1D</span>) │ (<span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>, <span style=\"color: #00af00; text-decoration-color: #00af00\">95</span>, <span style=\"color: #00af00; text-decoration-color: #00af00\">64</span>) │ <span style=\"color: #00af00; text-decoration-color: #00af00\">832</span> │ input_layer_1[<span style=\"color: #00af00; text-decoration-color: #00af00\">0</span>]… │\n",
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
"│ max_pooling1d_1 │ (<span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>, <span style=\"color: #00af00; text-decoration-color: #00af00\">95</span>, <span style=\"color: #00af00; text-decoration-color: #00af00\">64</span>) │ <span style=\"color: #00af00; text-decoration-color: #00af00\">0</span> │ conv1d_1[<span style=\"color: #00af00; text-decoration-color: #00af00\">0</span>][<span style=\"color: #00af00; text-decoration-color: #00af00\">0</span>] │\n",
"│ (<span style=\"color: #0087ff; text-decoration-color: #0087ff\">MaxPooling1D</span>) │ │ │ │\n",
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
"│ bidirectional_1 │ (<span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>, <span style=\"color: #00af00; text-decoration-color: #00af00\">95</span>, <span style=\"color: #00af00; text-decoration-color: #00af00\">128</span>) │ <span style=\"color: #00af00; text-decoration-color: #00af00\">49,920</span> │ max_pooling1d_1[<span style=\"color: #00af00; text-decoration-color: #00af00\">…</span> │\n",
"│ (<span style=\"color: #0087ff; text-decoration-color: #0087ff\">Bidirectional</span>) │ │ │ │\n",
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
"│ self_attention_1 │ [(<span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>, <span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>, │ <span style=\"color: #00af00; text-decoration-color: #00af00\">66,048</span> │ bidirectional_1[<span style=\"color: #00af00; text-decoration-color: #00af00\">…</span> │\n",
"│ (<span style=\"color: #0087ff; text-decoration-color: #0087ff\">SelfAttention</span>) │ <span style=\"color: #00af00; text-decoration-color: #00af00\">128</span>), (<span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>, <span style=\"color: #00af00; text-decoration-color: #00af00\">8</span>, │ │ bidirectional_1[<span style=\"color: #00af00; text-decoration-color: #00af00\">…</span> │\n",
"│ │ <span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>, <span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>)] │ │ bidirectional_1[<span style=\"color: #00af00; text-decoration-color: #00af00\">…</span> │\n",
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
"│ global_average_poo… │ (<span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>, <span style=\"color: #00af00; text-decoration-color: #00af00\">128</span>) │ <span style=\"color: #00af00; text-decoration-color: #00af00\">0</span> │ self_attention_1… │\n",
"│ (<span style=\"color: #0087ff; text-decoration-color: #0087ff\">GlobalAveragePool…</span> │ │ │ │\n",
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
"│ dense_9 (<span style=\"color: #0087ff; text-decoration-color: #0087ff\">Dense</span>) │ (<span style=\"color: #00d7ff; text-decoration-color: #00d7ff\">None</span>, <span style=\"color: #00af00; text-decoration-color: #00af00\">1</span>) │ <span style=\"color: #00af00; text-decoration-color: #00af00\">129</span> │ global_average_p… │\n",
"└─────────────────────┴───────────────────┴────────────┴───────────────────┘\n",
"</pre>\n"
],
"text/plain": [
"┏━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┓\n",
"┃\u001b[1m \u001b[0m\u001b[1mLayer (type) \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1mOutput Shape \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m Param #\u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1mConnected to \u001b[0m\u001b[1m \u001b[0m┃\n",
"┡━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━┩\n",
"│ input_layer_1 │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m96\u001b[0m, \u001b[38;5;34m6\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │ - │\n",
"│ (\u001b[38;5;33mInputLayer\u001b[0m) │ │ │ │\n",
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
"│ conv1d_1 (\u001b[38;5;33mConv1D\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m95\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m832\u001b[0m │ input_layer_1[\u001b[38;5;34m0\u001b[0m]… │\n",
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
"│ max_pooling1d_1 │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m95\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │ conv1d_1[\u001b[38;5;34m0\u001b[0m][\u001b[38;5;34m0\u001b[0m] │\n",
"│ (\u001b[38;5;33mMaxPooling1D\u001b[0m) │ │ │ │\n",
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
"│ bidirectional_1 │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m95\u001b[0m, \u001b[38;5;34m128\u001b[0m) │ \u001b[38;5;34m49,920\u001b[0m │ max_pooling1d_1[\u001b[38;5;34m…\u001b[0m │\n",
"│ (\u001b[38;5;33mBidirectional\u001b[0m) │ │ │ │\n",
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
"│ self_attention_1 │ [(\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;45mNone\u001b[0m, │ \u001b[38;5;34m66,048\u001b[0m │ bidirectional_1[\u001b[38;5;34m…\u001b[0m │\n",
"│ (\u001b[38;5;33mSelfAttention\u001b[0m) │ \u001b[38;5;34m128\u001b[0m), (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m8\u001b[0m, │ │ bidirectional_1[\u001b[38;5;34m…\u001b[0m │\n",
"│ │ \u001b[38;5;45mNone\u001b[0m, \u001b[38;5;45mNone\u001b[0m)] │ │ bidirectional_1[\u001b[38;5;34m…\u001b[0m │\n",
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
"│ global_average_poo… │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m128\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │ self_attention_1… │\n",
"│ (\u001b[38;5;33mGlobalAveragePool…\u001b[0m │ │ │ │\n",
"├─────────────────────┼───────────────────┼────────────┼───────────────────┤\n",
"│ dense_9 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m1\u001b[0m) │ \u001b[38;5;34m129\u001b[0m │ global_average_p… │\n",
"└─────────────────────┴───────────────────┴────────────┴───────────────────┘\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"font-weight: bold\"> Total params: </span><span style=\"color: #00af00; text-decoration-color: #00af00\">116,929</span> (456.75 KB)\n",
"</pre>\n"
],
"text/plain": [
"\u001b[1m Total params: \u001b[0m\u001b[38;5;34m116,929\u001b[0m (456.75 KB)\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"font-weight: bold\"> Trainable params: </span><span style=\"color: #00af00; text-decoration-color: #00af00\">116,929</span> (456.75 KB)\n",
"</pre>\n"
],
"text/plain": [
"\u001b[1m Trainable params: \u001b[0m\u001b[38;5;34m116,929\u001b[0m (456.75 KB)\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"font-weight: bold\"> Non-trainable params: </span><span style=\"color: #00af00; text-decoration-color: #00af00\">0</span> (0.00 B)\n",
"</pre>\n"
],
"text/plain": [
"\u001b[1m Non-trainable params: \u001b[0m\u001b[38;5;34m0\u001b[0m (0.00 B)\n"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"import tensorflow as tf\n",
"from tensorflow.keras.layers import Input, Conv1D, Bidirectional, GlobalAveragePooling1D, Dense, GRU, MaxPooling1D\n",
"from tensorflow.keras.models import Model\n",
"class SelfAttention(tf.keras.layers.Layer):\n",
" def __init__(self, d_model, num_heads):\n",
" super(SelfAttention, self).__init__()\n",
" self.num_heads = num_heads\n",
" self.d_model = d_model\n",
" assert d_model % self.num_heads == 0\n",
" self.depth = d_model // self.num_heads\n",
" self.wq = tf.keras.layers.Dense(d_model)\n",
" self.wk = tf.keras.layers.Dense(d_model)\n",
" self.wv = tf.keras.layers.Dense(d_model)\n",
" self.dense = tf.keras.layers.Dense(d_model)\n",
"\n",
" def split_heads(self, x, batch_size):\n",
" x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))\n",
" return tf.transpose(x, perm=[0, 2, 1, 3])\n",
"\n",
" def call(self, v, k, q, mask):\n",
" batch_size = tf.shape(q)[0]\n",
" q = self.wq(q)\n",
" k = self.wk(k)\n",
" v = self.wv(v)\n",
"\n",
" q = self.split_heads(q, batch_size)\n",
" k = self.split_heads(k, batch_size)\n",
" v = self.split_heads(v, batch_size)\n",
"\n",
" scaled_attention, attention_weights = self.scaled_dot_product_attention(q, k, v, mask)\n",
" scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3])\n",
" concat_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model))\n",
" output = self.dense(concat_attention)\n",
" return output, attention_weights\n",
"\n",
" def scaled_dot_product_attention(self, q, k, v, mask):\n",
" matmul_qk = tf.matmul(q, k, transpose_b=True)\n",
" dk = tf.cast(tf.shape(k)[-1], tf.float32)\n",
" scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)\n",
"\n",
" if mask is not None:\n",
" scaled_attention_logits += (mask * -1e9)\n",
"\n",
" attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1)\n",
" output = tf.matmul(attention_weights, v)\n",
" return output, attention_weights\n",
"\n",
"class SelfAttentionWithRelativePositionEncoding(tf.keras.layers.Layer):\n",
" def __init__(self, d_model, num_heads, max_len=5000):\n",
" super(SelfAttentionWithRelativePositionEncoding, self).__init__()\n",
" self.num_heads = num_heads\n",
" self.d_model = d_model\n",
" self.max_len = max_len\n",
" self.wq = tf.keras.layers.Dense(d_model)\n",
" self.wk = tf.keras.layers.Dense(d_model)\n",
" self.wv = tf.keras.layers.Dense(d_model)\n",
" self.dense = tf.keras.layers.Dense(d_model)\n",
" self.relative_position_encoding = AdvancedRelativePositionalEncoding(d_model)\n",
"\n",
" def call(self, v, k, q, mask):\n",
" batch_size = tf.shape(q)[0]\n",
" q = self.wq(q)\n",
" k = self.wk(k)\n",
" v = self.wv(v)\n",
"\n",
" # 添加相对位置编码\n",
" k += self.relative_position_encoding(k)\n",
" q += self.relative_position_encoding(q)\n",
"\n",
" q = self.split_heads(q, batch_size)\n",
" k = self.split_heads(k, batch_size)\n",
" v = self.split_heads(v, batch_size)\n",
"\n",
" scaled_attention, attention_weights = self.scaled_dot_product_attention(q, k, v, mask)\n",
" scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3])\n",
" concat_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model))\n",
" output = self.dense(concat_attention)\n",
" return output, attention_weights\n",
"\n",
" def split_heads(self, x, batch_size):\n",
" x = tf.reshape(x, (batch_size, -1, self.num_heads, self.d_model // self.num_heads))\n",
" return tf.transpose(x, perm=[0, 2, 1, 3])\n",
"\n",
" def scaled_dot_product_attention(self, q, k, v, mask):\n",
" matmul_qk = tf.matmul(q, k, transpose_b=True)\n",
" dk = tf.cast(tf.shape(k)[-1], tf.float32)\n",
" scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)\n",
"\n",
" if mask is not None:\n",
" scaled_attention_logits += (mask * -1e9)\n",
"\n",
" attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1)\n",
" output = tf.matmul(attention_weights, v)\n",
" return output, attention_weights\n",
"\n",
"import tensorflow as tf\n",
"import numpy as np\n",
"\n",
"import tensorflow as tf\n",
"\n",
"class AdvancedRelativePositionalEncoding(tf.keras.layers.Layer):\n",
" def __init__(self, d_model, max_len=5000):\n",
" super(AdvancedRelativePositionalEncoding, self).__init__()\n",
" self.max_len = max_len\n",
" self.d_model = d_model\n",
" # #创新点 引入可变化的参数uv 进行线性变化\n",
" self.u = tf.Variable(tf.random(self.add_weight(shape=(d_model,), initializer='random_normal', trainable=True)))\n",
" self.v = tf.Variable(tf.random(self.add_weight(shape=(d_model,), initializer='random_normal', trainable=True)))\n",
"\n",
" def call(self, inputs):\n",
" seq_length = tf.shape(inputs)[1]\n",
" pos_encoding = self.relative_positional_encoding(seq_length, self.d_model)\n",
"\n",
" # 保留Sinusoidal生成方案\n",
" angle_rads_sin = pos_encoding[:, :, 0]\n",
" angle_rads_cos = pos_encoding[:, :, 1]\n",
"\n",
" # 线性维度转换层\n",
" ti = tf.expand_dims(inputs, axis=1) # shape: [batch_size, 1, seq_length, d_model]\n",
" tj = tf.expand_dims(inputs, axis=2) # shape: [batch_size, seq_length, 1, d_model]\n",
"\n",
" # 计算表征 t_i * W_q * W_k^T * t_j\n",
" t_wq_wk_t = tf.einsum('bijd,d->bij', tf.einsum('bijd,d->bijd', ti, self.u), tf.transpose(tj, perm=[0, 1, 3, 2]))\n",
"\n",
" # 计算基于全局的偏置 t_i * W_q * W_k^T * R_(i-j)^T\n",
" t_wq_wk_r = tf.einsum('bijd,d->bij', tf.einsum('bijd,d->bijd', ti, self.u), angle_rads_sin)\n",
"\n",
" # 计算基于表征的偏置 u * W_q * W_k^T * t_j\n",
" E_u = tf.einsum('bd,bijd->bij', self.u, ti)\n",
"\n",
" # 计算基于表征的局部偏置 v * W_q * W_k^T * R_(i-j)^T\n",
" R_v = tf.einsum('bd,bijd->bij', self.v, angle_rads_cos)\n",
"\n",
" \n",
" pe_with_params = t_wq_wk_t + R_v + t_wq_wk_r + E_u\n",
"\n",
" return inputs + pe_with_params\n",
"\n",
" def relative_positional_encoding(self, position, d_model):\n",
" pos = tf.range(position, dtype=tf.float32)\n",
" i = tf.range(d_model, dtype=tf.float32)\n",
"\n",
" angles = 1 / tf.pow(10000.0, (2 * (i // 2)) / tf.cast(d_model, tf.float32))\n",
" angle_rads = tf.einsum('i,j->ij', pos, angles)\n",
"\n",
" pos_encoding = tf.stack([tf.sin(angle_rads[:, 0::2]), tf.cos(angle_rads[:, 1::2])], axis=-1)\n",
" pos_encoding = tf.pad(pos_encoding, [[0, 0], [0, 0], [0, 0]]) #embbing维度嵌入层\n",
"\n",
" return pos_encoding\n",
"\n",
"\n",
"\n",
"\n",
"\n",
"def PosConv1biGRUWithSelfAttention(input_shape, gru_units, num_heads):\n",
" inputs = Input(shape=input_shape)\n",
" # CNN layer\n",
" cnn_layer = Conv1D(filters=64, kernel_size=2, activation='relu')(inputs)\n",
" cnn_layer = MaxPooling1D(pool_size=1)(cnn_layer)\n",
" gru_output = Bidirectional(GRU(gru_units, return_sequences=True))(cnn_layer)\n",
" \n",
" # Apply Self-Attention\n",
" self_attention = SelfAttention(d_model=gru_units*2, num_heads=num_heads)\n",
" gru_output, _ = self_attention(gru_output, gru_output, gru_output, mask=None)\n",
" \n",
" pool1 = GlobalAveragePooling1D()(gru_output)\n",
" output = Dense(1)(pool1)\n",
" \n",
" return Model(inputs=inputs, outputs=output)\n",
"\n",
"\n",
"input_shape = (96, 6)\n",
"gru_units = 64\n",
"num_heads = 8\n",
"\n",
"# Create model\n",
"model = PosConv1biGRUWithSelfAttention(input_shape, gru_units, num_heads)\n",
"model.compile(optimizer='adam', loss='mse')\n",
"model.summary()\n"
]
},
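{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick shape check on random data (a sketch, not part of training) confirming that the custom SelfAttention layer preserves the (batch, steps, d_model) layout it receives from the BiGRU:\n",
"\n",
"```python\n",
"import tensorflow as tf\n",
"\n",
"attn = SelfAttention(d_model=128, num_heads=8)\n",
"\n",
"dummy = tf.random.normal((2, 95, 128))  # (batch, steps after Conv1D/pooling, 2*gru_units)\n",
"out, weights = attn(dummy, dummy, dummy, mask=None)\n",
"\n",
"print(out.shape)      # (2, 95, 128)\n",
"print(weights.shape)  # (2, 8, 95, 95): one attention map per head\n",
"```"
]
},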
{
"cell_type": "code",
"execution_count": 39,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m71s\u001b[0m 53ms/step - loss: 0.0196 - val_loss: 0.0018\n",
"Epoch 2/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m77s\u001b[0m 59ms/step - loss: 0.0013 - val_loss: 0.0019\n",
"Epoch 3/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m79s\u001b[0m 61ms/step - loss: 0.0012 - val_loss: 0.0017\n",
"Epoch 4/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m76s\u001b[0m 58ms/step - loss: 0.0010 - val_loss: 0.0015\n",
"Epoch 5/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m75s\u001b[0m 57ms/step - loss: 9.7760e-04 - val_loss: 0.0018\n",
"Epoch 6/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m77s\u001b[0m 59ms/step - loss: 9.9108e-04 - val_loss: 0.0017\n",
"Epoch 7/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m90s\u001b[0m 69ms/step - loss: 9.7381e-04 - val_loss: 0.0016\n",
"Epoch 8/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m82s\u001b[0m 63ms/step - loss: 9.1248e-04 - val_loss: 0.0015\n",
"Epoch 9/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m86s\u001b[0m 66ms/step - loss: 9.4959e-04 - val_loss: 0.0016\n",
"Epoch 10/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m76s\u001b[0m 58ms/step - loss: 9.3746e-04 - val_loss: 0.0016\n",
"Epoch 11/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m88s\u001b[0m 68ms/step - loss: 9.1358e-04 - val_loss: 0.0015\n",
"Epoch 12/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m80s\u001b[0m 61ms/step - loss: 8.8907e-04 - val_loss: 0.0016\n",
"Epoch 13/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m87s\u001b[0m 67ms/step - loss: 9.0822e-04 - val_loss: 0.0015\n",
"Epoch 14/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m89s\u001b[0m 68ms/step - loss: 8.9505e-04 - val_loss: 0.0015\n",
"Epoch 15/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m88s\u001b[0m 68ms/step - loss: 8.9855e-04 - val_loss: 0.0015\n",
"Epoch 16/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m89s\u001b[0m 68ms/step - loss: 9.4414e-04 - val_loss: 0.0015\n",
"Epoch 17/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m91s\u001b[0m 70ms/step - loss: 8.8443e-04 - val_loss: 0.0014\n",
"Epoch 18/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m89s\u001b[0m 68ms/step - loss: 8.7323e-04 - val_loss: 0.0015\n",
"Epoch 19/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m90s\u001b[0m 69ms/step - loss: 8.7132e-04 - val_loss: 0.0014\n",
"Epoch 20/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m86s\u001b[0m 66ms/step - loss: 8.7265e-04 - val_loss: 0.0015\n",
"Epoch 21/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m82s\u001b[0m 63ms/step - loss: 8.4318e-04 - val_loss: 0.0015\n",
"Epoch 22/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m87s\u001b[0m 67ms/step - loss: 7.9306e-04 - val_loss: 0.0015\n",
"Epoch 23/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m84s\u001b[0m 65ms/step - loss: 8.1019e-04 - val_loss: 0.0015\n",
"Epoch 24/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m91s\u001b[0m 70ms/step - loss: 7.8526e-04 - val_loss: 0.0015\n",
"Epoch 25/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m94s\u001b[0m 72ms/step - loss: 8.6874e-04 - val_loss: 0.0014\n",
"Epoch 26/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m90s\u001b[0m 69ms/step - loss: 8.0322e-04 - val_loss: 0.0015\n",
"Epoch 27/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m90s\u001b[0m 69ms/step - loss: 8.3907e-04 - val_loss: 0.0014\n",
"Epoch 28/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m92s\u001b[0m 71ms/step - loss: 8.2911e-04 - val_loss: 0.0015\n",
"Epoch 29/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m93s\u001b[0m 71ms/step - loss: 8.1428e-04 - val_loss: 0.0015\n",
"Epoch 30/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m90s\u001b[0m 69ms/step - loss: 8.1292e-04 - val_loss: 0.0015\n",
"Epoch 31/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m90s\u001b[0m 69ms/step - loss: 8.2787e-04 - val_loss: 0.0015\n",
"Epoch 32/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m99s\u001b[0m 76ms/step - loss: 7.9780e-04 - val_loss: 0.0015\n",
"Epoch 33/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m98s\u001b[0m 75ms/step - loss: 7.9815e-04 - val_loss: 0.0015\n",
"Epoch 34/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m96s\u001b[0m 74ms/step - loss: 7.9996e-04 - val_loss: 0.0016\n",
"Epoch 35/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m93s\u001b[0m 71ms/step - loss: 7.5751e-04 - val_loss: 0.0016\n",
"Epoch 36/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m84s\u001b[0m 65ms/step - loss: 8.1121e-04 - val_loss: 0.0015\n",
"Epoch 37/100\n",
"\u001b[1m1302/1302\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m92s\u001b[0m 71ms/step - loss: 7.6797e-04 - val_loss: 0.0015\n",
"\u001b[1m651/651\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m12s\u001b[0m 16ms/step\n"
]
}
],
"source": [
"# Compile and train the model\n",
"model.compile(optimizer='adam', loss='mean_squared_error')\n",
"from keras.callbacks import EarlyStopping, ModelCheckpoint\n",
"\n",
"# 定义早停机制\n",
"early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=0, mode='min')\n",
"\n",
"# 拟合模型,并添加早停机制和模型检查点\n",
"history = model.fit(train_X, train_y, epochs=100, batch_size=64, validation_data=(test_X, test_y), \n",
" callbacks=[early_stopping])\n",
"# 预测\n",
"lstm_pred = model.predict(test_X)\n",
"# 将预测结果的形状修改为与原始数据相同的形状"
]
},
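{
"cell_type": "markdown",
"metadata": {},
"source": [
"The comment above mentions a model checkpoint, but none is configured. A hedged sketch of how one could be added; the file name best_model.keras is an arbitrary choice, not something used elsewhere in this notebook.\n",
"\n",
"```python\n",
"from keras.callbacks import ModelCheckpoint\n",
"\n",
"# Keep only the weights of the epoch with the lowest validation loss\n",
"checkpoint = ModelCheckpoint('best_model.keras',  # hypothetical output path\n",
"                             monitor='val_loss',\n",
"                             save_best_only=True,\n",
"                             mode='min',\n",
"                             verbose=0)\n",
"\n",
"# history = model.fit(train_X, train_y, epochs=100, batch_size=64,\n",
"#                     validation_data=(test_X, test_y),\n",
"#                     callbacks=[early_stopping, checkpoint])\n",
"```"
]
},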
{
"cell_type": "code",
"execution_count": 40,
"metadata": {},
"outputs": [
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAjUAAAGdCAYAAADqsoKGAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/TGe4hAAAACXBIWXMAAA9hAAAPYQGoP6dpAABRU0lEQVR4nO3dfXxT1eE/8M/Nc9LStFBsWii0QhGRh8pTLTJxs1occ1Y3h7jJw5ioU39gRQQGxQe0DsUhynf9uk1x+46BTEWHWMUibINa5ElFBQGLRWkKBduU9CFNcn9/nCQlEEpTcm9o+3m/XtekNyc35yaVfHrOuedIsizLICIiIurgNNGuABEREVEkMNQQERFRp8BQQ0RERJ0CQw0RERF1Cgw1RERE1Ckw1BAREVGnwFBDREREnQJDDREREXUKumhXQC1erxdHjx5Ft27dIElStKtDREREbSDLMurq6pCSkgKNpvW2mC4Tao4ePYrU1NRoV4OIiIja4ciRI+jdu3erZbpMqOnWrRsA8abExcVFuTZERETUFg6HA6mpqYHv8dZ0mVDj73KKi4tjqCEiIupg2jJ0hAOFiYiIqFNgqCEiIqJOgaGGiIiIOoUuM6aGiIhIKbIsw+12w+PxRLsqHY5Wq4VOp4vIdCsMNURERBfA5XKhsrIS9fX10a5Kh2WxWJCcnAyDwXBBx2GoISIiaiev14vy8nJotVqkpKTAYDBwgtcwyLIMl8uF48ePo7y8HBkZGeedYK81DDVERETt5HK54PV6kZqaCovFEu3qdEhmsxl6vR7ffPMNXC4XTCZTu4/Vrji0YsUKpKWlwWQyISsrC9u3b2+1/Nq1azFw4ECYTCYMGTIEGzZsCHpclmUUFBQgOTkZZrMZOTk5OHDgQODxzZs3Q5KkkNvHH3/cnlMgIiKKmAtpXaDIvX9hH2XNmjXIz8/HokWLsGvXLgwbNgy5ubk4duxYyPLbtm3DpEmTMH36dOzevRt5eXnIy8vD3r17A2WWLFmC5cuXo6ioCGVlZYiJiUFubi4aGxsBAGPGjEFlZWXQ9pvf/Abp6ekYOXJkO0+diIiIOhNJlmU5nCdkZWVh1KhRePHFFwEg0Oz2wAMPYO7cuWeVnzhxIpxOJ9avXx/Yd9VVVyEzMxNFRUWQZRkpKSl46KGHMHv2bABAbW0tkpKSsHLlStx+++1nHbO5uRm9evXCAw88gIULF7ap3g6HA1arFbW1tZxRmIiIIqKxsRHl5eVIT0+/oG6Trq619zGc7++wWmpcLhd27tyJnJyclgNoNMjJyUFpaWnI55SWlgaVB4Dc3NxA+fLyctjt9qAyVqsVWVlZ5zzm22+/jRMnTmDatGnhVJ+IiIgUkJaWhmXLlkW7GuENFK6urobH40FSUlLQ/qSkJOzbty/kc+x2e8jydrs98Lh/37nKnOkvf/kLcnNzW12ts6mpCU1NTYGfHQ7HOcsSERF1Nddeey0yMzMjEkY+/vhjxMTEXHilLlCHG9n07bff4r333sP06dNbLVdYWAir1RrYUlNTFanPgao6PLH+CxRtOaTI8YmIiKLBP6FgW/Ts2fOiuPorrFCTmJgIrVaLqqqqoP1VVVWw2Wwhn2Oz2Vot779t6zFfeeUV9OjRAz/96U9breu8efNQW1sb2I4cOdL6ybXT0dpG/OW/5Xh7z1FFjk9ERB2HLMuod7mjsoUzRHbq1KnYsmULnn/++cDVxCtXroQkSXj33XcxYsQIGI1G/Pe//8WhQ4dw8803IykpCbGxsRg1ahQ++OCDoOOd2f0kSRL+/Oc/45ZbboHFYkFGRgbefvvtSL3N5xRW95PBYMCIESNQUlKCvLw8AGKgcElJCe6///6Qz8nOzkZJSQlmzZoV2Ldx40ZkZ2cDANLT02Gz2VBSUoLMzEwAoquorKwM9957b9CxZFnGK6+8gsmTJ0Ov17daV6PRCKPRGM7ptYtZrwUANDRzamwioq6uodmDQQXvReW1v3g8FxZD277Wn3/+eXz11VcYPHgwHn/8cQDA559/DgCYO3cunn32WVx66aVISEjAkSNH8OMf/xhPPvkkjEYj/vrXv+Kmm27C/v370adPn3O+xmOPPYYlS5bgmWeewQsvvIBf/vKX+Oabb9C9e/cLP9lzCLv7KT8/H3/605/w6quv4ssvv8S9994Lp9MZGLQ7efJkzJs3L1B+5syZKC4uxtKlS7Fv3z48+uij2LFjRyAESZKEWbNmYfHixXj77bfx2WefYfLkyUhJSQkEJ79NmzahvLwcv/nNby7glCPLYvCFGhdDDRERdQxWqxUGgwEWiwU2mw02mw1arfg+e/zxx3H99dejX79+6N69O4YNG4a7774bgwcPRkZGBp544gn069fvvC0vU6dOxaRJk9C/f3889dRTOHXq1HnntbtQYc8oPHHiRBw/fhwFBQWw2+3IzMxEcXFxYKBvRUVF0CQ6Y8aMwapVq7BgwQLMnz8fGRkZWLduHQYPHhwoM2fOHDidTsyYMQM1NTUYO3YsiouLz7qs6y9/+QvGjBmDgQMHtvd8I87ElhoiIvIx67X44vHcqL12JJw5/9upU6fw6KOP4p133kFlZSXcbjcaGhpQUVHR6nGGDh0auB8TE4O4uLhzzmkXKe1aJuH+++8/Z3fT5s2bz9p322234bbbbjvn8SRJwuOPPx5oAjuXVatWhVVPNZjZUkNERD6SJLW5C+hideZVTLNnz8bGjRvx7LPPon///jCbzfj5z38Ol8vV6nHOHCYiSRK8Xm/E63u6jv3OXwQsvmTs8njh9nih03a4C8qIiKgLMhgM8HjO/wf51q1bMXXqVNxyyy0ARMvN4cOHFa5d+/Ab+AL5W2oAoNGtbAIlIiKKlLS0NJSVleHw4cOorq4+ZytKRkYG3njjDezZsweffPIJ7rjjDsVbXNqLoeYCGXUa+FeZr3e17Xp+IiKiaJs9eza0Wi0GDRqEnj17nnOMzHPPPYeEhASMGTMGN910E3JzczF8+HCVa9s27H66QJIkwazXot7lQaPr4kyuREREZxowYMBZyxFNnTr1rHJpaWnYtGlT0L777rsv6Oczu6NCzZlTU1PTrnqGgy01EcC5aoiIiKKPoSYC/ONq2P1EREQUPQw1EcCWGiIiouhjqIkAzlVDREQUfQw1EcCWGiIiouhjqIkAttQQERFFH0NNBLClhoiIKPoYaiKALTVERETRx1ATAWypISIiij6GmggIhBq21BARUQdx7bXXYtasWRE73tSpU5GXlxex47UHQ00EWAxsqSEiIoo2hpoIMHFMDRERdSBTp07Fli1b8Pzzz0OSJEiShMOHD2Pv3r248cYbERsbi6SkJNx5552orq4OPO+f//wnhgwZArPZjB49eiAnJwdOpxOPPvooXn31Vbz11luB423evFn18+KClhFg8XU/1bOlhoioa5NloLk+Oq+ttwCS1Kaizz//PL766isMHjwYjz/+uHi6Xo/Ro0fjN7/5Df7whz+goaEBjzzyCH7xi19g06ZNqKysxKRJk7BkyRLccsstqKurw
3/+8x/IsozZs2fjyy+/hMPhwCuvvAIA6N69u2Knei4MNRHgv/qpkS01RERdW3M98FRKdF57/lHAENOmolarFQaDARaLBTabDQCwePFiXHnllXjqqacC5V5++WWkpqbiq6++wqlTp+B2u3Hrrbeib9++AIAhQ4YEyprNZjQ1NQWOFw0MNRFg8rfUMNQQEVEH9cknn+DDDz9EbGzsWY8dOnQIN9xwA6677joMGTIEubm5uOGGG/Dzn/8cCQkJUahtaAw1EWAxiLeRA4WJiLo4vUW0mETrtS/AqVOncNNNN+H3v//9WY8lJydDq9Vi48aN2LZtG95//3288MIL+N3vfoeysjKkp6df0GtHCkNNBPgv6W5kqCEi6tokqc1dQNFmMBjg8bR8bw0fPhyvv/460tLSoNOFjgeSJOHqq6/G1VdfjYKCAvTt2xdvvvkm8vPzzzpeNPDqpwgwG8TbyO4nIiLqKNLS0lBWVobDhw+juroa9913H06ePIlJkybh448/xqFDh/Dee+9h2rRp8Hg8KCs
"text/plain": [
"<Figure size 640x480 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"plt.plot(history.history['loss'], label='train')\n",
"plt.plot(history.history['val_loss'], label='test')\n",
"plt.legend()\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": 41,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(20831, 1)"
]
},
"execution_count": 41,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"lstm_pred.shape"
]
},
{
"cell_type": "code",
"execution_count": 42,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(20831,)"
]
},
"execution_count": 42,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"test_y.shape"
]
},
{
"cell_type": "code",
"execution_count": 43,
"metadata": {},
"outputs": [],
"source": [
"test_y1=test_y.reshape(20831,1)"
]
},
{
"cell_type": "code",
"execution_count": 44,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"array([[4.52189913e-01],\n",
" [3.12516873e-01],\n",
" [3.25310588e-01],\n",
" ...,\n",
" [1.08522631e-04],\n",
" [1.18219088e-04],\n",
" [1.28327022e-04]])"
]
},
"execution_count": 44,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"test_y1"
]
},
{
"cell_type": "code",
"execution_count": 45,
"metadata": {},
"outputs": [],
"source": [
"results1 = np.broadcast_to(lstm_pred, (20831, 6))"
]
},
{
"cell_type": "code",
"execution_count": 46,
"metadata": {},
"outputs": [],
"source": [
"test_y2 = np.broadcast_to(test_y1, (20831, 6))"
]
},
{
"cell_type": "code",
"execution_count": 47,
"metadata": {},
"outputs": [],
"source": [
"# 反归一化\n",
"inv_forecast_y = scaler.inverse_transform(results1)\n",
"inv_test_y = scaler.inverse_transform(test_y2)"
]
},
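{
"cell_type": "markdown",
"metadata": {},
"source": [
"The broadcasting above repeats the single predicted column across all 6 features so that scaler.inverse_transform accepts it. A sketch of an equivalent, more direct inversion of just the target column (index 5) from the scaler's per-column statistics:\n",
"\n",
"```python\n",
"col = 5  # index of the high-frequency target column in merged_df\n",
"scale_range = scaler.data_max_[col] - scaler.data_min_[col]\n",
"\n",
"inv_pred_direct = lstm_pred[:, 0] * scale_range + scaler.data_min_[col]\n",
"inv_true_direct = test_y * scale_range + scaler.data_min_[col]\n",
"\n",
"# Should match the broadcast-based inverse transform above\n",
"print(inv_pred_direct[:3])\n",
"print(inv_forecast_y[:3, 5])\n",
"```"
]
},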
{
"cell_type": "code",
"execution_count": 48,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"array([[ 1.78428369e+01, 4.82409691e+01, 6.37156385e+02,\n",
" 2.97801603e+02, 1.07621239e+01, 9.90052500e-01],\n",
" [ 1.07562527e+01, 3.44305945e+01, 4.40440713e+02,\n",
" 2.05929459e+02, 7.43790432e+00, 1.80780551e-01],\n",
" [ 1.14053667e+01, 3.56955916e+01, 4.58459395e+02,\n",
" 2.14344726e+02, 7.74239484e+00, 2.54907916e-01],\n",
" ...,\n",
" [-5.09439462e+00, 3.54076535e+00, 4.44428011e-01,\n",
" 4.37940726e-01, 2.58283957e-03, -1.62932764e+00],\n",
" [-5.09390265e+00, 3.54172410e+00, 4.58084512e-01,\n",
" 4.44318723e-01, 2.81361533e-03, -1.62927146e+00],\n",
" [-5.09338980e+00, 3.54272354e+00, 4.72320538e-01,\n",
" 4.50967376e-01, 3.05418424e-03, -1.62921289e+00]])"
]
},
"execution_count": 48,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"inv_test_y"
]
},
{
"cell_type": "code",
"execution_count": 49,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Test RMSE: 0.222\n"
]
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAABQoAAAKTCAYAAABRkzVdAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/TGe4hAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOy9d9w0V13+f52Z7Xd9auoTEtKkBggokRZRREUUGyr4A1RAkChRsaCifkEJSvuCfgkIaEAEkSbN0AkhtEB6IT1P2tPLfd9bp57fH2d2d3Z2dnf23im7e1/v1ytkd2Z2Zsi5z8w517k+n4+QUkoQQgghhBBCCCGEEEK2NFrWN0AIIYQQQgghhBBCCMkeCoWEEEIIIYQQQgghhBAKhYQQQgghhBBCCCGEEAqFhBBCCCGEEEIIIYQQUCgkhBBCCCGEEEIIIYSAQiEhhBBCCCGEEEIIIQQUCgkhhBBCCCGEEEIIIQByWd/AMFzXxb59+7C0tAQhRNa3QwghhBBCCCGEEELITCGlRLVaxcknnwxNG+4ZnGqhcN++fdizZ0/Wt0EIIYQQQgghhBBCyEzzwAMP4NRTTx16zFQLhUtLSwDU/5Hl5eWM74YQQgghhBBCCCGEkNliY2MDe/bs6ehsw5hqobAdbry8vEyhkBBCCCGEEEIIIYSQTRIlrR+LmRBCCCGEEEIIIYQQQigUEkIIIYQQQgghhBBCKBQSQgghhBBCCCGEEEIw5TkKCSGEEEIIIYQQQsjWxHEcWJaV9W3MBIVCAZo2uR+QQiEhhBBCCCGEEEIImRqklDhw4ADW1tayvpWZQdM0nHHGGSgUChOdh0IhIYQQQgghhBBCCJka2iLh7t27UalUIlXr3cq4rot9+/Zh//79OO200yb670WhkBBCCCGEEEIIIYRMBY7jdETCHTt2ZH07M8OuXbuwb98+2LaNfD6/6fOwmAkhhBBCCCGEEEIImQraOQkrlUrGdzJbtEOOHceZ6DwUCgkhhBBCCCGEEELIVMFw4/GI678XhUJCCCGEEEIIIYQQQgiFQkIIIYQQQgghhBBCCIVCQgghhBBCCCGEEEIIKBQSQgghhBBCCCGEEDIxF154IS6++OKsb2MiKBQSQgghhBBCCCGEEJIwUkrYtp31bQyFQiEhhBBCCCGEEEIImUqklGiYdib/SCkj3+dLXvISfOMb38A73vEOCCEghMBll10GIQQuv/xynH/++SgWi7jqqqvwkpe8BM973vN6fn/xxRfjwgsv7Hx3XReXXHIJzjjjDJTLZZx33nn4+Mc/HtN/1cHkEr8CIYQQQgghhBBCCCGboGk5eOTffDGTa9/6+mejUogmnb3jHe/AHXfcgUc/+tF4/etfDwC45ZZbAAB/8Rd/gbe85S14+MMfjm3btkU63yWXXIIPfehDePe7342zzz4bV155JX7rt34Lu3btwjOe8YzN/R+KAIVCQgghhBBCCCGEEEImYGVlBYVCAZVKBSeeeCIA4LbbbgMAvP71r8eznvWsyOcyDANvfOMb8ZWvfAUXXHABAODhD384rrrqKrznPe+hUEgIIYQQQgghhBBCth7lvI5bX//szK4dB0984hPHOv6uu+5Co9HoExdN08TjH//4WO5pEBQKCSGEEEIIIYQQQshUIoSIHP47rSwsLPR81zStL/+hZVmdz7VaDQDw+c9/HqecckrPccViMaG7VMz2f2lCCCGEEEIIIYQQQqaAQqEAx3FGHrdr1y7cfPPNPduuv/565PN5AMAjH/lIFItF3H///YmGGYdBoZAQQgghhBBCCCGEkAk5/fTT8b3vfQ979+7F4uIiXNcNPe6Zz3wm3vzmN+ODH/wgLrjgAnzoQx/CzTff3AkrXlpawmte8xr80R/9EVzXxVOf+lSsr6/jW9/6FpaXl/HiF784sf8PWmJnJoQQQgghhBBCCCFki/Ca17wGuq7jkY98JHbt2oX7778/9LhnP/vZeN3rXoc/+7M/w5Oe9CRUq1W86EUv6jnmDW94A173utfhkksuwSMe8Qj8zM/8DD7/+c/jjDPOSPT/g5DBoOgpYmNjAysrK1hfX8fy8nLWt0MIIYQQQgghhBBCEqTVauHee+/FGWecgVKplPXtzAzD/ruNo6/RUUgIIYQQQgghhBBCCKFQSAghhBBCCJmcB4418Kr/vBbXP7CW9a0QQgghZJNQKCSEEEIIIYRMzMUfvR6fv2k/nvf/vhX5N3cdquL3/uMHuGXfeoJ3RgghhJCoUCgkhBBCCCGETMx9R+tj/+a33nc1vnjLQfzyu76dwB0RQgghZFwoFBJCCCGEEEImJq+PP7U4sNECABi2G/ftEEIIIWQTUCgkhBBCCCGETExOF1nfAiGEEEImhEIhIYQQQgghZGLyGqcWhBBCyKzDtzkhhBBCCCFkYjYTekwIIYSQ6YJvc0IIIYTMBP/xnb34u8/cAill1rdCCAmBoceEEELI7JPL+gYIIYQQQqLwuk/fAgD4ucechB89Y3vGd0MICZKjo5AQQgiZefg2J4QQQshMsd60sr4FQkgIBToKCSGEkMiYppn1LYRCoZAQQgghM4XjulnfAiEkBOYoJIQQspW58MILcdFFF+Giiy7CysoKdu7cide97nWdtDmnn3463vCGN+BFL3oRlpeX8fKXvxwAcNVVV+FpT3sayuUy9uzZgz/8wz9EvV7P7P8H3+aEEEIImSlslzkKCZlG/KHHzCVKCCEkNqQEzHo2/4z5PvvABz6AXC6Hq6++Gu94xzvwtre9De973/s6+9/ylrfgvPPOw3XXXYfXve51uPvuu/EzP/Mz+JVf+RXceOON+OhHP4qrrroKF110Udz/FSPDHIWEEEIImWpu2beOf7tqb+e77VCAIGQayWvd0GPTcVHM6RneDSGEkLnBagBvPDmba//lPqCwEPnwPXv24O1vfzuEEDj33HNx00034e1vfzte9rKXAQCe+cxn4k/+5E86x7/0pS/FC1/4Qlx88cUAgLPPPhvvfOc78YxnPAOXXnopSqVSrP93okBHISGEEEKmmp//56vwiWsf7Hyno5CQ6YcZAgghhGxFnvzkJ0OI7sLZBRdcgDvvvBOO4wAAnvjEJ/Ycf8MNN+Cyyy7D4uJi559nP/vZcF0X9957b6r33oaOQkIIIYRMNcGID+YoJGQ6cRhuTAghJAnyFeXsy+raMbKw0OtOrNVq+L3f+z384R/+Yd+xp512WqzXjgqFQkIIIYTMFHQUEjKdOL6+KcF+SgghJCaEGCv8N0u+973v9Xz/7ne/i7PPPhu6Hp6O4wlPeAJuvfVWnHXWWWncXiQYekwIIYSQmcKhUEjIVNIjFLKbEkII2YLcf//9+OM//mPcfvvt+MhHPoJ//ud/xqtf/eqBx//5n/85vv3tb+Oiiy7C9ddfjzvvvBOf/vSnWcyEEEIIIcTPP37hNtxzuIZLX3h+3z6LxUwImUp6HYWEEELI1uNFL3oRms0mfvRHfxS6ruPVr341Xv7ylw88/rGPfSy+8Y1v4K/+6q/wtKc9DVJKnHnmmfj1X
//1FO+6FwqFhBBCCJk6Lr3ibgDANfcf79vHHIWETCeu9DsKKRUSQgjZeuTzefzf//t/cemll/bt27t3b+hvnvSkJ+FLX/pSwncWHYYeE0IIIWRqsZx+UZA5CgmZTiZxFGpi9DGEEEIISR4KhYQQQgiZWgT61YO7D9Xx/Hd/B1+//VAGd0QIGcQkWQE0QaWQEEIImQYYekwIIYSQqSXMZfSJax8EAFz978ew903PSfmOCCGDcCcoZkKhkBBCyKxzxRVXZH0LsUBHISGEEEKmFk0T0BmTSMhM0FORfFx3Ibs5IYQQMhVQKCSEEELI1KIJIK9TQSBkFugpZjKmUsj1gNnmB3uP4an/+DV85daDWd8KIWSOYGGs8YjrvxeFQkIIIYRMFf5BjhACeZ3DFUJmAWeC0GOdocczzcs++AM8eLyJl37wB1nfCiFkDsjn8wCARqOR8Z3MFqZpAgB0XZ/oPMxRSAiZew5VW3jNx27E+adtw6t/6uysb4cQMgJ/9KIAUMxpqGZ2N4SQUdx3tI5/u+p
"text/plain": [
"<Figure size 1600x800 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# 计算均方根误差\n",
"rmse = sqrt(mean_squared_error(inv_test_y[:,5], inv_forecast_y[:,5]))\n",
"print('Test RMSE: %.3f' % rmse)\n",
"#画图\n",
"plt.figure(figsize=(16,8))\n",
"plt.plot(inv_test_y[300:3000,5], label='true')\n",
"plt.plot(inv_forecast_y[300:3000,5], label='pre')\n",
"plt.legend()\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": 57,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"mean_squared_error: 0.0014630274318863602\n",
"mean_absolute_error: 0.013232284805068965\n",
"rmse: 0.03824954159053884\n",
"r2 score: 0.9900756487103545\n"
]
}
],
"source": [
"from sklearn.metrics import mean_squared_error, mean_absolute_error # 评价指标\n",
"# 使用sklearn调用衡量线性回归的MSE 、 RMSE、 MAE、r2\n",
"from math import sqrt\n",
"from sklearn.metrics import mean_absolute_error\n",
"from sklearn.metrics import mean_squared_error\n",
"from sklearn.metrics import r2_score\n",
"print('mean_squared_error:', mean_squared_error(lstm_pred, test_y)) # mse)\n",
"print(\"mean_absolute_error:\", mean_absolute_error(lstm_pred, test_y)) # mae\n",
"print(\"rmse:\", sqrt(mean_squared_error(lstm_pred,test_y)))\n",
"print(\"r2 score:\", r2_score(inv_test_y[480:850,5], inv_forecast_y[480:850,5]))"
]
},
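{
"cell_type": "markdown",
"metadata": {},
"source": [
"For reference, a sketch that computes all four metrics on the same quantity, the de-normalized target column over the whole test set, instead of mixing scaled and de-normalized values as above:\n",
"\n",
"```python\n",
"from math import sqrt\n",
"from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score\n",
"\n",
"y_true = inv_test_y[:, 5]\n",
"y_pred = inv_forecast_y[:, 5]\n",
"\n",
"print('MSE :', mean_squared_error(y_true, y_pred))\n",
"print('MAE :', mean_absolute_error(y_true, y_pred))\n",
"print('RMSE:', sqrt(mean_squared_error(y_true, y_pred)))\n",
"print('R2  :', r2_score(y_true, y_pred))\n",
"```"
]
},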
{
"cell_type": "code",
"execution_count": 107,
"metadata": {},
"outputs": [],
"source": [
"df1 = pd.DataFrame(inv_test_y[:,5], columns=['column_name'])"
]
},
{
"cell_type": "code",
"execution_count": 108,
"metadata": {},
"outputs": [],
"source": [
"# 指定文件路径和文件名保存DataFrame到CSV文件中\n",
"df1.to_csv('高频_test.csv', index=False)"
]
},
{
"cell_type": "code",
"execution_count": 109,
"metadata": {},
"outputs": [],
"source": [
"df2 = pd.DataFrame(inv_forecast_y[:,5], columns=['column_name'])"
]
},
{
"cell_type": "code",
"execution_count": 110,
"metadata": {},
"outputs": [],
"source": [
"# 指定文件路径和文件名保存DataFrame到CSV文件中\n",
"df2.to_csv('高频_forecast.csv', index=False)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "base",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.13"
}
},
"nbformat": 4,
"nbformat_minor": 2
}