optim
parent c32ec7fe15
commit 1d946b4464
File diff suppressed because it is too large
File diff suppressed because it is too large
|
@ -549,7 +549,7 @@
|
|||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"2024-01-04 16:49:03.492957: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n"
|
||||
"2024-01-05 16:46:07.061819: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
@ -563,51 +563,6 @@
|
|||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "c2318ce6-60d2-495c-91cd-67ca53609cf8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"WARNING:tensorflow:From /tmp/ipykernel_45930/337460670.py:1: is_gpu_available (from tensorflow.python.framework.test_util) is deprecated and will be removed in a future version.\n",
|
||||
"Instructions for updating:\n",
|
||||
"Use `tf.config.list_physical_devices('GPU')` instead.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"False"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"2024-01-04 16:49:04.396035: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX512F FMA\n",
|
||||
"To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
|
||||
"2024-01-04 16:49:04.407586: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcuda.so.1\n",
|
||||
"2024-01-04 16:49:04.465739: E tensorflow/stream_executor/cuda/cuda_driver.cc:328] failed call to cuInit: CUDA_ERROR_INVALID_DEVICE: invalid device ordinal\n",
|
||||
"2024-01-04 16:49:04.465795: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: zhaojh-yv621\n",
|
||||
"2024-01-04 16:49:04.465807: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: zhaojh-yv621\n",
|
||||
"2024-01-04 16:49:04.466010: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:200] libcuda reported version is: 520.61.5\n",
|
||||
"2024-01-04 16:49:04.466041: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:204] kernel reported version is: 520.61.5\n",
|
||||
"2024-01-04 16:49:04.466045: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:310] kernel version seems to match DSO: 520.61.5\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"tf.test.is_gpu_available()"
|
||||
]
|
||||
},
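The cell removed here relied on the deprecated `tf.test.is_gpu_available()`; its own warning output points to the replacement API. A minimal sketch of the suggested check:

```python
import tensorflow as tf

# Replacement suggested by the deprecation warning above:
# returns a (possibly empty) list of visible GPU devices instead of a bool.
gpus = tf.config.list_physical_devices('GPU')
print(f"GPUs visible: {len(gpus)}")  # 0 on this host, judging from the CUDA_ERROR_INVALID_DEVICE log
```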
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "1c85d462-f248-4ffb-908f-eb4b20eab179",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
|
@ -635,7 +590,7 @@
|
|||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"execution_count": 13,
|
||||
"id": "790284a3-b9d3-4144-b481-38a7c3ecb4b9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
|
@ -645,7 +600,7 @@
|
|||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"execution_count": 14,
|
||||
"id": "cd9a1ca1-d0ca-4cb5-9ef5-fd5d63576cd2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
|
@ -655,7 +610,7 @@
|
|||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"execution_count": 15,
|
||||
"id": "9bc02f29-0fb7-420d-99a8-435eadc06e29",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
|
@ -695,7 +650,7 @@
|
|||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"execution_count": 16,
|
||||
"id": "a190207e-5a59-4813-9660-758760cf1b73",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
|
@ -705,20 +660,12 @@
|
|||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"execution_count": 17,
|
||||
"id": "80f32155-e71f-4615-8d0c-01dfd04988fe",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def get_prediction_model():\n",
|
||||
" def build_output(out, out_name):\n",
|
||||
" self_block = TransformerBlock(64, num_heads, ff_dim, name=f'{out_name}_attn')\n",
|
||||
" out = self_block(out)\n",
|
||||
" out = layers.GlobalAveragePooling1D()(out)\n",
|
||||
" out = layers.Dropout(0.1)(out)\n",
|
||||
" out = layers.Dense(32, activation=\"relu\")(out)\n",
|
||||
" # out = layers.Dense(1, name=out_name, activation=\"sigmoid\")(out)\n",
|
||||
" return out\n",
|
||||
" inputs = layers.Input(shape=(1,len(feature_cols)), name='input')\n",
|
||||
" x = layers.Conv1D(filters=64, kernel_size=1, activation='relu')(inputs)\n",
|
||||
" # x = layers.Dropout(rate=0.1)(x)\n",
|
||||
|
@ -729,10 +676,10 @@
|
|||
" out = layers.GlobalAveragePooling1D()(out)\n",
|
||||
" out = layers.Dropout(0.1)(out)\n",
|
||||
" out = layers.Dense(64, activation='relu')(out)\n",
|
||||
" out = K.expand_dims(out, axis=1)\n",
|
||||
" # out = K.expand_dims(out, axis=1)\n",
|
||||
"\n",
|
||||
" bet = build_output(out, 'vad')\n",
|
||||
" mesco = build_output(out, 'fcad')\n",
|
||||
" bet = layers.Dense(32, activation=\"relu\")(out)\n",
|
||||
" mesco = layers.Dense(32, activation=\"relu\")(out)\n",
|
||||
"\n",
|
||||
" bet = layers.Dense(1, activation='sigmoid', name='vad')(bet)\n",
|
||||
" mesco = layers.Dense(1, activation='sigmoid', name='fcad')(mesco)\n",
|
||||
|
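`get_prediction_model` builds on a `TransformerBlock` layer whose definition sits outside the hunks shown here. For reference, a typical Keras implementation that matches the `(embed_dim, num_heads, ff_dim)` call signature used above looks like the sketch below; the body is an assumption, not the notebook's actual code:

```python
import tensorflow as tf
from tensorflow.keras import layers

class TransformerBlock(layers.Layer):
    """Sketch of a standard block: self-attention + feed-forward, each with residual + LayerNorm."""
    def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1, **kwargs):
        super().__init__(**kwargs)
        self.att = layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)
        self.ffn = tf.keras.Sequential(
            [layers.Dense(ff_dim, activation="relu"), layers.Dense(embed_dim)]
        )
        self.norm1 = layers.LayerNormalization(epsilon=1e-6)
        self.norm2 = layers.LayerNormalization(epsilon=1e-6)
        self.drop1 = layers.Dropout(rate)
        self.drop2 = layers.Dropout(rate)

    def call(self, inputs, training=False):
        attn_out = self.att(inputs, inputs)
        out1 = self.norm1(inputs + self.drop1(attn_out, training=training))
        ffn_out = self.ffn(out1)
        return self.norm2(out1 + self.drop2(ffn_out, training=training))
```

With `K.expand_dims` commented out and the per-output `build_output` branches replaced by plain `Dense(32)` heads, the `vad`/`fcad` outputs end up as rank-2 `(None, 1)` tensors.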
@ -743,7 +690,7 @@
|
|||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 30,
|
||||
"execution_count": 18,
|
||||
"id": "264001b1-5e4a-4786-96fd-2b5c70ab3212",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
|
@ -757,6 +704,16 @@
|
|||
" return Model([inputs, bet_real, mesco_real], out)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "372011ea-9876-41eb-a4e6-83ccd6c71559",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from tensorflow.python.keras.utils.vis_utils import plot_model"
|
||||
]
|
||||
},
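`plot_model` renders the model graph to an image and needs `pydot` plus a system Graphviz install; a typical call (the file name here is illustrative):

```python
# Requires: pip install pydot, plus Graphviz available on the system PATH.
plot_model(prediction_model, to_file='prediction_model.png', show_shapes=True)
```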
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
|
@ -1002,6 +959,241 @@
|
|||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"id": "50daf170-efec-49e5-8f8e-9a45938cacfc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn.model_selection import KFold, train_test_split\n",
|
||||
"kf = KFold(n_splits=6, shuffle=True, random_state=42)"
|
||||
]
|
||||
},
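`kf.split` yields a pair of integer index arrays per fold; with `n_splits=6` each validation fold holds roughly one sixth of the rows, and `random_state=42` fixes the shuffle. A tiny illustration on stand-in data:

```python
import numpy as np
from sklearn.model_selection import KFold

kf = KFold(n_splits=6, shuffle=True, random_state=42)
demo = np.arange(12)  # stand-in for the real training rows
for i, (train_idx, valid_idx) in enumerate(kf.split(demo)):
    print(i, train_idx, valid_idx)  # 10 train / 2 validation indices per fold
```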
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"id": "0f863423-be12-478b-a08d-e3c6f5dfb8ee",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from tensorflow.keras import optimizers\n",
|
||||
"from tensorflow.python.keras.utils.vis_utils import plot_model\n",
|
||||
"from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score, mean_absolute_percentage_error"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"id": "2c89b32a-017c-4d05-ab78-8b9b8eb0dcbb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from keras.callbacks import ReduceLROnPlateau\n",
|
||||
"reduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=10, mode='auto')"
|
||||
]
|
||||
},
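With only `monitor`, `patience` and `mode` set, the rest of the behaviour comes from the Keras defaults: the callback multiplies the learning rate by `factor=0.1` once `val_loss` has not improved for 10 consecutive epochs. Spelled out:

```python
from keras.callbacks import ReduceLROnPlateau

# Same configuration as above, with the implicit Keras defaults written out
# (factor=0.1 and min_lr=0 are library defaults, not values set in the notebook).
reduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=10, mode='auto',
                              factor=0.1, min_lr=0)
```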
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"id": "ca6ce434-80b6-4609-9596-9a5120680462",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def print_eva(y_true, y_pred, tp):\n",
|
||||
" MSE = mean_squared_error(y_true, y_pred)\n",
|
||||
" RMSE = np.sqrt(MSE)\n",
|
||||
" MAE = mean_absolute_error(y_true, y_pred)\n",
|
||||
" MAPE = mean_absolute_percentage_error(y_true, y_pred)\n",
|
||||
" R_2 = r2_score(y_true, y_pred)\n",
|
||||
" print(f\"COL: {tp}, MSE: {format(MSE, '.2E')}\", end=',')\n",
|
||||
" print(f'RMSE: {round(RMSE, 3)}', end=',')\n",
|
||||
" print(f'MAPE: {round(MAPE * 100, 3)} %', end=',')\n",
|
||||
" print(f'MAE: {round(MAE, 3)}', end=',')\n",
|
||||
" print(f'R_2: {round(R_2, 3)}')\n",
|
||||
" return [MSE, RMSE, MAE, MAPE, R_2]"
|
||||
]
|
||||
},
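A quick sanity check of `print_eva` on toy arrays (values are made up); it prints one line of metrics and returns them in the order MSE, RMSE, MAE, MAPE, R²:

```python
import numpy as np

y_true = np.array([20.0, 21.5, 19.8, 22.3])
y_pred = np.array([20.4, 21.1, 20.0, 22.0])
mse, rmse, mae, mape, r2 = print_eva(y_true, y_pred, tp='demo')
```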
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 28,
|
||||
"id": "10213bc5-bf13-46ed-9ce9-b1dbc5af72ee",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"2024-01-05 16:46:22.503307: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcuda.so.1\n",
|
||||
"2024-01-05 16:46:22.560854: E tensorflow/stream_executor/cuda/cuda_driver.cc:328] failed call to cuInit: CUDA_ERROR_INVALID_DEVICE: invalid device ordinal\n",
|
||||
"2024-01-05 16:46:22.560909: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: zhaojh-yv621\n",
|
||||
"2024-01-05 16:46:22.560920: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: zhaojh-yv621\n",
|
||||
"2024-01-05 16:46:22.561113: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:200] libcuda reported version is: 520.61.5\n",
|
||||
"2024-01-05 16:46:22.561132: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:204] kernel reported version is: 520.61.5\n",
|
||||
"2024-01-05 16:46:22.561135: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:310] kernel version seems to match DSO: 520.61.5\n",
|
||||
"2024-01-05 16:46:22.561424: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX512F FMA\n",
|
||||
"To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"ename": "ValueError",
|
||||
"evalue": "in user code:\n\n /tmp/ipykernel_16320/2404117700.py:31 call *\n return K.concatenate(inputs, -1)\n /root/miniconda3/envs/python38/lib/python3.8/site-packages/tensorflow/python/util/dispatch.py:206 wrapper **\n return target(*args, **kwargs)\n /root/miniconda3/envs/python38/lib/python3.8/site-packages/tensorflow/python/keras/backend.py:3098 concatenate\n return array_ops.concat([to_dense(x) for x in tensors], axis)\n /root/miniconda3/envs/python38/lib/python3.8/site-packages/tensorflow/python/util/dispatch.py:206 wrapper\n return target(*args, **kwargs)\n /root/miniconda3/envs/python38/lib/python3.8/site-packages/tensorflow/python/ops/array_ops.py:1768 concat\n return gen_array_ops.concat_v2(values=values, axis=axis, name=name)\n /root/miniconda3/envs/python38/lib/python3.8/site-packages/tensorflow/python/ops/gen_array_ops.py:1227 concat_v2\n _, _, _op, _outputs = _op_def_library._apply_op_helper(\n /root/miniconda3/envs/python38/lib/python3.8/site-packages/tensorflow/python/framework/op_def_library.py:748 _apply_op_helper\n op = g._create_op_internal(op_type_name, inputs, dtypes=None,\n /root/miniconda3/envs/python38/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py:599 _create_op_internal\n return super(FuncGraph, self)._create_op_internal( # pylint: disable=protected-access\n /root/miniconda3/envs/python38/lib/python3.8/site-packages/tensorflow/python/framework/ops.py:3557 _create_op_internal\n ret = Operation(\n /root/miniconda3/envs/python38/lib/python3.8/site-packages/tensorflow/python/framework/ops.py:2041 __init__\n self._c_op = _create_c_op(self._graph, node_def, inputs,\n /root/miniconda3/envs/python38/lib/python3.8/site-packages/tensorflow/python/framework/ops.py:1883 _create_c_op\n raise ValueError(str(e))\n\n ValueError: Shape must be rank 2 but is rank 3 for '{{node custom_multi_loss_layer/concat}} = ConcatV2[N=4, T=DT_FLOAT, Tidx=DT_INT32](Placeholder, Placeholder_1, Placeholder_2, Placeholder_3, custom_multi_loss_layer/concat/axis)' with input shapes: [?,1], [?,1], [?,1,1], [?,1,1], [].\n",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
|
||||
"Cell \u001b[0;32mIn[28], line 2\u001b[0m\n\u001b[1;32m 1\u001b[0m prediction_model \u001b[38;5;241m=\u001b[39m get_prediction_model()\n\u001b[0;32m----> 2\u001b[0m trainable_model \u001b[38;5;241m=\u001b[39m \u001b[43mget_trainable_model\u001b[49m\u001b[43m(\u001b[49m\u001b[43mprediction_model\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 3\u001b[0m trainable_model\u001b[38;5;241m.\u001b[39mcompile(optimizer\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124madam\u001b[39m\u001b[38;5;124m'\u001b[39m, loss\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m)\n",
|
||||
"Cell \u001b[0;32mIn[18], line 6\u001b[0m, in \u001b[0;36mget_trainable_model\u001b[0;34m(prediction_model)\u001b[0m\n\u001b[1;32m 4\u001b[0m bet_real \u001b[38;5;241m=\u001b[39m layers\u001b[38;5;241m.\u001b[39mInput(shape\u001b[38;5;241m=\u001b[39m(\u001b[38;5;241m1\u001b[39m,), name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mvad_real\u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[1;32m 5\u001b[0m mesco_real \u001b[38;5;241m=\u001b[39m layers\u001b[38;5;241m.\u001b[39mInput(shape\u001b[38;5;241m=\u001b[39m(\u001b[38;5;241m1\u001b[39m,), name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mfcad_real\u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[0;32m----> 6\u001b[0m out \u001b[38;5;241m=\u001b[39m \u001b[43mCustomMultiLossLayer\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnb_outputs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m2\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m(\u001b[49m\u001b[43m[\u001b[49m\u001b[43mbet_real\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmesco_real\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbet\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmesco\u001b[49m\u001b[43m]\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 7\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m Model([inputs, bet_real, mesco_real], out)\n",
|
||||
"File \u001b[0;32m~/miniconda3/envs/python38/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py:969\u001b[0m, in \u001b[0;36mLayer.__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 963\u001b[0m \u001b[38;5;66;03m# Functional Model construction mode is invoked when `Layer`s are called on\u001b[39;00m\n\u001b[1;32m 964\u001b[0m \u001b[38;5;66;03m# symbolic `KerasTensor`s, i.e.:\u001b[39;00m\n\u001b[1;32m 965\u001b[0m \u001b[38;5;66;03m# >> inputs = tf.keras.Input(10)\u001b[39;00m\n\u001b[1;32m 966\u001b[0m \u001b[38;5;66;03m# >> outputs = MyLayer()(inputs) # Functional construction mode.\u001b[39;00m\n\u001b[1;32m 967\u001b[0m \u001b[38;5;66;03m# >> model = tf.keras.Model(inputs, outputs)\u001b[39;00m\n\u001b[1;32m 968\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m _in_functional_construction_mode(\u001b[38;5;28mself\u001b[39m, inputs, args, kwargs, input_list):\n\u001b[0;32m--> 969\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_functional_construction_call\u001b[49m\u001b[43m(\u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 970\u001b[0m \u001b[43m \u001b[49m\u001b[43minput_list\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 972\u001b[0m \u001b[38;5;66;03m# Maintains info about the `Layer.call` stack.\u001b[39;00m\n\u001b[1;32m 973\u001b[0m call_context \u001b[38;5;241m=\u001b[39m base_layer_utils\u001b[38;5;241m.\u001b[39mcall_context()\n",
|
||||
"File \u001b[0;32m~/miniconda3/envs/python38/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py:1107\u001b[0m, in \u001b[0;36mLayer._functional_construction_call\u001b[0;34m(self, inputs, args, kwargs, input_list)\u001b[0m\n\u001b[1;32m 1102\u001b[0m training_arg_passed_by_framework \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n\u001b[1;32m 1104\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m call_context\u001b[38;5;241m.\u001b[39menter(\n\u001b[1;32m 1105\u001b[0m layer\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m, inputs\u001b[38;5;241m=\u001b[39minputs, build_graph\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m, training\u001b[38;5;241m=\u001b[39mtraining_value):\n\u001b[1;32m 1106\u001b[0m \u001b[38;5;66;03m# Check input assumptions set after layer building, e.g. input shape.\u001b[39;00m\n\u001b[0;32m-> 1107\u001b[0m outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_keras_tensor_symbolic_call\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1108\u001b[0m \u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43minput_masks\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1110\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m outputs \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 1111\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mA layer\u001b[39m\u001b[38;5;130;01m\\'\u001b[39;00m\u001b[38;5;124ms `call` method should return a \u001b[39m\u001b[38;5;124m'\u001b[39m\n\u001b[1;32m 1112\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mTensor or a list of Tensors, not None \u001b[39m\u001b[38;5;124m'\u001b[39m\n\u001b[1;32m 1113\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124m(layer: \u001b[39m\u001b[38;5;124m'\u001b[39m \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mname \u001b[38;5;241m+\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124m).\u001b[39m\u001b[38;5;124m'\u001b[39m)\n",
|
||||
"File \u001b[0;32m~/miniconda3/envs/python38/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py:840\u001b[0m, in \u001b[0;36mLayer._keras_tensor_symbolic_call\u001b[0;34m(self, inputs, input_masks, args, kwargs)\u001b[0m\n\u001b[1;32m 838\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m nest\u001b[38;5;241m.\u001b[39mmap_structure(keras_tensor\u001b[38;5;241m.\u001b[39mKerasTensor, output_signature)\n\u001b[1;32m 839\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 840\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_infer_output_signature\u001b[49m\u001b[43m(\u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43minput_masks\u001b[49m\u001b[43m)\u001b[49m\n",
|
||||
"File \u001b[0;32m~/miniconda3/envs/python38/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py:880\u001b[0m, in \u001b[0;36mLayer._infer_output_signature\u001b[0;34m(self, inputs, args, kwargs, input_masks)\u001b[0m\n\u001b[1;32m 878\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_maybe_build(inputs)\n\u001b[1;32m 879\u001b[0m inputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_maybe_cast_inputs(inputs)\n\u001b[0;32m--> 880\u001b[0m outputs \u001b[38;5;241m=\u001b[39m \u001b[43mcall_fn\u001b[49m\u001b[43m(\u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 882\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_handle_activity_regularization(inputs, outputs)\n\u001b[1;32m 883\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_set_mask_metadata(inputs, outputs, input_masks,\n\u001b[1;32m 884\u001b[0m build_graph\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m)\n",
|
||||
"File \u001b[0;32m~/miniconda3/envs/python38/lib/python3.8/site-packages/tensorflow/python/autograph/impl/api.py:695\u001b[0m, in \u001b[0;36mconvert.<locals>.decorator.<locals>.wrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 693\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e: \u001b[38;5;66;03m# pylint:disable=broad-except\u001b[39;00m\n\u001b[1;32m 694\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mhasattr\u001b[39m(e, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mag_error_metadata\u001b[39m\u001b[38;5;124m'\u001b[39m):\n\u001b[0;32m--> 695\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\u001b[38;5;241m.\u001b[39mag_error_metadata\u001b[38;5;241m.\u001b[39mto_exception(e)\n\u001b[1;32m 696\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 697\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m\n",
|
||||
"\u001b[0;31mValueError\u001b[0m: in user code:\n\n /tmp/ipykernel_16320/2404117700.py:31 call *\n return K.concatenate(inputs, -1)\n /root/miniconda3/envs/python38/lib/python3.8/site-packages/tensorflow/python/util/dispatch.py:206 wrapper **\n return target(*args, **kwargs)\n /root/miniconda3/envs/python38/lib/python3.8/site-packages/tensorflow/python/keras/backend.py:3098 concatenate\n return array_ops.concat([to_dense(x) for x in tensors], axis)\n /root/miniconda3/envs/python38/lib/python3.8/site-packages/tensorflow/python/util/dispatch.py:206 wrapper\n return target(*args, **kwargs)\n /root/miniconda3/envs/python38/lib/python3.8/site-packages/tensorflow/python/ops/array_ops.py:1768 concat\n return gen_array_ops.concat_v2(values=values, axis=axis, name=name)\n /root/miniconda3/envs/python38/lib/python3.8/site-packages/tensorflow/python/ops/gen_array_ops.py:1227 concat_v2\n _, _, _op, _outputs = _op_def_library._apply_op_helper(\n /root/miniconda3/envs/python38/lib/python3.8/site-packages/tensorflow/python/framework/op_def_library.py:748 _apply_op_helper\n op = g._create_op_internal(op_type_name, inputs, dtypes=None,\n /root/miniconda3/envs/python38/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py:599 _create_op_internal\n return super(FuncGraph, self)._create_op_internal( # pylint: disable=protected-access\n /root/miniconda3/envs/python38/lib/python3.8/site-packages/tensorflow/python/framework/ops.py:3557 _create_op_internal\n ret = Operation(\n /root/miniconda3/envs/python38/lib/python3.8/site-packages/tensorflow/python/framework/ops.py:2041 __init__\n self._c_op = _create_c_op(self._graph, node_def, inputs,\n /root/miniconda3/envs/python38/lib/python3.8/site-packages/tensorflow/python/framework/ops.py:1883 _create_c_op\n raise ValueError(str(e))\n\n ValueError: Shape must be rank 2 but is rank 3 for '{{node custom_multi_loss_layer/concat}} = ConcatV2[N=4, T=DT_FLOAT, Tidx=DT_INT32](Placeholder, Placeholder_1, Placeholder_2, Placeholder_3, custom_multi_loss_layer/concat/axis)' with input shapes: [?,1], [?,1], [?,1,1], [?,1,1], [].\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"prediction_model = get_prediction_model()\n",
|
||||
"trainable_model = get_trainable_model(prediction_model)\n",
|
||||
"trainable_model.compile(optimizer='adam', loss=None)"
|
||||
]
|
||||
},
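The `ValueError` above comes from `CustomMultiLossLayer` concatenating tensors of different ranks: the label inputs `vad_real`/`fcad_real` are `(None, 1)` while the two predictions come in as `(None, 1, 1)`, consistent with the `K.expand_dims(out, axis=1)` that the hunk further up comments out, and `ConcatV2` requires equal ranks. Removing the extra axis in the model head is one fix; an alternative sketch is to flatten the predictions inside `get_trainable_model` before they reach the loss layer:

```python
# Alternative fix (sketch): force every tensor to rank 2 before concatenation.
# CustomMultiLossLayer, bet, mesco, bet_real and mesco_real are the notebook's own names.
bet_flat = layers.Reshape((1,))(bet)       # (None, 1, 1) -> (None, 1)
mesco_flat = layers.Reshape((1,))(mesco)   # (None, 1, 1) -> (None, 1)
out = CustomMultiLossLayer(nb_outputs=2)([bet_real, mesco_real, bet_flat, mesco_flat])
```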
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4a1be90d-b8f1-4fe1-9952-1cdcc489fab5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"plot_model(prediction_model)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 28,
|
||||
"id": "6308b1dc-8e2e-4bf9-9b28-3b81979bf7e0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"2024-01-05 13:55:16.952556: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:176] None of the MLIR Optimization Passes are enabled (registered 2)\n",
|
||||
"2024-01-05 13:55:16.970806: I tensorflow/core/platform/profile_utils/cpu_utils.cc:114] CPU Frequency: 2200000000 Hz\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"COL: 挥发分Vad, MSE: 5.39E-01,RMSE: 0.734,MAPE: 1.948 %,MAE: 0.582,R_2: 0.883\n",
|
||||
"COL: 固定炭Fcad, MSE: 7.77E-01,RMSE: 0.881,MAPE: 1.246 %,MAE: 0.654,R_2: 0.969\n",
|
||||
"COL: 挥发分Vad, MSE: 8.80E-01,RMSE: 0.938,MAPE: 2.679 %,MAE: 0.783,R_2: 0.893\n",
|
||||
"COL: 固定炭Fcad, MSE: 1.32E+00,RMSE: 1.149,MAPE: 1.814 %,MAE: 0.907,R_2: 0.974\n",
|
||||
"COL: 挥发分Vad, MSE: 6.68E-01,RMSE: 0.817,MAPE: 2.064 %,MAE: 0.606,R_2: 0.829\n",
|
||||
"COL: 固定炭Fcad, MSE: 9.89E-01,RMSE: 0.995,MAPE: 1.427 %,MAE: 0.798,R_2: 0.929\n",
|
||||
"COL: 挥发分Vad, MSE: 6.34E-01,RMSE: 0.796,MAPE: 2.099 %,MAE: 0.62,R_2: 0.889\n",
|
||||
"COL: 固定炭Fcad, MSE: 4.93E-01,RMSE: 0.702,MAPE: 1.058 %,MAE: 0.542,R_2: 0.985\n",
|
||||
"WARNING:tensorflow:5 out of the last 9 calls to <function Model.make_predict_function.<locals>.predict_function at 0x7f0801c91c10> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n",
|
||||
"COL: 挥发分Vad, MSE: 2.34E+00,RMSE: 1.53,MAPE: 4.385 %,MAE: 1.317,R_2: 0.467\n",
|
||||
"COL: 固定炭Fcad, MSE: 2.21E+02,RMSE: 14.87,MAPE: 27.662 %,MAE: 14.835,R_2: -9.385\n",
|
||||
"WARNING:tensorflow:6 out of the last 11 calls to <function Model.make_predict_function.<locals>.predict_function at 0x7f0801cf34c0> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n",
|
||||
"COL: 挥发分Vad, MSE: 6.16E-01,RMSE: 0.785,MAPE: 2.29 %,MAE: 0.674,R_2: 0.873\n",
|
||||
"COL: 固定炭Fcad, MSE: 1.04E+00,RMSE: 1.02,MAPE: 1.603 %,MAE: 0.811,R_2: 0.956\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"vad_eva_list = list()\n",
|
||||
"fcad_eva_list = list()\n",
|
||||
"train_data = use_data[use_cols].copy()\n",
|
||||
"for (train_index, test_index) in kf.split(train_data):\n",
|
||||
" train = train_data.loc[train_index]\n",
|
||||
" valid = train_data.loc[test_index]\n",
|
||||
" X = np.expand_dims(train[feature_cols].values, axis=1)\n",
|
||||
" Y = [x for x in train[out_cols].values.T]\n",
|
||||
" X_valid = np.expand_dims(valid[feature_cols].values, axis=1)\n",
|
||||
" Y_valid = [x for x in valid[out_cols].values.T]\n",
|
||||
" prediction_model = get_prediction_model()\n",
|
||||
" trainable_model = get_trainable_model(prediction_model)\n",
|
||||
" trainable_model.compile(optimizer='adam', loss=None)\n",
|
||||
" hist = trainable_model.fit([X, Y[0], Y[1]], epochs=120, batch_size=8, verbose=0, \n",
|
||||
" validation_data=[X_valid, Y_valid[0], Y_valid[1]],\n",
|
||||
" callbacks=[reduce_lr]\n",
|
||||
" )\n",
|
||||
" rst = prediction_model.predict(X_valid)\n",
|
||||
" pred_rst = pd.DataFrame.from_records(np.squeeze(np.asarray(rst), axis=2).T, columns=out_cols)\n",
|
||||
" real_rst = valid[out_cols].copy()\n",
|
||||
" for col in out_cols:\n",
|
||||
" pred_rst[col] = pred_rst[col] * (maxs[col] - mins[col]) + mins[col]\n",
|
||||
" real_rst[col] = real_rst[col] * (maxs[col] - mins[col]) + mins[col]\n",
|
||||
" y_pred_vad = pred_rst['挥发分Vad(%)'].values.reshape(-1,)\n",
|
||||
" y_pred_fcad = pred_rst['固定炭Fcad(%)'].values.reshape(-1,)\n",
|
||||
" y_true_vad = real_rst['挥发分Vad(%)'].values.reshape(-1,)\n",
|
||||
" y_true_fcad = real_rst['固定炭Fcad(%)'].values.reshape(-1,)\n",
|
||||
" vad_eva = print_eva(y_true_vad, y_pred_vad, tp='挥发分Vad')\n",
|
||||
" fcad_eva = print_eva(y_true_fcad, y_pred_fcad, tp='固定炭Fcad')\n",
|
||||
" vad_eva_list.append(vad_eva)\n",
|
||||
" fcad_eva_list.append(fcad_eva)\n",
|
||||
" del trainable_model\n",
|
||||
" del prediction_model"
|
||||
]
|
||||
},
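The reshaping step `np.squeeze(np.asarray(rst), axis=2).T` deserves a note: `prediction_model.predict` returns a list of two `(n, 1)` arrays, one per output head, so `np.asarray` stacks them to `(2, n, 1)`, the squeeze drops the trailing axis to `(2, n)`, and the transpose yields `(n, 2)`, one column per entry of `out_cols`. A stand-alone shape check:

```python
import numpy as np

rst_demo = [np.zeros((5, 1)), np.zeros((5, 1))]        # two output heads, 5 validation rows (illustrative)
stacked = np.squeeze(np.asarray(rst_demo), axis=2).T   # (2, 5, 1) -> (2, 5) -> (5, 2)
print(stacked.shape)                                   # (5, 2)
```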
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 33,
|
||||
"id": "27e0abf7-aa29-467f-bc5e-b66a1adf6165",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"MSE 0.667414\n",
|
||||
"RMSE 0.814141\n",
|
||||
"MAE 0.652951\n",
|
||||
"MAPE 0.022159\n",
|
||||
"R_2 0.873633\n",
|
||||
"dtype: float64"
|
||||
]
|
||||
},
|
||||
"execution_count": 33,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"vad_df = pd.DataFrame.from_records(vad_eva_list, columns=['MSE', 'RMSE', 'MAE', 'MAPE', 'R_2'])\n",
|
||||
"vad_df.sort_values(by='R_2')[1:].mean()"
|
||||
]
|
||||
},
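Note that `sort_values(by='R_2')[1:]` sorts the six folds by R² in ascending order and slices off the first row, so the reported mean deliberately excludes the single worst fold (for fcad that is the collapsed fold with R² ≈ −9.4 visible in the training log above). The same pattern is used for `fcad_df` below. A toy illustration:

```python
import pandas as pd

scores = pd.DataFrame({'R_2': [0.88, 0.47, 0.83, 0.89, 0.87, 0.86]})  # made-up fold scores
print(scores.sort_values(by='R_2')[1:].mean())  # averages the five best folds; the 0.47 outlier is dropped
```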
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 34,
|
||||
"id": "070cdb94-6e7b-4028-b6d5-ba8570c902ba",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"MSE 0.923848\n",
|
||||
"RMSE 0.949375\n",
|
||||
"MAE 0.742411\n",
|
||||
"MAPE 0.014295\n",
|
||||
"R_2 0.962834\n",
|
||||
"dtype: float64"
|
||||
]
|
||||
},
|
||||
"execution_count": 34,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"fcad_df = pd.DataFrame.from_records(fcad_eva_list, columns=['MSE', 'RMSE', 'MAE', 'MAPE', 'R_2'])\n",
|
||||
"fcad_df.sort_values(by='R_2')[1:].mean()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "54c1df2c-c297-4b8d-be8a-3a99cff22545",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
|
@ -1021,80 +1213,6 @@
|
|||
"trainable_model = get_trainable_model(prediction_model)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 32,
|
||||
"id": "4f832a1e-48e2-4467-b381-35b9d2f1271a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Model: \"model_3\"\n",
|
||||
"__________________________________________________________________________________________________\n",
|
||||
"Layer (type) Output Shape Param # Connected to \n",
|
||||
"==================================================================================================\n",
|
||||
"input (InputLayer) [(None, 1, 6)] 0 \n",
|
||||
"__________________________________________________________________________________________________\n",
|
||||
"conv1d_3 (Conv1D) (None, 1, 64) 448 input[0][0] \n",
|
||||
"__________________________________________________________________________________________________\n",
|
||||
"bidirectional_3 (Bidirectional) (None, 1, 128) 66048 conv1d_3[0][0] \n",
|
||||
"__________________________________________________________________________________________________\n",
|
||||
"dense_30 (Dense) (None, 1, 128) 16512 bidirectional_3[0][0] \n",
|
||||
"__________________________________________________________________________________________________\n",
|
||||
"transformer_block_9 (Transforme (None, 1, 128) 202640 dense_30[0][0] \n",
|
||||
"__________________________________________________________________________________________________\n",
|
||||
"global_average_pooling1d_9 (Glo (None, 128) 0 transformer_block_9[0][0] \n",
|
||||
"__________________________________________________________________________________________________\n",
|
||||
"dropout_29 (Dropout) (None, 128) 0 global_average_pooling1d_9[0][0] \n",
|
||||
"__________________________________________________________________________________________________\n",
|
||||
"dense_33 (Dense) (None, 64) 8256 dropout_29[0][0] \n",
|
||||
"__________________________________________________________________________________________________\n",
|
||||
"tf.expand_dims_3 (TFOpLambda) (None, 1, 64) 0 dense_33[0][0] \n",
|
||||
"__________________________________________________________________________________________________\n",
|
||||
"transformer_block_10 (Transform (None, 1, 64) 52176 tf.expand_dims_3[0][0] \n",
|
||||
"__________________________________________________________________________________________________\n",
|
||||
"transformer_block_11 (Transform (None, 1, 64) 52176 tf.expand_dims_3[0][0] \n",
|
||||
"__________________________________________________________________________________________________\n",
|
||||
"global_average_pooling1d_10 (Gl (None, 64) 0 transformer_block_10[0][0] \n",
|
||||
"__________________________________________________________________________________________________\n",
|
||||
"global_average_pooling1d_11 (Gl (None, 64) 0 transformer_block_11[0][0] \n",
|
||||
"__________________________________________________________________________________________________\n",
|
||||
"dropout_32 (Dropout) (None, 64) 0 global_average_pooling1d_10[0][0]\n",
|
||||
"__________________________________________________________________________________________________\n",
|
||||
"dropout_35 (Dropout) (None, 64) 0 global_average_pooling1d_11[0][0]\n",
|
||||
"__________________________________________________________________________________________________\n",
|
||||
"dense_36 (Dense) (None, 32) 2080 dropout_32[0][0] \n",
|
||||
"__________________________________________________________________________________________________\n",
|
||||
"dense_39 (Dense) (None, 32) 2080 dropout_35[0][0] \n",
|
||||
"__________________________________________________________________________________________________\n",
|
||||
"vad (Dense) (None, 1) 33 dense_36[0][0] \n",
|
||||
"__________________________________________________________________________________________________\n",
|
||||
"fcad (Dense) (None, 1) 33 dense_39[0][0] \n",
|
||||
"==================================================================================================\n",
|
||||
"Total params: 402,482\n",
|
||||
"Trainable params: 402,482\n",
|
||||
"Non-trainable params: 0\n",
|
||||
"__________________________________________________________________________________________________\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"prediction_model.summary()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 33,
|
||||
"id": "9289f452-a5a4-40c4-b942-f6cb2e348548",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from tensorflow.keras import optimizers\n",
|
||||
"from tensorflow.python.keras.utils.vis_utils import plot_model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 34,
|
||||
|
@ -1109,268 +1227,10 @@
|
|||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 35,
|
||||
"id": "9a62dea1-4f05-411b-9756-a91623580581",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from keras.callbacks import ReduceLROnPlateau\n",
|
||||
"reduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=10, mode='auto')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 40,
|
||||
"execution_count": null,
|
||||
"id": "cf869e4d-0fce-45a2-afff-46fd9b30fd1c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Epoch 1/120\n",
|
||||
"20/20 [==============================] - 5s 59ms/step - loss: 1.8316 - val_loss: 1.8096\n",
|
||||
"Epoch 2/120\n",
|
||||
"20/20 [==============================] - 0s 25ms/step - loss: 1.7903 - val_loss: 1.7691\n",
|
||||
"Epoch 3/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 1.7506 - val_loss: 1.7307\n",
|
||||
"Epoch 4/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 1.7110 - val_loss: 1.6914\n",
|
||||
"Epoch 5/120\n",
|
||||
"20/20 [==============================] - 0s 21ms/step - loss: 1.6711 - val_loss: 1.6497\n",
|
||||
"Epoch 6/120\n",
|
||||
"20/20 [==============================] - 0s 24ms/step - loss: 1.6314 - val_loss: 1.6098\n",
|
||||
"Epoch 7/120\n",
|
||||
"20/20 [==============================] - 0s 24ms/step - loss: 1.5909 - val_loss: 1.5695\n",
|
||||
"Epoch 8/120\n",
|
||||
"20/20 [==============================] - 0s 24ms/step - loss: 1.5506 - val_loss: 1.5296\n",
|
||||
"Epoch 9/120\n",
|
||||
"20/20 [==============================] - 0s 24ms/step - loss: 1.5109 - val_loss: 1.4891\n",
|
||||
"Epoch 10/120\n",
|
||||
"20/20 [==============================] - 0s 25ms/step - loss: 1.4706 - val_loss: 1.4500\n",
|
||||
"Epoch 11/120\n",
|
||||
"20/20 [==============================] - 0s 25ms/step - loss: 1.4306 - val_loss: 1.4104\n",
|
||||
"Epoch 12/120\n",
|
||||
"20/20 [==============================] - 0s 22ms/step - loss: 1.3907 - val_loss: 1.3746\n",
|
||||
"Epoch 13/120\n",
|
||||
"20/20 [==============================] - 0s 23ms/step - loss: 1.3508 - val_loss: 1.3296\n",
|
||||
"Epoch 14/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 1.3106 - val_loss: 1.2895\n",
|
||||
"Epoch 15/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 1.2706 - val_loss: 1.2515\n",
|
||||
"Epoch 16/120\n",
|
||||
"20/20 [==============================] - 0s 22ms/step - loss: 1.2315 - val_loss: 1.2104\n",
|
||||
"Epoch 17/120\n",
|
||||
"20/20 [==============================] - 0s 23ms/step - loss: 1.1908 - val_loss: 1.1702\n",
|
||||
"Epoch 18/120\n",
|
||||
"20/20 [==============================] - 0s 25ms/step - loss: 1.1508 - val_loss: 1.1320\n",
|
||||
"Epoch 19/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 1.1114 - val_loss: 1.0917\n",
|
||||
"Epoch 20/120\n",
|
||||
"20/20 [==============================] - 0s 24ms/step - loss: 1.0718 - val_loss: 1.0513\n",
|
||||
"Epoch 21/120\n",
|
||||
"20/20 [==============================] - 0s 23ms/step - loss: 1.0315 - val_loss: 1.0178\n",
|
||||
"Epoch 22/120\n",
|
||||
"20/20 [==============================] - 0s 24ms/step - loss: 0.9918 - val_loss: 0.9704\n",
|
||||
"Epoch 23/120\n",
|
||||
"20/20 [==============================] - 0s 23ms/step - loss: 0.9511 - val_loss: 0.9321\n",
|
||||
"Epoch 24/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 0.9114 - val_loss: 0.8913\n",
|
||||
"Epoch 25/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 0.8718 - val_loss: 0.8520\n",
|
||||
"Epoch 26/120\n",
|
||||
"20/20 [==============================] - 0s 25ms/step - loss: 0.8314 - val_loss: 0.8124\n",
|
||||
"Epoch 27/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 0.7922 - val_loss: 0.7727\n",
|
||||
"Epoch 28/120\n",
|
||||
"20/20 [==============================] - 0s 25ms/step - loss: 0.7519 - val_loss: 0.7307\n",
|
||||
"Epoch 29/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 0.7119 - val_loss: 0.6932\n",
|
||||
"Epoch 30/120\n",
|
||||
"20/20 [==============================] - 0s 22ms/step - loss: 0.6720 - val_loss: 0.6531\n",
|
||||
"Epoch 31/120\n",
|
||||
"20/20 [==============================] - 0s 23ms/step - loss: 0.6336 - val_loss: 0.6155\n",
|
||||
"Epoch 32/120\n",
|
||||
"20/20 [==============================] - 1s 26ms/step - loss: 0.5931 - val_loss: 0.5738\n",
|
||||
"Epoch 33/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 0.5517 - val_loss: 0.5324\n",
|
||||
"Epoch 34/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 0.5135 - val_loss: 0.4943\n",
|
||||
"Epoch 35/120\n",
|
||||
"20/20 [==============================] - 0s 21ms/step - loss: 0.4724 - val_loss: 0.4602\n",
|
||||
"Epoch 36/120\n",
|
||||
"20/20 [==============================] - 0s 23ms/step - loss: 0.4326 - val_loss: 0.4126\n",
|
||||
"Epoch 37/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 0.3947 - val_loss: 0.3758\n",
|
||||
"Epoch 38/120\n",
|
||||
"20/20 [==============================] - 0s 25ms/step - loss: 0.3558 - val_loss: 0.3350\n",
|
||||
"Epoch 39/120\n",
|
||||
"20/20 [==============================] - 0s 22ms/step - loss: 0.3154 - val_loss: 0.3031\n",
|
||||
"Epoch 40/120\n",
|
||||
"20/20 [==============================] - 0s 23ms/step - loss: 0.2771 - val_loss: 0.2592\n",
|
||||
"Epoch 41/120\n",
|
||||
"20/20 [==============================] - 0s 25ms/step - loss: 0.2459 - val_loss: 0.2370\n",
|
||||
"Epoch 42/120\n",
|
||||
"20/20 [==============================] - 1s 27ms/step - loss: 0.2267 - val_loss: 0.2210\n",
|
||||
"Epoch 43/120\n",
|
||||
"20/20 [==============================] - 0s 24ms/step - loss: 0.2050 - val_loss: 0.1947\n",
|
||||
"Epoch 44/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 0.1840 - val_loss: 0.1728\n",
|
||||
"Epoch 45/120\n",
|
||||
"20/20 [==============================] - 0s 21ms/step - loss: 0.1628 - val_loss: 0.1533\n",
|
||||
"Epoch 46/120\n",
|
||||
"20/20 [==============================] - 0s 23ms/step - loss: 0.1430 - val_loss: 0.1322\n",
|
||||
"Epoch 47/120\n",
|
||||
"20/20 [==============================] - 0s 23ms/step - loss: 0.1230 - val_loss: 0.1147\n",
|
||||
"Epoch 48/120\n",
|
||||
"20/20 [==============================] - 1s 24ms/step - loss: 0.1026 - val_loss: 0.0940\n",
|
||||
"Epoch 49/120\n",
|
||||
"20/20 [==============================] - 0s 25ms/step - loss: 0.0830 - val_loss: 0.0750\n",
|
||||
"Epoch 50/120\n",
|
||||
"20/20 [==============================] - 0s 25ms/step - loss: 0.0639 - val_loss: 0.0529\n",
|
||||
"Epoch 51/120\n",
|
||||
"20/20 [==============================] - 0s 22ms/step - loss: 0.0436 - val_loss: 0.0352\n",
|
||||
"Epoch 52/120\n",
|
||||
"20/20 [==============================] - 0s 24ms/step - loss: 0.0241 - val_loss: 0.0162\n",
|
||||
"Epoch 53/120\n",
|
||||
"20/20 [==============================] - 0s 23ms/step - loss: 0.0092 - val_loss: 0.0084\n",
|
||||
"Epoch 54/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 0.0067 - val_loss: 0.0074\n",
|
||||
"Epoch 55/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 0.0080 - val_loss: 0.0071\n",
|
||||
"Epoch 56/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 0.0070 - val_loss: 0.0063\n",
|
||||
"Epoch 57/120\n",
|
||||
"20/20 [==============================] - 0s 25ms/step - loss: 0.0062 - val_loss: 0.0076\n",
|
||||
"Epoch 58/120\n",
|
||||
"20/20 [==============================] - 0s 25ms/step - loss: 0.0056 - val_loss: 0.0048\n",
|
||||
"Epoch 59/120\n",
|
||||
"20/20 [==============================] - 0s 25ms/step - loss: 0.0050 - val_loss: 0.0071\n",
|
||||
"Epoch 60/120\n",
|
||||
"20/20 [==============================] - 0s 25ms/step - loss: 0.0057 - val_loss: 0.0054\n",
|
||||
"Epoch 61/120\n",
|
||||
"20/20 [==============================] - 0s 23ms/step - loss: 0.0044 - val_loss: 0.0092\n",
|
||||
"Epoch 62/120\n",
|
||||
"20/20 [==============================] - 1s 26ms/step - loss: 0.0068 - val_loss: 0.0070\n",
|
||||
"Epoch 63/120\n",
|
||||
"20/20 [==============================] - 1s 24ms/step - loss: 0.0059 - val_loss: 0.0065\n",
|
||||
"Epoch 64/120\n",
|
||||
"20/20 [==============================] - 0s 23ms/step - loss: 0.0055 - val_loss: 0.0060\n",
|
||||
"Epoch 65/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 0.0053 - val_loss: 0.0056\n",
|
||||
"Epoch 66/120\n",
|
||||
"20/20 [==============================] - 0s 25ms/step - loss: 0.0058 - val_loss: 0.0077\n",
|
||||
"Epoch 67/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 0.0051 - val_loss: 0.0054\n",
|
||||
"Epoch 68/120\n",
|
||||
"20/20 [==============================] - 0s 21ms/step - loss: 0.0047 - val_loss: 0.0048\n",
|
||||
"Epoch 69/120\n",
|
||||
"20/20 [==============================] - 0s 23ms/step - loss: 0.0041 - val_loss: 0.0048\n",
|
||||
"Epoch 70/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 0.0037 - val_loss: 0.0049\n",
|
||||
"Epoch 71/120\n",
|
||||
"20/20 [==============================] - 0s 25ms/step - loss: 0.0041 - val_loss: 0.0049\n",
|
||||
"Epoch 72/120\n",
|
||||
"20/20 [==============================] - 0s 21ms/step - loss: 0.0036 - val_loss: 0.0049\n",
|
||||
"Epoch 73/120\n",
|
||||
"20/20 [==============================] - 1s 24ms/step - loss: 0.0038 - val_loss: 0.0048\n",
|
||||
"Epoch 74/120\n",
|
||||
"20/20 [==============================] - 0s 23ms/step - loss: 0.0037 - val_loss: 0.0050\n",
|
||||
"Epoch 75/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 0.0037 - val_loss: 0.0048\n",
|
||||
"Epoch 76/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 0.0038 - val_loss: 0.0048\n",
|
||||
"Epoch 77/120\n",
|
||||
"20/20 [==============================] - 0s 21ms/step - loss: 0.0037 - val_loss: 0.0048\n",
|
||||
"Epoch 78/120\n",
|
||||
"20/20 [==============================] - 0s 23ms/step - loss: 0.0038 - val_loss: 0.0048\n",
|
||||
"Epoch 79/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 0.0036 - val_loss: 0.0048\n",
|
||||
"Epoch 80/120\n",
|
||||
"20/20 [==============================] - 0s 22ms/step - loss: 0.0034 - val_loss: 0.0048\n",
|
||||
"Epoch 81/120\n",
|
||||
"20/20 [==============================] - 0s 23ms/step - loss: 0.0034 - val_loss: 0.0047\n",
|
||||
"Epoch 82/120\n",
|
||||
"20/20 [==============================] - 0s 23ms/step - loss: 0.0037 - val_loss: 0.0047\n",
|
||||
"Epoch 83/120\n",
|
||||
"20/20 [==============================] - 0s 25ms/step - loss: 0.0033 - val_loss: 0.0047\n",
|
||||
"Epoch 84/120\n",
|
||||
"20/20 [==============================] - 0s 22ms/step - loss: 0.0037 - val_loss: 0.0047\n",
|
||||
"Epoch 85/120\n",
|
||||
"20/20 [==============================] - 0s 24ms/step - loss: 0.0036 - val_loss: 0.0047\n",
|
||||
"Epoch 86/120\n",
|
||||
"20/20 [==============================] - 0s 23ms/step - loss: 0.0034 - val_loss: 0.0047\n",
|
||||
"Epoch 87/120\n",
|
||||
"20/20 [==============================] - 0s 25ms/step - loss: 0.0034 - val_loss: 0.0047\n",
|
||||
"Epoch 88/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 0.0034 - val_loss: 0.0047\n",
|
||||
"Epoch 89/120\n",
|
||||
"20/20 [==============================] - 0s 21ms/step - loss: 0.0036 - val_loss: 0.0047\n",
|
||||
"Epoch 90/120\n",
|
||||
"20/20 [==============================] - 0s 23ms/step - loss: 0.0036 - val_loss: 0.0047\n",
|
||||
"Epoch 91/120\n",
|
||||
"20/20 [==============================] - 0s 23ms/step - loss: 0.0036 - val_loss: 0.0047\n",
|
||||
"Epoch 92/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 0.0034 - val_loss: 0.0047\n",
|
||||
"Epoch 93/120\n",
|
||||
"20/20 [==============================] - 0s 25ms/step - loss: 0.0034 - val_loss: 0.0047\n",
|
||||
"Epoch 94/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 0.0037 - val_loss: 0.0047\n",
|
||||
"Epoch 95/120\n",
|
||||
"20/20 [==============================] - 0s 21ms/step - loss: 0.0035 - val_loss: 0.0047\n",
|
||||
"Epoch 96/120\n",
|
||||
"20/20 [==============================] - 0s 23ms/step - loss: 0.0032 - val_loss: 0.0047\n",
|
||||
"Epoch 97/120\n",
|
||||
"20/20 [==============================] - 0s 25ms/step - loss: 0.0035 - val_loss: 0.0047\n",
|
||||
"Epoch 98/120\n",
|
||||
"20/20 [==============================] - 0s 21ms/step - loss: 0.0038 - val_loss: 0.0047\n",
|
||||
"Epoch 99/120\n",
|
||||
"20/20 [==============================] - 0s 23ms/step - loss: 0.0033 - val_loss: 0.0047\n",
|
||||
"Epoch 100/120\n",
|
||||
"20/20 [==============================] - 0s 23ms/step - loss: 0.0036 - val_loss: 0.0047\n",
|
||||
"Epoch 101/120\n",
|
||||
"20/20 [==============================] - 1s 26ms/step - loss: 0.0033 - val_loss: 0.0047\n",
|
||||
"Epoch 102/120\n",
|
||||
"20/20 [==============================] - 0s 25ms/step - loss: 0.0035 - val_loss: 0.0047\n",
|
||||
"Epoch 103/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 0.0035 - val_loss: 0.0047\n",
|
||||
"Epoch 104/120\n",
|
||||
"20/20 [==============================] - 0s 22ms/step - loss: 0.0035 - val_loss: 0.0047\n",
|
||||
"Epoch 105/120\n",
|
||||
"20/20 [==============================] - 0s 23ms/step - loss: 0.0036 - val_loss: 0.0047\n",
|
||||
"Epoch 106/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 0.0034 - val_loss: 0.0047\n",
|
||||
"Epoch 107/120\n",
|
||||
"20/20 [==============================] - 0s 25ms/step - loss: 0.0036 - val_loss: 0.0047\n",
|
||||
"Epoch 108/120\n",
|
||||
"20/20 [==============================] - 0s 21ms/step - loss: 0.0034 - val_loss: 0.0047\n",
|
||||
"Epoch 109/120\n",
|
||||
"20/20 [==============================] - 1s 24ms/step - loss: 0.0037 - val_loss: 0.0047\n",
|
||||
"Epoch 110/120\n",
|
||||
"20/20 [==============================] - 0s 24ms/step - loss: 0.0038 - val_loss: 0.0047\n",
|
||||
"Epoch 111/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 0.0036 - val_loss: 0.0047\n",
|
||||
"Epoch 112/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 0.0035 - val_loss: 0.0047\n",
|
||||
"Epoch 113/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 0.0037 - val_loss: 0.0047\n",
|
||||
"Epoch 114/120\n",
|
||||
"20/20 [==============================] - 0s 20ms/step - loss: 0.0035 - val_loss: 0.0047\n",
|
||||
"Epoch 115/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 0.0033 - val_loss: 0.0047\n",
|
||||
"Epoch 116/120\n",
|
||||
"20/20 [==============================] - 0s 21ms/step - loss: 0.0036 - val_loss: 0.0047\n",
|
||||
"Epoch 117/120\n",
|
||||
"20/20 [==============================] - 0s 25ms/step - loss: 0.0032 - val_loss: 0.0047\n",
|
||||
"Epoch 118/120\n",
|
||||
"20/20 [==============================] - 0s 26ms/step - loss: 0.0036 - val_loss: 0.0047\n",
|
||||
"Epoch 119/120\n",
|
||||
"20/20 [==============================] - 0s 25ms/step - loss: 0.0037 - val_loss: 0.0047\n",
|
||||
"Epoch 120/120\n",
|
||||
"20/20 [==============================] - 0s 21ms/step - loss: 0.0036 - val_loss: 0.0047\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"trainable_model.compile(optimizer='adam', loss=None)\n",
|
||||
"hist = trainable_model.fit([X, Y[0], Y[1]], epochs=120, batch_size=8, verbose=1, \n",
|
||||
|
@ -1463,38 +1323,6 @@
|
|||
"[np.exp(K.get_value(log_var[0]))**0.5 for log_var in trainable_model.layers[-1].log_vars]"
|
||||
]
|
||||
},
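The expression above reads the learned uncertainties out of the multi-task loss layer: in the usual formulation that `CustomMultiLossLayer` appears to follow, each trainable `log_var` stores log σ² for one output, so `np.exp(K.get_value(log_var[0])) ** 0.5` recovers the per-task noise scale σ. This is an interpretation of the standard construction, not something spelled out in the hunks shown here. The same algebra on a plain number:

```python
import numpy as np

log_var = -2.3                   # illustrative value of log(sigma^2)
sigma = np.exp(log_var) ** 0.5   # sigma = exp(log_var / 2)
```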
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 43,
|
||||
"id": "b0d5d8ad-aadd-4218-b5b7-9691a2d3eeef",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pred_rst = pd.DataFrame.from_records(np.squeeze(np.asarray(rst), axis=2).T, columns=out_cols)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 44,
|
||||
"id": "0a2bcb45-da86-471b-a61d-314e29430d6a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"real_rst = test[out_cols].copy()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 45,
|
||||
"id": "e124f7c0-fdd5-43b9-b649-ff7d9dd59641",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for col in out_cols:\n",
|
||||
" pred_rst[col] = pred_rst[col] * (maxs[col] - mins[col]) + mins[col]\n",
|
||||
" real_rst[col] = real_rst[col] * (maxs[col] - mins[col]) + mins[col]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 46,
|
||||
|
@ -1516,6 +1344,18 @@
|
|||
"real_rst.columns"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "294813b8-90be-4007-9fd6-c26ee7bb9652",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for col in out_cols:\n",
|
||||
" pred_rst[col] = pred_rst[col] * (maxs[col] - mins[col]) + mins[col]\n",
|
||||
" real_rst[col] = real_rst[col] * (maxs[col] - mins[col]) + mins[col]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 47,
|
||||
|
@ -1531,38 +1371,7 @@
|
|||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 48,
|
||||
"id": "26ea6cfa-efad-443c-9dd9-844f8be42b91",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score, mean_absolute_percentage_error"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 49,
|
||||
"id": "28072e7c-c9d5-4ff6-940d-e94ae879afc9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def print_eva(y_true, y_pred, tp):\n",
|
||||
" MSE = mean_squared_error(y_true, y_pred)\n",
|
||||
" RMSE = np.sqrt(MSE)\n",
|
||||
" MAE = mean_absolute_error(y_true, y_pred)\n",
|
||||
" MAPE = mean_absolute_percentage_error(y_true, y_pred)\n",
|
||||
" R_2 = r2_score(y_true, y_pred)\n",
|
||||
" print(f\"COL: {tp}, MSE: {format(MSE, '.2E')}\", end=',')\n",
|
||||
" print(f'RMSE: {round(RMSE, 4)}', end=',')\n",
|
||||
" print(f'MAPE: {round(MAPE, 4) * 100} %', end=',')\n",
|
||||
" print(f'MAE: {round(MAE, 4)}', end=',')\n",
|
||||
" print(f'R_2: {round(R_2, 4)}')\n",
|
||||
" return [MSE, RMSE, MAE, MAPE, R_2]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 51,
|
||||
"execution_count": 56,
|
||||
"id": "4ec4caa9-7c46-4fc8-a94b-cb659e924304",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
|
@ -1570,8 +1379,8 @@
|
|||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"COL: 比表面积, MSE: 3.35E-01,RMSE: 0.5791,MAPE: 1.6400000000000001 %,MAE: 0.5041,R_2: 0.8698\n",
|
||||
"COL: 总孔体积, MSE: 1.11E+00,RMSE: 1.0549,MAPE: 1.5 %,MAE: 0.8137,R_2: 0.876\n"
|
||||
"COL: 挥发分Vad, MSE: 3.35E-01,RMSE: 0.579,MAPE: 1.639 %,MAE: 0.504,R_2: 0.87\n",
|
||||
"COL: 固定炭Fcad, MSE: 1.11E+00,RMSE: 1.055,MAPE: 1.497 %,MAE: 0.814,R_2: 0.876\n"
|
||||
]
|
||||
}
|
||||
],
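The output change above is largely a formatting fix: the old `print_eva` rounded MAPE first and scaled afterwards (`round(MAPE, 4) * 100`), which reintroduces binary floating-point noise such as the `1.6400000000000001 %` in the old text, while the revised version scales first and rounds last (`round(MAPE * 100, 3)`). In short:

```python
mape = 0.016395                       # illustrative value
print(f"{round(mape, 4) * 100} %")    # rounding first, scaling after -> float noise like 1.6400000000000001 %
print(f"{round(mape * 100, 3)} %")    # scaling first, rounding last -> a clean 3-decimal figure
```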
|
||||
|
|
168  20240102.ipynb
|
@ -12,7 +12,7 @@
|
|||
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 2,
"id": "6a94278b-8f51-4edc-966b-4a32876a4536",
"metadata": {},
"outputs": [
@@ -215,7 +215,7 @@
"[228 rows x 8 columns]"
]
},
"execution_count": 6,
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
@@ -227,7 +227,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 3,
"id": "f72789a6-f3fa-4ab1-8b62-999413958608",
"metadata": {},
"outputs": [
@@ -244,7 +244,7 @@
" '固定炭Fcad(%)']"
]
},
"execution_count": 10,
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
@@ -256,7 +256,7 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 4,
"id": "6ffb1989-3f45-4d1c-84c9-59b1045b7d9e",
"metadata": {},
"outputs": [],
@@ -266,7 +266,7 @@
},
{
"cell_type": "code",
"execution_count": 27,
"execution_count": 5,
"id": "9c708cc0-9f1b-4669-a350-6d24cb720794",
"metadata": {},
"outputs": [],
@@ -276,7 +276,7 @@
},
{
"cell_type": "code",
"execution_count": 16,
"execution_count": 6,
"id": "103349e1-aa4a-427a-a489-9ab28787088b",
"metadata": {},
"outputs": [
@@ -286,7 +286,7 @@
"['氢Had(%)', '碳Cad(%)', '氮Nad(%)', '氧Oad(%)', '弹筒发热量Qb,adMJ/kg']"
]
},
"execution_count": 16,
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
@@ -298,7 +298,7 @@
},
{
"cell_type": "code",
"execution_count": 44,
"execution_count": 7,
"id": "839e45dc-e9c8-4956-950b-035687469c81",
"metadata": {},
"outputs": [
@@ -409,7 +409,7 @@
"4 54.78 "
]
},
"execution_count": 44,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@@ -421,17 +421,7 @@
},
{
"cell_type": "code",
"execution_count": 19,
"id": "24233d12-9468-49b8-a371-0c6c508c387e",
"metadata": {},
"outputs": [],
"source": [
"import seaborn as sns"
]
},
{
"cell_type": "code",
"execution_count": 21,
"execution_count": 8,
"id": "54cd27a6-1a8a-47c0-93d9-c948960a7842",
"metadata": {},
"outputs": [],
@@ -441,7 +431,7 @@
},
{
"cell_type": "code",
"execution_count": 23,
"execution_count": 9,
"id": "bba14f71-9d69-4c82-b6bc-b9b74c725b25",
"metadata": {},
"outputs": [],
@@ -451,7 +441,7 @@
},
{
"cell_type": "code",
"execution_count": 24,
"execution_count": 10,
"id": "e3a9ad55-0132-430f-ac57-c2e7f8e8590a",
"metadata": {},
"outputs": [],
@@ -461,13 +451,12 @@
},
{
"cell_type": "code",
"execution_count": 40,
"execution_count": 25,
"id": "013c6a58-65f6-48e9-8d7f-b56c87de5b11",
"metadata": {},
"outputs": [],
"source": [
"param_xgb = {\"silent\": True,\n",
" \"obj\": 'reg:linear',\n",
"params_xgb = {\"objective\": 'reg:squarederror',\n",
" \"subsample\": 1,\n",
" \"max_depth\": 15,\n",
" \"eta\": 0.3,\n",
@@ -475,12 +464,12 @@
" \"lambda\": 1,\n",
" \"alpha\": 0,\n",
" \"colsample_bytree\": 0.9,}\n",
"num_round = 1000"
"num_boost_round = 1000"
]
},
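Note: a hedged sketch of how the renamed params_xgb and num_boost_round above would typically feed XGBoost's native training API; X_train, y_train and X_test are assumptions standing in for arrays the notebook prepares in cells not shown in this hunk.

import xgboost as xgb

# Assumed train/test arrays, not shown in this diff.
dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test)

# 'reg:squarederror' is the current name of the squared-error objective
# ('reg:linear' is deprecated), and xgb.train accepts num_boost_round directly.
booster = xgb.train(params_xgb, dtrain, num_boost_round=num_boost_round)
y_pred = booster.predict(dtest)

Dropping the old "silent" key matches current XGBoost releases, where log output is controlled through the verbosity parameter instead.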
{
"cell_type": "code",
"execution_count": 41,
"execution_count": 26,
"id": "086f1901-8388-47e9-ae7c-1b2709bc1e22",
"metadata": {},
"outputs": [],
@@ -491,7 +480,7 @@
},
{
"cell_type": "code",
"execution_count": 43,
"execution_count": 27,
"id": "fb7b06af-84bc-483c-b086-7826d7befc9c",
"metadata": {},
"outputs": [
@@ -499,30 +488,30 @@
"name": "stdout",
"output_type": "stream",
"text": [
"MSE: 1.9436, RMSE: 1.3941, MAE: 1.1861, MAPE: 3.94 %, R_2: 0.6095\n",
"MSE: 1.8735, RMSE: 1.3688, MAE: 1.132, MAPE: 3.77 %, R_2: 0.495\n",
"MSE: 2.0587, RMSE: 1.4348, MAE: 1.0706, MAPE: 4.08 %, R_2: 0.7862\n",
"MSE: 1.9298, RMSE: 1.3892, MAE: 1.1469, MAPE: 3.84 %, R_2: 0.5332\n",
"MSE: 1.4583, RMSE: 1.2076, MAE: 1.097, MAPE: 3.67 %, R_2: 0.6894\n",
"MSE: 2.0822, RMSE: 1.443, MAE: 1.1645, MAPE: 3.88 %, R_2: 0.5975\n",
"MSE: 1.3521, RMSE: 1.1628, MAE: 0.9905, MAPE: 3.37 %, R_2: 0.7479\n",
"MSE: 1.4057, RMSE: 1.1856, MAE: 0.9998, MAPE: 3.3 %, R_2: 0.2946\n",
"MSE: 2.2274, RMSE: 1.4925, MAE: 1.2638, MAPE: 4.19 %, R_2: 0.6785\n",
"MSE: 1.4866, RMSE: 1.2193, MAE: 1.0797, MAPE: 3.67 %, R_2: 0.7261\n"
"MSE: 0.475, RMSE: 0.6892, MAE: 0.5507, MAPE: 1.86 %, R_2: 0.9046\n",
"MSE: 1.1415, RMSE: 1.0684, MAE: 0.9133, MAPE: 3.06 %, R_2: 0.6923\n",
"MSE: 0.7247, RMSE: 0.8513, MAE: 0.6606, MAPE: 2.32 %, R_2: 0.9247\n",
"MSE: 1.3652, RMSE: 1.1684, MAE: 0.9609, MAPE: 3.24 %, R_2: 0.6698\n",
"MSE: 0.4552, RMSE: 0.6747, MAE: 0.5732, MAPE: 1.94 %, R_2: 0.903\n",
"MSE: 0.6357, RMSE: 0.7973, MAE: 0.6374, MAPE: 2.2 %, R_2: 0.8771\n",
"MSE: 0.9972, RMSE: 0.9986, MAE: 0.752, MAPE: 2.47 %, R_2: 0.8141\n",
"MSE: 1.5218, RMSE: 1.2336, MAE: 1.0569, MAPE: 3.45 %, R_2: 0.2363\n",
"MSE: 0.6891, RMSE: 0.8301, MAE: 0.6825, MAPE: 2.22 %, R_2: 0.9005\n",
"MSE: 1.6864, RMSE: 1.2986, MAE: 1.0004, MAPE: 3.51 %, R_2: 0.6893\n"
]
},
{
"data": {
"text/plain": [
"MSE 1.781792\n",
"RMSE 1.329760\n",
"MAE 1.113084\n",
"MAPE 0.037719\n",
"R_2 0.615796\n",
"MSE 0.969172\n",
"RMSE 0.961023\n",
"MAE 0.778783\n",
"MAPE 0.026288\n",
"R_2 0.761188\n",
"dtype: float64"
]
},
"execution_count": 43,
"execution_count": 27,
"metadata": {},
"output_type": "execute_result"
}
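Note: the ten per-fold lines and the mean Series in this cell are consistent with a 10-fold loop that collects [MSE, RMSE, MAE, MAPE, R_2] for each fold and averages them column-wise. A generic sketch of that pattern follows; the estimator `model` and the numpy arrays X, y are assumptions, since the actual loop sits in a hunk that is not shown here.

from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error, mean_absolute_error, mean_absolute_percentage_error, r2_score
import numpy as np
import pandas as pd

rows = []
for tr, te in KFold(n_splits=10, shuffle=True, random_state=0).split(X):
    model.fit(X[tr], y[tr])                      # assumed regressor
    pred = model.predict(X[te])
    mse = mean_squared_error(y[te], pred)
    rows.append([mse, np.sqrt(mse),
                 mean_absolute_error(y[te], pred),
                 mean_absolute_percentage_error(y[te], pred),
                 r2_score(y[te], pred)])

# Column-wise mean yields a Series shaped like the execute_result above.
print(pd.DataFrame(rows, columns=['MSE', 'RMSE', 'MAE', 'MAPE', 'R_2']).mean())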
@@ -558,7 +547,7 @@
},
{
"cell_type": "code",
"execution_count": 48,
"execution_count": 28,
"id": "90841cb7-4f28-4a33-93ac-93df69f1a5a1",
"metadata": {},
"outputs": [
@@ -566,30 +555,30 @@
"name": "stdout",
"output_type": "stream",
"text": [
"MSE: 4.6724, RMSE: 2.1616, MAE: 1.7297, MAPE: 3.42 %, R2: 0.8346\n",
"MSE: 3.0512, RMSE: 1.7468, MAE: 1.4485, MAPE: 2.62 %, R2: 0.8011\n",
"MSE: 7.6672, RMSE: 2.769, MAE: 1.951, MAPE: 4.56 %, R2: 0.8856\n",
"MSE: 4.0334, RMSE: 2.0083, MAE: 1.487, MAPE: 2.77 %, R2: 0.8216\n",
"MSE: 2.6382, RMSE: 1.6243, MAE: 1.1551, MAPE: 2.12 %, R2: 0.846\n",
"MSE: 5.8097, RMSE: 2.4103, MAE: 1.8683, MAPE: 3.8 %, R2: 0.83\n",
"MSE: 2.3446, RMSE: 1.5312, MAE: 1.1294, MAPE: 2.28 %, R2: 0.9069\n",
"MSE: 3.0069, RMSE: 1.734, MAE: 1.3782, MAPE: 2.46 %, R2: 0.6541\n",
"MSE: 4.1652, RMSE: 2.0409, MAE: 1.5685, MAPE: 3.2 %, R2: 0.859\n",
"MSE: 4.2023, RMSE: 2.05, MAE: 1.6284, MAPE: 3.2 %, R2: 0.869\n"
"MSE: 0.9821, RMSE: 0.991, MAE: 0.7698, MAPE: 1.44 %, R2: 0.9652\n",
"MSE: 1.2674, RMSE: 1.1258, MAE: 0.8756, MAPE: 1.64 %, R2: 0.9174\n",
"MSE: 0.9137, RMSE: 0.9559, MAE: 0.757, MAPE: 1.46 %, R2: 0.9864\n",
"MSE: 1.6012, RMSE: 1.2654, MAE: 1.0173, MAPE: 1.89 %, R2: 0.9292\n",
"MSE: 1.4694, RMSE: 1.2122, MAE: 0.8524, MAPE: 1.59 %, R2: 0.9142\n",
"MSE: 0.7552, RMSE: 0.869, MAE: 0.7202, MAPE: 1.39 %, R2: 0.9779\n",
"MSE: 0.5474, RMSE: 0.7398, MAE: 0.5467, MAPE: 1.0 %, R2: 0.9783\n",
"MSE: 1.2779, RMSE: 1.1305, MAE: 0.9452, MAPE: 1.73 %, R2: 0.853\n",
"MSE: 1.1908, RMSE: 1.0912, MAE: 0.9004, MAPE: 1.72 %, R2: 0.9597\n",
"MSE: 3.9312, RMSE: 1.9827, MAE: 1.2707, MAPE: 2.65 %, R2: 0.8775\n"
]
},
{
"data": {
"text/plain": [
"MSE 4.159107\n",
"RMSE 2.007631\n",
"MAE 1.534427\n",
"MAPE 0.030424\n",
"R2 0.830794\n",
"MSE 1.393623\n",
"RMSE 1.136351\n",
"MAE 0.865538\n",
"MAPE 0.016509\n",
"R2 0.935872\n",
"dtype: float64"
]
},
"execution_count": 48,
"execution_count": 28,
"metadata": {},
"output_type": "execute_result"
}
@@ -625,61 +614,10 @@
},
{
"cell_type": "code",
"execution_count": 67,
"execution_count": null,
"id": "aa67bc97-1258-44bb-9dae-14ace1661ff6",
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>MSE</th>\n",
" <th>RMSE</th>\n",
" <th>MAE</th>\n",
" <th>MAPE</th>\n",
" <th>R2</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>十折交叉验证均值</th>\n",
" <td>4.159107</td>\n",
" <td>2.007631</td>\n",
" <td>1.534427</td>\n",
" <td>0.030424</td>\n",
" <td>0.830794</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" MSE RMSE MAE MAPE R2\n",
"十折交叉验证均值 4.159107 2.007631 1.534427 0.030424 0.830794"
]
},
"execution_count": 67,
"metadata": {},
"output_type": "execute_result"
}
],
"outputs": [],
"source": []
},
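Note: the execute_result removed from this last cell was a one-row summary frame of the 10-fold means. A small sketch of how such a frame is typically rebuilt, assuming mean_scores is the averaged Series from the cross-validation cell (hypothetical name):

import pandas as pd

# mean_scores: hypothetical Series indexed by ['MSE', 'RMSE', 'MAE', 'MAPE', 'R2']
summary = pd.DataFrame([mean_scores], index=['十折交叉验证均值'])  # "mean over 10-fold CV"
summary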
{

File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long