In [1]:
import os

os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'  # match nvidia-smi device numbering
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'      # GPUs visible to TensorFlow
In [2]:
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt

# Configure matplotlib to render Chinese labels
from pylab import mpl
mpl.rcParams["font.sans-serif"] = ["SimHei"]  # use SimHei for Chinese characters
mpl.rcParams["axes.unicode_minus"] = False    # render the minus sign correctly
In [3]:
data = pd.read_csv('./data/20240102/train_data.csv')
In [4]:
out_cols = [x for x in data.columns if '碳材料' in x]
In [5]:
out_cols
Out[5]:
['碳材料结构特征-比表面积', '碳材料结构特征-总孔体积', '碳材料结构特征-微孔体积', '碳材料结构特征-平均孔径']
In [6]:
train_data = data.dropna(subset=out_cols).fillna(0)
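After dropping rows with missing targets and zero-filling the remaining features, no NaN should survive. A quick sanity check (a sketch, not part of the original run):

# All four 碳材料 targets present, all other NaNs replaced by 0
assert train_data.isna().sum().sum() == 0
train_data.shape   # (123, 42) after the target-based dropna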
In [7]:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow.keras.backend as K
2024-01-04 16:14:39.388684: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0
In [8]:
tf.test.is_gpu_available()
WARNING:tensorflow:From /tmp/ipykernel_43672/337460670.py:1: is_gpu_available (from tensorflow.python.framework.test_util) is deprecated and will be removed in a future version. Instructions for updating: Use `tf.config.list_physical_devices('GPU')` instead.
2024-01-04 16:14:40.311876: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX512F FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-01-04 16:14:40.319726: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcuda.so.1
2024-01-04 16:14:40.406804: E tensorflow/stream_executor/cuda/cuda_driver.cc:328] failed call to cuInit: CUDA_ERROR_INVALID_DEVICE: invalid device ordinal
2024-01-04 16:14:40.406829: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: zhaojh-yv621
2024-01-04 16:14:40.406833: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: zhaojh-yv621
2024-01-04 16:14:40.406963: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:200] libcuda reported version is: 520.61.5
2024-01-04 16:14:40.406982: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:204] kernel reported version is: 520.61.5
2024-01-04 16:14:40.406985: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:310] kernel version seems to match DSO: 520.61.5
Out[8]:
False
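The check above is deprecated; as the warning suggests, the non-deprecated equivalent (a minimal sketch for the same TF 2.x setup) is:

gpus = tf.config.list_physical_devices('GPU')
print(gpus)   # [] here: cuInit failed, so training below runs on CPU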
In [9]:
class TransformerBlock(layers.Layer):
    def __init__(self, embed_dim, num_heads, ff_dim, name, rate=0.1):
        super().__init__()
        self.att = layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim, name=name)
        self.ffn = keras.Sequential(
            [layers.Dense(ff_dim, activation="relu"), layers.Dense(embed_dim)]
        )
        self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = layers.Dropout(rate)
        self.dropout2 = layers.Dropout(rate)

    def call(self, inputs, training):
        attn_output = self.att(inputs, inputs)
        attn_output = self.dropout1(attn_output, training=training)
        out1 = self.layernorm1(inputs + attn_output)
        ffn_output = self.ffn(out1)
        ffn_output = self.dropout2(ffn_output, training=training)
        return self.layernorm2(out1 + ffn_output)
In [10]:
from tensorflow.keras import Model
In [11]:
from tensorflow.keras.initializers import Constant
In [12]:
# Custom loss layer
class CustomMultiLossLayer(layers.Layer):
    def __init__(self, nb_outputs=2, **kwargs):
        self.nb_outputs = nb_outputs
        self.is_placeholder = True
        super(CustomMultiLossLayer, self).__init__(**kwargs)

    def build(self, input_shape=None):
        # initialise log_vars
        self.log_vars = []
        for i in range(self.nb_outputs):
            self.log_vars += [self.add_weight(name='log_var' + str(i), shape=(1,),
                                              initializer=tf.initializers.he_normal(),
                                              trainable=True)]
        super(CustomMultiLossLayer, self).build(input_shape)

    def multi_loss(self, ys_true, ys_pred):
        assert len(ys_true) == self.nb_outputs and len(ys_pred) == self.nb_outputs
        loss = 0
        for y_true, y_pred, log_var in zip(ys_true, ys_pred, self.log_vars):
            mse = (y_true - y_pred) ** 2.
            pre = K.exp(-log_var[0])   # precision = 1 / sigma^2
            loss += tf.abs(tf.reduce_logsumexp(pre * mse + log_var[0], axis=-1))
        return K.mean(loss)

    def call(self, inputs):
        ys_true = inputs[:self.nb_outputs]
        ys_pred = inputs[self.nb_outputs:]
        loss = self.multi_loss(ys_true, ys_pred)
        self.add_loss(loss, inputs=inputs)
        # We won't actually use the output.
        return K.concatenate(inputs, -1)
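For reference, CustomMultiLossLayer is in the spirit of the homoscedastic-uncertainty task weighting of Kendall et al. (CVPR 2018). With trainable log-variances $s_i = \log\sigma_i^2$, the canonical form of that loss is

$$\mathcal{L} = \sum_{i=1}^{4} \left( e^{-s_i}\,(y_i - \hat{y}_i)^2 + s_i \right)$$

multi_loss above computes exactly the inner term $e^{-s_i}\,\mathrm{mse} + s_i$, but then wraps it in reduce_logsumexp and abs, so it is a variant of, not identical to, the canonical formula.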
In [13]:
num_heads, ff_dim = 1, 12
In [38]:
def get_prediction_model():
    def build_output(out, out_name):
        self_block = TransformerBlock(64, num_heads, ff_dim, name=f'{out_name}_attn')
        out = self_block(out)
        out = layers.GlobalAveragePooling1D()(out)
        out = layers.Dropout(0.1)(out)
        out = layers.Dense(32, activation="relu")(out)
        # out = layers.Dense(1, name=out_name, activation="sigmoid")(out)
        return out

    inputs = layers.Input(shape=(1, len(feature_cols)), name='input')
    x = layers.Conv1D(filters=64, kernel_size=1, activation='relu')(inputs)
    # x = layers.Dropout(rate=0.1)(x)
    lstm_out = layers.Bidirectional(layers.LSTM(units=64, return_sequences=True))(x)
    lstm_out = layers.Dense(128, activation='relu')(lstm_out)
    transformer_block = TransformerBlock(128, num_heads, ff_dim, name='first_attn')
    out = transformer_block(lstm_out)
    out = layers.GlobalAveragePooling1D()(out)
    out = layers.Dropout(0.1)(out)
    out = layers.Dense(64, activation='relu')(out)
    out = K.expand_dims(out, axis=1)

    # One attention block + dense head per target
    bet = build_output(out, 'bet')
    mesco = build_output(out, 'mesco')
    micro = build_output(out, 'micro')
    avg = build_output(out, 'avg')
    bet = layers.Dense(1, activation='sigmoid', name='bet')(bet)
    mesco = layers.Dense(1, activation='sigmoid', name='mesco')(mesco)
    micro = layers.Dense(1, activation='sigmoid', name='micro')(micro)
    avg = layers.Dense(1, activation='sigmoid', name='avg')(avg)

    model = Model(inputs=[inputs], outputs=[bet, mesco, micro, avg])
    return model
In [39]:
def get_trainable_model(prediction_model):
    inputs = layers.Input(shape=(1, len(feature_cols)), name='input')
    bet, mesco, micro, avg = prediction_model(inputs)
    bet_real = layers.Input(shape=(1,), name='bet_real')
    mesco_real = layers.Input(shape=(1,), name='mesco_real')
    micro_real = layers.Input(shape=(1,), name='micro_real')
    avg_real = layers.Input(shape=(1,), name='avg_real')
    out = CustomMultiLossLayer(nb_outputs=4)(
        [bet_real, mesco_real, micro_real, avg_real, bet, mesco, micro, avg])
    return Model([inputs, bet_real, mesco_real, micro_real, avg_real], out)
In [40]:
# Min-max normalisation; constant columns are left unscaled
maxs = train_data.max()
mins = train_data.min()
for col in train_data.columns:
    if maxs[col] - mins[col] == 0:
        continue
    train_data[col] = (train_data[col] - mins[col]) / (maxs[col] - mins[col])
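maxs and mins are needed again below to map predictions back to physical units; a small helper (hypothetical name denormalize, not used in the original cells) makes that inverse transform explicit:

def denormalize(df, cols, mins, maxs):
    # Invert the min-max scaling applied above
    out = df.copy()
    for col in cols:
        out[col] = out[col] * (maxs[col] - mins[col]) + mins[col]
    return out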
In [41]:
train_data
Out[41]:
热处理条件-热处理次数 | 热处理条件-是否是中温停留 | 第一次热处理-温度 | 第一次热处理-升温速率 | 第一次热处理-保留时间 | 第二次热处理-温度 | 第二次热处理-升温速率 | 第二次热处理-保留时间 | 共碳化-是否是共碳化物质 | 共碳化-共碳化物质/沥青 | ... | 模板剂-种类_二氧化硅 | 模板剂-种类_氢氧化镁 | 模板剂-种类_氧化钙 | 模板剂-种类_氧化锌 | 模板剂-种类_氧化镁 | 模板剂-种类_氯化钠 | 模板剂-种类_氯化钾 | 模板剂-种类_碱式碳酸镁 | 模板剂-种类_碳酸钙 | 模板剂-种类_纤维素 | |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0 | 0.0 | 0.0 | 0.166667 | 0.3 | 0.5 | 0.000000 | 0.0 | 0.000000 | 0.0 | 0.0 | ... | 0 | 0.0 | 1.0 | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0.0 |
1 | 0.0 | 0.0 | 0.333333 | 0.3 | 0.5 | 0.000000 | 0.0 | 0.000000 | 0.0 | 0.0 | ... | 0 | 0.0 | 1.0 | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0.0 |
2 | 0.0 | 0.0 | 0.333333 | 0.3 | 0.5 | 0.000000 | 0.0 | 0.000000 | 0.0 | 0.0 | ... | 0 | 0.0 | 1.0 | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0.0 |
3 | 0.0 | 0.0 | 0.333333 | 0.3 | 0.5 | 0.000000 | 0.0 | 0.000000 | 0.0 | 0.0 | ... | 0 | 0.0 | 1.0 | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0.0 |
4 | 1.0 | 0.0 | 0.166667 | 0.3 | 0.5 | 0.666667 | 0.5 | 0.666667 | 0.0 | 0.0 | ... | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0 | 1.0 | 0.0 | 0.0 |
... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
144 | 0.0 | 0.0 | 0.333333 | 0.3 | 0.0 | 0.000000 | 0.0 | 0.000000 | 0.0 | 0.0 | ... | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0.0 |
145 | 0.0 | 0.0 | 0.500000 | 0.3 | 0.0 | 0.000000 | 0.0 | 0.000000 | 0.0 | 0.0 | ... | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0.0 |
146 | 0.0 | 0.0 | 0.666667 | 0.3 | 0.0 | 0.000000 | 0.0 | 0.000000 | 0.0 | 0.0 | ... | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0.0 |
147 | 0.0 | 0.0 | 0.500000 | 0.3 | 0.0 | 0.000000 | 0.0 | 0.000000 | 0.0 | 0.0 | ... | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0.0 |
148 | 0.0 | 0.0 | 0.500000 | 0.3 | 0.0 | 0.000000 | 0.0 | 0.000000 | 0.0 | 0.0 | ... | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0.0 |
123 rows × 42 columns
In [42]:
# feature_cols = [x for x in train_data.columns if x not in out_cols and '第二次' not in x]
feature_cols = [x for x in train_data.columns if x not in out_cols]
use_cols = feature_cols + out_cols
In [43]:
use_data = train_data.copy()
for col in use_cols:
    use_data[col] = use_data[col].astype('float32')
In [44]:
train, valid = train_test_split(use_data[use_cols], test_size=0.2, random_state=42, shuffle=True)
valid, test = train_test_split(valid, test_size=0.5, random_state=42, shuffle=True)
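This is roughly an 80/10/10 split of the 123 rows; a quick shape check (a sketch, not in the original run) should report about 98/12/13:

for name, part in [('train', train), ('valid', valid), ('test', test)]:
    print(name, part.shape)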
In [45]:
prediction_model = get_prediction_model()
trainable_model = get_trainable_model(prediction_model)
In [46]:
prediction_model.summary()
Model: "model_4" __________________________________________________________________________________________________ Layer (type) Output Shape Param # Connected to ================================================================================================== input (InputLayer) [(None, 1, 38)] 0 __________________________________________________________________________________________________ conv1d_3 (Conv1D) (None, 1, 64) 2496 input[0][0] __________________________________________________________________________________________________ bidirectional_3 (Bidirectional) (None, 1, 128) 66048 conv1d_3[0][0] __________________________________________________________________________________________________ dense_28 (Dense) (None, 1, 128) 16512 bidirectional_3[0][0] __________________________________________________________________________________________________ transformer_block_7 (Transforme (None, 1, 128) 202640 dense_28[0][0] __________________________________________________________________________________________________ global_average_pooling1d_7 (Glo (None, 128) 0 transformer_block_7[0][0] __________________________________________________________________________________________________ dropout_23 (Dropout) (None, 128) 0 global_average_pooling1d_7[0][0] __________________________________________________________________________________________________ dense_31 (Dense) (None, 64) 8256 dropout_23[0][0] __________________________________________________________________________________________________ tf.expand_dims_3 (TFOpLambda) (None, 1, 64) 0 dense_31[0][0] __________________________________________________________________________________________________ transformer_block_8 (Transforme (None, 1, 64) 52176 tf.expand_dims_3[0][0] __________________________________________________________________________________________________ transformer_block_9 (Transforme (None, 1, 64) 52176 tf.expand_dims_3[0][0] __________________________________________________________________________________________________ transformer_block_10 (Transform (None, 1, 64) 52176 tf.expand_dims_3[0][0] __________________________________________________________________________________________________ transformer_block_11 (Transform (None, 1, 64) 52176 tf.expand_dims_3[0][0] __________________________________________________________________________________________________ global_average_pooling1d_8 (Glo (None, 64) 0 transformer_block_8[0][0] __________________________________________________________________________________________________ global_average_pooling1d_9 (Glo (None, 64) 0 transformer_block_9[0][0] __________________________________________________________________________________________________ global_average_pooling1d_10 (Gl (None, 64) 0 transformer_block_10[0][0] __________________________________________________________________________________________________ global_average_pooling1d_11 (Gl (None, 64) 0 transformer_block_11[0][0] __________________________________________________________________________________________________ dense_34 (Dense) (None, 32) 2080 global_average_pooling1d_8[0][0] __________________________________________________________________________________________________ dense_37 (Dense) (None, 32) 2080 global_average_pooling1d_9[0][0] __________________________________________________________________________________________________ dense_40 (Dense) (None, 32) 2080 global_average_pooling1d_10[0][0] 
__________________________________________________________________________________________________ dense_43 (Dense) (None, 32) 2080 global_average_pooling1d_11[0][0] __________________________________________________________________________________________________ bet (Dense) (None, 1) 33 dense_34[0][0] __________________________________________________________________________________________________ mesco (Dense) (None, 1) 33 dense_37[0][0] __________________________________________________________________________________________________ micro (Dense) (None, 1) 33 dense_40[0][0] __________________________________________________________________________________________________ avg (Dense) (None, 1) 33 dense_43[0][0] ================================================================================================== Total params: 513,108 Trainable params: 513,108 Non-trainable params: 0 __________________________________________________________________________________________________
In [47]:
from tensorflow.keras import optimizers
from tensorflow.python.keras.utils.vis_utils import plot_model
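plot_model is imported but never called below; a minimal usage sketch (assumes pydot and graphviz are installed):

plot_model(prediction_model, to_file='prediction_model.png', show_shapes=True)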
In [48]:
X = np.expand_dims(train[feature_cols].values, axis=1)
Y = [x for x in train[out_cols].values.T]        # one 1-D target array per output head
Y_valid = [x for x in valid[out_cols].values.T]
In [49]:
from tensorflow.keras.callbacks import ReduceLROnPlateau

reduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=10, mode='auto')
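ReduceLROnPlateau's default factor is 0.1, so each trigger cuts the learning rate tenfold, which is consistent with val_loss freezing at 1.8254 late in the run below. Adding early stopping (a sketch, not used in the original run) would halt training once the plateau sets in:

from tensorflow.keras.callbacks import EarlyStopping

early_stop = EarlyStopping(monitor='val_loss', patience=20, restore_best_weights=True)
# then pass callbacks=[reduce_lr, early_stop] to fit()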
In [50]:
trainable_model.compile(optimizer='adam', loss=None)
hist = trainable_model.fit(
    [X, Y[0], Y[1], Y[2], Y[3]],
    epochs=160,
    batch_size=8,
    verbose=1,
    validation_data=[np.expand_dims(valid[feature_cols].values, axis=1),
                     Y_valid[0], Y_valid[1], Y_valid[2], Y_valid[3]],
    callbacks=[reduce_lr],
)
2024-01-04 16:17:21.543163: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:176] None of the MLIR Optimization Passes are enabled (registered 2)
2024-01-04 16:17:21.562835: I tensorflow/core/platform/profile_utils/cpu_utils.cc:114] CPU Frequency: 2200000000 Hz
Epoch 1/160
13/13 [==============================] - 6s 103ms/step - loss: 5.1128 - val_loss: 4.4845
Epoch 2/160
13/13 [==============================] - 0s 30ms/step - loss: 4.4173 - val_loss: 4.3305
...
Epoch 70/160
13/13 [==============================] - 0s 28ms/step - loss: 1.4306 - val_loss: 1.7314
...
Epoch 159/160
13/13 [==============================] - 0s 32ms/step - loss: 1.0356 - val_loss: 1.8254
Epoch 160/160
13/13 [==============================] - 0s 27ms/step - loss: 1.0162 - val_loss: 1.8254
In [27]:
rst = prediction_model.predict(np.expand_dims(test[feature_cols], axis=1))
rst
Out[27]:
[array([[0.00237915], [0.21877757], [0.00211415], [0.00235748], [0.2187585 ], [0.25013232], [0.00218698], [0.00213578], [0.0021604 ], [0.00213698], [0.00215602], [0.00211859], [0.00211859]], dtype=float32),
 array([[0.26313323], [0.43726084], [0.257788  ], [0.2622419 ], [0.43731606], [0.41615662], [0.2588436 ], [0.2605151 ], [0.2610975 ], [0.26035452], [0.25860977], [0.25888485], [0.2590733 ]], dtype=float32),
 array([[0.03315076], [0.43969226], [0.0066632 ], [0.0311569 ], [0.4396916 ], [0.46122804], [0.01751196], [0.0046435 ], [0.00397068], [0.00480857], [0.01166728], [0.00597936], [0.00580207]], dtype=float32),
 array([[0.2627051 ], [0.25722986], [0.30297792], [0.26330546], [0.25718838], [0.30138326], [0.27484083], [0.3198207 ], [0.3352574 ], [0.31860778], [0.28594404], [0.30712652], [0.3081199 ]], dtype=float32)]
In [28]:
# Learned per-task uncertainties: sigma_i = exp(log_var_i) ** 0.5
[np.exp(K.get_value(log_var[0]))**0.5 for log_var in trainable_model.layers[-1].log_vars]
Out[28]:
[0.44671712167235617, 0.995773503303174, 0.8775154468883085, 0.9863306026616467]
In [29]:
pred_rst = pd.DataFrame.from_records(np.squeeze(np.asarray(rst), axis=2).T, columns=out_cols)
In [30]:
real_rst = test[out_cols].copy()
In [31]:
# De-normalise predictions and ground truth back to physical units
for col in out_cols:
    pred_rst[col] = pred_rst[col] * (maxs[col] - mins[col]) + mins[col]
    real_rst[col] = real_rst[col] * (maxs[col] - mins[col]) + mins[col]
In [32]:
real_rst.columns
Out[32]:
Index(['碳材料结构特征-比表面积', '碳材料结构特征-总孔体积', '碳材料结构特征-微孔体积', '碳材料结构特征-平均孔径'], dtype='object')
In [33]:
y_pred_bet = pred_rst['碳材料结构特征-比表面积'].values.reshape(-1,)
y_pred_tpv = pred_rst['碳材料结构特征-总孔体积'].values.reshape(-1,)
y_pred_micro = pred_rst['碳材料结构特征-微孔体积'].values.reshape(-1,)
y_pred_apd = pred_rst['碳材料结构特征-平均孔径'].values.reshape(-1,)
y_true_bet = real_rst['碳材料结构特征-比表面积'].values.reshape(-1,)
y_true_tpv = real_rst['碳材料结构特征-总孔体积'].values.reshape(-1,)
y_true_micro = real_rst['碳材料结构特征-微孔体积'].values.reshape(-1,)
y_true_apd = real_rst['碳材料结构特征-平均孔径'].values.reshape(-1,)
In [34]:
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score, mean_absolute_percentage_error
In [35]:
def print_eva(y_true, y_pred, tp):
    MSE = mean_squared_error(y_true, y_pred)
    RMSE = np.sqrt(MSE)
    MAE = mean_absolute_error(y_true, y_pred)
    MAPE = mean_absolute_percentage_error(y_true, y_pred)
    R_2 = r2_score(y_true, y_pred)
    print(f"COL: {tp}, MSE: {format(MSE, '.2E')}", end=', ')
    print(f'RMSE: {round(RMSE, 4)}', end=', ')
    print(f'MAPE: {round(MAPE * 100, 2)} %', end=', ')  # scale to percent before rounding
    print(f'MAE: {round(MAE, 4)}', end=', ')
    print(f'R_2: {round(R_2, 4)}')
    return [MSE, RMSE, MAE, MAPE, R_2]
In [36]:
bet_eva = print_eva(y_true_bet, y_pred_bet, tp='比表面积')
tpv_eva = print_eva(y_true_tpv, y_pred_tpv, tp='总孔体积')
micro_eva = print_eva(y_true_micro, y_pred_micro, tp='微孔体积')
apd_eva = print_eva(y_true_apd, y_pred_apd, tp='平均孔径')
COL: 比表面积, MSE: 2.59E+06, RMSE: 1609.7549, MAPE: 90.44 %, MAE: 1456.9479, R_2: -3.4577
COL: 总孔体积, MSE: 2.74E-01, RMSE: 0.5234, MAPE: 36.56 %, MAE: 0.4001, R_2: 0.1427
COL: 微孔体积, MSE: 1.45E-01, RMSE: 0.3802, MAPE: 77.27 %, MAE: 0.324, R_2: -2.0216
COL: 平均孔径, MSE: 1.44E+00, RMSE: 1.201, MAPE: 42.24 %, MAE: 1.0489, R_2: -0.0048