In [1]:
import os
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'   # PCI bus order so device IDs match nvidia-smi
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
In [2]:
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# Two extra lines for plotting Chinese text
from pylab import mpl
mpl.rcParams["font.sans-serif"] = ["SimHei"]  # font that can display Chinese characters
mpl.rcParams["axes.unicode_minus"] = False    # keep minus signs rendering correctly
In [3]:
data_0102 = pd.read_excel('./data/20240102/20240102.xlsx', header=[0,1,2])
data_0102
Out[3]:
Unnamed: 0_level_0 | 氢 | 碳 | 氮 | 氧 | 弹筒发热量 | 挥发分 | 固定炭 | |
---|---|---|---|---|---|---|---|---|
化验编号 | Had | Cad | Nad | Oad | Qb,ad | Vad | Fcad | |
Unnamed: 0_level_2 | (%) | (%) | (%) | (%) | MJ/kg | (%) | (%) | |
0 | 2720110529 | 3.93 | 70.18 | 0.81 | 25.079 | 27.820 | 32.06 | 55.68 |
1 | 2720096883 | 3.78 | 68.93 | 0.77 | 26.512 | 27.404 | 29.96 | 54.71 |
2 | 2720109084 | 3.48 | 69.60 | 0.76 | 26.148 | 27.578 | 29.31 | 55.99 |
3 | 2720084708 | 3.47 | 66.71 | 0.76 | 29.055 | 26.338 | 28.58 | 53.87 |
4 | 2720062721 | 3.87 | 68.78 | 0.80 | 26.542 | 27.280 | 29.97 | 54.78 |
... | ... | ... | ... | ... | ... | ... | ... | ... |
223 | 2720030490 | 4.12 | 68.85 | 0.97 | 26.055 | 27.864 | 32.94 | 51.89 |
224 | 2720028633 | 3.97 | 67.04 | 0.94 | 28.043 | 27.368 | 31.88 | 51.38 |
225 | 2720028634 | 4.12 | 68.42 | 0.96 | 26.493 | 27.886 | 33.16 | 52.00 |
226 | 2720017683 | 3.88 | 67.42 | 0.94 | 27.760 | 26.616 | 31.65 | 50.56 |
227 | 2720017678 | 3.81 | 66.74 | 0.92 | 28.530 | 26.688 | 31.02 | 50.82 |
228 rows × 8 columns
In [4]:
cols = [''.join([y for y in x if 'Unnamed' not in y]) for x in data_0102.columns]
cols
Out[4]:
['化验编号', '氢Had(%)', '碳Cad(%)', '氮Nad(%)', '氧Oad(%)', '弹筒发热量Qb,adMJ/kg', '挥发分Vad(%)', '固定炭Fcad(%)']
In [5]:
data_0102.columns = cols
In [6]:
data_0102
Out[6]:
化验编号 | 氢Had(%) | 碳Cad(%) | 氮Nad(%) | 氧Oad(%) | 弹筒发热量Qb,adMJ/kg | 挥发分Vad(%) | 固定炭Fcad(%) | |
---|---|---|---|---|---|---|---|---|
0 | 2720110529 | 3.93 | 70.18 | 0.81 | 25.079 | 27.820 | 32.06 | 55.68 |
1 | 2720096883 | 3.78 | 68.93 | 0.77 | 26.512 | 27.404 | 29.96 | 54.71 |
2 | 2720109084 | 3.48 | 69.60 | 0.76 | 26.148 | 27.578 | 29.31 | 55.99 |
3 | 2720084708 | 3.47 | 66.71 | 0.76 | 29.055 | 26.338 | 28.58 | 53.87 |
4 | 2720062721 | 3.87 | 68.78 | 0.80 | 26.542 | 27.280 | 29.97 | 54.78 |
... | ... | ... | ... | ... | ... | ... | ... | ... |
223 | 2720030490 | 4.12 | 68.85 | 0.97 | 26.055 | 27.864 | 32.94 | 51.89 |
224 | 2720028633 | 3.97 | 67.04 | 0.94 | 28.043 | 27.368 | 31.88 | 51.38 |
225 | 2720028634 | 4.12 | 68.42 | 0.96 | 26.493 | 27.886 | 33.16 | 52.00 |
226 | 2720017683 | 3.88 | 67.42 | 0.94 | 27.760 | 26.616 | 31.65 | 50.56 |
227 | 2720017678 | 3.81 | 66.74 | 0.92 | 28.530 | 26.688 | 31.02 | 50.82 |
228 rows × 8 columns
In [7]:
out_cols = ['挥发分Vad(%)', '固定炭Fcad(%)']
In [8]:
out_cols
Out[8]:
['挥发分Vad(%)', '固定炭Fcad(%)']
In [9]:
data = data_0102.copy()
In [10]:
# Keep only rows where both targets are present; zero-fill remaining feature gaps
train_data = data.dropna(subset=out_cols).fillna(0)
In [11]:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow.keras.backend as K
2024-01-04 16:49:03.492957: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0
In [12]:
tf.test.is_gpu_available()
WARNING:tensorflow:From /tmp/ipykernel_45930/337460670.py:1: is_gpu_available (from tensorflow.python.framework.test_util) is deprecated and will be removed in a future version. Instructions for updating: Use `tf.config.list_physical_devices('GPU')` instead.
Out[12]:
False
2024-01-04 16:49:04.396035: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX512F FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-01-04 16:49:04.407586: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcuda.so.1
2024-01-04 16:49:04.465739: E tensorflow/stream_executor/cuda/cuda_driver.cc:328] failed call to cuInit: CUDA_ERROR_INVALID_DEVICE: invalid device ordinal
2024-01-04 16:49:04.465795: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: zhaojh-yv621
2024-01-04 16:49:04.465807: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: zhaojh-yv621
2024-01-04 16:49:04.466010: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:200] libcuda reported version is: 520.61.5
2024-01-04 16:49:04.466041: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:204] kernel reported version is: 520.61.5
2024-01-04 16:49:04.466045: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:310] kernel version seems to match DSO: 520.61.5
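The warning above names the replacement API. A minimal non-deprecated check (a sketch, not part of the original run) would be:

gpus = tf.config.list_physical_devices('GPU')
print(len(gpus), 'GPU(s) visible:', gpus)  # empty list here, since cuInit failed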
In [13]:
class TransformerBlock(layers.Layer):
    def __init__(self, embed_dim, num_heads, ff_dim, name, rate=0.1):
        super().__init__()
        self.att = layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim, name=name)
        self.ffn = keras.Sequential(
            [layers.Dense(ff_dim, activation="relu"), layers.Dense(embed_dim)]
        )
        self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = layers.Dropout(rate)
        self.dropout2 = layers.Dropout(rate)

    def call(self, inputs, training):
        # Self-attention with residual connection and layer norm
        attn_output = self.att(inputs, inputs)
        attn_output = self.dropout1(attn_output, training=training)
        out1 = self.layernorm1(inputs + attn_output)
        # Position-wise feed-forward with residual connection and layer norm
        ffn_output = self.ffn(out1)
        ffn_output = self.dropout2(ffn_output, training=training)
        return self.layernorm2(out1 + ffn_output)
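A quick shape sanity check for the block (an illustrative sketch; the dimensions mirror how the block is used in get_prediction_model below). Because both sublayers are residual, the output shape equals the input shape:

demo_block = TransformerBlock(embed_dim=128, num_heads=3, ff_dim=16, name='demo_attn')
x = tf.random.normal((4, 1, 128))            # (batch, seq_len, embed_dim)
print(demo_block(x, training=False).shape)   # (4, 1, 128): shape preserved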
In [14]:
from tensorflow.keras import Model
In [15]:
from tensorflow.keras.initializers import Constant
In [16]:
# Custom loss layer: learns one log-variance per task and adds the
# combined multi-task loss to the model via add_loss
class CustomMultiLossLayer(layers.Layer):
    def __init__(self, nb_outputs=2, **kwargs):
        self.nb_outputs = nb_outputs
        self.is_placeholder = True
        super(CustomMultiLossLayer, self).__init__(**kwargs)

    def build(self, input_shape=None):
        # initialise one trainable log_var per output
        self.log_vars = []
        for i in range(self.nb_outputs):
            self.log_vars += [self.add_weight(name='log_var' + str(i), shape=(1,),
                                              initializer=tf.initializers.he_normal(),
                                              trainable=True)]
        super(CustomMultiLossLayer, self).build(input_shape)

    def multi_loss(self, ys_true, ys_pred):
        assert len(ys_true) == self.nb_outputs and len(ys_pred) == self.nb_outputs
        loss = 0
        for y_true, y_pred, log_var in zip(ys_true, ys_pred, self.log_vars):
            mse = (y_true - y_pred) ** 2.
            pre = K.exp(-log_var[0])  # precision = exp(-log sigma^2)
            loss += tf.abs(tf.reduce_logsumexp(pre * mse + log_var[0], axis=-1))
        return K.mean(loss)

    def call(self, inputs):
        ys_true = inputs[:self.nb_outputs]
        ys_pred = inputs[self.nb_outputs:]
        loss = self.multi_loss(ys_true, ys_pred)
        self.add_loss(loss, inputs=inputs)
        # We won't actually use the output.
        return K.concatenate(inputs, -1)
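For reference, this layer is modeled on the homoscedastic-uncertainty weighting of Kendall et al. (2018): each task $i$ gets a learned $s_i = \log \sigma_i^2$, and the combined objective is

$$\mathcal{L} = \sum_i \left( e^{-s_i}\,(y_i - \hat{y}_i)^2 + s_i \right)$$

so tasks with high predicted uncertainty are down-weighted, while the $+s_i$ term keeps $\sigma_i$ from growing without bound. The cell above departs from this textbook form by wrapping each task term in reduce_logsumexp and abs.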
In [17]:
num_heads, ff_dim = 3, 16
In [18]:
def get_prediction_model():
    def build_output(out, out_name):
        # Per-task head: its own transformer block, pooling and dense layer
        self_block = TransformerBlock(64, num_heads, ff_dim, name=f'{out_name}_attn')
        out = self_block(out)
        out = layers.GlobalAveragePooling1D()(out)
        out = layers.Dropout(0.1)(out)
        out = layers.Dense(32, activation="relu")(out)
        # out = layers.Dense(1, name=out_name, activation="sigmoid")(out)
        return out

    inputs = layers.Input(shape=(1, len(feature_cols)), name='input')
    x = layers.Conv1D(filters=64, kernel_size=1, activation='relu')(inputs)
    # x = layers.Dropout(rate=0.1)(x)
    lstm_out = layers.Bidirectional(layers.LSTM(units=64, return_sequences=True))(x)
    lstm_out = layers.Dense(128, activation='relu')(lstm_out)
    # Shared trunk: transformer block over the BiLSTM features
    transformer_block = TransformerBlock(128, num_heads, ff_dim, name='first_attn')
    out = transformer_block(lstm_out)
    out = layers.GlobalAveragePooling1D()(out)
    out = layers.Dropout(0.1)(out)
    out = layers.Dense(64, activation='relu')(out)
    out = K.expand_dims(out, axis=1)
    # Two task-specific heads: volatile matter (vad) and fixed carbon (fcad)
    bet = build_output(out, 'vad')
    mesco = build_output(out, 'fcad')
    bet = layers.Dense(1, activation='sigmoid', name='vad')(bet)
    mesco = layers.Dense(1, activation='sigmoid', name='fcad')(mesco)
    model = Model(inputs=[inputs], outputs=[bet, mesco])
    return model
In [30]:
def get_trainable_model(prediction_model):
    inputs = layers.Input(shape=(1, len(feature_cols)), name='input')
    bet, mesco = prediction_model(inputs)
    # Ground-truth values enter as extra inputs so the loss layer can see them
    bet_real = layers.Input(shape=(1,), name='vad_real')
    mesco_real = layers.Input(shape=(1,), name='fcad_real')
    out = CustomMultiLossLayer(nb_outputs=2)([bet_real, mesco_real, bet, mesco])
    return Model([inputs, bet_real, mesco_real], out)
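A note on this pattern (summarizing what the later cells do): because CustomMultiLossLayer attaches the joint loss with add_loss, the ground-truth arrays travel into the trainable model as extra inputs, the model is compiled with loss=None, and fit receives [X, Y[0], Y[1]] with no separate target argument (cell In [40]). Only prediction_model, which shares the trained weights, is used for inference afterwards.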
In [20]:
maxs = train_data.max()
mins = train_data.min()
for col in train_data.columns:
    if maxs[col] - mins[col] == 0:
        continue  # constant column: skip to avoid division by zero
    train_data[col] = (train_data[col] - mins[col]) / (maxs[col] - mins[col])
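Min-max scaling maps each column to x' = (x - min) / (max - min). The maxs and mins Series therefore have to be kept around: cell In [45] below inverts the transform with x = x' * (max - min) + min to bring predictions back into physical units.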
In [21]:
train_data
Out[21]:
化验编号 | 氢Had(%) | 碳Cad(%) | 氮Nad(%) | 氧Oad(%) | 弹筒发热量Qb,adMJ/kg | 挥发分Vad(%) | 固定炭Fcad(%) | |
---|---|---|---|---|---|---|---|---|
0 | 0.996547 | 0.773973 | 0.835414 | 0.456522 | 0.171463 | 0.811249 | 0.847737 | 0.828147 |
1 | 0.851118 | 0.671233 | 0.799943 | 0.369565 | 0.210254 | 0.782038 | 0.674897 | 0.794606 |
2 | 0.981147 | 0.465753 | 0.818956 | 0.347826 | 0.200401 | 0.794256 | 0.621399 | 0.838866 |
3 | 0.721367 | 0.458904 | 0.736947 | 0.347826 | 0.279094 | 0.707183 | 0.561317 | 0.765560 |
4 | 0.487046 | 0.732877 | 0.795687 | 0.434783 | 0.211066 | 0.773331 | 0.675720 | 0.797026 |
... | ... | ... | ... | ... | ... | ... | ... | ... |
223 | 0.143553 | 0.904110 | 0.797673 | 0.804348 | 0.197883 | 0.814339 | 0.920165 | 0.697095 |
224 | 0.123762 | 0.801370 | 0.746311 | 0.739130 | 0.251699 | 0.779510 | 0.832922 | 0.679461 |
225 | 0.123773 | 0.904110 | 0.785471 | 0.782609 | 0.209740 | 0.815884 | 0.938272 | 0.700899 |
226 | 0.007066 | 0.739726 | 0.757094 | 0.739130 | 0.244038 | 0.726705 | 0.813992 | 0.651107 |
227 | 0.007012 | 0.691781 | 0.737798 | 0.695652 | 0.264882 | 0.731760 | 0.762140 | 0.660097 |
228 rows × 8 columns
In [22]:
# feature_cols = [x for x in train_data.columns if x not in out_cols and '第二次' not in x]  # variant that also drops "second-measurement" columns
feature_cols = [x for x in train_data.columns if x not in out_cols]
use_cols = feature_cols + out_cols
In [23]:
use_data = train_data.copy()
for col in use_cols:
    use_data[col] = use_data[col].astype('float32')
In [24]:
train, valid = train_test_split(use_data[use_cols], test_size=0.3, random_state=42, shuffle=True)
valid, test = train_test_split(valid, test_size=0.3, random_state=42, shuffle=True)
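Since sklearn rounds the test fraction up, the two-stage split gives ceil(228 × 0.3) = 69 held-out rows, of which ceil(69 × 0.3) = 21 form the test set and 48 the validation set, leaving 159 rows for training; the 21 test rows match the 21 predictions in Out[41] below.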
In [31]:
prediction_model = get_prediction_model() trainable_model = get_trainable_model(prediction_model)
In [32]:
prediction_model.summary()
Model: "model_3" __________________________________________________________________________________________________ Layer (type) Output Shape Param # Connected to ================================================================================================== input (InputLayer) [(None, 1, 6)] 0 __________________________________________________________________________________________________ conv1d_3 (Conv1D) (None, 1, 64) 448 input[0][0] __________________________________________________________________________________________________ bidirectional_3 (Bidirectional) (None, 1, 128) 66048 conv1d_3[0][0] __________________________________________________________________________________________________ dense_30 (Dense) (None, 1, 128) 16512 bidirectional_3[0][0] __________________________________________________________________________________________________ transformer_block_9 (Transforme (None, 1, 128) 202640 dense_30[0][0] __________________________________________________________________________________________________ global_average_pooling1d_9 (Glo (None, 128) 0 transformer_block_9[0][0] __________________________________________________________________________________________________ dropout_29 (Dropout) (None, 128) 0 global_average_pooling1d_9[0][0] __________________________________________________________________________________________________ dense_33 (Dense) (None, 64) 8256 dropout_29[0][0] __________________________________________________________________________________________________ tf.expand_dims_3 (TFOpLambda) (None, 1, 64) 0 dense_33[0][0] __________________________________________________________________________________________________ transformer_block_10 (Transform (None, 1, 64) 52176 tf.expand_dims_3[0][0] __________________________________________________________________________________________________ transformer_block_11 (Transform (None, 1, 64) 52176 tf.expand_dims_3[0][0] __________________________________________________________________________________________________ global_average_pooling1d_10 (Gl (None, 64) 0 transformer_block_10[0][0] __________________________________________________________________________________________________ global_average_pooling1d_11 (Gl (None, 64) 0 transformer_block_11[0][0] __________________________________________________________________________________________________ dropout_32 (Dropout) (None, 64) 0 global_average_pooling1d_10[0][0] __________________________________________________________________________________________________ dropout_35 (Dropout) (None, 64) 0 global_average_pooling1d_11[0][0] __________________________________________________________________________________________________ dense_36 (Dense) (None, 32) 2080 dropout_32[0][0] __________________________________________________________________________________________________ dense_39 (Dense) (None, 32) 2080 dropout_35[0][0] __________________________________________________________________________________________________ vad (Dense) (None, 1) 33 dense_36[0][0] __________________________________________________________________________________________________ fcad (Dense) (None, 1) 33 dense_39[0][0] ================================================================================================== Total params: 402,482 Trainable params: 402,482 Non-trainable params: 0 __________________________________________________________________________________________________
In [33]:
from tensorflow.keras import optimizers
from tensorflow.python.keras.utils.vis_utils import plot_model
In [34]:
X = np.expand_dims(train[feature_cols].values, axis=1)
Y = [x for x in train[out_cols].values.T]
Y_valid = [x for x in valid[out_cols].values.T]
In [35]:
from tensorflow.keras.callbacks import ReduceLROnPlateau

reduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=10, mode='auto')
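The callback keeps its Keras defaults; spelled out explicitly (the values below are the documented defaults, shown only for clarity), it is equivalent to:

reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10,
                              mode='auto', min_delta=0.0001, cooldown=0, min_lr=0)

So after 10 epochs without val_loss improvement the learning rate is multiplied by 0.1, which is consistent with the loss plateau visible from roughly epoch 80 onward in the log below.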
In [40]:
trainable_model.compile(optimizer='adam', loss=None)
hist = trainable_model.fit([X, Y[0], Y[1]], epochs=120, batch_size=8, verbose=1,
                           validation_data=[np.expand_dims(valid[feature_cols].values, axis=1),
                                            Y_valid[0], Y_valid[1]],
                           callbacks=[reduce_lr])
Epoch 1/120: 20/20 - 5s 59ms/step - loss: 1.8316 - val_loss: 1.8096
Epoch 2/120: 20/20 - 0s 25ms/step - loss: 1.7903 - val_loss: 1.7691
Epoch 3/120: 20/20 - 0s 26ms/step - loss: 1.7506 - val_loss: 1.7307
Epoch 4/120: 20/20 - 0s 26ms/step - loss: 1.7110 - val_loss: 1.6914
Epoch 5/120: 20/20 - 0s 21ms/step - loss: 1.6711 - val_loss: 1.6497
Epoch 6/120: 20/20 - 0s 24ms/step - loss: 1.6314 - val_loss: 1.6098
Epoch 7/120: 20/20 - 0s 24ms/step - loss: 1.5909 - val_loss: 1.5695
Epoch 8/120: 20/20 - 0s 24ms/step - loss: 1.5506 - val_loss: 1.5296
Epoch 9/120: 20/20 - 0s 24ms/step - loss: 1.5109 - val_loss: 1.4891
Epoch 10/120: 20/20 - 0s 25ms/step - loss: 1.4706 - val_loss: 1.4500
Epoch 11/120: 20/20 - 0s 25ms/step - loss: 1.4306 - val_loss: 1.4104
Epoch 12/120: 20/20 - 0s 22ms/step - loss: 1.3907 - val_loss: 1.3746
Epoch 13/120: 20/20 - 0s 23ms/step - loss: 1.3508 - val_loss: 1.3296
Epoch 14/120: 20/20 - 0s 26ms/step - loss: 1.3106 - val_loss: 1.2895
Epoch 15/120: 20/20 - 0s 26ms/step - loss: 1.2706 - val_loss: 1.2515
Epoch 16/120: 20/20 - 0s 22ms/step - loss: 1.2315 - val_loss: 1.2104
Epoch 17/120: 20/20 - 0s 23ms/step - loss: 1.1908 - val_loss: 1.1702
Epoch 18/120: 20/20 - 0s 25ms/step - loss: 1.1508 - val_loss: 1.1320
Epoch 19/120: 20/20 - 0s 26ms/step - loss: 1.1114 - val_loss: 1.0917
Epoch 20/120: 20/20 - 0s 24ms/step - loss: 1.0718 - val_loss: 1.0513
Epoch 21/120: 20/20 - 0s 23ms/step - loss: 1.0315 - val_loss: 1.0178
Epoch 22/120: 20/20 - 0s 24ms/step - loss: 0.9918 - val_loss: 0.9704
Epoch 23/120: 20/20 - 0s 23ms/step - loss: 0.9511 - val_loss: 0.9321
Epoch 24/120: 20/20 - 0s 26ms/step - loss: 0.9114 - val_loss: 0.8913
Epoch 25/120: 20/20 - 0s 26ms/step - loss: 0.8718 - val_loss: 0.8520
Epoch 26/120: 20/20 - 0s 25ms/step - loss: 0.8314 - val_loss: 0.8124
Epoch 27/120: 20/20 - 0s 26ms/step - loss: 0.7922 - val_loss: 0.7727
Epoch 28/120: 20/20 - 0s 25ms/step - loss: 0.7519 - val_loss: 0.7307
Epoch 29/120: 20/20 - 0s 26ms/step - loss: 0.7119 - val_loss: 0.6932
Epoch 30/120: 20/20 - 0s 22ms/step - loss: 0.6720 - val_loss: 0.6531
Epoch 31/120: 20/20 - 0s 23ms/step - loss: 0.6336 - val_loss: 0.6155
Epoch 32/120: 20/20 - 1s 26ms/step - loss: 0.5931 - val_loss: 0.5738
Epoch 33/120: 20/20 - 0s 26ms/step - loss: 0.5517 - val_loss: 0.5324
Epoch 34/120: 20/20 - 0s 26ms/step - loss: 0.5135 - val_loss: 0.4943
Epoch 35/120: 20/20 - 0s 21ms/step - loss: 0.4724 - val_loss: 0.4602
Epoch 36/120: 20/20 - 0s 23ms/step - loss: 0.4326 - val_loss: 0.4126
Epoch 37/120: 20/20 - 0s 26ms/step - loss: 0.3947 - val_loss: 0.3758
Epoch 38/120: 20/20 - 0s 25ms/step - loss: 0.3558 - val_loss: 0.3350
Epoch 39/120: 20/20 - 0s 22ms/step - loss: 0.3154 - val_loss: 0.3031
Epoch 40/120: 20/20 - 0s 23ms/step - loss: 0.2771 - val_loss: 0.2592
Epoch 41/120: 20/20 - 0s 25ms/step - loss: 0.2459 - val_loss: 0.2370
Epoch 42/120: 20/20 - 1s 27ms/step - loss: 0.2267 - val_loss: 0.2210
Epoch 43/120: 20/20 - 0s 24ms/step - loss: 0.2050 - val_loss: 0.1947
Epoch 44/120: 20/20 - 0s 26ms/step - loss: 0.1840 - val_loss: 0.1728
Epoch 45/120: 20/20 - 0s 21ms/step - loss: 0.1628 - val_loss: 0.1533
Epoch 46/120: 20/20 - 0s 23ms/step - loss: 0.1430 - val_loss: 0.1322
Epoch 47/120: 20/20 - 0s 23ms/step - loss: 0.1230 - val_loss: 0.1147
Epoch 48/120: 20/20 - 1s 24ms/step - loss: 0.1026 - val_loss: 0.0940
Epoch 49/120: 20/20 - 0s 25ms/step - loss: 0.0830 - val_loss: 0.0750
Epoch 50/120: 20/20 - 0s 25ms/step - loss: 0.0639 - val_loss: 0.0529
Epoch 51/120: 20/20 - 0s 22ms/step - loss: 0.0436 - val_loss: 0.0352
Epoch 52/120: 20/20 - 0s 24ms/step - loss: 0.0241 - val_loss: 0.0162
Epoch 53/120: 20/20 - 0s 23ms/step - loss: 0.0092 - val_loss: 0.0084
Epoch 54/120: 20/20 - 0s 26ms/step - loss: 0.0067 - val_loss: 0.0074
Epoch 55/120: 20/20 - 0s 26ms/step - loss: 0.0080 - val_loss: 0.0071
Epoch 56/120: 20/20 - 0s 26ms/step - loss: 0.0070 - val_loss: 0.0063
Epoch 57/120: 20/20 - 0s 25ms/step - loss: 0.0062 - val_loss: 0.0076
Epoch 58/120: 20/20 - 0s 25ms/step - loss: 0.0056 - val_loss: 0.0048
Epoch 59/120: 20/20 - 0s 25ms/step - loss: 0.0050 - val_loss: 0.0071
Epoch 60/120: 20/20 - 0s 25ms/step - loss: 0.0057 - val_loss: 0.0054
Epoch 61/120: 20/20 - 0s 23ms/step - loss: 0.0044 - val_loss: 0.0092
Epoch 62/120: 20/20 - 1s 26ms/step - loss: 0.0068 - val_loss: 0.0070
Epoch 63/120: 20/20 - 1s 24ms/step - loss: 0.0059 - val_loss: 0.0065
Epoch 64/120: 20/20 - 0s 23ms/step - loss: 0.0055 - val_loss: 0.0060
Epoch 65/120: 20/20 - 0s 26ms/step - loss: 0.0053 - val_loss: 0.0056
Epoch 66/120: 20/20 - 0s 25ms/step - loss: 0.0058 - val_loss: 0.0077
Epoch 67/120: 20/20 - 0s 26ms/step - loss: 0.0051 - val_loss: 0.0054
Epoch 68/120: 20/20 - 0s 21ms/step - loss: 0.0047 - val_loss: 0.0048
Epoch 69/120: 20/20 - 0s 23ms/step - loss: 0.0041 - val_loss: 0.0048
Epoch 70/120: 20/20 - 0s 26ms/step - loss: 0.0037 - val_loss: 0.0049
Epoch 71/120: 20/20 - 0s 25ms/step - loss: 0.0041 - val_loss: 0.0049
Epoch 72/120: 20/20 - 0s 21ms/step - loss: 0.0036 - val_loss: 0.0049
Epoch 73/120: 20/20 - 1s 24ms/step - loss: 0.0038 - val_loss: 0.0048
Epoch 74/120: 20/20 - 0s 23ms/step - loss: 0.0037 - val_loss: 0.0050
Epoch 75/120: 20/20 - 0s 26ms/step - loss: 0.0037 - val_loss: 0.0048
Epoch 76/120: 20/20 - 0s 26ms/step - loss: 0.0038 - val_loss: 0.0048
Epoch 77/120: 20/20 - 0s 21ms/step - loss: 0.0037 - val_loss: 0.0048
Epoch 78/120: 20/20 - 0s 23ms/step - loss: 0.0038 - val_loss: 0.0048
Epoch 79/120: 20/20 - 0s 26ms/step - loss: 0.0036 - val_loss: 0.0048
Epoch 80/120: 20/20 - 0s 22ms/step - loss: 0.0034 - val_loss: 0.0048
Epoch 81/120: 20/20 - 0s 23ms/step - loss: 0.0034 - val_loss: 0.0047
Epoch 82/120: 20/20 - 0s 23ms/step - loss: 0.0037 - val_loss: 0.0047
Epoch 83/120: 20/20 - 0s 25ms/step - loss: 0.0033 - val_loss: 0.0047
Epoch 84/120: 20/20 - 0s 22ms/step - loss: 0.0037 - val_loss: 0.0047
Epoch 85/120: 20/20 - 0s 24ms/step - loss: 0.0036 - val_loss: 0.0047
Epoch 86/120: 20/20 - 0s 23ms/step - loss: 0.0034 - val_loss: 0.0047
Epoch 87/120: 20/20 - 0s 25ms/step - loss: 0.0034 - val_loss: 0.0047
Epoch 88/120: 20/20 - 0s 26ms/step - loss: 0.0034 - val_loss: 0.0047
Epoch 89/120: 20/20 - 0s 21ms/step - loss: 0.0036 - val_loss: 0.0047
Epoch 90/120: 20/20 - 0s 23ms/step - loss: 0.0036 - val_loss: 0.0047
Epoch 91/120: 20/20 - 0s 23ms/step - loss: 0.0036 - val_loss: 0.0047
Epoch 92/120: 20/20 - 0s 26ms/step - loss: 0.0034 - val_loss: 0.0047
Epoch 93/120: 20/20 - 0s 25ms/step - loss: 0.0034 - val_loss: 0.0047
Epoch 94/120: 20/20 - 0s 26ms/step - loss: 0.0037 - val_loss: 0.0047
Epoch 95/120: 20/20 - 0s 21ms/step - loss: 0.0035 - val_loss: 0.0047
Epoch 96/120: 20/20 - 0s 23ms/step - loss: 0.0032 - val_loss: 0.0047
Epoch 97/120: 20/20 - 0s 25ms/step - loss: 0.0035 - val_loss: 0.0047
Epoch 98/120: 20/20 - 0s 21ms/step - loss: 0.0038 - val_loss: 0.0047
Epoch 99/120: 20/20 - 0s 23ms/step - loss: 0.0033 - val_loss: 0.0047
Epoch 100/120: 20/20 - 0s 23ms/step - loss: 0.0036 - val_loss: 0.0047
Epoch 101/120: 20/20 - 1s 26ms/step - loss: 0.0033 - val_loss: 0.0047
Epoch 102/120: 20/20 - 0s 25ms/step - loss: 0.0035 - val_loss: 0.0047
Epoch 103/120: 20/20 - 0s 26ms/step - loss: 0.0035 - val_loss: 0.0047
Epoch 104/120: 20/20 - 0s 22ms/step - loss: 0.0035 - val_loss: 0.0047
Epoch 105/120: 20/20 - 0s 23ms/step - loss: 0.0036 - val_loss: 0.0047
Epoch 106/120: 20/20 - 0s 26ms/step - loss: 0.0034 - val_loss: 0.0047
Epoch 107/120: 20/20 - 0s 25ms/step - loss: 0.0036 - val_loss: 0.0047
Epoch 108/120: 20/20 - 0s 21ms/step - loss: 0.0034 - val_loss: 0.0047
Epoch 109/120: 20/20 - 1s 24ms/step - loss: 0.0037 - val_loss: 0.0047
Epoch 110/120: 20/20 - 0s 24ms/step - loss: 0.0038 - val_loss: 0.0047
Epoch 111/120: 20/20 - 0s 26ms/step - loss: 0.0036 - val_loss: 0.0047
Epoch 112/120: 20/20 - 0s 26ms/step - loss: 0.0035 - val_loss: 0.0047
Epoch 113/120: 20/20 - 0s 26ms/step - loss: 0.0037 - val_loss: 0.0047
Epoch 114/120: 20/20 - 0s 20ms/step - loss: 0.0035 - val_loss: 0.0047
Epoch 115/120: 20/20 - 0s 26ms/step - loss: 0.0033 - val_loss: 0.0047
Epoch 116/120: 20/20 - 0s 21ms/step - loss: 0.0036 - val_loss: 0.0047
Epoch 117/120: 20/20 - 0s 25ms/step - loss: 0.0032 - val_loss: 0.0047
Epoch 118/120: 20/20 - 0s 26ms/step - loss: 0.0036 - val_loss: 0.0047
Epoch 119/120: 20/20 - 0s 25ms/step - loss: 0.0037 - val_loss: 0.0047
Epoch 120/120: 20/20 - 0s 21ms/step - loss: 0.0036 - val_loss: 0.0047
In [41]:
rst = prediction_model.predict(np.expand_dims(test[feature_cols], axis=1))
rst
Out[41]:
[array([[0.73740077], [0.89292204], [0.7599046 ], [0.67802393], [0.6815233 ],
        [0.88627005], [0.6121343 ], [0.7072234 ], [0.8561135 ], [0.52762157],
        [0.8325021 ], [0.50241977], [0.8242289 ], [0.68957335], [0.6980361 ],
        [0.82116604], [0.8566438 ], [0.53687835], [0.56832707], [0.78476715],
        [0.85638577]], dtype=float32),
 array([[0.68600863], [0.78454906], [0.8179163 ], [0.94351083], [0.86383885],
        [0.69705516], [0.6913491 ], [0.80277354], [0.93557894], [0.82278305],
        [0.82674253], [0.93518937], [0.8094449 ], [0.9206344 ], [0.7747319 ],
        [0.9137207 ], [0.9491073 ], [0.93225   ], [0.6185102 ], [0.8867341 ],
        [0.82890105]], dtype=float32)]
In [42]:
[np.exp(K.get_value(log_var[0]))**0.5 for log_var in trainable_model.layers[-1].log_vars]
Out[42]:
[0.9991559102070927, 0.9998196796918477]
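Since each log_var stores s_i = log σ_i², the expression above recovers σ_i = (e^{s_i})^{1/2}. Both values sitting near 1.0 means s_i ≈ 0, so the learned task weights e^{-s_i} are close to 1: the two outputs ended up weighted almost equally in the joint loss.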
In [43]:
pred_rst = pd.DataFrame.from_records(np.squeeze(np.asarray(rst), axis=2).T, columns=out_cols)
In [44]:
real_rst = test[out_cols].copy()
In [45]:
for col in out_cols:
    pred_rst[col] = pred_rst[col] * (maxs[col] - mins[col]) + mins[col]
    real_rst[col] = real_rst[col] * (maxs[col] - mins[col]) + mins[col]
In [46]:
real_rst.columns
Out[46]:
Index(['挥发分Vad(%)', '固定炭Fcad(%)'], dtype='object')
In [47]:
y_pred_vad = pred_rst['挥发分Vad(%)'].values.reshape(-1,)
y_pred_fcad = pred_rst['固定炭Fcad(%)'].values.reshape(-1,)
y_true_vad = real_rst['挥发分Vad(%)'].values.reshape(-1,)
y_true_fcad = real_rst['固定炭Fcad(%)'].values.reshape(-1,)
In [48]:
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score, mean_absolute_percentage_error
In [49]:
def print_eva(y_true, y_pred, tp):
    MSE = mean_squared_error(y_true, y_pred)
    RMSE = np.sqrt(MSE)
    MAE = mean_absolute_error(y_true, y_pred)
    MAPE = mean_absolute_percentage_error(y_true, y_pred)
    R_2 = r2_score(y_true, y_pred)
    print(f"COL: {tp}, MSE: {format(MSE, '.2E')}", end=', ')
    print(f'RMSE: {round(RMSE, 4)}', end=', ')
    print(f'MAPE: {round(MAPE * 100, 2)} %', end=', ')  # round after converting to percent
    print(f'MAE: {round(MAE, 4)}', end=', ')
    print(f'R_2: {round(R_2, 4)}')
    return [MSE, RMSE, MAE, MAPE, R_2]
In [51]:
vad_eva = print_eva(y_true_vad, y_pred_vad, tp='挥发分Vad')
fcad_eva = print_eva(y_true_fcad, y_pred_fcad, tp='固定炭Fcad')
COL: 挥发分Vad, MSE: 3.35E-01, RMSE: 0.5791, MAPE: 1.64 %, MAE: 0.5041, R_2: 0.8698
COL: 固定炭Fcad, MSE: 1.11E+00, RMSE: 1.0549, MAPE: 1.5 %, MAE: 0.8137, R_2: 0.876