In [1]:
import os
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'  # was misspelled 'PCB_BUS_ID', which CUDA silently ignores
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
In [2]:
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# The next lines configure matplotlib to render Chinese axis labels
from pylab import mpl
mpl.rcParams["font.sans-serif"] = ["SimHei"]  # use SimHei for CJK glyphs
mpl.rcParams["axes.unicode_minus"] = False    # render minus signs correctly with this font
In [3]:
data = pd.read_csv('./data/20240102/train_data.csv')
In [4]:
out_cols = [x for x in data.columns if '碳材料' in x]
In [5]:
out_cols
Out[5]:
['碳材料结构特征-比表面积', '碳材料结构特征-总孔体积', '碳材料结构特征-微孔体积', '碳材料结构特征-平均孔径']
In [6]:
train_data = data.dropna(subset=out_cols).fillna(0)
In [7]:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow.keras.backend as K
2024-01-04 16:22:35.199530: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0
In [8]:
tf.test.is_gpu_available()
WARNING:tensorflow:From /tmp/ipykernel_44444/337460670.py:1: is_gpu_available (from tensorflow.python.framework.test_util) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.config.list_physical_devices('GPU')` instead.
2024-01-04 16:22:36.097926: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX512F FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-01-04 16:22:36.142225: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcuda.so.1
2024-01-04 16:22:36.232036: E tensorflow/stream_executor/cuda/cuda_driver.cc:328] failed call to cuInit: CUDA_ERROR_INVALID_DEVICE: invalid device ordinal
2024-01-04 16:22:36.232061: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: zhaojh-yv621
2024-01-04 16:22:36.232065: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: zhaojh-yv621
2024-01-04 16:22:36.232185: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:200] libcuda reported version is: 520.61.5
2024-01-04 16:22:36.232204: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:204] kernel reported version is: 520.61.5
2024-01-04 16:22:36.232207: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:310] kernel version seems to match DSO: 520.61.5
Out[8]:
False
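The warning above already names the replacement API. A minimal check with the non-deprecated call, assuming the same TensorFlow 2.x environment:

# Preferred TF2 API: returns the list of visible GPU devices
# (empty here, since cuInit failed with CUDA_ERROR_INVALID_DEVICE).
gpus = tf.config.list_physical_devices('GPU')
print(gpus)           # e.g. [] when no GPU is usable
print(len(gpus) > 0)  # boolean equivalent of the deprecated call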
In [9]:
class TransformerBlock(layers.Layer):
    """Post-norm Transformer encoder block: self-attention + feed-forward, each with residual and LayerNorm."""
    def __init__(self, embed_dim, num_heads, ff_dim, name, rate=0.1):
        super().__init__()
        self.att = layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim, name=name)
        self.ffn = keras.Sequential(
            [layers.Dense(ff_dim, activation="relu"), layers.Dense(embed_dim)]
        )
        self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = layers.Dropout(rate)
        self.dropout2 = layers.Dropout(rate)

    def call(self, inputs, training):
        attn_output = self.att(inputs, inputs)                       # self-attention
        attn_output = self.dropout1(attn_output, training=training)
        out1 = self.layernorm1(inputs + attn_output)                 # residual + norm
        ffn_output = self.ffn(out1)
        ffn_output = self.dropout2(ffn_output, training=training)
        return self.layernorm2(out1 + ffn_output)                    # residual + norm
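The block maps (batch, seq_len, embed_dim) to the same shape. A quick smoke test with a made-up tensor (not part of the original run; embed_dim must match the last input axis):

# Hypothetical shape check for the block defined above
block = TransformerBlock(embed_dim=128, num_heads=1, ff_dim=12, name='demo_attn')
dummy = tf.random.normal((2, 1, 128))
print(block(dummy, training=False).shape)  # (2, 1, 128)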
In [10]:
from tensorflow.keras import Model
In [11]:
from tensorflow.keras.initializers import Constant
In [12]:
# Custom loss layer: learns one log-variance weight per task
# (homoscedastic-uncertainty multi-task weighting, after Kendall et al. 2018)
class CustomMultiLossLayer(layers.Layer):
    def __init__(self, nb_outputs=2, **kwargs):
        self.nb_outputs = nb_outputs
        self.is_placeholder = True
        super(CustomMultiLossLayer, self).__init__(**kwargs)

    def build(self, input_shape=None):
        # One trainable log-variance per output
        self.log_vars = []
        for i in range(self.nb_outputs):
            self.log_vars += [self.add_weight(name='log_var' + str(i), shape=(1,),
                                              initializer=tf.initializers.he_normal(),
                                              trainable=True)]
        super(CustomMultiLossLayer, self).build(input_shape)

    def multi_loss(self, ys_true, ys_pred):
        assert len(ys_true) == self.nb_outputs and len(ys_pred) == self.nb_outputs
        loss = 0
        for y_true, y_pred, log_var in zip(ys_true, ys_pred, self.log_vars):
            mse = (y_true - y_pred) ** 2.
            pre = K.exp(-log_var[0])  # precision = 1 / sigma^2
            loss += tf.abs(tf.reduce_logsumexp(pre * mse + log_var[0], axis=-1))
        return K.mean(loss)

    def call(self, inputs):
        ys_true = inputs[:self.nb_outputs]
        ys_pred = inputs[self.nb_outputs:]
        loss = self.multi_loss(ys_true, ys_pred)
        self.add_loss(loss, inputs=inputs)
        # The concatenated output is never used; training relies on the added loss.
        return K.concatenate(inputs, -1)
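For each task the layer learns s = log σ² and scales that task's squared error by exp(−s) while adding s as a penalty, so noisier tasks are down-weighted automatically (the reduce_logsumexp/abs wrapping is this notebook's own variant of the Kendall et al. formulation). A toy sketch of the core weighting with made-up numbers:

# Hypothetical per-task squared errors and learned log-variances s = log(sigma^2)
mse_per_task = np.array([0.04, 0.01])
log_vars_toy = np.array([0.5, -1.0])
# Core Kendall-style term: exp(-s) * mse + s. The +s term keeps the model from
# driving sigma to infinity just to zero out the weighted error.
print(np.exp(-log_vars_toy) * mse_per_task + log_vars_toy)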
In [13]:
num_heads, ff_dim = 1, 12
In [14]:
def get_prediction_model():
    def build_output(out, out_name):
        # Per-task head: its own Transformer block, then pooled dense features
        self_block = TransformerBlock(64, num_heads, ff_dim, name=f'{out_name}_attn')
        out = self_block(out)
        out = layers.GlobalAveragePooling1D()(out)
        out = layers.Dropout(0.1)(out)
        out = layers.Dense(32, activation="relu")(out)
        # out = layers.Dense(1, name=out_name, activation="sigmoid")(out)
        return out

    inputs = layers.Input(shape=(1, len(feature_cols)), name='input')
    # Shared trunk: Conv1D -> BiLSTM -> Dense -> Transformer block
    x = layers.Conv1D(filters=64, kernel_size=1, activation='relu')(inputs)
    # x = layers.Dropout(rate=0.1)(x)
    lstm_out = layers.Bidirectional(layers.LSTM(units=64, return_sequences=True))(x)
    lstm_out = layers.Dense(128, activation='relu')(lstm_out)
    transformer_block = TransformerBlock(128, num_heads, ff_dim, name='first_attn')
    out = transformer_block(lstm_out)
    out = layers.GlobalAveragePooling1D()(out)
    out = layers.Dropout(0.1)(out)
    out = layers.Dense(64, activation='relu')(out)
    out = K.expand_dims(out, axis=1)  # restore a length-1 sequence axis for the heads

    # Four task-specific heads, one per target column
    bet = build_output(out, 'bet')
    mesco = build_output(out, 'mesco')
    micro = build_output(out, 'micro')
    avg = build_output(out, 'avg')
    bet = layers.Dense(1, activation='sigmoid', name='bet')(bet)
    mesco = layers.Dense(1, activation='sigmoid', name='mesco')(mesco)
    micro = layers.Dense(1, activation='sigmoid', name='micro')(micro)
    avg = layers.Dense(1, activation='sigmoid', name='avg')(avg)
    model = Model(inputs=[inputs], outputs=[bet, mesco, micro, avg])
    return model
In [15]:
def get_trainable_model(prediction_model):
    # Wraps the prediction model: the true targets enter as extra inputs so the
    # CustomMultiLossLayer can compute its weighted loss (hence compile(loss=None) later).
    inputs = layers.Input(shape=(1, len(feature_cols)), name='input')
    bet, mesco, micro, avg = prediction_model(inputs)
    bet_real = layers.Input(shape=(1,), name='bet_real')
    mesco_real = layers.Input(shape=(1,), name='mesco_real')
    micro_real = layers.Input(shape=(1,), name='micro_real')
    avg_real = layers.Input(shape=(1,), name='avg_real')
    out = CustomMultiLossLayer(nb_outputs=4)(
        [bet_real, mesco_real, micro_real, avg_real, bet, mesco, micro, avg])
    return Model([inputs, bet_real, mesco_real, micro_real, avg_real], out)
In [16]:
# Min-max normalize every column to [0, 1]; skip constant columns to avoid division by zero
maxs = train_data.max()
mins = train_data.min()
for col in train_data.columns:
    if maxs[col] - mins[col] == 0:
        continue
    train_data[col] = (train_data[col] - mins[col]) / (maxs[col] - mins[col])
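maxs and mins are kept around deliberately: the same training-set statistics must scale any new sample and later un-scale predictions (as done further below). A minimal sketch, assuming a hypothetical new_df with the same columns:

# Hypothetical helper: apply the training-set min-max statistics to unseen data
def scale_like_train(new_df, maxs, mins):
    scaled = new_df.copy()
    for col in scaled.columns:
        rng = maxs[col] - mins[col]
        if rng != 0:
            scaled[col] = (scaled[col] - mins[col]) / rng
    return scaled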
In [17]:
train_data
Out[17]:
  | 热处理条件-热处理次数 | 热处理条件-是否是中温停留 | 第一次热处理-温度 | 第一次热处理-升温速率 | 第一次热处理-保留时间 | 第二次热处理-温度 | 第二次热处理-升温速率 | 第二次热处理-保留时间 | 共碳化-是否是共碳化物质 | 共碳化-共碳化物质/沥青 | ... | 模板剂-种类_二氧化硅 | 模板剂-种类_氢氧化镁 | 模板剂-种类_氧化钙 | 模板剂-种类_氧化锌 | 模板剂-种类_氧化镁 | 模板剂-种类_氯化钠 | 模板剂-种类_氯化钾 | 模板剂-种类_碱式碳酸镁 | 模板剂-种类_碳酸钙 | 模板剂-种类_纤维素 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
0 | 0.0 | 0.0 | 0.166667 | 0.3 | 0.5 | 0.000000 | 0.0 | 0.000000 | 0.0 | 0.0 | ... | 0 | 0.0 | 1.0 | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0.0 |
1 | 0.0 | 0.0 | 0.333333 | 0.3 | 0.5 | 0.000000 | 0.0 | 0.000000 | 0.0 | 0.0 | ... | 0 | 0.0 | 1.0 | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0.0 |
2 | 0.0 | 0.0 | 0.333333 | 0.3 | 0.5 | 0.000000 | 0.0 | 0.000000 | 0.0 | 0.0 | ... | 0 | 0.0 | 1.0 | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0.0 |
3 | 0.0 | 0.0 | 0.333333 | 0.3 | 0.5 | 0.000000 | 0.0 | 0.000000 | 0.0 | 0.0 | ... | 0 | 0.0 | 1.0 | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0.0 |
4 | 1.0 | 0.0 | 0.166667 | 0.3 | 0.5 | 0.666667 | 0.5 | 0.666667 | 0.0 | 0.0 | ... | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0 | 1.0 | 0.0 | 0.0 |
... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
144 | 0.0 | 0.0 | 0.333333 | 0.3 | 0.0 | 0.000000 | 0.0 | 0.000000 | 0.0 | 0.0 | ... | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0.0 |
145 | 0.0 | 0.0 | 0.500000 | 0.3 | 0.0 | 0.000000 | 0.0 | 0.000000 | 0.0 | 0.0 | ... | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0.0 |
146 | 0.0 | 0.0 | 0.666667 | 0.3 | 0.0 | 0.000000 | 0.0 | 0.000000 | 0.0 | 0.0 | ... | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0.0 |
147 | 0.0 | 0.0 | 0.500000 | 0.3 | 0.0 | 0.000000 | 0.0 | 0.000000 | 0.0 | 0.0 | ... | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0.0 |
148 | 0.0 | 0.0 | 0.500000 | 0.3 | 0.0 | 0.000000 | 0.0 | 0.000000 | 0.0 | 0.0 | ... | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0 | 0.0 | 0.0 | 0.0 |
123 rows × 42 columns
In [18]:
# feature_cols = [x for x in train_data.columns if x not in out_cols and '第二次' not in x]
feature_cols = [x for x in train_data.columns if x not in out_cols]
use_cols = feature_cols + out_cols
In [19]:
use_data = train_data.copy()
for col in use_cols:
    use_data[col] = use_data[col].astype('float32')
In [20]:
train, valid = train_test_split(use_data[use_cols], test_size=0.3, random_state=42, shuffle=True)
valid, test = train_test_split(valid, test_size=0.3, random_state=42, shuffle=True)
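The two-stage split leaves roughly 70% / 20% / 10% of the 123 rows for train / validation / test; the 12-row test set matches the dozen predictions shown further below. A quick sanity check:

print(len(train), len(valid), len(test))  # 86 25 12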
In [21]:
prediction_model = get_prediction_model()
trainable_model = get_trainable_model(prediction_model)
In [22]:
prediction_model.summary()
Model: "model" __________________________________________________________________________________________________ Layer (type) Output Shape Param # Connected to ================================================================================================== input (InputLayer) [(None, 1, 38)] 0 __________________________________________________________________________________________________ conv1d (Conv1D) (None, 1, 64) 2496 input[0][0] __________________________________________________________________________________________________ bidirectional (Bidirectional) (None, 1, 128) 66048 conv1d[0][0] __________________________________________________________________________________________________ dense (Dense) (None, 1, 128) 16512 bidirectional[0][0] __________________________________________________________________________________________________ transformer_block (TransformerB (None, 1, 128) 69772 dense[0][0] __________________________________________________________________________________________________ global_average_pooling1d (Globa (None, 128) 0 transformer_block[0][0] __________________________________________________________________________________________________ dropout_2 (Dropout) (None, 128) 0 global_average_pooling1d[0][0] __________________________________________________________________________________________________ dense_3 (Dense) (None, 64) 8256 dropout_2[0][0] __________________________________________________________________________________________________ tf.expand_dims (TFOpLambda) (None, 1, 64) 0 dense_3[0][0] __________________________________________________________________________________________________ transformer_block_1 (Transforme (None, 1, 64) 18508 tf.expand_dims[0][0] __________________________________________________________________________________________________ transformer_block_2 (Transforme (None, 1, 64) 18508 tf.expand_dims[0][0] __________________________________________________________________________________________________ transformer_block_3 (Transforme (None, 1, 64) 18508 tf.expand_dims[0][0] __________________________________________________________________________________________________ transformer_block_4 (Transforme (None, 1, 64) 18508 tf.expand_dims[0][0] __________________________________________________________________________________________________ global_average_pooling1d_1 (Glo (None, 64) 0 transformer_block_1[0][0] __________________________________________________________________________________________________ global_average_pooling1d_2 (Glo (None, 64) 0 transformer_block_2[0][0] __________________________________________________________________________________________________ global_average_pooling1d_3 (Glo (None, 64) 0 transformer_block_3[0][0] __________________________________________________________________________________________________ global_average_pooling1d_4 (Glo (None, 64) 0 transformer_block_4[0][0] __________________________________________________________________________________________________ dropout_5 (Dropout) (None, 64) 0 global_average_pooling1d_1[0][0] __________________________________________________________________________________________________ dropout_8 (Dropout) (None, 64) 0 global_average_pooling1d_2[0][0] __________________________________________________________________________________________________ dropout_11 (Dropout) (None, 64) 0 global_average_pooling1d_3[0][0] __________________________________________________________________________________________________ dropout_14 (Dropout) (None, 
64) 0 global_average_pooling1d_4[0][0] __________________________________________________________________________________________________ dense_6 (Dense) (None, 32) 2080 dropout_5[0][0] __________________________________________________________________________________________________ dense_9 (Dense) (None, 32) 2080 dropout_8[0][0] __________________________________________________________________________________________________ dense_12 (Dense) (None, 32) 2080 dropout_11[0][0] __________________________________________________________________________________________________ dense_15 (Dense) (None, 32) 2080 dropout_14[0][0] __________________________________________________________________________________________________ bet (Dense) (None, 1) 33 dense_6[0][0] __________________________________________________________________________________________________ mesco (Dense) (None, 1) 33 dense_9[0][0] __________________________________________________________________________________________________ micro (Dense) (None, 1) 33 dense_12[0][0] __________________________________________________________________________________________________ avg (Dense) (None, 1) 33 dense_15[0][0] ================================================================================================== Total params: 245,568 Trainable params: 245,568 Non-trainable params: 0 __________________________________________________________________________________________________
In [23]:
from tensorflow.keras import optimizers
from tensorflow.python.keras.utils.vis_utils import plot_model
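plot_model is imported but never called in this notebook; a minimal usage sketch (requires the pydot and graphviz packages, otherwise it raises an ImportError):

# Hypothetical: render the four-head architecture to a PNG with layer shapes
plot_model(prediction_model, to_file='prediction_model.png', show_shapes=True)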
In [24]:
X = np.expand_dims(train[feature_cols].values, axis=1)
Y = [x for x in train[out_cols].values.T]
Y_valid = [x for x in valid[out_cols].values.T]
In [25]:
# Import from tensorflow.keras (not standalone keras) to avoid mixing the two APIs
from tensorflow.keras.callbacks import ReduceLROnPlateau

reduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=10, mode='auto')
In [39]:
trainable_model.compile(optimizer='adam', loss=None)  # loss comes from CustomMultiLossLayer.add_loss
hist = trainable_model.fit([X, Y[0], Y[1], Y[2], Y[3]],
                           epochs=40, batch_size=8, verbose=1,
                           validation_data=[np.expand_dims(valid[feature_cols].values, axis=1),
                                            Y_valid[0], Y_valid[1], Y_valid[2], Y_valid[3]],
                           callbacks=[reduce_lr])
Epoch 1/40
11/11 [==============================] - 6s 108ms/step - loss: 0.0316 - val_loss: 0.0835
Epoch 2/40
11/11 [==============================] - 0s 20ms/step - loss: 0.0281 - val_loss: 0.0958
Epoch 3/40
11/11 [==============================] - 0s 27ms/step - loss: 0.0278 - val_loss: 0.0891
Epoch 4/40
11/11 [==============================] - 0s 21ms/step - loss: 0.0233 - val_loss: 0.0912
Epoch 5/40
11/11 [==============================] - 0s 27ms/step - loss: 0.0215 - val_loss: 0.1023
Epoch 6/40
11/11 [==============================] - 0s 33ms/step - loss: 0.0348 - val_loss: 0.0864
Epoch 7/40
11/11 [==============================] - 0s 16ms/step - loss: 0.0207 - val_loss: 0.0823
Epoch 8/40
11/11 [==============================] - 0s 25ms/step - loss: 0.0222 - val_loss: 0.0883
Epoch 9/40
11/11 [==============================] - 0s 22ms/step - loss: 0.0258 - val_loss: 0.1029
Epoch 10/40
11/11 [==============================] - 0s 26ms/step - loss: 0.0288 - val_loss: 0.0857
Epoch 11/40
11/11 [==============================] - 0s 22ms/step - loss: 0.0249 - val_loss: 0.0880
Epoch 12/40
11/11 [==============================] - 0s 21ms/step - loss: 0.0219 - val_loss: 0.0882
Epoch 13/40
11/11 [==============================] - 0s 24ms/step - loss: 0.0191 - val_loss: 0.0873
Epoch 14/40
11/11 [==============================] - 0s 20ms/step - loss: 0.0187 - val_loss: 0.0929
Epoch 15/40
11/11 [==============================] - 0s 23ms/step - loss: 0.0183 - val_loss: 0.0988
Epoch 16/40
11/11 [==============================] - 0s 19ms/step - loss: 0.0189 - val_loss: 0.0905
Epoch 17/40
11/11 [==============================] - 0s 20ms/step - loss: 0.0209 - val_loss: 0.0823
Epoch 18/40
11/11 [==============================] - 0s 27ms/step - loss: 0.0185 - val_loss: 0.0834
Epoch 19/40
11/11 [==============================] - 0s 20ms/step - loss: 0.0177 - val_loss: 0.0916
Epoch 20/40
11/11 [==============================] - 0s 24ms/step - loss: 0.0163 - val_loss: 0.0919
Epoch 21/40
11/11 [==============================] - 0s 20ms/step - loss: 0.0141 - val_loss: 0.0898
Epoch 22/40
11/11 [==============================] - 0s 27ms/step - loss: 0.0144 - val_loss: 0.0923
Epoch 23/40
11/11 [==============================] - 0s 19ms/step - loss: 0.0138 - val_loss: 0.0906
Epoch 24/40
11/11 [==============================] - 0s 20ms/step - loss: 0.0140 - val_loss: 0.0897
Epoch 25/40
11/11 [==============================] - 0s 23ms/step - loss: 0.0126 - val_loss: 0.0892
Epoch 26/40
11/11 [==============================] - 0s 20ms/step - loss: 0.0129 - val_loss: 0.0918
Epoch 27/40
11/11 [==============================] - 0s 25ms/step - loss: 0.0123 - val_loss: 0.0935
Epoch 28/40
11/11 [==============================] - 0s 25ms/step - loss: 0.0131 - val_loss: 0.0933
Epoch 29/40
11/11 [==============================] - 0s 17ms/step - loss: 0.0125 - val_loss: 0.0933
Epoch 30/40
11/11 [==============================] - 0s 23ms/step - loss: 0.0119 - val_loss: 0.0932
Epoch 31/40
11/11 [==============================] - 0s 20ms/step - loss: 0.0129 - val_loss: 0.0936
Epoch 32/40
11/11 [==============================] - 0s 28ms/step - loss: 0.0114 - val_loss: 0.0933
Epoch 33/40
11/11 [==============================] - 0s 20ms/step - loss: 0.0122 - val_loss: 0.0932
Epoch 34/40
11/11 [==============================] - 0s 21ms/step - loss: 0.0114 - val_loss: 0.0936
Epoch 35/40
11/11 [==============================] - 0s 23ms/step - loss: 0.0119 - val_loss: 0.0938
Epoch 36/40
11/11 [==============================] - 0s 20ms/step - loss: 0.0118 - val_loss: 0.0937
Epoch 37/40
11/11 [==============================] - 0s 20ms/step - loss: 0.0127 - val_loss: 0.0937
Epoch 38/40
11/11 [==============================] - 0s 27ms/step - loss: 0.0123 - val_loss: 0.0937
Epoch 39/40
11/11 [==============================] - 0s 19ms/step - loss: 0.0124 - val_loss: 0.0937
Epoch 40/40
11/11 [==============================] - 0s 20ms/step - loss: 0.0129 - val_loss: 0.0937
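The training loss keeps falling while val_loss plateaus around 0.09, which suggests overfitting on this small dataset. matplotlib was imported (with a CJK font configured) but the curves are never plotted; a minimal sketch of visualizing the hist object from the run above:

# Plot the training and validation loss recorded by fit()
plt.figure(figsize=(8, 4))
plt.plot(hist.history['loss'], label='loss')
plt.plot(hist.history['val_loss'], label='val_loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()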
In [40]:
rst = prediction_model.predict(np.expand_dims(test[feature_cols], axis=1))
rst
Out[40]:
[array([[0.8401114 ], [0.4296295 ], [0.34763122], [0.33006623], [0.74300694], [0.48508543], [0.48184243], [0.7309267 ], [0.5264127 ], [0.7570494 ], [0.29492375], [0.34379733]], dtype=float32),
 array([[0.9495956 ], [0.19964108], [0.25691378], [0.15781167], [0.39773428], [0.257546  ], [0.2265681 ], [0.39088207], [0.30309337], [0.4006669 ], [0.16448957], [0.20928389]], dtype=float32),
 array([[0.93163174], [0.45915267], [0.24377662], [0.32275468], [0.84771645], [0.51101613], [0.52240014], [0.77952445], [0.6746559 ], [0.6747417 ], [0.3022651 ], [0.3458013 ]], dtype=float32),
 array([[0.4518058 ], [0.06488091], [0.2511762 ], [0.0624491 ], [0.09656441], [0.07555431], [0.06494072], [0.09723139], [0.10824579], [0.09783638], [0.07164052], [0.15804273]], dtype=float32)]
In [41]:
[np.exp(K.get_value(log_var[0]))**0.5 for log_var in trainable_model.layers[-1].log_vars]
Out[41]:
[0.998927703775019, 0.9994643982390371, 0.9991108696677027, 0.9996066810061789]
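These are the learned per-task standard deviations σ = (exp(log σ²))^0.5; all four sit near 1.0, so the tasks ended up weighted almost equally. The loss weights the layer actually applies are the precisions exp(−log σ²), which can be read out the same way:

# Per-task precision weights implied by the learned log-variances
# (all ~1.0 here, i.e. nearly uniform task weighting in this run)
weights = [float(np.exp(-K.get_value(lv[0]))) for lv in trainable_model.layers[-1].log_vars]
print(weights)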
In [42]:
pred_rst = pd.DataFrame.from_records(np.squeeze(np.asarray(rst), axis=2).T, columns=out_cols)
In [43]:
real_rst = test[out_cols].copy()
In [44]:
# Undo the min-max scaling so metrics are reported in the original units
for col in out_cols:
    pred_rst[col] = pred_rst[col] * (maxs[col] - mins[col]) + mins[col]
    real_rst[col] = real_rst[col] * (maxs[col] - mins[col]) + mins[col]
In [45]:
real_rst.columns
Out[45]:
Index(['碳材料结构特征-比表面积', '碳材料结构特征-总孔体积', '碳材料结构特征-微孔体积', '碳材料结构特征-平均孔径'], dtype='object')
In [46]:
# Flatten predictions and ground truth per target
# (renamed from leftover pm25/pm10/so2/no2 identifiers to match the four heads)
y_pred_bet = pred_rst['碳材料结构特征-比表面积'].values.reshape(-1,)
y_pred_total = pred_rst['碳材料结构特征-总孔体积'].values.reshape(-1,)
y_pred_micro = pred_rst['碳材料结构特征-微孔体积'].values.reshape(-1,)
y_pred_avg = pred_rst['碳材料结构特征-平均孔径'].values.reshape(-1,)
y_true_bet = real_rst['碳材料结构特征-比表面积'].values.reshape(-1,)
y_true_total = real_rst['碳材料结构特征-总孔体积'].values.reshape(-1,)
y_true_micro = real_rst['碳材料结构特征-微孔体积'].values.reshape(-1,)
y_true_avg = real_rst['碳材料结构特征-平均孔径'].values.reshape(-1,)
In [47]:
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score, mean_absolute_percentage_error
In [48]:
def print_eva(y_true, y_pred, tp):
    # Report MSE, RMSE, MAE, MAPE and R^2 for one target column
    MSE = mean_squared_error(y_true, y_pred)
    RMSE = np.sqrt(MSE)
    MAE = mean_absolute_error(y_true, y_pred)
    MAPE = mean_absolute_percentage_error(y_true, y_pred)
    R_2 = r2_score(y_true, y_pred)
    print(f"COL: {tp}, MSE: {format(MSE, '.2E')}", end=', ')
    print(f'RMSE: {round(RMSE, 4)}', end=', ')
    print(f'MAPE: {round(MAPE * 100, 2)} %', end=', ')  # round after converting to percent
    print(f'MAE: {round(MAE, 4)}', end=', ')
    print(f'R_2: {round(R_2, 4)}')
    return [MSE, RMSE, MAE, MAPE, R_2]
In [49]:
bet_eva = print_eva(y_true_bet, y_pred_bet, tp='比表面积')
total_eva = print_eva(y_true_total, y_pred_total, tp='总孔体积')
micro_eva = print_eva(y_true_micro, y_pred_micro, tp='微孔体积')
avg_eva = print_eva(y_true_avg, y_pred_avg, tp='平均孔径')
COL: 比表面积, MSE: 2.36E+05, RMSE: 485.5891, MAPE: 25.86 %, MAE: 340.8309, R_2: -0.1091
COL: 总孔体积, MSE: 5.15E-02, RMSE: 0.2268, MAPE: 23.81 %, MAE: 0.1519, R_2: 0.7657
COL: 微孔体积, MSE: 4.53E-02, RMSE: 0.2128, MAPE: 34.75 %, MAE: 0.1536, R_2: -0.0412
COL: 平均孔径, MSE: 4.63E-01, RMSE: 0.6802, MAPE: 15.62 %, MAE: 0.415, R_2: 0.5929