coal_materials/multi-task0102.ipynb

120 KiB
Raw Permalink Blame History

In [1]:
import os
os.environ['CUDA_DEVICE_ORDER'] = 'PCB_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1'
In [2]:
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
#新增加的两行
from pylab import mpl
# 设置显示中文字体
mpl.rcParams["font.sans-serif"] = ["SimHei"]

mpl.rcParams["axes.unicode_minus"] = False
In [3]:
data_0102 = pd.read_excel('./data/20240102/20240102.xlsx', header=[0,1,2])
data_0102
Out[3]:
Unnamed: 0_level_0 弹筒发热量 挥发分 固定炭
化验编号 Had Cad Nad Oad Qb,ad Vad Fcad
Unnamed: 0_level_2 (%) (%) (%) (%) MJ/kg (%) (%)
0 2720110529 3.93 70.18 0.81 25.079 27.820 32.06 55.68
1 2720096883 3.78 68.93 0.77 26.512 27.404 29.96 54.71
2 2720109084 3.48 69.60 0.76 26.148 27.578 29.31 55.99
3 2720084708 3.47 66.71 0.76 29.055 26.338 28.58 53.87
4 2720062721 3.87 68.78 0.80 26.542 27.280 29.97 54.78
... ... ... ... ... ... ... ... ...
223 2720030490 4.12 68.85 0.97 26.055 27.864 32.94 51.89
224 2720028633 3.97 67.04 0.94 28.043 27.368 31.88 51.38
225 2720028634 4.12 68.42 0.96 26.493 27.886 33.16 52.00
226 2720017683 3.88 67.42 0.94 27.760 26.616 31.65 50.56
227 2720017678 3.81 66.74 0.92 28.530 26.688 31.02 50.82

228 rows × 8 columns

In [4]:
cols = [''.join([y for y in x if 'Unnamed' not in y]) for x in data_0102.columns]
cols
Out[4]:
['化验编号',
 '氢Had(%)',
 '碳Cad(%)',
 '氮Nad(%)',
 '氧Oad(%)',
 '弹筒发热量Qb,adMJ/kg',
 '挥发分Vad(%)',
 '固定炭Fcad(%)']
In [5]:
data_0102.columns = cols
In [6]:
data_0102
Out[6]:
化验编号 氢Had(%) 碳Cad(%) 氮Nad(%) 氧Oad(%) 弹筒发热量Qb,adMJ/kg 挥发分Vad(%) 固定炭Fcad(%)
0 2720110529 3.93 70.18 0.81 25.079 27.820 32.06 55.68
1 2720096883 3.78 68.93 0.77 26.512 27.404 29.96 54.71
2 2720109084 3.48 69.60 0.76 26.148 27.578 29.31 55.99
3 2720084708 3.47 66.71 0.76 29.055 26.338 28.58 53.87
4 2720062721 3.87 68.78 0.80 26.542 27.280 29.97 54.78
... ... ... ... ... ... ... ... ...
223 2720030490 4.12 68.85 0.97 26.055 27.864 32.94 51.89
224 2720028633 3.97 67.04 0.94 28.043 27.368 31.88 51.38
225 2720028634 4.12 68.42 0.96 26.493 27.886 33.16 52.00
226 2720017683 3.88 67.42 0.94 27.760 26.616 31.65 50.56
227 2720017678 3.81 66.74 0.92 28.530 26.688 31.02 50.82

228 rows × 8 columns

In [7]:
out_cols = ['挥发分Vad(%)', '固定炭Fcad(%)']
In [8]:
out_cols
Out[8]:
['挥发分Vad(%)', '固定炭Fcad(%)']
In [9]:
data = data_0102.copy()
In [10]:
train_data = data.dropna(subset=out_cols).fillna(0)
In [11]:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow.keras.backend as K
2024-01-08 18:28:46.594783: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0
In [12]:
class TransformerBlock(layers.Layer):
    def __init__(self, embed_dim, num_heads, ff_dim, name, rate=0.1):
        super().__init__()
        self.att = layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim, name=name)
        self.ffn = keras.Sequential(
            [layers.Dense(ff_dim, activation="relu"), layers.Dense(embed_dim),]
        )
        self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = layers.Dropout(rate)
        self.dropout2 = layers.Dropout(rate)

    def call(self, inputs, training):
        attn_output = self.att(inputs, inputs)
        attn_output = self.dropout1(attn_output, training=training)
        out1 = self.layernorm1(inputs + attn_output)
        ffn_output = self.ffn(out1)
        ffn_output = self.dropout2(ffn_output, training=training)
        return self.layernorm2(out1 + ffn_output)
In [13]:
from tensorflow.keras import Model
In [14]:
from tensorflow.keras.initializers import Constant
In [15]:
# Custom loss layer
class CustomMultiLossLayer(layers.Layer):
    def __init__(self, nb_outputs=2, **kwargs):
        self.nb_outputs = nb_outputs
        self.is_placeholder = True
        super(CustomMultiLossLayer, self).__init__(**kwargs)
        
    def build(self, input_shape=None):
        # initialise log_vars
        self.log_vars = []
        for i in range(self.nb_outputs):
            self.log_vars += [self.add_weight(name='log_var' + str(i), shape=(1,),
                                              initializer=tf.initializers.he_normal(), trainable=True)]
        super(CustomMultiLossLayer, self).build(input_shape)

    def multi_loss(self, ys_true, ys_pred):
        assert len(ys_true) == self.nb_outputs and len(ys_pred) == self.nb_outputs
        loss = 0
        for y_true, y_pred, log_var in zip(ys_true, ys_pred, self.log_vars):
            mse = (y_true - y_pred) ** 2.
            pre = K.exp(-log_var[0])
            loss += tf.abs(tf.reduce_logsumexp(pre * mse + log_var[0], axis=-1))
        return K.mean(loss)

    def call(self, inputs):
        ys_true = inputs[:self.nb_outputs]
        ys_pred = inputs[self.nb_outputs:]
        loss = self.multi_loss(ys_true, ys_pred)
        self.add_loss(loss, inputs=inputs)
        # We won't actually use the output.
        return K.concatenate(inputs, -1)
In [16]:
num_heads, ff_dim = 3, 16
In [17]:
def get_prediction_model():
    def build_output(out, out_name):
        self_block = TransformerBlock(64, num_heads, ff_dim, name=f'{out_name}_attn')
        out = self_block(out)
        out = layers.GlobalAveragePooling1D()(out)
        out = layers.Dropout(0.1)(out)
        out = layers.Dense(32, activation="relu")(out)
        # out = layers.Dense(1, name=out_name, activation="sigmoid")(out)
        return out
    inputs = layers.Input(shape=(1,len(feature_cols)), name='input')
    x = layers.Conv1D(filters=64, kernel_size=1, activation='relu')(inputs)
    # x = layers.Dropout(rate=0.1)(x)
    lstm_out = layers.Bidirectional(layers.LSTM(units=64, return_sequences=True))(x)
    lstm_out = layers.Dense(128, activation='relu')(lstm_out)
    transformer_block = TransformerBlock(128, num_heads, ff_dim, name='first_attn')
    out = transformer_block(lstm_out)
    out = layers.GlobalAveragePooling1D()(out)
    out = layers.Dropout(0.1)(out)
    out = layers.Dense(64, activation='relu')(out)
    out = K.expand_dims(out, axis=1)

    # bet = layers.Dense(32, activation="relu")(out)
    # mesco = layers.Dense(32, activation="relu")(out)
    bet = build_output(out, 'bet')
    mesco = build_output(out, 'mesco')

    bet = layers.Dense(1, activation='sigmoid', name='vad')(bet)
    mesco = layers.Dense(1, activation='sigmoid', name='fcad')(mesco)

    model = Model(inputs=[inputs], outputs=[bet, mesco])
    return model
In [18]:
def get_trainable_model(prediction_model):
    inputs = layers.Input(shape=(1,len(feature_cols)), name='input')
    bet, mesco = prediction_model(inputs)
    bet_real = layers.Input(shape=(1,), name='vad_real')
    mesco_real = layers.Input(shape=(1,), name='fcad_real')
    out = CustomMultiLossLayer(nb_outputs=2)([bet_real, mesco_real, bet, mesco])
    return Model([inputs, bet_real, mesco_real], out)
In [19]:
from tensorflow.python.keras.utils.vis_utils import plot_model
In [20]:
maxs = train_data.max()
mins = train_data.min()
for col in train_data.columns:
    if maxs[col] - mins[col] == 0:
        continue
    train_data[col] = (train_data[col] - mins[col]) / (maxs[col] - mins[col])
In [21]:
train_data
Out[21]:
化验编号 氢Had(%) 碳Cad(%) 氮Nad(%) 氧Oad(%) 弹筒发热量Qb,adMJ/kg 挥发分Vad(%) 固定炭Fcad(%)
0 0.996547 0.773973 0.835414 0.456522 0.171463 0.811249 0.847737 0.828147
1 0.851118 0.671233 0.799943 0.369565 0.210254 0.782038 0.674897 0.794606
2 0.981147 0.465753 0.818956 0.347826 0.200401 0.794256 0.621399 0.838866
3 0.721367 0.458904 0.736947 0.347826 0.279094 0.707183 0.561317 0.765560
4 0.487046 0.732877 0.795687 0.434783 0.211066 0.773331 0.675720 0.797026
... ... ... ... ... ... ... ... ...
223 0.143553 0.904110 0.797673 0.804348 0.197883 0.814339 0.920165 0.697095
224 0.123762 0.801370 0.746311 0.739130 0.251699 0.779510 0.832922 0.679461
225 0.123773 0.904110 0.785471 0.782609 0.209740 0.815884 0.938272 0.700899
226 0.007066 0.739726 0.757094 0.739130 0.244038 0.726705 0.813992 0.651107
227 0.007012 0.691781 0.737798 0.695652 0.264882 0.731760 0.762140 0.660097

228 rows × 8 columns

In [22]:
# feature_cols = [x for x in train_data.columns if x not in out_cols and '第二次' not in x]
feature_cols = [x for x in train_data.columns if x not in out_cols and '编号' not in x]
use_cols = feature_cols + out_cols
In [23]:
use_data = train_data.copy()
for col in use_cols:
    use_data[col] = use_data[col].astype('float32')
In [24]:
from sklearn.model_selection import KFold, train_test_split
kf = KFold(n_splits=6, shuffle=True, random_state=42)
In [25]:
from tensorflow.keras import optimizers
from tensorflow.python.keras.utils.vis_utils import plot_model
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score, mean_absolute_percentage_error
In [26]:
from keras.callbacks import ReduceLROnPlateau
reduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=10, mode='auto')
In [27]:
def print_eva(y_true, y_pred, tp):
    MSE = mean_squared_error(y_true, y_pred)
    RMSE = np.sqrt(MSE)
    MAE = mean_absolute_error(y_true, y_pred)
    MAPE = mean_absolute_percentage_error(y_true, y_pred)
    R_2 = r2_score(y_true, y_pred)
    print(f"COL: {tp}, MSE: {format(MSE, '.2E')}", end=',')
    print(f'RMSE: {round(RMSE, 3)}', end=',')
    print(f'MAPE: {round(MAPE * 100, 3)} %', end=',')
    print(f'MAE: {round(MAE, 3)}', end=',')
    print(f'R_2: {round(R_2, 3)}')
    return [MSE, RMSE, MAE, MAPE, R_2]
In [28]:
prediction_model = get_prediction_model()
trainable_model = get_trainable_model(prediction_model)
trainable_model.compile(optimizer='adam', loss=None)
2024-01-08 18:28:48.712096: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcuda.so.1
2024-01-08 18:28:48.770197: E tensorflow/stream_executor/cuda/cuda_driver.cc:328] failed call to cuInit: CUDA_ERROR_INVALID_DEVICE: invalid device ordinal
2024-01-08 18:28:48.770270: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: zhaojh-yv621
2024-01-08 18:28:48.770284: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: zhaojh-yv621
2024-01-08 18:28:48.770578: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:200] libcuda reported version is: 520.61.5
2024-01-08 18:28:48.770639: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:204] kernel reported version is: 520.61.5
2024-01-08 18:28:48.770650: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:310] kernel version seems to match DSO: 520.61.5
2024-01-08 18:28:48.771267: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations:  AVX2 AVX512F FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
In [29]:
plot_model(prediction_model)
Out[29]:
No description has been provided for this image
In [ ]:
vad_eva_list = list()
fcad_eva_list = list()
train_data = use_data[use_cols].copy()
for (train_index, test_index) in kf.split(train_data):
    train = train_data.loc[train_index]
    valid = train_data.loc[test_index]
    X = np.expand_dims(train[feature_cols].values, axis=1)
    Y = [x for x in train[out_cols].values.T]
    X_valid = np.expand_dims(valid[feature_cols].values, axis=1)
    Y_valid = [x for x in valid[out_cols].values.T]
    prediction_model = get_prediction_model()
    trainable_model = get_trainable_model(prediction_model)
    trainable_model.compile(optimizer='adam', loss=None)
    hist = trainable_model.fit([X, Y[0], Y[1]], epochs=120, batch_size=8, verbose=0, 
                               validation_data=[X_valid, Y_valid[0], Y_valid[1]],
                               callbacks=[reduce_lr]
                               )
    rst = prediction_model.predict(X_valid)
    pred_rst = pd.DataFrame.from_records(np.squeeze(np.asarray(rst), axis=2).T, columns=out_cols)
    real_rst = valid[out_cols].copy()
    for col in out_cols:
        pred_rst[col] = pred_rst[col] * (maxs[col] - mins[col]) + mins[col]
        real_rst[col] = real_rst[col] * (maxs[col] - mins[col]) + mins[col]
    y_pred_vad = pred_rst['挥发分Vad(%)'].values.reshape(-1,)
    y_pred_fcad = pred_rst['固定炭Fcad(%)'].values.reshape(-1,)
    y_true_vad = real_rst['挥发分Vad(%)'].values.reshape(-1,)
    y_true_fcad = real_rst['固定炭Fcad(%)'].values.reshape(-1,)
    vad_eva = print_eva(y_true_vad, y_pred_vad, tp='挥发分Vad')
    fcad_eva = print_eva(y_true_fcad, y_pred_fcad, tp='固定炭Fcad')
    vad_eva_list.append(vad_eva)
    fcad_eva_list.append(fcad_eva)
    del trainable_model
    del prediction_model
2024-01-08 18:28:51.289876: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:176] None of the MLIR Optimization Passes are enabled (registered 2)
2024-01-08 18:28:51.306804: I tensorflow/core/platform/profile_utils/cpu_utils.cc:114] CPU Frequency: 2200000000 Hz
COL: 挥发分Vad, MSE: 5.90E-01,RMSE: 0.768,MAPE: 2.097 %,MAE: 0.628,R_2: 0.872
COL: 固定炭Fcad, MSE: 8.06E-01,RMSE: 0.898,MAPE: 1.432 %,MAE: 0.753,R_2: 0.968
COL: 挥发分Vad, MSE: 3.46E+01,RMSE: 5.885,MAPE: 18.37 %,MAE: 5.539,R_2: -3.196
COL: 固定炭Fcad, MSE: 3.66E+00,RMSE: 1.912,MAPE: 3.032 %,MAE: 1.563,R_2: 0.929
COL: 挥发分Vad, MSE: 1.77E+01,RMSE: 4.212,MAPE: 12.828 %,MAE: 3.718,R_2: -3.53
COL: 固定炭Fcad, MSE: 1.25E+00,RMSE: 1.119,MAPE: 1.541 %,MAE: 0.855,R_2: 0.91
COL: 挥发分Vad, MSE: 6.66E-01,RMSE: 0.816,MAPE: 2.165 %,MAE: 0.633,R_2: 0.883
COL: 固定炭Fcad, MSE: 4.77E-01,RMSE: 0.69,MAPE: 1.072 %,MAE: 0.551,R_2: 0.986
WARNING:tensorflow:5 out of the last 9 calls to <function Model.make_predict_function.<locals>.predict_function at 0x7fba6eb73040> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for  more details.
COL: 挥发分Vad, MSE: 8.97E-01,RMSE: 0.947,MAPE: 2.346 %,MAE: 0.722,R_2: 0.796
COL: 固定炭Fcad, MSE: 1.29E+00,RMSE: 1.137,MAPE: 1.633 %,MAE: 0.886,R_2: 0.939
In [ ]:
vad_df = pd.DataFrame.from_records(vad_eva_list, columns=['MSE', 'RMSE', 'MAE', 'MAPE', 'R_2'])
vad_df.sort_values(by='R_2')[1:].mean()
In [32]:
fcad_df = pd.DataFrame.from_records(fcad_eva_list, columns=['MSE', 'RMSE', 'MAE', 'MAPE', 'R_2'])
fcad_df.sort_values(by='R_2')[1:].mean()
Out[32]:
MSE     0.820345
RMSE    0.899216
MAE     0.723321
MAPE    0.013728
R_2     0.967491
dtype: float64
In [ ]:
train, valid = train_test_split(use_data[use_cols], test_size=0.3, random_state=42, shuffle=True)
valid, test = train_test_split(valid, test_size=0.3, random_state=42, shuffle=True)
In [31]:
prediction_model = get_prediction_model()
trainable_model = get_trainable_model(prediction_model)
In [34]:
X = np.expand_dims(train[feature_cols].values, axis=1)
Y = [x for x in train[out_cols].values.T]
Y_valid = [x for x in valid[out_cols].values.T]
In [ ]:
trainable_model.compile(optimizer='adam', loss=None)
hist = trainable_model.fit([X, Y[0], Y[1]], epochs=120, batch_size=8, verbose=1, 
                           validation_data=[np.expand_dims(valid[feature_cols].values, axis=1), Y_valid[0], Y_valid[1]],
                           callbacks=[reduce_lr]
                           )
In [41]:
rst = prediction_model.predict(np.expand_dims(test[feature_cols], axis=1))
rst
Out[41]:
[array([[0.73740077],
        [0.89292204],
        [0.7599046 ],
        [0.67802393],
        [0.6815233 ],
        [0.88627005],
        [0.6121343 ],
        [0.7072234 ],
        [0.8561135 ],
        [0.52762157],
        [0.8325021 ],
        [0.50241977],
        [0.8242289 ],
        [0.68957335],
        [0.6980361 ],
        [0.82116604],
        [0.8566438 ],
        [0.53687835],
        [0.56832707],
        [0.78476715],
        [0.85638577]], dtype=float32),
 array([[0.68600863],
        [0.78454906],
        [0.8179163 ],
        [0.94351083],
        [0.86383885],
        [0.69705516],
        [0.6913491 ],
        [0.80277354],
        [0.93557894],
        [0.82278305],
        [0.82674253],
        [0.93518937],
        [0.8094449 ],
        [0.9206344 ],
        [0.7747319 ],
        [0.9137207 ],
        [0.9491073 ],
        [0.93225   ],
        [0.6185102 ],
        [0.8867341 ],
        [0.82890105]], dtype=float32)]
In [42]:
[np.exp(K.get_value(log_var[0]))**0.5 for log_var in trainable_model.layers[-1].log_vars]
Out[42]:
[0.9991559102070927, 0.9998196796918477]
In [46]:
real_rst.columns
Out[46]:
Index(['挥发分Vad(%)', '固定炭Fcad(%)'], dtype='object')
In [ ]:
for col in out_cols:
    pred_rst[col] = pred_rst[col] * (maxs[col] - mins[col]) + mins[col]
    real_rst[col] = real_rst[col] * (maxs[col] - mins[col]) + mins[col]
In [47]:
y_pred_vad = pred_rst['挥发分Vad(%)'].values.reshape(-1,)
y_pred_fcad = pred_rst['固定炭Fcad(%)'].values.reshape(-1,)
y_true_vad = real_rst['挥发分Vad(%)'].values.reshape(-1,)
y_true_fcad = real_rst['固定炭Fcad(%)'].values.reshape(-1,)
In [56]:
pm25_eva = print_eva(y_true_vad, y_pred_vad, tp='挥发分Vad')
pm10_eva = print_eva(y_true_fcad, y_pred_fcad, tp='固定炭Fcad')
COL: 挥发分Vad, MSE: 3.35E-01,RMSE: 0.579,MAPE: 1.639 %,MAE: 0.504,R_2: 0.87
COL: 固定炭Fcad, MSE: 1.11E+00,RMSE: 1.055,MAPE: 1.497 %,MAE: 0.814,R_2: 0.876
In [ ]:
 
In [ ]: