In [1]:
from math import sqrt
from numpy import concatenate
from matplotlib import pyplot
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Dropout
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
The next cell defines the function `time_series_to_supervised`, which converts time-series data into a dataset for a supervised-learning problem. Its parts are as follows:

- `data`: the input time series, as a list or 2D NumPy array.
- `n_in`: the number of lagged observations used as input, i.e. how many past time steps are fed in. The default of 96 means the observations of the previous 96 time steps are used as input.
- `n_out`: the number of observations used as output, i.e. how many future time steps are predicted. The default of 10 means the observations of the next 10 time steps are predicted.
- `dropnan`: a Boolean indicating whether rows containing NaN values are dropped. Defaults to True.

The function first checks the dimensionality of the input and initializes a few variables. It then creates a new DataFrame `df` to hold the input data, saves the original column names, and creates two empty lists, `cols` and `names`, to store the new feature columns and their names.

Next, the function builds the feature columns and their names. It first appends the original observation sequence to `cols` and the original column names to `names`. It then appends each lagged copy of the sequence to `cols` in turn, building the corresponding column names in the form (original column name)(t-lag). This produces the input-feature part.

The output-feature part is built the same way: each future-shifted copy of the sequence is appended to `cols` in turn, with column names in the form (original column name)(t+horizon).

Finally, all feature columns are concatenated into a new DataFrame `agg`. If `dropnan` is True, rows containing NaN values are dropped, and the processed dataset `agg` is returned.
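To make the resulting column layout concrete, here is a minimal sketch (the toy array and window sizes are invented for illustration) that can be run once the function in the next cell has been defined:

import numpy as np
import pandas as pd

# Toy series: 2 variables, 6 time steps (values are arbitrary)
toy = np.arange(12).reshape(6, 2)

# 2 lagged input steps, 1 future output step
demo = time_series_to_supervised(toy, n_in=2, n_out=1)
print(demo.columns.tolist())
# ['0', '1', '0(t-2)', '1(t-2)', '0(t-1)', '1(t-1)', '0(t+1)', '1(t+1)']
print(demo.shape)  # (3, 8) -- rows made NaN by the shifts are dropped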
In [2]:
def time_series_to_supervised(data, n_in=96, n_out=10, dropnan=True):
    """
    :param data: observation sequence, as a list or 2D NumPy array. Required.
    :param n_in: number of lagged observations used as input (X). May range over [1..len(data)]. Optional. Defaults to 96.
    :param n_out: number of observations used as output (y). May range over [0..len(data)]. Optional. Defaults to 10.
    :param dropnan: Boolean, whether to drop rows with NaN values. Optional. Defaults to True.
    :return: the reframed DataFrame
    """
    n_vars = 1 if type(data) is list else data.shape[1]
    df = pd.DataFrame(data)
    origNames = df.columns
    cols, names = list(), list()
    cols.append(df.shift(0))
    names += [('%s' % origNames[j]) for j in range(n_vars)]
    n_in = max(0, n_in)
    for i in range(n_in, 0, -1):
        time = '(t-%d)' % i
        cols.append(df.shift(i))
        names += [('%s%s' % (origNames[j], time)) for j in range(n_vars)]
    n_out = max(n_out, 0)
    for i in range(1, n_out + 1):
        time = '(t+%d)' % i
        cols.append(df.shift(-i))
        names += [('%s%s' % (origNames[j], time)) for j in range(n_vars)]
    agg = pd.concat(cols, axis=1)
    agg.columns = names
    if dropnan:
        agg.dropna(inplace=True)
    return agg
In [3]:
# Load the data
path1 = r"D:\project\小论文1-基于ICEEMDAN分解的时序高维变化的短期光伏功率预测模型\CEEMAN-PosConv1dbiLSTM-LSTM\模型代码流程\data6.csv"  # path to the data
# The original data was an Excel sheet; for a CSV file, pandas' read_csv() is used instead.
datas1 = pd.DataFrame(pd.read_csv(path1))
# Only columns 3, 23, 16-21 and 27 of the sheet were used; to keep all columns, leave the line below commented out.
# data1 = datas1.iloc[:, np.r_[3, 23, 16:22, 27]]
data1 = datas1.interpolate()
values1 = data1.values
print(data1.head())
print(data1.shape)
        Temp   Humidity       GHI       DHI  Rainfall  Power
0  19.779453  40.025826  3.232706  1.690531       0.0    0.0
1  19.714937  39.605961  3.194991  1.576346       0.0    0.0
2  19.549330  39.608631  3.070866  1.576157       0.0    0.0
3  19.405870  39.680702  3.038623  1.482489       0.0    0.0
4  19.387363  39.319881  2.656474  1.134153       0.0    0.0
(104256, 6)
In [4]:
# data2= data1.drop(['date'], axis = 1)
In [5]:
# Load the reconstructed original data
path_re = r"D:\project\小论文1-基于ICEEMDAN分解的时序高维变化的短期光伏功率预测模型\CEEMAN-PosConv1dbiLSTM-LSTM\模型代码流程\完整的模型代码流程\iceemdan_reconstructed_data_low.csv"  # path to the data
# The original data was an Excel sheet; for a CSV file, pandas' read_csv() is used instead.
data_re = pd.DataFrame(pd.read_csv(path_re))
In [6]:
data_re
Out[6]:
        column_name
0          1.426824
1          1.426819
2          1.426815
3          1.426812
4          1.426810
...             ...
104251     1.629381
104252     1.629328
104253     1.629271
104254     1.629213
104255     1.629152

104256 rows × 1 columns
In [7]:
import matplotlib.pyplot as plt

# Original data, for comparison with the reconstruction
original_data = data1['Power'].values

# Build a time index matching the data
time = range(len(original_data))

# Set up the figure
plt.figure(figsize=(10, 6))

# Plot the original data
# plt.plot(time, original_data, label='Original Data', color='blue')

# Plot the reconstructed data
plt.plot(data_re[:], label='Reconstructed Data', color='red')

# Add a title and axis labels (the file loaded above is the low-frequency reconstruction)
plt.title('Comparison between Original and reconstructed_data_low')
plt.xlabel('Time')
plt.ylabel('Power')
plt.legend()

# Show the figure
plt.show()
In [8]:
data3 = data1.iloc[:, :5]  # keep the five meteorological columns (Temp, Humidity, GHI, DHI, Rainfall)
In [9]:
import pandas as pd

# Build DataFrames for data3 and the reconstructed series
data3_df = pd.DataFrame(data3)
imf1_df = pd.DataFrame(data_re)

# Concatenate data3_df and imf1_df column-wise
merged_df = pd.concat([data3_df, imf1_df], axis=1)

# Trim to 104256 rows (the full length of the dataset)
merged_df = merged_df.iloc[:104256]

# Print the merged table
print(merged_df)
             Temp   Humidity       GHI       DHI  Rainfall  column_name
0       19.779453  40.025826  3.232706  1.690531       0.0     1.426824
1       19.714937  39.605961  3.194991  1.576346       0.0     1.426819
2       19.549330  39.608631  3.070866  1.576157       0.0     1.426815
3       19.405870  39.680702  3.038623  1.482489       0.0     1.426812
4       19.387363  39.319881  2.656474  1.134153       0.0     1.426810
...           ...        ...       ...       ...       ...          ...
104251  13.303740  34.212711  1.210789  0.787026       0.0     1.629381
104252  13.120920  34.394939  2.142980  1.582670       0.0     1.629328
104253  12.879215  35.167400  1.926214  1.545889       0.0     1.629271
104254  12.915867  35.359989  1.317695  0.851529       0.0     1.629213
104255  13.134816  34.500034  1.043269  0.597816       0.0     1.629152

[104256 rows x 6 columns]
In [10]:
merged_df.shape
Out[10]:
(104256, 6)
In [11]:
# Normalize with MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
scaledData1 = scaler.fit_transform(merged_df)
print(scaledData1.shape)
(104256, 6)
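One caveat worth noting: fitting the scaler on the full dataset lets statistics from the test period influence the training data. A common refinement, not used in the original run, is to fit on the training span only; a minimal sketch, assuming the same 80/20 split used later:

# Hypothetical alternative: fit the scaler on the training span only,
# then apply the same transform to the held-out span (avoids leakage).
split = int(len(merged_df) * 0.8)
scaler_nl = MinMaxScaler(feature_range=(0, 1))
train_scaled = scaler_nl.fit_transform(merged_df.iloc[:split])
test_scaled = scaler_nl.transform(merged_df.iloc[split:])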
In [12]:
n_steps_in = 96   # length of the history window
n_steps_out = 1   # length of the forecast horizon
processedData1 = time_series_to_supervised(scaledData1, n_steps_in, n_steps_out)
print(processedData1.head())
            0         1         2         3    4         5   0(t-96)  \
96   0.555631  0.349673  0.190042  0.040558  0.0  0.836699  0.490360
97   0.564819  0.315350  0.211335  0.044613  0.0  0.836762  0.489088
98   0.576854  0.288321  0.229657  0.047549  0.0  0.836826  0.485824
99   0.581973  0.268243  0.247775  0.053347  0.0  0.836891  0.482997
100  0.586026  0.264586  0.266058  0.057351  0.0  0.836956  0.482632

      1(t-96)   2(t-96)   3(t-96)  ...    2(t-1)    3(t-1)  4(t-1)    5(t-1)  \
96   0.369105  0.002088  0.002013  ...  0.166009  0.036794     0.0  0.836635
97   0.364859  0.002061  0.001839  ...  0.190042  0.040558     0.0  0.836699
98   0.364886  0.001973  0.001839  ...  0.211335  0.044613     0.0  0.836762
99   0.365615  0.001950  0.001697  ...  0.229657  0.047549     0.0  0.836826
100  0.361965  0.001679  0.001167  ...  0.247775  0.053347     0.0  0.836891

       0(t+1)    1(t+1)    2(t+1)    3(t+1)  4(t+1)    5(t+1)
96   0.564819  0.315350  0.211335  0.044613     0.0  0.836762
97   0.576854  0.288321  0.229657  0.047549     0.0  0.836826
98   0.581973  0.268243  0.247775  0.053347     0.0  0.836891
99   0.586026  0.264586  0.266058  0.057351     0.0  0.836956
100  0.590772  0.258790  0.282900  0.060958     0.0  0.837022

[5 rows x 588 columns]
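The 588 columns follow directly from the window sizes: 6 variables × (96 lags + the current step + 1 future step). A quick sanity check, reusing names from the cells above:

# 6 variables, 96 lagged steps, the current step, and 1 future step
n_features = scaledData1.shape[1]
assert processedData1.shape[1] == n_features * (n_steps_in + 1 + n_steps_out)  # 6 * 98 == 588
# dropnan removes the 96 leading and 1 trailing rows made NaN by the shifts
assert processedData1.shape[0] == scaledData1.shape[0] - n_steps_in - n_steps_out  # 104256 - 97 == 104159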
In [13]:
# processedData1.to_csv('processedData1.csv', index=False)
In [14]:
# Inputs: all lagged columns from 0(t-96) through 5(t-1);
# target: column 5 (the reconstructed power) at the current step t
data_x = processedData1.loc[:, '0(t-96)':'5(t-1)']
data_y = processedData1.loc[:, '5']
In [15]:
data_x.shape
Out[15]:
(104159, 576)
In [16]:
data_y
Out[16]:
96        0.836699
97        0.836762
98        0.836826
99        0.836891
100       0.836956
            ...
104250    0.989547
104251    0.989508
104252    0.989466
104253    0.989423
104254    0.989378
Name: 5, Length: 104159, dtype: float64
In [17]:
data_y.shape
Out[17]:
(104159,)
In [18]:
# 7. Split into training and test sets
test_size = int(len(data_x) * 0.2)

# Compute the index ranges for the training and test sets
train_indices = range(len(data_x) - test_size)
test_indices = range(len(data_x) - test_size, len(data_x))

# Split the data chronologically by those index ranges, reshaping the
# inputs to 3D [samples, timesteps, features]
train_X1 = data_x.iloc[train_indices].values.reshape((-1, n_steps_in, scaledData1.shape[1]))
test_X1 = data_x.iloc[test_indices].values.reshape((-1, n_steps_in, scaledData1.shape[1]))
train_y = data_y.iloc[train_indices].values
test_y = data_y.iloc[test_indices].values

# For an identical random split across runs, set random_state to a fixed integer:
# train_X1, test_X1, train_y, test_y = train_test_split(data_x.values, data_y.values, test_size=0.2, random_state=343)

# train_X1/test_X1 are already 3D, so these reshapes are no-ops kept for clarity
train_X = train_X1.reshape((train_X1.shape[0], n_steps_in, scaledData1.shape[1]))
test_X = test_X1.reshape((test_X1.shape[0], n_steps_in, scaledData1.shape[1]))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)

# The data is split 80/20 into training and test sets, the inputs are lifted to
# three dimensions [samples, timesteps, features], and the shapes are printed.
(83328, 96, 6) (83328,) (20831, 96, 6) (20831,)
In [19]:
train_X1.shape
Out[19]:
(83328, 96, 6)
In [20]:
import keras
from keras.models import Sequential
from keras.layers import LSTM, Dense

# Build the model
model = Sequential()

# Single LSTM layer
model.add(LSTM(units=128, input_shape=(96, 6)))

# Output layer
model.add(Dense(1))

# Compile the model
model.compile(optimizer='adam', loss='mean_squared_error')

# Inspect the model structure
model.summary()
d:\Anaconda3\lib\site-packages\keras\src\layers\rnn\rnn.py:204: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(**kwargs)
Model: "sequential"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓
┃ Layer (type)                    ┃ Output Shape           ┃       Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩
│ lstm (LSTM)                     │ (None, 128)            │        69,120 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense (Dense)                   │ (None, 1)              │           129 │
└─────────────────────────────────┴────────────────────────┴───────────────┘
Total params: 69,249 (270.50 KB)
Trainable params: 69,249 (270.50 KB)
Non-trainable params: 0 (0.00 B)
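The 69,120 LSTM parameters can be derived by hand: an LSTM has four gates, each with a weight matrix over the concatenated input and hidden state plus a bias vector. A quick check (variable names chosen for the example):

# LSTM parameter count: 4 gates, each with weights over
# (input features + hidden units) plus one bias per unit
n_input, n_hidden = 6, 128
lstm_params = 4 * (n_hidden * (n_input + n_hidden) + n_hidden)
dense_params = n_hidden * 1 + 1  # output-layer weights + bias
print(lstm_params, dense_params)  # 69120 129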
In [21]:
# Compile and train the model
model.compile(optimizer='adam', loss='mean_squared_error')

from keras.callbacks import EarlyStopping

# Define early stopping
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=0, mode='min')

# Fit the model with the early-stopping callback
history = model.fit(train_X, train_y, epochs=100, batch_size=64,
                    validation_data=(test_X, test_y), callbacks=[early_stopping])

# Predict on the test set
lstm_pred = model.predict(test_X)
# (the predictions are reshaped back to the original scale in the cells below)
Epoch 1/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 122s 92ms/step - loss: 0.0156 - val_loss: 1.0318e-05
Epoch 2/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 110s 85ms/step - loss: 1.2280e-05 - val_loss: 2.9811e-06
Epoch 3/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 114s 87ms/step - loss: 9.1935e-06 - val_loss: 2.5579e-06
Epoch 4/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 128s 98ms/step - loss: 1.0443e-05 - val_loss: 8.4623e-06
Epoch 5/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 118s 90ms/step - loss: 1.1108e-05 - val_loss: 8.1167e-06
Epoch 6/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 111s 85ms/step - loss: 5.3451e-06 - val_loss: 2.4689e-06
Epoch 7/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 117s 90ms/step - loss: 1.5962e-05 - val_loss: 2.2134e-06
Epoch 8/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 124s 95ms/step - loss: 5.3290e-06 - val_loss: 3.5285e-07
Epoch 9/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 126s 97ms/step - loss: 4.5184e-06 - val_loss: 1.2596e-07
Epoch 10/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 128s 98ms/step - loss: 1.6976e-06 - val_loss: 7.1095e-06
Epoch 11/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 123s 95ms/step - loss: 6.6386e-06 - val_loss: 1.0392e-07
Epoch 12/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 126s 97ms/step - loss: 2.3165e-06 - val_loss: 8.4822e-07
Epoch 13/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 120s 92ms/step - loss: 3.5823e-06 - val_loss: 4.9285e-08
Epoch 14/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 117s 90ms/step - loss: 3.1791e-06 - val_loss: 2.2294e-07
Epoch 15/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 124s 95ms/step - loss: 2.9977e-06 - val_loss: 3.9852e-06
Epoch 16/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 128s 98ms/step - loss: 2.3874e-06 - val_loss: 1.3594e-07
Epoch 17/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 135s 103ms/step - loss: 3.1801e-07 - val_loss: 1.6932e-07
Epoch 18/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 128s 98ms/step - loss: 1.5647e-06 - val_loss: 2.1397e-08
Epoch 19/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 135s 104ms/step - loss: 1.4188e-06 - val_loss: 1.4569e-07
Epoch 20/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 128s 99ms/step - loss: 1.1043e-06 - val_loss: 5.9704e-07
Epoch 21/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 135s 103ms/step - loss: 2.0067e-06 - val_loss: 2.0218e-06
Epoch 22/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 145s 111ms/step - loss: 1.9982e-06 - val_loss: 2.2618e-07
Epoch 23/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 136s 104ms/step - loss: 1.4178e-06 - val_loss: 1.3009e-06
Epoch 24/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 130s 100ms/step - loss: 2.7170e-06 - val_loss: 1.2247e-08
Epoch 25/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 124s 95ms/step - loss: 1.8664e-06 - val_loss: 5.6499e-07
Epoch 26/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 89s 68ms/step - loss: 1.3434e-06 - val_loss: 1.2509e-08
Epoch 27/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 85s 65ms/step - loss: 1.8632e-06 - val_loss: 5.3179e-07
Epoch 28/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 82s 63ms/step - loss: 1.2746e-06 - val_loss: 9.0354e-08
Epoch 29/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 83s 63ms/step - loss: 1.5440e-06 - val_loss: 1.2604e-07
Epoch 30/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 88s 68ms/step - loss: 1.2646e-06 - val_loss: 2.5639e-07
Epoch 31/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 81s 62ms/step - loss: 1.3377e-06 - val_loss: 4.0479e-08
Epoch 32/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 48s 37ms/step - loss: 7.9140e-07 - val_loss: 1.1824e-06
Epoch 33/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 48s 37ms/step - loss: 2.1865e-06 - val_loss: 4.2140e-06
Epoch 34/100
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 51s 39ms/step - loss: 1.4884e-06 - val_loss: 1.8359e-06
651/651 ━━━━━━━━━━━━━━━━━━━━ 10s 16ms/step
In [22]:
lstm_pred.shape
Out[22]:
(20831, 1)
In [23]:
test_y.shape
Out[23]:
(20831,)
In [24]:
test_y1 = test_y.reshape(20831, 1)  # add a trailing axis so the true values match the prediction shape
In [25]:
test_y1
Out[25]:
array([[0.65620206],
       [0.6565139 ],
       [0.65682633],
       ...,
       [0.98946626],
       [0.98942303],
       [0.98937795]])
In [26]:
# Tile the single prediction column across all 6 feature columns so the scaler can invert it
results1 = np.broadcast_to(lstm_pred, (20831, 6))
In [27]:
# Same trick for the true values
test_y2 = np.broadcast_to(test_y1, (20831, 6))
In [28]:
# Invert the normalization
inv_forecast_y = scaler.inverse_transform(results1)
inv_test_y = scaler.inverse_transform(test_y2)
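Broadcasting the single target column across all six features works because MinMaxScaler inverts each column independently, and only column 5 of the result is read later. An equivalent, more direct sketch (variable names are illustrative) inverts just the target column with its own fitted min/max:

# Hypothetical equivalent: invert only the target column (index 5)
# using that column's min/max stored on the fitted scaler
t_min, t_max = scaler.data_min_[5], scaler.data_max_[5]
inv_pred_direct = lstm_pred[:, 0] * (t_max - t_min) + t_min
# inv_pred_direct matches inv_forecast_y[:, 5]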
In [29]:
inv_test_y
Out[29]:
array([[2.81937911e+01, 6.84129659e+01, 9.24487326e+02, 4.31993807e+02,
        1.56176147e+01, 1.19628092e+00],
       [2.82096130e+01, 6.84437997e+01, 9.24926524e+02, 4.32198926e+02,
        1.56250366e+01, 1.19668613e+00],
       [2.82254649e+01, 6.84746920e+01, 9.25366555e+02, 4.32404434e+02,
        1.56324725e+01, 1.19709211e+00],
       ...,
       [4.51026009e+01, 1.01364948e+02, 1.39385702e+03, 6.51203592e+02,
        2.35493057e+01, 1.62932764e+00],
       [4.51004072e+01, 1.01360673e+02, 1.39379613e+03, 6.51175153e+02,
        2.35482767e+01, 1.62927146e+00],
       [4.50981204e+01, 1.01356216e+02, 1.39373265e+03, 6.51145506e+02,
        2.35472040e+01, 1.62921289e+00]])
In [30]:
# Compute the root mean squared error on the inverse-transformed target column
rmse = sqrt(mean_squared_error(inv_test_y[:, 5], inv_forecast_y[:, 5]))
print('Test RMSE: %.3f' % rmse)

# Plot true vs. predicted values
plt.figure(figsize=(16, 8))
plt.plot(inv_test_y[:, 5], label='true')
plt.plot(inv_forecast_y[:, 5], label='pre')
plt.legend()
plt.show()
Test RMSE: 0.002
In [31]:
# Evaluation metrics: MSE, RMSE, MAE and R² via sklearn
from math import sqrt
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score

# Note: MSE / MAE / RMSE are computed on the normalized values,
# while the R² score is computed on the inverse-transformed arrays.
print('mean_squared_error:', mean_squared_error(lstm_pred, test_y))    # MSE
print("mean_absolute_error:", mean_absolute_error(lstm_pred, test_y))  # MAE
print("rmse:", sqrt(mean_squared_error(lstm_pred, test_y)))
print("r2 score:", r2_score(inv_test_y[:], inv_forecast_y[:]))
mean_squared_error: 1.8358609523038586e-06
mean_absolute_error: 0.0012240899816947145
rmse: 0.0013549394644425479
r2 score: 0.9998451201868883
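Because the MSE/MAE/RMSE above are on the normalized scale while R² uses the inverse-transformed arrays, the numbers are not directly comparable. A small sketch that puts all four on the same (inverse-transformed) scale, reusing arrays from the cells above:

# Compute all metrics on the inverse-transformed target column only
y_true = inv_test_y[:, 5]
y_pred = inv_forecast_y[:, 5]
print('MSE :', mean_squared_error(y_true, y_pred))
print('MAE :', mean_absolute_error(y_true, y_pred))
print('RMSE:', sqrt(mean_squared_error(y_true, y_pred)))
print('R2  :', r2_score(y_true, y_pred))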
In [32]:
# Wrap the inverse-transformed true values in a DataFrame
df1 = pd.DataFrame(inv_test_y[:, 5], columns=['column_name'])
In [33]:
# Save the DataFrame to a CSV file at the given path and file name
df1.to_csv('低频_test.csv', index=False)
In [34]:
# Wrap the inverse-transformed predictions in a DataFrame
df2 = pd.DataFrame(inv_forecast_y[:, 5], columns=['column_name'])
In [35]:
# Save the DataFrame to a CSV file at the given path and file name
df2.to_csv('低频_forecast.csv', index=False)
In [ ]: