1. Problem Description
1.1 Problem Statement
We are asked to build a regression forecast of the CO2 emissions of an electric power sector, with the following requirements.
The data span January 1973 to December 2021 and are recorded monthly. The dataset contains nine indicators, including coal, natural gas, and distillate fuel, some of which are marked "Not Available" in the early years. The task is to forecast the emissions of each of the following components for the six months starting in January 2022.
The CO2 emissions are broken down into the following nine indicators:
Coal Electric Power Sector CO2 Emissions
Natural Gas Electric Power Sector CO2 Emissions
Distillate Fuel, Including Kerosene-Type Jet Fuel, Oil Electric Power Sector CO2 Emissions
Petroleum Coke Electric Power Sector CO2 Emissions
Residual Fuel Oil Electric Power Sector CO2 Emissions
Petroleum Electric Power Sector CO2 Emissions
Geothermal Energy Electric Power Sector CO2 Emissions
Non-Biomass Waste Electric Power Sector CO2 Emissions
Total Energy Electric Power Sector CO2 Emissions
1.2 Solution Design
Because the nine indicators are correlated, predicting the future values of one indicator must take into account not only its own history but also the influence of the other indicators. The data volume is large and the time span is long, so we need machine learning models with strong regression capability that can handle time-series prediction.
1.3 Overview of Methods
After discussion, this experiment implements the multivariate time-series regression task independently with three neural network models, summarized below:
Model | Full name                       | Key characteristic
BP    | Backpropagation network         | Learns a nonlinear mapping through repeated training
TCN   | Temporal convolutional network  | Causal convolutions for temporal prediction
LSTM  | Long short-term memory network  | Gated structure preserves long-term memory
By building the models from scratch, tuning their performance, and comparing them, the group members consolidated the fundamentals of machine learning, improved their practical skills, and gained a deeper understanding of the theory and the characteristics of these models.
2. BP Neural Network (Backpropagation Neural Network)
2.1 Model Principle
A BP neural network is a feed-forward network trained with the backpropagation algorithm. It consists of an input layer, a hidden layer, and an output layer. Each neuron is connected to all neurons in the previous layer, and each connection carries a weight; the network learns the input-output mapping by adjusting these weights. During training, the error between the model output and the target output is computed, propagated backwards through the network, and used to update the parameters so that the error is minimized.
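For reference, a minimal summary of the computation implemented in the code below (one tanh hidden layer, mean-squared-error loss, plain gradient descent):

$$Z_1 = XW_1 + b_1,\quad A_1 = \tanh(Z_1),\quad \hat{Y} = A_1 W_2 + b_2,\qquad L = \frac{1}{m}\sum(\hat{Y}-Y)^2,\qquad \theta \leftarrow \theta - \eta\,\frac{\partial L}{\partial \theta},\ \ \theta\in\{W_1,b_1,W_2,b_2\}.$$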
In this experiment we used a hidden layer of size 500, a learning rate of 0.01, and 2000 training epochs.
2.2.1 Data Processing
Read the data from the xlsx file and split it; the code below uses the first 75% as the training set and the remaining 25% as the test set.
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler

# Read the xlsx file
data = pd.read_excel("data.xlsx")
side = 192    # length of the segment with missing values
side2 = 587   # length of the entire known data
seq_len = 10
batch_size = 64
data = data.iloc[1:side2 + 1]

# Extract all column names except the first (date) column
col_names = data.columns.values.tolist()
col_names = [col_names[i] for i in range(1, len(col_names))]
data_list = np.array(data[col_names].values.tolist())

# Handle missing values: replace "Not Available" with the column mean
data_list[data_list == "Not Available"] = np.nan
data_list = data_list.astype(float)
imputer = SimpleImputer(strategy="mean", fill_value=np.nan)
data_imputed = imputer.fit_transform(data_list)

# Standardize
scaler = StandardScaler()
data_normalized = scaler.fit_transform(data_imputed)

# Split into training and test sets
data_len = len(data_normalized)
train_data = data_normalized[:int(0.75 * data_len)]   # first 75% as the training set
test_data = data_normalized[int(0.75 * data_len):]    # remaining 25% as the test set

2.2.2 Plotting Functions
# Plot predictions against ground truth
def plot_results(X_test, Y_test, W1, b1, W2, b2, scaler, col_names):
    Y_pred, _ = forward(X_test, W1, b1, W2, b2)
    Y_pred_original = scaler.inverse_transform(Y_pred)
    Y_test_original = scaler.inverse_transform(Y_test)
    f, ax = plt.subplots(nrows=3, ncols=3, figsize=(20, 10))
    for i in range(3):
        for j in range(3):
            ax[i, j].plot(Y_pred_original[:, 3 * i + j], label="predictions")
            ax[i, j].plot(Y_test_original[:, 3 * i + j], label="true")
            ax[i, j].set_title(col_names[3 * i + j])
            ax[i, j].legend()
    plt.tight_layout()
    plt.show()

# Plot the loss curves
def plot_loss_curve(training_losses, testing_losses):
    plt.figure(figsize=(10, 6))
    plt.plot(training_losses, label="Training Loss", color="blue")
    plt.plot(testing_losses, label="Testing Loss", color="orange")
    plt.title("Training and Testing Loss Over Epochs")
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.legend()
    plt.show()
2.2.3 BP Network Definition
# Parameter initialization
def initialize_parameters(input_size, hidden_size, output_size):
    np.random.seed(42)
    W1 = np.random.randn(input_size, hidden_size) * 0.01
    b1 = np.zeros((1, hidden_size))
    W2 = np.random.randn(hidden_size, output_size) * 0.01
    b2 = np.zeros((1, output_size))
    return W1, b1, W2, b2

# Forward pass
def forward(X, W1, b1, W2, b2):
    Z1 = np.dot(X, W1) + b1
    A1 = np.tanh(Z1)
    Z2 = np.dot(A1, W2) + b2
    return Z2, A1

# Loss function (mean squared error)
def compute_loss(Y, Y_pred):
    m = Y.shape[0]
    loss = np.sum((Y - Y_pred) ** 2) / m
    return loss

# Backward pass
def backward(X, A1, Y, Y_pred, W1, W2, b1, b2):
    m = X.shape[0]
    dZ2 = Y_pred - Y
    dW2 = np.dot(A1.T, dZ2) / m
    db2 = np.sum(dZ2, axis=0, keepdims=True) / m
    dA1 = np.dot(dZ2, W2.T)
    dZ1 = dA1 * (1 - A1 ** 2)   # A1 is already tanh(Z1), so tanh' = 1 - A1^2
    dW1 = np.dot(X.T, dZ1) / m
    db1 = np.sum(dZ1, axis=0, keepdims=True) / m
    return dW1, db1, dW2, db2

# Gradient-descent parameter update
def update_parameters(W1, b1, W2, b2, dW1, db1, dW2, db2, learning_rate):
    W1 -= learning_rate * dW1
    b1 -= learning_rate * db1
    W2 -= learning_rate * dW2
    b2 -= learning_rate * db2
    return W1, b1, W2, b2

# Train the network
def train_neural_network(X_train, Y_train, X_test, Y_test, input_size, hidden_size, output_size, epochs, learning_rate):
    W1, b1, W2, b2 = initialize_parameters(input_size, hidden_size, output_size)
    training_losses = []
    testing_losses = []
    start_time = time.time()
    for epoch in range(epochs):
        # Forward pass on the training set
        Y_pred_train, A1_train = forward(X_train, W1, b1, W2, b2)
        train_loss = compute_loss(Y_train, Y_pred_train)
        training_losses.append(train_loss)
        # Forward pass on the test set
        Y_pred_test, _ = forward(X_test, W1, b1, W2, b2)
        test_loss = compute_loss(Y_test, Y_pred_test)
        testing_losses.append(test_loss)
        # Backward pass and parameter update
        dW1, db1, dW2, db2 = backward(X_train, A1_train, Y_train, Y_pred_train, W1, W2, b1, b2)
        W1, b1, W2, b2 = update_parameters(W1, b1, W2, b2, dW1, db1, dW2, db2, learning_rate)
        # Print the loss for each epoch
        print(f"Epoch {epoch + 1}/{epochs} - Training Loss: {train_loss:.10f} - Testing Loss: {test_loss:.10f}")
    end_time = time.time()
    training_duration = end_time - start_time
    print(f"Time elapsed: {training_duration:.2f} s")
    # Plot the curves after training
    plot_loss_curve(training_losses, testing_losses)
    plot_results(X_test, Y_test, W1, b1, W2, b2, scaler, col_names)
    return W1, b1, W2, b2, training_losses, testing_losses
2.2.4 Training Procedure and Performance
# Prepare the training and test data as network inputs:
# each input row is paired with the row seq_len months later as its target
X_train = train_data[:-seq_len]
Y_train = train_data[seq_len:]
X_test = test_data[:-seq_len]
Y_test = test_data[seq_len:]

# Parameter settings
input_size = X_train.shape[1]
hidden_size = 500
output_size = Y_train.shape[1]
epochs = 2000
learning_rate = 0.01

# Train the network
W1_final, b1_final, W2_final, b2_final, training_losses, testing_losses = train_neural_network(
    X_train, Y_train, X_test, Y_test, input_size, hidden_size, output_size, epochs, learning_rate)

# After training, predict on the training and test sets with the trained model
Y_pred_train, _ = forward(X_train, W1_final, b1_final, W2_final, b2_final)
Y_pred_test, _ = forward(X_test, W1_final, b1_final, W2_final, b2_final)

# Transform the predictions back to the original scale
Y_pred_train_original = scaler.inverse_transform(Y_pred_train)
Y_pred_test_original = scaler.inverse_transform(Y_pred_test)

# Transform the true values back to the original scale
Y_train_original = scaler.inverse_transform(Y_train)
Y_test_original = scaler.inverse_transform(Y_test)

# Compute MAE and MSE
mse_on_train = np.mean((Y_train_original - Y_pred_train_original) ** 2)
mse_on_test = np.mean((Y_test_original - Y_pred_test_original) ** 2)
mae_on_train = np.mean(np.abs(Y_train_original - Y_pred_train_original))
mae_on_test = np.mean(np.abs(Y_test_original - Y_pred_test_original))

# Print the final MAE and MSE
print(f"mse_on_train: {mse_on_train:.10f} mse_on_test: {mse_on_test:.10f}")
print(f"mae_on_train: {mae_on_train:.10f} mae_on_test: {mae_on_test:.10f}")
3. TCN (Temporal Convolutional Network)
3.1 Model Principle
A TCN is a convolution-based neural network that is particularly suited to sequential data. Compared with traditional recurrent networks (RNNs) and LSTMs, a TCN uses convolutional layers to capture patterns in the sequence, which helps it model long-range dependencies.
Structurally, a TCN usually consists of one or more convolutional layers whose receptive field grows layer by layer, so that patterns at different time scales can be captured. Residual connections are also used to improve gradient flow and make deep networks easier to train.
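As a rough check of the configuration used below (kernel size k = 3, four temporal blocks with dilations 1, 2, 4, 8, and two convolutions per block), the receptive field of the last output step is

$$R = 1 + 2(k-1)\sum_{i=0}^{3}2^{i} = 1 + 2\cdot 2\cdot 15 = 61,$$

which comfortably covers the 16-step input windows used in this experiment.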
3.2.1 Data Processing
In the first approach, the BP network was fed the whole history directly. To make fuller use of the temporal structure of the data and to speed up training, the TCN and LSTM instead split the data with a time window.
Sliding window (rolling window): the time series is divided into many windows, and training and testing are carried out within each window. When the series shows large fluctuations or long-period seasonal variation, a sliding window captures these features better.
For the TCN, the first 80% of the data are again used for training, with a window size of 16; a quick count of the resulting windows is sketched below.
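As a quick sanity check of the sample counts (a minimal sketch, assuming the same 395-row slice, window size, and 80/20 split as the listing below):

rows = 587 - 192                              # months actually used (side2 - side) -> 395
num_windows = rows - 16                       # one (window, target) pair per position -> 379
train_windows = int(0.8 * num_windows)        # -> 303 training pairs
test_windows = num_windows - train_windows    # -> 76 test pairs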
import pandas as pd
import numpy as np
import torch
from torch import optim
from torch.utils.data import Dataset, DataLoader, TensorDataset
import torch.nn as nn
from sklearn.preprocessing import StandardScaler, Normalizer
import matplotlib.pyplot as plt

# Split the series into sliding windows: each window of length seq_len
# is paired with the immediately following row as its label
def windows_split(data, seq_len):
    res = []
    label = []
    for i in range(len(data) - seq_len):
        res.append(data[i:i + seq_len])
        label.append(data[i + seq_len])
    res = np.array(res).astype(np.float32)
    label = np.array(label).astype(np.float32)
    return res, label

data = pd.read_excel("data.xlsx")
side = 192    # length of the segment with missing values
side2 = 587   # length of the entire known data
seq_len = 16
batch_size = 64

# Extract all column names except the first (date) column
col_names = data.columns.values.tolist()
col_names = [col_names[i] for i in range(1, len(col_names))]
data.replace("Not Available", np.nan, inplace=True)
interpolated = data[col_names].interpolate(method="spline", order=3)   # computed but not used below

data_list = np.array(data[col_names].values.tolist())
scalar = StandardScaler()
data_list = scalar.fit_transform(data_list)
data_list[np.isnan(data_list)] = 0   # remaining missing values set to 0 after standardization

data_split, label_split = windows_split(data_list[side:side2], seq_len)
data_split = np.transpose(data_split, (0, 2, 1))   # (N, features, seq_len) as expected by Conv1d
length = data_split.shape[0]

data_train = torch.Tensor(data_split[0:int(0.8 * length), :])
label_train = torch.Tensor(label_split[0:int(0.8 * length)])
data_test = torch.Tensor(data_split[int(0.8 * length):int(length), :])
label_test = torch.Tensor(label_split[int(0.8 * length):label_split.shape[0]])

dataset_train = TensorDataset(data_train, label_train)
dataset_test = TensorDataset(data_test, label_test)
train_loader = DataLoader(dataset_train, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset_test, batch_size=batch_size, shuffle=False)

input_size = 9
output_size = 9
num_channels = [32, 64, 128, 256]
kernel_size = 3
dropout = 0
num_epochs = 200

3.2.2 Model Definition
Each TCN block is defined as [conv, chomp, relu, dropout] × 2. Learning rate: 0.0001; training epochs: 200.
import torch
import torch.nn as nn
from torch.nn.utils import weight_norm

class Chomp1d(nn.Module):
    def __init__(self, chomp_size):
        super(Chomp1d, self).__init__()
        self.chomp_size = chomp_size

    def forward(self, x):
        return x[:, :, :-self.chomp_size].contiguous()

class TemporalBlock(nn.Module):
    def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2):
        super(TemporalBlock, self).__init__()
        self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size,
                                           stride=stride, padding=padding, dilation=dilation))
        self.chomp1 = Chomp1d(padding)
        self.relu1 = nn.ReLU()
        self.dropout1 = nn.Dropout(dropout)
        self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs, kernel_size,
                                           stride=stride, padding=padding, dilation=dilation))
        self.chomp2 = Chomp1d(padding)
        self.relu2 = nn.ReLU()
        self.dropout2 = nn.Dropout(dropout)
        self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.dropout1,
                                 self.conv2, self.chomp2, self.relu2, self.dropout2)
        self.downsample = nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
        self.relu = nn.ReLU()
        self.init_weights()

    def init_weights(self):
        self.conv1.weight.data.normal_(0, 0.01)
        self.conv2.weight.data.normal_(0, 0.01)
        if self.downsample is not None:
            self.downsample.weight.data.normal_(0, 0.01)

    def forward(self, x):
        out = self.net(x)
        res = x if self.downsample is None else self.downsample(x)
        return self.relu(out + res)

class TemporalConvNet(nn.Module):
    def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):
        super(TemporalConvNet, self).__init__()
        layers = []
        num_levels = len(num_channels)
        for i in range(num_levels):
            dilation_size = 2 ** i
            in_channels = num_inputs if i == 0 else num_channels[i - 1]
            out_channels = num_channels[i]
            layers += [TemporalBlock(in_channels, out_channels, kernel_size, stride=1, dilation=dilation_size,
                                     padding=(kernel_size - 1) * dilation_size, dropout=dropout)]
        self.network = nn.Sequential(*layers)

    def forward(self, x):
        return self.network(x)

class TCN(nn.Module):
    def __init__(self, input_size, output_size, num_channels, kernel_size, dropout):
        super(TCN, self).__init__()
        self.tcn = TemporalConvNet(input_size, num_channels, kernel_size=kernel_size, dropout=dropout)
        self.linear = nn.Linear(num_channels[-1], output_size)

    def forward(self, inputs):
        """Inputs have to have dimension (N, C_in, L_in)"""
        y1 = self.tcn(inputs)   # input should have dimension (N, C, L)
        o = self.linear(y1[:, :, -1])
        return o
3.2.3 Training Procedure and Performance
model = TCN(input_size, output_size, num_channels, kernel_size, dropout)
optimizer = optim.Adam(model.parameters(), lr=1e-4)
criterion = nn.MSELoss()
criterion2 = nn.L1Loss()

loss_train_list = []
loss_test_list = []
for i in range(num_epochs):
    model.train()
    n = 0
    loss_total = 0
    for data, label in train_loader:
        optimizer.zero_grad()
        pred = model(data)
        loss = criterion(pred, label)
        loss.backward()
        optimizer.step()
        n += 1
        loss_total += loss.item()
    loss_total /= n
    loss_train_list.append(loss_total)

    model.eval()
    loss_test_total = 0
    n = 0
    for data, label in test_loader:
        with torch.no_grad():
            pred = model(data)
            loss = criterion(pred, label)
        loss_test_total += loss.item()
        n += 1
    loss_test_total /= n
    loss_test_list.append(loss_test_total)
    print("epoch:{0}/{1} loss_train:{2} loss_test:{3}".format(i + 1, num_epochs, loss_total, loss_test_total))

# Predict on the test set and transform back to the original scale
model.eval()
prediction = model(data_test)
prediction = prediction.detach().numpy()
label_test = label_test.detach().numpy()
prediction = scalar.inverse_transform(prediction)
label_test = scalar.inverse_transform(label_test)

# Plot predictions against ground truth for the nine indicators
f, ax = plt.subplots(nrows=3, ncols=3, figsize=(10, 10))
for i in range(3):
    for j in range(3):
        ax[i, j].plot(prediction[:, 3 * i + j], label="predictions")
        ax[i, j].plot(label_test[:, 3 * i + j], label="true")
        ax[i, j].set_title(col_names[3 * i + j])
        ax[i, j].legend()
plt.tight_layout()
plt.show()

plt.plot(loss_test_list, label="loss_on_test")
plt.plot(loss_train_list, label="loss_on_train")
plt.legend()
plt.show()

# Compute MSE / RMSE / MAE on the training and test sets (original scale)
prediction_train = model(data_train)
prediction_train = prediction_train.detach().numpy()
prediction_train = scalar.inverse_transform(prediction_train)
label_train = scalar.inverse_transform(label_train)

mse_on_train = criterion(torch.Tensor(prediction_train), torch.Tensor(label_train))
rmse_on_train = torch.sqrt(mse_on_train)
mae_on_train = criterion2(torch.Tensor(prediction_train), torch.Tensor(label_train))
mse_on_test = criterion(torch.Tensor(prediction), torch.Tensor(label_test))
rmse_on_test = torch.sqrt(mse_on_test)
mae_on_test = criterion2(torch.Tensor(prediction), torch.Tensor(label_test))

print("mse_on_train:{0} mse_on_test:{1}".format(mse_on_train, mse_on_test))
print("rmse_on_train:{0} rmse_on_test:{1}".format(rmse_on_train, rmse_on_test))
print("mae_on_train:{0} mae_on_test:{1}".format(mae_on_train, mae_on_test))

# The commented-out block below sketches recursive forecasting of the next 6 months:
# the last known window is fed to the model, the prediction is appended to form the
# next window, and the fitted series plus the 6 forecast steps are plotted.
# data_split = torch.Tensor(data_split)
# label_split = torch.Tensor(label_split)
# prediction_rest = []
# windows = torch.cat((data_split[-1, :, 1:], label_split[-1].unsqueeze(1)), dim=1).unsqueeze(0)
# for i in range(6):
#     pred = model(windows)
#     prediction_rest.append(pred.detach().numpy().squeeze())
#     windows = torch.cat((windows[-1, :, 1:], torch.transpose(pred, 0, 1)), dim=1).unsqueeze(0)
# # print(prediction_rest)
# prediction_rest = np.array(prediction_rest)
# prediction_rest = scalar.inverse_transform(prediction_rest)
# prediction_total = model(data_split)
# prediction_total = prediction_total.detach().numpy()
# label_split = label_split.detach().numpy()
# prediction_total = scalar.inverse_transform(prediction_total)
# label_split = scalar.inverse_transform(label_split)
# f, ax = plt.subplots(nrows=3, ncols=3, figsize=(10, 10))
# length = prediction_total.shape[0]
# for i in range(3):
#     for j in range(3):
#         ax[i, j].plot(range(length), prediction_total[:, 3 * i + j], label="predictions")
#         ax[i, j].plot(range(length), label_split[:, 3 * i + j], label="true")
#         ax[i, j].plot(range(length, length + 6), prediction_rest[:, 3 * i + j], label="rest")
#         ax[i, j].set_title(col_names[3 * i + j])
#         ax[i, j].legend()
# plt.tight_layout()
# plt.show()

4. LSTM Network
4.1 Model Principle
LSTM is a variant of the recurrent neural network (RNN) designed to address the long-term dependency problem. It introduces a gating mechanism, with an input gate, a forget gate, and an output gate, to control the flow of information effectively.
The memory cell in an LSTM can retain and expose information, which allows it to handle long-range dependencies in sequential data: the forget gate selectively discards previous information, the input gate adds new information, and the output gate controls what is emitted. The standard gate equations are recalled below.
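For reference, the standard LSTM cell updates (the generic formulation, not specific to this implementation) are:

$$f_t=\sigma(W_f[h_{t-1},x_t]+b_f),\quad i_t=\sigma(W_i[h_{t-1},x_t]+b_i),\quad \tilde c_t=\tanh(W_c[h_{t-1},x_t]+b_c),$$
$$c_t=f_t\odot c_{t-1}+i_t\odot\tilde c_t,\quad o_t=\sigma(W_o[h_{t-1},x_t]+b_o),\quad h_t=o_t\odot\tanh(c_t).$$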
4.2.1 Data Processing
As with the TCN, the first 80% of the data are used for training and the remaining 20% for testing, with a time-window size of 16.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler

data = pd.read_excel("data.xlsx")
empty = 192   # length of the segment with missing values
side = 587    # length of the entire known data
seq_len = 10
batch_size = 64

# Extract all column names except the first (date) column
col_names = data.columns.values.tolist()
col_names = [col_names[i] for i in range(1, len(col_names))]
for col in col_names:
    data[col] = pd.to_numeric(data[col], errors="coerce").astype(float)
# print(data)
data_list = np.array(data[col_names].values.tolist())
# print(data_list)
data_list = data_list[:data_list.shape[0] - 6, :]   # drop the last 6 (empty) rows
# print(data_list)
# print(type(data_list[1][1]))
scaler = StandardScaler()
data_list_scaled = scaler.fit_transform(data_list)
data_scaled = pd.DataFrame(data_list_scaled, columns=col_names)

def get_data():
    return data_list

def get_data_scaled():
    data_list_scaled[np.isnan(data_list_scaled)] = 0
    # print(data_list_scaled)
    return data_list_scaled[192:588], data_list_scaled

def plot(prediction, label_test):
    plt.figure()
    f, ax = plt.subplots(nrows=3, ncols=3, figsize=(10, 10))
    for i in range(3):
        for j in range(3):
            ax[i, j].plot(label_test[:, 3 * i + j], "b-", label="true")
            ax[i, j].plot(prediction[:, 3 * i + j], "r-", label="predictions")
            ax[i, j].set_title(col_names[3 * i + j])
            ax[i, j].legend()
    plt.tight_layout()
    plt.show()

def plot_loss(train_loss):
    plt.figure()
    plt.xlabel("epoch")
    plt.ylabel("loss")
    plt.title("Loss-Rate")
    temp_list = []
    for i in range(len(train_loss)):
        temp_list.append(train_loss[i].to("cpu").detach().numpy())
    plt.plot([i for i in range(len(train_loss))], temp_list, "b-", label=u"train_loss")
    plt.legend()
    plt.show()

def create_sliding_window(data, seq_len, test=False):
    """
    Create sliding windows that map an input sequence to its next value.

    Arguments:
    - data: time-series data of shape (num_samples, num_features)
    - seq_len: size of the sliding window

    Returns:
    - X: input sequences of shape (num_samples - seq_len, seq_len, num_features)
    - y: targets of shape (num_samples - seq_len, num_features)
    """
    X, y = [], []
    num_samples, num_features = data.shape
    for i in range(num_samples - seq_len):
        window = data[i : i + seq_len, :]
        target = data[i + seq_len, :]
        X.append(window)
        y.append(target)
    if test:
        X.append(X[len(X) - 1])
    return np.array(X), np.array(y)

def inverse_scale(data):
    return scaler.inverse_transform(data)

4.2.2 Model Definition
import torch
import torch.nn as nn
import torch.optim as optim
import time

# Define the multi-layer LSTM model
class myLSTM(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers=2, output_size=9):
        super(myLSTM, self).__init__()
        # self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size, num_layers=num_layers, batch_first=True)
        self.lstm_layers = nn.ModuleList([
            nn.LSTM(input_size=input_size if i == 0 else hidden_size,
                    hidden_size=hidden_size,
                    batch_first=True)
            for i in range(num_layers)
        ])
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        for lstm_layer in self.lstm_layers:
            x, _ = lstm_layer(x)
        if len(x.shape) == 3:
            output = self.fc(x[:, -1, :])   # take the output of the last time step
            return output
        else:
            return x

def train_epoch(model, X_train, y_train, epochs=10, lr=0.001, criterion=nn.MSELoss(), optimizer=None):
    if optimizer is None:
        optimizer = optim.Adam(model.parameters(), lr=lr)
    print(model)
    # Train the model
    train_loss = []
    t1 = time.time()
    for epoch in range(epochs):
        model.train()
        optimizer.zero_grad()
        outputs = model(X_train)
        loss = criterion(outputs, y_train)
        loss.backward()
        optimizer.step()
        train_loss.append(loss)
        if (epoch + 1) % 5 == 0:
            print(f"Epoch {epoch + 1}/{epochs}, Loss: {loss.item()}")
        if (epoch + 1) % 20 == 0:
            t2 = time.time()
            print("Elapsed so far: {:.2f}s".format(t2 - t1))
    return train_loss

# X_train has shape (samples, time_steps, features)
# y_train has shape (samples, num_targets)
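A quick shape check of the model above (a minimal sketch with a small hypothetical batch; the reduced hidden size here is only to keep the check cheap and is not the training setting):

m = myLSTM(input_size=9, hidden_size=64, num_layers=3, output_size=9)
x = torch.randn(4, 16, 9)     # hypothetical batch: 4 windows of 16 months x 9 indicators
print(m(x).shape)             # expected: torch.Size([4, 9])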
4.2.3 Training Procedure and Performance
Parameters: 3 LSTM layers, hidden size 2048, learning rate 0.0005, 400 training epochs.
import torch
import numpy as np
import matplotlib.pyplot as plt

ign_data, _ = get_data_scaled()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device = "cpu"
# ign_data.shape -> (396, 9)

# Hyperparameters
input_size = 9        # number of features per time step
hidden_size = 2048    # hidden layer size
output_size = 9       # number of output features
epochs = 400          # number of epochs
lr = 0.0005           # learning rate
num_layers = 3
model = myLSTM(input_size=input_size, hidden_size=hidden_size, num_layers=num_layers, output_size=output_size).to(device)

seq_len = 16   # window size tentatively set to 16
X_, y_ = create_sliding_window(ign_data, seq_len=seq_len)

split_rate = 0.8
split_idx = X_.shape[0] * split_rate
split_idx = round(split_idx)
# X_train = torch.tensor(X_, dtype=torch.float32)
# y_train = torch.tensor(y_, dtype=torch.float32)
X_train = torch.tensor(X_[:split_idx, :, :], dtype=torch.float32)
y_train = torch.tensor(y_[:split_idx, :], dtype=torch.float32)
X_test = torch.tensor(X_[split_idx:, :, :], dtype=torch.float32)
y_test = torch.tensor(y_[split_idx:, :], dtype=torch.float32)
# X_test = torch.tensor(ign_data[split_idx:, :, :], dtype=torch.float32)
# y_test = torch.tensor(ign_data[split_idx:, :], dtype=torch.float32)

# Train the model
train_loss = train_epoch(model, X_train.to(device), y_train.to(device), epochs=epochs, lr=lr)
# Save the model
torch.save(model.state_dict(), "LSTM-hidden2048-3-copy")

plot_loss(train_loss)
model.eval()
with torch.no_grad():
    predictions = model(X_test.to(device))
predictions = predictions.to("cpu").numpy()
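Before the final comparison plot, and for parity with the BP and TCN sections, here is a minimal sketch of how the same test-set metrics could be computed on the original scale (this step is not part of the original listing; it reuses the inverse_scale helper defined in 4.2.1):

pred_orig = inverse_scale(predictions)        # predictions back in original units
true_orig = inverse_scale(y_test.numpy())     # ground truth back in original units
mse_on_test = np.mean((true_orig - pred_orig) ** 2)
mae_on_test = np.mean(np.abs(true_orig - pred_orig))
print(f"mse_on_test: {mse_on_test:.10f} mae_on_test: {mae_on_test:.10f}")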
# print(predictions)
plot(prediction=predictions, label_test=y_test.numpy())

5. Experimental Results