Asked by: H_ A  Asked: 11/17/2023  Updated: 11/17/2023  Views: 19
AttributeError: 'PhysicsInformedNN1' object has no attribute 'train'
Q:
I tried to implement a PINN in PyTorch, but I got the error message above.
Code:
model = PhysicsInformedNN1(x0, u0, x1, layers, dt, lb, ub, q)
model.train(10000)
U1_pred = model.predict(x_star)
error = np.linalg.norm(U1_pred[:, -1] - Exact[idx_t1, :], 2) / np.linalg.norm(Exact[idx_t1, :], 2)
print('Error: %e' % (error))
How can I fix this?
Physics-informed neural network
import numpy as np
import torch

# Assumes `device` and a `DNN` network class are defined elsewhere in the notebook.
class PhysicsInformedNN1():
    def __init__(self, x0, u0, x1, layers, dt, lb, ub, q):
        # boundary conditions
        self.lb = torch.tensor(lb).float().to(device)
        self.ub = torch.tensor(ub).float().to(device)

        # data (x0 and x1 need gradients for the autograd calls below)
        self.x0 = torch.tensor(x0).float().to(device).requires_grad_(True)
        self.x1 = torch.tensor(x1).float().to(device).requires_grad_(True)
        self.u0 = torch.tensor(u0).float().to(device)
        self.layers = layers
        self.q = q
        self.dt = dt
        # Dummy tensors for the forward-gradient trick; they must match the
        # shapes of the network outputs they seed, (N, q) and (N, q+1), and
        # require gradients so fwd_gradients_* can differentiate through them
        self.dummy_x0_torch = torch.ones(
            (self.x0.shape[0], self.q), device=device, requires_grad=True)
        self.dummy_x1_torch = torch.ones(
            (self.x1.shape[0], self.q + 1), device=device, requires_grad=True)
        # deep neural network
        self.dnn = DNN(layers).to(device)
        # Load the Butcher tableau from the text file as a NumPy array
        tmp = np.float32(np.loadtxt('/content/Butcher_IRK100 2.txt', ndmin=2))
        # Reshape the first part into the (q+1) x q IRK weight matrix
        self.IRK_weights = torch.tensor(
            np.reshape(tmp[0:q**2 + q], (q + 1, q))).float().to(device)
        # The remaining entries are the IRK times
        self.IRK_times = torch.tensor(tmp[q**2 + q:]).float().to(device)
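        # Layout of the Butcher file (an assumption based on the reshape above):
        # the first q**2 + q floats form the (q+1) x q stage/weight matrix, the
        # remaining q floats are the quadrature nodes, so tmp should hold
        # q**2 + 2*q values in total.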
        # optimizer: using the same settings
        self.optimizer = torch.optim.LBFGS(
            self.dnn.parameters(),
            lr=1.0,
            max_iter=50000,
            max_eval=50000,
            history_size=50,
            tolerance_grad=1e-5,
            tolerance_change=1.0 * np.finfo(float).eps,
            line_search_fn="strong_wolfe"
        )
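        # Note (PyTorch behavior): LBFGS.step() expects a closure that
        # re-evaluates the model and returns the loss; loss_func below follows
        # that contract (zero_grad, forward pass, backward, return loss).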

        self.iter = 0
    def fwd_gradients_0(self, U, x):
        # VJP of U w.r.t. x, seeded with the dummy tensor (create_graph keeps
        # the graph alive so we can differentiate through the dummy next)
        g = torch.autograd.grad(U, x, grad_outputs=self.dummy_x0_torch,
                                create_graph=True)[0]
        # Differentiating g w.r.t. the dummy seed recovers dU/dx column-wise
        return torch.autograd.grad(g, self.dummy_x0_torch,
                                   grad_outputs=torch.ones_like(g),
                                   create_graph=True)[0]

    def fwd_gradients_1(self, U, x):
        g = torch.autograd.grad(U, x, grad_outputs=self.dummy_x1_torch,
                                create_graph=True)[0]
        return torch.autograd.grad(g, self.dummy_x1_torch,
                                   grad_outputs=torch.ones_like(g),
                                   create_graph=True)[0]
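    # The two grad calls implement the "dummy variable" forward-gradient
    # trick: seeding the first VJP with a dummy that requires gradients and
    # then differentiating with respect to that dummy recovers d(U[:, j])/dx
    # for every output column at once (see the standalone check after the
    # class definition).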
    def net_U0(self, x):
        nu = 0.01 / np.pi
        # Forward pass: stage solutions plus the final state, shape N x (q+1)
        U1 = self.dnn(x)
        # Stage values and their spatial derivatives
        U = U1[:, :-1]
        U_x = self.fwd_gradients_0(U, x)
        U_xx = self.fwd_gradients_0(U_x, x)
        # Burgers right-hand side evaluated at the stage values
        F = -U * U_x + nu * U_xx
        # Step back to t_n with the (q+1) x q IRK weights and the scalar dt
        U0 = U1 - self.dt * torch.matmul(F, self.IRK_weights.T)
        return U0
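    # net_U0 implements the q-stage implicit Runge-Kutta step of the
    # discrete-time PINN: with the residual F evaluated at the stage
    # solutions, u(t_n) is recovered as U1 - dt * F @ A^T, where the
    # (q+1) x q matrix A stacks the IRK coefficients a_ij and weights b_j,
    # so every column of U0 should reproduce the snapshot u0.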
    def net_U1(self, x):
        U1 = self.dnn(x)
        return U1  # output shape: N x (q+1)
    def loss_func(self):
        # Re-seed the dummy tensors so each closure call starts from a fresh graph
        self.dummy_x0_torch = torch.ones(
            (self.x0.shape[0], self.q), device=device, requires_grad=True)
        self.dummy_x1_torch = torch.ones(
            (self.x1.shape[0], self.q + 1), device=device, requires_grad=True)

        self.optimizer.zero_grad()
        self.U0_pred = self.net_U0(self.x0)  # N x (q+1)
        self.U1_pred = self.net_U1(self.x1)  # N1 x (q+1)
        loss = torch.sum((self.u0 - self.U0_pred) ** 2) + torch.sum(self.U1_pred ** 2)
        loss.backward()

        self.iter += 1
        if self.iter % 100 == 0:
            print('Iter %d, Loss: %.5e' % (self.iter, loss.item()))
        return loss
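    # The loss has two parts: the first ties every column of U0_pred back to
    # the measured snapshot u0 (u0 is N x 1 and broadcasts against the
    # N x (q+1) prediction); the second drives U1_pred to zero at the points
    # x1, which assumes homogeneous Dirichlet boundary data as in the usual
    # Burgers benchmark setup.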
    def train(self, nIter=None):
        # nIter is unused here: LBFGS runs up to max_iter iterations inside a
        # single step() call, re-invoking loss_func as its closure
        self.dnn.train()
        self.optimizer.step(self.loss_func)
    def predict(self, x_star):
        # Accept a NumPy array and move it to the model's device
        x_star = torch.tensor(x_star).float().to(device)
        # Evaluation mode matters for layers like dropout or batchnorm
        self.dnn.eval()
        # No gradient tracking needed for inference
        with torch.no_grad():
            U1_star = self.net_U1(x_star)
        U1_star = U1_star.detach().cpu().numpy()
        return U1_star
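As a standalone check of the dummy-variable gradient trick used in fwd_gradients_0/1, here is a minimal sketch on a toy function (my own example, independent of the model and data above):

import torch

x = torch.linspace(-1.0, 1.0, 5).reshape(-1, 1).requires_grad_(True)
U = torch.cat([x, x ** 2], dim=1)               # N x 2, columns u_j(x)
dummy = torch.ones(5, 2, requires_grad=True)    # same shape as U
g = torch.autograd.grad(U, x, grad_outputs=dummy, create_graph=True)[0]
U_x = torch.autograd.grad(g, dummy, grad_outputs=torch.ones_like(g))[0]
# U_x[:, 0] is all ones and U_x[:, 1] equals 2*x: column j is d(U[:, j])/dx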
A: No answers yet
Comments