Asked by: chris kim · Asked: 4/11/2023 · Last edited by: tripleee, chris kim · Updated: 4/11/2023 · Views: 53
Deep learning in Numpy: Why do these two implementations produce different results?
Q:
As far as I know, these two pieces of code should produce the same output, but they do not. Can someone help me?
Code 1.
import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

class Percep:
    def __init__(self, input_size, hidden_size, output_size):
        self.W1 = np.random.randn(input_size, hidden_size) / np.sqrt(input_size)
        self.b1 = np.zeros(hidden_size)
        self.W2 = np.random.randn(hidden_size, output_size) / np.sqrt(hidden_size)
        self.b2 = np.zeros(output_size)

    def forward(self, x):
        self.h = sigmoid(np.dot(x, self.W1) + self.b1)
        y = np.dot(self.h, self.W2) + self.b2  # no activation applied to the output layer
        return y

X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
Y = np.array([[0], [1], [1], [0]])

mlp = Percep(2, 2, 1)
lr = 0.1
epochs = 10000

for epoch in range(epochs):
    y_pred = mlp.forward(X)
    error = Y - y_pred
    delta2 = error * (y_pred * (1 - y_pred))  # gradient written as if the output were a sigmoid unit
    delta1 = np.dot(delta2, mlp.W2.T) * (mlp.h * (1 - mlp.h))
    mlp.W2 += lr * np.dot(mlp.h.T, delta2)
    mlp.b2 += lr * np.sum(delta2, axis=0)
    mlp.W1 += lr * np.dot(X.T, delta1)
    mlp.b1 += lr * np.sum(delta1, axis=0)

print('Output:')
print(mlp.forward(X))
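(Editorial side note, not part of the original post: the update delta2 = error * (y_pred * (1 - y_pred)) is the squared-error gradient for a sigmoid output unit, yet forward returns the raw affine value. A minimal sketch of a forward pass that would be consistent with that delta; forward_with_sigmoid_output is a hypothetical helper name, not the poster's code:)

def forward_with_sigmoid_output(mlp, x):
    # Hypothetical variant of Percep.forward that applies sigmoid at the
    # output layer, which is what the delta2 formula above assumes.
    mlp.h = sigmoid(np.dot(x, mlp.W1) + mlp.b1)
    return sigmoid(np.dot(mlp.h, mlp.W2) + mlp.b2)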
Code 2. Using the weights and biases obtained above.
import numpy as np

def identify(x):  # identity activation
    return x

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])

v1 = np.dot(x, mlp.W1) + mlp.b1
h1 = sigmoid(v1)
v2 = np.dot(h1, mlp.W2) + mlp.b2
h2 = sigmoid(v2)  # sigmoid IS applied to the output here, unlike Percep.forward
y = identify(h2)

print('Output:')
print(y)
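(For comparison, a minimal check, assuming the mlp trained in Code 1 is still in scope: Code 2 matches Percep.forward exactly up to v2, then applies one extra sigmoid, so its output is just the sigmoid of Code 1's output:)

y_code1 = mlp.forward(x)        # Code 1: linear output layer
y_code2 = sigmoid(y_code1)      # Code 2 additionally applies sigmoid at the output
print(np.allclose(y_code2, y))  # expected: True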
A: No answers yet
Comments
Percep.forward