Neural Network
hard · Machine Learning

Implement a feed-forward neural network with one hidden layer (sigmoid activations) trained via backpropagation. The forward pass propagates the input through the hidden layer to the output; the backward pass computes the gradient of the loss with respect to each weight matrix and bias, then updates them with gradient descent.
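For reference, these are the quantities the backward pass below computes. They assume a binary cross-entropy loss on the sigmoid output, an assumption implied by the simplified output-layer error a2 - y in the code:

\[
z_1 = X W_1 + b_1, \quad a_1 = \sigma(z_1), \quad z_2 = a_1 W_2 + b_2, \quad a_2 = \sigma(z_2)
\]
\[
\delta_2 = a_2 - y, \qquad
\frac{\partial L}{\partial W_2} = \frac{1}{n} a_1^\top \delta_2, \qquad
\frac{\partial L}{\partial b_2} = \frac{1}{n} \sum_i \delta_2^{(i)}
\]
\[
\delta_1 = \left(\delta_2 W_2^\top\right) \odot a_1 \odot (1 - a_1), \qquad
\frac{\partial L}{\partial W_1} = \frac{1}{n} X^\top \delta_1, \qquad
\frac{\partial L}{\partial b_1} = \frac{1}{n} \sum_i \delta_1^{(i)}
\]

Here \(\sigma'(z) = \sigma(z)(1 - \sigma(z))\), which is why the code can reuse the cached activation as a1 * (1 - a1).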
import numpy as np


class NeuralNetwork:
    def __init__(
        self,
        input_size: int,
        hidden_size: int,
        output_size: int,
        lr: float = 0.01,
    ):
        self.lr = lr
        # Small random weights break symmetry; a fixed seed keeps runs reproducible.
        rng = np.random.default_rng(0)
        self.W1 = rng.standard_normal((input_size, hidden_size)) * 0.01
        self.b1 = np.zeros(hidden_size)
        self.W2 = rng.standard_normal((hidden_size, output_size)) * 0.01
        self.b2 = np.zeros(output_size)

    def _sigmoid(self, z):
        return 1 / (1 + np.exp(-z))

    def _forward(self, X):
        # Input -> hidden -> output; activations are cached for the backward pass.
        self.z1 = X @ self.W1 + self.b1
        self.a1 = self._sigmoid(self.z1)
        self.z2 = self.a1 @ self.W2 + self.b2
        self.a2 = self._sigmoid(self.z2)
        return self.a2

    def _backward(self, X, y):
        n = len(X)
        # Output-layer error: a2 - y is the cross-entropy gradient w.r.t. z2.
        dz2 = self.a2 - y
        dW2 = self.a1.T @ dz2 / n
        db2 = dz2.mean(axis=0)
        # Propagate the error back through W2 and the sigmoid derivative a1 * (1 - a1).
        da1 = dz2 @ self.W2.T
        dz1 = da1 * self.a1 * (1 - self.a1)
        dW1 = X.T @ dz1 / n
        db1 = dz1.mean(axis=0)
        # Gradient-descent update on all parameters.
        self.W2 -= self.lr * dW2
        self.b2 -= self.lr * db2
        self.W1 -= self.lr * dW1
        self.b1 -= self.lr * db1

    def fit(self, X, y, epochs: int = 1000):
        # Full-batch training: one forward and one backward pass per epoch.
        for _ in range(epochs):
            self._forward(X)
            self._backward(X, y)

    def predict(self, X):
        return self._forward(X)
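A minimal usage sketch follows. The XOR data, layer sizes, learning rate, and epoch count are illustrative choices, not part of the original problem:

# XOR is not linearly separable, so it exercises the hidden layer.
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
y = np.array([[0], [1], [1], [0]], dtype=float)

net = NeuralNetwork(input_size=2, hidden_size=8, output_size=1, lr=1.0)
net.fit(X, y, epochs=10000)
# Outputs should drift toward [[0], [1], [1], [0]]; more epochs or a
# different lr may be needed, since convergence on XOR is sensitive to
# the small random initialization.
print(net.predict(X).round(3))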