add a code file of the multi-layer perceptron classifier from scratch by WeiYFan · Pull Request #12754 · TheAlgorithms/Python · GitHub
[go: up one dir, main page]

Skip to content

add a code file of the multi-layer perceptron classifier from scratch #12754

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 11 commits into from
Closed
Prev Previous commit
Next Next commit
Update multilayer_perceptron_classifier_from_scratch.py
  • Loading branch information
WeiYFan authored May 14, 2025
commit 644ba59de5817d7c117f2cfa035ef7cd200adfb7
22 changes: 10 additions & 12 deletions machine_learning/multilayer_perceptron_classifier_from_scratch.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,6 @@
import numpy as np
from numpy.random import default_rng

Check failure on line 2 in machine_learning/multilayer_perceptron_classifier_from_scratch.py

View workflow job for this annotation

GitHub Actions / ruff

Ruff (I001)

machine_learning/multilayer_perceptron_classifier_from_scratch.py:1:1: I001 Import block is un-sorted or un-formatted

rng = default_rng(42)


class Dataloader:
"""
DataLoader class for handling dataset, including data shuffling,
Expand Down Expand Up @@ -139,12 +136,12 @@
"""

def __init__(
self,
dataloader: Dataloader,
epoch: int,
learning_rate: float,
gamma: float = 1.0,
hidden_dim: int = 2,
self,
dataloader: Dataloader,
epoch: int,
learning_rate: float,
gamma: float = 1.0,
hidden_dim: int = 2,
) -> None:
self.learning_rate = learning_rate
self.gamma = gamma # learning_rate decay hyperparameter gamma
Expand Down Expand Up @@ -195,7 +192,8 @@
"""

in_dim, out_dim = self.dataloader.get_inout_dim()
w1 = rng.standard_normal((in_dim + 1, self.hidden_dim)) * np.sqrt(2.0 / in_dim)
w1 = (rng.standard_normal((in_dim + 1, self.hidden_dim)) *
np.sqrt(2.0 / in_dim))
w2 = rng.standard_normal((self.hidden_dim, out_dim)) * np.sqrt(
2.0 / self.hidden_dim
)
Expand Down Expand Up @@ -404,7 +402,7 @@
>>> label = np.array([[1, 0], [0, 1], [1, 0]])
>>> y_hat = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
>>> mlp.accuracy(label, y_hat)
1.0
np.float64(1.0)
"""
return (y_hat.argmax(axis=1) == label.argmax(axis=1)).mean()

Expand All @@ -425,7 +423,7 @@
>>> output = np.array([[0.9, 0.1], [0.2, 0.8]])
>>> label = np.array([[1.0, 0.0], [0.0, 1.0]])
>>> round(mlp.loss(output, label), 3)
0.025
np.float64(0.025)
"""
return np.sum((output - label) ** 2) / (2 * label.shape[0])

Expand Down
Loading
0