From a6454961b7101f3c76d6507832cd539e8a3bc4f8 Mon Sep 17 00:00:00 2001
From: Nick
Date: Sun, 16 Nov 2025 22:51:40 +0100
Subject: [PATCH 1/3] Implement numpy assignment

---
 numpy_questions.py | 26 ++++++++++++++++++++------
 1 file changed, 20 insertions(+), 6 deletions(-)

diff --git a/numpy_questions.py b/numpy_questions.py
index 21fcec4b..537f2d98 100644
--- a/numpy_questions.py
+++ b/numpy_questions.py
@@ -37,10 +37,17 @@ def max_index(X):
         If the input is not a numpy array or if the shape is not 2D.
     """
-    i = 0
-    j = 0
+    if not isinstance(X, np.ndarray):
+        raise ValueError("Input must be a numpy array.")
 
-    # TODO
+    if X.ndim != 2:
+        raise ValueError("Input must be a 2D array.")
+
+    # Find the flat index of the maximum
+    flat_idx = np.argmax(X)
+
+    # Convert the flat index back to 2D indices
+    i, j = np.unravel_index(flat_idx, X.shape)
 
     return i, j
 
@@ -62,6 +69,13 @@ def wallis_product(n_terms):
     pi : float
         The approximation of order `n_terms` of pi using the Wallis product.
     """
-    # XXX : The n_terms is an int that corresponds to the number of
-    # terms in the product. For example 10000.
-    return 0.
+    if n_terms == 0:
+        return 1.0
+
+    k = np.arange(1, n_terms + 1, dtype=float)
+    terms = (4 * k * k) / (4 * k * k - 1)
+
+    # Product converges to pi / 2, so multiply by 2
+    pi_approx = 2 * np.prod(terms)
+
+    return float(pi_approx)

From 7bca1a9c0cd642bb30e5e7a3afce34d6d36cbaa0 Mon Sep 17 00:00:00 2001
From: Nick
Date: Sun, 16 Nov 2025 23:37:41 +0100
Subject: [PATCH 2/3] Complete numpy and sklearn assignments

---
 sklearn_questions.py | 94 +++++++++++++++++++++++++++++++++-----------
 1 file changed, 70 insertions(+), 24 deletions(-)

diff --git a/sklearn_questions.py b/sklearn_questions.py
index f65038c6..fb7a3896 100644
--- a/sklearn_questions.py
+++ b/sklearn_questions.py
@@ -22,53 +22,99 @@
 import numpy as np
 from sklearn.base import BaseEstimator
 from sklearn.base import ClassifierMixin
-from sklearn.utils.validation import check_X_y
-from sklearn.utils.validation import check_array
-from sklearn.utils.validation import check_is_fitted
+from sklearn.utils.validation import (
+    check_X_y,
+    check_is_fitted,
+    validate_data,
+)
 from sklearn.utils.multiclass import check_classification_targets
 
 
-class OneNearestNeighbor(BaseEstimator, ClassifierMixin):
-    "OneNearestNeighbor classifier."
+class OneNearestNeighbor(ClassifierMixin, BaseEstimator):
+    """One-nearest neighbor classifier.
+
+    This classifier implements the 1-nearest neighbor rule using the
+    Euclidean distance to find, for each sample, the closest point in the
+    training set and predict its class label.
+    """
 
     def __init__(self):  # noqa: D107
+        # No hyper-parameters for this simple estimator.
         pass
 
     def fit(self, X, y):
-        """Write docstring.
-
-        And describe parameters
+        """Fit the OneNearestNeighbor classifier.
+
+        Parameters
+        ----------
+        X : array-like of shape (n_samples, n_features)
+            Training data.
+        y : array-like of shape (n_samples,)
+            Target labels.
+
+        Returns
+        -------
+        self : OneNearestNeighbor
+            Fitted estimator.
         """
-        X, y = check_X_y(X, y)
+        # This sets n_features_in_ and handles validation in a
+        # sklearn-compatible way.
+        X, y = validate_data(self, X, y, accept_sparse=False)
         check_classification_targets(y)
+
         self.classes_ = np.unique(y)
-        self.n_features_in_ = X.shape[1]
+        self.X_ = X
+        self.y_ = y
 
-        # XXX fix
         return self
 
     def predict(self, X):
-        """Write docstring.
+        """Predict class labels for samples in X.
+
+        Parameters
+        ----------
+        X : array-like of shape (n_samples, n_features)
+            Input samples for which to predict class labels.
 
-        And describe parameters
+        Returns
+        -------
+        y_pred : ndarray of shape (n_samples,)
+            Predicted class labels.
         """
         check_is_fitted(self)
-        X = check_array(X)
-        y_pred = np.full(
-            shape=len(X), fill_value=self.classes_[0],
-            dtype=self.classes_.dtype
-        )
-        # XXX fix
+        # Use reset=False so sklearn checks consistency with n_features_in_
+        X = validate_data(self, X, reset=False)
+
+        # Compute pairwise Euclidean distances between X and training data
+        diff = X[:, np.newaxis, :] - self.X_[np.newaxis, :, :]
+        distances = np.linalg.norm(diff, axis=2)
+
+        # For each sample in X, find index of nearest neighbor in training data
+        nearest_idx = np.argmin(distances, axis=1)
+
+        # Predict the label of the nearest neighbor
+        y_pred = self.y_[nearest_idx]
+
         return y_pred
 
     def score(self, X, y):
-        """Write docstring.
-
-        And describe parameters
+        """Return the mean accuracy on the given test data and labels.
+
+        Parameters
+        ----------
+        X : array-like of shape (n_samples, n_features)
+            Test samples.
+        y : array-like of shape (n_samples,)
+            True labels for X.
+
+        Returns
+        -------
+        score : float
+            Mean accuracy of the predictions on X with respect to y.
         """
+        # We still validate (X, y) as a proper supervised dataset.
         X, y = check_X_y(X, y)
         y_pred = self.predict(X)
-        # XXX fix
-        return y_pred.sum()
+        return float(np.mean(y_pred == y))

From 3be0211bf730dc9aa3df4ccbc759a54c208dba1d Mon Sep 17 00:00:00 2001
From: Nick
Date: Sun, 16 Nov 2025 23:48:25 +0100
Subject: [PATCH 3/3] Adjust OneNearestNeighbor error message for sklearn check

---
 sklearn_questions.py | 83 ++++++++++++++------------------------------
 1 file changed, 26 insertions(+), 57 deletions(-)

diff --git a/sklearn_questions.py b/sklearn_questions.py
index fb7a3896..d5a2f4c3 100644
--- a/sklearn_questions.py
+++ b/sklearn_questions.py
@@ -24,8 +24,8 @@
 from sklearn.base import ClassifierMixin
 from sklearn.utils.validation import (
     check_X_y,
+    check_array,
     check_is_fitted,
-    validate_data,
 )
 from sklearn.utils.multiclass import check_classification_targets
 
@@ -39,82 +39,51 @@ class OneNearestNeighbor(ClassifierMixin, BaseEstimator):
     """
 
     def __init__(self):  # noqa: D107
-        # No hyper-parameters for this simple estimator.
         pass
 
     def fit(self, X, y):
-        """Fit the OneNearestNeighbor classifier.
-
-        Parameters
-        ----------
-        X : array-like of shape (n_samples, n_features)
-            Training data.
-        y : array-like of shape (n_samples,)
-            Target labels.
-
-        Returns
-        -------
-        self : OneNearestNeighbor
-            Fitted estimator.
-        """
-        # This sets n_features_in_ and handles validation in a
-        # sklearn-compatible way.
-        X, y = validate_data(self, X, y, accept_sparse=False)
+        """Fit the OneNearestNeighbor classifier."""
+        X, y = check_X_y(X, y)
         check_classification_targets(y)
 
         self.classes_ = np.unique(y)
         self.X_ = X
         self.y_ = y
 
+        # Required for sklearn compatibility
+        self.n_features_in_ = X.shape[1]
+
         return self
 
     def predict(self, X):
-        """Predict class labels for samples in X.
-
-        Parameters
-        ----------
-        X : array-like of shape (n_samples, n_features)
-            Input samples for which to predict class labels.
-
-        Returns
-        -------
-        y_pred : ndarray of shape (n_samples,)
-            Predicted class labels.
-        """
+        """Predict class labels for samples in X."""
         check_is_fitted(self)
-        # Use reset=False so sklearn checks consistency with n_features_in_
-        X = validate_data(self, X, reset=False)
+        X = check_array(X)
+
+        # check_array does not enforce n_features_in_, so check it explicitly
+        if X.shape[1] != self.n_features_in_:
+            raise ValueError(
+                f"X has {X.shape[1]} features, but {type(self).__name__} "
+                f"is expecting {self.n_features_in_} features as input"
+            )
 
-        # Compute pairwise Euclidean distances between X and training data
+        # Compute Euclidean distances
         diff = X[:, np.newaxis, :] - self.X_[np.newaxis, :, :]
         distances = np.linalg.norm(diff, axis=2)
-
-        # For each sample in X, find index of nearest neighbor in training data
         nearest_idx = np.argmin(distances, axis=1)
 
-        # Predict the label of the nearest neighbor
-        y_pred = self.y_[nearest_idx]
-
-        return y_pred
+        return self.y_[nearest_idx]
 
     def score(self, X, y):
-        """Return the mean accuracy on the given test data and labels.
-
-        Parameters
-        ----------
-        X : array-like of shape (n_samples, n_features)
-            Test samples.
-        y : array-like of shape (n_samples,)
-            True labels for X.
-
-        Returns
-        -------
-        score : float
-            Mean accuracy of the predictions on X with respect to y.
-        """
-        # We still validate (X, y) as a proper supervised dataset.
-        X, y = check_X_y(X, y)
-        y_pred = self.predict(X)
+        """Return the mean accuracy on the given test data and labels."""
+        X = check_array(X)
+        if X.shape[1] != self.n_features_in_:
+            raise ValueError(
+                f"X has {X.shape[1]} features, but {type(self).__name__} "
+                f"is expecting {self.n_features_in_} features as input"
+            )
+
+        y_pred = self.predict(X)
         return float(np.mean(y_pred == y))
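
A quick local smoke test for the three patches above. This is a hypothetical snippet, not part of the patch series: it assumes the patched numpy_questions.py and sklearn_questions.py are importable, uses only the names they define, and the data and thresholds are illustrative.

    import numpy as np
    from numpy_questions import max_index, wallis_product
    from sklearn_questions import OneNearestNeighbor

    # max_index returns the 2D position of the largest entry.
    X = np.array([[0.0, 2.0], [5.0, 1.0]])
    assert max_index(X) == (1, 0)

    # wallis_product approaches pi as the number of terms grows.
    assert abs(wallis_product(100000) - np.pi) < 1e-4

    # OneNearestNeighbor predicts the label of the closest training point,
    # so it is perfectly accurate on its own training set.
    rng = np.random.RandomState(0)
    X_train = rng.randn(20, 3)
    y_train = (X_train[:, 0] > 0).astype(int)
    clf = OneNearestNeighbor().fit(X_train, y_train)
    assert clf.score(X_train, y_train) == 1.0
    print(clf.predict(rng.randn(5, 3)))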