|
43 | 43 | 'scale', |
44 | 44 | 'robust_scale', |
45 | 45 | 'maxabs_scale', |
| 46 | + 'minmax_scale', |
46 | 47 | ] |
47 | 48 |
|
48 | 49 |
|
@@ -194,20 +195,20 @@ def scale(X, axis=0, with_mean=True, with_std=True, copy=True): |
194 | 195 |
|
195 | 196 |
|
196 | 197 | class MinMaxScaler(BaseEstimator, TransformerMixin): |
197 | | - """Standardizes features by scaling each feature to a given range. |
| 198 | + """Transforms features by scaling each feature to a given range. |
198 | 199 |
|
199 | 200 | This estimator scales and translates each feature individually such |
200 | 201 | that it is in the given range on the training set, i.e. between |
201 | 202 | zero and one. |
202 | 203 |
|
203 | | - The standardization is given by:: |
| 204 | + The transformation is given by:: |
204 | 205 |
|
205 | 206 | X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) |
206 | 207 | X_scaled = X_std * (max - min) + min |
207 | 208 |
|
208 | 209 | where min, max = feature_range. |
209 | 210 |
|
210 | | - This standardization is often used as an alternative to zero mean, |
| 211 | + This transformation is often used as an alternative to zero mean, |
211 | 212 | unit variance scaling. |
212 | 213 |
|
213 | 214 | Read more in the :ref:`User Guide <preprocessing_scaler>`. |
@@ -289,6 +290,45 @@ def inverse_transform(self, X): |
289 | 290 | return X |
290 | 291 |
|
291 | 292 |
|
| 293 | +def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True): |
| 294 | + """Transforms features by scaling each feature to a given range. |
| 295 | +
|
 | 296 | + This function scales and translates each feature individually such |
 | 297 | + that it is in the given range on the training set, e.g. between |
 | 298 | + zero and one. |
| 299 | +
|
| 300 | + The transformation is given by:: |
| 301 | +
|
| 302 | + X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) |
| 303 | + X_scaled = X_std * (max - min) + min |
| 304 | +
|
| 305 | + where min, max = feature_range. |
| 306 | +
|
| 307 | + This transformation is often used as an alternative to zero mean, |
| 308 | + unit variance scaling. |
| 309 | +
|
| 310 | + Read more in the :ref:`User Guide <preprocessing_scaler>`. |
| 311 | +
|
| 312 | + Parameters |
| 313 | + ---------- |
 | 314 | + feature_range : tuple (min, max), default=(0, 1) |
 | 315 | + Desired range of transformed data. |
| 316 | +
|
| 317 | + axis : int (0 by default) |
| 318 | + axis used to scale along. If 0, independently scale each feature, |
| 319 | + otherwise (if 1) scale each sample. |
| 320 | +
|
| 321 | + copy : boolean, optional, default is True |
| 322 | + Set to False to perform inplace scaling and avoid a copy (if the input |
| 323 | + is already a numpy array). |
| 324 | + """ |
| 325 | + s = MinMaxScaler(feature_range=feature_range, copy=copy) |
| 326 | + if axis == 0: |
| 327 | + return s.fit_transform(X) |
| 328 | + else: |
| 329 | + return s.fit_transform(X.T).T |
| 330 | + |
| 331 | + |
292 | 332 | class StandardScaler(BaseEstimator, TransformerMixin): |
293 | 333 | """Standardize features by removing the mean and scaling to unit variance |
294 | 334 |
|
@@ -337,7 +377,7 @@ class StandardScaler(BaseEstimator, TransformerMixin): |
337 | 377 | The mean value for each feature in the training set. |
338 | 378 |
|
339 | 379 | std_ : array of floats with shape [n_features] |
340 | | - The standard deviation for each feature in the training set. |
| 380 | + The standard deviation for each feature in the training set. |
341 | 381 | Set to one if the standard deviation is zero for a given feature. |
342 | 382 |
|
343 | 383 | See also |
|
0 commit comments