@@ -27,6 +27,8 @@
 from sklearn.linear_model.ridge import _solve_cholesky
 from sklearn.linear_model.ridge import _solve_cholesky_kernel

+from sklearn.grid_search import GridSearchCV
+
 from sklearn.cross_validation import KFold


@@ -527,6 +529,32 @@ def test_ridgecv_store_cv_values():
     assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))


+def test_ridgecv_sample_weight():
+    rng = np.random.RandomState(0)
+    alphas = (0.1, 1.0, 10.0)
+
+    # There are different algorithms for n_samples > n_features
+    # and the opposite, so test them both.
+    for n_samples, n_features in ((6, 5), (5, 10)):
+        y = rng.randn(n_samples)
+        X = rng.randn(n_samples, n_features)
+        sample_weight = 1 + rng.rand(n_samples)
+
+        cv = KFold(n_samples, 5)
+        ridgecv = RidgeCV(alphas=alphas, cv=cv)
+        ridgecv.fit(X, y, sample_weight=sample_weight)
+
+        # Check using GridSearchCV directly
+        parameters = {'alpha': alphas}
+        fit_params = {'sample_weight': sample_weight}
+        gs = GridSearchCV(Ridge(), parameters, fit_params=fit_params,
+                          cv=cv)
+        gs.fit(X, y)
+
+        assert_equal(ridgecv.alpha_, gs.best_estimator_.alpha)
+        assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)
+
+
 def test_raises_value_error_if_sample_weights_greater_than_1d():
     # Sample weights must be either scalar or 1D
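Side note: this diff targets the pre-0.18 module layout (sklearn.grid_search, sklearn.cross_validation). For readers on a current scikit-learn, here is a minimal sketch of the same equivalence check, assuming scikit-learn >= 0.18, where both modules were merged into sklearn.model_selection, KFold takes n_splits instead of (n, n_folds), and fit parameters moved from the GridSearchCV constructor to its fit method. It is an illustrative port, not code from this PR.

# Hedged sketch: the RidgeCV vs. GridSearchCV sample_weight equivalence
# check, ported to the post-0.18 scikit-learn API (an assumption about
# the reader's environment, not part of this diff).
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.linear_model import Ridge, RidgeCV
from sklearn.model_selection import GridSearchCV, KFold

rng = np.random.RandomState(0)
alphas = (0.1, 1.0, 10.0)
n_samples, n_features = 6, 5

y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)

cv = KFold(n_splits=5)  # modern KFold signature: n_splits only
ridgecv = RidgeCV(alphas=alphas, cv=cv)
ridgecv.fit(X, y, sample_weight=sample_weight)

gs = GridSearchCV(Ridge(), {'alpha': alphas}, cv=cv)
gs.fit(X, y, sample_weight=sample_weight)  # fit params now go to fit()

assert ridgecv.alpha_ == gs.best_estimator_.alpha
assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)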
|