Typed Optimization #531

Status: Merged, Dec 27, 2024 (26 commits).

The diff below shows changes from 1 commit only. Commits in this PR:
cf963b1  WIP (till-m, Nov 9, 2022)
81321f3  Add ML example (till-m, Nov 9, 2022)
4106850  Save for merge (till-m, May 23, 2023)
5a3f2de  Merge remote-tracking branch 'origin/master' into parameter-types (till-m, May 23, 2023)
ac7f253  Merge remote-tracking branch 'origin/master' into parameter-types (till-m, May 25, 2023)
0ff88fc  Merge branch 'master' into parameter-types (till-m, Oct 1, 2024)
5d34efa  Update (till-m, Oct 6, 2024)
2b64ff0  Parameter types more (#13) (phi-friday, Oct 9, 2024)
3920e0f  Use `.masks` not `._masks` (till-m, Oct 9, 2024)
241e5c7  User `super` to call kernel (till-m, Oct 9, 2024)
68909ad  Update logging for parameters (till-m, Oct 12, 2024)
1a03b05  Disable SDR when non-float parameters are present (till-m, Oct 12, 2024)
f17c96a  Add demo script for typed optimization (till-m, Oct 12, 2024)
3c4c298  Update parameters, testing (till-m, Oct 15, 2024)
264b79e  Remove sorting, gradient optimize only continuous params (till-m, Oct 29, 2024)
b97c11e  Go back to `wrap_kernel` (till-m, Oct 29, 2024)
9543fb8  Update code (till-m, Oct 30, 2024)
7c84390  Remove `tqdm` dependency, use EI acq (till-m, Nov 1, 2024)
f1e4493  Add more text to typed optimization notebook. (till-m, Nov 1, 2024)
b765b5d  Merge branch 'master' into parameter-types (till-m, Nov 1, 2024)
187fd08  Save files while moving device (till-m, Nov 15, 2024)
31223a9  Update with custom parameter type example (till-m, Dec 10, 2024)
4476271  Merge branch 'master' into parameter-types (till-m, Dec 18, 2024)
9b1fbc1  Mention that parameters are not sorted (till-m, Dec 18, 2024)
1a54e1b  Change array reg warning (till-m, Dec 18, 2024)
05fbbcd  Update Citations, parameter notebook (till-m, Dec 25, 2024)
Commit 5d34efa5676778528a1bbb7489ef80f3f3e7b034: Update
till-m committed Oct 6, 2024
40 changes: 17 additions & 23 deletions bayes_opt/acquisition.py
@@ -127,7 +127,7 @@ def suggest(
         self._fit_gp(gp=gp, target_space=target_space)

         acq = self._get_acq(gp=gp, constraint=target_space.constraint)
-        return self._acq_min(acq, target_space._float_bounds, n_random=n_random, n_l_bfgs_b=n_l_bfgs_b)
+        return self._acq_min(acq, target_space, n_random=n_random, n_l_bfgs_b=n_l_bfgs_b)

     def _get_acq(
         self, gp: GaussianProcessRegressor, constraint: ConstraintModel | None = None
@@ -182,7 +182,7 @@ def acq(x: NDArray[Float]) -> NDArray[Float]:
     def _acq_min(
         self,
         acq: Callable[[NDArray[Float]], NDArray[Float]],
-        bounds: NDArray[Float],
+        space: TargetSpace,
         n_random: int = 10_000,
         n_l_bfgs_b: int = 10,
     ) -> NDArray[Float]:
@@ -197,10 +197,8 @@ def _acq_min(
         acq : Callable
             Acquisition function to use. Should accept an array of parameters `x`.

-        bounds : np.ndarray
-            Bounds of the search space. For `N` parameters this has shape
-            `(N, 2)` with `[i, 0]` the lower bound of parameter `i` and
-            `[i, 1]` the upper bound.
+        space : TargetSpace
+            The target space over which to optimize.

         n_random : int
             Number of random samples to use.
@@ -217,15 +215,15 @@ def _acq_min(
         if n_random == 0 and n_l_bfgs_b == 0:
             error_msg = "Either n_random or n_l_bfgs_b needs to be greater than 0."
             raise ValueError(error_msg)
-        x_min_r, min_acq_r = self._random_sample_minimize(acq, bounds, n_random=n_random)
-        x_min_l, min_acq_l = self._l_bfgs_b_minimize(acq, bounds, n_x_seeds=n_l_bfgs_b)
+        x_min_r, min_acq_r = self._random_sample_minimize(acq, space, n_random=n_random)
+        x_min_l, min_acq_l = self._l_bfgs_b_minimize(acq, space, n_x_seeds=n_l_bfgs_b)
         # Either n_random or n_l_bfgs_b is not 0 => at least one of x_min_r and x_min_l is not None
         if min_acq_r < min_acq_l:
             return x_min_r
         return x_min_l

     def _random_sample_minimize(
-        self, acq: Callable[[NDArray[Float]], NDArray[Float]], bounds: NDArray[Float], n_random: int
+        self, acq: Callable[[NDArray[Float]], NDArray[Float]], space: TargetSpace, n_random: int
     ) -> tuple[NDArray[Float] | None, float]:
         """Random search to find the minimum of `acq` function.

@@ -234,10 +232,8 @@ def _random_sample_minimize(
         acq : Callable
             Acquisition function to use. Should accept an array of parameters `x`.

-        bounds : np.ndarray
-            Bounds of the search space. For `N` parameters this has shape
-            `(N, 2)` with `[i, 0]` the lower bound of parameter `i` and
-            `[i, 1]` the upper bound.
+        space : TargetSpace
+            The target space over which to optimize.

         n_random : int
             Number of random samples to use.
@@ -252,14 +248,14 @@ def _random_sample_minimize(
         """
         if n_random == 0:
             return None, np.inf
-        x_tries = self.random_state.uniform(bounds[:, 0], bounds[:, 1], size=(n_random, bounds.shape[0]))
+        x_tries = space.random_sample(n_random, random_state=self.random_state)
         ys = acq(x_tries)
         x_min = x_tries[ys.argmin()]
         min_acq = ys.min()
         return x_min, min_acq

     def _l_bfgs_b_minimize(
-        self, acq: Callable[[NDArray[Float]], NDArray[Float]], bounds: NDArray[Float], n_x_seeds: int = 10
+        self, acq: Callable[[NDArray[Float]], NDArray[Float]], space: TargetSpace, n_x_seeds: int = 10
     ) -> tuple[NDArray[Float] | None, float]:
         """Random search to find the minimum of `acq` function.

@@ -268,10 +264,8 @@ def _l_bfgs_b_minimize(
         acq : Callable
             Acquisition function to use. Should accept an array of parameters `x`.

-        bounds : np.ndarray
-            Bounds of the search space. For `N` parameters this has shape
-            `(N, 2)` with `[i, 0]` the lower bound of parameter `i` and
-            `[i, 1]` the upper bound.
+        space : TargetSpace
+            The target space over which to optimize.

         n_x_seeds : int
             Number of starting points for the L-BFGS-B optimizer.
@@ -286,14 +280,14 @@ def _l_bfgs_b_minimize(
         """
         if n_x_seeds == 0:
             return None, np.inf
-        x_seeds = self.random_state.uniform(bounds[:, 0], bounds[:, 1], size=(n_x_seeds, bounds.shape[0]))
+        x_seeds = space.random_sample(n_x_seeds, random_state=self.random_state)

         min_acq: float | None = None
         x_try: NDArray[Float]
         x_min: NDArray[Float]
         for x_try in x_seeds:
             # Find the minimum of minus the acquisition function
-            res: OptimizeResult = minimize(acq, x_try, bounds=bounds, method="L-BFGS-B")
+            res: OptimizeResult = minimize(acq, x_try, bounds=space.bounds, method="L-BFGS-B")

             # See if success
             if not res.success:
@@ -306,11 +300,11 @@ def _l_bfgs_b_minimize(

         if min_acq is None:
             min_acq = np.inf
-            x_min = np.array([np.nan] * bounds.shape[0])
+            x_min = np.array([np.nan] * space.bounds.shape[0])

         # Clip output to make sure it lies within the bounds. Due to floating
         # point technicalities this is not always the case.
-        return np.clip(x_min, bounds[:, 0], bounds[:, 1]), min_acq
+        return np.clip(x_min, space.bounds[:, 0], space.bounds[:, 1]), min_acq


 class UpperConfidenceBound(AcquisitionFunction):
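For orientation, here is a minimal, self-contained sketch of the two-stage minimization that `_acq_min` performs after this change: a random-sampling stage that now asks the space itself for candidates, followed by an L-BFGS-B stage seeded from the best random point. `space.random_sample(n, random_state=...)` and `space.bounds` are taken from the diff above; `ToySpace` and the quadratic acquisition are illustrative stand-ins, not the library's TargetSpace.

import numpy as np
from scipy.optimize import minimize

class ToySpace:
    """Hypothetical stand-in for bayes_opt's TargetSpace (illustrative only)."""
    bounds = np.array([[-2.0, 2.0], [0.0, 5.0]])  # shape (N, 2): lower/upper per parameter

    def random_sample(self, n, random_state):
        # Uniform sampling here; the real TargetSpace samples each parameter by its type.
        return random_state.uniform(self.bounds[:, 0], self.bounds[:, 1], size=(n, len(self.bounds)))

def acq(x):
    """Toy acquisition surface with its minimum at (1, 1)."""
    x = np.atleast_2d(x)
    out = np.sum((x - 1.0) ** 2, axis=1)
    return out if out.size > 1 else out.item()  # scalar for single points, as scipy expects

space = ToySpace()
rs = np.random.RandomState(0)

# Stage 1: random search over points drawn from the space.
x_tries = space.random_sample(10_000, random_state=rs)
x0 = x_tries[acq(x_tries).argmin()]

# Stage 2: gradient-based refinement, clipped back into bounds.
res = minimize(acq, x0, bounds=space.bounds, method="L-BFGS-B")
x_min = np.clip(res.x, space.bounds[:, 0], space.bounds[:, 1])
print(x_min)  # approximately [1., 1.]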
6 changes: 4 additions & 2 deletions bayes_opt/bayesian_optimization.py
@@ -249,7 +249,7 @@ def probe(
     def suggest(self) -> dict[str, float]:
         """Suggest a promising point to probe next."""
         if len(self._space) == 0:
-            return self._space.array_to_params(self._space.random_sample())
+            return self._space.array_to_params(self._space.random_sample(random_state=self._random_state))

         # Finding argmax of the acquisition function.
         suggestion = self._acquisition_function.suggest(gp=self._gp, target_space=self._space, fit_gp=True)
@@ -268,7 +268,9 @@ def _prime_queue(self, init_points: int) -> None:
             init_points = max(init_points, 1)

         for _ in range(init_points):
-            self._queue.append(self._space.array_to_params(self._space.random_sample()))
+            self._queue.append(
+                self._space.array_to_params(self._space.random_sample(random_state=self._random_state))
+            )

     def _prime_subscriptions(self) -> None:
         if not any([len(subs) for subs in self._events.values()]):
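A note on why suggest and _prime_queue now pass random_state explicitly: routing all random draws through the optimizer's own self._random_state makes the initial proposals reproducible from the constructor seed. A hedged sketch, assuming the v2-style no-argument suggest() shown in the diff; black_box and the bounds are illustrative:

from bayes_opt import BayesianOptimization

def black_box(x, y):
    return -x ** 2 - (y - 1) ** 2 + 1

pbounds = {"x": (-2.0, 2.0), "y": (-3.0, 3.0)}

# Two optimizers with the same seed should now propose the same first point,
# since the pre-data random sample is drawn from self._random_state.
opt_a = BayesianOptimization(f=black_box, pbounds=pbounds, random_state=42)
opt_b = BayesianOptimization(f=black_box, pbounds=pbounds, random_state=42)
assert opt_a.suggest() == opt_b.suggest()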
13 changes: 8 additions & 5 deletions bayes_opt/domain_reduction.py
@@ -63,11 +63,14 @@ class SequentialDomainReductionTransformer(DomainTransformer):

     def __init__(
         self,
+        parameters: Iterable[str] | None = None,
         gamma_osc: float = 0.7,
         gamma_pan: float = 1.0,
         eta: float = 0.9,
         minimum_window: NDArray[Float] | Sequence[float] | float | Mapping[str, float] | None = 0.0,
     ) -> None:
+        # TODO: Ensure that this is only applied to continuous parameters
+        self.parameters = parameters
         self.gamma_osc = gamma_osc
         self.gamma_pan = gamma_pan
         self.eta = eta
@@ -87,7 +90,7 @@ def initialize(self, target_space: TargetSpace) -> None:
             TargetSpace this DomainTransformer operates on.
         """
         # Set the original bounds
-        self.original_bounds = np.copy(target_space.float_bounds)
+        self.original_bounds = np.copy(target_space.bounds)
         self.bounds = [self.original_bounds]

         # Set the minimum window to an array of length bounds
@@ -97,12 +100,12 @@ def initialize(self, target_space: TargetSpace) -> None:
                 raise ValueError(error_msg)
             self.minimum_window = self.minimum_window_value
         else:
-            self.minimum_window = [self.minimum_window_value] * len(target_space.float_bounds)
+            self.minimum_window = [self.minimum_window_value] * len(target_space.bounds)

         # Set initial values
-        self.previous_optimal = np.mean(target_space.float_bounds, axis=1)
-        self.current_optimal = np.mean(target_space.float_bounds, axis=1)
-        self.r = target_space.float_bounds[:, 1] - target_space.float_bounds[:, 0]
+        self.previous_optimal = np.mean(target_space.bounds, axis=1)
+        self.current_optimal = np.mean(target_space.bounds, axis=1)
+        self.r = target_space.bounds[:, 1] - target_space.bounds[:, 0]

         self.previous_d = 2.0 * (self.current_optimal - self.previous_optimal) / self.r
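On the domain_reduction.py change: the constructor gains a parameters argument (only stored in this commit, with a TODO; the diff does not show how it is consumed downstream), and the transformer now reads target_space.bounds instead of float_bounds. A hedged sketch of constructing the transformer with the new argument; values are illustrative:

from bayes_opt import BayesianOptimization
from bayes_opt.domain_reduction import SequentialDomainReductionTransformer

# `parameters` is new in this commit: it names the dimensions the transformer
# should act on (per the TODO, intended for continuous parameters only).
bounds_transformer = SequentialDomainReductionTransformer(
    parameters=["x"],
    gamma_osc=0.7,
    gamma_pan=1.0,
    eta=0.9,
    minimum_window=0.0,
)

optimizer = BayesianOptimization(
    f=lambda x, y: -x ** 2 - (y - 1) ** 2 + 1,
    pbounds={"x": (-2.0, 2.0), "y": (-3.0, 3.0)},
    bounds_transformer=bounds_transformer,
    random_state=1,
)
optimizer.maximize(init_points=2, n_iter=5)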