@@ -158,7 +158,7 @@ def particle_topk(xvals, yvals, k, lr, func=interp, n_particles=1000, inner_iter
     v = 0
     while True:
-        if v >= len(particles) or v >= len(vals) or v >= len(indices):
+        if v >= len(particles) or v >= len(vals) or v >= len(indices):
             print("Breaking early, not enough significant top-k elements...")
             return torch.stack(values), torch.stack(values_x)
@@ -192,26 +192,34 @@ def particle_topk(xvals, yvals, k, lr, func=interp, n_particles=1000, inner_iter
 # Define the domain resolution and bounds, number of BO iterations,
 # number of posterior samples, number of peaks to detect, type of detection
 # algorithm, and convergence criteria
-N = 100
+N = 1000
 low_bound = -1
 upp_bound = 1

-n_iter = 10
+n_iter = 20
 n_samples = 12
 k = 2
+normalize = False

 # Currently accepted options are: "particle" or "offset"
-topk = "particle"
+# topk = "particle"
 topk = "offset"

 # Initialize the system and find the top-k maxima
 plot_x = torch.linspace(low_bound, upp_bound, N)
 plot_y = f(plot_x)

+# Optionally rescale the domain to the unit interval [0, 1]
+if normalize:
+    plot_x = (plot_x - low_bound) / (upp_bound - low_bound)
+
 if topk == "offset":
     # Choose your desired offset level, i.e., the minimum separation between peaks
     topk_algo = naive_topk_eps
-    buffer = 0.5
+    if normalize:
+        buffer = 0.1
+    else:
+        buffer = 0.1 * (upp_bound - low_bound)
     print("")
     print("--Executing a naive top-k with an offset of", buffer, "to detect the peaks--")
     topk_xvals, topk_yvals = naive_topk_eps(plot_x, plot_y, k, buffer)
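The hunk calls `naive_topk_eps`, which is defined elsewhere in the file. As a rough sketch of what an offset-based top-k selector with this call signature could look like (a hypothetical reconstruction, assuming it greedily picks maxima at least `buffer` apart, not the repository's actual implementation):

```python
import torch

def naive_topk_eps_sketch(xvals, yvals, k, buffer):
    """Greedy top-k: repeatedly take the highest remaining point,
    masking out everything within `buffer` of an already chosen x."""
    xs, ys = [], []
    yvals = yvals.clone()
    for _ in range(k):
        idx = torch.argmax(yvals)
        xs.append(xvals[idx])
        ys.append(yvals[idx])
        # Suppress the neighborhood of the chosen peak so the next
        # argmax finds a distinct maximum
        yvals[(xvals - xvals[idx]).abs() < buffer] = -float("inf")
    return torch.stack(xs), torch.stack(ys)
```

This also explains the `buffer` change above: on a normalized unit domain a fixed offset of 0.1 suffices, while on the raw domain it is scaled by the domain width so the two settings select comparably separated peaks.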
@@ -251,9 +259,12 @@ def particle_topk(xvals, yvals, k, lr, func=interp, n_particles=1000, inner_iter

 # Main BO iteration loop
 i = 1
-tol = 1e-2
+tol = 1e-4
 err = 1
-while i <= n_iter and err > tol:
+while (i <= n_iter and err > tol):
+    if normalize:
+        # Standardize the current observations to zero mean, unit variance
+        train_Y = (train_Y - train_Y.mean()) / train_Y.std()
+
     # Fitting a GP model
     gp = SingleTaskGP(train_X, train_Y)
     mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
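For readers skimming the diff, the three BoTorch/GPyTorch calls here form the standard model-fitting step. A minimal self-contained version (toy data and variable names are illustrative):

```python
import torch
from botorch.models import SingleTaskGP
from botorch.fit import fit_gpytorch_model
from gpytorch.mlls import ExactMarginalLogLikelihood

# Toy training data: inputs must be n x d, outputs n x 1
train_X = torch.rand(10, 1, dtype=torch.double)
train_Y = torch.sin(6 * train_X)

gp = SingleTaskGP(train_X, train_Y)                  # GP with default kernel
mll = ExactMarginalLogLikelihood(gp.likelihood, gp)  # exact GP marginal likelihood
fit_gpytorch_model(mll)                              # maximize it to fit hyperparameters
```

Standardizing `train_Y` before fitting, as the new `normalize` branch does, keeps the GP's default hyperparameter priors well scaled.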
@@ -277,10 +288,12 @@ def particle_topk(xvals, yvals, k, lr, func=interp, n_particles=1000, inner_iter

     for j in range(n_samples):
         sample_xvals, sample_yvals = topk_algo(plot_x, samples[j].squeeze(1), k, buffer)
-        # print(sample_xvals.detach(), sample_yvals)
         new_X = torch.cat((train_X, sample_xvals.detach().unsqueeze(1)))
         new_Y = torch.cat((train_Y, sample_yvals.detach().unsqueeze(1)))

+        if normalize:
+            new_Y = (new_Y - new_Y.mean()) / new_Y.std()
+
         new_gp = SingleTaskGP(new_X, new_Y)
         new_mll = ExactMarginalLogLikelihood(new_gp.likelihood, new_gp)
         fit_gpytorch_model(new_mll)
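This loop consumes a `samples` tensor shaped so that `samples[j].squeeze(1)` is a vector over the grid. The sampling code sits outside this hunk; one plausible way such draws are produced (an assumption based on the shapes implied above) is joint sampling from the fitted GP posterior:

```python
# Hypothetical sketch of where `samples` could come from: joint draws
# from the fitted GP posterior over the plotting grid.
with torch.no_grad():
    posterior = gp.posterior(plot_x.unsqueeze(1))         # grid as an N x 1 input
    samples = posterior.rsample(torch.Size([n_samples]))  # (n_samples, N, 1)
```

Each posterior draw is treated as a fantasized objective: its top-k peaks are appended to the data and a fresh GP is fit, which is what the per-sample `new_gp` refit implements.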
@@ -300,7 +313,10 @@ def particle_topk(xvals, yvals, k, lr, func=interp, n_particles=1000, inner_iter
             break

     x_next = plot_x[next_indx]
-    y_next = f(x_next)
+    if normalize:
+        # Undo the domain rescaling before evaluating the true objective
+        y_next = f(x_next * (upp_bound - low_bound) + low_bound)
+    else:
+        y_next = f(x_next)

     train_X = torch.cat((train_X, torch.tensor([x_next]).unsqueeze(1)))
     train_Y = torch.cat((train_Y, torch.tensor([y_next]).unsqueeze(1)))
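The normalized branch maps the candidate back from the unit interval before querying the objective. As a sketch of the affine round trip (hypothetical helper names, matching the arithmetic in the hunk):

```python
def to_unit(x, low, upp):
    """Map x from [low, upp] onto [0, 1]."""
    return (x - low) / (upp - low)

def from_unit(u, low, upp):
    """Inverse map from [0, 1] back onto [low, upp]."""
    return u * (upp - low) + low

# e.g. with low=-1, upp=1: from_unit(to_unit(0.5, -1, 1), -1, 1) == 0.5
```

The GP and acquisition thus operate entirely on the unit domain, while `f` is always evaluated at the original coordinates.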
@@ -344,6 +360,9 @@ def particle_topk(xvals, yvals, k, lr, func=interp, n_particles=1000, inner_iter
     i += 1

-print("Converged in", i, "iterations")
+if i - 1 != 0:
+    print("Converged in", i - 1, "iterations with error:")
+    ov_err = np.divide((avg_xvals.detach().numpy() - topk_xvals.detach().numpy()), topk_xvals.detach().numpy())
+    print(np.round(abs(ov_err) * 100, 1))
 plt.ioff()
 plt.show()
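The new convergence report prints the element-wise relative error, in percent, between the averaged peak estimates and the reference top-k locations. A tiny worked example with illustrative numbers:

```python
import numpy as np

topk_ref = np.array([-0.5, 0.4])    # reference peak locations (illustrative)
avg_est  = np.array([-0.49, 0.42])  # averaged BO estimates (illustrative)

ov_err = np.divide(avg_est - topk_ref, topk_ref)
print(np.round(abs(ov_err) * 100, 1))  # -> [2. 5.]
```

Note that `i` is incremented at the end of every loop pass, so `i - 1` is the number of completed iterations; the off-by-one fix in the print reflects that.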