def findBestQuery(gp):
    """Find the query that maximizes the expected information gain under *gp*.

    A query is a pair of feature points concatenated into one flat vector of
    length ``2 * gp.dim``. The search starts from the GP's initial point
    (duplicated for both halves of the query, plus uniform noise to break
    symmetry) and runs L-BFGS-B inside the box [-2, 2] per coordinate.

    Parameters
    ----------
    gp : object
        Must expose ``objectiveEntropy(query)`` (expected information gain of
        a flat query vector), ``initialPoint`` (iterable of length ``gp.dim``)
        and ``dim`` (int).

    Returns
    -------
    tuple
        ``(optimal_query, info_gain)`` — the flat query vector of length
        ``2 * gp.dim`` and the information gain it achieves.
    """
    def negative_info_gain(x):
        # L-BFGS-B minimizes, so flip the sign of the information gain.
        return -1 * gp.objectiveEntropy(x)

    # Tile the initial point for both halves of the query, then perturb.
    # NOTE: list(...) * 2 concatenates; gp.initialPoint * 2 would instead
    # scale elementwise if initialPoint is an ndarray (wrong values AND
    # wrong length), so the explicit list() conversion is essential.
    x0 = np.array(list(gp.initialPoint) * 2) + np.random.rand(gp.dim * 2)

    # Let's now find the optimal query within the bounds (-2,2) for each dimension
    opt_res = opt.fmin_l_bfgs_b(negative_info_gain, x0=x0,
                                bounds=[(-2, 2)] * gp.dim * 2,
                                approx_grad=True, iprint=-1)
    # opt_res = (x_min, f_min, info); negate f_min back into a gain.
    return opt_res[0], -opt_res[1]
# Demonstrate posterior queries against the fitted GP.
mean_31 = gp.mean1pt([3, 1])
print('posterior mean for the feature set [3,1] = ' + str(mean_31))

cov_pair = gp.postcov([4, 0], [-1, 1])
print('posterior covariance between the features [4,0] and [-1,1] = ' + str(cov_pair))

var_m21 = gp.cov1pt([-2, 1])
print('posterior variance of the feature set [-2,1] = ' + str(var_m21))

# Queries are passed as a single flat vector: first point, then second point.
gain_00_22 = gp.objectiveEntropy([0, 0, 2, 2])
print('expected information gain from the query [0,0] vs [2,2] = ' + str(gain_00_22))

# Pick the most informative next query via the L-BFGS-B search above.
optimal_query, info_gain = findBestQuery(gp)
print('optimal next query is ' + str(optimal_query))