import matplotlib.pyplot as plt
import matplotlib as mpl
import torch
import math
import random
import numpy as np
import scipy.io
from scipy.interpolate import interp1d

from botorch.models import SingleTaskGP, FixedNoiseGP
from botorch.fit import fit_gpytorch_model
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.constraints import Interval, GreaterThan
from gpytorch.mlls import ExactMarginalLogLikelihood
from gpytorch.kernels import CosineKernel, RBFKernel, RQKernel, MaternKernel

from botorch.acquisition import qExpectedImprovement
from botorch.optim import optimize_acqf

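# Overview: this script searches for the pressure that maximizes the CO2
# correlation length at a fixed temperature, fitting a fixed-noise GP to
# noisy interpolated data and maximizing (q-)Expected Improvement each step.
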
nice_fonts = {
    # Use LaTeX to write all text
    "text.usetex": False,
    "font.family": "serif",
    "mathtext.fontset": "dejavuserif",
    # Thesis uses 16 and 14, respectively
    "axes.labelsize": 18,
    "font.size": 18,
    # Make the legend/label fonts a little smaller
    "legend.fontsize": 16,
    "xtick.labelsize": 18,
    "ytick.labelsize": 18,
    "figure.figsize": [12, 8],
}

mpl.rcParams.update(nice_fonts)

# device = torch.device("cpu")
dtype = torch.double

def loadmatlab(filename, noise_flag):
    mat = scipy.io.loadmat('NoisyData/' + filename, appendmat=True)
    mat_contents = mat[filename]
    N = len(mat_contents)
    P = np.zeros(N)
    corr_length = np.zeros(N)
    dens_fluc = np.zeros(N)

    if noise_flag:
        std_corr_length = np.zeros(N)
        std_dens_fluc = np.zeros(N)

    for i in range(N):
        P[-1 - i] = mat_contents[i][0]
        corr_length[-1 - i] = mat_contents[i][1]
        dens_fluc[-1 - i] = mat_contents[i][2]

        if noise_flag:
            std_corr_length[-1 - i] = mat_contents[i][3]
            std_dens_fluc[-1 - i] = mat_contents[i][4]

    if noise_flag:
        return P, corr_length, dens_fluc, std_corr_length, std_dens_fluc
    else:
        return P, corr_length, dens_fluc
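
# Expected .mat layout, inferred from the indexing above: each row of
# mat[filename] holds [P, corr_length, dens_fluc, std_corr_length,
# std_dens_fluc], and rows are copied in reverse, presumably so that P comes
# out in ascending pressure order.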


# Loading the data
temperature = 30
print(temperature)
if isinstance(temperature, int):
    P, corr_length, dens_fluc, std_corr_length, std_dens_fluc = loadmatlab('co2_t' + str(temperature) + 'C', True)
else:
    if temperature == 30.5:
        P, corr_length, dens_fluc, std_corr_length, std_dens_fluc = loadmatlab('co2_t' + '30p5', True)
    if temperature == 31.5:
        P, corr_length, dens_fluc, std_corr_length, std_dens_fluc = loadmatlab('co2_t' + '31p5', True)
    if temperature == 32.5:
        P, corr_length, dens_fluc, std_corr_length, std_dens_fluc = loadmatlab('co2_t' + '32p5', True)


# Creating a cubic interpolation based on the gathered data
def f(x):
    f_interp = interp1d(P, corr_length, kind='cubic')
    return f_interp(x)

def noise(x):
    noise_interp = interp1d(P, std_corr_length, kind='cubic')
    return noise_interp(x)
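
# Note: f() and noise() rebuild their cubic interpolants on every call. An
# equivalent but cheaper arrangement would construct the two interp1d objects
# once, outside the functions, and reuse them inside the BO loop.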

# For plotting purposes only
plot_x = np.linspace(P.min(), P.max(), 1001)
plot_y = f(plot_x)
noiseplot_y = noise(plot_x)
actual_max = plot_x[np.where(plot_y == plot_y.max())[0]]

# Normalized pressure values
norm_P = (P - P.min()) / (P.max() - P.min())
# norm_corr_length = (corr_length - corr_length.min())/(corr_length.max() - corr_length.min())

bounds = torch.stack([torch.zeros(1, dtype=dtype), torch.ones(1, dtype=dtype)])
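# BoTorch searches over the unit cube, so `bounds` spans [0, 1] for the single
# normalized-pressure feature; candidates are mapped back to bar before being
# evaluated through f().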

# Pick which samples to start with
train_X = [[norm_P[0]], [norm_P[-1]]]
train_Y = [[corr_length[0]], [corr_length[-1]]]
noise_Y = [[std_corr_length[0]], [std_corr_length[-1]]]

train_X = torch.tensor(train_X, dtype=dtype)
train_Y = torch.tensor(train_Y, dtype=dtype)
noise_Y = torch.tensor(noise_Y, dtype=dtype)
print(noise_Y)
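
# The initial design is just the two endpoints of the measured pressure range,
# with their reported standard deviations carried along as observation noise.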

# Interactive plot to visualize BO moves
plt.ion()
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
ax1.title.set_text('T = ' + str(temperature) + '$\\degree$C')
ax1.plot(plot_x, plot_y, 'k-', label='f(P)')
ax1.plot(actual_max, plot_y.max(), 'r1', markersize=20, label='Actual Max')
ax1.plot(train_X.cpu().numpy() * (P.max() - P.min()) + P.min(), train_Y.cpu().numpy(), 'ko', mfc='None', markersize=8, label='Samples')
ax1.set(ylabel='$L~[\\AA]$')
ax1.set_xlim(70, 82)
ax1.legend(frameon=False)
# plt.pause(0.1)

ax2.set(xlabel='$P~$[bar]', ylabel='Exp. Improvement')
ax2.set_xlim(70, 82)
plt.tight_layout()
plt.pause(0.4)

i = 1
err = 1
tol = 1e-2
n_iter = 20
x_span = torch.linspace(0, 1, 1001, dtype=dtype)[:, None, None]  # batch, 1 (q), 1 (feature dimension)
noise_std = 1e-4
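# `tol` is the stopping threshold on how far, in normalized [0, 1] units, each
# new candidate moves from the previous sample; `noise_std` is only used by
# the commented-out constant-noise alternative inside the loop.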

# Main BO Loop
while i <= n_iter and abs(err) > tol:
    # Two different ways of normalizing; keep training (generated) data stored separately
    norm_Y = (train_Y - train_Y.mean()) / train_Y.std()  # Mean and Std
    # norm_Y = (train_Y - train_Y.min())/(train_Y.max() - train_Y.min())  # Min Max

    # Fitting a GP model; the reported noise std is rescaled by train_Y.std()
    # so the fixed observation variances are in the same units as norm_Y
    # noise_Y = torch.full_like(norm_Y, noise_std)
    gp = FixedNoiseGP(train_X, norm_Y, train_Yvar=(noise_Y / train_Y.std()) ** 2)
    mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
    fit_gpytorch_model(mll)
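
    # FixedNoiseGP treats the supplied per-point variances as known observation
    # noise rather than inferring a noise level; the unused SingleTaskGP and
    # GaussianLikelihood imports suggest an inferred-noise variant was also
    # considered (see the sketch at the end of the script).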

    # Getting an acquisition function; qEI always maximizes, with norm_Y.max()
    # serving as the incumbent best value in normalized units
    EI = qExpectedImprovement(gp, norm_Y.max())

    # Optimizing the acquisition function to get its max
    candidate, _ = optimize_acqf(EI, bounds=bounds, q=1, num_restarts=5, raw_samples=1000)
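    # optimize_acqf seeds num_restarts gradient-based optimizations from the
    # best of raw_samples random points and returns the single (q=1) best
    # candidate found in the unit cube.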

    acq_eval = EI(x_span)
    posterior = gp.posterior(x_span)
    # print(posterior)

    # Calculate how far the BO guess has moved from the previous sample
    unnorm_candidate = candidate * (P.max() - P.min()) + P.min()

    err = candidate - train_X[-1]

    print(i, unnorm_candidate.cpu().numpy(), f(unnorm_candidate), abs(err).cpu().numpy())

    # Append new torch tensors
    train_X = torch.cat((train_X, candidate))

    # Unnormalized
    train_Y = torch.cat((train_Y, torch.tensor(f(unnorm_candidate), dtype=dtype)))
    noise_Y = torch.cat((noise_Y, torch.tensor(noise(unnorm_candidate), dtype=dtype)))

    if i == 1:
        mylabel = 'BO Step'
        mylabel2 = 'Acq. Func.'
        mylabel3 = 'Target'
    else:
        mylabel = None
        mylabel2 = 'Acq. Func.'  # ax2 is cleared each step, so its label is always needed
        mylabel3 = None
    i += 1

    # Plot to see the BO moves
    ax1.plot(unnorm_candidate, f(unnorm_candidate), 'bs', markersize=8, label=mylabel)
    ax1.legend(frameon=False)
    plt.draw()

    ax2.plot(plot_x, acq_eval.detach().numpy(), 'g', alpha=0.5, label=mylabel2)
    ax2.set(xlabel='$P~$[bar]', ylabel='Exp. Improvement')
    ax2.legend(frameon=False)
    plt.draw()
    plt.tight_layout()
    plt.pause(0.25)

    if abs(err) > tol:
        ax2.cla()  # clear the acquisition panel before the next iteration

error = ((unnorm_candidate - actual_max) * 100 / actual_max).cpu().numpy()
print("Error from actual max is:", round(error[0][0], 2), "%")
print("Actual max is:", actual_max, round(plot_y.max(), 2))
plt.ioff()
plt.show()
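
# A possible variant (untested sketch, not part of the run above): if per-point
# noise estimates were unavailable, the FixedNoiseGP in the loop could be
# replaced by a SingleTaskGP that infers a single noise level, using imports
# already present at the top of the script:
#
#   likelihood = GaussianLikelihood(noise_constraint=GreaterThan(1e-6))
#   gp = SingleTaskGP(train_X, norm_Y, likelihood=likelihood)
#   mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
#   fit_gpytorch_model(mll)
#
# The acquisition step (qExpectedImprovement + optimize_acqf) is unchanged.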