Commit 35d652d

Merge branch 'main' of https://github.com/DiffEqML/bo-phases into main

2 parents: 9b21cef + 04cce14
20 files changed (+247, -13 lines)

DensCorr.py (+42, -13)
@@ -2,6 +2,7 @@
 import matplotlib as mpl
 import torch
 import math
+import random
 import numpy as np
 import scipy.io
 from scipy.interpolate import interp1d
@@ -23,21 +24,22 @@
     "font.family": "serif",
     "mathtext.fontset": "dejavuserif",
     # Thesis use 16 and 14, respectively
-    "axes.labelsize": 16,
-    "font.size": 16,
+    "axes.labelsize": 18,
+    "font.size": 18,
     # Make the legend/label fonts a little smaller
-    "legend.fontsize": 14,
-    "xtick.labelsize": 16,
-    "ytick.labelsize": 16,
-    "figure.figsize": [10,8],
+    "legend.fontsize": 16,
+    "xtick.labelsize": 18,
+    "ytick.labelsize": 18,
+    "figure.figsize": [12,8],
 }

 mpl.rcParams.update(nice_fonts)

+# device = torch.device("cpu")
 dtype = torch.double

 def loadmatlab(filename):
-    mat = scipy.io.loadmat(filename, appendmat=True)
+    mat = scipy.io.loadmat('NoiseFree/'+filename, appendmat=True)
     mat_contents = mat[filename]
     N = len(mat_contents)
     P = np.zeros(N)
@@ -51,6 +53,7 @@ def loadmatlab(filename):

     return P, corr_length, dens_fluc

+
 # Loading the data
 temperature = 30
 print(temperature)
@@ -64,12 +67,18 @@ def loadmatlab(filename):
     if temperature == 32.5:
         P, corr_length, dens_fluc = loadmatlab('B3')

+
 # Creating a cubic interpolation based on the gathered data
 def f(x):
     f_interp = interp1d(P, corr_length, kind='cubic')

     return f_interp(x)

+def get_init(num_samples):
+
+    return random.sample(range(len(P)), num_samples)
+
+
 # For plotting purposes only
 plot_x = np.linspace(P.min(), P.max(), 1001)
 plot_y = f(plot_x)
@@ -82,16 +91,17 @@ def f(x):
 bounds = torch.stack([torch.zeros(1, dtype=dtype), torch.ones(1, dtype=dtype)])

 # Pick which samples to start with
-# train_X = [[norm_P[0]], [norm_P[len(P)//4]], [norm_P[len(P)//2]], [norm_P[-1]]]
-# train_Y = [[norm_corr_length[0]], [norm_corr_length[len(P)//4]], [norm_corr_length[len(P)//2]], [norm_corr_length[-1]]]
 train_X = [[norm_P[0]], [norm_P[-1]]]
-train_Y = [[corr_length[0]],[corr_length[-1]]]
+train_Y = [[corr_length[0]], [corr_length[-1]]]
+# train_X = [[norm_P[0]], [norm_P[len(P)//2]]]
+# train_Y = [[corr_length[0]], [corr_length[len(P)//2]]]
 train_X = torch.tensor(train_X, dtype=dtype)
 train_Y = torch.tensor(train_Y, dtype=dtype)

 # Interactive plot to visualize BO moves
 plt.ion()
 fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
+ax1.title.set_text('T = ' + str(temperature) + '$\\degree$C')
 ax1.plot(plot_x, plot_y, 'k-', label='f(P)')
 ax1.plot(actual_max, plot_y.max(), 'r1', markersize=20, label='Actual Max')
 ax1.plot(train_X.cpu().numpy()*(P.max() - P.min()) + P.min(), train_Y.cpu().numpy(), 'ko', mfc='None', markersize=8, label='Samples')
@@ -105,19 +115,29 @@ def f(x):
 plt.tight_layout()
 plt.pause(0.4)

+
 i = 1
 err = 1
 tol = 1e-2
 n_iter = 20
 x_span = torch.linspace(0, 1, 1001)[:, None, None] # batch, 1 (q), 1 (feature dimension)
+noise_variance = 1e-8

 # Main BO Loop
 while i <= n_iter and abs(err) > tol:
     # Two different ways of normalizing; keep training (generated) data stored separately
     norm_Y = (train_Y - train_Y.mean())/train_Y.std() # Mean and Std
     # norm_Y = (train_Y - train_Y.min())/(train_Y.max() - train_Y.min()) # Min Max

+    # Unnormalized
+    # norm_Y = train_Y
+
     # Fitting a GP model
+    # likelihood = GaussianLikelihood(noise_constraint=Interval(1e-8, 1e-7))
+    # likelihood = GaussianLikelihood(noise_constraint=GreaterThan(0.7))
+    # gp = SingleTaskGP(train_X, norm_Y, covar_module=RQKernel(), likelihood=likelihood)
+    # gp = SingleTaskGP(train_X, norm_Y, likelihood=likelihood)
+    # gp = FixedNoiseGP(train_X, norm_Y, covar_module=RQKernel(), train_Yvar=torch.full_like(norm_Y, 1e-8))
     gp = FixedNoiseGP(train_X, norm_Y, train_Yvar=torch.full_like(norm_Y, noise_variance))
     mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
     fit_gpytorch_model(mll)
@@ -126,22 +146,28 @@ def f(x):
     EI = qExpectedImprovement(gp, norm_Y.max(), maximize=True)

     # Optimizing the acquisition function to get its max
-    candidate, acq_value = optimize_acqf(EI, bounds=bounds, q=1, num_restarts=5, raw_samples=1012)
+    candidate, _ = optimize_acqf(EI, bounds=bounds, q=1, num_restarts=5, raw_samples=1000)

     acq_eval = EI(x_span)

     # Calculate by how much the BO guess has moved by
     unnorm_candidate = candidate*(P.max() - P.min()) + P.min()
-    prev_candidate = train_X[-1]*(P.max() - P.min()) + P.min()
+    # prev_candidate = train_X[-1]*(P.max() - P.min()) + P.min()

     # err = (unnorm_candidate - prev_candidate)/prev_candidate
     err = candidate - train_X[-1]

     print(i, unnorm_candidate.cpu().numpy(), f(unnorm_candidate), abs(err).cpu().numpy())

-    norm_f_candidate = (f(unnorm_candidate) - corr_length.min())/(corr_length.max() - corr_length.min())
+    # norm_f_candidate = (f(unnorm_candidate) - corr_length.min())/(corr_length.max() - corr_length.min())
+    # norm_f_candidate = (f(unnorm_candidate) - train_Y.mean().cpu().numpy())/train_Y.std().cpu().numpy()
     # Append new torch tensors
     train_X = torch.cat((train_X, candidate))
+
+    # Normalized
+    # train_Y = torch.cat((train_Y, torch.tensor(norm_f_candidate)))
+
+    # Unnormalized
     train_Y = torch.cat((train_Y, torch.tensor(f(unnorm_candidate), dtype=dtype)))

     if i == 1:
@@ -160,10 +186,13 @@ def f(x):
     plt.draw()
     # ax1.pause(0.05)

+
     ax2.plot(plot_x, acq_eval.detach().numpy(), 'g', alpha=0.5, label=mylabel2)
+    ax2.set(xlabel='$P~$[bar]', ylabel='Exp. Improvement')
     # ax2.plot(plot_x[np.where(acq_eval.detach().numpy() == acq_eval.detach().numpy().max())[0]], 1, 'rx', markersize=8, label=mylabel3)
     ax2.legend(frameon=False)
     plt.draw()
+    plt.tight_layout()
     # ax2.tight_layout()
     plt.pause(0.25)
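
Aside for readers skimming this diff: the loop above is a standard BoTorch expected-improvement iteration with a near-zero fixed noise level, so the GP effectively interpolates its samples. A minimal self-contained sketch of the same pattern on a toy objective; toy_f, the endpoint initialization, and the loop length are illustrative stand-ins, not code from this repo:

import torch
from botorch.models import FixedNoiseGP
from botorch.fit import fit_gpytorch_model
from gpytorch.mlls import ExactMarginalLogLikelihood
from botorch.acquisition import qExpectedImprovement
from botorch.optim import optimize_acqf

dtype = torch.double

def toy_f(x):
    # Stand-in for the interpolated correlation-length curve f(P)
    return torch.sin(6 * x)

bounds = torch.stack([torch.zeros(1, dtype=dtype), torch.ones(1, dtype=dtype)])
train_X = torch.tensor([[0.0], [1.0]], dtype=dtype)  # start from the two endpoints, as the script does
train_Y = toy_f(train_X)

for step in range(5):
    # Standardize the targets, as in the script ("Mean and Std")
    norm_Y = (train_Y - train_Y.mean()) / train_Y.std()
    # Near-zero fixed noise: observations are treated as noise-free
    gp = FixedNoiseGP(train_X, norm_Y, train_Yvar=torch.full_like(norm_Y, 1e-8))
    mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
    fit_gpytorch_model(mll)

    # Expected improvement over the best standardized observation
    EI = qExpectedImprovement(gp, best_f=norm_Y.max())
    candidate, _ = optimize_acqf(EI, bounds=bounds, q=1, num_restarts=5, raw_samples=1000)

    # Evaluate the objective at the proposed point and grow the data set
    train_X = torch.cat((train_X, candidate))
    train_Y = torch.cat((train_Y, toy_f(candidate)))
    print(step, candidate.item(), train_Y[-1].item())

Stopping on how far the candidate moved (the err/tol check in the script) is a pragmatic choice: the loop halts as soon as EI keeps proposing essentially the same pressure.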

NoiseFree/B1.mat (1.14 KB, binary file not shown)
NoiseFree/B2.mat (1.14 KB, binary file not shown)
NoiseFree/B3.mat (1.14 KB, binary file not shown)
NoiseFree/B4.mat (591 Bytes, binary file not shown)
NoiseFree/co2_t30C.mat (597 Bytes, binary file not shown)
NoiseFree/co2_t31C.mat (459 Bytes, binary file not shown)
NoiseFree/co2_t32C.mat (596 Bytes, binary file not shown)
NoiseFree/co2_t33C.mat (597 Bytes, binary file not shown)
NoiseFree/co2_t34C.mat (596 Bytes, binary file not shown)
NoisyData/co2_t30C.mat (929 Bytes, binary file not shown)
NoisyData/co2_t30p5.mat (1.92 KB, binary file not shown)
NoisyData/co2_t31C.mat (691 Bytes, binary file not shown)
NoisyData/co2_t31p5.mat (1.91 KB, binary file not shown)
NoisyData/co2_t32C.mat (929 Bytes, binary file not shown)
NoisyData/co2_t32p5.mat (1.92 KB, binary file not shown)
NoisyData/co2_t33C.mat (937 Bytes, binary file not shown)
NoisyData/co2_t34C.mat (933 Bytes, binary file not shown)
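
The loadmatlab() functions in this commit imply a simple layout for these .mat files: each stores an array keyed by the file's base name, one row per pressure point, holding [P, correlation length, density fluctuation], plus two standard-deviation columns in the NoisyData set. A quick inspection sketch along those lines; the key name follows loadmatlab(), everything else is illustrative:

import scipy.io

mat = scipy.io.loadmat('NoiseFree/B1', appendmat=True)  # appendmat adds the '.mat' suffix
data = mat['B1']                                        # array keyed by the base filename
print(data.shape)  # expected (N, 3): P, corr_length, dens_fluc per row
print(data[0])     # first pressure point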

NoisyDensCorr.py (new file, +205)
@@ -0,0 +1,205 @@
+import matplotlib.pyplot as plt
+import matplotlib as mpl
+import torch
+import math
+import random
+import numpy as np
+import scipy.io
+from scipy.interpolate import interp1d
+
+from botorch.models import SingleTaskGP, FixedNoiseGP
+from botorch.fit import fit_gpytorch_model
+from gpytorch.likelihoods import GaussianLikelihood
+from gpytorch.constraints import Interval, GreaterThan
+from gpytorch.mlls import ExactMarginalLogLikelihood
+from gpytorch.kernels import CosineKernel, RBFKernel, RQKernel, MaternKernel
+
+from botorch.acquisition import qExpectedImprovement
+from botorch.optim import optimize_acqf
+
+
+nice_fonts = {
+    # Use LaTex to write all text
+    "text.usetex": False,
+    "font.family": "serif",
+    "mathtext.fontset": "dejavuserif",
+    # Thesis use 16 and 14, respectively
+    "axes.labelsize": 18,
+    "font.size": 18,
+    # Make the legend/label fonts a little smaller
+    "legend.fontsize": 16,
+    "xtick.labelsize": 18,
+    "ytick.labelsize": 18,
+    "figure.figsize": [12,8],
+}
+
+mpl.rcParams.update(nice_fonts)
+
+# device = torch.device("cpu")
+dtype = torch.double
+
+def loadmatlab(filename, noise_flag):
+    mat = scipy.io.loadmat('NoisyData/'+filename, appendmat=True)
+    mat_contents = mat[filename]
+    N = len(mat_contents)
+    P = np.zeros(N)
+    corr_length = np.zeros(N)
+    dens_fluc = np.zeros(N)
+
+    if noise_flag == True:
+        std_corr_length = np.zeros(N)
+        std_dens_fluc = np.zeros(N)
+
+    for i in range(N):
+        P[-1-i] = mat_contents[i][0]
+        corr_length[-1-i] = mat_contents[i][1]
+        dens_fluc[-1-i] = mat_contents[i][2]
+
+        if noise_flag == True:
+            std_corr_length[-1-i] = mat_contents[i][3]
+            std_dens_fluc[-1-i] = mat_contents[i][4]
+
+    if noise_flag == True:
+        return P, corr_length, dens_fluc, std_corr_length, std_dens_fluc
+
+    else:
+        return P, corr_length, dens_fluc
+
+
+# Loading the data
+temperature = 30
+print(temperature)
+if type(temperature) == int:
+    P, corr_length, dens_fluc, std_corr_length, std_dens_fluc = loadmatlab('co2_t'+str(temperature)+'C', True)
+else:
+    if temperature == 30.5:
+        P, corr_length, dens_fluc, std_corr_length, std_dens_fluc = loadmatlab('co2_t'+'30p5', True)
+    if temperature == 31.5:
+        P, corr_length, dens_fluc, std_corr_length, std_dens_fluc = loadmatlab('co2_t'+'31p5', True)
+    if temperature == 32.5:
+        P, corr_length, dens_fluc, std_corr_length, std_dens_fluc = loadmatlab('co2_t'+'32p5', True)
+
+
+# Creating a cubic interpolation based on the gathered data
+def f(x):
+    f_interp = interp1d(P, corr_length, kind='cubic')
+
+    return f_interp(x)
+
+def noise(x):
+    noise_interp = interp1d(P, std_corr_length, kind='cubic')
+
+    return noise_interp(x)
+
+# For plotting purposes only
+plot_x = np.linspace(P.min(), P.max(), 1001)
+plot_y = f(plot_x)
+noiseplot_y = noise(plot_x)
+actual_max = plot_x[np.where(plot_y == plot_y.max())[0]]
+
+# Normalized pressure values
+norm_P = (P - P.min())/(P.max() - P.min())
+# norm_corr_length = (corr_length - corr_length.min())/(corr_length.max() - corr_length.min())
+
+bounds = torch.stack([torch.zeros(1, dtype=dtype), torch.ones(1, dtype=dtype)])
+
+# Pick which samples to start with
+train_X = [[norm_P[0]], [norm_P[-1]]]
+train_Y = [[corr_length[0]], [corr_length[-1]]]
+noise_Y = [[std_corr_length[0]], [std_corr_length[-1]]]
+
+train_X = torch.tensor(train_X, dtype=dtype)
+train_Y = torch.tensor(train_Y, dtype=dtype)
+noise_Y = torch.tensor(noise_Y, dtype=dtype)
+print(noise_Y)
+
+# Interactive plot to visualize BO moves
+plt.ion()
+fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
+ax1.title.set_text('T = ' + str(temperature) + '$\\degree$C')
+ax1.plot(plot_x, plot_y, 'k-', label='f(P)')
+ax1.plot(actual_max, plot_y.max(), 'r1', markersize=20, label='Actual Max')
+ax1.plot(train_X.cpu().numpy()*(P.max() - P.min()) + P.min(), train_Y.cpu().numpy(), 'ko', mfc='None', markersize=8, label='Samples')
+ax1.set(ylabel='$L~[\\AA]$')
+ax1.set_xlim(70, 82)
+ax1.legend(frameon=False)
+# ax1.pause(0.1)
+
+ax2.set(xlabel='$P~$[bar]', ylabel='Exp. Improvement')
+ax2.set_xlim(70, 82)
+plt.tight_layout()
+plt.pause(0.4)
+
+i = 1
+err = 1
+tol = 1e-2
+n_iter = 20
+x_span = torch.linspace(0, 1, 1001)[:, None, None] # batch, 1 (q), 1 (feature dimension)
+noise_std = 1e-4
+
+# Main BO Loop
+while i <= n_iter and abs(err) > tol:
+    # Two different ways of normalizing; keep training (generated) data stored separately
+    norm_Y = (train_Y - train_Y.mean())/train_Y.std() # Mean and Std
+    # norm_Y = (train_Y - train_Y.min())/(train_Y.max() - train_Y.min()) # Min Max
+
+    # Fitting a GP model
+    # noise_Y = torch.full_like(norm_Y, noise_std)
+    gp = FixedNoiseGP(train_X, norm_Y, train_Yvar=noise_Y**2)
+    mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
+    fit_gpytorch_model(mll)
+
+    # Getting an acquisition function
+    EI = qExpectedImprovement(gp, norm_Y.max(), maximize=True)
+
+    # Optimizing the acquisition function to get its max
+    candidate, _ = optimize_acqf(EI, bounds=bounds, q=1, num_restarts=5, raw_samples=1000)
+
+    acq_eval = EI(x_span)
+    posterior = gp.posterior(x_span)
+    # print(posterior)
+
+    # Calculate by how much the BO guess has moved by
+    unnorm_candidate = candidate*(P.max() - P.min()) + P.min()
+
+    err = candidate - train_X[-1]
+
+    print(i, unnorm_candidate.cpu().numpy(), f(unnorm_candidate), abs(err).cpu().numpy())
+
+    # Append new torch tensors
+    train_X = torch.cat((train_X, candidate))
+
+    # Unnormalized
+    train_Y = torch.cat((train_Y, torch.tensor(f(unnorm_candidate), dtype=dtype)))
+    noise_Y = torch.cat((noise_Y, torch.tensor(noise(unnorm_candidate), dtype=dtype)))
+
+    if i == 1:
+        mylabel = 'BO Step'
+        mylabel2 = 'Acq. Func.'
+        mylabel3 = 'Target'
+    else:
+        mylabel = None
+        mylabel2 = 'Acq. Func.'
+        mylabel3 = None
+    i += 1
+
+    # Plot to see the BO moves
+    ax1.plot(unnorm_candidate, f(unnorm_candidate), 'bs', markersize=8, label=mylabel)
+    ax1.legend(frameon=False)
+    plt.draw()
+
+    ax2.plot(plot_x, acq_eval.detach().numpy(), 'g', alpha=0.5, label=mylabel2)
+    ax2.set(xlabel='$P~$[bar]', ylabel='Exp. Improvement')
+    ax2.legend(frameon=False)
+    plt.draw()
+    plt.tight_layout()
+    plt.pause(0.25)
+
+    if abs(err) > tol:
+        plt.cla()
+
+error = ((unnorm_candidate - actual_max)*100/actual_max).cpu().numpy()
+print("Error from actual max is:", round(error[0][0],2),"%")
+print("Actual max is:", actual_max, round(plot_y.max(),2))
+plt.ioff()
+plt.show()
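
The key change relative to DensCorr.py is that the measured per-point standard deviations are interpolated (noise(x)) and passed to the GP as fixed observation variances, train_Yvar=noise_Y**2. One subtlety worth flagging: the targets are standardized to norm_Y, while noise_Y stays in raw units, so the variances handed to FixedNoiseGP are arguably a factor of train_Y.std()**2 too large. A minimal sketch of unit-consistent standardization, under that reading (toy data, not from the repo):

import torch
from botorch.models import FixedNoiseGP

dtype = torch.double
train_X = torch.tensor([[0.0], [0.5], [1.0]], dtype=dtype)
train_Y = torch.tensor([[1.2], [2.0], [0.7]], dtype=dtype)     # toy observations
noise_Y = torch.tensor([[0.05], [0.08], [0.04]], dtype=dtype)  # measured std devs

mu, sigma = train_Y.mean(), train_Y.std()
norm_Y = (train_Y - mu) / sigma
# Rescaling the targets by 1/sigma rescales their noise std by 1/sigma too,
# so the fixed variances should be divided by sigma**2.
norm_Yvar = (noise_Y / sigma) ** 2

gp = FixedNoiseGP(train_X, norm_Y, train_Yvar=norm_Yvar)
# Fitting and acquisition then proceed exactly as in the script.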

p71_t31p5.mat (2.05 MB, binary file not shown)
