@@ -48,14 +48,300 @@ Base.@kwdef mutable struct PointwiseGPU <: ExtendGPU
    " Flag for stack prepopulation. Good if the total number
    of nodes throughout the solve is expected to be large (default = true)"
    prepopulate::Bool = true
+   " Frequency of garbage collection (number of iterations)"
+   gc_freq::Int = 300
    " (In development) Number of points to use for multistarting the NLP solver"
    multistart_points::Int = 1
end

function PointwiseGPU(convex_func, var_count::Int; alpha::Float64 = 0.01, node_limit::Int = 50000,
-                     prepopulate::Bool = true, multistart_points::Int = 1)
+                     prepopulate::Bool = true, gc_freq::Int = 300, multistart_points::Int = 1)
    return PointwiseGPU(convex_func, var_count, node_limit, alpha,
+                       Vector{Float64}(undef, node_limit), Vector{Float64}(undef, node_limit), Vector{NodeBB}(undef, node_limit), 0,
+                       Matrix{Float64}(undef, node_limit, var_count),
+                       Matrix{Float64}(undef, node_limit, var_count), prepopulate, gc_freq, multistart_points)
+end
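
As a minimal usage sketch (not part of this commit): the new `gc_freq` keyword is passed straight through to the struct, so requesting garbage collection every 500 lower-bounding iterations instead of the default 300 looks like the following, where `convex_relax` is a hypothetical stand-in for an SCMC-generated relaxation function.

# Hypothetical convex relaxation of f(p) = p[1]^2 + p[2]^2; in practice this
# function would be generated by SourceCodeMcCormick rather than written by hand.
convex_relax(p) = p[1]^2 + p[2]^2

# Two decision variables, default node limit, less frequent garbage collection.
ext = PointwiseGPU(convex_relax, 2; node_limit = 50000, gc_freq = 500)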
+
+
+"""
+$(TYPEDEF)
+
+The SubgradGPU integrator is meant to be paired with the SourceCodeMcCormick
+package. SubgradGPU differs from PointwiseGPU in that SubgradGPU requires
+the `convex_func_and_subgrad` term to return both evaluations of the convex
+relaxation and evaluations of the subgradient of the convex relaxation.
+
+$(TYPEDFIELDS)
+"""
+Base.@kwdef mutable struct SubgradGPU <: ExtendGPU
+    " A user-defined function taking argument `p` and returning evaluations of the convex
+    relaxation of the objective function, its interval lower bound, and its subgradient ([cv, lo, subgrad])"
+    convex_func_and_subgrad
+    " Number of decision variables"
+    np::Int
+    " The number of nodes to evaluate in parallel (default = 50000)"
+    node_limit::Int64 = 50000
+    " A parameter that controls how far apart the evaluation points are spread. Should be
+    in the range (0.0, 1.0]"
+    α::Float64 = 0.5
+    " Lower bound storage to hold calculated lower bounds for multiple nodes."
+    lower_bound_storage::Vector{Float64} = Vector{Float64}()
+    " Upper bound storage to hold calculated upper bounds for multiple nodes."
+    upper_bound_storage::Vector{Float64} = Vector{Float64}()
+    " Node storage to hold individual nodes outside of the main stack"
+    node_storage::Vector{NodeBB} = Vector{NodeBB}()
+    " An internal tracker of nodes in internal storage"
+    node_len::Int = 0
+    " Variable lower bounds to evaluate"
+    all_lvbs::Matrix{Float64} = Matrix{Float64}(undef, 0, 0)
+    " Variable upper bounds to evaluate"
+    all_uvbs::Matrix{Float64} = Matrix{Float64}(undef, 0, 0)
+    " Internal tracker for the count in the main stack"
+    # node_count::Int = 0
+    " Flag for stack prepopulation. Good if the total number
+    of nodes throughout the solve is expected to be large (default = true)"
+    prepopulate::Bool = true
+    " (In development) Number of points to use for multistarting the NLP solver"
+    multistart_points::Int = 1
+end
+
+function SubgradGPU(convex_func_and_subgrad, var_count::Int; alpha::Float64 = 0.01, node_limit::Int = 50000,
+                    prepopulate::Bool = true, multistart_points::Int = 1)
+    return SubgradGPU(convex_func_and_subgrad, var_count, node_limit, alpha,
                      Vector{Float64}(undef, node_limit), Vector{Float64}(undef, node_limit), Vector{NodeBB}(undef, node_limit), 0,
                      Matrix{Float64}(undef, node_limit, var_count),
                      Matrix{Float64}(undef, node_limit, var_count), prepopulate, multistart_points)
+end
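
A minimal sketch of the `[cv, lo, subgrad]` contract described above, assuming a one-variable problem and a hand-written relaxation (real relaxations, and the way node bounds reach them, come from SourceCodeMcCormick; the keyword bounds below are purely illustrative):

# f(p) = p^2 is already convex, so its convex relaxation on a node [pL, pU] is
# the function itself and the subgradient is the ordinary derivative.
function square_relax(p; pL = -1.0, pU = 2.0)
    cv = p^2                                        # convex relaxation value at p
    lo = (pL <= 0.0 <= pU) ? 0.0 : min(pL^2, pU^2)  # interval lower bound of p^2 on [pL, pU]
    subgrad = 2.0 * p                               # subgradient of the relaxation at p
    return cv, lo, subgrad
end

ext = SubgradGPU(square_relax, 1; node_limit = 50000)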
+
+
+"""
+$(TYPEDEF)
+
+The SimplexGPU integrator is meant to be paired with the SourceCodeMcCormick
+package. SimplexGPU differs from SubgradGPU in that SimplexGPU can handle
+inequality constraints, and in that relaxations are made tighter by solving
+linear programs within the lower bounding routine to make better use of
+subgradient information. Like SubgradGPU, SimplexGPU requires the
+`convex_func_and_subgrad` term to return both evaluations of the convex
+relaxation and evaluations of the subgradient of the convex relaxation.
+
+$(TYPEDFIELDS)
+"""
+Base.@kwdef mutable struct SimplexGPU_OnlyObj <: ExtendGPU
+    " A user-defined function taking argument `p` and returning evaluations of the convex
+    relaxation of the objective function, its interval lower bound, and its subgradient ([cv, lo, subgrad])"
+    convex_func_and_subgrad
+    " Number of decision variables"
+    np::Int
+    " The number of nodes to evaluate in parallel (default = 1024)"
+    node_limit::Int64 = 1024
+    " Lower bound storage to hold calculated lower bounds for multiple nodes."
+    lower_bound_storage::Vector{Float64} = Vector{Float64}()
+    " Upper bound storage to hold calculated upper bounds for multiple nodes."
+    upper_bound_storage::Vector{Float64} = Vector{Float64}()
+    " Node storage to hold individual nodes outside of the main stack"
+    node_storage::Vector{NodeBB} = Vector{NodeBB}()
+    " An internal tracker of nodes in internal storage"
+    node_len::Int = 0
+    " Variable lower bounds to evaluate"
+    all_lvbs::Matrix{Float64} = Matrix{Float64}(undef, 0, 0)
+    " Variable upper bounds to evaluate"
+    all_uvbs::Matrix{Float64} = Matrix{Float64}(undef, 0, 0)
+    " Flag for stack prepopulation. Good if the total number
+    of nodes throughout the solve is expected to be large (default = true)"
+    prepopulate::Bool = true
+    " Total number of cuts to do on each node"
+    max_cuts::Int = 3
+    " Frequency of garbage collection (number of iterations)"
+    gc_freq::Int = 15
+    " (In development) Number of points to use for multistarting the NLP solver"
+    multistart_points::Int = 1
+    relax_time::Float64 = 0.0
+    opt_time::Float64 = 0.0
+    lower_counter::Int = 0
+    node_counter::Int = 0
+end
+
+function SimplexGPU_OnlyObj(convex_func_and_subgrad, var_count::Int; node_limit::Int = 1024,
+                            prepopulate::Bool = true, max_cuts::Int = 3, gc_freq::Int = 15,
+                            multistart_points::Int = 1)
+    return SimplexGPU_OnlyObj(convex_func_and_subgrad, var_count, node_limit,
+                              Vector{Float64}(undef, node_limit), Vector{Float64}(undef, node_limit), Vector{NodeBB}(undef, node_limit), 0,
+                              Matrix{Float64}(undef, node_limit, var_count),
+                              Matrix{Float64}(undef, node_limit, var_count), prepopulate, max_cuts, gc_freq, multistart_points, 0.0, 0.0, 0, 0)
+end
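
The relaxation contract is the same as for SubgradGPU, so only the construction differs; reusing the hypothetical `square_relax` sketch from above:

# Up to 3 LP-based cuts per node; garbage collection every 15 iterations.
ext = SimplexGPU_OnlyObj(square_relax, 1; node_limit = 1024, max_cuts = 3, gc_freq = 15)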
+
+"""
+$(TYPEDEF)
+
+The SimplexGPU_ObjAndCons structure is meant to handle optimization problems
+with nontrivial constraints as well as a potentially nonlinear objective
+function. Note that this struct requires the functions representing the
+objective function and constraints to mutate arguments, rather than return
+a tuple of results. SimplexGPU_ObjAndCons is not designed to handle mixed-integer
+problems; NLPs only.
+
+$(TYPEDFIELDS)
+"""
+Base.@kwdef mutable struct SimplexGPU_ObjAndCons <: ExtendGPU
+    " An SCMC-generated or user-defined function taking arguments [cv, lo, [cv_subgrad]..., p...],
+    which modifies `cv` to hold the convex relaxation of the objective function, `lo` to hold
+    the lower bound of the inclusion monotonic interval extension of the objective function,
+    and n instances of `cv_subgrad` that will hold the n subgradients of the convex relaxation
+    of the objective function (where n is the dimensionality of the problem), all evaluated at
+    points `p`"
+    obj_fun
+    " A vector of SCMC-generated or user-defined functions, each with the same form as `obj_fun`,
+    but with arguments [cv, [cv_subgrad]..., p...], representing all of the LEQ inequality constraints"
+    leq_cons
+    " A vector of SCMC-generated or user-defined functions, taking arguments [cc, [cc_subgrad]..., p...],
+    defined similarly to the objective function and LEQ constraints, representing all of the
+    GEQ inequality constraints"
+    geq_cons
+    " A vector of SCMC-generated or user-defined functions, taking arguments
+    [cv, cc, [cv_subgrad]..., [cc_subgrad]..., p...], with terms defined similarly to
+    the objective function and inequality constraints, representing all of the equality constraints"
+    eq_cons
+    " Number of decision variables"
+    np::Int
+    " The number of nodes to evaluate in parallel (default = 1024)"
+    node_limit::Int64 = 1024
+    " Lower bound storage to hold calculated lower bounds for multiple nodes."
+    lower_bound_storage::Vector{Float64} = Vector{Float64}()
+    " Upper bound storage to hold calculated upper bounds for multiple nodes."
+    upper_bound_storage::Vector{Float64} = Vector{Float64}()
+    " Node storage to hold individual nodes outside of the main stack"
+    node_storage::Vector{NodeBB} = Vector{NodeBB}()
+    " An internal tracker of nodes in internal storage"
+    node_len::Int = 0
+    " Variable lower bounds to evaluate"
+    all_lvbs::Matrix{Float64} = Matrix{Float64}(undef, 0, 0)
+    " Variable upper bounds to evaluate"
+    all_uvbs::Matrix{Float64} = Matrix{Float64}(undef, 0, 0)
+    " Flag for stack prepopulation. Good if the total number
+    of nodes throughout the solve is expected to be large (default = true)"
+    prepopulate::Bool = true
+    " Total number of cuts to do on each node"
+    max_cuts::Int = 3
+    " Frequency of garbage collection (number of iterations)"
+    gc_freq::Int = 15
+    " (In development) Number of points to use for multistarting the NLP solver"
+    multistart_points::Int = 1
+    relax_time::Float64 = 0.0
+    opt_time::Float64 = 0.0
+    lower_counter::Int = 0
+    node_counter::Int = 0
+end
+
+function SimplexGPU_ObjAndCons(obj_fun, var_count::Int; geq_cons=[], leq_cons=[], eq_cons=[], node_limit::Int = 1024,
+                               prepopulate::Bool = true, max_cuts::Int = 3, gc_freq::Int = 15, multistart_points::Int = 1)
+    return SimplexGPU_ObjAndCons(obj_fun, leq_cons, geq_cons, eq_cons, var_count, node_limit,
+                                 Vector{Float64}(undef, node_limit), Vector{Float64}(undef, node_limit), Vector{NodeBB}(undef, node_limit), 0,
+                                 Matrix{Float64}(undef, node_limit, var_count),
+                                 Matrix{Float64}(undef, node_limit, var_count), prepopulate, max_cuts, gc_freq, multistart_points, 0.0, 0.0, 0, 0)
+end
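
A hedged sketch of the mutating calling convention documented in the fields above, for a two-variable problem with objective f(p) = p1^2 + p2^2 and one LEQ constraint p1 + p2 - 1 <= 0. The function names and the trivial interval bound are illustrative assumptions; SCMC-generated kernels compute the interval bound from the node bounds they receive.

# Every argument is a vector with one entry per evaluated point, and the
# output arguments (cv, lo, and one subgradient array per variable) are
# modified in place.
function toy_obj!(cv, lo, cv_grad1, cv_grad2, p1, p2)
    @. cv = p1^2 + p2^2      # convex relaxation (this toy objective is already convex)
    @. cv_grad1 = 2.0 * p1   # subgradient component with respect to p1
    @. cv_grad2 = 2.0 * p2   # subgradient component with respect to p2
    lo .= 0.0                # valid, though loose, lower bound for this nonnegative objective
    return nothing
end

function toy_leq!(cv, cv_grad1, cv_grad2, p1, p2)
    @. cv = p1 + p2 - 1.0    # an affine constraint is its own convex relaxation
    cv_grad1 .= 1.0
    cv_grad2 .= 1.0
    return nothing
end

ext = SimplexGPU_ObjAndCons(toy_obj!, 2; leq_cons = [toy_leq!], node_limit = 1024)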
+
+
+Base.@kwdef mutable struct SimplexGPU_ObjOnly_Mat <: ExtendGPU
+    " An SCMC-generated or user-defined function taking arguments [cv, lo, [cv_subgrad]..., p...],
+    which modifies `cv` to hold the convex relaxation of the objective function, `lo` to hold
+    the lower bound of the inclusion monotonic interval extension of the objective function,
+    and n instances of `cv_subgrad` that will hold the n subgradients of the convex relaxation
+    of the objective function (where n is the dimensionality of the problem), all evaluated at
+    points `p`"
+    obj_fun
+    " Number of decision variables"
+    np::Int
+    " The number of nodes to evaluate in parallel (default = 1024)"
+    node_limit::Int64 = 1024
+    " Lower bound storage to hold calculated lower bounds for multiple nodes."
+    lower_bound_storage::Vector{Float64} = Vector{Float64}()
+    " Upper bound storage to hold calculated upper bounds for multiple nodes."
+    upper_bound_storage::Vector{Float64} = Vector{Float64}()
+    " Node storage to hold individual nodes outside of the main stack"
+    node_storage::Vector{NodeBB} = Vector{NodeBB}()
+    " An internal tracker of nodes in internal storage"
+    node_len::Int = 0
+    " Variable lower bounds to evaluate"
+    all_lvbs::Matrix{Float64} = Matrix{Float64}(undef, 0, 0)
+    " Variable upper bounds to evaluate"
+    all_uvbs::Matrix{Float64} = Matrix{Float64}(undef, 0, 0)
+    " Flag for stack prepopulation. Good if the total number
+    of nodes throughout the solve is expected to be large (default = true)"
+    prepopulate::Bool = true
+    " Total number of cuts to do on each node"
+    max_cuts::Int = 3
+    " Frequency of garbage collection (number of iterations)"
+    gc_freq::Int = 15
+    " (In development) Number of points to use for multistarting the NLP solver"
+    multistart_points::Int = 1
+    relax_time::Float64 = 0.0
+    opt_time::Float64 = 0.0
+    lower_counter::Int = 0
+    node_counter::Int = 0
+end
+
+function SimplexGPU_ObjOnly_Mat(obj_fun, var_count::Int; node_limit::Int = 1024,
+                                prepopulate::Bool = true, max_cuts::Int = 3, gc_freq::Int = 15, multistart_points::Int = 1)
+    return SimplexGPU_ObjOnly_Mat(obj_fun, var_count, node_limit,
+                                  Vector{Float64}(undef, node_limit), Vector{Float64}(undef, node_limit), Vector{NodeBB}(undef, node_limit), 0,
+                                  Matrix{Float64}(undef, node_limit, var_count),
+                                  Matrix{Float64}(undef, node_limit, var_count), prepopulate, max_cuts, gc_freq, multistart_points, 0.0, 0.0, 0, 0)
+end
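
SimplexGPU_ObjOnly_Mat uses the same mutating `obj_fun` contract as SimplexGPU_ObjAndCons, so the hypothetical `toy_obj!` sketch above would be passed the same way:

ext = SimplexGPU_ObjOnly_Mat(toy_obj!, 2; node_limit = 1024)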
+
+
+"""
+$(TYPEDEF)
+
+This is a testing method/struct, to see if we can check fewer points per node
+when we construct the LPs and still get all the same benefits. The normal
+SimplexGPU method uses 2n+1 points, where n is the problem dimensionality.
+This method only uses a single point in the center of the node, and can
+therefore get away with more simultaneous LPs, since each one is significantly
+smaller.
+
+$(TYPEDFIELDS)
+"""
+Base.@kwdef mutable struct SimplexGPU_Single <: ExtendGPU
+    " A user-defined function taking argument `p` and returning evaluations of the convex
+    relaxation of the objective function, its interval lower bound, and its subgradient ([cv, lo, subgrad])"
+    convex_func_and_subgrad
+    " Number of decision variables"
+    np::Int
+    " The number of nodes to evaluate in parallel (default = 2500)"
+    node_limit::Int64 = 2500
+    " A parameter that controls how far apart the evaluation points are spread. Should be
+    in the range (0.0, 1.0]"
+    α::Float64 = 0.5
+    " Lower bound storage to hold calculated lower bounds for multiple nodes."
+    lower_bound_storage::Vector{Float64} = Vector{Float64}()
+    " Upper bound storage to hold calculated upper bounds for multiple nodes."
+    upper_bound_storage::Vector{Float64} = Vector{Float64}()
+    " Node storage to hold individual nodes outside of the main stack"
+    node_storage::Vector{NodeBB} = Vector{NodeBB}()
+    " An internal tracker of nodes in internal storage"
+    node_len::Int = 0
+    " Variable lower bounds to evaluate"
+    all_lvbs::Matrix{Float64} = Matrix{Float64}(undef, 0, 0)
+    " Variable upper bounds to evaluate"
+    all_uvbs::Matrix{Float64} = Matrix{Float64}(undef, 0, 0)
+    " Flag for stack prepopulation. Good if the total number
+    of nodes throughout the solve is expected to be large (default = true)"
+    prepopulate::Bool = true
+    " Total number of cuts to do on each node"
+    max_cuts::Int = 3
+    " (In development) Number of points to use for multistarting the NLP solver"
+    multistart_points::Int = 1
+end
+
+function SimplexGPU_Single(convex_func_and_subgrad, var_count::Int; alpha::Float64 = 0.01, node_limit::Int = 2500,
+                           prepopulate::Bool = true, max_cuts::Int = 3, multistart_points::Int = 1)
+    return SimplexGPU_Single(convex_func_and_subgrad, var_count, node_limit, alpha,
+                             Vector{Float64}(undef, node_limit), Vector{Float64}(undef, node_limit), Vector{NodeBB}(undef, node_limit), 0,
+                             Matrix{Float64}(undef, node_limit, var_count),
+                             Matrix{Float64}(undef, node_limit, var_count), prepopulate, max_cuts, multistart_points)
end
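
The single-point strategy described in the docstring above amounts to taking one supporting cut per node from the node midpoint. A minimal sketch of that idea, assuming `convex_func_and_subgrad` accepts a vector `p` and returns `(cv, lo, subgrad)` as in its field description (the actual lower-bounding routine assembles LPs rather than using this closed form):

# For any p in the node, f(p) >= f_cv(p) >= f_cv(p0) + g'*(p - p0), so minimizing
# the affine right-hand side over the box gives a valid lower bound for the node.
function single_point_cut_bound(convex_func_and_subgrad, lvb::Vector{Float64}, uvb::Vector{Float64})
    p0 = (lvb .+ uvb) ./ 2.0
    cv, lo, g = convex_func_and_subgrad(p0)
    bound = cv + sum(g[i] > 0 ? g[i] * (lvb[i] - p0[i]) : g[i] * (uvb[i] - p0[i]) for i in eachindex(p0))
    return max(bound, lo)    # the interval bound `lo` can only tighten the result
end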