1
1
module FlowOverCircle
2
2
3
- using NeuralOperators
4
- using Flux
5
- using CUDA
6
- using JLD2
3
+ using WaterLily, LinearAlgebra, ProgressMeter, MLUtils
4
+ using NeuralOperators, Flux
5
+ using CUDA, FluxTraining, BSON
7
6
8
- include (" data.jl" )
7
# Build a WaterLily simulation of 2-D flow over a circular cylinder.
# Adapted from the WaterLily examples (https://github.com/weymouth/WaterLily.jl).
#
# # Arguments
# - `n`, `m`: interior grid size (the simulation adds a one-cell halo per side).
#
# # Keywords
# - `Re`: Reynolds number used to set the kinematic viscosity (default 250).
#
# Returns a `WaterLily.Simulation`.
function circle(n, m; Re = 250)
    # Physical parameters: free-stream speed, cylinder radius, and center.
    U, R, center = 1.0, m / 8.0, [m / 2, m / 2]
    ν = U * R / Re  # kinematic viscosity from the Reynolds number

    # Signed-distance description of the cylinder. Use the public
    # `LinearAlgebra.norm` here: the original called the internal
    # `LinearAlgebra.norm2`, which is not part of the stable API and was
    # removed/renamed across Julia versions.
    body = AutoBody((x, t) -> norm(x .- center) - R)
    return Simulation((n + 2, m + 2), [U, 0.0], R; ν, body)
end
15
+
16
# Generate pressure-field snapshots from the flow-over-circle simulation.
#
# For each time in `ts`, advances the simulation to that time and records the
# interior pressure field (the one-cell halo is stripped) as `Float32`.
#
# Returns an `Array{Float32}` of shape `(1, n, m, length(ts))`, where the
# leading singleton dimension is the channel axis expected by the model.
function gen_data(ts::AbstractRange)
    @info "gen data... "
    p = Progress(length(ts))

    # Grid size matches the example's hard-coded simulation resolution.
    n, m = 3 * (2^5), 2^6
    circ = circle(n, m)

    𝐩s = Array{Float32}(undef, 1, n, m, length(ts))
    for (i, t) in enumerate(ts)
        sim_step!(circ, t)
        # Slice the interior first (as a view), then convert: the original
        # `Float32.(circ.flow.p)[2:end-1, 2:end-1]` converted the whole
        # (n+2)×(m+2) field into a temporary and then sliced it, allocating
        # and converting halo cells that are immediately discarded.
        𝐩s[1, :, :, i] .= Float32.(@view circ.flow.p[2:end-1, 2:end-1])

        next!(p)
    end

    return 𝐩s
end
33
+
34
# Build train/test `Flux.DataLoader`s over consecutive pressure snapshots.
#
# Each sample pairs the field at one time step (`𝐱`) with the field at the
# next step (`𝐲`), so a model trained on it learns one-step time evolution.
#
# # Keywords
# - `ts`: simulation times to sample (passed to `gen_data`).
# - `ratio`: fraction of samples assigned to the training split.
# - `batchsize`: batch size for both loaders.
#
# Returns `(loader_train, loader_test)`; only the training loader shuffles.
function get_dataloader(; ts::AbstractRange = LinRange(100, 11000, 10000),
                        ratio::Float64 = 0.95, batchsize = 100)
    fields = gen_data(ts)

    # Inputs are steps 1:T-1, targets are steps 2:T (shifted by one frame).
    samples = (𝐱 = fields[:, :, :, 1:end-1], 𝐲 = fields[:, :, :, 2:end])
    train_split, test_split = splitobs(samples, at = ratio)

    return Flux.DataLoader(train_split, batchsize = batchsize, shuffle = true),
           Flux.DataLoader(test_split, batchsize = batchsize, shuffle = false)
end
15
43
16
44
function train ()
@@ -22,42 +50,34 @@ function train()
22
50
device = cpu
23
51
end
24
52
25
- m = Chain (
53
+ model = Chain (
26
54
Dense (1 , 64 ),
27
55
OperatorKernel (64 => 64 , (24 , 24 ), FourierTransform, gelu),
28
56
OperatorKernel (64 => 64 , (24 , 24 ), FourierTransform, gelu),
29
57
OperatorKernel (64 => 64 , (24 , 24 ), FourierTransform, gelu),
30
58
OperatorKernel (64 => 64 , (24 , 24 ), FourierTransform, gelu),
31
59
Dense (64 , 1 ),
32
- ) |> device
60
+ )
61
+ data = get_dataloader ()
62
+ optimiser = Flux. Optimiser (WeightDecay (1f-4 ), Flux. ADAM (1f-3 ))
63
+ loss_func = l₂loss
33
64
34
- loss (𝐱, 𝐲) = l₂loss (m (𝐱), 𝐲)
65
+ learner = Learner (
66
+ model, data, optimiser, loss_func,
67
+ ToDevice (device, device),
68
+ Checkpointer (joinpath (@__DIR__ , " ../model/" ))
69
+ )
35
70
36
- opt = Flux. Optimiser (WeightDecay (1f-4 ), Flux. ADAM (1f-3 ))
37
-
38
- @info " gen data... "
39
- @time loader_train, loader_test = get_dataloader ()
71
+ fit! (learner, 50 )
40
72
41
- losses = Float32[]
42
- function validate ()
43
- validation_loss = sum (loss (device (𝐱), device (𝐲)) for (𝐱, 𝐲) in loader_test)/ length (loader_test)
44
- @info " loss: $validation_loss "
45
-
46
- push! (losses, validation_loss)
47
- (losses[end ] == minimum (losses)) && update_model! (joinpath (@__DIR__ , " ../model/model.jld2" ), m)
48
- end
49
- call_back = Flux. throttle (validate, 5 , leading= false , trailing= true )
50
-
51
- data = [(𝐱, 𝐲) for (𝐱, 𝐲) in loader_train] |> device
52
- Flux. @epochs 50 @time (Flux. train! (loss, params (m), data, opt, cb= call_back))
73
+ return learner
53
74
end
54
75
55
76
# Load the most recently saved model checkpoint from `../model/`.
#
# `FluxTraining.Checkpointer` writes BSON files into that directory; `readdir`
# returns a sorted listing and the last entry is taken as the latest
# checkpoint — NOTE(review): this assumes the checkpoint filenames sort
# chronologically; confirm against Checkpointer's naming scheme.
#
# Throws an informative error (rather than a cryptic `BoundsError`) when no
# checkpoint has been saved yet.
#
# Returns the deserialized model stored under the `:model` key.
function get_model()
    model_path = joinpath(@__DIR__, "../model/")
    model_files = readdir(model_path)
    isempty(model_files) &&
        error("no model checkpoint found in $model_path; run train() first")
    model_file = model_files[end]

    # `@__MODULE__` lets BSON resolve the layer types recorded in the file.
    return BSON.load(joinpath(model_path, model_file), @__MODULE__)[:model]
end
62
82
63
- end
83
+ end # module
0 commit comments