@@ -12,6 +12,7 @@ import (
	"github.com/lightninglabs/taproot-assets/fn"
	"github.com/lightninglabs/taproot-assets/mssmt"
	"github.com/lightninglabs/taproot-assets/proof"
+	"github.com/lightninglabs/taproot-assets/taprpc"
	"golang.org/x/sync/errgroup"
)

@@ -113,6 +114,19 @@ func (s *SimpleSyncer) executeSync(ctx context.Context, diffEngine DiffEngine,
	// only fetch roots for those universes. We won't filter out any
	// Universes here as we assume that the caller has already done so.
	case len(idsToSync) != 0:
+		// We attempt to bisect the set of IDs we really need to sync by
+		// using ephemeral multiverse trees and a bisect algorithm to
+		// find the diffs in the root nodes. This allows us to more
+		// efficiently find out which roots we need to sync compared to
+		// querying the remote server for each root individually.
+		idsToSync, err = s.bisectOutdatedRoots(
+			ctx, idsToSync, diffEngine,
+		)
+		if err != nil {
+			return nil, fmt.Errorf("unable to bisect outdated "+
+				"roots: %w", err)
+		}
+
		targetRoots, err = fetchRootsForIDs(ctx, idsToSync, diffEngine)
		if err != nil {
			return nil, err
@@ -123,6 +137,10 @@ func (s *SimpleSyncer) executeSync(ctx context.Context, diffEngine DiffEngine,
	case globalInsertEnabled:
		log.Infof("Fetching all roots for remote Universe server...")

+		// Since we're also interested in learning about _new_ universes
+		// in this case, we can't use the bisect algorithm to find the
+		// diffs in the root nodes. Instead, we'll just fetch all the
+		// roots from the remote server.
		targetRoots, err = s.fetchAllRoots(ctx, diffEngine)
		if err != nil {
			return nil, err
@@ -140,18 +158,26 @@ func (s *SimpleSyncer) executeSync(ctx context.Context, diffEngine DiffEngine,
	// configs.
	default:
		var uniIDs []Identifier
-
		for _, uniSyncCfg := range syncConfigs.UniSyncConfigs {
			// Check with the filter to ensure that the universe is
			// applicable for syncing. If not, we would have
			// retrieved the corresponding root in vain.
			if uniIdSyncFilter(uniSyncCfg.UniverseID) {
-				uniIDs = append(
-					uniIDs, uniSyncCfg.UniverseID,
-				)
+				uniIDs = append(uniIDs, uniSyncCfg.UniverseID)
			}
		}

+		// We attempt to bisect the set of IDs we really need to sync by
+		// using ephemeral multiverse trees and a bisect algorithm to
+		// find the diffs in the root nodes. This allows us to more
+		// efficiently find out which roots we need to sync compared to
+		// querying the remote server for each root individually.
+		uniIDs, err = s.bisectOutdatedRoots(ctx, uniIDs, diffEngine)
+		if err != nil {
+			return nil, fmt.Errorf("unable to bisect outdated "+
+				"roots: %w", err)
+		}
+
		// Retrieve roots for the gathered set of universes.
		targetRoots, err = fetchRootsForIDs(ctx, uniIDs, diffEngine)
		if err != nil {
@@ -190,8 +216,7 @@ func fetchRootsForIDs(ctx context.Context, idsToSync []Identifier,
	// as a series of parallel requests backed by a worker pool.
	rootsToSync := make(chan Root, len(idsToSync))
	err := fn.ParSlice(
-		ctx, idsToSync,
-		func(ctx context.Context, id Identifier) error {
+		ctx, idsToSync, func(ctx context.Context, id Identifier) error {
			root, err := diffEngine.RootNode(ctx, id)
			if err != nil {
				return err
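For context, the reflowed fn.ParSlice call above wraps a bounded worker pool that fans out one RootNode request per ID and funnels the results into a buffered channel. Below is a minimal, self-contained sketch of the same pattern using errgroup (already imported by this file); the root type and fetchRoot helper are placeholders for this sketch only, not the repository's API.

```go
package sketch

import (
	"context"

	"golang.org/x/sync/errgroup"
)

// root is a placeholder for the universe root type; fetchRoot stands in for
// diffEngine.RootNode. Both are assumptions made for this sketch.
type root struct{ id string }

func fetchRoot(ctx context.Context, id string) (root, error) {
	return root{id: id}, nil
}

// fetchRootsParallel fans out one request per ID with a bounded number of
// workers and collects the results from a buffered channel once all workers
// have finished.
func fetchRootsParallel(ctx context.Context, ids []string) ([]root, error) {
	results := make(chan root, len(ids))

	g, ctx := errgroup.WithContext(ctx)
	g.SetLimit(8) // cap concurrency, similar to a worker pool

	for _, id := range ids {
		id := id
		g.Go(func() error {
			r, err := fetchRoot(ctx, id)
			if err != nil {
				return err
			}
			results <- r
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		return nil, err
	}
	close(results)

	roots := make([]root, 0, len(ids))
	for r := range results {
		roots = append(roots, r)
	}
	return roots, nil
}
```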
@@ -209,6 +234,113 @@ func fetchRootsForIDs(ctx context.Context, idsToSync []Identifier,
	return fn.Collect(rootsToSync), nil
}

+// bisectOutdatedRoots attempts to bisect the set of IDs we need to sync by
+// using ephemeral multiverse trees and a bisect algorithm to find the diffs in
+// the root nodes. This allows us to more efficiently find out which roots we
+// need to sync compared to querying the remote server for each root
+// individually. If the server doesn't yet implement the MultiverseRoot RPC, we
+// simply return the original set of IDs and the "legacy" sync algorithm will be
+// used.
+func (s *SimpleSyncer) bisectOutdatedRoots(ctx context.Context,
+	idsToSync []Identifier, diffEngine DiffEngine) ([]Identifier, error) {
+
+	issuanceIDs := make([]Identifier, 0, len(idsToSync))
+	transferIDs := make([]Identifier, 0, len(idsToSync))
+	for _, id := range idsToSync {
+		switch id.ProofType {
+		case ProofTypeIssuance:
+			issuanceIDs = append(issuanceIDs, id)
+
+		case ProofTypeTransfer:
+			transferIDs = append(transferIDs, id)
+
+		case ProofTypeUnspecified:
+			issuanceID := id
+			issuanceID.ProofType = ProofTypeIssuance
+			issuanceIDs = append(issuanceIDs, issuanceID)
+
+			transferID := id
+			transferID.ProofType = ProofTypeTransfer
+			transferIDs = append(transferIDs, transferID)
+		}
+	}
+
+	targetIDs := make([]Identifier, 0, len(idsToSync))
+
+	// Compare the local and remote issuance trees.
+	if len(issuanceIDs) > 0 {
+		outdated, err := s.rootsOutdated(
+			ctx, ProofTypeIssuance, issuanceIDs, diffEngine,
+		)
+		if err != nil {
+			return nil, err
+		}
+
+		if outdated {
+			targetIDs = append(targetIDs, issuanceIDs...)
+		}
+	}
+
+	// Compare the local and remote transfer trees.
+	if len(transferIDs) > 0 {
+		outdated, err := s.rootsOutdated(
+			ctx, ProofTypeTransfer, transferIDs, diffEngine,
+		)
+		if err != nil {
+			return nil, err
+		}
+
+		if outdated {
+			targetIDs = append(targetIDs, transferIDs...)
+		}
+	}
+
+	return targetIDs, nil
+}
+
+// rootsOutdated returns true if the roots for the given IDs are outdated and
+// need to be synced.
+func (s *SimpleSyncer) rootsOutdated(ctx context.Context, proofType ProofType,
+	idsToSync []Identifier, diffEngine DiffEngine) (bool, error) {
+
+	var localRootNode, remoteRootNode mssmt.Node
+	localTree, err := s.cfg.LocalDiffEngine.MultiverseRoot(
+		ctx, proofType, idsToSync,
+	)
+	if err != nil {
+		return false, fmt.Errorf("unable to fetch local multiverse "+
+			"root: %w", err)
+	}
+	localTree.WhenSome(func(root MultiverseRoot) {
+		localRootNode = root.Node
+	})
+
+	remoteTree, err := diffEngine.MultiverseRoot(ctx, proofType, idsToSync)
+
+	// Special case for when the server doesn't yet implement the
+	// MultiverseRoot RPC. In this case, we simply return the original set
+	// of IDs and the "legacy" sync algorithm will be used.
+	if err != nil && taprpc.IsUnimplemented(err) {
+		return true, nil
+	} else if err != nil {
+		return false, fmt.Errorf("unable to fetch remote multiverse "+
+			"root: %w", err)
+	}
+
+	// Compare the local and remote transfer trees. If they differ,
+	// we need to sync all the transfer proofs.
+	remoteTree.WhenSome(func(root MultiverseRoot) {
+		remoteRootNode = root.Node
+	})
+
+	// TODO(guggero): Do an actual bi-sect here if there is no match.
+	// Do we need to return the left and right hashes of the tree to make
+	// this faster, so we can do a binary search? Then we would need to
+	// sort/split the IDs by their position in the tree though.
+
+	return !mssmt.IsEqualNode(localRootNode, remoteRootNode), nil
+}
+
// syncRoot attempts to sync the local Universe with the remote diff engine for
// a specific base root.
func (s *SimpleSyncer) syncRoot(ctx context.Context, remoteRoot Root,
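For context, the two helpers added above reduce to one decision: fetch a single aggregate (multiverse) root per proof type from both sides, and only fall back to per-ID syncing when the aggregates differ or the remote endpoint is missing. Below is a minimal sketch of that decision logic using plain byte-slice roots; ProofType, rootFetcher, ErrUnimplemented, and groupOutdated are illustrative stand-ins for this sketch, not the actual taproot-assets types.

```go
package sketch

import (
	"bytes"
	"context"
	"errors"
)

// ProofType mirrors the issuance/transfer split used in the diff; the values
// here are illustrative only.
type ProofType uint8

const (
	ProofTypeIssuance ProofType = iota
	ProofTypeTransfer
)

// ErrUnimplemented stands in for the gRPC Unimplemented case handled via
// taprpc.IsUnimplemented in the real code.
var ErrUnimplemented = errors.New("unimplemented")

// rootFetcher is a hypothetical callback that returns a single aggregate
// (multiverse) root hash over the given universe IDs.
type rootFetcher func(ctx context.Context, pt ProofType, ids []string) ([]byte, error)

// groupOutdated returns true if the aggregate roots differ, meaning at least
// one universe in the group is out of date and the whole group should be
// fetched individually. An unimplemented remote endpoint is treated as
// "outdated" so the caller falls back to the legacy per-root sync.
func groupOutdated(ctx context.Context, pt ProofType, ids []string,
	local, remote rootFetcher) (bool, error) {

	localRoot, err := local(ctx, pt, ids)
	if err != nil {
		return false, err
	}

	remoteRoot, err := remote(ctx, pt, ids)
	switch {
	case errors.Is(err, ErrUnimplemented):
		return true, nil
	case err != nil:
		return false, err
	}

	// One cheap comparison replaces len(ids) individual root queries when
	// nothing has changed on either side.
	return !bytes.Equal(localRoot, remoteRoot), nil
}
```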
@@ -336,7 +468,7 @@ func (s *SimpleSyncer) syncRoot(ctx context.Context, remoteRoot Root,
		return err
	}

-	// If this is a tranfer tree, then we'll collect all the items as we
+	// If this is a transfer tree, then we'll collect all the items as we
	// need to sort them to ensure we can validate them in dep order.
	if !isIssuanceTree {
		transferLeaves := fn.Collect(transferLeafProofs)
@@ -348,17 +480,13 @@ func (s *SimpleSyncer) syncRoot(ctx context.Context, remoteRoot Root,
			iRecord := proof.BlockHeightRecord(&iBlockHeight)
			jRecord := proof.BlockHeightRecord(&jBlockHeight)

-			_ = proof.SparseDecode(
-				//nolint:lll
-				bytes.NewReader(transferLeaves[i].Leaf.RawProof),
-				iRecord,
-			)
+			_ = proof.SparseDecode(bytes.NewReader(
+				transferLeaves[i].Leaf.RawProof,
+			), iRecord)

-			_ = proof.SparseDecode(
-				//nolint:lll
-				bytes.NewReader(transferLeaves[j].Leaf.RawProof),
-				jRecord,
-			)
+			_ = proof.SparseDecode(bytes.NewReader(
+				transferLeaves[j].Leaf.RawProof,
+			), jRecord)

			return iBlockHeight < jBlockHeight
		})
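For context, the reformatted calls above keep the same behavior: decode only the block height out of each raw proof and sort ascending so transfer proofs can be validated in dependency order. A simplified sketch follows, assuming a toy encoding where the height is the first four big-endian bytes (the real code uses proof.SparseDecode with a BlockHeightRecord over the full TLV-encoded proof).

```go
package sketch

import (
	"encoding/binary"
	"sort"
)

// leaf is a stand-in for a universe transfer leaf; rawProof is assumed to
// start with a big-endian block height purely for illustration.
type leaf struct {
	rawProof []byte
}

func blockHeight(l leaf) uint32 {
	if len(l.rawProof) < 4 {
		return 0
	}
	return binary.BigEndian.Uint32(l.rawProof[:4])
}

// sortByHeight orders leaves so that earlier (lower-height) transfers come
// first, which lets them be validated in dependency order.
func sortByHeight(leaves []leaf) {
	sort.Slice(leaves, func(i, j int) bool {
		return blockHeight(leaves[i]) < blockHeight(leaves[j])
	})
}
```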
@@ -468,22 +596,22 @@ func (s *SimpleSyncer) SyncUniverse(ctx context.Context, host ServerAddr,
// fetchAllRoots fetches all the roots from the remote Universe. This function
// is used in order to isolate any logic related to the specifics of how we
// fetch the data from the universe server.
-func (s *SimpleSyncer) fetchAllRoots(ctx context.Context, diffEngine DiffEngine) ([]Root, error) {
+func (s *SimpleSyncer) fetchAllRoots(ctx context.Context,
+	diffEngine DiffEngine) ([]Root, error) {
+
	offset := int32(0)
	pageSize := defaultPageSize
-	roots := make([]Root, 0)

+	var roots []Root
	for {
		log.Debugf("Fetching roots in range: %v to %v", offset,
			offset+pageSize)
-		tempRoots, err := diffEngine.RootNodes(
-			ctx, RootNodesQuery{
-				WithAmountsById: false,
-				SortDirection:   SortAscending,
-				Offset:          offset,
-				Limit:           pageSize,
-			},
-		)
+		tempRoots, err := diffEngine.RootNodes(ctx, RootNodesQuery{
+			WithAmountsById: false,
+			SortDirection:   SortAscending,
+			Offset:          offset,
+			Limit:           pageSize,
+		})

		if err != nil {
			return nil, err
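For context, fetchAllRoots (and fetchAllLeafKeys in the next hunk) share the same offset/limit pagination loop: request fixed-size pages until a short or empty page signals the end. A generic sketch of that loop follows, with a hypothetical page fetcher standing in for diffEngine.RootNodes.

```go
package sketch

import "context"

// page is a hypothetical fetcher that returns at most limit items starting at
// offset; it mirrors the shape of a query like RootNodesQuery.
type page[T any] func(ctx context.Context, offset, limit int32) ([]T, error)

// fetchAll keeps requesting fixed-size pages until a short (or empty) page
// signals that the server has no more results.
func fetchAll[T any](ctx context.Context, pageSize int32,
	fetch page[T]) ([]T, error) {

	var (
		offset int32
		items  []T
	)
	for {
		batch, err := fetch(ctx, offset, pageSize)
		if err != nil {
			return nil, err
		}
		if len(batch) == 0 {
			break
		}

		items = append(items, batch...)
		offset += pageSize

		// A short page means we've reached the end.
		if int32(len(batch)) < pageSize {
			break
		}
	}
	return items, nil
}
```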
@@ -509,8 +637,8 @@ func (s *SimpleSyncer) fetchAllLeafKeys(ctx context.Context,
	// Initialize the offset to be used for the pages.
	offset := int32(0)
	pageSize := defaultPageSize
-	leafKeys := make([]LeafKey, 0)

+	var leafKeys []LeafKey
	for {
		tempRemoteKeys, err := diffEngine.UniverseLeafKeys(
			ctx, UniverseLeafKeysQuery{