@@ -81,6 +81,10 @@ pub const JobQueue = struct {
     wait_group: WaitGroup = .{},
     global_cache: Cache.Directory,
     recursive: bool,
+    /// Dumps hash information to stdout which can be used to troubleshoot why
+    /// two hashes of the same package do not match.
+    /// If this is true, `recursive` must be false.
+    debug_hash: bool,
     work_around_btrfs_bug: bool,
 
     pub const Table = std.AutoArrayHashMapUnmanaged(Manifest.MultiHashHexDigest, *Fetch);
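The new `debug_hash` flag carries a documented constraint: it may only be set when `recursive` is false. A minimal standalone sketch of that relationship, using illustrative names rather than the compiler's actual `JobQueue`:

```zig
const std = @import("std");

// Illustrative stand-in for the two JobQueue flags touched by this commit;
// not the compiler's real type.
const Options = struct {
    recursive: bool = false,
    debug_hash: bool = false,
};

fn validate(opts: Options) void {
    // Mirrors the doc comment: if debug_hash is true, recursive must be false.
    if (opts.debug_hash) std.debug.assert(!opts.recursive);
}

pub fn main() void {
    validate(.{ .debug_hash = true }); // ok: recursive defaults to false
}
```

The assertion mirrors the `assert(!f.job_queue.recursive)` guard the commit adds in `computeHash` below.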
@@ -1315,7 +1319,7 @@ fn computeHash(
             const kind: HashedFile.Kind = switch (entry.kind) {
                 .directory => unreachable,
                 .file => .file,
-                .sym_link => .sym_link,
+                .sym_link => .link,
                 else => return f.fail(f.location_tok, try eb.printString(
                     "package contains '{s}' which has illegal file type '{s}'",
                     .{ entry.path, @tagName(entry.kind) },
@@ -1399,9 +1403,36 @@ fn computeHash(
     }
 
     if (any_failures) return error.FetchFailed;
+
+    if (f.job_queue.debug_hash) {
+        assert(!f.job_queue.recursive);
+        // Print something to stdout that can be text diffed to figure out why
+        // the package hash is different.
+        dumpHashInfo(all_files.items) catch |err| {
+            std.debug.print("unable to write to stdout: {s}\n", .{@errorName(err)});
+            std.process.exit(1);
+        };
+    }
+
     return hasher.finalResult();
 }
 
+fn dumpHashInfo(all_files: []const *const HashedFile) !void {
+    const stdout = std.io.getStdOut();
+    var bw = std.io.bufferedWriter(stdout.writer());
+    const w = bw.writer();
+
+    for (all_files) |hashed_file| {
+        try w.print("{s}: {s}: {s}\n", .{
+            @tagName(hashed_file.kind),
+            std.fmt.fmtSliceHexLower(&hashed_file.hash),
+            hashed_file.normalized_path,
+        });
+    }
+
+    try bw.flush();
+}
+
 fn workerHashFile(dir: fs.Dir, hashed_file: *HashedFile, wg: *WaitGroup) void {
     defer wg.finish();
     hashed_file.failure = hashFileFallible(dir, hashed_file);
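For reference, a self-contained sketch of the line format `dumpHashInfo` emits (`<kind>: <lowercase hex digest>: <normalized path>`), using the same buffered-stdout pattern as the diff. It assumes a Zig version where `std.io.getStdOut` and `std.io.bufferedWriter` exist, as in the code above; the entry type and sample data are made up for illustration:

```zig
const std = @import("std");

const Kind = enum { file, link };

// Hypothetical stand-in for HashedFile, reduced to the fields dumpHashInfo reads.
const Entry = struct {
    kind: Kind,
    hash: [4]u8, // the real code stores the full Manifest hash digest; shortened here
    normalized_path: []const u8,
};

pub fn main() !void {
    const entries = [_]Entry{
        .{ .kind = .file, .hash = .{ 0xde, 0xad, 0xbe, 0xef }, .normalized_path = "build.zig" },
        .{ .kind = .link, .hash = .{ 0x01, 0x02, 0x03, 0x04 }, .normalized_path = "docs/latest" },
    };

    // Same writing pattern as dumpHashInfo: buffer writes to stdout, flush once.
    const stdout = std.io.getStdOut();
    var bw = std.io.bufferedWriter(stdout.writer());
    const w = bw.writer();

    for (&entries) |*e| {
        try w.print("{s}: {s}: {s}\n", .{
            @tagName(e.kind),
            std.fmt.fmtSliceHexLower(&e.hash),
            e.normalized_path,
        });
    }

    try bw.flush();
}
```

Capturing this output for two copies of the same package and text-diffing the results shows which file's kind, contents, or normalized path differs, which is exactly the troubleshooting workflow the commit's comment describes.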
@@ -1427,7 +1458,7 @@ fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void
                 hasher.update(buf[0..bytes_read]);
             }
         },
-        .sym_link => {
+        .link => {
            const link_name = try dir.readLink(hashed_file.fs_path, &buf);
            if (fs.path.sep != canonical_sep) {
                // Package hashes are intended to be consistent across
@@ -1480,7 +1511,7 @@ const HashedFile = struct {
         fs.File.StatError ||
         fs.Dir.ReadLinkError;
 
-    const Kind = enum { file, sym_link };
+    const Kind = enum { file, link };
 
     fn lessThan(context: void, lhs: *const HashedFile, rhs: *const HashedFile) bool {
         _ = context;