Commit 76f8bab

Many new tests + fuse setup fix
1 parent 4b00022 commit 76f8bab

7 files changed: +154 -19 lines

bin/commands/fuse-setup.js (+2)

@@ -60,6 +60,8 @@ class SetupCommand extends Command {
       // Emitting errors here would just be confusing, so suppress.
     }
 
+    this.exit(0)
+
     async function configureFuse (cb) {
       const configured = await new Promise((resolve, reject) => {
         hyperfuse.isConfigured((err, fuseConfigured) => {
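
The added `this.exit(0)` most likely forces the setup command to terminate once FUSE configuration finishes; native bindings and open handles can otherwise keep Node's event loop alive so the process never returns to the shell. A minimal sketch of that pattern, with `doSetup` as a hypothetical stand-in for the command's work (not code from this repo):

async function doSetup () {
  // Hypothetical stand-in for the FUSE configuration step.
}

async function main () {
  await doSetup()
  // Lingering native handles could otherwise keep the event loop alive,
  // so exit explicitly once the work is done.
  process.exit(0)
}

main().catch(err => {
  console.error(err)
  process.exit(1)
})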

index.js (+10 -3)

@@ -35,7 +35,9 @@ const TREE_CACHE_SIZE = TOTAL_CACHE_SIZE * CACHE_RATIO
 const DATA_CACHE_SIZE = TOTAL_CACHE_SIZE * (1 - CACHE_RATIO)
 
 // This is set dynamically in refreshFuse.
-var hyperfuse = null
+try {
+  var hyperfuse = require('hyperdrive-fuse')
+} catch (err) {}
 
 class HyperdriveDaemon extends EventEmitter {
   constructor (opts = {}) {
@@ -265,16 +267,21 @@ class HyperdriveDaemon extends EventEmitter {
 
       if (hyperfuse) {
         rsp.setFuseavailable(true)
-        const configured = await this._isFuseConfigured()
-        rsp.setFuseconfigured(configured)
+        rsp.setFuseconfigured(this.fuse.fuseConfigured)
       } else {
         rsp.setFuseavailable(false)
+        rsp.setFuseconfigured(false)
       }
     }
     return rsp
   },
   refreshFuse: async call => {
     await this.fuse.ready()
+    if (this.fuse && this.fuse.fuseConfigured) {
+      hyperfuse = require('hyperdrive-fuse')
+      this._versions.fuseNative = require('fuse-native/package.json').version
+      this._versions.hyperdriveFuse = require('hyperdrive-fuse/package.json').version
+    }
     return new rpc.main.messages.FuseRefreshResponse()
   }
 }
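
The try/require change above treats `hyperdrive-fuse` as an optional dependency: if the native module is missing or fails to build, the daemon still starts with FUSE reported as unavailable, and `refreshFuse` re-requires it once configuration succeeds. A generic sketch of that optional-dependency pattern, with `some-native-module` as a placeholder name:

// Generic sketch; 'some-native-module' is a placeholder, not a real dependency.
var nativeModule = null
try {
  nativeModule = require('some-native-module')
} catch (err) {
  // Module not installed or its native build failed; leave the feature disabled.
}

function refreshNativeModule () {
  if (nativeModule) return true
  try {
    nativeModule = require('some-native-module') // retry after setup has run
    return true
  } catch (err) {
    return false
  }
}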

lib/drives/index.js (+4 -2)

@@ -132,18 +132,20 @@ class DriveManager extends EventEmitter {
   closeSession (id) {
     const drive = this._sessions.get(id)
     if (!drive) return null
+
     const driveKey = drive.key.toString('hex')
     const driveSessions = this._sessionsByKey.get(driveKey)
     this._sessions.delete(id)
-    driveSessions.splice(driveSessions.indexOf(id), 1)
+    const idx = driveSessions.indexOf(id)
+    if (idx !== -1) driveSessions.splice(driveSessions.indexOf(id), 1)
 
     // If there are still active sessions, don't close the drive.
     if (driveSessions.length) return null
 
     log.debug({ id, key: driveKey }, 'closing drive because all associated sessions have closed')
     this._sessionsByKey.delete(driveKey)
 
-    // If a drive is closed in memory-only mode, its storage will be deleted.
+    // If a drive is closed in memory-only mode, its storage will be deleted, so don't actually close.
    if (this.memoryOnly) {
      log.debug({ id, key: driveKey }, 'aborting drive close because we\'re in memory-only mode')
      return null
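
The new `indexOf` guard avoids a classic JavaScript pitfall: when the id isn't found, `indexOf` returns -1, and `splice(-1, 1)` removes the last element of the array rather than nothing. A quick illustration:

const sessions = ['a', 'b', 'c']
sessions.splice(sessions.indexOf('missing'), 1) // indexOf -> -1
console.log(sessions) // ['a', 'b'] -- the last element was silently removed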

manager.js (+9 -9)

@@ -68,6 +68,7 @@ async function start (opts = {}) {
 
   var interpreter = opts.interpreter || process.execPath
   var interpreterArgs = `--max-old-space-size=${opts.heapSize}`
+  console.log('interpreterArgs:', interpreterArgs)
   if (!IS_WINDOWS) {
     const execArg = [interpreter, interpreterArgs, script].concat(args).map(escapeStringArg).join(' ')
     args = ['-c', execArg]
@@ -90,9 +91,15 @@ async function start (opts = {}) {
   }
 
   try {
-    await removeOldLogs()
+    if (opts.structuredLog === constants.structuredLog) {
+      await fs.rename(constants.structuredLog, constants.structuredLog.replace('.json', '.old.json'))
+    }
+    if (opts.unstructuredLog === constants.unstructuredLog) {
+      await fs.rename(constants.unstructuredLog, constants.unstructuredLog.replace('.log', '.old.log'))
+    }
   } catch (err) {
-    // If the log file couldn't be deleted, it's OK.
+    console.log('err:', err)
+    // If the log file couldn't be rotated, it's OK.
   }
 
   if (opts.foreground) {
@@ -101,13 +108,6 @@ async function start (opts = {}) {
   return startDaemon(description, opts)
 }
 
-function removeOldLogs () {
-  return Promise.all([
-    fs.unlink(description.output),
-    fs.unlink(description.error)
-  ])
-}
-
 function startForeground (description, opts) {
   const daemon = new HyperdriveDaemon({ ...opts, metadata: null, main: true })
   process.title = 'hyperdrive'
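
Instead of unlinking old logs on startup, the daemon now keeps one previous generation by renaming. A standalone sketch of that single-generation rotation, assuming `fs.promises` and a placeholder path (not taken from this commit):

const fs = require('fs').promises

// Keep exactly one previous generation: 'daemon.log' -> 'daemon.old.log'.
async function rotateLog (logPath) {
  try {
    await fs.rename(logPath, logPath.replace('.log', '.old.log'))
  } catch (err) {
    // A missing file on first run is fine; rotation is best-effort.
    if (err.code !== 'ENOENT') throw err
  }
}

rotateLog('daemon.log').catch(console.error)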

test/hyperdrive.js (+127 -2)

@@ -481,6 +481,45 @@ test('can mount a drive within a remote hyperdrive multiple times', async t => {
   t.end()
 })
 
+test('can mount a versioned drive within a remote hyperdrive', async t => {
+  const { client, cleanup } = await createOne()
+
+  try {
+    const drive1 = await client.drive.get()
+
+    const drive2 = await client.drive.get()
+    await drive2.writeFile('hamster', 'wheel')
+    const version1 = await drive2.version()
+    await drive2.writeFile('blah', 'blahblah')
+
+    await drive1.mount('a', { key: drive2.key })
+    await drive1.mount('aStatic', { key: drive2.key, version: version1 })
+
+    await drive1.writeFile('a/hello', 'world')
+    await drive1.writeFile('adios', 'amigo')
+
+    t.same(await drive1.readFile('adios'), Buffer.from('amigo'))
+    t.same(await drive1.readFile('a/hello'), Buffer.from('world'))
+    t.same(await drive2.readFile('hello'), Buffer.from('world'))
+    t.same(await drive2.readFile('hamster'), Buffer.from('wheel'))
+    t.same(await drive1.readFile('aStatic/hamster'), Buffer.from('wheel'))
+    try {
+      await drive1.readFile('aStatic/blah')
+      t.fail('aStatic should be a versioned mount')
+    } catch (err) {
+      t.true(err)
+    }
+
+    await drive1.close()
+    await drive2.close()
+  } catch (err) {
+    t.fail(err)
+  }
+
+  await cleanup()
+  t.end()
+})
+
 test('can unmount a drive within a remote hyperdrive', async t => {
   const { client, cleanup } = await createOne()
 
@@ -604,8 +643,7 @@ test('can create a symlink to directories', async t => {
   t.end()
 })
 
-// TODO: Stop skipping once we've updated hyperdrive/mountable-hypertrie to nanoresource.
-test.skip('drives are closed when all corresponding sessions are closed', async t => {
+test('drives are closed when all corresponding sessions are closed', async t => {
   const { client, cleanup, daemon } = await createOne()
 
   try {
@@ -630,6 +668,66 @@ test.skip('drives are closed when all corresponding sessions are closed', async
   t.end()
 })
 
+test('reopening a drive after previously closed works', async t => {
+  const { client, cleanup, daemon } = await createOne()
+
+  try {
+    var drive = await client.drive.get()
+    const driveKey = drive.key
+    await drive.writeFile('a', 'a')
+    await drive.writeFile('b', 'b')
+    await drive.writeFile('c', 'c')
+    const otherDrive = await client.drive.get({ key: driveKey })
+    const checkout1 = await client.drive.get({ key: driveKey, version: 1 })
+
+    await drive.close()
+    t.same(daemon.drives._drives.size, 2)
+    await otherDrive.close()
+    t.same(daemon.drives._drives.size, 2)
+    await checkout1.close()
+    t.same(daemon.drives._drives.size, 0)
+
+    drive = await client.drive.get({ key: driveKey })
+    await drive.writeFile('d', 'd')
+    const contents = await drive.readFile('a')
+    t.same(contents, Buffer.from('a'))
+  } catch (err) {
+    t.fail(err)
+  }
+
+  await cleanup()
+  t.end()
+})
+
+test('many quick closes/reopens', async t => {
+  const NUM_CYCLES = 10
+  const { client, cleanup, daemon } = await createOne()
+  var driveKey = null
+  const expected = new Array(NUM_CYCLES).fill(0).map((_, i) => '' + i)
+
+  try {
+    for (let i = 0; i < NUM_CYCLES; i++) {
+      var drive = await client.drive.get({ key: driveKey })
+      if (!driveKey) driveKey = drive.key
+      await drive.writeFile(expected[i], expected[i])
+      await drive.close()
+      if (daemon.drives._drives.size !== 0) t.fail('session close did not trigger drive close')
+    }
+    drive = await client.drive.get({ key: driveKey })
+    const actual = []
+    for (let i = 0; i < NUM_CYCLES; i++) {
+      const contents = await drive.readFile(expected[i])
+      actual[i] = contents.toString('utf8')
+    }
+    t.same(expected, actual)
+  } catch (err) {
+    t.fail(err)
+  }
+
+  await cleanup()
+  t.end()
+})
+
 test('drives are writable after a daemon restart', async t => {
   var { dir, client, cleanup } = await createOne()
 
@@ -656,6 +754,33 @@ test('drives are writable after a daemon restart', async t => {
   t.end()
 })
 
+test('cores are not closed incorrectly during the initial rejoin', async t => {
+  var { dir, client, cleanup } = await createOne()
+
+  try {
+    var drive = await client.drive.get()
+    const driveKey = drive.key
+    await drive.writeFile('a', 'a')
+    await drive.configureNetwork({ announce: true, lookup: true, remember: true })
+
+    await cleanup({ persist: true })
+
+    const newDaemon = await createOne({ dir })
+    client = newDaemon.client
+    cleanup = newDaemon.cleanup
+    drive = await client.drive.get({ key: driveKey })
+
+    t.same(await drive.readFile('a'), Buffer.from('a'))
+    await drive.writeFile('b', 'b')
+    t.same(await drive.readFile('b'), Buffer.from('b'))
+  } catch (err) {
+    t.fail(err)
+  }
+
+  await cleanup()
+  t.end()
+})
+
 test('mounts are writable in memory-only mode', async t => {
   var { client, cleanup } = await createOne({ memoryOnly: true })
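
The reopen/close tests above exercise the session-counting behavior fixed in lib/drives/index.js: a drive should close only when its last session does, and must be reopenable afterward. A minimal sketch of that reference-counting idea (not the daemon's actual DriveManager):

// Sketch only: tracks open session ids per drive key and reports
// when the last session for a key has closed.
class SessionCounter {
  constructor () {
    this.sessions = new Map() // drive key -> Set of session ids
  }

  open (key, id) {
    if (!this.sessions.has(key)) this.sessions.set(key, new Set())
    this.sessions.get(key).add(id)
  }

  close (key, id) {
    const ids = this.sessions.get(key)
    if (!ids) return false
    ids.delete(id)
    if (ids.size) return false // other sessions are still active
    this.sessions.delete(key)
    return true // caller should actually close the drive now
  }
}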

test/peersockets.js (+1 -1)

@@ -277,7 +277,7 @@ test('peersockets, send to all peers swarming a drive, static peers', async t =>
 })
 
 // TODO: There's a nondeterministic failure here on slow machines. Investigate.
-test.skip('peersockets, send to all peers swarming a drive, dynamically-added peers', async t => {
+test('peersockets, send to all peers swarming a drive, dynamically-added peers', async t => {
   const NUM_PEERS = 10
 
   const { clients, daemons, cleanup } = await create(NUM_PEERS)

test/replication.js (+1 -2)

@@ -555,7 +555,7 @@ test('can get peer info for one discovery key', async t => {
 })
 
 // This will hang until we add timeouts to the hyperdrive reads.
-test.skip('can continue getting drive info after remote content is cleared (no longer available)', async t => {
+test('can continue getting drive info after remote content is cleared (no longer available)', async t => {
   const { clients, cleanup, daemons } = await create(2)
   const firstClient = clients[0]
   const secondClient = clients[1]
@@ -585,7 +585,6 @@ test.skip('can continue getting drive info after remote content is cleared (no l
 
 async function clearContent (metadataKeys, store) {
   const metadataKeySet = new Set(metadataKeys.map(k => k.toString('hex')))
-  console.log('external cores:', store._externalCores)
   for (const [, core] of store._externalCores) {
     if (metadataKeySet.has(core.key.toString('hex'))) continue
     await new Promise((resolve, reject) => {