diff --git a/.eslintrc b/.eslintrc index 57b176925..f66c592e8 100644 --- a/.eslintrc +++ b/.eslintrc @@ -52,6 +52,11 @@ "ignoreConsecutiveComments": true } ], + "curly": [ + "error", + "multi-line", + "consistent" + ], "import/order": [ "error", { diff --git a/src/PolykeyAgent.ts b/src/PolykeyAgent.ts index 192cb02ee..02c68cb26 100644 --- a/src/PolykeyAgent.ts +++ b/src/PolykeyAgent.ts @@ -1,7 +1,7 @@ import type { FileSystem } from './types'; import type { PolykeyWorkerManagerInterface } from './workers/types'; import type { Host, Port } from './network/types'; -import type { NodeMapping } from './nodes/types'; +import type { SeedNodes } from './nodes/types'; import type { RootKeyPairChangeData } from './keys/types'; import path from 'path'; @@ -14,7 +14,7 @@ import { Status } from './status'; import { Schema } from './schema'; import { VaultManager } from './vaults'; import { ACL } from './acl'; -import { NodeManager } from './nodes'; +import { NodeConnectionManager, NodeGraph, NodeManager } from './nodes'; import { NotificationsManager } from './notifications'; import { GestaltGraph } from './gestalts'; import { Sigchain } from './sigchain'; @@ -22,7 +22,8 @@ import { Discovery } from './discovery'; import { SessionManager } from './sessions'; import { GRPCServer } from './grpc'; import { IdentitiesManager, providers } from './identities'; -import { ForwardProxy, ReverseProxy } from './network'; +import ForwardProxy from './network/ForwardProxy'; +import ReverseProxy from './network/ReverseProxy'; import { EventBus, captureRejectionSymbol } from './events'; import { createAgentService, AgentServiceService } from './agent'; import { createClientService, ClientServiceService } from './client'; @@ -61,6 +62,7 @@ class PolykeyAgent { networkConfig = {}, forwardProxyConfig = {}, reverseProxyConfig = {}, + nodeConnectionManagerConfig = {}, seedNodes = {}, // Optional dependencies status, @@ -73,6 +75,8 @@ class PolykeyAgent { gestaltGraph, fwdProxy, revProxy, + 
nodeGraph, + nodeConnectionManager, nodeManager, discovery, vaultManager, @@ -102,8 +106,13 @@ class PolykeyAgent { connConnectTime?: number; connTimeoutTime?: number; }; + nodeConnectionManagerConfig?: { + connConnectTime?: number; + connTimeoutTime?: number; + initialClosestNodes?: number; + }; networkConfig?: NetworkConfig; - seedNodes?: NodeMapping; + seedNodes?: SeedNodes; status?: Status; schema?: Schema; keyManager?: KeyManager; @@ -114,6 +123,8 @@ class PolykeyAgent { gestaltGraph?: GestaltGraph; fwdProxy?: ForwardProxy; revProxy?: ReverseProxy; + nodeGraph?: NodeGraph; + nodeConnectionManager?: NodeConnectionManager; nodeManager?: NodeManager; discovery?: Discovery; vaultManager?: VaultManager; @@ -146,6 +157,10 @@ class PolykeyAgent { ...config.defaults.reverseProxyConfig, ...utils.filterEmptyObject(reverseProxyConfig), }; + const nodeConnectionManagerConfig_ = { + ...config.defaults.nodeConnectionManagerConfig, + ...utils.filterEmptyObject(nodeConnectionManagerConfig), + }; await utils.mkdirExists(fs, nodePath); const statusPath = path.join(nodePath, config.defaults.statusBase); const statusLockPath = path.join(nodePath, config.defaults.statusLockBase); @@ -254,26 +269,45 @@ class PolykeyAgent { ...reverseProxyConfig_, logger: logger.getChild(ReverseProxy.name), }); + nodeGraph = + nodeGraph ?? + (await NodeGraph.createNodeGraph({ + db, + fresh, + keyManager, + logger: logger.getChild(NodeGraph.name), + })); + nodeConnectionManager = + nodeConnectionManager ?? + new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + seedNodes, + ...nodeConnectionManagerConfig_, + logger: logger.getChild(NodeConnectionManager.name), + }); nodeManager = nodeManager ?? 
- (await NodeManager.createNodeManager({ + new NodeManager({ db, - seedNodes, sigchain, keyManager, - fwdProxy, - revProxy, + nodeGraph, + nodeConnectionManager, logger: logger.getChild(NodeManager.name), - fresh, - })); + }); // Discovery uses in-memory CreateDestroy pattern // Therefore it should be destroyed during stop discovery = discovery ?? (await Discovery.createDiscovery({ + keyManager, gestaltGraph, identitiesManager, nodeManager, + sigchain, logger: logger.getChild(Discovery.name), })); vaultManager = @@ -282,7 +316,7 @@ class PolykeyAgent { vaultsKey: keyManager.vaultKey, vaultsPath, keyManager, - nodeManager, + nodeConnectionManager, gestaltGraph, acl, db, @@ -295,6 +329,7 @@ class PolykeyAgent { (await NotificationsManager.createNotificationsManager({ acl, db, + nodeConnectionManager, nodeManager, keyManager, logger: logger.getChild(NotificationsManager.name), @@ -324,7 +359,6 @@ class PolykeyAgent { await notificationsManager?.stop(); await vaultManager?.stop(); await discovery?.destroy(); - await nodeManager?.stop(); await revProxy?.stop(); await fwdProxy?.stop(); await gestaltGraph?.stop(); @@ -349,6 +383,8 @@ class PolykeyAgent { gestaltGraph, fwdProxy, revProxy, + nodeGraph, + nodeConnectionManager, nodeManager, discovery, vaultManager, @@ -380,6 +416,8 @@ class PolykeyAgent { public readonly gestaltGraph: GestaltGraph; public readonly fwdProxy: ForwardProxy; public readonly revProxy: ReverseProxy; + public readonly nodeGraph: NodeGraph; + public readonly nodeConnectionManager: NodeConnectionManager; public readonly nodeManager: NodeManager; public readonly discovery: Discovery; public readonly vaultManager: VaultManager; @@ -404,6 +442,8 @@ class PolykeyAgent { gestaltGraph, fwdProxy, revProxy, + nodeGraph, + nodeConnectionManager, nodeManager, discovery, vaultManager, @@ -426,6 +466,8 @@ class PolykeyAgent { gestaltGraph: GestaltGraph; fwdProxy: ForwardProxy; revProxy: ReverseProxy; + nodeGraph: NodeGraph; + nodeConnectionManager: 
NodeConnectionManager; nodeManager: NodeManager; discovery: Discovery; vaultManager: VaultManager; @@ -449,6 +491,8 @@ class PolykeyAgent { this.gestaltGraph = gestaltGraph; this.fwdProxy = fwdProxy; this.revProxy = revProxy; + this.nodeGraph = nodeGraph; + this.nodeConnectionManager = nodeConnectionManager; this.nodeManager = nodeManager; this.discovery = discovery; this.vaultManager = vaultManager; @@ -513,7 +557,9 @@ class PolykeyAgent { keyManager: this.keyManager, vaultManager: this.vaultManager, nodeManager: this.nodeManager, + nodeGraph: this.nodeGraph, sigchain: this.sigchain, + nodeConnectionManager: this.nodeConnectionManager, notificationsManager: this.notificationsManager, }); const clientService = createClientService({ @@ -522,6 +568,8 @@ class PolykeyAgent { gestaltGraph: this.gestaltGraph, identitiesManager: this.identitiesManager, keyManager: this.keyManager, + nodeGraph: this.nodeGraph, + nodeConnectionManager: this.nodeConnectionManager, nodeManager: this.nodeManager, notificationsManager: this.notificationsManager, sessionManager: this.sessionManager, @@ -575,9 +623,9 @@ class PolykeyAgent { ingressPort: networkConfig_.ingressPort, tlsConfig, }); - await this.nodeManager.start({ fresh }); - await this.nodeManager.getConnectionsToSeedNodes(); - await this.nodeManager.syncNodeGraph(); + await this.nodeConnectionManager.start(); + await this.nodeGraph.start({ fresh }); + await this.nodeConnectionManager.syncNodeGraph(); await this.vaultManager.start({ fresh }); await this.notificationsManager.start({ fresh }); await this.sessionManager.start({ fresh }); @@ -597,7 +645,6 @@ class PolykeyAgent { await this.notificationsManager?.stop(); await this.vaultManager?.stop(); await this.discovery?.destroy(); - await this.nodeManager?.stop(); await this.revProxy?.stop(); await this.fwdProxy?.stop(); await this.grpcServerAgent?.stop(); @@ -625,7 +672,8 @@ class PolykeyAgent { await this.notificationsManager.stop(); await this.vaultManager.stop(); await 
this.discovery.destroy(); - await this.nodeManager.stop(); + await this.nodeConnectionManager.stop(); + await this.nodeGraph.stop(); await this.revProxy.stop(); await this.fwdProxy.stop(); await this.grpcServerAgent.stop(); @@ -649,7 +697,7 @@ class PolykeyAgent { await this.sessionManager.destroy(); await this.notificationsManager.destroy(); await this.vaultManager.destroy(); - await this.nodeManager.destroy(); + await this.nodeGraph.destroy(); await this.gestaltGraph.destroy(); await this.acl.destroy(); await this.sigchain.destroy(); diff --git a/src/agent/GRPCClientAgent.ts b/src/agent/GRPCClientAgent.ts index 7b882cafe..1289354c7 100644 --- a/src/agent/GRPCClientAgent.ts +++ b/src/agent/GRPCClientAgent.ts @@ -33,14 +33,16 @@ class GRPCClientAgent extends GRPCClient { tlsConfig, proxyConfig, timeout = Infinity, + destroyCallback = async () => {}, logger = new Logger(this.name), }: { nodeId: NodeId; host: Host; port: Port; - proxyConfig?: ProxyConfig; tlsConfig?: Partial; + proxyConfig?: ProxyConfig; timeout?: number; + destroyCallback?: () => Promise; logger?: Logger; }): Promise { const { client, serverCertChain, flowCountInterceptor } = @@ -63,6 +65,7 @@ class GRPCClientAgent extends GRPCClient { proxyConfig, serverCertChain, flowCountInterceptor, + destroyCallback, logger, }); return grpcClientAgent; diff --git a/src/agent/service/index.ts b/src/agent/service/index.ts index 43af3c005..f20a9fdb8 100644 --- a/src/agent/service/index.ts +++ b/src/agent/service/index.ts @@ -1,6 +1,10 @@ import type { KeyManager } from '../../keys'; import type { VaultManager } from '../../vaults'; -import type { NodeManager } from '../../nodes'; +import type { + NodeGraph, + NodeManager, + NodeConnectionManager, +} from '../../nodes'; import type { NotificationsManager } from '../../notifications'; import type { Sigchain } from '../../sigchain'; import type { IAgentServiceServer } from '../../proto/js/polykey/v1/agent_service_grpc_pb'; @@ -20,7 +24,9 @@ import { 
AgentServiceService } from '../../proto/js/polykey/v1/agent_service_grp function createService(container: { keyManager: KeyManager; vaultManager: VaultManager; + nodeConnectionManager: NodeConnectionManager; nodeManager: NodeManager; + nodeGraph: NodeGraph; notificationsManager: NotificationsManager; sigchain: Sigchain; }) { diff --git a/src/agent/service/nodesChainDataGet.ts b/src/agent/service/nodesChainDataGet.ts index 0c471e399..3ed37b99f 100644 --- a/src/agent/service/nodesChainDataGet.ts +++ b/src/agent/service/nodesChainDataGet.ts @@ -1,25 +1,25 @@ import type * as grpc from '@grpc/grpc-js'; -import type { ClaimIdEncoded } from '../../claims/types'; -import type { NodeManager } from '../../nodes'; +import type { Sigchain } from '../../sigchain'; import type * as utilsPB from '../../proto/js/polykey/v1/utils/utils_pb'; +import type { ClaimIdEncoded } from '../../claims/types'; import { utils as grpcUtils } from '../../grpc'; import * as nodesPB from '../../proto/js/polykey/v1/nodes/nodes_pb'; /** * Retrieves the ChainDataEncoded of this node. 
*/ -function nodesChainDataGet({ nodeManager }: { nodeManager: NodeManager }) { +function nodesChainDataGet({ sigchain }: { sigchain: Sigchain }) { return async ( call: grpc.ServerUnaryCall, callback: grpc.sendUnaryData, ): Promise => { try { const response = new nodesPB.ChainData(); - const chainData = await nodeManager.getChainData(); + const chainData = await sigchain.getChainData(); // Iterate through each claim in the chain, and serialize for transport - for (const c in chainData) { - const claimId = c as ClaimIdEncoded; - const claim = chainData[claimId]; + let claimIdEncoded: ClaimIdEncoded; + for (claimIdEncoded in chainData) { + const claim = chainData[claimIdEncoded]; const claimMessage = new nodesPB.AgentClaim(); // Will always have a payload (never undefined) so cast as string claimMessage.setPayload(claim.payload as string); @@ -32,7 +32,7 @@ function nodesChainDataGet({ nodeManager }: { nodeManager: NodeManager }) { claimMessage.getSignaturesList().push(signature); } // Add the serialized claim - response.getChainDataMap().set(claimId, claimMessage); + response.getChainDataMap().set(claimIdEncoded, claimMessage); } callback(null, response); return; diff --git a/src/agent/service/nodesClosestLocalNodesGet.ts b/src/agent/service/nodesClosestLocalNodesGet.ts index 534d3e011..559337c9d 100644 --- a/src/agent/service/nodesClosestLocalNodesGet.ts +++ b/src/agent/service/nodesClosestLocalNodesGet.ts @@ -1,5 +1,5 @@ import type * as grpc from '@grpc/grpc-js'; -import type { NodeManager } from '../../nodes'; +import type { NodeConnectionManager } from '../../nodes'; import type { NodeId } from '../../nodes/types'; import { utils as grpcUtils } from '../../grpc'; import { utils as nodesUtils } from '../../nodes'; @@ -12,9 +12,9 @@ import * as nodesPB from '../../proto/js/polykey/v1/nodes/nodes_pb'; * to some provided node ID. 
*/ function nodesClosestLocalNodesGet({ - nodeManager, + nodeConnectionManager, }: { - nodeManager: NodeManager; + nodeConnectionManager: NodeConnectionManager; }) { return async ( call: grpc.ServerUnaryCall, @@ -38,7 +38,9 @@ function nodesClosestLocalNodesGet({ }, ); // Get all local nodes that are closest to the target node from the request - const closestNodes = await nodeManager.getClosestLocalNodes(nodeId); + const closestNodes = await nodeConnectionManager.getClosestLocalNodes( + nodeId, + ); for (const node of closestNodes) { const addressMessage = new nodesPB.Address(); addressMessage.setHost(node.address.host); diff --git a/src/agent/service/nodesCrossSignClaim.ts b/src/agent/service/nodesCrossSignClaim.ts index 64529c551..907494512 100644 --- a/src/agent/service/nodesCrossSignClaim.ts +++ b/src/agent/service/nodesCrossSignClaim.ts @@ -96,12 +96,12 @@ function nodesCrossSignClaim({ const doublySignedClaim = await claimsUtils.signIntermediaryClaim({ claim: constructedIntermediaryClaim, privateKey: keyManager.getRootKeyPairPem().privateKey, - signeeNodeId: nodesUtils.encodeNodeId(nodeManager.getNodeId()), + signeeNodeId: nodesUtils.encodeNodeId(keyManager.getNodeId()), }); // Then create your own intermediary node claim (from X -> Y) const singlySignedClaim = await sigchain.createIntermediaryClaim({ type: 'node', - node1: nodesUtils.encodeNodeId(nodeManager.getNodeId()), + node1: nodesUtils.encodeNodeId(keyManager.getNodeId()), node2: payloadData.node1, }); // Should never be reached, but just for type safety diff --git a/src/agent/service/nodesHolePunchMessageSend.ts b/src/agent/service/nodesHolePunchMessageSend.ts index afeea0af8..7c8a2e0a1 100644 --- a/src/agent/service/nodesHolePunchMessageSend.ts +++ b/src/agent/service/nodesHolePunchMessageSend.ts @@ -1,17 +1,22 @@ import type * as grpc from '@grpc/grpc-js'; -import type { NodeManager } from '../../nodes'; +import type { NodeManager, NodeConnectionManager } from '../../nodes'; +import type KeyManager 
from '../../keys/KeyManager'; import type { NodeId } from '../../nodes/types'; import type * as nodesPB from '../../proto/js/polykey/v1/nodes/nodes_pb'; -import { utils as networkUtils } from '../../network'; +import * as networkUtils from '../../network/utils'; import { utils as grpcUtils } from '../../grpc'; import { validateSync, utils as validationUtils } from '../../validation'; import { matchSync } from '../../utils'; import * as utilsPB from '../../proto/js/polykey/v1/utils/utils_pb'; function nodesHolePunchMessageSend({ + keyManager, nodeManager, + nodeConnectionManager, }: { + keyManager: KeyManager; nodeManager: NodeManager; + nodeConnectionManager: NodeConnectionManager; }) { return async ( call: grpc.ServerUnaryCall, @@ -44,15 +49,15 @@ function nodesHolePunchMessageSend({ // Firstly, check if this node is the desired node // If so, then we want to make this node start sending hole punching packets // back to the source node. - if (nodeManager.getNodeId() === targetId) { + if (keyManager.getNodeId().equals(targetId)) { const [host, port] = networkUtils.parseAddress( call.request.getEgressAddress(), ); - await nodeManager.openConnection(host, port); + await nodeConnectionManager.holePunchReverse(host, port); // Otherwise, find if node in table // If so, ask the nodeManager to relay to the node } else if (await nodeManager.knowsNode(sourceId)) { - await nodeManager.relayHolePunchMessage(call.request); + await nodeConnectionManager.relayHolePunchMessage(call.request); } callback(null, response); return; diff --git a/src/agent/service/vaultsGitPackGet.ts b/src/agent/service/vaultsGitPackGet.ts index 8590fcd29..4fad805a1 100644 --- a/src/agent/service/vaultsGitPackGet.ts +++ b/src/agent/service/vaultsGitPackGet.ts @@ -20,8 +20,9 @@ function vaultsGitPackGet({ vaultManager }: { vaultManager: VaultManager }) { const body = Buffer.concat(clientBodyBuffers); const meta = call.metadata; const vaultNameOrId = meta.get('vaultNameOrId').pop()!.toString(); - if 
(vaultNameOrId == null) + if (vaultNameOrId == null) { throw new grpcErrors.ErrorGRPC('vault-name not in metadata.'); + } let vaultId; try { vaultId = vaultsUtils.makeVaultId(vaultNameOrId); diff --git a/src/bin/CommandPolykey.ts b/src/bin/CommandPolykey.ts index 96d8d25fd..b9534fee0 100644 --- a/src/bin/CommandPolykey.ts +++ b/src/bin/CommandPolykey.ts @@ -1,10 +1,10 @@ import type { FileSystem } from '../types'; - import commander from 'commander'; import Logger, { StreamHandler } from '@matrixai/logger'; import * as binUtils from './utils'; import * as binOptions from './utils/options'; import * as binErrors from './errors'; +import grpcSetLogger from '../grpc/utils/setLogger'; /** * Singleton logger constructed once for all commands @@ -63,6 +63,8 @@ class CommandPolykey extends commander.Command { const opts = this.opts(); // Set the logger according to the verbosity this.logger.setLevel(binUtils.verboseToLogLevel(opts.verbose)); + // Set the global upstream GRPC logger + grpcSetLogger(this.logger.getChild('grpc')); // If the node path is undefined // this means there is an unknown platform if (opts.nodePath == null) { diff --git a/src/bin/agent/CommandStart.ts b/src/bin/agent/CommandStart.ts index 5c71999d9..7e911eb1e 100644 --- a/src/bin/agent/CommandStart.ts +++ b/src/bin/agent/CommandStart.ts @@ -174,12 +174,13 @@ class CommandStart extends CommandPolykey { agentConfig, }; agentProcess.send(messageIn, (e) => { - if (e != null) + if (e != null) { rejectAgentProcessP( new binErrors.ErrorCLIPolykeyAgentProcess( 'Failed sending agent process message', ), ); + } }); await agentProcessP; } else { diff --git a/src/bin/nodes/CommandFind.ts b/src/bin/nodes/CommandFind.ts index 09150b5dd..5788c2c8a 100644 --- a/src/bin/nodes/CommandFind.ts +++ b/src/bin/nodes/CommandFind.ts @@ -71,8 +71,9 @@ class CommandFind extends CommandPolykey { result.port as Port, )}`; } catch (err) { - if (!(err instanceof nodesErrors.ErrorNodeGraphNodeNotFound)) + if (!(err instanceof 
nodesErrors.ErrorNodeGraphNodeIdNotFound)) { throw err; + } // Else failed to find the node. result.success = false; result.id = nodesUtils.encodeNodeId(nodeId); @@ -89,8 +90,9 @@ class CommandFind extends CommandPolykey { }), ); // Like ping it should error when failing to find node for automation reasons. - if (!result.success) + if (!result.success) { throw new binErrors.ErrorNodeFindFailed(result.message); + } } finally { if (pkClient! != null) await pkClient.stop(); } diff --git a/src/bin/nodes/CommandPing.ts b/src/bin/nodes/CommandPing.ts index 997ddaeb7..b22e0d19d 100644 --- a/src/bin/nodes/CommandPing.ts +++ b/src/bin/nodes/CommandPing.ts @@ -55,7 +55,7 @@ class CommandPing extends CommandPolykey { meta, ); } catch (err) { - if (err instanceof nodesErrors.ErrorNodeGraphNodeNotFound) { + if (err instanceof nodesErrors.ErrorNodeGraphNodeIdNotFound) { error = new binErrors.ErrorNodePingFailed( `Failed to resolve node ID ${nodesUtils.encodeNodeId( nodeId, @@ -67,8 +67,9 @@ class CommandPing extends CommandPolykey { } const status = { success: false, message: '' }; status.success = statusMessage ? 
statusMessage.getSuccess() : false; - if (!status.success && !error) + if (!status.success && !error) { error = new binErrors.ErrorNodePingFailed('No response received'); + } if (status.success) status.message = 'Node is Active.'; else status.message = error.message; const output: any = diff --git a/src/bin/polykey-agent.ts b/src/bin/polykey-agent.ts index 3689bd201..e12fd1523 100644 --- a/src/bin/polykey-agent.ts +++ b/src/bin/polykey-agent.ts @@ -24,6 +24,7 @@ import * as binUtils from './utils'; import PolykeyAgent from '../PolykeyAgent'; import { WorkerManager, utils as workersUtils } from '../workers'; import ErrorPolykey from '../ErrorPolykey'; +import grpcSetLogger from '../grpc/utils/setLogger'; import { promisify, promise } from '../utils'; process.title = 'polykey-agent'; @@ -43,6 +44,8 @@ async function main(_argv = process.argv): Promise { }); const messageIn = await messageInP; logger.setLevel(messageIn.logLevel); + // Set the global upstream GRPC logger + grpcSetLogger(logger.getChild('grpc')); let pkAgent: PolykeyAgent; let workerManager: PolykeyWorkerManagerInterface; exitHandlers.handlers.push(async () => { diff --git a/src/bin/utils/parsers.ts b/src/bin/utils/parsers.ts index 132dbf62a..c871f2596 100644 --- a/src/bin/utils/parsers.ts +++ b/src/bin/utils/parsers.ts @@ -1,13 +1,6 @@ -import type { NodeId, NodeMapping } from '../../nodes/types'; -import type { Host, Hostname, Port } from '../../network/types'; import commander from 'commander'; -import { IdInternal } from '@matrixai/id'; -import * as nodesUtils from '../../nodes/utils'; -import * as networkUtils from '../../network/utils'; import * as validationUtils from '../../validation/utils'; import * as validationErrors from '../../validation/errors'; -import config from '../../config'; -import { never } from '../../utils'; /** * Converts a validation parser to commander argument parser @@ -40,6 +33,10 @@ const parseHostOrHostname = validateParserToArgParser( 
validationUtils.parseHostOrHostname, ); const parsePort = validateParserToArgParser(validationUtils.parsePort); +const parseNetwork = validateParserToArgParser(validationUtils.parseNetwork); +const parseSeedNodes = validateParserToArgParser( + validationUtils.parseSeedNodes, +); function parseCoreCount(v: string): number | undefined { if (v === 'all') { @@ -62,101 +59,17 @@ function parseSecretPath(secretPath: string): [string, string, string?] { return [vaultName, directoryPath, undefined]; } -/** - * Acquires the default seed nodes from src/config.ts. - */ -function getDefaultSeedNodes(network: string): NodeMapping { - const seedNodes: NodeMapping = {}; - let source; - switch (network) { - case 'testnet': - source = config.defaults.network.testnet; - break; - case 'mainnet': - source = config.defaults.network.mainnet; - break; - default: - never(); - } - for (const id in source) { - const seedNodeId = IdInternal.fromString(id); - seedNodes[seedNodeId] = { - host: source[seedNodeId].host as Host | Hostname, - port: source[seedNodeId].port as Port, - }; - } - return seedNodes; -} - -/** - * Seed nodes expected to be of form 'nodeId1@host:port;nodeId2@host:port;...' - * By default, any specified seed nodes (in CLI option, or environment variable) - * will overwrite the default nodes in src/config.ts. - * Special flag `` indicates that the default seed - * nodes should be added to the starting seed nodes instead of being overwritten. - */ -function parseSeedNodes(rawSeedNodes: string): [NodeMapping, boolean] { - const seedNodeMappings: NodeMapping = {}; - let defaults = false; - // If specifically set no seed nodes, then ensure we start with none - if (rawSeedNodes === '') return [seedNodeMappings, defaults]; - const semicolonSeedNodes = rawSeedNodes.split(';'); - for (const rawSeedNode of semicolonSeedNodes) { - // Empty string will occur if there's an extraneous ';' (e.g. 
at end of env) - if (rawSeedNode === '') continue; - // Append the default seed nodes if we encounter the special flag - if (rawSeedNode === '') { - defaults = true; - continue; - } - const idHostPort = rawSeedNode.split(/[@:]/); - if (idHostPort.length !== 3) { - throw new commander.InvalidOptionArgumentError( - `${rawSeedNode} is not of format 'nodeId@host:port'`, - ); - } - const seedNodeId = nodesUtils.decodeNodeId(idHostPort[0]); - if (seedNodeId == null) { - throw new commander.InvalidOptionArgumentError( - `${idHostPort[0]} is not a valid node ID`, - ); - } - if (!networkUtils.isHostname(idHostPort[1])) { - throw new commander.InvalidOptionArgumentError( - `${idHostPort[1]} is not a valid hostname`, - ); - } - const port = parsePort(idHostPort[2]); - seedNodeMappings[seedNodeId] = { - host: idHostPort[1] as Host | Hostname, - port: port, - }; - } - return [seedNodeMappings, defaults]; -} - -function parseNetwork(network: string): NodeMapping { - // Getting a list of network names from the config defaults - const networks = config.defaults.network; - const validNetworks = Object.keys(networks); - - // Checking if the network name is valid. 
- if (validNetworks.includes(network)) return getDefaultSeedNodes(network); - throw new commander.InvalidArgumentError(`${network} is not a valid network`); -} - export { parseInteger, parseNumber, - parseCoreCount, - parseSecretPath, parseNodeId, parseGestaltId, parseHost, parseHostname, parseHostOrHostname, parsePort, - getDefaultSeedNodes, - parseSeedNodes, parseNetwork, + parseSeedNodes, + parseCoreCount, + parseSecretPath, }; diff --git a/src/bootstrap/utils.ts b/src/bootstrap/utils.ts index 3a7308349..feba00657 100644 --- a/src/bootstrap/utils.ts +++ b/src/bootstrap/utils.ts @@ -12,8 +12,9 @@ import { KeyManager, utils as keyUtils } from '../keys'; import { Sigchain } from '../sigchain'; import { ACL } from '../acl'; import { GestaltGraph } from '../gestalts'; -import { ForwardProxy, ReverseProxy } from '../network'; -import { NodeManager } from '../nodes'; +import ForwardProxy from '../network/ForwardProxy'; +import ReverseProxy from '../network/ReverseProxy'; +import { NodeConnectionManager, NodeGraph, NodeManager } from '../nodes'; import { VaultManager } from '../vaults'; import { NotificationsManager } from '../notifications'; import { mkdirExists } from '../utils'; @@ -138,21 +139,33 @@ async function bootstrapState({ const revProxy = new ReverseProxy({ logger: logger.getChild(ReverseProxy.name), }); - const nodeManager = await NodeManager.createNodeManager({ + const nodeGraph = await NodeGraph.createNodeGraph({ db, + fresh, keyManager, - sigchain, + logger: logger.getChild(NodeGraph.name), + }); + const nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, fwdProxy, revProxy, + logger: logger.getChild(NodeConnectionManager.name), + }); + const nodeManager = new NodeManager({ + db, + keyManager, + nodeGraph, + nodeConnectionManager, + sigchain, logger: logger.getChild(NodeManager.name), - fresh, }); const vaultManager = await VaultManager.createVaultManager({ acl, db, gestaltGraph, keyManager, - nodeManager, + 
nodeConnectionManager, vaultsKey: keyManager.vaultKey, vaultsPath, logger: logger.getChild(VaultManager.name), @@ -162,6 +175,7 @@ async function bootstrapState({ await NotificationsManager.createNotificationsManager({ acl, db, + nodeConnectionManager, nodeManager, keyManager, logger: logger.getChild(NotificationsManager.name), @@ -178,7 +192,6 @@ async function bootstrapState({ await sessionManager.stop(); await notificationsManager.stop(); await vaultManager.stop(); - await nodeManager.stop(); await gestaltGraph.stop(); await acl.stop(); await sigchain.stop(); diff --git a/src/claims/types.ts b/src/claims/types.ts index 3d559f9ab..bddc9c56c 100644 --- a/src/claims/types.ts +++ b/src/claims/types.ts @@ -49,6 +49,7 @@ type SignatureData = { * claim ID (representing the sequence number key of the claim). */ type ClaimId = Opaque<'ClaimId', Id>; +type ClaimIdString = Opaque<'ClaimIdString', string>; type ClaimIdEncoded = Opaque<'ClaimIdEncoded', string>; type ClaimIdGenerator = () => ClaimId; @@ -104,6 +105,7 @@ export type { ClaimIntermediary, SignatureData, ClaimId, + ClaimIdString, ClaimIdEncoded, ClaimIdGenerator, ClaimEncoded, diff --git a/src/client/GRPCClientClient.ts b/src/client/GRPCClientClient.ts index 0006ca545..e97b3dfeb 100644 --- a/src/client/GRPCClientClient.ts +++ b/src/client/GRPCClientClient.ts @@ -38,6 +38,7 @@ class GRPCClientClient extends GRPCClient { proxyConfig, session, timeout = Infinity, + destroyCallback = async () => {}, logger = new Logger(this.name), }: { nodeId: NodeId; @@ -47,6 +48,7 @@ class GRPCClientClient extends GRPCClient { proxyConfig?: ProxyConfig; session?: Session; timeout?: number; + destroyCallback?: () => Promise; logger?: Logger; }): Promise { const interceptors: Array = []; @@ -74,6 +76,7 @@ class GRPCClientClient extends GRPCClient { proxyConfig, serverCertChain, flowCountInterceptor, + destroyCallback, logger, }); return grpcClientClient; diff --git a/src/client/service/agentStatus.ts 
b/src/client/service/agentStatus.ts index 8d28dbf64..ddae856b2 100644 --- a/src/client/service/agentStatus.ts +++ b/src/client/service/agentStatus.ts @@ -2,7 +2,8 @@ import type * as grpc from '@grpc/grpc-js'; import type { Authenticate } from '../types'; import type { KeyManager } from '../../keys'; import type { GRPCServer } from '../../grpc'; -import type { ForwardProxy, ReverseProxy } from '../../network'; +import type ForwardProxy from '../../network/ForwardProxy'; +import type ReverseProxy from '../../network/ReverseProxy'; import type * as utilsPB from '../../proto/js/polykey/v1/utils/utils_pb'; import process from 'process'; import * as grpcUtils from '../../grpc/utils'; diff --git a/src/client/service/identitiesClaim.ts b/src/client/service/identitiesClaim.ts index a87d36776..0964ecf78 100644 --- a/src/client/service/identitiesClaim.ts +++ b/src/client/service/identitiesClaim.ts @@ -1,6 +1,6 @@ import type * as grpc from '@grpc/grpc-js'; import type { Authenticate } from '../types'; -import type { NodeManager } from '../../nodes'; +import type KeyManager from '../../keys/KeyManager'; import type { Sigchain } from '../../sigchain'; import type { IdentitiesManager } from '../../identities'; import type { IdentityId, ProviderId } from '../../identities/types'; @@ -18,12 +18,12 @@ import * as identitiesPB from '../../proto/js/polykey/v1/identities/identities_p function identitiesClaim({ identitiesManager, sigchain, - nodeManager, + keyManager, authenticate, }: { identitiesManager: IdentitiesManager; sigchain: Sigchain; - nodeManager: NodeManager; + keyManager: KeyManager; authenticate: Authenticate; }) { return async ( @@ -65,7 +65,7 @@ function identitiesClaim({ // Create identity claim on our node const [, claim] = await sigchain.addClaim({ type: 'identity', - node: nodesUtils.encodeNodeId(nodeManager.getNodeId()), + node: nodesUtils.encodeNodeId(keyManager.getNodeId()), provider: providerId, identity: identityId, }); diff --git a/src/client/service/index.ts 
b/src/client/service/index.ts index 88a6ca861..c272fd0b7 100644 --- a/src/client/service/index.ts +++ b/src/client/service/index.ts @@ -1,7 +1,11 @@ import type PolykeyAgent from '../../PolykeyAgent'; import type { KeyManager } from '../../keys'; import type { VaultManager } from '../../vaults'; -import type { NodeManager } from '../../nodes'; +import type { + NodeManager, + NodeConnectionManager, + NodeGraph, +} from '../../nodes'; import type { IdentitiesManager } from '../../identities'; import type { GestaltGraph } from '../../gestalts'; import type { SessionManager } from '../../sessions'; @@ -9,7 +13,8 @@ import type { NotificationsManager } from '../../notifications'; import type { Discovery } from '../../discovery'; import type { Sigchain } from '../../sigchain'; import type { GRPCServer } from '../../grpc'; -import type { ForwardProxy, ReverseProxy } from '../../network'; +import type ForwardProxy from '../../network/ForwardProxy'; +import type ReverseProxy from '../../network/ReverseProxy'; import type { IClientServiceServer } from '../../proto/js/polykey/v1/client_service_grpc_pb'; import type { FileSystem } from '../../types'; import Logger from '@matrixai/logger'; @@ -87,6 +92,8 @@ function createService({ pkAgent: PolykeyAgent; keyManager: KeyManager; vaultManager: VaultManager; + nodeGraph: NodeGraph; + nodeConnectionManager: NodeConnectionManager; nodeManager: NodeManager; identitiesManager: IdentitiesManager; gestaltGraph: GestaltGraph; diff --git a/src/client/service/nodesFind.ts b/src/client/service/nodesFind.ts index b283b58e9..7982fd9ad 100644 --- a/src/client/service/nodesFind.ts +++ b/src/client/service/nodesFind.ts @@ -1,6 +1,6 @@ import type * as grpc from '@grpc/grpc-js'; import type { Authenticate } from '../types'; -import type { NodeManager } from '../../nodes'; +import type { NodeConnectionManager } from '../../nodes'; import type { NodeId } from '../../nodes/types'; import { utils as nodesUtils } from '../../nodes'; import { utils as 
grpcUtils } from '../../grpc'; @@ -14,10 +14,10 @@ import * as nodesPB from '../../proto/js/polykey/v1/nodes/nodes_pb'; * @throws ErrorNodeGraphNodeNotFound if node address cannot be found */ function nodesFind({ - nodeManager, + nodeConnectionManager, authenticate, }: { - nodeManager: NodeManager; + nodeConnectionManager: NodeConnectionManager; authenticate: Authenticate; }) { return async ( @@ -43,7 +43,7 @@ function nodesFind({ nodeId: call.request.getNodeId(), }, ); - const address = await nodeManager.findNode(nodeId); + const address = await nodeConnectionManager.findNode(nodeId); response .setNodeId(nodesUtils.encodeNodeId(nodeId)) .setAddress( diff --git a/src/config.ts b/src/config.ts index ffcdf5418..09f88b66a 100644 --- a/src/config.ts +++ b/src/config.ts @@ -93,7 +93,12 @@ const config = { connConnectTime: 20000, connTimeoutTime: 20000, }, - // This is not used by the `PolykeyAgent` with defaults to `{}` + nodeConnectionManagerConfig: { + connConnectTime: 20000, + connTimeoutTime: 60000, + initialClosestNodes: 3, + }, + // This is not used by the `PolykeyAgent` which defaults to `{}` network: { mainnet: {}, testnet: {}, diff --git a/src/discovery/Discovery.ts b/src/discovery/Discovery.ts index 16985e8b2..7c9a76cfb 100644 --- a/src/discovery/Discovery.ts +++ b/src/discovery/Discovery.ts @@ -9,8 +9,10 @@ import type { IdentityClaims, } from '../identities/types'; import type { NodeManager } from '../nodes'; +import type { Sigchain } from '../sigchain'; +import type { KeyManager } from '../keys'; import type { Provider, IdentitiesManager } from '../identities'; -import type { Claim, ClaimIdEncoded, ClaimLinkIdentity } from '../claims/types'; +import type { ClaimIdEncoded, Claim, ClaimLinkIdentity } from '../claims/types'; import type { ChainData } from '../sigchain/types'; import Logger from '@matrixai/logger'; @@ -26,44 +28,58 @@ class Discovery { protected gestaltGraph: GestaltGraph; protected identitiesManager: IdentitiesManager; protected nodeManager: 
NodeManager; + protected sigchain: Sigchain; + protected keyManager: KeyManager; protected logger: Logger; static async createDiscovery({ + keyManager, gestaltGraph, identitiesManager, nodeManager, + sigchain, logger = new Logger(this.name), }: { + keyManager: KeyManager; gestaltGraph: GestaltGraph; identitiesManager: IdentitiesManager; nodeManager: NodeManager; + sigchain: Sigchain; logger?: Logger; }): Promise { logger.info(`Creating ${this.name}`); const discovery = new Discovery({ + keyManager, gestaltGraph, identitiesManager, logger: logger, nodeManager, + sigchain, }); logger.info(`Created ${this.name}`); return discovery; } constructor({ + keyManager, gestaltGraph, identitiesManager, nodeManager, + sigchain, logger, }: { + keyManager: KeyManager; gestaltGraph: GestaltGraph; identitiesManager: IdentitiesManager; nodeManager: NodeManager; + sigchain: Sigchain; logger: Logger; }) { + this.keyManager = keyManager; this.gestaltGraph = gestaltGraph; this.identitiesManager = identitiesManager; this.nodeManager = nodeManager; + this.sigchain = sigchain; this.logger = logger; } @@ -114,13 +130,13 @@ class Discovery { let vertexChainData: ChainData = {}; // If the vertex we've found is our own node, we simply get our own chain const nodeId = nodesUtils.decodeNodeId(vertexGId.nodeId)!; - if (nodeId.equals(this.nodeManager.getNodeId())) { - const vertexChainDataEncoded = await this.nodeManager.getChainData(); + if (nodeId.equals(this.keyManager.getNodeId())) { + const vertexChainDataEncoded = await this.sigchain.getChainData(); // Decode all our claims - no need to verify (on our own sigchain) - for (const c in vertexChainDataEncoded) { - const claimId = c as ClaimIdEncoded; - vertexChainData[claimId] = claimsUtils.decodeClaim( - vertexChainDataEncoded[claimId], + let claimIdEncoded: ClaimIdEncoded; + for (claimIdEncoded in vertexChainDataEncoded) { + vertexChainData[claimIdEncoded] = claimsUtils.decodeClaim( + vertexChainDataEncoded[claimIdEncoded], ); } // Otherwise, 
request the verified chain data from the node @@ -144,8 +160,9 @@ class Discovery { // TODO: because we're iterating over keys in a record, I don't believe // that this will iterate in lexicographical order of keys. For now, // this doesn't matter though (because of the previous comment). - for (const claimId in vertexChainData) { - const claim: Claim = vertexChainData[claimId as ClaimIdEncoded]; + let claimIdEncoded: ClaimIdEncoded; + for (claimIdEncoded in vertexChainData) { + const claim = vertexChainData[claimIdEncoded]; // If the claim is to a node if (claim.payload.data.type === 'node') { diff --git a/src/git/utils.ts b/src/git/utils.ts index f2eda5201..d565ddc15 100644 --- a/src/git/utils.ts +++ b/src/git/utils.ts @@ -250,8 +250,9 @@ async function resolve( }) ).toString() || packedMap[ref].line; // FIXME: not sure what is going on here. } catch (err) { - if (err.code === 'ENOENT') + if (err.code === 'ENOENT') { throw new gitErrors.ErrorGitUndefinedRefs(`Ref ${ref} cannot be found`); + } } if (sha != null) { return resolve(fs, gitdir, sha.trim(), depth); // FIXME: sha is string or config? @@ -1213,8 +1214,9 @@ async function pack({ function writeObject(object: Uint8Array, stype: string): void { // Object type is encoded in bits 654 const type = types[stype]; - if (type === undefined) + if (type === undefined) { throw new gitErrors.ErrorGitUndefinedType('Unrecognized type: ' + stype); + } // The length encoding get complicated. 
let length = object.length; // Whether the next byte is part of the variable-length encoded number diff --git a/src/grpc/GRPCClient.ts b/src/grpc/GRPCClient.ts index 6fc1f4647..b55d3a275 100644 --- a/src/grpc/GRPCClient.ts +++ b/src/grpc/GRPCClient.ts @@ -9,17 +9,24 @@ import type { import type { NodeId } from '../nodes/types'; import type { Certificate } from '../keys/types'; import type { Host, Port, TLSConfig, ProxyConfig } from '../network/types'; - import http2 from 'http2'; import Logger from '@matrixai/logger'; +import * as grpc from '@grpc/grpc-js'; import * as grpcUtils from './utils'; import * as grpcErrors from './errors'; -import { utils as keysUtils } from '../keys'; -import { utils as networkUtils, errors as networkErrors } from '../network'; -import { promisify, promise, timerStart, timerStop } from '../utils'; -import { utils as nodeUtils } from '../nodes'; +import * as keysUtils from '../keys/utils'; +import * as networkUtils from '../network/utils'; +import * as networkErrors from '../network/errors'; +import * as nodeUtils from '../nodes/utils'; +import { + promisify, + promise, + timerStart, + timerStop, + never, +} from '../utils/utils'; -abstract class GRPCClient { +abstract class GRPCClient { /** * Create the gRPC client * This will asynchronously start the connection and verify the @@ -30,7 +37,7 @@ abstract class GRPCClient { * By default timeout is Infinity which means it retries connection * establishment forever */ - public static async createClient({ + public static async createClient({ clientConstructor, nodeId, host, @@ -120,6 +127,8 @@ abstract class GRPCClient { try { await waitForReady(timeout); } catch (e) { + // If we fail here then we leak the client object... 
+ client.close(); throw new grpcErrors.ErrorGRPCClientTimeout(); } let serverCertChain: Array | undefined; @@ -165,6 +174,7 @@ abstract class GRPCClient { protected serverCertChain?: Array; protected flowCountInterceptor?: grpcUtils.FlowCountInterceptor; protected _secured: boolean = false; + protected destroyCallback: () => Promise; constructor({ client, @@ -175,6 +185,7 @@ abstract class GRPCClient { proxyConfig, serverCertChain, flowCountInterceptor, + destroyCallback = async () => {}, logger, }: { client: T; @@ -185,6 +196,7 @@ abstract class GRPCClient { proxyConfig?: ProxyConfig; serverCertChain?: Array; flowCountInterceptor?: grpcUtils.FlowCountInterceptor; + destroyCallback?: () => Promise; logger: Logger; }) { this.logger = logger; @@ -196,9 +208,12 @@ abstract class GRPCClient { this.proxyConfig = proxyConfig; this.serverCertChain = serverCertChain; this.flowCountInterceptor = flowCountInterceptor; + this.destroyCallback = destroyCallback; if (tlsConfig != null) { this._secured = true; } + // Register the channel state watcher + this.watchChannelState(); } get secured(): boolean { @@ -243,6 +258,7 @@ abstract class GRPCClient { ); } } + await this.destroyCallback(); this.logger.info( `Destroyed ${this.constructor.name} connected to ${address}`, ); @@ -269,6 +285,54 @@ abstract class GRPCClient { } return this.serverCertChain!.map((crt) => keysUtils.certCopy(crt)); } + + /** + * Watches for connection state change + * Calls `this.destroy()` when the underlying channel is destroyed + */ + protected watchChannelState() { + const channel = this.client.getChannel(); + let connected = false; + const checkState = async (e?: Error) => { + if (e != null) { + // This should not happen because the error should only occur + // when the deadline is exceeded, however our deadline here is Infinity + this.logger.warn(`Watch Channel State Error: ${e.toString()}`); + never(); + } + const state = channel.getConnectivityState(false); + this.logger.debug( + `Watch Channel 
State: ${grpc.connectivityState[state]}`, + ); + switch (state) { + case grpc.connectivityState.READY: + connected = true; + break; + case grpc.connectivityState.IDLE: + // If connected already, then switching to IDLE means + // the connection has timed out + if (connected) { + await this.destroy(); + return; + } + break; + case grpc.connectivityState.SHUTDOWN: + await this.destroy(); + return; + } + try { + channel.watchConnectivityState(state, Infinity, checkState); + } catch (e) { + // Exception occurs only when the channel is already shutdown + await this.destroy(); + return; + } + }; + checkState().then( + () => {}, + () => {}, + ); + } } export default GRPCClient; diff --git a/src/grpc/GRPCServer.ts b/src/grpc/GRPCServer.ts index 843cdab84..6dd7a041c 100644 --- a/src/grpc/GRPCServer.ts +++ b/src/grpc/GRPCServer.ts @@ -11,8 +11,9 @@ import { StartStop, ready } from '@matrixai/async-init/dist/StartStop'; import * as grpc from '@grpc/grpc-js'; import * as grpcUtils from './utils'; import * as grpcErrors from './errors'; -import { utils as networkUtils, errors as networkErrors } from '../network'; -import { promisify, timerStart, timerStop } from '../utils'; +import * as networkUtils from '../network/utils'; +import * as networkErrors from '../network/errors'; +import { promisify, timerStart, timerStop } from '../utils/utils'; interface GRPCServer extends StartStop {} @StartStop() diff --git a/src/grpc/utils/index.ts b/src/grpc/utils/index.ts index 73125dbb8..2fcdd83d3 100644 --- a/src/grpc/utils/index.ts +++ b/src/grpc/utils/index.ts @@ -1,2 +1,3 @@ export { default as FlowCountInterceptor } from './FlowCountInterceptor'; +export { default as setLogger } from './setLogger'; export * from './utils'; diff --git a/src/grpc/utils/setLogger.ts b/src/grpc/utils/setLogger.ts new file mode 100644 index 000000000..79f90e089 --- /dev/null +++ b/src/grpc/utils/setLogger.ts @@ -0,0 +1,48 @@ +/** + * Setting the logger for grpc is independent from other utilities + * In 
order to allow selective-imports that minimises loading times + * @module + */ +import type Logger from '@matrixai/logger'; +import { LogLevel } from '@matrixai/logger'; +import * as grpcLogging from '@grpc/grpc-js/build/src/logging'; +import * as grpcConstants from '@grpc/grpc-js/build/src/constants'; + +/** + * Overrides GRPC's global logger with a `Logger` instance + * Updates the logging verbosity + * This should only be executed once for the entire process + * and before GRPC is being used + * Because this is global, the logger instance should be + * created near the root of the program + */ +function setLogger(logger: Logger): void { + grpcLogging.setLogger({ + error: (...data: Array) => + logger.error(data.map((d) => d.toString()).join(' ')), + info: (...data: Array) => + logger.info(data.map((d) => d.toString()).join(' ')), + debug: (...data: Array) => + logger.debug(data.map((d) => d.toString()).join(' ')), + }); + switch (logger.getEffectiveLevel()) { + case LogLevel.NOTSET: + // `LogLevel.NOTSET` for `Logger` is the default, and it means all logs + // However `grpc.logVerbosity.NONE` means no logs + // So we keep the grpc library default + break; + case LogLevel.DEBUG: + grpcLogging.setLoggerVerbosity(grpcConstants.LogVerbosity.DEBUG); + break; + case LogLevel.INFO: + grpcLogging.setLoggerVerbosity(grpcConstants.LogVerbosity.INFO); + break; + case LogLevel.WARN: + case LogLevel.ERROR: + // Production default + grpcLogging.setLoggerVerbosity(grpcConstants.LogVerbosity.ERROR); + break; + } +} + +export default setLogger; diff --git a/src/grpc/utils/utils.ts b/src/grpc/utils/utils.ts index 51e68516c..5be96b528 100644 --- a/src/grpc/utils/utils.ts +++ b/src/grpc/utils/utils.ts @@ -27,12 +27,11 @@ import type { AsyncGeneratorDuplexStreamClient, } from '../types'; import type { CertificatePemChain, PrivateKeyPem } from '../../keys/types'; - import { Buffer } from 'buffer'; import * as grpc from '@grpc/grpc-js'; import * as grpcErrors from '../errors'; import * 
as errors from '../../errors'; -import { promisify, promise, never } from '../../utils'; +import { promisify, promise, never } from '../../utils/utils'; /** * GRPC insecure credentials for the client diff --git a/src/identities/Provider.ts b/src/identities/Provider.ts index df63cb884..dbf77c67c 100644 --- a/src/identities/Provider.ts +++ b/src/identities/Provider.ts @@ -11,6 +11,9 @@ import type { IdentityClaim, IdentityClaimId } from '../identities/types'; import * as identitiesErrors from './errors'; import { schema } from '../claims'; +import { utils as validationUtils, validateSync } from '../validation'; +import { matchSync } from '../utils/matchers'; +import * as validationErrors from '../validation/errors'; type GetTokens = () => Promise; type GetToken = (identityId: IdentityId) => Promise; @@ -95,10 +98,24 @@ abstract class Provider { } catch (e) { return; } - // TODO: Add node ID validation here? if (!schema.claimIdentityValidate(claim)) { return; } + // We want to validate the NodeId in the data + try { + validateSync((keyPath, value) => { + return matchSync(keyPath)( + [ + ['payload', 'data', 'nodeId'], + () => validationUtils.parseNodeId(value), + ], + () => value, + ); + }, claim); + } catch (e) { + if (!(e instanceof validationErrors.ErrorParse)) return; + throw e; + } return claim; } diff --git a/src/keys/types.ts b/src/keys/types.ts index 120ddc458..128c4e687 100644 --- a/src/keys/types.ts +++ b/src/keys/types.ts @@ -1,7 +1,7 @@ -import type { NodeId } from '../nodes/types'; import type { asn1, pki } from 'node-forge'; -import type { Opaque } from '../types'; +import type { NodeId } from '../nodes/types'; import type { TLSConfig } from '../network/types'; +import type { Opaque } from '../types'; type PublicKey = pki.rsa.PublicKey; type PrivateKey = pki.rsa.PrivateKey; diff --git a/src/keys/utils.ts b/src/keys/utils.ts index f96d26f3e..4792251bb 100644 --- a/src/keys/utils.ts +++ b/src/keys/utils.ts @@ -30,7 +30,7 @@ import { import * as bip39 from 
'bip39'; import { IdInternal } from '@matrixai/id'; import * as keysErrors from './errors'; -import { utils as nodesUtils } from '../nodes'; +import * as nodesUtils from '../nodes/utils'; import config from '../config'; import { promisify, getUnixtime, never } from '../utils'; diff --git a/src/network/ConnectionForward.ts b/src/network/ConnectionForward.ts index 1ca254e7d..f9ac7e7d0 100644 --- a/src/network/ConnectionForward.ts +++ b/src/network/ConnectionForward.ts @@ -5,13 +5,12 @@ import type { Certificate } from '../keys/types'; import type { Address, Host, NetworkMessage, Port } from './types'; import type { NodeId } from '../nodes/types'; import type { AbstractConstructorParameters, Timer } from '../types'; - import tls from 'tls'; import { StartStop, ready } from '@matrixai/async-init/dist/StartStop'; import Connection from './Connection'; import * as networkUtils from './utils'; import * as networkErrors from './errors'; -import { utils as keysUtils } from '../keys'; +import * as keysUtils from '../keys/utils'; import { promise, timerStart, timerStop } from '../utils'; type ConnectionsForward = { @@ -23,7 +22,6 @@ interface ConnectionForward extends StartStop {} @StartStop() class ConnectionForward extends Connection { public readonly nodeId: NodeId; - public readonly endTime: number; protected connections: ConnectionsForward; protected pingInterval: ReturnType; diff --git a/src/network/ConnectionReverse.ts b/src/network/ConnectionReverse.ts index 506f47d53..b80e735a0 100644 --- a/src/network/ConnectionReverse.ts +++ b/src/network/ConnectionReverse.ts @@ -5,14 +5,13 @@ import type { Host, Port, Address, NetworkMessage } from './types'; import type { NodeId } from '../nodes/types'; import type { Certificate } from '../keys/types'; import type { AbstractConstructorParameters, Timer } from '../types'; - import net from 'net'; import tls from 'tls'; import { StartStop, ready } from '@matrixai/async-init/dist/StartStop'; import Connection from './Connection'; 
import * as networkUtils from './utils'; import * as networkErrors from './errors'; -import { utils as keysUtils } from '../keys'; +import * as keysUtils from '../keys/utils'; import { promise, timerStart, timerStop } from '../utils'; type ConnectionsReverse = { diff --git a/src/network/ForwardProxy.ts b/src/network/ForwardProxy.ts index 83af4a088..ca3d9e71e 100644 --- a/src/network/ForwardProxy.ts +++ b/src/network/ForwardProxy.ts @@ -3,7 +3,6 @@ import type { Host, Port, Address, ConnectionInfo, TLSConfig } from './types'; import type { ConnectionsForward } from './ConnectionForward'; import type { NodeId } from '../nodes/types'; import type { Timer } from '../types'; - import http from 'http'; import UTP from 'utp-native'; import { Mutex } from 'async-mutex'; @@ -12,8 +11,8 @@ import { StartStop, ready } from '@matrixai/async-init/dist/StartStop'; import ConnectionForward from './ConnectionForward'; import * as networkUtils from './utils'; import * as networkErrors from './errors'; +import * as nodesUtils from '../nodes/utils'; import { promisify, timerStart, timerStop } from '../utils'; -import { utils as nodesUtils } from '../nodes'; interface ForwardProxy extends StartStop {} @StartStop() diff --git a/src/network/ReverseProxy.ts b/src/network/ReverseProxy.ts index 6e081ad12..204a903f1 100644 --- a/src/network/ReverseProxy.ts +++ b/src/network/ReverseProxy.ts @@ -2,7 +2,6 @@ import type UTPConnection from 'utp-native/lib/connection'; import type { Host, Port, Address, ConnectionInfo, TLSConfig } from './types'; import type { ConnectionsReverse } from './ConnectionReverse'; import type { Timer } from '../types'; - import UTP from 'utp-native'; import { Mutex } from 'async-mutex'; import Logger from '@matrixai/logger'; diff --git a/src/network/utils.ts b/src/network/utils.ts index 4cfb5a54e..def14c337 100644 --- a/src/network/utils.ts +++ b/src/network/utils.ts @@ -3,13 +3,12 @@ import type { TLSSocket } from 'tls'; import type { Host, Hostname, Port, Address, 
NetworkMessage } from './types'; import type { Certificate, PublicKey } from '../keys/types'; import type { NodeId } from '../nodes/types'; - import { Buffer } from 'buffer'; import dns from 'dns'; import { IPv4, IPv6, Validator } from 'ip-num'; import * as networkErrors from './errors'; -import { utils as keysUtils } from '../keys'; -import { utils as nodesUtils } from '../nodes'; +import * as keysUtils from '../keys/utils'; +import * as nodesUtils from '../nodes/utils'; import { isEmptyObject, promisify } from '../utils'; const pingBuffer = serializeNetworkMessage({ diff --git a/src/nodes/NodeConnection.ts b/src/nodes/NodeConnection.ts index 99f1c026f..1e4482cdb 100644 --- a/src/nodes/NodeConnection.ts +++ b/src/nodes/NodeConnection.ts @@ -1,155 +1,75 @@ -import type { NodeId, NodeData } from './types'; -import type { Host, Hostname, Port, ProxyConfig } from '../network/types'; -import type { KeyManager } from '../keys'; -import type { SignedNotification } from '../notifications/types'; -import type { ChainDataEncoded } from '../sigchain/types'; +import type { NodeId } from './types'; +import type { Host, Hostname, Port } from '../network/types'; +import type KeyManager from '../keys/KeyManager'; import type { Certificate, PublicKey, PublicKeyPem } from '../keys/types'; -import type { - ClaimEncoded, - ClaimIntermediary, - ClaimIdEncoded, -} from '../claims/types'; - -import type { ForwardProxy } from '../network'; +import type ForwardProxy from '../network/ForwardProxy'; +import type GRPCClient from '../grpc/GRPCClient'; +import type NodeConnectionManager from './NodeConnectionManager'; import Logger from '@matrixai/logger'; -import { - CreateDestroyStartStop, - ready, -} from '@matrixai/async-init/dist/CreateDestroyStartStop'; -import * as nodesUtils from './utils'; +import { CreateDestroy, ready } from '@matrixai/async-init/dist/CreateDestroy'; +import * as asyncInit from '@matrixai/async-init'; import * as nodesErrors from './errors'; -import { utils as 
claimsUtils, errors as claimsErrors } from '../claims'; -import { utils as keysUtils } from '../keys'; -import { utils as vaultsUtils } from '../vaults'; -import { errors as grpcErrors } from '../grpc'; -import { GRPCClientAgent } from '../agent'; -import * as utilsPB from '../proto/js/polykey/v1/utils/utils_pb'; -import * as nodesPB from '../proto/js/polykey/v1/nodes/nodes_pb'; -import * as notificationsPB from '../proto/js/polykey/v1/notifications/notifications_pb'; -import { utils as networkUtils } from '../network'; +import * as keysUtils from '../keys/utils'; +import * as grpcErrors from '../grpc/errors'; +import * as networkUtils from '../network/utils'; /** * Encapsulates the unidirectional client-side connection of one node to another. */ -interface NodeConnection extends CreateDestroyStartStop {} -@CreateDestroyStartStop( - new nodesErrors.ErrorNodeConnectionRunning(), - new nodesErrors.ErrorNodeConnectionDestroyed(), -) -class NodeConnection { - protected logger: Logger; - protected keyManager: KeyManager; - - // Node ID, host, and port of the target node at the end of this connection - // Hostname defined if the target's host was resolved from this hostname. 
- // Undefined if an IP address was initially provided - protected targetNodeId: NodeId; - protected ingressHost: Host; - protected ingressHostname: Hostname | undefined; - protected ingressPort: Port; - - // Host and port of the initiating node (client) where the connection begins - protected localHost: Host; - protected localPort: Port; +// eslint-disable-next-line @typescript-eslint/no-unused-vars -- False positive for T +interface NodeConnection extends CreateDestroy {} +@CreateDestroy() +class NodeConnection { + public readonly host: Host; + public readonly port: Port; + /** + * Hostname is defined if the target's host was resolved from this hostname + * Undefined if a Host was directly provided + */ + public readonly hostname?: Hostname; + protected logger: Logger; + protected destroyCallback: () => Promise; protected fwdProxy: ForwardProxy; - protected proxyConfig: ProxyConfig; - protected client: GRPCClientAgent; + protected client: T; - static async createNodeConnection({ + static async createNodeConnection({ targetNodeId, targetHost, - targetHostname = undefined, targetPort, - connTimeout = 20000, - forwardProxy, + targetHostname, + connConnectTime = 20000, + fwdProxy, keyManager, + clientFactory, + nodeConnectionManager, + destroyCallback = async () => {}, logger = new Logger(this.name), - seedConnections = new Map(), }: { targetNodeId: NodeId; targetHost: Host; - targetHostname?: Hostname; targetPort: Port; - connTimeout?: number; - forwardProxy: ForwardProxy; + targetHostname?: Hostname; + connConnectTime?: number; + fwdProxy: ForwardProxy; keyManager: KeyManager; + clientFactory: (...args) => Promise; + nodeConnectionManager: NodeConnectionManager; + destroyCallback?: () => Promise; logger?: Logger; - seedConnections?: Map; - }): Promise { + }): Promise> { logger.info(`Creating ${this.name}`); const proxyConfig = { - host: forwardProxy.getProxyHost(), - port: forwardProxy.getProxyPort(), - authToken: forwardProxy.authToken, + host: 
fwdProxy.getProxyHost(), + port: fwdProxy.getProxyPort(), + authToken: fwdProxy.authToken, }; - const nodeConnection = new NodeConnection({ - targetNodeId, - targetHost, - targetHostname, - targetPort, - forwardProxy, - keyManager, - logger, - proxyConfig, - }); - await nodeConnection.start({ seedConnections, connTimeout }); - logger.info(`Created ${this.name}`); - return nodeConnection; - } - - constructor({ - targetNodeId, - targetHost, - targetHostname = undefined, - targetPort, - forwardProxy, - keyManager, - logger, - proxyConfig, - }: { - targetNodeId: NodeId; - targetHost: Host; - targetHostname?: Hostname; - targetPort: Port; - forwardProxy: ForwardProxy; - keyManager: KeyManager; - logger: Logger; - proxyConfig: ProxyConfig; - }) { - this.logger = logger; - this.targetNodeId = targetNodeId; - this.ingressHost = targetHost; - this.ingressHostname = targetHostname; - this.ingressPort = targetPort; - this.fwdProxy = forwardProxy; - this.keyManager = keyManager; - this.proxyConfig = proxyConfig; - } - - /** - * Initialises and starts the connection (via the fwdProxy). - * - * @param seedConnections map of all established seed node connections - * If not provided, it's assumed a direct connection can be made to the target - * (i.e. without hole punching, and therefore not being a NAT), as the seed - * nodes relay the hole punch message. - */ - public async start({ - seedConnections = new Map(), - connTimeout, - }: { - seedConnections?: Map; - connTimeout?: number; - } = {}) { - this.logger.info(`Starting ${this.constructor.name}`); // 1. Get the egress port of the fwdProxy (used for hole punching) const egressAddress = networkUtils.buildAddress( - this.fwdProxy.getEgressHost(), - this.fwdProxy.getEgressPort(), + fwdProxy.getEgressHost(), + fwdProxy.getEgressPort(), ); - // Also need to sign this for authentication (i.e. 
from expected source) - const signature = await this.keyManager.signWithRootKeyPair( + const signature = await keyManager.signWithRootKeyPair( Buffer.from(egressAddress), ); // 2. Ask fwdProxy for connection to target (the revProxy of other node) @@ -158,62 +78,110 @@ class NodeConnection { // 3. Relay the egress port to the broker/s (such that they can inform the other node) // 4. Start sending hole-punching packets to other node (done in openConnection()) // Done in parallel + const nodeConnection = new NodeConnection({ + host: targetHost, + port: targetPort, + hostname: targetHostname, + fwdProxy: fwdProxy, + destroyCallback, + logger, + }); + let client; try { - const [client] = await Promise.all([ - GRPCClientAgent.createGRPCClientAgent({ - nodeId: this.targetNodeId, - host: this.ingressHost, - port: this.ingressPort, - proxyConfig: this.proxyConfig, - logger: this.logger.getChild(GRPCClientAgent.name), - timeout: connTimeout, - }), - Array.from(seedConnections, ([_, conn]) => - conn.sendHolePunchMessage( - this.keyManager.getNodeId(), - this.targetNodeId, + // Start the hole punching only if we are not connecting to seed nodes + let holePunchPromises: Promise[] = []; + const seedNodes = nodeConnectionManager.getSeedNodes(); + const isSeedNode = !!seedNodes.find((nodeId) => { + return nodeId.equals(targetNodeId); + }); + if (!isSeedNode) { + holePunchPromises = Array.from(seedNodes, (nodeId) => { + return nodeConnectionManager.sendHolePunchMessage( + nodeId, + keyManager.getNodeId(), + targetNodeId, egressAddress, signature, - ), - ), + ); + }); + } + [client] = await Promise.all([ + clientFactory({ + nodeId: targetNodeId, + host: targetHost, + port: targetPort, + proxyConfig: proxyConfig, + // Think about this + logger: logger.getChild(clientFactory.name), + destroyCallback: async () => { + if ( + nodeConnection[asyncInit.status] !== 'destroying' && + !nodeConnection[asyncInit.destroyed] + ) { + await nodeConnection.destroy(); + } + }, + timeout: 
connConnectTime, + }), + holePunchPromises, ]); - this.client = client; + // 5. When finished, you have a connection to other node + // The GRPCClient is ready to be used for requests } catch (e) { - await this.stop(); + await nodeConnection.destroy(); // If the connection times out, re-throw this with a higher level nodes exception if (e instanceof grpcErrors.ErrorGRPCClientTimeout) { throw new nodesErrors.ErrorNodeConnectionTimeout(); } throw e; } - // 5. When finished, you have a connection to other node - // The GRPCClient is ready to be used for requests - this.logger.info( - `Started ${this.constructor.name} from ${nodesUtils.encodeNodeId( - this.keyManager.getNodeId(), - )} to ${nodesUtils.encodeNodeId(this.targetNodeId)}`, - ); + // TODO: This is due to chicken or egg problem + // see if we can move to CreateDestroyStartStop to resolve this + nodeConnection.client = client; + logger.info(`Created ${this.name}`); + return nodeConnection; } - public async stop() { - this.logger.info(`Stopping ${this.constructor.name}`); - if (this.client != null) { - await this.client.destroy(); - } - // Await this.fwdProxy.closeConnection(this.ingressHost, this.ingressPort); - this.logger.info( - `Stopped ${ - this.constructor.name - } from ${this.keyManager.getNodeId()} to ${this.targetNodeId}`, - ); + constructor({ + host, + port, + hostname, + fwdProxy, + destroyCallback, + logger, + }: { + host: Host; + port: Port; + hostname?: Hostname; + fwdProxy: ForwardProxy; + destroyCallback: () => Promise; + logger: Logger; + }) { + this.logger = logger; + this.host = host; + this.port = port; + this.hostname = hostname; + this.fwdProxy = fwdProxy; + this.destroyCallback = destroyCallback; } public async destroy() { this.logger.info(`Destroying ${this.constructor.name}`); + if ( + this.client != null && + this.client[asyncInit.status] !== 'destroying' && + !this.client[asyncInit.destroyed] + ) { + await this.client.destroy(); + } + await this.destroyCallback(); 
this.logger.info(`Destroyed ${this.constructor.name}`); } - public getClient() { + /** + * Gets GRPCClient for this node connection + */ + public getClient(): T { return this.client; } @@ -222,11 +190,11 @@ class NodeConnection { * end of this connection. * Ordered from newest to oldest. */ - @ready(new nodesErrors.ErrorNodeConnectionNotRunning()) + @ready(new nodesErrors.ErrorNodeConnectionDestroyed()) public getRootCertChain(): Array { const connInfo = this.fwdProxy.getConnectionInfoByIngress( - this.ingressHost, - this.ingressPort, + this.host, + this.port, ); if (connInfo == null) { throw new nodesErrors.ErrorNodeConnectionInfoNotExist(); @@ -241,7 +209,7 @@ class NodeConnection { * Sometimes these previous root keys are also still valid - these would be * found in the certificate chain. */ - @ready(new nodesErrors.ErrorNodeConnectionNotRunning()) + @ready(new nodesErrors.ErrorNodeConnectionDestroyed()) public getExpectedPublicKey(expectedNodeId: NodeId): PublicKeyPem | null { const certificates = this.getRootCertChain(); let publicKey: PublicKeyPem | null = null; @@ -254,209 +222,6 @@ class NodeConnection { } return publicKey; } - - /** - * Performs a GRPC request to retrieve the closest nodes relative to the given - * target node ID. 
- * @param targetNodeId the node ID to find other nodes closest to it - * @returns list of nodes and their IP/port that are closest to the target - */ - @ready(new nodesErrors.ErrorNodeConnectionNotRunning()) - public async getClosestNodes(targetNodeId: NodeId): Promise> { - // Construct the message - const nodeIdMessage = new nodesPB.Node(); - nodeIdMessage.setNodeId(nodesUtils.encodeNodeId(targetNodeId)); - // Send through client - const response = await this.client.nodesClosestLocalNodesGet(nodeIdMessage); - const nodes: Array = []; - // Loop over each map element (from the returned response) and populate nodes - response.getNodeTableMap().forEach((address, nodeIdEncoded: string) => { - const nodeId: NodeId = nodesUtils.decodeNodeId(nodeIdEncoded)!; - nodes.push({ - id: nodeId, - address: { - host: address.getHost() as Host | Hostname, - port: address.getPort() as Port, - }, - distance: nodesUtils.calculateDistance(targetNodeId, nodeId), - }); - }); - return nodes; - } - - /** - * Performs a GRPC request to send a hole-punch message to the target. Used to - * initially establish the NodeConnection from source to target. - * - * @param sourceNodeId node ID of the current node (i.e. 
the sender) - * @param targetNodeId node ID of the target node to hole punch - * @param egressAddress stringified address of `egressHost:egressPort` - * @param signature signature to verify source node is sender (signature based - * on egressAddress as message) - */ - @ready(new nodesErrors.ErrorNodeConnectionNotRunning()) - public async sendHolePunchMessage( - sourceNodeId: NodeId, - targetNodeId: NodeId, - egressAddress: string, - signature: Buffer, - ): Promise { - const relayMsg = new nodesPB.Relay(); - relayMsg.setSrcId(sourceNodeId.toString()); - relayMsg.setTargetId(targetNodeId.toString()); - relayMsg.setEgressAddress(egressAddress); - relayMsg.setSignature(signature.toString()); - await this.client.nodesHolePunchMessageSend(relayMsg); - } - - /** - * Performs a GRPC request to send a notification to the target. - */ - @ready(new nodesErrors.ErrorNodeConnectionNotRunning()) - public async sendNotification(message: SignedNotification): Promise { - const notificationMsg = new notificationsPB.AgentNotification(); - notificationMsg.setContent(message); - await this.client.notificationsSend(notificationMsg); - return; - } - - /** - * Performs a GRPC request to retrieve the NodeInfo of the node at the end of - * the connection. 
- * @returns the reconstructed NodeInfo (containing UNVERIFIED links) - */ - @ready(new nodesErrors.ErrorNodeConnectionNotRunning()) - public async getChainData(): Promise { - const chainData: ChainDataEncoded = {}; - const emptyMsg = new utilsPB.EmptyMessage(); - const response = await this.client.nodesChainDataGet(emptyMsg); - // Reconstruct each claim from the returned ChainDataMessage - response.getChainDataMap().forEach((claimMsg, id: string) => { - const claimId = id as ClaimIdEncoded; - // Reconstruct the signatures array - const signatures: Array<{ signature: string; protected: string }> = []; - for (const signatureData of claimMsg.getSignaturesList()) { - signatures.push({ - signature: signatureData.getSignature(), - protected: signatureData.getProtected(), - }); - } - // Add to the record of chain data, casting as expected ClaimEncoded - chainData[claimId] = { - signatures: signatures, - payload: claimMsg.getPayload(), - } as ClaimEncoded; - }); - return chainData; - } - - @ready(new nodesErrors.ErrorNodeConnectionNotRunning()) - public async claimNode( - singlySignedClaim: ClaimIntermediary, - ): Promise { - const genClaims = this.client.nodesCrossSignClaim(); - try { - // 2. Set up the intermediary claim message (the singly signed claim) to send - const crossSignMessage = claimsUtils.createCrossSignMessage({ - singlySignedClaim: singlySignedClaim, - }); - await genClaims.write(crossSignMessage); // Get the generator here - // 3. We expect to receieve our singly signed claim we sent to now be a - // doubly signed claim (signed by the other node), as well as a singly - // signed claim to be signed by us. 
- const readStatus = await genClaims.read(); - // If nothing to read, end and destroy - if (readStatus.done) { - throw new claimsErrors.ErrorEmptyStream(); - } - const receivedMessage = readStatus.value; - const intermediaryClaimMessage = receivedMessage.getSinglySignedClaim(); - const doublySignedClaimMessage = receivedMessage.getDoublySignedClaim(); - // Ensure all of our expected messages are defined - if (!intermediaryClaimMessage) { - throw new claimsErrors.ErrorUndefinedSinglySignedClaim(); - } - const intermediaryClaimSignature = - intermediaryClaimMessage.getSignature(); - if (!intermediaryClaimSignature) { - throw new claimsErrors.ErrorUndefinedSignature(); - } - if (!doublySignedClaimMessage) { - throw new claimsErrors.ErrorUndefinedDoublySignedClaim(); - } - // Reconstruct the expected objects from the messages - const constructedIntermediaryClaim = - claimsUtils.reconstructClaimIntermediary(intermediaryClaimMessage); - const constructedDoublySignedClaim = claimsUtils.reconstructClaimEncoded( - doublySignedClaimMessage, - ); - // Verify the singly signed claim with the sender's public key - const senderPublicKey = this.getExpectedPublicKey(this.targetNodeId); - if (!senderPublicKey) { - throw new nodesErrors.ErrorNodeConnectionPublicKeyNotFound(); - } - const verifiedSingly = await claimsUtils.verifyIntermediaryClaimSignature( - constructedIntermediaryClaim, - senderPublicKey, - ); - if (!verifiedSingly) { - throw new claimsErrors.ErrorSinglySignedClaimVerificationFailed(); - } - // Verify the doubly signed claim with both our public key, and the sender's - const verifiedDoubly = - (await claimsUtils.verifyClaimSignature( - constructedDoublySignedClaim, - this.keyManager.getRootKeyPairPem().publicKey, - )) && - (await claimsUtils.verifyClaimSignature( - constructedDoublySignedClaim, - senderPublicKey, - )); - if (!verifiedDoubly) { - throw new claimsErrors.ErrorDoublySignedClaimVerificationFailed(); - } - // 4. 
X <- responds with double signing the X signed claim <- Y - const doublySignedClaimResponse = await claimsUtils.signIntermediaryClaim( - { - claim: constructedIntermediaryClaim, - privateKey: this.keyManager.getRootKeyPairPem().privateKey, - signeeNodeId: nodesUtils.encodeNodeId(this.keyManager.getNodeId()), - }, - ); - // Should never be reached, but just for type safety - if (!doublySignedClaimResponse.payload) { - throw new claimsErrors.ErrorClaimsUndefinedClaimPayload(); - } - const crossSignMessageResponse = claimsUtils.createCrossSignMessage({ - doublySignedClaim: doublySignedClaimResponse, - }); - await genClaims.write(crossSignMessageResponse); - - // Check the stream is closed (should be closed by other side) - const finalResponse = await genClaims.read(); - if (finalResponse.done != null) { - await genClaims.next(null); - } - - return constructedDoublySignedClaim; - } catch (e) { - await genClaims.throw(e); - throw e; - } - } - - /** - * Retrieves all the vaults for a peers node - */ - @ready(new nodesErrors.ErrorNodeConnectionNotRunning()) - public async scanVaults(): Promise> { - // Create the handler for git to scan from - const gitRequest = await vaultsUtils.constructGitHandler( - this.client, - this.keyManager.getNodeId(), - ); - return await gitRequest.scanVaults(); - } } export default NodeConnection; diff --git a/src/nodes/NodeConnectionManager.ts b/src/nodes/NodeConnectionManager.ts new file mode 100644 index 000000000..dadb96b40 --- /dev/null +++ b/src/nodes/NodeConnectionManager.ts @@ -0,0 +1,705 @@ +import type KeyManager from '../keys/KeyManager'; +import type ReverseProxy from '../network/ReverseProxy'; +import type ForwardProxy from '../network/ForwardProxy'; +import type { Host, Hostname, Port } from '../network/types'; +import type { ResourceAcquire } from '../utils'; +import type { Timer } from '../types'; +import type NodeGraph from './NodeGraph'; +import type { + NodeId, + NodeAddress, + NodeData, + SeedNodes, + NodeIdString, +} from 
'./types'; +import Logger from '@matrixai/logger'; +import { StartStop, ready } from '@matrixai/async-init/dist/StartStop'; +import { IdInternal } from '@matrixai/id'; +import { status } from '@matrixai/async-init'; +import NodeConnection from './NodeConnection'; +import * as nodesUtils from './utils'; +import * as nodesErrors from './errors'; +import GRPCClientAgent from '../agent/GRPCClientAgent'; +import * as validationUtils from '../validation/utils'; +import * as networkUtils from '../network/utils'; +import * as agentErrors from '../agent/errors'; +import * as grpcErrors from '../grpc/errors'; +import * as nodesPB from '../proto/js/polykey/v1/nodes/nodes_pb'; +import { RWLock, withF } from '../utils'; + +type ConnectionAndLock = { + connection?: NodeConnection; + timer?: NodeJS.Timer; + lock: RWLock; +}; + +interface NodeConnectionManager extends StartStop {} +@StartStop() +class NodeConnectionManager { + /** + * Time used to establish `NodeConnection` + */ + public readonly connConnectTime: number; + + /** + * Time to live for `NodeConnection` + */ + public readonly connTimeoutTime: number; + /** + * Alpha constant for kademlia + * The number of closest nodes to contact initially + */ + public readonly initialClosestNodes: number; + + protected logger: Logger; + protected nodeGraph: NodeGraph; + protected keyManager: KeyManager; + protected fwdProxy: ForwardProxy; + protected revProxy: ReverseProxy; + protected seedNodes: SeedNodes; + /** + * Data structure to store all NodeConnections. If a connection to a node n does + * not exist, no entry for n will exist in the map. Alternatively, if a + * connection is currently being instantiated by some thread, an entry will + * exist in the map, but only with the lock (no connection object). Once a + * connection is instantiated, the entry in the map is updated to include the + * connection object. + * A nodeIdString is used for the key here since + * NodeIds can't be used to properly retrieve a value from the map.
+ */ + protected connections: Map = new Map(); + + public constructor({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + seedNodes = {}, + initialClosestNodes = 3, + connConnectTime = 20000, + connTimeoutTime = 60000, + logger, + }: { + nodeGraph: NodeGraph; + keyManager: KeyManager; + fwdProxy: ForwardProxy; + revProxy: ReverseProxy; + seedNodes?: SeedNodes; + initialClosestNodes?: number; + connConnectTime?: number; + connTimeoutTime?: number; + logger?: Logger; + }) { + this.logger = logger ?? new Logger(NodeConnectionManager.name); + this.keyManager = keyManager; + this.nodeGraph = nodeGraph; + this.fwdProxy = fwdProxy; + this.revProxy = revProxy; + this.seedNodes = seedNodes; + this.initialClosestNodes = initialClosestNodes; + this.connConnectTime = connConnectTime; + this.connTimeoutTime = connTimeoutTime; + } + + public async start() { + this.logger.info(`Starting ${this.constructor.name}`); + for (const nodeIdEncoded in this.seedNodes) { + const nodeId = nodesUtils.decodeNodeId(nodeIdEncoded)!; + await this.nodeGraph.setNode(nodeId, this.seedNodes[nodeIdEncoded]); + } + this.logger.info(`Started ${this.constructor.name}`); + } + + public async stop() { + this.logger.info(`Stopping ${this.constructor.name}`); + for (const [nodeId, connAndLock] of this.connections) { + if (connAndLock == null) continue; + if (connAndLock.connection == null) continue; + // It exists so we want to destroy it + await this.destroyConnection(IdInternal.fromString(nodeId)); + } + this.logger.info(`Stopped ${this.constructor.name}`); + } + + /** + * For usage with withF, to acquire a connection in a + * This unique acquire function structure of returning the ResourceAcquire + * itself is such that we can pass targetNodeId as a parameter (as opposed to + * an acquire function with no parameters). 
+ * @param targetNodeId Id of target node to communicate with + * @returns ResourceAcquire Resource API for use in with contexts + */ + @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) + public async acquireConnection( + targetNodeId: NodeId, + ): Promise>> { + return async () => { + const connAndLock = await this.createConnection(targetNodeId); + // Acquire the read lock and the release function + const release = await connAndLock.lock.acquireRead(); + // Resetting TTL timer + connAndLock.timer?.refresh(); + // Return tuple of [ResourceRelease, Resource] + return [ + async () => { + release(); + }, + connAndLock.connection, + ]; + }; + } + + /** + * Perform some function on another node over the network with a connection. + * Will either retrieve an existing connection, or create a new one if it + * doesn't exist. + * for use with normal arrow function + * @param targetNodeId Id of target node to communicate with + * @param f Function to handle communication + */ + @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) + public async withConnF( + targetNodeId: NodeId, + f: (conn: NodeConnection) => Promise, + ): Promise { + try { + return await withF( + [await this.acquireConnection(targetNodeId)], + async ([conn]) => { + return await f(conn); + }, + ); + } catch (err) { + if ( + err instanceof nodesErrors.ErrorNodeConnectionDestroyed || + err instanceof grpcErrors.ErrorGRPC || + err instanceof agentErrors.ErrorAgentClientDestroyed + ) { + // Error with connection, shutting connection down + await this.destroyConnection(targetNodeId); + } + throw err; + } + } + + /** + * Perform some function on another node over the network with a connection. + * Will either retrieve an existing connection, or create a new one if it + * doesn't exist. 
+ * for use with a generator function + * @param targetNodeId Id of target node to communicate with + * @param g Generator function to handle communication + */ + @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) + public async *withConnG( + targetNodeId: NodeId, + g: ( + conn: NodeConnection, + ) => AsyncGenerator, + ): AsyncGenerator { + const acquire = await this.acquireConnection(targetNodeId); + const [release, conn] = await acquire(); + try { + return yield* await g(conn!); + } catch (err) { + if ( + err instanceof nodesErrors.ErrorNodeConnectionDestroyed || + err instanceof grpcErrors.ErrorGRPC || + err instanceof agentErrors.ErrorAgentClientDestroyed + ) { + // Error with connection, shutting connection down + await release(); + await this.destroyConnection(targetNodeId); + } + throw err; + } finally { + await release(); + } + // Wait for any destruction to complete after locking is removed + } + + /** + * Create a connection to another node (without performing any function). + * This is a NOOP if a connection already exists. + * @param targetNodeId Id of node we are creating connection to + * @returns ConnectionAndLock that was created or exists in the connection map.
+ */ + protected async createConnection( + targetNodeId: NodeId, + ): Promise { + this.logger.info( + `Creating connection to ${nodesUtils.encodeNodeId(targetNodeId)}`, + ); + let connection: NodeConnection | undefined; + let lock: RWLock; + let connAndLock = this.connections.get( + targetNodeId.toString() as NodeIdString, + ); + if (connAndLock != null) { + ({ connection, lock } = connAndLock); + // Connection already exists, so return + if (connection != null) return connAndLock; + // Acquire the write (creation) lock + return await lock.withWrite(async () => { + // Once lock is released, check again if the conn now exists + connAndLock = this.connections.get( + targetNodeId.toString() as NodeIdString, + ); + if (connAndLock != null && connAndLock.connection != null) { + return connAndLock; + } + this.logger.info( + `existing lock: creating connection to ${nodesUtils.encodeNodeId( + targetNodeId, + )}`, + ); + // Creating the connection and set in map + return await this.establishNodeConnection(targetNodeId, lock); + }); + } else { + lock = new RWLock(); + connAndLock = { lock }; + this.connections.set( + targetNodeId.toString() as NodeIdString, + connAndLock, + ); + return await lock.withWrite(async () => { + this.logger.info( + `no existing entry: creating connection to ${nodesUtils.encodeNodeId( + targetNodeId, + )}`, + ); + // Creating the connection and set in map + return await this.establishNodeConnection(targetNodeId, lock); + }); + } + } + + /** + * Strictly a helper function for this.createConnection. Do not call this + * function anywhere else. + * To create a connection to a node, always use createConnection, or + * withConnection. + * This only adds the connection to the connection map if the connection was established. 
+ * @param targetNodeId Id of node we are establishing connection to + * @param lock Lock associated with connection + * @returns ConnectionAndLock that was added to the connection map + */ + protected async establishNodeConnection( + targetNodeId: NodeId, + lock: RWLock, + ): Promise { + const targetAddress = await this.findNode(targetNodeId); + // If the stored host is not a valid host (IP address), then we assume it to + // be a hostname + const targetHostname = !networkUtils.isHost(targetAddress.host) + ? (targetAddress.host as string as Hostname) + : undefined; + const targetHost = await networkUtils.resolveHost(targetAddress.host); + // Creating the destroyCallback + const destroyCallback = async () => { + // To avoid deadlock only in the case where this is called + // we want to check for destroying connection and read lock + const connAndLock = this.connections.get( + targetNodeId.toString() as NodeIdString, + ); + // If the connection is calling destroyCallback then it SHOULD + // exist in the connection map + if (connAndLock == null) throw Error('temp error, bad logic'); + // Already locked so already destroying + if (connAndLock.lock.readerCount > 0) return; + const connectionStatus = connAndLock?.connection?.[status]; + // Connection is already destroying + if (connectionStatus === 'destroying') return; + await this.destroyConnection(targetNodeId); + }; + const connection = await NodeConnection.createNodeConnection({ + targetNodeId: targetNodeId, + targetHost: targetHost, + targetHostname: targetHostname, + targetPort: targetAddress.port, + fwdProxy: this.fwdProxy, + keyManager: this.keyManager, + nodeConnectionManager: this, + destroyCallback, + connConnectTime: this.connConnectTime, + logger: this.logger.getChild(`${targetHost}:${targetAddress.port}`), + clientFactory: async (args) => + GRPCClientAgent.createGRPCClientAgent(args), + }); + // Creating TTL timeout + const timer = setTimeout(async () => { + await this.destroyConnection(targetNodeId); + 
}, this.connTimeoutTime); + // Add it to the map of active connections + const connectionAndLock = { connection, lock, timer }; + this.connections.set( + targetNodeId.toString() as NodeIdString, + connectionAndLock, + ); + return connectionAndLock; + } + + /** + * Removes the connection from the connection map and destroys it. + * @param targetNodeId Id of node we are destroying connection to + */ + protected async destroyConnection(targetNodeId: NodeId): Promise { + const connAndLock = this.connections.get( + targetNodeId.toString() as NodeIdString, + ); + if (connAndLock == null) return; + const connection = connAndLock.connection; + if (connection == null) return; + const lock = connAndLock.lock; + + // If the connection exists then we lock, destroy and remove it from the map + await lock.withWrite(async () => { + // Destroying connection + await connection.destroy(); + // Destroying TTL timer + if (connAndLock.timer != null) clearTimeout(connAndLock.timer); + // Updating the connection map + this.connections.set(targetNodeId.toString() as NodeIdString, { lock }); + }); + } + + /** + * Treat this node as the server. + * Instruct the reverse proxy to send hole-punching packets back to the target's + * forward proxy, in order to open a connection from the client to this server. + * A connection is established if the client node's forward proxy is sending + * hole punching packets at the same time as this node (acting as the server) + * sends hole-punching packets back to the client's forward proxy. + * @param egressHost host of the client's forward proxy + * @param egressPort port of the client's forward proxy + * @param timer Connection timeout timer + */ + @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) + public async holePunchReverse( + egressHost: Host, + egressPort: Port, + timer?: Timer, + ): Promise { + await this.revProxy.openConnection(egressHost, egressPort, timer); + } + + /** + * Treat this node as the client.
+ * Instruct the forward proxy to send hole-punching packets back to the target's + * reverse proxy, in order to open a connection from this client to the server. + * A connection is established if the client node's reverse proxy is sending + * hole punching packets at the same time as this node (acting as the client) + * sends hole-punching packets back to the server's reverse proxy. + * This is not needed to be called when doing hole punching since the + * ForwardProxy automatically starts the process. + * @param nodeId Node Id of the node we are connecting to + * @param ingressHost Ingress host of the reverse proxy + * @param ingressPort Ingress port of the reverse proxy + * @param timer Connection timeout timer + */ + public async holePunchForward( + nodeId: NodeId, + ingressHost: Host, + ingressPort: Port, + timer?: Timer, + ): Promise { + await this.fwdProxy.openConnection(nodeId, ingressHost, ingressPort, timer); + } + + /** + * Retrieves the node address. If an entry doesn't exist in the db, then + * proceeds to locate it using Kademlia. + * @param targetNodeId Id of the node we are trying to find + */ + @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) + public async findNode(targetNodeId: NodeId): Promise { + // First check if we already have an existing ID -> address record + + let address = await this.nodeGraph.getNode(targetNodeId); + // Otherwise, attempt to locate it by contacting network + if (address == null) { + address = await this.getClosestGlobalNodes(targetNodeId); + // TODO: This currently just does one iteration + // If not found in this single iteration, we throw an exception + if (address == null) { + throw new nodesErrors.ErrorNodeGraphNodeIdNotFound(); + } + } + // We ensure that we always return a NodeAddress (either by lookup, or + // network search) - if we can't locate it from either, we throw an exception + return address; + } + + /** + * Finds the set of nodes (of size k) known by the current node (i.e.
in its + * buckets database) that have the smallest distance to the target node (i.e. + * are closest to the target node). + * i.e. FIND_NODE RPC from Kademlia spec + * + * Used by the RPC service. + * + * @param targetNodeId the node ID to find other nodes closest to it + * @param numClosest the number of closest nodes to return (by default, returns + * according to the maximum number of nodes per bucket) + * @returns a mapping containing exactly k nodeIds -> nodeAddresses (unless the + * current node has less than k nodes in all of its buckets, in which case it + * returns all nodes it has knowledge of) + */ + @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) + public async getClosestLocalNodes( + targetNodeId: NodeId, + numClosest: number = this.nodeGraph.maxNodesPerBucket, + ): Promise> { + // Retrieve all nodes from buckets in database + const buckets = await this.nodeGraph.getAllBuckets(); + // Iterate over all of the nodes in each bucket + const distanceToNodes: Array = []; + buckets.forEach(function (bucket) { + for (const nodeIdString of Object.keys(bucket)) { + // Compute the distance from the node, and add it to the array + const nodeId = IdInternal.fromString(nodeIdString); + distanceToNodes.push({ + id: nodeId, + address: bucket[nodeId].address, + distance: nodesUtils.calculateDistance(nodeId, targetNodeId), + }); + } + }); + // Sort the array (based on the distance at index 1) + distanceToNodes.sort(nodesUtils.sortByDistance); + // Return the closest k nodes (i.e. the first k), or all nodes if < k in array + return distanceToNodes.slice(0, numClosest); + } + + /** + * Attempts to locate a target node in the network (using Kademlia). + * Adds all discovered, active nodes to the current node's database (up to k + * discovered nodes). + * Once the target node is found, the method returns and stops trying to locate + * other nodes. + * + * Ultimately, attempts to perform a "DNS resolution" on the given target node + * ID (i.e. 
given a node ID, retrieves the node address, containing its IP and + * port). + * @param targetNodeId ID of the node attempting to be found (i.e. attempting + * to find its IP address and port) + * @returns whether the target node was located in the process + */ + @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) + public async getClosestGlobalNodes( + targetNodeId: NodeId, + ): Promise { + // Let foundTarget: boolean = false; + let foundAddress: NodeAddress | undefined = undefined; + // Get the closest alpha nodes to the target node (set as shortlist) + const shortlist: Array = await this.getClosestLocalNodes( + targetNodeId, + this.initialClosestNodes, + ); + // If we have no nodes at all in our database (even after synchronising), + // then we should throw an error. We aren't going to find any others + if (shortlist.length === 0) { + throw new nodesErrors.ErrorNodeGraphEmptyDatabase(); + } + // Need to keep track of the nodes that have been contacted + // Not sufficient to simply check if there's already a pre-existing connection + // in nodeConnections - what if there's been more than 1 invocation of + // getClosestGlobalNodes()? 
+ const contacted: { [nodeId: string]: boolean } = {}; + // Iterate until we've found and contacted k nodes + while (Object.keys(contacted).length <= this.nodeGraph.maxNodesPerBucket) { + // While (!foundTarget) { + // Remove the node from the front of the array + const nextNode = shortlist.shift(); + // If we have no nodes left in the shortlist, then stop + if (nextNode == null) { + break; + } + // Skip if the node has already been contacted + if (contacted[nextNode.id]) { + continue; + } + // Connect to the node (check if pre-existing connection exists, otherwise + // create a new one) + try { + // Add the node to the database so that we can find its address in + // call to getConnectionToNode + await this.nodeGraph.setNode(nextNode.id, nextNode.address); + await this.createConnection(nextNode.id); + } catch (e) { + // If we can't connect to the node, then skip it + continue; + } + contacted[nextNode.id] = true; + // Ask the node to get their own closest nodes to the target + const foundClosest = await this.getRemoteNodeClosestNodes( + nextNode.id, + targetNodeId, + ); + // Check to see if any of these are the target node.
At the same time, add + // them to the shortlist + for (const nodeData of foundClosest) { + // Ignore any nodes that have been contacted + if (contacted[nodeData.id]) { + continue; + } + if (nodeData.id.equals(targetNodeId)) { + await this.nodeGraph.setNode(nodeData.id, nodeData.address); + foundAddress = nodeData.address; + // We have found the target node, so we can stop trying to look for it + // in the shortlist + break; + } + shortlist.push(nodeData); + } + // To make the number of jumps relatively short, should connect to the nodes + // closest to the target first, and ask if they know of any closer nodes + // Then we can simply unshift the first (closest) element from the shortlist + shortlist.sort(function (a: NodeData, b: NodeData) { + if (a.distance > b.distance) { + return 1; + } else if (a.distance < b.distance) { + return -1; + } else { + return 0; + } + }); + } + return foundAddress; + } + + /** + * Performs a GRPC request to retrieve the closest nodes relative to the given + * target node ID. 
+ * @param nodeId the node ID to search on + * @param targetNodeId the node ID to find other nodes closest to it + * @returns list of nodes and their IP/port that are closest to the target + */ + @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) + public async getRemoteNodeClosestNodes( + nodeId: NodeId, + targetNodeId: NodeId, + ): Promise> { + // Construct the message + const nodeIdMessage = new nodesPB.Node(); + nodeIdMessage.setNodeId(nodesUtils.encodeNodeId(targetNodeId)); + // Send through client + return this.withConnF(nodeId, async (connection) => { + const client = await connection.getClient(); + const response = await client.nodesClosestLocalNodesGet(nodeIdMessage); + const nodes: Array = []; + // Loop over each map element (from the returned response) and populate nodes + response.getNodeTableMap().forEach((address, nodeIdString: string) => { + const nodeId = nodesUtils.decodeNodeId(nodeIdString); + // If the nodeId is not valid we don't add it to the list of nodes + if (nodeId != null) { + nodes.push({ + id: nodeId, + address: { + host: address.getHost() as Host | Hostname, + port: address.getPort() as Port, + }, + distance: nodesUtils.calculateDistance(targetNodeId, nodeId), + }); + } + }); + return nodes; + }); + } + + /** + * Perform an initial database synchronisation: get the k closest nodes + * from each seed node and add them to this database + * For now, we also attempt to establish a connection to each of them. + * If these nodes are offline, this will impose a performance penalty, + * so we should investigate performing this in the background if possible. + * Alternatively, we can also just add the nodes to our database without + * establishing connection. + * This has been removed from start() as there's a chicken-egg scenario + * where we require the NodeGraph instance to be created in order to get + * connections. 
+ */ + @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) + public async syncNodeGraph() { + for (const seedNodeId of this.getSeedNodes()) { + // Check if the connection is viable + try { + await this.createConnection(seedNodeId); + } catch (e) { + if (e instanceof nodesErrors.ErrorNodeConnectionTimeout) continue; + throw e; + } + + const nodes = await this.getRemoteNodeClosestNodes( + seedNodeId, + this.keyManager.getNodeId(), + ); + for (const n of nodes) { + await this.nodeGraph.setNode(n.id, n.address); + } + } + } + + /** + * Performs a GRPC request to send a hole-punch message to the target. Used to + * initially establish the NodeConnection from source to target. + * + * @param relayNodeId node ID of the relay node (i.e. the seed node) + * @param sourceNodeId node ID of the current node (i.e. the sender) + * @param targetNodeId node ID of the target node to hole punch + * @param egressAddress stringified address of `egressHost:egressPort` + * @param signature signature to verify source node is sender (signature based + * on egressAddress as message) + */ + @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) + public async sendHolePunchMessage( + relayNodeId: NodeId, + sourceNodeId: NodeId, + targetNodeId: NodeId, + egressAddress: string, + signature: Buffer, + ): Promise { + const relayMsg = new nodesPB.Relay(); + relayMsg.setSrcId(nodesUtils.encodeNodeId(sourceNodeId)); + relayMsg.setTargetId(nodesUtils.encodeNodeId(targetNodeId)); + relayMsg.setEgressAddress(egressAddress); + relayMsg.setSignature(signature.toString()); + await this.withConnF(relayNodeId, async (connection) => { + const client = connection.getClient(); + await client.nodesHolePunchMessageSend(relayMsg); + }); + } + + /** + * Forwards a received hole punch message on to the target. + * If not known, the node ID -> address mapping is attempted to be discovered + * through Kademlia (note, however, this is currently only called by a 'broker' + * node). 
+ * @param message the original relay message (assumed to be created in + * nodeConnection.start()) + */ + @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) + public async relayHolePunchMessage(message: nodesPB.Relay): Promise { + await this.sendHolePunchMessage( + validationUtils.parseNodeId(message.getTargetId()), + validationUtils.parseNodeId(message.getSrcId()), + validationUtils.parseNodeId(message.getTargetId()), + message.getEgressAddress(), + Buffer.from(message.getSignature()), + ); + } + + /** + * Returns an array of the seed nodes. + */ + @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) + public getSeedNodes(): Array { + const nodeIds = Object.keys(this.seedNodes).map( + (nodeIdEncoded) => nodesUtils.decodeNodeId(nodeIdEncoded)!, + ); + return nodeIds; + } +} + +export default NodeConnectionManager; diff --git a/src/nodes/NodeGraph.ts b/src/nodes/NodeGraph.ts index f4e21016c..4237b5529 100644 --- a/src/nodes/NodeGraph.ts +++ b/src/nodes/NodeGraph.ts @@ -1,9 +1,7 @@ -import type { NodeId, NodeAddress, NodeBucket, NodeData } from './types'; -import type { Host, Hostname, Port } from '../network/types'; import type { DB, DBLevel, DBOp } from '@matrixai/db'; -import type { NodeConnection } from '../nodes'; - -import type NodeManager from './NodeManager'; +import type { NodeId, NodeAddress, NodeBucket } from './types'; +import type KeyManager from '../keys/KeyManager'; +import type { Host, Hostname, Port } from '../network/types'; import { Mutex } from 'async-mutex'; import lexi from 'lexicographic-integer'; import Logger from '@matrixai/logger'; @@ -25,16 +23,12 @@ interface NodeGraph extends CreateDestroyStartStop {} new nodesErrors.ErrorNodeGraphDestroyed(), ) class NodeGraph { - // Internally, node ID is a 32 byte array - public readonly nodeIdBits: number = 256; // Max number of nodes in each k-bucket (a.k.a. k) public readonly maxNodesPerBucket: number = 20; - // Max parallel connections (a.k.a. 
alpha) - public readonly maxConcurrentNodeConnections: number = 3; protected logger: Logger; protected db: DB; - protected nodeManager: NodeManager; + protected keyManager: KeyManager; protected nodeGraphDbDomain: string = this.constructor.name; protected nodeGraphBucketsDbDomain: Array = [ this.nodeGraphDbDomain, @@ -46,19 +40,19 @@ class NodeGraph { public static async createNodeGraph({ db, - nodeManager, + keyManager, logger = new Logger(this.name), fresh = false, }: { db: DB; - nodeManager: NodeManager; + keyManager: KeyManager; logger?: Logger; fresh?: boolean; }): Promise { logger.info(`Creating ${this.name}`); const nodeGraph = new NodeGraph({ db, - nodeManager, + keyManager, logger, }); await nodeGraph.start({ fresh }); @@ -68,16 +62,16 @@ class NodeGraph { constructor({ db, - nodeManager, + keyManager, logger, }: { db: DB; - nodeManager: NodeManager; + keyManager: KeyManager; logger: Logger; }) { this.logger = logger; this.db = db; - this.nodeManager = nodeManager; + this.keyManager = keyManager; } get locked(): boolean { @@ -145,40 +139,10 @@ class NodeGraph { } /** - * Perform an initial database synchronisation: get the k closest nodes - * from each seed node and add them to this database - * For now, we also attempt to establish a connection to each of them. - * If these nodes are offline, this will impose a performance penalty, - * so we should investigate performing this in the background if possible. - * Alternatively, we can also just add the nodes to our database without - * establishing connection. - * This has been removed from start() as there's a chicken-egg scenario - * where we require the NodeGraph instance to be created in order to get - * connections. 
+ * Retrieves the node Address + * @param nodeId node ID of the target node + * @returns Node Address of the target node */ - public async syncNodeGraph() { - for (const [, conn] of await this.nodeManager.getConnectionsToSeedNodes()) { - const nodes = await conn.getClosestNodes(this.nodeManager.getNodeId()); - for (const n of nodes) { - await this.setNode(n.id, n.address); - try { - await this.nodeManager.getConnectionToNode(n.id); - } catch (e) { - if (e instanceof nodesErrors.ErrorNodeConnectionTimeout) { - continue; - } else { - throw e; - } - } - } - } - } - - @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public getNodeId(): NodeId { - return this.nodeManager.getNodeId(); - } - @ready(new nodesErrors.ErrorNodeGraphNotRunning()) public async getNode(nodeId: NodeId): Promise { return await this._transaction(async () => { @@ -194,6 +158,21 @@ class NodeGraph { }); } + /** + * Determines whether a node ID -> node address mapping exists in this node's + * node table. + * @param targetNodeId the node ID of the node to find + * @returns true if the node exists in the table, false otherwise + */ + @ready(new nodesErrors.ErrorNodeGraphNotRunning()) + public async knowsNode(targetNodeId: NodeId): Promise { + return !!(await this.getNode(targetNodeId)); + } + + /** + * Returns the specified bucket if it exists + * @param bucketIndex + */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) public async getBucket(bucketIndex: number): Promise { return await this._transaction(async () => { @@ -228,7 +207,7 @@ class NodeGraph { }); } - public async setNodeOps( + protected async setNodeOps( nodeId: NodeId, nodeAddress: NodeAddress, ): Promise> { @@ -245,7 +224,7 @@ class NodeGraph { lastUpdated: new Date(), }; // Perform the check on size after we add/update the node. If it's an update, - // then we don't need to perform the deletion. 
+ // then we don't need to perform the deletion let bucketEntries = Object.entries(bucket); if (bucketEntries.length > this.maxNodesPerBucket) { const leastActive = bucketEntries.reduce((prev, curr) => { @@ -286,7 +265,7 @@ class NodeGraph { }); } - public async updateNodeOps( + protected async updateNodeOps( nodeId: NodeId, nodeAddress?: NodeAddress, ): Promise> { @@ -308,11 +287,15 @@ class NodeGraph { value: bucket, }); } else { - throw new nodesErrors.ErrorNodeGraphNodeIdMissing(); + throw new nodesErrors.ErrorNodeGraphNodeIdNotFound(); } return ops; } + /** + * Removes a node from the bucket database + * @param nodeId + */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) public async unsetNode(nodeId: NodeId): Promise { return await this._transaction(async () => { @@ -321,7 +304,7 @@ class NodeGraph { }); } - public async unsetNodeOps(nodeId: NodeId): Promise> { + protected async unsetNodeOps(nodeId: NodeId): Promise> { const bucketIndex = this.getBucketIndex(nodeId); const bucket = await this.db.get( this.nodeGraphBucketsDbDomain, @@ -356,17 +339,15 @@ class NodeGraph { */ protected getBucketIndex(nodeId: NodeId): string { const index = nodesUtils.calculateBucketIndex( - this.getNodeId(), + this.keyManager.getNodeId(), nodeId, - this.nodeIdBits, ); return lexi.pack(index, 'hex') as string; } - // Ok so here is where we must start refactoring this - - // this might be better to stream this directly to where it is being used - // cause the subsequent functions are using this + /** + * Returns all of the buckets in an array + */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) public async getAllBuckets(): Promise> { return await this._transaction(async () => { @@ -411,7 +392,7 @@ class NodeGraph { // 2. 
Re-add all the nodes from all buckets for (const b of buckets) { for (const n of Object.keys(b)) { - const nodeId: NodeId = IdInternal.fromString(n); + const nodeId = IdInternal.fromString(n); const newIndex = this.getBucketIndex(nodeId); let expectedBucket = tempBuckets[newIndex]; // The following is more or less copied from setNodeOps @@ -424,7 +405,7 @@ class NodeGraph { address: b[nodeId].address, lastUpdated: b[nodeId].lastUpdated, }; - // If, with the old node added, we exceed the limit... + // If, with the old node added, we exceed the limit if (bucketEntries.length > this.maxNodesPerBucket) { // Then, with the old node added, find the least active and remove const leastActive = bucketEntries.reduce((prev, curr) => { @@ -449,157 +430,6 @@ class NodeGraph { await this.db.batch(ops); }); } - - /** - * Finds the set of nodes (of size k) known by the current node (i.e. in its - * buckets database) that have the smallest distance to the target node (i.e. - * are closest to the target node). - * i.e. 
FIND_NODE RPC from Kademlia spec - * - * @param targetNodeId the node ID to find other nodes closest to it - * @param numClosest the number of closest nodes to return (by default, returns - * according to the maximum number of nodes per bucket) - * @returns a mapping containing exactly k nodeIds -> nodeAddresses (unless the - * current node has less than k nodes in all of its buckets, in which case it - * returns all nodes it has knowledge of) - */ - @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async getClosestLocalNodes( - targetNodeId: NodeId, - numClosest: number = this.maxNodesPerBucket, - ): Promise> { - // Retrieve all nodes from buckets in database - const buckets = await this.getAllBuckets(); - // Iterate over all of the nodes in each bucket - const distanceToNodes: Array = []; - buckets.forEach(function (bucket) { - for (const nodeIdString of Object.keys(bucket)) { - const nodeId: NodeId = IdInternal.fromString(nodeIdString); - // Compute the distance from the node, and add it to the array. - distanceToNodes.push({ - id: nodeId, - address: bucket[nodeId].address, - distance: nodesUtils.calculateDistance(nodeId, targetNodeId), - }); - } - }); - // Sort the array (based on the distance at index 1) - distanceToNodes.sort(nodesUtils.sortByDistance); - // Return the closest k nodes (i.e. the first k), or all nodes if < k in array - return distanceToNodes.slice(0, numClosest); - } - - /** - * Attempts to locate a target node in the network (using Kademlia). - * Adds all discovered, active nodes to the current node's database (up to k - * discovered nodes). - * Once the target node is found, the method returns and stops trying to locate - * other nodes. - * - * Ultimately, attempts to perform a "DNS resolution" on the given target node - * ID (i.e. given a node ID, retrieves the node address, containing its IP and - * port). - * @param targetNodeId ID of the node attempting to be found (i.e. 
attempting - * to find its IP address and port) - * @returns whether the target node was located in the process - */ - @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async getClosestGlobalNodes( - targetNodeId: NodeId, - ): Promise { - // Let foundTarget: boolean = false; - let foundAddress: NodeAddress | undefined = undefined; - // Get the closest alpha nodes to the target node (set as shortlist) - const shortlist: Array = await this.getClosestLocalNodes( - targetNodeId, - this.maxConcurrentNodeConnections, - ); - // If we have no nodes at all in our database (even after synchronising), - // then we should throw an error. We aren't going to find any others. - if (shortlist.length === 0) { - throw new nodesErrors.ErrorNodeGraphEmptyDatabase(); - } - // Need to keep track of the nodes that have been contacted. - // Not sufficient to simply check if there's already a pre-existing connection - // in nodeConnections - what if there's been more than 1 invocation of - // getClosestGlobalNodes()? - const contacted: { [nodeId: string]: boolean } = {}; - // Iterate until we've found found and contacted k nodes - while (Object.keys(contacted).length <= this.maxNodesPerBucket) { - // While (!foundTarget) { - // Remove the node from the front of the array - const nextNode = shortlist.shift(); - // If we have no nodes left in the shortlist, then stop - if (nextNode == null) { - break; - } - // Skip if the node has already been contacted - if (contacted[nextNode.id]) { - continue; - } - // Connect to the node (check if pre-existing connection exists, otherwise - // create a new one) - let nodeConnection: NodeConnection; - try { - // Add the node to the database so that we can find its address in - // call to getConnectionToNode - await this.setNode(nextNode.id, nextNode.address); - nodeConnection = await this.nodeManager.getConnectionToNode( - nextNode.id, - ); - } catch (e) { - // If we can't connect to the node, then skip it. 
- continue; - } - contacted[nextNode.id] = true; - // Ask the node to get their own closest nodes to the target. - const foundClosest = await nodeConnection.getClosestNodes(targetNodeId); - // Check to see if any of these are the target node. At the same time, add - // them to the shortlist. - for (const nodeData of foundClosest) { - // Ignore any nodes that have been contacted - if (contacted[nodeData.id]) { - continue; - } - if (nodeData.id.equals(targetNodeId)) { - // FoundTarget = true; - // Attempt to create a connection to the node. Will throw an error - // (ErrorConnectionStart, from ConnectionForward) if the connection - // cannot be established - - // TODO: For now, will simply add this target node without creating a - // connection to it. - // await this.nodeManager.createConnectionToNode( - // nodeData.id, - // nodeData.address, - // ); - await this.setNode(nodeData.id, nodeData.address); - foundAddress = nodeData.address; - // We have found the target node, so we can stop trying to look for it - // in the shortlist. - break; - } - shortlist.push(nodeData); - } - // To make the number of jumps relatively short, should connect to the node/s - // closest to the target first, and ask if they know of any closer nodes. - // Then we can simply unshift the first (closest) element from the shortlist. 
- shortlist.sort(function (a: NodeData, b: NodeData) { - if (a.distance > b.distance) { - return 1; - } else if (a.distance < b.distance) { - return -1; - } else { - return 0; - } - }); - } - return foundAddress; - } - - public async clearDB() { - await this.nodeGraphDb.clear(); - } } export default NodeGraph; diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index 6da36686a..b28343667 100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -1,244 +1,65 @@ -import type { KeyManager } from '../keys'; +import type { DB } from '@matrixai/db'; +import type NodeConnectionManager from './NodeConnectionManager'; +import type NodeGraph from './NodeGraph'; +import type KeyManager from '../keys/KeyManager'; import type { PublicKeyPem } from '../keys/types'; -import type { Sigchain } from '../sigchain'; +import type Sigchain from '../sigchain/Sigchain'; import type { ChainData, ChainDataEncoded } from '../sigchain/types'; -import type { ClaimIdEncoded } from '../claims/types'; -import type { - NodeId, - NodeAddress, - NodeMapping, - NodeData, - NodeBucket, -} from '../nodes/types'; -import type { SignedNotification } from '../notifications/types'; -import type { Host, Hostname, Port } from '../network/types'; -import type { Timer } from '../types'; -import type { DB } from '@matrixai/db'; - -import type { MutexInterface } from 'async-mutex'; -import type { GRPCClientAgent } from '../agent'; -import type * as nodesPB from '../proto/js/polykey/v1/nodes/nodes_pb'; -import type { ForwardProxy, ReverseProxy } from '../network'; +import type { NodeId, NodeAddress, NodeBucket } from '../nodes/types'; +import type { ClaimEncoded } from '../claims/types'; import Logger from '@matrixai/logger'; -import { Mutex } from 'async-mutex'; -import { - CreateDestroyStartStop, - ready, -} from '@matrixai/async-init/dist/CreateDestroyStartStop'; -import { IdInternal } from '@matrixai/id'; -import NodeGraph from './NodeGraph'; -import NodeConnection from 
'./NodeConnection'; import * as nodesErrors from './errors'; -import { utils as networkUtils, errors as networkErrors } from '../network'; +import * as nodesUtils from './utils'; +import { utils as validationUtils } from '../validation'; +import * as utilsPB from '../proto/js/polykey/v1/utils/utils_pb'; +import * as claimsErrors from '../claims/errors'; +import * as networkErrors from '../network/errors'; +import * as networkUtils from '../network/utils'; import * as sigchainUtils from '../sigchain/utils'; import * as claimsUtils from '../claims/utils'; -import { utils as nodesUtils } from '../nodes'; - -/** - * Data structure to store all NodeConnections. If a connection to a node n does - * not exist, no entry for n will exist in the map. Alternatively, if a - * connection is currently being instantiated by some thread, an entry will - * exist in the map, but only with the lock (no connection object). Once a - * connection is instantiated, the entry in the map is updated to include the - * connection object. 
- */ -type NodeConnectionMap = Map< - string, - { - connection?: NodeConnection; - lock: MutexInterface; - } ->; -interface NodeManager extends CreateDestroyStartStop {} -@CreateDestroyStartStop( - new nodesErrors.ErrorNodeManagerRunning(), - new nodesErrors.ErrorNodeManagerDestroyed(), -) class NodeManager { protected db: DB; protected logger: Logger; - protected lock: Mutex = new Mutex(); - protected nodeGraph: NodeGraph; protected sigchain: Sigchain; protected keyManager: KeyManager; - protected fwdProxy: ForwardProxy; - protected revProxy: ReverseProxy; - // Active connections to other nodes - protected connections: NodeConnectionMap = new Map(); - // Node ID -> node address mappings for the seed nodes - protected seedNodes: NodeMapping = {}; - - static async createNodeManager({ - db, - seedNodes = {}, - keyManager, - sigchain, - fwdProxy, - revProxy, - logger = new Logger(this.name), - fresh = false, - }: { - db: DB; - seedNodes?: NodeMapping; - keyManager: KeyManager; - sigchain: Sigchain; - fwdProxy: ForwardProxy; - revProxy: ReverseProxy; - logger?: Logger; - fresh?: boolean; - }): Promise { - logger.info(`Creating ${this.name}`); - const nodeManager = new NodeManager({ - db, - seedNodes, - keyManager, - sigchain, - fwdProxy, - revProxy, - logger, - }); - await nodeManager.start({ - fresh, - }); - logger.info(`Created ${this.name}`); - return nodeManager; - } + protected nodeConnectionManager: NodeConnectionManager; + protected nodeGraph: NodeGraph; constructor({ db, - seedNodes, keyManager, sigchain, - fwdProxy, - revProxy, + nodeConnectionManager, + nodeGraph, logger, }: { db: DB; - seedNodes: NodeMapping; keyManager: KeyManager; sigchain: Sigchain; - fwdProxy: ForwardProxy; - revProxy: ReverseProxy; - logger: Logger; + nodeConnectionManager: NodeConnectionManager; + nodeGraph: NodeGraph; + logger?: Logger; }) { + this.logger = logger ?? 
new Logger(this.constructor.name); this.db = db; - this.seedNodes = seedNodes; this.keyManager = keyManager; this.sigchain = sigchain; - this.fwdProxy = fwdProxy; - this.revProxy = revProxy; - this.logger = logger; - } - - get locked(): boolean { - return this.lock.isLocked(); - } - - public async start({ - fresh = false, - }: { - fresh?: boolean; - } = {}) { - try { - this.logger.info(`Starting ${this.constructor.name}`); - // Instantiate the node graph (containing Kademlia implementation) - this.nodeGraph = await NodeGraph.createNodeGraph({ - db: this.db, - nodeManager: this, - logger: this.logger, - fresh, - }); - // Add the seed nodes to the NodeGraph - for (const id in this.seedNodes) { - const seedNodeId: NodeId = IdInternal.fromString(id); - await this.nodeGraph.setNode(seedNodeId, this.seedNodes[seedNodeId]); - } - this.logger.info(`Started ${this.constructor.name}`); - } catch (e) { - this.logger.warn(`Failed Starting ${this.constructor.name}`); - await this.nodeGraph.stop(); - throw e; - } - } - - public async stop() { - this.logger.info(`Stopping ${this.constructor.name}`); - for (const [targetNodeId, connLock] of this.connections) { - if (connLock?.connection != null) { - await connLock.connection.stop(); - } - // TODO: Potentially, we could instead re-start any connections in start - // This assumes that after stopping the proxies, their connections are - // also still valid on restart though. - this.connections.delete(targetNodeId.toString()); - } - await this.nodeGraph.stop(); - this.logger.info(`Stopped ${this.constructor.name}`); - } - - public async destroy() { - this.logger.info(`Destroying ${this.constructor.name}`); - // We want to clear any state. 
- await this.nodeGraph.destroy(); - this.logger.info(`Destroyed ${this.constructor.name}`); - } - - /** - * Run several operations within the same lock - * This does not ensure atomicity of the underlying database - * Database atomicity still depends on the underlying operation - */ - public async transaction(f: (that: this) => Promise): Promise { - const release = await this.lock.acquire(); - try { - return await f(this); - } finally { - release(); - } - } - - /** - * Transaction wrapper that will not lock if the operation was executed - * within a transaction context - */ - public async _transaction(f: () => Promise): Promise { - if (this.lock.isLocked()) { - return await f(); - } else { - return await this.transaction(f); - } - } - - @ready(new nodesErrors.ErrorNodeManagerNotRunning()) - public async getClosestLocalNodes( - targetNodeId: NodeId, - ): Promise> { - return await this.nodeGraph.getClosestLocalNodes(targetNodeId); - } - - /** - * Determines whether a node ID -> node address mapping exists in this node's - * node table. - * @param targetNodeId the node ID of the node to find - * @returns true if the node exists in the table, false otherwise - */ - @ready(new nodesErrors.ErrorNodeManagerNotRunning()) - public async knowsNode(targetNodeId: NodeId): Promise { - return !!(await this.nodeGraph.getNode(targetNodeId)); + this.nodeConnectionManager = nodeConnectionManager; + this.nodeGraph = nodeGraph; } /** * Determines whether a node in the Polykey network is online. * @return true if online, false if offline */ - @ready(new nodesErrors.ErrorNodeManagerNotRunning()) public async pingNode(targetNodeId: NodeId): Promise { - const targetAddress: NodeAddress = await this.findNode(targetNodeId); + const targetAddress: NodeAddress = + await this.nodeConnectionManager.findNode(targetNodeId); try { // Attempt to open a connection via the forward proxy // i.e. 
no NodeConnection object created (no need for GRPCClient) - await this.fwdProxy.openConnection( + await this.nodeConnectionManager.holePunchForward( targetNodeId, await networkUtils.resolveHost(targetAddress.host), targetAddress.port, @@ -254,35 +75,22 @@ class NodeManager { return true; } - @ready(new nodesErrors.ErrorNodeManagerNotRunning()) - public getNodeId(): NodeId { - return this.keyManager.getNodeId(); - } - /** * Connects to the target node and retrieves its public key from its root * certificate chain (corresponding to the provided public key fingerprint - * the node ID). */ - @ready(new nodesErrors.ErrorNodeManagerNotRunning()) public async getPublicKey(targetNodeId: NodeId): Promise { - const connection = await this.getConnectionToNode(targetNodeId); - const publicKey = connection.getExpectedPublicKey( + const publicKey = await this.nodeConnectionManager.withConnF( targetNodeId, - ) as PublicKeyPem; - if (!publicKey == null) { + async (connection) => { + return connection.getExpectedPublicKey(targetNodeId); + }, + ); + if (publicKey == null) { throw new nodesErrors.ErrorNodeConnectionPublicKeyNotFound(); } - return publicKey; - } - - /** - * Retrieves the cryptolinks of this node, returning as a collection of - * records (for storage in the gestalt graph) - */ - @ready(new nodesErrors.ErrorNodeManagerNotRunning()) - public async getChainData(): Promise { - return await this.sigchain.getChainData(); + return publicKey as PublicKeyPem; } /** @@ -292,14 +100,40 @@ class NodeManager { * For node1 -> node2 claims, the verification process also involves connecting * to node2 to verify the claim (to retrieve its signing public key). 
*/ - @ready(new nodesErrors.ErrorNodeManagerNotRunning()) public async requestChainData(targetNodeId: NodeId): Promise { - const connection = await this.getConnectionToNode(targetNodeId); // Verify the node's chain with its own public key - const unverifiedChainData = await connection.getChainData(); - const publicKey = connection.getExpectedPublicKey( - targetNodeId, - ) as PublicKeyPem; + const [unverifiedChainData, publicKey] = + await this.nodeConnectionManager.withConnF( + targetNodeId, + async (connection) => { + const unverifiedChainData: ChainDataEncoded = {}; + const emptyMsg = new utilsPB.EmptyMessage(); + const client = connection.getClient(); + const response = await client.nodesChainDataGet(emptyMsg); + // Reconstruct each claim from the returned ChainDataMessage + response.getChainDataMap().forEach((claimMsg, claimId: string) => { + // Reconstruct the signatures array + const signatures: Array<{ signature: string; protected: string }> = + []; + for (const signatureData of claimMsg.getSignaturesList()) { + signatures.push({ + signature: signatureData.getSignature(), + protected: signatureData.getProtected(), + }); + } + // Add to the record of chain data, casting as expected ClaimEncoded + unverifiedChainData[claimId] = { + signatures: signatures, + payload: claimMsg.getPayload(), + } as ClaimEncoded; + }); + const publicKey = connection.getExpectedPublicKey( + targetNodeId, + ) as PublicKeyPem; + return [unverifiedChainData, publicKey]; + }, + ); + if (!publicKey) { throw new nodesErrors.ErrorNodeConnectionPublicKeyNotFound(); } @@ -311,22 +145,22 @@ class NodeManager { // Then, for any node -> node claims, we also need to verify with the // node on the other end of the claim // e.g. 
a node claim from A -> B, verify with B's public key - for (const c in verifiedChainData) { - const claimId = c as ClaimIdEncoded; + for (const claimId in verifiedChainData) { const payload = verifiedChainData[claimId].payload; if (payload.data.type === 'node') { - // TODO: remove ! assertion and perform exception handling in #310 - const endNodeId = nodesUtils.decodeNodeId(payload.data.node2)!; + const endNodeId = validationUtils.parseNodeId(payload.data.node2); let endPublicKey: PublicKeyPem; // If the claim points back to our own node, don't attempt to connect - if (endNodeId.equals(this.getNodeId())) { + if (endNodeId.equals(this.keyManager.getNodeId())) { endPublicKey = this.keyManager.getRootKeyPairPem().publicKey; // Otherwise, get the public key from the root cert chain (by connection) } else { - const endConnection = await this.getConnectionToNode(endNodeId); - endPublicKey = endConnection.getExpectedPublicKey( + endPublicKey = await this.nodeConnectionManager.withConnF( endNodeId, - ) as PublicKeyPem; + async (connection) => { + return connection.getExpectedPublicKey(endNodeId) as PublicKeyPem; + }, + ); if (!endPublicKey) { throw new nodesErrors.ErrorNodeConnectionPublicKeyNotFound(); } @@ -348,290 +182,189 @@ class NodeManager { * Call this function upon receiving a "claim node request" notification from * another node. */ - @ready(new nodesErrors.ErrorNodeManagerNotRunning()) public async claimNode(targetNodeId: NodeId): Promise { - const connection: NodeConnection = await this.getConnectionToNode( - targetNodeId, - ); await this.sigchain.transaction(async (sigchain) => { // 2. Create your intermediary claim const singlySignedClaim = await sigchain.createIntermediaryClaim({ type: 'node', - node1: nodesUtils.encodeNodeId(this.getNodeId()), + node1: nodesUtils.encodeNodeId(this.keyManager.getNodeId()), node2: nodesUtils.encodeNodeId(targetNodeId), }); - // Receive back your verified doubly signed claim. 
- const doublySignedClaim = await connection.claimNode(singlySignedClaim); - await sigchain.addExistingClaim(doublySignedClaim); + let doublySignedClaim: ClaimEncoded; + await this.nodeConnectionManager.withConnF( + targetNodeId, + async (connection) => { + const client = connection.getClient(); + const genClaims = client.nodesCrossSignClaim(); + try { + // 2. Set up the intermediary claim message (the singly signed claim) to send + const crossSignMessage = claimsUtils.createCrossSignMessage({ + singlySignedClaim: singlySignedClaim, + }); + await genClaims.write(crossSignMessage); // Get the generator here + // 3. We expect to receieve our singly signed claim we sent to now be a + // doubly signed claim (signed by the other node), as well as a singly + // signed claim to be signed by us + const readStatus = await genClaims.read(); + // If nothing to read, end and destroy + if (readStatus.done) { + throw new claimsErrors.ErrorEmptyStream(); + } + const receivedMessage = readStatus.value; + const intermediaryClaimMessage = + receivedMessage.getSinglySignedClaim(); + const doublySignedClaimMessage = + receivedMessage.getDoublySignedClaim(); + // Ensure all of our expected messages are defined + if (!intermediaryClaimMessage) { + throw new claimsErrors.ErrorUndefinedSinglySignedClaim(); + } + const intermediaryClaimSignature = + intermediaryClaimMessage.getSignature(); + if (!intermediaryClaimSignature) { + throw new claimsErrors.ErrorUndefinedSignature(); + } + if (!doublySignedClaimMessage) { + throw new claimsErrors.ErrorUndefinedDoublySignedClaim(); + } + // Reconstruct the expected objects from the messages + const constructedIntermediaryClaim = + claimsUtils.reconstructClaimIntermediary( + intermediaryClaimMessage, + ); + const constructedDoublySignedClaim = + claimsUtils.reconstructClaimEncoded(doublySignedClaimMessage); + // Verify the singly signed claim with the sender's public key + const senderPublicKey = + connection.getExpectedPublicKey(targetNodeId); + 
if (!senderPublicKey) { + throw new nodesErrors.ErrorNodeConnectionPublicKeyNotFound(); + } + const verifiedSingly = + await claimsUtils.verifyIntermediaryClaimSignature( + constructedIntermediaryClaim, + senderPublicKey, + ); + if (!verifiedSingly) { + throw new claimsErrors.ErrorSinglySignedClaimVerificationFailed(); + } + // Verify the doubly signed claim with both our public key, and the sender's + const verifiedDoubly = + (await claimsUtils.verifyClaimSignature( + constructedDoublySignedClaim, + this.keyManager.getRootKeyPairPem().publicKey, + )) && + (await claimsUtils.verifyClaimSignature( + constructedDoublySignedClaim, + senderPublicKey, + )); + if (!verifiedDoubly) { + throw new claimsErrors.ErrorDoublySignedClaimVerificationFailed(); + } + // 4. X <- responds with double signing the X signed claim <- Y + const doublySignedClaimResponse = + await claimsUtils.signIntermediaryClaim({ + claim: constructedIntermediaryClaim, + privateKey: this.keyManager.getRootKeyPairPem().privateKey, + signeeNodeId: nodesUtils.encodeNodeId( + this.keyManager.getNodeId(), + ), + }); + // Should never be reached, but just for type safety + if (!doublySignedClaimResponse.payload) { + throw new claimsErrors.ErrorClaimsUndefinedClaimPayload(); + } + const crossSignMessageResponse = claimsUtils.createCrossSignMessage( + { + doublySignedClaim: doublySignedClaimResponse, + }, + ); + await genClaims.write(crossSignMessageResponse); + + // Check the stream is closed (should be closed by other side) + const finalResponse = await genClaims.read(); + if (finalResponse.done != null) { + await genClaims.next(null); + } + + doublySignedClaim = constructedDoublySignedClaim; + } catch (e) { + await genClaims.throw(e); + throw e; + } + await sigchain.addExistingClaim(doublySignedClaim); + }, + ); }); } - @ready(new nodesErrors.ErrorNodeManagerNotRunning()) - public async setNode( - nodeId: NodeId, - nodeAddress: NodeAddress, - ): Promise { - await this.nodeGraph.setNode(nodeId, nodeAddress); - 
} - - @ready(new nodesErrors.ErrorNodeManagerNotRunning()) - public async getClosestGlobalNodes( - targetNodeId: NodeId, - ): Promise { - return await this.nodeGraph.getClosestGlobalNodes(targetNodeId); - } - - @ready(new nodesErrors.ErrorNodeManagerNotRunning()) - public async getAllBuckets(): Promise> { - return await this.nodeGraph.getAllBuckets(); - } - - @ready(new nodesErrors.ErrorNodeManagerNotRunning()) - public async refreshBuckets(): Promise { - this.logger.info('Refreshing buckets'); - return await this.nodeGraph.refreshBuckets(); - } - /** - * Forwards a received hole punch message on to the target. - * If not known, the node ID -> address mapping is attempted to be discovered - * through Kademlia (note, however, this is currently only called by a 'broker' - * node). - * @param message the original relay message (assumed to be created in - * nodeConnection.start()) - */ - @ready(new nodesErrors.ErrorNodeManagerNotRunning()) - public async relayHolePunchMessage(message: nodesPB.Relay): Promise { - const conn = await this.getConnectionToNode( - nodesUtils.decodeNodeId(message.getTargetId())!, - ); - await conn.sendHolePunchMessage( - nodesUtils.decodeNodeId(message.getSrcId())!, - nodesUtils.decodeNodeId(message.getTargetId())!, - message.getEgressAddress(), - Buffer.from(message.getSignature()), - ); - } - - /** - * Sends a notification to a node. + * Retrieves the node Address from the NodeGraph + * @param nodeId node ID of the target node + * @returns Node Address of the target node */ - @ready(new nodesErrors.ErrorNodeManagerNotRunning()) - public async sendNotification( + public async getNodeAddress( nodeId: NodeId, - message: SignedNotification, - ): Promise { - const connection: NodeConnection = await this.getConnectionToNode(nodeId); - await connection.sendNotification(message); - } - - /** - * Treat this node as the client, and attempt to create/retrieve an existing - * undirectional connection to another node (server). 
- * ObjectMap pattern adapted from: - * https://gist.github.com/CMCDragonkai/f58f08e7eaab0430ed4467ca35527a42 - */ - @ready(new nodesErrors.ErrorNodeManagerNotRunning()) - public async getConnectionToNode( - targetNodeId: NodeId, - ): Promise { - let connection: NodeConnection | undefined; - let lock: MutexInterface; - let connAndLock = this.connections.get(targetNodeId.toString()); - if (connAndLock != null) { - ({ connection, lock } = connAndLock); - if (connection != null) { - return connection; - } - let release; - try { - release = await lock.acquire(); - ({ connection, lock } = connAndLock); - if (connection != null) { - return connection; - } - connection = await this.establishNodeConnection(targetNodeId, lock); - connAndLock.connection = connection; - return connection; - } finally { - release(); - } - } else { - lock = new Mutex(); - connAndLock = { lock }; - this.connections.set(targetNodeId.toString(), connAndLock); - let release; - try { - release = await lock.acquire(); - connection = await this.establishNodeConnection(targetNodeId, lock); - connAndLock.connection = connection; - return connection; - } finally { - release(); - } - } + ): Promise { + return await this.nodeGraph.getNode(nodeId); } /** - * Strictly a helper function for this.getConnectionToNode. Do not call this - * function anywhere else. - * To create a connection to a node, always use getConnectionToNode. 
+ * Determines whether a node ID -> node address mapping exists in the NodeGraph + * @param targetNodeId the node ID of the node to find + * @returns true if the node exists in the table, false otherwise */ - @ready(new nodesErrors.ErrorNodeManagerNotRunning()) - protected async establishNodeConnection( - targetNodeId: NodeId, - lock: MutexInterface, - ): Promise { - const targetAddress = await this.findNode(targetNodeId); - // If the stored host is not a valid host (IP address), then we assume it to - // be a hostname - const targetHostname = !(await networkUtils.isHost(targetAddress.host)) - ? (targetAddress.host as Hostname) - : undefined; - const connection = await NodeConnection.createNodeConnection({ - targetNodeId: targetNodeId, - targetHost: await networkUtils.resolveHost(targetAddress.host), - targetHostname: targetHostname, - targetPort: targetAddress.port, - forwardProxy: this.fwdProxy, - keyManager: this.keyManager, - seedConnections: await this.getConnectionsToSeedNodes(), - logger: this.logger, - }); - // Add it to the map of active connections - this.connections.set(targetNodeId.toString(), { connection, lock }); - return connection; + public async knowsNode(targetNodeId: NodeId): Promise { + return await this.nodeGraph.knowsNode(targetNodeId); } /** - * Acquires a map of connections to the seed nodes. - * These connections are expected to have already been established in start(), - * so this should simply be a constant-time retrieval from the NodeConnectionMap. + * Gets the specified bucket from the NodeGraph */ - @ready(new nodesErrors.ErrorNodeManagerNotRunning()) - public async getConnectionsToSeedNodes(): Promise< - Map - > { - const connections: Map = new Map(); - // GetConnectionToNode internally calls this function if the connection to - // some node does not already exist (i.e. there's no existing entry in the - // NodeConnectionMap). 
Therefore, we have the potential for a deadlock if a - // connection to a seed node has been lost or doesn't already exist and - // this function is called: there would be 2 nested calls to - // getConnectionToNode on the seed node, causing a deadlock. To prevent this, - // we do a fail-safe here, where we temporarily clear this.seedNodes, such - // that we don't attempt to use the seed nodes to connect to another seed node. - const seedNodesCopy = this.seedNodes; - this.seedNodes = {}; - try { - for (const id in this.seedNodes) { - const seedNodeId: NodeId = IdInternal.fromString(id); - try { - connections.set( - seedNodeId, - await this.getConnectionToNode(seedNodeId), - ); - } catch (e) { - // If we can't connect to a seed node, simply skip it - if (e instanceof nodesErrors.ErrorNodeConnectionTimeout) { - continue; - } - throw e; - } - } - } finally { - // Even if an exception is thrown, ensure the seed node mappings are reinstated - this.seedNodes = seedNodesCopy; - } - return connections; - } - - @ready(new nodesErrors.ErrorNodeManagerNotRunning()) - public async syncNodeGraph() { - await this.nodeGraph.syncNodeGraph(); + public async getBucket(bucketIndex: number): Promise { + return await this.nodeGraph.getBucket(bucketIndex); } /** - * Treat this node as the server. - * Instruct the reverse proxy to send hole-punching packets back to the target's - * forward proxy, in order to open a connection from the client to this server. - * A connection is established if the client node's forward proxy is sending - * hole punching packets at the same time as this node (acting as the server) - * sends hole-punching packets back to the client's forward proxy. 
- * @param egressHost host of the client's forward proxy - * @param egressPort port of the client's forward proxy - * @param timer + * Sets a node in the NodeGraph */ - @ready(new nodesErrors.ErrorNodeManagerNotRunning()) - public async openConnection( - egressHost: Host, - egressPort: Port, - timer?: Timer, + public async setNode( + nodeId: NodeId, + nodeAddress: NodeAddress, ): Promise { - await this.revProxy.openConnection(egressHost, egressPort, timer); + return await this.nodeGraph.setNode(nodeId, nodeAddress); } /** - * Retrieves the GRPC client associated with a connection to a particular node ID - * @param targetNodeId node ID of the connected node - * @returns GRPC client of the active connection - * @throws ErrorNodeConnectionNotExist if a connection to the target does not exist + * Updates the node in the NodeGraph */ - @ready(new nodesErrors.ErrorNodeManagerNotRunning()) - public async getClient(targetNodeId: NodeId): Promise { - const conn = await this.getConnectionToNode(targetNodeId); - if (conn != null) { - return conn.getClient(); - } else { - throw new nodesErrors.ErrorNodeConnectionNotExist(); - } + public async updateNode( + nodeId: NodeId, + nodeAddress?: NodeAddress, + ): Promise { + return await this.nodeGraph.updateNode(nodeId, nodeAddress); } /** - * Retrieves the node Address - * @param targetNodeId node ID of the target node - * @returns Node Address of the target node + * Removes a node from the NodeGraph */ - @ready(new nodesErrors.ErrorNodeManagerNotRunning()) - public async getNode(targetNodeId: NodeId): Promise { - return await this.nodeGraph.getNode(targetNodeId); + public async unsetNode(nodeId: NodeId): Promise { + return await this.nodeGraph.unsetNode(nodeId); } /** - * Retrieves the node address. If an entry doesn't exist in the db, then - * proceeds to locate it using Kademlia. 
+ * Gets all buckets from the NodeGraph */ - @ready(new nodesErrors.ErrorNodeManagerNotRunning()) - public async findNode(targetNodeId: NodeId): Promise { - // First check if we already have an existing ID -> address record - let address = await this.getNode(targetNodeId); - // Otherwise, attempt to locate it by contacting network - if (address == null) { - address = await this.nodeGraph.getClosestGlobalNodes(targetNodeId); - // TODO: This currently just does one iteration - // If not found in this single iteration, we throw an exception - if (address == null) { - throw new nodesErrors.ErrorNodeGraphNodeNotFound(); - } - } - // We ensure that we always return a NodeAddress (either by lookup, or - // network search) - if we can't locate it from either, we throw an exception - return address; + public async getAllBuckets(): Promise> { + return await this.nodeGraph.getAllBuckets(); } /** - * Retrieves all the vaults for a peers node + * To be called on key renewal. Re-orders all nodes in all buckets with respect + * to the new node ID. 
*/ - @ready(new nodesErrors.ErrorNodeManagerNotRunning()) - public async scanNodeVaults(nodeId: NodeId): Promise> { - // Create a connection to another node - const connection = await this.getConnectionToNode(nodeId); - // Scan the vaults of the node over the connection - return await connection.scanVaults(); - } - - public async clearDB() { - await this.nodeGraph.clearDB(); + public async refreshBuckets(): Promise { + return await this.nodeGraph.refreshBuckets(); } } diff --git a/src/nodes/errors.ts b/src/nodes/errors.ts index e9bb83fa7..d45c83474 100644 --- a/src/nodes/errors.ts +++ b/src/nodes/errors.ts @@ -1,69 +1,79 @@ -import { ErrorPolykey } from '../errors'; +import { ErrorPolykey, sysexits } from '../errors'; class ErrorNodes extends ErrorPolykey {} -class ErrorNodeManagerRunning extends ErrorNodes {} - -class ErrorNodeManagerNotRunning extends ErrorNodes {} - -class ErrorNodeManagerDestroyed extends ErrorNodes {} - -class ErrorNodeGraphRunning extends ErrorNodes {} - -class ErrorNodeGraphNotRunning extends ErrorNodes {} - -class ErrorNodeGraphDestroyed extends ErrorNodes {} - -// Cannot locate a node through getClosestGlobalNodes - -class ErrorNodeGraphNodeNotFound extends ErrorNodes {} - -class ErrorNodeGraphNodeIdMissing extends ErrorNodes {} +class ErrorNodeGraphRunning extends ErrorNodes { + description = 'NodeGraph is running'; + exitCode = sysexits.USAGE; +} -class ErrorNodeGraphSelfConnect extends ErrorNodes {} +class ErrorNodeGraphNotRunning extends ErrorNodes { + description = 'NodeGraph is not running'; + exitCode = sysexits.USAGE; +} -class ErrorNodeGraphEmptyDatabase extends ErrorNodes {} +class ErrorNodeGraphDestroyed extends ErrorNodes { + description = 'NodeGraph is destroyed'; + exitCode = sysexits.USAGE; +} -class ErrorNodeGraphInvalidBucketIndex extends ErrorNodes {} +class ErrorNodeGraphNodeIdNotFound extends ErrorNodes { + description = 'Could not find NodeId'; + exitCode = sysexits.NOUSER; +} -class ErrorNodeConnectionRunning extends 
ErrorNodes {} +class ErrorNodeGraphEmptyDatabase extends ErrorNodes { + description = 'NodeGraph database was empty'; + exitCode = sysexits.USAGE; +} -class ErrorNodeConnectionNotRunning extends ErrorNodes {} class ErrorNodeGraphOversizedBucket extends ErrorNodes { description: 'Bucket invalidly contains more nodes than capacity'; + exitCode = sysexits.USAGE; } -class ErrorNodeConnectionDestroyed extends ErrorNodes {} +class ErrorNodeGraphSameNodeId extends ErrorNodes { + description = 'NodeId must be different for valid bucket calculation'; + exitCode = sysexits.USAGE; +} + +class ErrorNodeConnectionDestroyed extends ErrorNodes { + description = 'NodeConnection is destroyed'; + exitCode = sysexits.USAGE; +} class ErrorNodeConnectionTimeout extends ErrorNodes { description: 'A node connection could not be established (timed out)'; + exitCode = sysexits.UNAVAILABLE; } -class ErrorNodeConnectionNotExist extends ErrorNodes {} +class ErrorNodeConnectionInfoNotExist extends ErrorNodes { + description = 'NodeConnection info was not found'; + exitCode = sysexits.UNAVAILABLE; +} -class ErrorNodeConnectionInfoNotExist extends ErrorNodes {} +class ErrorNodeConnectionPublicKeyNotFound extends ErrorNodes { + description = 'Public key was not found'; + exitCode = sysexits.UNAVAILABLE; +} -class ErrorNodeConnectionPublicKeyNotFound extends ErrorNodes {} +class ErrorNodeConnectionManagerNotRunning extends ErrorNodes { + description = 'NodeConnectionManager is not running'; + exitCode = sysexits.USAGE; +} export { ErrorNodes, - ErrorNodeManagerRunning, - ErrorNodeManagerNotRunning, - ErrorNodeManagerDestroyed, ErrorNodeGraphRunning, ErrorNodeGraphNotRunning, ErrorNodeGraphDestroyed, - ErrorNodeGraphNodeNotFound, - ErrorNodeGraphNodeIdMissing, - ErrorNodeGraphSelfConnect, + ErrorNodeGraphNodeIdNotFound, ErrorNodeGraphEmptyDatabase, - ErrorNodeGraphInvalidBucketIndex, - ErrorNodeConnectionRunning, - ErrorNodeConnectionNotRunning, ErrorNodeGraphOversizedBucket, +
ErrorNodeGraphSameNodeId, ErrorNodeConnectionDestroyed, ErrorNodeConnectionTimeout, - ErrorNodeConnectionNotExist, ErrorNodeConnectionInfoNotExist, ErrorNodeConnectionPublicKeyNotFound, + ErrorNodeConnectionManagerNotRunning, }; diff --git a/src/nodes/index.ts b/src/nodes/index.ts index 603aed437..93d06056f 100644 --- a/src/nodes/index.ts +++ b/src/nodes/index.ts @@ -1,5 +1,6 @@ export { default as NodeManager } from './NodeManager'; export { default as NodeGraph } from './NodeGraph'; +export { default as NodeConnectionManager } from './NodeConnectionManager'; export { default as NodeConnection } from './NodeConnection'; export * as errors from './errors'; export * as types from './types'; diff --git a/src/nodes/types.ts b/src/nodes/types.ts index 76da3d83c..ffb916851 100644 --- a/src/nodes/types.ts +++ b/src/nodes/types.ts @@ -1,10 +1,11 @@ +import type { Id } from '@matrixai/id'; import type { Opaque } from '../types'; import type { Host, Hostname, Port } from '../network/types'; import type { Claim, ClaimId } from '../claims/types'; import type { ChainData } from '../sigchain/types'; -import type { Id } from '@matrixai/id'; type NodeId = Opaque<'NodeId', Id>; +type NodeIdString = Opaque<'NodeIdString', string>; type NodeIdEncoded = Opaque<'NodeIdEncoded', string>; type NodeAddress = { @@ -12,9 +13,7 @@ type NodeAddress = { port: Port; }; -type NodeMapping = { - [key: string]: NodeAddress; -}; +type SeedNodes = Record; type NodeData = { id: NodeId; @@ -69,9 +68,10 @@ type NodeGraphOp = export type { NodeId, + NodeIdString, NodeIdEncoded, NodeAddress, - NodeMapping, + SeedNodes, NodeData, NodeClaim, NodeInfo, diff --git a/src/nodes/utils.ts b/src/nodes/utils.ts index e49ea9b41..696e31d43 100644 --- a/src/nodes/utils.ts +++ b/src/nodes/utils.ts @@ -1,23 +1,15 @@ import type { NodeData, NodeId, NodeIdEncoded } from './types'; import { IdInternal } from '@matrixai/id'; +import { bytes2BigInt } from '../utils'; /** * Compute the distance between two nodes. 
* distance = nodeId1 ^ nodeId2 * where ^ = bitwise XOR operator */ -function calculateDistance(nodeId1: NodeId, nodeId2: NodeId): BigInt { - const bufferId1: Buffer = nodeId1.toBuffer(); - const bufferId2: Buffer = nodeId2.toBuffer(); - let distance = BigInt(0); - let i = 0; - const min = Math.min(bufferId1.length, bufferId2.length); - const max = Math.max(bufferId1.length, bufferId2.length); - for (; i < min; ++i) { - distance = distance * BigInt(256) + BigInt(bufferId1[i] ^ bufferId2[i]); - } - for (; i < max; ++i) distance = BigInt(distance) * BigInt(256) + BigInt(255); - return distance; +function calculateDistance(nodeId1: NodeId, nodeId2: NodeId): bigint { + const distance = nodeId1.map((byte, i) => byte ^ nodeId2[i]); + return bytes2BigInt(distance); } /** @@ -31,24 +23,16 @@ function calculateDistance(nodeId1: NodeId, nodeId2: NodeId): BigInt { * order of the passed parameters is actually irrelevant. These variables are * purely named for communicating function purpose. */ -function calculateBucketIndex( - sourceNode: NodeId, - targetNode: NodeId, - nodeIdBits: number = 256, -) { - const distance = calculateDistance(sourceNode, targetNode); - // Start at the last bucket: most likely to be here based on relation of - // bucket index to distance - let bucketIndex = nodeIdBits - 1; - for (; bucketIndex >= 0; bucketIndex--) { - const lowerBound = BigInt(2) ** BigInt(bucketIndex); - const upperBound = BigInt(2) ** BigInt(bucketIndex + 1); - // If 2^i <= distance (from current node) < 2^(i+1), - // then break and return current index - if (lowerBound <= distance && distance < upperBound) { - break; - } +function calculateBucketIndex(sourceNode: NodeId, targetNode: NodeId): number { + const distance = sourceNode.map((byte, i) => byte ^ targetNode[i]); + const MSByteIndex = distance.findIndex((byte) => byte !== 0); + if (MSByteIndex === -1) { + throw new RangeError('NodeIds cannot be the same'); } + const MSByte = distance[MSByteIndex]; + const MSBitIndex = 
Math.trunc(Math.log2(MSByte)); + const bytesLeft = distance.byteLength - MSByteIndex - 1; + const bucketIndex = MSBitIndex + bytesLeft * 8; return bucketIndex; } @@ -65,10 +49,16 @@ function sortByDistance(a: NodeData, b: NodeData) { } } +/** + * Encodes the NodeId as a `base32hex` string + */ function encodeNodeId(nodeId: NodeId): NodeIdEncoded { return nodeId.toMultibase('base32hex') as NodeIdEncoded; } +/** + * Decodes an encoded NodeId string into a NodeId + */ function decodeNodeId(nodeIdEncoded: any): NodeId | undefined { if (typeof nodeIdEncoded !== 'string') { return; diff --git a/src/notifications/NotificationsManager.ts b/src/notifications/NotificationsManager.ts index cb0fa470e..ee40f9f0c 100644 --- a/src/notifications/NotificationsManager.ts +++ b/src/notifications/NotificationsManager.ts @@ -7,7 +7,7 @@ import type { import type { ACL } from '../acl'; import type { DB, DBLevel } from '@matrixai/db'; import type { KeyManager } from '../keys'; -import type { NodeManager } from '../nodes'; +import type { NodeManager, NodeConnectionManager } from '../nodes'; import type { NodeId } from '../nodes/types'; import Logger from '@matrixai/logger'; import { IdInternal } from '@matrixai/id'; @@ -20,6 +20,7 @@ import { utils as idUtils } from '@matrixai/id'; import * as notificationsUtils from './utils'; import * as notificationsErrors from './errors'; import { createNotificationIdGenerator } from './utils'; +import * as notificationsPB from '../proto/js/polykey/v1/notifications/notifications_pb'; import { utils as nodesUtils } from '../nodes'; const MESSAGE_COUNT_KEY = 'numMessages'; @@ -38,6 +39,7 @@ class NotificationsManager { protected db: DB; protected keyManager: KeyManager; protected nodeManager: NodeManager; + protected nodeConnectionManager: NodeConnectionManager; protected messageCap: number; @@ -56,6 +58,7 @@ class NotificationsManager { static async createNotificationsManager({ acl, db, + nodeConnectionManager, nodeManager, keyManager, messageCap = 
10000, @@ -64,6 +67,7 @@ class NotificationsManager { }: { acl: ACL; db: DB; + nodeConnectionManager: NodeConnectionManager; nodeManager: NodeManager; keyManager: KeyManager; messageCap?: number; @@ -77,6 +81,7 @@ class NotificationsManager { keyManager, logger, messageCap, + nodeConnectionManager, nodeManager, }); @@ -88,6 +93,7 @@ class NotificationsManager { constructor({ acl, db, + nodeConnectionManager, nodeManager, keyManager, messageCap, @@ -95,6 +101,7 @@ class NotificationsManager { }: { acl: ACL; db: DB; + nodeConnectionManager: NodeConnectionManager; nodeManager: NodeManager; keyManager: KeyManager; messageCap: number; @@ -105,6 +112,7 @@ class NotificationsManager { this.acl = acl; this.db = db; this.keyManager = keyManager; + this.nodeConnectionManager = nodeConnectionManager; this.nodeManager = nodeManager; } @@ -190,14 +198,19 @@ class NotificationsManager { public async sendNotification(nodeId: NodeId, data: NotificationData) { const notification = { data: data, - senderId: nodesUtils.encodeNodeId(this.nodeManager.getNodeId()), + senderId: nodesUtils.encodeNodeId(this.keyManager.getNodeId()), isRead: false, }; const signedNotification = await notificationsUtils.signNotification( notification, this.keyManager.getRootKeyPairPem(), ); - await this.nodeManager.sendNotification(nodeId, signedNotification); + const notificationMsg = new notificationsPB.AgentNotification(); + notificationMsg.setContent(signedNotification); + await this.nodeConnectionManager.withConnF(nodeId, async (connection) => { + const client = connection.getClient(); + await client.notificationsSend(notificationMsg); + }); } /** diff --git a/src/proto/js/polykey/v1/test_service_grpc_pb.d.ts b/src/proto/js/polykey/v1/test_service_grpc_pb.d.ts index 431c63254..0f5eae313 100644 --- a/src/proto/js/polykey/v1/test_service_grpc_pb.d.ts +++ b/src/proto/js/polykey/v1/test_service_grpc_pb.d.ts @@ -14,6 +14,8 @@ interface ITestServiceService extends grpc.ServiceDefinition { @@ -61,6 +63,24 @@ 
interface ITestServiceService_IUnaryAuthenticated extends grpc.MethodDefinition< responseSerialize: grpc.serialize; responseDeserialize: grpc.deserialize; } +interface ITestServiceService_IunaryFail extends grpc.MethodDefinition { + path: "/polykey.v1.TestService/unaryFail"; + requestStream: false; + responseStream: false; + requestSerialize: grpc.serialize; + requestDeserialize: grpc.deserialize; + responseSerialize: grpc.serialize; + responseDeserialize: grpc.deserialize; +} +interface ITestServiceService_IserverStreamFail extends grpc.MethodDefinition { + path: "/polykey.v1.TestService/serverStreamFail"; + requestStream: false; + responseStream: true; + requestSerialize: grpc.serialize; + requestDeserialize: grpc.deserialize; + responseSerialize: grpc.serialize; + responseDeserialize: grpc.deserialize; +} export const TestServiceService: ITestServiceService; @@ -70,6 +90,8 @@ export interface ITestServiceServer extends grpc.UntypedServiceImplementation { clientStream: grpc.handleClientStreamingCall; duplexStream: grpc.handleBidiStreamingCall; unaryAuthenticated: grpc.handleUnaryCall; + unaryFail: grpc.handleUnaryCall; + serverStreamFail: grpc.handleServerStreamingCall; } export interface ITestServiceClient { @@ -88,6 +110,11 @@ export interface ITestServiceClient { unaryAuthenticated(request: polykey_v1_utils_utils_pb.EchoMessage, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EchoMessage) => void): grpc.ClientUnaryCall; unaryAuthenticated(request: polykey_v1_utils_utils_pb.EchoMessage, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EchoMessage) => void): grpc.ClientUnaryCall; unaryAuthenticated(request: polykey_v1_utils_utils_pb.EchoMessage, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EchoMessage) => void): grpc.ClientUnaryCall; + unaryFail(request: polykey_v1_utils_utils_pb.EchoMessage, 
callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EchoMessage) => void): grpc.ClientUnaryCall; + unaryFail(request: polykey_v1_utils_utils_pb.EchoMessage, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EchoMessage) => void): grpc.ClientUnaryCall; + unaryFail(request: polykey_v1_utils_utils_pb.EchoMessage, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EchoMessage) => void): grpc.ClientUnaryCall; + serverStreamFail(request: polykey_v1_utils_utils_pb.EchoMessage, options?: Partial): grpc.ClientReadableStream; + serverStreamFail(request: polykey_v1_utils_utils_pb.EchoMessage, metadata?: grpc.Metadata, options?: Partial): grpc.ClientReadableStream; } export class TestServiceClient extends grpc.Client implements ITestServiceClient { @@ -106,4 +133,9 @@ export class TestServiceClient extends grpc.Client implements ITestServiceClient public unaryAuthenticated(request: polykey_v1_utils_utils_pb.EchoMessage, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EchoMessage) => void): grpc.ClientUnaryCall; public unaryAuthenticated(request: polykey_v1_utils_utils_pb.EchoMessage, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EchoMessage) => void): grpc.ClientUnaryCall; public unaryAuthenticated(request: polykey_v1_utils_utils_pb.EchoMessage, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EchoMessage) => void): grpc.ClientUnaryCall; + public unaryFail(request: polykey_v1_utils_utils_pb.EchoMessage, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EchoMessage) => void): grpc.ClientUnaryCall; + public unaryFail(request: polykey_v1_utils_utils_pb.EchoMessage, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, 
response: polykey_v1_utils_utils_pb.EchoMessage) => void): grpc.ClientUnaryCall; + public unaryFail(request: polykey_v1_utils_utils_pb.EchoMessage, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EchoMessage) => void): grpc.ClientUnaryCall; + public serverStreamFail(request: polykey_v1_utils_utils_pb.EchoMessage, options?: Partial): grpc.ClientReadableStream; + public serverStreamFail(request: polykey_v1_utils_utils_pb.EchoMessage, metadata?: grpc.Metadata, options?: Partial): grpc.ClientReadableStream; } diff --git a/src/proto/js/polykey/v1/test_service_grpc_pb.js b/src/proto/js/polykey/v1/test_service_grpc_pb.js index 5a16c681b..6247b6805 100644 --- a/src/proto/js/polykey/v1/test_service_grpc_pb.js +++ b/src/proto/js/polykey/v1/test_service_grpc_pb.js @@ -72,6 +72,28 @@ var TestServiceService = exports.TestServiceService = { responseSerialize: serialize_polykey_v1_utils_EchoMessage, responseDeserialize: deserialize_polykey_v1_utils_EchoMessage, }, + unaryFail: { + path: '/polykey.v1.TestService/unaryFail', + requestStream: false, + responseStream: false, + requestType: polykey_v1_utils_utils_pb.EchoMessage, + responseType: polykey_v1_utils_utils_pb.EchoMessage, + requestSerialize: serialize_polykey_v1_utils_EchoMessage, + requestDeserialize: deserialize_polykey_v1_utils_EchoMessage, + responseSerialize: serialize_polykey_v1_utils_EchoMessage, + responseDeserialize: deserialize_polykey_v1_utils_EchoMessage, + }, + serverStreamFail: { + path: '/polykey.v1.TestService/serverStreamFail', + requestStream: false, + responseStream: true, + requestType: polykey_v1_utils_utils_pb.EchoMessage, + responseType: polykey_v1_utils_utils_pb.EchoMessage, + requestSerialize: serialize_polykey_v1_utils_EchoMessage, + requestDeserialize: deserialize_polykey_v1_utils_EchoMessage, + responseSerialize: serialize_polykey_v1_utils_EchoMessage, + responseDeserialize: deserialize_polykey_v1_utils_EchoMessage, + }, 
}; exports.TestServiceClient = grpc.makeGenericClientConstructor(TestServiceService); diff --git a/src/proto/schemas/polykey/v1/test_service.proto b/src/proto/schemas/polykey/v1/test_service.proto index 44e8cc5b1..54bf4934f 100644 --- a/src/proto/schemas/polykey/v1/test_service.proto +++ b/src/proto/schemas/polykey/v1/test_service.proto @@ -10,4 +10,6 @@ service TestService { rpc ClientStream(stream polykey.v1.utils.EchoMessage) returns (polykey.v1.utils.EchoMessage) {}; rpc DuplexStream(stream polykey.v1.utils.EchoMessage) returns (stream polykey.v1.utils.EchoMessage) {}; rpc UnaryAuthenticated(polykey.v1.utils.EchoMessage) returns (polykey.v1.utils.EchoMessage) {}; + rpc unaryFail (polykey.v1.utils.EchoMessage) returns (polykey.v1.utils.EchoMessage); + rpc serverStreamFail (polykey.v1.utils.EchoMessage) returns (stream polykey.v1.utils.EchoMessage); } diff --git a/src/utils/binary.ts b/src/utils/binary.ts new file mode 100644 index 000000000..1b41943f5 --- /dev/null +++ b/src/utils/binary.ts @@ -0,0 +1,134 @@ +/** + * Binary helper functions + * @module + */ + +/** + * Uint8Array to hex string + */ +function bytes2Hex(bytes: Uint8Array): string { + return [...bytes].map((n) => dec2Hex(n, 2)).join(''); +} + +/** + * Hex string as Uint8Array + */ +function hex2Bytes(hex: string): Uint8Array { + const numbers = strChunks(hex, 2).map((b) => parseInt(b, 16)); + return new Uint8Array(numbers); +} + +/** + * Uint8Array to bit string + */ +function bytes2Bits(bytes: Uint8Array): string { + return [...bytes].map((n) => dec2Bits(n, 8)).join(''); +} + +/** + * Bit string to Uint8Array + */ +function bits2Bytes(bits: string): Uint8Array { + const numbers = strChunks(bits, 8).map((b) => parseInt(b, 2)); + return new Uint8Array(numbers); +} + +/** + * Uint8Array to Positive BigInt + */ +function bytes2BigInt(bytes: Uint8Array): bigint { + const hex = bytes2Hex(bytes); + return BigInt('0x' + hex); +} + +/** + * Positive BigInt to Uint8Array + * Big-endian order + */ +function 
bigInt2Bytes(bigInt: bigint, size?: number): Uint8Array { + if (bigInt < 0) { + throw new RangeError('bigInt must be positive'); + } + let hex; + if (size != null) { + bigInt %= BigInt(16 ** (size * 2)); + hex = bigInt.toString(16).padStart(size * 2, '0'); + } else { + hex = bigInt.toString(16); + if (hex.length % 2) { + hex = '0' + hex; + } + } + return hex2Bytes(hex); +} + +/** + * Positive BigInt numbers to hex string + * Big-endian order + */ +function bigInt2Hex(bigInt: bigint, size?: number): string { + // Cannot coerce bigint to unsigned bigint + // Because it requires clamping to a specified bitsize + // And there's no static bitsize for bigint + if (bigInt < 0) { + throw new RangeError('bigInt must be positive'); + } + let hex; + if (size != null) { + bigInt %= BigInt(16 ** size); + hex = bigInt.toString(16).padStart(size, '0'); + } else { + hex = bigInt.toString(16); + } + return hex; +} + +/** + * Positive base 10 numbers to hex string + * Big-endian order + * Use parseInt for vice-versa + */ +function dec2Hex(dec: number, size: number): string { + dec %= 16 ** size; + // `>>>` coerces dec to unsigned integer + return (dec >>> 0).toString(16).padStart(size, '0'); +} + +/** + * Positive base 10 numbers to bit string + * Big-endian order + * Use parseInt for vice-versa + */ +function dec2Bits(dec: number, size: number): string { + dec %= 2 ** size; + // `>>>` coerces dec to unsigned integer + return (dec >>> 0).toString(2).padStart(size, '0'); +} + +/** + * Chunks strings into same size chunks + * The last chunk will be smaller if a clean division is not possible + */ +function strChunks(str: string, size: number): Array { + const chunkCount = Math.ceil(str.length / size); + const chunks = new Array(chunkCount); + let i = 0; + let o = 0; + for (; i < chunkCount; ++i, o += size) { + chunks[i] = str.substr(o, size); + } + return chunks; +} + +export { + bytes2Hex, + hex2Bytes, + bytes2Bits, + bits2Bytes, + bytes2BigInt, + bigInt2Bytes, + bigInt2Hex, + 
dec2Hex, + dec2Bits, + strChunks, +}; diff --git a/src/utils/index.ts b/src/utils/index.ts index 17dd51405..cbb38a8be 100644 --- a/src/utils/index.ts +++ b/src/utils/index.ts @@ -3,4 +3,5 @@ export * from './locks'; export * from './context'; export * from './utils'; export * from './matchers'; +export * from './binary'; export * as errors from './errors'; diff --git a/src/validation/utils.ts b/src/validation/utils.ts index 5659fb878..4137486e6 100644 --- a/src/validation/utils.ts +++ b/src/validation/utils.ts @@ -6,7 +6,7 @@ * The parse error message must focus on why the validation failed * @module */ -import type { NodeId } from '../nodes/types'; +import type { NodeId, SeedNodes } from '../nodes/types'; import type { ProviderId, IdentityId } from '../identities/types'; import type { GestaltAction, GestaltId } from '../gestalts/types'; import type { VaultAction } from '../vaults/types'; @@ -18,6 +18,7 @@ import * as gestaltsUtils from '../gestalts/utils'; import * as vaultsUtils from '../vaults/utils'; import * as networkUtils from '../network/utils'; import * as claimsUtils from '../claims/utils'; +import config from '../config'; function parseInteger(data: any): number { data = parseInt(data); @@ -173,6 +174,75 @@ function parsePort(data: any): Port { return data; } +function parseNetwork(data: any): SeedNodes { + if (typeof data !== 'string' || !(data in config.defaults.network)) { + throw new validationErrors.ErrorParse( + `Network must be one of ${Object.keys(config.defaults.network).join( + ', ', + )}`, + ); + } + return config.defaults.network[data]; +} + +/** + * Seed nodes expected to be of form 'nodeId1@host:port;nodeId2@host:port;...' + * By default, any specified seed nodes (in CLI option, or environment variable) + * will overwrite the default nodes in src/config.ts. 
+ * Special flag `&lt;defaults&gt;` indicates that the default seed + * nodes should be added to the starting seed nodes instead of being overwritten + */ +function parseSeedNodes(data: any): [SeedNodes, boolean] { + if (typeof data !== 'string') { + throw new validationErrors.ErrorParse( + 'Seed nodes must be of format `nodeId@host:port;...`', + ); + } + const seedNodes: SeedNodes = {}; + // Determines whether the defaults flag is set or not + let defaults = false; + // If explicitly set to an empty string, then no seed nodes and no defaults + if (data === '') return [seedNodes, defaults]; + for (const seedNodeString of data.split(';')) { + // Empty string will occur if there's an extraneous ';' (e.g. at end of env) + if (seedNodeString === '') continue; + if (seedNodeString === '&lt;defaults&gt;') { + defaults = true; + continue; + } + let seedNodeUrl: URL; + try { + seedNodeUrl = new URL(`pk://${seedNodeString}`); + } catch (e) { + if (e instanceof TypeError) { + throw new validationErrors.ErrorParse( + 'Seed nodes must be of format `nodeId@host:port;...`', + ); + } + throw e; + } + const nodeIdEncoded = seedNodeUrl.username; + // Remove square braces for IPv6 + const nodeHostOrHostname = seedNodeUrl.hostname.replace(/[\[\]]/g, ''); + const nodePort = seedNodeUrl.port; + try { + parseNodeId(nodeIdEncoded); + seedNodes[nodeIdEncoded] = { + host: parseHostOrHostname(nodeHostOrHostname), + port: parsePort(nodePort), + }; + } catch (e) { + if (e instanceof validationErrors.ErrorParse) { + throw new validationErrors.ErrorParse( + 'Seed nodes must be of format `nodeId@host:port;...`', + ); + } + throw e; + } + } + return [seedNodes, defaults]; +} + export { parseInteger, parseNumber, @@ -187,4 +257,6 @@ export { parseHostname, parseHostOrHostname, parsePort, + parseNetwork, + parseSeedNodes, }; diff --git a/src/vaults/VaultInternal.ts b/src/vaults/VaultInternal.ts index dcc80ae05..ff737fc11 100644 --- a/src/vaults/VaultInternal.ts +++ b/src/vaults/VaultInternal.ts @@ -324,8 +324,9 @@ class
VaultInternal { }); this.workingDir = commit_; } catch (err) { - if (err.code === 'NotFoundError') + if (err.code === 'NotFoundError') { throw new vaultsErrors.ErrorVaultCommitUndefined(); + } throw err; } } diff --git a/src/vaults/VaultManager.ts b/src/vaults/VaultManager.ts index e17d90d7b..613603117 100644 --- a/src/vaults/VaultManager.ts +++ b/src/vaults/VaultManager.ts @@ -14,10 +14,11 @@ import type { PolykeyWorkerManagerInterface } from '../workers/types'; import type { MutexInterface } from 'async-mutex'; import type { POJO } from 'encryptedfs'; import type { KeyManager } from '../keys'; -import type { NodeManager } from '../nodes'; import type { GestaltGraph } from '../gestalts'; import type { ACL } from '../acl'; import type { NotificationsManager } from '../notifications'; +import type { NodeConnection, NodeConnectionManager } from '../nodes'; +import type { GRPCClientAgent } from '../agent'; import path from 'path'; import Logger from '@matrixai/logger'; import { Mutex } from 'async-mutex'; @@ -50,7 +51,7 @@ class VaultManager { public readonly vaultsPath: string; protected fs: FileSystem; - protected nodeManager: NodeManager; + protected nodeConnectionManager: NodeConnectionManager; protected gestaltGraph: GestaltGraph; protected acl: ACL; protected notificationsManager: NotificationsManager; @@ -68,7 +69,7 @@ class VaultManager { static async createVaultManager({ vaultsPath, keyManager, - nodeManager, + nodeConnectionManager, gestaltGraph, acl, db, @@ -79,7 +80,7 @@ class VaultManager { }: { vaultsPath: string; keyManager: KeyManager; - nodeManager: NodeManager; + nodeConnectionManager: NodeConnectionManager; gestaltGraph: GestaltGraph; acl: ACL; db: DB; @@ -92,7 +93,7 @@ class VaultManager { const vaultManager = new VaultManager({ vaultsPath, keyManager, - nodeManager, + nodeConnectionManager, gestaltGraph, acl, db, @@ -108,7 +109,7 @@ class VaultManager { constructor({ vaultsPath, keyManager, - nodeManager, + nodeConnectionManager, gestaltGraph, 
acl, db, @@ -118,7 +119,7 @@ class VaultManager { }: { vaultsPath: string; keyManager: KeyManager; - nodeManager: NodeManager; + nodeConnectionManager: NodeConnectionManager; gestaltGraph: GestaltGraph; acl: ACL; db: DB; @@ -128,7 +129,7 @@ class VaultManager { }) { this.vaultsPath = vaultsPath; this.keyManager = keyManager; - this.nodeManager = nodeManager; + this.nodeConnectionManager = nodeConnectionManager; this.gestaltGraph = gestaltGraph; this.acl = acl; this.db = db; @@ -385,139 +386,154 @@ class VaultManager { vaultNameOrId: VaultId | VaultName, ): Promise { let vaultName, remoteVaultId; - const nodeConnection = await this.nodeManager.getConnectionToNode(nodeId); - const client = nodeConnection.getClient(); - const vaultId = await this.generateVaultId(); - const lock = new Mutex(); - this.vaultsMap.set(idUtils.toString(vaultId), { lock }); - return await this._transaction(async () => { - await this.efs.mkdir( - path.join(vaultsUtils.makeVaultIdPretty(vaultId), 'contents'), - { recursive: true }, - ); - const request = async ({ - url, - method = 'GET', - headers = {}, - body = [Buffer.from('')], - }: { - url: string; - method: string; - headers: POJO; - body: Buffer[]; - }) => { - if (method === 'GET') { - const infoResponse = { - async *[Symbol.iterator]() { - const request = new vaultsPB.Vault(); - if (typeof vaultNameOrId === 'string') { - request.setNameOrId(vaultNameOrId); - } else { - request.setNameOrId(idUtils.toString(vaultNameOrId)); - } - const response = client.vaultsGitInfoGet(request); - response.stream.on('metadata', async (meta) => { - vaultName = meta.get('vaultName').pop()!.toString(); - remoteVaultId = makeVaultId( - meta.get('vaultId').pop()!.toString(), - ); - }); - for await (const resp of response) { - yield resp.getChunk_asU8(); - } - }, - }; - return { - url: url, - method: method, - body: infoResponse, - headers: headers, - statusCode: 200, - statusMessage: 'OK', + return await this.nodeConnectionManager.withConnF( + nodeId, + async 
(connection) => { + const client = connection.getClient(); + const vaultId = await this.generateVaultId(); + const lock = new Mutex(); + this.vaultsMap.set(idUtils.toString(vaultId), { lock }); + return await this._transaction(async () => { + await this.efs.mkdir( + path.join(vaultsUtils.makeVaultIdPretty(vaultId), 'contents'), + { recursive: true }, + ); + const request = async ({ + url, + method = 'GET', + headers = {}, + body = [Buffer.from('')], + }: { + url: string; + method: string; + headers: POJO; + body: Buffer[]; + }) => { + if (method === 'GET') { + const infoResponse = { + async *[Symbol.iterator]() { + const request = new vaultsPB.Vault(); + if (typeof vaultNameOrId === 'string') { + request.setNameOrId(vaultNameOrId); + } else { + request.setNameOrId(idUtils.toString(vaultNameOrId)); + } + const response = client.vaultsGitInfoGet(request); + response.stream.on('metadata', async (meta) => { + vaultName = meta.get('vaultName').pop()!.toString(); + remoteVaultId = makeVaultId( + meta.get('vaultId').pop()!.toString(), + ); + }); + for await (const resp of response) { + yield resp.getChunk_asU8(); + } + }, + }; + return { + url: url, + method: method, + body: infoResponse, + headers: headers, + statusCode: 200, + statusMessage: 'OK', + }; + } else if (method === 'POST') { + const packResponse = { + async *[Symbol.iterator]() { + const responseBuffers: Array = []; + const meta = new grpc.Metadata(); + if (typeof vaultNameOrId === 'string') { + meta.set('vaultNameOrId', vaultNameOrId); + } else { + meta.set( + 'vaultNameOrId', + vaultsUtils.makeVaultIdPretty(vaultNameOrId), + ); + } + const stream = client.vaultsGitPackGet(meta); + const write = utils.promisify(stream.write).bind(stream); + stream.on('data', (d) => { + responseBuffers.push(d.getChunk_asU8()); + }); + const chunk = new vaultsPB.PackChunk(); + chunk.setChunk(body[0]); + write(chunk); + stream.end(); + yield await new Promise((resolve) => { + stream.once('end', () => { + 
resolve(Buffer.concat(responseBuffers)); + }); + }); + }, + }; + return { + url: url, + method: method, + body: packResponse, + headers: headers, + statusCode: 200, + statusMessage: 'OK', + }; + } else { + throw new Error('Method not supported'); + } }; - } else if (method === 'POST') { - const packResponse = { - async *[Symbol.iterator]() { - const responseBuffers: Array = []; - const meta = new grpc.Metadata(); - if (typeof vaultNameOrId === 'string') { - meta.set('vaultNameOrId', vaultNameOrId); - } else { - meta.set( - 'vaultNameOrId', - vaultsUtils.makeVaultIdPretty(vaultNameOrId), - ); - } - const stream = client.vaultsGitPackGet(meta); - const write = utils.promisify(stream.write).bind(stream); - stream.on('data', (d) => { - responseBuffers.push(d.getChunk_asU8()); - }); - const chunk = new vaultsPB.PackChunk(); - chunk.setChunk(body[0]); - write(chunk); - stream.end(); - yield await new Promise((resolve) => { - stream.once('end', () => { - resolve(Buffer.concat(responseBuffers)); - }); - }); + await git.clone({ + fs: this.efs, + http: { request }, + dir: path.join(vaultsUtils.makeVaultIdPretty(vaultId), 'contents'), + gitdir: path.join(vaultsUtils.makeVaultIdPretty(vaultId), '.git'), + url: 'http://', + singleBranch: true, + }); + await this.efs.writeFile( + path.join( + vaultsUtils.makeVaultIdPretty(vaultId), + '.git', + 'packed-refs', + ), + '# pack-refs with: peeled fully-peeled sorted', + ); + const workingDir = ( + await git.log({ + fs: this.efs, + dir: path.join( + vaultsUtils.makeVaultIdPretty(vaultId), + 'contents', + ), + gitdir: path.join(vaultsUtils.makeVaultIdPretty(vaultId), '.git'), + depth: 1, + }) + ).pop()!; + await this.efs.writeFile( + path.join( + vaultsUtils.makeVaultIdPretty(vaultId), + '.git', + 'workingDir', + ), + workingDir.oid, + ); + const vault = await VaultInternal.create({ + vaultId, + keyManager: this.keyManager, + efs: this.efs, + logger: this.logger.getChild(VaultInternal.name), + }); + 
this.vaultsMap.set(idUtils.toString(vaultId), { lock, vault }); + await this.db.put( + this.vaultsNamesDbDomain, + idUtils.toBuffer(vaultId), + { + name: vaultName, + defaultPullNode: nodeId, + defaultPullVault: idUtils.toBuffer(remoteVaultId), }, - }; - return { - url: url, - method: method, - body: packResponse, - headers: headers, - statusCode: 200, - statusMessage: 'OK', - }; - } else { - throw new Error('Method not supported'); - } - }; - await git.clone({ - fs: this.efs, - http: { request }, - dir: path.join(vaultsUtils.makeVaultIdPretty(vaultId), 'contents'), - gitdir: path.join(vaultsUtils.makeVaultIdPretty(vaultId), '.git'), - url: 'http://', - singleBranch: true, - }); - await this.efs.writeFile( - path.join( - vaultsUtils.makeVaultIdPretty(vaultId), - '.git', - 'packed-refs', - ), - '# pack-refs with: peeled fully-peeled sorted', - ); - const workingDir = ( - await git.log({ - fs: this.efs, - dir: path.join(vaultsUtils.makeVaultIdPretty(vaultId), 'contents'), - gitdir: path.join(vaultsUtils.makeVaultIdPretty(vaultId), '.git'), - depth: 1, - }) - ).pop()!; - await this.efs.writeFile( - path.join(vaultsUtils.makeVaultIdPretty(vaultId), '.git', 'workingDir'), - workingDir.oid, - ); - const vault = await VaultInternal.create({ - vaultId, - keyManager: this.keyManager, - efs: this.efs, - logger: this.logger.getChild(VaultInternal.name), - }); - this.vaultsMap.set(idUtils.toString(vaultId), { lock, vault }); - await this.db.put(this.vaultsNamesDbDomain, idUtils.toBuffer(vaultId), { - name: vaultName, - defaultPullNode: nodeId, - defaultPullVault: idUtils.toBuffer(remoteVaultId), - }); - return vault; - }, [vaultId]); + ); + return vault; + }, [vaultId]); + }, + ); } public async pullVault({ @@ -529,6 +545,7 @@ class VaultManager { pullNodeId?: NodeId; pullVaultNameOrId?: VaultId | VaultName; }): Promise { + throw Error('Not fully implemented.'); let metaChange = 0; let vaultMeta, remoteVaultId; return await this._transaction(async () => { @@ -557,10 +574,12 @@ 
class VaultManager { } } } - const nodeConnection = await this.nodeManager.getConnectionToNode( - pullNodeId!, - ); - const client = nodeConnection.getClient(); + // TODO: this will need a generator variant of nodeConnectionManager.withConnection() to fix. + // const nodeConnection = await this.nodeConnectionManager.getConnectionToNode( + // pullNodeId!, + // ); + let nodeConnection: NodeConnection; + const client = nodeConnection!.getClient(); const request = async ({ url, method = 'GET', @@ -739,6 +758,25 @@ class VaultManager { } } + /** + * Retrieves all the vaults for a peers node + */ + @ready(new vaultsErrors.ErrorVaultManagerNotRunning()) + public async scanVaults(targetNodeId: NodeId): Promise> { + // Create the handler for git to scan from + return this.nodeConnectionManager.withConnF( + targetNodeId, + async (connection) => { + const client = connection.getClient(); + const gitRequest = await vaultsUtils.constructGitHandler( + client, + this.keyManager.getNodeId(), + ); + return await gitRequest.scanVaults(); + }, + ); + } + protected async getVault(vaultId: VaultId): Promise { let vault: VaultInternal | undefined; let lock: MutexInterface; diff --git a/tests/agent/GRPCClientAgent.test.ts b/tests/agent/GRPCClientAgent.test.ts index 30710e335..027d28ce9 100644 --- a/tests/agent/GRPCClientAgent.test.ts +++ b/tests/agent/GRPCClientAgent.test.ts @@ -6,11 +6,10 @@ import fs from 'fs'; import os from 'os'; import path from 'path'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; -import { Mutex } from 'async-mutex'; import { DB } from '@matrixai/db'; import { GRPCClientAgent } from '@/agent'; import { KeyManager } from '@/keys'; -import { NodeManager } from '@/nodes'; +import { NodeConnectionManager, NodeGraph, NodeManager } from '@/nodes'; import { VaultManager } from '@/vaults'; import { Sigchain } from '@/sigchain'; import { ACL } from '@/acl'; @@ -24,6 +23,7 @@ import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as 
vaultsPB from '@/proto/js/polykey/v1/vaults/vaults_pb'; import * as nodesPB from '@/proto/js/polykey/v1/nodes/nodes_pb'; import { utils as nodesUtils } from '@/nodes'; +import { RWLock } from '@/utils'; import * as testAgentUtils from './utils'; import * as testUtils from '../utils'; import TestNodeConnection from '../nodes/TestNodeConnection'; @@ -62,6 +62,8 @@ describe(GRPCClientAgent.name, () => { let dbPath: string; let keyManager: KeyManager; let vaultManager: VaultManager; + let nodeGraph: NodeGraph; + let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let sigchain: Sigchain; let acl: ACL; @@ -123,18 +125,32 @@ describe(GRPCClientAgent.name, () => { db: db, logger: logger, }); - nodeManager = await NodeManager.createNodeManager({ + nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy: fwdProxy, + revProxy: revProxy, + logger, + }); + await nodeConnectionManager.start(); + nodeManager = new NodeManager({ db: db, sigchain: sigchain, keyManager: keyManager, - fwdProxy: fwdProxy, - revProxy: revProxy, + nodeGraph: nodeGraph, + nodeConnectionManager: nodeConnectionManager, logger: logger, }); notificationsManager = await NotificationsManager.createNotificationsManager({ acl: acl, db: db, + nodeConnectionManager: nodeConnectionManager, nodeManager: nodeManager, keyManager: keyManager, messageCap: 5, @@ -143,7 +159,7 @@ describe(GRPCClientAgent.name, () => { vaultManager = await VaultManager.createVaultManager({ keyManager: keyManager, vaultsPath: vaultsPath, - nodeManager: nodeManager, + nodeConnectionManager: nodeConnectionManager, vaultsKey: keyManager.vaultKey, db: db, acl: acl, @@ -151,12 +167,13 @@ describe(GRPCClientAgent.name, () => { fs: fs, logger: logger, }); - await nodeManager.start(); [server, port] = await testAgentUtils.openTestAgentServer({ keyManager, vaultManager, nodeManager, + 
nodeConnectionManager, sigchain, + nodeGraph, notificationsManager, }); client = await testAgentUtils.openTestAgentClient(port); @@ -167,7 +184,8 @@ describe(GRPCClientAgent.name, () => { await vaultManager.stop(); await notificationsManager.stop(); await sigchain.stop(); - await nodeManager.stop(); + await nodeConnectionManager.stop(); + await nodeGraph.stop(); await gestaltGraph.stop(); await acl.stop(); await fwdProxy.stop(); @@ -275,19 +293,18 @@ describe(GRPCClientAgent.name, () => { // that it can be used to verify the claim signature xToYNodeConnection = await TestNodeConnection.createTestNodeConnection({ publicKey: yKeyManager.getRootKeyPairPem().publicKey, - targetNodeId: nodeIdY, targetHost: 'unnecessary' as Host, targetPort: 0 as Port, - forwardProxy: fwdProxy, - keyManager: keyManager, + fwdProxy: fwdProxy, + destroyCallback: async () => {}, logger: logger, }); // @ts-ignore - force push into the protected connections map - nodeManager.connections.set(nodeIdY.toString(), { + nodeConnectionManager.connections.set(nodeIdY.toString(), { connection: xToYNodeConnection, - lock: new Mutex(), + lock: new RWLock(), }); - await nodeManager.setNode(nodeIdY, { + await nodeGraph.setNode(nodeIdY, { host: 'unnecessary' as Host, port: 0 as Port, } as NodeAddress); diff --git a/tests/agent/utils.ts b/tests/agent/utils.ts index 50a42fdeb..6b91930dd 100644 --- a/tests/agent/utils.ts +++ b/tests/agent/utils.ts @@ -3,7 +3,7 @@ import type { Host, Port } from '@/network/types'; import type { IAgentServiceServer } from '@/proto/js/polykey/v1/agent_service_grpc_pb'; import type { KeyManager } from '@/keys'; import type { VaultManager } from '@/vaults'; -import type { NodeManager } from '@/nodes'; +import type { NodeGraph, NodeConnectionManager, NodeManager } from '@/nodes'; import type { Sigchain } from '@/sigchain'; import type { NotificationsManager } from '@/notifications'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; @@ -19,13 +19,17 @@ import * as 
testUtils from '../utils'; async function openTestAgentServer({ keyManager, vaultManager, + nodeConnectionManager, nodeManager, + nodeGraph, sigchain, notificationsManager, }: { keyManager: KeyManager; vaultManager: VaultManager; + nodeConnectionManager: NodeConnectionManager; nodeManager: NodeManager; + nodeGraph: NodeGraph; sigchain: Sigchain; notificationsManager: NotificationsManager; }) { @@ -33,8 +37,10 @@ async function openTestAgentServer({ keyManager, vaultManager, nodeManager, + nodeGraph, sigchain: sigchain, notificationsManager: notificationsManager, + nodeConnectionManager: nodeConnectionManager, }); const server = new grpc.Server(); @@ -62,6 +68,7 @@ async function openTestAgentClient(port: number): Promise { host: '127.0.0.1' as Host, port: port as Port, logger: logger, + destroyCallback: async () => {}, timeout: 30000, }); return agentClient; diff --git a/tests/bin/identities/identities.test.ts b/tests/bin/identities/identities.test.ts index ab9ab0ffa..955a8f98c 100644 --- a/tests/bin/identities/identities.test.ts +++ b/tests/bin/identities/identities.test.ts @@ -9,16 +9,11 @@ import { PolykeyAgent } from '@'; import * as claimsUtils from '@/claims/utils'; import * as identitiesUtils from '@/identities/utils'; import { utils as nodesUtils } from '@/nodes'; +import * as keysUtils from '@/keys/utils'; import * as testBinUtils from '../utils'; import * as testNodesUtils from '../../nodes/utils'; import TestProvider from '../../identities/TestProvider'; -jest.mock('@/keys/utils', () => ({ - ...jest.requireActual('@/keys/utils'), - generateDeterministicKeyPair: - jest.requireActual('@/keys/utils').generateKeyPair, -})); - function identityString( providerId: ProviderId, identityId: IdentityId, @@ -99,8 +94,17 @@ describe('CLI Identities', () => { return ['identities', ...options, '-np', nodePath]; } + const mockedGenerateDeterministicKeyPair = jest.spyOn( + keysUtils, + 'generateDeterministicKeyPair', + ); + // Setup and teardown beforeAll(async () => { 
+ mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { + return keysUtils.generateKeyPair(bits); + }); + // This handles the expensive setting up of the polykey agent. dataDir = await fs.promises.mkdtemp( path.join(os.tmpdir(), 'polykey-test-'), @@ -114,7 +118,7 @@ describe('CLI Identities', () => { logger: logger, }); - keynode.id = nodesUtils.encodeNodeId(polykeyAgent.nodeManager.getNodeId()); + keynode.id = nodesUtils.encodeNodeId(polykeyAgent.keyManager.getNodeId()); testProvider = new TestProvider(); polykeyAgent.identitiesManager.registerProvider(testProvider); @@ -680,13 +684,13 @@ describe('CLI Identities', () => { // Adding sigchain details. const claimBtoC: ClaimLinkNode = { type: 'node', - node1: nodesUtils.encodeNodeId(nodeB.nodeManager.getNodeId()), - node2: nodesUtils.encodeNodeId(nodeC.nodeManager.getNodeId()), + node1: nodesUtils.encodeNodeId(nodeB.keyManager.getNodeId()), + node2: nodesUtils.encodeNodeId(nodeC.keyManager.getNodeId()), }; const claimCtoB: ClaimLinkNode = { type: 'node', - node1: nodesUtils.encodeNodeId(nodeC.nodeManager.getNodeId()), - node2: nodesUtils.encodeNodeId(nodeB.nodeManager.getNodeId()), + node1: nodesUtils.encodeNodeId(nodeC.keyManager.getNodeId()), + node2: nodesUtils.encodeNodeId(nodeB.keyManager.getNodeId()), }; await nodeB.sigchain.addClaim(claimBtoC); await nodeB.sigchain.addClaim(claimCtoB); @@ -700,7 +704,7 @@ describe('CLI Identities', () => { const claimIdentToB: ClaimLinkIdentity = { type: 'identity', - node: nodesUtils.encodeNodeId(nodeB.nodeManager.getNodeId()), + node: nodesUtils.encodeNodeId(nodeB.keyManager.getNodeId()), provider: testProvider.id, identity: identityId, }; @@ -738,7 +742,7 @@ describe('CLI Identities', () => { 'discover', '-np', nodePath, - nodesUtils.encodeNodeId(nodeB.nodeManager.getNodeId()), + nodesUtils.encodeNodeId(nodeB.keyManager.getNodeId()), '-vvvv', ]; const result = await testBinUtils.pkStdio(commands); @@ -749,10 +753,10 @@ describe('CLI Identities', () => { 
expect(gestalt.length).not.toBe(0); const gestaltString = JSON.stringify(gestalt); expect(gestaltString).toContain( - nodesUtils.encodeNodeId(nodeB.nodeManager.getNodeId()), + nodesUtils.encodeNodeId(nodeB.keyManager.getNodeId()), ); expect(gestaltString).toContain( - nodesUtils.encodeNodeId(nodeC.nodeManager.getNodeId()), + nodesUtils.encodeNodeId(nodeC.keyManager.getNodeId()), ); expect(gestaltString).toContain(identityId); // Unauthenticate identity @@ -782,10 +786,10 @@ describe('CLI Identities', () => { expect(gestalt.length).not.toBe(0); const gestaltString = JSON.stringify(gestalt); expect(gestaltString).toContain( - nodesUtils.encodeNodeId(nodeB.nodeManager.getNodeId()), + nodesUtils.encodeNodeId(nodeB.keyManager.getNodeId()), ); expect(gestaltString).toContain( - nodesUtils.encodeNodeId(nodeC.nodeManager.getNodeId()), + nodesUtils.encodeNodeId(nodeC.keyManager.getNodeId()), ); expect(gestaltString).toContain(identityId); // Unauthenticate identity diff --git a/tests/bin/keys/renew.test.ts b/tests/bin/keys/renew.test.ts index 4336cc2f7..a4721d757 100644 --- a/tests/bin/keys/renew.test.ts +++ b/tests/bin/keys/renew.test.ts @@ -50,7 +50,7 @@ describe('renew', () => { }); test('renews the keypair', async () => { const rootKeyPair1 = pkAgent.keyManager.getRootKeyPairPem(); - const nodeId1 = pkAgent.nodeManager.getNodeId(); + const nodeId1 = pkAgent.keyManager.getNodeId(); // @ts-ignore - get protected property const fwdTLSConfig1 = pkAgent.fwdProxy.tlsConfig; // @ts-ignore - get protected property @@ -85,7 +85,7 @@ describe('renew', () => { ); expect(exitCode).toBe(0); const rootKeyPair2 = pkAgent.keyManager.getRootKeyPairPem(); - const nodeId2 = pkAgent.nodeManager.getNodeId(); + const nodeId2 = pkAgent.keyManager.getNodeId(); // @ts-ignore - get protected property const fwdTLSConfig2 = pkAgent.fwdProxy.tlsConfig; // @ts-ignore - get protected property diff --git a/tests/bin/keys/reset.test.ts b/tests/bin/keys/reset.test.ts index d5b18ae5e..5a220343b 100644 
--- a/tests/bin/keys/reset.test.ts +++ b/tests/bin/keys/reset.test.ts @@ -50,7 +50,7 @@ describe('renew', () => { }); test('resets the keypair', async () => { const rootKeyPair1 = pkAgent.keyManager.getRootKeyPairPem(); - const nodeId1 = pkAgent.nodeManager.getNodeId(); + const nodeId1 = pkAgent.keyManager.getNodeId(); // @ts-ignore - get protected property const fwdTLSConfig1 = pkAgent.fwdProxy.tlsConfig; // @ts-ignore - get protected property @@ -85,7 +85,7 @@ describe('renew', () => { ); expect(exitCode).toBe(0); const rootKeyPair2 = pkAgent.keyManager.getRootKeyPairPem(); - const nodeId2 = pkAgent.nodeManager.getNodeId(); + const nodeId2 = pkAgent.keyManager.getNodeId(); // @ts-ignore - get protected property const fwdTLSConfig2 = pkAgent.fwdProxy.tlsConfig; // @ts-ignore - get protected property diff --git a/tests/bin/nodes/add.test.ts b/tests/bin/nodes/add.test.ts index b478447c5..b52bbd47a 100644 --- a/tests/bin/nodes/add.test.ts +++ b/tests/bin/nodes/add.test.ts @@ -7,15 +7,10 @@ import { IdInternal } from '@matrixai/id'; import PolykeyAgent from '@/PolykeyAgent'; import * as nodesUtils from '@/nodes/utils'; import { sysexits } from '@/utils'; +import * as keysUtils from '@/keys/utils'; import * as testBinUtils from '../utils'; import * as testUtils from '../../utils'; -jest.mock('@/keys/utils', () => ({ - ...jest.requireActual('@/keys/utils'), - generateDeterministicKeyPair: - jest.requireActual('@/keys/utils').generateKeyPair, -})); - describe('add', () => { const password = 'password'; const logger = new Logger('add test', LogLevel.WARN, [new StreamHandler()]); @@ -35,7 +30,16 @@ describe('add', () => { return ['nodes', ...options, '-np', nodePath]; } - beforeAll(async () => { + const mockedGenerateDeterministicKeyPair = jest.spyOn( + keysUtils, + 'generateDeterministicKeyPair', + ); + + beforeEach(async () => { + mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { + return keysUtils.generateKeyPair(bits); + }); + dataDir = await 
fs.promises.mkdtemp( path.join(os.tmpdir(), 'polykey-test-'), ); @@ -54,11 +58,8 @@ describe('add', () => { {}, nodePath, ); - }, global.polykeyStartupTimeout * 3); + }, global.polykeyStartupTimeout); afterEach(async () => { - await polykeyAgent.nodeManager.clearDB(); - }); - afterAll(async () => { await polykeyAgent.stop(); await polykeyAgent.destroy(); await fs.promises.rm(dataDir, { @@ -78,7 +79,7 @@ describe('add', () => { expect(result.exitCode).toBe(0); // Checking if node was added. - const res = await polykeyAgent.nodeManager.getNode(validNodeId); + const res = await polykeyAgent.nodeGraph.getNode(validNodeId); expect(res).toBeTruthy(); expect(res!.host).toEqual(validHost); expect(res!.port).toEqual(port); @@ -110,7 +111,7 @@ describe('add', () => { expect(result.exitCode).toBe(sysexits.USAGE); // Checking if node was added. - const res = await polykeyAgent.nodeManager.getNode(validNodeId); + const res = await polykeyAgent.nodeGraph.getNode(validNodeId); expect(res).toBeUndefined(); }, global.failedConnectionTimeout, diff --git a/tests/bin/nodes/claim.test.ts b/tests/bin/nodes/claim.test.ts index dd096e0ea..b215e203b 100644 --- a/tests/bin/nodes/claim.test.ts +++ b/tests/bin/nodes/claim.test.ts @@ -5,15 +5,10 @@ import fs from 'fs'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import PolykeyAgent from '@/PolykeyAgent'; import { utils as nodesUtils } from '@/nodes'; +import * as keysUtils from '@/keys/utils'; import * as testBinUtils from '../utils'; import * as testNodesUtils from '../../nodes/utils'; -jest.mock('@/keys/utils', () => ({ - ...jest.requireActual('@/keys/utils'), - generateDeterministicKeyPair: - jest.requireActual('@/keys/utils').generateKeyPair, -})); - describe('claim', () => { const password = 'password'; const logger = new Logger('claim test', LogLevel.WARN, [new StreamHandler()]); @@ -33,7 +28,16 @@ describe('claim', () => { return ['nodes', ...options, '-np', nodePath]; } + const mockedGenerateDeterministicKeyPair 
= jest.spyOn( + keysUtils, + 'generateDeterministicKeyPair', + ); + beforeAll(async () => { + mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { + return keysUtils.generateKeyPair(bits); + }); + rootDataDir = await fs.promises.mkdtemp( path.join(os.tmpdir(), 'polykey-test-'), ); @@ -48,7 +52,7 @@ describe('claim', () => { nodePath: nodePath, logger: logger, }); - keynodeId = polykeyAgent.nodeManager.getNodeId(); + keynodeId = polykeyAgent.keyManager.getNodeId(); // Setting up a remote keynode remoteOnline = await PolykeyAgent.createPolykeyAgent({ password: 'password', @@ -58,11 +62,11 @@ describe('claim', () => { }, logger, }); - remoteOnlineNodeId = remoteOnline.nodeManager.getNodeId(); + remoteOnlineNodeId = remoteOnline.keyManager.getNodeId(); remoteOnlineNodeIdEncoded = nodesUtils.encodeNodeId(remoteOnlineNodeId); await testNodesUtils.nodesConnect(polykeyAgent, remoteOnline); - await remoteOnline.nodeManager.setNode(keynodeId, { + await remoteOnline.nodeGraph.setNode(keynodeId, { host: polykeyAgent.revProxy.getIngressHost(), port: polykeyAgent.revProxy.getIngressPort(), }); diff --git a/tests/bin/nodes/find.test.ts b/tests/bin/nodes/find.test.ts index cb9a09bed..e64786086 100644 --- a/tests/bin/nodes/find.test.ts +++ b/tests/bin/nodes/find.test.ts @@ -6,15 +6,10 @@ import fs from 'fs'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import PolykeyAgent from '@/PolykeyAgent'; import * as nodesUtils from '@/nodes/utils'; +import * as keysUtils from '@/keys/utils'; import * as testBinUtils from '../utils'; import * as testNodesUtils from '../../nodes/utils'; -jest.mock('@/keys/utils', () => ({ - ...jest.requireActual('@/keys/utils'), - generateDeterministicKeyPair: - jest.requireActual('@/keys/utils').generateKeyPair, -})); - describe('find', () => { const password = 'password'; const logger = new Logger('find test', LogLevel.WARN, [new StreamHandler()]); @@ -39,7 +34,16 @@ describe('find', () => { return ['nodes', ...options, 
'-np', nodePath]; } + const mockedGenerateDeterministicKeyPair = jest.spyOn( + keysUtils, + 'generateDeterministicKeyPair', + ); + beforeAll(async () => { + mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { + return keysUtils.generateKeyPair(bits); + }); + rootDataDir = await fs.promises.mkdtemp( path.join(os.tmpdir(), 'polykey-test-'), ); @@ -64,7 +68,7 @@ describe('find', () => { }, logger, }); - remoteOnlineNodeId = remoteOnline.nodeManager.getNodeId(); + remoteOnlineNodeId = remoteOnline.keyManager.getNodeId(); remoteOnlineHost = remoteOnline.revProxy.getIngressHost(); remoteOnlinePort = remoteOnline.revProxy.getIngressPort(); await testNodesUtils.nodesConnect(polykeyAgent, remoteOnline); @@ -78,7 +82,7 @@ describe('find', () => { }, logger, }); - remoteOfflineNodeId = remoteOffline.nodeManager.getNodeId(); + remoteOfflineNodeId = remoteOffline.keyManager.getNodeId(); remoteOfflineHost = remoteOffline.revProxy.getIngressHost(); remoteOfflinePort = remoteOffline.revProxy.getIngressPort(); await testNodesUtils.nodesConnect(polykeyAgent, remoteOffline); @@ -198,9 +202,11 @@ describe('find', () => { const result2 = await testBinUtils.pkStdio(commands2, {}, dataDir); expect(result2.exitCode).toBe(1); expect(result2.stdout).toContain(`message`); - expect(result2.stdout).toContain(`Failed to find node ${unknownNodeId}`); + expect(result2.stdout).toContain( + `Failed to find node ${nodesUtils.encodeNodeId(unknownNodeId)}`, + ); expect(result2.stdout).toContain('id'); - expect(result2.stdout).toContain(unknownNodeId); + expect(result2.stdout).toContain(nodesUtils.encodeNodeId(unknownNodeId)); expect(result2.stdout).toContain('port'); expect(result2.stdout).toContain('0'); expect(result2.stdout).toContain('host'); diff --git a/tests/bin/nodes/ping.test.ts b/tests/bin/nodes/ping.test.ts index c86424fa9..4ad7cad43 100644 --- a/tests/bin/nodes/ping.test.ts +++ b/tests/bin/nodes/ping.test.ts @@ -5,15 +5,10 @@ import fs from 'fs'; import Logger, { LogLevel, 
StreamHandler } from '@matrixai/logger'; import PolykeyAgent from '@/PolykeyAgent'; import * as nodesUtils from '@/nodes/utils'; +import * as keysUtils from '@/keys/utils'; import * as testBinUtils from '../utils'; import * as testNodesUtils from '../../nodes/utils'; -jest.mock('@/keys/utils', () => ({ - ...jest.requireActual('@/keys/utils'), - generateDeterministicKeyPair: - jest.requireActual('@/keys/utils').generateKeyPair, -})); - describe('ping', () => { const password = 'password'; const logger = new Logger('ping test', LogLevel.WARN, [new StreamHandler()]); @@ -33,7 +28,16 @@ describe('ping', () => { return ['nodes', ...options, '-np', nodePath]; } + const mockedGenerateDeterministicKeyPair = jest.spyOn( + keysUtils, + 'generateDeterministicKeyPair', + ); + beforeAll(async () => { + mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { + return keysUtils.generateKeyPair(bits); + }); + dataDir = await fs.promises.mkdtemp( path.join(os.tmpdir(), 'polykey-test-'), ); @@ -58,7 +62,7 @@ describe('ping', () => { }, logger, }); - remoteOnlineNodeId = remoteOnline.nodeManager.getNodeId(); + remoteOnlineNodeId = remoteOnline.keyManager.getNodeId(); await testNodesUtils.nodesConnect(polykeyAgent, remoteOnline); // Setting up an offline remote keynode @@ -70,7 +74,7 @@ describe('ping', () => { }, logger, }); - remoteOfflineNodeId = remoteOffline.nodeManager.getNodeId(); + remoteOfflineNodeId = remoteOffline.keyManager.getNodeId(); await testNodesUtils.nodesConnect(polykeyAgent, remoteOffline); await remoteOffline.stop(); @@ -129,7 +133,7 @@ describe('ping', () => { 'ping', nodesUtils.encodeNodeId(fakeNodeId), ]); - const result = await testBinUtils.pk(commands); + const result = await testBinUtils.pkStdio(commands); expect(result.exitCode).not.toBe(0); // Should fail if node doesn't exist. 
expect(result.stdout).toContain('Failed to resolve node ID'); diff --git a/tests/bin/notifications/notifications.test.ts b/tests/bin/notifications/notifications.test.ts index b8fa99b9b..caea650e7 100644 --- a/tests/bin/notifications/notifications.test.ts +++ b/tests/bin/notifications/notifications.test.ts @@ -9,14 +9,9 @@ import { utils as idUtils } from '@matrixai/id'; import PolykeyAgent from '@/PolykeyAgent'; import { makeVaultId } from '@/vaults/utils'; import { utils as nodesUtils } from '@/nodes'; +import * as keysUtils from '@/keys/utils'; import * as testBinUtils from '../utils'; -jest.mock('@/keys/utils', () => ({ - ...jest.requireActual('@/keys/utils'), - generateDeterministicKeyPair: - jest.requireActual('@/keys/utils').generateKeyPair, -})); - describe('CLI Notifications', () => { const password = 'password'; const logger = new Logger('pkStdio Test', LogLevel.WARN, [ @@ -38,7 +33,16 @@ describe('CLI Notifications', () => { return ['notifications', ...options, '-np', receiverNodePath]; } + const mockedGenerateDeterministicKeyPair = jest.spyOn( + keysUtils, + 'generateDeterministicKeyPair', + ); + beforeAll(async () => { + mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { + return keysUtils.generateKeyPair(bits); + }); + senderDataDir = await fs.promises.mkdtemp( path.join(os.tmpdir(), 'polykey-test-'), ); @@ -61,9 +65,9 @@ describe('CLI Notifications', () => { nodePath: receiverNodePath, logger: logger, }); - senderNodeId = senderPolykeyAgent.nodeManager.getNodeId(); - receiverNodeId = receiverPolykeyAgent.nodeManager.getNodeId(); - await senderPolykeyAgent.nodeManager.setNode(receiverNodeId, { + senderNodeId = senderPolykeyAgent.keyManager.getNodeId(); + receiverNodeId = receiverPolykeyAgent.keyManager.getNodeId(); + await senderPolykeyAgent.nodeGraph.setNode(receiverNodeId, { host: receiverPolykeyAgent.revProxy.getIngressHost(), port: receiverPolykeyAgent.revProxy.getIngressPort(), } as NodeAddress); diff --git 
a/tests/bin/secrets/secrets.test.ts b/tests/bin/secrets/secrets.test.ts index 292dacdc9..c72aee00d 100644 --- a/tests/bin/secrets/secrets.test.ts +++ b/tests/bin/secrets/secrets.test.ts @@ -5,14 +5,9 @@ import fs from 'fs'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import PolykeyAgent from '@/PolykeyAgent'; import { vaultOps } from '@/vaults'; +import * as keysUtils from '@/keys/utils'; import * as testBinUtils from '../utils'; -jest.mock('@/keys/utils', () => ({ - ...jest.requireActual('@/keys/utils'), - generateDeterministicKeyPair: - jest.requireActual('@/keys/utils').generateKeyPair, -})); - describe('CLI secrets', () => { const password = 'password'; const logger = new Logger('CLI Test', LogLevel.WARN, [new StreamHandler()]); @@ -21,7 +16,15 @@ describe('CLI secrets', () => { let passwordFile: string; let command: Array; + const mockedGenerateDeterministicKeyPair = jest.spyOn( + keysUtils, + 'generateDeterministicKeyPair', + ); + beforeAll(async () => { + mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { + return keysUtils.generateKeyPair(bits); + }); dataDir = await fs.promises.mkdtemp( path.join(os.tmpdir(), 'polykey-test-'), ); diff --git a/tests/bin/vaults/vaults.test.ts b/tests/bin/vaults/vaults.test.ts index da1c0ce78..db23e80fc 100644 --- a/tests/bin/vaults/vaults.test.ts +++ b/tests/bin/vaults/vaults.test.ts @@ -7,14 +7,9 @@ import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import PolykeyAgent from '@/PolykeyAgent'; import { makeVaultIdPretty } from '@/vaults/utils'; import { utils as nodesUtils } from '@/nodes'; +import * as keysUtils from '@/keys/utils'; import * as testBinUtils from '../utils'; -jest.mock('@/keys/utils', () => ({ - ...jest.requireActual('@/keys/utils'), - generateDeterministicKeyPair: - jest.requireActual('@/keys/utils').generateKeyPair, -})); - /** * This test file has been optimised to use only one instance of PolykeyAgent where posible. 
* Setting up the PolykeyAgent has been done in a beforeAll block. @@ -68,7 +63,16 @@ describe('CLI vaults', () => { return `vault-${vaultNumber}` as VaultName; } + const mockedGenerateDeterministicKeyPair = jest.spyOn( + keysUtils, + 'generateDeterministicKeyPair', + ); + beforeAll(async () => { + mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { + return keysUtils.generateKeyPair(bits); + }); + dataDir = await fs.promises.mkdtemp( path.join(os.tmpdir(), 'polykey-test-'), ); @@ -309,7 +313,7 @@ describe('CLI vaults', () => { expect(id).toBeTruthy(); await targetPolykeyAgent.gestaltGraph.setNode({ - id: nodesUtils.encodeNodeId(polykeyAgent.nodeManager.getNodeId()), + id: nodesUtils.encodeNodeId(polykeyAgent.keyManager.getNodeId()), chain: {}, }); fail(); @@ -319,20 +323,23 @@ describe('CLI vaults', () => { // vault.vaultId, // ); - const targetNodeId = targetPolykeyAgent.nodeManager.getNodeId(); + const targetNodeId = targetPolykeyAgent.keyManager.getNodeId(); const targetHost = targetPolykeyAgent.revProxy.getIngressHost(); const targetPort = targetPolykeyAgent.revProxy.getIngressPort(); - await polykeyAgent.nodeManager.setNode(targetNodeId, { + await polykeyAgent.nodeGraph.setNode(targetNodeId, { host: targetHost, port: targetPort, }); // Client agent: Start sending hole-punching packets to the target - await polykeyAgent.nodeManager.getConnectionToNode(targetNodeId); + await polykeyAgent.nodeConnectionManager.withConnF( + targetNodeId, + async () => {}, + ); const clientEgressHost = polykeyAgent.fwdProxy.getEgressHost(); const clientEgressPort = polykeyAgent.fwdProxy.getEgressPort(); // Server agent: start sending hole-punching packets back to the 'client' // agent (in order to establish a connection) - await targetPolykeyAgent.nodeManager.openConnection( + await targetPolykeyAgent.nodeConnectionManager.holePunchReverse( clientEgressHost, clientEgressPort, ); @@ -385,7 +392,7 @@ describe('CLI vaults', () => { expect(id).toBeTruthy(); await 
targetPolykeyAgent.gestaltGraph.setNode({ - id: nodesUtils.encodeNodeId(polykeyAgent.nodeManager.getNodeId()), + id: nodesUtils.encodeNodeId(polykeyAgent.keyManager.getNodeId()), chain: {}, }); fail(); @@ -395,20 +402,23 @@ describe('CLI vaults', () => { // vault.vaultId, // ); - const targetNodeId = targetPolykeyAgent.nodeManager.getNodeId(); + const targetNodeId = targetPolykeyAgent.keyManager.getNodeId(); const targetHost = targetPolykeyAgent.revProxy.getIngressHost(); const targetPort = targetPolykeyAgent.revProxy.getIngressPort(); - await polykeyAgent.nodeManager.setNode(targetNodeId, { + await polykeyAgent.nodeGraph.setNode(targetNodeId, { host: targetHost, port: targetPort, }); // Client agent: Start sending hole-punching packets to the target - await polykeyAgent.nodeManager.getConnectionToNode(targetNodeId); + await polykeyAgent.nodeConnectionManager.withConnF( + targetNodeId, + async () => {}, + ); const clientEgressHost = polykeyAgent.fwdProxy.getEgressHost(); const clientEgressPort = polykeyAgent.fwdProxy.getEgressPort(); // Server agent: start sending hole-punching packets back to the 'client' // agent (in order to establish a connection) - await targetPolykeyAgent.nodeManager.openConnection( + await targetPolykeyAgent.nodeConnectionManager.holePunchReverse( clientEgressHost, clientEgressPort, ); @@ -467,7 +477,7 @@ describe('CLI vaults', () => { logger: logger, }); - const targetNodeId = targetPolykeyAgent.nodeManager.getNodeId(); + const targetNodeId = targetPolykeyAgent.keyManager.getNodeId(); const targetHost = targetPolykeyAgent.revProxy.getIngressHost(); const targetPort = targetPolykeyAgent.revProxy.getIngressPort(); await polykeyAgent.nodeManager.setNode(targetNodeId, { @@ -475,12 +485,15 @@ describe('CLI vaults', () => { port: targetPort, }); // Client agent: Start sending hole-punching packets to the target - await polykeyAgent.nodeManager.getConnectionToNode(targetNodeId); + await polykeyAgent.nodeConnectionManager.withConnF( + targetNodeId, 
+ async () => {}, + ); const clientEgressHost = polykeyAgent.fwdProxy.getEgressHost(); const clientEgressPort = polykeyAgent.fwdProxy.getEgressPort(); // Server agent: start sending hole-punching packets back to the 'client' // agent (in order to establish a connection) - await targetPolykeyAgent.nodeManager.openConnection( + await targetPolykeyAgent.nodeConnectionManager.holePunchReverse( clientEgressHost, clientEgressPort, ); diff --git a/tests/client/GRPCClientClient.test.ts b/tests/client/GRPCClientClient.test.ts index 4db264524..3d8f7cc52 100644 --- a/tests/client/GRPCClientClient.test.ts +++ b/tests/client/GRPCClientClient.test.ts @@ -50,7 +50,7 @@ describe(GRPCClientClient.name, () => { nodePath, logger: logger, }); - nodeId = pkAgent.nodeManager.getNodeId(); + nodeId = pkAgent.keyManager.getNodeId(); [server, port] = await testClientUtils.openTestClientServer({ pkAgent, }); diff --git a/tests/client/rpcGestalts.test.ts b/tests/client/rpcGestalts.test.ts index b314d1632..2e4ff9d51 100644 --- a/tests/client/rpcGestalts.test.ts +++ b/tests/client/rpcGestalts.test.ts @@ -1,7 +1,6 @@ import type * as grpc from '@grpc/grpc-js'; import type { IdentitiesManager } from '@/identities'; import type { GestaltGraph } from '@/gestalts'; -import type { NodeManager } from '@/nodes'; import type { IdentityId, IdentityInfo, ProviderId } from '@/identities/types'; import type { NodeIdEncoded, NodeInfo } from '@/nodes/types'; import type * as gestaltsPB from '@/proto/js/polykey/v1/gestalts/gestalts_pb'; @@ -41,7 +40,6 @@ describe('Client service', () => { let dataDir: string; let pkAgent: PolykeyAgent; let keyManager: KeyManager; - let nodeManager: NodeManager; let gestaltGraph: GestaltGraph; let identitiesManager: IdentitiesManager; let passwordFile: string; @@ -104,7 +102,6 @@ describe('Client service', () => { keyManager, }); - nodeManager = pkAgent.nodeManager; gestaltGraph = pkAgent.gestaltGraph; identitiesManager = pkAgent.identitiesManager; @@ -120,7 +117,7 @@ 
describe('Client service', () => { client = await testUtils.openSimpleClientClient(port); node1 = { - id: nodesUtils.encodeNodeId(nodeManager.getNodeId()), + id: nodesUtils.encodeNodeId(pkAgent.keyManager.getNodeId()), chain: {}, }; }, global.polykeyStartupTimeout); @@ -299,7 +296,9 @@ describe('Client service', () => { expect(test1.getActionList().includes('scan')).toBeTruthy(); expect(test1.getActionList().includes('notify')).toBeTruthy(); - nodeMessage.setNodeId(nodesUtils.encodeNodeId(nodeManager.getNodeId())); + nodeMessage.setNodeId( + nodesUtils.encodeNodeId(pkAgent.keyManager.getNodeId()), + ); // Should have no permissions const test2 = await gestaltsGetActionsByNode(nodeMessage, callCredentials); expect(test2.getActionList().length).toBe(0); diff --git a/tests/client/service/identitiesClaim.test.ts b/tests/client/service/identitiesClaim.test.ts index f4b42c277..8b6aa167e 100644 --- a/tests/client/service/identitiesClaim.test.ts +++ b/tests/client/service/identitiesClaim.test.ts @@ -11,7 +11,7 @@ import { DB } from '@matrixai/db'; import { KeyManager, utils as keysUtils } from '@/keys'; import { GRPCServer } from '@/grpc'; import { IdentitiesManager } from '@/identities'; -import { NodeManager } from '@/nodes'; +import { NodeConnectionManager, NodeGraph } from '@/nodes'; import { Sigchain } from '@/sigchain'; import { ForwardProxy, ReverseProxy } from '@/network'; import { @@ -78,7 +78,8 @@ describe('identitiesClaim', () => { let dataDir: string; let testProvider: TestProvider; let identitiesManager: IdentitiesManager; - let nodeManager: NodeManager; + let nodeGraph: NodeGraph; + let nodeConnectionManager: NodeConnectionManager; let sigchain: Sigchain; let fwdProxy: ForwardProxy; let revProxy: ReverseProxy; @@ -131,20 +132,26 @@ describe('identitiesClaim', () => { keyManager, logger, }); - nodeManager = await NodeManager.createNodeManager({ + nodeGraph = await NodeGraph.createNodeGraph({ db, keyManager, - sigchain, + logger: logger.getChild('NodeGraph'), + 
}); + nodeConnectionManager = new NodeConnectionManager({ + connConnectTime: 2000, fwdProxy, + keyManager, + nodeGraph, revProxy, - logger, + logger: logger.getChild('nodeConnectionManager'), }); + await nodeConnectionManager.start(); const clientService = { identitiesClaim: identitiesClaim({ authenticate, identitiesManager, sigchain, - nodeManager, + keyManager, }), }; grpcServer = new GRPCServer({ logger }); @@ -163,7 +170,8 @@ describe('identitiesClaim', () => { afterEach(async () => { await grpcClient.destroy(); await grpcServer.stop(); - await nodeManager.stop(); + await nodeConnectionManager.stop(); + await nodeGraph.stop(); await sigchain.stop(); await revProxy.stop(); await fwdProxy.stop(); diff --git a/tests/client/service/keysKeyPairRenew.test.ts b/tests/client/service/keysKeyPairRenew.test.ts index 6a1545f9c..6d5f822c8 100644 --- a/tests/client/service/keysKeyPairRenew.test.ts +++ b/tests/client/service/keysKeyPairRenew.test.ts @@ -8,7 +8,7 @@ import path from 'path'; import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { Metadata } from '@grpc/grpc-js'; -import { NodeManager } from '@/nodes'; +import { NodeGraph } from '@/nodes'; import { utils as keysUtils } from '@/keys'; import { GRPCServer } from '@/grpc'; import { PolykeyAgent } from '@'; @@ -35,7 +35,7 @@ describe('keysKeyPairRenew', () => { beforeAll(async () => { const globalKeyPair = await testUtils.setupGlobalKeypair(); const newKeyPair = await keysUtils.generateKeyPair(1024); - mockedRefreshBuckets = jest.spyOn(NodeManager.prototype, 'refreshBuckets'); + mockedRefreshBuckets = jest.spyOn(NodeGraph.prototype, 'refreshBuckets'); mockedGenerateKeyPair = jest .spyOn(keysUtils, 'generateKeyPair') .mockResolvedValueOnce(globalKeyPair) diff --git a/tests/client/service/keysKeyPairReset.test.ts b/tests/client/service/keysKeyPairReset.test.ts index 70f70e283..f3a96ceed 100644 --- a/tests/client/service/keysKeyPairReset.test.ts +++ 
b/tests/client/service/keysKeyPairReset.test.ts @@ -8,7 +8,7 @@ import path from 'path'; import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { Metadata } from '@grpc/grpc-js'; -import { NodeManager } from '@/nodes'; +import { NodeGraph } from '@/nodes'; import { utils as keysUtils } from '@/keys'; import { GRPCServer } from '@/grpc'; import { PolykeyAgent } from '@'; @@ -35,7 +35,7 @@ describe('keysKeyPairReset', () => { beforeAll(async () => { const globalKeyPair = await testUtils.setupGlobalKeypair(); const newKeyPair = await keysUtils.generateKeyPair(1024); - mockedRefreshBuckets = jest.spyOn(NodeManager.prototype, 'refreshBuckets'); + mockedRefreshBuckets = jest.spyOn(NodeGraph.prototype, 'refreshBuckets'); mockedGenerateKeyPair = jest .spyOn(keysUtils, 'generateKeyPair') .mockResolvedValueOnce(globalKeyPair) diff --git a/tests/client/service/nodesAdd.test.ts b/tests/client/service/nodesAdd.test.ts index 2b77f6312..a9e837a43 100644 --- a/tests/client/service/nodesAdd.test.ts +++ b/tests/client/service/nodesAdd.test.ts @@ -7,7 +7,7 @@ import { Metadata } from '@grpc/grpc-js'; import { DB } from '@matrixai/db'; import { KeyManager, utils as keysUtils } from '@/keys'; import { GRPCServer } from '@/grpc'; -import { NodeManager } from '@/nodes'; +import { NodeConnectionManager, NodeGraph, NodeManager } from '@/nodes'; import { Sigchain } from '@/sigchain'; import { ForwardProxy, ReverseProxy } from '@/network'; import { @@ -46,6 +46,8 @@ describe('nodesAdd', () => { }); const authToken = 'abc123'; let dataDir: string; + let nodeGraph: NodeGraph; + let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let sigchain: Sigchain; let fwdProxy: ForwardProxy; @@ -93,12 +95,27 @@ describe('nodesAdd', () => { keyManager, logger, }); - nodeManager = await NodeManager.createNodeManager({ + nodeGraph = await NodeGraph.createNodeGraph({ db, keyManager, - sigchain, + logger: logger.getChild('NodeGraph'), + }); + 
nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, fwdProxy, revProxy, + connConnectTime: 2000, + connTimeoutTime: 2000, + logger: logger.getChild('NodeConnectionManager'), + }); + await nodeConnectionManager.start(); + nodeManager = new NodeManager({ + db, + keyManager, + nodeConnectionManager, + nodeGraph, + sigchain, logger, }); const clientService = { @@ -123,8 +140,8 @@ describe('nodesAdd', () => { afterEach(async () => { await grpcClient.destroy(); await grpcServer.stop(); - await nodeManager.stop(); - await nodeManager.destroy(); + await nodeGraph.stop(); + await nodeConnectionManager.stop(); await sigchain.stop(); await revProxy.stop(); await fwdProxy.stop(); @@ -147,7 +164,7 @@ describe('nodesAdd', () => { clientUtils.encodeAuthFromPassword(password), ); expect(response).toBeInstanceOf(utilsPB.EmptyMessage); - const result = await nodeManager.getNode( + const result = await nodeGraph.getNode( nodesUtils.decodeNodeId( 'vrsc24a1er424epq77dtoveo93meij0pc8ig4uvs9jbeld78n9nl0', )!, diff --git a/tests/client/service/nodesClaim.test.ts b/tests/client/service/nodesClaim.test.ts index 2c2da7928..b133be3d3 100644 --- a/tests/client/service/nodesClaim.test.ts +++ b/tests/client/service/nodesClaim.test.ts @@ -9,7 +9,7 @@ import { Metadata } from '@grpc/grpc-js'; import { DB } from '@matrixai/db'; import { KeyManager, utils as keysUtils } from '@/keys'; import { GRPCServer } from '@/grpc'; -import { NodeManager } from '@/nodes'; +import { NodeConnectionManager, NodeGraph, NodeManager } from '@/nodes'; import { Sigchain } from '@/sigchain'; import { ForwardProxy, ReverseProxy } from '@/network'; import { NotificationsManager } from '@/notifications'; @@ -73,6 +73,8 @@ describe('nodesClaim', () => { }); const authToken = 'abc123'; let dataDir: string; + let nodeGraph: NodeGraph; + let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let notificationsManager: NotificationsManager; let acl: ACL; @@ -126,18 +128,34 @@ 
describe('nodesClaim', () => { keyManager, logger, }); - nodeManager = await NodeManager.createNodeManager({ + nodeGraph = await NodeGraph.createNodeGraph({ db, keyManager, - sigchain, + logger: logger.getChild('NodeGraph'), + }); + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, fwdProxy, revProxy, + connConnectTime: 2000, + connTimeoutTime: 2000, + logger: logger.getChild('NodeConnectionManager'), + }); + await nodeConnectionManager.start(); + nodeManager = new NodeManager({ + db, + keyManager, + sigchain, + nodeGraph, + nodeConnectionManager, logger, }); notificationsManager = await NotificationsManager.createNotificationsManager({ acl, db, + nodeConnectionManager, nodeManager, keyManager, logger, @@ -165,8 +183,9 @@ describe('nodesClaim', () => { afterEach(async () => { await grpcClient.destroy(); await grpcServer.stop(); + await nodeConnectionManager.stop(); + await nodeGraph.stop(); await notificationsManager.stop(); - await nodeManager.stop(); await sigchain.stop(); await revProxy.stop(); await fwdProxy.stop(); diff --git a/tests/client/service/nodesFind.test.ts b/tests/client/service/nodesFind.test.ts index e09fe0511..bd6d277a8 100644 --- a/tests/client/service/nodesFind.test.ts +++ b/tests/client/service/nodesFind.test.ts @@ -7,7 +7,7 @@ import { Metadata } from '@grpc/grpc-js'; import { DB } from '@matrixai/db'; import { KeyManager, utils as keysUtils } from '@/keys'; import { GRPCServer } from '@/grpc'; -import { NodeManager } from '@/nodes'; +import { NodeConnectionManager, NodeGraph } from '@/nodes'; import { Sigchain } from '@/sigchain'; import { ForwardProxy, ReverseProxy } from '@/network'; import { @@ -39,7 +39,7 @@ describe('nodesFind', () => { .spyOn(keysUtils, 'generateDeterministicKeyPair') .mockResolvedValue(globalKeyPair); mockedFindNode = jest - .spyOn(NodeManager.prototype, 'findNode') + .spyOn(NodeConnectionManager.prototype, 'findNode') .mockResolvedValue({ host: '127.0.0.1' as Host, port: 11111 as Port, @@ 
-52,7 +52,8 @@ describe('nodesFind', () => { }); const authToken = 'abc123'; let dataDir: string; - let nodeManager: NodeManager; + let nodeGraph: NodeGraph; + let nodeConnectionManager: NodeConnectionManager; let sigchain: Sigchain; let fwdProxy: ForwardProxy; let revProxy: ReverseProxy; @@ -99,18 +100,25 @@ describe('nodesFind', () => { keyManager, logger, }); - nodeManager = await NodeManager.createNodeManager({ + nodeGraph = await NodeGraph.createNodeGraph({ db, keyManager, - sigchain, + logger: logger.getChild('NodeGraph'), + }); + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, fwdProxy, revProxy, - logger, + connConnectTime: 2000, + connTimeoutTime: 2000, + logger: logger.getChild('NodeConnectionManager'), }); + await nodeConnectionManager.start(); const clientService = { nodesFind: nodesFind({ + nodeConnectionManager, authenticate, - nodeManager, }), }; grpcServer = new GRPCServer({ logger }); @@ -129,8 +137,9 @@ describe('nodesFind', () => { afterEach(async () => { await grpcClient.destroy(); await grpcServer.stop(); - await nodeManager.stop(); await sigchain.stop(); + await nodeGraph.stop(); + await nodeConnectionManager.stop(); await revProxy.stop(); await fwdProxy.stop(); await db.stop(); diff --git a/tests/client/service/nodesPing.test.ts b/tests/client/service/nodesPing.test.ts index 9633d1e06..6c7873c54 100644 --- a/tests/client/service/nodesPing.test.ts +++ b/tests/client/service/nodesPing.test.ts @@ -7,7 +7,7 @@ import { Metadata } from '@grpc/grpc-js'; import { DB } from '@matrixai/db'; import { KeyManager, utils as keysUtils } from '@/keys'; import { GRPCServer } from '@/grpc'; -import { NodeManager } from '@/nodes'; +import { NodeConnectionManager, NodeGraph, NodeManager } from '@/nodes'; import { Sigchain } from '@/sigchain'; import { ForwardProxy, ReverseProxy } from '@/network'; import { @@ -51,6 +51,8 @@ describe('nodesPing', () => { }); const authToken = 'abc123'; let dataDir: string; + let nodeGraph: 
NodeGraph; + let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let sigchain: Sigchain; let fwdProxy: ForwardProxy; @@ -98,12 +100,27 @@ describe('nodesPing', () => { keyManager, logger, }); - nodeManager = await NodeManager.createNodeManager({ + nodeGraph = await NodeGraph.createNodeGraph({ db, keyManager, - sigchain, + logger: logger.getChild('NodeGraph'), + }); + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, fwdProxy, revProxy, + connConnectTime: 2000, + connTimeoutTime: 2000, + logger: logger.getChild('NodeConnectionManager'), + }); + await nodeConnectionManager.start(); + nodeManager = new NodeManager({ + db, + keyManager, + nodeConnectionManager, + nodeGraph, + sigchain, logger, }); const clientService = { @@ -128,8 +145,9 @@ describe('nodesPing', () => { afterEach(async () => { await grpcClient.destroy(); await grpcServer.stop(); - await nodeManager.stop(); await sigchain.stop(); + await nodeGraph.stop(); + await nodeConnectionManager.stop(); await revProxy.stop(); await fwdProxy.stop(); await db.stop(); diff --git a/tests/client/service/notificationsClear.test.ts b/tests/client/service/notificationsClear.test.ts index a5ed2ee3c..094b2b007 100644 --- a/tests/client/service/notificationsClear.test.ts +++ b/tests/client/service/notificationsClear.test.ts @@ -7,7 +7,7 @@ import { Metadata } from '@grpc/grpc-js'; import { DB } from '@matrixai/db'; import { KeyManager, utils as keysUtils } from '@/keys'; import { GRPCServer } from '@/grpc'; -import { NodeManager } from '@/nodes'; +import { NodeConnectionManager, NodeGraph, NodeManager } from '@/nodes'; import { Sigchain } from '@/sigchain'; import { ForwardProxy, ReverseProxy } from '@/network'; import { NotificationsManager } from '@/notifications'; @@ -50,6 +50,8 @@ describe('notificationsClear', () => { }); const authToken = 'abc123'; let dataDir: string; + let nodeGraph: NodeGraph; + let nodeConnectionManager: NodeConnectionManager; let 
nodeManager: NodeManager; let notificationsManager: NotificationsManager; let acl: ACL; @@ -103,18 +105,34 @@ describe('notificationsClear', () => { keyManager, logger, }); - nodeManager = await NodeManager.createNodeManager({ + nodeGraph = await NodeGraph.createNodeGraph({ db, keyManager, - sigchain, + logger: logger.getChild('NodeGraph'), + }); + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, fwdProxy, revProxy, + connConnectTime: 2000, + connTimeoutTime: 2000, + logger: logger.getChild('NodeConnectionManager'), + }); + await nodeConnectionManager.start(); + nodeManager = new NodeManager({ + db, + keyManager, + nodeConnectionManager, + nodeGraph, + sigchain, logger, }); notificationsManager = await NotificationsManager.createNotificationsManager({ acl, db, + nodeConnectionManager, nodeManager, keyManager, logger, @@ -142,7 +160,8 @@ describe('notificationsClear', () => { await grpcClient.destroy(); await grpcServer.stop(); await notificationsManager.stop(); - await nodeManager.stop(); + await nodeGraph.stop(); + await nodeConnectionManager.stop(); await sigchain.stop(); await revProxy.stop(); await fwdProxy.stop(); diff --git a/tests/client/service/notificationsRead.test.ts b/tests/client/service/notificationsRead.test.ts index 2de1aeb45..a2d1e4cea 100644 --- a/tests/client/service/notificationsRead.test.ts +++ b/tests/client/service/notificationsRead.test.ts @@ -8,7 +8,12 @@ import { Metadata } from '@grpc/grpc-js'; import { DB } from '@matrixai/db'; import { KeyManager, utils as keysUtils } from '@/keys'; import { GRPCServer } from '@/grpc'; -import { NodeManager, utils as nodesUtils } from '@/nodes'; +import { + NodeConnectionManager, + NodeGraph, + NodeManager, + utils as nodesUtils, +} from '@/nodes'; import { Sigchain } from '@/sigchain'; import { ForwardProxy, ReverseProxy } from '@/network'; import { NotificationsManager } from '@/notifications'; @@ -123,6 +128,8 @@ describe('notificationsRead', () => { }); const authToken 
= 'abc123'; let dataDir: string; + let nodeGraph: NodeGraph; + let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let notificationsManager: NotificationsManager; let acl: ACL; @@ -176,18 +183,34 @@ describe('notificationsRead', () => { keyManager, logger, }); - nodeManager = await NodeManager.createNodeManager({ + nodeGraph = await NodeGraph.createNodeGraph({ db, keyManager, - sigchain, + logger: logger.getChild('NodeGraph'), + }); + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, fwdProxy, revProxy, + connConnectTime: 2000, + connTimeoutTime: 2000, + logger: logger.getChild('NodeConnectionManager'), + }); + await nodeConnectionManager.start(); + nodeManager = new NodeManager({ + db, + keyManager, + nodeGraph, + nodeConnectionManager, + sigchain, logger, }); notificationsManager = await NotificationsManager.createNotificationsManager({ acl, db, + nodeConnectionManager, nodeManager, keyManager, logger, @@ -215,8 +238,9 @@ describe('notificationsRead', () => { await grpcClient.destroy(); await grpcServer.stop(); await notificationsManager.stop(); - await nodeManager.stop(); await sigchain.stop(); + await nodeGraph.stop(); + await nodeConnectionManager.stop(); await revProxy.stop(); await fwdProxy.stop(); await acl.stop(); diff --git a/tests/client/service/notificationsSend.test.ts b/tests/client/service/notificationsSend.test.ts index a328e9732..db4bb5b8e 100644 --- a/tests/client/service/notificationsSend.test.ts +++ b/tests/client/service/notificationsSend.test.ts @@ -8,7 +8,12 @@ import { Metadata } from '@grpc/grpc-js'; import { DB } from '@matrixai/db'; import { KeyManager, utils as keysUtils } from '@/keys'; import { GRPCServer } from '@/grpc'; -import { NodeManager, utils as nodesUtils } from '@/nodes'; +import { + NodeConnectionManager, + NodeGraph, + NodeManager, + utils as nodesUtils, +} from '@/nodes'; import { Sigchain } from '@/sigchain'; import { ForwardProxy, ReverseProxy } from '@/network'; 
import { @@ -24,6 +29,7 @@ import { import notificationsSend from '@/client/service/notificationsSend'; import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as notificationsPB from '@/proto/js/polykey/v1/notifications/notifications_pb'; +import { GRPCClientAgent } from '@/agent'; import * as testUtils from '../../utils'; describe('notificationsSend', () => { @@ -51,8 +57,8 @@ describe('notificationsSend', () => { return 'signedNotification' as SignedNotification; }); mockedSendNotification = jest - .spyOn(NodeManager.prototype, 'sendNotification') - .mockResolvedValue(undefined); + .spyOn(GRPCClientAgent.prototype, 'notificationsSend') + .mockResolvedValue(new notificationsPB.AgentNotification()); }); afterAll(async () => { mockedGenerateKeyPair.mockRestore(); @@ -62,6 +68,8 @@ describe('notificationsSend', () => { }); const authToken = 'abc123'; let dataDir: string; + let nodeGraph: NodeGraph; + let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let notificationsManager: NotificationsManager; let acl: ACL; @@ -115,18 +123,34 @@ describe('notificationsSend', () => { keyManager, logger, }); - nodeManager = await NodeManager.createNodeManager({ + nodeGraph = await NodeGraph.createNodeGraph({ db, keyManager, - sigchain, + logger: logger.getChild('NodeGraph'), + }); + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, fwdProxy, revProxy, + connConnectTime: 2000, + connTimeoutTime: 2000, + logger: logger.getChild('NodeConnectionManager'), + }); + await nodeConnectionManager.start(); + nodeManager = new NodeManager({ + db, + keyManager, + nodeGraph, + nodeConnectionManager, + sigchain, logger, }); notificationsManager = await NotificationsManager.createNotificationsManager({ acl, db, + nodeConnectionManager, nodeManager, keyManager, logger, @@ -154,7 +178,8 @@ describe('notificationsSend', () => { await grpcClient.destroy(); await grpcServer.stop(); await notificationsManager.stop(); - await 
nodeManager.stop(); + await nodeGraph.stop(); + await nodeConnectionManager.stop(); await sigchain.stop(); await revProxy.stop(); await fwdProxy.stop(); @@ -167,13 +192,13 @@ describe('notificationsSend', () => { }); }); test('sends a notification', async () => { + const receiverNodeIdEncoded = + 'vrsc24a1er424epq77dtoveo93meij0pc8ig4uvs9jbeld78n9nl0'; const generalMessage = new notificationsPB.General(); generalMessage.setMessage('test'); const request = new notificationsPB.Send(); request.setData(generalMessage); - request.setReceiverId( - 'vrsc24a1er424epq77dtoveo93meij0pc8ig4uvs9jbeld78n9nl0', - ); + request.setReceiverId(receiverNodeIdEncoded); const response = await grpcClient.notificationsSend( request, clientUtils.encodeAuthFromPassword(password), @@ -184,7 +209,7 @@ describe('notificationsSend', () => { expect(mockedSendNotification.mock.calls.length).toBe(1); expect( nodesUtils.encodeNodeId(mockedSendNotification.mock.calls[0][0]), - ).toBe('vrsc24a1er424epq77dtoveo93meij0pc8ig4uvs9jbeld78n9nl0'); + ).toBe(receiverNodeIdEncoded); expect(mockedSendNotification.mock.calls[0][1]).toBe('signedNotification'); // Check notification content expect(mockedSignNotification.mock.calls[0][0]).toEqual({ diff --git a/tests/client/utils.ts b/tests/client/utils.ts index 5ba0079cd..7c55b5c2e 100644 --- a/tests/client/utils.ts +++ b/tests/client/utils.ts @@ -27,6 +27,8 @@ async function openTestClientServer({ pkAgent, keyManager: pkAgent.keyManager, vaultManager: pkAgent.vaultManager, + nodeGraph: pkAgent.nodeGraph, + nodeConnectionManager: pkAgent.nodeConnectionManager, nodeManager: pkAgent.nodeManager, identitiesManager: pkAgent.identitiesManager, gestaltGraph: pkAgent.gestaltGraph, diff --git a/tests/discovery/Discovery.test.ts b/tests/discovery/Discovery.test.ts index 114a8a598..9be398c32 100644 --- a/tests/discovery/Discovery.test.ts +++ b/tests/discovery/Discovery.test.ts @@ -12,7 +12,7 @@ import { utils as claimsUtils } from '@/claims'; import { Discovery, errors as 
discoveryErrors } from '@/discovery'; import { GestaltGraph } from '@/gestalts'; import { IdentitiesManager } from '@/identities'; -import { NodeManager } from '@/nodes'; +import { NodeConnectionManager, NodeGraph, NodeManager } from '@/nodes'; import { KeyManager, utils as keysUtils } from '@/keys'; import { ACL } from '@/acl'; import { Sigchain } from '@/sigchain'; @@ -43,6 +43,8 @@ describe('Discovery', () => { let dataDir: string; let gestaltGraph: GestaltGraph; let identitiesManager: IdentitiesManager; + let nodeGraph: NodeGraph; + let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let db: DB; let acl: ACL; @@ -121,12 +123,27 @@ describe('Discovery', () => { certChainPem: await keyManager.getRootCertChainPem(), }, }); - nodeManager = await NodeManager.createNodeManager({ + nodeGraph = await NodeGraph.createNodeGraph({ db, keyManager, - sigchain, + logger: logger.getChild('NodeGraph'), + }); + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, fwdProxy, revProxy, + connConnectTime: 2000, + connTimeoutTime: 2000, + logger: logger.getChild('NodeConnectionManager'), + }); + await nodeConnectionManager.start(); + nodeManager = new NodeManager({ + db, + keyManager, + sigchain, + nodeGraph, + nodeConnectionManager, logger: logger.getChild('nodeManager'), }); // Set up other gestalt @@ -147,11 +164,11 @@ describe('Discovery', () => { logger: logger.getChild('nodeB'), }); await testNodesUtils.nodesConnect(nodeA, nodeB); - await nodeManager.setNode(nodeA.nodeManager.getNodeId(), { + await nodeGraph.setNode(nodeA.keyManager.getNodeId(), { host: nodeA.revProxy.getIngressHost(), port: nodeA.revProxy.getIngressPort(), }); - await nodeA.nodeManager.claimNode(nodeB.nodeManager.getNodeId()); + await nodeA.nodeManager.claimNode(nodeB.keyManager.getNodeId()); nodeA.identitiesManager.registerProvider(testProvider); identityId = 'other-gestalt' as IdentityId; await nodeA.identitiesManager.putToken(testToken.providerId, 
identityId, { @@ -159,7 +176,7 @@ describe('Discovery', () => { }); const identityClaim: ClaimLinkIdentity = { type: 'identity', - node: nodesUtils.encodeNodeId(nodeB.nodeManager.getNodeId()), + node: nodesUtils.encodeNodeId(nodeB.keyManager.getNodeId()), provider: testProvider.id, identity: identityId, }; @@ -170,7 +187,8 @@ describe('Discovery', () => { afterAll(async () => { await nodeA.stop(); await nodeB.stop(); - await nodeManager.stop(); + await nodeGraph.stop(); + await nodeConnectionManager.stop(); await revProxy.stop(); await fwdProxy.stop(); await sigchain.stop(); @@ -188,9 +206,11 @@ describe('Discovery', () => { }); test('discovery readiness', async () => { const discovery = await Discovery.createDiscovery({ + keyManager, gestaltGraph, identitiesManager, nodeManager, + sigchain, logger, }); expect(discovery[destroyed]).toBeFalsy(); @@ -205,13 +225,15 @@ describe('Discovery', () => { }); test('discovery by node', async () => { const discovery = await Discovery.createDiscovery({ + keyManager, gestaltGraph, identitiesManager, nodeManager, + sigchain, logger, }); const discoverProcess = discovery.discoverGestaltByNode( - nodeA.nodeManager.getNodeId(), + nodeA.keyManager.getNodeId(), ); for await (const _step of discoverProcess) { // Waiting for the discovery process to finish. 
@@ -220,10 +242,10 @@ describe('Discovery', () => { expect(gestalt.length).not.toBe(0); const gestaltString = JSON.stringify(gestalt); expect(gestaltString).toContain( - nodesUtils.encodeNodeId(nodeA.nodeManager.getNodeId()), + nodesUtils.encodeNodeId(nodeA.keyManager.getNodeId()), ); expect(gestaltString).toContain( - nodesUtils.encodeNodeId(nodeB.nodeManager.getNodeId()), + nodesUtils.encodeNodeId(nodeB.keyManager.getNodeId()), ); expect(gestaltString).toContain(identityId); await discovery.destroy(); @@ -237,13 +259,15 @@ describe('Discovery', () => { }); test('discovery by identity', async () => { const discovery = await Discovery.createDiscovery({ + keyManager, gestaltGraph, identitiesManager, nodeManager, + sigchain, logger, }); const discoverProcess = discovery.discoverGestaltByNode( - nodeA.nodeManager.getNodeId(), + nodeA.keyManager.getNodeId(), ); for await (const _step of discoverProcess) { // Waiting for the discovery process to finish. @@ -252,10 +276,10 @@ describe('Discovery', () => { expect(gestalt.length).not.toBe(0); const gestaltString = JSON.stringify(gestalt); expect(gestaltString).toContain( - nodesUtils.encodeNodeId(nodeA.nodeManager.getNodeId()), + nodesUtils.encodeNodeId(nodeA.keyManager.getNodeId()), ); expect(gestaltString).toContain( - nodesUtils.encodeNodeId(nodeB.nodeManager.getNodeId()), + nodesUtils.encodeNodeId(nodeB.keyManager.getNodeId()), ); expect(gestaltString).toContain(identityId); await discovery.destroy(); diff --git a/tests/grpc/utils.test.ts b/tests/grpc/utils.test.ts index 7ca52d284..f29b86c4a 100644 --- a/tests/grpc/utils.test.ts +++ b/tests/grpc/utils.test.ts @@ -1,7 +1,9 @@ import type { TestServiceClient } from '@/proto/js/polykey/v1/test_service_grpc_pb'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import * as grpc from '@grpc/grpc-js'; -import { utils as grpcUtils, errors as grpcErrors } from '@/grpc'; +import { getLogger } from '@grpc/grpc-js/build/src/logging'; +import * as grpcUtils from 
'@/grpc/utils'; +import * as grpcErrors from '@/grpc/errors'; import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as utils from './utils'; @@ -27,6 +29,14 @@ describe('GRPC utils', () => { }, 2000); await utils.closeTestServer(server); }); + test('setting the global GRPC logger', () => { + const grpcLogger1 = getLogger(); + // Sets the global GRPC logger to the logger + grpcUtils.setLogger(logger); + const grpcLogger2 = getLogger(); + // These objects should not be the same + expect(grpcLogger1).not.toBe(grpcLogger2); + }); test('promisified client unary call', async () => { const unary = grpcUtils.promisifyUnaryCall( client, diff --git a/tests/grpc/utils/GRPCClientTest.ts b/tests/grpc/utils/GRPCClientTest.ts index ddcc21dbe..c4b55b1d1 100644 --- a/tests/grpc/utils/GRPCClientTest.ts +++ b/tests/grpc/utils/GRPCClientTest.ts @@ -3,9 +3,10 @@ import type { Session } from '@/sessions'; import type { NodeId } from '@/nodes/types'; import type { Host, Port, TLSConfig, ProxyConfig } from '@/network/types'; import type * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; +import type { ClientReadableStream } from '@grpc/grpc-js/build/src/call'; +import type { AsyncGeneratorReadableStreamClient } from '@/grpc/types'; import Logger from '@matrixai/logger'; import { CreateDestroy, ready } from '@matrixai/async-init/dist/CreateDestroy'; - import { GRPCClient, utils as grpcUtils } from '@/grpc'; import * as clientUtils from '@/client/utils'; import { TestServiceClient } from '@/proto/js/polykey/v1/test_service_grpc_pb'; @@ -21,6 +22,7 @@ class GRPCClientTest extends GRPCClient { proxyConfig, session, timeout = Infinity, + destroyCallback, logger = new Logger(this.name), }: { nodeId: NodeId; @@ -30,6 +32,7 @@ class GRPCClientTest extends GRPCClient { proxyConfig?: ProxyConfig; session?: Session; timeout?: number; + destroyCallback?: () => Promise; logger?: Logger; }): Promise { logger.info(`Creating ${this.name}`); @@ -56,6 +59,7 @@ class 
GRPCClientTest extends GRPCClient { tlsConfig, proxyConfig, serverCertChain, + destroyCallback, logger, }); logger.info(`Created ${this.name}`); @@ -111,6 +115,27 @@ class GRPCClientTest extends GRPCClient { this.client.unaryAuthenticated, )(...args); } + + @ready() + public serverStreamFail( + ...args + ): AsyncGeneratorReadableStreamClient< + utilsPB.EchoMessage, + ClientReadableStream + > { + return grpcUtils.promisifyReadableStreamCall( + this.client, + this.client.serverStreamFail, + )(...args); + } + + @ready() + public unaryFail(...args) { + return grpcUtils.promisifyUnaryCall( + this.client, + this.client.unaryFail, + )(...args); + } } export default GRPCClientTest; diff --git a/tests/grpc/utils/testServer.ts b/tests/grpc/utils/testServer.ts new file mode 100644 index 000000000..cfe87d195 --- /dev/null +++ b/tests/grpc/utils/testServer.ts @@ -0,0 +1,21 @@ +import * as grpc from '@grpc/grpc-js'; +import * as utils from './index'; + +// This is spawned as a background process for use in some NodeConnection.test.ts tests +async function main() { + const authenticate = async (metaClient, metaServer = new grpc.Metadata()) => + metaServer; + const [server, port] = await utils.openTestServer(authenticate); + process.stdout.write(`${port}`); + process.stdin.on('data', () => { + server.forceShutdown(); + }); +} + +if (require.main === module) { + (async () => { + await main(); + })(); +} + +export default main; diff --git a/tests/grpc/utils/testService.ts b/tests/grpc/utils/testService.ts index 8d126025d..5c3356d7f 100644 --- a/tests/grpc/utils/testService.ts +++ b/tests/grpc/utils/testService.ts @@ -12,6 +12,7 @@ import * as grpc from '@grpc/grpc-js'; import { utils as grpcUtils, errors as grpcErrors } from '@/grpc'; import * as clientUtils from '@/client/utils'; import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; +import { sleep } from '@/utils'; function createTestService({ authenticate, @@ -178,6 +179,46 @@ function createTestService({ 
callback(null, message); } }, + serverStreamFail: async ( + call: grpc.ServerWritableStream, + ): Promise => { + const genWritable = grpcUtils.generatorWritable(call); + try { + const echoMessage = new utilsPB.EchoMessage().setChallenge('Hello!'); + for (let i = 0; i < 10; i++) { + await sleep(100); + await genWritable.write(echoMessage); + } + const message = call.request.getChallenge(); + if (message === 'exit') process.exit(0); + if (message === 'kill') process.kill(process.pid); + if (message === 'sigkill') process.kill(process.pid, 9); + for (let i = 0; i < 10; i++) { + await sleep(100); + await genWritable.write(echoMessage); + } + await genWritable.write(null); + } catch (err) { + await genWritable.throw(err); + } + }, + unaryFail: async ( + call: grpc.ServerUnaryCall, + callback: grpc.sendUnaryData, + ): Promise => { + try { + // Wait a long time and then close the server. + await sleep(2000); + const message = call.request.getChallenge(); + if (message === 'exit') process.exit(0); + if (message === 'kill') process.kill(process.pid); + if (message === 'sigkill') process.kill(process.pid, 9); + await sleep(2000); + callback(null, new utilsPB.EchoMessage().setChallenge('hello!')); + } catch (err) { + callback(grpcUtils.fromError(err)); + } + }, }; return testService; } diff --git a/tests/grpc/utils/utils.ts b/tests/grpc/utils/utils.ts index 3cd55e61b..67370aba6 100644 --- a/tests/grpc/utils/utils.ts +++ b/tests/grpc/utils/utils.ts @@ -2,13 +2,13 @@ import type Logger from '@matrixai/logger'; import type { Authenticate } from '@/client/types'; import type { NodeId } from '@/nodes/types'; import * as grpc from '@grpc/grpc-js'; -import { utils as grpcUtils } from '@/grpc'; +import * as grpcUtils from '@/grpc/utils'; +import * as nodesUtils from '@/nodes/utils'; import { promisify } from '@/utils'; import { TestServiceService, TestServiceClient, } from '@/proto/js/polykey/v1/test_service_grpc_pb'; -import { utils as nodesUtils } from '@/nodes'; import 
createTestService from './testService'; async function openTestServer( diff --git a/tests/nodes/NodeConnection.test.ts b/tests/nodes/NodeConnection.test.ts index 5b98a7bf6..5e1ea13bc 100644 --- a/tests/nodes/NodeConnection.test.ts +++ b/tests/nodes/NodeConnection.test.ts @@ -1,52 +1,89 @@ -import type { Host, Port, ConnectionInfo } from '@/network/types'; -import type { NodeId, NodeInfo, NodeData } from '@/nodes/types'; +import type { AddressInfo } from 'net'; +import type { ConnectionInfo, Host, Port, TLSConfig } from '@/network/types'; +import type { NodeId, NodeInfo } from '@/nodes/types'; +import net from 'net'; import os from 'os'; import path from 'path'; import fs from 'fs'; -import Logger, { StreamHandler, LogLevel } from '@matrixai/logger'; +import * as child_process from 'child_process'; +import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; - -import { IdInternal } from '@matrixai/id'; -import { ForwardProxy, ReverseProxy } from '@/network'; -import { NodeConnection, NodeManager } from '@/nodes'; -import { VaultManager } from '@/vaults'; -import { KeyManager, utils as keysUtils } from '@/keys'; +import { destroyed } from '@matrixai/async-init'; +import ReverseProxy from '@/network/ReverseProxy'; +import ForwardProxy from '@/network/ForwardProxy'; +import NodeConnection from '@/nodes/NodeConnection'; +import NodeConnectionManager from '@/nodes/NodeConnectionManager'; +import NodeGraph from '@/nodes/NodeGraph'; +import NodeManager from '@/nodes/NodeManager'; +import VaultManager from '@/vaults/VaultManager'; +import KeyManager from '@/keys/KeyManager'; +import * as keysUtils from '@/keys/utils'; import GRPCServer from '@/grpc/GRPCServer'; -import { AgentServiceService, createAgentService } from '@/agent'; -import { ACL } from '@/acl'; -import { GestaltGraph } from '@/gestalts'; -import { Sigchain } from '@/sigchain'; -import { NotificationsManager } from '@/notifications'; - -import * as nodesUtils from 
'@/nodes/utils'; +import { AgentServiceService } from '@/proto/js/polykey/v1/agent_service_grpc_pb'; +import createAgentService from '@/agent/service'; +import GRPCClientAgent from '@/agent/GRPCClientAgent'; +import ACL from '@/acl/ACL'; +import GestaltGraph from '@/gestalts/GestaltGraph'; +import Sigchain from '@/sigchain/Sigchain'; +import NotificationsManager from '@/notifications/NotificationsManager'; import * as nodesErrors from '@/nodes/errors'; import * as networkErrors from '@/network/errors'; -import { poll } from '@/utils'; -import * as nodesTestUtils from './utils'; +import { poll, promise, promisify } from '@/utils'; +import PolykeyAgent from '@/PolykeyAgent'; +import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; +import * as GRPCErrors from '@/grpc/errors'; +import * as nodesUtils from '@/nodes/utils'; +import * as agentErrors from '@/agent/errors'; +import * as grpcUtils from '@/grpc/utils'; import * as testUtils from '../utils'; +import * as grpcTestUtils from '../grpc/utils'; + +const destroyCallback = async () => {}; + +// Dummy nodeConnectionManager +// We only need the hole punch function and frankly its not used in testing here +// This is really dirty so don't do this outside of testing EVER +const dummyNodeConnectionManager = { + openConnection: async (_host, _port) => { + throw Error('This is a dummy function, should not be called'); + }, + withConnF: async () => { + throw Error('Test, please ignore'); + }, + getSeedNodes: () => [], + sendHolePunchMessage: async () => { + throw Error('Test, please ignore'); + }, +} as unknown as NodeConnectionManager; + +const mockedGenerateDeterministicKeyPair = jest.spyOn( + keysUtils, + 'generateDeterministicKeyPair', +); + +describe(`${NodeConnection.name} test`, () => { + const logger = new Logger(`${NodeConnection.name} test`, LogLevel.WARN, [ + new StreamHandler(), + ]); + grpcUtils.setLogger(logger.getChild('grpc')); -// Mocks. 
-jest.mock('@/keys/utils', () => ({ - ...jest.requireActual('@/keys/utils'), - generateDeterministicKeyPair: - jest.requireActual('@/keys/utils').generateKeyPair, -})); + mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { + return keysUtils.generateKeyPair(bits); + }); -describe('NodeConnection', () => { const password = 'password'; const node: NodeInfo = { id: nodesUtils.encodeNodeId(testUtils.generateRandomNodeId()), chain: {}, }; - const logger = new Logger('NodeConnection Test', LogLevel.WARN, [ - new StreamHandler(), - ]); // Server let serverDataDir: string; let targetNodeId: NodeId; let serverKeyManager: KeyManager; let serverVaultManager: VaultManager; + let serverNodeGraph: NodeGraph; + let serverNodeConnectionManager: NodeConnectionManager; let serverNodeManager: NodeManager; let serverSigchain: Sigchain; let serverACL: ACL; @@ -63,53 +100,77 @@ describe('NodeConnection', () => { let clientFwdProxy: ForwardProxy; let agentServer: GRPCServer; + let tlsConfig: TLSConfig; + + const localHost = '127.0.0.1' as Host; + let targetPort: Port; + let sourcePort: Port; + + let serverTLSConfig: TLSConfig; + + /** + * Mock TCP server + * This is the server that the ReverseProxy will be proxying to + */ + function tcpServer(end: boolean = false, fastEnd: boolean = false) { + const { p: serverConnP, resolveP: resolveServerConnP } = promise(); + const { p: serverConnEndP, resolveP: resolveServerConnEndP } = + promise(); + const { p: serverConnClosedP, resolveP: resolveServerConnClosedP } = + promise(); + const server = net.createServer( + { + allowHalfOpen: false, + }, + (conn) => { + logger.info('connection!'); + if (fastEnd) { + conn.end(); + conn.destroy(); + } + logger.info(JSON.stringify(conn.address())); + resolveServerConnP(); + conn.on('end', () => { + logger.info('ending'); + resolveServerConnEndP(); + conn.end(); + conn.destroy(); + }); + conn.once('close', () => { + logger.info('closing'); + resolveServerConnClosedP(); + }); + if (end) { + 
conn.removeAllListeners('end'); + conn.on('end', () => { + logger.info('ending'); + resolveServerConnEndP(); + conn.destroy(); + }); + conn.end(); + } + }, + ); + const serverClose = promisify(server.close).bind(server); + const serverListen = promisify(server.listen).bind(server); + const serverHost = () => { + return (server.address() as AddressInfo).address as Host; + }; + const serverPort = () => { + return (server.address() as AddressInfo).port as Port; + }; + return { + serverListen, + serverClose, + serverConnP, + serverConnEndP, + serverConnClosedP, + serverHost, + serverPort, + }; + } - const nodeIdGenerator = (number: number): NodeId => { - const idArray = new Uint8Array([ - 223, - 24, - 34, - 40, - 46, - 217, - 4, - 71, - 103, - 71, - 59, - 123, - 143, - 187, - 9, - 29, - 157, - 41, - 131, - 44, - 68, - 160, - 79, - 127, - 137, - 154, - 221, - 86, - 157, - 23, - 77, - number, - ]); - return IdInternal.fromBuffer(Buffer.from(idArray)); - }; - - // Meep IPs unique. Ideally we'd use the generated IP and port. But this is good for now. - // If this fails again we shouldn't specify the port and IP. 
- const sourceHost = '127.0.0.1' as Host; - const sourcePort = 11110 as Port; - const targetHost = '127.0.0.2' as Host; - const targetPort = 11111 as Port; - - beforeAll(async () => { + beforeEach(async () => { // Server setup serverDataDir = await fs.promises.mkdtemp( path.join(os.tmpdir(), 'polykey-test-server'), @@ -125,7 +186,7 @@ describe('NodeConnection', () => { logger: logger, }); - const serverTLSConfig = { + serverTLSConfig = { keyPrivatePem: serverKeyManager.getRootKeyPairPem().privateKey, certChainPem: await serverKeyManager.getRootCertChainPem(), }; @@ -166,18 +227,33 @@ describe('NodeConnection', () => { logger: logger, }); - serverNodeManager = await NodeManager.createNodeManager({ + serverNodeGraph = await NodeGraph.createNodeGraph({ db: serverDb, - sigchain: serverSigchain, keyManager: serverKeyManager, + logger, + }); + + serverNodeConnectionManager = new NodeConnectionManager({ + keyManager: serverKeyManager, + nodeGraph: serverNodeGraph, fwdProxy: serverFwdProxy, revProxy: serverRevProxy, + logger, + }); + await serverNodeConnectionManager.start(); + + serverNodeManager = new NodeManager({ + db: serverDb, + sigchain: serverSigchain, + keyManager: serverKeyManager, + nodeGraph: serverNodeGraph, + nodeConnectionManager: serverNodeConnectionManager, logger: logger, }); serverVaultManager = await VaultManager.createVaultManager({ keyManager: serverKeyManager, vaultsPath: serverVaultsPath, - nodeManager: serverNodeManager, + nodeConnectionManager: serverNodeConnectionManager, vaultsKey: serverKeyManager.vaultKey, db: serverDb, acl: serverACL, @@ -189,16 +265,18 @@ describe('NodeConnection', () => { await NotificationsManager.createNotificationsManager({ acl: serverACL, db: serverDb, + nodeConnectionManager: serverNodeConnectionManager, nodeManager: serverNodeManager, keyManager: serverKeyManager, logger: logger, }); await serverGestaltGraph.setNode(node); - await serverNodeManager.start(); const agentService = createAgentService({ keyManager: 
serverKeyManager, vaultManager: serverVaultManager, + nodeConnectionManager: dummyNodeConnectionManager, nodeManager: serverNodeManager, + nodeGraph: serverNodeGraph, sigchain: serverSigchain, notificationsManager: serverNotificationsManager, }); @@ -207,15 +285,15 @@ describe('NodeConnection', () => { }); await agentServer.start({ services: [[AgentServiceService, agentService]], - host: targetHost, + host: localHost, }); await serverRevProxy.start({ - serverHost: targetHost, + serverHost: localHost, serverPort: agentServer.port, - ingressHost: targetHost, - ingressPort: targetPort, + ingressHost: localHost, tlsConfig: serverTLSConfig, }); + targetPort = serverRevProxy.getIngressPort(); targetNodeId = serverKeyManager.getNodeId(); // Client setup @@ -240,19 +318,27 @@ describe('NodeConnection', () => { logger: logger, }); await clientFwdProxy.start({ + proxyHost: localHost, tlsConfig: clientTLSConfig, - egressHost: sourceHost, - egressPort: sourcePort, + egressHost: localHost, }); + sourcePort = clientFwdProxy.getEgressPort(); + + // Other setup + const globalKeyPair = await testUtils.setupGlobalKeypair(); + const cert = keysUtils.generateCertificate( + globalKeyPair.publicKey, + globalKeyPair.privateKey, + globalKeyPair.privateKey, + 86400, + ); + tlsConfig = { + keyPrivatePem: keysUtils.keyPairToPem(globalKeyPair).privateKey, + certChainPem: keysUtils.certToPem(cert), + }; }, global.polykeyStartupTimeout * 2); afterEach(async () => { - // Do you really need to clear the database state of NodeManager - // To do NodeConnection testing? 
- await serverNodeManager.clearDB(); - }); - - afterAll(async () => { await clientFwdProxy.stop(); await clientKeyManager.stop(); await clientKeyManager.destroy(); @@ -269,8 +355,9 @@ describe('NodeConnection', () => { await serverGestaltGraph.destroy(); await serverVaultManager.stop(); await serverVaultManager.destroy(); - await serverNodeManager.stop(); - await serverNodeManager.destroy(); + await serverNodeGraph.stop(); + await serverNodeGraph.destroy(); + await serverNodeConnectionManager.stop(); await serverNotificationsManager.stop(); await serverNotificationsManager.destroy(); await agentServer.stop(); @@ -289,48 +376,45 @@ describe('NodeConnection', () => { logger.debug('session readiness start'); const nodeConnection = await NodeConnection.createNodeConnection({ targetNodeId: targetNodeId, - targetHost: targetHost, + targetHost: localHost, targetPort: targetPort, - forwardProxy: clientFwdProxy, + fwdProxy: clientFwdProxy, keyManager: clientKeyManager, - connTimeout: 1000, + nodeConnectionManager: dummyNodeConnectionManager, + destroyCallback, logger: logger, + clientFactory: (args) => GRPCClientAgent.createGRPCClientAgent(args), }); - await expect(nodeConnection.destroy()).rejects.toThrow( - nodesErrors.ErrorNodeConnectionRunning, - ); + await nodeConnection.destroy(); // Should be a noop - await nodeConnection.start(); - await nodeConnection.stop(); await nodeConnection.destroy(); - await expect(nodeConnection.start()).rejects.toThrow( - nodesErrors.ErrorNodeConnectionDestroyed, - ); expect(() => { nodeConnection.getRootCertChain(); - }).toThrow(nodesErrors.ErrorNodeConnectionNotRunning); - await expect(async () => { - await nodeConnection.getClosestNodes( - IdInternal.fromString('abc'), - ); - }).rejects.toThrow(nodesErrors.ErrorNodeConnectionNotRunning); + }).toThrow(nodesErrors.ErrorNodeConnectionDestroyed); // Explicitly close the connection such that there's no interference in next test - await serverRevProxy.closeConnection(sourceHost, sourcePort); 
+ await serverRevProxy.closeConnection( + localHost, + clientFwdProxy.getEgressPort(), + ); }); test('connects to its target (via direct connection)', async () => { const conn = await NodeConnection.createNodeConnection({ targetNodeId: targetNodeId, - targetHost: targetHost, + targetHost: localHost, targetPort: targetPort, - forwardProxy: clientFwdProxy, + fwdProxy: clientFwdProxy, keyManager: clientKeyManager, + nodeConnectionManager: dummyNodeConnectionManager, + destroyCallback, logger: logger, + clientFactory: async (args) => + GRPCClientAgent.createGRPCClientAgent(args), }); // Because the connection will not have enough time to compose before we // attempt to acquire the connection info, we need to wait and poll it const connInfo = await poll( async () => { - return serverRevProxy.getConnectionInfoByEgress(sourceHost, sourcePort); + return serverRevProxy.getConnectionInfoByEgress(localHost, sourcePort); }, (e) => { if (e instanceof networkErrors.ErrorConnectionNotComposed) return false; @@ -342,138 +426,370 @@ describe('NodeConnection', () => { expect(connInfo).toMatchObject({ nodeId: sourceNodeId, certificates: expect.any(Array), - egressHost: sourceHost, + egressHost: localHost, egressPort: sourcePort, - ingressHost: targetHost, + ingressHost: localHost, ingressPort: targetPort, }); - await conn.stop(); await conn.destroy(); }); + test('grpcCall after connection drops', async () => { + let nodeConnection: NodeConnection | undefined; + let polykeyAgent: PolykeyAgent | undefined; + try { + polykeyAgent = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath: path.join(dataDir, 'PolykeyAgent3'), + logger: logger, + }); + // Have a nodeConnection try to connect to it + const killSelf = jest.fn(); + nodeConnection = await NodeConnection.createNodeConnection({ + connConnectTime: 500, + fwdProxy: clientFwdProxy, + keyManager: clientKeyManager, + logger: logger, + nodeConnectionManager: dummyNodeConnectionManager, + destroyCallback: killSelf, + targetHost: 
polykeyAgent.revProxy.getIngressHost(), + targetNodeId: polykeyAgent.keyManager.getNodeId(), + targetPort: polykeyAgent.revProxy.getIngressPort(), + clientFactory: (args) => GRPCClientAgent.createGRPCClientAgent(args), + }); + + // Resolves if the shutdownCallback was called + await polykeyAgent.stop(); + await polykeyAgent.destroy(); + + const client = nodeConnection.getClient(); + const echoMessage = new utilsPB.EchoMessage().setChallenge( + 'Hello world!', + ); + await expect(async () => client.echo(echoMessage)).rejects.toThrow( + agentErrors.ErrorAgentClientDestroyed, + ); + } finally { + await polykeyAgent?.stop(); + await polykeyAgent?.destroy(); + await nodeConnection?.destroy(); + } + }); test('fails to connect to target (times out)', async () => { await expect( NodeConnection.createNodeConnection({ targetNodeId: targetNodeId, targetHost: '128.0.0.1' as Host, targetPort: 12345 as Port, - connTimeout: 1000, - forwardProxy: clientFwdProxy, + connConnectTime: 300, + fwdProxy: clientFwdProxy, keyManager: clientKeyManager, + nodeConnectionManager: dummyNodeConnectionManager, + destroyCallback, logger: logger, + clientFactory: (args) => GRPCClientAgent.createGRPCClientAgent(args), }), ).rejects.toThrow(nodesErrors.ErrorNodeConnectionTimeout); }); - test('receives 20 closest local nodes from connected target', async () => { - const conn = await NodeConnection.createNodeConnection({ - targetNodeId: targetNodeId, - targetHost: targetHost, - targetPort: targetPort, - forwardProxy: clientFwdProxy, - keyManager: clientKeyManager, - logger: logger, - }); - // Await serverRevProxy.openConnection(sourceHost, sourcePort); - - // Now generate and add 20 nodes that will be close to this node ID - const addedClosestNodes: NodeData[] = []; - for (let i = 1; i < 101; i += 5) { - const closeNodeId = nodesTestUtils.generateNodeIdForBucket( - targetNodeId, - i, - ); - const nodeAddress = { - host: (i + '.' + i + '.' + i + '.' 
+ i) as Host, - port: i as Port, - }; - await serverNodeManager.setNode(closeNodeId, nodeAddress); - addedClosestNodes.push({ - id: closeNodeId, - address: nodeAddress, - distance: nodesUtils.calculateDistance(targetNodeId, closeNodeId), + test('getRootCertChain', async () => { + let nodeConnection: NodeConnection | undefined; + try { + nodeConnection = await NodeConnection.createNodeConnection({ + targetNodeId: targetNodeId, + targetHost: localHost, + targetPort: targetPort, + fwdProxy: clientFwdProxy, + keyManager: clientKeyManager, + nodeConnectionManager: dummyNodeConnectionManager, + destroyCallback, + logger: logger, + clientFactory: async (args) => + GRPCClientAgent.createGRPCClientAgent(args), }); - } - // Now create and add 10 more nodes that are far away from this node - for (let i = 1; i <= 10; i++) { - const farNodeId = nodeIdGenerator(i); - const nodeAddress = { - host: (i + '.' + i + '.' + i + '.' + i) as Host, - port: i as Port, - }; - await serverNodeManager.setNode(farNodeId, nodeAddress); - } - // Get the closest nodes to the target node - const closest = await conn.getClosestNodes(targetNodeId); - // Sort the received nodes on distance such that we can check its equality - // with addedClosestNodes - closest.sort(nodesUtils.sortByDistance); - expect(closest.length).toBe(20); - expect(closest).toEqual(addedClosestNodes); - - await conn.stop(); - await serverRevProxy.closeConnection( - clientFwdProxy.getEgressHost(), - clientFwdProxy.getEgressPort(), - ); - await conn.destroy(); + expect(nodeConnection.getRootCertChain()).toBeDefined(); + } finally { + await nodeConnection?.destroy(); + } }); - test.skip('scans the servers vaults', async () => { - // Const vault1 = await serverVaultManager.createVault('Vault1' as VaultName); - // const vault2 = await serverVaultManager.createVault('Vault2' as VaultName); - // const vault3 = await serverVaultManager.createVault('Vault3' as VaultName); - // const vault4 = await serverVaultManager.createVault('Vault4' 
as VaultName); - // const vault5 = await serverVaultManager.createVault('Vault5' as VaultName); - - await serverGestaltGraph.setNode({ - id: nodesUtils.encodeNodeId(sourceNodeId), - chain: {}, - }); - - const conn = await NodeConnection.createNodeConnection({ - targetNodeId: targetNodeId, - targetHost: targetHost, - targetPort: targetPort, - forwardProxy: clientFwdProxy, - keyManager: clientKeyManager, - logger: logger, - }); - await serverRevProxy.openConnection(sourceHost, sourcePort); - - const vaultList: string[] = []; - - let vaults = await conn.scanVaults(); - - expect(vaults.sort()).toStrictEqual(vaultList.sort()); - - fail('Not Implemented'); - // FIXME - // await serverVaultManager.setVaultPermissions(sourceNodeId, vault1.vaultId); - // await serverVaultManager.setVaultPermissions(sourceNodeId, vault2.vaultId); - // await serverVaultManager.setVaultPermissions(sourceNodeId, vault3.vaultId); - - vaults = await conn.scanVaults(); - - // VaultList.push(`${vault1.vaultName}\t${vault1.vaultId}`); - // vaultList.push(`${vault2.vaultName}\t${vault2.vaultId}`); - // vaultList.push(`${vault3.vaultName}\t${vault3.vaultId}`); - - expect(vaults.sort()).toStrictEqual(vaultList.sort()); - - // Await serverVaultManager.setVaultPermissions(sourceNodeId, vault4.vaultId); - // await serverVaultManager.setVaultPermissions(sourceNodeId, vault5.vaultId); + test('getExpectedPublicKey', async () => { + let nodeConnection: NodeConnection | undefined; + try { + nodeConnection = await NodeConnection.createNodeConnection({ + targetNodeId: targetNodeId, + targetHost: localHost, + targetPort: targetPort, + fwdProxy: clientFwdProxy, + keyManager: clientKeyManager, + nodeConnectionManager: dummyNodeConnectionManager, + destroyCallback, + logger: logger, + clientFactory: async (args) => + GRPCClientAgent.createGRPCClientAgent(args), + }); - vaults = await conn.scanVaults(); + const expectedPublicKey = + nodeConnection.getExpectedPublicKey(targetNodeId); + const publicKeyPem = 
serverKeyManager.getRootKeyPairPem().publicKey; + expect(expectedPublicKey).toBe(publicKeyPem); + } finally { + await nodeConnection?.destroy(); + } + }); + test('should call `killSelf if connection is closed based on bad certificate', async () => { + let revProxy: ReverseProxy | undefined; + let nodeConnection: NodeConnection | undefined; + let server; + try { + server = tcpServer(); + revProxy = new ReverseProxy({ + logger: logger, + }); + await server.serverListen(0); + await revProxy.start({ + serverHost: server.serverHost(), + serverPort: server.serverPort(), + ingressHost: '127.0.0.1' as Host, + tlsConfig, + }); + // Have a nodeConnection try to connect to it + const killSelf = jest.fn(); + const nodeConnectionP = NodeConnection.createNodeConnection({ + connConnectTime: 500, + fwdProxy: clientFwdProxy, + keyManager: clientKeyManager, + logger: logger, + nodeConnectionManager: dummyNodeConnectionManager, + destroyCallback: killSelf, + targetHost: revProxy.getIngressHost(), + targetNodeId: targetNodeId, + targetPort: revProxy.getIngressPort(), + clientFactory: (args) => GRPCClientAgent.createGRPCClientAgent(args), + }); - // VaultList.push(`${vault4.vaultName}\t${vault4.vaultId}`); - // vaultList.push(`${vault5.vaultName}\t${vault5.vaultId}`); + // Expecting the connection to fail + await expect(nodeConnectionP).rejects.toThrow( + nodesErrors.ErrorNodeConnectionTimeout, + ); + expect(killSelf.mock.calls.length).toBe(1); + // Resolves if the shutdownCallback was called + } finally { + await server?.serverClose(); + await revProxy?.stop(); + await nodeConnection?.destroy(); + } + }); + test('should call `killSelf if connection is closed before TLS is established', async () => { + let revProxy: ReverseProxy | undefined; + let server; + try { + server = tcpServer(false, true); + revProxy = new ReverseProxy({ + logger: logger, + }); + await server.serverListen(0); + await revProxy.start({ + serverHost: server.serverHost(), + serverPort: server.serverPort(), + 
ingressHost: '127.0.0.1' as Host, + tlsConfig, + }); + // Have a nodeConnection try to connect to it + const killSelf = jest.fn(); + const nodeConnectionP = NodeConnection.createNodeConnection({ + connConnectTime: 500, + fwdProxy: clientFwdProxy, + keyManager: clientKeyManager, + logger: logger, + nodeConnectionManager: dummyNodeConnectionManager, + destroyCallback: killSelf, + targetHost: revProxy.getIngressHost(), + targetNodeId: targetNodeId, + targetPort: revProxy.getIngressPort(), + clientFactory: (args) => GRPCClientAgent.createGRPCClientAgent(args), + }); - expect(vaults.sort()).toStrictEqual(vaultList.sort()); + // Expecting the connection to fail + await expect(nodeConnectionP).rejects.toThrow( + nodesErrors.ErrorNodeConnectionTimeout, + ); + expect(killSelf.mock.calls.length).toBe(1); + // Resolves if the shutdownCallback was called + } finally { + await server?.serverClose(); + await revProxy?.stop(); + } + }); + test('should call `killSelf if the Agent is stopped.', async () => { + let nodeConnection: NodeConnection | undefined; + let polykeyAgent: PolykeyAgent | undefined; + try { + polykeyAgent = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath: path.join(dataDir, 'PolykeyAgent3'), + logger: logger, + }); + // Have a nodeConnection try to connect to it + const killSelf = jest.fn(); + nodeConnection = await NodeConnection.createNodeConnection({ + connConnectTime: 500, + fwdProxy: clientFwdProxy, + keyManager: clientKeyManager, + logger: logger, + nodeConnectionManager: dummyNodeConnectionManager, + destroyCallback: killSelf, + targetHost: polykeyAgent.revProxy.getIngressHost(), + targetNodeId: polykeyAgent.keyManager.getNodeId(), + targetPort: polykeyAgent.revProxy.getIngressPort(), + clientFactory: (args) => GRPCClientAgent.createGRPCClientAgent(args), + }); - await conn.stop(); - await serverRevProxy.closeConnection( - clientFwdProxy.getEgressHost(), - clientFwdProxy.getEgressPort(), - ); - await conn.destroy(); + // Resolves if the 
shutdownCallback was called + await polykeyAgent.stop(); + await polykeyAgent.destroy(); + // Kill callback should've been called + expect(killSelf.mock.calls.length).toBe(1); + // Node connection should've destroyed itself in response to connection being destroyed + expect(nodeConnection[destroyed]).toBe(true); + } finally { + await polykeyAgent?.stop(); + await polykeyAgent?.destroy(); + await nodeConnection?.destroy(); + } }); + const options = ['exit', 'kill', 'sigkill']; + test.each(options)( + "should call `killSelf and throw if the server %s's during testUnaryFail", + async (option) => { + let nodeConnection: + | NodeConnection + | undefined; + let testRevProxy: ReverseProxy | undefined; + let testProcess: child_process.ChildProcessWithoutNullStreams | undefined; + try { + const testProcess = child_process.spawn('ts-node', [ + '--require', + 'tsconfig-paths/register', + 'tests/grpc/utils/testServer.ts', + ]); + const waitP = promise(); + testProcess.stdout.on('data', (data) => { + waitP.resolveP(data); + }); + // TestProcess.stderr.on('data', data => console.log(data.toString())); + + // Lets make a reverse proxy + testRevProxy = new ReverseProxy({ logger: logger }); + await testRevProxy.start({ + serverHost: '127.0.0.1' as Host, + serverPort: Number(await waitP.p) as Port, + ingressHost: '127.0.0.1' as Host, + tlsConfig: serverTLSConfig, + }); + + // Have a nodeConnection try to connect to it + const killSelfCheck = jest.fn(); + const killSelfP = promise(); + nodeConnection = await NodeConnection.createNodeConnection({ + connConnectTime: 2000, + fwdProxy: clientFwdProxy, + keyManager: clientKeyManager, + logger: logger, + nodeConnectionManager: dummyNodeConnectionManager, + destroyCallback: async () => { + await killSelfCheck(); + killSelfP.resolveP(null); + }, + targetNodeId: serverKeyManager.getNodeId(), + targetHost: testRevProxy.getIngressHost(), + targetPort: testRevProxy.getIngressPort(), + clientFactory: (args) => + 
grpcTestUtils.GRPCClientTest.createGRPCClientTest(args), + }); + + const client = nodeConnection.getClient(); + const echoMessage = new utilsPB.EchoMessage().setChallenge(option); + const testP = client.unaryFail(echoMessage); + // Should throw an error when it fails during call + await expect(testP).rejects.toThrow(GRPCErrors.ErrorGRPCClientCall); + // GRPCErrors.ErrorGRPCClientCall '14 UNAVAILABLE: Connection dropped' + + // Kill self callback should've been called + await killSelfP.p; + expect(killSelfCheck).toHaveBeenCalled(); + } finally { + testProcess?.kill(9); + await testRevProxy?.stop(); + await nodeConnection?.destroy(); + } + }, + ); + test.each(options)( + "should call `killSelf and throw if the server %s's during testStreamFail", + async (option) => { + let nodeConnection: + | NodeConnection + | undefined; + let testRevProxy: ReverseProxy | undefined; + let testProcess: child_process.ChildProcessWithoutNullStreams | undefined; + try { + const testProcess = child_process.spawn('ts-node', [ + '--require', + 'tsconfig-paths/register', + 'tests/grpc/utils/testServer.ts', + ]); + const waitP = promise(); + testProcess.stdout.on('data', (data) => { + waitP.resolveP(data); + }); + // TestProcess.stderr.on('data', data => console.log(data.toString())); + + // Lets make a reverse proxy + testRevProxy = new ReverseProxy({ logger: logger }); + await testRevProxy.start({ + serverHost: '127.0.0.1' as Host, + serverPort: Number(await waitP.p) as Port, + ingressHost: '127.0.0.1' as Host, + tlsConfig: serverTLSConfig, + }); + + // Have a nodeConnection try to connect to it + const killSelfCheck = jest.fn(); + const killSelfP = promise(); + nodeConnection = await NodeConnection.createNodeConnection({ + connConnectTime: 2000, + fwdProxy: clientFwdProxy, + keyManager: clientKeyManager, + logger: logger, + nodeConnectionManager: dummyNodeConnectionManager, + destroyCallback: async () => { + await killSelfCheck(); + killSelfP.resolveP(null); + }, + targetNodeId: 
serverKeyManager.getNodeId(), + targetHost: testRevProxy.getIngressHost(), + targetPort: testRevProxy.getIngressPort(), + clientFactory: (args) => + grpcTestUtils.GRPCClientTest.createGRPCClientTest(args), + }); + + const client = nodeConnection.getClient(); + const echoMessage = new utilsPB.EchoMessage().setChallenge(option); + const testGen = client.serverStreamFail(echoMessage); + // Should throw an error when it fails during call + await expect(async () => { + for await (const _ of testGen) { + // Do nothing, let it run out + } + }).rejects.toThrow(GRPCErrors.ErrorGRPCClientCall); + + // Kill self callback should've been called + await killSelfP.p; + expect(killSelfCheck).toHaveBeenCalled(); + } finally { + testProcess?.kill(9); + await testRevProxy?.stop(); + await nodeConnection?.destroy(); + } + }, + ); }); diff --git a/tests/nodes/NodeConnectionManager.general.test.ts b/tests/nodes/NodeConnectionManager.general.test.ts new file mode 100644 index 000000000..f0a5576f9 --- /dev/null +++ b/tests/nodes/NodeConnectionManager.general.test.ts @@ -0,0 +1,604 @@ +import type { NodeAddress, NodeData, NodeId, SeedNodes } from '@/nodes/types'; +import type { Host, Port } from '@/network/types'; +import fs from 'fs'; +import path from 'path'; +import os from 'os'; +import { DB } from '@matrixai/db'; +import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; +import { IdInternal } from '@matrixai/id'; +import PolykeyAgent from '@/PolykeyAgent'; +import KeyManager from '@/keys/KeyManager'; +import NodeGraph from '@/nodes/NodeGraph'; +import NodeConnectionManager from '@/nodes/NodeConnectionManager'; +import ForwardProxy from '@/network/ForwardProxy'; +import ReverseProxy from '@/network/ReverseProxy'; +import GRPCClientAgent from '@/agent/GRPCClientAgent'; +import * as nodesUtils from '@/nodes/utils'; +import * as nodesErrors from '@/nodes/errors'; +import * as keysUtils from '@/keys/utils'; +import * as grpcUtils from '@/grpc/utils'; +import * as nodesPB from 
'@/proto/js/polykey/v1/nodes/nodes_pb'; +import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; +import * as nodesTestUtils from './utils'; +import * as testUtils from '../utils'; + +describe(`${NodeConnectionManager.name} general test`, () => { + const logger = new Logger( + `${NodeConnectionManager.name} test`, + LogLevel.WARN, + [new StreamHandler()], + ); + grpcUtils.setLogger(logger.getChild('grpc')); + + const nodeConnectionManagerLogger = logger.getChild( + 'nodeConnectionManagerUT', + ); + // Constants + const password = 'password'; + const nodeId1 = IdInternal.create([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 5, + ]); + const nodeId2 = IdInternal.create([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 8, + ]); + const nodeId3 = IdInternal.create([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 124, + ]); + const dummyNodeId = nodesUtils.decodeNodeId( + 'vi3et1hrpv2m2lrplcm7cu913kr45v51cak54vm68anlbvuf83ra0', + )!; + + const serverHost = '127.0.0.1' as Host; + const serverPort = 55555 as Port; + + const dummySeedNodes: SeedNodes = {}; + dummySeedNodes[nodesUtils.encodeNodeId(nodeId1)] = { + host: serverHost, + port: serverPort, + }; + dummySeedNodes[nodesUtils.encodeNodeId(nodeId2)] = { + host: serverHost, + port: serverPort, + }; + dummySeedNodes[nodesUtils.encodeNodeId(nodeId3)] = { + host: serverHost, + port: serverPort, + }; + + // + let dataDir: string; + let dataDir2: string; + let keyManager: KeyManager; + let db: DB; + let fwdProxy: ForwardProxy; + let revProxy: ReverseProxy; + let nodeGraph: NodeGraph; + + let remoteNode1: PolykeyAgent; + let remoteNode2: PolykeyAgent; + let remoteNodeId1: NodeId; + let remoteNodeId2: NodeId; + + // Utils functions + const nodeIdGenerator = (number: number) => { + const idArray = new Uint8Array([ + 223, + 24, + 34, + 40, + 46, + 217, + 4, + 71, + 103, 
+ 71, + 59, + 123, + 143, + 187, + 9, + 29, + 157, + 41, + 131, + 44, + 68, + 160, + 79, + 127, + 137, + 154, + 221, + 86, + 157, + 23, + 77, + number, + ]); + return IdInternal.create(idArray); + }; + + const mockedGenerateDeterministicKeyPair = jest.spyOn( + keysUtils, + 'generateDeterministicKeyPair', + ); + + beforeAll(async () => { + mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { + return keysUtils.generateKeyPair(bits); + }); + + dataDir2 = await fs.promises.mkdtemp( + path.join(os.tmpdir(), 'polykey-test-'), + ); + // Creating remotes, they just exist to start connections or fail them if needed + remoteNode1 = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath: path.join(dataDir2, 'remoteNode1'), + logger: logger.getChild('remoteNode1'), + }); + remoteNodeId1 = remoteNode1.keyManager.getNodeId(); + remoteNode2 = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath: path.join(dataDir2, 'remoteNode2'), + logger: logger.getChild('remoteNode2'), + }); + remoteNodeId2 = remoteNode2.keyManager.getNodeId(); + }); + + afterAll(async () => { + await remoteNode1.stop(); + await remoteNode1.destroy(); + await remoteNode2.stop(); + await remoteNode2.destroy(); + await fs.promises.rm(dataDir2, { force: true, recursive: true }); + }); + + beforeEach(async () => { + dataDir = await fs.promises.mkdtemp( + path.join(os.tmpdir(), 'polykey-test-'), + ); + const keysPath = path.join(dataDir, 'keys'); + keyManager = await KeyManager.createKeyManager({ + password, + keysPath, + logger: logger.getChild('keyManager'), + }); + const dbPath = path.join(dataDir, 'db'); + db = await DB.createDB({ + dbPath, + logger: nodeConnectionManagerLogger, + crypto: { + key: keyManager.dbKey, + ops: { + encrypt: keysUtils.encryptWithKey, + decrypt: keysUtils.decryptWithKey, + }, + }, + }); + nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger: logger.getChild('NodeGraph'), + }); + const tlsConfig = { + keyPrivatePem: 
keyManager.getRootKeyPairPem().privateKey, + certChainPem: keysUtils.certToPem(keyManager.getRootCert()), + }; + fwdProxy = new ForwardProxy({ + authToken: 'auth', + logger: logger.getChild('fwdProxy'), + }); + await fwdProxy.start({ + tlsConfig, + }); + revProxy = new ReverseProxy({ + logger: logger.getChild('revProxy'), + }); + await revProxy.start({ + serverHost, + serverPort, + tlsConfig, + }); + await nodeGraph.setNode(remoteNodeId1, { + host: remoteNode1.revProxy.getIngressHost(), + port: remoteNode1.revProxy.getIngressPort(), + }); + await nodeGraph.setNode(remoteNodeId2, { + host: remoteNode2.revProxy.getIngressHost(), + port: remoteNode2.revProxy.getIngressPort(), + }); + }); + + afterEach(async () => { + await nodeGraph.stop(); + await nodeGraph.destroy(); + await db.stop(); + await db.destroy(); + await keyManager.stop(); + await keyManager.destroy(); + await revProxy.stop(); + await fwdProxy.stop(); + }); + + // General functionality + test('finds node (local)', async () => { + // NodeConnectionManager under test + const nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + logger: nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start(); + try { + // Case 1: node already exists in the local node graph (no contact required) + const nodeId = nodeId1; + const nodeAddress: NodeAddress = { + host: '127.0.0.1' as Host, + port: 11111 as Port, + }; + await nodeGraph.setNode(nodeId, nodeAddress); + // Expect no error thrown + const findNodePromise = nodeConnectionManager.findNode(nodeId); + await expect(findNodePromise).resolves.not.toThrowError(); + expect(await findNodePromise).toStrictEqual(nodeAddress); + } finally { + await nodeConnectionManager.stop(); + } + }); + test( + 'finds node (contacts remote node)', + async () => { + // NodeConnectionManager under test + const nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + logger: 
nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start(); + try { + // Case 2: node can be found on the remote node + const nodeId = nodeId1; + const nodeAddress: NodeAddress = { + host: '127.0.0.1' as Host, + port: 11111 as Port, + }; + const server = await PolykeyAgent.createPolykeyAgent({ + nodePath: path.join(dataDir, 'node2'), + password, + logger: nodeConnectionManagerLogger, + }); + await nodeGraph.setNode(server.keyManager.getNodeId(), { + host: server.revProxy.getIngressHost(), + port: server.revProxy.getIngressPort(), + } as NodeAddress); + await server.nodeGraph.setNode(nodeId, nodeAddress); + const foundAddress2 = await nodeConnectionManager.findNode(nodeId); + expect(foundAddress2).toStrictEqual(nodeAddress); + + await server.stop(); + } finally { + await nodeConnectionManager.stop(); + } + }, + global.polykeyStartupTimeout, + ); + test( + 'cannot find node (contacts remote node)', + async () => { + // NodeConnectionManager under test + const nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + logger: nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start(); + try { + // Case 3: node exhausts all contacts and cannot find node + const nodeId = nodeId1; + const server = await PolykeyAgent.createPolykeyAgent({ + nodePath: path.join(dataDir, 'node3'), + password, + logger: nodeConnectionManagerLogger, + }); + await nodeGraph.setNode(server.keyManager.getNodeId(), { + host: server.revProxy.getIngressHost(), + port: server.revProxy.getIngressPort(), + } as NodeAddress); + // Add a dummy node to the server node graph database + // Server will not be able to connect to this node (the only node in its + // database), and will therefore not be able to locate the node + await server.nodeGraph.setNode(dummyNodeId, { + host: '127.0.0.2' as Host, + port: 22222 as Port, + } as NodeAddress); + // Un-findable Node cannot be found + await expect(() => + 
nodeConnectionManager.findNode(nodeId), + ).rejects.toThrowError(nodesErrors.ErrorNodeGraphNodeIdNotFound); + + await server.stop(); + } finally { + await nodeConnectionManager.stop(); + } + }, + global.failedConnectionTimeout * 2, + ); + test('finds a single closest node', async () => { + // NodeConnectionManager under test + const nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + logger: nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start(); + try { + // New node added + const newNode2Id = nodeId1; + const newNode2Address = { host: '227.1.1.1', port: 4567 } as NodeAddress; + await nodeGraph.setNode(newNode2Id, newNode2Address); + + // Find the closest nodes to some node, NODEID3 + const closest = await nodeConnectionManager.getClosestLocalNodes(nodeId3); + expect(closest).toContainEqual({ + id: newNode2Id, + distance: 121n, + address: { host: '227.1.1.1', port: 4567 }, + }); + } finally { + await nodeConnectionManager.stop(); + } + }); + test('finds 3 closest nodes', async () => { + const nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + logger: nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start(); + try { + // Add 3 nodes + await nodeGraph.setNode(nodeId1, { + host: '2.2.2.2', + port: 2222, + } as NodeAddress); + await nodeGraph.setNode(nodeId2, { + host: '3.3.3.3', + port: 3333, + } as NodeAddress); + await nodeGraph.setNode(nodeId3, { + host: '4.4.4.4', + port: 4444, + } as NodeAddress); + + // Find the closest nodes to some node, NODEID4 + const closest = await nodeConnectionManager.getClosestLocalNodes(nodeId3); + expect(closest.length).toBe(5); + expect(closest).toContainEqual({ + id: nodeId3, + distance: 0n, + address: { host: '4.4.4.4', port: 4444 }, + }); + expect(closest).toContainEqual({ + id: nodeId2, + distance: 116n, + address: { host: '3.3.3.3', port: 3333 }, + }); + expect(closest).toContainEqual({ + 
id: nodeId1, + distance: 121n, + address: { host: '2.2.2.2', port: 2222 }, + }); + } finally { + await nodeConnectionManager.stop(); + } + }); + test('finds the 20 closest nodes', async () => { + const nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + logger: nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start(); + try { + // Generate the node ID to find the closest nodes to (in bucket 100) + const nodeId = keyManager.getNodeId(); + const nodeIdToFind = nodesTestUtils.generateNodeIdForBucket(nodeId, 100); + // Now generate and add 20 nodes that will be close to this node ID + const addedClosestNodes: NodeData[] = []; + for (let i = 1; i < 101; i += 5) { + const closeNodeId = nodesTestUtils.generateNodeIdForBucket( + nodeIdToFind, + i, + ); + const nodeAddress = { + host: (i + '.' + i + '.' + i + '.' + i) as Host, + port: i as Port, + }; + await nodeGraph.setNode(closeNodeId, nodeAddress); + addedClosestNodes.push({ + id: closeNodeId, + address: nodeAddress, + distance: nodesUtils.calculateDistance(nodeIdToFind, closeNodeId), + }); + } + // Now create and add 10 more nodes that are far away from this node + for (let i = 1; i <= 10; i++) { + const farNodeId = nodeIdGenerator(i); + const nodeAddress = { + host: `${i}.${i}.${i}.${i}` as Host, + port: i as Port, + }; + await nodeGraph.setNode(farNodeId, nodeAddress); + } + + // Find the closest nodes to the original generated node ID + const closest = await nodeConnectionManager.getClosestLocalNodes( + nodeIdToFind, + ); + // We should always only receive k nodes + expect(closest.length).toBe(nodeGraph.maxNodesPerBucket); + // Retrieved closest nodes should be exactly the same as the ones we added + expect(closest).toEqual(addedClosestNodes); + } finally { + await nodeConnectionManager.stop(); + } + }); + test('receives 20 closest local nodes from connected target', async () => { + let serverPKAgent: PolykeyAgent | undefined; + let 
nodeConnectionManager: NodeConnectionManager | undefined; + try { + serverPKAgent = await PolykeyAgent.createPolykeyAgent({ + password, + logger: logger.getChild('serverPKAgent'), + nodePath: path.join(dataDir, 'serverPKAgent'), + }); + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + logger: logger.getChild('NodeConnectionManager'), + }); + + await nodeConnectionManager.start(); + const targetNodeId = serverPKAgent.keyManager.getNodeId(); + await nodeGraph.setNode(targetNodeId, { + host: serverPKAgent.revProxy.getIngressHost(), + port: serverPKAgent.revProxy.getIngressPort(), + }); + + // Now generate and add 20 nodes that will be close to this node ID + const addedClosestNodes: NodeData[] = []; + for (let i = 1; i < 101; i += 5) { + const closeNodeId = nodesTestUtils.generateNodeIdForBucket( + targetNodeId, + i, + ); + const nodeAddress = { + host: (i + '.' + i + '.' + i + '.' + i) as Host, + port: i as Port, + }; + await serverPKAgent.nodeGraph.setNode(closeNodeId, nodeAddress); + addedClosestNodes.push({ + id: closeNodeId, + address: nodeAddress, + distance: nodesUtils.calculateDistance(targetNodeId, closeNodeId), + }); + } + // Now create and add 10 more nodes that are far away from this node + for (let i = 1; i <= 10; i++) { + const farNodeId = nodeIdGenerator(i); + const nodeAddress = { + host: `${i}.${i}.${i}.${i}`, + port: i, + } as NodeAddress; + await serverPKAgent.nodeGraph.setNode(farNodeId, nodeAddress); + } + + // Get the closest nodes to the target node + const closest = await nodeConnectionManager.getRemoteNodeClosestNodes( + targetNodeId, + targetNodeId, + ); + // Sort the received nodes on distance such that we can check its equality + // with addedClosestNodes + closest.sort(nodesUtils.sortByDistance); + expect(closest.length).toBe(20); + expect(closest).toEqual(addedClosestNodes); + } finally { + await serverPKAgent?.stop(); + await serverPKAgent?.destroy(); + await 
nodeConnectionManager?.stop(); + } + }); + test('sendHolePunchMessage', async () => { + // NodeConnectionManager under test + let nodeConnectionManager: NodeConnectionManager | undefined; + const mockedNodesHolePunchMessageSend = jest.spyOn( + GRPCClientAgent.prototype, + 'nodesHolePunchMessageSend', + ); + mockedNodesHolePunchMessageSend.mockResolvedValue( + new utilsPB.EmptyMessage(), + ); + try { + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + logger: nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start(); + // To test this we need to... + // 2. call relayHolePunchMessage + // 3. check that the relevant call was made. + const sourceNodeId = testUtils.generateRandomNodeId(); + const targetNodeId = testUtils.generateRandomNodeId(); + await nodeConnectionManager.sendHolePunchMessage( + remoteNodeId1, + sourceNodeId, + targetNodeId, + '', + Buffer.alloc(0), + ); + + expect(mockedNodesHolePunchMessageSend).toHaveBeenCalled(); + } finally { + mockedNodesHolePunchMessageSend.mockRestore(); + await nodeConnectionManager?.stop(); + } + }); + test('relayHolePunchMessage', async () => { + // NodeConnectionManager under test + let nodeConnectionManager: NodeConnectionManager | undefined; + const mockedNodesHolePunchMessageSend = jest.spyOn( + GRPCClientAgent.prototype, + 'nodesHolePunchMessageSend', + ); + mockedNodesHolePunchMessageSend.mockResolvedValue( + new utilsPB.EmptyMessage(), + ); + try { + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + logger: nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start(); + // To test this we need to... + // 2. call relayHolePunchMessage + // 3. check that the relevant call was made. 
+ const sourceNodeId = testUtils.generateRandomNodeId(); + const relayMessage = new nodesPB.Relay(); + relayMessage.setSrcId(nodesUtils.encodeNodeId(sourceNodeId)); + relayMessage.setTargetId(nodesUtils.encodeNodeId(remoteNodeId1)); + relayMessage.setSignature(''); + relayMessage.setEgressAddress(''); + await nodeConnectionManager.relayHolePunchMessage(relayMessage); + + expect(mockedNodesHolePunchMessageSend).toHaveBeenCalled(); + } finally { + mockedNodesHolePunchMessageSend.mockRestore(); + await nodeConnectionManager?.stop(); + } + }); +}); diff --git a/tests/nodes/NodeConnectionManager.lifecycle.test.ts b/tests/nodes/NodeConnectionManager.lifecycle.test.ts new file mode 100644 index 000000000..c3f0378a8 --- /dev/null +++ b/tests/nodes/NodeConnectionManager.lifecycle.test.ts @@ -0,0 +1,538 @@ +import type { NodeId, NodeIdString, SeedNodes } from '@/nodes/types'; +import type { Host, Port } from '@/network/types'; +import fs from 'fs'; +import path from 'path'; +import os from 'os'; +import { DB } from '@matrixai/db'; +import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; +import { IdInternal } from '@matrixai/id'; +import PolykeyAgent from '@/PolykeyAgent'; +import KeyManager from '@/keys/KeyManager'; +import NodeGraph from '@/nodes/NodeGraph'; +import NodeConnectionManager from '@/nodes/NodeConnectionManager'; +import ForwardProxy from '@/network/ForwardProxy'; +import ReverseProxy from '@/network/ReverseProxy'; +import * as nodesUtils from '@/nodes/utils'; +import * as nodesErrors from '@/nodes/errors'; +import * as keysUtils from '@/keys/utils'; +import * as grpcUtils from '@/grpc/utils'; +import { withF } from '@/utils'; + +describe(`${NodeConnectionManager.name} lifecycle test`, () => { + const logger = new Logger( + `${NodeConnectionManager.name} test`, + LogLevel.WARN, + [new StreamHandler()], + ); + grpcUtils.setLogger(logger.getChild('grpc')); + + const nodeConnectionManagerLogger = logger.getChild( + 'nodeConnectionManagerUT', + ); + // 
Constants + const password = 'password'; + const nodeId1 = IdInternal.create([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 5, + ]); + const nodeId2 = IdInternal.create([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 8, + ]); + const nodeId3 = IdInternal.create([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 124, + ]); + const dummyNodeId = nodesUtils.decodeNodeId( + 'vi3et1hrpv2m2lrplcm7cu913kr45v51cak54vm68anlbvuf83ra0', + )!; + + const serverHost = '127.0.0.1' as Host; + const serverPort = 55555 as Port; + + const dummySeedNodes: SeedNodes = {}; + dummySeedNodes[nodesUtils.encodeNodeId(nodeId1)] = { + host: serverHost, + port: serverPort, + }; + dummySeedNodes[nodesUtils.encodeNodeId(nodeId2)] = { + host: serverHost, + port: serverPort, + }; + dummySeedNodes[nodesUtils.encodeNodeId(nodeId3)] = { + host: serverHost, + port: serverPort, + }; + + const nop = async () => {}; + + let dataDir: string; + let dataDir2: string; + let keyManager: KeyManager; + let db: DB; + let fwdProxy: ForwardProxy; + let revProxy: ReverseProxy; + let nodeGraph: NodeGraph; + + let remoteNode1: PolykeyAgent; + let remoteNode2: PolykeyAgent; + let remoteNodeId1: NodeId; + let remoteNodeId2: NodeId; + + const mockedGenerateDeterministicKeyPair = jest.spyOn( + keysUtils, + 'generateDeterministicKeyPair', + ); + + beforeAll(async () => { + mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { + return keysUtils.generateKeyPair(bits); + }); + + dataDir2 = await fs.promises.mkdtemp( + path.join(os.tmpdir(), 'polykey-test-'), + ); + // Creating remotes, they just exist to start connections or fail them if needed + remoteNode1 = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath: path.join(dataDir2, 'remoteNode1'), + logger: logger.getChild('remoteNode1'), + }); + remoteNodeId1 = remoteNode1.keyManager.getNodeId(); + 
remoteNode2 = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath: path.join(dataDir2, 'remoteNode2'), + logger: logger.getChild('remoteNode2'), + }); + remoteNodeId2 = remoteNode2.keyManager.getNodeId(); + }); + + afterAll(async () => { + await remoteNode1.stop(); + await remoteNode1.destroy(); + await remoteNode2.stop(); + await remoteNode2.destroy(); + await fs.promises.rm(dataDir2, { force: true, recursive: true }); + }); + + beforeEach(async () => { + dataDir = await fs.promises.mkdtemp( + path.join(os.tmpdir(), 'polykey-test-'), + ); + const keysPath = path.join(dataDir, 'keys'); + keyManager = await KeyManager.createKeyManager({ + password, + keysPath, + logger: logger.getChild('keyManager'), + }); + const dbPath = path.join(dataDir, 'db'); + db = await DB.createDB({ + dbPath, + logger: nodeConnectionManagerLogger, + crypto: { + key: keyManager.dbKey, + ops: { + encrypt: keysUtils.encryptWithKey, + decrypt: keysUtils.decryptWithKey, + }, + }, + }); + nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger: logger.getChild('NodeGraph'), + }); + const tlsConfig = { + keyPrivatePem: keyManager.getRootKeyPairPem().privateKey, + certChainPem: keysUtils.certToPem(keyManager.getRootCert()), + }; + fwdProxy = new ForwardProxy({ + authToken: 'auth', + logger: logger.getChild('fwdProxy'), + }); + await fwdProxy.start({ + tlsConfig, + }); + revProxy = new ReverseProxy({ + logger: logger.getChild('revProxy'), + }); + await revProxy.start({ + serverHost, + serverPort, + tlsConfig, + }); + await nodeGraph.setNode(remoteNodeId1, { + host: remoteNode1.revProxy.getIngressHost(), + port: remoteNode1.revProxy.getIngressPort(), + }); + await nodeGraph.setNode(remoteNodeId2, { + host: remoteNode2.revProxy.getIngressHost(), + port: remoteNode2.revProxy.getIngressPort(), + }); + }); + + afterEach(async () => { + await nodeGraph.stop(); + await nodeGraph.destroy(); + await db.stop(); + await db.destroy(); + await keyManager.stop(); + await 
keyManager.destroy(); + await revProxy.stop(); + await fwdProxy.stop(); + }); + + // Connection life cycle + test('should create connection', async () => { + // NodeConnectionManager under test + let nodeConnectionManager: NodeConnectionManager | undefined; + try { + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + logger: nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start(); + // @ts-ignore: kidnap connections + const connections = nodeConnectionManager.connections; + const initialConnLock = connections.get( + remoteNodeId1.toString() as NodeIdString, + ); + expect(initialConnLock).toBeUndefined(); + await nodeConnectionManager.withConnF(remoteNodeId1, nop); + const finalConnLock = connections.get( + remoteNodeId1.toString() as NodeIdString, + ); + // Check entry is in map and lock is released + expect(finalConnLock).toBeDefined(); + expect(finalConnLock?.lock.isLocked()).toBeFalsy(); + } finally { + await nodeConnectionManager?.stop(); + } + }); + test('acquireConnection should create connection', async () => { + let nodeConnectionManager: NodeConnectionManager | undefined; + try { + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + logger: nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start(); + // @ts-ignore: kidnap connections + const connections = nodeConnectionManager.connections; + const initialConnLock = connections.get( + remoteNodeId1.toString() as NodeIdString, + ); + expect(initialConnLock).toBeUndefined(); + await withF( + [await nodeConnectionManager.acquireConnection(remoteNodeId1)], + async (conn) => { + expect(conn).toBeDefined(); + const intermediaryConnLock = connections.get( + remoteNodeId1.toString() as NodeIdString, + ); + expect(intermediaryConnLock).toBeDefined(); + expect( + // @ts-ignore get the protected readersLock + intermediaryConnLock?.lock.readersLock.isLocked(), + ).toBeTruthy(); + // 
@ts-ignore get the protected writersLock + expect(intermediaryConnLock?.lock.writersLock.isLocked()).toBeFalsy(); + }, + ); + const finalConnLock = connections.get( + remoteNodeId1.toString() as NodeIdString, + ); + expect(finalConnLock).toBeDefined(); + // Neither write nor read lock should be locked now + expect(finalConnLock?.lock.isLocked()).toBeFalsy(); + } finally { + await nodeConnectionManager?.stop(); + } + }); + test('withConnF should create connection and hold lock', async () => { + // NodeConnectionManager under test + let nodeConnectionManager: NodeConnectionManager | undefined; + try { + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + logger: nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start(); + // @ts-ignore: kidnap connections + const connections = nodeConnectionManager.connections; + const initialConnLock = connections.get( + remoteNodeId1.toString() as NodeIdString, + ); + expect(initialConnLock).toBeUndefined(); + await nodeConnectionManager.withConnF(remoteNodeId1, async () => { + expect( + connections + .get(remoteNodeId1.toString() as NodeIdString) + ?.lock?.isLocked(), + ).toBe(true); + }); + const finalConnLock = connections.get( + remoteNodeId1.toString() as NodeIdString, + ); + // Check entry is in map and lock is released + expect(finalConnLock).toBeDefined(); + expect(finalConnLock?.lock.isLocked()).toBeFalsy(); + } finally { + await nodeConnectionManager?.stop(); + } + }); + test('withConnG should create connection and hold lock', async () => { + // NodeConnectionManager under test + let nodeConnectionManager: NodeConnectionManager | undefined; + try { + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + logger: nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start(); + + // @ts-ignore: kidnap connections + const connections = nodeConnectionManager.connections; + const initialConnLock = 
connections.get( + remoteNodeId1.toString() as NodeIdString, + ); + expect(initialConnLock).toBeUndefined(); + + const testGenerator = async function* () { + for (let i = 0; i < 10; i++) { + yield 'HelloWorld ' + i; + } + }; + + // Creating the generator + const gen = await nodeConnectionManager.withConnG( + remoteNodeId1, + async function* () { + yield* testGenerator(); + }, + ); + + // Connection is not created yet, no locking applied + expect( + connections.get(remoteNodeId1.toString() as NodeIdString), + ).not.toBeDefined(); + + // Iterating over generator + for await (const _ of gen) { + // Should be locked for duration of stream + expect( + connections + .get(remoteNodeId1.toString() as NodeIdString) + ?.lock?.isLocked(), + ).toBe(true); + } + // Unlocked after stream finished + expect( + connections + .get(remoteNodeId1.toString() as NodeIdString) + ?.lock?.isLocked(), + ).toBe(false); + + const finalConnLock = connections.get( + remoteNodeId1.toString() as NodeIdString, + ); + // Check entry is in map and lock is released + expect(finalConnLock).toBeDefined(); + expect(finalConnLock?.lock.isLocked()).toBe(false); + } finally { + await nodeConnectionManager?.stop(); + } + }); + test('should fail to create connection to offline node', async () => { + let nodeConnectionManager: NodeConnectionManager | undefined; + try { + // NodeConnectionManager under test + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + connConnectTime: 500, + logger: nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start(); + // Add the dummy node + await nodeGraph.setNode(dummyNodeId, { + host: '125.0.0.1' as Host, + port: 55555 as Port, + }); + // @ts-ignore: kidnap connection map + const connections = nodeConnectionManager.connections; + expect(connections.size).toBe(0); + + await expect(() => + nodeConnectionManager?.withConnF(dummyNodeId, nop), + ).rejects.toThrow(nodesErrors.ErrorNodeConnectionTimeout); + 
expect(connections.size).toBe(1); + const connLock = connections.get(dummyNodeId.toString() as NodeIdString); + // There should still be an entry in the connection map, but it should + // only contain a lock - no connection + expect(connLock).toBeDefined(); + expect(connLock?.lock).toBeDefined(); + expect(connLock?.connection).toBeUndefined(); + + // Undo the initial dummy node add + } finally { + await nodeConnectionManager?.stop(); + } + }); + test('connection should persist', async () => { + let nodeConnectionManager: NodeConnectionManager | undefined; + try { + // NodeConnectionManager under test + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + logger: nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start(); + // @ts-ignore accessing protected NodeConnectionMap + const connections = nodeConnectionManager.connections; + expect(connections.size).toBe(0); + const initialConnLock = connections.get( + remoteNodeId1.toString() as NodeIdString, + ); + expect(initialConnLock).toBeUndefined(); + await nodeConnectionManager.withConnF(remoteNodeId1, nop); + // Check we only have this single connection + expect(connections.size).toBe(1); + await nodeConnectionManager.withConnF(remoteNodeId1, nop); + // Check we still only have this single connection + expect(connections.size).toBe(1); + } finally { + await nodeConnectionManager?.stop(); + } + }); + test('should create 1 connection with concurrent creates.', async () => { + let nodeConnectionManager: NodeConnectionManager | undefined; + try { + // NodeConnectionManager under test + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + logger: nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start(); + // @ts-ignore accessing protected NodeConnectionMap + const connections = nodeConnectionManager.connections; + expect(connections.size).toBe(0); + const initialConnLock = 
connections.get( + remoteNodeId1.toString() as NodeIdString, + ); + expect(initialConnLock).toBeUndefined(); + // Concurrently create connection to same target + await Promise.all([ + nodeConnectionManager.withConnF(remoteNodeId1, nop), + nodeConnectionManager.withConnF(remoteNodeId1, nop), + ]); + // Check only 1 connection exists + expect(connections.size).toBe(1); + const finalConnLock = connections.get( + remoteNodeId1.toString() as NodeIdString, + ); + // Check entry is in map and lock is released + expect(finalConnLock).toBeDefined(); + expect(finalConnLock?.lock.isLocked()).toBeFalsy(); + } finally { + await nodeConnectionManager?.stop(); + } + }); + test('should destroy a connection', async () => { + // NodeConnectionManager under test + let nodeConnectionManager: NodeConnectionManager | undefined; + try { + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + logger: nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start(); + // @ts-ignore: kidnap connections + const connections = nodeConnectionManager.connections; + const initialConnLock = connections.get( + remoteNodeId1.toString() as NodeIdString, + ); + expect(initialConnLock).toBeUndefined(); + await nodeConnectionManager.withConnF(remoteNodeId1, nop); + const midConnAndLock = connections.get( + remoteNodeId1.toString() as NodeIdString, + ); + // Check entry is in map and lock is released + expect(midConnAndLock).toBeDefined(); + expect(midConnAndLock?.lock.isLocked()).toBeFalsy(); + + // Destroying the connection + // @ts-ignore: private method + await nodeConnectionManager.destroyConnection(remoteNodeId1); + const finalConnAndLock = connections.get( + remoteNodeId1.toString() as NodeIdString, + ); + expect(finalConnAndLock).toBeDefined(); + expect(finalConnAndLock?.lock.isLocked()).toBeFalsy(); + expect(finalConnAndLock?.connection).toBeUndefined(); + } finally { + await nodeConnectionManager?.stop(); + } + }); + test('stopping 
should destroy all connections', async () => { + let nodeConnectionManager: NodeConnectionManager | undefined; + try { + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + logger: nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start(); + // Do testing + // set up connections + await nodeConnectionManager.withConnF(remoteNodeId1, nop); + await nodeConnectionManager.withConnF(remoteNodeId2, nop); + + // @ts-ignore: Hijack connection map + const connections = nodeConnectionManager.connections; + expect(connections.size).toBe(2); + for (const [, connAndLock] of connections) { + expect(connAndLock.connection).toBeDefined(); + expect(connAndLock.timer).toBeDefined(); + expect(connAndLock.lock).toBeDefined(); + } + + // Destroying connections + await nodeConnectionManager.stop(); + expect(connections.size).toBe(2); + for (const [, connAndLock] of connections) { + expect(connAndLock.connection).toBeUndefined(); + expect(connAndLock.timer).toBeUndefined(); + expect(connAndLock.lock).toBeDefined(); + } + } finally { + // Clean up + await nodeConnectionManager?.stop(); + } + }); +}); diff --git a/tests/nodes/NodeConnectionManager.seednodes.test.ts b/tests/nodes/NodeConnectionManager.seednodes.test.ts new file mode 100644 index 000000000..974f3dd48 --- /dev/null +++ b/tests/nodes/NodeConnectionManager.seednodes.test.ts @@ -0,0 +1,306 @@ +import type { NodeId, SeedNodes } from '@/nodes/types'; +import type { Host, Port } from '@/network/types'; +import fs from 'fs'; +import path from 'path'; +import os from 'os'; +import { DB } from '@matrixai/db'; +import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; +import { IdInternal } from '@matrixai/id'; +import PolykeyAgent from '@/PolykeyAgent'; +import KeyManager from '@/keys/KeyManager'; +import NodeGraph from '@/nodes/NodeGraph'; +import NodeConnectionManager from '@/nodes/NodeConnectionManager'; +import ForwardProxy from '@/network/ForwardProxy'; 
+import ReverseProxy from '@/network/ReverseProxy'; +import * as nodesUtils from '@/nodes/utils'; +import * as keysUtils from '@/keys/utils'; +import * as grpcUtils from '@/grpc/utils'; + +describe(`${NodeConnectionManager.name} seed nodes test`, () => { + const logger = new Logger( + `${NodeConnectionManager.name} test`, + LogLevel.WARN, + [new StreamHandler()], + ); + grpcUtils.setLogger(logger.getChild('grpc')); + + // Constants + const password = 'password'; + const nodeId1 = IdInternal.create([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 5, + ]); + const nodeId2 = IdInternal.create([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 8, + ]); + const nodeId3 = IdInternal.create([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 124, + ]); + const dummyNodeId = nodesUtils.decodeNodeId( + 'vi3et1hrpv2m2lrplcm7cu913kr45v51cak54vm68anlbvuf83ra0', + )!; + + const serverHost = '127.0.0.1' as Host; + const serverPort = 55555 as Port; + + const dummySeedNodes: SeedNodes = {}; + dummySeedNodes[nodesUtils.encodeNodeId(nodeId1)] = { + host: serverHost, + port: serverPort, + }; + dummySeedNodes[nodesUtils.encodeNodeId(nodeId2)] = { + host: serverHost, + port: serverPort, + }; + dummySeedNodes[nodesUtils.encodeNodeId(nodeId3)] = { + host: serverHost, + port: serverPort, + }; + + let dataDir: string; + let dataDir2: string; + let keyManager: KeyManager; + let db: DB; + let fwdProxy: ForwardProxy; + let revProxy: ReverseProxy; + let nodeGraph: NodeGraph; + + let remoteNode1: PolykeyAgent; + let remoteNode2: PolykeyAgent; + let remoteNodeId1: NodeId; + let remoteNodeId2: NodeId; + + const mockedGenerateDeterministicKeyPair = jest.spyOn( + keysUtils, + 'generateDeterministicKeyPair', + ); + + beforeAll(async () => { + mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { + return keysUtils.generateKeyPair(bits); + }); 
+ + dataDir2 = await fs.promises.mkdtemp( + path.join(os.tmpdir(), 'polykey-test-'), + ); + // Creating remotes, they just exist to start connections or fail them if needed + remoteNode1 = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath: path.join(dataDir2, 'remoteNode1'), + logger: logger.getChild('remoteNode1'), + }); + remoteNodeId1 = remoteNode1.keyManager.getNodeId(); + remoteNode2 = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath: path.join(dataDir2, 'remoteNode2'), + logger: logger.getChild('remoteNode2'), + }); + remoteNodeId2 = remoteNode2.keyManager.getNodeId(); + }); + + afterAll(async () => { + await remoteNode1.stop(); + await remoteNode1.destroy(); + await remoteNode2.stop(); + await remoteNode2.destroy(); + await fs.promises.rm(dataDir2, { force: true, recursive: true }); + }); + + beforeEach(async () => { + dataDir = await fs.promises.mkdtemp( + path.join(os.tmpdir(), 'polykey-test-'), + ); + const keysPath = path.join(dataDir, 'keys'); + keyManager = await KeyManager.createKeyManager({ + password, + keysPath, + logger: logger.getChild('keyManager'), + }); + const dbPath = path.join(dataDir, 'db'); + db = await DB.createDB({ + dbPath, + logger: logger, + crypto: { + key: keyManager.dbKey, + ops: { + encrypt: keysUtils.encryptWithKey, + decrypt: keysUtils.decryptWithKey, + }, + }, + }); + nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger: logger.getChild('NodeGraph'), + }); + const tlsConfig = { + keyPrivatePem: keyManager.getRootKeyPairPem().privateKey, + certChainPem: keysUtils.certToPem(keyManager.getRootCert()), + }; + fwdProxy = new ForwardProxy({ + authToken: 'auth', + logger: logger.getChild('fwdProxy'), + }); + await fwdProxy.start({ + tlsConfig, + }); + revProxy = new ReverseProxy({ + logger: logger.getChild('revProxy'), + }); + await revProxy.start({ + serverHost, + serverPort, + tlsConfig, + }); + await nodeGraph.setNode(remoteNodeId1, { + host: remoteNode1.revProxy.getIngressHost(), + 
port: remoteNode1.revProxy.getIngressPort(), + }); + await nodeGraph.setNode(remoteNodeId2, { + host: remoteNode2.revProxy.getIngressHost(), + port: remoteNode2.revProxy.getIngressPort(), + }); + }); + + afterEach(async () => { + await nodeGraph.stop(); + await nodeGraph.destroy(); + await db.stop(); + await db.destroy(); + await keyManager.stop(); + await keyManager.destroy(); + await revProxy.stop(); + await fwdProxy.stop(); + }); + + // Seed nodes + test('starting should add seed nodes to the node graph', async () => { + let nodeConnectionManager: NodeConnectionManager | undefined; + try { + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + seedNodes: dummySeedNodes, + logger: logger, + }); + await nodeConnectionManager.start(); + const seedNodes = nodeConnectionManager.getSeedNodes(); + expect(seedNodes).toContainEqual(nodeId1); + expect(seedNodes).toContainEqual(nodeId2); + expect(seedNodes).toContainEqual(nodeId3); + expect(await nodeGraph.getNode(seedNodes[0])).toBeDefined(); + expect(await nodeGraph.getNode(seedNodes[1])).toBeDefined(); + expect(await nodeGraph.getNode(seedNodes[2])).toBeDefined(); + expect(await nodeGraph.getNode(dummyNodeId)).toBeUndefined(); + } finally { + // Clean up + await nodeConnectionManager?.stop(); + } + }); + test('should get seed nodes', async () => { + // NodeConnectionManager under test + const nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + seedNodes: dummySeedNodes, + logger: logger, + }); + await nodeConnectionManager.start(); + try { + const seedNodes = nodeConnectionManager.getSeedNodes(); + expect(seedNodes).toHaveLength(3); + expect(seedNodes).toContainEqual(nodeId1); + expect(seedNodes).toContainEqual(nodeId2); + expect(seedNodes).toContainEqual(nodeId3); + } finally { + await nodeConnectionManager.stop(); + } + }); + test('should synchronise nodeGraph', async () => { + let nodeConnectionManager: 
NodeConnectionManager | undefined; + try { + const seedNodes: SeedNodes = {}; + seedNodes[nodesUtils.encodeNodeId(remoteNodeId1)] = { + host: remoteNode1.revProxy.getIngressHost(), + port: remoteNode1.revProxy.getIngressPort(), + }; + seedNodes[nodesUtils.encodeNodeId(remoteNodeId2)] = { + host: remoteNode2.revProxy.getIngressHost(), + port: remoteNode2.revProxy.getIngressPort(), + }; + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + seedNodes, + logger: logger, + }); + await remoteNode1.nodeGraph.setNode(nodeId1, { + host: serverHost, + port: serverPort, + }); + await remoteNode2.nodeGraph.setNode(nodeId2, { + host: serverHost, + port: serverPort, + }); + await nodeConnectionManager.start(); + await nodeConnectionManager.syncNodeGraph(); + expect(await nodeGraph.getNode(nodeId1)).toBeDefined(); + expect(await nodeGraph.getNode(nodeId2)).toBeDefined(); + expect(await nodeGraph.getNode(dummyNodeId)).toBeUndefined(); + } finally { + await nodeConnectionManager?.stop(); + } + }); + test('should handle an offline seed node when synchronising nodeGraph', async () => { + let nodeConnectionManager: NodeConnectionManager | undefined; + try { + const seedNodes: SeedNodes = {}; + seedNodes[nodesUtils.encodeNodeId(remoteNodeId1)] = { + host: remoteNode1.revProxy.getIngressHost(), + port: remoteNode1.revProxy.getIngressPort(), + }; + seedNodes[nodesUtils.encodeNodeId(remoteNodeId2)] = { + host: remoteNode2.revProxy.getIngressHost(), + port: remoteNode2.revProxy.getIngressPort(), + }; + seedNodes[nodesUtils.encodeNodeId(dummyNodeId)] = { + host: serverHost, + port: serverPort, + }; + // Adding information to remotes to find + await remoteNode1.nodeGraph.setNode(nodeId1, { + host: serverHost, + port: serverPort, + }); + await remoteNode2.nodeGraph.setNode(nodeId2, { + host: serverHost, + port: serverPort, + }); + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + 
seedNodes, + connConnectTime: 500, + logger: logger, + }); + await nodeConnectionManager.start(); + // This should complete without error + await nodeConnectionManager.syncNodeGraph(); + // Information on remotes are found + expect(await nodeGraph.getNode(nodeId1)).toBeDefined(); + expect(await nodeGraph.getNode(nodeId2)).toBeDefined(); + } finally { + await nodeConnectionManager?.stop(); + } + }); +}); diff --git a/tests/nodes/NodeConnectionManager.termination.test.ts b/tests/nodes/NodeConnectionManager.termination.test.ts new file mode 100644 index 000000000..b616ac147 --- /dev/null +++ b/tests/nodes/NodeConnectionManager.termination.test.ts @@ -0,0 +1,773 @@ +import type { AddressInfo } from 'net'; +import type { NodeId, NodeIdString, SeedNodes } from '@/nodes/types'; +import type { Host, Port, TLSConfig } from '@/network/types'; +import net from 'net'; +import fs from 'fs'; +import path from 'path'; +import os from 'os'; +import { DB } from '@matrixai/db'; +import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; +import { destroyed } from '@matrixai/async-init/'; +import { IdInternal } from '@matrixai/id'; +import PolykeyAgent from '@/PolykeyAgent'; +import KeyManager from '@/keys/KeyManager'; +import NodeGraph from '@/nodes/NodeGraph'; +import NodeConnectionManager from '@/nodes/NodeConnectionManager'; +import ForwardProxy from '@/network/ForwardProxy'; +import ReverseProxy from '@/network/ReverseProxy'; +import * as nodesUtils from '@/nodes/utils'; +import * as nodesErrors from '@/nodes/errors'; +import * as keysUtils from '@/keys/utils'; +import * as grpcErrors from '@/grpc/errors'; +import * as grpcUtils from '@/grpc/utils'; +import * as agentErrors from '@/agent/errors'; +import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; +import { promise, promisify } from '@/utils'; +import * as testUtils from '../utils'; + +describe(`${NodeConnectionManager.name} termination test`, () => { + const logger = new Logger( + 
`${NodeConnectionManager.name} test`, + LogLevel.WARN, + [new StreamHandler()], + ); + grpcUtils.setLogger(logger.getChild('grpc')); + + // Constants + const password = 'password'; + const nodeId1 = IdInternal.create([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 5, + ]); + const nodeId2 = IdInternal.create([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 8, + ]); + const nodeId3 = IdInternal.create([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 124, + ]); + const dummyNodeId = nodesUtils.decodeNodeId( + 'vi3et1hrpv2m2lrplcm7cu913kr45v51cak54vm68anlbvuf83ra0', + )!; + + const serverHost = '127.0.0.1' as Host; + const serverPort = 55555 as Port; + + const dummySeedNodes: SeedNodes = {}; + dummySeedNodes[nodesUtils.encodeNodeId(nodeId1)] = { + host: serverHost, + port: serverPort, + }; + dummySeedNodes[nodesUtils.encodeNodeId(nodeId2)] = { + host: serverHost, + port: serverPort, + }; + dummySeedNodes[nodesUtils.encodeNodeId(nodeId3)] = { + host: serverHost, + port: serverPort, + }; + + const nop = async () => {}; + + // + let dataDir: string; + let nodePath: string; + let keyManager: KeyManager; + let db: DB; + let fwdProxy: ForwardProxy; + let revProxy: ReverseProxy; + let nodeGraph: NodeGraph; + + let tlsConfig2: TLSConfig; + + const mockedGenerateDeterministicKeyPair = jest.spyOn( + keysUtils, + 'generateDeterministicKeyPair', + ); + + beforeEach(async () => { + mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { + return keysUtils.generateKeyPair(bits); + }); + + dataDir = await fs.promises.mkdtemp( + path.join(os.tmpdir(), 'polykey-test-'), + ); + nodePath = path.join(dataDir, 'node'); + const keysPath = path.join(dataDir, 'keys'); + keyManager = await KeyManager.createKeyManager({ + password, + keysPath, + logger: logger.getChild('keyManager'), + }); + const dbPath = path.join(dataDir, 'db'); 
+ db = await DB.createDB({ + dbPath, + logger: logger, + crypto: { + key: keyManager.dbKey, + ops: { + encrypt: keysUtils.encryptWithKey, + decrypt: keysUtils.decryptWithKey, + }, + }, + }); + nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger: logger.getChild('NodeGraph'), + }); + const tlsConfig = { + keyPrivatePem: keyManager.getRootKeyPairPem().privateKey, + certChainPem: keysUtils.certToPem(keyManager.getRootCert()), + }; + fwdProxy = new ForwardProxy({ + authToken: 'auth', + logger: logger.getChild('fwdProxy'), + }); + await fwdProxy.start({ + tlsConfig, + }); + revProxy = new ReverseProxy({ + logger: logger.getChild('revProxy'), + }); + await revProxy.start({ + serverHost, + serverPort, + tlsConfig, + }); + + // Other setup + const globalKeyPair = await testUtils.setupGlobalKeypair(); + const cert = keysUtils.generateCertificate( + globalKeyPair.publicKey, + globalKeyPair.privateKey, + globalKeyPair.privateKey, + 86400, + ); + tlsConfig2 = { + keyPrivatePem: keysUtils.keyPairToPem(globalKeyPair).privateKey, + certChainPem: keysUtils.certToPem(cert), + }; + }); + + afterEach(async () => { + await nodeGraph.stop(); + await nodeGraph.destroy(); + await db.stop(); + await db.destroy(); + await keyManager.stop(); + await keyManager.destroy(); + await revProxy.stop(); + await fwdProxy.stop(); + }); + + /** + * Mock TCP server + * This is the server that the ReverseProxy will be proxying to + */ + function tcpServer(end: boolean = false, fastEnd: boolean = false) { + const { p: serverConnP, resolveP: resolveServerConnP } = promise(); + const { p: serverConnEndP, resolveP: resolveServerConnEndP } = + promise(); + const { p: serverConnClosedP, resolveP: resolveServerConnClosedP } = + promise(); + const server = net.createServer( + { + allowHalfOpen: false, + }, + (conn) => { + logger.info('connection!'); + if (fastEnd) { + conn.end(() => { + conn.destroy(); + }); + } + logger.info(JSON.stringify(conn.address())); + resolveServerConnP(); + 
conn.on('end', () => { + logger.info('ending'); + resolveServerConnEndP(); + conn.end(); + conn.destroy(); + }); + conn.once('close', () => { + logger.info('closing'); + resolveServerConnClosedP(); + }); + if (end) { + conn.removeAllListeners('end'); + conn.on('end', () => { + logger.info('ending'); + resolveServerConnEndP(); + conn.destroy(); + }); + conn.end(); + } + }, + ); + const serverClose = promisify(server.close).bind(server); + const serverListen = promisify(server.listen).bind(server); + const serverHost = () => { + return (server.address() as AddressInfo).address as Host; + }; + const serverPort = () => { + return (server.address() as AddressInfo).port as Port; + }; + return { + serverListen, + serverClose, + serverConnP, + serverConnEndP, + serverConnClosedP, + serverHost, + serverPort, + }; + } + + test('closed based on bad certificate during createConnection ', async () => { + let server; + let nodeConnectionManager: NodeConnectionManager | undefined; + let revProxy: ReverseProxy | undefined; + try { + server = tcpServer(); + revProxy = new ReverseProxy({ + logger: logger, + }); + await server.serverListen(0); + await revProxy.start({ + serverHost: server.serverHost(), + serverPort: server.serverPort(), + ingressHost: '127.0.0.1' as Host, + tlsConfig: tlsConfig2, + }); + await nodeGraph.setNode(dummyNodeId, { + host: revProxy.getIngressHost(), + port: revProxy.getIngressPort(), + }); + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + logger: logger, + connConnectTime: 2000, + }); + await nodeConnectionManager.start(); + + // Attempt a connection + await expect( + nodeConnectionManager.withConnF(dummyNodeId, nop), + ).rejects.toThrow(nodesErrors.ErrorNodeConnectionTimeout); + } finally { + await nodeConnectionManager?.stop(); + await revProxy?.stop(); + await server?.serverClose(); + } + }); + test('closed based on bad certificate during withConnection', async () => { + let server; + let 
nodeConnectionManager: NodeConnectionManager | undefined; + let revProxy: ReverseProxy | undefined; + try { + server = tcpServer(); + revProxy = new ReverseProxy({ + logger: logger, + }); + await server.serverListen(0); + await revProxy.start({ + serverHost: server.serverHost(), + serverPort: server.serverPort(), + ingressHost: '127.0.0.1' as Host, + tlsConfig: tlsConfig2, + }); + await nodeGraph.setNode(dummyNodeId, { + host: revProxy.getIngressHost(), + port: revProxy.getIngressPort(), + }); + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + logger: logger, + connConnectTime: 2000, + }); + await nodeConnectionManager.start(); + + // Attempt a connection + const resultP = nodeConnectionManager.withConnF(dummyNodeId, async () => { + // Do nothing + }); + await expect(resultP).rejects.toThrow( + nodesErrors.ErrorNodeConnectionTimeout, + ); + } finally { + await nodeConnectionManager?.stop(); + await revProxy?.stop(); + await server?.serverClose(); + } + }); + test('closed before TLS is established', async () => { + let server; + let nodeConnectionManager: NodeConnectionManager | undefined; + let revProxy: ReverseProxy | undefined; + try { + server = tcpServer(false, true); + revProxy = new ReverseProxy({ + logger: logger, + }); + await server.serverListen(0); + await revProxy.start({ + serverHost: server.serverHost(), + serverPort: server.serverPort(), + ingressHost: '127.0.0.1' as Host, + tlsConfig: tlsConfig2, + }); + await nodeGraph.setNode(dummyNodeId, { + host: revProxy.getIngressHost(), + port: revProxy.getIngressPort(), + }); + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + logger: logger, + connConnectTime: 2000, + }); + await nodeConnectionManager.start(); + + // Attempt a connection + const connectionAttemptP = nodeConnectionManager.withConnF( + dummyNodeId, + async () => { + // Do nothing + }, + ); + await 
expect(connectionAttemptP).rejects.toThrow( + nodesErrors.ErrorNodeConnectionTimeout, + ); + } finally { + await nodeConnectionManager?.stop(); + await revProxy?.stop(); + await server?.serverClose(); + } + }); + test('the connection is stopped by the server', async () => { + let nodeConnectionManager: NodeConnectionManager | undefined; + let polykeyAgent: PolykeyAgent | undefined; + try { + polykeyAgent = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath: nodePath, + logger: logger, + }); + + const agentNodeId = polykeyAgent.keyManager.getNodeId(); + await nodeGraph.setNode(agentNodeId, { + host: polykeyAgent.revProxy.getIngressHost(), + port: polykeyAgent.revProxy.getIngressPort(), + }); + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + logger: logger, + connConnectTime: 2000, + }); + await nodeConnectionManager.start(); + + // @ts-ignore: kidnapping connection map + const connections = nodeConnectionManager.connections; + + // Connections should be empty + expect(connections.size).toBe(0); + await nodeConnectionManager.withConnF(agentNodeId, nop); + // Should have 1 connection now + expect(connections.size).toBe(1); + const firstConnAndLock = connections.get( + agentNodeId.toString() as NodeIdString, + ); + const firstConnection = firstConnAndLock?.connection; + + // Resolves if the shutdownCallback was called + await polykeyAgent.stop(); + // Connection should be removed + expect(connections.size).toBe(1); + const connAndLock = connections.get( + agentNodeId.toString() as NodeIdString, + ); + expect(connAndLock?.lock.isLocked()).toBe(false); + if (firstConnection != null) { + expect(firstConnection[destroyed]).toBe(true); + } + } finally { + await nodeConnectionManager?.stop(); + await polykeyAgent?.stop(); + } + }); + test('the connection is broken during withConnection', async () => { + let nodeConnectionManager: NodeConnectionManager | undefined; + let polykeyAgent: PolykeyAgent | 
undefined; + try { + polykeyAgent = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath: nodePath, + logger: logger, + }); + const agentNodeId = polykeyAgent.keyManager.getNodeId(); + await nodeGraph.setNode(agentNodeId, { + host: polykeyAgent.revProxy.getIngressHost(), + port: polykeyAgent.revProxy.getIngressPort(), + }); + + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + logger: logger, + connConnectTime: 2000, + }); + await nodeConnectionManager.start(); + + // @ts-ignore: kidnapping connection map + const connections = nodeConnectionManager.connections; + + // Connections should be empty + expect(connections.size).toBe(0); + await nodeConnectionManager.withConnF(agentNodeId, nop); + // Should have 1 connection now + expect(connections.size).toBe(1); + const firstConnAndLock = connections.get( + agentNodeId.toString() as NodeIdString, + ); + const firstConnection = firstConnAndLock?.connection; + + // Resolves if the shutdownCallback was called + const withConnectionP = nodeConnectionManager.withConnF( + agentNodeId, + async (connection) => { + const client = connection.getClient(); + expect(connection[destroyed]).toBe(false); + expect(client[destroyed]).toBe(false); + await polykeyAgent?.stop(); + expect(client[destroyed]).toBe(true); + expect(connection[destroyed]).toBe(true); + // Breaking call + const attemptP = client.echo(new utilsPB.EchoMessage()); + await expect(attemptP).rejects.toThrow(); + await attemptP; + }, + ); + + await expect(withConnectionP).rejects.toThrow(); + + // Connection should be removed + expect(connections.size).toBe(1); + const connAndLock = connections.get( + agentNodeId.toString() as NodeIdString, + ); + expect(connAndLock?.lock.isLocked()).toBe(false); + if (firstConnection != null) { + expect(firstConnection[destroyed]).toBe(true); + } + } finally { + await nodeConnectionManager?.stop(); + await polykeyAgent?.stop(); + } + }); + const errorOptions = [ + 
'ErrorNodeConnectionDestroyed', + 'ErrorGRPCClientTimeout', + 'ErrorAgentClientDestroyed', + ]; + test.each(errorOptions)('withConnF receives a %s error', async (option) => { + let nodeConnectionManager: NodeConnectionManager | undefined; + let polykeyAgent: PolykeyAgent | undefined; + try { + polykeyAgent = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath: nodePath, + logger: logger, + }); + + const agentNodeId = polykeyAgent.keyManager.getNodeId(); + await nodeGraph.setNode(agentNodeId, { + host: polykeyAgent.revProxy.getIngressHost(), + port: polykeyAgent.revProxy.getIngressPort(), + }); + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + logger: logger, + connConnectTime: 2000, + }); + await nodeConnectionManager.start(); + + // @ts-ignore: kidnapping connection map + const connections = nodeConnectionManager.connections; + + // Connections should be empty + expect(connections.size).toBe(0); + await nodeConnectionManager.withConnF(agentNodeId, nop); + // Should have 1 connection now + expect(connections.size).toBe(1); + const firstConnAndLock = connections.get( + agentNodeId.toString() as NodeIdString, + ); + const firstConnection = firstConnAndLock?.connection; + + // Resolves if the shutdownCallback was called + const responseP = nodeConnectionManager.withConnF( + agentNodeId, + async () => { + // Throw an error here + switch (option) { + case 'ErrorNodeConnectionDestroyed': + throw new nodesErrors.ErrorNodeConnectionDestroyed(); + case 'ErrorGRPCClientTimeout': + throw new grpcErrors.ErrorGRPCClientTimeout(); + case 'ErrorAgentClientDestroyed': + throw new agentErrors.ErrorAgentClientDestroyed(); + } + }, + ); + await expect(responseP).rejects.toThrow(); + + // Connection should be removed + expect(connections.size).toBe(1); + const connAndLock = connections.get( + agentNodeId.toString() as NodeIdString, + ); + expect(connAndLock?.lock.isLocked()).toBe(false); + if (firstConnection != null) 
{ + expect(firstConnection[destroyed]).toBe(true); + } + } finally { + await nodeConnectionManager?.stop(); + await polykeyAgent?.stop(); + } + }); + test.each(errorOptions)('withConnG receives a %s error', async (option) => { + let nodeConnectionManager: NodeConnectionManager | undefined; + let polykeyAgent: PolykeyAgent | undefined; + try { + polykeyAgent = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath: nodePath, + logger: logger, + }); + + const agentNodeId = polykeyAgent.keyManager.getNodeId(); + await nodeGraph.setNode(agentNodeId, { + host: polykeyAgent.revProxy.getIngressHost(), + port: polykeyAgent.revProxy.getIngressPort(), + }); + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + logger: logger, + connConnectTime: 2000, + }); + await nodeConnectionManager.start(); + + // @ts-ignore: kidnapping connection map + const connections = nodeConnectionManager.connections; + + // Connections should be empty + expect(connections.size).toBe(0); + await nodeConnectionManager.withConnF(agentNodeId, nop); + // Should have 1 connection now + expect(connections.size).toBe(1); + const firstConnAndLock = connections.get( + agentNodeId.toString() as NodeIdString, + ); + const firstConnection = firstConnAndLock?.connection; + + // Resolves if the shutdownCallback was called + const gen = await nodeConnectionManager.withConnG( + agentNodeId, + async function* (): AsyncGenerator { + // Throw an error here + switch (option) { + case 'ErrorNodeConnectionDestroyed': + throw new nodesErrors.ErrorNodeConnectionDestroyed(); + case 'ErrorGRPCClientTimeout': + throw new grpcErrors.ErrorGRPCClientTimeout(); + case 'ErrorAgentClientDestroyed': + throw new agentErrors.ErrorAgentClientDestroyed(); + } + yield 'hello world'; + }, + ); + await expect(async () => { + for await (const _ of gen) { + // Do nothing + } + }).rejects.toThrow(); + + // Connection should be removed + expect(connections.size).toBe(1); + const 
connAndLock = connections.get( + agentNodeId.toString() as NodeIdString, + ); + expect(connAndLock?.lock.isLocked()).toBe(false); + if (firstConnection != null) { + expect(firstConnection[destroyed]).toBe(true); + } + } finally { + await nodeConnectionManager?.stop(); + await polykeyAgent?.stop(); + } + }); + test('client itself is killed during withConnection', async () => { + let nodeConnectionManager: NodeConnectionManager | undefined; + let polykeyAgent: PolykeyAgent | undefined; + try { + polykeyAgent = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath: nodePath, + logger: logger, + }); + + const agentNodeId = polykeyAgent.keyManager.getNodeId(); + await nodeGraph.setNode(agentNodeId, { + host: polykeyAgent.revProxy.getIngressHost(), + port: polykeyAgent.revProxy.getIngressPort(), + }); + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + logger: logger, + connConnectTime: 2000, + }); + await nodeConnectionManager.start(); + + // @ts-ignore: kidnapping connection map + const connections = nodeConnectionManager.connections; + + // Connections should be empty + expect(connections.size).toBe(0); + await nodeConnectionManager.withConnF(agentNodeId, nop); + // Should have 1 connection now + expect(connections.size).toBe(1); + const firstConnAndLock = connections.get( + agentNodeId.toString() as NodeIdString, + ); + const firstConnection = firstConnAndLock?.connection; + + const killSelfP = promise(); + // Resolves if the shutdownCallback was called + await nodeConnectionManager.withConnF(agentNodeId, async (connection) => { + const client = connection.getClient(); + expect(connection[destroyed]).toBe(false); + expect(client[destroyed]).toBe(false); + + // We want to watch for the killSelf event by hijacking the NodeConnectionmanagerInterface + const oldKillSelf = + // @ts-ignore: kidnap the callback + connection.destroyCallback; + // @ts-ignore: update the callback; + connection.destroyCallback = 
async () => { + await oldKillSelf(); + killSelfP.resolveP(null); + }; + await connection.destroy(); + }); + + // Wait for `killSelf` to resolve + await killSelfP.p; + + // Connection should be removed + expect(connections.size).toBe(1); + const connAndLock = connections.get( + agentNodeId.toString() as NodeIdString, + ); + expect(connAndLock?.lock.isLocked()).toBe(false); + if (firstConnection != null) { + expect(firstConnection[destroyed]).toBe(true); + } + } finally { + await nodeConnectionManager?.stop(); + await polykeyAgent?.stop(); + } + }); + test('client itself is killed', async () => { + let nodeConnectionManager: NodeConnectionManager | undefined; + let polykeyAgent: PolykeyAgent | undefined; + try { + polykeyAgent = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath: nodePath, + logger: logger, + }); + + const agentNodeId = polykeyAgent.keyManager.getNodeId(); + await nodeGraph.setNode(agentNodeId, { + host: polykeyAgent.revProxy.getIngressHost(), + port: polykeyAgent.revProxy.getIngressPort(), + }); + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + logger: logger, + connConnectTime: 2000, + }); + await nodeConnectionManager.start(); + + // @ts-ignore: kidnapping connection map + const connections = nodeConnectionManager.connections; + + // Connections should be empty + expect(connections.size).toBe(0); + await nodeConnectionManager.withConnF(agentNodeId, nop); + // Should have 1 connection now + expect(connections.size).toBe(1); + const firstConnAndLock = connections.get( + agentNodeId.toString() as NodeIdString, + ); + const firstConnection = firstConnAndLock?.connection; + + // We want to watch for the killSelf event by hijacking the NodeConnectionmanagerInterface + const oldKillSelf = + // @ts-ignore: kidnap the callback + firstConnection?.destroyCallback; + const killSelfP = promise(); + if (firstConnection != null) { + // @ts-ignore: update the callback; + 
firstConnection.destroyCallback = async () => { + if (oldKillSelf != null) await oldKillSelf(); + killSelfP.resolveP(null); + }; + } + await firstConnection?.destroy(); + // Wait for `killSelf` to resolve + await killSelfP.p; + + // Connection should be removed + expect(connections.size).toBe(1); + const connAndLock = connections.get( + agentNodeId.toString() as NodeIdString, + ); + expect(connAndLock?.lock.isLocked()).toBe(false); + if (firstConnection != null) { + expect(firstConnection[destroyed]).toBe(true); + } + } finally { + await nodeConnectionManager?.stop(); + await polykeyAgent?.stop(); + } + }); +}); diff --git a/tests/nodes/NodeConnectionManager.timeout.test.ts b/tests/nodes/NodeConnectionManager.timeout.test.ts new file mode 100644 index 000000000..970c02156 --- /dev/null +++ b/tests/nodes/NodeConnectionManager.timeout.test.ts @@ -0,0 +1,312 @@ +import type { NodeId, NodeIdString, SeedNodes } from '@/nodes/types'; +import type { Host, Port } from '@/network/types'; +import fs from 'fs'; +import path from 'path'; +import os from 'os'; +import { DB } from '@matrixai/db'; +import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; +import { IdInternal } from '@matrixai/id'; +import PolykeyAgent from '@/PolykeyAgent'; +import KeyManager from '@/keys/KeyManager'; +import NodeGraph from '@/nodes/NodeGraph'; +import NodeConnectionManager from '@/nodes/NodeConnectionManager'; +import ForwardProxy from '@/network/ForwardProxy'; +import ReverseProxy from '@/network/ReverseProxy'; +import * as nodesUtils from '@/nodes/utils'; +import * as keysUtils from '@/keys/utils'; +import * as grpcUtils from '@/grpc/utils'; +import { sleep } from '@/utils'; + +describe(`${NodeConnectionManager.name} timeout test`, () => { + const logger = new Logger( + `${NodeConnectionManager.name} test`, + LogLevel.WARN, + [new StreamHandler()], + ); + grpcUtils.setLogger(logger.getChild('grpc')); + + const nodeConnectionManagerLogger = logger.getChild( + 
'nodeConnectionManagerUT', + ); + // Constants + const password = 'password'; + const nodeId1 = IdInternal.create([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 5, + ]); + const nodeId2 = IdInternal.create([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 8, + ]); + const nodeId3 = IdInternal.create([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 124, + ]); + + const serverHost = '127.0.0.1' as Host; + const serverPort = 55555 as Port; + + const dummySeedNodes: SeedNodes = {}; + dummySeedNodes[nodesUtils.encodeNodeId(nodeId1)] = { + host: serverHost, + port: serverPort, + }; + dummySeedNodes[nodesUtils.encodeNodeId(nodeId2)] = { + host: serverHost, + port: serverPort, + }; + dummySeedNodes[nodesUtils.encodeNodeId(nodeId3)] = { + host: serverHost, + port: serverPort, + }; + + const nop = async () => {}; + + // + let dataDir: string; + let dataDir2: string; + let keyManager: KeyManager; + let db: DB; + let fwdProxy: ForwardProxy; + let revProxy: ReverseProxy; + let nodeGraph: NodeGraph; + + let remoteNode1: PolykeyAgent; + let remoteNode2: PolykeyAgent; + let remoteNodeId1: NodeId; + let remoteNodeId2: NodeId; + + const mockedGenerateDeterministicKeyPair = jest.spyOn( + keysUtils, + 'generateDeterministicKeyPair', + ); + + beforeAll(async () => { + mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { + return keysUtils.generateKeyPair(bits); + }); + + dataDir2 = await fs.promises.mkdtemp( + path.join(os.tmpdir(), 'polykey-test-'), + ); + // Creating remotes, they just exist to start connections or fail them if needed + remoteNode1 = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath: path.join(dataDir2, 'remoteNode1'), + logger: logger.getChild('remoteNode1'), + }); + remoteNodeId1 = remoteNode1.keyManager.getNodeId(); + remoteNode2 = await PolykeyAgent.createPolykeyAgent({ + password, + 
nodePath: path.join(dataDir2, 'remoteNode2'), + logger: logger.getChild('remoteNode2'), + }); + remoteNodeId2 = remoteNode2.keyManager.getNodeId(); + }); + + afterAll(async () => { + await remoteNode1.stop(); + await remoteNode1.destroy(); + await remoteNode2.stop(); + await remoteNode2.destroy(); + await fs.promises.rm(dataDir2, { force: true, recursive: true }); + }); + + beforeEach(async () => { + dataDir = await fs.promises.mkdtemp( + path.join(os.tmpdir(), 'polykey-test-'), + ); + const keysPath = path.join(dataDir, 'keys'); + keyManager = await KeyManager.createKeyManager({ + password, + keysPath, + logger: logger.getChild('keyManager'), + }); + const dbPath = path.join(dataDir, 'db'); + db = await DB.createDB({ + dbPath, + logger: nodeConnectionManagerLogger, + crypto: { + key: keyManager.dbKey, + ops: { + encrypt: keysUtils.encryptWithKey, + decrypt: keysUtils.decryptWithKey, + }, + }, + }); + nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger: logger.getChild('NodeGraph'), + }); + const tlsConfig = { + keyPrivatePem: keyManager.getRootKeyPairPem().privateKey, + certChainPem: keysUtils.certToPem(keyManager.getRootCert()), + }; + fwdProxy = new ForwardProxy({ + authToken: 'auth', + logger: logger.getChild('fwdProxy'), + }); + await fwdProxy.start({ + tlsConfig, + }); + revProxy = new ReverseProxy({ + logger: logger.getChild('revProxy'), + }); + await revProxy.start({ + serverHost, + serverPort, + tlsConfig, + }); + await nodeGraph.setNode(remoteNodeId1, { + host: remoteNode1.revProxy.getIngressHost(), + port: remoteNode1.revProxy.getIngressPort(), + }); + await nodeGraph.setNode(remoteNodeId2, { + host: remoteNode2.revProxy.getIngressHost(), + port: remoteNode2.revProxy.getIngressPort(), + }); + }); + + afterEach(async () => { + await nodeGraph.stop(); + await nodeGraph.destroy(); + await db.stop(); + await db.destroy(); + await keyManager.stop(); + await keyManager.destroy(); + await revProxy.stop(); + await fwdProxy.stop(); + }); + 
+ // Timeouts + test('should time out a connection', async () => { + // NodeConnectionManager under test + let nodeConnectionManager: NodeConnectionManager | undefined; + try { + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + connTimeoutTime: 500, + logger: nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start(); + // @ts-ignore: kidnap connections + const connections = nodeConnectionManager.connections; + await nodeConnectionManager.withConnF(remoteNodeId1, nop); + const connAndLock = connections.get( + remoteNodeId1.toString() as NodeIdString, + ); + // Check entry is in map and lock is released + expect(connAndLock).toBeDefined(); + expect(connAndLock?.lock.isLocked()).toBeFalsy(); + expect(connAndLock?.timer).toBeDefined(); + expect(connAndLock?.connection).toBeDefined(); + + // Wait for timeout + await sleep(1000); + const finalConnAndLock = connections.get( + remoteNodeId1.toString() as NodeIdString, + ); + expect(finalConnAndLock).toBeDefined(); + expect(finalConnAndLock?.lock.isLocked()).toBeFalsy(); + expect(finalConnAndLock?.timer).toBeUndefined(); + expect(finalConnAndLock?.connection).toBeUndefined(); + } finally { + await nodeConnectionManager?.stop(); + } + }); + test('withConnection should extend timeout', async () => { + // NodeConnectionManager under test + let nodeConnectionManager: NodeConnectionManager | undefined; + try { + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + connTimeoutTime: 1000, + logger: nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start(); + // @ts-ignore: kidnap connections + const connections = nodeConnectionManager.connections; + await nodeConnectionManager.withConnF(remoteNodeId1, nop); + const connAndLock = connections.get( + remoteNodeId1.toString() as NodeIdString, + ); + // Check entry is in map and lock is released + expect(connAndLock).toBeDefined(); + 
expect(connAndLock?.lock.isLocked()).toBeFalsy(); + expect(connAndLock?.timer).toBeDefined(); + expect(connAndLock?.connection).toBeDefined(); + + // WithConnection should extend timeout to 1500ms + await sleep(500); + await nodeConnectionManager.withConnF(remoteNodeId1, async () => { + // Do noting + }); + + // Connection should still exist after 1250 secs + await sleep(750); + const midConnAndLock = connections.get( + remoteNodeId1.toString() as NodeIdString, + ); + expect(midConnAndLock).toBeDefined(); + expect(midConnAndLock?.lock.isLocked()).toBeFalsy(); + expect(midConnAndLock?.timer).toBeDefined(); + expect(midConnAndLock?.connection).toBeDefined(); + + // Should be dead after 1750 secs + await sleep(500); + const finalConnAndLock = connections.get( + remoteNodeId1.toString() as NodeIdString, + ); + expect(finalConnAndLock).toBeDefined(); + expect(finalConnAndLock?.lock.isLocked()).toBeFalsy(); + expect(finalConnAndLock?.timer).toBeUndefined(); + expect(finalConnAndLock?.connection).toBeUndefined(); + } finally { + await nodeConnectionManager?.stop(); + } + }); + test('should remove timeout when connection is destroyed', async () => { + // NodeConnectionManager under test + let nodeConnectionManager: NodeConnectionManager | undefined; + try { + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + fwdProxy, + revProxy, + logger: nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start(); + // @ts-ignore: kidnap connections + const connections = nodeConnectionManager.connections; + await nodeConnectionManager.withConnF(remoteNodeId1, nop); + const midConnAndLock = connections.get( + remoteNodeId1.toString() as NodeIdString, + ); + // Check entry is in map and lock is released + expect(midConnAndLock).toBeDefined(); + expect(midConnAndLock?.lock.isLocked()).toBeFalsy(); + expect(midConnAndLock?.timer).toBeDefined(); + + // Destroying the connection + // @ts-ignore: private method + await 
nodeConnectionManager.destroyConnection(remoteNodeId1); + const finalConnAndLock = connections.get( + remoteNodeId1.toString() as NodeIdString, + ); + expect(finalConnAndLock).toBeDefined(); + expect(finalConnAndLock?.lock.isLocked()).toBeFalsy(); + expect(finalConnAndLock?.connection).toBeUndefined(); + expect(finalConnAndLock?.timer).toBeUndefined(); + } finally { + await nodeConnectionManager?.stop(); + } + }); +}); diff --git a/tests/nodes/NodeGraph.test.ts b/tests/nodes/NodeGraph.test.ts index cdcd59bbb..1960c02d3 100644 --- a/tests/nodes/NodeGraph.test.ts +++ b/tests/nodes/NodeGraph.test.ts @@ -1,28 +1,23 @@ -import type { NodeGraph } from '@/nodes'; import type { Host, Port } from '@/network/types'; -import type { NodeId, NodeAddress, NodeData } from '@/nodes/types'; +import type { NodeAddress, NodeData, NodeId } from '@/nodes/types'; import os from 'os'; import path from 'path'; import fs from 'fs'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { IdInternal } from '@matrixai/id'; -import { NodeManager, errors as nodesErrors } from '@/nodes'; -import { KeyManager, utils as keysUtils } from '@/keys'; -import { ForwardProxy, ReverseProxy } from '@/network'; +import NodeConnectionManager from '@/nodes/NodeConnectionManager'; +import NodeGraph from '@/nodes/NodeGraph'; +import * as nodesErrors from '@/nodes/errors'; +import KeyManager from '@/keys/KeyManager'; +import * as keysUtils from '@/keys/utils'; +import ForwardProxy from '@/network/ForwardProxy'; +import ReverseProxy from '@/network/ReverseProxy'; import * as nodesUtils from '@/nodes/utils'; -import { Sigchain } from '@/sigchain'; +import Sigchain from '@/sigchain/Sigchain'; import * as nodesTestUtils from './utils'; -// Mocks. -jest.mock('@/keys/utils', () => ({ - ...jest.requireActual('@/keys/utils'), - generateDeterministicKeyPair: - jest.requireActual('@/keys/utils').generateKeyPair, -})); - -// FIXME, some of these tests fail randomly. 
-describe('NodeGraph', () => { +describe(`${NodeGraph.name} test`, () => { const password = 'password'; let nodeGraph: NodeGraph; let nodeId: NodeId; @@ -31,21 +26,11 @@ describe('NodeGraph', () => { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, ]); - const nodeId2 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 8, - ]); - const nodeId3 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 124, - ]); - // Const nodeId2 = makeNodeId('vrcacp9vsb4ht25hds6s4lpp2abfaso0mptcfnh499n35vfcn2gkg'); - // const nodeId3 = makeNodeId('v359vgrgmqf1r5g4fvisiddjknjko6bmm4qv7646jr7fi9enbfuug'); const dummyNode = nodesUtils.decodeNodeId( 'vi3et1hrpv2m2lrplcm7cu913kr45v51cak54vm68anlbvuf83ra0', )!; - const logger = new Logger('NodeGraph Test', LogLevel.WARN, [ + const logger = new Logger(`${NodeGraph.name} test`, LogLevel.ERROR, [ new StreamHandler(), ]); let fwdProxy: ForwardProxy; @@ -53,48 +38,21 @@ describe('NodeGraph', () => { let dataDir: string; let keyManager: KeyManager; let db: DB; - let nodeManager: NodeManager; + let nodeConnectionManager: NodeConnectionManager; let sigchain: Sigchain; - const nodeIdGenerator = (number: number) => { - const idArray = [ - 223, - 24, - 34, - 40, - 46, - 217, - 4, - 71, - 103, - 71, - 59, - 123, - 143, - 187, - 9, - 29, - 157, - 41, - 131, - 44, - 68, - 160, - 79, - 127, - 137, - 154, - 221, - 86, - 157, - 23, - 77, - number, - ]; - return IdInternal.create(idArray); - }; - - beforeAll(async () => { + const hostGen = (i: number) => `${i}.${i}.${i}.${i}` as Host; + + const mockedGenerateDeterministicKeyPair = jest.spyOn( + keysUtils, + 'generateDeterministicKeyPair', + ); + + beforeEach(async () => { + mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { + return keysUtils.generateKeyPair(bits); + }); + dataDir = await fs.promises.mkdtemp( 
path.join(os.tmpdir(), 'polykey-test-'), ); @@ -136,28 +94,28 @@ describe('NodeGraph', () => { db: db, logger: logger, }); - nodeManager = await NodeManager.createNodeManager({ - db: db, - sigchain: sigchain, + nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + nodeConnectionManager = new NodeConnectionManager({ keyManager: keyManager, + nodeGraph: nodeGraph, fwdProxy: fwdProxy, revProxy: revProxy, logger: logger, }); + await nodeConnectionManager.start(); // Retrieve the NodeGraph reference from NodeManager - // @ts-ignore - nodeGraph = nodeManager.nodeGraph; - nodeId = nodeManager.getNodeId(); + nodeId = keyManager.getNodeId(); }); afterEach(async () => { - await nodeManager.clearDB(); - }); - - afterAll(async () => { await db.stop(); await sigchain.stop(); - await nodeManager.stop(); + await nodeConnectionManager.stop(); + await nodeGraph.stop(); await keyManager.stop(); await fwdProxy.stop(); await fs.promises.rm(dataDir, { @@ -167,34 +125,40 @@ describe('NodeGraph', () => { }); test('NodeGraph readiness', async () => { - const nodeManager2 = await NodeManager.createNodeManager({ - db: db, - sigchain: sigchain, - keyManager: keyManager, - fwdProxy: fwdProxy, - revProxy: revProxy, - logger: logger, + const nodeGraph2 = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, }); // @ts-ignore - const nodeGraph = nodeManager2.nodeGraph; - await expect(nodeGraph.destroy()).rejects.toThrow( + await expect(nodeGraph2.destroy()).rejects.toThrow( nodesErrors.ErrorNodeGraphRunning, ); // Should be a noop - await nodeGraph.start(); - await nodeGraph.stop(); - await nodeGraph.destroy(); + await nodeGraph2.start(); + await nodeGraph2.stop(); + await nodeGraph2.destroy(); await expect(async () => { - await nodeGraph.start(); + await nodeGraph2.start(); }).rejects.toThrow(nodesErrors.ErrorNodeGraphDestroyed); - expect(() => { - nodeGraph.getNodeId(); - }).toThrow(nodesErrors.ErrorNodeGraphNotRunning); await expect(async () => { - 
await nodeGraph.getBucket(0); + await nodeGraph2.getBucket(0); + }).rejects.toThrow(nodesErrors.ErrorNodeGraphNotRunning); + await expect(async () => { + await nodeGraph2.getBucket(0); }).rejects.toThrow(nodesErrors.ErrorNodeGraphNotRunning); - await nodeManager2.stop(); - await nodeManager2.destroy(); + }); + test('knows node (true and false case)', async () => { + // Known node + const nodeAddress1: NodeAddress = { + host: '127.0.0.1' as Host, + port: 11111 as Port, + }; + await nodeGraph.setNode(nodeId1, nodeAddress1); + expect(await nodeGraph.knowsNode(nodeId1)).toBeTruthy(); + + // Unknown node + expect(await nodeGraph.knowsNode(dummyNode)).toBeFalsy(); }); test('finds correct node address', async () => { // New node added @@ -233,36 +197,46 @@ describe('NodeGraph', () => { }); test('adds multiple nodes into the same bucket', async () => { // Add 3 new nodes into bucket 4 - const newNode1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 4); + const bucketIndex = 4; + const newNode1Id = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + 0, + ); const newNode1Address = { host: '4.4.4.4', port: 4444 } as NodeAddress; await nodeGraph.setNode(newNode1Id, newNode1Address); - const newNode2Id = nodesTestUtils.incrementNodeId(newNode1Id); + const newNode2Id = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + 1, + ); const newNode2Address = { host: '5.5.5.5', port: 5555 } as NodeAddress; await nodeGraph.setNode(newNode2Id, newNode2Address); - const newNode3Id = nodesTestUtils.incrementNodeId(newNode2Id); + const newNode3Id = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + 2, + ); const newNode3Address = { host: '6.6.6.6', port: 6666 } as NodeAddress; await nodeGraph.setNode(newNode3Id, newNode3Address); - // Based on XOR values, all 3 nodes should appear in bucket 4. 
+ // Based on XOR values, all 3 nodes should appear in bucket 4 const bucket = await nodeGraph.getBucket(4); - if (bucket) { - expect(bucket[newNode1Id]).toEqual({ - address: { host: '4.4.4.4', port: 4444 }, - lastUpdated: expect.any(Date), - }); - expect(bucket[newNode2Id]).toEqual({ - address: { host: '5.5.5.5', port: 5555 }, - lastUpdated: expect.any(Date), - }); - expect(bucket[newNode3Id]).toEqual({ - address: { host: '6.6.6.6', port: 6666 }, - lastUpdated: expect.any(Date), - }); - } else { - // Should be unreachable - fail('Bucket undefined'); - } + expect(bucket).toBeDefined(); + if (!bucket) fail('bucket should be defined, letting TS know'); + expect(bucket[newNode1Id]).toEqual({ + address: { host: '4.4.4.4', port: 4444 }, + lastUpdated: expect.any(Date), + }); + expect(bucket[newNode2Id]).toEqual({ + address: { host: '5.5.5.5', port: 5555 }, + lastUpdated: expect.any(Date), + }); + expect(bucket[newNode3Id]).toEqual({ + address: { host: '6.6.6.6', port: 6666 }, + lastUpdated: expect.any(Date), + }); }); test('adds a single node into different buckets', async () => { // New node for bucket 3 @@ -296,7 +270,7 @@ describe('NodeGraph', () => { const newNode1Address = { host: '4.4.4.4', port: 4444 } as NodeAddress; await nodeGraph.setNode(newNode1Id, newNode1Address); - // Check the bucket is there first. 
+ // Check the bucket is there first const bucket = await nodeGraph.getBucket(2); if (bucket) { expect(bucket[newNode1Id]).toEqual({ @@ -316,19 +290,32 @@ describe('NodeGraph', () => { }); test('deletes a single node (and retains remainder of bucket)', async () => { // Add 3 new nodes into bucket 4 - const newNode1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 4); + const bucketIndex = 4; + const newNode1Id = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + 0, + ); const newNode1Address = { host: '4.4.4.4', port: 4444 } as NodeAddress; await nodeGraph.setNode(newNode1Id, newNode1Address); - const newNode2Id = nodesTestUtils.incrementNodeId(newNode1Id); + const newNode2Id = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + 1, + ); const newNode2Address = { host: '5.5.5.5', port: 5555 } as NodeAddress; await nodeGraph.setNode(newNode2Id, newNode2Address); - const newNode3Id = nodesTestUtils.incrementNodeId(newNode2Id); + const newNode3Id = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + 2, + ); const newNode3Address = { host: '6.6.6.6', port: 6666 } as NodeAddress; await nodeGraph.setNode(newNode3Id, newNode3Address); - // Based on XOR values, all 3 nodes should appear in bucket 4. 
- const bucket = await nodeGraph.getBucket(4); + // Based on XOR values, all 3 nodes should appear in bucket 4 + const bucket = await nodeGraph.getBucket(bucketIndex); if (bucket) { expect(bucket[newNode1Id]).toEqual({ address: { host: '4.4.4.4', port: 4444 }, @@ -350,7 +337,7 @@ describe('NodeGraph', () => { // Delete the node await nodeGraph.unsetNode(newNode1Id); // Check node no longer exists in the bucket - const newBucket = await nodeGraph.getBucket(4); + const newBucket = await nodeGraph.getBucket(bucketIndex); if (newBucket) { expect(newBucket[newNode1Id]).toBeUndefined(); expect(bucket[newNode2Id]).toEqual({ @@ -368,22 +355,26 @@ describe('NodeGraph', () => { }); test('enforces k-bucket size, removing least active node when a new node is discovered', async () => { // Add k nodes to the database (importantly, they all go into the same bucket) - let currNodeId = nodesTestUtils.generateNodeIdForBucket(nodeId, 59); + const bucketIndex = 59; // Keep a record of the first node ID that we added - const firstNodeId = currNodeId; + const firstNodeId = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + ); for (let i = 1; i <= nodeGraph.maxNodesPerBucket; i++) { // Add the current node ID const nodeAddress = { - host: (i + '.' + i + '.' + i + '.' 
+ i) as Host, + host: hostGen(i), port: i as Port, }; - await nodeGraph.setNode(currNodeId, nodeAddress); + await nodeGraph.setNode( + nodesTestUtils.generateNodeIdForBucket(nodeId, bucketIndex, i), + nodeAddress, + ); // Increment the current node ID - const incrementedNodeId = nodesTestUtils.incrementNodeId(currNodeId); - currNodeId = incrementedNodeId; } // All of these nodes are in bucket 59 - const originalBucket = await nodeGraph.getBucket(59); + const originalBucket = await nodeGraph.getBucket(bucketIndex); if (originalBucket) { expect(Object.keys(originalBucket).length).toBe( nodeGraph.maxNodesPerBucket, @@ -395,11 +386,15 @@ describe('NodeGraph', () => { // Attempt to add a new node into this full bucket (increment the last node // ID that was added) - const newNodeId = nodesTestUtils.incrementNodeId(currNodeId); + const newNodeId = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + nodeGraph.maxNodesPerBucket + 1, + ); const newNodeAddress = { host: '0.0.0.1' as Host, port: 1234 as Port }; await nodeGraph.setNode(newNodeId, newNodeAddress); - const finalBucket = await nodeGraph.getBucket(59); + const finalBucket = await nodeGraph.getBucket(bucketIndex); if (finalBucket) { // We should still have a full bucket (but no more) expect(Object.keys(finalBucket).length).toEqual( @@ -412,7 +407,7 @@ describe('NodeGraph', () => { }); // NODEID1 should have been removed from this bucket (as this was the least active) // The first node added should have been removed from this bucket (as this - // was the least active, purely because it was inserted first). 
+ // was the least active, purely because it was inserted first) expect(finalBucket[firstNodeId]).toBeUndefined(); } else { // Should be unreachable @@ -421,25 +416,32 @@ describe('NodeGraph', () => { }); test('enforces k-bucket size, retaining all nodes if adding a pre-existing node', async () => { // Add k nodes to the database (importantly, they all go into the same bucket) - let currNodeId = nodesTestUtils.generateNodeIdForBucket(nodeId, 59); + const bucketIndex = 59; + const currNodeId = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + ); // Keep a record of the first node ID that we added // const firstNodeId = currNodeId; + let increment = 1; for (let i = 1; i <= nodeGraph.maxNodesPerBucket; i++) { // Add the current node ID const nodeAddress = { - host: (i + '.' + i + '.' + i + '.' + i) as Host, + host: hostGen(i), port: i as Port, }; - await nodeGraph.setNode(currNodeId, nodeAddress); + await nodeGraph.setNode( + nodesTestUtils.generateNodeIdForBucket(nodeId, bucketIndex, increment), + nodeAddress, + ); // Increment the current node ID - skip for the last one to keep currNodeId // as the last added node ID if (i !== nodeGraph.maxNodesPerBucket) { - const incrementedNodeId = nodesTestUtils.incrementNodeId(currNodeId); - currNodeId = incrementedNodeId; + increment++; } } // All of these nodes are in bucket 59 - const originalBucket = await nodeGraph.getBucket(59); + const originalBucket = await nodeGraph.getBucket(bucketIndex); if (originalBucket) { expect(Object.keys(originalBucket).length).toBe( nodeGraph.maxNodesPerBucket, @@ -450,9 +452,9 @@ describe('NodeGraph', () => { } // If we tried to re-add the first node, it would simply remove the original - // first node, as this is the "least active". + // first node, as this is the "least active" // We instead want to check that we don't mistakenly delete a node if we're - // updating an existing one. 
+ // updating an existing one // So, re-add the last node const newLastAddress: NodeAddress = { host: '30.30.30.30' as Host, @@ -460,7 +462,7 @@ describe('NodeGraph', () => { }; await nodeGraph.setNode(currNodeId, newLastAddress); - const finalBucket = await nodeGraph.getBucket(59); + const finalBucket = await nodeGraph.getBucket(bucketIndex); if (finalBucket) { // We should still have a full bucket expect(Object.keys(finalBucket).length).toEqual( @@ -479,6 +481,7 @@ describe('NodeGraph', () => { test('retrieves all buckets (in expected lexicographic order)', async () => { // Bucket 0 is expected to never have any nodes (as nodeId XOR 0 = nodeId) // Bucket 1 (minimum): + const node1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 1); const node1Address = { host: '1.1.1.1', port: 1111 } as NodeAddress; await nodeGraph.setNode(node1Id, node1Address); @@ -487,7 +490,7 @@ describe('NodeGraph', () => { const node41Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 4); const node41Address = { host: '41.41.41.41', port: 4141 } as NodeAddress; await nodeGraph.setNode(node41Id, node41Address); - const node42Id = nodesTestUtils.incrementNodeId(node41Id); + const node42Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 4, 1); const node42Address = { host: '42.42.42.42', port: 4242 } as NodeAddress; await nodeGraph.setNode(node42Id, node42Address); @@ -546,11 +549,11 @@ describe('NodeGraph', () => { // Generate and add some nodes for (let i = 1; i < 255; i += 20) { const newNodeId = nodesTestUtils.generateNodeIdForBucket( - nodeManager.getNodeId(), + keyManager.getNodeId(), i, ); const nodeAddress = { - host: (i + '.' + i + '.' + i + '.' 
+ i) as Host, + host: hostGen(i), port: i as Port, }; await nodeGraph.setNode(newNodeId, nodeAddress); @@ -558,7 +561,7 @@ describe('NodeGraph', () => { id: newNodeId, address: nodeAddress, distance: nodesUtils.calculateDistance( - nodeManager.getNodeId(), + keyManager.getNodeId(), newNodeId, ), }; @@ -581,9 +584,8 @@ describe('NodeGraph', () => { expect(initialNodes[nodeId]).toBeDefined(); // Check it's in the correct bucket const expectedIndex = nodesUtils.calculateBucketIndex( - nodeGraph.getNodeId(), + keyManager.getNodeId(), nodeId, - nodeGraph.nodeIdBits, ); const expectedBucket = await nodeGraph.getBucket(expectedIndex); expect(expectedBucket).toBeDefined(); @@ -600,92 +602,6 @@ describe('NodeGraph', () => { }, global.defaultTimeout * 4, ); - test('finds a single closest node', async () => { - // New node added - const newNode2Id = nodeId1; - const newNode2Address = { host: '227.1.1.1', port: 4567 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - // Find the closest nodes to some node, NODEID3 - const closest = await nodeGraph.getClosestLocalNodes(nodeId3); - expect(closest).toContainEqual({ - id: newNode2Id, - distance: 121n, - address: { host: '227.1.1.1', port: 4567 }, - }); - }); - test('finds 3 closest nodes', async () => { - // Add 3 nodes - await nodeGraph.setNode(nodeId1, { - host: '2.2.2.2', - port: 2222, - } as NodeAddress); - await nodeGraph.setNode(nodeId2, { - host: '3.3.3.3', - port: 3333, - } as NodeAddress); - await nodeGraph.setNode(nodeId3, { - host: '4.4.4.4', - port: 4444, - } as NodeAddress); - - // Find the closest nodes to some node, NODEID4 - const closest = await nodeGraph.getClosestLocalNodes(nodeId3); - expect(closest.length).toBe(3); - expect(closest).toContainEqual({ - id: nodeId3, - distance: 0n, - address: { host: '4.4.4.4', port: 4444 }, - }); - expect(closest).toContainEqual({ - id: nodeId2, - distance: 116n, - address: { host: '3.3.3.3', port: 3333 }, - }); - expect(closest).toContainEqual({ - id: 
nodeId1, - distance: 121n, - address: { host: '2.2.2.2', port: 2222 }, - }); - }); - test('finds the 20 closest nodes', async () => { - // Generate the node ID to find the closest nodes to (in bucket 100) - const nodeIdToFind = nodesTestUtils.generateNodeIdForBucket(nodeId, 100); - // Now generate and add 20 nodes that will be close to this node ID - const addedClosestNodes: NodeData[] = []; - for (let i = 1; i < 101; i += 5) { - const closeNodeId = nodesTestUtils.generateNodeIdForBucket( - nodeIdToFind, - i, - ); - const nodeAddress = { - host: (i + '.' + i + '.' + i + '.' + i) as Host, - port: i as Port, - }; - await nodeGraph.setNode(closeNodeId, nodeAddress); - addedClosestNodes.push({ - id: closeNodeId, - address: nodeAddress, - distance: nodesUtils.calculateDistance(nodeIdToFind, closeNodeId), - }); - } - // Now create and add 10 more nodes that are far away from this node - for (let i = 1; i <= 10; i++) { - const farNodeId = nodeIdGenerator(i); - const nodeAddress = { - host: (i + '.' + i + '.' + i + '.' 
+ i) as Host, - port: i as Port, - }; - await nodeGraph.setNode(farNodeId, nodeAddress); - } - - // Find the closest nodes to the original generated node ID - const closest = await nodeGraph.getClosestLocalNodes(nodeIdToFind); - // We should always only receive k nodes - expect(closest.length).toBe(nodeGraph.maxNodesPerBucket); - // Retrieved closest nodes should be exactly the same as the ones we added - expect(closest).toEqual(addedClosestNodes); - }); test('updates node', async () => { // New node added const node1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 2); diff --git a/tests/nodes/NodeManager.test.ts b/tests/nodes/NodeManager.test.ts index 71c11ac46..11680548b 100644 --- a/tests/nodes/NodeManager.test.ts +++ b/tests/nodes/NodeManager.test.ts @@ -1,4 +1,3 @@ -import type { ClaimIdEncoded } from '@/claims/types'; import type { CertificatePem, KeyPairPem, PublicKeyPem } from '@/keys/types'; import type { Host, Port } from '@/network/types'; import type { NodeId, NodeAddress } from '@/nodes/types'; @@ -7,31 +6,27 @@ import path from 'path'; import fs from 'fs'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; -import { IdInternal } from '@matrixai/id'; -import { PolykeyAgent } from '@'; -import { KeyManager, utils as keysUtils } from '@/keys'; -import { NodeManager, errors as nodesErrors } from '@/nodes'; -import { ForwardProxy, ReverseProxy } from '@/network'; -import { Sigchain } from '@/sigchain'; -import { utils as claimsUtils } from '@/claims'; +import PolykeyAgent from '@/PolykeyAgent'; +import KeyManager from '@/keys/KeyManager'; +import * as keysUtils from '@/keys/utils'; +import NodeConnectionManager from '@/nodes/NodeConnectionManager'; +import NodeGraph from '@/nodes/NodeGraph'; +import NodeManager from '@/nodes/NodeManager'; +import ForwardProxy from '@/network/ForwardProxy'; +import ReverseProxy from '@/network/ReverseProxy'; +import Sigchain from '@/sigchain/Sigchain'; +import * as 
claimsUtils from '@/claims/utils'; import { sleep } from '@/utils'; -import { utils as nodesUtils } from '@/nodes'; +import * as nodesUtils from '@/nodes/utils'; -// Mocks. -jest.mock('@/keys/utils', () => ({ - ...jest.requireActual('@/keys/utils'), - generateDeterministicKeyPair: - jest.requireActual('@/keys/utils').generateKeyPair, -})); - -describe('NodeManager', () => { +describe(`${NodeManager.name} test`, () => { const password = 'password'; - const logger = new Logger('NodeManagerTest', LogLevel.WARN, [ + const logger = new Logger(`${NodeManager.name} test`, LogLevel.ERROR, [ new StreamHandler(), ]); let dataDir: string; - let nodeManager: NodeManager; - + let nodeGraph: NodeGraph; + let nodeConnectionManager: NodeConnectionManager; let fwdProxy: ForwardProxy; let revProxy: ReverseProxy; let keyManager: KeyManager; @@ -42,18 +37,17 @@ describe('NodeManager', () => { const serverHost = '::1' as Host; const serverPort = 1 as Port; - - const nodeId1 = nodesUtils.decodeNodeId( - 'vrsc24a1er424epq77dtoveo93meij0pc8ig4uvs9jbeld78n9nl0', - )!; - const nodeId2 = nodesUtils.decodeNodeId( - 'vrcacp9vsb4ht25hds6s4lpp2abfaso0mptcfnh499n35vfcn2gkg', - )!; - const dummyNode = nodesUtils.decodeNodeId( - 'vi3et1hrpv2m2lrplcm7cu913kr45v51cak54vm68anlbvuf83ra0', - )!; + ``; + const mockedGenerateDeterministicKeyPair = jest.spyOn( + keysUtils, + 'generateDeterministicKeyPair', + ); beforeEach(async () => { + mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { + return keysUtils.generateKeyPair(bits); + }); + dataDir = await fs.promises.mkdtemp( path.join(os.tmpdir(), 'polykey-test-'), ); @@ -104,19 +98,24 @@ describe('NodeManager', () => { }); sigchain = await Sigchain.createSigchain({ keyManager, db, logger }); - nodeManager = await NodeManager.createNodeManager({ + nodeGraph = await NodeGraph.createNodeGraph({ db, - sigchain, keyManager, + logger, + }); + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, fwdProxy, revProxy, 
logger, }); - await nodeManager.start(); + await nodeConnectionManager.start(); }); afterEach(async () => { - await nodeManager.stop(); - await nodeManager.destroy(); + await nodeConnectionManager.stop(); + await nodeGraph.stop(); + await nodeGraph.destroy(); await sigchain.stop(); await sigchain.destroy(); await db.stop(); @@ -131,218 +130,75 @@ describe('NodeManager', () => { }); }); - test('NodeManager readiness', async () => { - await expect(nodeManager.destroy()).rejects.toThrow( - nodesErrors.ErrorNodeManagerRunning, - ); - // Should be a noop - await nodeManager.start(); - await nodeManager.stop(); - await nodeManager.destroy(); - await expect(nodeManager.start()).rejects.toThrow( - nodesErrors.ErrorNodeManagerDestroyed, - ); - // Await expect(nodeManager.readToken()).rejects.toThrow(nodesErrors.ErrorNodeManagerNotRunning); - // await expect(nodeManager.writeToken()).rejects.toThrow(nodesErrors.ErrorNodeManagerNotRunning); - }); - describe('getConnectionToNode', () => { - let targetDataDir: string; - let target: PolykeyAgent; - let targetNodeId: NodeId; - let targetNodeAddress: NodeAddress; - - beforeAll(async () => { - targetDataDir = await fs.promises.mkdtemp( - path.join(os.tmpdir(), 'polykey-test-'), - ); - target = await PolykeyAgent.createPolykeyAgent({ - password: 'password', - nodePath: targetDataDir, - keysConfig: { - rootKeyPairBits: 2048, - }, - logger, - }); - }, global.polykeyStartupTimeout); - - afterAll(async () => { - await target.stop(); - await fs.promises.rm(targetDataDir, { - force: true, - recursive: true, - }); - }); - - beforeEach(async () => { - await target.start({ password: 'password' }); - targetNodeId = target.keyManager.getNodeId(); - targetNodeAddress = { - host: target.revProxy.getIngressHost(), - port: target.revProxy.getIngressPort(), - }; - await nodeManager.setNode(targetNodeId, targetNodeAddress); - }); - - afterEach(async () => { - // Delete the created node connection each time. 
- await target.stop(); - }); - - test('creates new connection to node', async () => { - // @ts-ignore get connection + lock from protected NodeConnectionMap - const initialConnLock = nodeManager.connections.get(targetNodeId); - expect(initialConnLock).toBeUndefined(); - await nodeManager.getConnectionToNode(targetNodeId); - // @ts-ignore get connection + lock from protected NodeConnectionMap - const finalConnLock = nodeManager.connections.get( - targetNodeId.toString(), - ); - // Check entry is in map and lock is released - expect(finalConnLock).toBeDefined(); - expect(finalConnLock?.lock.isLocked()).toBeFalsy(); - }); - test('gets existing connection to node', async () => { - // @ts-ignore accessing protected NodeConnectionMap - expect(nodeManager.connections.size).toBe(0); - // @ts-ignore get connection + lock from protected NodeConnectionMap - const initialConnLock = nodeManager.connections.get(targetNodeId); - expect(initialConnLock).toBeUndefined(); - await nodeManager.getConnectionToNode(targetNodeId); - // Check we only have this single connection - // @ts-ignore accessing protected NodeConnectionMap - expect(nodeManager.connections.size).toBe(1); - await nodeManager.getConnectionToNode(targetNodeId); - // Check we still only have this single connection - // @ts-ignore accessing protected NodeConnectionMap - expect(nodeManager.connections.size).toBe(1); - }); - test('concurrent connection creation to same target results in 1 connection', async () => { - // @ts-ignore accessing protected NodeConnectionMap - expect(nodeManager.connections.size).toBe(0); - // @ts-ignore get connection + lock from protected NodeConnectionMap - const initialConnLock = nodeManager.connections.get(targetNodeId); - expect(initialConnLock).toBeUndefined(); - // Concurrently create connection to same target - await Promise.all([ - nodeManager.getConnectionToNode(targetNodeId), - nodeManager.getConnectionToNode(targetNodeId), - ]); - // Check only 1 connection exists - // @ts-ignore 
accessing protected NodeConnectionMap - expect(nodeManager.connections.size).toBe(1); - // @ts-ignore get connection + lock from protected NodeConnectionMap - const finalConnLock = nodeManager.connections.get( - targetNodeId.toString(), - ); - // Check entry is in map and lock is released - expect(finalConnLock).toBeDefined(); - expect(finalConnLock?.lock.isLocked()).toBeFalsy(); - }); - test( - 'unable to create new connection to offline node', - async () => { - // Add the dummy node - await nodeManager.setNode(dummyNode, { - host: '125.0.0.1' as Host, - port: 55555 as Port, - }); - // @ts-ignore accessing protected NodeConnectionMap - expect(nodeManager.connections.size).toBe(0); - - await expect(() => - nodeManager.getConnectionToNode(dummyNode), - ).rejects.toThrow(nodesErrors.ErrorNodeConnectionTimeout); - // @ts-ignore accessing protected NodeConnectionMap - expect(nodeManager.connections.size).toBe(1); - // @ts-ignore accessing protected NodeConnectionMap - const connLock = nodeManager.connections.get(dummyNode); - // There should still be an entry in the connection map, but it should - // only contain a lock - no connection. 
- expect(connLock).toBeDefined(); - expect(connLock?.lock).toBeDefined(); - expect(connLock?.connection).toBeUndefined(); - - // Undo the initial dummy node add - // @ts-ignore - get the NodeGraph reference - const nodeGraph = nodeManager.nodeGraph; - await nodeGraph.unsetNode(dummyNode); - }, - global.failedConnectionTimeout * 2, - ); - }); test( 'pings node', async () => { - const server = await PolykeyAgent.createPolykeyAgent({ - password: 'password', - nodePath: path.join(dataDir, 'server'), - keysConfig: { - rootKeyPairBits: 2048, - }, - logger: logger, - }); - const serverNodeId = server.nodeManager.getNodeId(); - let serverNodeAddress: NodeAddress = { - host: server.revProxy.getIngressHost(), - port: server.revProxy.getIngressPort(), - }; - await nodeManager.setNode(serverNodeId, serverNodeAddress); + let server: PolykeyAgent | undefined; + try { + server = await PolykeyAgent.createPolykeyAgent({ + password: 'password', + nodePath: path.join(dataDir, 'server'), + keysConfig: { + rootKeyPairBits: 2048, + }, + logger: logger, + }); + const serverNodeId = server.keyManager.getNodeId(); + let serverNodeAddress: NodeAddress = { + host: server.revProxy.getIngressHost(), + port: server.revProxy.getIngressPort(), + }; + await nodeGraph.setNode(serverNodeId, serverNodeAddress); + + const nodeManager = new NodeManager({ + db, + sigchain, + keyManager, + nodeGraph, + nodeConnectionManager, + logger, + }); - // Set server node offline - await server.stop(); - // Check if active - // Case 1: cannot establish new connection, so offline - const active1 = await nodeManager.pingNode(serverNodeId); - expect(active1).toBe(false); - // Bring server node online - await server.start({ password: 'password' }); - // Update the node address (only changes because we start and stop) - serverNodeAddress = { - host: server.revProxy.getIngressHost(), - port: server.revProxy.getIngressPort(), - }; - await nodeManager.setNode(serverNodeId, serverNodeAddress); - // Check if active - // Case 
2: can establish new connection, so online - const active2 = await nodeManager.pingNode(serverNodeId); - expect(active2).toBe(true); - // Turn server node offline again - await server.stop(); - await server.destroy(); - // Give time for the ping buffers to send and wait for timeout on - // existing connection - await sleep(30000); - // Check if active - // Case 3: pre-existing connection no longer active, so offline - const active3 = await nodeManager.pingNode(serverNodeId); - expect(active3).toBe(false); + // Set server node offline + await server.stop(); + // Check if active + // Case 1: cannot establish new connection, so offline + const active1 = await nodeManager.pingNode(serverNodeId); + expect(active1).toBe(false); + // Bring server node online + await server.start({ password: 'password' }); + // Update the node address (only changes because we start and stop) + serverNodeAddress = { + host: server.revProxy.getIngressHost(), + port: server.revProxy.getIngressPort(), + }; + await nodeGraph.setNode(serverNodeId, serverNodeAddress); + // Check if active + // Case 2: can establish new connection, so online + const active2 = await nodeManager.pingNode(serverNodeId); + expect(active2).toBe(true); + // Turn server node offline again + await server.stop(); + await server.destroy(); + // Give time for the ping buffers to send and wait for timeout on + // existing connection + await sleep(30000); // FIXME: remove this sleep + // Check if active + // Case 3: pre-existing connection no longer active, so offline + const active3 = await nodeManager.pingNode(serverNodeId); + expect(active3).toBe(false); + } finally { + // Clean up + await server?.stop(); + await server?.destroy(); + } }, global.failedConnectionTimeout * 2, ); // Ping needs to timeout (takes 20 seconds + setup + pulldown) - test('finds node (local)', async () => { - // Case 1: node already exists in the local node graph (no contact required) - const nodeId = nodeId1; - const nodeAddress: NodeAddress = { - 
host: '127.0.0.1' as Host, - port: 11111 as Port, - }; - await nodeManager.setNode(nodeId, nodeAddress); - // Expect no error thrown - await expect(nodeManager.findNode(nodeId)).resolves.not.toThrowError(); - const foundAddress1 = await nodeManager.findNode(nodeId); - expect(foundAddress1).toStrictEqual(nodeAddress); - }); - test( - 'finds node (contacts remote node)', - async () => { - // Case 2: node can be found on the remote node - const nodeId = nodeId1; - const nodeAddress: NodeAddress = { - host: '127.0.0.1' as Host, - port: 11111 as Port, - }; - - const server = await PolykeyAgent.createPolykeyAgent({ + test('getPublicKey', async () => { + let server: PolykeyAgent | undefined; + try { + server = await PolykeyAgent.createPolykeyAgent({ password: 'password', nodePath: path.join(dataDir, 'server'), keysConfig: { @@ -350,64 +206,32 @@ describe('NodeManager', () => { }, logger: logger, }); - - await nodeManager.setNode(server.nodeManager.getNodeId(), { + const serverNodeId = server.keyManager.getNodeId(); + const serverNodeAddress: NodeAddress = { host: server.revProxy.getIngressHost(), port: server.revProxy.getIngressPort(), - } as NodeAddress); - await server.nodeManager.setNode(nodeId, nodeAddress); - const foundAddress2 = await nodeManager.findNode(nodeId); - expect(foundAddress2).toStrictEqual(nodeAddress); - - await server.stop(); - }, - global.polykeyStartupTimeout, - ); - test( - 'cannot find node (contacts remote node)', - async () => { - // Case 3: node exhausts all contacts and cannot find node - const nodeId = nodeId1; - const server = await PolykeyAgent.createPolykeyAgent({ - password: 'password', - nodePath: path.join(dataDir, 'server'), - keysConfig: { - rootKeyPairBits: 2048, - }, + }; + await nodeGraph.setNode(serverNodeId, serverNodeAddress); + + const nodeManager = new NodeManager({ + db, + sigchain, + keyManager, + nodeGraph, + nodeConnectionManager, logger, }); - await nodeManager.setNode(server.nodeManager.getNodeId(), { - host: 
server.revProxy.getIngressHost(), - port: server.revProxy.getIngressPort(), - } as NodeAddress); - // Add a dummy node to the server node graph database - // Server will not be able to connect to this node (the only node in its - // database), and will therefore not be able to locate the node. - await server.nodeManager.setNode(dummyNode, { - host: '127.0.0.2' as Host, - port: 22222 as Port, - } as NodeAddress); - // So unfindableNode cannot be found - await expect(() => nodeManager.findNode(nodeId)).rejects.toThrowError( - nodesErrors.ErrorNodeGraphNodeNotFound, - ); - await server.stop(); - }, - global.failedConnectionTimeout * 2, - ); - test('knows node (true and false case)', async () => { - // Known node - const nodeAddress1: NodeAddress = { - host: '127.0.0.1' as Host, - port: 11111 as Port, - }; - await nodeManager.setNode(nodeId1, nodeAddress1); - expect(await nodeManager.knowsNode(nodeId1)).toBeTruthy(); - // Unknown node - expect(await nodeManager.knowsNode(nodeId2)).not.toBeTruthy(); + // We want to get the public key of the server + const key = await nodeManager.getPublicKey(serverNodeId); + const expectedKey = server.keyManager.getRootKeyPairPem().publicKey; + expect(key).toEqual(expectedKey); + } finally { + // Clean up + await server?.stop(); + await server?.destroy(); + } }); - describe('Cross signing claims', () => { // These tests follow the following process (from the perspective of Y): // 1. X -> sends notification (to start cross signing request) -> Y @@ -415,7 +239,7 @@ describe('NodeManager', () => { // 3. X -> sends doubly signed claim (Y's intermediary) + its own intermediary claim -> Y // 4. X <- sends doubly signed claim (X's intermediary) <- Y // We're unable to mock the actions of the server, but we can ensure the - // state on each side is as expected. 
+ // state on each side is as expected let xDataDir: string; let x: PolykeyAgent; @@ -442,7 +266,7 @@ describe('NodeManager', () => { logger, }); - xNodeId = x.nodeManager.getNodeId(); + xNodeId = x.keyManager.getNodeId(); xNodeAddress = { host: x.revProxy.getIngressHost(), port: x.revProxy.getIngressPort(), @@ -454,21 +278,21 @@ describe('NodeManager', () => { ); y = await PolykeyAgent.createPolykeyAgent({ password: 'password', - nodePath: xDataDir, + nodePath: yDataDir, keysConfig: { rootKeyPairBits: 2048, }, logger, }); - yNodeId = y.nodeManager.getNodeId(); + yNodeId = y.keyManager.getNodeId(); yNodeAddress = { host: y.revProxy.getIngressHost(), port: y.revProxy.getIngressPort(), }; yPublicKey = y.keyManager.getRootKeyPairPem().publicKey; - await x.nodeManager.setNode(yNodeId, yNodeAddress); - await y.nodeManager.setNode(xNodeId, xNodeAddress); + await x.nodeGraph.setNode(yNodeId, yNodeAddress); + await y.nodeGraph.setNode(xNodeId, xNodeAddress); }, global.polykeyStartupTimeout * 2); afterAll(async () => { await y.stop(); @@ -504,8 +328,7 @@ describe('NodeManager', () => { const xChain = await x.sigchain.getChainData(); expect(Object.keys(xChain).length).toBe(1); // Iterate just to be safe, but expected to only have this single claim - for (const c of Object.keys(xChain)) { - const claimId = c as ClaimIdEncoded; + for (const claimId of Object.keys(xChain)) { const claim = xChain[claimId]; const decoded = claimsUtils.decodeClaim(claim); expect(decoded).toStrictEqual({ @@ -514,23 +337,21 @@ describe('NodeManager', () => { seq: 1, data: { type: 'node', - node1: xNodeId, - node2: yNodeId, + node1: nodesUtils.encodeNodeId(xNodeId), + node2: nodesUtils.encodeNodeId(yNodeId), }, iat: expect.any(Number), }, signatures: expect.any(Object), }); - const signatureNodeIds = Object.keys(decoded.signatures).map( - (idString) => IdInternal.fromString(idString), - ); + const signatureNodeIds = Object.keys(decoded.signatures); expect(signatureNodeIds.length).toBe(2); // Verify 
the 2 signatures - expect(signatureNodeIds).toContain(xNodeId); + expect(signatureNodeIds).toContain(nodesUtils.encodeNodeId(xNodeId)); expect(await claimsUtils.verifyClaimSignature(claim, xPublicKey)).toBe( true, ); - expect(signatureNodeIds).toContain(yNodeId); + expect(signatureNodeIds).toContain(nodesUtils.encodeNodeId(yNodeId)); expect(await claimsUtils.verifyClaimSignature(claim, yPublicKey)).toBe( true, ); @@ -540,8 +361,7 @@ describe('NodeManager', () => { const yChain = await y.sigchain.getChainData(); expect(Object.keys(yChain).length).toBe(1); // Iterate just to be safe, but expected to only have this single claim - for (const c of Object.keys(yChain)) { - const claimId = c as ClaimIdEncoded; + for (const claimId of Object.keys(yChain)) { const claim = yChain[claimId]; const decoded = claimsUtils.decodeClaim(claim); expect(decoded).toStrictEqual({ @@ -550,27 +370,47 @@ describe('NodeManager', () => { seq: 1, data: { type: 'node', - node1: yNodeId, - node2: xNodeId, + node1: nodesUtils.encodeNodeId(yNodeId), + node2: nodesUtils.encodeNodeId(xNodeId), }, iat: expect.any(Number), }, signatures: expect.any(Object), }); - const signatureNodeIds = Object.keys(decoded.signatures).map( - (idString) => IdInternal.fromString(idString), - ); + const signatureNodeIds = Object.keys(decoded.signatures); expect(signatureNodeIds.length).toBe(2); // Verify the 2 signatures - expect(signatureNodeIds).toContain(xNodeId); + expect(signatureNodeIds).toContain(nodesUtils.encodeNodeId(xNodeId)); expect(await claimsUtils.verifyClaimSignature(claim, xPublicKey)).toBe( true, ); - expect(signatureNodeIds).toContain(yNodeId); + expect(signatureNodeIds).toContain(nodesUtils.encodeNodeId(yNodeId)); expect(await claimsUtils.verifyClaimSignature(claim, yPublicKey)).toBe( true, ); } }); + test('can request chain data', async () => { + // Cross signing claims + await y.nodeManager.claimNode(xNodeId); + + const nodeManager = new NodeManager({ + db, + sigchain, + keyManager, + nodeGraph, + 
nodeConnectionManager, + logger, + }); + + await nodeGraph.setNode(xNodeId, xNodeAddress); + + // We want to get the public key of the server + const chainData = JSON.stringify( + await nodeManager.requestChainData(xNodeId), + ); + expect(chainData).toContain(nodesUtils.encodeNodeId(xNodeId)); + expect(chainData).toContain(nodesUtils.encodeNodeId(yNodeId)); + }); }); }); diff --git a/tests/nodes/TestNodeConnection.ts b/tests/nodes/TestNodeConnection.ts index 953c0d106..dd42788e3 100644 --- a/tests/nodes/TestNodeConnection.ts +++ b/tests/nodes/TestNodeConnection.ts @@ -1,53 +1,43 @@ import type { PublicKeyPem } from '@/keys/types'; import type { AbstractConstructorParameters } from '@/types'; -import type { NodeId } from '@/nodes/types'; -import type { Host, Port, ProxyConfig } from '@/network/types'; -import type { ForwardProxy } from '@/network'; -import type { KeyManager } from '@/keys'; +import type { Host, Port } from '@/network/types'; +import type ForwardProxy from '@/network/ForwardProxy'; +import type GRPCClientAgent from '@/agent/GRPCClientAgent'; import Logger from '@matrixai/logger'; -import { NodeConnection } from '@/nodes'; +import NodeConnection from '@/nodes/NodeConnection'; /** * A dummy NodeConnection object. Currently used for when a connection isn't * required to be established, but we are required to get the public key from * the other node. */ -class TestNodeConnection extends NodeConnection { +class TestNodeConnection extends NodeConnection { protected publicKey: PublicKeyPem | null; static async createTestNodeConnection({ publicKey, - targetNodeId, targetHost, targetPort, - forwardProxy, - keyManager, + fwdProxy, + destroyCallback, logger, }: { publicKey: PublicKeyPem | null; - targetNodeId: NodeId; targetHost: Host; targetPort: Port; - forwardProxy: ForwardProxy; - keyManager: KeyManager; + fwdProxy: ForwardProxy; + destroyCallback: () => Promise; logger?: Logger; }): Promise { const logger_ = logger ?? 
new Logger('NodeConnection'); - const proxyConfig_ = { - host: forwardProxy.getProxyHost(), - port: forwardProxy.getProxyPort(), - authToken: forwardProxy.authToken, - } as ProxyConfig; return new TestNodeConnection({ publicKey, - forwardProxy, - keyManager, logger: logger_, - targetHost, - targetNodeId, - targetPort, - proxyConfig: proxyConfig_, + host: targetHost, + port: targetPort, + destroyCallback, + fwdProxy, }); } diff --git a/tests/nodes/utils.test.ts b/tests/nodes/utils.test.ts index b596fcbed..ee1aeadc4 100644 --- a/tests/nodes/utils.test.ts +++ b/tests/nodes/utils.test.ts @@ -1,6 +1,6 @@ import type { NodeId } from '@/nodes/types'; import { IdInternal } from '@matrixai/id'; -import { utils as nodesUtils } from '@/nodes'; +import * as nodesUtils from '@/nodes/utils'; describe('Nodes utils', () => { test('basic distance calculation', async () => { @@ -27,7 +27,7 @@ describe('Nodes utils', () => { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]); - const bucketIndex = nodesUtils.calculateBucketIndex(nodeId1, nodeId2, 256); + const bucketIndex = nodesUtils.calculateBucketIndex(nodeId1, nodeId2); expect(bucketIndex).toBe(0); }); test('calculates correct arbitrary bucket (bucket 63)', async () => { @@ -39,7 +39,7 @@ describe('Nodes utils', () => { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]); - const bucketIndex = nodesUtils.calculateBucketIndex(nodeId1, nodeId2, 256); + const bucketIndex = nodesUtils.calculateBucketIndex(nodeId1, nodeId2); expect(bucketIndex).toBe(63); }); test('calculates correct last bucket (bucket 255)', async () => { @@ -51,7 +51,7 @@ describe('Nodes utils', () => { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]); - const bucketIndex = nodesUtils.calculateBucketIndex(nodeId1, nodeId2, 256); + const bucketIndex = nodesUtils.calculateBucketIndex(nodeId1, nodeId2); 
expect(bucketIndex).toBe(255); }); }); diff --git a/tests/nodes/utils.ts b/tests/nodes/utils.ts index d535cd116..74b8a6f67 100644 --- a/tests/nodes/utils.ts +++ b/tests/nodes/utils.ts @@ -1,83 +1,48 @@ import type { NodeId, NodeAddress } from '@/nodes/types'; -import type { PolykeyAgent } from '@'; +import type PolykeyAgent from '@/PolykeyAgent'; import { IdInternal } from '@matrixai/id'; +import { bigInt2Bytes } from '@/utils'; /** - * Generates a node ID that, according to Kademlia, will be placed into 'nodeId's - * bucket at 'bucketIndex'. - * Recall that a bucket index is chosen based on: - * 2^i <= distance (from current node) < 2^(i+1) - * Therefore, generatedNodeId = 2^i XOR nodeId. + * Generate a deterministic NodeId for a specific bucket given an existing NodeId + * This requires solving the bucket index (`i`) and distance equation: + * `2^i <= distance < 2^(i+1)` + * Where `distance` is: `New NodeId XOR Given NodeId` + * The XOR operation `a XOR b = c` means `a XOR c = b` and `b XOR c = a` + * The new NodeId that starts with a bucket offset of 0 would be: + * `New NodeId = 2^i XOR Given NodeId` + * To get the next NodeId within the same bucket, increment the `bucketOffset` + * The `bucketOffset` is limited by the size of each bucket `2^(i+1) - 2^i` + * @param nodeId NodeId that distance is measured from + * @param bucketIndex Desired bucket index for new NodeId + * @param bucketOffset Offset position for new NodeId from the bucket index */ -function generateNodeIdForBucket(nodeId: NodeId, bucketIndex: number): NodeId { +function generateNodeIdForBucket( + nodeId: NodeId, + bucketIndex: number, + bucketOffset: number = 0, +): NodeId { const lowerBoundDistance = BigInt(2) ** BigInt(bucketIndex); - const bufferId = Buffer.from(nodeId.toBuffer()); - // Console.log(bufferId); - const bufferDistance = bigIntToBuffer(lowerBoundDistance); - // Console.log(bufferDistance); - // Console.log('Distance buffer:', bufferDistance); - // console.log('Node ID buffer:', 
bufferId); - - const max = Math.max(bufferId.length, bufferDistance.length); - // Reverse the buffers such that we XOR from right to left - bufferId.reverse(); - bufferDistance.reverse(); - const newIdArray = Buffer.alloc(max); - - // XOR the 'rightmost' bytes first - for (let i = 0; i < bufferId.length && i < bufferDistance.length; i++) { - newIdArray[i] = bufferId[i] ^ bufferDistance[i]; - } - // If distance buffer is longer, append its bytes - for (let i = bufferId.length; i < bufferDistance.length; i++) { - newIdArray[i] = bufferDistance[i]; - } - // If node ID buffer is longer, append its bytes - for (let i = bufferDistance.length; i < bufferId.length; i++) { - newIdArray[i] = bufferId[i]; - } - - // Reverse the XORed array back to normal - newIdArray.reverse(); - // Convert to an ASCII string - return IdInternal.fromBuffer(newIdArray); -} - -/** - * Increases the passed node ID's last character code by 1. - * If used in conjunction with calculateNodeIdForBucket, can produce multiple - * node IDs that will appear in the same bucket. - * NOTE: For node IDs appearing in lower-indexed buckets (i.e. bucket indexes - * roughly around 0-4), this will occasionally cause the node ID to overflow - * into the next bucket instead. For safety, ensure this function is used for - * nodes appearing in larger-indexed buckets. - */ -function incrementNodeId(nodeId: NodeId): NodeId { - const nodeIdArray = Buffer.from(nodeId.toBuffer()); - const lastCharIndex = nodeIdArray.length - 1; - nodeIdArray[lastCharIndex] = nodeIdArray[lastCharIndex] + 1; - return IdInternal.fromBuffer(nodeIdArray); -} - -/** - * Converts a BigInt to a hex buffer. 
- */ -function bigIntToBuffer(number: BigInt) { - let hex = number.toString(16); - if (hex.length % 2) { - hex = '0' + hex; - } - const len = hex.length / 2; - const u8 = new Uint8Array(len); - let i = 0; - let j = 0; - while (i < len) { - u8[i] = parseInt(hex.slice(j, j + 2), 16); - i += 1; - j += 2; + const upperBoundDistance = BigInt(2) ** BigInt(bucketIndex + 1); + if (bucketOffset >= upperBoundDistance - lowerBoundDistance) { + throw new RangeError('bucketOffset is beyond bucket size'); } - return u8; + // Offset position within the bucket + const distance = bigInt2Bytes( + lowerBoundDistance + BigInt(bucketOffset), + nodeId.byteLength, + ); + // XOR the nodeIdBuffer with distance + const nodeIdBufferNew = nodeId.map((byte, i) => { + return byte ^ distance[i]; + }); + // Zero-copy the new NodeId + return IdInternal.create( + nodeIdBufferNew, + nodeIdBufferNew.byteOffset, + nodeIdBufferNew.byteLength, + ); } /** @@ -85,15 +50,15 @@ function bigIntToBuffer(number: BigInt) { */ async function nodesConnect(localNode: PolykeyAgent, remoteNode: PolykeyAgent) { // Add remote node's details to local node - await localNode.nodeManager.setNode(remoteNode.nodeManager.getNodeId(), { + await localNode.nodeManager.setNode(remoteNode.keyManager.getNodeId(), { host: remoteNode.revProxy.getIngressHost(), port: remoteNode.revProxy.getIngressPort(), } as NodeAddress); // Add local node's details to remote node - await remoteNode.nodeManager.setNode(localNode.nodeManager.getNodeId(), { + await remoteNode.nodeManager.setNode(localNode.keyManager.getNodeId(), { host: localNode.revProxy.getIngressHost(), port: localNode.revProxy.getIngressPort(), } as NodeAddress); } -export { generateNodeIdForBucket, incrementNodeId, nodesConnect }; +export { generateNodeIdForBucket, nodesConnect }; diff --git a/tests/notifications/NotificationsManager.test.ts b/tests/notifications/NotificationsManager.test.ts index 8952e9270..a1e23b564 100644 --- a/tests/notifications/NotificationsManager.test.ts 
+++ b/tests/notifications/NotificationsManager.test.ts @@ -15,7 +15,7 @@ import { GRPCServer } from '@/grpc'; import { KeyManager, utils as keysUtils } from '@/keys'; import { VaultManager } from '@/vaults'; import { GestaltGraph } from '@/gestalts'; -import { NodeManager } from '@/nodes'; +import { NodeConnectionManager, NodeGraph, NodeManager } from '@/nodes'; import { NotificationsManager } from '@/notifications'; import { ForwardProxy, ReverseProxy } from '@/network'; import { AgentServiceService, createAgentService } from '@/agent'; @@ -40,15 +40,17 @@ describe('NotificationsManager', () => { new StreamHandler(), ]); const authToken = 'AUTH'; - let fwdProxy: ForwardProxy; - let revProxy: ReverseProxy; + let senderFwdProxy: ForwardProxy; + let receiverRevProxy: ReverseProxy; let fwdTLSConfig: TLSConfig; let keysDataDir: string; let receiverDataDir: string; let receiverKeyManager: KeyManager; let receiverVaultManager: VaultManager; + let receiverNodeGraph: NodeGraph; let receiverNodeManager: NodeManager; + let receiverNodeConnectionManager: NodeConnectionManager; let receiverSigchain: Sigchain; let receiverACL: ACL; let receiverGestaltGraph: GestaltGraph; @@ -60,6 +62,8 @@ describe('NotificationsManager', () => { let senderDb: DB; let senderACL: ACL; let senderSigchain: Sigchain; + let senderNodeGraph: NodeGraph; + let senderNodeConnectionManager: NodeConnectionManager; let senderNodeManager: NodeManager; let senderNodeId: NodeId, receiverNodeId: NodeId; @@ -113,14 +117,6 @@ describe('NotificationsManager', () => { certChainPem: receiverCertPem, }; - fwdProxy = new ForwardProxy({ - authToken: authToken, - logger: logger, - }); - revProxy = new ReverseProxy({ - logger: logger, - }); - // Server setup const receiverVaultsPath = path.join(receiverDataDir, 'receiverVaults'); const receiverDbPath = path.join(receiverDataDir, 'receiverDb'); @@ -156,18 +152,34 @@ describe('NotificationsManager', () => { authToken: '', logger: logger, }); - receiverNodeManager = await 
NodeManager.createNodeManager({ + receiverRevProxy = new ReverseProxy({ + logger: logger, + }); + receiverNodeGraph = await NodeGraph.createNodeGraph({ db: receiverDb, - sigchain: receiverSigchain, keyManager: receiverKeyManager, + logger: logger, + }); + receiverNodeConnectionManager = new NodeConnectionManager({ + keyManager: receiverKeyManager, + nodeGraph: receiverNodeGraph, fwdProxy: receiverFwdProxy, - revProxy: revProxy, + revProxy: receiverRevProxy, + logger, + }); + await receiverNodeConnectionManager.start(); + receiverNodeManager = new NodeManager({ + db: receiverDb, + sigchain: receiverSigchain, + keyManager: receiverKeyManager, + nodeGraph: receiverNodeGraph, + nodeConnectionManager: receiverNodeConnectionManager, logger: logger, }); receiverVaultManager = await VaultManager.createVaultManager({ keyManager: receiverKeyManager, vaultsPath: receiverVaultsPath, - nodeManager: receiverNodeManager, + nodeConnectionManager: receiverNodeConnectionManager, vaultsKey: receiverKeyManager.vaultKey, db: receiverDb, acl: receiverACL, @@ -179,6 +191,7 @@ describe('NotificationsManager', () => { await NotificationsManager.createNotificationsManager({ acl: receiverACL, db: receiverDb, + nodeConnectionManager: receiverNodeConnectionManager, nodeManager: receiverNodeManager, keyManager: receiverKeyManager, messageCap: 5, @@ -186,13 +199,14 @@ describe('NotificationsManager', () => { }); receiverNodeId = keysUtils.certNodeId(receiverKeyManager.getRootCert())!; await receiverGestaltGraph.setNode(node); - await receiverNodeManager.start(); agentService = createAgentService({ keyManager: receiverKeyManager, vaultManager: receiverVaultManager, nodeManager: receiverNodeManager, + nodeGraph: receiverNodeGraph, sigchain: receiverSigchain, + nodeConnectionManager: receiverNodeConnectionManager, notificationsManager: receiverNotificationsManager, }); agentServer = new GRPCServer({ @@ -203,13 +217,13 @@ describe('NotificationsManager', () => { host: receiverHost, }); - await 
revProxy.start({ + await receiverRevProxy.start({ serverHost: receiverHost, serverPort: agentServer.port, ingressHost: receiverHost, tlsConfig: revTLSConfig, }); - receiverIngressPort = revProxy.getIngressPort(); + receiverIngressPort = receiverRevProxy.getIngressPort(); }, global.polykeyStartupTimeout * 2); beforeEach(async () => { @@ -223,6 +237,10 @@ describe('NotificationsManager', () => { const senderRevProxy = new ReverseProxy({ logger: logger, }); + senderFwdProxy = new ForwardProxy({ + authToken: authToken, + logger: logger, + }); senderDb = await DB.createDB({ dbPath: senderDbPath, fs, @@ -241,25 +259,37 @@ describe('NotificationsManager', () => { db: senderDb, logger, }); - senderNodeManager = await NodeManager.createNodeManager({ + senderNodeGraph = await NodeGraph.createNodeGraph({ db: senderDb, - sigchain: senderSigchain, keyManager: senderKeyManager, - fwdProxy, + logger, + }); + senderNodeConnectionManager = new NodeConnectionManager({ + keyManager: senderKeyManager, + nodeGraph: senderNodeGraph, + fwdProxy: senderFwdProxy, revProxy: senderRevProxy, logger, }); + await senderNodeConnectionManager.start(); + senderNodeManager = new NodeManager({ + db: senderDb, + sigchain: senderSigchain, + keyManager: senderKeyManager, + nodeGraph: senderNodeGraph, + nodeConnectionManager: senderNodeConnectionManager, + logger, + }); await senderACL.stop(); - await fwdProxy.start({ + await senderFwdProxy.start({ tlsConfig: fwdTLSConfig, proxyHost: senderHost, // ProxyPort: senderPort, egressHost: senderHost, // EgressPort: senderPort, }); - await senderNodeManager.start(); - await senderNodeManager.setNode(receiverNodeId, { + await senderNodeGraph.setNode(receiverNodeId, { host: receiverHost, port: receiverIngressPort, } as NodeAddress); @@ -269,9 +299,10 @@ describe('NotificationsManager', () => { }, global.polykeyStartupTimeout * 2); afterEach(async () => { - await senderNodeManager.stop(); + await senderNodeConnectionManager.stop(); + await senderNodeGraph.stop(); 
await senderACL.stop(); - await fwdProxy.stop(); + await senderFwdProxy.stop(); await senderDb.stop(); await fs.promises.rm(senderDataDir, { force: true, @@ -285,10 +316,11 @@ describe('NotificationsManager', () => { await receiverSigchain.stop(); await receiverGestaltGraph.stop(); await receiverVaultManager.stop(); - await receiverNodeManager.stop(); + await receiverNodeConnectionManager.stop(); + await receiverNodeGraph.stop(); await receiverNotificationsManager.stop(); await agentServer.stop(); - await revProxy.stop(); + await receiverRevProxy.stop(); await receiverKeyManager.stop(); await receiverDb.stop(); await fs.promises.rm(receiverDataDir, { @@ -302,6 +334,7 @@ describe('NotificationsManager', () => { await NotificationsManager.createNotificationsManager({ acl: senderACL, db: senderDb, + nodeConnectionManager: senderNodeConnectionManager, nodeManager: senderNodeManager, keyManager: senderKeyManager, logger, @@ -338,6 +371,7 @@ describe('NotificationsManager', () => { await NotificationsManager.createNotificationsManager({ acl: senderACL, db: senderDb, + nodeConnectionManager: senderNodeConnectionManager, nodeManager: senderNodeManager, keyManager: senderKeyManager, logger, @@ -370,6 +404,7 @@ describe('NotificationsManager', () => { await NotificationsManager.createNotificationsManager({ acl: senderACL, db: senderDb, + nodeConnectionManager: senderNodeConnectionManager, nodeManager: senderNodeManager, keyManager: senderKeyManager, logger, @@ -399,6 +434,7 @@ describe('NotificationsManager', () => { await NotificationsManager.createNotificationsManager({ acl: senderACL, db: senderDb, + nodeConnectionManager: senderNodeConnectionManager, nodeManager: senderNodeManager, keyManager: senderKeyManager, logger, @@ -448,6 +484,7 @@ describe('NotificationsManager', () => { await NotificationsManager.createNotificationsManager({ acl: senderACL, db: senderDb, + nodeConnectionManager: senderNodeConnectionManager, nodeManager: senderNodeManager, keyManager: 
senderKeyManager, logger, @@ -499,6 +536,7 @@ describe('NotificationsManager', () => { await NotificationsManager.createNotificationsManager({ acl: senderACL, db: senderDb, + nodeConnectionManager: senderNodeConnectionManager, nodeManager: senderNodeManager, keyManager: senderKeyManager, logger, @@ -519,7 +557,7 @@ describe('NotificationsManager', () => { ); const notifs = await receiverNotificationsManager.readNotifications(); expect(notifs[0].data).toEqual(notificationData); - expect(notifs[0].senderId).toEqual(senderNodeId); + expect(notifs[0].senderId).toEqual(nodesUtils.encodeNodeId(senderNodeId)); expect(notifs[0].isRead).toBeTruthy(); await senderNotificationsManager.stop(); @@ -530,6 +568,7 @@ describe('NotificationsManager', () => { await NotificationsManager.createNotificationsManager({ acl: senderACL, db: senderDb, + nodeConnectionManager: senderNodeConnectionManager, nodeManager: senderNodeManager, keyManager: senderKeyManager, logger, @@ -558,7 +597,7 @@ describe('NotificationsManager', () => { ); const notifs = await receiverNotificationsManager.readNotifications(); expect(notifs[0].data).toEqual(notificationData); - expect(notifs[0].senderId).toEqual(senderNodeId); + expect(notifs[0].senderId).toEqual(nodesUtils.encodeNodeId(senderNodeId)); expect(notifs[0].isRead).toBeTruthy(); await senderNotificationsManager.stop(); diff --git a/tests/utils.ts b/tests/utils.ts index 8b3c5b55b..59d9ff205 100644 --- a/tests/utils.ts +++ b/tests/utils.ts @@ -172,7 +172,6 @@ async function setupGlobalAgent( } function generateRandomNodeId(): NodeId { - // Make it easy to read with an identifier const random = keysUtils.getRandomBytesSync(16).toString('hex'); return IdInternal.fromString(random); } diff --git a/tests/vaults/VaultManager.test.ts b/tests/vaults/VaultManager.test.ts index 65df7e69f..467f04526 100644 --- a/tests/vaults/VaultManager.test.ts +++ b/tests/vaults/VaultManager.test.ts @@ -11,7 +11,7 @@ import { DB } from '@matrixai/db'; import { utils as idUtils 
} from '@matrixai/id'; import { KeyManager, utils as keysUtils } from '@/keys'; -import { NodeManager } from '@/nodes'; +import { NodeConnectionManager, NodeGraph, NodeManager } from '@/nodes'; import { Sigchain } from '@/sigchain'; import { VaultManager, vaultOps } from '@/vaults'; import { ACL } from '@/acl'; @@ -39,6 +39,8 @@ describe('VaultManager', () => { let db: DB; let acl: ACL; let gestaltGraph: GestaltGraph; + let nodeGraph: NodeGraph; + let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let vaultManager: VaultManager; let sigchain: Sigchain; @@ -125,16 +127,27 @@ describe('VaultManager', () => { db: db, logger: logger, }); - - nodeManager = await NodeManager.createNodeManager({ + nodeGraph = await NodeGraph.createNodeGraph({ db: db, - sigchain: sigchain, keyManager: keyManager, + logger: logger, + }); + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, fwdProxy: fwdProxy, revProxy: revProxy, logger: logger, }); - await nodeManager.start(); + await nodeConnectionManager.start(); + nodeManager = new NodeManager({ + db: db, + sigchain: sigchain, + keyManager: keyManager, + nodeGraph: nodeGraph, + nodeConnectionManager: nodeConnectionManager, + logger: logger, + }); acl = await ACL.createACL({ db: db, @@ -151,7 +164,7 @@ describe('VaultManager', () => { keyManager: keyManager, vaultsPath, vaultsKey, - nodeManager, + nodeConnectionManager, db, acl: acl, gestaltGraph: gestaltGraph, @@ -165,7 +178,8 @@ describe('VaultManager', () => { await gestaltGraph.stop(); await acl.stop(); await db.stop(); - await nodeManager.stop(); + await nodeConnectionManager.stop(); + await nodeGraph.stop(); await keyManager.stop(); await fs.promises.rm(dataDir, { force: true, @@ -317,7 +331,7 @@ describe('VaultManager', () => { keyManager: keyManager, vaultsPath, vaultsKey, - nodeManager, + nodeConnectionManager, gestaltGraph, acl, db, @@ -370,7 +384,7 @@ describe('VaultManager', () => { keyManager: keyManager, vaultsPath, 
vaultsKey, - nodeManager, + nodeConnectionManager, db, acl: acl, gestaltGraph: gestaltGraph, @@ -437,7 +451,7 @@ describe('VaultManager', () => { keyManager: keyManager, vaultsPath, vaultsKey, - nodeManager, + nodeConnectionManager, db, acl: acl, gestaltGraph: gestaltGraph, @@ -506,7 +520,7 @@ describe('VaultManager', () => { // nodes: {}, // } as ChainData, // }; - + // // await gestaltGraph.setNode(node1); // await gestaltGraph.setNode(node2); // await gestaltGraph.setNode(node3); @@ -516,7 +530,7 @@ describe('VaultManager', () => { // await gestaltGraph.linkNodeAndNode(node1, node2); // await gestaltGraph.linkNodeAndIdentity(node1, id1); // await gestaltGraph.linkNodeAndIdentity(node4, id2); - + // // await vaultManager.start({}); // const vault = await vaultManager.createVault('Test'); // await vaultManager.setVaultPermissions('123' as NodeId, vault.vaultId); @@ -526,20 +540,20 @@ describe('VaultManager', () => { // expect(record['345']['pull']).toBeNull(); // expect(record['678']).toBeUndefined(); // expect(record['890']).toBeUndefined(); - + // // await vaultManager.unsetVaultPermissions('345' as NodeId, vault.vaultId); // record = await vaultManager.getVaultPermissions(vault.vaultId); // expect(record).not.toBeUndefined(); // expect(record['123']['pull']).toBeUndefined(); // expect(record['345']['pull']).toBeUndefined(); - + // // await gestaltGraph.unlinkNodeAndNode(node1.id, node2.id); // await vaultManager.setVaultPermissions('345' as NodeId, vault.vaultId); // record = await vaultManager.getVaultPermissions(vault.vaultId); // expect(record).not.toBeUndefined(); // expect(record['123']['pull']).toBeUndefined(); // expect(record['345']['pull']).toBeNull(); - + // // await vaultManager.stop(); // }); // /* TESTING TODO: @@ -552,6 +566,9 @@ describe('VaultManager', () => { let targetDb: DB, altDb: DB; let targetACL: ACL, altACL: ACL; let targetGestaltGraph: GestaltGraph, altGestaltGraph: GestaltGraph; + let targetNodeGraph: NodeGraph, altNodeGraph: 
NodeGraph; + let targetNodeConnectionManager: NodeConnectionManager, + altNodeConnectionManager: NodeConnectionManager; let targetNodeManager: NodeManager, altNodeManager: NodeManager; let targetVaultManager: VaultManager, altVaultManager: VaultManager; let targetSigchain: Sigchain, altSigchain: Sigchain; @@ -592,7 +609,7 @@ describe('VaultManager', () => { certChainPem: await targetKeyManager.getRootCertChainPem(), }; node = { - id: nodesUtils.encodeNodeId(nodeManager.getNodeId()), + id: nodesUtils.encodeNodeId(keyManager.getNodeId()), chain: { nodes: {}, identities: {} } as ChainData, }; targetFwdProxy = new ForwardProxy({ @@ -615,15 +632,26 @@ describe('VaultManager', () => { db: targetDb, logger: logger, }); - targetNodeManager = await NodeManager.createNodeManager({ + targetNodeGraph = await NodeGraph.createNodeGraph({ db: targetDb, - sigchain: targetSigchain, keyManager: targetKeyManager, + logger: logger, + }); + targetNodeConnectionManager = new NodeConnectionManager({ + keyManager: targetKeyManager, + nodeGraph: targetNodeGraph, fwdProxy: targetFwdProxy, revProxy: revProxy, logger: logger, }); - await targetNodeManager.start(); + targetNodeManager = new NodeManager({ + db: targetDb, + sigchain: targetSigchain, + keyManager: targetKeyManager, + nodeGraph: nodeGraph, + nodeConnectionManager: targetNodeConnectionManager, + logger: logger, + }); targetACL = await ACL.createACL({ db: targetDb, logger: logger, @@ -632,6 +660,7 @@ describe('VaultManager', () => { await NotificationsManager.createNotificationsManager({ acl: targetACL, db: targetDb, + nodeConnectionManager: targetNodeConnectionManager, nodeManager: targetNodeManager, keyManager: targetKeyManager, messageCap: 5, @@ -648,7 +677,7 @@ describe('VaultManager', () => { keyManager: keyManager, vaultsPath: path.join(targetDataDir, 'vaults'), vaultsKey: targetVaultKey, - nodeManager: targetNodeManager, + nodeConnectionManager: targetNodeConnectionManager, db: targetDb, acl: targetACL, gestaltGraph: 
targetGestaltGraph, @@ -659,8 +688,10 @@ describe('VaultManager', () => { keyManager: targetKeyManager, vaultManager: targetVaultManager, nodeManager: targetNodeManager, + nodeGraph: targetNodeGraph, sigchain: targetSigchain, notificationsManager: targetNotificationsManager, + nodeConnectionManager, }); targetAgentServer = new GRPCServer({ logger: logger, @@ -712,15 +743,27 @@ describe('VaultManager', () => { db: altDb, logger: logger, }); - altNodeManager = await NodeManager.createNodeManager({ + altNodeGraph = await NodeGraph.createNodeGraph({ db: altDb, - sigchain: altSigchain, keyManager: altKeyManager, + logger: logger, + }); + altNodeConnectionManager = new NodeConnectionManager({ + keyManager: altKeyManager, + nodeGraph: altNodeGraph, fwdProxy: altFwdProxy, revProxy: altRevProxy, logger: logger, }); - await altNodeManager.start(); + await altNodeConnectionManager.start(); + altNodeManager = new NodeManager({ + db: altDb, + sigchain: altSigchain, + keyManager: altKeyManager, + nodeGraph: nodeGraph, + nodeConnectionManager: altNodeConnectionManager, + logger: logger, + }); altACL = await ACL.createACL({ db: altDb, logger: logger, @@ -729,6 +772,7 @@ describe('VaultManager', () => { await NotificationsManager.createNotificationsManager({ acl: altACL, db: altDb, + nodeConnectionManager: altNodeConnectionManager, nodeManager: altNodeManager, keyManager: altKeyManager, messageCap: 5, @@ -745,7 +789,7 @@ describe('VaultManager', () => { keyManager: keyManager, vaultsPath: path.join(altDataDir, 'vaults'), vaultsKey: altVaultKey, - nodeManager: altNodeManager, + nodeConnectionManager: altNodeConnectionManager, db: altDb, acl: altACL, gestaltGraph: altGestaltGraph, @@ -755,8 +799,10 @@ describe('VaultManager', () => { keyManager: altKeyManager, vaultManager: altVaultManager, nodeManager: altNodeManager, + nodeGraph: altNodeGraph, sigchain: altSigchain, notificationsManager: altNotificationsManager, + nodeConnectionManager, }); altAgentServer = new GRPCServer({ logger: 
logger, @@ -803,7 +849,8 @@ describe('VaultManager', () => { await targetNotificationsManager.stop(); await targetACL.stop(); await targetDb.stop(); - await targetNodeManager.stop(); + await targetNodeConnectionManager.stop(); + await targetNodeGraph.stop(); await targetKeyManager.stop(); await fs.promises.rm(targetDataDir, { force: true, @@ -815,7 +862,8 @@ describe('VaultManager', () => { await altNotificationsManager.stop(); await altACL.stop(); await altDb.stop(); - await altNodeManager.stop(); + await altNodeConnectionManager.stop(); + await altNodeGraph.stop(); await altKeyManager.stop(); await fs.promises.rm(altDataDir, { force: true, @@ -848,7 +896,7 @@ describe('VaultManager', () => { host: targetHost, port: targetPort, } as NodeAddress); - await nodeManager.getConnectionToNode(targetNodeId); + await nodeConnectionManager.withConnF(targetNodeId, async () => {}); await revProxy.openConnection(sourceHost, sourcePort); await vaultManager.cloneVault(targetNodeId, vault.vaultId); const vaultId = await vaultManager.getVaultId(vaultName); @@ -888,6 +936,7 @@ describe('VaultManager', () => { }, global.defaultTimeout * 2, ); + // TODO: what is this? do we need it? // Test( // 'reject clone and pull ops when permissions are not set', // async () => {